author     Christian König <christian.koenig@amd.com>   2014-08-27 09:21:58 -0400
committer  Alex Deucher <alexander.deucher@amd.com>     2014-08-27 17:42:11 -0400
commit     0bfa4b41268ad5fd741f16f484e4fee190822ec6
tree       670093f179cd39a4dcf555a85074235b95bbea99
parent     9bb39ff43e15e85bc1bd9bbbdc5b9cef7a670fd5

drm/radeon: handle lockup in delayed work, v5

v5 (chk): complete rework, start when the first fence is emitted,
stop when the last fence is signalled, make it work correctly with
GPU resets, cleanup radeon_fence_wait_seq

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
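For readers who want the shape of the mechanism in isolation, here is a
minimal sketch of the self-arming watchdog pattern the patch introduces.
The names my_driver, my_schedule_check, my_check_lockup and
HANG_CHECK_JIFFIES are hypothetical, not from the patch; only the
workqueue calls (INIT_DELAYED_WORK, queue_delayed_work, container_of)
mirror what the real code uses:

    #include <linux/workqueue.h>
    #include <linux/atomic.h>
    #include <linux/jiffies.h>
    #include <linux/types.h>

    #define HANG_CHECK_JIFFIES (HZ / 2)    /* hypothetical interval */

    struct my_driver {
            struct delayed_work     lockup_work;
            atomic64_t              last_seq;  /* last fence signalled */
            u64                     sync_seq;  /* last fence emitted */
    };

    static void my_schedule_check(struct my_driver *drv)
    {
            /* queue_delayed_work() is a no-op while the item is still
             * pending; mod_delayed_work() would keep pushing the timer
             * out and can livelock against other delayed work (see the
             * TTM note in the patch below). */
            queue_delayed_work(system_power_efficient_wq,
                               &drv->lockup_work, HANG_CHECK_JIFFIES);
    }

    static void my_check_lockup(struct work_struct *work)
    {
            struct my_driver *drv = container_of(work, struct my_driver,
                                                 lockup_work.work);

            if (atomic64_read(&drv->last_seq) < drv->sync_seq) {
                    /* fences still outstanding: this is where the real
                     * code probes the ring for a lockup; then re-arm */
                    my_schedule_check(drv);
            }
            /* all fences signalled: don't re-queue, the watchdog
             * simply stops on its own */
    }

    static void my_init(struct my_driver *drv)
    {
            INIT_DELAYED_WORK(&drv->lockup_work, my_check_lockup);
            atomic64_set(&drv->last_seq, 0);
            drv->sync_seq = 0;
    }

The watchdog is first queued from the emit path (the real patch calls
radeon_fence_schedule_check() after each new fence) and torn down with
cancel_delayed_work_sync() on unload, exactly the hooks the diff adds.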
Diffstat (limited to 'drivers/gpu/drm/radeon')

 drivers/gpu/drm/radeon/radeon.h       |   2 +
 drivers/gpu/drm/radeon/radeon_fence.c | 200 ++++++++++++++++-----------
 2 files changed, 124 insertions(+), 78 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 8cd1b3f60d4a..74919ef57ac3 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -350,6 +350,7 @@ extern void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
  * Fences.
  */
 struct radeon_fence_driver {
+	struct radeon_device		*rdev;
 	uint32_t			scratch_reg;
 	uint64_t			gpu_addr;
 	volatile uint32_t		*cpu_addr;
@@ -357,6 +358,7 @@ struct radeon_fence_driver {
 	uint64_t			sync_seq[RADEON_NUM_RINGS];
 	atomic64_t			last_seq;
 	bool				initialized;
+	struct delayed_work		lockup_work;
 };
 
 struct radeon_fence {
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index e8a28e7b39c7..ac15f3418478 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -98,6 +98,25 @@ static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
 }
 
 /**
+ * radeon_fence_schedule_check - schedule lockup check
+ *
+ * @rdev: radeon_device pointer
+ * @ring: ring index we should work with
+ *
+ * Queues a delayed work item to check for lockups.
+ */
+static void radeon_fence_schedule_check(struct radeon_device *rdev, int ring)
+{
+	/*
+	 * Do not reset the timer here with mod_delayed_work,
+	 * this can livelock in an interaction with TTM delayed destroy.
+	 */
+	queue_delayed_work(system_power_efficient_wq,
+			   &rdev->fence_drv[ring].lockup_work,
+			   RADEON_FENCE_JIFFIES_TIMEOUT);
+}
+
+/**
  * radeon_fence_emit - emit a fence on the requested ring
  *
  * @rdev: radeon_device pointer
@@ -122,19 +141,21 @@ int radeon_fence_emit(struct radeon_device *rdev,
 	(*fence)->ring = ring;
 	radeon_fence_ring_emit(rdev, ring, *fence);
 	trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq);
+	radeon_fence_schedule_check(rdev, ring);
 	return 0;
 }
 
 /**
- * radeon_fence_process - process a fence
+ * radeon_fence_activity - check for fence activity
  *
  * @rdev: radeon_device pointer
  * @ring: ring index the fence is associated with
  *
- * Checks the current fence value and wakes the fence queue
- * if the sequence number has increased (all asics).
+ * Checks the current fence value and calculates the last
+ * signalled fence value. Returns true if activity occurred
+ * on the ring and the fence_queue should be woken up.
  */
-void radeon_fence_process(struct radeon_device *rdev, int ring)
+static bool radeon_fence_activity(struct radeon_device *rdev, int ring)
 {
 	uint64_t seq, last_seq, last_emitted;
 	unsigned count_loop = 0;
@@ -190,7 +211,67 @@ void radeon_fence_process(struct radeon_device *rdev, int ring)
 		}
 	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);
 
-	if (wake)
+	if (seq < last_emitted)
+		radeon_fence_schedule_check(rdev, ring);
+
+	return wake;
+}
+
+/**
+ * radeon_fence_check_lockup - check for hardware lockup
+ *
+ * @work: delayed work item
+ *
+ * Checks for fence activity and, if there is none, probes
+ * the hardware to see whether a lockup occurred.
+ */
+static void radeon_fence_check_lockup(struct work_struct *work)
+{
+	struct radeon_fence_driver *fence_drv;
+	struct radeon_device *rdev;
+	int ring;
+
+	fence_drv = container_of(work, struct radeon_fence_driver,
+				 lockup_work.work);
+	rdev = fence_drv->rdev;
+	ring = fence_drv - &rdev->fence_drv[0];
+
+	if (!down_read_trylock(&rdev->exclusive_lock)) {
+		/* just reschedule the check if a reset is going on */
+		radeon_fence_schedule_check(rdev, ring);
+		return;
+	}
+
+	if (radeon_fence_activity(rdev, ring)) {
+		wake_up_all(&rdev->fence_queue);
+
+	} else if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
+
+		/* good news we believe it's a lockup */
+		dev_warn(rdev->dev, "GPU lockup (current fence id "
+			 "0x%016llx last fence id 0x%016llx on ring %d)\n",
+			 (uint64_t)atomic64_read(&fence_drv->last_seq),
+			 fence_drv->sync_seq[ring], ring);
+
+		/* remember that we need a reset */
+		rdev->needs_reset = true;
+		wake_up_all(&rdev->fence_queue);
+	}
+	up_read(&rdev->exclusive_lock);
+}
+
+/**
+ * radeon_fence_process - process a fence
+ *
+ * @rdev: radeon_device pointer
+ * @ring: ring index the fence is associated with
+ *
+ * Checks the current fence value and wakes the fence queue
+ * if the sequence number has increased (all asics).
+ */
+void radeon_fence_process(struct radeon_device *rdev, int ring)
+{
+	if (radeon_fence_activity(rdev, ring))
 		wake_up_all(&rdev->fence_queue);
 }
 
@@ -300,86 +381,43 @@ static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
 static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
 				 bool intr)
 {
-	uint64_t last_seq[RADEON_NUM_RINGS];
-	bool signaled;
-	int i, r;
-
-	while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
-
-		/* Save current sequence values, used to check for GPU lockups */
-		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-			if (!target_seq[i])
-				continue;
-
-			last_seq[i] = atomic64_read(&rdev->fence_drv[i].last_seq);
-			trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]);
-			radeon_irq_kms_sw_irq_get(rdev, i);
-		}
-
-		if (intr) {
-			r = wait_event_interruptible_timeout(rdev->fence_queue, (
-				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
-				 || rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT);
-		} else {
-			r = wait_event_timeout(rdev->fence_queue, (
-				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
-				 || rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT);
-		}
-
-		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-			if (!target_seq[i])
-				continue;
-
-			radeon_irq_kms_sw_irq_put(rdev, i);
-			trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]);
-		}
-
-		if (unlikely(r < 0))
-			return r;
-
-		if (unlikely(!signaled)) {
-			if (rdev->needs_reset)
-				return -EDEADLK;
-
-			/* we were interrupted for some reason and fence
-			 * isn't signaled yet, resume waiting */
-			if (r)
-				continue;
-
-			for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-				if (!target_seq[i])
-					continue;
-
-				if (last_seq[i] != atomic64_read(&rdev->fence_drv[i].last_seq))
-					break;
-			}
-
-			if (i != RADEON_NUM_RINGS)
-				continue;
-
-			for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-				if (!target_seq[i])
-					continue;
-
-				if (radeon_ring_is_lockup(rdev, i, &rdev->ring[i]))
-					break;
-			}
-
-			if (i < RADEON_NUM_RINGS) {
-				/* good news we believe it's a lockup */
-				dev_warn(rdev->dev, "GPU lockup (waiting for "
-					 "0x%016llx last fence id 0x%016llx on"
-					 " ring %d)\n",
-					 target_seq[i], last_seq[i], i);
-
-				/* remember that we need an reset */
-				rdev->needs_reset = true;
-				wake_up_all(&rdev->fence_queue);
-				return -EDEADLK;
-			}
-		}
-	}
-	return 0;
+	long r;
+	int i;
+
+	if (radeon_fence_any_seq_signaled(rdev, target_seq))
+		return 0;
+
+	/* enable IRQs and tracing */
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		if (!target_seq[i])
+			continue;
+
+		trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]);
+		radeon_irq_kms_sw_irq_get(rdev, i);
+	}
+
+	if (intr) {
+		r = wait_event_interruptible_timeout(rdev->fence_queue, (
+			radeon_fence_any_seq_signaled(rdev, target_seq)
+			 || rdev->needs_reset), MAX_SCHEDULE_TIMEOUT);
+	} else {
+		r = wait_event_timeout(rdev->fence_queue, (
+			radeon_fence_any_seq_signaled(rdev, target_seq)
+			 || rdev->needs_reset), MAX_SCHEDULE_TIMEOUT);
+	}
+
+	if (rdev->needs_reset)
+		r = -EDEADLK;
+
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		if (!target_seq[i])
+			continue;
+
+		radeon_irq_kms_sw_irq_put(rdev, i);
+		trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]);
+	}
+
+	return r < 0 ? r : 0;
 }
 
 /**
@@ -711,6 +749,9 @@ static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
 		rdev->fence_drv[ring].sync_seq[i] = 0;
 	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
 	rdev->fence_drv[ring].initialized = false;
+	INIT_DELAYED_WORK(&rdev->fence_drv[ring].lockup_work,
+			  radeon_fence_check_lockup);
+	rdev->fence_drv[ring].rdev = rdev;
 }
 
 /**
@@ -760,6 +801,7 @@ void radeon_fence_driver_fini(struct radeon_device *rdev)
 		/* no need to trigger GPU reset as we are unloading */
 		radeon_fence_driver_force_completion(rdev, ring);
 	}
+	cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
 	wake_up_all(&rdev->fence_queue);
 	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
 	rdev->fence_drv[ring].initialized = false;
@@ -778,8 +820,10 @@ void radeon_fence_driver_fini(struct radeon_device *rdev)
  */
 void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring)
 {
-	if (rdev->fence_drv[ring].initialized)
+	if (rdev->fence_drv[ring].initialized) {
 		radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
+		cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
+	}
 }
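
A closing note on the wait path: with the watchdog owning lockup
detection, radeon_fence_wait_seq can sleep with MAX_SCHEDULE_TIMEOUT
instead of waking every RADEON_FENCE_JIFFIES_TIMEOUT to probe the rings
itself. The sketch below is not from the patch (my_wait_signaled and
its parameters are hypothetical stand-ins for fence_queue and
needs_reset); it only illustrates the wait_event_*_timeout() return
convention the rewritten function relies on:

    #include <linux/wait.h>
    #include <linux/sched.h>

    static long my_wait_signaled(wait_queue_head_t *queue, bool *done,
                                 bool *needs_reset, bool intr)
    {
            long r;

            if (intr)
                    r = wait_event_interruptible_timeout(*queue,
                                    *done || *needs_reset,
                                    MAX_SCHEDULE_TIMEOUT);
            else
                    r = wait_event_timeout(*queue, *done || *needs_reset,
                                           MAX_SCHEDULE_TIMEOUT);

            /*
             * r > 0  : condition became true (remaining jiffies)
             * r == 0 : timeout; unreachable with MAX_SCHEDULE_TIMEOUT,
             *          which makes the underlying schedule_timeout()
             *          sleep indefinitely
             * r < 0  : -ERESTARTSYS, a signal interrupted the
             *          interruptible variant
             */
            if (*needs_reset)
                    r = -EDEADLK;

            return r < 0 ? r : 0;
    }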