Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_fence.c')
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 347
1 file changed, 118 insertions(+), 229 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index ddb8f8e04eb5..b8f68b2c47d4 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -190,10 +190,8 @@ void radeon_fence_process(struct radeon_device *rdev, int ring)
 		}
 	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);
 
-	if (wake) {
-		rdev->fence_drv[ring].last_activity = jiffies;
+	if (wake)
 		wake_up_all(&rdev->fence_queue);
-	}
 }
 
 /**
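The retained context line above is radeon's monotonic publish idiom: radeon_fence_process() stores the newest sequence number with atomic64_xchg() and retries if a concurrent writer had already published a larger value. A self-contained userspace sketch of the same idiom, using C11 atomics instead of the kernel's atomic64 API (names here are invented for illustration):

/* Sketch only, not kernel code: publish a new sequence number with an
 * atomic exchange and, if we clobbered a larger value that another
 * thread stored first, swap that larger value back in. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t last_seq;

static void publish_seq(uint64_t seq)
{
	uint64_t prev = atomic_exchange(&last_seq, seq);

	while (prev > seq) {
		/* we overwrote a newer value; restore it and re-check */
		seq = prev;
		prev = atomic_exchange(&last_seq, seq);
	}
}

int main(void)
{
	publish_seq(5);
	publish_seq(3);	/* stale writer must not move last_seq backwards */
	printf("last_seq = %llu\n", (unsigned long long)atomic_load(&last_seq));
	return 0;
}

The exchange-and-retry shape means last_seq never moves backwards even when several callers race, which is exactly what lets waiters below compare snapshots of it to detect progress.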
@@ -212,13 +210,13 @@ static void radeon_fence_destroy(struct kref *kref)
 }
 
 /**
- * radeon_fence_seq_signaled - check if a fence sequeuce number has signaled
+ * radeon_fence_seq_signaled - check if a fence sequence number has signaled
  *
  * @rdev: radeon device pointer
 * @seq: sequence number
 * @ring: ring index the fence is associated with
 *
- * Check if the last singled fence sequnce number is >= the requested
+ * Check if the last signaled fence sequnce number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
@@ -263,113 +261,131 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
 }
 
 /**
- * radeon_fence_wait_seq - wait for a specific sequence number
+ * radeon_fence_any_seq_signaled - check if any sequence number is signaled
  *
  * @rdev: radeon device pointer
- * @target_seq: sequence number we want to wait for
- * @ring: ring index the fence is associated with
+ * @seq: sequence numbers
+ *
+ * Check if the last signaled fence sequnce number is >= the requested
+ * sequence number (all asics).
+ * Returns true if any has signaled (current value is >= requested value)
+ * or false if it has not. Helper function for radeon_fence_wait_seq.
+ */
+static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
+{
+	unsigned i;
+
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i))
+			return true;
+	}
+	return false;
+}
+
+/**
+ * radeon_fence_wait_seq - wait for a specific sequence numbers
+ *
+ * @rdev: radeon device pointer
+ * @target_seq: sequence number(s) we want to wait for
  * @intr: use interruptable sleep
  * @lock_ring: whether the ring should be locked or not
  *
- * Wait for the requested sequence number to be written (all asics).
+ * Wait for the requested sequence number(s) to be written by any ring
+ * (all asics). Sequnce number array is indexed by ring id.
  * @intr selects whether to use interruptable (true) or non-interruptable
  * (false) sleep when waiting for the sequence number. Helper function
- * for radeon_fence_wait(), et al.
+ * for radeon_fence_wait_*().
  * Returns 0 if the sequence number has passed, error for all other cases.
- * -EDEADLK is returned when a GPU lockup has been detected and the ring is
- * marked as not ready so no further jobs get scheduled until a successful
- * reset.
+ * -EDEADLK is returned when a GPU lockup has been detected.
  */
-static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
-				 unsigned ring, bool intr, bool lock_ring)
+static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
+				 bool intr, bool lock_ring)
 {
-	unsigned long timeout, last_activity;
-	uint64_t seq;
-	unsigned i;
+	uint64_t last_seq[RADEON_NUM_RINGS];
 	bool signaled;
-	int r;
+	int i, r;
+
+	while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
+
+		/* Save current sequence values, used to check for GPU lockups */
+		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+			if (!target_seq[i])
+				continue;
 
-	while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) {
-		if (!rdev->ring[ring].ready) {
-			return -EBUSY;
+			last_seq[i] = atomic64_read(&rdev->fence_drv[i].last_seq);
+			trace_radeon_fence_wait_begin(rdev->ddev, target_seq[i]);
+			radeon_irq_kms_sw_irq_get(rdev, i);
 		}
 
-		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
-		if (time_after(rdev->fence_drv[ring].last_activity, timeout)) {
-			/* the normal case, timeout is somewhere before last_activity */
-			timeout = rdev->fence_drv[ring].last_activity - timeout;
+		if (intr) {
+			r = wait_event_interruptible_timeout(rdev->fence_queue, (
+				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
+				 || rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT);
 		} else {
-			/* either jiffies wrapped around, or no fence was signaled in the last 500ms
-			 * anyway we will just wait for the minimum amount and then check for a lockup
-			 */
-			timeout = 1;
+			r = wait_event_timeout(rdev->fence_queue, (
+				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
+				 || rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT);
 		}
-		seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
-		/* Save current last activity valuee, used to check for GPU lockups */
-		last_activity = rdev->fence_drv[ring].last_activity;
 
-		trace_radeon_fence_wait_begin(rdev->ddev, seq);
-		radeon_irq_kms_sw_irq_get(rdev, ring);
-		if (intr) {
-			r = wait_event_interruptible_timeout(rdev->fence_queue,
-				(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
-				timeout);
-		} else {
-			r = wait_event_timeout(rdev->fence_queue,
-				(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
-				timeout);
+		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+			if (!target_seq[i])
+				continue;
+
+			radeon_irq_kms_sw_irq_put(rdev, i);
+			trace_radeon_fence_wait_end(rdev->ddev, target_seq[i]);
 		}
-		radeon_irq_kms_sw_irq_put(rdev, ring);
-		if (unlikely(r < 0)) {
+
+		if (unlikely(r < 0))
 			return r;
-		}
-		trace_radeon_fence_wait_end(rdev->ddev, seq);
 
 		if (unlikely(!signaled)) {
+			if (rdev->needs_reset)
+				return -EDEADLK;
+
 			/* we were interrupted for some reason and fence
 			 * isn't signaled yet, resume waiting */
-			if (r) {
+			if (r)
 				continue;
+
+			for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+				if (!target_seq[i])
+					continue;
+
+				if (last_seq[i] != atomic64_read(&rdev->fence_drv[i].last_seq))
+					break;
 			}
 
-			/* check if sequence value has changed since last_activity */
-			if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
+			if (i != RADEON_NUM_RINGS)
 				continue;
-			}
 
-			if (lock_ring) {
+			if (lock_ring)
 				mutex_lock(&rdev->ring_lock);
-			}
 
-			/* test if somebody else has already decided that this is a lockup */
-			if (last_activity != rdev->fence_drv[ring].last_activity) {
-				if (lock_ring) {
-					mutex_unlock(&rdev->ring_lock);
-				}
-				continue;
+			for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+				if (!target_seq[i])
+					continue;
+
+				if (radeon_ring_is_lockup(rdev, i, &rdev->ring[i]))
+					break;
 			}
 
-			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
+			if (i < RADEON_NUM_RINGS) {
 				/* good news we believe it's a lockup */
-				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx last fence id 0x%016llx)\n",
-					 target_seq, seq);
-
-				/* change last activity so nobody else think there is a lockup */
-				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-					rdev->fence_drv[i].last_activity = jiffies;
-				}
-
-				/* mark the ring as not ready any more */
-				rdev->ring[ring].ready = false;
-				if (lock_ring) {
+				dev_warn(rdev->dev, "GPU lockup (waiting for "
					 "0x%016llx last fence id 0x%016llx on"
+					 " ring %d)\n",
+					 target_seq[i], last_seq[i], i);
+
+				/* remember that we need an reset */
+				rdev->needs_reset = true;
+				if (lock_ring)
 					mutex_unlock(&rdev->ring_lock);
-				}
+				wake_up_all(&rdev->fence_queue);
 				return -EDEADLK;
 			}
 
-			if (lock_ring) {
+			if (lock_ring)
 				mutex_unlock(&rdev->ring_lock);
-			}
 		}
 	}
 	return 0;
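With the last_activity timestamp gone, this hunk makes the waiter itself the progress detector: it snapshots last_seq for every ring it watches, sleeps at most RADEON_FENCE_JIFFIES_TIMEOUT (about 500ms, per the deleted comment), and runs the lockup check only if no watched ring advanced; a confirmed lockup sets rdev->needs_reset and wakes all waiters so they fail with -EDEADLK as well. A self-contained userspace sketch of that snapshot-and-compare pattern (plain C with a timed sleep standing in for wait_event_timeout(); names and numbers are invented, this is not the kernel API):

/* Sketch: snapshot every watched counter, wait a bounded interval, and
 * only suspect a lockup when nothing advanced in the meantime. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

#define NUM_RINGS 8	/* stand-in for RADEON_NUM_RINGS */

static _Atomic uint64_t ring_seq[NUM_RINGS];	/* stand-in for last_seq */

/* a zero target means "not waiting on this ring", as in the patch */
static bool any_signaled(const uint64_t *target)
{
	for (int i = 0; i < NUM_RINGS; ++i)
		if (target[i] && atomic_load(&ring_seq[i]) >= target[i])
			return true;
	return false;
}

/* Returns 0 once any target signals, -1 when a lockup is suspected
 * (the analogue of setting needs_reset and returning -EDEADLK). */
static int wait_any(const uint64_t *target)
{
	const struct timespec interval = { .tv_nsec = 500 * 1000 * 1000 };
	uint64_t snap[NUM_RINGS];

	while (!any_signaled(target)) {
		for (int i = 0; i < NUM_RINGS; ++i)
			snap[i] = atomic_load(&ring_seq[i]);

		/* the kernel sleeps on fence_queue with a timeout instead */
		nanosleep(&interval, NULL);

		bool advanced = false;
		for (int i = 0; i < NUM_RINGS; ++i)
			if (target[i] && snap[i] != atomic_load(&ring_seq[i]))
				advanced = true;

		if (!advanced && !any_signaled(target))
			return -1;	/* no progress anywhere: treat as lockup */
	}
	return 0;
}

Compared with the deleted timestamp scheme, this needs no shared writable state between waiters and no jiffies wraparound handling: each waiter reasons only about whether the counters it cares about moved.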
@@ -388,6 +404,7 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
  */
 int radeon_fence_wait(struct radeon_fence *fence, bool intr)
 {
+	uint64_t seq[RADEON_NUM_RINGS] = {};
 	int r;
 
 	if (fence == NULL) {
@@ -395,147 +412,15 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
 		return -EINVAL;
 	}
 
-	r = radeon_fence_wait_seq(fence->rdev, fence->seq,
-				  fence->ring, intr, true);
-	if (r) {
-		return r;
-	}
-	fence->seq = RADEON_FENCE_SIGNALED_SEQ;
-	return 0;
-}
-
-static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
-{
-	unsigned i;
-
-	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) {
-			return true;
-		}
-	}
-	return false;
-}
-
-/**
- * radeon_fence_wait_any_seq - wait for a sequence number on any ring
- *
- * @rdev: radeon device pointer
- * @target_seq: sequence number(s) we want to wait for
- * @intr: use interruptable sleep
- *
- * Wait for the requested sequence number(s) to be written by any ring
- * (all asics). Sequnce number array is indexed by ring id.
- * @intr selects whether to use interruptable (true) or non-interruptable
- * (false) sleep when waiting for the sequence number. Helper function
- * for radeon_fence_wait_any(), et al.
- * Returns 0 if the sequence number has passed, error for all other cases.
- */
-static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
-				     u64 *target_seq, bool intr)
-{
-	unsigned long timeout, last_activity, tmp;
-	unsigned i, ring = RADEON_NUM_RINGS;
-	bool signaled;
-	int r;
-
-	for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
-		if (!target_seq[i]) {
-			continue;
-		}
-
-		/* use the most recent one as indicator */
-		if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
-			last_activity = rdev->fence_drv[i].last_activity;
-		}
-
-		/* For lockup detection just pick the lowest ring we are
-		 * actively waiting for
-		 */
-		if (i < ring) {
-			ring = i;
-		}
-	}
-
-	/* nothing to wait for ? */
-	if (ring == RADEON_NUM_RINGS) {
-		return -ENOENT;
-	}
-
-	while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
-		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
-		if (time_after(last_activity, timeout)) {
-			/* the normal case, timeout is somewhere before last_activity */
-			timeout = last_activity - timeout;
-		} else {
-			/* either jiffies wrapped around, or no fence was signaled in the last 500ms
-			 * anyway we will just wait for the minimum amount and then check for a lockup
-			 */
-			timeout = 1;
-		}
+	seq[fence->ring] = fence->seq;
+	if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ)
+		return 0;
 
-		trace_radeon_fence_wait_begin(rdev->ddev, target_seq[ring]);
-		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-			if (target_seq[i]) {
-				radeon_irq_kms_sw_irq_get(rdev, i);
-			}
-		}
-		if (intr) {
-			r = wait_event_interruptible_timeout(rdev->fence_queue,
-				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
-				timeout);
-		} else {
-			r = wait_event_timeout(rdev->fence_queue,
-				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
-				timeout);
-		}
-		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-			if (target_seq[i]) {
-				radeon_irq_kms_sw_irq_put(rdev, i);
-			}
-		}
-		if (unlikely(r < 0)) {
-			return r;
-		}
-		trace_radeon_fence_wait_end(rdev->ddev, target_seq[ring]);
-
-		if (unlikely(!signaled)) {
-			/* we were interrupted for some reason and fence
-			 * isn't signaled yet, resume waiting */
-			if (r) {
-				continue;
-			}
-
-			mutex_lock(&rdev->ring_lock);
-			for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
-				if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
-					tmp = rdev->fence_drv[i].last_activity;
-				}
-			}
-			/* test if somebody else has already decided that this is a lockup */
-			if (last_activity != tmp) {
-				last_activity = tmp;
-				mutex_unlock(&rdev->ring_lock);
-				continue;
-			}
-
-			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
-				/* good news we believe it's a lockup */
-				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx)\n",
-					 target_seq[ring]);
-
-				/* change last activity so nobody else think there is a lockup */
-				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-					rdev->fence_drv[i].last_activity = jiffies;
-				}
+	r = radeon_fence_wait_seq(fence->rdev, seq, intr, true);
+	if (r)
+		return r;
 
-				/* mark the ring as not ready any more */
-				rdev->ring[ring].ready = false;
-				mutex_unlock(&rdev->ring_lock);
-				return -EDEADLK;
-			}
-			mutex_unlock(&rdev->ring_lock);
-		}
-	}
+	fence->seq = RADEON_FENCE_SIGNALED_SEQ;
 	return 0;
 }
 
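Every remaining caller in this file now adapts to the unified helper the same way: build a zero-filled per-ring array, fill in only the slots being waited on, and pass it down; a zero entry means "ignore this ring". A condensed sketch of that calling convention, using only names that appear in the hunks of this patch (not a complete function):

/* Calling-convention sketch assembled from the hunks above. */
uint64_t seq[RADEON_NUM_RINGS] = {};
int r;

seq[fence->ring] = fence->seq;	/* as in radeon_fence_wait() */
/* or: seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
 *     as in radeon_fence_wait_next_locked() below */

r = radeon_fence_wait_seq(fence->rdev, seq, intr, true);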
@@ -557,7 +442,7 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
 			  bool intr)
 {
 	uint64_t seq[RADEON_NUM_RINGS];
-	unsigned i;
+	unsigned i, num_rings = 0;
 	int r;
 
 	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
@@ -567,15 +452,19 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
 			continue;
 		}
 
-		if (fences[i]->seq == RADEON_FENCE_SIGNALED_SEQ) {
-			/* something was allready signaled */
-			return 0;
-		}
-
 		seq[i] = fences[i]->seq;
+		++num_rings;
+
+		/* test if something was allready signaled */
+		if (seq[i] == RADEON_FENCE_SIGNALED_SEQ)
+			return 0;
 	}
 
-	r = radeon_fence_wait_any_seq(rdev, seq, intr);
+	/* nothing to wait for ? */
+	if (num_rings == 0)
+		return -ENOENT;
+
+	r = radeon_fence_wait_seq(rdev, seq, intr, true);
 	if (r) {
 		return r;
 	}
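For illustration, a hypothetical caller of the reworked radeon_fence_wait_any() showing the new -ENOENT result when every slot is NULL (gfx_fence and dma_fence are invented names; the ring index macros are the driver's usual ones):

/* Hypothetical usage sketch: wait until either the GFX or the DMA
 * fence signals; either pointer may be NULL. */
struct radeon_fence *fences[RADEON_NUM_RINGS] = {};
int r;

fences[RADEON_RING_TYPE_GFX_INDEX] = gfx_fence;
fences[R600_RING_TYPE_DMA_INDEX] = dma_fence;

r = radeon_fence_wait_any(rdev, fences, true);
if (r == -ENOENT)
	r = 0;	/* every slot was NULL: nothing was submitted, nothing to wait for */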
@@ -594,15 +483,15 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
  */
 int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
 {
-	uint64_t seq;
+	uint64_t seq[RADEON_NUM_RINGS] = {};
 
-	seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
-	if (seq >= rdev->fence_drv[ring].sync_seq[ring]) {
+	seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
+	if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) {
 		/* nothing to wait for, last_seq is
 		   already the last emited fence */
 		return -ENOENT;
 	}
-	return radeon_fence_wait_seq(rdev, seq, ring, false, false);
+	return radeon_fence_wait_seq(rdev, seq, false, false);
 }
 
 /**
@@ -617,14 +506,15 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
  */
 int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
 {
-	uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];
+	uint64_t seq[RADEON_NUM_RINGS] = {};
 	int r;
 
-	r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
+	seq[ring] = rdev->fence_drv[ring].sync_seq[ring];
+	r = radeon_fence_wait_seq(rdev, seq, false, false);
 	if (r) {
-		if (r == -EDEADLK) {
+		if (r == -EDEADLK)
 			return -EDEADLK;
-		}
+
 		dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n",
 			ring, r);
 	}
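After the rework, the two locked helpers differ only in the target they place in the array. A worked illustration with invented numbers:

/* Illustration only (invented numbers): fence 40 has retired
 * (last_seq == 40) while fences up to 43 were emitted (sync_seq == 43).
 *
 *   radeon_fence_wait_next_locked:  seq[ring] = 40 + 1 = 41
 *                                   -> returns once fence 41 retires
 *   radeon_fence_wait_empty_locked: seq[ring] = 43
 *                                   -> returns once the ring is fully idle
 *
 * If last_seq were already 43, wait_next returns -ENOENT, since fence 44
 * was never emitted and there is nothing to wait for. */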
@@ -826,7 +716,6 @@ static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
 	for (i = 0; i < RADEON_NUM_RINGS; ++i)
 		rdev->fence_drv[ring].sync_seq[i] = 0;
 	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
-	rdev->fence_drv[ring].last_activity = jiffies;
 	rdev->fence_drv[ring].initialized = false;
 }
 