author     Maarten Lankhorst <maarten.lankhorst@canonical.com>    2014-08-27 09:21:59 -0400
committer  Alex Deucher <alexander.deucher@amd.com>               2014-08-27 17:42:12 -0400
commit     9867d00dbaef42e346e5d12eaa9591b057fea6d8
tree       b72e6e5ab5ea44c300b16a89a5b47d482b892141
parent     0bfa4b41268ad5fd741f16f484e4fee190822ec6
drm/radeon: add timeout argument to radeon_fence_wait_seq v2
This makes it possible to wait for a specific amount of time
rather than waiting indefinitely.
v2 (chk): rebased on other changes
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
-rw-r--r--   drivers/gpu/drm/radeon/radeon_fence.c   48
1 file changed, 28 insertions(+), 20 deletions(-)
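The new helper keeps the return convention of wait_event_timeout()/wait_event_interruptible_timeout(): a positive result is the remaining time in jiffies, 0 means the wait timed out, and a negative value is an error (for example -EDEADLK on a detected lockup, or -ERESTARTSYS when an interruptible wait is broken by a signal). The following self-contained userspace sketch only illustrates how a wrapper such as radeon_fence_wait() folds that long result back into the usual 0/-errno contract; fake_wait_seq_timeout(), wait_and_flatten() and the -EBUSY mapping are illustrative stand-ins, not part of the patch:

#include <errno.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel convention used by
 * wait_event_timeout(): >0 = remaining jiffies, 0 = timed out, <0 = error. */
static long fake_wait_seq_timeout(long timeout)
{
        return timeout / 2;     /* pretend the fence signaled early */
}

/* How a wrapper in the style of radeon_fence_wait() collapses the long
 * result back into the usual 0 / -errno contract. */
static int wait_and_flatten(long timeout)
{
        long r = fake_wait_seq_timeout(timeout);

        if (r < 0)
                return (int)r;  /* propagate -EDEADLK, -ERESTARTSYS, ... */
        if (r == 0)
                return -EBUSY;  /* hypothetical mapping for a timeout */
        return 0;               /* signaled with time to spare */
}

int main(void)
{
        printf("wait result: %d\n", wait_and_flatten(1000));
        return 0;
}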
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index ac15f3418478..a54bfd60510b 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -364,28 +364,31 @@ static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
 }
 
 /**
- * radeon_fence_wait_seq - wait for a specific sequence numbers
+ * radeon_fence_wait_seq_timeout - wait for a specific sequence numbers
  *
  * @rdev: radeon device pointer
  * @target_seq: sequence number(s) we want to wait for
  * @intr: use interruptable sleep
+ * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
  *
  * Wait for the requested sequence number(s) to be written by any ring
  * (all asics). Sequnce number array is indexed by ring id.
  * @intr selects whether to use interruptable (true) or non-interruptable
  * (false) sleep when waiting for the sequence number. Helper function
  * for radeon_fence_wait_*().
- * Returns 0 if the sequence number has passed, error for all other cases.
+ * Returns remaining time if the sequence number has passed, 0 when
+ * the wait timeout, or an error for all other cases.
  * -EDEADLK is returned when a GPU lockup has been detected.
  */
-static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
-                                 bool intr)
+static long radeon_fence_wait_seq_timeout(struct radeon_device *rdev,
+                                          u64 *target_seq, bool intr,
+                                          long timeout)
 {
         long r;
         int i;
 
         if (radeon_fence_any_seq_signaled(rdev, target_seq))
-                return 0;
+                return timeout;
 
         /* enable IRQs and tracing */
         for (i = 0; i < RADEON_NUM_RINGS; ++i) {
@@ -399,11 +402,11 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
         if (intr) {
                 r = wait_event_interruptible_timeout(rdev->fence_queue, (
                         radeon_fence_any_seq_signaled(rdev, target_seq)
-                         || rdev->needs_reset), MAX_SCHEDULE_TIMEOUT);
+                         || rdev->needs_reset), timeout);
         } else {
                 r = wait_event_timeout(rdev->fence_queue, (
                         radeon_fence_any_seq_signaled(rdev, target_seq)
-                         || rdev->needs_reset), MAX_SCHEDULE_TIMEOUT);
+                         || rdev->needs_reset), timeout);
         }
 
         if (rdev->needs_reset)
@@ -417,14 +420,14 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
                 trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]);
         }
 
-        return r < 0 ? r : 0;
+        return r;
 }
 
 /**
  * radeon_fence_wait - wait for a fence to signal
  *
  * @fence: radeon fence object
- * @intr: use interruptable sleep
+ * @intr: use interruptible sleep
  *
  * Wait for the requested fence to signal (all asics).
  * @intr selects whether to use interruptable (true) or non-interruptable
@@ -434,7 +437,7 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
 int radeon_fence_wait(struct radeon_fence *fence, bool intr)
 {
         uint64_t seq[RADEON_NUM_RINGS] = {};
-        int r;
+        long r;
 
         if (fence == NULL) {
                 WARN(1, "Querying an invalid fence : %p !\n", fence);
@@ -445,9 +448,10 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
         if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ)
                 return 0;
 
-        r = radeon_fence_wait_seq(fence->rdev, seq, intr);
-        if (r)
+        r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, MAX_SCHEDULE_TIMEOUT);
+        if (r < 0) {
                 return r;
+        }
 
         fence->seq = RADEON_FENCE_SIGNALED_SEQ;
         return 0;
@@ -472,7 +476,7 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
 {
         uint64_t seq[RADEON_NUM_RINGS];
         unsigned i, num_rings = 0;
-        int r;
+        long r;
 
         for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                 seq[i] = 0;
@@ -493,8 +497,8 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
         if (num_rings == 0)
                 return -ENOENT;
 
-        r = radeon_fence_wait_seq(rdev, seq, intr);
-        if (r) {
+        r = radeon_fence_wait_seq_timeout(rdev, seq, intr, MAX_SCHEDULE_TIMEOUT);
+        if (r < 0) {
                 return r;
         }
         return 0;
@@ -513,6 +517,7 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
 int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
 {
         uint64_t seq[RADEON_NUM_RINGS] = {};
+        long r;
 
         seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
         if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) {
@@ -520,7 +525,10 @@ int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
                    already the last emited fence */
                 return -ENOENT;
         }
-        return radeon_fence_wait_seq(rdev, seq, false);
+        r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT);
+        if (r < 0)
+                return r;
+        return 0;
 }
 
 /**
@@ -536,18 +544,18 @@ int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
 int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
 {
         uint64_t seq[RADEON_NUM_RINGS] = {};
-        int r;
+        long r;
 
         seq[ring] = rdev->fence_drv[ring].sync_seq[ring];
         if (!seq[ring])
                 return 0;
 
-        r = radeon_fence_wait_seq(rdev, seq, false);
-        if (r) {
+        r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT);
+        if (r < 0) {
                 if (r == -EDEADLK)
                         return -EDEADLK;
 
-                dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n",
+                dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%ld)\n",
                         ring, r);
         }
         return 0;
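Every caller converted in this patch still passes MAX_SCHEDULE_TIMEOUT, so the behaviour of the existing wait paths is unchanged; the timeout argument only becomes useful once a bounded-wait entry point is layered on top of the helper. A rough sketch of what such a wrapper could look like, assuming it mirrors the body of radeon_fence_wait() above; radeon_fence_wait_timeout() is hypothetical here and is not added by this commit:

/* Hypothetical bounded wait on a single fence, not part of this commit.
 * Returns remaining jiffies, 0 on timeout, or a negative error code. */
long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr,
                               long timeout)
{
        uint64_t seq[RADEON_NUM_RINGS] = {};
        long r;

        if (fence == NULL)
                return -EINVAL;

        seq[fence->ring] = fence->seq;
        if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ)
                return timeout;

        r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, timeout);
        if (r <= 0)
                return r;       /* 0 = timed out, <0 = -EDEADLK/-ERESTARTSYS */

        fence->seq = RADEON_FENCE_SIGNALED_SEQ;
        return r;               /* remaining jiffies */
}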