author     Linus Torvalds <torvalds@linux-foundation.org>  2014-04-08 12:52:16 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-04-08 12:52:16 -0400
commit     e9f37d3a8d126e73f5737ef548cdf6f618e295e4 (patch)
tree       831eb4952637828a7bbafa361185e0ca57aa86ed /drivers/gpu/drm/radeon/radeon_fence.c
parent     5fb6b953bb7aa86a9c8ea760934982cedc45c52b (diff)
parent     c39b06951f1dc2e384650288676c5b7dcc0ec92c (diff)
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie:
"Highlights:
- drm:
Generic display port aux features, primary plane support, drm
master management fixes, logging cleanups, enforced locking checks
(instead of docs), documentation improvements, minor number
handling cleanup, pseudofs for shared inodes.
- ttm:
add ability to allocate from both ends
- i915:
broadwell features, power domain and runtime pm, per-process
address space infrastructure (not enabled)
- msm:
power management, hdmi audio support
- nouveau:
ongoing GPU fault recovery, initial maxwell support, random fixes
- exynos:
refactored driver to clean up a lot of abstraction, DP support
moved into drm, LVDS bridge support added, parallel panel support
- gma500:
SGX MMU support, SGX irq handling, asle irq work fixes
- radeon:
video engine bringup, ring handling fixes, use dp aux helpers
- vmwgfx:
add rendernode support"
* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (849 commits)
DRM: armada: fix corruption while loading cursors
drm/dp_helper: don't return EPROTO for defers (v2)
drm/bridge: export ptn3460_init function
drm/exynos: remove MODULE_DEVICE_TABLE definitions
ARM: dts: exynos4412-trats2: enable exynos/fimd node
ARM: dts: exynos4210-trats: enable exynos/fimd node
ARM: dts: exynos4412-trats2: add panel node
ARM: dts: exynos4210-trats: add panel node
ARM: dts: exynos4: add MIPI DSI Master node
drm/panel: add S6E8AA0 driver
ARM: dts: exynos4210-universal_c210: add proper panel node
drm/panel: add ld9040 driver
panel/ld9040: add DT bindings
panel/s6e8aa0: add DT bindings
drm/exynos: add DSIM driver
exynos/dsim: add DT bindings
drm/exynos: disallow fbdev initialization if no device is connected
drm/mipi_dsi: create dsi devices only for nodes with reg property
drm/mipi_dsi: add flags to DSI messages
Skip intel_crt_init for Dell XPS 8700
...
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_fence.c')
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c  59
1 file changed, 10 insertions, 49 deletions
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index c37cb79a9489..a77b1c13ea43 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -288,7 +288,6 @@ static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
  * @rdev: radeon device pointer
  * @target_seq: sequence number(s) we want to wait for
  * @intr: use interruptable sleep
- * @lock_ring: whether the ring should be locked or not
  *
  * Wait for the requested sequence number(s) to be written by any ring
  * (all asics). Sequnce number array is indexed by ring id.
@@ -299,7 +298,7 @@ static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
  * -EDEADLK is returned when a GPU lockup has been detected.
  */
 static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
-				 bool intr, bool lock_ring)
+				 bool intr)
 {
 	uint64_t last_seq[RADEON_NUM_RINGS];
 	bool signaled;
@@ -358,9 +357,6 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
 			if (i != RADEON_NUM_RINGS)
 				continue;
 
-			if (lock_ring)
-				mutex_lock(&rdev->ring_lock);
-
 			for (i = 0; i < RADEON_NUM_RINGS; ++i) {
 				if (!target_seq[i])
 					continue;
@@ -378,14 +374,9 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
 
 				/* remember that we need an reset */
 				rdev->needs_reset = true;
-				if (lock_ring)
-					mutex_unlock(&rdev->ring_lock);
 				wake_up_all(&rdev->fence_queue);
 				return -EDEADLK;
 			}
-
-			if (lock_ring)
-				mutex_unlock(&rdev->ring_lock);
 		}
 	}
 	return 0;
@@ -416,7 +407,7 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
 	if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ)
 		return 0;
 
-	r = radeon_fence_wait_seq(fence->rdev, seq, intr, true);
+	r = radeon_fence_wait_seq(fence->rdev, seq, intr);
 	if (r)
 		return r;
 
@@ -464,7 +455,7 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
 	if (num_rings == 0)
 		return -ENOENT;
 
-	r = radeon_fence_wait_seq(rdev, seq, intr, true);
+	r = radeon_fence_wait_seq(rdev, seq, intr);
 	if (r) {
 		return r;
 	}
@@ -472,37 +463,7 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
 }
 
 /**
- * radeon_fence_wait_locked - wait for a fence to signal
- *
- * @fence: radeon fence object
- *
- * Wait for the requested fence to signal (all asics).
- * Returns 0 if the fence has passed, error for all other cases.
- */
-int radeon_fence_wait_locked(struct radeon_fence *fence)
-{
-	uint64_t seq[RADEON_NUM_RINGS] = {};
-	int r;
-
-	if (fence == NULL) {
-		WARN(1, "Querying an invalid fence : %p !\n", fence);
-		return -EINVAL;
-	}
-
-	seq[fence->ring] = fence->seq;
-	if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ)
-		return 0;
-
-	r = radeon_fence_wait_seq(fence->rdev, seq, false, false);
-	if (r)
-		return r;
-
-	fence->seq = RADEON_FENCE_SIGNALED_SEQ;
-	return 0;
-}
-
-/**
- * radeon_fence_wait_next_locked - wait for the next fence to signal
+ * radeon_fence_wait_next - wait for the next fence to signal
  *
  * @rdev: radeon device pointer
  * @ring: ring index the fence is associated with
@@ -511,7 +472,7 @@ int radeon_fence_wait_locked(struct radeon_fence *fence)
  * Returns 0 if the next fence has passed, error for all other cases.
  * Caller must hold ring lock.
  */
-int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
+int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
 {
 	uint64_t seq[RADEON_NUM_RINGS] = {};
 
@@ -521,11 +482,11 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
 		   already the last emited fence */
 		return -ENOENT;
 	}
-	return radeon_fence_wait_seq(rdev, seq, false, false);
+	return radeon_fence_wait_seq(rdev, seq, false);
 }
 
 /**
- * radeon_fence_wait_empty_locked - wait for all fences to signal
+ * radeon_fence_wait_empty - wait for all fences to signal
  *
  * @rdev: radeon device pointer
  * @ring: ring index the fence is associated with
@@ -534,7 +495,7 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
  * Returns 0 if the fences have passed, error for all other cases.
  * Caller must hold ring lock.
  */
-int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
+int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
 {
 	uint64_t seq[RADEON_NUM_RINGS] = {};
 	int r;
@@ -543,7 +504,7 @@ int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
 	if (!seq[ring])
 		return 0;
 
-	r = radeon_fence_wait_seq(rdev, seq, false, false);
+	r = radeon_fence_wait_seq(rdev, seq, false);
 	if (r) {
 		if (r == -EDEADLK)
 			return -EDEADLK;
@@ -794,7 +755,7 @@ void radeon_fence_driver_fini(struct radeon_device *rdev)
 	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
 		if (!rdev->fence_drv[ring].initialized)
 			continue;
-		r = radeon_fence_wait_empty_locked(rdev, ring);
+		r = radeon_fence_wait_empty(rdev, ring);
 		if (r) {
 			/* no need to trigger GPU reset as we are unloading */
 			radeon_fence_driver_force_completion(rdev);
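Taken together, the radeon_fence.c hunks above move ring locking out of the fence code: radeon_fence_wait_seq() loses its lock_ring parameter and no longer touches rdev->ring_lock, radeon_fence_wait_locked() is dropped, and radeon_fence_wait_next_locked()/radeon_fence_wait_empty_locked() become radeon_fence_wait_next()/radeon_fence_wait_empty(), whose kernel-doc states that the caller must hold the ring lock. A minimal sketch of a call site under the new convention follows; example_drain_rings() is a hypothetical helper with simplified error handling, illustrative only and not code from this merge.

/* Illustrative caller under the post-patch convention: the fence code no
 * longer takes rdev->ring_lock itself, so the caller serializes against
 * command submission and then waits for every initialized ring to drain.
 * example_drain_rings() is a hypothetical helper, not part of this merge.
 */
#include "radeon.h"

static int example_drain_rings(struct radeon_device *rdev)
{
	int ring, r;

	mutex_lock(&rdev->ring_lock);	/* caller now owns the ring lock */
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;

		/* renamed from radeon_fence_wait_empty_locked() */
		r = radeon_fence_wait_empty(rdev, ring);
		if (r) {
			mutex_unlock(&rdev->ring_lock);
			return r;
		}
	}
	mutex_unlock(&rdev->ring_lock);

	return 0;
}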