Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_display.c')
 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 81 ++++++++++++++++++--------
 1 file changed, 53 insertions(+), 28 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index b16b9256883e..e3d70772b531 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -35,6 +35,36 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
 
+static void amdgpu_flip_wait_fence(struct amdgpu_device *adev,
+				   struct fence **f)
+{
+	struct amdgpu_fence *fence;
+	long r;
+
+	if (*f == NULL)
+		return;
+
+	fence = to_amdgpu_fence(*f);
+	if (fence) {
+		r = fence_wait(&fence->base, false);
+		if (r == -EDEADLK) {
+			up_read(&adev->exclusive_lock);
+			r = amdgpu_gpu_reset(adev);
+			down_read(&adev->exclusive_lock);
+		}
+	} else
+		r = fence_wait(*f, false);
+
+	if (r)
+		DRM_ERROR("failed to wait on page flip fence (%ld)!\n", r);
+
+	/* We continue with the page flip even if we failed to wait on
+	 * the fence, otherwise the DRM core and userspace will be
+	 * confused about which BO the CRTC is scanning out
+	 */
+	fence_put(*f);
+	*f = NULL;
+}
 
 static void amdgpu_flip_work_func(struct work_struct *__work)
 {
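
The new helper needs two wait paths because to_amdgpu_fence() only resolves fences that originate from this driver; a foreign fence (e.g. from another device sharing the buffer) falls through to the generic fence_wait(), and only driver-own fences get the -EDEADLK reset handling. For reference, a minimal sketch of that cast, assuming the usual container_of pattern from amdgpu.h:

	static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f)
	{
		struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);

		/* A fence with foreign ops did not come from this driver;
		 * return NULL so the caller uses the generic wait path. */
		if (__f->base.ops == &amdgpu_fence_ops)
			return __f;

		return NULL;
	}
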
@@ -44,34 +74,13 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
 	struct amdgpu_crtc *amdgpuCrtc = adev->mode_info.crtcs[work->crtc_id];
 
 	struct drm_crtc *crtc = &amdgpuCrtc->base;
-	struct amdgpu_fence *fence;
 	unsigned long flags;
-	int r;
+	unsigned i;
 
 	down_read(&adev->exclusive_lock);
-	if (work->fence) {
-		fence = to_amdgpu_fence(work->fence);
-		if (fence) {
-			r = amdgpu_fence_wait(fence, false);
-			if (r == -EDEADLK) {
-				up_read(&adev->exclusive_lock);
-				r = amdgpu_gpu_reset(adev);
-				down_read(&adev->exclusive_lock);
-			}
-		} else
-			r = fence_wait(work->fence, false);
-
-		if (r)
-			DRM_ERROR("failed to wait on page flip fence (%d)!\n", r);
-
-		/* We continue with the page flip even if we failed to wait on
-		 * the fence, otherwise the DRM core and userspace will be
-		 * confused about which BO the CRTC is scanning out
-		 */
-
-		fence_put(work->fence);
-		work->fence = NULL;
-	}
+	amdgpu_flip_wait_fence(adev, &work->excl);
+	for (i = 0; i < work->shared_count; ++i)
+		amdgpu_flip_wait_fence(adev, &work->shared[i]);
 
 	/* We borrow the event spin lock for protecting flip_status */
 	spin_lock_irqsave(&crtc->dev->event_lock, flags);
@@ -108,6 +117,7 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
 		DRM_ERROR("failed to reserve buffer after flip\n");
 
 	drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
+	kfree(work->shared);
 	kfree(work);
 }
 
@@ -127,7 +137,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
 	unsigned long flags;
 	u64 tiling_flags;
 	u64 base;
-	int r;
+	int i, r;
 
 	work = kzalloc(sizeof *work, GFP_KERNEL);
 	if (work == NULL)
@@ -167,7 +177,19 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
 		goto cleanup;
 	}
 
-	work->fence = fence_get(reservation_object_get_excl(new_rbo->tbo.resv));
+	r = reservation_object_get_fences_rcu(new_rbo->tbo.resv, &work->excl,
+					      &work->shared_count,
+					      &work->shared);
+	if (unlikely(r != 0)) {
+		amdgpu_bo_unreserve(new_rbo);
+		DRM_ERROR("failed to get fences for buffer\n");
+		goto cleanup;
+	}
+
+	fence_get(work->excl);
+	for (i = 0; i < work->shared_count; ++i)
+		fence_get(work->shared[i]);
+
 	amdgpu_bo_get_tiling_flags(new_rbo, &tiling_flags);
 	amdgpu_bo_unreserve(new_rbo);
 
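
For context, the dma-buf helper introduced above has this prototype (include/linux/reservation.h as of this era of the kernel). It snapshots the exclusive fence plus a kmalloc()'d array of shared fences from the buffer's reservation object under RCU, and the caller owns that array afterwards, which is what the new kfree(work->shared) calls in the unpin worker and the cleanup path pair with:

	int reservation_object_get_fences_rcu(struct reservation_object *obj,
					      struct fence **pfence_excl,
					      unsigned *pshared_count,
					      struct fence ***pshared);
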
@@ -212,7 +234,10 @@ pflip_cleanup:
 
 cleanup:
 	drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
-	fence_put(work->fence);
+	fence_put(work->excl);
+	for (i = 0; i < work->shared_count; ++i)
+		fence_put(work->shared[i]);
+	kfree(work->shared);
 	kfree(work);
 
 	return r;