about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_display.c')
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 96
1 files changed, 29 insertions, 67 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index abeb6aaf89a9..9af8d3c7ae8b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -41,7 +41,7 @@ static void amdgpu_flip_callback(struct fence *f, struct fence_cb *cb)
41 container_of(cb, struct amdgpu_flip_work, cb); 41 container_of(cb, struct amdgpu_flip_work, cb);
42 42
43 fence_put(f); 43 fence_put(f);
44 schedule_work(&work->flip_work); 44 schedule_work(&work->flip_work.work);
45} 45}
46 46
47static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work, 47static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
@@ -63,16 +63,17 @@ static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
63 63
64static void amdgpu_flip_work_func(struct work_struct *__work) 64static void amdgpu_flip_work_func(struct work_struct *__work)
65{ 65{
66 struct delayed_work *delayed_work =
67 container_of(__work, struct delayed_work, work);
66 struct amdgpu_flip_work *work = 68 struct amdgpu_flip_work *work =
67 container_of(__work, struct amdgpu_flip_work, flip_work); 69 container_of(delayed_work, struct amdgpu_flip_work, flip_work);
68 struct amdgpu_device *adev = work->adev; 70 struct amdgpu_device *adev = work->adev;
69 struct amdgpu_crtc *amdgpuCrtc = adev->mode_info.crtcs[work->crtc_id]; 71 struct amdgpu_crtc *amdgpuCrtc = adev->mode_info.crtcs[work->crtc_id];
70 72
71 struct drm_crtc *crtc = &amdgpuCrtc->base; 73 struct drm_crtc *crtc = &amdgpuCrtc->base;
72 unsigned long flags; 74 unsigned long flags;
73 unsigned i, repcnt = 4; 75 unsigned i;
74 int vpos, hpos, stat, min_udelay = 0; 76 int vpos, hpos;
75 struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
76 77
77 if (amdgpu_flip_handle_fence(work, &work->excl)) 78 if (amdgpu_flip_handle_fence(work, &work->excl))
78 return; 79 return;
@@ -81,55 +82,23 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
81 if (amdgpu_flip_handle_fence(work, &work->shared[i])) 82 if (amdgpu_flip_handle_fence(work, &work->shared[i]))
82 return; 83 return;
83 84
84 /* We borrow the event spin lock for protecting flip_status */ 85 /* Wait until we're out of the vertical blank period before the one
85 spin_lock_irqsave(&crtc->dev->event_lock, flags); 86 * targeted by the flip
86
87 /* If this happens to execute within the "virtually extended" vblank
88 * interval before the start of the real vblank interval then it needs
89 * to delay programming the mmio flip until the real vblank is entered.
90 * This prevents completing a flip too early due to the way we fudge
91 * our vblank counter and vblank timestamps in order to work around the
92 * problem that the hw fires vblank interrupts before actual start of
93 * vblank (when line buffer refilling is done for a frame). It
94 * complements the fudging logic in amdgpu_get_crtc_scanoutpos() for
95 * timestamping and amdgpu_get_vblank_counter_kms() for vblank counts.
96 *
97 * In practice this won't execute very often unless on very fast
98 * machines because the time window for this to happen is very small.
99 */ 87 */
100 while (amdgpuCrtc->enabled && --repcnt) { 88 if (amdgpuCrtc->enabled &&
101 /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank 89 (amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0,
102 * start in hpos, and to the "fudged earlier" vblank start in 90 &vpos, &hpos, NULL, NULL,
103 * vpos. 91 &crtc->hwmode)
104 */ 92 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
105 stat = amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 93 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
106 GET_DISTANCE_TO_VBLANKSTART, 94 (int)(work->target_vblank -
107 &vpos, &hpos, NULL, NULL, 95 amdgpu_get_vblank_counter_kms(adev->ddev, amdgpuCrtc->crtc_id)) > 0) {
108 &crtc->hwmode); 96 schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
109 97 return;
110 if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
111 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE) ||
112 !(vpos >= 0 && hpos <= 0))
113 break;
114
115 /* Sleep at least until estimated real start of hw vblank */
116 min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
117 if (min_udelay > vblank->framedur_ns / 2000) {
118 /* Don't wait ridiculously long - something is wrong */
119 repcnt = 0;
120 break;
121 }
122 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
123 usleep_range(min_udelay, 2 * min_udelay);
124 spin_lock_irqsave(&crtc->dev->event_lock, flags);
125 } 98 }
126 99
127 if (!repcnt) 100 /* We borrow the event spin lock for protecting flip_status */
128 DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, " 101 spin_lock_irqsave(&crtc->dev->event_lock, flags);
129 "framedur %d, linedur %d, stat %d, vpos %d, "
130 "hpos %d\n", work->crtc_id, min_udelay,
131 vblank->framedur_ns / 1000,
132 vblank->linedur_ns / 1000, stat, vpos, hpos);
133 102
134 /* Do the flip (mmio) */ 103 /* Do the flip (mmio) */
135 adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async); 104 adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async);
@@ -169,10 +138,10 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
169 kfree(work); 138 kfree(work);
170} 139}
171 140
172int amdgpu_crtc_page_flip(struct drm_crtc *crtc, 141int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
173 struct drm_framebuffer *fb, 142 struct drm_framebuffer *fb,
174 struct drm_pending_vblank_event *event, 143 struct drm_pending_vblank_event *event,
175 uint32_t page_flip_flags) 144 uint32_t page_flip_flags, uint32_t target)
176{ 145{
177 struct drm_device *dev = crtc->dev; 146 struct drm_device *dev = crtc->dev;
178 struct amdgpu_device *adev = dev->dev_private; 147 struct amdgpu_device *adev = dev->dev_private;
@@ -191,7 +160,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
191 if (work == NULL) 160 if (work == NULL)
192 return -ENOMEM; 161 return -ENOMEM;
193 162
194 INIT_WORK(&work->flip_work, amdgpu_flip_work_func); 163 INIT_DELAYED_WORK(&work->flip_work, amdgpu_flip_work_func);
195 INIT_WORK(&work->unpin_work, amdgpu_unpin_work_func); 164 INIT_WORK(&work->unpin_work, amdgpu_unpin_work_func);
196 165
197 work->event = event; 166 work->event = event;
@@ -237,12 +206,8 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
237 amdgpu_bo_unreserve(new_rbo); 206 amdgpu_bo_unreserve(new_rbo);
238 207
239 work->base = base; 208 work->base = base;
240 209 work->target_vblank = target - drm_crtc_vblank_count(crtc) +
241 r = drm_crtc_vblank_get(crtc); 210 amdgpu_get_vblank_counter_kms(dev, work->crtc_id);
242 if (r) {
243 DRM_ERROR("failed to get vblank before flip\n");
244 goto pflip_cleanup;
245 }
246 211
247 /* we borrow the event spin lock for protecting flip_wrok */ 212 /* we borrow the event spin lock for protecting flip_wrok */
248 spin_lock_irqsave(&crtc->dev->event_lock, flags); 213 spin_lock_irqsave(&crtc->dev->event_lock, flags);
@@ -250,7 +215,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
250 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); 215 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
251 spin_unlock_irqrestore(&crtc->dev->event_lock, flags); 216 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
252 r = -EBUSY; 217 r = -EBUSY;
253 goto vblank_cleanup; 218 goto pflip_cleanup;
254 } 219 }
255 220
256 amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING; 221 amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
@@ -262,12 +227,9 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
262 /* update crtc fb */ 227 /* update crtc fb */
263 crtc->primary->fb = fb; 228 crtc->primary->fb = fb;
264 spin_unlock_irqrestore(&crtc->dev->event_lock, flags); 229 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
265 amdgpu_flip_work_func(&work->flip_work); 230 amdgpu_flip_work_func(&work->flip_work.work);
266 return 0; 231 return 0;
267 232
268vblank_cleanup:
269 drm_crtc_vblank_put(crtc);
270
271pflip_cleanup: 233pflip_cleanup:
272 if (unlikely(amdgpu_bo_reserve(new_rbo, false) != 0)) { 234 if (unlikely(amdgpu_bo_reserve(new_rbo, false) != 0)) {
273 DRM_ERROR("failed to reserve new rbo in error path\n"); 235 DRM_ERROR("failed to reserve new rbo in error path\n");