author:    Rob Clark <robdclark@gmail.com>  2013-12-13 10:41:07 -0500
committer: Rob Clark <robdclark@gmail.com>  2014-02-05 11:23:07 -0500
commit:    b69720c0f5d417310fbfd59c2d681bd90430a4f5
tree:      875255037afdf030ca23d4195a6d2ff456953209 /drivers/gpu/drm/msm
parent:    37033a7689b01d0c46f9cc450bdf9f02d86b7e57
drm/msm/mdp4: pageflip fixes
Backport a few fixes found in the course of getting mdp5 working.
There is a window of time after a pageflip is requested, before we
start scanning out the new fb (i.e. while we are waiting for the gpu).
During that time we need to continue holding a reference to the
still-current scanout fb, to keep the backing gem bo's from being
destroyed.
Possibly a common mdp_crtc parent class could be useful for sharing
some of this logic between mdp4_crtc and mdp5_crtc. OTOH, all of this
can be removed from the driver once atomic is in place, since
plane/crtc updates are deferred until all fb's are ready before
calling into .page_flip(), etc.
Signed-off-by: Rob Clark <robdclark@gmail.com>
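
To make the reference lifetime described above concrete, here is a minimal,
illustrative user-space sketch -- not the driver code. fb_new()/fb_get()/fb_put(),
kms_fb, scanout_fb and the printf "flush" are stand-ins for
drm_framebuffer_reference()/unreference(), mdp4_crtc->fb, mdp4_crtc->scanout_fb,
the unref_fb_work queue and crtc_flush() in the patch below. The point it models
is that the buffer the hardware is actually scanning out keeps a reference until
after the hardware has been switched to the new fb.

    #include <stdio.h>
    #include <stdlib.h>

    struct fb { int refs; const char *name; };

    static struct fb *fb_new(const char *name)
    {
            struct fb *f = calloc(1, sizeof(*f));
            f->refs = 1;
            f->name = name;
            return f;
    }

    static struct fb *fb_get(struct fb *f) { f->refs++; return f; }

    static void fb_put(struct fb *f)
    {
            if (--f->refs == 0) {
                    printf("  %s freed\n", f->name);
                    free(f);
            }
    }

    static struct fb *kms_fb;      /* what KMS logically considers current */
    static struct fb *scanout_fb;  /* what the hardware is actually reading */

    /* page_flip time: take a ref on the incoming fb and drop the previous
     * "logical" fb; scanout_fb is untouched, the gpu may still be busy. */
    static void update_fb(struct fb *new_fb)
    {
            struct fb *old = kms_fb;

            kms_fb = fb_get(new_fb);
            if (old)
                    fb_put(old);    /* deferred to vblank in the real driver */
    }

    /* gpu-done time: hw has been flushed to new_fb, so only now can the ref
     * that kept the old scanout buffer alive be dropped. */
    static void update_scanout(struct fb *new_fb)
    {
            printf("  flush hw -> %s\n", new_fb->name);
            if (scanout_fb)
                    fb_put(scanout_fb);  /* deferred to vblank in the real driver */
            scanout_fb = fb_get(new_fb);
    }

    int main(void)
    {
            struct fb *a = fb_new("fb A"), *b = fb_new("fb B");

            printf("modeset to A:\n");
            update_fb(a);
            update_scanout(a);
            fb_put(a);              /* drop creation ref; A stays alive via scanout */

            printf("page_flip to B (gpu still rendering):\n");
            update_fb(b);           /* A must NOT be freed here */

            printf("gpu done, complete flip:\n");
            update_scanout(b);      /* only now is A's last ref dropped */
            fb_put(b);              /* B remains referenced as the final scanout fb */
            return 0;
    }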
Diffstat (limited to 'drivers/gpu/drm/msm')
 -rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c | 157
 1 file changed, 100 insertions(+), 57 deletions(-)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 1964f4f0d452..ed739e887c25 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -57,9 +57,16 @@ struct mdp4_crtc {
 #define PENDING_FLIP 0x2
         atomic_t pending;
 
-        /* the fb that we currently hold a scanout ref to: */
+        /* the fb that we logically (from PoV of KMS API) hold a ref
+         * to. Which we may not yet be scanning out (we may still
+         * be scanning out previous in case of page_flip while waiting
+         * for gpu rendering to complete:
+         */
         struct drm_framebuffer *fb;
 
+        /* the fb that we currently hold a scanout ref to: */
+        struct drm_framebuffer *scanout_fb;
+
         /* for unref'ing framebuffers after scanout completes: */
         struct drm_flip_work unref_fb_work;
 
@@ -77,24 +84,73 @@ static struct mdp4_kms *get_kms(struct drm_crtc *crtc)
         return to_mdp4_kms(to_mdp_kms(priv->kms));
 }
 
-static void update_fb(struct drm_crtc *crtc, bool async,
-                struct drm_framebuffer *new_fb)
+static void request_pending(struct drm_crtc *crtc, uint32_t pending)
 {
         struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-        struct drm_framebuffer *old_fb = mdp4_crtc->fb;
 
-        if (old_fb)
-                drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb);
+        atomic_or(pending, &mdp4_crtc->pending);
+        mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
+}
+
+static void crtc_flush(struct drm_crtc *crtc)
+{
+        struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+        struct mdp4_kms *mdp4_kms = get_kms(crtc);
+        uint32_t i, flush = 0;
+
+        for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
+                struct drm_plane *plane = mdp4_crtc->planes[i];
+                if (plane) {
+                        enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
+                        flush |= pipe2flush(pipe_id);
+                }
+        }
+        flush |= ovlp2flush(mdp4_crtc->ovlp);
+
+        DBG("%s: flush=%08x", mdp4_crtc->name, flush);
+
+        mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
+}
+
+static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb)
+{
+        struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+        struct drm_framebuffer *old_fb = mdp4_crtc->fb;
 
         /* grab reference to incoming scanout fb: */
         drm_framebuffer_reference(new_fb);
         mdp4_crtc->base.fb = new_fb;
         mdp4_crtc->fb = new_fb;
 
-        if (!async) {
-                /* enable vblank to pick up the old_fb */
-                mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
-        }
+        if (old_fb)
+                drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb);
+}
+
+/* unlike update_fb(), take a ref to the new scanout fb *before* updating
+ * plane, then call this.  Needed to ensure we don't unref the buffer that
+ * is actually still being scanned out.
+ *
+ * Note that this whole thing goes away with atomic.. since we can defer
+ * calling into driver until rendering is done.
+ */
+static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
+{
+        struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+
+        /* flush updates, to make sure hw is updated to new scanout fb,
+         * so that we can safely queue unref to current fb (ie. next
+         * vblank we know hw is done w/ previous scanout_fb).
+         */
+        crtc_flush(crtc);
+
+        if (mdp4_crtc->scanout_fb)
+                drm_flip_work_queue(&mdp4_crtc->unref_fb_work,
+                                mdp4_crtc->scanout_fb);
+
+        mdp4_crtc->scanout_fb = fb;
+
+        /* enable vblank to complete flip: */
+        request_pending(crtc, PENDING_FLIP);
 }
 
 /* if file!=NULL, this is preclose potential cancel-flip path */
@@ -120,34 +176,6 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
         spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
-static void crtc_flush(struct drm_crtc *crtc)
-{
-        struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-        struct mdp4_kms *mdp4_kms = get_kms(crtc);
-        uint32_t i, flush = 0;
-
-        for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
-                struct drm_plane *plane = mdp4_crtc->planes[i];
-                if (plane) {
-                        enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
-                        flush |= pipe2flush(pipe_id);
-                }
-        }
-        flush |= ovlp2flush(mdp4_crtc->ovlp);
-
-        DBG("%s: flush=%08x", mdp4_crtc->name, flush);
-
-        mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
-}
-
-static void request_pending(struct drm_crtc *crtc, uint32_t pending)
-{
-        struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-
-        atomic_or(pending, &mdp4_crtc->pending);
-        mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
-}
-
 static void pageflip_cb(struct msm_fence_cb *cb)
 {
         struct mdp4_crtc *mdp4_crtc =
@@ -158,11 +186,9 @@ static void pageflip_cb(struct msm_fence_cb *cb)
         if (!fb)
                 return;
 
+        drm_framebuffer_reference(fb);
         mdp4_plane_set_scanout(mdp4_crtc->plane, fb);
-        crtc_flush(crtc);
-
-        /* enable vblank to complete flip: */
-        request_pending(crtc, PENDING_FLIP);
+        update_scanout(crtc, fb);
 }
 
 static void unref_fb_worker(struct drm_flip_work *work, void *val)
@@ -320,6 +346,20 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
                         mode->vsync_end, mode->vtotal,
                         mode->type, mode->flags);
 
+        /* grab extra ref for update_scanout() */
+        drm_framebuffer_reference(crtc->fb);
+
+        ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->fb,
+                        0, 0, mode->hdisplay, mode->vdisplay,
+                        x << 16, y << 16,
+                        mode->hdisplay << 16, mode->vdisplay << 16);
+        if (ret) {
+                drm_framebuffer_unreference(crtc->fb);
+                dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
+                                mdp4_crtc->name, ret);
+                return ret;
+        }
+
         mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
                         MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
                         MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay));
@@ -341,24 +381,15 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
 
         mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1);
 
-        update_fb(crtc, false, crtc->fb);
-
-        ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->fb,
-                        0, 0, mode->hdisplay, mode->vdisplay,
-                        x << 16, y << 16,
-                        mode->hdisplay << 16, mode->vdisplay << 16);
-        if (ret) {
-                dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
-                                mdp4_crtc->name, ret);
-                return ret;
-        }
-
         if (dma == DMA_E) {
                 mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000);
                 mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000);
                 mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000);
         }
 
+        update_fb(crtc, crtc->fb);
+        update_scanout(crtc, crtc->fb);
+
         return 0;
 }
 
@@ -385,13 +416,24 @@ static int mdp4_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
         struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
         struct drm_plane *plane = mdp4_crtc->plane;
         struct drm_display_mode *mode = &crtc->mode;
+        int ret;
 
-        update_fb(crtc, false, crtc->fb);
+        /* grab extra ref for update_scanout() */
+        drm_framebuffer_reference(crtc->fb);
 
-        return mdp4_plane_mode_set(plane, crtc, crtc->fb,
+        ret = mdp4_plane_mode_set(plane, crtc, crtc->fb,
                         0, 0, mode->hdisplay, mode->vdisplay,
                         x << 16, y << 16,
                         mode->hdisplay << 16, mode->vdisplay << 16);
+        if (ret) {
+                drm_framebuffer_unreference(crtc->fb);
+                return ret;
+        }
+
+        update_fb(crtc, crtc->fb);
+        update_scanout(crtc, crtc->fb);
+
+        return 0;
 }
 
 static void mdp4_crtc_load_lut(struct drm_crtc *crtc)
@@ -419,7 +461,7 @@ static int mdp4_crtc_page_flip(struct drm_crtc *crtc,
         mdp4_crtc->event = event;
         spin_unlock_irqrestore(&dev->event_lock, flags);
 
-        update_fb(crtc, true, new_fb);
+        update_fb(crtc, new_fb);
 
         return msm_gem_queue_inactive_cb(obj, &mdp4_crtc->pageflip_cb);
 }
@@ -713,6 +755,7 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
         crtc = &mdp4_crtc->base;
 
         mdp4_crtc->plane = plane;
+        mdp4_crtc->id = id;
 
         mdp4_crtc->ovlp = ovlp_id;
         mdp4_crtc->dma = dma_id;