Diffstat (limited to 'drivers/gpu/drm/imx/ipuv3-crtc.c')
-rw-r--r--	drivers/gpu/drm/imx/ipuv3-crtc.c	209
1 file changed, 27 insertions(+), 182 deletions(-)
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index f9d5d7c5cd79..3e8253455121 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -24,8 +24,6 @@
 #include <linux/fb.h>
 #include <linux/clk.h>
 #include <linux/errno.h>
-#include <linux/reservation.h>
-#include <linux/dma-buf.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 
@@ -35,23 +33,6 @@
 
 #define DRIVER_DESC		"i.MX IPUv3 Graphics"
 
-enum ipu_flip_status {
-	IPU_FLIP_NONE,
-	IPU_FLIP_PENDING,
-	IPU_FLIP_SUBMITTED,
-};
-
-struct ipu_flip_work {
-	struct work_struct		unref_work;
-	struct drm_gem_object		*bo;
-	struct drm_pending_vblank_event *page_flip_event;
-	struct work_struct		fence_work;
-	struct ipu_crtc			*crtc;
-	struct fence			*excl;
-	unsigned			shared_count;
-	struct fence			**shared;
-};
-
 struct ipu_crtc {
 	struct device		*dev;
 	struct drm_crtc		base;
@@ -62,10 +43,6 @@ struct ipu_crtc {
 
 	struct ipu_dc		*dc;
 	struct ipu_di		*di;
-	int			enabled;
-	enum ipu_flip_status	flip_state;
-	struct workqueue_struct *flip_queue;
-	struct ipu_flip_work	*flip_work;
 	int			irq;
 };
 
@@ -75,34 +52,26 @@ static void ipu_crtc_enable(struct ipu_crtc *ipu_crtc)
 {
 	struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
 
-	if (ipu_crtc->enabled)
-		return;
-
 	ipu_dc_enable(ipu);
 	ipu_dc_enable_channel(ipu_crtc->dc);
 	ipu_di_enable(ipu_crtc->di);
-	ipu_crtc->enabled = 1;
-
-	/*
-	 * In order not to be warned on enabling vblank failure,
-	 * we should call drm_crtc_vblank_on() after ->enabled is set to 1.
-	 */
-	drm_crtc_vblank_on(&ipu_crtc->base);
 }
 
 static void ipu_crtc_disable(struct ipu_crtc *ipu_crtc)
 {
 	struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
-
-	if (!ipu_crtc->enabled)
-		return;
+	struct drm_crtc *crtc = &ipu_crtc->base;
 
 	ipu_dc_disable_channel(ipu_crtc->dc);
 	ipu_di_disable(ipu_crtc->di);
 	ipu_dc_disable(ipu);
-	ipu_crtc->enabled = 0;
 
-	drm_crtc_vblank_off(&ipu_crtc->base);
+	spin_lock_irq(&crtc->dev->event_lock);
+	if (crtc->state->event) {
+		drm_crtc_send_vblank_event(crtc, crtc->state->event);
+		crtc->state->event = NULL;
+	}
+	spin_unlock_irq(&crtc->dev->event_lock);
 }
 
 static void ipu_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -123,151 +92,21 @@ static void ipu_crtc_dpms(struct drm_crtc *crtc, int mode)
 	}
 }
 
-static void ipu_flip_unref_work_func(struct work_struct *__work)
-{
-	struct ipu_flip_work *work =
-			container_of(__work, struct ipu_flip_work, unref_work);
-
-	drm_gem_object_unreference_unlocked(work->bo);
-	kfree(work);
-}
-
-static void ipu_flip_fence_work_func(struct work_struct *__work)
-{
-	struct ipu_flip_work *work =
-			container_of(__work, struct ipu_flip_work, fence_work);
-	int i;
-
-	/* wait for all fences attached to the FB obj to signal */
-	if (work->excl) {
-		fence_wait(work->excl, false);
-		fence_put(work->excl);
-	}
-	for (i = 0; i < work->shared_count; i++) {
-		fence_wait(work->shared[i], false);
-		fence_put(work->shared[i]);
-	}
-
-	work->crtc->flip_state = IPU_FLIP_SUBMITTED;
-}
-
-static int ipu_page_flip(struct drm_crtc *crtc,
-			 struct drm_framebuffer *fb,
-			 struct drm_pending_vblank_event *event,
-			 uint32_t page_flip_flags)
-{
-	struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
-	struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
-	struct ipu_flip_work *flip_work;
-	int ret;
-
-	if (ipu_crtc->flip_state != IPU_FLIP_NONE)
-		return -EBUSY;
-
-	ret = imx_drm_crtc_vblank_get(ipu_crtc->imx_crtc);
-	if (ret) {
-		dev_dbg(ipu_crtc->dev, "failed to acquire vblank counter\n");
-		list_del(&event->base.link);
-
-		return ret;
-	}
-
-	flip_work = kzalloc(sizeof *flip_work, GFP_KERNEL);
-	if (!flip_work) {
-		ret = -ENOMEM;
-		goto put_vblank;
-	}
-	INIT_WORK(&flip_work->unref_work, ipu_flip_unref_work_func);
-	flip_work->page_flip_event = event;
-
-	/* get BO backing the old framebuffer and take a reference */
-	flip_work->bo = &drm_fb_cma_get_gem_obj(crtc->primary->fb, 0)->base;
-	drm_gem_object_reference(flip_work->bo);
-
-	ipu_crtc->flip_work = flip_work;
-	/*
-	 * If the object has a DMABUF attached, we need to wait on its fences
-	 * if there are any.
-	 */
-	if (cma_obj->base.dma_buf) {
-		INIT_WORK(&flip_work->fence_work, ipu_flip_fence_work_func);
-		flip_work->crtc = ipu_crtc;
-
-		ret = reservation_object_get_fences_rcu(
-				cma_obj->base.dma_buf->resv, &flip_work->excl,
-				&flip_work->shared_count, &flip_work->shared);
-
-		if (unlikely(ret)) {
-			DRM_ERROR("failed to get fences for buffer\n");
-			goto free_flip_work;
-		}
-
-		/* No need to queue the worker if the are no fences */
-		if (!flip_work->excl && !flip_work->shared_count) {
-			ipu_crtc->flip_state = IPU_FLIP_SUBMITTED;
-		} else {
-			ipu_crtc->flip_state = IPU_FLIP_PENDING;
-			queue_work(ipu_crtc->flip_queue,
-				   &flip_work->fence_work);
-		}
-	} else {
-		ipu_crtc->flip_state = IPU_FLIP_SUBMITTED;
-	}
-
-	if (crtc->primary->state)
-		drm_atomic_set_fb_for_plane(crtc->primary->state, fb);
-
-	return 0;
-
-free_flip_work:
-	drm_gem_object_unreference_unlocked(flip_work->bo);
-	kfree(flip_work);
-	ipu_crtc->flip_work = NULL;
-put_vblank:
-	imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc);
-
-	return ret;
-}
-
 static const struct drm_crtc_funcs ipu_crtc_funcs = {
-	.set_config = drm_crtc_helper_set_config,
+	.set_config = drm_atomic_helper_set_config,
 	.destroy = drm_crtc_cleanup,
-	.page_flip = ipu_page_flip,
+	.page_flip = drm_atomic_helper_page_flip,
 	.reset = drm_atomic_helper_crtc_reset,
 	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
 	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
 };
 
-static void ipu_crtc_handle_pageflip(struct ipu_crtc *ipu_crtc)
-{
-	unsigned long flags;
-	struct drm_device *drm = ipu_crtc->base.dev;
-	struct ipu_flip_work *work = ipu_crtc->flip_work;
-
-	spin_lock_irqsave(&drm->event_lock, flags);
-	if (work->page_flip_event)
-		drm_crtc_send_vblank_event(&ipu_crtc->base,
-					   work->page_flip_event);
-	imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc);
-	spin_unlock_irqrestore(&drm->event_lock, flags);
-}
-
 static irqreturn_t ipu_irq_handler(int irq, void *dev_id)
 {
 	struct ipu_crtc *ipu_crtc = dev_id;
 
 	imx_drm_handle_vblank(ipu_crtc->imx_crtc);
 
-	if (ipu_crtc->flip_state == IPU_FLIP_SUBMITTED) {
-		struct ipu_plane *plane = ipu_crtc->plane[0];
-
-		ipu_plane_set_base(plane, ipu_crtc->base.primary->fb);
-		ipu_crtc_handle_pageflip(ipu_crtc);
-		queue_work(ipu_crtc->flip_queue,
-			   &ipu_crtc->flip_work->unref_work);
-		ipu_crtc->flip_state = IPU_FLIP_NONE;
-	}
-
 	return IRQ_HANDLED;
 }
 
@@ -310,9 +149,26 @@ static void ipu_crtc_commit(struct drm_crtc *crtc)
 static int ipu_crtc_atomic_check(struct drm_crtc *crtc,
 				 struct drm_crtc_state *state)
 {
+	u32 primary_plane_mask = 1 << drm_plane_index(crtc->primary);
+
+	if (state->active && (primary_plane_mask & state->plane_mask) == 0)
+		return -EINVAL;
+
 	return 0;
 }
 
+static void ipu_crtc_atomic_begin(struct drm_crtc *crtc,
+				  struct drm_crtc_state *old_crtc_state)
+{
+	spin_lock_irq(&crtc->dev->event_lock);
+	if (crtc->state->event) {
+		WARN_ON(drm_crtc_vblank_get(crtc));
+		drm_crtc_arm_vblank_event(crtc, crtc->state->event);
+		crtc->state->event = NULL;
+	}
+	spin_unlock_irq(&crtc->dev->event_lock);
+}
+
 static void ipu_crtc_mode_set_nofb(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
@@ -371,25 +227,17 @@ static void ipu_crtc_mode_set_nofb(struct drm_crtc *crtc)
 static const struct drm_crtc_helper_funcs ipu_helper_funcs = {
 	.dpms = ipu_crtc_dpms,
 	.mode_fixup = ipu_crtc_mode_fixup,
-	.mode_set = drm_helper_crtc_mode_set,
 	.mode_set_nofb = ipu_crtc_mode_set_nofb,
 	.prepare = ipu_crtc_prepare,
 	.commit = ipu_crtc_commit,
 	.atomic_check = ipu_crtc_atomic_check,
+	.atomic_begin = ipu_crtc_atomic_begin,
 };
 
 static int ipu_enable_vblank(struct drm_crtc *crtc)
 {
 	struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
 
-	/*
-	 * ->commit is done after ->mode_set in drm_crtc_helper_set_mode(),
-	 * so waiting for vblank in drm_plane_helper_commit() will timeout.
-	 * Check the state here to avoid the waiting.
-	 */
-	if (!ipu_crtc->enabled)
-		return -EINVAL;
-
 	enable_irq(ipu_crtc->irq);
 
 	return 0;
@@ -508,8 +356,6 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
 	/* Only enable IRQ when we actually need it to trigger work. */
 	disable_irq(ipu_crtc->irq);
 
-	ipu_crtc->flip_queue = create_singlethread_workqueue("ipu-crtc-flip");
-
 	return 0;
 
 err_put_plane1_res:
@@ -554,7 +400,6 @@ static void ipu_drm_unbind(struct device *dev, struct device *master,
 
 	imx_drm_remove_crtc(ipu_crtc->imx_crtc);
 
-	destroy_workqueue(ipu_crtc->flip_queue);
 	ipu_put_resources(ipu_crtc);
 	if (ipu_crtc->plane[1])
 		ipu_plane_put_resources(ipu_crtc->plane[1]);
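
Note on the pattern: the two added hunks above (the event flush in ipu_crtc_disable() and the new ipu_crtc_atomic_begin()) together implement the vblank-event contract that lets .page_flip be wired to drm_atomic_helper_page_flip instead of the deleted hand-rolled ipu_page_flip()/flip_work path. A minimal generic sketch of that contract follows; the foo_* names are hypothetical, and only the DRM calls that actually appear in the hunks above are assumed.

/*
 * Sketch of the atomic vblank-event contract adopted by this patch.
 * foo_* names are hypothetical; drm_crtc_vblank_get(),
 * drm_crtc_arm_vblank_event() and drm_crtc_send_vblank_event() are
 * the calls used in the hunks above (4.x-era DRM APIs).
 */
#include <drm/drmP.h>		/* era-appropriate catch-all header */
#include <drm/drm_crtc.h>

static void foo_crtc_atomic_begin(struct drm_crtc *crtc,
				  struct drm_crtc_state *old_crtc_state)
{
	/* Normal path: hand the pending flip event to the vblank code. */
	spin_lock_irq(&crtc->dev->event_lock);
	if (crtc->state->event) {
		/* Hold a vblank reference; dropped when the event fires. */
		WARN_ON(drm_crtc_vblank_get(crtc));
		drm_crtc_arm_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
	}
	spin_unlock_irq(&crtc->dev->event_lock);
}

static void foo_crtc_disable(struct drm_crtc *crtc)
{
	/* ... scanout hardware is shut down here ... */

	/*
	 * Disable path: no further vblanks will arrive, so any event
	 * still attached to the state must be completed by hand, or
	 * whoever committed the state would wait on it forever.
	 */
	spin_lock_irq(&crtc->dev->event_lock);
	if (crtc->state->event) {
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
	}
	spin_unlock_irq(&crtc->dev->event_lock);
}

With both paths covered, the helper can stash the event in the CRTC state and the driver guarantees it is delivered exactly once, which is what allows this patch to drop the IPU_FLIP_* state machine, the flip workqueue, and the manual fence waiting.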