author		Liu Ying <gnuiyl@gmail.com>	2016-07-08 05:40:59 -0400
committer	Philipp Zabel <p.zabel@pengutronix.de>	2016-07-12 12:24:03 -0400
commit		5f2f911578fb13b0110e125d43775f08cf1dd281 (patch)
tree		06cb1b02843dd9c874504e143af14d2546c2a985 /drivers/gpu/drm/imx
parent		032003c5cd744e8e0baf5430fc5b3fd5462208e4 (diff)
drm/imx: atomic phase 3 step 1: Use atomic configuration
Replacing drm_crtc_helper_set_config() with drm_atomic_helper_set_config() and converting the suspend/resume operations to atomic allows us to use atomic configurations. This also lets us remove the crtc_funcs->mode_set callback, as it is no longer used. Also, change the plane_funcs->update/disable_plane callbacks from the transitional versions to the atomic versions.

Furthermore, switching to the pure atomic version of the set_config callback means that we may implement the CRTC/plane atomic checks using the new CRTC/plane states instead of the legacy ones, and that we may remove the private ipu_crtc->enabled state, which was only left in place for the transitional atomic helpers in phase 1. Page flip is also switched to the atomic version.

Last, the legacy function drm_helper_disable_unused_functions() is removed from ->load in order not to confuse the atomic driver.

Signed-off-by: Liu Ying <gnuiyl@gmail.com>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Philipp Zabel <p.zabel@pengutronix.de>
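[Editor's note] In outline, the conversion points the remaining legacy entry points at their atomic-helper counterparts and lets those helpers drive the driver's own atomic_check/atomic_begin code; the plane's update_plane/disable_plane likewise move to drm_atomic_helper_update_plane()/drm_atomic_helper_disable_plane(). For orientation, the two central callback tables as they look after this patch, condensed from the hunks below:

	/* imx-drm-core.c: check and commit full atomic configurations */
	static const struct drm_mode_config_funcs imx_drm_mode_config_funcs = {
		.fb_create = drm_fb_cma_create,
		.output_poll_changed = imx_drm_output_poll_changed,
		.atomic_check = drm_atomic_helper_check,
		.atomic_commit = drm_atomic_helper_commit,
	};

	/* ipuv3-crtc.c: set_config and page_flip now route through atomic state */
	static const struct drm_crtc_funcs ipu_crtc_funcs = {
		.set_config = drm_atomic_helper_set_config,
		.destroy = drm_crtc_cleanup,
		.page_flip = drm_atomic_helper_page_flip,
		.reset = drm_atomic_helper_crtc_reset,
		.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
		.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
	};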
Diffstat (limited to 'drivers/gpu/drm/imx')
-rw-r--r--	drivers/gpu/drm/imx/imx-drm-core.c	 76
-rw-r--r--	drivers/gpu/drm/imx/ipuv3-crtc.c	209
-rw-r--r--	drivers/gpu/drm/imx/ipuv3-plane.c	135
-rw-r--r--	drivers/gpu/drm/imx/ipuv3-plane.h	  2
4 files changed, 114 insertions(+), 308 deletions(-)
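[Editor's note] The suspend/resume conversion follows the standard atomic-helper pattern: drm_atomic_helper_suspend() snapshots and disables the current configuration, and drm_atomic_helper_resume() replays the saved state. A minimal sketch of the new suspend path, condensed from the imx-drm-core.c hunks below (the early NULL check of drm_dev is omitted here):

	static int imx_drm_suspend(struct device *dev)
	{
		struct drm_device *drm_dev = dev_get_drvdata(dev);
		struct imx_drm_device *imxdrm = drm_dev->dev_private;

		drm_kms_helper_poll_disable(drm_dev);

		/* snapshot the current atomic state; resume hands it back */
		imxdrm->state = drm_atomic_helper_suspend(drm_dev);
		if (IS_ERR(imxdrm->state)) {
			drm_kms_helper_poll_enable(drm_dev);
			return PTR_ERR(imxdrm->state);
		}

		return 0;
	}

On resume the saved state is simply restored via drm_atomic_helper_resume(drm_dev, imxdrm->state), replacing the legacy drm_helper_resume_force_mode() call.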
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index f6e44c220874..f14ad2bbc1d7 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -15,10 +15,14 @@
  */
 #include <linux/component.h>
 #include <linux/device.h>
+#include <linux/dma-buf.h>
 #include <linux/fb.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/reservation.h>
 #include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_gem_cma_helper.h>
@@ -41,6 +45,7 @@ struct imx_drm_device {
 	struct imx_drm_crtc *crtc[MAX_CRTC];
 	unsigned int pipes;
 	struct drm_fbdev_cma *fbhelper;
+	struct drm_atomic_state *state;
 };
 
 struct imx_drm_crtc {
@@ -169,6 +174,63 @@ static void imx_drm_output_poll_changed(struct drm_device *drm)
 static const struct drm_mode_config_funcs imx_drm_mode_config_funcs = {
 	.fb_create = drm_fb_cma_create,
 	.output_poll_changed = imx_drm_output_poll_changed,
+	.atomic_check = drm_atomic_helper_check,
+	.atomic_commit = drm_atomic_helper_commit,
+};
+
+static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state)
+{
+	struct drm_device *dev = state->dev;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+	struct drm_plane_state *plane_state;
+	struct drm_gem_cma_object *cma_obj;
+	struct fence *excl;
+	unsigned shared_count;
+	struct fence **shared;
+	unsigned int i, j;
+	int ret;
+
+	/* Wait for fences. */
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		plane_state = crtc->primary->state;
+		if (plane_state->fb) {
+			cma_obj = drm_fb_cma_get_gem_obj(plane_state->fb, 0);
+			if (cma_obj->base.dma_buf) {
+				ret = reservation_object_get_fences_rcu(
+					cma_obj->base.dma_buf->resv, &excl,
+					&shared_count, &shared);
+				if (unlikely(ret))
+					DRM_ERROR("failed to get fences "
+						  "for buffer\n");
+
+				if (excl) {
+					fence_wait(excl, false);
+					fence_put(excl);
+				}
+				for (j = 0; j < shared_count; j++) {
+					fence_wait(shared[j], false);
+					fence_put(shared[j]);
+				}
+			}
+		}
+	}
+
+	drm_atomic_helper_commit_modeset_disables(dev, state);
+
+	drm_atomic_helper_commit_planes(dev, state, true);
+
+	drm_atomic_helper_commit_modeset_enables(dev, state);
+
+	drm_atomic_helper_commit_hw_done(state);
+
+	drm_atomic_helper_wait_for_vblanks(dev, state);
+
+	drm_atomic_helper_cleanup_planes(dev, state);
+}
+
+static struct drm_mode_config_helper_funcs imx_drm_mode_config_helpers = {
+	.atomic_commit_tail = imx_drm_atomic_commit_tail,
 };
 
 /*
@@ -210,6 +272,7 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
 	drm->mode_config.max_width = 4096;
 	drm->mode_config.max_height = 4096;
 	drm->mode_config.funcs = &imx_drm_mode_config_funcs;
+	drm->mode_config.helper_private = &imx_drm_mode_config_helpers;
 
 	drm_mode_config_init(drm);
 
@@ -252,7 +315,6 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
 		dev_warn(drm->dev, "Invalid legacyfb_depth. Defaulting to 16bpp\n");
 		legacyfb_depth = 16;
 	}
-	drm_helper_disable_unused_functions(drm);
 	imxdrm->fbhelper = drm_fbdev_cma_init(drm, legacyfb_depth,
 				drm->mode_config.num_crtc, MAX_CRTC);
 	if (IS_ERR(imxdrm->fbhelper)) {
@@ -454,6 +516,7 @@ static int imx_drm_platform_remove(struct platform_device *pdev)
 static int imx_drm_suspend(struct device *dev)
 {
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
+	struct imx_drm_device *imxdrm;
 
 	/* The drm_dev is NULL before .load hook is called */
 	if (drm_dev == NULL)
@@ -461,17 +524,26 @@ static int imx_drm_suspend(struct device *dev)
 
 	drm_kms_helper_poll_disable(drm_dev);
 
+	imxdrm = drm_dev->dev_private;
+	imxdrm->state = drm_atomic_helper_suspend(drm_dev);
+	if (IS_ERR(imxdrm->state)) {
+		drm_kms_helper_poll_enable(drm_dev);
+		return PTR_ERR(imxdrm->state);
+	}
+
 	return 0;
 }
 
 static int imx_drm_resume(struct device *dev)
 {
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
+	struct imx_drm_device *imx_drm;
 
 	if (drm_dev == NULL)
 		return 0;
 
-	drm_helper_resume_force_mode(drm_dev);
+	imx_drm = drm_dev->dev_private;
+	drm_atomic_helper_resume(drm_dev, imx_drm->state);
 	drm_kms_helper_poll_enable(drm_dev);
 
 	return 0;
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index f9d5d7c5cd79..3e8253455121 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -24,8 +24,6 @@
 #include <linux/fb.h>
 #include <linux/clk.h>
 #include <linux/errno.h>
-#include <linux/reservation.h>
-#include <linux/dma-buf.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 
@@ -35,23 +33,6 @@
 
 #define DRIVER_DESC		"i.MX IPUv3 Graphics"
 
-enum ipu_flip_status {
-	IPU_FLIP_NONE,
-	IPU_FLIP_PENDING,
-	IPU_FLIP_SUBMITTED,
-};
-
-struct ipu_flip_work {
-	struct work_struct unref_work;
-	struct drm_gem_object *bo;
-	struct drm_pending_vblank_event *page_flip_event;
-	struct work_struct fence_work;
-	struct ipu_crtc *crtc;
-	struct fence *excl;
-	unsigned shared_count;
-	struct fence **shared;
-};
-
 struct ipu_crtc {
 	struct device *dev;
 	struct drm_crtc base;
@@ -62,10 +43,6 @@ struct ipu_crtc {
 
 	struct ipu_dc *dc;
 	struct ipu_di *di;
-	int enabled;
-	enum ipu_flip_status flip_state;
-	struct workqueue_struct *flip_queue;
-	struct ipu_flip_work *flip_work;
 	int irq;
 };
 
@@ -75,34 +52,26 @@ static void ipu_crtc_enable(struct ipu_crtc *ipu_crtc)
 {
 	struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
 
-	if (ipu_crtc->enabled)
-		return;
-
 	ipu_dc_enable(ipu);
 	ipu_dc_enable_channel(ipu_crtc->dc);
 	ipu_di_enable(ipu_crtc->di);
-	ipu_crtc->enabled = 1;
-
-	/*
-	 * In order not to be warned on enabling vblank failure,
-	 * we should call drm_crtc_vblank_on() after ->enabled is set to 1.
-	 */
-	drm_crtc_vblank_on(&ipu_crtc->base);
 }
 
 static void ipu_crtc_disable(struct ipu_crtc *ipu_crtc)
 {
 	struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
-
-	if (!ipu_crtc->enabled)
-		return;
+	struct drm_crtc *crtc = &ipu_crtc->base;
 
 	ipu_dc_disable_channel(ipu_crtc->dc);
 	ipu_di_disable(ipu_crtc->di);
 	ipu_dc_disable(ipu);
-	ipu_crtc->enabled = 0;
 
-	drm_crtc_vblank_off(&ipu_crtc->base);
+	spin_lock_irq(&crtc->dev->event_lock);
+	if (crtc->state->event) {
+		drm_crtc_send_vblank_event(crtc, crtc->state->event);
+		crtc->state->event = NULL;
+	}
+	spin_unlock_irq(&crtc->dev->event_lock);
 }
 
 static void ipu_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -123,151 +92,21 @@ static void ipu_crtc_dpms(struct drm_crtc *crtc, int mode)
 	}
 }
 
-static void ipu_flip_unref_work_func(struct work_struct *__work)
-{
-	struct ipu_flip_work *work =
-			container_of(__work, struct ipu_flip_work, unref_work);
-
-	drm_gem_object_unreference_unlocked(work->bo);
-	kfree(work);
-}
-
-static void ipu_flip_fence_work_func(struct work_struct *__work)
-{
-	struct ipu_flip_work *work =
-			container_of(__work, struct ipu_flip_work, fence_work);
-	int i;
-
-	/* wait for all fences attached to the FB obj to signal */
-	if (work->excl) {
-		fence_wait(work->excl, false);
-		fence_put(work->excl);
-	}
-	for (i = 0; i < work->shared_count; i++) {
-		fence_wait(work->shared[i], false);
-		fence_put(work->shared[i]);
-	}
-
-	work->crtc->flip_state = IPU_FLIP_SUBMITTED;
-}
-
-static int ipu_page_flip(struct drm_crtc *crtc,
-		struct drm_framebuffer *fb,
-		struct drm_pending_vblank_event *event,
-		uint32_t page_flip_flags)
-{
-	struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
-	struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
-	struct ipu_flip_work *flip_work;
-	int ret;
-
-	if (ipu_crtc->flip_state != IPU_FLIP_NONE)
-		return -EBUSY;
-
-	ret = imx_drm_crtc_vblank_get(ipu_crtc->imx_crtc);
-	if (ret) {
-		dev_dbg(ipu_crtc->dev, "failed to acquire vblank counter\n");
-		list_del(&event->base.link);
-
-		return ret;
-	}
-
-	flip_work = kzalloc(sizeof *flip_work, GFP_KERNEL);
-	if (!flip_work) {
-		ret = -ENOMEM;
-		goto put_vblank;
-	}
-	INIT_WORK(&flip_work->unref_work, ipu_flip_unref_work_func);
-	flip_work->page_flip_event = event;
-
-	/* get BO backing the old framebuffer and take a reference */
-	flip_work->bo = &drm_fb_cma_get_gem_obj(crtc->primary->fb, 0)->base;
-	drm_gem_object_reference(flip_work->bo);
-
-	ipu_crtc->flip_work = flip_work;
-	/*
-	 * If the object has a DMABUF attached, we need to wait on its fences
-	 * if there are any.
-	 */
-	if (cma_obj->base.dma_buf) {
-		INIT_WORK(&flip_work->fence_work, ipu_flip_fence_work_func);
-		flip_work->crtc = ipu_crtc;
-
-		ret = reservation_object_get_fences_rcu(
-			cma_obj->base.dma_buf->resv, &flip_work->excl,
-			&flip_work->shared_count, &flip_work->shared);
-
-		if (unlikely(ret)) {
-			DRM_ERROR("failed to get fences for buffer\n");
-			goto free_flip_work;
-		}
-
-		/* No need to queue the worker if there are no fences */
-		if (!flip_work->excl && !flip_work->shared_count) {
-			ipu_crtc->flip_state = IPU_FLIP_SUBMITTED;
-		} else {
-			ipu_crtc->flip_state = IPU_FLIP_PENDING;
-			queue_work(ipu_crtc->flip_queue,
-				   &flip_work->fence_work);
-		}
-	} else {
-		ipu_crtc->flip_state = IPU_FLIP_SUBMITTED;
-	}
-
-	if (crtc->primary->state)
-		drm_atomic_set_fb_for_plane(crtc->primary->state, fb);
-
-	return 0;
-
-free_flip_work:
-	drm_gem_object_unreference_unlocked(flip_work->bo);
-	kfree(flip_work);
-	ipu_crtc->flip_work = NULL;
-put_vblank:
-	imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc);
-
-	return ret;
-}
-
 static const struct drm_crtc_funcs ipu_crtc_funcs = {
-	.set_config = drm_crtc_helper_set_config,
+	.set_config = drm_atomic_helper_set_config,
 	.destroy = drm_crtc_cleanup,
-	.page_flip = ipu_page_flip,
+	.page_flip = drm_atomic_helper_page_flip,
 	.reset = drm_atomic_helper_crtc_reset,
 	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
 	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
 };
 
-static void ipu_crtc_handle_pageflip(struct ipu_crtc *ipu_crtc)
-{
-	unsigned long flags;
-	struct drm_device *drm = ipu_crtc->base.dev;
-	struct ipu_flip_work *work = ipu_crtc->flip_work;
-
-	spin_lock_irqsave(&drm->event_lock, flags);
-	if (work->page_flip_event)
-		drm_crtc_send_vblank_event(&ipu_crtc->base,
-				work->page_flip_event);
-	imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc);
-	spin_unlock_irqrestore(&drm->event_lock, flags);
-}
-
 static irqreturn_t ipu_irq_handler(int irq, void *dev_id)
 {
 	struct ipu_crtc *ipu_crtc = dev_id;
 
 	imx_drm_handle_vblank(ipu_crtc->imx_crtc);
 
-	if (ipu_crtc->flip_state == IPU_FLIP_SUBMITTED) {
-		struct ipu_plane *plane = ipu_crtc->plane[0];
-
-		ipu_plane_set_base(plane, ipu_crtc->base.primary->fb);
-		ipu_crtc_handle_pageflip(ipu_crtc);
-		queue_work(ipu_crtc->flip_queue,
-			   &ipu_crtc->flip_work->unref_work);
-		ipu_crtc->flip_state = IPU_FLIP_NONE;
-	}
-
 	return IRQ_HANDLED;
 }
 
@@ -310,9 +149,26 @@ static void ipu_crtc_commit(struct drm_crtc *crtc)
 static int ipu_crtc_atomic_check(struct drm_crtc *crtc,
 				 struct drm_crtc_state *state)
 {
+	u32 primary_plane_mask = 1 << drm_plane_index(crtc->primary);
+
+	if (state->active && (primary_plane_mask & state->plane_mask) == 0)
+		return -EINVAL;
+
 	return 0;
 }
 
+static void ipu_crtc_atomic_begin(struct drm_crtc *crtc,
+				  struct drm_crtc_state *old_crtc_state)
+{
+	spin_lock_irq(&crtc->dev->event_lock);
+	if (crtc->state->event) {
+		WARN_ON(drm_crtc_vblank_get(crtc));
+		drm_crtc_arm_vblank_event(crtc, crtc->state->event);
+		crtc->state->event = NULL;
+	}
+	spin_unlock_irq(&crtc->dev->event_lock);
+}
+
 static void ipu_crtc_mode_set_nofb(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
@@ -371,25 +227,17 @@ static void ipu_crtc_mode_set_nofb(struct drm_crtc *crtc)
 static const struct drm_crtc_helper_funcs ipu_helper_funcs = {
 	.dpms = ipu_crtc_dpms,
 	.mode_fixup = ipu_crtc_mode_fixup,
-	.mode_set = drm_helper_crtc_mode_set,
 	.mode_set_nofb = ipu_crtc_mode_set_nofb,
 	.prepare = ipu_crtc_prepare,
 	.commit = ipu_crtc_commit,
 	.atomic_check = ipu_crtc_atomic_check,
+	.atomic_begin = ipu_crtc_atomic_begin,
 };
 
 static int ipu_enable_vblank(struct drm_crtc *crtc)
 {
 	struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
 
-	/*
-	 * ->commit is done after ->mode_set in drm_crtc_helper_set_mode(),
-	 * so waiting for vblank in drm_plane_helper_commit() will timeout.
-	 * Check the state here to avoid the waiting.
-	 */
-	if (!ipu_crtc->enabled)
-		return -EINVAL;
-
 	enable_irq(ipu_crtc->irq);
 
 	return 0;
@@ -508,8 +356,6 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
 	/* Only enable IRQ when we actually need it to trigger work. */
 	disable_irq(ipu_crtc->irq);
 
-	ipu_crtc->flip_queue = create_singlethread_workqueue("ipu-crtc-flip");
-
 	return 0;
 
 err_put_plane1_res:
@@ -554,7 +400,6 @@ static void ipu_drm_unbind(struct device *dev, struct device *master,
 
 	imx_drm_remove_crtc(ipu_crtc->imx_crtc);
 
-	destroy_workqueue(ipu_crtc->flip_queue);
 	ipu_put_resources(ipu_crtc);
 	if (ipu_crtc->plane[1])
 		ipu_plane_put_resources(ipu_crtc->plane[1]);
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index afbc7402bff1..3f5f9566b152 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -14,6 +14,7 @@
  */
 
 #include <drm/drmP.h>
+#include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
@@ -55,122 +56,6 @@ int ipu_plane_irq(struct ipu_plane *ipu_plane)
 			IPU_IRQ_EOF);
 }
 
-int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb)
-{
-	struct drm_gem_cma_object *cma_obj[3], *old_cma_obj[3];
-	struct drm_plane_state *state = ipu_plane->base.state;
-	struct drm_framebuffer *old_fb = state->fb;
-	unsigned long eba, ubo, vbo, old_eba, old_ubo, old_vbo;
-	int active, i;
-	int x = state->src_x >> 16;
-	int y = state->src_y >> 16;
-
-	for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) {
-		cma_obj[i] = drm_fb_cma_get_gem_obj(fb, i);
-		if (!cma_obj[i]) {
-			DRM_DEBUG_KMS("plane %d entry is null.\n", i);
-			return -EFAULT;
-		}
-	}
-
-	for (i = 0; i < drm_format_num_planes(old_fb->pixel_format); i++) {
-		old_cma_obj[i] = drm_fb_cma_get_gem_obj(old_fb, i);
-		if (!old_cma_obj[i]) {
-			DRM_DEBUG_KMS("plane %d entry is null.\n", i);
-			return -EFAULT;
-		}
-	}
-
-	eba = cma_obj[0]->paddr + fb->offsets[0] +
-	      fb->pitches[0] * y + (fb->bits_per_pixel >> 3) * x;
-
-	if (eba & 0x7) {
-		DRM_DEBUG_KMS("base address must be a multiple of 8.\n");
-		return -EINVAL;
-	}
-
-	if (fb->pitches[0] < 1 || fb->pitches[0] > 16384) {
-		DRM_DEBUG_KMS("pitches out of range.\n");
-		return -EINVAL;
-	}
-
-	if (fb->pitches[0] != old_fb->pitches[0]) {
-		DRM_DEBUG_KMS("pitches must not change while plane is enabled.\n");
-		return -EINVAL;
-	}
-
-	switch (fb->pixel_format) {
-	case DRM_FORMAT_YUV420:
-	case DRM_FORMAT_YVU420:
-		/*
-		 * Multiplanar formats have to meet the following restrictions:
-		 * - The (up to) three plane addresses are EBA, EBA+UBO, EBA+VBO
-		 * - EBA, UBO and VBO are a multiple of 8
-		 * - UBO and VBO are unsigned and not larger than 0xfffff8
-		 * - Only EBA may be changed while scanout is active
-		 * - The strides of U and V planes must be identical.
-		 */
-		ubo = cma_obj[1]->paddr + fb->offsets[1] +
-		      fb->pitches[1] * y / 2 + x / 2 - eba;
-		vbo = cma_obj[2]->paddr + fb->offsets[2] +
-		      fb->pitches[2] * y / 2 + x / 2 - eba;
-
-		old_eba = old_cma_obj[0]->paddr + old_fb->offsets[0] +
-			  old_fb->pitches[0] * y +
-			  (old_fb->bits_per_pixel >> 3) * x;
-		old_ubo = old_cma_obj[1]->paddr + old_fb->offsets[1] +
-			  old_fb->pitches[1] * y / 2 + x / 2 - old_eba;
-		old_vbo = old_cma_obj[2]->paddr + old_fb->offsets[2] +
-			  old_fb->pitches[2] * y / 2 + x / 2 - old_eba;
-
-		if ((ubo & 0x7) || (vbo & 0x7)) {
-			DRM_DEBUG_KMS("U/V buffer offsets must be a multiple of 8.\n");
-			return -EINVAL;
-		}
-
-		if ((ubo > 0xfffff8) || (vbo > 0xfffff8)) {
-			DRM_DEBUG_KMS("U/V buffer offsets must be positive and not larger than 0xfffff8.\n");
-			return -EINVAL;
-		}
-
-		if (old_ubo != ubo || old_vbo != vbo) {
-			DRM_DEBUG_KMS("U/V buffer offsets must not change while plane is enabled.\n");
-			return -EINVAL;
-		}
-
-		if (fb->pitches[1] != fb->pitches[2]) {
-			DRM_DEBUG_KMS("U/V pitches must be identical.\n");
-			return -EINVAL;
-		}
-
-		if (fb->pitches[1] < 1 || fb->pitches[1] > 16384) {
-			DRM_DEBUG_KMS("U/V pitches out of range.\n");
-			return -EINVAL;
-		}
-
-		if (old_fb->pitches[1] != fb->pitches[1]) {
-			DRM_DEBUG_KMS("U/V pitches must not change while plane is enabled.\n");
-			return -EINVAL;
-		}
-
-		dev_dbg(ipu_plane->base.dev->dev,
-			"phys = %pad %pad %pad, x = %d, y = %d",
-			&cma_obj[0]->paddr, &cma_obj[1]->paddr,
-			&cma_obj[2]->paddr, x, y);
-		break;
-	default:
-		dev_dbg(ipu_plane->base.dev->dev, "phys = %pad, x = %d, y = %d",
-			&cma_obj[0]->paddr, x, y);
-		break;
-	}
-
-	active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch);
-	ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba);
-	ipu_idmac_select_buffer(ipu_plane->ipu_ch, !active);
-
-	return 0;
-}
-
 static inline unsigned long
 drm_plane_state_to_eba(struct drm_plane_state *state)
 {
@@ -360,8 +245,8 @@ static void ipu_plane_destroy(struct drm_plane *plane)
 }
 
 static const struct drm_plane_funcs ipu_plane_funcs = {
-	.update_plane = drm_plane_helper_update,
-	.disable_plane = drm_plane_helper_disable,
+	.update_plane = drm_atomic_helper_update_plane,
+	.disable_plane = drm_atomic_helper_disable_plane,
 	.destroy = ipu_plane_destroy,
 	.reset = drm_atomic_helper_plane_reset,
 	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
@@ -380,10 +265,18 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
 
 	/* Ok to disable */
 	if (!fb)
-		return old_fb ? 0 : -EINVAL;
+		return 0;
+
+	if (!state->crtc)
+		return -EINVAL;
+
+	crtc_state =
+		drm_atomic_get_existing_crtc_state(state->state, state->crtc);
+	if (WARN_ON(!crtc_state))
+		return -EINVAL;
 
 	/* CRTC should be enabled */
-	if (!state->crtc->enabled)
+	if (!crtc_state->enable)
 		return -EINVAL;
 
 	/* no scaling */
@@ -391,8 +284,6 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
 	    state->src_h >> 16 != state->crtc_h)
 		return -EINVAL;
 
-	crtc_state = state->crtc->state;
-
 	switch (plane->type) {
 	case DRM_PLANE_TYPE_PRIMARY:
 		/* full plane doesn't support partial off screen */
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.h b/drivers/gpu/drm/imx/ipuv3-plane.h
index c51a44ba3ddd..338b88a74eb6 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.h
+++ b/drivers/gpu/drm/imx/ipuv3-plane.h
@@ -37,8 +37,6 @@ int ipu_plane_mode_set(struct ipu_plane *plane, struct drm_crtc *crtc,
 		       uint32_t src_x, uint32_t src_y, uint32_t src_w,
 		       uint32_t src_h, bool interlaced);
 
-int ipu_plane_set_base(struct ipu_plane *plane, struct drm_framebuffer *fb);
-
 int ipu_plane_get_resources(struct ipu_plane *plane);
 void ipu_plane_put_resources(struct ipu_plane *plane);
 
44 42