author    Rob Clark <robdclark@gmail.com>  2013-11-30 17:51:47 -0500
committer Rob Clark <robdclark@gmail.com>  2014-01-09 14:44:06 -0500
commit    06c0dd96bfbba8a9368ffd7c4b12d3bfed37001d (patch)
tree      806d07197b4fd3ab0d73885329f7de1d890f22d6 /drivers/gpu/drm/msm
parent    dada25bd22a52a4351357209a8c227070cfd406d (diff)
drm/msm: add mdp5/apq8x74
Add support for the new MDP5 display controller block.  The mapping
between parts of the display controller and KMS is:

   plane   -> PIPE{RGBn,VIGn} \
   crtc    -> LM (layer mixer) |-> MDP "device"
   encoder -> INTF /
   connector -> HDMI/DSI/eDP/etc --> other device(s)

Unlike MDP4, it appears we can get by with a single encoder, rather
than needing a different implementation for DTV, DSI, etc.  (Ie. the
register interface is same, just different bases.)

Also unlike MDP4, all the IRQs for other blocks (HDMI, DSI, etc) are
routed through MDP.

And finally, MDP5 has this "Shared Memory Pool" (called "SMP"), from
which blocks need to be allocated to the active pipes based on fetch
stride.

Signed-off-by: Rob Clark <robdclark@gmail.com>
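To make the mapping concrete, here is a minimal sketch of how the new
entry points compose, condensed from modeset_init() in mdp5_kms.c in
this patch (error handling omitted; one RGB pipe backs one CRTC, which
is routed to INTF3 where the HDMI encoder sits):

	struct drm_plane *plane = mdp5_plane_init(dev, SSPP_RGB0, true);
	struct drm_crtc *crtc = mdp5_crtc_init(dev, plane, 0);
	struct drm_encoder *encoder = mdp5_encoder_init(dev, 3, INTF_HDMI);

	encoder->possible_crtcs = BIT(0);
	/* route the crtc's layer mixer output to the encoder's interface: */
	mdp5_crtc_set_intf(crtc, 3, INTF_HDMI);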
Diffstat (limited to 'drivers/gpu/drm/msm')
-rw-r--r--  drivers/gpu/drm/msm/Makefile                |   6
-rw-r--r--  drivers/gpu/drm/msm/NOTES                   |  20
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c    | 569
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c | 258
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c     | 111
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c     | 350
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h     | 213
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c   | 389
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c     | 173
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h     |  41
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c               |  38
-rw-r--r--  drivers/gpu/drm/msm/msm_kms.h               |   1
12 files changed, 2167 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 59b76edb5fdd..4f977a593bea 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -20,6 +20,12 @@ msm-y := \
20	mdp/mdp4/mdp4_irq.o \
21	mdp/mdp4/mdp4_kms.o \
22	mdp/mdp4/mdp4_plane.o \
23 mdp/mdp5/mdp5_crtc.o \
24 mdp/mdp5/mdp5_encoder.o \
25 mdp/mdp5/mdp5_irq.o \
26 mdp/mdp5/mdp5_kms.o \
27 mdp/mdp5/mdp5_plane.o \
28 mdp/mdp5/mdp5_smp.o \
29	msm_drv.o \
30	msm_fb.o \
31	msm_gem.o \
diff --git a/drivers/gpu/drm/msm/NOTES b/drivers/gpu/drm/msm/NOTES
index e036f6c1db94..9c4255b98021 100644
--- a/drivers/gpu/drm/msm/NOTES
+++ b/drivers/gpu/drm/msm/NOTES
@@ -4,7 +4,7 @@ In the current snapdragon SoC's, we have (at least) 3 different
4display controller blocks at play:
5 + MDP3 - ?? seems to be what is on geeksphone peak device
6 + MDP4 - S3 (APQ8060, touchpad), S4-pro (APQ8064, nexus4 & ifc6410)
-7 + MDSS - snapdragon 800
+7 + MDP5 - snapdragon 800
8
9(I don't have a completely clear picture on which display controller
10maps to which part #)
@@ -46,6 +46,24 @@ and treat the MDP4 block's irq as "the" irq. Even though the connectors
46may have their own irqs which they install themselves. For this reason
47the display controller is the "master" device.
48
49For MDP5, the mapping is:
50
51 plane -> PIPE{RGBn,VIGn} \
52 crtc -> LM (layer mixer) |-> MDP "device"
53 encoder -> INTF /
54 connector -> HDMI/DSI/eDP/etc --> other device(s)
55
56Unlike MDP4, it appears we can get by with a single encoder, rather
57than needing a different implementation for DTV, DSI, etc. (Ie. the
58register interface is same, just different bases.)
59
60Also unlike MDP4, with MDP5 all the IRQs for other blocks (HDMI, DSI,
61etc) are routed through MDP.
62
63And finally, MDP5 has this "Shared Memory Pool" (called "SMP"), from
64which blocks need to be allocated to the active pipes based on fetch
65stride.
66
67Each connector probably ends up being a separate device, just for the
68logistics of finding/mapping io region, irq, etc. Ideally we would
69have a better way than just stashing the platform device in a global
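As a worked example of the SMP sizing described above (see
request_smp_blocks() in mdp5_plane.c below, and assuming the 4096-byte
SMP block size defined in mdp5_smp.h, which is not shown on this page):
a 1920-wide XRGB8888 plane fetches 1920 * 4 = 7680 bytes per line and
buffers nlines = 2 lines, so its single fetch client requests

	n = DIV_ROUND_UP(7680 * 2, 4096) = 4 blocks

and on hw rev v1.0 the driver additionally rounds n up to a power of
two.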
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
new file mode 100644
index 000000000000..71a3b2345eb3
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -0,0 +1,569 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "mdp5_kms.h"
19
20#include <drm/drm_mode.h>
21#include "drm_crtc.h"
22#include "drm_crtc_helper.h"
23#include "drm_flip_work.h"
24
25struct mdp5_crtc {
26 struct drm_crtc base;
27 char name[8];
28 struct drm_plane *plane;
29 struct drm_plane *planes[8];
30 int id;
31 bool enabled;
32
33 /* which mixer/encoder we route output to: */
34 int mixer;
35
36 /* if there is a pending flip, these will be non-null: */
37 struct drm_pending_vblank_event *event;
38 struct msm_fence_cb pageflip_cb;
39
40#define PENDING_CURSOR 0x1
41#define PENDING_FLIP 0x2
42 atomic_t pending;
43
44 /* the fb that we logically (from PoV of KMS API) hold a ref
45 * to. Which we may not yet be scanning out (we may still
46 * be scanning out previous in case of page_flip while waiting
47 * for gpu rendering to complete:
48 */
49 struct drm_framebuffer *fb;
50
51 /* the fb that we currently hold a scanout ref to: */
52 struct drm_framebuffer *scanout_fb;
53
54 /* for unref'ing framebuffers after scanout completes: */
55 struct drm_flip_work unref_fb_work;
56
57 struct mdp_irq vblank;
58 struct mdp_irq err;
59};
60#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)
61
62static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
63{
64 struct msm_drm_private *priv = crtc->dev->dev_private;
65 return to_mdp5_kms(to_mdp_kms(priv->kms));
66}
67
68static void request_pending(struct drm_crtc *crtc, uint32_t pending)
69{
70 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
71
72 atomic_or(pending, &mdp5_crtc->pending);
73 mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
74}
75
76static void crtc_flush(struct drm_crtc *crtc)
77{
78 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
79 struct mdp5_kms *mdp5_kms = get_kms(crtc);
80 int id = mdp5_crtc->id;
81 uint32_t i, flush = 0;
82
83 for (i = 0; i < ARRAY_SIZE(mdp5_crtc->planes); i++) {
84 struct drm_plane *plane = mdp5_crtc->planes[i];
85 if (plane) {
86 enum mdp5_pipe pipe = mdp5_plane_pipe(plane);
87 flush |= pipe2flush(pipe);
88 }
89 }
90 flush |= mixer2flush(mdp5_crtc->id);
91 flush |= MDP5_CTL_FLUSH_CTL;
92
93 DBG("%s: flush=%08x", mdp5_crtc->name, flush);
94
95 mdp5_write(mdp5_kms, REG_MDP5_CTL_FLUSH(id), flush);
96}
97
98static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb)
99{
100 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
101 struct drm_framebuffer *old_fb = mdp5_crtc->fb;
102
103 /* grab reference to incoming scanout fb: */
104 drm_framebuffer_reference(new_fb);
105 mdp5_crtc->base.fb = new_fb;
106 mdp5_crtc->fb = new_fb;
107
108 if (old_fb)
109 drm_flip_work_queue(&mdp5_crtc->unref_fb_work, old_fb);
110}
111
112/* unlike update_fb(), take a ref to the new scanout fb *before* updating
113 * plane, then call this. Needed to ensure we don't unref the buffer that
114 * is actually still being scanned out.
115 *
116 * Note that this whole thing goes away with atomic.. since we can defer
117 * calling into driver until rendering is done.
118 */
119static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
120{
121 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
122
123 /* flush updates, to make sure hw is updated to new scanout fb,
124 * so that we can safely queue unref to current fb (ie. next
125 * vblank we know hw is done w/ previous scanout_fb).
126 */
127 crtc_flush(crtc);
128
129 if (mdp5_crtc->scanout_fb)
130 drm_flip_work_queue(&mdp5_crtc->unref_fb_work,
131 mdp5_crtc->scanout_fb);
132
133 mdp5_crtc->scanout_fb = fb;
134
135 /* enable vblank to complete flip: */
136 request_pending(crtc, PENDING_FLIP);
137}
138
139/* if file!=NULL, this is preclose potential cancel-flip path */
140static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
141{
142 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
143 struct drm_device *dev = crtc->dev;
144 struct drm_pending_vblank_event *event;
145 unsigned long flags, i;
146
147 spin_lock_irqsave(&dev->event_lock, flags);
148 event = mdp5_crtc->event;
149 if (event) {
150 /* if regular vblank case (!file) or if cancel-flip from
151 * preclose on file that requested flip, then send the
152 * event:
153 */
154 if (!file || (event->base.file_priv == file)) {
155 mdp5_crtc->event = NULL;
156 drm_send_vblank_event(dev, mdp5_crtc->id, event);
157 }
158 }
159 spin_unlock_irqrestore(&dev->event_lock, flags);
160
161 for (i = 0; i < ARRAY_SIZE(mdp5_crtc->planes); i++) {
162 struct drm_plane *plane = mdp5_crtc->planes[i];
163 if (plane)
164 mdp5_plane_complete_flip(plane);
165 }
166}
167
168static void pageflip_cb(struct msm_fence_cb *cb)
169{
170 struct mdp5_crtc *mdp5_crtc =
171 container_of(cb, struct mdp5_crtc, pageflip_cb);
172 struct drm_crtc *crtc = &mdp5_crtc->base;
173 struct drm_framebuffer *fb = mdp5_crtc->fb;
174
175 if (!fb)
176 return;
177
178 drm_framebuffer_reference(fb);
179 mdp5_plane_set_scanout(mdp5_crtc->plane, fb);
180 update_scanout(crtc, fb);
181}
182
183static void unref_fb_worker(struct drm_flip_work *work, void *val)
184{
185 struct mdp5_crtc *mdp5_crtc =
186 container_of(work, struct mdp5_crtc, unref_fb_work);
187 struct drm_device *dev = mdp5_crtc->base.dev;
188
189 mutex_lock(&dev->mode_config.mutex);
190 drm_framebuffer_unreference(val);
191 mutex_unlock(&dev->mode_config.mutex);
192}
193
194static void mdp5_crtc_destroy(struct drm_crtc *crtc)
195{
196 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
197
198 mdp5_crtc->plane->funcs->destroy(mdp5_crtc->plane);
199
200 drm_crtc_cleanup(crtc);
201 drm_flip_work_cleanup(&mdp5_crtc->unref_fb_work);
202
203 kfree(mdp5_crtc);
204}
205
206static void mdp5_crtc_dpms(struct drm_crtc *crtc, int mode)
207{
208 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
209 struct mdp5_kms *mdp5_kms = get_kms(crtc);
210 bool enabled = (mode == DRM_MODE_DPMS_ON);
211
212 DBG("%s: mode=%d", mdp5_crtc->name, mode);
213
214 if (enabled != mdp5_crtc->enabled) {
215 if (enabled) {
216 mdp5_enable(mdp5_kms);
217 mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
218 } else {
219 mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
220 mdp5_disable(mdp5_kms);
221 }
222 mdp5_crtc->enabled = enabled;
223 }
224}
225
226static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc,
227 const struct drm_display_mode *mode,
228 struct drm_display_mode *adjusted_mode)
229{
230 return true;
231}
232
233static void blend_setup(struct drm_crtc *crtc)
234{
235 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
236 struct mdp5_kms *mdp5_kms = get_kms(crtc);
237 int id = mdp5_crtc->id;
238
239 /*
240 * Hard-coded setup for now until I figure out how the
241 * layer-mixer works
242 */
243
244 /* LM[id]: */
245 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(id),
246 MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA);
247 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(id, 0),
248 MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
249 MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL) |
250 MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA);
251 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(id, 0), 0xff);
252 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(id, 0), 0x00);
253
254 /* NOTE: seems that LM[n] and CTL[m], we do not need n==m.. but
255 * we want to be setting CTL[m].LAYER[n]. Not sure what the
256 * point of having CTL[m].LAYER[o] (for o!=n).. maybe that is
257 * used when chaining up mixers for high resolution displays?
258 */
259
260 /* CTL[id]: */
261 mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 0),
262 MDP5_CTL_LAYER_REG_RGB0(STAGE0) |
263 MDP5_CTL_LAYER_REG_BORDER_COLOR);
264 mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 1), 0);
265 mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 2), 0);
266 mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 3), 0);
267 mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 4), 0);
268}
269
270static int mdp5_crtc_mode_set(struct drm_crtc *crtc,
271 struct drm_display_mode *mode,
272 struct drm_display_mode *adjusted_mode,
273 int x, int y,
274 struct drm_framebuffer *old_fb)
275{
276 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
277 struct mdp5_kms *mdp5_kms = get_kms(crtc);
278 int ret;
279
280 mode = adjusted_mode;
281
282 DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
283 mdp5_crtc->name, mode->base.id, mode->name,
284 mode->vrefresh, mode->clock,
285 mode->hdisplay, mode->hsync_start,
286 mode->hsync_end, mode->htotal,
287 mode->vdisplay, mode->vsync_start,
288 mode->vsync_end, mode->vtotal,
289 mode->type, mode->flags);
290
291 /* grab extra ref for update_scanout() */
292 drm_framebuffer_reference(crtc->fb);
293
294 ret = mdp5_plane_mode_set(mdp5_crtc->plane, crtc, crtc->fb,
295 0, 0, mode->hdisplay, mode->vdisplay,
296 x << 16, y << 16,
297 mode->hdisplay << 16, mode->vdisplay << 16);
298 if (ret) {
299 dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
300 mdp5_crtc->name, ret);
301 return ret;
302 }
303
304 mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->id),
305 MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) |
306 MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
307
308 update_fb(crtc, crtc->fb);
309 update_scanout(crtc, crtc->fb);
310
311 return 0;
312}
313
314static void mdp5_crtc_prepare(struct drm_crtc *crtc)
315{
316 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
317 DBG("%s", mdp5_crtc->name);
318 /* make sure we hold a ref to mdp clks while setting up mode: */
319 mdp5_enable(get_kms(crtc));
320 mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
321}
322
323static void mdp5_crtc_commit(struct drm_crtc *crtc)
324{
325 mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
326 crtc_flush(crtc);
327 /* drop the ref to mdp clk's that we got in prepare: */
328 mdp5_disable(get_kms(crtc));
329}
330
331static int mdp5_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
332 struct drm_framebuffer *old_fb)
333{
334 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
335 struct drm_plane *plane = mdp5_crtc->plane;
336 struct drm_display_mode *mode = &crtc->mode;
337 int ret;
338
339 /* grab extra ref for update_scanout() */
340 drm_framebuffer_reference(crtc->fb);
341
342 ret = mdp5_plane_mode_set(plane, crtc, crtc->fb,
343 0, 0, mode->hdisplay, mode->vdisplay,
344 x << 16, y << 16,
345 mode->hdisplay << 16, mode->vdisplay << 16);
346
347 update_fb(crtc, crtc->fb);
348 update_scanout(crtc, crtc->fb);
349
350 return ret;
351}
352
353static void mdp5_crtc_load_lut(struct drm_crtc *crtc)
354{
355}
356
357static int mdp5_crtc_page_flip(struct drm_crtc *crtc,
358 struct drm_framebuffer *new_fb,
359 struct drm_pending_vblank_event *event,
360 uint32_t page_flip_flags)
361{
362 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
363 struct drm_device *dev = crtc->dev;
364 struct drm_gem_object *obj;
365 unsigned long flags;
366
367 if (mdp5_crtc->event) {
368 dev_err(dev->dev, "already pending flip!\n");
369 return -EBUSY;
370 }
371
372 obj = msm_framebuffer_bo(new_fb, 0);
373
374 spin_lock_irqsave(&dev->event_lock, flags);
375 mdp5_crtc->event = event;
376 spin_unlock_irqrestore(&dev->event_lock, flags);
377
378 update_fb(crtc, new_fb);
379
380 return msm_gem_queue_inactive_cb(obj, &mdp5_crtc->pageflip_cb);
381}
382
383static int mdp5_crtc_set_property(struct drm_crtc *crtc,
384 struct drm_property *property, uint64_t val)
385{
386 // XXX
387 return -EINVAL;
388}
389
390static const struct drm_crtc_funcs mdp5_crtc_funcs = {
391 .set_config = drm_crtc_helper_set_config,
392 .destroy = mdp5_crtc_destroy,
393 .page_flip = mdp5_crtc_page_flip,
394 .set_property = mdp5_crtc_set_property,
395};
396
397static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
398 .dpms = mdp5_crtc_dpms,
399 .mode_fixup = mdp5_crtc_mode_fixup,
400 .mode_set = mdp5_crtc_mode_set,
401 .prepare = mdp5_crtc_prepare,
402 .commit = mdp5_crtc_commit,
403 .mode_set_base = mdp5_crtc_mode_set_base,
404 .load_lut = mdp5_crtc_load_lut,
405};
406
407static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
408{
409 struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
410 struct drm_crtc *crtc = &mdp5_crtc->base;
411 struct msm_drm_private *priv = crtc->dev->dev_private;
412 unsigned pending;
413
414 mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);
415
416 pending = atomic_xchg(&mdp5_crtc->pending, 0);
417
418 if (pending & PENDING_FLIP) {
419 complete_flip(crtc, NULL);
420 drm_flip_work_commit(&mdp5_crtc->unref_fb_work, priv->wq);
421 }
422}
423
424static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
425{
426 struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);
427 struct drm_crtc *crtc = &mdp5_crtc->base;
428 DBG("%s: error: %08x", mdp5_crtc->name, irqstatus);
429 crtc_flush(crtc);
430}
431
432uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
433{
434 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
435 return mdp5_crtc->vblank.irqmask;
436}
437
438void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
439{
440 DBG("cancel: %p", file);
441 complete_flip(crtc, file);
442}
443
444/* set interface for routing crtc->encoder: */
445void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
446 enum mdp5_intf intf_id)
447{
448 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
449 struct mdp5_kms *mdp5_kms = get_kms(crtc);
450 static const enum mdp5_intfnum intfnum[] = {
451 INTF0, INTF1, INTF2, INTF3,
452 };
453 uint32_t intf_sel;
454
455 /* now that we know what irq's we want: */
456 mdp5_crtc->err.irqmask = intf2err(intf);
457 mdp5_crtc->vblank.irqmask = intf2vblank(intf);
458
459 /* when called from modeset_init(), skip the rest until later: */
460 if (!mdp5_kms)
461 return;
462
463 intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);
464
465 switch (intf) {
466 case 0:
467 intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK;
468 intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf_id);
469 break;
470 case 1:
471 intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK;
472 intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf_id);
473 break;
474 case 2:
475 intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK;
476 intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf_id);
477 break;
478 case 3:
479 intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK;
480 intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf_id);
481 break;
482 default:
483 BUG();
484 break;
485 }
486
487 blend_setup(crtc);
488
489 DBG("%s: intf_sel=%08x", mdp5_crtc->name, intf_sel);
490
491 mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
492 mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(mdp5_crtc->id),
493 MDP5_CTL_OP_MODE(MODE_NONE) |
494 MDP5_CTL_OP_INTF_NUM(intfnum[intf]));
495
496 crtc_flush(crtc);
497}
498
499static void set_attach(struct drm_crtc *crtc, enum mdp5_pipe pipe_id,
500 struct drm_plane *plane)
501{
502 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
503
504 BUG_ON(pipe_id >= ARRAY_SIZE(mdp5_crtc->planes));
505
506 if (mdp5_crtc->planes[pipe_id] == plane)
507 return;
508
509 mdp5_crtc->planes[pipe_id] = plane;
510 blend_setup(crtc);
511 if (mdp5_crtc->enabled && (plane != mdp5_crtc->plane))
512 crtc_flush(crtc);
513}
514
515void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
516{
517 set_attach(crtc, mdp5_plane_pipe(plane), plane);
518}
519
520void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane)
521{
522 set_attach(crtc, mdp5_plane_pipe(plane), NULL);
523}
524
525/* initialize crtc */
526struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
527 struct drm_plane *plane, int id)
528{
529 struct drm_crtc *crtc = NULL;
530 struct mdp5_crtc *mdp5_crtc;
531 int ret;
532
533 mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
534 if (!mdp5_crtc) {
535 ret = -ENOMEM;
536 goto fail;
537 }
538
539 crtc = &mdp5_crtc->base;
540
541 mdp5_crtc->plane = plane;
542 mdp5_crtc->id = id;
543
544 mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
545 mdp5_crtc->err.irq = mdp5_crtc_err_irq;
546
547 snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d",
548 pipe2name(mdp5_plane_pipe(plane)), id);
549
550 ret = drm_flip_work_init(&mdp5_crtc->unref_fb_work, 16,
551 "unref fb", unref_fb_worker);
552 if (ret)
553 goto fail;
554
555 INIT_FENCE_CB(&mdp5_crtc->pageflip_cb, pageflip_cb);
556
557 drm_crtc_init(dev, crtc, &mdp5_crtc_funcs);
558 drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
559
560 mdp5_plane_install_properties(mdp5_crtc->plane, &crtc->base);
561
562 return crtc;
563
564fail:
565 if (crtc)
566 mdp5_crtc_destroy(crtc);
567
568 return ERR_PTR(ret);
569}
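The flush mask that crtc_flush() composes above is just the OR of the
per-block flush bits returned by the static inlines in mdp5_kms.h
(shown further below).  A minimal sketch for a CRTC with id 0 scanning
out from the RGB0 pipe:

	uint32_t flush = pipe2flush(SSPP_RGB0)	/* MDP5_CTL_FLUSH_RGB0 */
			| mixer2flush(0)	/* MDP5_CTL_FLUSH_LM0 */
			| MDP5_CTL_FLUSH_CTL;

	mdp5_write(mdp5_kms, REG_MDP5_CTL_FLUSH(0), flush);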
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
new file mode 100644
index 000000000000..edec7bfaa952
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -0,0 +1,258 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "mdp5_kms.h"
19
20#include "drm_crtc.h"
21#include "drm_crtc_helper.h"
22
23struct mdp5_encoder {
24 struct drm_encoder base;
25 int intf;
26 enum mdp5_intf intf_id;
27 bool enabled;
28 uint32_t bsc;
29};
30#define to_mdp5_encoder(x) container_of(x, struct mdp5_encoder, base)
31
32static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
33{
34 struct msm_drm_private *priv = encoder->dev->dev_private;
35 return to_mdp5_kms(to_mdp_kms(priv->kms));
36}
37
38#ifdef CONFIG_MSM_BUS_SCALING
39#include <mach/board.h>
40#include <mach/msm_bus.h>
41#include <mach/msm_bus_board.h>
42#define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val) \
43 { \
44 .src = MSM_BUS_MASTER_MDP_PORT0, \
45 .dst = MSM_BUS_SLAVE_EBI_CH0, \
46 .ab = (ab_val), \
47 .ib = (ib_val), \
48 }
49
50static struct msm_bus_vectors mdp_bus_vectors[] = {
51 MDP_BUS_VECTOR_ENTRY(0, 0),
52 MDP_BUS_VECTOR_ENTRY(2000000000, 2000000000),
53};
54static struct msm_bus_paths mdp_bus_usecases[] = { {
55 .num_paths = 1,
56 .vectors = &mdp_bus_vectors[0],
57}, {
58 .num_paths = 1,
59 .vectors = &mdp_bus_vectors[1],
60} };
61static struct msm_bus_scale_pdata mdp_bus_scale_table = {
62 .usecase = mdp_bus_usecases,
63 .num_usecases = ARRAY_SIZE(mdp_bus_usecases),
64 .name = "mdss_mdp",
65};
66
67static void bs_init(struct mdp5_encoder *mdp5_encoder)
68{
69 mdp5_encoder->bsc = msm_bus_scale_register_client(
70 &mdp_bus_scale_table);
71 DBG("bus scale client: %08x", mdp5_encoder->bsc);
72}
73
74static void bs_fini(struct mdp5_encoder *mdp5_encoder)
75{
76 if (mdp5_encoder->bsc) {
77 msm_bus_scale_unregister_client(mdp5_encoder->bsc);
78 mdp5_encoder->bsc = 0;
79 }
80}
81
82static void bs_set(struct mdp5_encoder *mdp5_encoder, int idx)
83{
84 if (mdp5_encoder->bsc) {
85 DBG("set bus scaling: %d", idx);
86 /* HACK: scaling down, and then immediately back up
87 * seems to leave things broken (underflow).. so
88 * never disable:
89 */
90 idx = 1;
91 msm_bus_scale_client_update_request(mdp5_encoder->bsc, idx);
92 }
93}
94#else
95static void bs_init(struct mdp5_encoder *mdp5_encoder) {}
96static void bs_fini(struct mdp5_encoder *mdp5_encoder) {}
97static void bs_set(struct mdp5_encoder *mdp5_encoder, int idx) {}
98#endif
99
100static void mdp5_encoder_destroy(struct drm_encoder *encoder)
101{
102 struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
103 bs_fini(mdp5_encoder);
104 drm_encoder_cleanup(encoder);
105 kfree(mdp5_encoder);
106}
107
108static const struct drm_encoder_funcs mdp5_encoder_funcs = {
109 .destroy = mdp5_encoder_destroy,
110};
111
112static void mdp5_encoder_dpms(struct drm_encoder *encoder, int mode)
113{
114 struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
115 struct mdp5_kms *mdp5_kms = get_kms(encoder);
116 int intf = mdp5_encoder->intf;
117 bool enabled = (mode == DRM_MODE_DPMS_ON);
118
119 DBG("mode=%d", mode);
120
121 if (enabled == mdp5_encoder->enabled)
122 return;
123
124 if (enabled) {
125 bs_set(mdp5_encoder, 1);
126 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1);
127 } else {
128 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 0);
129 bs_set(mdp5_encoder, 0);
130 }
131
132 mdp5_encoder->enabled = enabled;
133}
134
135static bool mdp5_encoder_mode_fixup(struct drm_encoder *encoder,
136 const struct drm_display_mode *mode,
137 struct drm_display_mode *adjusted_mode)
138{
139 return true;
140}
141
142static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
143 struct drm_display_mode *mode,
144 struct drm_display_mode *adjusted_mode)
145{
146 struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
147 struct mdp5_kms *mdp5_kms = get_kms(encoder);
148 int intf = mdp5_encoder->intf;
149 uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol;
150 uint32_t display_v_start, display_v_end;
151 uint32_t hsync_start_x, hsync_end_x;
152 uint32_t format;
153
154 mode = adjusted_mode;
155
156 DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
157 mode->base.id, mode->name,
158 mode->vrefresh, mode->clock,
159 mode->hdisplay, mode->hsync_start,
160 mode->hsync_end, mode->htotal,
161 mode->vdisplay, mode->vsync_start,
162 mode->vsync_end, mode->vtotal,
163 mode->type, mode->flags);
164
165 ctrl_pol = 0;
166 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
167 ctrl_pol |= MDP5_INTF_POLARITY_CTL_HSYNC_LOW;
168 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
169 ctrl_pol |= MDP5_INTF_POLARITY_CTL_VSYNC_LOW;
170 /* probably need to get DATA_EN polarity from panel.. */
171
172 dtv_hsync_skew = 0; /* get this from panel? */
173 format = 0x213f; /* get this from panel? */
174
175 hsync_start_x = (mode->htotal - mode->hsync_start);
176 hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1;
177
178 vsync_period = mode->vtotal * mode->htotal;
179 vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal;
180 display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew;
181 display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1;
182
183 mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_CTL(intf),
184 MDP5_INTF_HSYNC_CTL_PULSEW(mode->hsync_end - mode->hsync_start) |
185 MDP5_INTF_HSYNC_CTL_PERIOD(mode->htotal));
186 mdp5_write(mdp5_kms, REG_MDP5_INTF_VSYNC_PERIOD_F0(intf), vsync_period);
187 mdp5_write(mdp5_kms, REG_MDP5_INTF_VSYNC_LEN_F0(intf), vsync_len);
188 mdp5_write(mdp5_kms, REG_MDP5_INTF_DISPLAY_HCTL(intf),
189 MDP5_INTF_DISPLAY_HCTL_START(hsync_start_x) |
190 MDP5_INTF_DISPLAY_HCTL_END(hsync_end_x));
191 mdp5_write(mdp5_kms, REG_MDP5_INTF_DISPLAY_VSTART_F0(intf), display_v_start);
192 mdp5_write(mdp5_kms, REG_MDP5_INTF_DISPLAY_VEND_F0(intf), display_v_end);
193 mdp5_write(mdp5_kms, REG_MDP5_INTF_BORDER_COLOR(intf), 0);
194 mdp5_write(mdp5_kms, REG_MDP5_INTF_UNDERFLOW_COLOR(intf), 0xff);
195 mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_SKEW(intf), dtv_hsync_skew);
196 mdp5_write(mdp5_kms, REG_MDP5_INTF_POLARITY_CTL(intf), ctrl_pol);
197 mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_HCTL(intf),
198 MDP5_INTF_ACTIVE_HCTL_START(0) |
199 MDP5_INTF_ACTIVE_HCTL_END(0));
200 mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_VSTART_F0(intf), 0);
201 mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_VEND_F0(intf), 0);
202 mdp5_write(mdp5_kms, REG_MDP5_INTF_PANEL_FORMAT(intf), format);
203 mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(intf), 0x3); /* frame+line? */
204}
205
206static void mdp5_encoder_prepare(struct drm_encoder *encoder)
207{
208 mdp5_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
209}
210
211static void mdp5_encoder_commit(struct drm_encoder *encoder)
212{
213 struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
214 mdp5_crtc_set_intf(encoder->crtc, mdp5_encoder->intf,
215 mdp5_encoder->intf_id);
216 mdp5_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
217}
218
219static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = {
220 .dpms = mdp5_encoder_dpms,
221 .mode_fixup = mdp5_encoder_mode_fixup,
222 .mode_set = mdp5_encoder_mode_set,
223 .prepare = mdp5_encoder_prepare,
224 .commit = mdp5_encoder_commit,
225};
226
227/* initialize encoder */
228struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, int intf,
229 enum mdp5_intf intf_id)
230{
231 struct drm_encoder *encoder = NULL;
232 struct mdp5_encoder *mdp5_encoder;
233 int ret;
234
235 mdp5_encoder = kzalloc(sizeof(*mdp5_encoder), GFP_KERNEL);
236 if (!mdp5_encoder) {
237 ret = -ENOMEM;
238 goto fail;
239 }
240
241 mdp5_encoder->intf = intf;
242 mdp5_encoder->intf_id = intf_id;
243 encoder = &mdp5_encoder->base;
244
245 drm_encoder_init(dev, encoder, &mdp5_encoder_funcs,
246 DRM_MODE_ENCODER_TMDS);
247 drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs);
248
249 bs_init(mdp5_encoder);
250
251 return encoder;
252
253fail:
254 if (encoder)
255 mdp5_encoder_destroy(encoder);
256
257 return ERR_PTR(ret);
258}
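To illustrate the timing arithmetic in mdp5_encoder_mode_set() above,
assume the standard CEA 1080p60 mode (hdisplay 1920, hsync_start 2008,
hsync_end 2052, htotal 2200; vdisplay 1080, vsync_start 1084,
vsync_end 1089, vtotal 1125) and dtv_hsync_skew = 0.  Then:

	hsync pulse width = 2052 - 2008            =      44
	hsync_start_x     = 2200 - 2008            =     192
	hsync_end_x       = 2200 - (2008-1920) - 1 =    2111
	vsync_period      = 1125 * 2200            = 2475000
	vsync_len         = (1089 - 1084) * 2200   =   11000
	display_v_start   = (1125 - 1084) * 2200   =   90200
	display_v_end     = 2475000 - (1084-1080)*2200 - 1 = 2466199

i.e. the vertical values are scaled by htotal, so they are expressed
in pixel-clock ticks rather than lines.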
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
new file mode 100644
index 000000000000..353d494a497f
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -0,0 +1,111 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18
19#include "msm_drv.h"
20#include "mdp5_kms.h"
21
22void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask)
23{
24 mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_INTR_EN, irqmask);
25}
26
27static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
28{
29 DRM_ERROR("errors: %08x\n", irqstatus);
30}
31
32void mdp5_irq_preinstall(struct msm_kms *kms)
33{
34 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
35 mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff);
36}
37
38int mdp5_irq_postinstall(struct msm_kms *kms)
39{
40 struct mdp_kms *mdp_kms = to_mdp_kms(kms);
41 struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
42 struct mdp_irq *error_handler = &mdp5_kms->error_handler;
43
44 error_handler->irq = mdp5_irq_error_handler;
45 error_handler->irqmask = MDP5_IRQ_INTF0_UNDER_RUN |
46 MDP5_IRQ_INTF1_UNDER_RUN |
47 MDP5_IRQ_INTF2_UNDER_RUN |
48 MDP5_IRQ_INTF3_UNDER_RUN;
49
50 mdp_irq_register(mdp_kms, error_handler);
51
52 return 0;
53}
54
55void mdp5_irq_uninstall(struct msm_kms *kms)
56{
57 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
58 mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
59}
60
61static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
62{
63 struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
64 struct drm_device *dev = mdp5_kms->dev;
65 struct msm_drm_private *priv = dev->dev_private;
66 unsigned int id;
67 uint32_t status;
68
69 status = mdp5_read(mdp5_kms, REG_MDP5_INTR_STATUS);
70 mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, status);
71
72 VERB("status=%08x", status);
73
74 for (id = 0; id < priv->num_crtcs; id++)
75 if (status & mdp5_crtc_vblank(priv->crtcs[id]))
76 drm_handle_vblank(dev, id);
77
78 mdp_dispatch_irqs(mdp_kms, status);
79}
80
81irqreturn_t mdp5_irq(struct msm_kms *kms)
82{
83 struct mdp_kms *mdp_kms = to_mdp_kms(kms);
84 struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
85 uint32_t intr;
86
87 intr = mdp5_read(mdp5_kms, REG_MDP5_HW_INTR_STATUS);
88
89 VERB("intr=%08x", intr);
90
91 if (intr & MDP5_HW_INTR_STATUS_INTR_MDP)
92 mdp5_irq_mdp(mdp_kms);
93
94 if (intr & MDP5_HW_INTR_STATUS_INTR_HDMI)
95 hdmi_irq(0, mdp5_kms->hdmi);
96
97 return IRQ_HANDLED;
98}
99
100int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
101{
102 mdp_update_vblank_mask(to_mdp_kms(kms),
103 mdp5_crtc_vblank(crtc), true);
104 return 0;
105}
106
107void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
108{
109 mdp_update_vblank_mask(to_mdp_kms(kms),
110 mdp5_crtc_vblank(crtc), false);
111}
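The mdp_irq objects used above come from the shared mdp_kms helpers
(mdp/mdp_kms.c, outside this diff): a consumer fills in a callback plus
an irqmask and registers it, exactly as the error_handler here and the
per-CRTC vblank/err irqs in mdp5_crtc.c do.  A minimal hypothetical
sketch (my_vsync is illustrative, not part of this patch):

	static void my_vsync(struct mdp_irq *irq, uint32_t irqstatus)
	{
		/* called from mdp5_irq_mdp() with the INTR_STATUS bits */
	}

	static struct mdp_irq vsync = {
		.irq     = my_vsync,
		.irqmask = intf2vblank(0),	/* MDP5_IRQ_INTF0_VSYNC */
	};

	mdp_irq_register(mdp_kms, &vsync);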
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
new file mode 100644
index 000000000000..ee8446c1b5f6
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -0,0 +1,350 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18
19#include "msm_drv.h"
20#include "msm_mmu.h"
21#include "mdp5_kms.h"
22
23static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev);
24
25static int mdp5_hw_init(struct msm_kms *kms)
26{
27 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
28 struct drm_device *dev = mdp5_kms->dev;
29 uint32_t version, major, minor;
30 int ret = 0;
31
32 pm_runtime_get_sync(dev->dev);
33
34 mdp5_enable(mdp5_kms);
35 version = mdp5_read(mdp5_kms, REG_MDP5_MDP_VERSION);
36 mdp5_disable(mdp5_kms);
37
38 major = FIELD(version, MDP5_MDP_VERSION_MAJOR);
39 minor = FIELD(version, MDP5_MDP_VERSION_MINOR);
40
41 DBG("found MDP5 version v%d.%d", major, minor);
42
43 if ((major != 1) || ((minor != 0) && (minor != 2))) {
44 dev_err(dev->dev, "unexpected MDP version: v%d.%d\n",
45 major, minor);
46 ret = -ENXIO;
47 goto out;
48 }
49
50 mdp5_kms->rev = minor;
51
52 /* Magic unknown register writes:
53 *
54 * W VBIF:0x004 00000001 (mdss_mdp.c:839)
55 * W MDP5:0x2e0 0xe9 (mdss_mdp.c:839)
56 * W MDP5:0x2e4 0x55 (mdss_mdp.c:839)
57 * W MDP5:0x3ac 0xc0000ccc (mdss_mdp.c:839)
58 * W MDP5:0x3b4 0xc0000ccc (mdss_mdp.c:839)
59 * W MDP5:0x3bc 0xcccccc (mdss_mdp.c:839)
60 * W MDP5:0x4a8 0xcccc0c0 (mdss_mdp.c:839)
61 * W MDP5:0x4b0 0xccccc0c0 (mdss_mdp.c:839)
62 * W MDP5:0x4b8 0xccccc000 (mdss_mdp.c:839)
63 *
64 * Downstream fbdev driver gets these register offsets/values
65 * from DT.. not really sure what these registers are or if
66 * different values for different boards/SoC's, etc. I guess
67 * they are the golden registers.
68 *
69 * Not setting these does not seem to cause any problem. But
70 * we may be getting lucky with the bootloader initializing
71 * them for us. OTOH, if we can always count on the bootloader
72 * setting the golden registers, then perhaps we don't need to
73 * care.
74 */
75
76 mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
77 mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(0), 0);
78 mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(1), 0);
79 mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(2), 0);
80 mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(3), 0);
81
82out:
83 pm_runtime_put_sync(dev->dev);
84
85 return ret;
86}
87
88static long mdp5_round_pixclk(struct msm_kms *kms, unsigned long rate,
89 struct drm_encoder *encoder)
90{
91 return rate;
92}
93
94static void mdp5_preclose(struct msm_kms *kms, struct drm_file *file)
95{
96 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
97 struct msm_drm_private *priv = mdp5_kms->dev->dev_private;
98 unsigned i;
99
100 for (i = 0; i < priv->num_crtcs; i++)
101 mdp5_crtc_cancel_pending_flip(priv->crtcs[i], file);
102}
103
104static void mdp5_destroy(struct msm_kms *kms)
105{
106 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
107 kfree(mdp5_kms);
108}
109
110static const struct mdp_kms_funcs kms_funcs = {
111 .base = {
112 .hw_init = mdp5_hw_init,
113 .irq_preinstall = mdp5_irq_preinstall,
114 .irq_postinstall = mdp5_irq_postinstall,
115 .irq_uninstall = mdp5_irq_uninstall,
116 .irq = mdp5_irq,
117 .enable_vblank = mdp5_enable_vblank,
118 .disable_vblank = mdp5_disable_vblank,
119 .get_format = mdp_get_format,
120 .round_pixclk = mdp5_round_pixclk,
121 .preclose = mdp5_preclose,
122 .destroy = mdp5_destroy,
123 },
124 .set_irqmask = mdp5_set_irqmask,
125};
126
127int mdp5_disable(struct mdp5_kms *mdp5_kms)
128{
129 DBG("");
130
131 clk_disable_unprepare(mdp5_kms->ahb_clk);
132 clk_disable_unprepare(mdp5_kms->axi_clk);
133 clk_disable_unprepare(mdp5_kms->core_clk);
134 clk_disable_unprepare(mdp5_kms->lut_clk);
135
136 return 0;
137}
138
139int mdp5_enable(struct mdp5_kms *mdp5_kms)
140{
141 DBG("");
142
143 clk_prepare_enable(mdp5_kms->ahb_clk);
144 clk_prepare_enable(mdp5_kms->axi_clk);
145 clk_prepare_enable(mdp5_kms->core_clk);
146 clk_prepare_enable(mdp5_kms->lut_clk);
147
148 return 0;
149}
150
151static int modeset_init(struct mdp5_kms *mdp5_kms)
152{
153 static const enum mdp5_pipe crtcs[] = {
154 SSPP_RGB0, SSPP_RGB1, SSPP_RGB2,
155 };
156 struct drm_device *dev = mdp5_kms->dev;
157 struct msm_drm_private *priv = dev->dev_private;
158 struct drm_encoder *encoder;
159 int i, ret;
160
161 /* construct CRTCs: */
162 for (i = 0; i < ARRAY_SIZE(crtcs); i++) {
163 struct drm_plane *plane;
164 struct drm_crtc *crtc;
165
166 plane = mdp5_plane_init(dev, crtcs[i], true);
167 if (IS_ERR(plane)) {
168 ret = PTR_ERR(plane);
169 dev_err(dev->dev, "failed to construct plane for %s (%d)\n",
170 pipe2name(crtcs[i]), ret);
171 goto fail;
172 }
173
174 crtc = mdp5_crtc_init(dev, plane, i);
175 if (IS_ERR(crtc)) {
176 ret = PTR_ERR(crtc);
177 dev_err(dev->dev, "failed to construct crtc for %s (%d)\n",
178 pipe2name(crtcs[i]), ret);
179 goto fail;
180 }
181 priv->crtcs[priv->num_crtcs++] = crtc;
182 }
183
184 /* Construct encoder for HDMI: */
185 encoder = mdp5_encoder_init(dev, 3, INTF_HDMI);
186 if (IS_ERR(encoder)) {
187 dev_err(dev->dev, "failed to construct encoder\n");
188 ret = PTR_ERR(encoder);
189 goto fail;
190 }
191
192 /* NOTE: the vsync and error irq's are actually associated with
193 * the INTF/encoder.. the easiest way to deal with this (ie. what
194 * we do now) is assume a fixed relationship between crtc's and
195 * encoders. I'm not sure if there is ever a need to more freely
196 * assign crtcs to encoders, but if there is then we need to take
197 * care of error and vblank irq's that the crtc has registered,
198 * and also update user-requested vblank_mask.
199 */
200 encoder->possible_crtcs = BIT(0);
201 mdp5_crtc_set_intf(priv->crtcs[0], 3, INTF_HDMI);
202
203 priv->encoders[priv->num_encoders++] = encoder;
204
205 /* Construct bridge/connector for HDMI: */
206 mdp5_kms->hdmi = hdmi_init(dev, encoder);
207 if (IS_ERR(mdp5_kms->hdmi)) {
208 ret = PTR_ERR(mdp5_kms->hdmi);
209 dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
210 goto fail;
211 }
212
213 return 0;
214
215fail:
216 return ret;
217}
218
219static const char *iommu_ports[] = {
220 "mdp_0",
221};
222
223static int get_clk(struct platform_device *pdev, struct clk **clkp,
224 const char *name)
225{
226 struct device *dev = &pdev->dev;
227 struct clk *clk = devm_clk_get(dev, name);
228 if (IS_ERR(clk)) {
229 dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
230 return PTR_ERR(clk);
231 }
232 *clkp = clk;
233 return 0;
234}
235
236struct msm_kms *mdp5_kms_init(struct drm_device *dev)
237{
238 struct platform_device *pdev = dev->platformdev;
239 struct mdp5_platform_config *config = mdp5_get_config(pdev);
240 struct mdp5_kms *mdp5_kms;
241 struct msm_kms *kms = NULL;
242 struct msm_mmu *mmu;
243 int ret;
244
245 mdp5_kms = kzalloc(sizeof(*mdp5_kms), GFP_KERNEL);
246 if (!mdp5_kms) {
247 dev_err(dev->dev, "failed to allocate kms\n");
248 ret = -ENOMEM;
249 goto fail;
250 }
251
252 mdp_kms_init(&mdp5_kms->base, &kms_funcs);
253
254 kms = &mdp5_kms->base.base;
255
256 mdp5_kms->dev = dev;
257 mdp5_kms->smp_blk_cnt = config->smp_blk_cnt;
258
259 mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
260 if (IS_ERR(mdp5_kms->mmio)) {
261 ret = PTR_ERR(mdp5_kms->mmio);
262 goto fail;
263 }
264
265 mdp5_kms->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF");
266 if (IS_ERR(mdp5_kms->vbif)) {
267 ret = PTR_ERR(mdp5_kms->vbif);
268 goto fail;
269 }
270
271 mdp5_kms->vdd = devm_regulator_get(&pdev->dev, "vdd");
272 if (IS_ERR(mdp5_kms->vdd)) {
273 ret = PTR_ERR(mdp5_kms->vdd);
274 goto fail;
275 }
276
277 ret = regulator_enable(mdp5_kms->vdd);
278 if (ret) {
279 dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret);
280 goto fail;
281 }
282
283 ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus_clk") ||
284 get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk") ||
285 get_clk(pdev, &mdp5_kms->src_clk, "core_clk_src") ||
286 get_clk(pdev, &mdp5_kms->core_clk, "core_clk") ||
287 get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk") ||
288 get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk");
289 if (ret)
290 goto fail;
291
292 ret = clk_set_rate(mdp5_kms->src_clk, config->max_clk);
293
294 /* make sure things are off before attaching iommu (bootloader could
295 * have left things on, in which case we'll start getting faults if
296 * we don't disable):
297 */
298 mdp5_enable(mdp5_kms);
299 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(0), 0);
300 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(1), 0);
301 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(2), 0);
302 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(3), 0);
303 mdp5_disable(mdp5_kms);
304 mdelay(16);
305
306 if (config->iommu) {
307 mmu = msm_iommu_new(dev, config->iommu);
308 if (IS_ERR(mmu)) {
309 ret = PTR_ERR(mmu);
310 goto fail;
311 }
312 ret = mmu->funcs->attach(mmu, iommu_ports,
313 ARRAY_SIZE(iommu_ports));
314 if (ret)
315 goto fail;
316 } else {
317 dev_info(dev->dev, "no iommu, fallback to phys "
318 "contig buffers for scanout\n");
319 mmu = NULL;
320 }
321
322 mdp5_kms->id = msm_register_mmu(dev, mmu);
323 if (mdp5_kms->id < 0) {
324 ret = mdp5_kms->id;
325 dev_err(dev->dev, "failed to register mdp5 iommu: %d\n", ret);
326 goto fail;
327 }
328
329 ret = modeset_init(mdp5_kms);
330 if (ret) {
331 dev_err(dev->dev, "modeset_init failed: %d\n", ret);
332 goto fail;
333 }
334
335 return kms;
336
337fail:
338 if (kms)
339 mdp5_destroy(kms);
340 return ERR_PTR(ret);
341}
342
343static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev)
344{
345 static struct mdp5_platform_config config = {};
346#ifdef CONFIG_OF
347 /* TODO */
348#endif
349 return &config;
350}
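One subtlety in mdp5_kms_init() above: chaining the get_clk() calls
with || collapses any failure into ret == 1, so the specific errno is
lost and ERR_PTR(1) is not a value that IS_ERR() recognizes.  A more
conventional (hypothetical) arrangement would check each clock
individually:

	ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus_clk");
	if (ret)
		goto fail;
	ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk");
	if (ret)
		goto fail;
	/* ...and likewise for core_clk_src, core_clk, lut_clk, vsync_clk */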
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
new file mode 100644
index 000000000000..c8b1a2522c25
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -0,0 +1,213 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __MDP5_KMS_H__
19#define __MDP5_KMS_H__
20
21#include "msm_drv.h"
22#include "msm_kms.h"
23#include "mdp/mdp_kms.h"
24#include "mdp5.xml.h"
25#include "mdp5_smp.h"
26
27struct mdp5_kms {
28 struct mdp_kms base;
29
30 struct drm_device *dev;
31
32 int rev;
33
34 /* mapper-id used to request GEM buffer mapped for scanout: */
35 int id;
36
37 /* for tracking smp allocation amongst pipes: */
38 mdp5_smp_state_t smp_state;
39 struct mdp5_client_smp_state smp_client_state[CID_MAX];
40 int smp_blk_cnt;
41
42 /* io/register spaces: */
43 void __iomem *mmio, *vbif;
44
45 struct regulator *vdd;
46
47 struct clk *axi_clk;
48 struct clk *ahb_clk;
49 struct clk *src_clk;
50 struct clk *core_clk;
51 struct clk *lut_clk;
52 struct clk *vsync_clk;
53
54 struct hdmi *hdmi;
55
56 struct mdp_irq error_handler;
57};
58#define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base)
59
60/* platform config data (ie. from DT, or pdata) */
61struct mdp5_platform_config {
62 struct iommu_domain *iommu;
63 uint32_t max_clk;
64 int smp_blk_cnt;
65};
66
67static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data)
68{
69 msm_writel(data, mdp5_kms->mmio + reg);
70}
71
72static inline u32 mdp5_read(struct mdp5_kms *mdp5_kms, u32 reg)
73{
74 return msm_readl(mdp5_kms->mmio + reg);
75}
76
77static inline const char *pipe2name(enum mdp5_pipe pipe)
78{
79 static const char *names[] = {
80#define NAME(n) [SSPP_ ## n] = #n
81 NAME(VIG0), NAME(VIG1), NAME(VIG2),
82 NAME(RGB0), NAME(RGB1), NAME(RGB2),
83 NAME(DMA0), NAME(DMA1),
84#undef NAME
85 };
86 return names[pipe];
87}
88
89static inline uint32_t pipe2flush(enum mdp5_pipe pipe)
90{
91 switch (pipe) {
92 case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
93 case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
94 case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
95 case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
96 case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
97 case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
98 case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
99 case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
100 default: return 0;
101 }
102}
103
104static inline int pipe2nclients(enum mdp5_pipe pipe)
105{
106 switch (pipe) {
107 case SSPP_RGB0:
108 case SSPP_RGB1:
109 case SSPP_RGB2:
110 return 1;
111 default:
112 return 3;
113 }
114}
115
116static inline enum mdp5_client_id pipe2client(enum mdp5_pipe pipe, int plane)
117{
118 WARN_ON(plane >= pipe2nclients(pipe));
119 switch (pipe) {
120 case SSPP_VIG0: return CID_VIG0_Y + plane;
121 case SSPP_VIG1: return CID_VIG1_Y + plane;
122 case SSPP_VIG2: return CID_VIG2_Y + plane;
123 case SSPP_RGB0: return CID_RGB0;
124 case SSPP_RGB1: return CID_RGB1;
125 case SSPP_RGB2: return CID_RGB2;
126 case SSPP_DMA0: return CID_DMA0_Y + plane;
127 case SSPP_DMA1: return CID_DMA1_Y + plane;
128 default: return CID_UNUSED;
129 }
130}
131
132static inline uint32_t mixer2flush(int lm)
133{
134 switch (lm) {
135 case 0: return MDP5_CTL_FLUSH_LM0;
136 case 1: return MDP5_CTL_FLUSH_LM1;
137 case 2: return MDP5_CTL_FLUSH_LM2;
138 default: return 0;
139 }
140}
141
142static inline uint32_t intf2err(int intf)
143{
144 switch (intf) {
145 case 0: return MDP5_IRQ_INTF0_UNDER_RUN;
146 case 1: return MDP5_IRQ_INTF1_UNDER_RUN;
147 case 2: return MDP5_IRQ_INTF2_UNDER_RUN;
148 case 3: return MDP5_IRQ_INTF3_UNDER_RUN;
149 default: return 0;
150 }
151}
152
153static inline uint32_t intf2vblank(int intf)
154{
155 switch (intf) {
156 case 0: return MDP5_IRQ_INTF0_VSYNC;
157 case 1: return MDP5_IRQ_INTF1_VSYNC;
158 case 2: return MDP5_IRQ_INTF2_VSYNC;
159 case 3: return MDP5_IRQ_INTF3_VSYNC;
160 default: return 0;
161 }
162}
163
164int mdp5_disable(struct mdp5_kms *mdp5_kms);
165int mdp5_enable(struct mdp5_kms *mdp5_kms);
166
167void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask);
168void mdp5_irq_preinstall(struct msm_kms *kms);
169int mdp5_irq_postinstall(struct msm_kms *kms);
170void mdp5_irq_uninstall(struct msm_kms *kms);
171irqreturn_t mdp5_irq(struct msm_kms *kms);
172int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
173void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
174
175static inline
176uint32_t mdp5_get_formats(enum mdp5_pipe pipe, uint32_t *pixel_formats,
177 uint32_t max_formats)
178{
179 /* TODO when we have YUV, we need to filter supported formats
180 * based on pipe id..
181 */
182 return mdp_get_formats(pixel_formats, max_formats);
183}
184
185void mdp5_plane_install_properties(struct drm_plane *plane,
186 struct drm_mode_object *obj);
187void mdp5_plane_set_scanout(struct drm_plane *plane,
188 struct drm_framebuffer *fb);
189int mdp5_plane_mode_set(struct drm_plane *plane,
190 struct drm_crtc *crtc, struct drm_framebuffer *fb,
191 int crtc_x, int crtc_y,
192 unsigned int crtc_w, unsigned int crtc_h,
193 uint32_t src_x, uint32_t src_y,
194 uint32_t src_w, uint32_t src_h);
195void mdp5_plane_complete_flip(struct drm_plane *plane);
196enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
197struct drm_plane *mdp5_plane_init(struct drm_device *dev,
198 enum mdp5_pipe pipe, bool private_plane);
199
200uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
201
202void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
203void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
204 enum mdp5_intf intf_id);
205void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane);
206void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane);
207struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
208 struct drm_plane *plane, int id);
209
210struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, int intf,
211 enum mdp5_intf intf_id);
212
213#endif /* __MDP5_KMS_H__ */
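The pipe-to-SMP-client helpers above work as a pair: pipe2nclients()
says how many fetch clients a pipe has (one for RGB pipes, up to three
otherwise -- the CID_*_Y naming suggests one client per YUV component),
and pipe2client() names each of them.  A minimal sketch of iterating
them, as mdp5_plane.c does when requesting or releasing SMP blocks:

	enum mdp5_pipe pipe = SSPP_VIG0;
	int i;

	for (i = 0; i < pipe2nclients(pipe); i++)	/* 3 clients for VIG0 */
		mdp5_smp_request(mdp5_kms, pipe2client(pipe, i), 0);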
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
new file mode 100644
index 000000000000..0ac8bb5e7e85
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -0,0 +1,389 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "mdp5_kms.h"
19
20
21struct mdp5_plane {
22 struct drm_plane base;
23 const char *name;
24
25 enum mdp5_pipe pipe;
26
27 uint32_t nformats;
28 uint32_t formats[32];
29
30 bool enabled;
31};
32#define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base)
33
34static struct mdp5_kms *get_kms(struct drm_plane *plane)
35{
36 struct msm_drm_private *priv = plane->dev->dev_private;
37 return to_mdp5_kms(to_mdp_kms(priv->kms));
38}
39
40static int mdp5_plane_update(struct drm_plane *plane,
41 struct drm_crtc *crtc, struct drm_framebuffer *fb,
42 int crtc_x, int crtc_y,
43 unsigned int crtc_w, unsigned int crtc_h,
44 uint32_t src_x, uint32_t src_y,
45 uint32_t src_w, uint32_t src_h)
46{
47 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
48
49 mdp5_plane->enabled = true;
50
51 if (plane->fb)
52 drm_framebuffer_unreference(plane->fb);
53
54 drm_framebuffer_reference(fb);
55
56 return mdp5_plane_mode_set(plane, crtc, fb,
57 crtc_x, crtc_y, crtc_w, crtc_h,
58 src_x, src_y, src_w, src_h);
59}
60
61static int mdp5_plane_disable(struct drm_plane *plane)
62{
63 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
64 struct mdp5_kms *mdp5_kms = get_kms(plane);
65 enum mdp5_pipe pipe = mdp5_plane->pipe;
66 int i;
67
68 DBG("%s: disable", mdp5_plane->name);
69
70 /* update our SMP request to zero (release all our blks): */
71 for (i = 0; i < pipe2nclients(pipe); i++)
72 mdp5_smp_request(mdp5_kms, pipe2client(pipe, i), 0);
73
74 /* TODO detaching now will cause us not to get the last
75 * vblank and mdp5_smp_commit().. so other planes will
76 * still see smp blocks previously allocated to us as
77 * in-use..
78 */
79 if (plane->crtc)
80 mdp5_crtc_detach(plane->crtc, plane);
81
82 return 0;
83}
84
85static void mdp5_plane_destroy(struct drm_plane *plane)
86{
87 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
88
89 mdp5_plane_disable(plane);
90 drm_plane_cleanup(plane);
91
92 kfree(mdp5_plane);
93}
94
95/* helper to install properties which are common to planes and crtcs */
96void mdp5_plane_install_properties(struct drm_plane *plane,
97 struct drm_mode_object *obj)
98{
99 // XXX
100}
101
102int mdp5_plane_set_property(struct drm_plane *plane,
103 struct drm_property *property, uint64_t val)
104{
105 // XXX
106 return -EINVAL;
107}
108
109static const struct drm_plane_funcs mdp5_plane_funcs = {
110 .update_plane = mdp5_plane_update,
111 .disable_plane = mdp5_plane_disable,
112 .destroy = mdp5_plane_destroy,
113 .set_property = mdp5_plane_set_property,
114};
115
116void mdp5_plane_set_scanout(struct drm_plane *plane,
117 struct drm_framebuffer *fb)
118{
119 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
120 struct mdp5_kms *mdp5_kms = get_kms(plane);
121 enum mdp5_pipe pipe = mdp5_plane->pipe;
122 uint32_t nplanes = drm_format_num_planes(fb->pixel_format);
123 uint32_t iova[4];
124 int i;
125
126 for (i = 0; i < nplanes; i++) {
127 struct drm_gem_object *bo = msm_framebuffer_bo(fb, i);
128 msm_gem_get_iova(bo, mdp5_kms->id, &iova[i]);
129 }
130 for (; i < 4; i++)
131 iova[i] = 0;
132
133 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
134 MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
135 MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
136
137 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_B(pipe),
138 MDP5_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
139 MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
140
141 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe), iova[0]);
142 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe), iova[1]);
143 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe), iova[2]);
144 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe), iova[3]);
145
146 plane->fb = fb;
147}
148
149/* NOTE: looks like if horizontal decimation is used (if we supported that)
150 * then the width used to calculate SMP block requirements is the post-
151 * decimated width. Ie. SMP buffering sits downstream of decimation (which
152 * presumably happens during the dma from scanout buffer).
153 */
154static int request_smp_blocks(struct drm_plane *plane, uint32_t format,
155 uint32_t nplanes, uint32_t width)
156{
157 struct drm_device *dev = plane->dev;
158 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
159 struct mdp5_kms *mdp5_kms = get_kms(plane);
160 enum mdp5_pipe pipe = mdp5_plane->pipe;
161 int i, hsub, nlines, nblks, ret;
162
163 hsub = drm_format_horz_chroma_subsampling(format);
164
165 /* different if BWC (compressed framebuffer?) enabled: */
166 nlines = 2;
167
168 for (i = 0, nblks = 0; i < nplanes; i++) {
169 int n, fetch_stride, cpp;
170
171 cpp = drm_format_plane_cpp(format, i);
172 fetch_stride = width * cpp / (i ? hsub : 1);
173
174 n = DIV_ROUND_UP(fetch_stride * nlines, SMP_BLK_SIZE);
175
176 /* for hw rev v1.00 */
177 if (mdp5_kms->rev == 0)
178 n = roundup_pow_of_two(n);
179
180 DBG("%s[%d]: request %d SMP blocks", mdp5_plane->name, i, n);
181 ret = mdp5_smp_request(mdp5_kms, pipe2client(pipe, i), n);
182 if (ret) {
183 dev_err(dev->dev, "Could not allocate %d SMP blocks: %d\n",
184 n, ret);
185 return ret;
186 }
187
188 nblks += n;
189 }
190
 191	/* in the success case, return the total # of blocks allocated: */
192 return nblks;
193}
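
To make the arithmetic concrete, a worked example under the assumptions above (nlines = 2 for the uncompressed case, SMP_BLK_SIZE = 4096; the widths are illustrative):

    /* plane 0 of ARGB8888, width = 1920, cpp = 4: */
    fetch_stride = 1920 * 4;             /* = 7680 bytes */
    n = DIV_ROUND_UP(7680 * 2, 4096);    /* = 4 blocks   */

    /* CbCr plane of NV12, width = 1920, cpp = 2, hsub = 2: */
    fetch_stride = 1920 * 2 / 2;         /* = 1920 bytes */
    n = DIV_ROUND_UP(1920 * 2, 4096);    /* = 1 block    */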
194
195static void set_fifo_thresholds(struct drm_plane *plane, int nblks)
196{
197 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
198 struct mdp5_kms *mdp5_kms = get_kms(plane);
199 enum mdp5_pipe pipe = mdp5_plane->pipe;
200 uint32_t val;
201
 202	/* set watermarks at 1/4, 2/4 and 3/4 of the entries being fetched: */
203 val = (nblks * SMP_ENTRIES_PER_BLK) / 4;
204
205 mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), val * 1);
206 mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), val * 2);
207 mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), val * 3);
208
209}
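
Since SMP_ENTRIES_PER_BLK is 4096 / 16 = 256, a plane that was granted nblks = 4 (an assumed value, for illustration) fetches 1024 entries and gets its watermarks at the quarter points:

    val  = (4 * 256) / 4;    /* = 256       */
    WM_0 = val * 1;          /* = 256 (1/4) */
    WM_1 = val * 2;          /* = 512 (2/4) */
    WM_2 = val * 3;          /* = 768 (3/4) */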
210
211int mdp5_plane_mode_set(struct drm_plane *plane,
212 struct drm_crtc *crtc, struct drm_framebuffer *fb,
213 int crtc_x, int crtc_y,
214 unsigned int crtc_w, unsigned int crtc_h,
215 uint32_t src_x, uint32_t src_y,
216 uint32_t src_w, uint32_t src_h)
217{
218 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
219 struct mdp5_kms *mdp5_kms = get_kms(plane);
220 enum mdp5_pipe pipe = mdp5_plane->pipe;
221 const struct mdp_format *format;
222 uint32_t nplanes, config = 0;
223 uint32_t phasex_step = 0, phasey_step = 0;
224 uint32_t hdecm = 0, vdecm = 0;
225 int i, nblks;
226
227 nplanes = drm_format_num_planes(fb->pixel_format);
228
229 /* bad formats should already be rejected: */
230 if (WARN_ON(nplanes > pipe2nclients(pipe)))
231 return -EINVAL;
232
233 /* src values are in Q16 fixed point, convert to integer: */
234 src_x = src_x >> 16;
235 src_y = src_y >> 16;
236 src_w = src_w >> 16;
237 src_h = src_h >> 16;
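	/* (illustrative values: a Q16 src_w of 1920 arrives here as
	 * 1920 << 16 == 0x07800000; the shifts above recover 1920)
	 */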
238
239 DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", mdp5_plane->name,
240 fb->base.id, src_x, src_y, src_w, src_h,
241 crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);
242
243 /*
244 * Calculate and request required # of smp blocks:
245 */
246 nblks = request_smp_blocks(plane, fb->pixel_format, nplanes, src_w);
247 if (nblks < 0)
248 return nblks;
249
250 /*
251 * Currently we update the hw for allocations/requests immediately,
252 * but once atomic modeset/pageflip is in place, the allocation
253 * would move into atomic->check_plane_state(), while updating the
254 * hw would remain here:
255 */
256 for (i = 0; i < pipe2nclients(pipe); i++)
257 mdp5_smp_configure(mdp5_kms, pipe2client(pipe, i));
258
259 if (src_w != crtc_w) {
260 config |= MDP5_PIPE_SCALE_CONFIG_SCALEX_EN;
261 /* TODO calc phasex_step, hdecm */
262 }
263
264 if (src_h != crtc_h) {
265 config |= MDP5_PIPE_SCALE_CONFIG_SCALEY_EN;
266 /* TODO calc phasey_step, vdecm */
267 }
268
269 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe),
270 MDP5_PIPE_SRC_IMG_SIZE_WIDTH(src_w) |
271 MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(src_h));
272
273 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_SIZE(pipe),
274 MDP5_PIPE_SRC_SIZE_WIDTH(src_w) |
275 MDP5_PIPE_SRC_SIZE_HEIGHT(src_h));
276
277 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_XY(pipe),
278 MDP5_PIPE_SRC_XY_X(src_x) |
279 MDP5_PIPE_SRC_XY_Y(src_y));
280
281 mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_SIZE(pipe),
282 MDP5_PIPE_OUT_SIZE_WIDTH(crtc_w) |
283 MDP5_PIPE_OUT_SIZE_HEIGHT(crtc_h));
284
285 mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_XY(pipe),
286 MDP5_PIPE_OUT_XY_X(crtc_x) |
287 MDP5_PIPE_OUT_XY_Y(crtc_y));
288
289 mdp5_plane_set_scanout(plane, fb);
290
291 format = to_mdp_format(msm_framebuffer_format(fb));
292
293 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe),
294 MDP5_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) |
295 MDP5_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) |
296 MDP5_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) |
297 MDP5_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) |
298 COND(format->alpha_enable, MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE) |
299 MDP5_PIPE_SRC_FORMAT_CPP(format->cpp - 1) |
300 MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) |
301 COND(format->unpack_tight, MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT) |
302 MDP5_PIPE_SRC_FORMAT_NUM_PLANES(nplanes - 1) |
303 MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(CHROMA_RGB));
304
305 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_UNPACK(pipe),
306 MDP5_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) |
307 MDP5_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) |
308 MDP5_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) |
309 MDP5_PIPE_SRC_UNPACK_ELEM3(format->unpack[3]));
310
311 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_OP_MODE(pipe),
312 MDP5_PIPE_SRC_OP_MODE_BWC(BWC_LOSSLESS));
313
314 /* not using secure mode: */
315 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0);
316
317 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe), phasex_step);
318 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe), phasey_step);
319 mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe),
320 MDP5_PIPE_DECIMATION_VERT(vdecm) |
321 MDP5_PIPE_DECIMATION_HORZ(hdecm));
322 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe),
323 MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER(SCALE_FILTER_NEAREST) |
324 MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER(SCALE_FILTER_NEAREST) |
325 MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER(SCALE_FILTER_NEAREST) |
326 MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER(SCALE_FILTER_NEAREST) |
327 MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(SCALE_FILTER_NEAREST) |
328 MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(SCALE_FILTER_NEAREST));
329
330 set_fifo_thresholds(plane, nblks);
331
332 /* TODO detach from old crtc (if we had more than one) */
333 mdp5_crtc_attach(crtc, plane);
334
335 return 0;
336}
337
338void mdp5_plane_complete_flip(struct drm_plane *plane)
339{
340 struct mdp5_kms *mdp5_kms = get_kms(plane);
341 enum mdp5_pipe pipe = to_mdp5_plane(plane)->pipe;
342 int i;
343
344 for (i = 0; i < pipe2nclients(pipe); i++)
345 mdp5_smp_commit(mdp5_kms, pipe2client(pipe, i));
346}
347
348enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
349{
350 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
351 return mdp5_plane->pipe;
352}
353
354/* initialize plane */
355struct drm_plane *mdp5_plane_init(struct drm_device *dev,
356 enum mdp5_pipe pipe, bool private_plane)
357{
358 struct drm_plane *plane = NULL;
359 struct mdp5_plane *mdp5_plane;
360 int ret;
361
362 mdp5_plane = kzalloc(sizeof(*mdp5_plane), GFP_KERNEL);
363 if (!mdp5_plane) {
364 ret = -ENOMEM;
365 goto fail;
366 }
367
368 plane = &mdp5_plane->base;
369
370 mdp5_plane->pipe = pipe;
371 mdp5_plane->name = pipe2name(pipe);
372
373 mdp5_plane->nformats = mdp5_get_formats(pipe, mdp5_plane->formats,
374 ARRAY_SIZE(mdp5_plane->formats));
375
376 drm_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
377 mdp5_plane->formats, mdp5_plane->nformats,
378 private_plane);
379
380 mdp5_plane_install_properties(plane, &plane->base);
381
382 return plane;
383
384fail:
385 if (plane)
386 mdp5_plane_destroy(plane);
387
388 return ERR_PTR(ret);
389}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
new file mode 100644
index 000000000000..2d0236b963a6
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
@@ -0,0 +1,173 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18
19#include "mdp5_kms.h"
20#include "mdp5_smp.h"
21
22
23/* SMP - Shared Memory Pool
24 *
25 * These are shared between all the clients, where each plane in a
 26 * scanout buffer is an SMP client. Ie. scanout of 3-plane I420 on
27 * pipe VIG0 => 3 clients: VIG0_Y, VIG0_CB, VIG0_CR.
28 *
29 * Based on the size of the attached scanout buffer, a certain # of
30 * blocks must be allocated to that client out of the shared pool.
31 *
 32 * Each block is either free, or pending/in-use by a client. The
 33 * updates happen in three steps:
34 *
35 * 1) mdp5_smp_request():
 36 *    When plane scanout is set up, calculate the required number
 37 *    of blocks needed per client, and request them. Blocks not
 38 *    in-use or pending for any other client are added to the
 39 *    client's pending set.
40 *
41 * 2) mdp5_smp_configure():
42 * As hw is programmed, before FLUSH, MDP5_SMP_ALLOC registers
 43 *    are configured for the union(pending, inuse).
44 *
45 * 3) mdp5_smp_commit():
 46 *    After the next vblank, copy pending -> inuse. Optionally update
 47 *    MDP5_SMP_ALLOC registers if there are newly unused blocks.
 48 *
 49 * On the next vblank after changes have been committed to hw, the
 50 * client's pending blocks become its in-use blocks (and no-longer-
 51 * in-use blocks become available to other clients).
52 *
53 * btw, hurray for confusing overloaded acronyms! :-/
54 *
55 * NOTE: for atomic modeset/pageflip NONBLOCK operations, step #1
 56 * should happen at (or before?) atomic->check(). And we'd need
 57 * an API to discard previous requests if the update is aborted
 58 * or is test-only.
59 *
60 * TODO would perhaps be nice to have debugfs to dump out kernel
61 * inuse and pending state of all clients..
62 */
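
A minimal sketch of how the three steps line up with a plane update for a single client (the CID_VIG0_Y name and the block count here are illustrative assumptions, not part of the patch):

    mdp5_smp_request(mdp5_kms, CID_VIG0_Y, 4);   /* at plane setup   */
    mdp5_smp_configure(mdp5_kms, CID_VIG0_Y);    /* hw programming,
                                                    before FLUSH     */
    /* ... FLUSH, then vblank ... */
    mdp5_smp_commit(mdp5_kms, CID_VIG0_Y);       /* pending -> inuse */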
63
64static DEFINE_SPINLOCK(smp_lock);
65
66
67/* step #1: update # of blocks pending for the client: */
68int mdp5_smp_request(struct mdp5_kms *mdp5_kms,
69 enum mdp5_client_id cid, int nblks)
70{
71 struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
 72	int i, ret = 0, avail, cur_nblks, cnt = mdp5_kms->smp_blk_cnt;
73 unsigned long flags;
74
75 spin_lock_irqsave(&smp_lock, flags);
76
77 avail = cnt - bitmap_weight(mdp5_kms->smp_state, cnt);
78 if (nblks > avail) {
79 ret = -ENOSPC;
80 goto fail;
81 }
82
83 cur_nblks = bitmap_weight(ps->pending, cnt);
84 if (nblks > cur_nblks) {
85 /* grow the existing pending reservation: */
86 for (i = cur_nblks; i < nblks; i++) {
87 int blk = find_first_zero_bit(mdp5_kms->smp_state, cnt);
88 set_bit(blk, ps->pending);
89 set_bit(blk, mdp5_kms->smp_state);
90 }
91 } else {
92 /* shrink the existing pending reservation: */
93 for (i = cur_nblks; i > nblks; i--) {
94 int blk = find_first_bit(ps->pending, cnt);
95 clear_bit(blk, ps->pending);
96 /* don't clear in global smp_state until _commit() */
97 }
98 }
99
100fail:
101 spin_unlock_irqrestore(&smp_lock, flags);
 102	return ret;
103}
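
For example, shrinking an assumed pending set of three blocks down to one (bit 0 = block 0):

    /* before: pending = 0b0111, smp_state bits 0..2 set */
    mdp5_smp_request(mdp5_kms, cid, 1);
    /* after:  pending = 0b0100 (find_first_bit cleared bits 0, then 1);
     *         smp_state keeps bits 0..2 set until mdp5_smp_commit() */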
104
105static void update_smp_state(struct mdp5_kms *mdp5_kms,
106 enum mdp5_client_id cid, mdp5_smp_state_t *assigned)
107{
108 int cnt = mdp5_kms->smp_blk_cnt;
109 uint32_t blk, val;
110
111 for_each_set_bit(blk, *assigned, cnt) {
112 int idx = blk / 3;
113 int fld = blk % 3;
114
115 val = mdp5_read(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx));
116
117 switch (fld) {
118 case 0:
119 val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
120 val |= MDP5_SMP_ALLOC_W_REG_CLIENT0(cid);
121 break;
122 case 1:
123 val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
124 val |= MDP5_SMP_ALLOC_W_REG_CLIENT1(cid);
125 break;
126 case 2:
127 val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
128 val |= MDP5_SMP_ALLOC_W_REG_CLIENT2(cid);
129 break;
130 }
131
132 mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx), val);
133 mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(idx), val);
134 }
135}
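
Each ALLOC register packs three client-id fields, hence the / 3 and % 3 above. E.g. (illustrative) block 7 lands in the middle field of register 2:

    blk = 7  =>  idx = 7 / 3 = 2,  fld = 7 % 3 = 1
             =>  read-modify-write of the CLIENT1 field in
                 REG_MDP5_SMP_ALLOC_W_REG(2) (and its _R_ twin)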
136
137/* step #2: configure hw for union(pending, inuse): */
138void mdp5_smp_configure(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid)
139{
140 struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
141 int cnt = mdp5_kms->smp_blk_cnt;
142 mdp5_smp_state_t assigned;
143
144 bitmap_or(assigned, ps->inuse, ps->pending, cnt);
145 update_smp_state(mdp5_kms, cid, &assigned);
146}
147
148/* step #3: after vblank, copy pending -> inuse: */
149void mdp5_smp_commit(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid)
150{
151 struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
152 int cnt = mdp5_kms->smp_blk_cnt;
153 mdp5_smp_state_t released;
154
155 /*
 156	 * Figure out if there are any blocks we were previously
157 * using, which can be released and made available to other
158 * clients:
159 */
160 if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) {
161 unsigned long flags;
162
163 spin_lock_irqsave(&smp_lock, flags);
164 /* clear released blocks: */
165 bitmap_andnot(mdp5_kms->smp_state, mdp5_kms->smp_state,
166 released, cnt);
167 spin_unlock_irqrestore(&smp_lock, flags);
168
169 update_smp_state(mdp5_kms, CID_UNUSED, &released);
170 }
171
172 bitmap_copy(ps->inuse, ps->pending, cnt);
173}
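
A worked example of the release path, with assumed bitmaps (bit 0 = block 0):

    inuse    = 0b0111,  pending = 0b0011
    released = inuse & ~pending = 0b0100     /* block 2          */
    smp_state &= ~released;                  /* block 2 now free */
    /* ALLOC regs for block 2 rewritten to CID_UNUSED */
    inuse    = pending;                      /* = 0b0011         */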
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
new file mode 100644
index 000000000000..0ab739e1a1dd
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
@@ -0,0 +1,41 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __MDP5_SMP_H__
19#define __MDP5_SMP_H__
20
21#include "msm_drv.h"
22
23#define MAX_SMP_BLOCKS 22
24#define SMP_BLK_SIZE 4096
25#define SMP_ENTRIES_PER_BLK (SMP_BLK_SIZE / 16)
26
27typedef DECLARE_BITMAP(mdp5_smp_state_t, MAX_SMP_BLOCKS);
28
29struct mdp5_client_smp_state {
30 mdp5_smp_state_t inuse;
31 mdp5_smp_state_t pending;
32};
33
34struct mdp5_kms;
35
36int mdp5_smp_request(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid, int nblks);
37void mdp5_smp_configure(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid);
38void mdp5_smp_commit(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid);
39
40
41#endif /* __MDP5_SMP_H__ */
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 4fa9a03d2a6c..63ed79fe8a05 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -150,6 +150,24 @@ static int msm_unload(struct drm_device *dev)
150 return 0; 150 return 0;
151} 151}
152 152
153static int get_mdp_ver(struct platform_device *pdev)
154{
155#ifdef CONFIG_OF
 156	static const struct of_device_id match_types[] = { {
157 .compatible = "qcom,mdss_mdp",
158 .data = (void *)5,
159 }, {
160 /* end node */
161 } };
162 struct device *dev = &pdev->dev;
163 const struct of_device_id *match;
164 match = of_match_node(match_types, dev->of_node);
165 if (match)
 166	return (int)(unsigned long)match->data;
167#endif
168 return 4;
169}
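
So a board whose device tree contains something like the following node (a sketch; the unit address and omitted properties are placeholders, not part of this patch) takes the MDP5 path, while everything else falls back to MDP4:

    mdp: mdp@f9000000 {
            compatible = "qcom,mdss_mdp";
            /* reg, interrupts, clocks, ... omitted */
    };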
170
153static int msm_load(struct drm_device *dev, unsigned long flags) 171static int msm_load(struct drm_device *dev, unsigned long flags)
154{ 172{
155 struct platform_device *pdev = dev->platformdev; 173 struct platform_device *pdev = dev->platformdev;
@@ -208,7 +226,18 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
208 (uint32_t)(priv->vram.paddr + size)); 226 (uint32_t)(priv->vram.paddr + size));
209 } 227 }
210 228
211 kms = mdp4_kms_init(dev); 229 switch (get_mdp_ver(pdev)) {
230 case 4:
231 kms = mdp4_kms_init(dev);
232 break;
233 case 5:
234 kms = mdp5_kms_init(dev);
235 break;
236 default:
237 kms = ERR_PTR(-ENODEV);
238 break;
239 }
240
212 if (IS_ERR(kms)) { 241 if (IS_ERR(kms)) {
213 /* 242 /*
214 * NOTE: once we have GPU support, having no kms should not 243 * NOTE: once we have GPU support, having no kms should not
@@ -811,12 +840,19 @@ static const struct platform_device_id msm_id[] = {
811 { } 840 { }
812}; 841};
813 842
843static const struct of_device_id dt_match[] = {
844 { .compatible = "qcom,mdss_mdp" },
845 {}
846};
847MODULE_DEVICE_TABLE(of, dt_match);
848
814static struct platform_driver msm_platform_driver = { 849static struct platform_driver msm_platform_driver = {
815 .probe = msm_pdev_probe, 850 .probe = msm_pdev_probe,
816 .remove = msm_pdev_remove, 851 .remove = msm_pdev_remove,
817 .driver = { 852 .driver = {
818 .owner = THIS_MODULE, 853 .owner = THIS_MODULE,
819 .name = "msm", 854 .name = "msm",
855 .of_match_table = dt_match,
820 .pm = &msm_pm_ops, 856 .pm = &msm_pm_ops,
821 }, 857 },
822 .id_table = msm_id, 858 .id_table = msm_id,
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index dc0d30f5b291..06437745bc2c 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -63,5 +63,6 @@ static inline void msm_kms_init(struct msm_kms *kms,
63} 63}
64 64
65struct msm_kms *mdp4_kms_init(struct drm_device *dev); 65struct msm_kms *mdp4_kms_init(struct drm_device *dev);
66struct msm_kms *mdp5_kms_init(struct drm_device *dev);
66 67
67#endif /* __MSM_KMS_H__ */ 68#endif /* __MSM_KMS_H__ */