author     Dave Airlie <airlied@redhat.com>	2017-04-10 17:47:02 -0400
committer  Dave Airlie <airlied@redhat.com>	2017-04-10 17:47:02 -0400
commit     d455937ed1cff44b9e5567f0ab697ad486429c0f
tree       504b31fa69cdefa2902fb2f977f1a54007608f44 /drivers/gpu
parent     df45eaca51f4826f328859e5b203fbeab6fcf2a3
parent     98db803f6413e6d4bf1f590ea57e9e7dfe1eb32b
Merge branch 'msm-next' of git://people.freedesktop.org/~robclark/linux into drm-next
Noteworthy changes this time:

1) 4k support for newer chips (ganging up hwpipes and mixers)
2) using OPP bindings for gpu
3) more prep work towards per-process pagetables

* 'msm-next' of git://people.freedesktop.org/~robclark/linux: (47 commits)
  msm/drm: gpu: Dynamically locate the clocks from the device tree
  drm/msm: gpu: Use OPP tables if we can
  drm/msm: Hard code the GPU "slow frequency"
  drm/msm: Add MSM_PARAM_GMEM_BASE
  drm/msm: Reference count address spaces
  drm/msm: Make sure to detach the MMU during GPU cleanup
  drm/msm/mdp5: Enable 3D mux in mdp5_ctl
  drm/msm/mdp5: Reset CTL blend registers before configuring them
  drm/msm/mdp5: Assign 'right' mixer to CRTC state
  drm/msm/mdp5: Stage border out on base stage if CRTC has 2 LMs
  drm/msm/mdp5: Stage right side hwpipes on Right-side Layer Mixer
  drm/msm/mdp5: Prepare Layer Mixers for source split
  drm/msm/mdp5: Configure 'right' hwpipe
  drm/msm/mdp5: Assign a 'right hwpipe' to plane state
  drm/msm/mdp5: Create mdp5_hwpipe_mode_set
  drm/msm/mdp5: Add optional 'right' Layer Mixer in CRTC state
  drm/msm/mdp5: Add a CAP for Source Split
  drm/msm/mdp5: Remove mixer/intf pointers from mdp5_ctl
  drm/msm/mdp5: Start using parameters from CRTC state
  drm/msm/mdp5: Add more stuff to CRTC state
  ...
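A recurring theme in this pull is the switch from driver-managed power calls (wrapping every register access in gpu->funcs->pm_resume()/pm_suspend()) to kernel runtime PM. As a rough sketch of the resulting usage pattern — the example_* names are hypothetical, only the pm_runtime_* calls are the real API:

	#include <linux/pm_runtime.h>

	/* Sketch: bracket hardware access with a runtime-PM reference
	 * instead of calling the resume/suspend hooks by hand. */
	static int example_read_status(struct device *dev)
	{
		int ret;

		ret = pm_runtime_get_sync(dev); /* powers the GPU up if idle */
		if (ret < 0) {
			pm_runtime_put_noidle(dev);
			return ret;
		}

		/* ... safe to touch registers here ... */

		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev); /* may power down after the delay */
		return 0;
	}

Combined with pm_runtime_set_autosuspend_delay()/pm_runtime_use_autosuspend() (which adreno_gpu_init() now calls, see below), the put defers the actual power-down, replacing the old hand-rolled inactivity handling.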
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/msm/Makefile                    |    1
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx_gpu.c           |    2
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx_gpu.c           |    4
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.c           |   22
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_device.c      |  126
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c         |   43
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.h         |    2
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c        |   12
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c         |    2
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c         |   81
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h         |    8
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c |   30
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c        |  466
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c         |  192
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h         |   21
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c     |   66
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c         |  123
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h         |   53
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.c       |  172
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.h       |   47
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c        |    2
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h        |    1
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c       |  340
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp_kms.h               |    6
-rw-r--r--  drivers/gpu/drm/msm/msm_debugfs.c               |    2
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c                   |   13
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h                   |    3
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.h                   |    2
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c            |   39
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_vma.c               |   35
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c                   |  183
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.h                   |   18
-rw-r--r--  drivers/gpu/drm/msm/msm_iommu.c                 |   69
-rw-r--r--  drivers/gpu/drm/msm/msm_rd.c                    |    4
34 files changed, 1568 insertions(+), 622 deletions(-)
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 39055362da95..5241ac8803ba 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -40,6 +40,7 @@ msm-y := \
 	mdp/mdp5/mdp5_mdss.o \
 	mdp/mdp5/mdp5_kms.o \
 	mdp/mdp5/mdp5_pipe.o \
+	mdp/mdp5/mdp5_mixer.o \
 	mdp/mdp5/mdp5_plane.o \
 	mdp/mdp5/mdp5_smp.o \
 	msm_atomic.o \
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index b999349b7d2d..7fd77958a436 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -412,10 +412,8 @@ static const unsigned int a3xx_registers[] = {
 #ifdef CONFIG_DEBUG_FS
 static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m)
 {
-	gpu->funcs->pm_resume(gpu);
 	seq_printf(m, "status: %08x\n",
 			gpu_read(gpu, REG_A3XX_RBBM_STATUS));
-	gpu->funcs->pm_suspend(gpu);
 	adreno_show(gpu, m);
 }
 #endif
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index 511bc855cc7f..dfe0eceaae3b 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -456,12 +456,8 @@ static const unsigned int a4xx_registers[] = {
 #ifdef CONFIG_DEBUG_FS
 static void a4xx_show(struct msm_gpu *gpu, struct seq_file *m)
 {
-	gpu->funcs->pm_resume(gpu);
-
 	seq_printf(m, "status: %08x\n",
 			gpu_read(gpu, REG_A4XX_RBBM_STATUS));
-	gpu->funcs->pm_suspend(gpu);
-
 	adreno_show(gpu, m);
 
 }
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 36602ac7e248..31a9bceed32c 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -638,10 +638,8 @@ static void a5xx_cp_err_irq(struct msm_gpu *gpu)
 	}
 }
 
-static void a5xx_rbbm_err_irq(struct msm_gpu *gpu)
+static void a5xx_rbbm_err_irq(struct msm_gpu *gpu, u32 status)
 {
-	u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS);
-
 	if (status & A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR) {
 		u32 val = gpu_read(gpu, REG_A5XX_RBBM_AHB_ERROR_STATUS);
 
@@ -653,6 +651,10 @@ static void a5xx_rbbm_err_irq(struct msm_gpu *gpu)
 
 		/* Clear the error */
 		gpu_write(gpu, REG_A5XX_RBBM_AHB_CMD, (1 << 4));
+
+		/* Clear the interrupt */
+		gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
+			A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);
 	}
 
 	if (status & A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT)
@@ -704,10 +706,16 @@ static irqreturn_t a5xx_irq(struct msm_gpu *gpu)
 {
 	u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS);
 
-	gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD, status);
+	/*
+	 * Clear all the interrupts except RBBM_AHB_ERROR - if we clear it
+	 * before the source is cleared the interrupt will storm.
+	 */
+	gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
+		status & ~A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);
 
+	/* Pass status to a5xx_rbbm_err_irq because we've already cleared it */
 	if (status & RBBM_ERROR_MASK)
-		a5xx_rbbm_err_irq(gpu);
+		a5xx_rbbm_err_irq(gpu, status);
 
 	if (status & A5XX_RBBM_INT_0_MASK_CP_HW_ERROR)
 		a5xx_cp_err_irq(gpu);
@@ -837,12 +845,8 @@ static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
 #ifdef CONFIG_DEBUG_FS
 static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
 {
-	gpu->funcs->pm_resume(gpu);
-
 	seq_printf(m, "status: %08x\n",
 			gpu_read(gpu, REG_A5XX_RBBM_STATUS));
-	gpu->funcs->pm_suspend(gpu);
-
 	adreno_show(gpu, m);
 }
 #endif
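The a5xx_irq() change above is the usual fix for an interrupt that storms when acknowledged before its source is quieted: ack every bit except the troublesome one up front, handle the error (which clears the source), and only then ack that bit — which is exactly what the new REG_A5XX_RBBM_INT_CLEAR_CMD write in a5xx_rbbm_err_irq() does. A generic sketch of the ordering, with entirely hypothetical register names and helpers:

	#include <linux/interrupt.h>

	/* example_read/example_write/EXAMPLE_* are made-up stand-ins;
	 * only the ack ordering is the point. */
	static irqreturn_t example_irq(int irq, void *data)
	{
		struct example_gpu *gpu = data;
		u32 status = example_read(gpu, EXAMPLE_INT_STATUS);

		/* ack everything except the storm-prone bit... */
		example_write(gpu, EXAMPLE_INT_CLEAR,
			      status & ~EXAMPLE_INT_AHB_ERROR);

		if (status & EXAMPLE_INT_AHB_ERROR) {
			example_handle_ahb_error(gpu); /* clears the source */
			/* ...then ack it, now that it cannot re-latch */
			example_write(gpu, EXAMPLE_INT_CLEAR,
				      EXAMPLE_INT_AHB_ERROR);
		}

		return IRQ_HANDLED;
	}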
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index ece39b16a864..c0fa5d1c75ff 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -2,7 +2,7 @@
  * Copyright (C) 2013-2014 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014,2017 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published by
@@ -17,6 +17,7 @@
  * this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/pm_opp.h>
 #include "adreno_gpu.h"
 
 #define ANY_ID 0xff
@@ -155,21 +156,14 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
 
 	if (gpu) {
 		int ret;
-		mutex_lock(&dev->struct_mutex);
-		gpu->funcs->pm_resume(gpu);
-		mutex_unlock(&dev->struct_mutex);
 
-		disable_irq(gpu->irq);
-
-		ret = gpu->funcs->hw_init(gpu);
+		pm_runtime_get_sync(&pdev->dev);
+		ret = msm_gpu_hw_init(gpu);
+		pm_runtime_put_sync(&pdev->dev);
 		if (ret) {
 			dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
 			gpu->funcs->destroy(gpu);
 			gpu = NULL;
-		} else {
-			enable_irq(gpu->irq);
-			/* give inactive pm a chance to kick in: */
-			msm_gpu_retire(gpu);
 		}
 	}
 
@@ -220,10 +214,71 @@ static int find_chipid(struct device *dev, u32 *chipid)
 	return 0;
 }
 
+/* Get legacy powerlevels from qcom,gpu-pwrlevels and populate the opp table */
+static int adreno_get_legacy_pwrlevels(struct device *dev)
+{
+	struct device_node *child, *node;
+	int ret;
+
+	node = of_find_compatible_node(dev->of_node, NULL,
+		"qcom,gpu-pwrlevels");
+	if (!node) {
+		dev_err(dev, "Could not find the GPU powerlevels\n");
+		return -ENXIO;
+	}
+
+	for_each_child_of_node(node, child) {
+		unsigned int val;
+
+		ret = of_property_read_u32(child, "qcom,gpu-freq", &val);
+		if (ret)
+			continue;
+
+		/*
+		 * Skip the intentionally bogus clock value found at the bottom
+		 * of most legacy frequency tables
+		 */
+		if (val != 27000000)
+			dev_pm_opp_add(dev, val, 0);
+	}
+
+	return 0;
+}
+
+static int adreno_get_pwrlevels(struct device *dev,
+		struct adreno_platform_config *config)
+{
+	unsigned long freq = ULONG_MAX;
+	struct dev_pm_opp *opp;
+	int ret;
+
+	/* You down with OPP? */
+	if (!of_find_property(dev->of_node, "operating-points-v2", NULL))
+		ret = adreno_get_legacy_pwrlevels(dev);
+	else
+		ret = dev_pm_opp_of_add_table(dev);
+
+	if (ret)
+		return ret;
+
+	/* Find the fastest defined rate */
+	opp = dev_pm_opp_find_freq_floor(dev, &freq);
+	if (!IS_ERR(opp))
+		config->fast_rate = dev_pm_opp_get_freq(opp);
+
+	if (!config->fast_rate) {
+		DRM_DEV_INFO(dev,
+			"Could not find clock rate. Using default\n");
+		/* Pick a suitably safe clock speed for any target */
+		config->fast_rate = 200000000;
+	}
+
+	return 0;
+}
+
 static int adreno_bind(struct device *dev, struct device *master, void *data)
 {
 	static struct adreno_platform_config config = {};
-	struct device_node *child, *node = dev->of_node;
 	u32 val;
 	int ret;
 
@@ -238,28 +293,10 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
 
 	/* find clock rates: */
 	config.fast_rate = 0;
-	config.slow_rate = ~0;
-	for_each_child_of_node(node, child) {
-		if (of_device_is_compatible(child, "qcom,gpu-pwrlevels")) {
-			struct device_node *pwrlvl;
-			for_each_child_of_node(child, pwrlvl) {
-				ret = of_property_read_u32(pwrlvl, "qcom,gpu-freq", &val);
-				if (ret) {
-					dev_err(dev, "could not find gpu-freq: %d\n", ret);
-					return ret;
-				}
-				config.fast_rate = max(config.fast_rate, val);
-				config.slow_rate = min(config.slow_rate, val);
-			}
-		}
-	}
 
-	if (!config.fast_rate) {
-		dev_warn(dev, "could not find clk rates\n");
-		/* This is a safe low speed for all devices: */
-		config.fast_rate = 200000000;
-		config.slow_rate = 27000000;
-	}
+	ret = adreno_get_pwrlevels(dev, &config);
+	if (ret)
+		return ret;
 
 	dev->platform_data = &config;
 	set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev));
@@ -296,12 +333,35 @@ static const struct of_device_id dt_match[] = {
 	{}
 };
 
+#ifdef CONFIG_PM
+static int adreno_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct msm_gpu *gpu = platform_get_drvdata(pdev);
+
+	return gpu->funcs->pm_resume(gpu);
+}
+
+static int adreno_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct msm_gpu *gpu = platform_get_drvdata(pdev);
+
+	return gpu->funcs->pm_suspend(gpu);
+}
+#endif
+
+static const struct dev_pm_ops adreno_pm_ops = {
+	SET_RUNTIME_PM_OPS(adreno_suspend, adreno_resume, NULL)
+};
+
 static struct platform_driver adreno_driver = {
 	.probe = adreno_probe,
 	.remove = adreno_remove,
 	.driver = {
 		.name = "adreno",
 		.of_match_table = dt_match,
+		.pm = &adreno_pm_ops,
 	},
 };
 
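One subtlety in adreno_get_pwrlevels() above: initializing freq to ULONG_MAX before calling dev_pm_opp_find_freq_floor() makes it return the highest frequency in the OPP table, which is how config->fast_rate ends up as the fastest defined rate. A standalone sketch of that lookup (example_fastest_rate() is a made-up caller; the dev_pm_opp_* calls are the real API, used just as the patch uses them):

	#include <linux/pm_opp.h>

	/* Sketch: report the fastest rate in an already-populated OPP
	 * table (filled via dev_pm_opp_of_add_table() or dev_pm_opp_add()). */
	static unsigned long example_fastest_rate(struct device *dev)
	{
		unsigned long freq = ULONG_MAX; /* floor of "infinity" == top OPP */
		struct dev_pm_opp *opp;

		opp = dev_pm_opp_find_freq_floor(dev, &freq);
		if (IS_ERR(opp))
			return 0;

		return dev_pm_opp_get_freq(opp);
	}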
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 5ae65426b4e5..5b63fc649dcc 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -35,6 +35,9 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
 	case MSM_PARAM_GMEM_SIZE:
 		*value = adreno_gpu->gmem;
 		return 0;
+	case MSM_PARAM_GMEM_BASE:
+		*value = 0x100000;
+		return 0;
 	case MSM_PARAM_CHIP_ID:
 		*value = adreno_gpu->rev.patchid |
 				(adreno_gpu->rev.minor << 8) |
@@ -68,6 +71,14 @@ int adreno_hw_init(struct msm_gpu *gpu)
 		return ret;
 	}
 
+	/* reset ringbuffer: */
+	gpu->rb->cur = gpu->rb->start;
+
+	/* reset completed fence seqno: */
+	adreno_gpu->memptrs->fence = gpu->fctx->completed_fence;
+	adreno_gpu->memptrs->rptr = 0;
+	adreno_gpu->memptrs->wptr = 0;
+
 	/* Setup REG_CP_RB_CNTL: */
 	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
 			/* size is log2(quad-words): */
@@ -111,29 +122,20 @@ uint32_t adreno_last_fence(struct msm_gpu *gpu)
 
 void adreno_recover(struct msm_gpu *gpu)
 {
-	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 	struct drm_device *dev = gpu->dev;
 	int ret;
 
-	gpu->funcs->pm_suspend(gpu);
-
-	/* reset ringbuffer: */
-	gpu->rb->cur = gpu->rb->start;
-
-	/* reset completed fence seqno: */
-	adreno_gpu->memptrs->fence = gpu->fctx->completed_fence;
-	adreno_gpu->memptrs->rptr = 0;
-	adreno_gpu->memptrs->wptr = 0;
+	// XXX pm-runtime?? we *need* the device to be off after this
+	// so maybe continuing to call ->pm_suspend/resume() is better?
 
+	gpu->funcs->pm_suspend(gpu);
 	gpu->funcs->pm_resume(gpu);
 
-	disable_irq(gpu->irq);
-	ret = gpu->funcs->hw_init(gpu);
+	ret = msm_gpu_hw_init(gpu);
 	if (ret) {
 		dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
 		/* hmm, oh well? */
 	}
-	enable_irq(gpu->irq);
 }
 
 void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
@@ -259,8 +261,6 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
 	seq_printf(m, "wptr: %d\n", adreno_gpu->memptrs->wptr);
 	seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb));
 
-	gpu->funcs->pm_resume(gpu);
-
 	/* dump these out in a form that can be parsed by demsm: */
 	seq_printf(m, "IO:region %s 00000000 00020000\n", gpu->name);
 	for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
@@ -273,8 +273,6 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
 			seq_printf(m, "IO:R %08x %08x\n", addr<<2, val);
 		}
 	}
-
-	gpu->funcs->pm_suspend(gpu);
 }
 #endif
 
@@ -354,14 +352,13 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 	adreno_gpu->rev = config->rev;
 
 	gpu->fast_rate = config->fast_rate;
-	gpu->slow_rate = config->slow_rate;
 	gpu->bus_freq = config->bus_freq;
 #ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
 	gpu->bus_scale_table = config->bus_scale_table;
 #endif
 
-	DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
-			gpu->fast_rate, gpu->slow_rate, gpu->bus_freq);
+	DBG("fast_rate=%u, slow_rate=27000000, bus_freq=%u",
+			gpu->fast_rate, gpu->bus_freq);
 
 	ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
 		adreno_gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq",
@@ -369,6 +366,10 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 	if (ret)
 		return ret;
 
+	pm_runtime_set_autosuspend_delay(&pdev->dev, DRM_MSM_INACTIVE_PERIOD);
+	pm_runtime_use_autosuspend(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+
 	ret = request_firmware(&adreno_gpu->pm4, adreno_gpu->info->pm4fw, drm->dev);
 	if (ret) {
 		dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n",
@@ -439,6 +440,6 @@ void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
 	if (gpu->aspace) {
 		gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
 				iommu_ports, ARRAY_SIZE(iommu_ports));
-		msm_gem_address_space_destroy(gpu->aspace);
+		msm_gem_address_space_put(gpu->aspace);
 	}
 }
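The msm_gem_address_space_destroy() → msm_gem_address_space_put() rename here comes from the "Reference count address spaces" commit in this pull: with per-process pagetables on the horizon, an address space may outlive any single user, so teardown becomes a reference drop rather than an unconditional destroy. A hypothetical kref model of that ownership (not msm's actual struct layout):

	#include <linux/kref.h>
	#include <linux/slab.h>

	struct example_aspace {
		struct kref kref;	/* kref_init() at creation time */
		/* ... mmu, iova allocator, etc. ... */
	};

	static void example_aspace_release(struct kref *kref)
	{
		struct example_aspace *aspace =
			container_of(kref, struct example_aspace, kref);

		/* tear down translations, then free - runs at refcount 0 */
		kfree(aspace);
	}

	/* each user (GPU core, KMS, a submit holding mappings) takes a ref... */
	static void example_aspace_get(struct example_aspace *aspace)
	{
		kref_get(&aspace->kref);
	}

	/* ...and "destroy" becomes just dropping your reference */
	static void example_aspace_put(struct example_aspace *aspace)
	{
		if (aspace)
			kref_put(&aspace->kref, example_aspace_release);
	}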
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 42e444a67630..fb4831f9f80b 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -123,7 +123,7 @@ struct adreno_gpu {
 /* platform config data (ie. from DT, or pdata) */
 struct adreno_platform_config {
 	struct adreno_rev rev;
-	uint32_t fast_rate, slow_rate, bus_freq;
+	uint32_t fast_rate, bus_freq;
 #ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
 	struct msm_bus_scale_pdata *bus_scale_table;
 #endif
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 1c29618f4ddb..f29194a74a19 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -114,15 +114,9 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
 	spin_lock_irqsave(&dev->event_lock, flags);
 	event = mdp4_crtc->event;
 	if (event) {
-		/* if regular vblank case (!file) or if cancel-flip from
-		 * preclose on file that requested flip, then send the
-		 * event:
-		 */
-		if (!file || (event->base.file_priv == file)) {
-			mdp4_crtc->event = NULL;
-			DBG("%s: send event: %p", mdp4_crtc->name, event);
-			drm_crtc_send_vblank_event(crtc, event);
-		}
+		mdp4_crtc->event = NULL;
+		DBG("%s: send event: %p", mdp4_crtc->name, event);
+		drm_crtc_send_vblank_event(crtc, event);
 	}
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 }
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 5b6516bb9d06..3d26d7774c08 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -169,7 +169,7 @@ static void mdp4_destroy(struct msm_kms *kms)
 	if (aspace) {
 		aspace->mmu->funcs->detach(aspace->mmu,
 				iommu_ports, ARRAY_SIZE(iommu_ports));
-		msm_gem_address_space_destroy(aspace);
+		msm_gem_address_space_put(aspace);
 	}
 
 	if (mdp4_kms->rpm_enabled)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
index ba2d017f6591..c2bdad88447e 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -70,6 +70,18 @@ const struct mdp5_cfg_hw msm8x74v1_config = {
 	.lm = {
 		.count = 5,
 		.base = { 0x03100, 0x03500, 0x03900, 0x03d00, 0x04100 },
+		.instances = {
+				{ .id = 0, .pp = 0, .dspp = 0,
+				  .caps = MDP_LM_CAP_DISPLAY, },
+				{ .id = 1, .pp = 1, .dspp = 1,
+				  .caps = MDP_LM_CAP_DISPLAY, },
+				{ .id = 2, .pp = 2, .dspp = 2,
+				  .caps = MDP_LM_CAP_DISPLAY, },
+				{ .id = 3, .pp = -1, .dspp = -1,
+				  .caps = MDP_LM_CAP_WB },
+				{ .id = 4, .pp = -1, .dspp = -1,
+				  .caps = MDP_LM_CAP_WB },
+			     },
 		.nb_stages = 5,
 	},
 	.dspp = {
@@ -134,6 +146,18 @@ const struct mdp5_cfg_hw msm8x74v2_config = {
 	.lm = {
 		.count = 5,
 		.base = { 0x03100, 0x03500, 0x03900, 0x03d00, 0x04100 },
+		.instances = {
+				{ .id = 0, .pp = 0, .dspp = 0,
+				  .caps = MDP_LM_CAP_DISPLAY, },
+				{ .id = 1, .pp = 1, .dspp = 1,
+				  .caps = MDP_LM_CAP_DISPLAY, },
+				{ .id = 2, .pp = 2, .dspp = 2,
+				  .caps = MDP_LM_CAP_DISPLAY, },
+				{ .id = 3, .pp = -1, .dspp = -1,
+				  .caps = MDP_LM_CAP_WB, },
+				{ .id = 4, .pp = -1, .dspp = -1,
+				  .caps = MDP_LM_CAP_WB, },
+			     },
 		.nb_stages = 5,
 		.max_width = 2048,
 		.max_height = 0xFFFF,
@@ -167,6 +191,7 @@ const struct mdp5_cfg_hw apq8084_config = {
 	.mdp = {
 		.count = 1,
 		.caps = MDP_CAP_SMP |
+			MDP_CAP_SRC_SPLIT |
 			0,
 	},
 	.smp = {
@@ -211,6 +236,22 @@ const struct mdp5_cfg_hw apq8084_config = {
 	.lm = {
 		.count = 6,
 		.base = { 0x03900, 0x03d00, 0x04100, 0x04500, 0x04900, 0x04d00 },
+		.instances = {
+				{ .id = 0, .pp = 0, .dspp = 0,
+				  .caps = MDP_LM_CAP_DISPLAY |
+					  MDP_LM_CAP_PAIR, },
+				{ .id = 1, .pp = 1, .dspp = 1,
+				  .caps = MDP_LM_CAP_DISPLAY, },
+				{ .id = 2, .pp = 2, .dspp = 2,
+				  .caps = MDP_LM_CAP_DISPLAY |
+					  MDP_LM_CAP_PAIR, },
+				{ .id = 3, .pp = -1, .dspp = -1,
+				  .caps = MDP_LM_CAP_WB, },
+				{ .id = 4, .pp = -1, .dspp = -1,
+				  .caps = MDP_LM_CAP_WB, },
+				{ .id = 5, .pp = 3, .dspp = 3,
+				  .caps = MDP_LM_CAP_DISPLAY, },
+			     },
 		.nb_stages = 5,
 		.max_width = 2048,
 		.max_height = 0xFFFF,
@@ -282,6 +323,12 @@ const struct mdp5_cfg_hw msm8x16_config = {
 	.lm = {
 		.count = 2, /* LM0 and LM3 */
 		.base = { 0x44000, 0x47000 },
+		.instances = {
+				{ .id = 0, .pp = 0, .dspp = 0,
+				  .caps = MDP_LM_CAP_DISPLAY, },
+				{ .id = 3, .pp = -1, .dspp = -1,
+				  .caps = MDP_LM_CAP_WB },
+			     },
 		.nb_stages = 8,
 		.max_width = 2048,
 		.max_height = 0xFFFF,
@@ -306,6 +353,7 @@ const struct mdp5_cfg_hw msm8x94_config = {
 	.mdp = {
 		.count = 1,
 		.caps = MDP_CAP_SMP |
+			MDP_CAP_SRC_SPLIT |
 			0,
 	},
 	.smp = {
@@ -350,6 +398,22 @@ const struct mdp5_cfg_hw msm8x94_config = {
 	.lm = {
 		.count = 6,
 		.base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 },
+		.instances = {
+				{ .id = 0, .pp = 0, .dspp = 0,
+				  .caps = MDP_LM_CAP_DISPLAY |
+					  MDP_LM_CAP_PAIR, },
+				{ .id = 1, .pp = 1, .dspp = 1,
+				  .caps = MDP_LM_CAP_DISPLAY, },
+				{ .id = 2, .pp = 2, .dspp = 2,
+				  .caps = MDP_LM_CAP_DISPLAY |
+					  MDP_LM_CAP_PAIR, },
+				{ .id = 3, .pp = -1, .dspp = -1,
+				  .caps = MDP_LM_CAP_WB, },
+				{ .id = 4, .pp = -1, .dspp = -1,
+				  .caps = MDP_LM_CAP_WB, },
+				{ .id = 5, .pp = 3, .dspp = 3,
+				  .caps = MDP_LM_CAP_DISPLAY, },
+			     },
 		.nb_stages = 8,
 		.max_width = 2048,
 		.max_height = 0xFFFF,
@@ -385,6 +449,7 @@ const struct mdp5_cfg_hw msm8x96_config = {
 		.count = 1,
 		.caps = MDP_CAP_DSC |
 			MDP_CAP_CDM |
+			MDP_CAP_SRC_SPLIT |
 			0,
 	},
 	.ctl = {
@@ -434,6 +499,22 @@ const struct mdp5_cfg_hw msm8x96_config = {
 	.lm = {
 		.count = 6,
 		.base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 },
+		.instances = {
+				{ .id = 0, .pp = 0, .dspp = 0,
+				  .caps = MDP_LM_CAP_DISPLAY |
+					  MDP_LM_CAP_PAIR, },
+				{ .id = 1, .pp = 1, .dspp = 1,
+				  .caps = MDP_LM_CAP_DISPLAY, },
+				{ .id = 2, .pp = 2, .dspp = -1,
+				  .caps = MDP_LM_CAP_DISPLAY |
+					  MDP_LM_CAP_PAIR, },
+				{ .id = 3, .pp = -1, .dspp = -1,
+				  .caps = MDP_LM_CAP_WB, },
+				{ .id = 4, .pp = -1, .dspp = -1,
+				  .caps = MDP_LM_CAP_WB, },
+				{ .id = 5, .pp = 3, .dspp = -1,
+				  .caps = MDP_LM_CAP_DISPLAY, },
+			     },
 		.nb_stages = 8,
 		.max_width = 2560,
 		.max_height = 0xFFFF,
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
index b1c7daaede86..75910d0f2f4c 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
@@ -39,8 +39,16 @@ struct mdp5_sub_block {
 	MDP5_SUB_BLOCK_DEFINITION;
 };
 
+struct mdp5_lm_instance {
+	int id;
+	int pp;
+	int dspp;
+	uint32_t caps;
+};
+
 struct mdp5_lm_block {
 	MDP5_SUB_BLOCK_DEFINITION;
+	struct mdp5_lm_instance instances[MAX_BASES];
 	uint32_t nb_stages;	/* number of stages per blender */
 	uint32_t max_width;	/* Maximum output resolution */
 	uint32_t max_height;
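The new instances[] table is what makes mixer assignment capability-driven: a 4k CRTC requests MDP_LM_CAP_DISPLAY | MDP_LM_CAP_PAIR and can only land on one of the pairable LMs (ids 0 and 2 in the source-split-capable configs above). The real allocator, mdp5_mixer_assign() in the new mdp5_mixer.c, also arbitrates against the rest of the atomic state; the sketch below shows just the capability matching, with invented in_use bookkeeping:

	/* Sketch: first free LM instance whose caps cover the request. */
	static const struct mdp5_lm_instance *
	example_pick_lm(const struct mdp5_lm_instance *instances, int count,
			const bool *in_use, uint32_t caps)
	{
		int i;

		for (i = 0; i < count; i++) {
			if (in_use[i])
				continue;
			if ((instances[i].caps & caps) == caps)
				return &instances[i];
		}

		return NULL; /* nothing left that can, e.g., pair for source split */
	}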
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
index df1c8adec3f3..8dafc7bdba48 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
@@ -51,7 +51,8 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
 	struct device *dev = encoder->dev->dev;
 	u32 total_lines_x100, vclks_line, cfg;
 	long vsync_clk_speed;
-	int pp_id = GET_PING_PONG_ID(mdp5_crtc_get_lm(encoder->crtc));
+	struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
+	int pp_id = mixer->pp;
 
 	if (IS_ERR_OR_NULL(mdp5_kms->vsync_clk)) {
 		dev_err(dev, "vsync_clk is not initialized\n");
@@ -94,7 +95,8 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
 static int pingpong_tearcheck_enable(struct drm_encoder *encoder)
 {
 	struct mdp5_kms *mdp5_kms = get_kms(encoder);
-	int pp_id = GET_PING_PONG_ID(mdp5_crtc_get_lm(encoder->crtc));
+	struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
+	int pp_id = mixer->pp;
 	int ret;
 
 	ret = clk_set_rate(mdp5_kms->vsync_clk,
@@ -119,7 +121,8 @@ static int pingpong_tearcheck_enable(struct drm_encoder *encoder)
 static void pingpong_tearcheck_disable(struct drm_encoder *encoder)
 {
 	struct mdp5_kms *mdp5_kms = get_kms(encoder);
-	int pp_id = GET_PING_PONG_ID(mdp5_crtc_get_lm(encoder->crtc));
+	struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
+	int pp_id = mixer->pp;
 
 	mdp5_write(mdp5_kms, REG_MDP5_PP_TEAR_CHECK_EN(pp_id), 0);
 	clk_disable_unprepare(mdp5_kms->vsync_clk);
@@ -129,8 +132,6 @@ void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
 		struct drm_display_mode *mode,
 		struct drm_display_mode *adjusted_mode)
 {
-	struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
-
 	mode = adjusted_mode;
 
 	DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
@@ -142,23 +143,23 @@ void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
 			mode->vsync_end, mode->vtotal,
 			mode->type, mode->flags);
 	pingpong_tearcheck_setup(encoder, mode);
-	mdp5_crtc_set_pipeline(encoder->crtc, &mdp5_cmd_enc->intf,
-			mdp5_cmd_enc->ctl);
+	mdp5_crtc_set_pipeline(encoder->crtc);
 }
 
 void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
 {
 	struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
 	struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl;
-	struct mdp5_interface *intf = &mdp5_cmd_enc->intf;
+	struct mdp5_interface *intf = mdp5_cmd_enc->intf;
+	struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc);
 
 	if (WARN_ON(!mdp5_cmd_enc->enabled))
 		return;
 
 	pingpong_tearcheck_disable(encoder);
 
-	mdp5_ctl_set_encoder_state(ctl, false);
-	mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf));
+	mdp5_ctl_set_encoder_state(ctl, pipeline, false);
+	mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf));
 
 	bs_set(mdp5_cmd_enc, 0);
 
@@ -169,7 +170,8 @@ void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
 {
 	struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
 	struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl;
-	struct mdp5_interface *intf = &mdp5_cmd_enc->intf;
+	struct mdp5_interface *intf = mdp5_cmd_enc->intf;
+	struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc);
 
 	if (WARN_ON(mdp5_cmd_enc->enabled))
 		return;
@@ -178,9 +180,9 @@ void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
 	if (pingpong_tearcheck_enable(encoder))
 		return;
 
-	mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf));
+	mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf));
 
-	mdp5_ctl_set_encoder_state(ctl, true);
+	mdp5_ctl_set_encoder_state(ctl, pipeline, true);
 
 	mdp5_cmd_enc->enabled = true;
 }
@@ -197,7 +199,7 @@ int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
 		return -EINVAL;
 
 	mdp5_kms = get_kms(encoder);
-	intf_num = mdp5_cmd_enc->intf.num;
+	intf_num = mdp5_cmd_enc->intf->num;
 
 	/* Switch slave encoder's trigger MUX, to use the master's
 	 * start signal for the slave encoder
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index d0c8b38b96ce..9217e0d6e93e 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -32,13 +32,7 @@ struct mdp5_crtc {
 	int id;
 	bool enabled;
 
-	/* layer mixer used for this CRTC (+ its lock): */
-#define GET_LM_ID(crtc_id)	((crtc_id == 3) ? 5 : crtc_id)
-	int lm;
-	spinlock_t lm_lock;	/* protect REG_MDP5_LM_* registers */
-
-	/* CTL used for this CRTC: */
-	struct mdp5_ctl *ctl;
+	spinlock_t lm_lock;	/* protect REG_MDP5_LM_* registers */
 
 	/* if there is a pending flip, these will be non-null: */
 	struct drm_pending_vblank_event *event;
@@ -61,8 +55,6 @@ struct mdp5_crtc {
 
 	struct completion pp_completion;
 
-	bool cmd_mode;
-
 	struct {
 		/* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo*/
 		spinlock_t lock;
@@ -97,10 +89,12 @@ static void request_pp_done_pending(struct drm_crtc *crtc)
 
 static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
 {
-	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
+	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
 
 	DBG("%s: flush=%08x", crtc->name, flush_mask);
-	return mdp5_ctl_commit(mdp5_crtc->ctl, flush_mask);
+	return mdp5_ctl_commit(ctl, pipeline, flush_mask);
 }
 
 /*
@@ -110,19 +104,25 @@ static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
  */
 static u32 crtc_flush_all(struct drm_crtc *crtc)
 {
-	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+	struct mdp5_hw_mixer *mixer, *r_mixer;
 	struct drm_plane *plane;
 	uint32_t flush_mask = 0;
 
 	/* this should not happen: */
-	if (WARN_ON(!mdp5_crtc->ctl))
+	if (WARN_ON(!mdp5_cstate->ctl))
 		return 0;
 
 	drm_atomic_crtc_for_each_plane(plane, crtc) {
 		flush_mask |= mdp5_plane_get_flush(plane);
 	}
 
-	flush_mask |= mdp_ctl_flush_mask_lm(mdp5_crtc->lm);
+	mixer = mdp5_cstate->pipeline.mixer;
+	flush_mask |= mdp_ctl_flush_mask_lm(mixer->lm);
+
+	r_mixer = mdp5_cstate->pipeline.r_mixer;
+	if (r_mixer)
+		flush_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);
 
 	return crtc_flush(crtc, flush_mask);
 }
@@ -130,7 +130,10 @@ static u32 crtc_flush_all(struct drm_crtc *crtc)
 /* if file!=NULL, this is preclose potential cancel-flip path */
 static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
 {
+	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
 	struct drm_device *dev = crtc->dev;
 	struct drm_pending_vblank_event *event;
 	unsigned long flags;
@@ -138,22 +141,17 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
 	spin_lock_irqsave(&dev->event_lock, flags);
 	event = mdp5_crtc->event;
 	if (event) {
-		/* if regular vblank case (!file) or if cancel-flip from
-		 * preclose on file that requested flip, then send the
-		 * event:
-		 */
-		if (!file || (event->base.file_priv == file)) {
-			mdp5_crtc->event = NULL;
-			DBG("%s: send event: %p", crtc->name, event);
-			drm_crtc_send_vblank_event(crtc, event);
-		}
+		mdp5_crtc->event = NULL;
+		DBG("%s: send event: %p", crtc->name, event);
+		drm_crtc_send_vblank_event(crtc, event);
 	}
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
-	if (mdp5_crtc->ctl && !crtc->state->enable) {
+	if (ctl && !crtc->state->enable) {
 		/* set STAGE_UNUSED for all layers */
-		mdp5_ctl_blend(mdp5_crtc->ctl, NULL, 0, 0);
-		mdp5_crtc->ctl = NULL;
+		mdp5_ctl_blend(ctl, pipeline, NULL, NULL, 0, 0);
+		/* XXX: What to do here? */
+		/* mdp5_crtc->ctl = NULL; */
 	}
 }
 
@@ -193,6 +191,12 @@ static inline u32 mdp5_lm_use_fg_alpha_mask(enum mdp_mixer_stage_id stage)
 }
 
 /*
+ * left/right pipe offsets for the stage array used in blend_setup()
+ */
+#define PIPE_LEFT	0
+#define PIPE_RIGHT	1
+
+/*
  * blend_setup() - blend all the planes of a CRTC
  *
  * If no base layer is available, border will be enabled as the base layer.
@@ -202,18 +206,26 @@ static inline u32 mdp5_lm_use_fg_alpha_mask(enum mdp_mixer_stage_id stage)
 static void blend_setup(struct drm_crtc *crtc)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);
 	struct drm_plane *plane;
 	const struct mdp5_cfg_hw *hw_cfg;
 	struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL};
 	const struct mdp_format *format;
-	uint32_t lm = mdp5_crtc->lm;
+	struct mdp5_hw_mixer *mixer = pipeline->mixer;
+	uint32_t lm = mixer->lm;
+	struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
+	uint32_t r_lm = r_mixer ? r_mixer->lm : 0;
+	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
 	uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
 	unsigned long flags;
-	enum mdp5_pipe stage[STAGE_MAX + 1] = { SSPP_NONE };
+	enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE };
+	enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE };
 	int i, plane_cnt = 0;
 	bool bg_alpha_enabled = false;
 	u32 mixer_op_mode = 0;
+	u32 val;
 #define blender(stage)	((stage) - STAGE0)
 
 	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
@@ -221,14 +233,35 @@ static void blend_setup(struct drm_crtc *crtc)
 	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
 
 	/* ctl could be released already when we are shutting down: */
-	if (!mdp5_crtc->ctl)
+	/* XXX: Can this happen now? */
+	if (!ctl)
 		goto out;
 
 	/* Collect all plane information */
 	drm_atomic_crtc_for_each_plane(plane, crtc) {
+		enum mdp5_pipe right_pipe;
+
 		pstate = to_mdp5_plane_state(plane->state);
 		pstates[pstate->stage] = pstate;
-		stage[pstate->stage] = mdp5_plane_pipe(plane);
+		stage[pstate->stage][PIPE_LEFT] = mdp5_plane_pipe(plane);
+		/*
+		 * if we have a right mixer, stage the same pipe as we
+		 * have on the left mixer
+		 */
+		if (r_mixer)
+			r_stage[pstate->stage][PIPE_LEFT] =
+				mdp5_plane_pipe(plane);
+		/*
+		 * if we have a right pipe (i.e., the plane comprises 2
+		 * hwpipes), then stage the right pipe on the right side of
+		 * both the layer mixers
+		 */
+		right_pipe = mdp5_plane_right_pipe(plane);
+		if (right_pipe) {
+			stage[pstate->stage][PIPE_RIGHT] = right_pipe;
+			r_stage[pstate->stage][PIPE_RIGHT] = right_pipe;
+		}
+
 		plane_cnt++;
 	}
 
@@ -294,12 +327,27 @@ static void blend_setup(struct drm_crtc *crtc)
 				blender(i)), fg_alpha);
 		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
 				blender(i)), bg_alpha);
+		if (r_mixer) {
+			mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(r_lm,
+					blender(i)), blend_op);
+			mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(r_lm,
+					blender(i)), fg_alpha);
+			mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(r_lm,
+					blender(i)), bg_alpha);
+		}
 	}
 
-	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), mixer_op_mode);
-
-	mdp5_ctl_blend(mdp5_crtc->ctl, stage, plane_cnt, ctl_blend_flags);
+	val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
+	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm),
+		   val | mixer_op_mode);
+	if (r_mixer) {
+		val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
+		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm),
+			   val | mixer_op_mode);
+	}
 
+	mdp5_ctl_blend(ctl, pipeline, stage, r_stage, plane_cnt,
+		       ctl_blend_flags);
 out:
 	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
 }
@@ -307,7 +355,12 @@ out:
 static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);
+	struct mdp5_hw_mixer *mixer = mdp5_cstate->pipeline.mixer;
+	struct mdp5_hw_mixer *r_mixer = mdp5_cstate->pipeline.r_mixer;
+	uint32_t lm = mixer->lm;
+	u32 mixer_width, val;
 	unsigned long flags;
 	struct drm_display_mode *mode;
 
@@ -325,16 +378,40 @@ static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
 			mode->vsync_end, mode->vtotal,
 			mode->type, mode->flags);
 
+	mixer_width = mode->hdisplay;
+	if (r_mixer)
+		mixer_width /= 2;
+
 	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
-	mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->lm),
-			MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) |
+	mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(lm),
+			MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
 			MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
+
+	/* Assign mixer to LEFT side in source split mode */
+	val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
+	val &= ~MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
+	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), val);
+
+	if (r_mixer) {
+		u32 r_lm = r_mixer->lm;
+
+		mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(r_lm),
+				MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
+				MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
+
+		/* Assign mixer to RIGHT side in source split mode */
+		val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
+		val |= MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
+		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm), val);
+	}
+
 	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
 }
 
 static void mdp5_crtc_disable(struct drm_crtc *crtc)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);
 
 	DBG("%s", crtc->name);
@@ -342,7 +419,7 @@ static void mdp5_crtc_disable(struct drm_crtc *crtc)
 	if (WARN_ON(!mdp5_crtc->enabled))
 		return;
 
-	if (mdp5_crtc->cmd_mode)
+	if (mdp5_cstate->cmd_mode)
 		mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done);
 
 	mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
@@ -354,6 +431,7 @@ static void mdp5_crtc_disable(struct drm_crtc *crtc)
 static void mdp5_crtc_enable(struct drm_crtc *crtc)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);
 
 	DBG("%s", crtc->name);
@@ -364,12 +442,73 @@ static void mdp5_crtc_enable(struct drm_crtc *crtc)
 	mdp5_enable(mdp5_kms);
 	mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
 
-	if (mdp5_crtc->cmd_mode)
+	if (mdp5_cstate->cmd_mode)
 		mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->pp_done);
 
 	mdp5_crtc->enabled = true;
 }
 
+int mdp5_crtc_setup_pipeline(struct drm_crtc *crtc,
+			     struct drm_crtc_state *new_crtc_state,
+			     bool need_right_mixer)
+{
+	struct mdp5_crtc_state *mdp5_cstate =
+			to_mdp5_crtc_state(new_crtc_state);
+	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
+	struct mdp5_interface *intf;
+	bool new_mixer = false;
+
+	new_mixer = !pipeline->mixer;
+
+	if ((need_right_mixer && !pipeline->r_mixer) ||
+	    (!need_right_mixer && pipeline->r_mixer))
+		new_mixer = true;
+
+	if (new_mixer) {
+		struct mdp5_hw_mixer *old_mixer = pipeline->mixer;
+		struct mdp5_hw_mixer *old_r_mixer = pipeline->r_mixer;
+		u32 caps;
+		int ret;
+
+		caps = MDP_LM_CAP_DISPLAY;
+		if (need_right_mixer)
+			caps |= MDP_LM_CAP_PAIR;
+
+		ret = mdp5_mixer_assign(new_crtc_state->state, crtc, caps,
+					&pipeline->mixer, need_right_mixer ?
+					&pipeline->r_mixer : NULL);
+		if (ret)
+			return ret;
+
+		mdp5_mixer_release(new_crtc_state->state, old_mixer);
+		if (old_r_mixer) {
+			mdp5_mixer_release(new_crtc_state->state, old_r_mixer);
+			if (!need_right_mixer)
+				pipeline->r_mixer = NULL;
+		}
+	}
+
+	/*
+	 * these should have been already set up in the encoder's atomic
+	 * check (called by drm_atomic_helper_check_modeset)
+	 */
+	intf = pipeline->intf;
+
+	mdp5_cstate->err_irqmask = intf2err(intf->num);
+	mdp5_cstate->vblank_irqmask = intf2vblank(pipeline->mixer, intf);
+
+	if ((intf->type == INTF_DSI) &&
+	    (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) {
+		mdp5_cstate->pp_done_irqmask = lm2ppdone(pipeline->mixer);
+		mdp5_cstate->cmd_mode = true;
+	} else {
+		mdp5_cstate->pp_done_irqmask = 0;
+		mdp5_cstate->cmd_mode = false;
+	}
+
+	return 0;
+}
+
 struct plane_state {
 	struct drm_plane *plane;
 	struct mdp5_plane_state *state;
@@ -391,6 +530,29 @@ static bool is_fullscreen(struct drm_crtc_state *cstate,
 		((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay);
 }
 
+enum mdp_mixer_stage_id get_start_stage(struct drm_crtc *crtc,
+					struct drm_crtc_state *new_crtc_state,
+					struct drm_plane_state *bpstate)
+{
+	struct mdp5_crtc_state *mdp5_cstate =
+			to_mdp5_crtc_state(new_crtc_state);
+
+	/*
+	 * if we're in source split mode, it's mandatory to have
+	 * border out on the base stage
+	 */
+	if (mdp5_cstate->pipeline.r_mixer)
+		return STAGE0;
+
+	/* if the bottom-most layer is not fullscreen, we need to use
+	 * it for solid-color:
+	 */
+	if (!is_fullscreen(new_crtc_state, bpstate))
+		return STAGE0;
+
+	return STAGE_BASE;
+}
+
 static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
 		struct drm_crtc_state *state)
 {
@@ -400,8 +562,12 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
 	struct plane_state pstates[STAGE_MAX + 1];
 	const struct mdp5_cfg_hw *hw_cfg;
 	const struct drm_plane_state *pstate;
+	const struct drm_display_mode *mode = &state->adjusted_mode;
 	bool cursor_plane = false;
-	int cnt = 0, base = 0, i;
+	bool need_right_mixer = false;
+	int cnt = 0, i;
+	int ret;
+	enum mdp_mixer_stage_id start;
 
 	DBG("%s: check", crtc->name);
 
@@ -409,32 +575,52 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
 		pstates[cnt].plane = plane;
 		pstates[cnt].state = to_mdp5_plane_state(pstate);
 
+		/*
+		 * if any plane on this crtc uses 2 hwpipes, then we need
+		 * the crtc to have a right hwmixer.
+		 */
+		if (pstates[cnt].state->r_hwpipe)
+			need_right_mixer = true;
 		cnt++;
 
 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
 			cursor_plane = true;
 	}
 
-	/* assign a stage based on sorted zpos property */
-	sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
+	/* bail out early if there aren't any planes */
+	if (!cnt)
+		return 0;
 
-	/* if the bottom-most layer is not fullscreen, we need to use
-	 * it for solid-color:
+	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
+
+	/*
+	 * we need a right hwmixer if the mode's width is greater than a single
+	 * LM's max width
 	 */
-	if ((cnt > 0) && !is_fullscreen(state, &pstates[0].state->base))
-		base++;
+	if (mode->hdisplay > hw_cfg->lm.max_width)
+		need_right_mixer = true;
+
+	ret = mdp5_crtc_setup_pipeline(crtc, state, need_right_mixer);
+	if (ret) {
+		dev_err(dev->dev, "couldn't assign mixers %d\n", ret);
+		return ret;
+	}
+
+	/* assign a stage based on sorted zpos property */
+	sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
 
 	/* trigger a warning if cursor isn't the highest zorder */
 	WARN_ON(cursor_plane &&
 		(pstates[cnt - 1].plane->type != DRM_PLANE_TYPE_CURSOR));
 
+	start = get_start_stage(crtc, state, &pstates[0].state->base);
+
 	/* verify that there are not too many planes attached to crtc
 	 * and that we don't have conflicting mixer stages:
 	 */
-	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
-
-	if ((cnt + base) >= hw_cfg->lm.nb_stages) {
-		dev_err(dev->dev, "too many planes! cnt=%d, base=%d\n", cnt, base);
+	if ((cnt + start - 1) >= hw_cfg->lm.nb_stages) {
+		dev_err(dev->dev, "too many planes! cnt=%d, start stage=%d\n",
+			cnt, start);
 		return -EINVAL;
 	}
 
@@ -442,7 +628,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
 		if (cursor_plane && (i == (cnt - 1)))
 			pstates[i].state->stage = hw_cfg->lm.nb_stages;
 		else
-			pstates[i].state->stage = STAGE_BASE + i + base;
+			pstates[i].state->stage = start + i;
 		DBG("%s: assign pipe %s on stage=%d", crtc->name,
 			pstates[i].plane->name,
 			pstates[i].state->stage);
@@ -461,6 +647,7 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
 		struct drm_crtc_state *old_crtc_state)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
 	struct drm_device *dev = crtc->dev;
 	unsigned long flags;
 
@@ -477,7 +664,8 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
 	 * it means we are trying to flush a CRTC whose state is disabled:
 	 * nothing else needs to be done.
 	 */
-	if (unlikely(!mdp5_crtc->ctl))
+	/* XXX: Can this happen now ? */
+	if (unlikely(!mdp5_cstate->ctl))
 		return;
 
 	blend_setup(crtc);
@@ -488,11 +676,16 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
 	 * This is safe because no pp_done will happen before SW trigger
 	 * in command mode.
 	 */
-	if (mdp5_crtc->cmd_mode)
+	if (mdp5_cstate->cmd_mode)
 		request_pp_done_pending(crtc);
 
 	mdp5_crtc->flushed_mask = crtc_flush_all(crtc);
 
+	/* XXX are we leaking out state here? */
+	mdp5_crtc->vblank.irqmask = mdp5_cstate->vblank_irqmask;
+	mdp5_crtc->err.irqmask = mdp5_cstate->err_irqmask;
+	mdp5_crtc->pp_done.irqmask = mdp5_cstate->pp_done_irqmask;
+
 	request_pending(crtc, PENDING_FLIP);
 }
 
@@ -527,11 +720,14 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
 		uint32_t width, uint32_t height)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
 	struct drm_device *dev = crtc->dev;
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);
 	struct drm_gem_object *cursor_bo, *old_bo = NULL;
 	uint32_t blendcfg, stride;
 	uint64_t cursor_addr;
+	struct mdp5_ctl *ctl;
 	int ret, lm;
 	enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
 	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
@@ -544,7 +740,12 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
 		return -EINVAL;
 	}
 
-	if (NULL == mdp5_crtc->ctl)
+	ctl = mdp5_cstate->ctl;
+	if (!ctl)
+		return -EINVAL;
+
+	/* don't support LM cursors when we have source split enabled */
+	if (mdp5_cstate->pipeline.r_mixer)
 		return -EINVAL;
 
 	if (!handle) {
@@ -561,7 +762,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
 	if (ret)
 		return -EINVAL;
 
-	lm = mdp5_crtc->lm;
+	lm = mdp5_cstate->pipeline.mixer->lm;
 	stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0);
 
 	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
@@ -591,7 +792,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
 	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
 
 set_cursor:
-	ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, 0, cursor_enable);
+	ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
 	if (ret) {
 		dev_err(dev->dev, "failed to %sable cursor: %d\n",
 				cursor_enable ? "en" : "dis", ret);
@@ -613,11 +814,17 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 {
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+	uint32_t lm = mdp5_cstate->pipeline.mixer->lm;
 	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
 	uint32_t roi_w;
 	uint32_t roi_h;
 	unsigned long flags;
 
+	/* don't support LM cursors when we have source split enabled */
+	if (mdp5_cstate->pipeline.r_mixer)
+		return -EINVAL;
+
 	/* In case the CRTC is disabled, just drop the cursor update */
 	if (unlikely(!crtc->state->enable))
 		return 0;
@@ -628,10 +835,10 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 	get_roi(crtc, &roi_w, &roi_h);
 
 	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
-	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(mdp5_crtc->lm),
+	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
 			MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
 			MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
-	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(mdp5_crtc->lm),
+	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(lm),
 			MDP5_LM_CURSOR_START_XY_Y_START(y) |
 			MDP5_LM_CURSOR_START_XY_X_START(x));
 	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
@@ -641,16 +848,80 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 	return 0;
 }
 
+static void
+mdp5_crtc_atomic_print_state(struct drm_printer *p,
+			     const struct drm_crtc_state *state)
+{
+	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state);
+	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
+	struct mdp5_kms *mdp5_kms = get_kms(state->crtc);
+
+	if (WARN_ON(!pipeline))
+		return;
+
+	drm_printf(p, "\thwmixer=%s\n", pipeline->mixer ?
+			pipeline->mixer->name : "(null)");
+
+	if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT)
+		drm_printf(p, "\tright hwmixer=%s\n", pipeline->r_mixer ?
+			   pipeline->r_mixer->name : "(null)");
+}
+
+static void mdp5_crtc_reset(struct drm_crtc *crtc)
+{
+	struct mdp5_crtc_state *mdp5_cstate;
+
+	if (crtc->state) {
+		__drm_atomic_helper_crtc_destroy_state(crtc->state);
+		kfree(to_mdp5_crtc_state(crtc->state));
+	}
+
+	mdp5_cstate = kzalloc(sizeof(*mdp5_cstate), GFP_KERNEL);
+
+	if (mdp5_cstate) {
+		mdp5_cstate->base.crtc = crtc;
+		crtc->state = &mdp5_cstate->base;
+	}
+}
+
+static struct drm_crtc_state *
+mdp5_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+	struct mdp5_crtc_state *mdp5_cstate;
+
+	if (WARN_ON(!crtc->state))
+		return NULL;
+
+	mdp5_cstate = kmemdup(to_mdp5_crtc_state(crtc->state),
+			      sizeof(*mdp5_cstate), GFP_KERNEL);
+	if (!mdp5_cstate)
+		return NULL;
+
+	__drm_atomic_helper_crtc_duplicate_state(crtc, &mdp5_cstate->base);
+
+	return &mdp5_cstate->base;
+}
+
+static void mdp5_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state)
+{
+	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state);
+
+	__drm_atomic_helper_crtc_destroy_state(state);
+
+	kfree(mdp5_cstate);
+}
+
 static const struct drm_crtc_funcs mdp5_crtc_funcs = {
 	.set_config = drm_atomic_helper_set_config,
 	.destroy = mdp5_crtc_destroy,
 	.page_flip = drm_atomic_helper_page_flip,
 	.set_property = drm_atomic_helper_crtc_set_property,
-	.reset = drm_atomic_helper_crtc_reset,
-	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
-	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+	.reset = mdp5_crtc_reset,
+	.atomic_duplicate_state = mdp5_crtc_duplicate_state,
+	.atomic_destroy_state = mdp5_crtc_destroy_state,
 	.cursor_set = mdp5_crtc_cursor_set,
 	.cursor_move = mdp5_crtc_cursor_move,
+	.atomic_print_state = mdp5_crtc_atomic_print_state,
 };
 
 static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = {
@@ -658,9 +929,10 @@ static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = {
 	.destroy = mdp5_crtc_destroy,
 	.page_flip = drm_atomic_helper_page_flip,
 	.set_property = drm_atomic_helper_crtc_set_property,
-	.reset = drm_atomic_helper_crtc_reset,
-	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
-	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+	.reset = mdp5_crtc_reset,
+	.atomic_duplicate_state = mdp5_crtc_duplicate_state,
+	.atomic_destroy_state = mdp5_crtc_destroy_state,
+	.atomic_print_state = mdp5_crtc_atomic_print_state,
 };
 
 static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
@@ -710,22 +982,26 @@ static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
 	int ret;
 
 	ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
 						msecs_to_jiffies(50));
 	if (ret == 0)
-		dev_warn(dev->dev, "pp done time out, lm=%d\n", mdp5_crtc->lm);
+		dev_warn(dev->dev, "pp done time out, lm=%d\n",
+			 mdp5_cstate->pipeline.mixer->lm);
 }
 
 static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
 	int ret;
 
 	/* Should not call this function if crtc is disabled. */
-	if (!mdp5_crtc->ctl)
+	if (!ctl)
 		return;
 
 	ret = drm_crtc_vblank_get(crtc);
@@ -733,7 +1009,7 @@ static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
 		return;
 
 	ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
-		((mdp5_ctl_get_commit_status(mdp5_crtc->ctl) &
+		((mdp5_ctl_get_commit_status(ctl) &
 		mdp5_crtc->flushed_mask) == 0),
 		msecs_to_jiffies(50));
 	if (ret <= 0)
@@ -750,52 +1026,54 @@ uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
 	return mdp5_crtc->vblank.irqmask;
 }
 
-void mdp5_crtc_set_pipeline(struct drm_crtc *crtc,
-		struct mdp5_interface *intf, struct mdp5_ctl *ctl)
+void mdp5_crtc_set_pipeline(struct drm_crtc *crtc)
 {
-	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);
-	int lm = mdp5_crtc_get_lm(crtc);
-
-	/* now that we know what irq's we want: */
-	mdp5_crtc->err.irqmask = intf2err(intf->num);
-	mdp5_crtc->vblank.irqmask = intf2vblank(lm, intf);
-
-	if ((intf->type == INTF_DSI) &&
-	    (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) {
-		mdp5_crtc->pp_done.irqmask = lm2ppdone(lm);
-		mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq;
-		mdp5_crtc->cmd_mode = true;
-	} else {
-		mdp5_crtc->pp_done.irqmask = 0;
-		mdp5_crtc->pp_done.irq = NULL;
-		mdp5_crtc->cmd_mode = false;
-	}
 
+	/* should this be done elsewhere ? */
 	mdp_irq_update(&mdp5_kms->base);
 
-	mdp5_crtc->ctl = ctl;
-	mdp5_ctl_set_pipeline(ctl, intf, lm);
+	mdp5_ctl_set_pipeline(mdp5_cstate->ctl, &mdp5_cstate->pipeline);
 }
 
 struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
 {
-	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
 
-	return mdp5_crtc->ctl;
+	return mdp5_cstate->ctl;
 }
 
-int mdp5_crtc_get_lm(struct drm_crtc *crtc)
+struct mdp5_hw_mixer *mdp5_crtc_get_mixer(struct drm_crtc *crtc)
 {
-	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
-	return WARN_ON(!crtc) ? -EINVAL : mdp5_crtc->lm;
+	struct mdp5_crtc_state *mdp5_cstate;
+
+	if (WARN_ON(!crtc))
+		return ERR_PTR(-EINVAL);
+
+	mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+
+	return WARN_ON(!mdp5_cstate->pipeline.mixer) ?
+		ERR_PTR(-EINVAL) : mdp5_cstate->pipeline.mixer;
+}
+
+struct mdp5_pipeline *mdp5_crtc_get_pipeline(struct drm_crtc *crtc)
+{
+	struct mdp5_crtc_state *mdp5_cstate;
+
+	if (WARN_ON(!crtc))
+		return ERR_PTR(-EINVAL);
+
+	mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+
+	return &mdp5_cstate->pipeline;
 }
 
 void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc)
 {
-	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
 
-	if (mdp5_crtc->cmd_mode)
+	if (mdp5_cstate->cmd_mode)
 		mdp5_crtc_wait_for_pp_done(crtc);
 	else
 		mdp5_crtc_wait_for_flush_done(crtc);
@@ -816,7 +1094,6 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
 	crtc = &mdp5_crtc->base;
 
 	mdp5_crtc->id = id;
-	mdp5_crtc->lm = GET_LM_ID(id);
 
 	spin_lock_init(&mdp5_crtc->lm_lock);
 	spin_lock_init(&mdp5_crtc->cursor.lock);
@@ -824,6 +1101,7 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
 
 	mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
 	mdp5_crtc->err.irq = mdp5_crtc_err_irq;
+	mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq;
 
 	if (cursor_plane)
 		drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane,
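
The swap from the generic atomic helpers to mdp5_crtc_reset/duplicate/destroy above is the standard DRM pattern for subclassing CRTC state. A minimal sketch of that pattern, with the field list inferred from the accessors used in this patch (the authoritative definition lives in a header not shown in this diff):

	struct mdp5_crtc_state {
		struct drm_crtc_state base;

		struct mdp5_ctl *ctl;
		struct mdp5_pipeline pipeline;
		/* plus cmd_mode and the vblank/err/pp_done irq masks */
	};
	#define to_mdp5_crtc_state(x) \
		container_of(x, struct mdp5_crtc_state, base)

kmemdup() in duplicate_state copies the driver-private fields wholesale, and __drm_atomic_helper_crtc_duplicate_state() then re-initializes the embedded base, so the mixer/ctl assignment travels with each atomic state.
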
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
index 8b93f7e13200..439e0a300e25 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
@@ -32,24 +32,16 @@
 #define CTL_STAT_BUSY	0x1
 #define CTL_STAT_BOOKED	0x2
 
-struct op_mode {
-	struct mdp5_interface intf;
-
-	bool encoder_enabled;
-	uint32_t start_mask;
-};
-
 struct mdp5_ctl {
 	struct mdp5_ctl_manager *ctlm;
 
 	u32 id;
-	int lm;
 
 	/* CTL status bitmask */
 	u32 status;
 
-	/* Operation Mode Configuration for the Pipeline */
-	struct op_mode pipeline;
+	bool encoder_enabled;
+	uint32_t start_mask;
 
 	/* REG_MDP5_CTL_*(<id>) registers access info + lock: */
 	spinlock_t hw_lock;
@@ -146,9 +138,10 @@ static void set_display_intf(struct mdp5_kms *mdp5_kms,
 	spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
 }
 
-static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
+static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
 {
 	unsigned long flags;
+	struct mdp5_interface *intf = pipeline->intf;
 	u32 ctl_op = 0;
 
 	if (!mdp5_cfg_intf_is_virtual(intf->type))
@@ -169,52 +162,50 @@ static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
 		break;
 	}
 
+	if (pipeline->r_mixer)
+		ctl_op |= MDP5_CTL_OP_PACK_3D_ENABLE |
+			  MDP5_CTL_OP_PACK_3D(1);
+
 	spin_lock_irqsave(&ctl->hw_lock, flags);
 	ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), ctl_op);
 	spin_unlock_irqrestore(&ctl->hw_lock, flags);
 }
 
-int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl,
-		struct mdp5_interface *intf, int lm)
+int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
 {
 	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
 	struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);
+	struct mdp5_interface *intf = pipeline->intf;
+	struct mdp5_hw_mixer *mixer = pipeline->mixer;
+	struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
 
-	if (unlikely(WARN_ON(intf->num != ctl->pipeline.intf.num))) {
-		dev_err(mdp5_kms->dev->dev,
-			"CTL %d is allocated by INTF %d, but used by INTF %d\n",
-			ctl->id, ctl->pipeline.intf.num, intf->num);
-		return -EINVAL;
-	}
-
-	ctl->lm = lm;
-
-	memcpy(&ctl->pipeline.intf, intf, sizeof(*intf));
-
-	ctl->pipeline.start_mask = mdp_ctl_flush_mask_lm(ctl->lm) |
-				   mdp_ctl_flush_mask_encoder(intf);
+	ctl->start_mask = mdp_ctl_flush_mask_lm(mixer->lm) |
+			  mdp_ctl_flush_mask_encoder(intf);
+	if (r_mixer)
+		ctl->start_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);
 
 	/* Virtual interfaces need not set a display intf (e.g.: Writeback) */
 	if (!mdp5_cfg_intf_is_virtual(intf->type))
 		set_display_intf(mdp5_kms, intf);
 
-	set_ctl_op(ctl, intf);
+	set_ctl_op(ctl, pipeline);
 
 	return 0;
 }
 
-static bool start_signal_needed(struct mdp5_ctl *ctl)
+static bool start_signal_needed(struct mdp5_ctl *ctl,
+				struct mdp5_pipeline *pipeline)
 {
-	struct op_mode *pipeline = &ctl->pipeline;
+	struct mdp5_interface *intf = pipeline->intf;
 
-	if (!pipeline->encoder_enabled || pipeline->start_mask != 0)
+	if (!ctl->encoder_enabled || ctl->start_mask != 0)
 		return false;
 
-	switch (pipeline->intf.type) {
+	switch (intf->type) {
 	case INTF_WB:
 		return true;
 	case INTF_DSI:
-		return pipeline->intf.mode == MDP5_INTF_DSI_MODE_COMMAND;
+		return intf->mode == MDP5_INTF_DSI_MODE_COMMAND;
 	default:
 		return false;
 	}
@@ -236,19 +227,23 @@ static void send_start_signal(struct mdp5_ctl *ctl)
 	spin_unlock_irqrestore(&ctl->hw_lock, flags);
 }
 
-static void refill_start_mask(struct mdp5_ctl *ctl)
+static void refill_start_mask(struct mdp5_ctl *ctl,
+			      struct mdp5_pipeline *pipeline)
 {
-	struct op_mode *pipeline = &ctl->pipeline;
-	struct mdp5_interface *intf = &ctl->pipeline.intf;
+	struct mdp5_interface *intf = pipeline->intf;
+	struct mdp5_hw_mixer *mixer = pipeline->mixer;
+	struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
 
-	pipeline->start_mask = mdp_ctl_flush_mask_lm(ctl->lm);
+	ctl->start_mask = mdp_ctl_flush_mask_lm(mixer->lm);
+	if (r_mixer)
+		ctl->start_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);
 
 	/*
 	 * Writeback encoder needs to program & flush
 	 * address registers for each page flip..
 	 */
 	if (intf->type == INTF_WB)
-		pipeline->start_mask |= mdp_ctl_flush_mask_encoder(intf);
+		ctl->start_mask |= mdp_ctl_flush_mask_encoder(intf);
 }
 
 /**
@@ -259,17 +254,21 @@ static void refill_start_mask(struct mdp5_ctl *ctl)
  * Note:
  * This encoder state is needed to trigger START signal (data path kickoff).
  */
-int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled)
+int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl,
+			       struct mdp5_pipeline *pipeline,
+			       bool enabled)
 {
+	struct mdp5_interface *intf = pipeline->intf;
+
 	if (WARN_ON(!ctl))
 		return -EINVAL;
 
-	ctl->pipeline.encoder_enabled = enabled;
-	DBG("intf_%d: %s", ctl->pipeline.intf.num, enabled ? "on" : "off");
+	ctl->encoder_enabled = enabled;
+	DBG("intf_%d: %s", intf->num, enabled ? "on" : "off");
 
-	if (start_signal_needed(ctl)) {
+	if (start_signal_needed(ctl, pipeline)) {
 		send_start_signal(ctl);
-		refill_start_mask(ctl);
+		refill_start_mask(ctl, pipeline);
 	}
 
 	return 0;
@@ -280,29 +279,35 @@ int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled)
  * CTL registers need to be flushed after calling this function
  * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
  */
-int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable)
+int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
+			int cursor_id, bool enable)
 {
 	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
 	unsigned long flags;
 	u32 blend_cfg;
-	int lm = ctl->lm;
+	struct mdp5_hw_mixer *mixer = pipeline->mixer;
+
+	if (unlikely(WARN_ON(!mixer))) {
+		dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM",
+			ctl->id);
+		return -EINVAL;
+	}
 
-	if (unlikely(WARN_ON(lm < 0))) {
-		dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d",
-			ctl->id, lm);
+	if (pipeline->r_mixer) {
+		dev_err(ctl_mgr->dev->dev, "unsupported configuration");
 		return -EINVAL;
 	}
 
 	spin_lock_irqsave(&ctl->hw_lock, flags);
 
-	blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm));
+	blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm));
 
 	if (enable)
 		blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
 	else
 		blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;
 
-	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
+	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
 	ctl->cursor_on = enable;
 
 	spin_unlock_irqrestore(&ctl->hw_lock, flags);
@@ -355,37 +360,88 @@ static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe,
 	}
 }
 
-int mdp5_ctl_blend(struct mdp5_ctl *ctl, enum mdp5_pipe *stage, u32 stage_cnt,
-		u32 ctl_blend_op_flags)
+static void mdp5_ctl_reset_blend_regs(struct mdp5_ctl *ctl)
+{
+	unsigned long flags;
+	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
+	int i;
+
+	spin_lock_irqsave(&ctl->hw_lock, flags);
+
+	for (i = 0; i < ctl_mgr->nlm; i++) {
+		ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, i), 0x0);
+		ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, i), 0x0);
+	}
+
+	spin_unlock_irqrestore(&ctl->hw_lock, flags);
+}
+
+#define PIPE_LEFT	0
+#define PIPE_RIGHT	1
+int mdp5_ctl_blend(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
+		   enum mdp5_pipe stage[][MAX_PIPE_STAGE],
+		   enum mdp5_pipe r_stage[][MAX_PIPE_STAGE],
+		   u32 stage_cnt, u32 ctl_blend_op_flags)
 {
+	struct mdp5_hw_mixer *mixer = pipeline->mixer;
+	struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
 	unsigned long flags;
 	u32 blend_cfg = 0, blend_ext_cfg = 0;
+	u32 r_blend_cfg = 0, r_blend_ext_cfg = 0;
 	int i, start_stage;
 
+	mdp5_ctl_reset_blend_regs(ctl);
+
 	if (ctl_blend_op_flags & MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT) {
 		start_stage = STAGE0;
 		blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
+		if (r_mixer)
+			r_blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
 	} else {
 		start_stage = STAGE_BASE;
 	}
 
 	for (i = start_stage; stage_cnt && i <= STAGE_MAX; i++) {
-		blend_cfg |= mdp_ctl_blend_mask(stage[i], i);
-		blend_ext_cfg |= mdp_ctl_blend_ext_mask(stage[i], i);
+		blend_cfg |=
+			mdp_ctl_blend_mask(stage[i][PIPE_LEFT], i) |
+			mdp_ctl_blend_mask(stage[i][PIPE_RIGHT], i);
+		blend_ext_cfg |=
+			mdp_ctl_blend_ext_mask(stage[i][PIPE_LEFT], i) |
+			mdp_ctl_blend_ext_mask(stage[i][PIPE_RIGHT], i);
+		if (r_mixer) {
+			r_blend_cfg |=
+				mdp_ctl_blend_mask(r_stage[i][PIPE_LEFT], i) |
+				mdp_ctl_blend_mask(r_stage[i][PIPE_RIGHT], i);
+			r_blend_ext_cfg |=
+				mdp_ctl_blend_ext_mask(r_stage[i][PIPE_LEFT], i) |
+				mdp_ctl_blend_ext_mask(r_stage[i][PIPE_RIGHT], i);
+		}
 	}
 
 	spin_lock_irqsave(&ctl->hw_lock, flags);
 	if (ctl->cursor_on)
 		blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
 
-	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, ctl->lm), blend_cfg);
-	ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, ctl->lm), blend_ext_cfg);
+	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
+	ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, mixer->lm),
+		  blend_ext_cfg);
+	if (r_mixer) {
+		ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, r_mixer->lm),
+			  r_blend_cfg);
+		ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, r_mixer->lm),
+			  r_blend_ext_cfg);
+	}
 	spin_unlock_irqrestore(&ctl->hw_lock, flags);
 
-	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(ctl->lm);
+	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(mixer->lm);
+	if (r_mixer)
+		ctl->pending_ctl_trigger |= mdp_ctl_flush_mask_lm(r_mixer->lm);
 
-	DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", ctl->lm,
+	DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", mixer->lm,
 		blend_cfg, blend_ext_cfg);
+	if (r_mixer)
+		DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x",
+			r_mixer->lm, r_blend_cfg, r_blend_ext_cfg);
 
 	return 0;
 }
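
With the reworked signature, each blend stage now names up to MAX_PIPE_STAGE (two) source pipes per mixer: the left and right halves of a plane that was split across two hwpipes, with a second whole array for the right-hand mixer. A caller-side sketch (the pipe choices here are invented for illustration; the real staging is built by blend_setup() in mdp5_crtc.c, which is outside this hunk):

	enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
	enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };

	stage[STAGE0][PIPE_LEFT] = SSPP_VIG0;	/* plane's left hwpipe */
	stage[STAGE0][PIPE_RIGHT] = SSPP_VIG1;	/* right hwpipe, if split */
	r_stage[STAGE0][PIPE_LEFT] = SSPP_VIG2;	/* staged on the right LM */

	mdp5_ctl_blend(ctl, pipeline, stage, r_stage, 1,
		       MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT);

Zero-initializing to SSPP_NONE lets mdp_ctl_blend_mask() contribute nothing for unused slots, which is why the unconditional left|right OR in the loop above is safe.
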
@@ -443,7 +499,8 @@ u32 mdp_ctl_flush_mask_lm(int lm)
 	}
 }
 
-static u32 fix_sw_flush(struct mdp5_ctl *ctl, u32 flush_mask)
+static u32 fix_sw_flush(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
+			u32 flush_mask)
 {
 	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
 	u32 sw_mask = 0;
@@ -452,7 +509,7 @@ static u32 fix_sw_flush(struct mdp5_ctl *ctl, u32 flush_mask)
 
 	/* for some targets, cursor bit is the same as LM bit */
 	if (BIT_NEEDS_SW_FIX(MDP5_CTL_FLUSH_CURSOR_0))
-		sw_mask |= mdp_ctl_flush_mask_lm(ctl->lm);
+		sw_mask |= mdp_ctl_flush_mask_lm(pipeline->mixer->lm);
 
 	return sw_mask;
 }
@@ -498,25 +555,26 @@ static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask,
  *
  * Return H/W flushed bit mask.
  */
-u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
+u32 mdp5_ctl_commit(struct mdp5_ctl *ctl,
+		    struct mdp5_pipeline *pipeline,
+		    u32 flush_mask)
 {
 	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
-	struct op_mode *pipeline = &ctl->pipeline;
 	unsigned long flags;
 	u32 flush_id = ctl->id;
 	u32 curr_ctl_flush_mask;
 
-	pipeline->start_mask &= ~flush_mask;
+	ctl->start_mask &= ~flush_mask;
 
 	VERB("flush_mask=%x, start_mask=%x, trigger=%x", flush_mask,
-			pipeline->start_mask, ctl->pending_ctl_trigger);
+			ctl->start_mask, ctl->pending_ctl_trigger);
 
 	if (ctl->pending_ctl_trigger & flush_mask) {
 		flush_mask |= MDP5_CTL_FLUSH_CTL;
 		ctl->pending_ctl_trigger = 0;
 	}
 
-	flush_mask |= fix_sw_flush(ctl, flush_mask);
+	flush_mask |= fix_sw_flush(ctl, pipeline, flush_mask);
 
 	flush_mask &= ctl_mgr->flush_hw_mask;
 
@@ -530,9 +588,9 @@ u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
 		spin_unlock_irqrestore(&ctl->hw_lock, flags);
 	}
 
-	if (start_signal_needed(ctl)) {
+	if (start_signal_needed(ctl, pipeline)) {
 		send_start_signal(ctl);
-		refill_start_mask(ctl);
+		refill_start_mask(ctl, pipeline);
 	}
 
 	return curr_ctl_flush_mask;
@@ -619,8 +677,6 @@ struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
 
 found:
 	ctl = &ctl_mgr->ctls[c];
-	ctl->pipeline.intf.num = intf_num;
-	ctl->lm = -1;
 	ctl->status |= CTL_STAT_BUSY;
 	ctl->pending_ctl_trigger = 0;
 	DBG("CTL %d allocated", ctl->id);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
index fda00d33e4db..b63120388dc6 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
@@ -37,13 +37,17 @@ struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, int intf_num);
 int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl);
 
 struct mdp5_interface;
-int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_interface *intf,
-		int lm);
-int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled);
+struct mdp5_pipeline;
+int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *p);
+int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, struct mdp5_pipeline *p,
+			       bool enabled);
 
-int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable);
+int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
+			int cursor_id, bool enable);
 int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable);
 
+#define MAX_PIPE_STAGE 2
+
 /*
  * mdp5_ctl_blend() - Blend multiple layers on a Layer Mixer (LM)
  *
@@ -56,8 +60,10 @@ int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable);
  * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
  */
 #define MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT	BIT(0)
-int mdp5_ctl_blend(struct mdp5_ctl *ctl, enum mdp5_pipe *stage, u32 stage_cnt,
-		u32 ctl_blend_op_flags);
+int mdp5_ctl_blend(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
+		   enum mdp5_pipe stage[][MAX_PIPE_STAGE],
+		   enum mdp5_pipe r_stage[][MAX_PIPE_STAGE],
+		   u32 stage_cnt, u32 ctl_blend_op_flags);
 
 /**
  * mdp_ctl_flush_mask...() - Register FLUSH masks
@@ -71,7 +77,8 @@ u32 mdp_ctl_flush_mask_cursor(int cursor_id);
 u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf);
 
 /* @flush_mask: see CTL flush masks definitions below */
-u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask);
+u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
+		    u32 flush_mask);
 u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl);
 
 
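
Since struct mdp5_ctl no longer caches an interface or mixer of its own, every entry point in this header now takes the pipeline explicitly. The calling convention these prototypes imply, sketched (flush_mask is whatever mask the caller assembled):

	struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(crtc);

	mdp5_ctl_commit(ctl, pipeline, flush_mask);

Keeping the pipeline in atomic CRTC state rather than in the CTL is what makes the per-commit mixer assignment (and eventually per-process state) possible.
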
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index 80fa482ae8ed..c2ab0f033031 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -109,7 +109,7 @@ static void mdp5_vid_encoder_mode_set(struct drm_encoder *encoder,
 	struct mdp5_kms *mdp5_kms = get_kms(encoder);
 	struct drm_device *dev = encoder->dev;
 	struct drm_connector *connector;
-	int intf = mdp5_encoder->intf.num;
+	int intf = mdp5_encoder->intf->num;
 	uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol;
 	uint32_t display_v_start, display_v_end;
 	uint32_t hsync_start_x, hsync_end_x;
@@ -130,7 +130,7 @@ static void mdp5_vid_encoder_mode_set(struct drm_encoder *encoder,
 	ctrl_pol = 0;
 
 	/* DSI controller cannot handle active-low sync signals. */
-	if (mdp5_encoder->intf.type != INTF_DSI) {
+	if (mdp5_encoder->intf->type != INTF_DSI) {
 		if (mode->flags & DRM_MODE_FLAG_NHSYNC)
 			ctrl_pol |= MDP5_INTF_POLARITY_CTL_HSYNC_LOW;
 		if (mode->flags & DRM_MODE_FLAG_NVSYNC)
@@ -175,7 +175,7 @@ static void mdp5_vid_encoder_mode_set(struct drm_encoder *encoder,
 	 * DISPLAY_V_START = (VBP * HCYCLE) + HBP
 	 * DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP
 	 */
-	if (mdp5_encoder->intf.type == INTF_eDP) {
+	if (mdp5_encoder->intf->type == INTF_eDP) {
 		display_v_start += mode->htotal - mode->hsync_start;
 		display_v_end -= mode->hsync_start - mode->hdisplay;
 	}
@@ -206,8 +206,7 @@ static void mdp5_vid_encoder_mode_set(struct drm_encoder *encoder,
 
 	spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
 
-	mdp5_crtc_set_pipeline(encoder->crtc, &mdp5_encoder->intf,
-			mdp5_encoder->ctl);
+	mdp5_crtc_set_pipeline(encoder->crtc);
 }
 
 static void mdp5_vid_encoder_disable(struct drm_encoder *encoder)
@@ -215,20 +214,21 @@ static void mdp5_vid_encoder_disable(struct drm_encoder *encoder)
 	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
 	struct mdp5_kms *mdp5_kms = get_kms(encoder);
 	struct mdp5_ctl *ctl = mdp5_encoder->ctl;
-	int lm = mdp5_crtc_get_lm(encoder->crtc);
-	struct mdp5_interface *intf = &mdp5_encoder->intf;
-	int intfn = mdp5_encoder->intf.num;
+	struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc);
+	struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
+	struct mdp5_interface *intf = mdp5_encoder->intf;
+	int intfn = mdp5_encoder->intf->num;
 	unsigned long flags;
 
 	if (WARN_ON(!mdp5_encoder->enabled))
 		return;
 
-	mdp5_ctl_set_encoder_state(ctl, false);
+	mdp5_ctl_set_encoder_state(ctl, pipeline, false);
 
 	spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
 	mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 0);
 	spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
-	mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf));
+	mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf));
 
 	/*
 	 * Wait for a vsync so we know the ENABLE=0 latched before
@@ -238,7 +238,7 @@ static void mdp5_vid_encoder_disable(struct drm_encoder *encoder)
 	 * the settings changes for the new modeset (like new
 	 * scanout buffer) don't latch properly..
 	 */
-	mdp_irq_wait(&mdp5_kms->base, intf2vblank(lm, intf));
+	mdp_irq_wait(&mdp5_kms->base, intf2vblank(mixer, intf));
 
 	bs_set(mdp5_encoder, 0);
 
@@ -250,8 +250,9 @@ static void mdp5_vid_encoder_enable(struct drm_encoder *encoder)
 	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
 	struct mdp5_kms *mdp5_kms = get_kms(encoder);
 	struct mdp5_ctl *ctl = mdp5_encoder->ctl;
-	struct mdp5_interface *intf = &mdp5_encoder->intf;
-	int intfn = mdp5_encoder->intf.num;
+	struct mdp5_interface *intf = mdp5_encoder->intf;
+	struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc);
+	int intfn = intf->num;
 	unsigned long flags;
 
 	if (WARN_ON(mdp5_encoder->enabled))
@@ -261,9 +262,9 @@ static void mdp5_vid_encoder_enable(struct drm_encoder *encoder)
 	spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
 	mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 1);
 	spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
-	mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf));
+	mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf));
 
-	mdp5_ctl_set_encoder_state(ctl, true);
+	mdp5_ctl_set_encoder_state(ctl, pipeline, true);
 
 	mdp5_encoder->enabled = true;
 }
@@ -273,7 +274,7 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
 		struct drm_display_mode *adjusted_mode)
 {
 	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
-	struct mdp5_interface *intf = &mdp5_encoder->intf;
+	struct mdp5_interface *intf = mdp5_encoder->intf;
 
 	if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
 		mdp5_cmd_encoder_mode_set(encoder, mode, adjusted_mode);
@@ -284,7 +285,7 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
 static void mdp5_encoder_disable(struct drm_encoder *encoder)
 {
 	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
-	struct mdp5_interface *intf = &mdp5_encoder->intf;
+	struct mdp5_interface *intf = mdp5_encoder->intf;
 
 	if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
 		mdp5_cmd_encoder_disable(encoder);
@@ -295,7 +296,7 @@ static void mdp5_encoder_disable(struct drm_encoder *encoder)
 static void mdp5_encoder_enable(struct drm_encoder *encoder)
 {
 	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
-	struct mdp5_interface *intf = &mdp5_encoder->intf;
+	struct mdp5_interface *intf = mdp5_encoder->intf;
 
 	if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
 		mdp5_cmd_encoder_disable(encoder);
@@ -303,17 +304,33 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder)
 		mdp5_vid_encoder_enable(encoder);
 }
 
+static int mdp5_encoder_atomic_check(struct drm_encoder *encoder,
+				     struct drm_crtc_state *crtc_state,
+				     struct drm_connector_state *conn_state)
+{
+	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc_state);
+	struct mdp5_interface *intf = mdp5_encoder->intf;
+	struct mdp5_ctl *ctl = mdp5_encoder->ctl;
+
+	mdp5_cstate->ctl = ctl;
+	mdp5_cstate->pipeline.intf = intf;
+
+	return 0;
+}
+
 static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = {
 	.mode_set = mdp5_encoder_mode_set,
 	.disable = mdp5_encoder_disable,
 	.enable = mdp5_encoder_enable,
+	.atomic_check = mdp5_encoder_atomic_check,
 };
 
 int mdp5_encoder_get_linecount(struct drm_encoder *encoder)
 {
 	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
 	struct mdp5_kms *mdp5_kms = get_kms(encoder);
-	int intf = mdp5_encoder->intf.num;
+	int intf = mdp5_encoder->intf->num;
 
 	return mdp5_read(mdp5_kms, REG_MDP5_INTF_LINE_COUNT(intf));
 }
@@ -322,7 +339,7 @@ u32 mdp5_encoder_get_framecount(struct drm_encoder *encoder)
 {
 	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
 	struct mdp5_kms *mdp5_kms = get_kms(encoder);
-	int intf = mdp5_encoder->intf.num;
+	int intf = mdp5_encoder->intf->num;
 
 	return mdp5_read(mdp5_kms, REG_MDP5_INTF_FRAME_COUNT(intf));
 }
@@ -340,7 +357,7 @@ int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder,
 		return -EINVAL;
 
 	mdp5_kms = get_kms(encoder);
-	intf_num = mdp5_encoder->intf.num;
+	intf_num = mdp5_encoder->intf->num;
 
 	/* Switch slave encoder's TimingGen Sync mode,
 	 * to use the master's enable signal for the slave encoder.
@@ -369,7 +386,7 @@ int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder,
 void mdp5_encoder_set_intf_mode(struct drm_encoder *encoder, bool cmd_mode)
 {
 	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
-	struct mdp5_interface *intf = &mdp5_encoder->intf;
+	struct mdp5_interface *intf = mdp5_encoder->intf;
 
 	/* TODO: Expand this to set writeback modes too */
 	if (cmd_mode) {
@@ -385,7 +402,8 @@ void mdp5_encoder_set_intf_mode(struct drm_encoder *encoder, bool cmd_mode)
 
 /* initialize encoder */
 struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
-		struct mdp5_interface *intf, struct mdp5_ctl *ctl)
+			struct mdp5_interface *intf,
+			struct mdp5_ctl *ctl)
 {
 	struct drm_encoder *encoder = NULL;
 	struct mdp5_encoder *mdp5_encoder;
@@ -399,9 +417,9 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
 		goto fail;
 	}
 
-	memcpy(&mdp5_encoder->intf, intf, sizeof(mdp5_encoder->intf));
 	encoder = &mdp5_encoder->base;
 	mdp5_encoder->ctl = ctl;
+	mdp5_encoder->intf = intf;
 
 	spin_lock_init(&mdp5_encoder->intf_lock);
 
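
All of the encoder changes above stem from one ownership change: mdp5_encoder used to keep a private copy of the interface descriptor (filled by the memcpy() removed above), and now borrows a pointer to a kms-owned one, which is why every `intf.num`/`intf.type` access became `intf->num`/`intf->type`. A sketch of the member swap (the full struct definition lives outside this diff):

	struct mdp5_encoder {
		struct drm_encoder base;
		struct mdp5_interface *intf;	/* was: struct mdp5_interface intf; */
		struct mdp5_ctl *ctl;
		/* remaining members unchanged */
	};

Sharing one descriptor means mode changes (e.g. DSI video vs. command mode) made through mdp5_encoder_set_intf_mode() are seen by every user of that interface.
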
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 41ccd2a15d3c..d3d6b4cae1e6 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -93,6 +93,7 @@ struct mdp5_state *mdp5_get_state(struct drm_atomic_state *s)
 
 	/* Copy state: */
 	new_state->hwpipe = mdp5_kms->state->hwpipe;
+	new_state->hwmixer = mdp5_kms->state->hwmixer;
 	if (mdp5_kms->smp)
 		new_state->smp = mdp5_kms->state->smp;
 
@@ -165,13 +166,16 @@ static void mdp5_kms_destroy(struct msm_kms *kms)
 	struct msm_gem_address_space *aspace = mdp5_kms->aspace;
 	int i;
 
+	for (i = 0; i < mdp5_kms->num_hwmixers; i++)
+		mdp5_mixer_destroy(mdp5_kms->hwmixers[i]);
+
 	for (i = 0; i < mdp5_kms->num_hwpipes; i++)
 		mdp5_pipe_destroy(mdp5_kms->hwpipes[i]);
 
 	if (aspace) {
 		aspace->mmu->funcs->detach(aspace->mmu,
 				iommu_ports, ARRAY_SIZE(iommu_ports));
-		msm_gem_address_space_destroy(aspace);
+		msm_gem_address_space_put(aspace);
 	}
 }
 
@@ -268,19 +272,14 @@ int mdp5_enable(struct mdp5_kms *mdp5_kms)
 }
 
 static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
-		enum mdp5_intf_type intf_type, int intf_num,
+		struct mdp5_interface *intf,
 		struct mdp5_ctl *ctl)
 {
 	struct drm_device *dev = mdp5_kms->dev;
 	struct msm_drm_private *priv = dev->dev_private;
 	struct drm_encoder *encoder;
-	struct mdp5_interface intf = {
-		.num = intf_num,
-		.type = intf_type,
-		.mode = MDP5_INTF_MODE_NONE,
-	};
 
-	encoder = mdp5_encoder_init(dev, &intf, ctl);
+	encoder = mdp5_encoder_init(dev, intf, ctl);
 	if (IS_ERR(encoder)) {
 		dev_err(dev->dev, "failed to construct encoder\n");
 		return encoder;
@@ -309,32 +308,28 @@ static int get_dsi_id_from_intf(const struct mdp5_cfg_hw *hw_cfg, int intf_num)
 	return -EINVAL;
 }
 
-static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
+static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
+			     struct mdp5_interface *intf)
 {
 	struct drm_device *dev = mdp5_kms->dev;
 	struct msm_drm_private *priv = dev->dev_private;
-	const struct mdp5_cfg_hw *hw_cfg =
-			mdp5_cfg_get_hw_config(mdp5_kms->cfg);
-	enum mdp5_intf_type intf_type = hw_cfg->intf.connect[intf_num];
 	struct mdp5_ctl_manager *ctlm = mdp5_kms->ctlm;
 	struct mdp5_ctl *ctl;
 	struct drm_encoder *encoder;
 	int ret = 0;
 
-	switch (intf_type) {
-	case INTF_DISABLED:
-		break;
+	switch (intf->type) {
 	case INTF_eDP:
 		if (!priv->edp)
 			break;
 
-		ctl = mdp5_ctlm_request(ctlm, intf_num);
+		ctl = mdp5_ctlm_request(ctlm, intf->num);
 		if (!ctl) {
 			ret = -EINVAL;
 			break;
 		}
 
-		encoder = construct_encoder(mdp5_kms, INTF_eDP, intf_num, ctl);
+		encoder = construct_encoder(mdp5_kms, intf, ctl);
 		if (IS_ERR(encoder)) {
 			ret = PTR_ERR(encoder);
 			break;
@@ -346,13 +341,13 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
 		if (!priv->hdmi)
 			break;
 
-		ctl = mdp5_ctlm_request(ctlm, intf_num);
+		ctl = mdp5_ctlm_request(ctlm, intf->num);
 		if (!ctl) {
 			ret = -EINVAL;
 			break;
 		}
 
-		encoder = construct_encoder(mdp5_kms, INTF_HDMI, intf_num, ctl);
+		encoder = construct_encoder(mdp5_kms, intf, ctl);
 		if (IS_ERR(encoder)) {
 			ret = PTR_ERR(encoder);
 			break;
@@ -362,11 +357,13 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
 		break;
 	case INTF_DSI:
 	{
-		int dsi_id = get_dsi_id_from_intf(hw_cfg, intf_num);
+		const struct mdp5_cfg_hw *hw_cfg =
+				mdp5_cfg_get_hw_config(mdp5_kms->cfg);
+		int dsi_id = get_dsi_id_from_intf(hw_cfg, intf->num);
 
 		if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) {
 			dev_err(dev->dev, "failed to find dsi from intf %d\n",
-					intf_num);
+					intf->num);
 			ret = -EINVAL;
 			break;
 		}
@@ -374,13 +371,13 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
 		if (!priv->dsi[dsi_id])
 			break;
 
-		ctl = mdp5_ctlm_request(ctlm, intf_num);
+		ctl = mdp5_ctlm_request(ctlm, intf->num);
 		if (!ctl) {
 			ret = -EINVAL;
 			break;
 		}
 
-		encoder = construct_encoder(mdp5_kms, INTF_DSI, intf_num, ctl);
+		encoder = construct_encoder(mdp5_kms, intf, ctl);
 		if (IS_ERR(encoder)) {
 			ret = PTR_ERR(encoder);
 			break;
@@ -390,7 +387,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
 		break;
 	}
 	default:
-		dev_err(dev->dev, "unknown intf: %d\n", intf_type);
+		dev_err(dev->dev, "unknown intf: %d\n", intf->type);
 		ret = -EINVAL;
 		break;
 	}
@@ -414,8 +411,8 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
 	 * Construct encoders and modeset initialize connector devices
 	 * for each external display interface.
 	 */
-	for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) {
-		ret = modeset_init_intf(mdp5_kms, i);
+	for (i = 0; i < mdp5_kms->num_intfs; i++) {
+		ret = modeset_init_intf(mdp5_kms, mdp5_kms->intfs[i]);
 		if (ret)
 			goto fail;
 	}
@@ -425,7 +422,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
 	 * the MDP5 interfaces) than the number of layer mixers present in HW,
 	 * but let's be safe here anyway
 	 */
-	num_crtcs = min(priv->num_encoders, mdp5_cfg->lm.count);
+	num_crtcs = min(priv->num_encoders, mdp5_kms->num_hwmixers);
 
 	/*
 	 * Construct planes equaling the number of hw pipes, and CRTCs for the
@@ -744,6 +741,7 @@ fail:
 static void mdp5_destroy(struct platform_device *pdev)
 {
 	struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev);
+	int i;
 
 	if (mdp5_kms->ctlm)
 		mdp5_ctlm_destroy(mdp5_kms->ctlm);
@@ -752,6 +750,9 @@ static void mdp5_destroy(struct platform_device *pdev)
 	if (mdp5_kms->cfg)
 		mdp5_cfg_destroy(mdp5_kms->cfg);
 
+	for (i = 0; i < mdp5_kms->num_intfs; i++)
+		kfree(mdp5_kms->intfs[i]);
+
 	if (mdp5_kms->rpm_enabled)
 		pm_runtime_disable(&pdev->dev);
 
@@ -829,6 +830,64 @@ static int hwpipe_init(struct mdp5_kms *mdp5_kms)
829 return 0; 830 return 0;
830} 831}
831 832
833static int hwmixer_init(struct mdp5_kms *mdp5_kms)
834{
835 struct drm_device *dev = mdp5_kms->dev;
836 const struct mdp5_cfg_hw *hw_cfg;
837 int i, ret;
838
839 hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
840
841 for (i = 0; i < hw_cfg->lm.count; i++) {
842 struct mdp5_hw_mixer *mixer;
843
844 mixer = mdp5_mixer_init(&hw_cfg->lm.instances[i]);
845 if (IS_ERR(mixer)) {
846 ret = PTR_ERR(mixer);
847 dev_err(dev->dev, "failed to construct LM%d (%d)\n",
848 i, ret);
849 return ret;
850 }
851
852 mixer->idx = mdp5_kms->num_hwmixers;
853 mdp5_kms->hwmixers[mdp5_kms->num_hwmixers++] = mixer;
854 }
855
856 return 0;
857}
858
859static int interface_init(struct mdp5_kms *mdp5_kms)
860{
861 struct drm_device *dev = mdp5_kms->dev;
862 const struct mdp5_cfg_hw *hw_cfg;
863 const enum mdp5_intf_type *intf_types;
864 int i;
865
866 hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
867 intf_types = hw_cfg->intf.connect;
868
869 for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) {
870 struct mdp5_interface *intf;
871
872 if (intf_types[i] == INTF_DISABLED)
873 continue;
874
875 intf = kzalloc(sizeof(*intf), GFP_KERNEL);
876 if (!intf) {
877 dev_err(dev->dev, "failed to construct INTF%d\n", i);
878 return -ENOMEM;
879 }
880
881 intf->num = i;
882 intf->type = intf_types[i];
883 intf->mode = MDP5_INTF_MODE_NONE;
884 intf->idx = mdp5_kms->num_intfs;
885 mdp5_kms->intfs[mdp5_kms->num_intfs++] = intf;
886 }
887
888 return 0;
889}
890
832static int mdp5_init(struct platform_device *pdev, struct drm_device *dev) 891static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
833{ 892{
834 struct msm_drm_private *priv = dev->dev_private; 893 struct msm_drm_private *priv = dev->dev_private;
@@ -929,6 +988,14 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
929 if (ret) 988 if (ret)
930 goto fail; 989 goto fail;
931 990
991 ret = hwmixer_init(mdp5_kms);
992 if (ret)
993 goto fail;
994
995 ret = interface_init(mdp5_kms);
996 if (ret)
997 goto fail;
998
932 /* set uninit-ed kms */ 999 /* set uninit-ed kms */
933 priv->kms = &mdp5_kms->base.base; 1000 priv->kms = &mdp5_kms->base.base;
934 1001
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 9de471191eba..8bdb7ee4983b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -23,8 +23,9 @@
23#include "mdp/mdp_kms.h" 23#include "mdp/mdp_kms.h"
24#include "mdp5_cfg.h" /* must be included before mdp5.xml.h */ 24#include "mdp5_cfg.h" /* must be included before mdp5.xml.h */
25#include "mdp5.xml.h" 25#include "mdp5.xml.h"
26#include "mdp5_ctl.h"
27#include "mdp5_pipe.h" 26#include "mdp5_pipe.h"
27#include "mdp5_mixer.h"
28#include "mdp5_ctl.h"
28#include "mdp5_smp.h" 29#include "mdp5_smp.h"
29 30
30struct mdp5_state; 31struct mdp5_state;
@@ -39,6 +40,12 @@ struct mdp5_kms {
39 unsigned num_hwpipes; 40 unsigned num_hwpipes;
40 struct mdp5_hw_pipe *hwpipes[SSPP_MAX]; 41 struct mdp5_hw_pipe *hwpipes[SSPP_MAX];
41 42
43 unsigned num_hwmixers;
44 struct mdp5_hw_mixer *hwmixers[8];
45
46 unsigned num_intfs;
47 struct mdp5_interface *intfs[5];
48
42 struct mdp5_cfg_handler *cfg; 49 struct mdp5_cfg_handler *cfg;
43 uint32_t caps; /* MDP capabilities (MDP_CAP_XXX bits) */ 50 uint32_t caps; /* MDP capabilities (MDP_CAP_XXX bits) */
44 51
@@ -83,6 +90,7 @@ struct mdp5_kms {
83 */ 90 */
84struct mdp5_state { 91struct mdp5_state {
85 struct mdp5_hw_pipe_state hwpipe; 92 struct mdp5_hw_pipe_state hwpipe;
93 struct mdp5_hw_mixer_state hwmixer;
86 struct mdp5_smp_state smp; 94 struct mdp5_smp_state smp;
87}; 95};
88 96
@@ -96,6 +104,7 @@ struct mdp5_plane_state {
96 struct drm_plane_state base; 104 struct drm_plane_state base;
97 105
98 struct mdp5_hw_pipe *hwpipe; 106 struct mdp5_hw_pipe *hwpipe;
107 struct mdp5_hw_pipe *r_hwpipe; /* right hwpipe */
99 108
100 /* aligned with property */ 109 /* aligned with property */
101 uint8_t premultiplied; 110 uint8_t premultiplied;
@@ -108,6 +117,28 @@ struct mdp5_plane_state {
108#define to_mdp5_plane_state(x) \ 117#define to_mdp5_plane_state(x) \
109 container_of(x, struct mdp5_plane_state, base) 118 container_of(x, struct mdp5_plane_state, base)
110 119
120struct mdp5_pipeline {
121 struct mdp5_interface *intf;
122 struct mdp5_hw_mixer *mixer;
123 struct mdp5_hw_mixer *r_mixer; /* right mixer */
124};
125
126struct mdp5_crtc_state {
127 struct drm_crtc_state base;
128
129 struct mdp5_ctl *ctl;
130 struct mdp5_pipeline pipeline;
131
132	/* these are derived from the intf/mixer state in mdp5_pipeline */
133 u32 vblank_irqmask;
134 u32 err_irqmask;
135 u32 pp_done_irqmask;
136
137 bool cmd_mode;
138};
139#define to_mdp5_crtc_state(x) \
140 container_of(x, struct mdp5_crtc_state, base)
141
111enum mdp5_intf_mode { 142enum mdp5_intf_mode {
112 MDP5_INTF_MODE_NONE = 0, 143 MDP5_INTF_MODE_NONE = 0,
113 144
@@ -121,6 +152,7 @@ enum mdp5_intf_mode {
121}; 152};
122 153
123struct mdp5_interface { 154struct mdp5_interface {
155 int idx;
124 int num; /* display interface number */ 156 int num; /* display interface number */
125 enum mdp5_intf_type type; 157 enum mdp5_intf_type type;
126 enum mdp5_intf_mode mode; 158 enum mdp5_intf_mode mode;
@@ -128,11 +160,11 @@ struct mdp5_interface {
128 160
129struct mdp5_encoder { 161struct mdp5_encoder {
130 struct drm_encoder base; 162 struct drm_encoder base;
131 struct mdp5_interface intf;
132 spinlock_t intf_lock; /* protect REG_MDP5_INTF_* registers */ 163 spinlock_t intf_lock; /* protect REG_MDP5_INTF_* registers */
133 bool enabled; 164 bool enabled;
134 uint32_t bsc; 165 uint32_t bsc;
135 166
167 struct mdp5_interface *intf;
136 struct mdp5_ctl *ctl; 168 struct mdp5_ctl *ctl;
137}; 169};
138#define to_mdp5_encoder(x) container_of(x, struct mdp5_encoder, base) 170#define to_mdp5_encoder(x) container_of(x, struct mdp5_encoder, base)
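The encoder no longer embeds its own copy of struct mdp5_interface; it points at the instance owned by mdp5_kms. A hedged sketch of the consequence (the field access is real, the surrounding assignment is illustrative only):

	struct mdp5_encoder *enc = to_mdp5_encoder(encoder);

	/* mode changes now land in the shared, kms-owned object, so  */
	/* helpers such as intf2vblank() see them without any copying */
	enc->intf->mode = MDP5_INTF_DSI_MODE_COMMAND;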
@@ -197,8 +229,8 @@ static inline uint32_t intf2err(int intf_num)
197 } 229 }
198} 230}
199 231
200#define GET_PING_PONG_ID(layer_mixer) ((layer_mixer == 5) ? 3 : layer_mixer) 232static inline uint32_t intf2vblank(struct mdp5_hw_mixer *mixer,
201static inline uint32_t intf2vblank(int lm, struct mdp5_interface *intf) 233 struct mdp5_interface *intf)
202{ 234{
203 /* 235 /*
204 * In case of DSI Command Mode, the Ping Pong's read pointer IRQ 236 * In case of DSI Command Mode, the Ping Pong's read pointer IRQ
@@ -208,7 +240,7 @@ static inline uint32_t intf2vblank(int lm, struct mdp5_interface *intf)
208 240
209 if ((intf->type == INTF_DSI) && 241 if ((intf->type == INTF_DSI) &&
210 (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) 242 (intf->mode == MDP5_INTF_DSI_MODE_COMMAND))
211 return MDP5_IRQ_PING_PONG_0_RD_PTR << GET_PING_PONG_ID(lm); 243 return MDP5_IRQ_PING_PONG_0_RD_PTR << mixer->pp;
212 244
213 if (intf->type == INTF_WB) 245 if (intf->type == INTF_WB)
214 return MDP5_IRQ_WB_2_DONE; 246 return MDP5_IRQ_WB_2_DONE;
@@ -222,9 +254,9 @@ static inline uint32_t intf2vblank(int lm, struct mdp5_interface *intf)
222 } 254 }
223} 255}
224 256
225static inline uint32_t lm2ppdone(int lm) 257static inline uint32_t lm2ppdone(struct mdp5_hw_mixer *mixer)
226{ 258{
227 return MDP5_IRQ_PING_PONG_0_DONE << GET_PING_PONG_ID(lm); 259 return MDP5_IRQ_PING_PONG_0_DONE << mixer->pp;
228} 260}
229 261
230int mdp5_disable(struct mdp5_kms *mdp5_kms); 262int mdp5_disable(struct mdp5_kms *mdp5_kms);
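The vblank/pp-done helpers above now take the ping-pong index straight from the assigned mixer rather than recomputing it from the LM number. A minimal sketch of the equivalence, assuming the hw config gives LM5 the pp index 3, exactly what the removed GET_PING_PONG_ID macro hardcoded:

	struct mdp5_hw_mixer mixer = { .lm = 5, .pp = 3 };

	/* old: MDP5_IRQ_PING_PONG_0_DONE << GET_PING_PONG_ID(5) */
	/* new: the shift comes from the per-instance hw config  */
	uint32_t irqmask = lm2ppdone(&mixer);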
@@ -243,15 +275,16 @@ void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms);
243 275
244uint32_t mdp5_plane_get_flush(struct drm_plane *plane); 276uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
245enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane); 277enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
278enum mdp5_pipe mdp5_plane_right_pipe(struct drm_plane *plane);
246struct drm_plane *mdp5_plane_init(struct drm_device *dev, 279struct drm_plane *mdp5_plane_init(struct drm_device *dev,
247 enum drm_plane_type type); 280 enum drm_plane_type type);
248 281
249struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc); 282struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc);
250uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc); 283uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
251 284
252int mdp5_crtc_get_lm(struct drm_crtc *crtc); 285struct mdp5_hw_mixer *mdp5_crtc_get_mixer(struct drm_crtc *crtc);
253void mdp5_crtc_set_pipeline(struct drm_crtc *crtc, 286struct mdp5_pipeline *mdp5_crtc_get_pipeline(struct drm_crtc *crtc);
254 struct mdp5_interface *intf, struct mdp5_ctl *ctl); 287void mdp5_crtc_set_pipeline(struct drm_crtc *crtc);
255void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc); 288void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc);
256struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, 289struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
257 struct drm_plane *plane, 290 struct drm_plane *plane,
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.c
new file mode 100644
index 000000000000..8a00991f03c7
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.c
@@ -0,0 +1,172 @@
1/*
2 * Copyright (C) 2017 The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 as published by
6 * the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#include "mdp5_kms.h"
18
19/*
20 * As of now, there are only 2 combinations possible for source split:
21 *
22 * Left | Right
23 * -----|------
24 * LM0 | LM1
25 * LM2 | LM5
26 *
27 */
28static int lm_right_pair[] = { 1, -1, 5, -1, -1, -1 };
29
30static int get_right_pair_idx(struct mdp5_kms *mdp5_kms, int lm)
31{
32 int i;
33 int pair_lm;
34
35 pair_lm = lm_right_pair[lm];
36 if (pair_lm < 0)
37 return -EINVAL;
38
39 for (i = 0; i < mdp5_kms->num_hwmixers; i++) {
40 struct mdp5_hw_mixer *mixer = mdp5_kms->hwmixers[i];
41
42 if (mixer->lm == pair_lm)
43 return mixer->idx;
44 }
45
46 return -1;
47}
48
49int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc,
50 uint32_t caps, struct mdp5_hw_mixer **mixer,
51 struct mdp5_hw_mixer **r_mixer)
52{
53 struct msm_drm_private *priv = s->dev->dev_private;
54 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
55 struct mdp5_state *state = mdp5_get_state(s);
56 struct mdp5_hw_mixer_state *new_state;
57 int i;
58
59 if (IS_ERR(state))
60 return PTR_ERR(state);
61
62 new_state = &state->hwmixer;
63
64 for (i = 0; i < mdp5_kms->num_hwmixers; i++) {
65 struct mdp5_hw_mixer *cur = mdp5_kms->hwmixers[i];
66
67 /*
68		 * skip if already in use by a different CRTC. If there is a
69 * mixer already assigned to this CRTC, it means this call is
70 * a request to get an additional right mixer. Assume that the
71 * existing mixer is the 'left' one, and try to see if we can
72 * get its corresponding 'right' pair.
73 */
74 if (new_state->hwmixer_to_crtc[cur->idx] &&
75 new_state->hwmixer_to_crtc[cur->idx] != crtc)
76 continue;
77
78 /* skip if doesn't support some required caps: */
79 if (caps & ~cur->caps)
80 continue;
81
82 if (r_mixer) {
83 int pair_idx;
84
85 pair_idx = get_right_pair_idx(mdp5_kms, cur->lm);
86 if (pair_idx < 0)
87 return -EINVAL;
88
89 if (new_state->hwmixer_to_crtc[pair_idx])
90 continue;
91
92 *r_mixer = mdp5_kms->hwmixers[pair_idx];
93 }
94
95 /*
96		 * prefer a pairable LM over an unpairable one. We can
97		 * switch the CRTC from Normal mode to Source Split mode
98		 * without requiring a full modeset if we have already
99		 * assigned this CRTC a pairable LM.
100		 *
101		 * TODO: There will be assignment sequences which would
102		 * result in the CRTC requiring a full modeset, even
103		 * if we have the LM resources to prevent it. For a platform
104		 * with a few displays, we don't run out of pairable LMs
105 * so easily. For now, ignore the possibility of requiring
106 * a full modeset.
107 */
108 if (!(*mixer) || cur->caps & MDP_LM_CAP_PAIR)
109 *mixer = cur;
110 }
111
112 if (!(*mixer))
113 return -ENOMEM;
114
115 if (r_mixer && !(*r_mixer))
116 return -ENOMEM;
117
118 DBG("assigning Layer Mixer %d to crtc %s", (*mixer)->lm, crtc->name);
119
120 new_state->hwmixer_to_crtc[(*mixer)->idx] = crtc;
121 if (r_mixer) {
122 DBG("assigning Right Layer Mixer %d to crtc %s", (*r_mixer)->lm,
123 crtc->name);
124 new_state->hwmixer_to_crtc[(*r_mixer)->idx] = crtc;
125 }
126
127 return 0;
128}
129
130void mdp5_mixer_release(struct drm_atomic_state *s, struct mdp5_hw_mixer *mixer)
131{
132 struct mdp5_state *state = mdp5_get_state(s);
133 struct mdp5_hw_mixer_state *new_state = &state->hwmixer;
134
135 if (!mixer)
136 return;
137
138 if (WARN_ON(!new_state->hwmixer_to_crtc[mixer->idx]))
139 return;
140
141 DBG("%s: release from crtc %s", mixer->name,
142 new_state->hwmixer_to_crtc[mixer->idx]->name);
143
144 new_state->hwmixer_to_crtc[mixer->idx] = NULL;
145}
146
147void mdp5_mixer_destroy(struct mdp5_hw_mixer *mixer)
148{
149 kfree(mixer);
150}
151
152static const char * const mixer_names[] = {
153 "LM0", "LM1", "LM2", "LM3", "LM4", "LM5",
154};
155
156struct mdp5_hw_mixer *mdp5_mixer_init(const struct mdp5_lm_instance *lm)
157{
158 struct mdp5_hw_mixer *mixer;
159
160 mixer = kzalloc(sizeof(*mixer), GFP_KERNEL);
161 if (!mixer)
162 return ERR_PTR(-ENOMEM);
163
164 mixer->name = mixer_names[lm->id];
165 mixer->lm = lm->id;
166 mixer->caps = lm->caps;
167 mixer->pp = lm->pp;
168 mixer->dspp = lm->dspp;
169 mixer->flush_mask = mdp_ctl_flush_mask_lm(lm->id);
170
171 return mixer;
172}
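To make the pairing table concrete, a short sketch of what get_right_pair_idx() resolves, assuming all six LMs are present in the hw config:

	/* lm_right_pair[] is indexed by the left LM number:   */
	/*   lm_right_pair[0] == 1  -> LM0 pairs with LM1      */
	/*   lm_right_pair[2] == 5  -> LM2 pairs with LM5      */
	/*   lm_right_pair[1] == lm_right_pair[5] == -1        */
	/*     -> LM1 and LM5 can only ever be the right half  */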
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.h
new file mode 100644
index 000000000000..9be94f567fbd
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.h
@@ -0,0 +1,47 @@
1/*
2 * Copyright (C) 2017 The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 as published by
6 * the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#ifndef __MDP5_LM_H__
18#define __MDP5_LM_H__
19
20/* represents a hw Layer Mixer, one (or more) of which is dynamically assigned to a CRTC */
21struct mdp5_hw_mixer {
22 int idx;
23
24 const char *name;
25
26 int lm; /* the LM instance # */
27 uint32_t caps;
28 int pp;
29 int dspp;
30
31 uint32_t flush_mask; /* used to commit LM registers */
32};
33
34/* global atomic state of assignment between CRTCs and Layer Mixers: */
35struct mdp5_hw_mixer_state {
36 struct drm_crtc *hwmixer_to_crtc[8];
37};
38
39struct mdp5_hw_mixer *mdp5_mixer_init(const struct mdp5_lm_instance *lm);
40void mdp5_mixer_destroy(struct mdp5_hw_mixer *lm);
41int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc,
42 uint32_t caps, struct mdp5_hw_mixer **mixer,
43 struct mdp5_hw_mixer **r_mixer);
44void mdp5_mixer_release(struct drm_atomic_state *s,
45 struct mdp5_hw_mixer *mixer);
46
47#endif /* __MDP5_LM_H__ */
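A rough caller-side sketch of the new API, assuming a CRTC atomic check that may need a right mixer for source split (need_right_mixer is a hypothetical local; the real call site in mdp5_crtc.c may differ):

	struct mdp5_hw_mixer *mixer = NULL, *r_mixer = NULL;
	uint32_t caps = MDP_LM_CAP_DISPLAY;
	int ret;

	if (need_right_mixer) /* e.g. mode wider than one LM */
		caps |= MDP_LM_CAP_PAIR;

	ret = mdp5_mixer_assign(new_crtc_state->state, crtc, caps,
				&mixer, need_right_mixer ? &r_mixer : NULL);
	if (ret)
		return ret;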
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c
index 35c4dabb0c0c..2bfac3712685 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c
@@ -135,7 +135,5 @@ struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe,
135 hwpipe->caps = caps; 135 hwpipe->caps = caps;
136 hwpipe->flush_mask = mdp_ctl_flush_mask_pipe(pipe); 136 hwpipe->flush_mask = mdp_ctl_flush_mask_pipe(pipe);
137 137
138 spin_lock_init(&hwpipe->pipe_lock);
139
140 return hwpipe; 138 return hwpipe;
141} 139}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h
index 238901987e00..924c3e6f9517 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h
@@ -28,7 +28,6 @@ struct mdp5_hw_pipe {
28 const char *name; 28 const char *name;
29 enum mdp5_pipe pipe; 29 enum mdp5_pipe pipe;
30 30
31 spinlock_t pipe_lock; /* protect REG_MDP5_PIPE_* registers */
32 uint32_t reg_offset; 31 uint32_t reg_offset;
33 uint32_t caps; 32 uint32_t caps;
34 33
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 60a5451ae0b9..a38c5fe6cc19 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -22,6 +22,8 @@
22struct mdp5_plane { 22struct mdp5_plane {
23 struct drm_plane base; 23 struct drm_plane base;
24 24
25 spinlock_t pipe_lock; /* protect REG_MDP5_PIPE_* registers */
26
25 uint32_t nformats; 27 uint32_t nformats;
26 uint32_t formats[32]; 28 uint32_t formats[32];
27}; 29};
@@ -40,9 +42,6 @@ static int mdp5_update_cursor_plane_legacy(struct drm_plane *plane,
40 uint32_t src_w, uint32_t src_h, 42 uint32_t src_w, uint32_t src_h,
41 struct drm_modeset_acquire_ctx *ctx); 43 struct drm_modeset_acquire_ctx *ctx);
42 44
43static void set_scanout_locked(struct drm_plane *plane,
44 struct drm_framebuffer *fb);
45
46static struct mdp5_kms *get_kms(struct drm_plane *plane) 45static struct mdp5_kms *get_kms(struct drm_plane *plane)
47{ 46{
48 struct msm_drm_private *priv = plane->dev->dev_private; 47 struct msm_drm_private *priv = plane->dev->dev_private;
@@ -178,9 +177,14 @@ mdp5_plane_atomic_print_state(struct drm_printer *p,
178 const struct drm_plane_state *state) 177 const struct drm_plane_state *state)
179{ 178{
180 struct mdp5_plane_state *pstate = to_mdp5_plane_state(state); 179 struct mdp5_plane_state *pstate = to_mdp5_plane_state(state);
180 struct mdp5_kms *mdp5_kms = get_kms(state->plane);
181 181
182 drm_printf(p, "\thwpipe=%s\n", pstate->hwpipe ? 182 drm_printf(p, "\thwpipe=%s\n", pstate->hwpipe ?
183 pstate->hwpipe->name : "(null)"); 183 pstate->hwpipe->name : "(null)");
184 if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT)
185 drm_printf(p, "\tright-hwpipe=%s\n",
186 pstate->r_hwpipe ? pstate->r_hwpipe->name :
187 "(null)");
184 drm_printf(p, "\tpremultiplied=%u\n", pstate->premultiplied); 188 drm_printf(p, "\tpremultiplied=%u\n", pstate->premultiplied);
185 drm_printf(p, "\tzpos=%u\n", pstate->zpos); 189 drm_printf(p, "\tzpos=%u\n", pstate->zpos);
186 drm_printf(p, "\talpha=%u\n", pstate->alpha); 190 drm_printf(p, "\talpha=%u\n", pstate->alpha);
@@ -300,7 +304,9 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
300 struct drm_plane_state *old_state = plane->state; 304 struct drm_plane_state *old_state = plane->state;
301 struct mdp5_cfg *config = mdp5_cfg_get_config(get_kms(plane)->cfg); 305 struct mdp5_cfg *config = mdp5_cfg_get_config(get_kms(plane)->cfg);
302 bool new_hwpipe = false; 306 bool new_hwpipe = false;
307 bool need_right_hwpipe = false;
303 uint32_t max_width, max_height; 308 uint32_t max_width, max_height;
309 bool out_of_bounds = false;
304 uint32_t caps = 0; 310 uint32_t caps = 0;
305 struct drm_rect clip; 311 struct drm_rect clip;
306 int min_scale, max_scale; 312 int min_scale, max_scale;
@@ -313,7 +319,23 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
313 max_height = config->hw->lm.max_height << 16; 319 max_height = config->hw->lm.max_height << 16;
314 320
315 /* Make sure source dimensions are within bounds. */ 321 /* Make sure source dimensions are within bounds. */
316 if ((state->src_w > max_width) || (state->src_h > max_height)) { 322 if (state->src_h > max_height)
323 out_of_bounds = true;
324
325 if (state->src_w > max_width) {
326 /* If source split is supported, we can go up to 2x
327 * the max LM width, but we'd need to stage another
328 * hwpipe to the right LM. So, the drm_plane would
329 * consist of 2 hwpipes.
330 */
331 if (config->hw->mdp.caps & MDP_CAP_SRC_SPLIT &&
332 (state->src_w <= 2 * max_width))
333 need_right_hwpipe = true;
334 else
335 out_of_bounds = true;
336 }
337
338 if (out_of_bounds) {
317 struct drm_rect src = drm_plane_state_src(state); 339 struct drm_rect src = drm_plane_state_src(state);
318 DBG("Invalid source size "DRM_RECT_FP_FMT, 340 DBG("Invalid source size "DRM_RECT_FP_FMT,
319 DRM_RECT_FP_ARG(&src)); 341 DRM_RECT_FP_ARG(&src));
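Worked numbers for the bounds check above, assuming a hypothetical LM max width of 2048 pixels (src_w is 16.16 fixed point in the code; the fractional part is dropped here for readability):

	/* src_w <= 2048        -> fits in one hwpipe                  */
	/* 2048 < src_w <= 4096 -> need_right_hwpipe, provided the MDP */
	/*                         advertises MDP_CAP_SRC_SPLIT        */
	/* src_w > 4096         -> out_of_bounds, atomic check fails   */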
@@ -364,6 +386,15 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
364 if (!mdp5_state->hwpipe || (caps & ~mdp5_state->hwpipe->caps)) 386 if (!mdp5_state->hwpipe || (caps & ~mdp5_state->hwpipe->caps))
365 new_hwpipe = true; 387 new_hwpipe = true;
366 388
389 /*
390	 * (re)allocate hw pipe if we're either requesting 2 hw pipes
391 * or we're switching from 2 hw pipes to 1 hw pipe because the
392 * new src_w can be supported by 1 hw pipe itself.
393 */
394 if ((need_right_hwpipe && !mdp5_state->r_hwpipe) ||
395 (!need_right_hwpipe && mdp5_state->r_hwpipe))
396 new_hwpipe = true;
397
367 if (mdp5_kms->smp) { 398 if (mdp5_kms->smp) {
368 const struct mdp_format *format = 399 const struct mdp_format *format =
369 to_mdp_format(msm_framebuffer_format(state->fb)); 400 to_mdp_format(msm_framebuffer_format(state->fb));
@@ -382,13 +413,36 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
382 * it available for other planes? 413 * it available for other planes?
383 */ 414 */
384 struct mdp5_hw_pipe *old_hwpipe = mdp5_state->hwpipe; 415 struct mdp5_hw_pipe *old_hwpipe = mdp5_state->hwpipe;
416 struct mdp5_hw_pipe *old_right_hwpipe =
417 mdp5_state->r_hwpipe;
418
385 mdp5_state->hwpipe = mdp5_pipe_assign(state->state, 419 mdp5_state->hwpipe = mdp5_pipe_assign(state->state,
386 plane, caps, blkcfg); 420 plane, caps, blkcfg);
387 if (IS_ERR(mdp5_state->hwpipe)) { 421 if (IS_ERR(mdp5_state->hwpipe)) {
388 DBG("%s: failed to assign hwpipe!", plane->name); 422 DBG("%s: failed to assign hwpipe!", plane->name);
389 return PTR_ERR(mdp5_state->hwpipe); 423 return PTR_ERR(mdp5_state->hwpipe);
390 } 424 }
425
426 if (need_right_hwpipe) {
427 mdp5_state->r_hwpipe =
428 mdp5_pipe_assign(state->state, plane,
429 caps, blkcfg);
430 if (IS_ERR(mdp5_state->r_hwpipe)) {
431 DBG("%s: failed to assign right hwpipe",
432 plane->name);
433 return PTR_ERR(mdp5_state->r_hwpipe);
434 }
435 } else {
436 /*
437 * set it to NULL so that the driver knows we
438 * don't have a right hwpipe when committing a
439 * new state
440 */
441 mdp5_state->r_hwpipe = NULL;
442 }
443
391 mdp5_pipe_release(state->state, old_hwpipe); 444 mdp5_pipe_release(state->state, old_hwpipe);
445 mdp5_pipe_release(state->state, old_right_hwpipe);
392 } 446 }
393 } 447 }
394 448
@@ -437,13 +491,10 @@ static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = {
437 .atomic_update = mdp5_plane_atomic_update, 491 .atomic_update = mdp5_plane_atomic_update,
438}; 492};
439 493
440static void set_scanout_locked(struct drm_plane *plane, 494static void set_scanout_locked(struct mdp5_kms *mdp5_kms,
441 struct drm_framebuffer *fb) 495 enum mdp5_pipe pipe,
496 struct drm_framebuffer *fb)
442{ 497{
443 struct mdp5_kms *mdp5_kms = get_kms(plane);
444 struct mdp5_hw_pipe *hwpipe = to_mdp5_plane_state(plane->state)->hwpipe;
445 enum mdp5_pipe pipe = hwpipe->pipe;
446
447 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe), 498 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
448 MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) | 499 MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
449 MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1])); 500 MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
@@ -460,8 +511,6 @@ static void set_scanout_locked(struct drm_plane *plane,
460 msm_framebuffer_iova(fb, mdp5_kms->id, 2)); 511 msm_framebuffer_iova(fb, mdp5_kms->id, 2));
461 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe), 512 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
462 msm_framebuffer_iova(fb, mdp5_kms->id, 3)); 513 msm_framebuffer_iova(fb, mdp5_kms->id, 3));
463
464 plane->fb = fb;
465} 514}
466 515
467/* Note: mdp5_plane->pipe_lock must be locked */ 516/* Note: mdp5_plane->pipe_lock must be locked */
@@ -714,21 +763,129 @@ static void mdp5_write_pixel_ext(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe,
714 } 763 }
715} 764}
716 765
766struct pixel_ext {
767 int left[COMP_MAX];
768 int right[COMP_MAX];
769 int top[COMP_MAX];
770 int bottom[COMP_MAX];
771};
772
773struct phase_step {
774 u32 x[COMP_MAX];
775 u32 y[COMP_MAX];
776};
777
778static void mdp5_hwpipe_mode_set(struct mdp5_kms *mdp5_kms,
779 struct mdp5_hw_pipe *hwpipe,
780 struct drm_framebuffer *fb,
781 struct phase_step *step,
782 struct pixel_ext *pe,
783 u32 scale_config, u32 hdecm, u32 vdecm,
784 bool hflip, bool vflip,
785 int crtc_x, int crtc_y,
786 unsigned int crtc_w, unsigned int crtc_h,
787 u32 src_img_w, u32 src_img_h,
788 u32 src_x, u32 src_y,
789 u32 src_w, u32 src_h)
790{
791 enum mdp5_pipe pipe = hwpipe->pipe;
792 bool has_pe = hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT;
793 const struct mdp_format *format =
794 to_mdp_format(msm_framebuffer_format(fb));
795
796 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe),
797 MDP5_PIPE_SRC_IMG_SIZE_WIDTH(src_img_w) |
798 MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(src_img_h));
799
800 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_SIZE(pipe),
801 MDP5_PIPE_SRC_SIZE_WIDTH(src_w) |
802 MDP5_PIPE_SRC_SIZE_HEIGHT(src_h));
803
804 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_XY(pipe),
805 MDP5_PIPE_SRC_XY_X(src_x) |
806 MDP5_PIPE_SRC_XY_Y(src_y));
807
808 mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_SIZE(pipe),
809 MDP5_PIPE_OUT_SIZE_WIDTH(crtc_w) |
810 MDP5_PIPE_OUT_SIZE_HEIGHT(crtc_h));
811
812 mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_XY(pipe),
813 MDP5_PIPE_OUT_XY_X(crtc_x) |
814 MDP5_PIPE_OUT_XY_Y(crtc_y));
815
816 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe),
817 MDP5_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) |
818 MDP5_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) |
819 MDP5_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) |
820 MDP5_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) |
821 COND(format->alpha_enable, MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE) |
822 MDP5_PIPE_SRC_FORMAT_CPP(format->cpp - 1) |
823 MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) |
824 COND(format->unpack_tight, MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT) |
825 MDP5_PIPE_SRC_FORMAT_FETCH_TYPE(format->fetch_type) |
826 MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample));
827
828 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_UNPACK(pipe),
829 MDP5_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) |
830 MDP5_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) |
831 MDP5_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) |
832 MDP5_PIPE_SRC_UNPACK_ELEM3(format->unpack[3]));
833
834 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_OP_MODE(pipe),
835 (hflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_LR : 0) |
836 (vflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_UD : 0) |
837 COND(has_pe, MDP5_PIPE_SRC_OP_MODE_SW_PIX_EXT_OVERRIDE) |
838 MDP5_PIPE_SRC_OP_MODE_BWC(BWC_LOSSLESS));
839
840 /* not using secure mode: */
841 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0);
842
843 if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT)
844 mdp5_write_pixel_ext(mdp5_kms, pipe, format,
845 src_w, pe->left, pe->right,
846 src_h, pe->top, pe->bottom);
847
848 if (hwpipe->caps & MDP_PIPE_CAP_SCALE) {
849 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe),
850 step->x[COMP_0]);
851 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe),
852 step->y[COMP_0]);
853 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(pipe),
854 step->x[COMP_1_2]);
855 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(pipe),
856 step->y[COMP_1_2]);
857 mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe),
858 MDP5_PIPE_DECIMATION_VERT(vdecm) |
859 MDP5_PIPE_DECIMATION_HORZ(hdecm));
860 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe),
861 scale_config);
862 }
863
864 if (hwpipe->caps & MDP_PIPE_CAP_CSC) {
865 if (MDP_FORMAT_IS_YUV(format))
866 csc_enable(mdp5_kms, pipe,
867 mdp_get_default_csc_cfg(CSC_YUV2RGB));
868 else
869 csc_disable(mdp5_kms, pipe);
870 }
871
872 set_scanout_locked(mdp5_kms, pipe, fb);
873}
717 874
718static int mdp5_plane_mode_set(struct drm_plane *plane, 875static int mdp5_plane_mode_set(struct drm_plane *plane,
719 struct drm_crtc *crtc, struct drm_framebuffer *fb, 876 struct drm_crtc *crtc, struct drm_framebuffer *fb,
720 struct drm_rect *src, struct drm_rect *dest) 877 struct drm_rect *src, struct drm_rect *dest)
721{ 878{
879 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
722 struct drm_plane_state *pstate = plane->state; 880 struct drm_plane_state *pstate = plane->state;
723 struct mdp5_hw_pipe *hwpipe = to_mdp5_plane_state(pstate)->hwpipe; 881 struct mdp5_hw_pipe *hwpipe = to_mdp5_plane_state(pstate)->hwpipe;
724 struct mdp5_kms *mdp5_kms = get_kms(plane); 882 struct mdp5_kms *mdp5_kms = get_kms(plane);
725 enum mdp5_pipe pipe = hwpipe->pipe; 883 enum mdp5_pipe pipe = hwpipe->pipe;
884 struct mdp5_hw_pipe *right_hwpipe;
726 const struct mdp_format *format; 885 const struct mdp_format *format;
727 uint32_t nplanes, config = 0; 886 uint32_t nplanes, config = 0;
728 uint32_t phasex_step[COMP_MAX] = {0,}, phasey_step[COMP_MAX] = {0,}; 887 struct phase_step step = { 0 };
729 bool pe = hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT; 888 struct pixel_ext pe = { 0 };
730 int pe_left[COMP_MAX], pe_right[COMP_MAX];
731 int pe_top[COMP_MAX], pe_bottom[COMP_MAX];
732 uint32_t hdecm = 0, vdecm = 0; 889 uint32_t hdecm = 0, vdecm = 0;
733 uint32_t pix_format; 890 uint32_t pix_format;
734 unsigned int rotation; 891 unsigned int rotation;
@@ -737,6 +894,9 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
737 unsigned int crtc_w, crtc_h; 894 unsigned int crtc_w, crtc_h;
738 uint32_t src_x, src_y; 895 uint32_t src_x, src_y;
739 uint32_t src_w, src_h; 896 uint32_t src_w, src_h;
897 uint32_t src_img_w, src_img_h;
898 uint32_t src_x_r;
899 int crtc_x_r;
740 unsigned long flags; 900 unsigned long flags;
741 int ret; 901 int ret;
742 902
@@ -765,23 +925,41 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
765 src_w = src_w >> 16; 925 src_w = src_w >> 16;
766 src_h = src_h >> 16; 926 src_h = src_h >> 16;
767 927
928 src_img_w = min(fb->width, src_w);
929 src_img_h = min(fb->height, src_h);
930
768 DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", plane->name, 931 DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", plane->name,
769 fb->base.id, src_x, src_y, src_w, src_h, 932 fb->base.id, src_x, src_y, src_w, src_h,
770 crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h); 933 crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);
771 934
772 ret = calc_scalex_steps(plane, pix_format, src_w, crtc_w, phasex_step); 935 right_hwpipe = to_mdp5_plane_state(pstate)->r_hwpipe;
936 if (right_hwpipe) {
937 /*
938		 * if the plane comprises 2 hw pipes, assume that the width
939		 * is split equally across them. The only parameters that vary
940		 * between the 2 pipes are src_x and crtc_x
941 */
942 crtc_w /= 2;
943 src_w /= 2;
944 src_img_w /= 2;
945
946 crtc_x_r = crtc_x + crtc_w;
947 src_x_r = src_x + src_w;
948 }
949
950 ret = calc_scalex_steps(plane, pix_format, src_w, crtc_w, step.x);
773 if (ret) 951 if (ret)
774 return ret; 952 return ret;
775 953
776 ret = calc_scaley_steps(plane, pix_format, src_h, crtc_h, phasey_step); 954 ret = calc_scaley_steps(plane, pix_format, src_h, crtc_h, step.y);
777 if (ret) 955 if (ret)
778 return ret; 956 return ret;
779 957
780 if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT) { 958 if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT) {
781 calc_pixel_ext(format, src_w, crtc_w, phasex_step, 959 calc_pixel_ext(format, src_w, crtc_w, step.x,
782 pe_left, pe_right, true); 960 pe.left, pe.right, true);
783 calc_pixel_ext(format, src_h, crtc_h, phasey_step, 961 calc_pixel_ext(format, src_h, crtc_h, step.y,
784 pe_top, pe_bottom, false); 962 pe.top, pe.bottom, false);
785 } 963 }
786 964
787 /* TODO calc hdecm, vdecm */ 965 /* TODO calc hdecm, vdecm */
@@ -798,86 +976,23 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
798 hflip = !!(rotation & DRM_REFLECT_X); 976 hflip = !!(rotation & DRM_REFLECT_X);
799 vflip = !!(rotation & DRM_REFLECT_Y); 977 vflip = !!(rotation & DRM_REFLECT_Y);
800 978
801 spin_lock_irqsave(&hwpipe->pipe_lock, flags); 979 spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
802
803 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe),
804 MDP5_PIPE_SRC_IMG_SIZE_WIDTH(min(fb->width, src_w)) |
805 MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(min(fb->height, src_h)));
806
807 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_SIZE(pipe),
808 MDP5_PIPE_SRC_SIZE_WIDTH(src_w) |
809 MDP5_PIPE_SRC_SIZE_HEIGHT(src_h));
810 980
811 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_XY(pipe), 981 mdp5_hwpipe_mode_set(mdp5_kms, hwpipe, fb, &step, &pe,
812 MDP5_PIPE_SRC_XY_X(src_x) | 982 config, hdecm, vdecm, hflip, vflip,
813 MDP5_PIPE_SRC_XY_Y(src_y)); 983 crtc_x, crtc_y, crtc_w, crtc_h,
984 src_img_w, src_img_h,
985 src_x, src_y, src_w, src_h);
986 if (right_hwpipe)
987 mdp5_hwpipe_mode_set(mdp5_kms, right_hwpipe, fb, &step, &pe,
988 config, hdecm, vdecm, hflip, vflip,
989 crtc_x_r, crtc_y, crtc_w, crtc_h,
990 src_img_w, src_img_h,
991 src_x_r, src_y, src_w, src_h);
814 992
815 mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_SIZE(pipe), 993 spin_unlock_irqrestore(&mdp5_plane->pipe_lock, flags);
816 MDP5_PIPE_OUT_SIZE_WIDTH(crtc_w) |
817 MDP5_PIPE_OUT_SIZE_HEIGHT(crtc_h));
818 994
819 mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_XY(pipe), 995 plane->fb = fb;
820 MDP5_PIPE_OUT_XY_X(crtc_x) |
821 MDP5_PIPE_OUT_XY_Y(crtc_y));
822
823 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe),
824 MDP5_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) |
825 MDP5_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) |
826 MDP5_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) |
827 MDP5_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) |
828 COND(format->alpha_enable, MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE) |
829 MDP5_PIPE_SRC_FORMAT_CPP(format->cpp - 1) |
830 MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) |
831 COND(format->unpack_tight, MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT) |
832 MDP5_PIPE_SRC_FORMAT_FETCH_TYPE(format->fetch_type) |
833 MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample));
834
835 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_UNPACK(pipe),
836 MDP5_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) |
837 MDP5_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) |
838 MDP5_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) |
839 MDP5_PIPE_SRC_UNPACK_ELEM3(format->unpack[3]));
840
841 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_OP_MODE(pipe),
842 (hflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_LR : 0) |
843 (vflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_UD : 0) |
844 COND(pe, MDP5_PIPE_SRC_OP_MODE_SW_PIX_EXT_OVERRIDE) |
845 MDP5_PIPE_SRC_OP_MODE_BWC(BWC_LOSSLESS));
846
847 /* not using secure mode: */
848 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0);
849
850 if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT)
851 mdp5_write_pixel_ext(mdp5_kms, pipe, format,
852 src_w, pe_left, pe_right,
853 src_h, pe_top, pe_bottom);
854
855 if (hwpipe->caps & MDP_PIPE_CAP_SCALE) {
856 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe),
857 phasex_step[COMP_0]);
858 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe),
859 phasey_step[COMP_0]);
860 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(pipe),
861 phasex_step[COMP_1_2]);
862 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(pipe),
863 phasey_step[COMP_1_2]);
864 mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe),
865 MDP5_PIPE_DECIMATION_VERT(vdecm) |
866 MDP5_PIPE_DECIMATION_HORZ(hdecm));
867 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe), config);
868 }
869
870 if (hwpipe->caps & MDP_PIPE_CAP_CSC) {
871 if (MDP_FORMAT_IS_YUV(format))
872 csc_enable(mdp5_kms, pipe,
873 mdp_get_default_csc_cfg(CSC_YUV2RGB));
874 else
875 csc_disable(mdp5_kms, pipe);
876 }
877
878 set_scanout_locked(plane, fb);
879
880 spin_unlock_irqrestore(&hwpipe->pipe_lock, flags);
881 996
882 return ret; 997 return ret;
883} 998}
@@ -934,6 +1049,7 @@ static int mdp5_update_cursor_plane_legacy(struct drm_plane *plane,
934 1049
935 if (new_plane_state->visible) { 1050 if (new_plane_state->visible) {
936 struct mdp5_ctl *ctl; 1051 struct mdp5_ctl *ctl;
1052 struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(crtc);
937 1053
938 ret = mdp5_plane_mode_set(plane, crtc, fb, 1054 ret = mdp5_plane_mode_set(plane, crtc, fb,
939 &new_plane_state->src, 1055 &new_plane_state->src,
@@ -942,7 +1058,7 @@ static int mdp5_update_cursor_plane_legacy(struct drm_plane *plane,
942 1058
943 ctl = mdp5_crtc_get_ctl(crtc); 1059 ctl = mdp5_crtc_get_ctl(crtc);
944 1060
945 mdp5_ctl_commit(ctl, mdp5_plane_get_flush(plane)); 1061 mdp5_ctl_commit(ctl, pipeline, mdp5_plane_get_flush(plane));
946 } 1062 }
947 1063
948 *to_mdp5_plane_state(plane_state) = 1064 *to_mdp5_plane_state(plane_state) =
@@ -959,6 +1075,10 @@ slow:
959 src_x, src_y, src_w, src_h, ctx); 1075 src_x, src_y, src_w, src_h, ctx);
960} 1076}
961 1077
1078/*
1079 * Use this func and the one below only after the atomic state has been
1080 * successfully swapped
1081 */
962enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane) 1082enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
963{ 1083{
964 struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state); 1084 struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
@@ -969,14 +1089,30 @@ enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
969 return pstate->hwpipe->pipe; 1089 return pstate->hwpipe->pipe;
970} 1090}
971 1091
1092enum mdp5_pipe mdp5_plane_right_pipe(struct drm_plane *plane)
1093{
1094 struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
1095
1096 if (!pstate->r_hwpipe)
1097 return SSPP_NONE;
1098
1099 return pstate->r_hwpipe->pipe;
1100}
1101
972uint32_t mdp5_plane_get_flush(struct drm_plane *plane) 1102uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
973{ 1103{
974 struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state); 1104 struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
1105 u32 mask;
975 1106
976 if (WARN_ON(!pstate->hwpipe)) 1107 if (WARN_ON(!pstate->hwpipe))
977 return 0; 1108 return 0;
978 1109
979 return pstate->hwpipe->flush_mask; 1110 mask = pstate->hwpipe->flush_mask;
1111
1112 if (pstate->r_hwpipe)
1113 mask |= pstate->r_hwpipe->flush_mask;
1114
1115 return mask;
980} 1116}
981 1117
982/* initialize plane */ 1118/* initialize plane */
@@ -998,6 +1134,8 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev,
998 mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats, 1134 mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats,
999 ARRAY_SIZE(mdp5_plane->formats), false); 1135 ARRAY_SIZE(mdp5_plane->formats), false);
1000 1136
1137 spin_lock_init(&mdp5_plane->pipe_lock);
1138
1001 if (type == DRM_PLANE_TYPE_CURSOR) 1139 if (type == DRM_PLANE_TYPE_CURSOR)
1002 ret = drm_universal_plane_init(dev, plane, 0xff, 1140 ret = drm_universal_plane_init(dev, plane, 0xff,
1003 &mdp5_cursor_plane_funcs, 1141 &mdp5_cursor_plane_funcs,
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.h b/drivers/gpu/drm/msm/mdp/mdp_kms.h
index 7574cdfef418..1185487e7e5e 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.h
@@ -104,6 +104,7 @@ const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format);
104#define MDP_CAP_SMP BIT(0) /* Shared Memory Pool */ 104#define MDP_CAP_SMP BIT(0) /* Shared Memory Pool */
105#define MDP_CAP_DSC BIT(1) /* VESA Display Stream Compression */ 105#define MDP_CAP_DSC BIT(1) /* VESA Display Stream Compression */
106#define MDP_CAP_CDM BIT(2) /* Chroma Down Module (HDMI 2.0 YUV) */ 106#define MDP_CAP_CDM BIT(2) /* Chroma Down Module (HDMI 2.0 YUV) */
107#define MDP_CAP_SRC_SPLIT BIT(3) /* Source Split of SSPPs */
107 108
108/* MDP pipe capabilities */ 109/* MDP pipe capabilities */
109#define MDP_PIPE_CAP_HFLIP BIT(0) 110#define MDP_PIPE_CAP_HFLIP BIT(0)
@@ -114,6 +115,11 @@ const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format);
114#define MDP_PIPE_CAP_SW_PIX_EXT BIT(5) 115#define MDP_PIPE_CAP_SW_PIX_EXT BIT(5)
115#define MDP_PIPE_CAP_CURSOR BIT(6) 116#define MDP_PIPE_CAP_CURSOR BIT(6)
116 117
118/* MDP layer mixer caps */
119#define MDP_LM_CAP_DISPLAY BIT(0)
120#define MDP_LM_CAP_WB BIT(1)
121#define MDP_LM_CAP_PAIR BIT(2)
122
117static inline bool pipe_supports_yuv(uint32_t pipe_caps) 123static inline bool pipe_supports_yuv(uint32_t pipe_caps)
118{ 124{
119 return (pipe_caps & MDP_PIPE_CAP_SCALE) && 125 return (pipe_caps & MDP_PIPE_CAP_SCALE) &&
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index 4f35d4eb85d0..1855182c76ce 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -28,7 +28,9 @@ static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
28 28
29 if (gpu) { 29 if (gpu) {
30 seq_printf(m, "%s Status:\n", gpu->name); 30 seq_printf(m, "%s Status:\n", gpu->name);
31 pm_runtime_get_sync(&gpu->pdev->dev);
31 gpu->funcs->show(gpu, m); 32 gpu->funcs->show(gpu, m);
33 pm_runtime_put_sync(&gpu->pdev->dev);
32 } 34 }
33 35
34 return 0; 36 return 0;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 9208e67be453..87b5695d4034 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -55,14 +55,13 @@ int msm_register_address_space(struct drm_device *dev,
55 struct msm_gem_address_space *aspace) 55 struct msm_gem_address_space *aspace)
56{ 56{
57 struct msm_drm_private *priv = dev->dev_private; 57 struct msm_drm_private *priv = dev->dev_private;
58 int idx = priv->num_aspaces++;
59 58
60 if (WARN_ON(idx >= ARRAY_SIZE(priv->aspace))) 59 if (WARN_ON(priv->num_aspaces >= ARRAY_SIZE(priv->aspace)))
61 return -EINVAL; 60 return -EINVAL;
62 61
63 priv->aspace[idx] = aspace; 62 priv->aspace[priv->num_aspaces] = aspace;
64 63
65 return idx; 64 return priv->num_aspaces++;
66} 65}
67 66
68#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING 67#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
@@ -265,6 +264,8 @@ static int msm_drm_uninit(struct device *dev)
265 264
266 if (gpu) { 265 if (gpu) {
267 mutex_lock(&ddev->struct_mutex); 266 mutex_lock(&ddev->struct_mutex);
267 // XXX what do we do here?
268 //pm_runtime_enable(&pdev->dev);
268 gpu->funcs->pm_suspend(gpu); 269 gpu->funcs->pm_suspend(gpu);
269 mutex_unlock(&ddev->struct_mutex); 270 mutex_unlock(&ddev->struct_mutex);
270 gpu->funcs->destroy(gpu); 271 gpu->funcs->destroy(gpu);
@@ -539,7 +540,7 @@ static int msm_open(struct drm_device *dev, struct drm_file *file)
539 return 0; 540 return 0;
540} 541}
541 542
542static void msm_preclose(struct drm_device *dev, struct drm_file *file) 543static void msm_postclose(struct drm_device *dev, struct drm_file *file)
543{ 544{
544 struct msm_drm_private *priv = dev->dev_private; 545 struct msm_drm_private *priv = dev->dev_private;
545 struct msm_file_private *ctx = file->driver_priv; 546 struct msm_file_private *ctx = file->driver_priv;
@@ -812,7 +813,7 @@ static struct drm_driver msm_driver = {
812 DRIVER_ATOMIC | 813 DRIVER_ATOMIC |
813 DRIVER_MODESET, 814 DRIVER_MODESET,
814 .open = msm_open, 815 .open = msm_open,
815 .preclose = msm_preclose, 816 .postclose = msm_postclose,
816 .lastclose = msm_lastclose, 817 .lastclose = msm_lastclose,
817 .irq_handler = msm_irq, 818 .irq_handler = msm_irq,
818 .irq_preinstall = msm_irq_preinstall, 819 .irq_preinstall = msm_irq_preinstall,
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index b885c3d5ae4d..28b6f9ba5066 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -191,7 +191,8 @@ void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
191int msm_gem_map_vma(struct msm_gem_address_space *aspace, 191int msm_gem_map_vma(struct msm_gem_address_space *aspace,
192 struct msm_gem_vma *vma, struct sg_table *sgt, int npages); 192 struct msm_gem_vma *vma, struct sg_table *sgt, int npages);
193 193
194void msm_gem_address_space_destroy(struct msm_gem_address_space *aspace); 194void msm_gem_address_space_put(struct msm_gem_address_space *aspace);
195
195struct msm_gem_address_space * 196struct msm_gem_address_space *
196msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain, 197msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
197 const char *name); 198 const char *name);
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 7d529516b332..1b4cf20043ea 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -18,6 +18,7 @@
18#ifndef __MSM_GEM_H__ 18#ifndef __MSM_GEM_H__
19#define __MSM_GEM_H__ 19#define __MSM_GEM_H__
20 20
21#include <linux/kref.h>
21#include <linux/reservation.h> 22#include <linux/reservation.h>
22#include "msm_drv.h" 23#include "msm_drv.h"
23 24
@@ -31,6 +32,7 @@ struct msm_gem_address_space {
31 */ 32 */
32 struct drm_mm mm; 33 struct drm_mm mm;
33 struct msm_mmu *mmu; 34 struct msm_mmu *mmu;
35 struct kref kref;
34}; 36};
35 37
36struct msm_gem_vma { 38struct msm_gem_vma {
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 1172fe7a9252..1c545ebe6a5a 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -404,6 +404,24 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
404 if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS) 404 if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS)
405 return -EINVAL; 405 return -EINVAL;
406 406
407 if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
408 in_fence = sync_file_get_fence(args->fence_fd);
409
410 if (!in_fence)
411 return -EINVAL;
412
413 /* TODO if we get an array-fence due to userspace merging multiple
414 * fences, we need a way to determine if all the backing fences
415 * are from our own context..
416 */
417
418 if (in_fence->context != gpu->fctx->context) {
419 ret = dma_fence_wait(in_fence, true);
420 if (ret)
421 return ret;
422 }
423 }
424
407 ret = mutex_lock_interruptible(&dev->struct_mutex); 425 ret = mutex_lock_interruptible(&dev->struct_mutex);
408 if (ret) 426 if (ret)
409 return ret; 427 return ret;
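Hoisting the explicit-fence handling above mutex_lock_interruptible() means a potentially long dma_fence_wait() on a foreign fence no longer stalls other submitters on struct_mutex. A hedged userspace-side sketch of passing such a fence in (field and flag names per the msm UAPI; buffer/command setup elided):

	struct drm_msm_gem_submit req = {
		.flags = MSM_PIPE_3D0 | MSM_SUBMIT_FENCE_FD_IN,
		.fence_fd = in_fd,	/* fence to wait on first */
		/* ... bos/cmds as usual ... */
	};

	ret = drmCommandWriteRead(fd, DRM_MSM_GEM_SUBMIT,
				  &req, sizeof(req));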
@@ -431,27 +449,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
431 if (ret) 449 if (ret)
432 goto out; 450 goto out;
433 451
434 if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
435 in_fence = sync_file_get_fence(args->fence_fd);
436
437 if (!in_fence) {
438 ret = -EINVAL;
439 goto out;
440 }
441
442 /* TODO if we get an array-fence due to userspace merging multiple
443 * fences, we need a way to determine if all the backing fences
444 * are from our own context..
445 */
446
447 if (in_fence->context != gpu->fctx->context) {
448 ret = dma_fence_wait(in_fence, true);
449 if (ret)
450 goto out;
451 }
452
453 }
454
455 if (!(args->fence & MSM_SUBMIT_NO_IMPLICIT)) { 452 if (!(args->fence & MSM_SUBMIT_NO_IMPLICIT)) {
456 ret = submit_fence_sync(submit); 453 ret = submit_fence_sync(submit);
457 if (ret) 454 if (ret)
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index b654eca7636a..f285d7e210db 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -19,6 +19,25 @@
19#include "msm_gem.h" 19#include "msm_gem.h"
20#include "msm_mmu.h" 20#include "msm_mmu.h"
21 21
22static void
23msm_gem_address_space_destroy(struct kref *kref)
24{
25 struct msm_gem_address_space *aspace = container_of(kref,
26 struct msm_gem_address_space, kref);
27
28 drm_mm_takedown(&aspace->mm);
29 if (aspace->mmu)
30 aspace->mmu->funcs->destroy(aspace->mmu);
31 kfree(aspace);
32}
33
34
35void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
36{
37 if (aspace)
38 kref_put(&aspace->kref, msm_gem_address_space_destroy);
39}
40
22void 41void
23msm_gem_unmap_vma(struct msm_gem_address_space *aspace, 42msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
24 struct msm_gem_vma *vma, struct sg_table *sgt) 43 struct msm_gem_vma *vma, struct sg_table *sgt)
@@ -34,6 +53,8 @@ msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
34 drm_mm_remove_node(&vma->node); 53 drm_mm_remove_node(&vma->node);
35 54
36 vma->iova = 0; 55 vma->iova = 0;
56
57 msm_gem_address_space_put(aspace);
37} 58}
38 59
39int 60int
@@ -57,16 +78,10 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
57 size, IOMMU_READ | IOMMU_WRITE); 78 size, IOMMU_READ | IOMMU_WRITE);
58 } 79 }
59 80
60 return ret; 81 /* Get a reference to the aspace to keep it around */
61} 82 kref_get(&aspace->kref);
62 83
63void 84 return ret;
64msm_gem_address_space_destroy(struct msm_gem_address_space *aspace)
65{
66 drm_mm_takedown(&aspace->mm);
67 if (aspace->mmu)
68 aspace->mmu->funcs->destroy(aspace->mmu);
69 kfree(aspace);
70} 85}
71 86
72struct msm_gem_address_space * 87struct msm_gem_address_space *
@@ -85,5 +100,7 @@ msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
85 drm_mm_init(&aspace->mm, (domain->geometry.aperture_start >> PAGE_SHIFT), 100 drm_mm_init(&aspace->mm, (domain->geometry.aperture_start >> PAGE_SHIFT),
86 (domain->geometry.aperture_end >> PAGE_SHIFT) - 1); 101 (domain->geometry.aperture_end >> PAGE_SHIFT) - 1);
87 102
103 kref_init(&aspace->kref);
104
88 return aspace; 105 return aspace;
89} 106}
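A minimal sketch of the resulting lifecycle, assuming a single buffer mapping; the address space now survives until its last user drops the reference:

	aspace = msm_gem_address_space_create(dev, domain, "gpu");
							/* kref = 1 */
	msm_gem_map_vma(aspace, vma, sgt, npages);	/* kref = 2 */
	...
	msm_gem_unmap_vma(aspace, vma, sgt);		/* kref = 1 */
	msm_gem_address_space_put(aspace);	/* kref = 0, freed  */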
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index af5b6ba4095b..97b9c38c6b3f 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -93,18 +93,18 @@ static int enable_clk(struct msm_gpu *gpu)
93{ 93{
94 int i; 94 int i;
95 95
96 if (gpu->grp_clks[0] && gpu->fast_rate) 96 if (gpu->core_clk && gpu->fast_rate)
97 clk_set_rate(gpu->grp_clks[0], gpu->fast_rate); 97 clk_set_rate(gpu->core_clk, gpu->fast_rate);
98 98
99 /* Set the RBBM timer rate to 19.2Mhz */ 99 /* Set the RBBM timer rate to 19.2Mhz */
100 if (gpu->grp_clks[2]) 100 if (gpu->rbbmtimer_clk)
101 clk_set_rate(gpu->grp_clks[2], 19200000); 101 clk_set_rate(gpu->rbbmtimer_clk, 19200000);
102 102
103 for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--) 103 for (i = gpu->nr_clocks - 1; i >= 0; i--)
104 if (gpu->grp_clks[i]) 104 if (gpu->grp_clks[i])
105 clk_prepare(gpu->grp_clks[i]); 105 clk_prepare(gpu->grp_clks[i]);
106 106
107 for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--) 107 for (i = gpu->nr_clocks - 1; i >= 0; i--)
108 if (gpu->grp_clks[i]) 108 if (gpu->grp_clks[i])
109 clk_enable(gpu->grp_clks[i]); 109 clk_enable(gpu->grp_clks[i]);
110 110
@@ -115,19 +115,24 @@ static int disable_clk(struct msm_gpu *gpu)
115{ 115{
116 int i; 116 int i;
117 117
118 for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--) 118 for (i = gpu->nr_clocks - 1; i >= 0; i--)
119 if (gpu->grp_clks[i]) 119 if (gpu->grp_clks[i])
120 clk_disable(gpu->grp_clks[i]); 120 clk_disable(gpu->grp_clks[i]);
121 121
122 for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--) 122 for (i = gpu->nr_clocks - 1; i >= 0; i--)
123 if (gpu->grp_clks[i]) 123 if (gpu->grp_clks[i])
124 clk_unprepare(gpu->grp_clks[i]); 124 clk_unprepare(gpu->grp_clks[i]);
125 125
126 if (gpu->grp_clks[0] && gpu->slow_rate) 126 /*
127 clk_set_rate(gpu->grp_clks[0], gpu->slow_rate); 127 * Set the clock to a deliberately low rate. On older targets the clock
128	 * speed had to be non-zero to avoid problems. On newer targets this
129 * will be rounded down to zero anyway so it all works out.
130 */
131 if (gpu->core_clk)
132 clk_set_rate(gpu->core_clk, 27000000);
128 133
129 if (gpu->grp_clks[2]) 134 if (gpu->rbbmtimer_clk)
130 clk_set_rate(gpu->grp_clks[2], 0); 135 clk_set_rate(gpu->rbbmtimer_clk, 0);
131 136
132 return 0; 137 return 0;
133} 138}
@@ -152,18 +157,9 @@ static int disable_axi(struct msm_gpu *gpu)
152 157
153int msm_gpu_pm_resume(struct msm_gpu *gpu) 158int msm_gpu_pm_resume(struct msm_gpu *gpu)
154{ 159{
155 struct drm_device *dev = gpu->dev;
156 int ret; 160 int ret;
157 161
158 DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt); 162 DBG("%s", gpu->name);
159
160 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
161
162 if (gpu->active_cnt++ > 0)
163 return 0;
164
165 if (WARN_ON(gpu->active_cnt <= 0))
166 return -EINVAL;
167 163
168 ret = enable_pwrrail(gpu); 164 ret = enable_pwrrail(gpu);
169 if (ret) 165 if (ret)
@@ -177,23 +173,16 @@ int msm_gpu_pm_resume(struct msm_gpu *gpu)
177 if (ret) 173 if (ret)
178 return ret; 174 return ret;
179 175
176 gpu->needs_hw_init = true;
177
180 return 0; 178 return 0;
181} 179}
182 180
183int msm_gpu_pm_suspend(struct msm_gpu *gpu) 181int msm_gpu_pm_suspend(struct msm_gpu *gpu)
184{ 182{
185 struct drm_device *dev = gpu->dev;
186 int ret; 183 int ret;
187 184
188 DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt); 185 DBG("%s", gpu->name);
189
190 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
191
192 if (--gpu->active_cnt > 0)
193 return 0;
194
195 if (WARN_ON(gpu->active_cnt < 0))
196 return -EINVAL;
197 186
 	ret = disable_axi(gpu);
 	if (ret)
@@ -210,53 +199,20 @@ int msm_gpu_pm_suspend(struct msm_gpu *gpu)
 	return 0;
 }
 
-/*
- * Inactivity detection (for suspend):
- */
-
-static void inactive_worker(struct work_struct *work)
-{
-	struct msm_gpu *gpu = container_of(work, struct msm_gpu, inactive_work);
-	struct drm_device *dev = gpu->dev;
-
-	if (gpu->inactive)
-		return;
-
-	DBG("%s: inactive!\n", gpu->name);
-	mutex_lock(&dev->struct_mutex);
-	if (!(msm_gpu_active(gpu) || gpu->inactive)) {
-		disable_axi(gpu);
-		disable_clk(gpu);
-		gpu->inactive = true;
-	}
-	mutex_unlock(&dev->struct_mutex);
-}
-
-static void inactive_handler(unsigned long data)
-{
-	struct msm_gpu *gpu = (struct msm_gpu *)data;
-	struct msm_drm_private *priv = gpu->dev->dev_private;
-
-	queue_work(priv->wq, &gpu->inactive_work);
-}
-
-/* cancel inactive timer and make sure we are awake: */
-static void inactive_cancel(struct msm_gpu *gpu)
-{
-	DBG("%s", gpu->name);
-	del_timer(&gpu->inactive_timer);
-	if (gpu->inactive) {
-		enable_clk(gpu);
-		enable_axi(gpu);
-		gpu->inactive = false;
-	}
-}
-
-static void inactive_start(struct msm_gpu *gpu)
-{
-	DBG("%s", gpu->name);
-	mod_timer(&gpu->inactive_timer,
-			round_jiffies_up(jiffies + DRM_MSM_INACTIVE_JIFFIES));
-}
+int msm_gpu_hw_init(struct msm_gpu *gpu)
+{
+	int ret;
+
+	if (!gpu->needs_hw_init)
+		return 0;
+
+	disable_irq(gpu->irq);
+	ret = gpu->funcs->hw_init(gpu);
+	if (!ret)
+		gpu->needs_hw_init = false;
+	enable_irq(gpu->irq);
+
+	return ret;
+}
 
 /*
@@ -296,8 +252,9 @@ static void recover_worker(struct work_struct *work)
 	/* retire completed submits, plus the one that hung: */
 	retire_submits(gpu);
 
-	inactive_cancel(gpu);
+	pm_runtime_get_sync(&gpu->pdev->dev);
 	gpu->funcs->recover(gpu);
+	pm_runtime_put_sync(&gpu->pdev->dev);
 
 	/* replay the remaining submits after the one that hung: */
 	list_for_each_entry(submit, &gpu->submit_list, node) {
@@ -400,6 +357,8 @@ void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
 {
 	unsigned long flags;
 
+	pm_runtime_get_sync(&gpu->pdev->dev);
+
 	spin_lock_irqsave(&gpu->perf_lock, flags);
 	/* we could dynamically enable/disable perfcntr registers too.. */
 	gpu->last_sample.active = msm_gpu_active(gpu);
@@ -413,6 +372,7 @@ void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
 void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
 {
 	gpu->perfcntr_active = false;
+	pm_runtime_put_sync(&gpu->pdev->dev);
 }
 
 /* returns -errno or # of cntrs sampled */
@@ -458,6 +418,8 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 		drm_gem_object_unreference(&msm_obj->base);
 	}
 
+	pm_runtime_mark_last_busy(&gpu->pdev->dev);
+	pm_runtime_put_autosuspend(&gpu->pdev->dev);
 	msm_gem_submit_free(submit);
 }
 
@@ -492,9 +454,6 @@ static void retire_worker(struct work_struct *work)
 	mutex_lock(&dev->struct_mutex);
 	retire_submits(gpu);
 	mutex_unlock(&dev->struct_mutex);
-
-	if (!msm_gpu_active(gpu))
-		inactive_start(gpu);
 }
 
 /* call from irq handler to schedule work to retire bo's */
@@ -515,7 +474,9 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-	inactive_cancel(gpu);
+	pm_runtime_get_sync(&gpu->pdev->dev);
+
+	msm_gpu_hw_init(gpu);
 
 	list_add_tail(&submit->node, &gpu->submit_list);
 
@@ -559,16 +520,52 @@ static irqreturn_t irq_handler(int irq, void *data)
 	return gpu->funcs->irq(gpu);
 }
 
-static const char *clk_names[] = {
-		"core", "iface", "rbbmtimer", "mem", "mem_iface", "alt_mem_iface",
-};
+static struct clk *get_clock(struct device *dev, const char *name)
+{
+	struct clk *clk = devm_clk_get(dev, name);
+
+	return IS_ERR(clk) ? NULL : clk;
+}
+
+static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
+{
+	struct device *dev = &pdev->dev;
+	struct property *prop;
+	const char *name;
+	int i = 0;
+
+	gpu->nr_clocks = of_property_count_strings(dev->of_node, "clock-names");
+	if (gpu->nr_clocks < 1) {
+		gpu->nr_clocks = 0;
+		return 0;
+	}
+
+	gpu->grp_clks = devm_kcalloc(dev, sizeof(struct clk *), gpu->nr_clocks,
+		GFP_KERNEL);
+	if (!gpu->grp_clks)
+		return -ENOMEM;
+
+	of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
+		gpu->grp_clks[i] = get_clock(dev, name);
+
+		/* Remember the key clocks that we need to control later */
+		if (!strcmp(name, "core"))
+			gpu->core_clk = gpu->grp_clks[i];
+		else if (!strcmp(name, "rbbmtimer"))
+			gpu->rbbmtimer_clk = gpu->grp_clks[i];
+
+		++i;
+	}
+
+	return 0;
+}
 
 int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
 		const char *name, const char *ioname, const char *irqname, int ringsz)
 {
 	struct iommu_domain *iommu;
-	int i, ret;
+	int ret;
 
 	if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
 		gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);
@@ -576,7 +573,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 	gpu->dev = drm;
 	gpu->funcs = funcs;
 	gpu->name = name;
-	gpu->inactive = true;
 	gpu->fctx = msm_fence_context_alloc(drm, name);
 	if (IS_ERR(gpu->fctx)) {
 		ret = PTR_ERR(gpu->fctx);
@@ -586,19 +582,15 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 
 	INIT_LIST_HEAD(&gpu->active_list);
 	INIT_WORK(&gpu->retire_work, retire_worker);
-	INIT_WORK(&gpu->inactive_work, inactive_worker);
 	INIT_WORK(&gpu->recover_work, recover_worker);
 
 	INIT_LIST_HEAD(&gpu->submit_list);
 
-	setup_timer(&gpu->inactive_timer, inactive_handler,
-			(unsigned long)gpu);
 	setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
 			(unsigned long)gpu);
 
 	spin_lock_init(&gpu->perf_lock);
 
-	BUG_ON(ARRAY_SIZE(clk_names) != ARRAY_SIZE(gpu->grp_clks));
 
 	/* Map registers: */
 	gpu->mmio = msm_ioremap(pdev, ioname, name);
@@ -622,13 +614,9 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		goto fail;
 	}
 
-	/* Acquire clocks: */
-	for (i = 0; i < ARRAY_SIZE(clk_names); i++) {
-		gpu->grp_clks[i] = msm_clk_get(pdev, clk_names[i]);
-		DBG("grp_clks[%s]: %p", clk_names[i], gpu->grp_clks[i]);
-		if (IS_ERR(gpu->grp_clks[i]))
-			gpu->grp_clks[i] = NULL;
-	}
+	ret = get_clocks(pdev, gpu);
+	if (ret)
+		goto fail;
 
 	gpu->ebi1_clk = msm_clk_get(pdev, "bus");
 	DBG("ebi1_clk: %p", gpu->ebi1_clk);
@@ -684,6 +672,9 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		goto fail;
 	}
 
+	gpu->pdev = pdev;
+	platform_set_drvdata(pdev, gpu);
+
 	bs_init(gpu);
 
 	return 0;
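The msm_gpu.c hunks above swap the driver's hand-rolled inactive_timer for runtime-PM reference counting: each submit takes a reference, and retire_submit() drops it with autosuspend so the device powers down on its own shortly after the last job retires. Below is a minimal sketch of that pattern; the foo_* names and the 50 ms delay are invented for illustration (the msm probe-side setup lives in adreno_device.c, outside these hunks).

/*
 * Sketch of the runtime-PM autosuspend pattern, under the assumptions
 * stated above.
 */
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

struct foo_gpu {
	struct platform_device *pdev;
};

static void foo_runtime_pm_setup(struct foo_gpu *foo)
{
	struct device *dev = &foo->pdev->dev;

	/* power down automatically ~50 ms after the last reference drops */
	pm_runtime_set_autosuspend_delay(dev, 50);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);
}

/* submit path: take a reference for the duration of the job ... */
static void foo_submit(struct foo_gpu *foo)
{
	pm_runtime_get_sync(&foo->pdev->dev);
	/* ... write ringbuffer commands and kick the GPU here ... */
}

/* ... retire path: drop it and (re)arm the autosuspend timer */
static void foo_retire(struct foo_gpu *foo)
{
	pm_runtime_mark_last_busy(&foo->pdev->dev);
	pm_runtime_put_autosuspend(&foo->pdev->dev);
}

Compared with the deleted inactive_worker/inactive_cancel pair, this moves the idle bookkeeping into the PM core, so the same reference counting also cooperates with system suspend and with other users of the device.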
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index c4c39d3272c7..aa3241000455 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -64,6 +64,7 @@ struct msm_gpu_funcs {
 struct msm_gpu {
 	const char *name;
 	struct drm_device *dev;
+	struct platform_device *pdev;
 	const struct msm_gpu_funcs *funcs;
 
 	/* performance counters (hw & sw): */
@@ -88,9 +89,8 @@ struct msm_gpu {
 	/* fencing: */
 	struct msm_fence_context *fctx;
 
-	/* is gpu powered/active? */
-	int active_cnt;
-	bool inactive;
+	/* does gpu need hw_init? */
+	bool needs_hw_init;
 
 	/* worker for handling active-list retiring: */
 	struct work_struct retire_work;
@@ -103,8 +103,10 @@ struct msm_gpu {
 
 	/* Power Control: */
 	struct regulator *gpu_reg, *gpu_cx;
-	struct clk *ebi1_clk, *grp_clks[6];
-	uint32_t fast_rate, slow_rate, bus_freq;
+	struct clk **grp_clks;
+	int nr_clocks;
+	struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk;
+	uint32_t fast_rate, bus_freq;
 
 #ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
 	struct msm_bus_scale_pdata *bus_scale_table;
@@ -114,9 +116,7 @@ struct msm_gpu {
 	/* Hang and Inactivity Detection:
 	 */
 #define DRM_MSM_INACTIVE_PERIOD  66 /* in ms (roughly four frames) */
-#define DRM_MSM_INACTIVE_JIFFIES msecs_to_jiffies(DRM_MSM_INACTIVE_PERIOD)
-	struct timer_list inactive_timer;
-	struct work_struct inactive_work;
+
 #define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */
 #define DRM_MSM_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD)
 	struct timer_list hangcheck_timer;
@@ -196,6 +196,8 @@ static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
 int msm_gpu_pm_suspend(struct msm_gpu *gpu);
 int msm_gpu_pm_resume(struct msm_gpu *gpu);
 
+int msm_gpu_hw_init(struct msm_gpu *gpu);
+
 void msm_gpu_perfcntr_start(struct msm_gpu *gpu);
 void msm_gpu_perfcntr_stop(struct msm_gpu *gpu);
 int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
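With grp_clks now a dynamically sized array driven by the device tree's clocks/clock-names properties (see get_clocks() in the msm_gpu.c hunk), the enable/disable side naturally becomes a loop over nr_clocks. The sketch below shows how such an array is typically consumed; it is illustrative only, since the actual enable_clk()/disable_clk() changes fall outside the lines shown, and the loop order, rate handling, and DT fragment are assumptions.

/*
 * Device tree supplies the list, e.g. (hypothetical fragment):
 *	clocks = <&gcc GPU_CORE_CLK>, <&gcc GPU_IFACE_CLK>;
 *	clock-names = "core", "iface";
 */
#include <linux/clk.h>

static int foo_enable_clks(struct clk **clks, int nr_clocks,
		struct clk *core_clk, unsigned long fast_rate)
{
	int i, ret;

	/* set the rate only on the clock singled out during lookup */
	if (core_clk && fast_rate)
		clk_set_rate(core_clk, fast_rate);

	for (i = 0; i < nr_clocks; i++) {
		if (!clks[i])
			continue;	/* optional clocks may be absent */
		ret = clk_prepare_enable(clks[i]);
		if (ret)
			goto undo;
	}
	return 0;

undo:
	while (--i >= 0)
		if (clks[i])
			clk_disable_unprepare(clks[i]);
	return ret;
}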
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 7f5779daf5c8..b23d33622f37 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -38,78 +38,47 @@ static int msm_iommu_attach(struct msm_mmu *mmu, const char * const *names,
 		int cnt)
 {
 	struct msm_iommu *iommu = to_msm_iommu(mmu);
-	return iommu_attach_device(iommu->domain, mmu->dev);
+	int ret;
+
+	pm_runtime_get_sync(mmu->dev);
+	ret = iommu_attach_device(iommu->domain, mmu->dev);
+	pm_runtime_put_sync(mmu->dev);
+
+	return ret;
 }
 
 static void msm_iommu_detach(struct msm_mmu *mmu, const char * const *names,
 		int cnt)
 {
 	struct msm_iommu *iommu = to_msm_iommu(mmu);
+
+	pm_runtime_get_sync(mmu->dev);
 	iommu_detach_device(iommu->domain, mmu->dev);
+	pm_runtime_put_sync(mmu->dev);
 }
 
 static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
 		struct sg_table *sgt, unsigned len, int prot)
 {
 	struct msm_iommu *iommu = to_msm_iommu(mmu);
-	struct iommu_domain *domain = iommu->domain;
-	struct scatterlist *sg;
-	unsigned long da = iova;
-	unsigned int i, j;
-	int ret;
-
-	if (!domain || !sgt)
-		return -EINVAL;
-
-	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
-		dma_addr_t pa = sg_phys(sg) - sg->offset;
-		size_t bytes = sg->length + sg->offset;
-
-		VERB("map[%d]: %08lx %08lx(%zx)", i, da, (unsigned long)pa, bytes);
+	size_t ret;
 
-		ret = iommu_map(domain, da, pa, bytes, prot);
-		if (ret)
-			goto fail;
+//	pm_runtime_get_sync(mmu->dev);
+	ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot);
+//	pm_runtime_put_sync(mmu->dev);
+	WARN_ON(ret < 0);
 
-		da += bytes;
-	}
-
-	return 0;
-
-fail:
-	da = iova;
-
-	for_each_sg(sgt->sgl, sg, i, j) {
-		size_t bytes = sg->length + sg->offset;
-		iommu_unmap(domain, da, bytes);
-		da += bytes;
-	}
-	return ret;
+	return (ret == len) ? 0 : -EINVAL;
 }
 
 static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova,
 		struct sg_table *sgt, unsigned len)
 {
 	struct msm_iommu *iommu = to_msm_iommu(mmu);
-	struct iommu_domain *domain = iommu->domain;
-	struct scatterlist *sg;
-	unsigned long da = iova;
-	int i;
-
-	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
-		size_t bytes = sg->length + sg->offset;
-		size_t unmapped;
-
-		unmapped = iommu_unmap(domain, da, bytes);
-		if (unmapped < bytes)
-			return unmapped;
-
-		VERB("unmap[%d]: %08lx(%zx)", i, da, bytes);
-
-		BUG_ON(!PAGE_ALIGNED(bytes));
 
-		da += bytes;
-	}
+	pm_runtime_get_sync(mmu->dev);
+	iommu_unmap(iommu->domain, iova, len);
+	pm_runtime_put_sync(mmu->dev);
 
 	return 0;
 }
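iommu_map_sg() collapses the old per-scatterlist-entry loop (and its hand-rolled failure unwind) into a single call, but it reports progress differently: it returns the number of bytes mapped, not an errno, which is why the new msm_iommu_map() translates the result with (ret == len) ? 0 : -EINVAL. A sketch of that calling convention, against the signature of this kernel era; foo_map_buffer is a placeholder, and the defensive cleanup on a short map is an assumption rather than something the msm code does.

/*
 * size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
 *                     struct scatterlist *sg, unsigned int nents, int prot);
 * returns bytes mapped (0 on failure), so success means "mapped everything".
 */
#include <linux/iommu.h>
#include <linux/scatterlist.h>

static int foo_map_buffer(struct iommu_domain *domain, unsigned long iova,
		struct sg_table *sgt, size_t len)
{
	size_t mapped = iommu_map_sg(domain, iova, sgt->sgl, sgt->nents,
			IOMMU_READ | IOMMU_WRITE);

	if (mapped < len) {
		/* defensively tear down whatever did get mapped */
		iommu_unmap(domain, iova, mapped);
		return -EINVAL;
	}
	return 0;
}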
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 3df7322fd74e..0e81faab2c50 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -322,7 +322,7 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
 	}
 
 	for (i = 0; i < submit->nr_cmds; i++) {
-		uint32_t iova = submit->cmd[i].iova;
+		uint64_t iova = submit->cmd[i].iova;
 		uint32_t szd  = submit->cmd[i].size; /* in dwords */
 
 		/* snapshot cmdstream bo's (if we haven't already): */
@@ -341,7 +341,7 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
 		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
 		case MSM_SUBMIT_CMD_BUF:
 			rd_write_section(rd, RD_CMDSTREAM_ADDR,
-					(uint32_t[2]){ iova, szd }, 8);
+					(uint32_t[3]){ iova, szd, iova >> 32 }, 12);
 			break;
 		}
 	}
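The msm_rd.c change widens the command-stream iova to 64 bits while keeping the rd dump format's 32-bit words: the low word goes out first, then the size, then the high word, so an older parser that only reads two words still sees a sane 32-bit address. A standalone illustration of that packing and how a reader reassembles it; plain userspace C with invented values, not driver code.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t iova = 0x1234abcd5678ull;	/* example GPU address */
	uint32_t szd = 64;			/* payload size in dwords */

	/* writer side: same layout as (uint32_t[3]){ iova, szd, iova >> 32 } */
	uint32_t words[3] = { (uint32_t)iova, szd, (uint32_t)(iova >> 32) };

	/* reader side: reassemble, treating the third word as the high half */
	uint64_t back = ((uint64_t)words[2] << 32) | words[0];

	printf("iova=%#llx size=%u dwords\n",
			(unsigned long long)back, words[1]);
	return back == iova ? 0 : 1;
}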