author	Dave Airlie <airlied@redhat.com>	2014-11-25 07:10:53 -0500
committer	Dave Airlie <airlied@redhat.com>	2014-11-25 07:10:53 -0500
commit	955289c7cfad158dc939e150896a240f549ccc60 (patch)
tree	3c3afeec99f63a52904f55906eb987bf89710612
parent	ed1e8777a56f3523712506d608a29f57ed37b613 (diff)
parent	46df9adb2e7709e56ab8aacaff2fc997a6d17239 (diff)
Merge branch 'msm-next' of git://people.freedesktop.org/~robclark/linux into drm-next

Now that we have the bits needed for mdp5 atomic, here is the followup
pull request I mentioned.  Main highlights are:

1) mdp5 multiple crtc and public plane support (no more hard-coded mixer setup!)
2) mdp5 atomic conversion
3) a couple of atomic helper fixes for issues found during mdp5 atomic
   debug (reviewed by danvet.. but he didn't plan to send an atomic-fixes
   pull request so I agreed to tack them on to mine)

* 'msm-next' of git://people.freedesktop.org/~robclark/linux:
  drm/atomic: shutdown *current* encoder
  drm/atomic: check mode_changed *after* atomic_check
  drm/msm/mdp4: fix mixer setup for multi-crtc + planes
  drm/msm/mdp5: dpms(OFF) cleanups
  drm/msm/mdp5: atomic
  drm/msm: atomic fixes
  drm/msm/mdp5: remove global mdp5_ctl_mgr
  drm/msm/mdp5: don't use void * for opaque types
  drm/msm: add multiple CRTC and overlay support
  drm/msm/mdp5: set rate before enabling clk
  drm/msm/mdp5: introduce mdp5_cfg module
  drm/msm/mdp5: make SMP module dynamically configurable
  drm/msm/hdmi: remove useless kref
  drm/msm/mdp5: get the core clock rate from MDP5 config
  drm/msm/mdp5: use irqdomains
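Background for highlights (1) and (2): the mdp5 atomic conversion below assigns
layer-mixer stages by sorting plane states on zpos and handing out stages
bottom-up (see pstate_cmp() and mdp5_crtc_atomic_check() in the mdp5_crtc.c
hunks). A standalone toy sketch of that scheme follows — not kernel code; the
plane names and zpos values are invented for illustration:

/*
 * Toy illustration of the zpos -> mixer-stage assignment used by
 * mdp5_crtc_atomic_check() below: sort by zpos, then assign
 * STAGE_BASE + i, bottom layer first.
 */
#include <stdio.h>
#include <stdlib.h>

#define STAGE_BASE 0

struct toy_plane_state {
	const char *name;
	int zpos;	/* requested z-order */
	int stage;	/* assigned mixer stage */
};

static int pstate_cmp(const void *a, const void *b)
{
	const struct toy_plane_state *pa = a, *pb = b;
	return pa->zpos - pb->zpos;
}

int main(void)
{
	struct toy_plane_state pstates[] = {
		{ "vig0", 2, 0 }, { "rgb0", 0, 0 }, { "vig1", 1, 0 },
	};
	int i, cnt = sizeof(pstates) / sizeof(pstates[0]);

	qsort(pstates, cnt, sizeof(pstates[0]), pstate_cmp);

	for (i = 0; i < cnt; i++) {
		pstates[i].stage = STAGE_BASE + i;
		printf("%s -> stage %d\n", pstates[i].name, pstates[i].stage);
	}
	return 0;
}

This mirrors the driver's behavior after sort(): the lowest zpos lands on
STAGE_BASE, with each further plane stacked one stage above it.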
-rw-r--r--	drivers/gpu/drm/drm_atomic_helper.c	17
-rw-r--r--	drivers/gpu/drm/msm/Makefile	2
-rw-r--r--	drivers/gpu/drm/msm/hdmi/hdmi.c	57
-rw-r--r--	drivers/gpu/drm/msm/hdmi/hdmi.h	17
-rw-r--r--	drivers/gpu/drm/msm/hdmi/hdmi_bridge.c	3
-rw-r--r--	drivers/gpu/drm/msm/hdmi/hdmi_connector.c	4
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c	70
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h	7
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c	207
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h	91
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c	430
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c	322
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h	122
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c	24
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c	94
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c	262
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h	131
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c	327
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c	241
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h	23
-rw-r--r--	drivers/gpu/drm/msm/msm_atomic.c	2
-rw-r--r--	drivers/gpu/drm/msm/msm_drv.h	1
-rw-r--r--	drivers/gpu/drm/msm/msm_fb.c	2
-rw-r--r--	drivers/gpu/drm/msm/msm_kms.h	20
24 files changed, 1737 insertions(+), 739 deletions(-)
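One more piece of context before the patch: the new mdp5_ctl.c manages CTL
blocks as a fixed pool guarded by a lock (see struct mdp5_ctl_manager and
mdp5_ctl_release() below), with CRTCs requesting a CTL lazily and releasing it
on disable. A minimal userspace sketch of the same allocate/release pattern —
hypothetical code, pthreads standing in for kernel spinlocks:

/*
 * Toy sketch of the "pool of CTLs" pattern from mdp5_ctl.c below:
 * a fixed array of control blocks with a busy flag, handed out and
 * returned under a lock. Build with -lpthread.
 */
#include <pthread.h>
#include <stdio.h>

#define MAX_CTL 8

struct toy_ctl {
	int id;
	int busy;
};

static struct toy_ctl ctls[MAX_CTL];
static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

static struct toy_ctl *ctl_request(void)
{
	struct toy_ctl *ctl = NULL;
	int i;

	pthread_mutex_lock(&pool_lock);
	for (i = 0; i < MAX_CTL; i++) {
		if (!ctls[i].busy) {
			ctls[i].id = i;
			ctls[i].busy = 1;
			ctl = &ctls[i];
			break;
		}
	}
	pthread_mutex_unlock(&pool_lock);
	return ctl;	/* NULL when the pool is exhausted */
}

static void ctl_release(struct toy_ctl *ctl)
{
	pthread_mutex_lock(&pool_lock);
	ctl->busy = 0;
	pthread_mutex_unlock(&pool_lock);
}

int main(void)
{
	struct toy_ctl *ctl = ctl_request();

	if (ctl) {
		printf("CTL %d allocated\n", ctl->id);
		ctl_release(ctl);
	}
	return 0;
}

As in the driver, a request can fail when every CTL is busy — which is why
mdp5_crtc_atomic_check() below WARNs and returns -EINVAL when
mdp5_ctlm_request() comes back NULL.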
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 690360038dc1..a17b8e9c0a81 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -331,7 +331,7 @@ mode_fixup(struct drm_atomic_state *state)
 }
 
 static int
-drm_atomic_helper_check_prepare(struct drm_device *dev,
+drm_atomic_helper_check_modeset(struct drm_device *dev,
 		struct drm_atomic_state *state)
 {
 	int ncrtcs = dev->mode_config.num_crtc;
@@ -428,10 +428,6 @@ int drm_atomic_helper_check(struct drm_device *dev,
 	int ncrtcs = dev->mode_config.num_crtc;
 	int i, ret = 0;
 
-	ret = drm_atomic_helper_check_prepare(dev, state);
-	if (ret)
-		return ret;
-
 	for (i = 0; i < nplanes; i++) {
 		struct drm_plane_helper_funcs *funcs;
 		struct drm_plane *plane = state->planes[i];
@@ -475,6 +471,10 @@ int drm_atomic_helper_check(struct drm_device *dev,
 		}
 	}
 
+	ret = drm_atomic_helper_check_modeset(dev, state);
+	if (ret)
+		return ret;
+
 	return ret;
 }
 EXPORT_SYMBOL(drm_atomic_helper_check);
@@ -499,9 +499,12 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
 		if (!old_conn_state || !old_conn_state->crtc)
 			continue;
 
-		encoder = connector->state->best_encoder;
+		encoder = old_conn_state->best_encoder;
 
-		if (!encoder)
+		/* We shouldn't get this far if we didn't previously have
+		 * an encoder.. but WARN_ON() rather than explode.
+		 */
+		if (WARN_ON(!encoder))
 			continue;
 
 		funcs = encoder->helper_private;
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 0d96132df059..143d988f8add 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -25,6 +25,8 @@ msm-y := \
 	mdp/mdp4/mdp4_irq.o \
 	mdp/mdp4/mdp4_kms.o \
 	mdp/mdp4/mdp4_plane.o \
+	mdp/mdp5/mdp5_cfg.o \
+	mdp/mdp5/mdp5_ctl.o \
 	mdp/mdp5/mdp5_crtc.o \
 	mdp/mdp5/mdp5_encoder.o \
 	mdp/mdp5/mdp5_irq.o \
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 90077619029d..062c68725376 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -15,6 +15,7 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/of_irq.h>
 #include "hdmi.h"
 
 void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
@@ -39,7 +40,7 @@ void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
 			power_on ? "Enable" : "Disable", ctrl);
 }
 
-irqreturn_t hdmi_irq(int irq, void *dev_id)
+static irqreturn_t hdmi_irq(int irq, void *dev_id)
 {
 	struct hdmi *hdmi = dev_id;
 
@@ -54,9 +55,8 @@ irqreturn_t hdmi_irq(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-void hdmi_destroy(struct kref *kref)
+static void hdmi_destroy(struct hdmi *hdmi)
 {
-	struct hdmi *hdmi = container_of(kref, struct hdmi, refcount);
 	struct hdmi_phy *phy = hdmi->phy;
 
 	if (phy)
@@ -84,8 +84,6 @@ static struct hdmi *hdmi_init(struct platform_device *pdev)
 		goto fail;
 	}
 
-	kref_init(&hdmi->refcount);
-
 	hdmi->pdev = pdev;
 	hdmi->config = config;
 
@@ -182,7 +180,7 @@ static struct hdmi *hdmi_init(struct platform_device *pdev)
 
 fail:
 	if (hdmi)
-		hdmi_destroy(&hdmi->refcount);
+		hdmi_destroy(hdmi);
 
 	return ERR_PTR(ret);
 }
@@ -200,7 +198,6 @@ int hdmi_modeset_init(struct hdmi *hdmi,
 {
 	struct msm_drm_private *priv = dev->dev_private;
 	struct platform_device *pdev = hdmi->pdev;
-	struct hdmi_platform_config *config = pdev->dev.platform_data;
 	int ret;
 
 	hdmi->dev = dev;
@@ -224,22 +221,20 @@ int hdmi_modeset_init(struct hdmi *hdmi,
 		goto fail;
 	}
 
-	if (!config->shared_irq) {
-		hdmi->irq = platform_get_irq(pdev, 0);
-		if (hdmi->irq < 0) {
-			ret = hdmi->irq;
-			dev_err(dev->dev, "failed to get irq: %d\n", ret);
-			goto fail;
-		}
+	hdmi->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+	if (hdmi->irq < 0) {
+		ret = hdmi->irq;
+		dev_err(dev->dev, "failed to get irq: %d\n", ret);
+		goto fail;
+	}
 
-		ret = devm_request_threaded_irq(&pdev->dev, hdmi->irq,
-				NULL, hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
-				"hdmi_isr", hdmi);
-		if (ret < 0) {
-			dev_err(dev->dev, "failed to request IRQ%u: %d\n",
-					hdmi->irq, ret);
-			goto fail;
-		}
+	ret = devm_request_irq(&pdev->dev, hdmi->irq,
+			hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+			"hdmi_isr", hdmi);
+	if (ret < 0) {
+		dev_err(dev->dev, "failed to request IRQ%u: %d\n",
+				hdmi->irq, ret);
+		goto fail;
 	}
 
 	encoder->bridge = hdmi->bridge;
@@ -271,12 +266,6 @@ fail:
 
 #include <linux/of_gpio.h>
 
-static void set_hdmi(struct drm_device *dev, struct hdmi *hdmi)
-{
-	struct msm_drm_private *priv = dev->dev_private;
-	priv->hdmi = hdmi;
-}
-
 #ifdef CONFIG_OF
 static int get_gpio(struct device *dev, struct device_node *of_node, const char *name)
 {
@@ -297,6 +286,8 @@ static int get_gpio(struct device *dev, struct device_node *of_node, const char
 
 static int hdmi_bind(struct device *dev, struct device *master, void *data)
 {
+	struct drm_device *drm = dev_get_drvdata(master);
+	struct msm_drm_private *priv = drm->dev_private;
 	static struct hdmi_platform_config config = {};
 	struct hdmi *hdmi;
 #ifdef CONFIG_OF
@@ -318,7 +309,6 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
 		config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
 		config.pwr_clk_names = pwr_clk_names;
 		config.pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names);
-		config.shared_irq = true;
 	} else if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8960")) {
 		static const char *hpd_clk_names[] = {"core_clk", "master_iface_clk", "slave_iface_clk"};
 		static const char *hpd_reg_names[] = {"core-vdda", "hdmi-mux"};
@@ -392,14 +382,19 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
 	hdmi = hdmi_init(to_platform_device(dev));
 	if (IS_ERR(hdmi))
 		return PTR_ERR(hdmi);
-	set_hdmi(dev_get_drvdata(master), hdmi);
+	priv->hdmi = hdmi;
 	return 0;
 }
 
 static void hdmi_unbind(struct device *dev, struct device *master,
 		void *data)
 {
-	set_hdmi(dev_get_drvdata(master), NULL);
+	struct drm_device *drm = dev_get_drvdata(master);
+	struct msm_drm_private *priv = drm->dev_private;
+	if (priv->hdmi) {
+		hdmi_destroy(priv->hdmi);
+		priv->hdmi = NULL;
+	}
 }
 
 static const struct component_ops hdmi_ops = {
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index b981995410b5..43e654f751b7 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -38,8 +38,6 @@ struct hdmi_audio {
 };
 
 struct hdmi {
-	struct kref refcount;
-
 	struct drm_device *dev;
 	struct platform_device *pdev;
 
@@ -97,13 +95,9 @@ struct hdmi_platform_config {
 	/* gpio's: */
 	int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, mux_en_gpio, mux_sel_gpio;
 	int mux_lpm_gpio;
-
-	/* older devices had their own irq, mdp5+ it is shared w/ mdp: */
-	bool shared_irq;
 };
 
 void hdmi_set_mode(struct hdmi *hdmi, bool power_on);
-void hdmi_destroy(struct kref *kref);
 
 static inline void hdmi_write(struct hdmi *hdmi, u32 reg, u32 data)
 {
@@ -115,17 +109,6 @@ static inline u32 hdmi_read(struct hdmi *hdmi, u32 reg)
 	return msm_readl(hdmi->mmio + reg);
 }
 
-static inline struct hdmi * hdmi_reference(struct hdmi *hdmi)
-{
-	kref_get(&hdmi->refcount);
-	return hdmi;
-}
-
-static inline void hdmi_unreference(struct hdmi *hdmi)
-{
-	kref_put(&hdmi->refcount, hdmi_destroy);
-}
-
 /*
  * The phy appears to be different, for example between 8960 and 8x60,
  * so split the phy related functions out and load the correct one at
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index f6cf745c249e..6902ad6da710 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -26,7 +26,6 @@ struct hdmi_bridge {
 static void hdmi_bridge_destroy(struct drm_bridge *bridge)
 {
 	struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
-	hdmi_unreference(hdmi_bridge->hdmi);
 	drm_bridge_cleanup(bridge);
 	kfree(hdmi_bridge);
 }
@@ -218,7 +217,7 @@ struct drm_bridge *hdmi_bridge_init(struct hdmi *hdmi)
 		goto fail;
 	}
 
-	hdmi_bridge->hdmi = hdmi_reference(hdmi);
+	hdmi_bridge->hdmi = hdmi;
 
 	bridge = &hdmi_bridge->base;
 
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index 0aecb2580072..fbebb0405d76 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -330,8 +330,6 @@ static void hdmi_connector_destroy(struct drm_connector *connector)
 	drm_connector_unregister(connector);
 	drm_connector_cleanup(connector);
 
-	hdmi_unreference(hdmi_connector->hdmi);
-
 	kfree(hdmi_connector);
 }
 
@@ -425,7 +423,7 @@ struct drm_connector *hdmi_connector_init(struct hdmi *hdmi)
 		goto fail;
 	}
 
-	hdmi_connector->hdmi = hdmi_reference(hdmi);
+	hdmi_connector->hdmi = hdmi;
 	INIT_WORK(&hdmi_connector->hpd_work, hotplug_work);
 
 	connector = &hdmi_connector->base;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index fef22e8cabb6..6781aa994613 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -167,34 +167,54 @@ static bool mdp4_crtc_mode_fixup(struct drm_crtc *crtc,
 	return true;
 }
 
-static void blend_setup(struct drm_crtc *crtc)
+/* statically (for now) map planes to mixer stage (z-order): */
+static const int idxs[] = {
+		[VG1]  = 1,
+		[VG2]  = 2,
+		[RGB1] = 0,
+		[RGB2] = 0,
+		[RGB3] = 0,
+		[VG3]  = 3,
+		[VG4]  = 4,
+
+};
+
+/* setup mixer config, for which we need to consider all crtc's and
+ * the planes attached to them
+ *
+ * TODO may possibly need some extra locking here
+ */
+static void setup_mixer(struct mdp4_kms *mdp4_kms)
 {
-	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-	struct mdp4_kms *mdp4_kms = get_kms(crtc);
-	struct drm_plane *plane;
-	int i, ovlp = mdp4_crtc->ovlp;
+	struct drm_mode_config *config = &mdp4_kms->dev->mode_config;
+	struct drm_crtc *crtc;
 	uint32_t mixer_cfg = 0;
 	static const enum mdp_mixer_stage_id stages[] = {
 			STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3,
 	};
-	/* statically (for now) map planes to mixer stage (z-order): */
-	static const int idxs[] = {
-			[VG1]  = 1,
-			[VG2]  = 2,
-			[RGB1] = 0,
-			[RGB2] = 0,
-			[RGB3] = 0,
-			[VG3]  = 3,
-			[VG4]  = 4,
 
-	};
-	bool alpha[4]= { false, false, false, false };
+	list_for_each_entry(crtc, &config->crtc_list, head) {
+		struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+		struct drm_plane *plane;
 
-	/* Don't rely on value read back from hw, but instead use our
-	 * own shadowed value.  Possibly disable/reenable looses the
-	 * previous value and goes back to power-on default?
-	 */
-	mixer_cfg = mdp4_kms->mixer_cfg;
+		for_each_plane_on_crtc(crtc, plane) {
+			enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
+			int idx = idxs[pipe_id];
+			mixer_cfg = mixercfg(mixer_cfg, mdp4_crtc->mixer,
+					pipe_id, stages[idx]);
+		}
+	}
+
+	mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
+}
+
+static void blend_setup(struct drm_crtc *crtc)
+{
+	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+	struct mdp4_kms *mdp4_kms = get_kms(crtc);
+	struct drm_plane *plane;
+	int i, ovlp = mdp4_crtc->ovlp;
+	bool alpha[4]= { false, false, false, false };
 
 	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0);
 	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0);
@@ -209,13 +229,8 @@ static void blend_setup(struct drm_crtc *crtc)
 				to_mdp_format(msm_framebuffer_format(plane->fb));
 			alpha[idx-1] = format->alpha_enable;
 		}
-		mixer_cfg = mixercfg(mixer_cfg, mdp4_crtc->mixer,
-				pipe_id, stages[idx]);
 	}
 
-	/* this shouldn't happen.. and seems to cause underflow: */
-	WARN_ON(!mixer_cfg);
-
 	for (i = 0; i < 4; i++) {
 		uint32_t op;
 
@@ -238,8 +253,7 @@ static void blend_setup(struct drm_crtc *crtc)
 		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0);
 	}
 
-	mdp4_kms->mixer_cfg = mixer_cfg;
-	mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
+	setup_mixer(mdp4_kms);
 }
 
 static void mdp4_crtc_mode_set_nofb(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index 770645296f11..cbd77bc626d5 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -32,13 +32,6 @@ struct mdp4_kms {
 
 	int rev;
 
-	/* Shadow value for MDP4_LAYERMIXER_IN_CFG.. since setup for all
-	 * crtcs/encoders is in one shared register, we need to update it
-	 * via read/modify/write.  But to avoid getting confused by power-
-	 * on-default values after resume, use this shadow value instead:
-	 */
-	uint32_t mixer_cfg;
-
 	/* mapper-id used to request GEM buffer mapped for scanout: */
 	int id;
 
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
new file mode 100644
index 000000000000..b0a44310cf2a
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "mdp5_kms.h"
+#include "mdp5_cfg.h"
+
+struct mdp5_cfg_handler {
+	int revision;
+	struct mdp5_cfg config;
+};
+
+/* mdp5_cfg must be exposed (used in mdp5.xml.h) */
+const struct mdp5_cfg_hw *mdp5_cfg = NULL;
+
+const struct mdp5_cfg_hw msm8x74_config = {
+	.name = "msm8x74",
+	.smp = {
+		.mmb_count = 22,
+		.mmb_size = 4096,
+	},
+	.ctl = {
+		.count = 5,
+		.base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+	},
+	.pipe_vig = {
+		.count = 3,
+		.base = { 0x01200, 0x01600, 0x01a00 },
+	},
+	.pipe_rgb = {
+		.count = 3,
+		.base = { 0x01e00, 0x02200, 0x02600 },
+	},
+	.pipe_dma = {
+		.count = 2,
+		.base = { 0x02a00, 0x02e00 },
+	},
+	.lm = {
+		.count = 5,
+		.base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 },
+		.nb_stages = 5,
+	},
+	.dspp = {
+		.count = 3,
+		.base = { 0x04600, 0x04a00, 0x04e00 },
+	},
+	.ad = {
+		.count = 2,
+		.base = { 0x13100, 0x13300 }, /* NOTE: no ad in v1.0 */
+	},
+	.intf = {
+		.count = 4,
+		.base = { 0x12500, 0x12700, 0x12900, 0x12b00 },
+	},
+	.max_clk = 200000000,
+};
+
+const struct mdp5_cfg_hw apq8084_config = {
+	.name = "apq8084",
+	.smp = {
+		.mmb_count = 44,
+		.mmb_size = 8192,
+		.reserved_state[0] = GENMASK(7, 0), /* first 8 MMBs */
+		.reserved[CID_RGB0] = 2,
+		.reserved[CID_RGB1] = 2,
+		.reserved[CID_RGB2] = 2,
+		.reserved[CID_RGB3] = 2,
+	},
+	.ctl = {
+		.count = 5,
+		.base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+	},
+	.pipe_vig = {
+		.count = 4,
+		.base = { 0x01200, 0x01600, 0x01a00, 0x01e00 },
+	},
+	.pipe_rgb = {
+		.count = 4,
+		.base = { 0x02200, 0x02600, 0x02a00, 0x02e00 },
+	},
+	.pipe_dma = {
+		.count = 2,
+		.base = { 0x03200, 0x03600 },
+	},
+	.lm = {
+		.count = 6,
+		.base = { 0x03a00, 0x03e00, 0x04200, 0x04600, 0x04a00, 0x04e00 },
+		.nb_stages = 5,
+	},
+	.dspp = {
+		.count = 4,
+		.base = { 0x05200, 0x05600, 0x05a00, 0x05e00 },
+
+	},
+	.ad = {
+		.count = 3,
+		.base = { 0x13500, 0x13700, 0x13900 },
+	},
+	.intf = {
+		.count = 5,
+		.base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 },
+	},
+	.max_clk = 320000000,
+};
+
+static const struct mdp5_cfg_handler cfg_handlers[] = {
+	{ .revision = 0, .config = { .hw = &msm8x74_config } },
+	{ .revision = 2, .config = { .hw = &msm8x74_config } },
+	{ .revision = 3, .config = { .hw = &apq8084_config } },
+};
+
+
+static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev);
+
+const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_handler)
+{
+	return cfg_handler->config.hw;
+}
+
+struct mdp5_cfg *mdp5_cfg_get_config(struct mdp5_cfg_handler *cfg_handler)
+{
+	return &cfg_handler->config;
+}
+
+int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_handler)
+{
+	return cfg_handler->revision;
+}
+
+void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_handler)
+{
+	kfree(cfg_handler);
+}
+
+struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
+		uint32_t major, uint32_t minor)
+{
+	struct drm_device *dev = mdp5_kms->dev;
+	struct platform_device *pdev = dev->platformdev;
+	struct mdp5_cfg_handler *cfg_handler;
+	struct mdp5_cfg_platform *pconfig;
+	int i, ret = 0;
+
+	cfg_handler = kzalloc(sizeof(*cfg_handler), GFP_KERNEL);
+	if (unlikely(!cfg_handler)) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	if (major != 1) {
+		dev_err(dev->dev, "unexpected MDP major version: v%d.%d\n",
+				major, minor);
+		ret = -ENXIO;
+		goto fail;
+	}
+
+	/* only after mdp5_cfg global pointer's init can we access the hw */
+	for (i = 0; i < ARRAY_SIZE(cfg_handlers); i++) {
+		if (cfg_handlers[i].revision != minor)
+			continue;
+		mdp5_cfg = cfg_handlers[i].config.hw;
+
+		break;
+	}
+	if (unlikely(!mdp5_cfg)) {
+		dev_err(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
+				major, minor);
+		ret = -ENXIO;
+		goto fail;
+	}
+
+	cfg_handler->revision = minor;
+	cfg_handler->config.hw = mdp5_cfg;
+
+	pconfig = mdp5_get_config(pdev);
+	memcpy(&cfg_handler->config.platform, pconfig, sizeof(*pconfig));
+
+	DBG("MDP5: %s hw config selected", mdp5_cfg->name);
+
+	return cfg_handler;
+
+fail:
+	if (cfg_handler)
+		mdp5_cfg_destroy(cfg_handler);
+
+	return NULL;
+}
+
+static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev)
+{
+	static struct mdp5_cfg_platform config = {};
+#ifdef CONFIG_OF
+	/* TODO */
+#endif
+	config.iommu = iommu_domain_alloc(&platform_bus_type);
+
+	return &config;
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
new file mode 100644
index 000000000000..dba4d52cceeb
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDP5_CFG_H__
+#define __MDP5_CFG_H__
+
+#include "msm_drv.h"
+
+/*
+ * mdp5_cfg
+ *
+ * This module configures the dynamic offsets used by mdp5.xml.h
+ * (initialized in mdp5_cfg.c)
+ */
+extern const struct mdp5_cfg_hw *mdp5_cfg;
+
+#define MAX_CTL		8
+#define MAX_BASES	8
+#define MAX_SMP_BLOCKS	44
+#define MAX_CLIENTS	32
+
+typedef DECLARE_BITMAP(mdp5_smp_state_t, MAX_SMP_BLOCKS);
+
+#define MDP5_SUB_BLOCK_DEFINITION \
+	int count; \
+	uint32_t base[MAX_BASES]
+
+struct mdp5_sub_block {
+	MDP5_SUB_BLOCK_DEFINITION;
+};
+
+struct mdp5_lm_block {
+	MDP5_SUB_BLOCK_DEFINITION;
+	uint32_t nb_stages;		/* number of stages per blender */
+};
+
+struct mdp5_smp_block {
+	int mmb_count;			/* number of SMP MMBs */
+	int mmb_size;			/* MMB: size in bytes */
+	mdp5_smp_state_t reserved_state;/* SMP MMBs statically allocated */
+	int reserved[MAX_CLIENTS];	/* # of MMBs allocated per client */
+};
+
+struct mdp5_cfg_hw {
+	char *name;
+
+	struct mdp5_smp_block smp;
+	struct mdp5_sub_block ctl;
+	struct mdp5_sub_block pipe_vig;
+	struct mdp5_sub_block pipe_rgb;
+	struct mdp5_sub_block pipe_dma;
+	struct mdp5_lm_block  lm;
+	struct mdp5_sub_block dspp;
+	struct mdp5_sub_block ad;
+	struct mdp5_sub_block intf;
+
+	uint32_t max_clk;
+};
+
+/* platform config data (ie. from DT, or pdata) */
+struct mdp5_cfg_platform {
+	struct iommu_domain *iommu;
+};
+
+struct mdp5_cfg {
+	const struct mdp5_cfg_hw *hw;
+	struct mdp5_cfg_platform platform;
+};
+
+struct mdp5_kms;
+struct mdp5_cfg_handler;
+
+const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_hnd);
+struct mdp5_cfg *mdp5_cfg_get_config(struct mdp5_cfg_handler *cfg_hnd);
+int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_hnd);
+
+struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
+		uint32_t major, uint32_t minor);
+void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_hnd);
+
+#endif /* __MDP5_CFG_H__ */
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index b7b32c47fd71..0598bdea4ff4 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -17,41 +18,35 @@
 
 #include "mdp5_kms.h"
 
+#include <linux/sort.h>
 #include <drm/drm_mode.h>
 #include "drm_crtc.h"
 #include "drm_crtc_helper.h"
 #include "drm_flip_work.h"
 
+#define SSPP_MAX	(SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */
+
 struct mdp5_crtc {
 	struct drm_crtc base;
 	char name[8];
 	int id;
 	bool enabled;
 
-	/* which mixer/encoder we route output to: */
-	int mixer;
+	/* layer mixer used for this CRTC (+ its lock): */
+#define GET_LM_ID(crtc_id)	((crtc_id == 3) ? 5 : crtc_id)
+	int lm;
+	spinlock_t lm_lock;	/* protect REG_MDP5_LM_* registers */
+
+	/* CTL used for this CRTC: */
+	struct mdp5_ctl *ctl;
 
 	/* if there is a pending flip, these will be non-null: */
 	struct drm_pending_vblank_event *event;
-	struct msm_fence_cb pageflip_cb;
 
 #define PENDING_CURSOR 0x1
 #define PENDING_FLIP   0x2
 	atomic_t pending;
 
-	/* the fb that we logically (from PoV of KMS API) hold a ref
-	 * to.  Which we may not yet be scanning out (we may still
-	 * be scanning out previous in case of page_flip while waiting
-	 * for gpu rendering to complete:
-	 */
-	struct drm_framebuffer *fb;
-
-	/* the fb that we currently hold a scanout ref to: */
-	struct drm_framebuffer *scanout_fb;
-
-	/* for unref'ing framebuffers after scanout completes: */
-	struct drm_flip_work unref_fb_work;
-
 	struct mdp_irq vblank;
 	struct mdp_irq err;
 };
@@ -71,66 +66,38 @@ static void request_pending(struct drm_crtc *crtc, uint32_t pending)
 	mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
 }
 
-static void crtc_flush(struct drm_crtc *crtc)
-{
-	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
-	struct mdp5_kms *mdp5_kms = get_kms(crtc);
-	int id = mdp5_crtc->id;
-	struct drm_plane *plane;
-	uint32_t flush = 0;
-
-	for_each_plane_on_crtc(crtc, plane) {
-		enum mdp5_pipe pipe = mdp5_plane_pipe(plane);
-		flush |= pipe2flush(pipe);
-	}
-
-	flush |= mixer2flush(mdp5_crtc->id);
-	flush |= MDP5_CTL_FLUSH_CTL;
+#define mdp5_lm_get_flush(lm)	mdp_ctl_flush_mask_lm(lm)
 
-	DBG("%s: flush=%08x", mdp5_crtc->name, flush);
-
-	mdp5_write(mdp5_kms, REG_MDP5_CTL_FLUSH(id), flush);
-}
-
-static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb)
+static void crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
-	struct drm_framebuffer *old_fb = mdp5_crtc->fb;
-
-	/* grab reference to incoming scanout fb: */
-	drm_framebuffer_reference(new_fb);
-	mdp5_crtc->base.primary->fb = new_fb;
-	mdp5_crtc->fb = new_fb;
 
-	if (old_fb)
-		drm_flip_work_queue(&mdp5_crtc->unref_fb_work, old_fb);
+	DBG("%s: flush=%08x", mdp5_crtc->name, flush_mask);
+	mdp5_ctl_commit(mdp5_crtc->ctl, flush_mask);
 }
 
-/* unlike update_fb(), take a ref to the new scanout fb *before* updating
- * plane, then call this.  Needed to ensure we don't unref the buffer that
- * is actually still being scanned out.
- *
- * Note that this whole thing goes away with atomic.. since we can defer
- * calling into driver until rendering is done.
+/*
+ * flush updates, to make sure hw is updated to new scanout fb,
+ * so that we can safely queue unref to current fb (ie. next
+ * vblank we know hw is done w/ previous scanout_fb).
  */
-static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
+static void crtc_flush_all(struct drm_crtc *crtc)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct drm_plane *plane;
+	uint32_t flush_mask = 0;
 
-	/* flush updates, to make sure hw is updated to new scanout fb,
-	 * so that we can safely queue unref to current fb (ie. next
-	 * vblank we know hw is done w/ previous scanout_fb).
-	 */
-	crtc_flush(crtc);
-
-	if (mdp5_crtc->scanout_fb)
-		drm_flip_work_queue(&mdp5_crtc->unref_fb_work,
-				mdp5_crtc->scanout_fb);
+	/* we could have already released CTL in the disable path: */
+	if (!mdp5_crtc->ctl)
+		return;
 
-	mdp5_crtc->scanout_fb = fb;
+	for_each_plane_on_crtc(crtc, plane) {
+		flush_mask |= mdp5_plane_get_flush(plane);
+	}
+	flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
+	flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm);
 
-	/* enable vblank to complete flip: */
-	request_pending(crtc, PENDING_FLIP);
+	crtc_flush(crtc, flush_mask);
 }
 
 /* if file!=NULL, this is preclose potential cancel-flip path */
@@ -151,6 +118,7 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
 	 */
 	if (!file || (event->base.file_priv == file)) {
 		mdp5_crtc->event = NULL;
+		DBG("%s: send event: %p", mdp5_crtc->name, event);
 		drm_send_vblank_event(dev, mdp5_crtc->id, event);
 	}
 	}
@@ -160,38 +128,11 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
 		mdp5_plane_complete_flip(plane);
 }
 
-static void pageflip_cb(struct msm_fence_cb *cb)
-{
-	struct mdp5_crtc *mdp5_crtc =
-		container_of(cb, struct mdp5_crtc, pageflip_cb);
-	struct drm_crtc *crtc = &mdp5_crtc->base;
-	struct drm_framebuffer *fb = mdp5_crtc->fb;
-
-	if (!fb)
-		return;
-
-	drm_framebuffer_reference(fb);
-	mdp5_plane_set_scanout(crtc->primary, fb);
-	update_scanout(crtc, fb);
-}
-
-static void unref_fb_worker(struct drm_flip_work *work, void *val)
-{
-	struct mdp5_crtc *mdp5_crtc =
-		container_of(work, struct mdp5_crtc, unref_fb_work);
-	struct drm_device *dev = mdp5_crtc->base.dev;
-
-	mutex_lock(&dev->mode_config.mutex);
-	drm_framebuffer_unreference(val);
-	mutex_unlock(&dev->mode_config.mutex);
-}
-
 static void mdp5_crtc_destroy(struct drm_crtc *crtc)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
 
 	drm_crtc_cleanup(crtc);
-	drm_flip_work_cleanup(&mdp5_crtc->unref_fb_work);
 
 	kfree(mdp5_crtc);
 }
@@ -209,6 +150,8 @@ static void mdp5_crtc_dpms(struct drm_crtc *crtc, int mode)
 		mdp5_enable(mdp5_kms);
 		mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
 	} else {
+		/* set STAGE_UNUSED for all layers */
+		mdp5_ctl_blend(mdp5_crtc->ctl, mdp5_crtc->lm, 0x00000000);
 		mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
 		mdp5_disable(mdp5_kms);
 	}
@@ -223,54 +166,78 @@ static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc,
 	return true;
 }
 
+/*
+ * blend_setup() - blend all the planes of a CRTC
+ *
+ * When border is enabled, the border color will ALWAYS be the base layer.
+ * Therefore, the first plane (private RGB pipe) will start at STAGE0.
+ * If disabled, the first plane starts at STAGE_BASE.
+ *
+ * Note:
+ * Border is not enabled here because the private plane is exactly
+ * the CRTC resolution.
+ */
 static void blend_setup(struct drm_crtc *crtc)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);
-	int id = mdp5_crtc->id;
+	struct drm_plane *plane;
+	const struct mdp5_cfg_hw *hw_cfg;
+	uint32_t lm = mdp5_crtc->lm, blend_cfg = 0;
+	unsigned long flags;
+#define blender(stage)	((stage) - STAGE_BASE)
 
-	/*
-	 * Hard-coded setup for now until I figure out how the
-	 * layer-mixer works
-	 */
+	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
 
-	/* LM[id]: */
-	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(id),
-			MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA);
-	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(id, 0),
-			MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
-			MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL) |
-			MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA);
-	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(id, 0), 0xff);
-	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(id, 0), 0x00);
-
-	/* NOTE: seems that LM[n] and CTL[m], we do not need n==m.. but
-	 * we want to be setting CTL[m].LAYER[n].  Not sure what the
-	 * point of having CTL[m].LAYER[o] (for o!=n).. maybe that is
-	 * used when chaining up mixers for high resolution displays?
-	 */
+	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
+
+	/* ctl could be released already when we are shutting down: */
+	if (!mdp5_crtc->ctl)
+		goto out;
 
-	/* CTL[id]: */
-	mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 0),
-			MDP5_CTL_LAYER_REG_RGB0(STAGE0) |
-			MDP5_CTL_LAYER_REG_BORDER_COLOR);
-	mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 1), 0);
-	mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 2), 0);
-	mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 3), 0);
-	mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 4), 0);
+	for_each_plane_on_crtc(crtc, plane) {
+		enum mdp_mixer_stage_id stage =
+			to_mdp5_plane_state(plane->state)->stage;
+
+		/*
+		 * Note: This cannot happen with current implementation but
+		 * we need to check this condition once z property is added
+		 */
+		BUG_ON(stage > hw_cfg->lm.nb_stages);
+
+		/* LM */
+		mdp5_write(mdp5_kms,
+				REG_MDP5_LM_BLEND_OP_MODE(lm, blender(stage)),
+				MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
+				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST));
+		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
+				blender(stage)), 0xff);
+		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
+				blender(stage)), 0x00);
+		/* CTL */
+		blend_cfg |= mdp_ctl_blend_mask(mdp5_plane_pipe(plane), stage);
+		DBG("%s: blending pipe %s on stage=%d", mdp5_crtc->name,
+				pipe2name(mdp5_plane_pipe(plane)), stage);
+	}
+
+	DBG("%s: lm%d: blend config = 0x%08x", mdp5_crtc->name, lm, blend_cfg);
+	mdp5_ctl_blend(mdp5_crtc->ctl, lm, blend_cfg);
+
+out:
+	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
 }
 
-static int mdp5_crtc_mode_set(struct drm_crtc *crtc,
-		struct drm_display_mode *mode,
-		struct drm_display_mode *adjusted_mode,
-		int x, int y,
-		struct drm_framebuffer *old_fb)
+static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);
-	int ret;
+	unsigned long flags;
+	struct drm_display_mode *mode;
 
-	mode = adjusted_mode;
+	if (WARN_ON(!crtc->state))
+		return;
+
+	mode = &crtc->state->adjusted_mode;
 
 	DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
 			mdp5_crtc->name, mode->base.id, mode->name,
@@ -281,28 +248,11 @@ static int mdp5_crtc_mode_set(struct drm_crtc *crtc,
 			mode->vsync_end, mode->vtotal,
 			mode->type, mode->flags);
 
-	/* grab extra ref for update_scanout() */
-	drm_framebuffer_reference(crtc->primary->fb);
-
-	ret = mdp5_plane_mode_set(crtc->primary, crtc, crtc->primary->fb,
-			0, 0, mode->hdisplay, mode->vdisplay,
-			x << 16, y << 16,
-			mode->hdisplay << 16, mode->vdisplay << 16);
-	if (ret) {
-		drm_framebuffer_unreference(crtc->primary->fb);
-		dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
-				mdp5_crtc->name, ret);
-		return ret;
-	}
-
-	mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->id),
+	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
+	mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->lm),
 			MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) |
 			MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
-
-	update_fb(crtc, crtc->primary->fb);
-	update_scanout(crtc, crtc->primary->fb);
-
-	return 0;
+	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
 }
 
 static void mdp5_crtc_prepare(struct drm_crtc *crtc)
@@ -316,65 +266,119 @@ static void mdp5_crtc_prepare(struct drm_crtc *crtc)
 
 static void mdp5_crtc_commit(struct drm_crtc *crtc)
 {
+	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	DBG("%s", mdp5_crtc->name);
 	mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
-	crtc_flush(crtc);
+	crtc_flush_all(crtc);
 	/* drop the ref to mdp clk's that we got in prepare: */
 	mdp5_disable(get_kms(crtc));
 }
 
-static int mdp5_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
-		struct drm_framebuffer *old_fb)
+static void mdp5_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+struct plane_state {
+	struct drm_plane *plane;
+	struct mdp5_plane_state *state;
+};
+
+static int pstate_cmp(const void *a, const void *b)
+{
+	struct plane_state *pa = (struct plane_state *)a;
+	struct plane_state *pb = (struct plane_state *)b;
+	return pa->state->zpos - pb->state->zpos;
+}
+
+static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
+		struct drm_crtc_state *state)
 {
-	struct drm_plane *plane = crtc->primary;
-	struct drm_display_mode *mode = &crtc->mode;
-	int ret;
-
-	/* grab extra ref for update_scanout() */
-	drm_framebuffer_reference(crtc->primary->fb);
-
-	ret = mdp5_plane_mode_set(plane, crtc, crtc->primary->fb,
-			0, 0, mode->hdisplay, mode->vdisplay,
-			x << 16, y << 16,
-			mode->hdisplay << 16, mode->vdisplay << 16);
-	if (ret) {
-		drm_framebuffer_unreference(crtc->primary->fb);
-		return ret;
+	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct mdp5_kms *mdp5_kms = get_kms(crtc);
+	struct drm_plane *plane;
+	struct drm_device *dev = crtc->dev;
+	struct plane_state pstates[STAGE3 + 1];
+	int cnt = 0, i;
+
+	DBG("%s: check", mdp5_crtc->name);
+
+	if (mdp5_crtc->event) {
+		dev_err(dev->dev, "already pending flip!\n");
+		return -EBUSY;
 	}
 
-	update_fb(crtc, crtc->primary->fb);
-	update_scanout(crtc, crtc->primary->fb);
+	/* request a free CTL, if none is already allocated for this CRTC */
+	if (state->enable && !mdp5_crtc->ctl) {
+		mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc);
+		if (WARN_ON(!mdp5_crtc->ctl))
+			return -EINVAL;
+	}
+
+	/* verify that there are not too many planes attached to crtc
+	 * and that we don't have conflicting mixer stages:
+	 */
+	for_each_pending_plane_on_crtc(state->state, crtc, plane) {
+		struct drm_plane_state *pstate;
+
+		if (cnt >= ARRAY_SIZE(pstates)) {
+			dev_err(dev->dev, "too many planes!\n");
+			return -EINVAL;
+		}
+
+		pstate = state->state->plane_states[drm_plane_index(plane)];
+
+		/* plane might not have changed, in which case take
+		 * current state:
+		 */
+		if (!pstate)
+			pstate = plane->state;
+
+		pstates[cnt].plane = plane;
+		pstates[cnt].state = to_mdp5_plane_state(pstate);
+
+		cnt++;
+	}
+
+	sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
+
+	for (i = 0; i < cnt; i++) {
+		pstates[i].state->stage = STAGE_BASE + i;
+		DBG("%s: assign pipe %s on stage=%d", mdp5_crtc->name,
+				pipe2name(mdp5_plane_pipe(pstates[i].plane)),
+				pstates[i].state->stage);
+	}
 
 	return 0;
 }
 
-static void mdp5_crtc_load_lut(struct drm_crtc *crtc)
+static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc)
 {
+	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	DBG("%s: begin", mdp5_crtc->name);
 }
 
-static int mdp5_crtc_page_flip(struct drm_crtc *crtc,
-		struct drm_framebuffer *new_fb,
-		struct drm_pending_vblank_event *event,
-		uint32_t page_flip_flags)
+static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
-	struct drm_gem_object *obj;
 	unsigned long flags;
 
-	if (mdp5_crtc->event) {
-		dev_err(dev->dev, "already pending flip!\n");
-		return -EBUSY;
-	}
+	DBG("%s: flush", mdp5_crtc->name);
 
-	obj = msm_framebuffer_bo(new_fb, 0);
+	WARN_ON(mdp5_crtc->event);
 
 	spin_lock_irqsave(&dev->event_lock, flags);
-	mdp5_crtc->event = event;
+	mdp5_crtc->event = crtc->state->event;
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
-	update_fb(crtc, new_fb);
+	blend_setup(crtc);
+	crtc_flush_all(crtc);
+	request_pending(crtc, PENDING_FLIP);
 
-	return msm_gem_queue_inactive_cb(obj, &mdp5_crtc->pageflip_cb);
+	if (mdp5_crtc->ctl && !crtc->state->enable) {
+		mdp5_ctl_release(mdp5_crtc->ctl);
+		mdp5_crtc->ctl = NULL;
+	}
 }
 
 static int mdp5_crtc_set_property(struct drm_crtc *crtc,
@@ -385,27 +389,33 @@ static int mdp5_crtc_set_property(struct drm_crtc *crtc,
 }
 
 static const struct drm_crtc_funcs mdp5_crtc_funcs = {
-	.set_config = drm_crtc_helper_set_config,
+	.set_config = drm_atomic_helper_set_config,
 	.destroy = mdp5_crtc_destroy,
-	.page_flip = mdp5_crtc_page_flip,
+	.page_flip = drm_atomic_helper_page_flip,
 	.set_property = mdp5_crtc_set_property,
+	.reset = drm_atomic_helper_crtc_reset,
+	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
 };
 
 static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
 	.dpms = mdp5_crtc_dpms,
 	.mode_fixup = mdp5_crtc_mode_fixup,
-	.mode_set = mdp5_crtc_mode_set,
+	.mode_set_nofb = mdp5_crtc_mode_set_nofb,
+	.mode_set = drm_helper_crtc_mode_set,
+	.mode_set_base = drm_helper_crtc_mode_set_base,
 	.prepare = mdp5_crtc_prepare,
 	.commit = mdp5_crtc_commit,
-	.mode_set_base = mdp5_crtc_mode_set_base,
 	.load_lut = mdp5_crtc_load_lut,
+	.atomic_check = mdp5_crtc_atomic_check,
+	.atomic_begin = mdp5_crtc_atomic_begin,
+	.atomic_flush = mdp5_crtc_atomic_flush,
 };
 
 static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
 {
 	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
 	struct drm_crtc *crtc = &mdp5_crtc->base;
-	struct msm_drm_private *priv = crtc->dev->dev_private;
 	unsigned pending;
 
 	mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);
@@ -414,16 +424,14 @@ static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
 
 	if (pending & PENDING_FLIP) {
 		complete_flip(crtc, NULL);
-		drm_flip_work_commit(&mdp5_crtc->unref_fb_work, priv->wq);
 	}
 }
 
 static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
 {
 	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);
-	struct drm_crtc *crtc = &mdp5_crtc->base;
+
 	DBG("%s: error: %08x", mdp5_crtc->name, irqstatus);
-	crtc_flush(crtc);
 }
 
 uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
@@ -444,10 +452,9 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);
-	static const enum mdp5_intfnum intfnum[] = {
-			INTF0, INTF1, INTF2, INTF3,
-	};
+	uint32_t flush_mask = 0;
 	uint32_t intf_sel;
+	unsigned long flags;
 
 	/* now that we know what irq's we want: */
 	mdp5_crtc->err.irqmask = intf2err(intf);
@@ -457,6 +464,7 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
 	if (!mdp5_kms)
 		return;
 
+	spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
 	intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);
 
 	switch (intf) {
@@ -481,39 +489,25 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
 		break;
 	}
 
-	blend_setup(crtc);
+	mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
+	spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
 
 	DBG("%s: intf_sel=%08x", mdp5_crtc->name, intf_sel);
+	mdp5_ctl_set_intf(mdp5_crtc->ctl, intf);
+	flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
+	flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm);
 
-	mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
-	mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(mdp5_crtc->id),
-			MDP5_CTL_OP_MODE(MODE_NONE) |
-			MDP5_CTL_OP_INTF_NUM(intfnum[intf]));
-
-	crtc_flush(crtc);
+	crtc_flush(crtc, flush_mask);
 }
 
-static void set_attach(struct drm_crtc *crtc, enum mdp5_pipe pipe_id,
-		struct drm_plane *plane)
+int mdp5_crtc_get_lm(struct drm_crtc *crtc)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
 
-	blend_setup(crtc);
-	if (mdp5_crtc->enabled && (plane != crtc->primary))
-		crtc_flush(crtc);
-}
+	if (WARN_ON(!crtc))
+		return -EINVAL;
 
-void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
-{
-	set_attach(crtc, mdp5_plane_pipe(plane), plane);
-}
-
-void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane)
-{
-	/* don't actually detatch our primary plane: */
-	if (crtc->primary == plane)
-		return;
-	set_attach(crtc, mdp5_plane_pipe(plane), NULL);
+	return mdp5_crtc->lm;
 }
 
 /* initialize crtc */
@@ -530,6 +524,9 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
 	crtc = &mdp5_crtc->base;
 
 	mdp5_crtc->id = id;
+	mdp5_crtc->lm = GET_LM_ID(id);
+
+	spin_lock_init(&mdp5_crtc->lm_lock);
 
 	mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
 	mdp5_crtc->err.irq = mdp5_crtc_err_irq;
@@ -537,11 +534,6 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
 	snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d",
 			pipe2name(mdp5_plane_pipe(plane)), id);
 
-	drm_flip_work_init(&mdp5_crtc->unref_fb_work,
-			"unref fb", unref_fb_worker);
-
-	INIT_FENCE_CB(&mdp5_crtc->pageflip_cb, pageflip_cb);
-
 	drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs);
 	drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
 	plane->crtc = crtc;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
new file mode 100644
index 000000000000..dea4505ac963
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
@@ -0,0 +1,322 @@
1/*
2 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include "mdp5_kms.h"
15#include "mdp5_ctl.h"
16
17/*
18 * CTL - MDP Control Pool Manager
19 *
20 * Controls are shared between all CRTCs.
21 *
22 * They are intended to be used for data path configuration.
23 * The top level register programming describes the complete data path for
24 * a specific data path ID - REG_MDP5_CTL_*(<id>, ...)
25 *
26 * Hardware capabilities determine the number of concurrent data paths
27 *
28 * In certain use cases (high-resolution dual pipe), one single CTL can be
29 * shared across multiple CRTCs.
30 *
31 * Because the number of CTLs can be less than the number of CRTCs,
32 * CTLs are dynamically allocated from a pool of CTLs, only once a CRTC is
33 * requested by the client (in mdp5_crtc_mode_set()).
34 */
35
36struct mdp5_ctl {
37 struct mdp5_ctl_manager *ctlm;
38
39 u32 id;
40
41 /* whether this CTL has been allocated or not: */
42 bool busy;
43
44 /* memory output connection (@see mdp5_ctl_mode): */
45 u32 mode;
46
47 /* REG_MDP5_CTL_*(<id>) registers access info + lock: */
48 spinlock_t hw_lock;
49 u32 reg_offset;
50
51 /* flush mask used to commit CTL registers */
52 u32 flush_mask;
53
54 bool cursor_on;
55
56 struct drm_crtc *crtc;
57};
58
59struct mdp5_ctl_manager {
60 struct drm_device *dev;
61
62 /* number of CTL / Layer Mixers in this hw config: */
63 u32 nlm;
64 u32 nctl;
65
66 /* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */
67 spinlock_t pool_lock;
68 struct mdp5_ctl ctls[MAX_CTL];
69};
70
71static inline
72struct mdp5_kms *get_kms(struct mdp5_ctl_manager *ctl_mgr)
73{
74 struct msm_drm_private *priv = ctl_mgr->dev->dev_private;
75
76 return to_mdp5_kms(to_mdp_kms(priv->kms));
77}
78
79static inline
80void ctl_write(struct mdp5_ctl *ctl, u32 reg, u32 data)
81{
82 struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);
83
84 (void)ctl->reg_offset; /* TODO use this instead of mdp5_write */
85 mdp5_write(mdp5_kms, reg, data);
86}
87
88static inline
89u32 ctl_read(struct mdp5_ctl *ctl, u32 reg)
90{
91 struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);
92
93 (void)ctl->reg_offset; /* TODO use this instead of mdp5_read */
94 return mdp5_read(mdp5_kms, reg);
95}
96
97
98int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, enum mdp5_intf intf)
99{
100 unsigned long flags;
101 static const enum mdp5_intfnum intfnum[] = {
102 INTF0, INTF1, INTF2, INTF3,
103 };
104
105 spin_lock_irqsave(&ctl->hw_lock, flags);
106 ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id),
107 MDP5_CTL_OP_MODE(ctl->mode) |
108 MDP5_CTL_OP_INTF_NUM(intfnum[intf]));
109 spin_unlock_irqrestore(&ctl->hw_lock, flags);
110
111 return 0;
112}
113
114int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable)
115{
116 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
117 unsigned long flags;
118 u32 blend_cfg;
119 int lm;
120
121 lm = mdp5_crtc_get_lm(ctl->crtc);
122 if (unlikely(WARN_ON(lm < 0))) {
123 dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d\n",
124 ctl->id, lm);
125 return -EINVAL;
126 }
127
128 spin_lock_irqsave(&ctl->hw_lock, flags);
129
130 blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm));
131
132 if (enable)
133 blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
134 else
135 blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;
136
137 ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
138
139 spin_unlock_irqrestore(&ctl->hw_lock, flags);
140
141 ctl->cursor_on = enable;
142
143 return 0;
144}
145
146
147int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg)
148{
149 unsigned long flags;
150
151 if (ctl->cursor_on)
152 blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
153 else
154 blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;
155
156 spin_lock_irqsave(&ctl->hw_lock, flags);
157 ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
158 spin_unlock_irqrestore(&ctl->hw_lock, flags);
159
160 return 0;
161}
162
163int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
164{
165 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
166 unsigned long flags;
167
168 if (flush_mask & MDP5_CTL_FLUSH_CURSOR_DUMMY) {
169 int lm = mdp5_crtc_get_lm(ctl->crtc);
170
171 if (unlikely(WARN_ON(lm < 0))) {
172 dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d\n",
173 ctl->id, lm);
174 return -EINVAL;
175 }
176
177 /* for current targets, cursor bit is the same as LM bit */
178 flush_mask |= mdp_ctl_flush_mask_lm(lm);
179 }
180
181 spin_lock_irqsave(&ctl->hw_lock, flags);
182 ctl_write(ctl, REG_MDP5_CTL_FLUSH(ctl->id), flush_mask);
183 spin_unlock_irqrestore(&ctl->hw_lock, flags);
184
185 return 0;
186}
187
188u32 mdp5_ctl_get_flush(struct mdp5_ctl *ctl)
189{
190 return ctl->flush_mask;
191}
192
193void mdp5_ctl_release(struct mdp5_ctl *ctl)
194{
195 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
196 unsigned long flags;
197
198 if (unlikely(WARN_ON(ctl->id >= MAX_CTL) || !ctl->busy)) {
199 dev_err(ctl_mgr->dev->dev, "CTL %d in bad state (%d)\n",
200 ctl->id, ctl->busy);
201 return;
202 }
203
204 spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
205 ctl->busy = false;
206 spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
207
208 DBG("CTL %d released", ctl->id);
209}
210
211/*
212 * mdp5_ctlm_request() - CTL dynamic allocation
213 *
214 * Note: the current implementation assumes only one CRTC per CTL
215 *
216 * @return first free CTL
217 */
218struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
219 struct drm_crtc *crtc)
220{
221 struct mdp5_ctl *ctl = NULL;
222 unsigned long flags;
223 int c;
224
225 spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
226
227 for (c = 0; c < ctl_mgr->nctl; c++)
228 if (!ctl_mgr->ctls[c].busy)
229 break;
230
231 if (unlikely(c >= ctl_mgr->nctl)) {
232 dev_err(ctl_mgr->dev->dev, "No more CTL available!\n");
233 goto unlock;
234 }
235
236 ctl = &ctl_mgr->ctls[c];
237
238 ctl->crtc = crtc;
239 ctl->busy = true;
240 DBG("CTL %d allocated", ctl->id);
241
242unlock:
243 spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
244 return ctl;
245}
246
247void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctl_mgr)
248{
249 unsigned long flags;
250 int c;
251
252 for (c = 0; c < ctl_mgr->nctl; c++) {
253 struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];
254
255 spin_lock_irqsave(&ctl->hw_lock, flags);
256 ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), 0);
257 spin_unlock_irqrestore(&ctl->hw_lock, flags);
258 }
259}
260
261void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr)
262{
263 kfree(ctl_mgr);
264}
265
266struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
267 void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg)
268{
269 struct mdp5_ctl_manager *ctl_mgr;
270 const struct mdp5_sub_block *ctl_cfg = &hw_cfg->ctl;
271 unsigned long flags;
272 int c, ret;
273
274 ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL);
275 if (!ctl_mgr) {
276 dev_err(dev->dev, "failed to allocate CTL manager\n");
277 ret = -ENOMEM;
278 goto fail;
279 }
280
281 if (unlikely(WARN_ON(ctl_cfg->count > MAX_CTL))) {
282 dev_err(dev->dev, "Increase static pool size to at least %d\n",
283 ctl_cfg->count);
284 ret = -ENOSPC;
285 goto fail;
286 }
287
288 /* initialize the CTL manager: */
289 ctl_mgr->dev = dev;
290 ctl_mgr->nlm = hw_cfg->lm.count;
291 ctl_mgr->nctl = ctl_cfg->count;
292 spin_lock_init(&ctl_mgr->pool_lock);
293
294 /* initialize each CTL of the pool: */
295 spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
296 for (c = 0; c < ctl_mgr->nctl; c++) {
297 struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];
298
299 if (WARN_ON(!ctl_cfg->base[c])) {
300 dev_err(dev->dev, "CTL_%d: base is null!\n", c);
301 ret = -EINVAL;
302 goto fail;
303 }
304 ctl->ctlm = ctl_mgr;
305 ctl->id = c;
306 ctl->mode = MODE_NONE;
307 ctl->reg_offset = ctl_cfg->base[c];
308 ctl->flush_mask = MDP5_CTL_FLUSH_CTL;
309 ctl->busy = false;
310 spin_lock_init(&ctl->hw_lock);
311 }
312 spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
313 DBG("Pool of %d CTLs created.", ctl_mgr->nctl);
314
315 return ctl_mgr;
316
317fail:
318 if (ctl_mgr)
319 mdp5_ctlm_destroy(ctl_mgr);
320
321 return ERR_PTR(ret);
322}
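
The new CTL manager reduces each CRTC's interaction with the control path to a
request/program/commit/release lifecycle. A minimal sketch of how a caller such
as mdp5_crtc_mode_set() is expected to drive the entry points added above
(error handling trimmed; the intf, lm and blend_cfg values are assumed to be
supplied by the caller):

    static int crtc_setup_ctl(struct mdp5_ctl_manager *ctlm,
    		struct drm_crtc *crtc, enum mdp5_intf intf,
    		u32 lm, u32 blend_cfg)
    {
    	struct mdp5_ctl *ctl;

    	/* grab a free CTL from the pool (NULL if none left): */
    	ctl = mdp5_ctlm_request(ctlm, crtc);
    	if (!ctl)
    		return -EBUSY;

    	mdp5_ctl_set_intf(ctl, intf);		/* route data path to the interface */
    	mdp5_ctl_blend(ctl, lm, blend_cfg);	/* program the layer mixer config */

    	/* latch the CTL programming itself with its own flush bit: */
    	return mdp5_ctl_commit(ctl, mdp5_ctl_get_flush(ctl));
    }

On teardown the CRTC hands the CTL back with mdp5_ctl_release(ctl), which only
clears the busy flag under pool_lock.
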
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
new file mode 100644
index 000000000000..1018519b6af2
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
@@ -0,0 +1,122 @@
1/*
2 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef __MDP5_CTL_H__
15#define __MDP5_CTL_H__
16
17#include "msm_drv.h"
18
19/*
20 * CTL Manager prototypes:
21 * mdp5_ctlm_init() returns a ctlm (CTL Manager) handle,
22 * which is then used to call the other mdp5_ctlm_*(ctlm, ...) functions.
23 */
24struct mdp5_ctl_manager;
25struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
26 void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg);
27void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctlm);
28void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm);
29
30/*
31 * CTL prototypes:
32 * mdp5_ctlm_request(ctlm, ...) returns a ctl (CTL resource) handle,
33 * which is then used to call the other mdp5_ctl_*(ctl, ...) functions.
34 */
35struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, struct drm_crtc *crtc);
36
37int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, enum mdp5_intf intf);
38
39int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable);
40
41/* @blend_cfg: see LM blender config definition below */
42int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg);
43
44/* @flush_mask: see CTL flush masks definitions below */
45int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask);
46u32 mdp5_ctl_get_flush(struct mdp5_ctl *ctl);
47
48void mdp5_ctl_release(struct mdp5_ctl *ctl);
49
50/*
51 * blend_cfg (LM blender config):
52 *
53 * The function below allows the caller of mdp5_ctl_blend() to specify how
54 * pipes are blended according to their stage (z-order), via the @blend_cfg arg.
55 */
56static inline u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
57 enum mdp_mixer_stage_id stage)
58{
59 switch (pipe) {
60 case SSPP_VIG0: return MDP5_CTL_LAYER_REG_VIG0(stage);
61 case SSPP_VIG1: return MDP5_CTL_LAYER_REG_VIG1(stage);
62 case SSPP_VIG2: return MDP5_CTL_LAYER_REG_VIG2(stage);
63 case SSPP_RGB0: return MDP5_CTL_LAYER_REG_RGB0(stage);
64 case SSPP_RGB1: return MDP5_CTL_LAYER_REG_RGB1(stage);
65 case SSPP_RGB2: return MDP5_CTL_LAYER_REG_RGB2(stage);
66 case SSPP_DMA0: return MDP5_CTL_LAYER_REG_DMA0(stage);
67 case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage);
68 case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage);
69 case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage);
70 default: return 0;
71 }
72}
73
74/*
75 * flush_mask (CTL flush masks):
76 *
77 * The following functions allow each DRM entity to get and store
78 * its own flush mask.
79 * Once stored, these masks are accessed through each DRM entity's
80 * interface and used by the caller of mdp5_ctl_commit() to specify
81 * which block(s) need to be flushed through the @flush_mask parameter.
82 */
83
84#define MDP5_CTL_FLUSH_CURSOR_DUMMY 0x80000000
85
86static inline u32 mdp_ctl_flush_mask_cursor(int cursor_id)
87{
88 /* TODO: use id once multiple cursor support is present */
89 (void)cursor_id;
90
91 return MDP5_CTL_FLUSH_CURSOR_DUMMY;
92}
93
94static inline u32 mdp_ctl_flush_mask_lm(int lm)
95{
96 switch (lm) {
97 case 0: return MDP5_CTL_FLUSH_LM0;
98 case 1: return MDP5_CTL_FLUSH_LM1;
99 case 2: return MDP5_CTL_FLUSH_LM2;
100 case 5: return MDP5_CTL_FLUSH_LM5;
101 default: return 0;
102 }
103}
104
105static inline u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe)
106{
107 switch (pipe) {
108 case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
109 case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
110 case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
111 case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
112 case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
113 case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
114 case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
115 case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
116 case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
117 case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
118 default: return 0;
119 }
120}
121
122#endif /* __MDP5_CTL_H__ */
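
The inline helpers above are designed to be OR-ed together by callers. A
hypothetical composition for a single mixer with one base RGB pipe and one VIG
overlay (the stage names come from the generated mdp headers; ctl is assumed
to have been obtained via mdp5_ctlm_request()):

    u32 blend_cfg = mdp_ctl_blend_mask(SSPP_RGB0, STAGE_BASE) |
    		mdp_ctl_blend_mask(SSPP_VIG0, STAGE0);

    u32 flush_mask = mdp_ctl_flush_mask_pipe(SSPP_RGB0) |
    		 mdp_ctl_flush_mask_pipe(SSPP_VIG0) |
    		 mdp_ctl_flush_mask_lm(0);	/* mixer 0 */

    mdp5_ctl_blend(ctl, 0, blend_cfg);	/* stage both pipes on LM 0 */
    mdp5_ctl_commit(ctl, flush_mask);	/* flush pipes + mixer together */
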
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index edec7bfaa952..0254bfdeb92f 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -24,6 +24,7 @@ struct mdp5_encoder {
24 struct drm_encoder base; 24 struct drm_encoder base;
25 int intf; 25 int intf;
26 enum mdp5_intf intf_id; 26 enum mdp5_intf intf_id;
27 spinlock_t intf_lock; /* protect REG_MDP5_INTF_* registers */
27 bool enabled; 28 bool enabled;
28 uint32_t bsc; 29 uint32_t bsc;
29}; 30};
@@ -115,6 +116,7 @@ static void mdp5_encoder_dpms(struct drm_encoder *encoder, int mode)
115 struct mdp5_kms *mdp5_kms = get_kms(encoder); 116 struct mdp5_kms *mdp5_kms = get_kms(encoder);
116 int intf = mdp5_encoder->intf; 117 int intf = mdp5_encoder->intf;
117 bool enabled = (mode == DRM_MODE_DPMS_ON); 118 bool enabled = (mode == DRM_MODE_DPMS_ON);
119 unsigned long flags;
118 120
119 DBG("mode=%d", mode); 121 DBG("mode=%d", mode);
120 122
@@ -123,9 +125,24 @@ static void mdp5_encoder_dpms(struct drm_encoder *encoder, int mode)
123 125
124 if (enabled) { 126 if (enabled) {
125 bs_set(mdp5_encoder, 1); 127 bs_set(mdp5_encoder, 1);
128 spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
126 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1); 129 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1);
130 spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
127 } else { 131 } else {
132 spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
128 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 0); 133 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 0);
134 spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
135
136 /*
137 * Wait for a vsync so we know the ENABLE=0 write latched before
138 * the (connector) source of the vsyncs gets disabled,
139 * otherwise we end up in a funny state if we re-enable
140 * before the disable latches, with the result that some of
141 * the settings for the new modeset (like the new
142 * scanout buffer) don't latch properly..
143 */
144 mdp_irq_wait(&mdp5_kms->base, intf2vblank(intf));
145
129 bs_set(mdp5_encoder, 0); 146 bs_set(mdp5_encoder, 0);
130 } 147 }
131 148
@@ -150,6 +167,7 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
150 uint32_t display_v_start, display_v_end; 167 uint32_t display_v_start, display_v_end;
151 uint32_t hsync_start_x, hsync_end_x; 168 uint32_t hsync_start_x, hsync_end_x;
152 uint32_t format; 169 uint32_t format;
170 unsigned long flags;
153 171
154 mode = adjusted_mode; 172 mode = adjusted_mode;
155 173
@@ -180,6 +198,8 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
180 display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew; 198 display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew;
181 display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1; 199 display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1;
182 200
201 spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
202
183 mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_CTL(intf), 203 mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_CTL(intf),
184 MDP5_INTF_HSYNC_CTL_PULSEW(mode->hsync_end - mode->hsync_start) | 204 MDP5_INTF_HSYNC_CTL_PULSEW(mode->hsync_end - mode->hsync_start) |
185 MDP5_INTF_HSYNC_CTL_PERIOD(mode->htotal)); 205 MDP5_INTF_HSYNC_CTL_PERIOD(mode->htotal));
@@ -201,6 +221,8 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
201 mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_VEND_F0(intf), 0); 221 mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_VEND_F0(intf), 0);
202 mdp5_write(mdp5_kms, REG_MDP5_INTF_PANEL_FORMAT(intf), format); 222 mdp5_write(mdp5_kms, REG_MDP5_INTF_PANEL_FORMAT(intf), format);
203 mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(intf), 0x3); /* frame+line? */ 223 mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(intf), 0x3); /* frame+line? */
224
225 spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
204} 226}
205 227
206static void mdp5_encoder_prepare(struct drm_encoder *encoder) 228static void mdp5_encoder_prepare(struct drm_encoder *encoder)
@@ -242,6 +264,8 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, int intf,
242 mdp5_encoder->intf_id = intf_id; 264 mdp5_encoder->intf_id = intf_id;
243 encoder = &mdp5_encoder->base; 265 encoder = &mdp5_encoder->base;
244 266
267 spin_lock_init(&mdp5_encoder->intf_lock);
268
245 drm_encoder_init(dev, encoder, &mdp5_encoder_funcs, 269 drm_encoder_init(dev, encoder, &mdp5_encoder_funcs,
246 DRM_MODE_ENCODER_TMDS); 270 DRM_MODE_ENCODER_TMDS);
247 drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs); 271 drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs);
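
The dpms(OFF) path above encodes a subtle ordering requirement: the ENABLE=0
write only takes effect at vsync, so the encoder must wait one vblank before
the vsync source itself goes away. Condensed to its essentials, the disable
sequence is (a sketch; identifiers as in the patch):

    spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
    mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 0);
    spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);

    /* ENABLE=0 latches at vsync: wait for one before tearing down */
    mdp_irq_wait(&mdp5_kms->base, intf2vblank(intf));
    bs_set(mdp5_encoder, 0);	/* now safe to drop the bus bandwidth vote */
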
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
index 812c59bbaf7f..70ac81edd40f 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -15,6 +15,8 @@
15 * this program. If not, see <http://www.gnu.org/licenses/>. 15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */ 16 */
17 17
18#include <linux/irqdomain.h>
19#include <linux/irq.h>
18 20
19#include "msm_drv.h" 21#include "msm_drv.h"
20#include "mdp5_kms.h" 22#include "mdp5_kms.h"
@@ -82,18 +84,23 @@ irqreturn_t mdp5_irq(struct msm_kms *kms)
82{ 84{
83 struct mdp_kms *mdp_kms = to_mdp_kms(kms); 85 struct mdp_kms *mdp_kms = to_mdp_kms(kms);
84 struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms); 86 struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
85 struct msm_drm_private *priv = mdp5_kms->dev->dev_private;
86 uint32_t intr; 87 uint32_t intr;
87 88
88 intr = mdp5_read(mdp5_kms, REG_MDP5_HW_INTR_STATUS); 89 intr = mdp5_read(mdp5_kms, REG_MDP5_HW_INTR_STATUS);
89 90
90 VERB("intr=%08x", intr); 91 VERB("intr=%08x", intr);
91 92
92 if (intr & MDP5_HW_INTR_STATUS_INTR_MDP) 93 if (intr & MDP5_HW_INTR_STATUS_INTR_MDP) {
93 mdp5_irq_mdp(mdp_kms); 94 mdp5_irq_mdp(mdp_kms);
95 intr &= ~MDP5_HW_INTR_STATUS_INTR_MDP;
96 }
94 97
95 if (intr & MDP5_HW_INTR_STATUS_INTR_HDMI) 98 while (intr) {
96 hdmi_irq(0, priv->hdmi); 99 irq_hw_number_t hwirq = fls(intr) - 1;
100 generic_handle_irq(irq_find_mapping(
101 mdp5_kms->irqcontroller.domain, hwirq));
102 intr &= ~(1 << hwirq);
103 }
97 104
98 return IRQ_HANDLED; 105 return IRQ_HANDLED;
99} 106}
@@ -110,3 +117,82 @@ void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
110 mdp_update_vblank_mask(to_mdp_kms(kms), 117 mdp_update_vblank_mask(to_mdp_kms(kms),
111 mdp5_crtc_vblank(crtc), false); 118 mdp5_crtc_vblank(crtc), false);
112} 119}
120
121/*
122 * interrupt-controller implementation, so sub-blocks (hdmi/eDP/dsi/etc)
123 * can register to get their irqs delivered
124 */
125
126#define VALID_IRQS (MDP5_HW_INTR_STATUS_INTR_DSI0 | \
127 MDP5_HW_INTR_STATUS_INTR_DSI1 | \
128 MDP5_HW_INTR_STATUS_INTR_HDMI | \
129 MDP5_HW_INTR_STATUS_INTR_EDP)
130
131static void mdp5_hw_mask_irq(struct irq_data *irqd)
132{
133 struct mdp5_kms *mdp5_kms = irq_data_get_irq_chip_data(irqd);
134 smp_mb__before_atomic();
135 clear_bit(irqd->hwirq, &mdp5_kms->irqcontroller.enabled_mask);
136 smp_mb__after_atomic();
137}
138
139static void mdp5_hw_unmask_irq(struct irq_data *irqd)
140{
141 struct mdp5_kms *mdp5_kms = irq_data_get_irq_chip_data(irqd);
142 smp_mb__before_atomic();
143 set_bit(irqd->hwirq, &mdp5_kms->irqcontroller.enabled_mask);
144 smp_mb__after_atomic();
145}
146
147static struct irq_chip mdp5_hw_irq_chip = {
148 .name = "mdp5",
149 .irq_mask = mdp5_hw_mask_irq,
150 .irq_unmask = mdp5_hw_unmask_irq,
151};
152
153static int mdp5_hw_irqdomain_map(struct irq_domain *d,
154 unsigned int irq, irq_hw_number_t hwirq)
155{
156 struct mdp5_kms *mdp5_kms = d->host_data;
157
158 if (!(VALID_IRQS & (1 << hwirq)))
159 return -EPERM;
160
161 irq_set_chip_and_handler(irq, &mdp5_hw_irq_chip, handle_level_irq);
162 irq_set_chip_data(irq, mdp5_kms);
163 set_irq_flags(irq, IRQF_VALID);
164
165 return 0;
166}
167
168static struct irq_domain_ops mdp5_hw_irqdomain_ops = {
169 .map = mdp5_hw_irqdomain_map,
170 .xlate = irq_domain_xlate_onecell,
171};
172
173
174int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms)
175{
176 struct device *dev = mdp5_kms->dev->dev;
177 struct irq_domain *d;
178
179 d = irq_domain_add_linear(dev->of_node, 32,
180 &mdp5_hw_irqdomain_ops, mdp5_kms);
181 if (!d) {
182 dev_err(dev, "mdp5 irq domain add failed\n");
183 return -ENXIO;
184 }
185
186 mdp5_kms->irqcontroller.enabled_mask = 0;
187 mdp5_kms->irqcontroller.domain = d;
188
189 return 0;
190}
191
192void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms)
193{
194 if (mdp5_kms->irqcontroller.domain) {
195 irq_domain_remove(mdp5_kms->irqcontroller.domain);
196 mdp5_kms->irqcontroller.domain = NULL;
197 }
198}
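
With mdp5 registered as an interrupt controller, sub-blocks no longer have
their handlers hard-coded into mdp5_irq() (as hdmi_irq() was before). Instead,
a consumer maps its hw irq (the bit position in MDP5_HW_INTR_STATUS) through
the domain and requests it like any other linux irq. A rough sketch of what,
e.g., the HDMI code might do (hwirq value, pdev and error handling are
assumptions for illustration):

    #include <linux/irqdomain.h>
    #include <linux/interrupt.h>

    /* hwirq = bit position of MDP5_HW_INTR_STATUS_INTR_HDMI */
    unsigned int irq = irq_create_mapping(
    		mdp5_kms->irqcontroller.domain, hwirq);

    /* from here on it is an ordinary linux irq: */
    ret = devm_request_irq(&pdev->dev, irq, hdmi_irq,
    		0, "hdmi_isr", hdmi);
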
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index ce0308124a72..a11f1b80c488 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -1,4 +1,5 @@
1/* 1/*
2 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
2 * Copyright (C) 2013 Red Hat 3 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com> 4 * Author: Rob Clark <robdclark@gmail.com>
4 * 5 *
@@ -24,145 +25,11 @@ static const char *iommu_ports[] = {
24 "mdp_0", 25 "mdp_0",
25}; 26};
26 27
27static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev);
28
29const struct mdp5_config *mdp5_cfg;
30
31static const struct mdp5_config msm8x74_config = {
32 .name = "msm8x74",
33 .ctl = {
34 .count = 5,
35 .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
36 },
37 .pipe_vig = {
38 .count = 3,
39 .base = { 0x01200, 0x01600, 0x01a00 },
40 },
41 .pipe_rgb = {
42 .count = 3,
43 .base = { 0x01e00, 0x02200, 0x02600 },
44 },
45 .pipe_dma = {
46 .count = 2,
47 .base = { 0x02a00, 0x02e00 },
48 },
49 .lm = {
50 .count = 5,
51 .base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 },
52 },
53 .dspp = {
54 .count = 3,
55 .base = { 0x04600, 0x04a00, 0x04e00 },
56 },
57 .ad = {
58 .count = 2,
59 .base = { 0x13100, 0x13300 }, /* NOTE: no ad in v1.0 */
60 },
61 .intf = {
62 .count = 4,
63 .base = { 0x12500, 0x12700, 0x12900, 0x12b00 },
64 },
65};
66
67static const struct mdp5_config apq8084_config = {
68 .name = "apq8084",
69 .ctl = {
70 .count = 5,
71 .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
72 },
73 .pipe_vig = {
74 .count = 4,
75 .base = { 0x01200, 0x01600, 0x01a00, 0x01e00 },
76 },
77 .pipe_rgb = {
78 .count = 4,
79 .base = { 0x02200, 0x02600, 0x02a00, 0x02e00 },
80 },
81 .pipe_dma = {
82 .count = 2,
83 .base = { 0x03200, 0x03600 },
84 },
85 .lm = {
86 .count = 6,
87 .base = { 0x03a00, 0x03e00, 0x04200, 0x04600, 0x04a00, 0x04e00 },
88 },
89 .dspp = {
90 .count = 4,
91 .base = { 0x05200, 0x05600, 0x05a00, 0x05e00 },
92
93 },
94 .ad = {
95 .count = 3,
96 .base = { 0x13500, 0x13700, 0x13900 },
97 },
98 .intf = {
99 .count = 5,
100 .base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 },
101 },
102};
103
104struct mdp5_config_entry {
105 int revision;
106 const struct mdp5_config *config;
107};
108
109static const struct mdp5_config_entry mdp5_configs[] = {
110 { .revision = 0, .config = &msm8x74_config },
111 { .revision = 2, .config = &msm8x74_config },
112 { .revision = 3, .config = &apq8084_config },
113};
114
115static int mdp5_select_hw_cfg(struct msm_kms *kms)
116{
117 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
118 struct drm_device *dev = mdp5_kms->dev;
119 uint32_t version, major, minor;
120 int i, ret = 0;
121
122 mdp5_enable(mdp5_kms);
123 version = mdp5_read(mdp5_kms, REG_MDP5_MDP_VERSION);
124 mdp5_disable(mdp5_kms);
125
126 major = FIELD(version, MDP5_MDP_VERSION_MAJOR);
127 minor = FIELD(version, MDP5_MDP_VERSION_MINOR);
128
129 DBG("found MDP5 version v%d.%d", major, minor);
130
131 if (major != 1) {
132 dev_err(dev->dev, "unexpected MDP major version: v%d.%d\n",
133 major, minor);
134 ret = -ENXIO;
135 goto out;
136 }
137
138 mdp5_kms->rev = minor;
139
140 /* only after mdp5_cfg global pointer's init can we access the hw */
141 for (i = 0; i < ARRAY_SIZE(mdp5_configs); i++) {
142 if (mdp5_configs[i].revision != minor)
143 continue;
144 mdp5_kms->hw_cfg = mdp5_cfg = mdp5_configs[i].config;
145 break;
146 }
147 if (unlikely(!mdp5_kms->hw_cfg)) {
148 dev_err(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
149 major, minor);
150 ret = -ENXIO;
151 goto out;
152 }
153
154 DBG("MDP5: %s config selected", mdp5_kms->hw_cfg->name);
155
156 return 0;
157out:
158 return ret;
159}
160
161static int mdp5_hw_init(struct msm_kms *kms) 28static int mdp5_hw_init(struct msm_kms *kms)
162{ 29{
163 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 30 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
164 struct drm_device *dev = mdp5_kms->dev; 31 struct drm_device *dev = mdp5_kms->dev;
165 int i; 32 unsigned long flags;
166 33
167 pm_runtime_get_sync(dev->dev); 34 pm_runtime_get_sync(dev->dev);
168 35
@@ -190,10 +57,11 @@ static int mdp5_hw_init(struct msm_kms *kms)
190 * care. 57 * care.
191 */ 58 */
192 59
60 spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
193 mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0); 61 mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
62 spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
194 63
195 for (i = 0; i < mdp5_kms->hw_cfg->ctl.count; i++) 64 mdp5_ctlm_hw_reset(mdp5_kms->ctlm);
196 mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(i), 0);
197 65
198 pm_runtime_put_sync(dev->dev); 66 pm_runtime_put_sync(dev->dev);
199 67
@@ -221,10 +89,20 @@ static void mdp5_destroy(struct msm_kms *kms)
221 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 89 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
222 struct msm_mmu *mmu = mdp5_kms->mmu; 90 struct msm_mmu *mmu = mdp5_kms->mmu;
223 91
92 mdp5_irq_domain_fini(mdp5_kms);
93
224 if (mmu) { 94 if (mmu) {
225 mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports)); 95 mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
226 mmu->funcs->destroy(mmu); 96 mmu->funcs->destroy(mmu);
227 } 97 }
98
99 if (mdp5_kms->ctlm)
100 mdp5_ctlm_destroy(mdp5_kms->ctlm);
101 if (mdp5_kms->smp)
102 mdp5_smp_destroy(mdp5_kms->smp);
103 if (mdp5_kms->cfg)
104 mdp5_cfg_destroy(mdp5_kms->cfg);
105
228 kfree(mdp5_kms); 106 kfree(mdp5_kms);
229} 107}
230 108
@@ -274,17 +152,31 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
274 static const enum mdp5_pipe crtcs[] = { 152 static const enum mdp5_pipe crtcs[] = {
275 SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3, 153 SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3,
276 }; 154 };
155 static const enum mdp5_pipe pub_planes[] = {
156 SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3,
157 };
277 struct drm_device *dev = mdp5_kms->dev; 158 struct drm_device *dev = mdp5_kms->dev;
278 struct msm_drm_private *priv = dev->dev_private; 159 struct msm_drm_private *priv = dev->dev_private;
279 struct drm_encoder *encoder; 160 struct drm_encoder *encoder;
161 const struct mdp5_cfg_hw *hw_cfg;
280 int i, ret; 162 int i, ret;
281 163
282 /* construct CRTCs: */ 164 hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
283 for (i = 0; i < mdp5_kms->hw_cfg->pipe_rgb.count; i++) { 165
166 /* register our interrupt-controller for hdmi/eDP/dsi/etc
167 * to use for irqs routed through mdp:
168 */
169 ret = mdp5_irq_domain_init(mdp5_kms);
170 if (ret)
171 goto fail;
172
173 /* construct CRTCs and their private planes: */
174 for (i = 0; i < hw_cfg->pipe_rgb.count; i++) {
284 struct drm_plane *plane; 175 struct drm_plane *plane;
285 struct drm_crtc *crtc; 176 struct drm_crtc *crtc;
286 177
287 plane = mdp5_plane_init(dev, crtcs[i], true); 178 plane = mdp5_plane_init(dev, crtcs[i], true,
179 hw_cfg->pipe_rgb.base[i]);
288 if (IS_ERR(plane)) { 180 if (IS_ERR(plane)) {
289 ret = PTR_ERR(plane); 181 ret = PTR_ERR(plane);
290 dev_err(dev->dev, "failed to construct plane for %s (%d)\n", 182 dev_err(dev->dev, "failed to construct plane for %s (%d)\n",
@@ -302,6 +194,20 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
302 priv->crtcs[priv->num_crtcs++] = crtc; 194 priv->crtcs[priv->num_crtcs++] = crtc;
303 } 195 }
304 196
197 /* Construct public planes: */
198 for (i = 0; i < hw_cfg->pipe_vig.count; i++) {
199 struct drm_plane *plane;
200
201 plane = mdp5_plane_init(dev, pub_planes[i], false,
202 hw_cfg->pipe_vig.base[i]);
203 if (IS_ERR(plane)) {
204 ret = PTR_ERR(plane);
205 dev_err(dev->dev, "failed to construct %s plane: %d\n",
206 pipe2name(pub_planes[i]), ret);
207 goto fail;
208 }
209 }
210
305 /* Construct encoder for HDMI: */ 211 /* Construct encoder for HDMI: */
306 encoder = mdp5_encoder_init(dev, 3, INTF_HDMI); 212 encoder = mdp5_encoder_init(dev, 3, INTF_HDMI);
307 if (IS_ERR(encoder)) { 213 if (IS_ERR(encoder)) {
@@ -338,6 +244,21 @@ fail:
338 return ret; 244 return ret;
339} 245}
340 246
247static void read_hw_revision(struct mdp5_kms *mdp5_kms,
248 uint32_t *major, uint32_t *minor)
249{
250 uint32_t version;
251
252 mdp5_enable(mdp5_kms);
253 version = mdp5_read(mdp5_kms, REG_MDP5_MDP_VERSION);
254 mdp5_disable(mdp5_kms);
255
256 *major = FIELD(version, MDP5_MDP_VERSION_MAJOR);
257 *minor = FIELD(version, MDP5_MDP_VERSION_MINOR);
258
259 DBG("MDP5 version v%d.%d", *major, *minor);
260}
261
341static int get_clk(struct platform_device *pdev, struct clk **clkp, 262static int get_clk(struct platform_device *pdev, struct clk **clkp,
342 const char *name) 263 const char *name)
343{ 264{
@@ -354,10 +275,11 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp,
354struct msm_kms *mdp5_kms_init(struct drm_device *dev) 275struct msm_kms *mdp5_kms_init(struct drm_device *dev)
355{ 276{
356 struct platform_device *pdev = dev->platformdev; 277 struct platform_device *pdev = dev->platformdev;
357 struct mdp5_platform_config *config = mdp5_get_config(pdev); 278 struct mdp5_cfg *config;
358 struct mdp5_kms *mdp5_kms; 279 struct mdp5_kms *mdp5_kms;
359 struct msm_kms *kms = NULL; 280 struct msm_kms *kms = NULL;
360 struct msm_mmu *mmu; 281 struct msm_mmu *mmu;
282 uint32_t major, minor;
361 int i, ret; 283 int i, ret;
362 284
363 mdp5_kms = kzalloc(sizeof(*mdp5_kms), GFP_KERNEL); 285 mdp5_kms = kzalloc(sizeof(*mdp5_kms), GFP_KERNEL);
@@ -367,12 +289,13 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
367 goto fail; 289 goto fail;
368 } 290 }
369 291
292 spin_lock_init(&mdp5_kms->resource_lock);
293
370 mdp_kms_init(&mdp5_kms->base, &kms_funcs); 294 mdp_kms_init(&mdp5_kms->base, &kms_funcs);
371 295
372 kms = &mdp5_kms->base.base; 296 kms = &mdp5_kms->base.base;
373 297
374 mdp5_kms->dev = dev; 298 mdp5_kms->dev = dev;
375 mdp5_kms->smp_blk_cnt = config->smp_blk_cnt;
376 299
377 mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5"); 300 mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
378 if (IS_ERR(mdp5_kms->mmio)) { 301 if (IS_ERR(mdp5_kms->mmio)) {
@@ -417,24 +340,52 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
417 if (ret) 340 if (ret)
418 goto fail; 341 goto fail;
419 342
420 ret = clk_set_rate(mdp5_kms->src_clk, config->max_clk); 343 /* we need to set a default rate before enabling. Set a safe
344 * rate first, then figure out hw revision, and then set a
345 * more optimal rate:
346 */
347 clk_set_rate(mdp5_kms->src_clk, 200000000);
421 348
422 ret = mdp5_select_hw_cfg(kms); 349 read_hw_revision(mdp5_kms, &major, &minor);
423 if (ret) 350
351 mdp5_kms->cfg = mdp5_cfg_init(mdp5_kms, major, minor);
352 if (IS_ERR(mdp5_kms->cfg)) {
353 ret = PTR_ERR(mdp5_kms->cfg);
354 mdp5_kms->cfg = NULL;
355 goto fail;
356 }
357
358 config = mdp5_cfg_get_config(mdp5_kms->cfg);
359
360 /* TODO: compute core clock rate at runtime */
361 clk_set_rate(mdp5_kms->src_clk, config->hw->max_clk);
362
363 mdp5_kms->smp = mdp5_smp_init(mdp5_kms->dev, &config->hw->smp);
364 if (IS_ERR(mdp5_kms->smp)) {
365 ret = PTR_ERR(mdp5_kms->smp);
366 mdp5_kms->smp = NULL;
424 goto fail; 367 goto fail;
368 }
369
370 mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, config->hw);
371 if (IS_ERR(mdp5_kms->ctlm)) {
372 ret = PTR_ERR(mdp5_kms->ctlm);
373 mdp5_kms->ctlm = NULL;
374 goto fail;
375 }
425 376
426 /* make sure things are off before attaching iommu (bootloader could 377 /* make sure things are off before attaching iommu (bootloader could
427 * have left things on, in which case we'll start getting faults if 378 * have left things on, in which case we'll start getting faults if
428 * we don't disable): 379 * we don't disable):
429 */ 380 */
430 mdp5_enable(mdp5_kms); 381 mdp5_enable(mdp5_kms);
431 for (i = 0; i < mdp5_kms->hw_cfg->intf.count; i++) 382 for (i = 0; i < config->hw->intf.count; i++)
432 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0); 383 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
433 mdp5_disable(mdp5_kms); 384 mdp5_disable(mdp5_kms);
434 mdelay(16); 385 mdelay(16);
435 386
436 if (config->iommu) { 387 if (config->platform.iommu) {
437 mmu = msm_iommu_new(&pdev->dev, config->iommu); 388 mmu = msm_iommu_new(&pdev->dev, config->platform.iommu);
438 if (IS_ERR(mmu)) { 389 if (IS_ERR(mmu)) {
439 ret = PTR_ERR(mmu); 390 ret = PTR_ERR(mmu);
440 dev_err(dev->dev, "failed to init iommu: %d\n", ret); 391 dev_err(dev->dev, "failed to init iommu: %d\n", ret);
@@ -475,18 +426,3 @@ fail:
475 mdp5_destroy(kms); 426 mdp5_destroy(kms);
476 return ERR_PTR(ret); 427 return ERR_PTR(ret);
477} 428}
478
479static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev)
480{
481 static struct mdp5_platform_config config = {};
482#ifdef CONFIG_OF
483 /* TODO */
484#endif
485 config.iommu = iommu_domain_alloc(&platform_bus_type);
486 /* TODO hard-coded in downstream mdss, but should it be? */
487 config.max_clk = 200000000;
488 /* TODO get from DT: */
489 config.smp_blk_cnt = 22;
490
491 return &config;
492}
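
The reworked init path resolves a chicken-and-egg dependency: the optimal core
clock rate now lives in the per-revision config, but reading the revision
register itself requires a running clock. The ordering boils down to the
following sequence (a condensation of the code above; rates as in the patch):

    clk_set_rate(mdp5_kms->src_clk, 200000000);	/* safe bootstrap rate */
    read_hw_revision(mdp5_kms, &major, &minor);	/* needs the clk enabled */

    mdp5_kms->cfg = mdp5_cfg_init(mdp5_kms, major, minor);
    config = mdp5_cfg_get_config(mdp5_kms->cfg);

    clk_set_rate(mdp5_kms->src_clk, config->hw->max_clk);	/* real max rate */
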
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index c91101d5ac0f..dd69c77c0d64 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -21,25 +21,9 @@
21#include "msm_drv.h" 21#include "msm_drv.h"
22#include "msm_kms.h" 22#include "msm_kms.h"
23#include "mdp/mdp_kms.h" 23#include "mdp/mdp_kms.h"
24/* dynamic offsets used by mdp5.xml.h (initialized in mdp5_kms.c) */ 24#include "mdp5_cfg.h" /* must be included before mdp5.xml.h */
25#define MDP5_MAX_BASES 8
26struct mdp5_sub_block {
27 int count;
28 uint32_t base[MDP5_MAX_BASES];
29};
30struct mdp5_config {
31 char *name;
32 struct mdp5_sub_block ctl;
33 struct mdp5_sub_block pipe_vig;
34 struct mdp5_sub_block pipe_rgb;
35 struct mdp5_sub_block pipe_dma;
36 struct mdp5_sub_block lm;
37 struct mdp5_sub_block dspp;
38 struct mdp5_sub_block ad;
39 struct mdp5_sub_block intf;
40};
41extern const struct mdp5_config *mdp5_cfg;
42#include "mdp5.xml.h" 25#include "mdp5.xml.h"
26#include "mdp5_ctl.h"
43#include "mdp5_smp.h" 27#include "mdp5_smp.h"
44 28
45struct mdp5_kms { 29struct mdp5_kms {
@@ -47,17 +31,14 @@ struct mdp5_kms {
47 31
48 struct drm_device *dev; 32 struct drm_device *dev;
49 33
50 int rev; 34 struct mdp5_cfg_handler *cfg;
51 const struct mdp5_config *hw_cfg;
52 35
53 /* mapper-id used to request GEM buffer mapped for scanout: */ 36 /* mapper-id used to request GEM buffer mapped for scanout: */
54 int id; 37 int id;
55 struct msm_mmu *mmu; 38 struct msm_mmu *mmu;
56 39
57 /* for tracking smp allocation amongst pipes: */ 40 struct mdp5_smp *smp;
58 mdp5_smp_state_t smp_state; 41 struct mdp5_ctl_manager *ctlm;
59 struct mdp5_client_smp_state smp_client_state[CID_MAX];
60 int smp_blk_cnt;
61 42
62 /* io/register spaces: */ 43 /* io/register spaces: */
63 void __iomem *mmio, *vbif; 44 void __iomem *mmio, *vbif;
@@ -71,16 +52,47 @@ struct mdp5_kms {
71 struct clk *lut_clk; 52 struct clk *lut_clk;
72 struct clk *vsync_clk; 53 struct clk *vsync_clk;
73 54
55 /*
56 * lock to protect access to global resources: ie., following register:
57 * - REG_MDP5_DISP_INTF_SEL
58 */
59 spinlock_t resource_lock;
60
74 struct mdp_irq error_handler; 61 struct mdp_irq error_handler;
62
63 struct {
64 volatile unsigned long enabled_mask;
65 struct irq_domain *domain;
66 } irqcontroller;
75}; 67};
76#define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base) 68#define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base)
77 69
78/* platform config data (ie. from DT, or pdata) */ 70struct mdp5_plane_state {
79struct mdp5_platform_config { 71 struct drm_plane_state base;
80 struct iommu_domain *iommu; 72
81 uint32_t max_clk; 73 /* "virtual" zpos.. we calculate actual mixer-stage at runtime
82 int smp_blk_cnt; 74 * by sorting the attached planes by zpos and then assigning
75 * mixer stage lowest to highest. Private planes get default
76 * zpos of zero, and public planes a unique value that is
77 * greater than zero. This way, things work out if a naive
78 * userspace assigns planes to a crtc without setting zpos.
79 */
80 int zpos;
81
82 /* the actual mixer stage, calculated in crtc->atomic_check()
83 * NOTE: this should move to mdp5_crtc_state, when that exists
84 */
85 enum mdp_mixer_stage_id stage;
86
87 /* some additional transactional status to help us know in the
88 * apply path whether we need to update SMP allocation, and
89 * whether current update is still pending:
90 */
91 bool mode_changed : 1;
92 bool pending : 1;
83}; 93};
94#define to_mdp5_plane_state(x) \
95 container_of(x, struct mdp5_plane_state, base)
84 96
85static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data) 97static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data)
86{ 98{
@@ -105,23 +117,6 @@ static inline const char *pipe2name(enum mdp5_pipe pipe)
105 return names[pipe]; 117 return names[pipe];
106} 118}
107 119
108static inline uint32_t pipe2flush(enum mdp5_pipe pipe)
109{
110 switch (pipe) {
111 case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
112 case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
113 case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
114 case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
115 case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
116 case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
117 case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
118 case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
119 case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
120 case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
121 default: return 0;
122 }
123}
124
125static inline int pipe2nclients(enum mdp5_pipe pipe) 120static inline int pipe2nclients(enum mdp5_pipe pipe)
126{ 121{
127 switch (pipe) { 122 switch (pipe) {
@@ -135,34 +130,6 @@ static inline int pipe2nclients(enum mdp5_pipe pipe)
135 } 130 }
136} 131}
137 132
138static inline enum mdp5_client_id pipe2client(enum mdp5_pipe pipe, int plane)
139{
140 WARN_ON(plane >= pipe2nclients(pipe));
141 switch (pipe) {
142 case SSPP_VIG0: return CID_VIG0_Y + plane;
143 case SSPP_VIG1: return CID_VIG1_Y + plane;
144 case SSPP_VIG2: return CID_VIG2_Y + plane;
145 case SSPP_RGB0: return CID_RGB0;
146 case SSPP_RGB1: return CID_RGB1;
147 case SSPP_RGB2: return CID_RGB2;
148 case SSPP_DMA0: return CID_DMA0_Y + plane;
149 case SSPP_DMA1: return CID_DMA1_Y + plane;
150 case SSPP_VIG3: return CID_VIG3_Y + plane;
151 case SSPP_RGB3: return CID_RGB3;
152 default: return CID_UNUSED;
153 }
154}
155
156static inline uint32_t mixer2flush(int lm)
157{
158 switch (lm) {
159 case 0: return MDP5_CTL_FLUSH_LM0;
160 case 1: return MDP5_CTL_FLUSH_LM1;
161 case 2: return MDP5_CTL_FLUSH_LM2;
162 default: return 0;
163 }
164}
165
166static inline uint32_t intf2err(int intf) 133static inline uint32_t intf2err(int intf)
167{ 134{
168 switch (intf) { 135 switch (intf) {
@@ -195,6 +162,8 @@ void mdp5_irq_uninstall(struct msm_kms *kms);
195irqreturn_t mdp5_irq(struct msm_kms *kms); 162irqreturn_t mdp5_irq(struct msm_kms *kms);
196int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); 163int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
197void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); 164void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
165int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms);
166void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms);
198 167
199static inline 168static inline
200uint32_t mdp5_get_formats(enum mdp5_pipe pipe, uint32_t *pixel_formats, 169uint32_t mdp5_get_formats(enum mdp5_pipe pipe, uint32_t *pixel_formats,
@@ -208,26 +177,18 @@ uint32_t mdp5_get_formats(enum mdp5_pipe pipe, uint32_t *pixel_formats,
208 177
209void mdp5_plane_install_properties(struct drm_plane *plane, 178void mdp5_plane_install_properties(struct drm_plane *plane,
210 struct drm_mode_object *obj); 179 struct drm_mode_object *obj);
211void mdp5_plane_set_scanout(struct drm_plane *plane, 180uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
212 struct drm_framebuffer *fb);
213int mdp5_plane_mode_set(struct drm_plane *plane,
214 struct drm_crtc *crtc, struct drm_framebuffer *fb,
215 int crtc_x, int crtc_y,
216 unsigned int crtc_w, unsigned int crtc_h,
217 uint32_t src_x, uint32_t src_y,
218 uint32_t src_w, uint32_t src_h);
219void mdp5_plane_complete_flip(struct drm_plane *plane); 181void mdp5_plane_complete_flip(struct drm_plane *plane);
220enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane); 182enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
221struct drm_plane *mdp5_plane_init(struct drm_device *dev, 183struct drm_plane *mdp5_plane_init(struct drm_device *dev,
222 enum mdp5_pipe pipe, bool private_plane); 184 enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset);
223 185
224uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc); 186uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
225 187
188int mdp5_crtc_get_lm(struct drm_crtc *crtc);
226void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file); 189void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
227void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf, 190void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
228 enum mdp5_intf intf_id); 191 enum mdp5_intf intf_id);
229void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane);
230void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane);
231struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, 192struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
232 struct drm_plane *plane, int id); 193 struct drm_plane *plane, int id);
233 194
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index f3daec4412ad..533df7caa310 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -1,4 +1,5 @@
1/* 1/*
2 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
2 * Copyright (C) 2013 Red Hat 3 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com> 4 * Author: Rob Clark <robdclark@gmail.com>
4 * 5 *
@@ -17,6 +18,7 @@
17 18
18#include "mdp5_kms.h" 19#include "mdp5_kms.h"
19 20
21#define MAX_PLANE 4
20 22
21struct mdp5_plane { 23struct mdp5_plane {
22 struct drm_plane base; 24 struct drm_plane base;
@@ -24,6 +26,11 @@ struct mdp5_plane {
24 26
25 enum mdp5_pipe pipe; 27 enum mdp5_pipe pipe;
26 28
29 spinlock_t pipe_lock; /* protect REG_MDP5_PIPE_* registers */
30 uint32_t reg_offset;
31
32 uint32_t flush_mask; /* used to commit pipe registers */
33
27 uint32_t nformats; 34 uint32_t nformats;
28 uint32_t formats[32]; 35 uint32_t formats[32];
29 36
@@ -31,31 +38,24 @@ struct mdp5_plane {
31}; 38};
32#define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base) 39#define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base)
33 40
41static int mdp5_plane_mode_set(struct drm_plane *plane,
42 struct drm_crtc *crtc, struct drm_framebuffer *fb,
43 int crtc_x, int crtc_y,
44 unsigned int crtc_w, unsigned int crtc_h,
45 uint32_t src_x, uint32_t src_y,
46 uint32_t src_w, uint32_t src_h);
47static void set_scanout_locked(struct drm_plane *plane,
48 struct drm_framebuffer *fb);
49
34static struct mdp5_kms *get_kms(struct drm_plane *plane) 50static struct mdp5_kms *get_kms(struct drm_plane *plane)
35{ 51{
36 struct msm_drm_private *priv = plane->dev->dev_private; 52 struct msm_drm_private *priv = plane->dev->dev_private;
37 return to_mdp5_kms(to_mdp_kms(priv->kms)); 53 return to_mdp5_kms(to_mdp_kms(priv->kms));
38} 54}
39 55
40static int mdp5_plane_update(struct drm_plane *plane, 56static bool plane_enabled(struct drm_plane_state *state)
41 struct drm_crtc *crtc, struct drm_framebuffer *fb,
42 int crtc_x, int crtc_y,
43 unsigned int crtc_w, unsigned int crtc_h,
44 uint32_t src_x, uint32_t src_y,
45 uint32_t src_w, uint32_t src_h)
46{ 57{
47 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); 58 return state->fb && state->crtc;
48
49 mdp5_plane->enabled = true;
50
51 if (plane->fb)
52 drm_framebuffer_unreference(plane->fb);
53
54 drm_framebuffer_reference(fb);
55
56 return mdp5_plane_mode_set(plane, crtc, fb,
57 crtc_x, crtc_y, crtc_w, crtc_h,
58 src_x, src_y, src_w, src_h);
59} 59}
60 60
61static int mdp5_plane_disable(struct drm_plane *plane) 61static int mdp5_plane_disable(struct drm_plane *plane)
@@ -63,21 +63,13 @@ static int mdp5_plane_disable(struct drm_plane *plane)
63 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); 63 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
64 struct mdp5_kms *mdp5_kms = get_kms(plane); 64 struct mdp5_kms *mdp5_kms = get_kms(plane);
65 enum mdp5_pipe pipe = mdp5_plane->pipe; 65 enum mdp5_pipe pipe = mdp5_plane->pipe;
66 int i;
67 66
68 DBG("%s: disable", mdp5_plane->name); 67 DBG("%s: disable", mdp5_plane->name);
69 68
70 /* update our SMP request to zero (release all our blks): */ 69 if (mdp5_kms) {
71 for (i = 0; i < pipe2nclients(pipe); i++) 70 /* Release the memory we requested earlier from the SMP: */
72 mdp5_smp_request(mdp5_kms, pipe2client(pipe, i), 0); 71 mdp5_smp_release(mdp5_kms->smp, pipe);
73 72 }
74 /* TODO detaching now will cause us not to get the last
75 * vblank and mdp5_smp_commit().. so other planes will
76 * still see smp blocks previously allocated to us as
77 * in-use..
78 */
79 if (plane->crtc)
80 mdp5_crtc_detach(plane->crtc, plane);
81 73
82 return 0; 74 return 0;
83} 75}
@@ -85,11 +77,8 @@ static int mdp5_plane_disable(struct drm_plane *plane)
85static void mdp5_plane_destroy(struct drm_plane *plane) 77static void mdp5_plane_destroy(struct drm_plane *plane)
86{ 78{
87 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); 79 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
88 struct msm_drm_private *priv = plane->dev->dev_private;
89
90 if (priv->kms)
91 mdp5_plane_disable(plane);
92 80
81 drm_plane_helper_disable(plane);
93 drm_plane_cleanup(plane); 82 drm_plane_cleanup(plane);
94 83
95 kfree(mdp5_plane); 84 kfree(mdp5_plane);
@@ -109,109 +98,185 @@ int mdp5_plane_set_property(struct drm_plane *plane,
109 return -EINVAL; 98 return -EINVAL;
110} 99}
111 100
101static void mdp5_plane_reset(struct drm_plane *plane)
102{
103 struct mdp5_plane_state *mdp5_state;
104
105 if (plane->state && plane->state->fb)
106 drm_framebuffer_unreference(plane->state->fb);
107
108 kfree(to_mdp5_plane_state(plane->state));
109 mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
110
111 if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
112 mdp5_state->zpos = 0;
113 } else {
114 mdp5_state->zpos = 1 + drm_plane_index(plane);
115 }
116
117 plane->state = &mdp5_state->base;
118}
119
120static struct drm_plane_state *
121mdp5_plane_duplicate_state(struct drm_plane *plane)
122{
123 struct mdp5_plane_state *mdp5_state;
124
125 if (WARN_ON(!plane->state))
126 return NULL;
127
128 mdp5_state = kmemdup(to_mdp5_plane_state(plane->state),
129 sizeof(*mdp5_state), GFP_KERNEL);
130
131 if (mdp5_state && mdp5_state->base.fb)
132 drm_framebuffer_reference(mdp5_state->base.fb);
133
134 mdp5_state->mode_changed = false;
135 mdp5_state->pending = false;
136
137 return &mdp5_state->base;
138}
139
140static void mdp5_plane_destroy_state(struct drm_plane *plane,
141 struct drm_plane_state *state)
142{
143 if (state->fb)
144 drm_framebuffer_unreference(state->fb);
145
146 kfree(to_mdp5_plane_state(state));
147}
148
112static const struct drm_plane_funcs mdp5_plane_funcs = { 149static const struct drm_plane_funcs mdp5_plane_funcs = {
113 .update_plane = mdp5_plane_update, 150 .update_plane = drm_atomic_helper_update_plane,
114 .disable_plane = mdp5_plane_disable, 151 .disable_plane = drm_atomic_helper_disable_plane,
115 .destroy = mdp5_plane_destroy, 152 .destroy = mdp5_plane_destroy,
116 .set_property = mdp5_plane_set_property, 153 .set_property = mdp5_plane_set_property,
154 .reset = mdp5_plane_reset,
155 .atomic_duplicate_state = mdp5_plane_duplicate_state,
156 .atomic_destroy_state = mdp5_plane_destroy_state,
117}; 157};
118 158
119void mdp5_plane_set_scanout(struct drm_plane *plane, 159static int mdp5_plane_prepare_fb(struct drm_plane *plane,
120 struct drm_framebuffer *fb) 160 struct drm_framebuffer *fb)
121{ 161{
122 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); 162 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
123 struct mdp5_kms *mdp5_kms = get_kms(plane); 163 struct mdp5_kms *mdp5_kms = get_kms(plane);
124 enum mdp5_pipe pipe = mdp5_plane->pipe;
125 uint32_t nplanes = drm_format_num_planes(fb->pixel_format);
126 uint32_t iova[4];
127 int i;
128
129 for (i = 0; i < nplanes; i++) {
130 struct drm_gem_object *bo = msm_framebuffer_bo(fb, i);
131 msm_gem_get_iova(bo, mdp5_kms->id, &iova[i]);
132 }
133 for (; i < 4; i++)
134 iova[i] = 0;
135 164
136 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe), 165 DBG("%s: prepare: FB[%u]", mdp5_plane->name, fb->base.id);
137 MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) | 166 return msm_framebuffer_prepare(fb, mdp5_kms->id);
138 MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
139
140 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_B(pipe),
141 MDP5_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
142 MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
143
144 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe), iova[0]);
145 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe), iova[1]);
146 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe), iova[2]);
147 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe), iova[3]);
148
149 plane->fb = fb;
150} 167}
151 168
152/* NOTE: looks like if horizontal decimation is used (if we supported that) 169static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
153 * then the width used to calculate SMP block requirements is the post- 170 struct drm_framebuffer *fb)
154 * decimated width. Ie. SMP buffering sits downstream of decimation (which
155 * presumably happens during the dma from scanout buffer).
156 */
157static int request_smp_blocks(struct drm_plane *plane, uint32_t format,
158 uint32_t nplanes, uint32_t width)
159{ 171{
160 struct drm_device *dev = plane->dev;
161 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); 172 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
162 struct mdp5_kms *mdp5_kms = get_kms(plane); 173 struct mdp5_kms *mdp5_kms = get_kms(plane);
163 enum mdp5_pipe pipe = mdp5_plane->pipe;
164 int i, hsub, nlines, nblks, ret;
165 174
166 hsub = drm_format_horz_chroma_subsampling(format); 175 DBG("%s: cleanup: FB[%u]", mdp5_plane->name, fb->base.id);
176 msm_framebuffer_cleanup(fb, mdp5_kms->id);
177}
167 178
168 /* different if BWC (compressed framebuffer?) enabled: */ 179static int mdp5_plane_atomic_check(struct drm_plane *plane,
169 nlines = 2; 180 struct drm_plane_state *state)
181{
182 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
183 struct drm_plane_state *old_state = plane->state;
170 184
171 for (i = 0, nblks = 0; i < nplanes; i++) { 185 DBG("%s: check (%d -> %d)", mdp5_plane->name,
172 int n, fetch_stride, cpp; 186 plane_enabled(old_state), plane_enabled(state));
173 187
174 cpp = drm_format_plane_cpp(format, i); 188 if (plane_enabled(state) && plane_enabled(old_state)) {
175 fetch_stride = width * cpp / (i ? hsub : 1); 189 /* we cannot change SMP block configuration during scanout: */
190 bool full_modeset = false;
191 if (state->fb->pixel_format != old_state->fb->pixel_format) {
192 DBG("%s: pixel_format change!", mdp5_plane->name);
193 full_modeset = true;
194 }
195 if (state->src_w != old_state->src_w) {
196 DBG("%s: src_w change!", mdp5_plane->name);
197 full_modeset = true;
198 }
199 if (to_mdp5_plane_state(old_state)->pending) {
200 DBG("%s: still pending!", mdp5_plane->name);
201 full_modeset = true;
202 }
203 if (full_modeset) {
204 struct drm_crtc_state *crtc_state =
205 drm_atomic_get_crtc_state(state->state, state->crtc);
206 crtc_state->mode_changed = true;
207 to_mdp5_plane_state(state)->mode_changed = true;
208 }
209 } else {
210 to_mdp5_plane_state(state)->mode_changed = true;
211 }
176 212
177 n = DIV_ROUND_UP(fetch_stride * nlines, SMP_BLK_SIZE); 213 return 0;
214}
178 215
179 /* for hw rev v1.00 */ 216static void mdp5_plane_atomic_update(struct drm_plane *plane)
180 if (mdp5_kms->rev == 0) 217{
181 n = roundup_pow_of_two(n); 218 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
219 struct drm_plane_state *state = plane->state;
182 220
183 DBG("%s[%d]: request %d SMP blocks", mdp5_plane->name, i, n); 221 DBG("%s: update", mdp5_plane->name);
184 ret = mdp5_smp_request(mdp5_kms, pipe2client(pipe, i), n);
185 if (ret) {
186 dev_err(dev->dev, "Could not allocate %d SMP blocks: %d\n",
187 n, ret);
188 return ret;
189 }
190 222
191 nblks += n; 223 if (!plane_enabled(state)) {
224 to_mdp5_plane_state(state)->pending = true;
225 mdp5_plane_disable(plane);
226 } else if (to_mdp5_plane_state(state)->mode_changed) {
227 int ret;
228 to_mdp5_plane_state(state)->pending = true;
229 ret = mdp5_plane_mode_set(plane,
230 state->crtc, state->fb,
231 state->crtc_x, state->crtc_y,
232 state->crtc_w, state->crtc_h,
233 state->src_x, state->src_y,
234 state->src_w, state->src_h);
235 /* atomic_check should have ensured that this doesn't fail */
236 WARN_ON(ret < 0);
237 } else {
238 unsigned long flags;
239 spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
240 set_scanout_locked(plane, state->fb);
241 spin_unlock_irqrestore(&mdp5_plane->pipe_lock, flags);
192 } 242 }
193
194 /* in success case, return total # of blocks allocated: */
195 return nblks;
196} 243}
197 244
198static void set_fifo_thresholds(struct drm_plane *plane, int nblks) 245static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = {
246 .prepare_fb = mdp5_plane_prepare_fb,
247 .cleanup_fb = mdp5_plane_cleanup_fb,
248 .atomic_check = mdp5_plane_atomic_check,
249 .atomic_update = mdp5_plane_atomic_update,
250};
251
252static void set_scanout_locked(struct drm_plane *plane,
253 struct drm_framebuffer *fb)
199{ 254{
200 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); 255 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
201 struct mdp5_kms *mdp5_kms = get_kms(plane); 256 struct mdp5_kms *mdp5_kms = get_kms(plane);
202 enum mdp5_pipe pipe = mdp5_plane->pipe; 257 enum mdp5_pipe pipe = mdp5_plane->pipe;
203 uint32_t val;
204 258
205 /* 1/4 of SMP pool that is being fetched */ 259 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
206 val = (nblks * SMP_ENTRIES_PER_BLK) / 4; 260 MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
261 MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
207 262
208 mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), val * 1); 263 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_B(pipe),
209 mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), val * 2); 264 MDP5_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
210 mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), val * 3); 265 MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
266
267 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe),
268 msm_framebuffer_iova(fb, mdp5_kms->id, 0));
269 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe),
270 msm_framebuffer_iova(fb, mdp5_kms->id, 1));
271 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe),
272 msm_framebuffer_iova(fb, mdp5_kms->id, 2));
273 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
274 msm_framebuffer_iova(fb, mdp5_kms->id, 3));
211 275
276 plane->fb = fb;
212} 277}
213 278
214int mdp5_plane_mode_set(struct drm_plane *plane, 279static int mdp5_plane_mode_set(struct drm_plane *plane,
215 struct drm_crtc *crtc, struct drm_framebuffer *fb, 280 struct drm_crtc *crtc, struct drm_framebuffer *fb,
216 int crtc_x, int crtc_y, 281 int crtc_x, int crtc_y,
217 unsigned int crtc_w, unsigned int crtc_h, 282 unsigned int crtc_w, unsigned int crtc_h,
@@ -225,7 +290,8 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
225 uint32_t nplanes, config = 0; 290 uint32_t nplanes, config = 0;
226 uint32_t phasex_step = 0, phasey_step = 0; 291 uint32_t phasex_step = 0, phasey_step = 0;
227 uint32_t hdecm = 0, vdecm = 0; 292 uint32_t hdecm = 0, vdecm = 0;
228 int i, nblks; 293 unsigned long flags;
294 int ret;
229 295
230 nplanes = drm_format_num_planes(fb->pixel_format); 296 nplanes = drm_format_num_planes(fb->pixel_format);
231 297
@@ -243,12 +309,11 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
243 fb->base.id, src_x, src_y, src_w, src_h, 309 fb->base.id, src_x, src_y, src_w, src_h,
244 crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h); 310 crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);
245 311
246 /* 312 /* Request some memory from the SMP: */
247 * Calculate and request required # of smp blocks: 313 ret = mdp5_smp_request(mdp5_kms->smp,
248 */ 314 mdp5_plane->pipe, fb->pixel_format, src_w);
249 nblks = request_smp_blocks(plane, fb->pixel_format, nplanes, src_w); 315 if (ret)
250 if (nblks < 0) 316 return ret;
251 return nblks;
252 317
253 /* 318 /*
254 * Currently we update the hw for allocations/requests immediately, 319 * Currently we update the hw for allocations/requests immediately,
@@ -256,8 +321,7 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
256 * would move into atomic->check_plane_state(), while updating the 321 * would move into atomic->check_plane_state(), while updating the
257 * hw would remain here: 322 * hw would remain here:
258 */ 323 */
259 for (i = 0; i < pipe2nclients(pipe); i++) 324 mdp5_smp_configure(mdp5_kms->smp, pipe);
260 mdp5_smp_configure(mdp5_kms, pipe2client(pipe, i));
261 325
262 if (src_w != crtc_w) { 326 if (src_w != crtc_w) {
263 config |= MDP5_PIPE_SCALE_CONFIG_SCALEX_EN; 327 config |= MDP5_PIPE_SCALE_CONFIG_SCALEX_EN;
@@ -269,6 +333,8 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
269 /* TODO calc phasey_step, vdecm */ 333 /* TODO calc phasey_step, vdecm */
270 } 334 }
271 335
336 spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
337
272 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe), 338 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe),
273 MDP5_PIPE_SRC_IMG_SIZE_WIDTH(src_w) | 339 MDP5_PIPE_SRC_IMG_SIZE_WIDTH(src_w) |
274 MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(src_h)); 340 MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(src_h));
@@ -289,8 +355,6 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
289 MDP5_PIPE_OUT_XY_X(crtc_x) | 355 MDP5_PIPE_OUT_XY_X(crtc_x) |
290 MDP5_PIPE_OUT_XY_Y(crtc_y)); 356 MDP5_PIPE_OUT_XY_Y(crtc_y));
291 357
292 mdp5_plane_set_scanout(plane, fb);
293
294 format = to_mdp_format(msm_framebuffer_format(fb)); 358 format = to_mdp_format(msm_framebuffer_format(fb));
295 359
296 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe), 360 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe),
@@ -330,22 +394,24 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
330 MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(SCALE_FILTER_NEAREST) | 394 MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(SCALE_FILTER_NEAREST) |
331 MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(SCALE_FILTER_NEAREST)); 395 MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(SCALE_FILTER_NEAREST));
332 396
333 set_fifo_thresholds(plane, nblks); 397 set_scanout_locked(plane, fb);
334 398
335 /* TODO detach from old crtc (if we had more than one) */ 399 spin_unlock_irqrestore(&mdp5_plane->pipe_lock, flags);
336 mdp5_crtc_attach(crtc, plane);
337 400
338 return 0; 401 return ret;
339} 402}
340 403
341void mdp5_plane_complete_flip(struct drm_plane *plane) 404void mdp5_plane_complete_flip(struct drm_plane *plane)
342{ 405{
343 struct mdp5_kms *mdp5_kms = get_kms(plane); 406 struct mdp5_kms *mdp5_kms = get_kms(plane);
344 enum mdp5_pipe pipe = to_mdp5_plane(plane)->pipe; 407 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
345 int i; 408 enum mdp5_pipe pipe = mdp5_plane->pipe;
409
410 DBG("%s: complete flip", mdp5_plane->name);
346 411
347 for (i = 0; i < pipe2nclients(pipe); i++) 412 mdp5_smp_commit(mdp5_kms->smp, pipe);
348 mdp5_smp_commit(mdp5_kms, pipe2client(pipe, i)); 413
414 to_mdp5_plane_state(plane->state)->pending = false;
349} 415}
350 416
351enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane) 417enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
@@ -354,9 +420,16 @@ enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
354 return mdp5_plane->pipe; 420 return mdp5_plane->pipe;
355} 421}
356 422
423uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
424{
425 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
426
427 return mdp5_plane->flush_mask;
428}
429
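With flush_mask now stored per plane, a CRTC can gather the flush bits of everything staged on it and latch all double-buffered registers with a single CTL FLUSH write. A hedged sketch of that caller side (the loop shape and the crtc_flush() helper are illustrative assumptions, not taken from this diff):

    struct drm_plane *plane;
    u32 flush_mask = 0;

    /* accumulate the flush bit of every plane staged on this CRTC: */
    for_each_pending_plane_on_crtc(state, crtc, plane)
            flush_mask |= mdp5_plane_get_flush(plane);

    /* hypothetical helper: one CTL FLUSH write latches all of them */
    crtc_flush(crtc, flush_mask);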
357/* initialize plane */ 430/* initialize plane */
358struct drm_plane *mdp5_plane_init(struct drm_device *dev, 431struct drm_plane *mdp5_plane_init(struct drm_device *dev,
359 enum mdp5_pipe pipe, bool private_plane) 432 enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset)
360{ 433{
361 struct drm_plane *plane = NULL; 434 struct drm_plane *plane = NULL;
362 struct mdp5_plane *mdp5_plane; 435 struct mdp5_plane *mdp5_plane;
@@ -377,10 +450,18 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev,
377 mdp5_plane->nformats = mdp5_get_formats(pipe, mdp5_plane->formats, 450 mdp5_plane->nformats = mdp5_get_formats(pipe, mdp5_plane->formats,
378 ARRAY_SIZE(mdp5_plane->formats)); 451 ARRAY_SIZE(mdp5_plane->formats));
379 452
453 mdp5_plane->flush_mask = mdp_ctl_flush_mask_pipe(pipe);
454 mdp5_plane->reg_offset = reg_offset;
455 spin_lock_init(&mdp5_plane->pipe_lock);
456
380 type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY; 457 type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
381 drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs, 458 ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
382 mdp5_plane->formats, mdp5_plane->nformats, 459 mdp5_plane->formats, mdp5_plane->nformats,
383 type); 460 type);
461 if (ret)
462 goto fail;
463
464 drm_plane_helper_add(plane, &mdp5_plane_helper_funcs);
384 465
385 mdp5_plane_install_properties(plane, &plane->base); 466 mdp5_plane_install_properties(plane, &plane->base);
386 467
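For reference, the SRC_STRIDE_A/B writes in set_scanout_locked() above pack two per-plane pitches into each 32-bit register. A standalone model of that packing, assuming (from the _P0/_P1 macro names, not stated in this diff) that the even-numbered pitch sits in the low 16 bits and the odd-numbered pitch in the high 16 bits:

    #include <stdint.h>
    #include <stdio.h>

    /* assumed layout: P0/P2 in bits [15:0], P1/P3 in bits [31:16] */
    static uint32_t pack_strides(uint16_t even, uint16_t odd)
    {
            return (uint32_t)even | ((uint32_t)odd << 16);
    }

    int main(void)
    {
            /* e.g. NV12, 1920 wide: luma pitch 1920, chroma pitch 1920 */
            printf("SRC_STRIDE_A = 0x%08x\n", pack_strides(1920, 1920));
            printf("SRC_STRIDE_B = 0x%08x\n", pack_strides(0, 0));
            return 0;
    }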
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
index 2d0236b963a6..bf551885e019 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
@@ -1,4 +1,5 @@
1/* 1/*
2 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
2 * Copyright (C) 2013 Red Hat 3 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com> 4 * Author: Rob Clark <robdclark@gmail.com>
4 * 5 *
@@ -29,8 +30,11 @@
29 * Based on the size of the attached scanout buffer, a certain # of 30 * Based on the size of the attached scanout buffer, a certain # of
30 * blocks must be allocated to that client out of the shared pool. 31 * blocks must be allocated to that client out of the shared pool.
31 * 32 *
32 * For each block, it can be either free, or pending/in-use by a 33 * In some hw, some blocks are statically allocated for certain pipes
33 * client. The updates happen in three steps: 34 * and CANNOT be re-allocated (e.g. MMB0 and MMB1 both tied to RGB0).
35 *
36 * For each block that can be dynamically allocated, it can be either
37 * free, or pending/in-use by a client. The updates happen in three steps:
34 * 38 *
35 * 1) mdp5_smp_request(): 39 * 1) mdp5_smp_request():
36 * When plane scanout is setup, calculate required number of 40 * When plane scanout is setup, calculate required number of
@@ -61,21 +65,68 @@
61 * inuse and pending state of all clients.. 65 * inuse and pending state of all clients..
62 */ 66 */
63 67
64static DEFINE_SPINLOCK(smp_lock); 68struct mdp5_smp {
69 struct drm_device *dev;
70
71 int blk_cnt;
72 int blk_size;
73
74 spinlock_t state_lock;
75 mdp5_smp_state_t state; /* to track smp allocation amongst pipes: */
76
77 struct mdp5_client_smp_state client_state[CID_MAX];
78};
65 79
80static inline
81struct mdp5_kms *get_kms(struct mdp5_smp *smp)
82{
83 struct msm_drm_private *priv = smp->dev->dev_private;
84
85 return to_mdp5_kms(to_mdp_kms(priv->kms));
86}
87
88static inline enum mdp5_client_id pipe2client(enum mdp5_pipe pipe, int plane)
89{
90 WARN_ON(plane >= pipe2nclients(pipe));
91 switch (pipe) {
92 case SSPP_VIG0: return CID_VIG0_Y + plane;
93 case SSPP_VIG1: return CID_VIG1_Y + plane;
94 case SSPP_VIG2: return CID_VIG2_Y + plane;
95 case SSPP_RGB0: return CID_RGB0;
96 case SSPP_RGB1: return CID_RGB1;
97 case SSPP_RGB2: return CID_RGB2;
98 case SSPP_DMA0: return CID_DMA0_Y + plane;
99 case SSPP_DMA1: return CID_DMA1_Y + plane;
100 case SSPP_VIG3: return CID_VIG3_Y + plane;
101 case SSPP_RGB3: return CID_RGB3;
102 default: return CID_UNUSED;
103 }
104}
66 105
67/* step #1: update # of blocks pending for the client: */ 106/* step #1: update # of blocks pending for the client: */
68int mdp5_smp_request(struct mdp5_kms *mdp5_kms, 107static int smp_request_block(struct mdp5_smp *smp,
69 enum mdp5_client_id cid, int nblks) 108 enum mdp5_client_id cid, int nblks)
70{ 109{
71 struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid]; 110 struct mdp5_kms *mdp5_kms = get_kms(smp);
72 int i, ret, avail, cur_nblks, cnt = mdp5_kms->smp_blk_cnt; 111 const struct mdp5_cfg_hw *hw_cfg;
112 struct mdp5_client_smp_state *ps = &smp->client_state[cid];
113 int i, ret = 0, avail, cur_nblks, cnt = smp->blk_cnt;
114 int reserved;
73 unsigned long flags; 115 unsigned long flags;
74 116
75 spin_lock_irqsave(&smp_lock, flags); 117 hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
118 reserved = hw_cfg->smp.reserved[cid];
119
120 spin_lock_irqsave(&smp->state_lock, flags);
76 121
77 avail = cnt - bitmap_weight(mdp5_kms->smp_state, cnt); 122 nblks = max(0, nblks - reserved);
123 if (reserved)
124 DBG("%d MMBs allocated (%d reserved)", nblks, reserved);
125
126 avail = cnt - bitmap_weight(smp->state, cnt);
78 if (nblks > avail) { 127 if (nblks > avail) {
128 dev_err(mdp5_kms->dev->dev, "out of blks (req=%d > avail=%d)\n",
129 nblks, avail);
79 ret = -ENOSPC; 130 ret = -ENOSPC;
80 goto fail; 131 goto fail;
81 } 132 }
@@ -84,9 +135,9 @@ int mdp5_smp_request(struct mdp5_kms *mdp5_kms,
84 if (nblks > cur_nblks) { 135 if (nblks > cur_nblks) {
85 /* grow the existing pending reservation: */ 136 /* grow the existing pending reservation: */
86 for (i = cur_nblks; i < nblks; i++) { 137 for (i = cur_nblks; i < nblks; i++) {
87 int blk = find_first_zero_bit(mdp5_kms->smp_state, cnt); 138 int blk = find_first_zero_bit(smp->state, cnt);
88 set_bit(blk, ps->pending); 139 set_bit(blk, ps->pending);
89 set_bit(blk, mdp5_kms->smp_state); 140 set_bit(blk, smp->state);
90 } 141 }
91 } else { 142 } else {
92 /* shrink the existing pending reservation: */ 143 /* shrink the existing pending reservation: */
@@ -98,15 +149,88 @@ int mdp5_smp_request(struct mdp5_kms *mdp5_kms,
98 } 149 }
99 150
100fail: 151fail:
101 spin_unlock_irqrestore(&smp_lock, flags); 152 spin_unlock_irqrestore(&smp->state_lock, flags);
153 return ret;
154}
155
156static void set_fifo_thresholds(struct mdp5_smp *smp,
157 enum mdp5_pipe pipe, int nblks)
158{
159 struct mdp5_kms *mdp5_kms = get_kms(smp);
160 u32 smp_entries_per_blk = smp->blk_size / (128 / BITS_PER_BYTE);
161 u32 val;
162
163 /* 1/4 of SMP pool that is being fetched */
164 val = (nblks * smp_entries_per_blk) / 4;
165
166 mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), val * 1);
167 mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), val * 2);
168 mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), val * 3);
169}
170
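The watermark math above is easier to sanity-check with numbers plugged in. A runnable sketch, assuming the 4096-byte MMB size used by the default configs and the 128-bit (16-byte) SMP entries implied by the smp_entries_per_blk computation:

    #include <stdio.h>

    int main(void)
    {
            int blk_size = 4096;                        /* MMB size, bytes */
            int entries_per_blk = blk_size / (128 / 8); /* = 256 */
            int nblks = 4;                              /* pipe's allocation */
            int val = (nblks * entries_per_blk) / 4;

            /* thresholds at 1/4, 2/4 and 3/4 of the fetched entries */
            printf("WM0=%d WM1=%d WM2=%d\n", val, val * 2, val * 3);
            return 0;
    }

With four blocks this prints WM0=256 WM1=512 WM2=768.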
171/*
172 * NOTE: looks like if horizontal decimation is used (if we supported that)
173 * then the width used to calculate SMP block requirements is the post-
174 * decimated width. I.e. SMP buffering sits downstream of decimation (which
175 * presumably happens during the dma from scanout buffer).
176 */
177int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 width)
178{
179 struct mdp5_kms *mdp5_kms = get_kms(smp);
180 struct drm_device *dev = mdp5_kms->dev;
181 int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg);
182 int i, hsub, nplanes, nlines, nblks, ret;
183
184 nplanes = drm_format_num_planes(fmt);
185 hsub = drm_format_horz_chroma_subsampling(fmt);
186
187 /* different if BWC (compressed framebuffer?) enabled: */
188 nlines = 2;
189
190 for (i = 0, nblks = 0; i < nplanes; i++) {
191 int n, fetch_stride, cpp;
192
193 cpp = drm_format_plane_cpp(fmt, i);
194 fetch_stride = width * cpp / (i ? hsub : 1);
195
196 n = DIV_ROUND_UP(fetch_stride * nlines, smp->blk_size);
197
198 /* for hw rev v1.00 */
199 if (rev == 0)
200 n = roundup_pow_of_two(n);
201
202 DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n);
203 ret = smp_request_block(smp, pipe2client(pipe, i), n);
204 if (ret) {
205 dev_err(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
206 n, ret);
207 return ret;
208 }
209
210 nblks += n;
211 }
212
213 set_fifo_thresholds(smp, pipe, nblks);
214
102 return 0; 215 return 0;
103} 216}
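A worked example of the per-plane request above, again assuming the 4096-byte block size from the default config and nlines = 2 as in the code; on hw rev v1.00 each count would additionally be rounded up to a power of two (the values below already are):

    #include <stdio.h>

    static int div_round_up(int a, int b) { return (a + b - 1) / b; }

    int main(void)
    {
            int blk_size = 4096, nlines = 2;

            /* XRGB8888, 1920 wide: one plane, cpp = 4, no subsampling */
            printf("XRGB8888: %d MMBs\n",
                   div_round_up(1920 * 4 * nlines, blk_size));       /* 4 */

            /* NV12, 1920 wide: Y plane cpp = 1; CbCr plane cpp = 2, hsub = 2 */
            printf("NV12:     %d MMBs\n",
                   div_round_up(1920 * 1 * nlines, blk_size) +       /* 1 */
                   div_round_up(1920 * 2 / 2 * nlines, blk_size));   /* 1 */
            return 0;
    }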
104 217
105static void update_smp_state(struct mdp5_kms *mdp5_kms, 218/* Release SMP blocks for all clients of the pipe */
219void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe)
220{
221 int i;
222
223 for (i = 0; i < pipe2nclients(pipe); i++)
224 smp_request_block(smp, pipe2client(pipe, i), 0);
225 set_fifo_thresholds(smp, pipe, 0);
226}
227
228static void update_smp_state(struct mdp5_smp *smp,
106 enum mdp5_client_id cid, mdp5_smp_state_t *assigned) 229 enum mdp5_client_id cid, mdp5_smp_state_t *assigned)
107{ 230{
108 int cnt = mdp5_kms->smp_blk_cnt; 231 struct mdp5_kms *mdp5_kms = get_kms(smp);
109 uint32_t blk, val; 232 int cnt = smp->blk_cnt;
233 u32 blk, val;
110 234
111 for_each_set_bit(blk, *assigned, cnt) { 235 for_each_set_bit(blk, *assigned, cnt) {
112 int idx = blk / 3; 236 int idx = blk / 3;
@@ -135,39 +259,80 @@ static void update_smp_state(struct mdp5_kms *mdp5_kms,
135} 259}
136 260
137/* step #2: configure hw for union(pending, inuse): */ 261/* step #2: configure hw for union(pending, inuse): */
138void mdp5_smp_configure(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid) 262void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe)
139{ 263{
140 struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid]; 264 int cnt = smp->blk_cnt;
141 int cnt = mdp5_kms->smp_blk_cnt;
142 mdp5_smp_state_t assigned; 265 mdp5_smp_state_t assigned;
266 int i;
143 267
144 bitmap_or(assigned, ps->inuse, ps->pending, cnt); 268 for (i = 0; i < pipe2nclients(pipe); i++) {
145 update_smp_state(mdp5_kms, cid, &assigned); 269 enum mdp5_client_id cid = pipe2client(pipe, i);
270 struct mdp5_client_smp_state *ps = &smp->client_state[cid];
271
272 bitmap_or(assigned, ps->inuse, ps->pending, cnt);
273 update_smp_state(smp, cid, &assigned);
274 }
146} 275}
147 276
148/* step #3: after vblank, copy pending -> inuse: */ 277/* step #3: after vblank, copy pending -> inuse: */
149void mdp5_smp_commit(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid) 278void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
150{ 279{
151 struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid]; 280 int cnt = smp->blk_cnt;
152 int cnt = mdp5_kms->smp_blk_cnt;
153 mdp5_smp_state_t released; 281 mdp5_smp_state_t released;
282 int i;
283
284 for (i = 0; i < pipe2nclients(pipe); i++) {
285 enum mdp5_client_id cid = pipe2client(pipe, i);
286 struct mdp5_client_smp_state *ps = &smp->client_state[cid];
287
288 /*
289 * Figure out if there are any blocks we were previously
290 * using, which can be released and made available to other
291 * clients:
292 */
293 if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) {
294 unsigned long flags;
295
296 spin_lock_irqsave(&smp->state_lock, flags);
297 /* clear released blocks: */
298 bitmap_andnot(smp->state, smp->state, released, cnt);
299 spin_unlock_irqrestore(&smp->state_lock, flags);
154 300
155 /* 301 update_smp_state(smp, CID_UNUSED, &released);
156 * Figure out if there are any blocks we where previously 302 }
157 * using, which can be released and made available to other 303
158 * clients: 304 bitmap_copy(ps->inuse, ps->pending, cnt);
159 */
160 if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) {
161 unsigned long flags;
162
163 spin_lock_irqsave(&smp_lock, flags);
164 /* clear released blocks: */
165 bitmap_andnot(mdp5_kms->smp_state, mdp5_kms->smp_state,
166 released, cnt);
167 spin_unlock_irqrestore(&smp_lock, flags);
168
169 update_smp_state(mdp5_kms, CID_UNUSED, &released);
170 } 305 }
306}
307
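Taken together, request/configure/commit implement double-buffered bitmap bookkeeping. Below is a minimal standalone model of the same protocol (one client, a single 32-bit word standing in for mdp5_smp_state_t, grow-only request; illustrative only, not the kernel code):

    #include <stdint.h>
    #include <stdio.h>

    struct smp_model {
            uint32_t pool;    /* blocks taken out of the shared pool */
            uint32_t pending; /* blocks reserved for the next frame */
            uint32_t inuse;   /* blocks the hw is fetching from now */
    };

    /* step 1 (request): grow the pending set to nblks */
    static int smp_request(struct smp_model *s, int nblks)
    {
            while (__builtin_popcount(s->pending) < nblks) {
                    uint32_t free = ~s->pool;
                    uint32_t low = free & -free;  /* lowest free block */

                    if (!free)
                            return -1;            /* -ENOSPC */
                    s->pool |= low;
                    s->pending |= low;
            }
            return 0;
    }

    /* step 2 (configure): hw is programmed with union(pending, inuse) */
    static uint32_t smp_configure(const struct smp_model *s)
    {
            return s->pending | s->inuse;
    }

    /* step 3 (commit): after vblank, release inuse & ~pending */
    static void smp_commit(struct smp_model *s)
    {
            s->pool &= ~(s->inuse & ~s->pending);
            s->inuse = s->pending;
    }

    int main(void)
    {
            struct smp_model s = { 0, 0, 0 };

            smp_request(&s, 4);            /* next frame needs 4 MMBs */
            printf("cfg:   %08x\n", smp_configure(&s));
            smp_commit(&s);                /* vblank: pending -> inuse */
            printf("inuse: %08x\n", s.inuse);
            return 0;
    }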
308void mdp5_smp_destroy(struct mdp5_smp *smp)
309{
310 kfree(smp);
311}
312
313struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg)
314{
315 struct mdp5_smp *smp = NULL;
316 int ret;
317
318 smp = kzalloc(sizeof(*smp), GFP_KERNEL);
319 if (unlikely(!smp)) {
320 ret = -ENOMEM;
321 goto fail;
322 }
323
324 smp->dev = dev;
325 smp->blk_cnt = cfg->mmb_count;
326 smp->blk_size = cfg->mmb_size;
327
328 /* statically tied MMBs cannot be re-allocated: */
329 bitmap_copy(smp->state, cfg->reserved_state, smp->blk_cnt);
330 spin_lock_init(&smp->state_lock);
331
332 return smp;
333fail:
334 if (smp)
335 mdp5_smp_destroy(smp);
171 336
172 bitmap_copy(ps->inuse, ps->pending, cnt); 337 return ERR_PTR(ret);
173} 338}
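With mdp5_smp now an opaque handle, the caller-side lifecycle looks roughly as follows. This is a hedged sketch of how mdp5_kms and the plane code are expected to drive the API (the hw_cfg variable and the early returns are illustrative, not lifted from this diff):

    struct mdp5_smp *smp;
    int ret;

    /* init (mdp5_kms_init): block count/size come from mdp5_cfg */
    smp = mdp5_smp_init(dev, &hw_cfg->smp);
    if (IS_ERR(smp))
            return PTR_ERR(smp);

    /* modeset/pageflip (mdp5_plane_mode_set): steps #1 and #2 */
    ret = mdp5_smp_request(smp, pipe, fb->pixel_format, src_w);
    if (ret)
            return ret;
    mdp5_smp_configure(smp, pipe);

    /* after vblank (mdp5_plane_complete_flip): step #3 */
    mdp5_smp_commit(smp, pipe);

    /* plane disable: give everything back to the pool */
    mdp5_smp_release(smp, pipe);

    /* teardown */
    mdp5_smp_destroy(smp);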
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
index 0ab739e1a1dd..e47179f63585 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
@@ -1,4 +1,5 @@
1/* 1/*
2 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
2 * Copyright (C) 2013 Red Hat 3 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com> 4 * Author: Rob Clark <robdclark@gmail.com>
4 * 5 *
@@ -20,22 +21,26 @@
20 21
21#include "msm_drv.h" 22#include "msm_drv.h"
22 23
23#define MAX_SMP_BLOCKS 22
24#define SMP_BLK_SIZE 4096
25#define SMP_ENTRIES_PER_BLK (SMP_BLK_SIZE / 16)
26
27typedef DECLARE_BITMAP(mdp5_smp_state_t, MAX_SMP_BLOCKS);
28
29struct mdp5_client_smp_state { 24struct mdp5_client_smp_state {
30 mdp5_smp_state_t inuse; 25 mdp5_smp_state_t inuse;
31 mdp5_smp_state_t pending; 26 mdp5_smp_state_t pending;
32}; 27};
33 28
34struct mdp5_kms; 29struct mdp5_kms;
30struct mdp5_smp;
31
32/*
33 * SMP module prototypes:
34 * mdp5_smp_init() returns an SMP @handler,
35 * which is then used to call the other mdp5_smp_*(handler, ...) functions.
36 */
35 37
36int mdp5_smp_request(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid, int nblks); 38struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg);
37void mdp5_smp_configure(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid); 39void mdp5_smp_destroy(struct mdp5_smp *smp);
38void mdp5_smp_commit(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid);
39 40
41int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 width);
42void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe);
43void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe);
44void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe);
40 45
41#endif /* __MDP5_SMP_H__ */ 46#endif /* __MDP5_SMP_H__ */
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 8cf3361daba3..f0de412e13dc 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -117,7 +117,7 @@ int msm_atomic_commit(struct drm_device *dev,
117 if (!plane) 117 if (!plane)
118 continue; 118 continue;
119 119
120 if (plane->state->fb != new_state->fb) 120 if ((plane->state->fb != new_state->fb) && new_state->fb)
121 add_fb(c, new_state->fb); 121 add_fb(c, new_state->fb);
122 } 122 }
123 123
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 4b52d752bb6e..136303818436 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -215,7 +215,6 @@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev);
215struct hdmi; 215struct hdmi;
216int hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev, 216int hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev,
217 struct drm_encoder *encoder); 217 struct drm_encoder *encoder);
218irqreturn_t hdmi_irq(int irq, void *dev_id);
219void __init hdmi_register(void); 218void __init hdmi_register(void);
220void __exit hdmi_unregister(void); 219void __exit hdmi_unregister(void);
221 220
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index f4e42d506ff7..84dec161d836 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -120,6 +120,8 @@ void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id)
120uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane) 120uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane)
121{ 121{
122 struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb); 122 struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
123 if (!msm_fb->planes[plane])
124 return 0;
123 return msm_gem_iova(msm_fb->planes[plane], id); 125 return msm_gem_iova(msm_fb->planes[plane], id);
124} 126}
125 127
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 15a0fec99c70..7fb4876388e7 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -68,6 +68,24 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev);
68/* TODO move these helper iterator macro somewhere common: */ 68/* TODO move these helper iterator macro somewhere common: */
69#define for_each_plane_on_crtc(_crtc, _plane) \ 69#define for_each_plane_on_crtc(_crtc, _plane) \
70 list_for_each_entry((_plane), &(_crtc)->dev->mode_config.plane_list, head) \ 70 list_for_each_entry((_plane), &(_crtc)->dev->mode_config.plane_list, head) \
71 if ((_plane)->crtc == (_crtc)) 71 if ((_plane)->state->crtc == (_crtc))
72
73static inline bool
74__plane_will_be_attached_to_crtc(struct drm_atomic_state *state,
75 struct drm_plane *plane, struct drm_crtc *crtc)
76{
77 int idx = drm_plane_index(plane);
78
79 /* if plane is modified in incoming state, use the new state: */
80 if (state->plane_states[idx])
81 return state->plane_states[idx]->crtc == crtc;
82
83 /* otherwise, current state: */
84 return plane->state->crtc == crtc;
85}
86
87#define for_each_pending_plane_on_crtc(_state, _crtc, _plane) \
88 list_for_each_entry((_plane), &(_crtc)->dev->mode_config.plane_list, head) \
89 if (__plane_will_be_attached_to_crtc((_state), (_plane), (_crtc)))
72 90
73#endif /* __MSM_KMS_H__ */ 91#endif /* __MSM_KMS_H__ */
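The distinction the helper draws matters during atomic check/commit: a plane that is part of the incoming state still has plane->state pointing at its old CRTC binding, so state->plane_states[] must be consulted first. A hedged usage sketch (the counting loop is hypothetical):

    struct drm_plane *plane;
    int cnt = 0;

    /* how many planes will be on this CRTC once the state commits? */
    for_each_pending_plane_on_crtc(state, crtc, plane)
            cnt++;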