author	Dave Airlie <airlied@redhat.com>	2015-04-07 21:14:40 -0400
committer	Dave Airlie <airlied@redhat.com>	2015-04-07 21:14:40 -0400
commit	fa37a8c8237613d525437c4f9ec8add41749b314 (patch)
tree	b8c9b9e8eace912b0f3e2fd28969865ce94b2e57 /drivers/gpu
parent	a08aad54be7fca595af2cc7f482961e1af99c4a8 (diff)
parent	d5af49c92a8aff8236e7b0bb35e9af364000c017 (diff)
Merge branch 'msm-next' of git://people.freedesktop.org/~robclark/linux into drm-next

1) support for "stolen mem" for splash-screen take-over
2) additional hdmi pixel clks
3) various pipe flush related fixes
4) support for snapdragon 410 (8x16)
5) support for DSI and dual-DSI

It includes one small patch to export tile-group functions (which was
ack'd by you), as these are used to explain to userspace dual-dsi
configurations (with left and right tile).

* 'msm-next' of git://people.freedesktop.org/~robclark/linux: (24 commits)
  drm/msm/mdp5: Enable DSI connector in msm drm driver
  drm/msm: Initial add DSI connector support
  drm/msm: Add split display interface
  drm/msm/mdp5: Move *_modeset_init out of construct_encoder function
  drm: export tile-group functions
  drm/msm/mdp5: Remove CTL flush dummy bits
  drm/msm/mdp5: Update headers (add CTL flush bits)
  drm/msm/mdp5: Add hardware configuration for msm8x16
  drm/msm/mdp5: Get SMP client list from mdp5_cfg
  drm/msm/mdp5: Update headers (remove enum mdp5_client_id)
  drm/msm/mdp5: Separate MDP5 domain from MDSS domain
  drm/msm/mdp5: Update headers (introduce MDP5 domain)
  drm/msm/dsi: Update generated DSI header file
  drm/msm/mdp5: Fix PIPE source image size settings
  drm/msm/mdp5: Update generated mdp5 header file with DSI support
  drm/msm/mdp5: Add pingpong entry to mdp5 config table
  drm/msm/mdp5: Make the intf connection in config module
  drm/msm/mdp5: Add START signal to kick off certain pipelines
  drm/msm/mdp5: Enhance operation mode for pipeline configuration
  drm/msm/mdp5: Update generated header files
  ...
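For context on the tile-group export mentioned above: the tile group gives both halves of a dual-DSI panel a shared identity, so userspace can treat the left and right connectors as one logical monitor. Below is a minimal, hedged sketch of how a driver might use the now-exported helpers; the function name, the topology bytes, and the h_loc parameter are illustrative assumptions, not code from this pull.

	/* Hypothetical helper, not part of this pull: mark one half of a
	 * dual-DSI panel as a tile so userspace treats both connectors as
	 * a single logical monitor.
	 */
	static void example_dsi_set_tile(struct drm_connector *connector,
					 int h_loc)	/* 0 = left, 1 = right */
	{
		struct drm_device *dev = connector->dev;
		char topology[8] = "dualdsi";	/* shared id for both halves (assumption) */

		/* Both halves look up the same topology bytes; the first
		 * caller creates the group, the second finds and refs it.
		 */
		connector->tile_group = drm_mode_get_tile_group(dev, topology);
		if (!connector->tile_group)
			connector->tile_group =
				drm_mode_create_tile_group(dev, topology);

		connector->has_tile = true;
		connector->num_h_tile = 2;	/* two tiles side by side */
		connector->num_v_tile = 1;
		connector->tile_h_loc = h_loc;
		connector->tile_v_loc = 0;
	}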
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/drm_crtc.c                       |    2
-rw-r--r--  drivers/gpu/drm/msm/Kconfig                      |   11
-rw-r--r--  drivers/gpu/drm/msm/Makefile                     |    5
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.c                    |  212
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.h                    |  117
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.xml.h                |  418
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_host.c               | 1993
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_manager.c            |  705
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_phy.c                |  352
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c         |   34
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h          |  399
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c          |  102
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h          |   18
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c  |  343
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c         |   86
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c          |  315
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h          |   75
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c      |   83
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c          |   26
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c          |  200
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h          |   75
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c        |    4
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c          |   64
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c                    |  100
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h                    |   29
-rw-r--r--  drivers/gpu/drm/msm/msm_fbdev.c                  |    3
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c                    |   25
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.h                    |    5
-rw-r--r--  drivers/gpu/drm/msm/msm_kms.h                    |    4
29 files changed, 5327 insertions(+), 478 deletions(-)
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index d576a4dea64f..b3989e23195e 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -5599,6 +5599,7 @@ struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
 	mutex_unlock(&dev->mode_config.idr_mutex);
 	return NULL;
 }
+EXPORT_SYMBOL(drm_mode_get_tile_group);
 
 /**
  * drm_mode_create_tile_group - create a tile group from a displayid description
@@ -5637,3 +5638,4 @@ struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
 	mutex_unlock(&dev->mode_config.idr_mutex);
 	return tg;
 }
+EXPORT_SYMBOL(drm_mode_create_tile_group);
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index bacbbb70f679..0a6f6764a37c 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -35,3 +35,14 @@ config DRM_MSM_REGISTER_LOGGING
 	  Compile in support for logging register reads/writes in a format
 	  that can be parsed by envytools demsm tool. If enabled, register
 	  logging can be switched on via msm.reglog=y module param.
+
+config DRM_MSM_DSI
+	bool "Enable DSI support in MSM DRM driver"
+	depends on DRM_MSM
+	select DRM_PANEL
+	select DRM_MIPI_DSI
+	default y
+	help
+	  Choose this option if you have a need for MIPI DSI connector
+	  support.
+
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 674a132fd76e..ab2086783fee 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -50,5 +50,10 @@ msm-y := \
 
 msm-$(CONFIG_DRM_MSM_FBDEV) += msm_fbdev.o
 msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o
+msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
+			dsi/dsi_host.o \
+			dsi/dsi_manager.o \
+			dsi/dsi_phy.o \
+			mdp/mdp5/mdp5_cmd_encoder.o
 
 obj-$(CONFIG_DRM_MSM) += msm.o
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
new file mode 100644
index 000000000000..28d1f95a90cc
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dsi.h"
+
+struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi)
+{
+	if (!msm_dsi || !msm_dsi->panel)
+		return NULL;
+
+	return (msm_dsi->panel_flags & MIPI_DSI_MODE_VIDEO) ?
+		msm_dsi->encoders[MSM_DSI_VIDEO_ENCODER_ID] :
+		msm_dsi->encoders[MSM_DSI_CMD_ENCODER_ID];
+}
+
+static void dsi_destroy(struct msm_dsi *msm_dsi)
+{
+	if (!msm_dsi)
+		return;
+
+	msm_dsi_manager_unregister(msm_dsi);
+	if (msm_dsi->host) {
+		msm_dsi_host_destroy(msm_dsi->host);
+		msm_dsi->host = NULL;
+	}
+
+	platform_set_drvdata(msm_dsi->pdev, NULL);
+}
+
+static struct msm_dsi *dsi_init(struct platform_device *pdev)
+{
+	struct msm_dsi *msm_dsi = NULL;
+	int ret;
+
+	if (!pdev) {
+		dev_err(&pdev->dev, "no dsi device\n");
+		ret = -ENXIO;
+		goto fail;
+	}
+
+	msm_dsi = devm_kzalloc(&pdev->dev, sizeof(*msm_dsi), GFP_KERNEL);
+	if (!msm_dsi) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+	DBG("dsi probed=%p", msm_dsi);
+
+	msm_dsi->pdev = pdev;
+	platform_set_drvdata(pdev, msm_dsi);
+
+	/* Init dsi host */
+	ret = msm_dsi_host_init(msm_dsi);
+	if (ret)
+		goto fail;
+
+	/* Register to dsi manager */
+	ret = msm_dsi_manager_register(msm_dsi);
+	if (ret)
+		goto fail;
+
+	return msm_dsi;
+
+fail:
+	if (msm_dsi)
+		dsi_destroy(msm_dsi);
+
+	return ERR_PTR(ret);
+}
+
+static int dsi_bind(struct device *dev, struct device *master, void *data)
+{
+	struct drm_device *drm = dev_get_drvdata(master);
+	struct msm_drm_private *priv = drm->dev_private;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct msm_dsi *msm_dsi;
+
+	DBG("");
+	msm_dsi = dsi_init(pdev);
+	if (IS_ERR(msm_dsi))
+		return PTR_ERR(msm_dsi);
+
+	priv->dsi[msm_dsi->id] = msm_dsi;
+
+	return 0;
+}
+
+static void dsi_unbind(struct device *dev, struct device *master,
+		void *data)
+{
+	struct drm_device *drm = dev_get_drvdata(master);
+	struct msm_drm_private *priv = drm->dev_private;
+	struct msm_dsi *msm_dsi = dev_get_drvdata(dev);
+	int id = msm_dsi->id;
+
+	if (priv->dsi[id]) {
+		dsi_destroy(msm_dsi);
+		priv->dsi[id] = NULL;
+	}
+}
+
+static const struct component_ops dsi_ops = {
+	.bind = dsi_bind,
+	.unbind = dsi_unbind,
+};
+
+static int dsi_dev_probe(struct platform_device *pdev)
+{
+	return component_add(&pdev->dev, &dsi_ops);
+}
+
+static int dsi_dev_remove(struct platform_device *pdev)
+{
+	DBG("");
+	component_del(&pdev->dev, &dsi_ops);
+	return 0;
+}
+
+static const struct of_device_id dt_match[] = {
+	{ .compatible = "qcom,mdss-dsi-ctrl" },
+	{}
+};
+
+static struct platform_driver dsi_driver = {
+	.probe = dsi_dev_probe,
+	.remove = dsi_dev_remove,
+	.driver = {
+		.name = "msm_dsi",
+		.of_match_table = dt_match,
+	},
+};
+
+void __init msm_dsi_register(void)
+{
+	DBG("");
+	platform_driver_register(&dsi_driver);
+}
+
+void __exit msm_dsi_unregister(void)
+{
+	DBG("");
+	platform_driver_unregister(&dsi_driver);
+}
+
+int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
+		struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM])
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	int ret, i;
+
+	if (WARN_ON(!encoders[MSM_DSI_VIDEO_ENCODER_ID] ||
+		!encoders[MSM_DSI_CMD_ENCODER_ID]))
+		return -EINVAL;
+
+	msm_dsi->dev = dev;
+
+	ret = msm_dsi_host_modeset_init(msm_dsi->host, dev);
+	if (ret) {
+		dev_err(dev->dev, "failed to modeset init host: %d\n", ret);
+		goto fail;
+	}
+
+	msm_dsi->bridge = msm_dsi_manager_bridge_init(msm_dsi->id);
+	if (IS_ERR(msm_dsi->bridge)) {
+		ret = PTR_ERR(msm_dsi->bridge);
+		dev_err(dev->dev, "failed to create dsi bridge: %d\n", ret);
+		msm_dsi->bridge = NULL;
+		goto fail;
+	}
+
+	msm_dsi->connector = msm_dsi_manager_connector_init(msm_dsi->id);
+	if (IS_ERR(msm_dsi->connector)) {
+		ret = PTR_ERR(msm_dsi->connector);
+		dev_err(dev->dev, "failed to create dsi connector: %d\n", ret);
+		msm_dsi->connector = NULL;
+		goto fail;
+	}
+
+	for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) {
+		encoders[i]->bridge = msm_dsi->bridge;
+		msm_dsi->encoders[i] = encoders[i];
+	}
+
+	priv->bridges[priv->num_bridges++] = msm_dsi->bridge;
+	priv->connectors[priv->num_connectors++] = msm_dsi->connector;
+
+	return 0;
+fail:
+	if (msm_dsi) {
+		/* bridge/connector are normally destroyed by drm: */
+		if (msm_dsi->bridge) {
+			msm_dsi_manager_bridge_destroy(msm_dsi->bridge);
+			msm_dsi->bridge = NULL;
+		}
+		if (msm_dsi->connector) {
+			msm_dsi->connector->funcs->destroy(msm_dsi->connector);
+			msm_dsi->connector = NULL;
+		}
+	}
+
+	return ret;
+}
+
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
new file mode 100644
index 000000000000..10f54d4e379a
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DSI_CONNECTOR_H__
+#define __DSI_CONNECTOR_H__
+
+#include <linux/platform_device.h>
+
+#include "drm_crtc.h"
+#include "drm_mipi_dsi.h"
+#include "drm_panel.h"
+
+#include "msm_drv.h"
+
+#define DSI_0		0
+#define DSI_1		1
+#define DSI_MAX		2
+
+#define DSI_CLOCK_MASTER	DSI_0
+#define DSI_CLOCK_SLAVE		DSI_1
+
+#define DSI_LEFT		DSI_0
+#define DSI_RIGHT		DSI_1
+
+/* According to the current drm framework sequence, take the encoder of
+ * DSI_1 as master encoder
+ */
+#define DSI_ENCODER_MASTER	DSI_1
+#define DSI_ENCODER_SLAVE	DSI_0
+
+struct msm_dsi {
+	struct drm_device *dev;
+	struct platform_device *pdev;
+
+	struct drm_connector *connector;
+	struct drm_bridge *bridge;
+
+	struct mipi_dsi_host *host;
+	struct msm_dsi_phy *phy;
+	struct drm_panel *panel;
+	unsigned long panel_flags;
+	bool phy_enabled;
+
+	/* the encoders we are hooked to (outside of dsi block) */
+	struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM];
+
+	int id;
+};
+
+/* dsi manager */
+struct drm_bridge *msm_dsi_manager_bridge_init(u8 id);
+void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge);
+struct drm_connector *msm_dsi_manager_connector_init(u8 id);
+int msm_dsi_manager_phy_enable(int id,
+	const unsigned long bit_rate, const unsigned long esc_rate,
+	u32 *clk_pre, u32 *clk_post);
+void msm_dsi_manager_phy_disable(int id);
+int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg);
+bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 iova, u32 len);
+int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
+void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi);
+
+/* msm dsi */
+struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi);
+
+/* dsi host */
+int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
+					const struct mipi_dsi_msg *msg);
+void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
+					const struct mipi_dsi_msg *msg);
+int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
+					const struct mipi_dsi_msg *msg);
+int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
+					const struct mipi_dsi_msg *msg);
+void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host,
+					u32 iova, u32 len);
+int msm_dsi_host_enable(struct mipi_dsi_host *host);
+int msm_dsi_host_disable(struct mipi_dsi_host *host);
+int msm_dsi_host_power_on(struct mipi_dsi_host *host);
+int msm_dsi_host_power_off(struct mipi_dsi_host *host);
+int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
+					struct drm_display_mode *mode);
+struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host,
+					unsigned long *panel_flags);
+int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer);
+void msm_dsi_host_unregister(struct mipi_dsi_host *host);
+void msm_dsi_host_destroy(struct mipi_dsi_host *host);
+int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
+					struct drm_device *dev);
+int msm_dsi_host_init(struct msm_dsi *msm_dsi);
+
+/* dsi phy */
+struct msm_dsi_phy;
+enum msm_dsi_phy_type {
+	MSM_DSI_PHY_UNKNOWN,
+	MSM_DSI_PHY_28NM,
+	MSM_DSI_PHY_MAX
+};
+struct msm_dsi_phy *msm_dsi_phy_init(struct platform_device *pdev,
+			enum msm_dsi_phy_type type, int id);
+int msm_dsi_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel,
+	const unsigned long bit_rate, const unsigned long esc_rate);
+int msm_dsi_phy_disable(struct msm_dsi_phy *phy);
+void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy,
+					u32 *clk_pre, u32 *clk_post);
+#endif /* __DSI_CONNECTOR_H__ */
+
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index abf1bba520bf..1dcfae265e98 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -8,19 +8,10 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00)
-
-Copyright (C) 2013 by the following authors:
+- /usr2/hali/local/envytools/envytools/rnndb/dsi/dsi.xml ( 18681 bytes, from 2015-03-04 23:08:31)
+- /usr2/hali/local/envytools/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-01-28 21:43:22)
+
+Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 
 Permission is hereby granted, free of charge, to any person obtaining
@@ -51,11 +42,11 @@ enum dsi_traffic_mode {
 	BURST_MODE = 2,
 };
 
-enum dsi_dst_format {
-	DST_FORMAT_RGB565 = 0,
-	DST_FORMAT_RGB666 = 1,
-	DST_FORMAT_RGB666_LOOSE = 2,
-	DST_FORMAT_RGB888 = 3,
-};
+enum dsi_vid_dst_format {
+	VID_DST_FORMAT_RGB565 = 0,
+	VID_DST_FORMAT_RGB666 = 1,
+	VID_DST_FORMAT_RGB666_LOOSE = 2,
+	VID_DST_FORMAT_RGB888 = 3,
+};
 
 enum dsi_rgb_swap {
@@ -69,20 +60,63 @@ enum dsi_rgb_swap {
 
 enum dsi_cmd_trigger {
 	TRIGGER_NONE = 0,
+	TRIGGER_SEOF = 1,
 	TRIGGER_TE = 2,
 	TRIGGER_SW = 4,
 	TRIGGER_SW_SEOF = 5,
 	TRIGGER_SW_TE = 6,
 };
 
+enum dsi_cmd_dst_format {
+	CMD_DST_FORMAT_RGB111 = 0,
+	CMD_DST_FORMAT_RGB332 = 3,
+	CMD_DST_FORMAT_RGB444 = 4,
+	CMD_DST_FORMAT_RGB565 = 6,
+	CMD_DST_FORMAT_RGB666 = 7,
+	CMD_DST_FORMAT_RGB888 = 8,
+};
+
+enum dsi_lane_swap {
+	LANE_SWAP_0123 = 0,
+	LANE_SWAP_3012 = 1,
+	LANE_SWAP_2301 = 2,
+	LANE_SWAP_1230 = 3,
+	LANE_SWAP_0321 = 4,
+	LANE_SWAP_1032 = 5,
+	LANE_SWAP_2103 = 6,
+	LANE_SWAP_3210 = 7,
+};
+
 #define DSI_IRQ_CMD_DMA_DONE	0x00000001
 #define DSI_IRQ_MASK_CMD_DMA_DONE	0x00000002
 #define DSI_IRQ_CMD_MDP_DONE	0x00000100
 #define DSI_IRQ_MASK_CMD_MDP_DONE	0x00000200
 #define DSI_IRQ_VIDEO_DONE	0x00010000
 #define DSI_IRQ_MASK_VIDEO_DONE	0x00020000
+#define DSI_IRQ_BTA_DONE	0x00100000
+#define DSI_IRQ_MASK_BTA_DONE	0x00200000
 #define DSI_IRQ_ERROR	0x01000000
 #define DSI_IRQ_MASK_ERROR	0x02000000
+#define REG_DSI_6G_HW_VERSION	0x00000000
+#define DSI_6G_HW_VERSION_MAJOR__MASK	0xf0000000
+#define DSI_6G_HW_VERSION_MAJOR__SHIFT	28
+static inline uint32_t DSI_6G_HW_VERSION_MAJOR(uint32_t val)
+{
+	return ((val) << DSI_6G_HW_VERSION_MAJOR__SHIFT) & DSI_6G_HW_VERSION_MAJOR__MASK;
+}
+#define DSI_6G_HW_VERSION_MINOR__MASK	0x0fff0000
+#define DSI_6G_HW_VERSION_MINOR__SHIFT	16
+static inline uint32_t DSI_6G_HW_VERSION_MINOR(uint32_t val)
+{
+	return ((val) << DSI_6G_HW_VERSION_MINOR__SHIFT) & DSI_6G_HW_VERSION_MINOR__MASK;
+}
+#define DSI_6G_HW_VERSION_STEP__MASK	0x0000ffff
+#define DSI_6G_HW_VERSION_STEP__SHIFT	0
+static inline uint32_t DSI_6G_HW_VERSION_STEP(uint32_t val)
+{
+	return ((val) << DSI_6G_HW_VERSION_STEP__SHIFT) & DSI_6G_HW_VERSION_STEP__MASK;
+}
+
 #define REG_DSI_CTRL	0x00000000
 #define DSI_CTRL_ENABLE	0x00000001
 #define DSI_CTRL_VID_MODE_EN	0x00000002
@@ -96,11 +130,15 @@ enum dsi_cmd_trigger {
 #define DSI_CTRL_CRC_CHECK	0x01000000
 
 #define REG_DSI_STATUS0	0x00000004
+#define DSI_STATUS0_CMD_MODE_ENGINE_BUSY	0x00000001
 #define DSI_STATUS0_CMD_MODE_DMA_BUSY	0x00000002
+#define DSI_STATUS0_CMD_MODE_MDP_BUSY	0x00000004
 #define DSI_STATUS0_VIDEO_MODE_ENGINE_BUSY	0x00000008
 #define DSI_STATUS0_DSI_BUSY	0x00000010
+#define DSI_STATUS0_INTERLEAVE_OP_CONTENTION	0x80000000
 
 #define REG_DSI_FIFO_STATUS	0x00000008
+#define DSI_FIFO_STATUS_CMD_MDP_FIFO_UNDERFLOW	0x00000080
 
 #define REG_DSI_VID_CFG0	0x0000000c
 #define DSI_VID_CFG0_VIRT_CHANNEL__MASK	0x00000003
@@ -111,7 +149,7 @@ static inline uint32_t DSI_VID_CFG0_VIRT_CHANNEL(uint32_t val)
 }
 #define DSI_VID_CFG0_DST_FORMAT__MASK	0x00000030
 #define DSI_VID_CFG0_DST_FORMAT__SHIFT	4
-static inline uint32_t DSI_VID_CFG0_DST_FORMAT(enum dsi_dst_format val)
+static inline uint32_t DSI_VID_CFG0_DST_FORMAT(enum dsi_vid_dst_format val)
 {
 	return ((val) << DSI_VID_CFG0_DST_FORMAT__SHIFT) & DSI_VID_CFG0_DST_FORMAT__MASK;
 }
@@ -129,21 +167,15 @@ static inline uint32_t DSI_VID_CFG0_TRAFFIC_MODE(enum dsi_traffic_mode val)
 #define DSI_VID_CFG0_PULSE_MODE_HSA_HE	0x10000000
 
 #define REG_DSI_VID_CFG1	0x0000001c
-#define DSI_VID_CFG1_R_SEL	0x00000010
-#define DSI_VID_CFG1_G_SEL	0x00000100
-#define DSI_VID_CFG1_B_SEL	0x00001000
-#define DSI_VID_CFG1_RGB_SWAP__MASK	0x00070000
-#define DSI_VID_CFG1_RGB_SWAP__SHIFT	16
+#define DSI_VID_CFG1_R_SEL	0x00000001
+#define DSI_VID_CFG1_G_SEL	0x00000010
+#define DSI_VID_CFG1_B_SEL	0x00000100
+#define DSI_VID_CFG1_RGB_SWAP__MASK	0x00007000
+#define DSI_VID_CFG1_RGB_SWAP__SHIFT	12
 static inline uint32_t DSI_VID_CFG1_RGB_SWAP(enum dsi_rgb_swap val)
 {
 	return ((val) << DSI_VID_CFG1_RGB_SWAP__SHIFT) & DSI_VID_CFG1_RGB_SWAP__MASK;
 }
-#define DSI_VID_CFG1_INTERLEAVE_MAX__MASK	0x00f00000
-#define DSI_VID_CFG1_INTERLEAVE_MAX__SHIFT	20
-static inline uint32_t DSI_VID_CFG1_INTERLEAVE_MAX(uint32_t val)
-{
-	return ((val) << DSI_VID_CFG1_INTERLEAVE_MAX__SHIFT) & DSI_VID_CFG1_INTERLEAVE_MAX__MASK;
-}
 
 #define REG_DSI_ACTIVE_H	0x00000020
 #define DSI_ACTIVE_H_START__MASK	0x00000fff
@@ -201,32 +233,115 @@ static inline uint32_t DSI_ACTIVE_HSYNC_END(uint32_t val)
 	return ((val) << DSI_ACTIVE_HSYNC_END__SHIFT) & DSI_ACTIVE_HSYNC_END__MASK;
 }
 
-#define REG_DSI_ACTIVE_VSYNC	0x00000034
-#define DSI_ACTIVE_VSYNC_START__MASK	0x00000fff
-#define DSI_ACTIVE_VSYNC_START__SHIFT	0
-static inline uint32_t DSI_ACTIVE_VSYNC_START(uint32_t val)
+#define REG_DSI_ACTIVE_VSYNC_HPOS	0x00000030
+#define DSI_ACTIVE_VSYNC_HPOS_START__MASK	0x00000fff
+#define DSI_ACTIVE_VSYNC_HPOS_START__SHIFT	0
+static inline uint32_t DSI_ACTIVE_VSYNC_HPOS_START(uint32_t val)
 {
-	return ((val) << DSI_ACTIVE_VSYNC_START__SHIFT) & DSI_ACTIVE_VSYNC_START__MASK;
+	return ((val) << DSI_ACTIVE_VSYNC_HPOS_START__SHIFT) & DSI_ACTIVE_VSYNC_HPOS_START__MASK;
 }
-#define DSI_ACTIVE_VSYNC_END__MASK	0x0fff0000
-#define DSI_ACTIVE_VSYNC_END__SHIFT	16
-static inline uint32_t DSI_ACTIVE_VSYNC_END(uint32_t val)
+#define DSI_ACTIVE_VSYNC_HPOS_END__MASK	0x0fff0000
+#define DSI_ACTIVE_VSYNC_HPOS_END__SHIFT	16
+static inline uint32_t DSI_ACTIVE_VSYNC_HPOS_END(uint32_t val)
 {
-	return ((val) << DSI_ACTIVE_VSYNC_END__SHIFT) & DSI_ACTIVE_VSYNC_END__MASK;
+	return ((val) << DSI_ACTIVE_VSYNC_HPOS_END__SHIFT) & DSI_ACTIVE_VSYNC_HPOS_END__MASK;
+}
+
+#define REG_DSI_ACTIVE_VSYNC_VPOS	0x00000034
+#define DSI_ACTIVE_VSYNC_VPOS_START__MASK	0x00000fff
+#define DSI_ACTIVE_VSYNC_VPOS_START__SHIFT	0
+static inline uint32_t DSI_ACTIVE_VSYNC_VPOS_START(uint32_t val)
+{
+	return ((val) << DSI_ACTIVE_VSYNC_VPOS_START__SHIFT) & DSI_ACTIVE_VSYNC_VPOS_START__MASK;
+}
+#define DSI_ACTIVE_VSYNC_VPOS_END__MASK	0x0fff0000
+#define DSI_ACTIVE_VSYNC_VPOS_END__SHIFT	16
+static inline uint32_t DSI_ACTIVE_VSYNC_VPOS_END(uint32_t val)
+{
+	return ((val) << DSI_ACTIVE_VSYNC_VPOS_END__SHIFT) & DSI_ACTIVE_VSYNC_VPOS_END__MASK;
 }
 
 #define REG_DSI_CMD_DMA_CTRL	0x00000038
+#define DSI_CMD_DMA_CTRL_BROADCAST_EN	0x80000000
 #define DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER	0x10000000
 #define DSI_CMD_DMA_CTRL_LOW_POWER	0x04000000
 
 #define REG_DSI_CMD_CFG0	0x0000003c
+#define DSI_CMD_CFG0_DST_FORMAT__MASK	0x0000000f
+#define DSI_CMD_CFG0_DST_FORMAT__SHIFT	0
+static inline uint32_t DSI_CMD_CFG0_DST_FORMAT(enum dsi_cmd_dst_format val)
+{
+	return ((val) << DSI_CMD_CFG0_DST_FORMAT__SHIFT) & DSI_CMD_CFG0_DST_FORMAT__MASK;
+}
+#define DSI_CMD_CFG0_R_SEL	0x00000010
+#define DSI_CMD_CFG0_G_SEL	0x00000100
+#define DSI_CMD_CFG0_B_SEL	0x00001000
+#define DSI_CMD_CFG0_INTERLEAVE_MAX__MASK	0x00f00000
+#define DSI_CMD_CFG0_INTERLEAVE_MAX__SHIFT	20
+static inline uint32_t DSI_CMD_CFG0_INTERLEAVE_MAX(uint32_t val)
+{
+	return ((val) << DSI_CMD_CFG0_INTERLEAVE_MAX__SHIFT) & DSI_CMD_CFG0_INTERLEAVE_MAX__MASK;
+}
+#define DSI_CMD_CFG0_RGB_SWAP__MASK	0x00070000
+#define DSI_CMD_CFG0_RGB_SWAP__SHIFT	16
+static inline uint32_t DSI_CMD_CFG0_RGB_SWAP(enum dsi_rgb_swap val)
+{
+	return ((val) << DSI_CMD_CFG0_RGB_SWAP__SHIFT) & DSI_CMD_CFG0_RGB_SWAP__MASK;
+}
 
 #define REG_DSI_CMD_CFG1	0x00000040
+#define DSI_CMD_CFG1_WR_MEM_START__MASK	0x000000ff
+#define DSI_CMD_CFG1_WR_MEM_START__SHIFT	0
+static inline uint32_t DSI_CMD_CFG1_WR_MEM_START(uint32_t val)
+{
+	return ((val) << DSI_CMD_CFG1_WR_MEM_START__SHIFT) & DSI_CMD_CFG1_WR_MEM_START__MASK;
+}
+#define DSI_CMD_CFG1_WR_MEM_CONTINUE__MASK	0x0000ff00
+#define DSI_CMD_CFG1_WR_MEM_CONTINUE__SHIFT	8
+static inline uint32_t DSI_CMD_CFG1_WR_MEM_CONTINUE(uint32_t val)
+{
+	return ((val) << DSI_CMD_CFG1_WR_MEM_CONTINUE__SHIFT) & DSI_CMD_CFG1_WR_MEM_CONTINUE__MASK;
+}
+#define DSI_CMD_CFG1_INSERT_DCS_COMMAND	0x00010000
 
 #define REG_DSI_DMA_BASE	0x00000044
 
 #define REG_DSI_DMA_LEN	0x00000048
 
+#define REG_DSI_CMD_MDP_STREAM_CTRL	0x00000054
+#define DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE__MASK	0x0000003f
+#define DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE__SHIFT	0
+static inline uint32_t DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE(uint32_t val)
+{
+	return ((val) << DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE__SHIFT) & DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE__MASK;
+}
+#define DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL__MASK	0x00000300
+#define DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL__SHIFT	8
+static inline uint32_t DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL(uint32_t val)
+{
+	return ((val) << DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL__SHIFT) & DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL__MASK;
+}
+#define DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT__MASK	0xffff0000
+#define DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT__SHIFT	16
+static inline uint32_t DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT(uint32_t val)
+{
+	return ((val) << DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT__SHIFT) & DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT__MASK;
+}
+
+#define REG_DSI_CMD_MDP_STREAM_TOTAL	0x00000058
+#define DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL__MASK	0x00000fff
+#define DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL__SHIFT	0
+static inline uint32_t DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL(uint32_t val)
+{
+	return ((val) << DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL__SHIFT) & DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL__MASK;
+}
+#define DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL__MASK	0x0fff0000
+#define DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL__SHIFT	16
+static inline uint32_t DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL(uint32_t val)
+{
+	return ((val) << DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL__SHIFT) & DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL__MASK;
+}
+
 #define REG_DSI_ACK_ERR_STATUS	0x00000064
 
 static inline uint32_t REG_DSI_RDBK(uint32_t i0) { return 0x00000068 + 0x4*i0; }
@@ -234,19 +349,25 @@ static inline uint32_t REG_DSI_RDBK(uint32_t i0) { return 0x00000068 + 0x4*i0; }
 static inline uint32_t REG_DSI_RDBK_DATA(uint32_t i0) { return 0x00000068 + 0x4*i0; }
 
 #define REG_DSI_TRIG_CTRL	0x00000080
-#define DSI_TRIG_CTRL_DMA_TRIGGER__MASK	0x0000000f
+#define DSI_TRIG_CTRL_DMA_TRIGGER__MASK	0x00000007
 #define DSI_TRIG_CTRL_DMA_TRIGGER__SHIFT	0
 static inline uint32_t DSI_TRIG_CTRL_DMA_TRIGGER(enum dsi_cmd_trigger val)
 {
 	return ((val) << DSI_TRIG_CTRL_DMA_TRIGGER__SHIFT) & DSI_TRIG_CTRL_DMA_TRIGGER__MASK;
 }
-#define DSI_TRIG_CTRL_MDP_TRIGGER__MASK	0x000000f0
+#define DSI_TRIG_CTRL_MDP_TRIGGER__MASK	0x00000070
 #define DSI_TRIG_CTRL_MDP_TRIGGER__SHIFT	4
 static inline uint32_t DSI_TRIG_CTRL_MDP_TRIGGER(enum dsi_cmd_trigger val)
 {
 	return ((val) << DSI_TRIG_CTRL_MDP_TRIGGER__SHIFT) & DSI_TRIG_CTRL_MDP_TRIGGER__MASK;
 }
-#define DSI_TRIG_CTRL_STREAM	0x00000100
+#define DSI_TRIG_CTRL_STREAM__MASK	0x00000300
+#define DSI_TRIG_CTRL_STREAM__SHIFT	8
+static inline uint32_t DSI_TRIG_CTRL_STREAM(uint32_t val)
+{
+	return ((val) << DSI_TRIG_CTRL_STREAM__SHIFT) & DSI_TRIG_CTRL_STREAM__MASK;
+}
+#define DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME	0x00001000
 #define DSI_TRIG_CTRL_TE	0x80000000
 
 #define REG_DSI_TRIG_DMA	0x0000008c
@@ -274,6 +395,12 @@ static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(uint32_t val)
 #define DSI_EOT_PACKET_CTRL_RX_EOT_IGNORE	0x00000010
 
 #define REG_DSI_LANE_SWAP_CTRL	0x000000ac
+#define DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__MASK	0x00000007
+#define DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__SHIFT	0
+static inline uint32_t DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(enum dsi_lane_swap val)
+{
+	return ((val) << DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__SHIFT) & DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__MASK;
+}
 
 #define REG_DSI_ERR_INT_MASK0	0x00000108
 
@@ -282,8 +409,36 @@ static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(uint32_t val)
 #define REG_DSI_RESET	0x00000114
 
 #define REG_DSI_CLK_CTRL	0x00000118
+#define DSI_CLK_CTRL_AHBS_HCLK_ON	0x00000001
+#define DSI_CLK_CTRL_AHBM_SCLK_ON	0x00000002
+#define DSI_CLK_CTRL_PCLK_ON	0x00000004
+#define DSI_CLK_CTRL_DSICLK_ON	0x00000008
+#define DSI_CLK_CTRL_BYTECLK_ON	0x00000010
+#define DSI_CLK_CTRL_ESCCLK_ON	0x00000020
+#define DSI_CLK_CTRL_FORCE_ON_DYN_AHBM_HCLK	0x00000200
+
+#define REG_DSI_CLK_STATUS	0x0000011c
+#define DSI_CLK_STATUS_PLL_UNLOCKED	0x00010000
 
 #define REG_DSI_PHY_RESET	0x00000128
+#define DSI_PHY_RESET_RESET	0x00000001
+
+#define REG_DSI_RDBK_DATA_CTRL	0x000001d0
+#define DSI_RDBK_DATA_CTRL_COUNT__MASK	0x00ff0000
+#define DSI_RDBK_DATA_CTRL_COUNT__SHIFT	16
+static inline uint32_t DSI_RDBK_DATA_CTRL_COUNT(uint32_t val)
+{
+	return ((val) << DSI_RDBK_DATA_CTRL_COUNT__SHIFT) & DSI_RDBK_DATA_CTRL_COUNT__MASK;
+}
+#define DSI_RDBK_DATA_CTRL_CLR	0x00000001
+
+#define REG_DSI_VERSION	0x000001f0
+#define DSI_VERSION_MAJOR__MASK	0xff000000
+#define DSI_VERSION_MAJOR__SHIFT	24
+static inline uint32_t DSI_VERSION_MAJOR(uint32_t val)
+{
+	return ((val) << DSI_VERSION_MAJOR__SHIFT) & DSI_VERSION_MAJOR__MASK;
+}
 
 #define REG_DSI_PHY_PLL_CTRL_0	0x00000200
 #define DSI_PHY_PLL_CTRL_0_ENABLE	0x00000001
@@ -501,5 +656,184 @@ static inline uint32_t REG_DSI_8960_LN_TEST_STR_1(uint32_t i0) { return 0x000003
 #define REG_DSI_8960_PHY_CAL_STATUS	0x00000550
 #define DSI_8960_PHY_CAL_STATUS_CAL_BUSY	0x00000010
 
+static inline uint32_t REG_DSI_28nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_0(uint32_t i0) { return 0x00000000 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_1(uint32_t i0) { return 0x00000004 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_2(uint32_t i0) { return 0x00000008 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_3(uint32_t i0) { return 0x0000000c + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_4(uint32_t i0) { return 0x00000010 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000014 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_DEBUG_SEL(uint32_t i0) { return 0x00000018 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_TEST_STR_0(uint32_t i0) { return 0x0000001c + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_TEST_STR_1(uint32_t i0) { return 0x00000020 + 0x40*i0; }
+
+#define REG_DSI_28nm_PHY_LNCK_CFG_0	0x00000100
+
+#define REG_DSI_28nm_PHY_LNCK_CFG_1	0x00000104
+
+#define REG_DSI_28nm_PHY_LNCK_CFG_2	0x00000108
+
+#define REG_DSI_28nm_PHY_LNCK_CFG_3	0x0000010c
+
+#define REG_DSI_28nm_PHY_LNCK_CFG_4	0x00000110
+
+#define REG_DSI_28nm_PHY_LNCK_TEST_DATAPATH	0x00000114
+
+#define REG_DSI_28nm_PHY_LNCK_DEBUG_SEL	0x00000118
+
+#define REG_DSI_28nm_PHY_LNCK_TEST_STR0	0x0000011c
+
+#define REG_DSI_28nm_PHY_LNCK_TEST_STR1	0x00000120
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_0	0x00000140
+#define DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK	0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT	0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(uint32_t val)
+{
+	return ((val) << DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_1	0x00000144
+#define DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK	0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT	0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(uint32_t val)
+{
+	return ((val) << DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_2	0x00000148
+#define DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK	0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT	0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(uint32_t val)
+{
+	return ((val) << DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_3	0x0000014c
+#define DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8	0x00000001
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_4	0x00000150
+#define DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK	0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT	0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(uint32_t val)
+{
+	return ((val) << DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_5	0x00000154
+#define DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK	0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT	0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(uint32_t val)
+{
+	return ((val) << DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_6	0x00000158
+#define DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK	0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT	0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(uint32_t val)
+{
+	return ((val) << DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_7	0x0000015c
+#define DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK	0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT	0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(uint32_t val)
+{
+	return ((val) << DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_8	0x00000160
+#define DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__MASK	0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT	0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(uint32_t val)
+{
+	return ((val) << DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_9	0x00000164
+#define DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__MASK	0x00000007
+#define DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT	0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(uint32_t val)
+{
+	return ((val) << DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__MASK;
+}
+#define DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__MASK	0x00000070
+#define DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT	4
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(uint32_t val)
+{
+	return ((val) << DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_10	0x00000168
+#define DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__MASK	0x00000007
+#define DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT	0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(uint32_t val)
+{
+	return ((val) << DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_11	0x0000016c
+#define DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK	0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT	0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)
+{
+	return ((val) << DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK;
+}
+
+#define REG_DSI_28nm_PHY_CTRL_0	0x00000170
+
+#define REG_DSI_28nm_PHY_CTRL_1	0x00000174
+
+#define REG_DSI_28nm_PHY_CTRL_2	0x00000178
+
+#define REG_DSI_28nm_PHY_CTRL_3	0x0000017c
+
+#define REG_DSI_28nm_PHY_CTRL_4	0x00000180
+
+#define REG_DSI_28nm_PHY_STRENGTH_0	0x00000184
+
+#define REG_DSI_28nm_PHY_STRENGTH_1	0x00000188
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_0	0x000001b4
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_1	0x000001b8
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_2	0x000001bc
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_3	0x000001c0
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_4	0x000001c4
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_5	0x000001c8
+
+#define REG_DSI_28nm_PHY_GLBL_TEST_CTRL	0x000001d4
+
+#define REG_DSI_28nm_PHY_LDO_CNTRL	0x000001dc
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_0	0x00000000
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_1	0x00000004
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_2	0x00000008
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_3	0x0000000c
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_4	0x00000010
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_5	0x00000014
+
+#define REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG	0x00000018
+
 
 #endif /* DSI_XML */
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
new file mode 100644
index 000000000000..fdc54e3eff55
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -0,0 +1,1993 @@
1/*
2 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/clk.h>
15#include <linux/delay.h>
16#include <linux/err.h>
17#include <linux/gpio.h>
18#include <linux/interrupt.h>
19#include <linux/of_device.h>
20#include <linux/of_gpio.h>
21#include <linux/of_irq.h>
22#include <linux/regulator/consumer.h>
23#include <linux/spinlock.h>
24#include <video/mipi_display.h>
25
26#include "dsi.h"
27#include "dsi.xml.h"
28
29#define MSM_DSI_VER_MAJOR_V2 0x02
30#define MSM_DSI_VER_MAJOR_6G 0x03
31#define MSM_DSI_6G_VER_MINOR_V1_0 0x10000000
32#define MSM_DSI_6G_VER_MINOR_V1_1 0x10010000
33#define MSM_DSI_6G_VER_MINOR_V1_1_1 0x10010001
34#define MSM_DSI_6G_VER_MINOR_V1_2 0x10020000
35#define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001
36
37#define DSI_6G_REG_SHIFT 4
38
39#define DSI_REGULATOR_MAX 8
40struct dsi_reg_entry {
41 char name[32];
42 int min_voltage;
43 int max_voltage;
44 int enable_load;
45 int disable_load;
46};
47
48struct dsi_reg_config {
49 int num;
50 struct dsi_reg_entry regs[DSI_REGULATOR_MAX];
51};
52
53struct dsi_config {
54 u32 major;
55 u32 minor;
56 u32 io_offset;
57 enum msm_dsi_phy_type phy_type;
58 struct dsi_reg_config reg_cfg;
59};
60
61static const struct dsi_config dsi_cfgs[] = {
62 {MSM_DSI_VER_MAJOR_V2, 0, 0, MSM_DSI_PHY_UNKNOWN},
63 { /* 8974 v1 */
64 .major = MSM_DSI_VER_MAJOR_6G,
65 .minor = MSM_DSI_6G_VER_MINOR_V1_0,
66 .io_offset = DSI_6G_REG_SHIFT,
67 .phy_type = MSM_DSI_PHY_28NM,
68 .reg_cfg = {
69 .num = 4,
70 .regs = {
71 {"gdsc", -1, -1, -1, -1},
72 {"vdd", 3000000, 3000000, 150000, 100},
73 {"vdda", 1200000, 1200000, 100000, 100},
74 {"vddio", 1800000, 1800000, 100000, 100},
75 },
76 },
77 },
78 { /* 8974 v2 */
79 .major = MSM_DSI_VER_MAJOR_6G,
80 .minor = MSM_DSI_6G_VER_MINOR_V1_1,
81 .io_offset = DSI_6G_REG_SHIFT,
82 .phy_type = MSM_DSI_PHY_28NM,
83 .reg_cfg = {
84 .num = 4,
85 .regs = {
86 {"gdsc", -1, -1, -1, -1},
87 {"vdd", 3000000, 3000000, 150000, 100},
88 {"vdda", 1200000, 1200000, 100000, 100},
89 {"vddio", 1800000, 1800000, 100000, 100},
90 },
91 },
92 },
93 { /* 8974 v3 */
94 .major = MSM_DSI_VER_MAJOR_6G,
95 .minor = MSM_DSI_6G_VER_MINOR_V1_1_1,
96 .io_offset = DSI_6G_REG_SHIFT,
97 .phy_type = MSM_DSI_PHY_28NM,
98 .reg_cfg = {
99 .num = 4,
100 .regs = {
101 {"gdsc", -1, -1, -1, -1},
102 {"vdd", 3000000, 3000000, 150000, 100},
103 {"vdda", 1200000, 1200000, 100000, 100},
104 {"vddio", 1800000, 1800000, 100000, 100},
105 },
106 },
107 },
108 { /* 8084 */
109 .major = MSM_DSI_VER_MAJOR_6G,
110 .minor = MSM_DSI_6G_VER_MINOR_V1_2,
111 .io_offset = DSI_6G_REG_SHIFT,
112 .phy_type = MSM_DSI_PHY_28NM,
113 .reg_cfg = {
114 .num = 4,
115 .regs = {
116 {"gdsc", -1, -1, -1, -1},
117 {"vdd", 3000000, 3000000, 150000, 100},
118 {"vdda", 1200000, 1200000, 100000, 100},
119 {"vddio", 1800000, 1800000, 100000, 100},
120 },
121 },
122 },
123 { /* 8916 */
124 .major = MSM_DSI_VER_MAJOR_6G,
125 .minor = MSM_DSI_6G_VER_MINOR_V1_3_1,
126 .io_offset = DSI_6G_REG_SHIFT,
127 .phy_type = MSM_DSI_PHY_28NM,
128 .reg_cfg = {
129 .num = 4,
130 .regs = {
131 {"gdsc", -1, -1, -1, -1},
132 {"vdd", 2850000, 2850000, 100000, 100},
133 {"vdda", 1200000, 1200000, 100000, 100},
134 {"vddio", 1800000, 1800000, 100000, 100},
135 },
136 },
137 },
138};
139
140static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
141{
142 u32 ver;
143 u32 ver_6g;
144
145 if (!major || !minor)
146 return -EINVAL;
147
148 /* From DSI6G(v3), addition of a 6G_HW_VERSION register at offset 0
149 * makes all other registers 4-byte shifted down.
150 */
151 ver_6g = msm_readl(base + REG_DSI_6G_HW_VERSION);
152 if (ver_6g == 0) {
153 ver = msm_readl(base + REG_DSI_VERSION);
154 ver = FIELD(ver, DSI_VERSION_MAJOR);
155 if (ver <= MSM_DSI_VER_MAJOR_V2) {
156 /* old versions */
157 *major = ver;
158 *minor = 0;
159 return 0;
160 } else {
161 return -EINVAL;
162 }
163 } else {
164 ver = msm_readl(base + DSI_6G_REG_SHIFT + REG_DSI_VERSION);
165 ver = FIELD(ver, DSI_VERSION_MAJOR);
166 if (ver == MSM_DSI_VER_MAJOR_6G) {
167 /* 6G version */
168 *major = ver;
169 *minor = ver_6g;
170 return 0;
171 } else {
172 return -EINVAL;
173 }
174 }
175}
176
177#define DSI_ERR_STATE_ACK 0x0000
178#define DSI_ERR_STATE_TIMEOUT 0x0001
179#define DSI_ERR_STATE_DLN0_PHY 0x0002
180#define DSI_ERR_STATE_FIFO 0x0004
181#define DSI_ERR_STATE_MDP_FIFO_UNDERFLOW 0x0008
182#define DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION 0x0010
183#define DSI_ERR_STATE_PLL_UNLOCKED 0x0020
184
185#define DSI_CLK_CTRL_ENABLE_CLKS \
186 (DSI_CLK_CTRL_AHBS_HCLK_ON | DSI_CLK_CTRL_AHBM_SCLK_ON | \
187 DSI_CLK_CTRL_PCLK_ON | DSI_CLK_CTRL_DSICLK_ON | \
188 DSI_CLK_CTRL_BYTECLK_ON | DSI_CLK_CTRL_ESCCLK_ON | \
189 DSI_CLK_CTRL_FORCE_ON_DYN_AHBM_HCLK)
190
191struct msm_dsi_host {
192 struct mipi_dsi_host base;
193
194 struct platform_device *pdev;
195 struct drm_device *dev;
196
197 int id;
198
199 void __iomem *ctrl_base;
200 struct regulator_bulk_data supplies[DSI_REGULATOR_MAX];
201 struct clk *mdp_core_clk;
202 struct clk *ahb_clk;
203 struct clk *axi_clk;
204 struct clk *mmss_misc_ahb_clk;
205 struct clk *byte_clk;
206 struct clk *esc_clk;
207 struct clk *pixel_clk;
208 u32 byte_clk_rate;
209
210 struct gpio_desc *disp_en_gpio;
211 struct gpio_desc *te_gpio;
212
213 const struct dsi_config *cfg;
214
215 struct completion dma_comp;
216 struct completion video_comp;
217 struct mutex dev_mutex;
218 struct mutex cmd_mutex;
219 struct mutex clk_mutex;
220 spinlock_t intr_lock; /* Protect interrupt ctrl register */
221
222 u32 err_work_state;
223 struct work_struct err_work;
224 struct workqueue_struct *workqueue;
225
226 struct drm_gem_object *tx_gem_obj;
227 u8 *rx_buf;
228
229 struct drm_display_mode *mode;
230
231 /* Panel info */
232 struct device_node *panel_node;
233 unsigned int channel;
234 unsigned int lanes;
235 enum mipi_dsi_pixel_format format;
236 unsigned long mode_flags;
237
238 u32 dma_cmd_ctrl_restore;
239
240 bool registered;
241 bool power_on;
242 int irq;
243};
244
245static u32 dsi_get_bpp(const enum mipi_dsi_pixel_format fmt)
246{
247 switch (fmt) {
248 case MIPI_DSI_FMT_RGB565: return 16;
249 case MIPI_DSI_FMT_RGB666_PACKED: return 18;
250 case MIPI_DSI_FMT_RGB666:
251 case MIPI_DSI_FMT_RGB888:
252 default: return 24;
253 }
254}
255
256static inline u32 dsi_read(struct msm_dsi_host *msm_host, u32 reg)
257{
258 return msm_readl(msm_host->ctrl_base + msm_host->cfg->io_offset + reg);
259}
260static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data)
261{
262 msm_writel(data, msm_host->ctrl_base + msm_host->cfg->io_offset + reg);
263}
264
265static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host);
266static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host);
267
268static const struct dsi_config *dsi_get_config(struct msm_dsi_host *msm_host)
269{
270 const struct dsi_config *cfg;
271 struct regulator *gdsc_reg;
272 int i, ret;
273 u32 major = 0, minor = 0;
274
275 gdsc_reg = regulator_get(&msm_host->pdev->dev, "gdsc");
276 if (IS_ERR_OR_NULL(gdsc_reg)) {
277 pr_err("%s: cannot get gdsc\n", __func__);
278 goto fail;
279 }
280 ret = regulator_enable(gdsc_reg);
281 if (ret) {
282 pr_err("%s: unable to enable gdsc\n", __func__);
283 regulator_put(gdsc_reg);
284 goto fail;
285 }
286 ret = clk_prepare_enable(msm_host->ahb_clk);
287 if (ret) {
288 pr_err("%s: unable to enable ahb_clk\n", __func__);
289 regulator_disable(gdsc_reg);
290 regulator_put(gdsc_reg);
291 goto fail;
292 }
293
294 ret = dsi_get_version(msm_host->ctrl_base, &major, &minor);
295
296 clk_disable_unprepare(msm_host->ahb_clk);
297 regulator_disable(gdsc_reg);
298 regulator_put(gdsc_reg);
299 if (ret) {
300 pr_err("%s: Invalid version\n", __func__);
301 goto fail;
302 }
303
304 for (i = 0; i < ARRAY_SIZE(dsi_cfgs); i++) {
305 cfg = dsi_cfgs + i;
306 if ((cfg->major == major) && (cfg->minor == minor))
307 return cfg;
308 }
309 pr_err("%s: Version %x:%x not support\n", __func__, major, minor);
310
311fail:
312 return NULL;
313}
314
315static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host)
316{
317 return container_of(host, struct msm_dsi_host, base);
318}
319
320static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host)
321{
322 struct regulator_bulk_data *s = msm_host->supplies;
323 const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs;
324 int num = msm_host->cfg->reg_cfg.num;
325 int i;
326
327 DBG("");
328 for (i = num - 1; i >= 0; i--)
329 if (regs[i].disable_load >= 0)
330 regulator_set_optimum_mode(s[i].consumer,
331 regs[i].disable_load);
332
333 regulator_bulk_disable(num, s);
334}
335
336static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host)
337{
338 struct regulator_bulk_data *s = msm_host->supplies;
339 const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs;
340 int num = msm_host->cfg->reg_cfg.num;
341 int ret, i;
342
343 DBG("");
344 for (i = 0; i < num; i++) {
345 if (regs[i].enable_load >= 0) {
346 ret = regulator_set_optimum_mode(s[i].consumer,
347 regs[i].enable_load);
348 if (ret < 0) {
349 pr_err("regulator %d set op mode failed, %d\n",
350 i, ret);
351 goto fail;
352 }
353 }
354 }
355
356 ret = regulator_bulk_enable(num, s);
357 if (ret < 0) {
358 pr_err("regulator enable failed, %d\n", ret);
359 goto fail;
360 }
361
362 return 0;
363
364fail:
365 for (i--; i >= 0; i--)
366 regulator_set_optimum_mode(s[i].consumer, regs[i].disable_load);
367 return ret;
368}
369
370static int dsi_regulator_init(struct msm_dsi_host *msm_host)
371{
372 struct regulator_bulk_data *s = msm_host->supplies;
373 const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs;
374 int num = msm_host->cfg->reg_cfg.num;
375 int i, ret;
376
377 for (i = 0; i < num; i++)
378 s[i].supply = regs[i].name;
379
380 ret = devm_regulator_bulk_get(&msm_host->pdev->dev, num, s);
381 if (ret < 0) {
382 pr_err("%s: failed to init regulator, ret=%d\n",
383 __func__, ret);
384 return ret;
385 }
386
387 for (i = 0; i < num; i++) {
388 if ((regs[i].min_voltage >= 0) && (regs[i].max_voltage >= 0)) {
389 ret = regulator_set_voltage(s[i].consumer,
390 regs[i].min_voltage, regs[i].max_voltage);
391 if (ret < 0) {
392 pr_err("regulator %d set voltage failed, %d\n",
393 i, ret);
394 return ret;
395 }
396 }
397 }
398
399 return 0;
400}
401
402static int dsi_clk_init(struct msm_dsi_host *msm_host)
403{
404 struct device *dev = &msm_host->pdev->dev;
405 int ret = 0;
406
407 msm_host->mdp_core_clk = devm_clk_get(dev, "mdp_core_clk");
408 if (IS_ERR(msm_host->mdp_core_clk)) {
409 ret = PTR_ERR(msm_host->mdp_core_clk);
410 pr_err("%s: Unable to get mdp core clk. ret=%d\n",
411 __func__, ret);
412 goto exit;
413 }
414
415 msm_host->ahb_clk = devm_clk_get(dev, "iface_clk");
416 if (IS_ERR(msm_host->ahb_clk)) {
417 ret = PTR_ERR(msm_host->ahb_clk);
418 pr_err("%s: Unable to get mdss ahb clk. ret=%d\n",
419 __func__, ret);
420 goto exit;
421 }
422
423 msm_host->axi_clk = devm_clk_get(dev, "bus_clk");
424 if (IS_ERR(msm_host->axi_clk)) {
425 ret = PTR_ERR(msm_host->axi_clk);
426 pr_err("%s: Unable to get axi bus clk. ret=%d\n",
427 __func__, ret);
428 goto exit;
429 }
430
431 msm_host->mmss_misc_ahb_clk = devm_clk_get(dev, "core_mmss_clk");
432 if (IS_ERR(msm_host->mmss_misc_ahb_clk)) {
433 ret = PTR_ERR(msm_host->mmss_misc_ahb_clk);
434 pr_err("%s: Unable to get mmss misc ahb clk. ret=%d\n",
435 __func__, ret);
436 goto exit;
437 }
438
439 msm_host->byte_clk = devm_clk_get(dev, "byte_clk");
440 if (IS_ERR(msm_host->byte_clk)) {
441 ret = PTR_ERR(msm_host->byte_clk);
442 pr_err("%s: can't find dsi_byte_clk. ret=%d\n",
443 __func__, ret);
444 msm_host->byte_clk = NULL;
445 goto exit;
446 }
447
448 msm_host->pixel_clk = devm_clk_get(dev, "pixel_clk");
449 if (IS_ERR(msm_host->pixel_clk)) {
450 ret = PTR_ERR(msm_host->pixel_clk);
451 pr_err("%s: can't find dsi_pixel_clk. ret=%d\n",
452 __func__, ret);
453 msm_host->pixel_clk = NULL;
454 goto exit;
455 }
456
457 msm_host->esc_clk = devm_clk_get(dev, "core_clk");
458 if (IS_ERR(msm_host->esc_clk)) {
459 ret = PTR_ERR(msm_host->esc_clk);
460 pr_err("%s: can't find dsi_esc_clk. ret=%d\n",
461 __func__, ret);
462 msm_host->esc_clk = NULL;
463 goto exit;
464 }
465
466exit:
467 return ret;
468}
469
470static int dsi_bus_clk_enable(struct msm_dsi_host *msm_host)
471{
472 int ret;
473
474 DBG("id=%d", msm_host->id);
475
476 ret = clk_prepare_enable(msm_host->mdp_core_clk);
477 if (ret) {
478 pr_err("%s: failed to enable mdp_core_clock, %d\n",
479 __func__, ret);
480 goto core_clk_err;
481 }
482
483 ret = clk_prepare_enable(msm_host->ahb_clk);
484 if (ret) {
485 pr_err("%s: failed to enable ahb clock, %d\n", __func__, ret);
486 goto ahb_clk_err;
487 }
488
489 ret = clk_prepare_enable(msm_host->axi_clk);
490 if (ret) {
491 pr_err("%s: failed to enable ahb clock, %d\n", __func__, ret);
492 goto axi_clk_err;
493 }
494
495 ret = clk_prepare_enable(msm_host->mmss_misc_ahb_clk);
496 if (ret) {
497 pr_err("%s: failed to enable mmss misc ahb clk, %d\n",
498 __func__, ret);
499 goto misc_ahb_clk_err;
500 }
501
502 return 0;
503
504misc_ahb_clk_err:
505 clk_disable_unprepare(msm_host->axi_clk);
506axi_clk_err:
507 clk_disable_unprepare(msm_host->ahb_clk);
508ahb_clk_err:
509 clk_disable_unprepare(msm_host->mdp_core_clk);
510core_clk_err:
511 return ret;
512}
513
514static void dsi_bus_clk_disable(struct msm_dsi_host *msm_host)
515{
516 DBG("");
517 clk_disable_unprepare(msm_host->mmss_misc_ahb_clk);
518 clk_disable_unprepare(msm_host->axi_clk);
519 clk_disable_unprepare(msm_host->ahb_clk);
520 clk_disable_unprepare(msm_host->mdp_core_clk);
521}
522
523static int dsi_link_clk_enable(struct msm_dsi_host *msm_host)
524{
525 int ret;
526
527 DBG("Set clk rates: pclk=%d, byteclk=%d",
528 msm_host->mode->clock, msm_host->byte_clk_rate);
529
530 ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
531 if (ret) {
532 pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
533 goto error;
534 }
535
536 ret = clk_set_rate(msm_host->pixel_clk, msm_host->mode->clock * 1000);
537 if (ret) {
538 pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
539 goto error;
540 }
541
542 ret = clk_prepare_enable(msm_host->esc_clk);
543 if (ret) {
544 pr_err("%s: Failed to enable dsi esc clk\n", __func__);
545 goto error;
546 }
547
548 ret = clk_prepare_enable(msm_host->byte_clk);
549 if (ret) {
550 pr_err("%s: Failed to enable dsi byte clk\n", __func__);
551 goto byte_clk_err;
552 }
553
554 ret = clk_prepare_enable(msm_host->pixel_clk);
555 if (ret) {
556 pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
557 goto pixel_clk_err;
558 }
559
560 return 0;
561
562pixel_clk_err:
563 clk_disable_unprepare(msm_host->byte_clk);
564byte_clk_err:
565 clk_disable_unprepare(msm_host->esc_clk);
566error:
567 return ret;
568}
569
570static void dsi_link_clk_disable(struct msm_dsi_host *msm_host)
571{
572 clk_disable_unprepare(msm_host->esc_clk);
573 clk_disable_unprepare(msm_host->pixel_clk);
574 clk_disable_unprepare(msm_host->byte_clk);
575}
576
577static int dsi_clk_ctrl(struct msm_dsi_host *msm_host, bool enable)
578{
579 int ret = 0;
580
581 mutex_lock(&msm_host->clk_mutex);
582 if (enable) {
583 ret = dsi_bus_clk_enable(msm_host);
584 if (ret) {
585 pr_err("%s: Can not enable bus clk, %d\n",
586 __func__, ret);
587 goto unlock_ret;
588 }
589 ret = dsi_link_clk_enable(msm_host);
590 if (ret) {
591 pr_err("%s: Can not enable link clk, %d\n",
592 __func__, ret);
593 dsi_bus_clk_disable(msm_host);
594 goto unlock_ret;
595 }
596 } else {
597 dsi_link_clk_disable(msm_host);
598 dsi_bus_clk_disable(msm_host);
599 }
600
601unlock_ret:
602 mutex_unlock(&msm_host->clk_mutex);
603 return ret;
604}
605
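/* Worked example for the byte clock derivation below (hypothetical
 * numbers): a mode with pclk = 148500 kHz in RGB888 (24 bpp) on
 * 4 lanes gives byte_clk_rate = (148500000 * 24) / (8 * 4)
 * = 111375000 Hz, i.e. the per-lane byte rate needed to carry the
 * pixel stream.
 */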
606static int dsi_calc_clk_rate(struct msm_dsi_host *msm_host)
607{
608 struct drm_display_mode *mode = msm_host->mode;
609 u8 lanes = msm_host->lanes;
610 u32 bpp = dsi_get_bpp(msm_host->format);
611 u32 pclk_rate;
612
613 if (!mode) {
614 pr_err("%s: mode not set\n", __func__);
615 return -EINVAL;
616 }
617
618 pclk_rate = mode->clock * 1000;
619 if (lanes > 0) {
620 msm_host->byte_clk_rate = (pclk_rate * bpp) / (8 * lanes);
621 } else {
622 pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__);
623 msm_host->byte_clk_rate = (pclk_rate * bpp) / 8;
624 }
625
626 DBG("pclk=%d, bclk=%d", pclk_rate, msm_host->byte_clk_rate);
627
628 return 0;
629}
630
631static void dsi_phy_sw_reset(struct msm_dsi_host *msm_host)
632{
633 DBG("");
634 dsi_write(msm_host, REG_DSI_PHY_RESET, DSI_PHY_RESET_RESET);
635 /* Make sure the phy is fully reset */
636 wmb();
637 udelay(1000);
638 dsi_write(msm_host, REG_DSI_PHY_RESET, 0);
639 udelay(100);
640}
641
642static void dsi_intr_ctrl(struct msm_dsi_host *msm_host, u32 mask, int enable)
643{
644 u32 intr;
645 unsigned long flags;
646
647 spin_lock_irqsave(&msm_host->intr_lock, flags);
648 intr = dsi_read(msm_host, REG_DSI_INTR_CTRL);
649
650 if (enable)
651 intr |= mask;
652 else
653 intr &= ~mask;
654
655 DBG("intr=%x enable=%d", intr, enable);
656
657 dsi_write(msm_host, REG_DSI_INTR_CTRL, intr);
658 spin_unlock_irqrestore(&msm_host->intr_lock, flags);
659}
660
661static inline enum dsi_traffic_mode dsi_get_traffic_mode(const u32 mode_flags)
662{
663 if (mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
664 return BURST_MODE;
665 else if (mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
666 return NON_BURST_SYNCH_PULSE;
667
668 return NON_BURST_SYNCH_EVENT;
669}
670
671static inline enum dsi_vid_dst_format dsi_get_vid_fmt(
672 const enum mipi_dsi_pixel_format mipi_fmt)
673{
674 switch (mipi_fmt) {
675 case MIPI_DSI_FMT_RGB888: return VID_DST_FORMAT_RGB888;
676 case MIPI_DSI_FMT_RGB666: return VID_DST_FORMAT_RGB666_LOOSE;
677 case MIPI_DSI_FMT_RGB666_PACKED: return VID_DST_FORMAT_RGB666;
678 case MIPI_DSI_FMT_RGB565: return VID_DST_FORMAT_RGB565;
679 default: return VID_DST_FORMAT_RGB888;
680 }
681}
682
683static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
684 const enum mipi_dsi_pixel_format mipi_fmt)
685{
686 switch (mipi_fmt) {
687 case MIPI_DSI_FMT_RGB888: return CMD_DST_FORMAT_RGB888;
688 case MIPI_DSI_FMT_RGB666_PACKED:
689 case MIPI_DSI_FMT_RGB666: return CMD_DST_FORMAT_RGB666;
690 case MIPI_DSI_FMT_RGB565: return CMD_DST_FORMAT_RGB565;
691 default: return CMD_DST_FORMAT_RGB888;
692 }
693}
694
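/* Programs the controller for the attached panel: in video mode the
 * traffic mode, destination format and power-stop policy; in command
 * mode the DCS memory-write configuration. The trigger, clockout
 * timing, EOT and lane setup below are shared by both modes.
 */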
695static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
696 u32 clk_pre, u32 clk_post)
697{
698 u32 flags = msm_host->mode_flags;
699 enum mipi_dsi_pixel_format mipi_fmt = msm_host->format;
700 u32 data = 0;
701
702 if (!enable) {
703 dsi_write(msm_host, REG_DSI_CTRL, 0);
704 return;
705 }
706
707 if (flags & MIPI_DSI_MODE_VIDEO) {
708 if (flags & MIPI_DSI_MODE_VIDEO_HSE)
709 data |= DSI_VID_CFG0_PULSE_MODE_HSA_HE;
710 if (flags & MIPI_DSI_MODE_VIDEO_HFP)
711 data |= DSI_VID_CFG0_HFP_POWER_STOP;
712 if (flags & MIPI_DSI_MODE_VIDEO_HBP)
713 data |= DSI_VID_CFG0_HBP_POWER_STOP;
714 if (flags & MIPI_DSI_MODE_VIDEO_HSA)
715 data |= DSI_VID_CFG0_HSA_POWER_STOP;
716 /* Always set low power stop mode for BLLP
717 * to let the command engine send packets
718 */
719 data |= DSI_VID_CFG0_EOF_BLLP_POWER_STOP |
720 DSI_VID_CFG0_BLLP_POWER_STOP;
721 data |= DSI_VID_CFG0_TRAFFIC_MODE(dsi_get_traffic_mode(flags));
722 data |= DSI_VID_CFG0_DST_FORMAT(dsi_get_vid_fmt(mipi_fmt));
723 data |= DSI_VID_CFG0_VIRT_CHANNEL(msm_host->channel);
724 dsi_write(msm_host, REG_DSI_VID_CFG0, data);
725
726 /* Do not swap RGB colors */
727 data = DSI_VID_CFG1_RGB_SWAP(SWAP_RGB);
728 dsi_write(msm_host, REG_DSI_VID_CFG1, data);
729 } else {
730 /* Do not swap RGB colors */
731 data = DSI_CMD_CFG0_RGB_SWAP(SWAP_RGB);
732 data |= DSI_CMD_CFG0_DST_FORMAT(dsi_get_cmd_fmt(mipi_fmt));
733 dsi_write(msm_host, REG_DSI_CMD_CFG0, data);
734
735 data = DSI_CMD_CFG1_WR_MEM_START(MIPI_DCS_WRITE_MEMORY_START) |
736 DSI_CMD_CFG1_WR_MEM_CONTINUE(
737 MIPI_DCS_WRITE_MEMORY_CONTINUE);
738 /* Always insert DCS command */
739 data |= DSI_CMD_CFG1_INSERT_DCS_COMMAND;
740 dsi_write(msm_host, REG_DSI_CMD_CFG1, data);
741 }
742
743 dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL,
744 DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER |
745 DSI_CMD_DMA_CTRL_LOW_POWER);
746
747 data = 0;
748 /* Always assume dedicated TE pin */
749 data |= DSI_TRIG_CTRL_TE;
750 data |= DSI_TRIG_CTRL_MDP_TRIGGER(TRIGGER_NONE);
751 data |= DSI_TRIG_CTRL_DMA_TRIGGER(TRIGGER_SW);
752 data |= DSI_TRIG_CTRL_STREAM(msm_host->channel);
753 if ((msm_host->cfg->major == MSM_DSI_VER_MAJOR_6G) &&
754 (msm_host->cfg->minor >= MSM_DSI_6G_VER_MINOR_V1_2))
755 data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME;
756 dsi_write(msm_host, REG_DSI_TRIG_CTRL, data);
757
758 data = DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(clk_post) |
759 DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(clk_pre);
760 dsi_write(msm_host, REG_DSI_CLKOUT_TIMING_CTRL, data);
761
762 data = 0;
763 if (!(flags & MIPI_DSI_MODE_EOT_PACKET))
764 data |= DSI_EOT_PACKET_CTRL_TX_EOT_APPEND;
765 dsi_write(msm_host, REG_DSI_EOT_PACKET_CTRL, data);
766
767 /* allow only ack-err-status to generate interrupt */
768 dsi_write(msm_host, REG_DSI_ERR_INT_MASK0, 0x13ff3fe0);
769
770 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);
771
772 dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
773
774 data = DSI_CTRL_CLK_EN;
775
776 DBG("lane number=%d", msm_host->lanes);
777 if (msm_host->lanes == 2) {
778 data |= DSI_CTRL_LANE1 | DSI_CTRL_LANE2;
779 /* swap lanes for 2-lane panel for better performance */
780 dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
781 DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(LANE_SWAP_1230));
782 } else {
783 /* Take 4 lanes as default */
784 data |= DSI_CTRL_LANE0 | DSI_CTRL_LANE1 | DSI_CTRL_LANE2 |
785 DSI_CTRL_LANE3;
786 /* Do not swap lanes for 4-lane panel */
787 dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
788 DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(LANE_SWAP_0123));
789 }
790 data |= DSI_CTRL_ENABLE;
791
792 dsi_write(msm_host, REG_DSI_CTRL, data);
793}
794
795static void dsi_timing_setup(struct msm_dsi_host *msm_host)
796{
797 struct drm_display_mode *mode = msm_host->mode;
798 u32 hs_start = 0, vs_start = 0; /* take sync start as 0 */
799 u32 h_total = mode->htotal;
800 u32 v_total = mode->vtotal;
801 u32 hs_end = mode->hsync_end - mode->hsync_start;
802 u32 vs_end = mode->vsync_end - mode->vsync_start;
803 u32 ha_start = h_total - mode->hsync_start;
804 u32 ha_end = ha_start + mode->hdisplay;
805 u32 va_start = v_total - mode->vsync_start;
806 u32 va_end = va_start + mode->vdisplay;
807 u32 wc;
808
809 DBG("");
810
811 if (msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) {
812 dsi_write(msm_host, REG_DSI_ACTIVE_H,
813 DSI_ACTIVE_H_START(ha_start) |
814 DSI_ACTIVE_H_END(ha_end));
815 dsi_write(msm_host, REG_DSI_ACTIVE_V,
816 DSI_ACTIVE_V_START(va_start) |
817 DSI_ACTIVE_V_END(va_end));
818 dsi_write(msm_host, REG_DSI_TOTAL,
819 DSI_TOTAL_H_TOTAL(h_total - 1) |
820 DSI_TOTAL_V_TOTAL(v_total - 1));
821
822 dsi_write(msm_host, REG_DSI_ACTIVE_HSYNC,
823 DSI_ACTIVE_HSYNC_START(hs_start) |
824 DSI_ACTIVE_HSYNC_END(hs_end));
825 dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_HPOS, 0);
826 dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_VPOS,
827 DSI_ACTIVE_VSYNC_VPOS_START(vs_start) |
828 DSI_ACTIVE_VSYNC_VPOS_END(vs_end));
829 } else { /* command mode */
830 /* image data and 1 byte write_memory_start cmd */
831 wc = mode->hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1;
832
833 dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_CTRL,
834 DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT(wc) |
835 DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL(
836 msm_host->channel) |
837 DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE(
838 MIPI_DSI_DCS_LONG_WRITE));
839
840 dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_TOTAL,
841 DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL(mode->hdisplay) |
842 DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL(mode->vdisplay));
843 }
844}
845
846static void dsi_sw_reset(struct msm_dsi_host *msm_host)
847{
848 dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
849 wmb(); /* clocks need to be enabled before reset */
850
851 dsi_write(msm_host, REG_DSI_RESET, 1);
852 wmb(); /* make sure the reset happens */
853 dsi_write(msm_host, REG_DSI_RESET, 0);
854}
855
856static void dsi_op_mode_config(struct msm_dsi_host *msm_host,
857 bool video_mode, bool enable)
858{
859 u32 dsi_ctrl;
860
861 dsi_ctrl = dsi_read(msm_host, REG_DSI_CTRL);
862
863 if (!enable) {
864 dsi_ctrl &= ~(DSI_CTRL_ENABLE | DSI_CTRL_VID_MODE_EN |
865 DSI_CTRL_CMD_MODE_EN);
866 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE |
867 DSI_IRQ_MASK_VIDEO_DONE, 0);
868 } else {
869 if (video_mode) {
870 dsi_ctrl |= DSI_CTRL_VID_MODE_EN;
871 } else { /* command mode */
872 dsi_ctrl |= DSI_CTRL_CMD_MODE_EN;
873 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE, 1);
874 }
875 dsi_ctrl |= DSI_CTRL_ENABLE;
876 }
877
878 dsi_write(msm_host, REG_DSI_CTRL, dsi_ctrl);
879}
880
881static void dsi_set_tx_power_mode(int mode, struct msm_dsi_host *msm_host)
882{
883 u32 data;
884
885 data = dsi_read(msm_host, REG_DSI_CMD_DMA_CTRL);
886
887 if (mode == 0)
888 data &= ~DSI_CMD_DMA_CTRL_LOW_POWER;
889 else
890 data |= DSI_CMD_DMA_CTRL_LOW_POWER;
891
892 dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL, data);
893}
894
895static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
896{
897 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 1);
898
899 reinit_completion(&msm_host->video_comp);
900
901 wait_for_completion_timeout(&msm_host->video_comp,
902 msecs_to_jiffies(70));
903
904 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0);
905}
906
907static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
908{
909 if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
910 return;
911
912 if (msm_host->power_on) {
913 dsi_wait4video_done(msm_host);
914 /* delay 2-4 ms to skip the BLLP */
915 usleep_range(2000, 4000);
916 }
917}
918
919/* dsi_cmd */
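/* Command packets are DMA'd out of a small GEM buffer (4K, allocated
 * once at modeset init and reused for every transfer); the hardware
 * requires its iova to be 8-byte aligned, which is checked below.
 */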
920static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
921{
922 struct drm_device *dev = msm_host->dev;
923 int ret;
924 u32 iova;
925
926 mutex_lock(&dev->struct_mutex);
927 msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED);
928 if (IS_ERR(msm_host->tx_gem_obj)) {
929 ret = PTR_ERR(msm_host->tx_gem_obj);
930 pr_err("%s: failed to allocate gem, %d\n", __func__, ret);
931 msm_host->tx_gem_obj = NULL;
932 mutex_unlock(&dev->struct_mutex);
933 return ret;
934 }
935
936 ret = msm_gem_get_iova_locked(msm_host->tx_gem_obj, 0, &iova);
937 mutex_unlock(&dev->struct_mutex);
938 if (ret) {
939 pr_err("%s: failed to get iova, %d\n", __func__, ret);
940 return ret;
941 }
942
943 if (iova & 0x07) {
944 pr_err("%s: buf NOT 8 bytes aligned\n", __func__);
945 return -EINVAL;
946 }
947
948 return 0;
949}
950
951static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
952{
953 struct drm_device *dev = msm_host->dev;
954
955 if (msm_host->tx_gem_obj) {
956 msm_gem_put_iova(msm_host->tx_gem_obj, 0);
957 mutex_lock(&dev->struct_mutex);
958 msm_gem_free_object(msm_host->tx_gem_obj);
959 msm_host->tx_gem_obj = NULL;
960 mutex_unlock(&dev->struct_mutex);
961 }
962}
963
964/*
965 * prepare cmd buffer to be txed
966 */
967static int dsi_cmd_dma_add(struct drm_gem_object *tx_gem,
968 const struct mipi_dsi_msg *msg)
969{
970 struct mipi_dsi_packet packet;
971 int len;
972 int ret;
973 u8 *data;
974
975 ret = mipi_dsi_create_packet(&packet, msg);
976 if (ret) {
977 pr_err("%s: create packet failed, %d\n", __func__, ret);
978 return ret;
979 }
980 len = (packet.size + 3) & (~0x3);
981
982 if (len > tx_gem->size) {
983 pr_err("%s: packet size is too big\n", __func__);
984 return -EINVAL;
985 }
986
987 data = msm_gem_vaddr(tx_gem);
988
989 if (IS_ERR(data)) {
990 ret = PTR_ERR(data);
991 pr_err("%s: get vaddr failed, %d\n", __func__, ret);
992 return ret;
993 }
994
995 /* MSM specific command format in memory */
996 data[0] = packet.header[1];
997 data[1] = packet.header[2];
998 data[2] = packet.header[0];
999 data[3] = BIT(7); /* Last packet */
1000 if (mipi_dsi_packet_format_is_long(msg->type))
1001 data[3] |= BIT(6);
1002 if (msg->rx_buf && msg->rx_len)
1003 data[3] |= BIT(5);
1004
1005 /* Long packet */
1006 if (packet.payload && packet.payload_length)
1007 memcpy(data + 4, packet.payload, packet.payload_length);
1008
1009 /* Append 0xff to the end */
1010 if (packet.size < len)
1011 memset(data + packet.size, 0xff, len - packet.size);
1012
1013 return len;
1014}
1015
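/* Layout illustration for the buffer built above, assuming a
 * hypothetical DCS short write of 0x29 (set_display_on) on channel 0:
 * mipi_dsi_create_packet() yields header = { 0x05, 0x29, 0x00 }, so
 * data[] = { 0x29, 0x00, 0x05, 0x80 }: the two parameter bytes first,
 * then the data id, then the MSM flags byte with BIT(7) marking the
 * last packet.
 */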
1016/*
1017 * dsi_short_read1_resp: 1 parameter
1018 */
1019static int dsi_short_read1_resp(u8 *buf, const struct mipi_dsi_msg *msg)
1020{
1021 u8 *data = msg->rx_buf;
1022 if (data && (msg->rx_len >= 1)) {
1023 *data = buf[1]; /* strip out dcs type */
1024 return 1;
1025 } else {
1026 pr_err("%s: read data does not match with rx_buf len %d\n",
1027 __func__, msg->rx_len);
1028 return -EINVAL;
1029 }
1030}
1031
1032/*
1033 * dsi_short_read2_resp: 2 parameter
1034 */
1035static int dsi_short_read2_resp(u8 *buf, const struct mipi_dsi_msg *msg)
1036{
1037 u8 *data = msg->rx_buf;
1038 if (data && (msg->rx_len >= 2)) {
1039 data[0] = buf[1]; /* strip out dcs type */
1040 data[1] = buf[2];
1041 return 2;
1042 } else {
1043 pr_err("%s: read data does not match with rx_buf len %zu\n",
1044 __func__, msg->rx_len);
1045 return -EINVAL;
1046 }
1047}
1048
1049static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg)
1050{
1051 /* strip out 4 byte dcs header */
1052 if (msg->rx_buf && msg->rx_len)
1053 memcpy(msg->rx_buf, buf + 4, msg->rx_len);
1054
1055 return msg->rx_len;
1056}
1057
1058
1059static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
1060{
1061 int ret;
1062 u32 iova;
1063 bool triggered;
1064
1065 ret = msm_gem_get_iova(msm_host->tx_gem_obj, 0, &iova);
1066 if (ret) {
1067 pr_err("%s: failed to get iova: %d\n", __func__, ret);
1068 return ret;
1069 }
1070
1071 reinit_completion(&msm_host->dma_comp);
1072
1073 dsi_wait4video_eng_busy(msm_host);
1074
1075 triggered = msm_dsi_manager_cmd_xfer_trigger(
1076 msm_host->id, iova, len);
1077 if (triggered) {
1078 ret = wait_for_completion_timeout(&msm_host->dma_comp,
1079 msecs_to_jiffies(200));
1080 DBG("ret=%d", ret);
1081 if (ret == 0)
1082 ret = -ETIMEDOUT;
1083 else
1084 ret = len;
1085 } else
1086 ret = len;
1087
1088 return ret;
1089}
1090
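/* The read-back FIFO is just 4 x 32-bit RDBK_DATA registers (16
 * bytes). The function below drains them from the highest index down,
 * swaps each word to host byte order, and skips any bytes repeated
 * from a previous pass before handing the payload back to the caller.
 */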
1091static int dsi_cmd_dma_rx(struct msm_dsi_host *msm_host,
1092 u8 *buf, int rx_byte, int pkt_size)
1093{
1094 u32 *lp, *temp, data;
1095 int i, j = 0, cnt;
1096 bool ack_error = false;
1097 u32 read_cnt;
1098 u8 reg[16];
1099 int repeated_bytes = 0;
1100 int buf_offset = buf - msm_host->rx_buf;
1101
1102 lp = (u32 *)buf;
1103 temp = (u32 *)reg;
1104 cnt = (rx_byte + 3) >> 2;
1105 if (cnt > 4)
1106 cnt = 4; /* 4 x 32 bits registers only */
1107
1108 /* Calculate real read data count */
1109 read_cnt = dsi_read(msm_host, 0x1d4) >> 16;
1110
1111 ack_error = (rx_byte == 4) ?
1112 (read_cnt == 8) : /* short pkt + 4-byte error pkt */
1113 (read_cnt == (pkt_size + 6 + 4)); /* long pkt+4-byte error pkt*/
1114
1115 if (ack_error)
1116 read_cnt -= 4; /* Remove 4 byte error pkt */
1117
1118 /*
1119 * In case of multiple reads from the panel, after the first read there
1120 * is a possibility that some payload bytes repeat in the RDBK_DATA
1121 * registers, since every pass reads all the parameters from the panel
1122 * starting from the first byte. We need to skip the repeated bytes and
1123 * then append the new parameters to the rx buffer.
1124 */
1125 if (read_cnt > 16) {
1126 int bytes_shifted;
1127 /* Any data more than 16 bytes will be shifted out.
1128 * The temp read buffer should already contain these bytes.
1129 * The remaining bytes in read buffer are the repeated bytes.
1130 */
1131 bytes_shifted = read_cnt - 16;
1132 repeated_bytes = buf_offset - bytes_shifted;
1133 }
1134
1135 for (i = cnt - 1; i >= 0; i--) {
1136 data = dsi_read(msm_host, REG_DSI_RDBK_DATA(i));
1137 *temp++ = ntohl(data); /* to host byte order */
1138 DBG("data = 0x%x and ntohl(data) = 0x%x", data, ntohl(data));
1139 }
1140
1141 for (i = repeated_bytes; i < 16; i++)
1142 buf[j++] = reg[i];
1143
1144 return j;
1145}
1146
1147static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
1148 const struct mipi_dsi_msg *msg)
1149{
1150 int len, ret;
1151 int bllp_len = msm_host->mode->hdisplay *
1152 dsi_get_bpp(msm_host->format) / 8;
1153
1154 len = dsi_cmd_dma_add(msm_host->tx_gem_obj, msg);
1155 if (len < 0) {
1156 pr_err("%s: failed to add cmd type = 0x%x\n",
1157 __func__, msg->type);
1158 return -EINVAL;
1159 }
1160
1161 /* For video mode, do not send cmds longer than
1162 * one pixel line, since they can only be
1163 * transmitted during the BLLP.
1164 */
1165 /* TODO: if the command is sent in LP mode, the bit rate is only
1166 * half of esc clk rate. In this case, if the video is already
1167 * actively streaming, we need to check more carefully if the
1168 * command can be fit into one BLLP.
1169 */
1170 if ((msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) && (len > bllp_len)) {
1171 pr_err("%s: cmd cannot fit into BLLP period, len=%d\n",
1172 __func__, len);
1173 return -EINVAL;
1174 }
1175
1176 ret = dsi_cmd_dma_tx(msm_host, len);
1177 if (ret < len) {
1178 pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, len=%d\n",
1179 __func__, msg->type, (*(u8 *)(msg->tx_buf)), len);
1180 return -ECOMM;
1181 }
1182
1183 return len;
1184}
1185
1186static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host)
1187{
1188 u32 data0, data1;
1189
1190 data0 = dsi_read(msm_host, REG_DSI_CTRL);
1191 data1 = data0;
1192 data1 &= ~DSI_CTRL_ENABLE;
1193 dsi_write(msm_host, REG_DSI_CTRL, data1);
1194 /*
1195 * The dsi controller needs to be disabled before the
1196 * clocks are turned on
1197 */
1198 wmb();
1199
1200 dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
1201 wmb(); /* make sure clocks are enabled */
1202
1203 /* dsi controller can only be reset while clocks are running */
1204 dsi_write(msm_host, REG_DSI_RESET, 1);
1205 wmb(); /* make sure the reset happens */
1206 dsi_write(msm_host, REG_DSI_RESET, 0);
1207 wmb(); /* controller out of reset */
1208 dsi_write(msm_host, REG_DSI_CTRL, data0);
1209 wmb(); /* make sure the dsi controller is enabled again */
1210}
1211
1212static void dsi_err_worker(struct work_struct *work)
1213{
1214 struct msm_dsi_host *msm_host =
1215 container_of(work, struct msm_dsi_host, err_work);
1216 u32 status = msm_host->err_work_state;
1217
1218 pr_err("%s: status=%x\n", __func__, status);
1219 if (status & DSI_ERR_STATE_MDP_FIFO_UNDERFLOW)
1220 dsi_sw_reset_restore(msm_host);
1221
1222 /* It is safe to clear here because error irq is disabled. */
1223 msm_host->err_work_state = 0;
1224
1225 /* enable dsi error interrupt */
1226 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);
1227}
1228
1229static void dsi_ack_err_status(struct msm_dsi_host *msm_host)
1230{
1231 u32 status;
1232
1233 status = dsi_read(msm_host, REG_DSI_ACK_ERR_STATUS);
1234
1235 if (status) {
1236 dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, status);
1237 /* Writing of an extra 0 needed to clear error bits */
1238 dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, 0);
1239 msm_host->err_work_state |= DSI_ERR_STATE_ACK;
1240 }
1241}
1242
1243static void dsi_timeout_status(struct msm_dsi_host *msm_host)
1244{
1245 u32 status;
1246
1247 status = dsi_read(msm_host, REG_DSI_TIMEOUT_STATUS);
1248
1249 if (status) {
1250 dsi_write(msm_host, REG_DSI_TIMEOUT_STATUS, status);
1251 msm_host->err_work_state |= DSI_ERR_STATE_TIMEOUT;
1252 }
1253}
1254
1255static void dsi_dln0_phy_err(struct msm_dsi_host *msm_host)
1256{
1257 u32 status;
1258
1259 status = dsi_read(msm_host, REG_DSI_DLN0_PHY_ERR);
1260
1261 if (status) {
1262 dsi_write(msm_host, REG_DSI_DLN0_PHY_ERR, status);
1263 msm_host->err_work_state |= DSI_ERR_STATE_DLN0_PHY;
1264 }
1265}
1266
1267static void dsi_fifo_status(struct msm_dsi_host *msm_host)
1268{
1269 u32 status;
1270
1271 status = dsi_read(msm_host, REG_DSI_FIFO_STATUS);
1272
1273 /* fifo underflow, overflow */
1274 if (status) {
1275 dsi_write(msm_host, REG_DSI_FIFO_STATUS, status);
1276 msm_host->err_work_state |= DSI_ERR_STATE_FIFO;
1277 if (status & DSI_FIFO_STATUS_CMD_MDP_FIFO_UNDERFLOW)
1278 msm_host->err_work_state |=
1279 DSI_ERR_STATE_MDP_FIFO_UNDERFLOW;
1280 }
1281}
1282
1283static void dsi_status(struct msm_dsi_host *msm_host)
1284{
1285 u32 status;
1286
1287 status = dsi_read(msm_host, REG_DSI_STATUS0);
1288
1289 if (status & DSI_STATUS0_INTERLEAVE_OP_CONTENTION) {
1290 dsi_write(msm_host, REG_DSI_STATUS0, status);
1291 msm_host->err_work_state |=
1292 DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION;
1293 }
1294}
1295
1296static void dsi_clk_status(struct msm_dsi_host *msm_host)
1297{
1298 u32 status;
1299
1300 status = dsi_read(msm_host, REG_DSI_CLK_STATUS);
1301
1302 if (status & DSI_CLK_STATUS_PLL_UNLOCKED) {
1303 dsi_write(msm_host, REG_DSI_CLK_STATUS, status);
1304 msm_host->err_work_state |= DSI_ERR_STATE_PLL_UNLOCKED;
1305 }
1306}
1307
1308static void dsi_error(struct msm_dsi_host *msm_host)
1309{
1310 /* disable dsi error interrupt */
1311 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 0);
1312
1313 dsi_clk_status(msm_host);
1314 dsi_fifo_status(msm_host);
1315 dsi_ack_err_status(msm_host);
1316 dsi_timeout_status(msm_host);
1317 dsi_status(msm_host);
1318 dsi_dln0_phy_err(msm_host);
1319
1320 queue_work(msm_host->workqueue, &msm_host->err_work);
1321}
1322
1323static irqreturn_t dsi_host_irq(int irq, void *ptr)
1324{
1325 struct msm_dsi_host *msm_host = ptr;
1326 u32 isr;
1327 unsigned long flags;
1328
1329 if (!msm_host->ctrl_base)
1330 return IRQ_HANDLED;
1331
1332 spin_lock_irqsave(&msm_host->intr_lock, flags);
1333 isr = dsi_read(msm_host, REG_DSI_INTR_CTRL);
1334 dsi_write(msm_host, REG_DSI_INTR_CTRL, isr);
1335 spin_unlock_irqrestore(&msm_host->intr_lock, flags);
1336
1337 DBG("isr=0x%x, id=%d", isr, msm_host->id);
1338
1339 if (isr & DSI_IRQ_ERROR)
1340 dsi_error(msm_host);
1341
1342 if (isr & DSI_IRQ_VIDEO_DONE)
1343 complete(&msm_host->video_comp);
1344
1345 if (isr & DSI_IRQ_CMD_DMA_DONE)
1346 complete(&msm_host->dma_comp);
1347
1348 return IRQ_HANDLED;
1349}
1350
1351static int dsi_host_init_panel_gpios(struct msm_dsi_host *msm_host,
1352 struct device *panel_device)
1353{
1354 int ret;
1355
1356 msm_host->disp_en_gpio = devm_gpiod_get(panel_device,
1357 "disp-enable");
1358 if (IS_ERR(msm_host->disp_en_gpio)) {
1359 DBG("cannot get disp-enable-gpios %ld",
1360 PTR_ERR(msm_host->disp_en_gpio));
1361 msm_host->disp_en_gpio = NULL;
1362 }
1363 if (msm_host->disp_en_gpio) {
1364 ret = gpiod_direction_output(msm_host->disp_en_gpio, 0);
1365 if (ret) {
1366 pr_err("cannot set dir to disp-en-gpios %d\n", ret);
1367 return ret;
1368 }
1369 }
1370
1371 msm_host->te_gpio = devm_gpiod_get(panel_device, "disp-te");
1372 if (IS_ERR(msm_host->te_gpio)) {
1373 DBG("cannot get disp-te-gpios %ld", PTR_ERR(msm_host->te_gpio));
1374 msm_host->te_gpio = NULL;
1375 }
1376
1377 if (msm_host->te_gpio) {
1378 ret = gpiod_direction_input(msm_host->te_gpio);
1379 if (ret) {
1380 pr_err("%s: cannot set dir to disp-te-gpios, %d\n",
1381 __func__, ret);
1382 return ret;
1383 }
1384 }
1385
1386 return 0;
1387}
1388
1389static int dsi_host_attach(struct mipi_dsi_host *host,
1390 struct mipi_dsi_device *dsi)
1391{
1392 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1393 int ret;
1394
1395 msm_host->channel = dsi->channel;
1396 msm_host->lanes = dsi->lanes;
1397 msm_host->format = dsi->format;
1398 msm_host->mode_flags = dsi->mode_flags;
1399
1400 msm_host->panel_node = dsi->dev.of_node;
1401
1402 /* Some gpios defined in panel DT need to be controlled by host */
1403 ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev);
1404 if (ret)
1405 return ret;
1406
1407 DBG("id=%d", msm_host->id);
1408 if (msm_host->dev)
1409 drm_helper_hpd_irq_event(msm_host->dev);
1410
1411 return 0;
1412}
1413
1414static int dsi_host_detach(struct mipi_dsi_host *host,
1415 struct mipi_dsi_device *dsi)
1416{
1417 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1418
1419 msm_host->panel_node = NULL;
1420
1421 DBG("id=%d", msm_host->id);
1422 if (msm_host->dev)
1423 drm_helper_hpd_irq_event(msm_host->dev);
1424
1425 return 0;
1426}
1427
1428static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
1429 const struct mipi_dsi_msg *msg)
1430{
1431 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1432 int ret;
1433
1434 if (!msg || !msm_host->power_on)
1435 return -EINVAL;
1436
1437 mutex_lock(&msm_host->cmd_mutex);
1438 ret = msm_dsi_manager_cmd_xfer(msm_host->id, msg);
1439 mutex_unlock(&msm_host->cmd_mutex);
1440
1441 return ret;
1442}
1443
1444static const struct mipi_dsi_host_ops dsi_host_ops = {
1445 .attach = dsi_host_attach,
1446 .detach = dsi_host_detach,
1447 .transfer = dsi_host_transfer,
1448};
1449
1450int msm_dsi_host_init(struct msm_dsi *msm_dsi)
1451{
1452 struct msm_dsi_host *msm_host = NULL;
1453 struct platform_device *pdev = msm_dsi->pdev;
1454 int ret;
1455
1456 msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL);
1457 if (!msm_host) {
1458 pr_err("%s: FAILED: cannot alloc dsi host\n",
1459 __func__);
1460 ret = -ENOMEM;
1461 goto fail;
1462 }
1463
1464 ret = of_property_read_u32(pdev->dev.of_node,
1465 "qcom,dsi-host-index", &msm_host->id);
1466 if (ret) {
1467 dev_err(&pdev->dev,
1468 "%s: host index not specified, ret=%d\n",
1469 __func__, ret);
1470 goto fail;
1471 }
1472 msm_host->pdev = pdev;
1473
1474 ret = dsi_clk_init(msm_host);
1475 if (ret) {
1476 pr_err("%s: unable to initialize dsi clks\n", __func__);
1477 goto fail;
1478 }
1479
1480 msm_host->ctrl_base = msm_ioremap(pdev, "dsi_ctrl", "DSI CTRL");
1481 if (IS_ERR(msm_host->ctrl_base)) {
1482 pr_err("%s: unable to map Dsi ctrl base\n", __func__);
1483 ret = PTR_ERR(msm_host->ctrl_base);
1484 goto fail;
1485 }
1486
1487 msm_host->cfg = dsi_get_config(msm_host);
1488 if (!msm_host->cfg) {
1489 ret = -EINVAL;
1490 pr_err("%s: get config failed\n", __func__);
1491 goto fail;
1492 }
1493
1494 ret = dsi_regulator_init(msm_host);
1495 if (ret) {
1496 pr_err("%s: regulator init failed\n", __func__);
1497 goto fail;
1498 }
1499
1500 msm_host->rx_buf = devm_kzalloc(&pdev->dev, SZ_4K, GFP_KERNEL);
1501 if (!msm_host->rx_buf) {
1502 pr_err("%s: alloc rx temp buf failed\n", __func__);
ret = -ENOMEM;
1503 goto fail;
1504 }
1505
1506 init_completion(&msm_host->dma_comp);
1507 init_completion(&msm_host->video_comp);
1508 mutex_init(&msm_host->dev_mutex);
1509 mutex_init(&msm_host->cmd_mutex);
1510 mutex_init(&msm_host->clk_mutex);
1511 spin_lock_init(&msm_host->intr_lock);
1512
1513 /* setup workqueue */
1514 msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0);
1515 INIT_WORK(&msm_host->err_work, dsi_err_worker);
1516
1517 msm_dsi->phy = msm_dsi_phy_init(pdev, msm_host->cfg->phy_type,
1518 msm_host->id);
1519 if (!msm_dsi->phy) {
1520 ret = -EINVAL;
1521 pr_err("%s: phy init failed\n", __func__);
1522 goto fail;
1523 }
1524 msm_dsi->host = &msm_host->base;
1525 msm_dsi->id = msm_host->id;
1526
1527 DBG("Dsi Host %d initialized", msm_host->id);
1528 return 0;
1529
1530fail:
1531 return ret;
1532}
1533
1534void msm_dsi_host_destroy(struct mipi_dsi_host *host)
1535{
1536 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1537
1538 DBG("");
1539 dsi_tx_buf_free(msm_host);
1540 if (msm_host->workqueue) {
1541 flush_workqueue(msm_host->workqueue);
1542 destroy_workqueue(msm_host->workqueue);
1543 msm_host->workqueue = NULL;
1544 }
1545
1546 mutex_destroy(&msm_host->clk_mutex);
1547 mutex_destroy(&msm_host->cmd_mutex);
1548 mutex_destroy(&msm_host->dev_mutex);
1549}
1550
1551int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
1552 struct drm_device *dev)
1553{
1554 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1555 struct platform_device *pdev = msm_host->pdev;
1556 int ret;
1557
1558 msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
1559 if (!msm_host->irq) {
1560 ret = -EINVAL;
1561 dev_err(dev->dev, "failed to get irq\n");
1562 return ret;
1563 }
1564
1565 ret = devm_request_irq(&pdev->dev, msm_host->irq,
1566 dsi_host_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
1567 "dsi_isr", msm_host);
1568 if (ret < 0) {
1569 dev_err(&pdev->dev, "failed to request IRQ%u: %d\n",
1570 msm_host->irq, ret);
1571 return ret;
1572 }
1573
1574 msm_host->dev = dev;
1575 ret = dsi_tx_buf_alloc(msm_host, SZ_4K);
1576 if (ret) {
1577 pr_err("%s: alloc tx gem obj failed, %d\n", __func__, ret);
1578 return ret;
1579 }
1580
1581 return 0;
1582}
1583
1584int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer)
1585{
1586 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1587 struct device_node *node;
1588 int ret;
1589
1590 /* Register mipi dsi host */
1591 if (!msm_host->registered) {
1592 host->dev = &msm_host->pdev->dev;
1593 host->ops = &dsi_host_ops;
1594 ret = mipi_dsi_host_register(host);
1595 if (ret)
1596 return ret;
1597
1598 msm_host->registered = true;
1599
1600 /* If the panel driver has not been probed after host register,
1601 * we should defer the host's probe.
1602 * This makes sure the panel is connected when fbcon detects the
1603 * connector status and gets the proper display mode to
1604 * create the framebuffer.
1605 */
1606 if (check_defer) {
1607 node = of_get_child_by_name(msm_host->pdev->dev.of_node,
1608 "panel");
1609 if (node) {
1610 if (!of_drm_find_panel(node))
1611 return -EPROBE_DEFER;
1612 }
1613 }
1614 }
1615
1616 return 0;
1617}
1618
1619void msm_dsi_host_unregister(struct mipi_dsi_host *host)
1620{
1621 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1622
1623 if (msm_host->registered) {
1624 mipi_dsi_host_unregister(host);
1625 host->dev = NULL;
1626 host->ops = NULL;
1627 msm_host->registered = false;
1628 }
1629}
1630
1631int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
1632 const struct mipi_dsi_msg *msg)
1633{
1634 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1635
1636 /* TODO: make sure dsi_cmd_mdp is idle.
1637 * Since DSI6G v1.2.0, we can set DSI_TRIG_CTRL.BLOCK_DMA_WITHIN_FRAME
1638 * to ask H/W to wait until cmd mdp is idle. S/W wait is not needed.
1639 * How to handle the old versions? Wait for mdp cmd done?
1640 */
1641
1642 /*
1643 * The mdss interrupt is generated in the mdp core clock domain, so
1644 * the mdp clock needs to be enabled to receive the dsi interrupt.
1645 */
1646 dsi_clk_ctrl(msm_host, 1);
1647
1648 /* TODO: vote for bus bandwidth */
1649
1650 if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
1651 dsi_set_tx_power_mode(0, msm_host);
1652
1653 msm_host->dma_cmd_ctrl_restore = dsi_read(msm_host, REG_DSI_CTRL);
1654 dsi_write(msm_host, REG_DSI_CTRL,
1655 msm_host->dma_cmd_ctrl_restore |
1656 DSI_CTRL_CMD_MODE_EN |
1657 DSI_CTRL_ENABLE);
1658 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 1);
1659
1660 return 0;
1661}
1662
1663void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
1664 const struct mipi_dsi_msg *msg)
1665{
1666 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1667
1668 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 0);
1669 dsi_write(msm_host, REG_DSI_CTRL, msm_host->dma_cmd_ctrl_restore);
1670
1671 if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
1672 dsi_set_tx_power_mode(1, msm_host);
1673
1674 /* TODO: unvote for bus bandwidth */
1675
1676 dsi_clk_ctrl(msm_host, 0);
1677}
1678
1679int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
1680 const struct mipi_dsi_msg *msg)
1681{
1682 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1683
1684 return dsi_cmds2buf_tx(msm_host, msg);
1685}
1686
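/* Reads longer than the 16-byte read-back FIFO are fetched in chunks:
 * a SET_MAXIMUM_RETURN_PACKET_SIZE command caps each response (10
 * payload bytes on the first pass, 14 afterwards, since the 4-byte dcs
 * header is only preserved on the first pass), and the read command is
 * re-sent until the requested rx_len has been accumulated.
 */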
1687int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
1688 const struct mipi_dsi_msg *msg)
1689{
1690 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1691 int data_byte, rx_byte, dlen, end;
1692 int short_response, diff, pkt_size, ret = 0;
1693 char cmd;
1694 int rlen = msg->rx_len;
1695 u8 *buf;
1696
1697 if (rlen <= 2) {
1698 short_response = 1;
1699 pkt_size = rlen;
1700 rx_byte = 4;
1701 } else {
1702 short_response = 0;
1703 data_byte = 10; /* first read */
1704 if (rlen < data_byte)
1705 pkt_size = rlen;
1706 else
1707 pkt_size = data_byte;
1708 rx_byte = data_byte + 6; /* 4 header + 2 crc */
1709 }
1710
1711 buf = msm_host->rx_buf;
1712 end = 0;
1713 while (!end) {
1714 u8 tx[2] = {pkt_size & 0xff, pkt_size >> 8};
1715 struct mipi_dsi_msg max_pkt_size_msg = {
1716 .channel = msg->channel,
1717 .type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
1718 .tx_len = 2,
1719 .tx_buf = tx,
1720 };
1721
1722 DBG("rlen=%d pkt_size=%d rx_byte=%d",
1723 rlen, pkt_size, rx_byte);
1724
1725 ret = dsi_cmds2buf_tx(msm_host, &max_pkt_size_msg);
1726 if (ret < 2) {
1727 pr_err("%s: Set max pkt size failed, %d\n",
1728 __func__, ret);
1729 return -EINVAL;
1730 }
1731
1732 if ((msm_host->cfg->major == MSM_DSI_VER_MAJOR_6G) &&
1733 (msm_host->cfg->minor >= MSM_DSI_6G_VER_MINOR_V1_1)) {
1734 /* Clear the RDBK_DATA registers */
1735 dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL,
1736 DSI_RDBK_DATA_CTRL_CLR);
1737 wmb(); /* make sure the RDBK registers are cleared */
1738 dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL, 0);
1739 wmb(); /* release cleared status before transfer */
1740 }
1741
1742 ret = dsi_cmds2buf_tx(msm_host, msg);
1743 if (ret < msg->tx_len) {
1744 pr_err("%s: Read cmd Tx failed, %d\n", __func__, ret);
1745 return ret;
1746 }
1747
1748 /*
1749 * Once the cmd_dma_done interrupt is received, the return data
1750 * from the client is ready and already stored in the RDBK_DATA
1751 * registers. Since the rx fifo is 16 bytes, the dcs header is
1752 * kept in the first loop; after that the dcs header is lost
1753 * during the shift into the registers.
1754 */
1755 dlen = dsi_cmd_dma_rx(msm_host, buf, rx_byte, pkt_size);
1756
1757 if (dlen <= 0)
1758 return 0;
1759
1760 if (short_response)
1761 break;
1762
1763 if (rlen <= data_byte) {
1764 diff = data_byte - rlen;
1765 end = 1;
1766 } else {
1767 diff = 0;
1768 rlen -= data_byte;
1769 }
1770
1771 if (!end) {
1772 dlen -= 2; /* 2 crc */
1773 dlen -= diff;
1774 buf += dlen; /* next start position */
1775 data_byte = 14; /* NOT first read */
1776 if (rlen < data_byte)
1777 pkt_size += rlen;
1778 else
1779 pkt_size += data_byte;
1780 DBG("buf=%p dlen=%d diff=%d", buf, dlen, diff);
1781 }
1782 }
1783
1784 /*
1785 * For single Long read, if the requested rlen < 10,
1786 * we need to shift the start position of rx
1787 * data buffer to skip the bytes which are not
1788 * updated.
1789 */
1790 if (pkt_size < 10 && !short_response)
1791 buf = msm_host->rx_buf + (10 - rlen);
1792 else
1793 buf = msm_host->rx_buf;
1794
1795 cmd = buf[0];
1796 switch (cmd) {
1797 case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
1798 pr_err("%s: rx ACK_ERR_PACLAGE\n", __func__);
1799 ret = 0;
1800 case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
1801 case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
1802 ret = dsi_short_read1_resp(buf, msg);
1803 break;
1804 case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
1805 case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
1806 ret = dsi_short_read2_resp(buf, msg);
1807 break;
1808 case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
1809 case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
1810 ret = dsi_long_read_resp(buf, msg);
1811 break;
1812 default:
1813 pr_warn("%s:Invalid response cmd\n", __func__);
1814 ret = 0;
1815 }
1816
1817 return ret;
1818}
1819
1820void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 iova, u32 len)
1821{
1822 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1823
1824 dsi_write(msm_host, REG_DSI_DMA_BASE, iova);
1825 dsi_write(msm_host, REG_DSI_DMA_LEN, len);
1826 dsi_write(msm_host, REG_DSI_TRIG_DMA, 1);
1827
1828 /* Make sure trigger happens */
1829 wmb();
1830}
1831
1832int msm_dsi_host_enable(struct mipi_dsi_host *host)
1833{
1834 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1835
1836 dsi_op_mode_config(msm_host,
1837 !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), true);
1838
1839 /* TODO: clock should be turned off for command mode,
1840 * and only turned on before MDP START.
1841 * This part of code should be enabled once mdp driver support it.
1842 */
1843 /* if (msm_panel->mode == MSM_DSI_CMD_MODE)
1844 dsi_clk_ctrl(msm_host, 0); */
1845
1846 return 0;
1847}
1848
1849int msm_dsi_host_disable(struct mipi_dsi_host *host)
1850{
1851 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1852
1853 dsi_op_mode_config(msm_host,
1854 !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), false);
1855
1856 /* Even though INTF is disabled, the video engine does not stop,
1857 * and that keeps the cmd engine blocked.
1858 * Reset to disable the video engine so that we can send off cmds.
1859 */
1860 dsi_sw_reset(msm_host);
1861
1862 return 0;
1863}
1864
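/* Power-on sequence: calculate the link clock rates for the current
 * mode, enable the regulators, reset and enable the PHY (the bus
 * clocks are held only around the PHY call, long enough to read back
 * the clk_pre/clk_post handoff values), then enable all clocks, set up
 * timing and finally configure and enable the controller.
 */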
1865int msm_dsi_host_power_on(struct mipi_dsi_host *host)
1866{
1867 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1868 u32 clk_pre = 0, clk_post = 0;
1869 int ret = 0;
1870
1871 mutex_lock(&msm_host->dev_mutex);
1872 if (msm_host->power_on) {
1873 DBG("dsi host already on");
1874 goto unlock_ret;
1875 }
1876
1877 ret = dsi_calc_clk_rate(msm_host);
1878 if (ret) {
1879 pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
1880 goto unlock_ret;
1881 }
1882
1883 ret = dsi_host_regulator_enable(msm_host);
1884 if (ret) {
1885 pr_err("%s:Failed to enable vregs.ret=%d\n",
1886 __func__, ret);
1887 goto unlock_ret;
1888 }
1889
1890 ret = dsi_bus_clk_enable(msm_host);
1891 if (ret) {
1892 pr_err("%s: failed to enable bus clocks, %d\n", __func__, ret);
1893 goto fail_disable_reg;
1894 }
1895
1896 dsi_phy_sw_reset(msm_host);
1897 ret = msm_dsi_manager_phy_enable(msm_host->id,
1898 msm_host->byte_clk_rate * 8,
1899 clk_get_rate(msm_host->esc_clk),
1900 &clk_pre, &clk_post);
1901 dsi_bus_clk_disable(msm_host);
1902 if (ret) {
1903 pr_err("%s: failed to enable phy, %d\n", __func__, ret);
1904 goto fail_disable_reg;
1905 }
1906
1907 ret = dsi_clk_ctrl(msm_host, 1);
1908 if (ret) {
1909 pr_err("%s: failed to enable clocks. ret=%d\n", __func__, ret);
1910 goto fail_disable_reg;
1911 }
1912
1913 dsi_timing_setup(msm_host);
1914 dsi_sw_reset(msm_host);
1915 dsi_ctrl_config(msm_host, true, clk_pre, clk_post);
1916
1917 if (msm_host->disp_en_gpio)
1918 gpiod_set_value(msm_host->disp_en_gpio, 1);
1919
1920 msm_host->power_on = true;
1921 mutex_unlock(&msm_host->dev_mutex);
1922
1923 return 0;
1924
1925fail_disable_reg:
1926 dsi_host_regulator_disable(msm_host);
1927unlock_ret:
1928 mutex_unlock(&msm_host->dev_mutex);
1929 return ret;
1930}
1931
1932int msm_dsi_host_power_off(struct mipi_dsi_host *host)
1933{
1934 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1935
1936 mutex_lock(&msm_host->dev_mutex);
1937 if (!msm_host->power_on) {
1938 DBG("dsi host already off");
1939 goto unlock_ret;
1940 }
1941
1942 dsi_ctrl_config(msm_host, false, 0, 0);
1943
1944 if (msm_host->disp_en_gpio)
1945 gpiod_set_value(msm_host->disp_en_gpio, 0);
1946
1947 msm_dsi_manager_phy_disable(msm_host->id);
1948
1949 dsi_clk_ctrl(msm_host, 0);
1950
1951 dsi_host_regulator_disable(msm_host);
1952
1953 DBG("-");
1954
1955 msm_host->power_on = false;
1956
1957unlock_ret:
1958 mutex_unlock(&msm_host->dev_mutex);
1959 return 0;
1960}
1961
1962int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
1963 struct drm_display_mode *mode)
1964{
1965 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1966
1967 if (msm_host->mode) {
1968 drm_mode_destroy(msm_host->dev, msm_host->mode);
1969 msm_host->mode = NULL;
1970 }
1971
1972 msm_host->mode = drm_mode_duplicate(msm_host->dev, mode);
1973 if (!msm_host->mode) {
1974 pr_err("%s: cannot duplicate mode\n", __func__);
1975 return -ENOMEM;
1976 }
1977
1978 return 0;
1979}
1980
1981struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host,
1982 unsigned long *panel_flags)
1983{
1984 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1985 struct drm_panel *panel;
1986
1987 panel = of_drm_find_panel(msm_host->panel_node);
1988 if (panel_flags)
1989 *panel_flags = msm_host->mode_flags;
1990
1991 return panel;
1992}
1993
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
new file mode 100644
index 000000000000..ee3ebcaa33f5
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -0,0 +1,705 @@
1/*
2 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include "msm_kms.h"
15#include "dsi.h"
16
17struct msm_dsi_manager {
18 struct msm_dsi *dsi[DSI_MAX];
19
20 bool is_dual_panel;
21 bool is_sync_needed;
22 int master_panel_id;
23};
24
25static struct msm_dsi_manager msm_dsim_glb;
26
27#define IS_DUAL_PANEL() (msm_dsim_glb.is_dual_panel)
28#define IS_SYNC_NEEDED() (msm_dsim_glb.is_sync_needed)
29#define IS_MASTER_PANEL(id) (msm_dsim_glb.master_panel_id == id)
30
31static inline struct msm_dsi *dsi_mgr_get_dsi(int id)
32{
33 return msm_dsim_glb.dsi[id];
34}
35
36static inline struct msm_dsi *dsi_mgr_get_other_dsi(int id)
37{
38 return msm_dsim_glb.dsi[(id + 1) % DSI_MAX];
39}
40
41static int dsi_mgr_parse_dual_panel(struct device_node *np, int id)
42{
43 struct msm_dsi_manager *msm_dsim = &msm_dsim_glb;
44
45 /* We assume the 2 dsi nodes carry the same dual-panel and sync-mode
46 * information, and that only one node specifies the master in dual mode.
47 */
48 if (!msm_dsim->is_dual_panel)
49 msm_dsim->is_dual_panel = of_property_read_bool(
50 np, "qcom,dual-panel-mode");
51
52 if (msm_dsim->is_dual_panel) {
53 if (of_property_read_bool(np, "qcom,master-panel"))
54 msm_dsim->master_panel_id = id;
55 if (!msm_dsim->is_sync_needed)
56 msm_dsim->is_sync_needed = of_property_read_bool(
57 np, "qcom,sync-dual-panel");
58 }
59
60 return 0;
61}
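
/* A hypothetical devicetree fragment matching the properties parsed
 * above (node names and addresses are illustrative only):
 *
 *	dsi0: dsi@fd922800 {
 *		qcom,dual-panel-mode;
 *		qcom,master-panel;
 *		qcom,sync-dual-panel;
 *	};
 *	dsi1: dsi@fd922e00 {
 *		qcom,dual-panel-mode;
 *		qcom,sync-dual-panel;
 *	};
 */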
62
63struct dsi_connector {
64 struct drm_connector base;
65 int id;
66};
67
68struct dsi_bridge {
69 struct drm_bridge base;
70 int id;
71};
72
73#define to_dsi_connector(x) container_of(x, struct dsi_connector, base)
74#define to_dsi_bridge(x) container_of(x, struct dsi_bridge, base)
75
76static inline int dsi_mgr_connector_get_id(struct drm_connector *connector)
77{
78 struct dsi_connector *dsi_connector = to_dsi_connector(connector);
79 return dsi_connector->id;
80}
81
82static int dsi_mgr_bridge_get_id(struct drm_bridge *bridge)
83{
84 struct dsi_bridge *dsi_bridge = to_dsi_bridge(bridge);
85 return dsi_bridge->id;
86}
87
88static enum drm_connector_status dsi_mgr_connector_detect(
89 struct drm_connector *connector, bool force)
90{
91 int id = dsi_mgr_connector_get_id(connector);
92 struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
93 struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
94 struct msm_drm_private *priv = connector->dev->dev_private;
95 struct msm_kms *kms = priv->kms;
96
97 DBG("id=%d", id);
98 if (!msm_dsi->panel) {
99 msm_dsi->panel = msm_dsi_host_get_panel(msm_dsi->host,
100 &msm_dsi->panel_flags);
101
102 /* There is only 1 panel in the global panel list
103 * for dual panel mode. Therefore the slave dsi should get
104 * the drm_panel instance from the master dsi, and
105 * keep using the panel flags obtained from the current DSI link.
106 */
107 if (!msm_dsi->panel && IS_DUAL_PANEL() &&
108 !IS_MASTER_PANEL(id) && other_dsi)
109 msm_dsi->panel = msm_dsi_host_get_panel(
110 other_dsi->host, NULL);
111
112 if (msm_dsi->panel && IS_DUAL_PANEL())
113 drm_object_attach_property(&connector->base,
114 connector->dev->mode_config.tile_property, 0);
115
116 /* Set split display info to kms once dual panel is connected
117 * to both hosts
118 */
119 if (msm_dsi->panel && IS_DUAL_PANEL() &&
120 other_dsi && other_dsi->panel) {
121 bool cmd_mode = !(msm_dsi->panel_flags &
122 MIPI_DSI_MODE_VIDEO);
123 struct drm_encoder *encoder = msm_dsi_get_encoder(
124 dsi_mgr_get_dsi(DSI_ENCODER_MASTER));
125 struct drm_encoder *slave_enc = msm_dsi_get_encoder(
126 dsi_mgr_get_dsi(DSI_ENCODER_SLAVE));
127
128 if (kms->funcs->set_split_display)
129 kms->funcs->set_split_display(kms, encoder,
130 slave_enc, cmd_mode);
131 else
132 pr_err("mdp does not support dual panel\n");
133 }
134 }
135
136 return msm_dsi->panel ? connector_status_connected :
137 connector_status_disconnected;
138}
139
140static void dsi_mgr_connector_destroy(struct drm_connector *connector)
141{
142 DBG("");
143 drm_connector_unregister(connector);
144 drm_connector_cleanup(connector);
145}
146
147static void dsi_dual_connector_fix_modes(struct drm_connector *connector)
148{
149 struct drm_display_mode *mode, *m;
150
151 /* Only support left-right mode */
152 list_for_each_entry_safe(mode, m, &connector->probed_modes, head) {
153 mode->clock >>= 1;
154 mode->hdisplay >>= 1;
155 mode->hsync_start >>= 1;
156 mode->hsync_end >>= 1;
157 mode->htotal >>= 1;
158 drm_mode_set_name(mode);
159 }
160}
161
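/* Example (hypothetical resolution): a panel reporting one 2160x1920
 * mode is fixed up above to 1080x1920 per connector; each connector
 * then advertises a single 1080x1920 tile below (tile_h_loc 1 for
 * DSI_RIGHT, 0 otherwise) so userspace can reassemble the full
 * surface.
 */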
162static int dsi_dual_connector_tile_init(
163 struct drm_connector *connector, int id)
164{
165 struct drm_display_mode *mode;
166 /* Fake topology id */
167 char topo_id[8] = {'M', 'S', 'M', 'D', 'U', 'D', 'S', 'I'};
168
169 if (connector->tile_group) {
170 DBG("Tile property has been initialized");
171 return 0;
172 }
173
174 /* Use the first mode only for now */
175 if (list_empty(&connector->probed_modes))
176 return -EINVAL;
177 mode = list_first_entry(&connector->probed_modes,
178 struct drm_display_mode,
179 head);
180
181 connector->tile_group = drm_mode_get_tile_group(
182 connector->dev, topo_id);
183 if (!connector->tile_group)
184 connector->tile_group = drm_mode_create_tile_group(
185 connector->dev, topo_id);
186 if (!connector->tile_group) {
187 pr_err("%s: failed to create tile group\n", __func__);
188 return -ENOMEM;
189 }
190
191 connector->has_tile = true;
192 connector->tile_is_single_monitor = true;
193
194 /* mode has been fixed */
195 connector->tile_h_size = mode->hdisplay;
196 connector->tile_v_size = mode->vdisplay;
197
198 /* Only support left-right mode */
199 connector->num_h_tile = 2;
200 connector->num_v_tile = 1;
201
202 connector->tile_v_loc = 0;
203 connector->tile_h_loc = (id == DSI_RIGHT) ? 1 : 0;
204
205 return 0;
206}
207
208static int dsi_mgr_connector_get_modes(struct drm_connector *connector)
209{
210 int id = dsi_mgr_connector_get_id(connector);
211 struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
212 struct drm_panel *panel = msm_dsi->panel;
213 int ret, num;
214
215 if (!panel)
216 return 0;
217
218 /* Since we have 2 connectors but only 1 drm_panel in dual DSI mode,
219 * the panel should not stay attached to any connector.
220 * Only temporarily attach the panel to the current connector here,
221 * to let the panel set the mode on this connector.
222 */
223 drm_panel_attach(panel, connector);
224 num = drm_panel_get_modes(panel);
225 drm_panel_detach(panel);
226 if (!num)
227 return 0;
228
229 if (IS_DUAL_PANEL()) {
230 /* report half resolution to user */
231 dsi_dual_connector_fix_modes(connector);
232 ret = dsi_dual_connector_tile_init(connector, id);
233 if (ret)
234 return ret;
235 ret = drm_mode_connector_set_tile_property(connector);
236 if (ret) {
237 pr_err("%s: set tile property failed, %d\n",
238 __func__, ret);
239 return ret;
240 }
241 }
242
243 return num;
244}
245
246static int dsi_mgr_connector_mode_valid(struct drm_connector *connector,
247 struct drm_display_mode *mode)
248{
249 int id = dsi_mgr_connector_get_id(connector);
250 struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
251 struct drm_encoder *encoder = msm_dsi_get_encoder(msm_dsi);
252 struct msm_drm_private *priv = connector->dev->dev_private;
253 struct msm_kms *kms = priv->kms;
254 long actual, requested;
255
256 DBG("");
257 requested = 1000 * mode->clock;
258 actual = kms->funcs->round_pixclk(kms, requested, encoder);
259
260 DBG("requested=%ld, actual=%ld", requested, actual);
261 if (actual != requested)
262 return MODE_CLOCK_RANGE;
263
264 return MODE_OK;
265}
266
267static struct drm_encoder *
268dsi_mgr_connector_best_encoder(struct drm_connector *connector)
269{
270 int id = dsi_mgr_connector_get_id(connector);
271 struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
272
273 DBG("");
274 return msm_dsi_get_encoder(msm_dsi);
275}
276
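/* Bring-up order, mirrored by the error labels at the bottom: power on
 * the host(s), prepare the panel (once, since there is a single
 * drm_panel even with two links), enable the host(s), then enable the
 * panel.
 */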
277static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
278{
279 int id = dsi_mgr_bridge_get_id(bridge);
280 struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
281 struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
282 struct mipi_dsi_host *host = msm_dsi->host;
283 struct drm_panel *panel = msm_dsi->panel;
284 bool is_dual_panel = IS_DUAL_PANEL();
285 int ret;
286
287 DBG("id=%d", id);
288 if (!panel || (is_dual_panel && (DSI_1 == id)))
289 return;
290
291 ret = msm_dsi_host_power_on(host);
292 if (ret) {
293 pr_err("%s: power on host %d failed, %d\n", __func__, id, ret);
294 goto host_on_fail;
295 }
296
297 if (is_dual_panel && msm_dsi1) {
298 ret = msm_dsi_host_power_on(msm_dsi1->host);
299 if (ret) {
300 pr_err("%s: power on host1 failed, %d\n",
301 __func__, ret);
302 goto host1_on_fail;
303 }
304 }
305
306 /* Always call panel functions once, because even for dual panels,
307 * there is only one drm_panel instance.
308 */
309 ret = drm_panel_prepare(panel);
310 if (ret) {
311 pr_err("%s: prepare panel %d failed, %d\n", __func__, id, ret);
312 goto panel_prep_fail;
313 }
314
315 ret = msm_dsi_host_enable(host);
316 if (ret) {
317 pr_err("%s: enable host %d failed, %d\n", __func__, id, ret);
318 goto host_en_fail;
319 }
320
321 if (is_dual_panel && msm_dsi1) {
322 ret = msm_dsi_host_enable(msm_dsi1->host);
323 if (ret) {
324 pr_err("%s: enable host1 failed, %d\n", __func__, ret);
325 goto host1_en_fail;
326 }
327 }
328
329 ret = drm_panel_enable(panel);
330 if (ret) {
331 pr_err("%s: enable panel %d failed, %d\n", __func__, id, ret);
332 goto panel_en_fail;
333 }
334
335 return;
336
337panel_en_fail:
338 if (is_dual_panel && msm_dsi1)
339 msm_dsi_host_disable(msm_dsi1->host);
340host1_en_fail:
341 msm_dsi_host_disable(host);
342host_en_fail:
343 drm_panel_unprepare(panel);
344panel_prep_fail:
345 if (is_dual_panel && msm_dsi1)
346 msm_dsi_host_power_off(msm_dsi1->host);
347host1_on_fail:
348 msm_dsi_host_power_off(host);
349host_on_fail:
350 return;
351}
352
353static void dsi_mgr_bridge_enable(struct drm_bridge *bridge)
354{
355 DBG("");
356}
357
358static void dsi_mgr_bridge_disable(struct drm_bridge *bridge)
359{
360 DBG("");
361}
362
363static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
364{
365 int id = dsi_mgr_bridge_get_id(bridge);
366 struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
367 struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
368 struct mipi_dsi_host *host = msm_dsi->host;
369 struct drm_panel *panel = msm_dsi->panel;
370 bool is_dual_panel = IS_DUAL_PANEL();
371 int ret;
372
373 DBG("id=%d", id);
374
375 if (!panel || (is_dual_panel && (DSI_1 == id)))
376 return;
377
378 ret = drm_panel_disable(panel);
379 if (ret)
380 pr_err("%s: Panel %d OFF failed, %d\n", __func__, id, ret);
381
382 ret = msm_dsi_host_disable(host);
383 if (ret)
384 pr_err("%s: host %d disable failed, %d\n", __func__, id, ret);
385
386 if (is_dual_panel && msm_dsi1) {
387 ret = msm_dsi_host_disable(msm_dsi1->host);
388 if (ret)
389 pr_err("%s: host1 disable failed, %d\n", __func__, ret);
390 }
391
392 ret = drm_panel_unprepare(panel);
393 if (ret)
394 pr_err("%s: Panel %d unprepare failed,%d\n", __func__, id, ret);
395
396 ret = msm_dsi_host_power_off(host);
397 if (ret)
398 pr_err("%s: host %d power off failed,%d\n", __func__, id, ret);
399
400 if (is_dual_panel && msm_dsi1) {
401 ret = msm_dsi_host_power_off(msm_dsi1->host);
402 if (ret)
403 pr_err("%s: host1 power off failed, %d\n",
404 __func__, ret);
405 }
406}
407
408static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge,
409 struct drm_display_mode *mode,
410 struct drm_display_mode *adjusted_mode)
411{
412 int id = dsi_mgr_bridge_get_id(bridge);
413 struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
414 struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
415 struct mipi_dsi_host *host = msm_dsi->host;
416 bool is_dual_panel = IS_DUAL_PANEL();
417
418 DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
419 mode->base.id, mode->name,
420 mode->vrefresh, mode->clock,
421 mode->hdisplay, mode->hsync_start,
422 mode->hsync_end, mode->htotal,
423 mode->vdisplay, mode->vsync_start,
424 mode->vsync_end, mode->vtotal,
425 mode->type, mode->flags);
426
427 if (is_dual_panel && (DSI_1 == id))
428 return;
429
430 msm_dsi_host_set_display_mode(host, adjusted_mode);
431 if (is_dual_panel && other_dsi)
432 msm_dsi_host_set_display_mode(other_dsi->host, adjusted_mode);
433}
434
435static const struct drm_connector_funcs dsi_mgr_connector_funcs = {
436 .dpms = drm_atomic_helper_connector_dpms,
437 .detect = dsi_mgr_connector_detect,
438 .fill_modes = drm_helper_probe_single_connector_modes,
439 .destroy = dsi_mgr_connector_destroy,
440 .reset = drm_atomic_helper_connector_reset,
441 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
442 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
443};
444
445static const struct drm_connector_helper_funcs dsi_mgr_conn_helper_funcs = {
446 .get_modes = dsi_mgr_connector_get_modes,
447 .mode_valid = dsi_mgr_connector_mode_valid,
448 .best_encoder = dsi_mgr_connector_best_encoder,
449};
450
451static const struct drm_bridge_funcs dsi_mgr_bridge_funcs = {
452 .pre_enable = dsi_mgr_bridge_pre_enable,
453 .enable = dsi_mgr_bridge_enable,
454 .disable = dsi_mgr_bridge_disable,
455 .post_disable = dsi_mgr_bridge_post_disable,
456 .mode_set = dsi_mgr_bridge_mode_set,
457};
458
459/* initialize connector */
460struct drm_connector *msm_dsi_manager_connector_init(u8 id)
461{
462 struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
463 struct drm_connector *connector = NULL;
464 struct dsi_connector *dsi_connector;
465 int ret;
466
467 dsi_connector = devm_kzalloc(msm_dsi->dev->dev,
468 sizeof(*dsi_connector), GFP_KERNEL);
469 if (!dsi_connector) {
470 ret = -ENOMEM;
471 goto fail;
472 }
473
474 dsi_connector->id = id;
475
476 connector = &dsi_connector->base;
477
478 ret = drm_connector_init(msm_dsi->dev, connector,
479 &dsi_mgr_connector_funcs, DRM_MODE_CONNECTOR_DSI);
480 if (ret)
481 goto fail;
482
483 drm_connector_helper_add(connector, &dsi_mgr_conn_helper_funcs);
484
485 /* Enable HPD so that the hpd event is handled
486 * when the panel is attached to the host.
487 */
488 connector->polled = DRM_CONNECTOR_POLL_HPD;
489
490 /* Display driver doesn't support interlace now. */
491 connector->interlace_allowed = 0;
492 connector->doublescan_allowed = 0;
493
494 ret = drm_connector_register(connector);
495 if (ret)
496 goto fail;
497
498 return connector;
499
500fail:
501 if (connector)
502 dsi_mgr_connector_destroy(connector);
503
504 return ERR_PTR(ret);
505}
506
507/* initialize bridge */
508struct drm_bridge *msm_dsi_manager_bridge_init(u8 id)
509{
510 struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
511 struct drm_bridge *bridge = NULL;
512 struct dsi_bridge *dsi_bridge;
513 int ret;
514
515 dsi_bridge = devm_kzalloc(msm_dsi->dev->dev,
516 sizeof(*dsi_bridge), GFP_KERNEL);
517 if (!dsi_bridge) {
518 ret = -ENOMEM;
519 goto fail;
520 }
521
522 dsi_bridge->id = id;
523
524 bridge = &dsi_bridge->base;
525 bridge->funcs = &dsi_mgr_bridge_funcs;
526
527 ret = drm_bridge_attach(msm_dsi->dev, bridge);
528 if (ret)
529 goto fail;
530
531 return bridge;
532
533fail:
534 if (bridge)
535 msm_dsi_manager_bridge_destroy(bridge);
536
537 return ERR_PTR(ret);
538}
539
540void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge)
541{
542}
543
544int msm_dsi_manager_phy_enable(int id,
545 const unsigned long bit_rate, const unsigned long esc_rate,
546 u32 *clk_pre, u32 *clk_post)
547{
548 struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
549 struct msm_dsi_phy *phy = msm_dsi->phy;
550 int ret;
551
552 ret = msm_dsi_phy_enable(phy, IS_DUAL_PANEL(), bit_rate, esc_rate);
553 if (ret)
554 return ret;
555
556 msm_dsi->phy_enabled = true;
557 msm_dsi_phy_get_clk_pre_post(phy, clk_pre, clk_post);
558
559 return 0;
560}
561
562void msm_dsi_manager_phy_disable(int id)
563{
564 struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
565 struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER);
566 struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE);
567 struct msm_dsi_phy *phy = msm_dsi->phy;
568
569	/* Disable the DSI PHY.
570	 * In a dual-DSI configuration, both PHYs are disabled together,
571	 * and only after both controllers have been disabled.
572	 */
573 msm_dsi->phy_enabled = false;
574 if (IS_DUAL_PANEL() && mdsi && sdsi) {
575 if (!mdsi->phy_enabled && !sdsi->phy_enabled) {
576 msm_dsi_phy_disable(sdsi->phy);
577 msm_dsi_phy_disable(mdsi->phy);
578 }
579 } else {
580 msm_dsi_phy_disable(phy);
581 }
582}
583
584int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg)
585{
586 struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
587 struct msm_dsi *msm_dsi0 = dsi_mgr_get_dsi(DSI_0);
588 struct mipi_dsi_host *host = msm_dsi->host;
589 bool is_read = (msg->rx_buf && msg->rx_len);
590 bool need_sync = (IS_SYNC_NEEDED() && !is_read);
591 int ret;
592
593 if (!msg->tx_buf || !msg->tx_len)
594 return 0;
595
596	/* In the dual-master case, the panel requires the same commands to
597	 * be sent to both DSI links. The host issues the command trigger to
598	 * both links when DSI_1 calls the cmd transfer function, regardless
599	 * of whether that happens before or after the DSI_0 cmd transfer.
600	 */
601 if (need_sync && (id == DSI_0))
602 return is_read ? msg->rx_len : msg->tx_len;
603
604 if (need_sync && msm_dsi0) {
605 ret = msm_dsi_host_xfer_prepare(msm_dsi0->host, msg);
606 if (ret) {
607 pr_err("%s: failed to prepare non-trigger host, %d\n",
608 __func__, ret);
609 return ret;
610 }
611 }
612 ret = msm_dsi_host_xfer_prepare(host, msg);
613 if (ret) {
614 pr_err("%s: failed to prepare host, %d\n", __func__, ret);
615 goto restore_host0;
616 }
617
618 ret = is_read ? msm_dsi_host_cmd_rx(host, msg) :
619 msm_dsi_host_cmd_tx(host, msg);
620
621 msm_dsi_host_xfer_restore(host, msg);
622
623restore_host0:
624 if (need_sync && msm_dsi0)
625 msm_dsi_host_xfer_restore(msm_dsi0->host, msg);
626
627 return ret;
628}
629
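
In the dual-master path above, a transfer issued through DSI_0 is accounted for but not executed; the hardware trigger fires when DSI_1 transfers the same command. A sketch of the resulting call flow, assuming a hypothetical panel driver that holds both mipi_dsi_device links (dsi0/dsi1):

	/* Hypothetical panel code; both writes carry the same command. */
	mipi_dsi_dcs_write(dsi0, MIPI_DCS_SET_DISPLAY_ON, NULL, 0);
	/* -> msm_dsi_manager_cmd_xfer(DSI_0, ...): need_sync && id == DSI_0,
	 *    so it returns tx_len immediately without touching the hardware. */
	mipi_dsi_dcs_write(dsi1, MIPI_DCS_SET_DISPLAY_ON, NULL, 0);
	/* -> msm_dsi_manager_cmd_xfer(DSI_1, ...): prepares both hosts, then
	 *    the transfer is kicked on both links via cmd_xfer_trigger below. */
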
630bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 iova, u32 len)
631{
632 struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
633 struct msm_dsi *msm_dsi0 = dsi_mgr_get_dsi(DSI_0);
634 struct mipi_dsi_host *host = msm_dsi->host;
635
636 if (IS_SYNC_NEEDED() && (id == DSI_0))
637 return false;
638
639 if (IS_SYNC_NEEDED() && msm_dsi0)
640 msm_dsi_host_cmd_xfer_commit(msm_dsi0->host, iova, len);
641
642 msm_dsi_host_cmd_xfer_commit(host, iova, len);
643
644 return true;
645}
646
647int msm_dsi_manager_register(struct msm_dsi *msm_dsi)
648{
649 struct msm_dsi_manager *msm_dsim = &msm_dsim_glb;
650 int id = msm_dsi->id;
651 struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
652 int ret;
653
654	if (id >= DSI_MAX) {
655 pr_err("%s: invalid id %d\n", __func__, id);
656 return -EINVAL;
657 }
658
659 if (msm_dsim->dsi[id]) {
660 pr_err("%s: dsi%d already registered\n", __func__, id);
661 return -EBUSY;
662 }
663
664 msm_dsim->dsi[id] = msm_dsi;
665
666 ret = dsi_mgr_parse_dual_panel(msm_dsi->pdev->dev.of_node, id);
667 if (ret) {
668 pr_err("%s: failed to parse dual panel info\n", __func__);
669 return ret;
670 }
671
672 if (!IS_DUAL_PANEL()) {
673 ret = msm_dsi_host_register(msm_dsi->host, true);
674 } else if (!other_dsi) {
675 return 0;
676 } else {
677 struct msm_dsi *mdsi = IS_MASTER_PANEL(id) ?
678 msm_dsi : other_dsi;
679 struct msm_dsi *sdsi = IS_MASTER_PANEL(id) ?
680 other_dsi : msm_dsi;
681		/* Register the slave host first, so that the slave DSI device
682		 * has a chance to probe without blocking the master DSI
683		 * device's probe.
684		 * Also, skip the defer check for the slave host, because only
685		 * the master DSI device adds the panel to the global panel
686		 * list; the panel's device is the master DSI device.
687		 */
688 ret = msm_dsi_host_register(sdsi->host, false);
689 if (ret)
690 return ret;
691 ret = msm_dsi_host_register(mdsi->host, true);
692 }
693
694 return ret;
695}
696
697void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi)
698{
699 struct msm_dsi_manager *msm_dsim = &msm_dsim_glb;
700
701 if (msm_dsi->host)
702 msm_dsi_host_unregister(msm_dsi->host);
703 msm_dsim->dsi[msm_dsi->id] = NULL;
704}
705
diff --git a/drivers/gpu/drm/msm/dsi/dsi_phy.c b/drivers/gpu/drm/msm/dsi/dsi_phy.c
new file mode 100644
index 000000000000..f0cea8927388
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi_phy.c
@@ -0,0 +1,352 @@
1/*
2 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include "dsi.h"
15#include "dsi.xml.h"
16
17#define dsi_phy_read(offset) msm_readl((offset))
18#define dsi_phy_write(offset, data) msm_writel((data), (offset))
19
20struct dsi_dphy_timing {
21 u32 clk_pre;
22 u32 clk_post;
23 u32 clk_zero;
24 u32 clk_trail;
25 u32 clk_prepare;
26 u32 hs_exit;
27 u32 hs_zero;
28 u32 hs_prepare;
29 u32 hs_trail;
30 u32 hs_rqst;
31 u32 ta_go;
32 u32 ta_sure;
33 u32 ta_get;
34};
35
36struct msm_dsi_phy {
37 void __iomem *base;
38 void __iomem *reg_base;
39 int id;
40 struct dsi_dphy_timing timing;
41 int (*enable)(struct msm_dsi_phy *phy, bool is_dual_panel,
42 const unsigned long bit_rate, const unsigned long esc_rate);
43 int (*disable)(struct msm_dsi_phy *phy);
44};
45
46#define S_DIV_ROUND_UP(n, d) \
47 (((n) >= 0) ? (((n) + (d) - 1) / (d)) : (((n) - (d) + 1) / (d)))
48
49static inline s32 linear_inter(s32 tmax, s32 tmin, s32 percent,
50 s32 min_result, bool even)
51{
52 s32 v;
53 v = (tmax - tmin) * percent;
54 v = S_DIV_ROUND_UP(v, 100) + tmin;
55 if (even && (v & 0x1))
56 return max_t(s32, min_result, v - 1);
57 else
58 return max_t(s32, min_result, v);
59}
60
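
linear_inter() returns a value percent% of the way from tmin toward tmax, rounded up, clamped below by min_result, and optionally forced even (the timing fields below are computed with even=true, except clk_pre/clk_post). A worked example with illustrative numbers:

	/* linear_inter(255, 35, 10, 0, true):
	 *   v = (255 - 35) * 10           = 2200
	 *   v = S_DIV_ROUND_UP(2200, 100) = 22, then v += 35 -> 57
	 *   57 is odd and 'even' is set   -> max(0, 57 - 1)  = 56
	 */
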
61static void dsi_dphy_timing_calc_clk_zero(struct dsi_dphy_timing *timing,
62 s32 ui, s32 coeff, s32 pcnt)
63{
64 s32 tmax, tmin, clk_z;
65 s32 temp;
66
67 /* reset */
68 temp = 300 * coeff - ((timing->clk_prepare >> 1) + 1) * 2 * ui;
69 tmin = S_DIV_ROUND_UP(temp, ui) - 2;
70 if (tmin > 255) {
71 tmax = 511;
72 clk_z = linear_inter(2 * tmin, tmin, pcnt, 0, true);
73 } else {
74 tmax = 255;
75 clk_z = linear_inter(tmax, tmin, pcnt, 0, true);
76 }
77
78 /* adjust */
79 temp = (timing->hs_rqst + timing->clk_prepare + clk_z) & 0x7;
80 timing->clk_zero = clk_z + 8 - temp;
81}
82
83static int dsi_dphy_timing_calc(struct dsi_dphy_timing *timing,
84 const unsigned long bit_rate, const unsigned long esc_rate)
85{
86 s32 ui, lpx;
87 s32 tmax, tmin;
88 s32 pcnt0 = 10;
89 s32 pcnt1 = (bit_rate > 1200000000) ? 15 : 10;
90 s32 pcnt2 = 10;
91 s32 pcnt3 = (bit_rate > 180000000) ? 10 : 40;
92	s32 coeff = 1000; /* precision multiplier; 1000 keeps the s32 math from overflowing */
93 s32 temp;
94
95 if (!bit_rate || !esc_rate)
96 return -EINVAL;
97
98 ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
99 lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);
100
101 tmax = S_DIV_ROUND_UP(95 * coeff, ui) - 2;
102 tmin = S_DIV_ROUND_UP(38 * coeff, ui) - 2;
103 timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, true);
104
105 temp = lpx / ui;
106 if (temp & 0x1)
107 timing->hs_rqst = temp;
108 else
109 timing->hs_rqst = max_t(s32, 0, temp - 2);
110
111 /* Calculate clk_zero after clk_prepare and hs_rqst */
112 dsi_dphy_timing_calc_clk_zero(timing, ui, coeff, pcnt2);
113
114 temp = 105 * coeff + 12 * ui - 20 * coeff;
115 tmax = S_DIV_ROUND_UP(temp, ui) - 2;
116 tmin = S_DIV_ROUND_UP(60 * coeff, ui) - 2;
117 timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, true);
118
119 temp = 85 * coeff + 6 * ui;
120 tmax = S_DIV_ROUND_UP(temp, ui) - 2;
121 temp = 40 * coeff + 4 * ui;
122 tmin = S_DIV_ROUND_UP(temp, ui) - 2;
123 timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, true);
124
125 tmax = 255;
126 temp = ((timing->hs_prepare >> 1) + 1) * 2 * ui + 2 * ui;
127 temp = 145 * coeff + 10 * ui - temp;
128 tmin = S_DIV_ROUND_UP(temp, ui) - 2;
129 timing->hs_zero = linear_inter(tmax, tmin, pcnt2, 24, true);
130
131 temp = 105 * coeff + 12 * ui - 20 * coeff;
132 tmax = S_DIV_ROUND_UP(temp, ui) - 2;
133 temp = 60 * coeff + 4 * ui;
134 tmin = DIV_ROUND_UP(temp, ui) - 2;
135 timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, true);
136
137 tmax = 255;
138 tmin = S_DIV_ROUND_UP(100 * coeff, ui) - 2;
139 timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, true);
140
141 tmax = 63;
142 temp = ((timing->hs_exit >> 1) + 1) * 2 * ui;
143 temp = 60 * coeff + 52 * ui - 24 * ui - temp;
144 tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
145 timing->clk_post = linear_inter(tmax, tmin, pcnt2, 0, false);
146
147 tmax = 63;
148 temp = ((timing->clk_prepare >> 1) + 1) * 2 * ui;
149 temp += ((timing->clk_zero >> 1) + 1) * 2 * ui;
150 temp += 8 * ui + lpx;
151 tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
152 if (tmin > tmax) {
153 temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false) >> 1;
154 timing->clk_pre = temp >> 1;
155 temp = (2 * tmax - tmin) * pcnt2;
156 } else {
157 timing->clk_pre = linear_inter(tmax, tmin, pcnt2, 0, false);
158 }
159
160 timing->ta_go = 3;
161 timing->ta_sure = 0;
162 timing->ta_get = 4;
163
164 DBG("PHY timings: %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
165 timing->clk_pre, timing->clk_post, timing->clk_zero,
166 timing->clk_trail, timing->clk_prepare, timing->hs_exit,
167 timing->hs_zero, timing->hs_prepare, timing->hs_trail,
168 timing->hs_rqst);
169
170 return 0;
171}
172
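
Throughout dsi_dphy_timing_calc(), ui and lpx are the bit period and the escape-clock period in nanoseconds, pre-scaled by coeff = 1000 so the s32 math keeps three decimal digits of precision. A worked example, assuming an illustrative 500 MHz bit rate and 19.2 MHz escape clock:

	/* ui  = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000)
	 *     = mult_frac(1000000, 1000, 500000) = 2000   (= 2.000 ns)
	 * lpx = mult_frac(1000000, 1000, 19200)  = 52083  (= 52.083 ns)
	 */
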
173static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
174{
175 void __iomem *base = phy->reg_base;
176
177 if (!enable) {
178 dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
179 return;
180 }
181
182 dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0);
183 dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 1);
184 dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0);
185 dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0);
186 dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x3);
187 dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x9);
188 dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x7);
189 dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20);
190}
191
192static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel,
193 const unsigned long bit_rate, const unsigned long esc_rate)
194{
195 struct dsi_dphy_timing *timing = &phy->timing;
196 int i;
197 void __iomem *base = phy->base;
198
199 DBG("");
200
201 if (dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) {
202 pr_err("%s: D-PHY timing calculation failed\n", __func__);
203 return -EINVAL;
204 }
205
206 dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_0, 0xff);
207
208 dsi_28nm_phy_regulator_ctrl(phy, true);
209
210 dsi_phy_write(base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x00);
211
212 dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_0,
213 DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
214 dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_1,
215 DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
216 dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_2,
217 DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
218 if (timing->clk_zero & BIT(8))
219 dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_3,
220 DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8);
221 dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_4,
222 DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
223 dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_5,
224 DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
225 dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_6,
226 DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
227 dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_7,
228 DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
229 dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_8,
230 DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
231 dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_9,
232 DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
233 DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
234 dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_10,
235 DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
236 dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_11,
237 DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
238
239 dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_1, 0x00);
240 dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);
241
242 dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_1, 0x6);
243
244 for (i = 0; i < 4; i++) {
245 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_0(i), 0);
246 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_1(i), 0);
247 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_2(i), 0);
248 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_3(i), 0);
249 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_DATAPATH(i), 0);
250 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_DEBUG_SEL(i), 0);
251 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_0(i), 0x1);
252 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_1(i), 0x97);
253 }
254 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(0), 0);
255 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(1), 0x5);
256 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(2), 0xa);
257 dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(3), 0xf);
258
259 dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_CFG_1, 0xc0);
260 dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR0, 0x1);
261 dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR1, 0xbb);
262
263 dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);
264
265 if (is_dual_panel && (phy->id != DSI_CLOCK_MASTER))
266 dsi_phy_write(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL, 0x00);
267 else
268 dsi_phy_write(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL, 0x01);
269
270 return 0;
271}
272
273static int dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
274{
275 dsi_phy_write(phy->base + REG_DSI_28nm_PHY_CTRL_0, 0);
276 dsi_28nm_phy_regulator_ctrl(phy, false);
277
278	/*
279	 * Wait for the register writes to complete in order to
280	 * ensure that the PHY is completely disabled.
281	 */
282 wmb();
283
284 return 0;
285}
286
287#define dsi_phy_func_init(name) \
288 do { \
289 phy->enable = dsi_##name##_phy_enable; \
290 phy->disable = dsi_##name##_phy_disable; \
291 } while (0)
292
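
dsi_phy_func_init() is a token-pasting dispatch helper; for the 28nm case below, dsi_phy_func_init(28nm) simply expands to:

	phy->enable = dsi_28nm_phy_enable;
	phy->disable = dsi_28nm_phy_disable;
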
293struct msm_dsi_phy *msm_dsi_phy_init(struct platform_device *pdev,
294 enum msm_dsi_phy_type type, int id)
295{
296 struct msm_dsi_phy *phy;
297
298 phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL);
299 if (!phy)
300 return NULL;
301
302 phy->base = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
303 if (IS_ERR_OR_NULL(phy->base)) {
304 pr_err("%s: failed to map phy base\n", __func__);
305 return NULL;
306 }
307 phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator", "DSI_PHY_REG");
308 if (IS_ERR_OR_NULL(phy->reg_base)) {
309 pr_err("%s: failed to map phy regulator base\n", __func__);
310 return NULL;
311 }
312
313 switch (type) {
314 case MSM_DSI_PHY_28NM:
315 dsi_phy_func_init(28nm);
316 break;
317 default:
318 pr_err("%s: unsupported type, %d\n", __func__, type);
319 return NULL;
320 }
321
322 phy->id = id;
323
324 return phy;
325}
326
327int msm_dsi_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel,
328 const unsigned long bit_rate, const unsigned long esc_rate)
329{
330 if (!phy || !phy->enable)
331 return -EINVAL;
332 return phy->enable(phy, is_dual_panel, bit_rate, esc_rate);
333}
334
335int msm_dsi_phy_disable(struct msm_dsi_phy *phy)
336{
337 if (!phy || !phy->disable)
338 return -EINVAL;
339 return phy->disable(phy);
340}
341
342void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy,
343 u32 *clk_pre, u32 *clk_post)
344{
345 if (!phy)
346 return;
347 if (clk_pre)
348 *clk_pre = phy->timing.clk_pre;
349 if (clk_post)
350 *clk_post = phy->timing.clk_post;
351}
352
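
Taken together, this file gives the DSI host a small init/enable/disable interface. A minimal usage sketch (error handling trimmed; the clock rates are illustrative, and pdev is the DSI platform device):

	struct msm_dsi_phy *phy;
	u32 clk_pre, clk_post;

	phy = msm_dsi_phy_init(pdev, MSM_DSI_PHY_28NM, 0);
	if (!phy)
		return -ENODEV;

	/* 500 Mbps per lane, 19.2 MHz escape clock (illustrative) */
	if (msm_dsi_phy_enable(phy, false, 500000000, 19200000))
		return -EIO;

	msm_dsi_phy_get_clk_pre_post(phy, &clk_pre, &clk_post);
	/* ... program the host's HS timing with clk_pre/clk_post ... */

	msm_dsi_phy_disable(phy);
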
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
index eeed006eed13..6997ec636c6d 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
@@ -53,6 +53,23 @@ struct pll_rate {
53 53
54/* NOTE: keep sorted highest freq to lowest: */ 54/* NOTE: keep sorted highest freq to lowest: */
55static const struct pll_rate freqtbl[] = { 55static const struct pll_rate freqtbl[] = {
56 { 154000000, {
57 { 0x08, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
58 { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
59 { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
60 { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
61 { 0x03, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
62 { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
63 { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
64 { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
65 { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
66 { 0x0d, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
67 { 0x4d, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
68 { 0x5e, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
69 { 0x42, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
70 { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
71 { 0, 0 } }
72 },
56 /* 1080p60/1080p50 case */ 73 /* 1080p60/1080p50 case */
57 { 148500000, { 74 { 148500000, {
58 { 0x02, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, 75 { 0x02, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
@@ -112,6 +129,23 @@ static const struct pll_rate freqtbl[] = {
112 { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, 129 { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
113 { 0, 0 } } 130 { 0, 0 } }
114 }, 131 },
132 { 74176000, {
133 { 0x18, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
134 { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
135 { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
136 { 0xe5, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
137 { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
138 { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
139 { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
140 { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
141 { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
142 { 0x0c, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
143 { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
144 { 0x7d, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
145 { 0xbc, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
146 { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
147 { 0, 0 } }
148 },
115 { 65000000, { 149 { 65000000, {
116 { 0x18, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, 150 { 0x18, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
117 { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, 151 { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
index c276624290af..b9a4ded6e400 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
@@ -8,9 +8,9 @@ http://github.com/freedreno/envytools/
8git clone https://github.com/freedreno/envytools.git 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp5.xml ( 27229 bytes, from 2015-02-10 17:00:41) 11- /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp5.xml ( 29312 bytes, from 2015-03-23 21:18:48)
12- /local/mnt2/workspace2/sviau/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2014-06-02 18:31:15) 12- /local/mnt2/workspace2/sviau/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2014-06-02 18:31:15)
13- /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2015-01-23 16:20:19) 13- /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2015-03-23 20:38:49)
14 14
15Copyright (C) 2013-2015 by the following authors: 15Copyright (C) 2013-2015 by the following authors:
16- Rob Clark <robdclark@gmail.com> (robclark) 16- Rob Clark <robdclark@gmail.com> (robclark)
@@ -37,11 +37,14 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
37*/ 37*/
38 38
39 39
40enum mdp5_intf { 40enum mdp5_intf_type {
41 INTF_DISABLED = 0,
41 INTF_DSI = 1, 42 INTF_DSI = 1,
42 INTF_HDMI = 3, 43 INTF_HDMI = 3,
43 INTF_LCDC = 5, 44 INTF_LCDC = 5,
44 INTF_eDP = 9, 45 INTF_eDP = 9,
46 INTF_VIRTUAL = 100,
47 INTF_WB = 101,
45}; 48};
46 49
47enum mdp5_intfnum { 50enum mdp5_intfnum {
@@ -67,11 +70,11 @@ enum mdp5_pipe {
67 70
68enum mdp5_ctl_mode { 71enum mdp5_ctl_mode {
69 MODE_NONE = 0, 72 MODE_NONE = 0,
70 MODE_ROT0 = 1, 73 MODE_WB_0_BLOCK = 1,
71 MODE_ROT1 = 2, 74 MODE_WB_1_BLOCK = 2,
72 MODE_WB0 = 3, 75 MODE_WB_0_LINE = 3,
73 MODE_WB1 = 4, 76 MODE_WB_1_LINE = 4,
74 MODE_WFD = 5, 77 MODE_WB_2_LINE = 5,
75}; 78};
76 79
77enum mdp5_pack_3d { 80enum mdp5_pack_3d {
@@ -94,33 +97,6 @@ enum mdp5_pipe_bwc {
94 BWC_Q_MED = 2, 97 BWC_Q_MED = 2,
95}; 98};
96 99
97enum mdp5_client_id {
98 CID_UNUSED = 0,
99 CID_VIG0_Y = 1,
100 CID_VIG0_CR = 2,
101 CID_VIG0_CB = 3,
102 CID_VIG1_Y = 4,
103 CID_VIG1_CR = 5,
104 CID_VIG1_CB = 6,
105 CID_VIG2_Y = 7,
106 CID_VIG2_CR = 8,
107 CID_VIG2_CB = 9,
108 CID_DMA0_Y = 10,
109 CID_DMA0_CR = 11,
110 CID_DMA0_CB = 12,
111 CID_DMA1_Y = 13,
112 CID_DMA1_CR = 14,
113 CID_DMA1_CB = 15,
114 CID_RGB0 = 16,
115 CID_RGB1 = 17,
116 CID_RGB2 = 18,
117 CID_VIG3_Y = 19,
118 CID_VIG3_CR = 20,
119 CID_VIG3_CB = 21,
120 CID_RGB3 = 22,
121 CID_MAX = 23,
122};
123
124enum mdp5_cursor_format { 100enum mdp5_cursor_format {
125 CURSOR_FMT_ARGB8888 = 0, 101 CURSOR_FMT_ARGB8888 = 0,
126 CURSOR_FMT_ARGB1555 = 2, 102 CURSOR_FMT_ARGB1555 = 2,
@@ -144,30 +120,25 @@ enum mdp5_data_format {
144 DATA_FORMAT_YUV = 1, 120 DATA_FORMAT_YUV = 1,
145}; 121};
146 122
147#define MDP5_IRQ_INTF0_WB_ROT_COMP 0x00000001 123#define MDP5_IRQ_WB_0_DONE 0x00000001
148#define MDP5_IRQ_INTF1_WB_ROT_COMP 0x00000002 124#define MDP5_IRQ_WB_1_DONE 0x00000002
149#define MDP5_IRQ_INTF2_WB_ROT_COMP 0x00000004 125#define MDP5_IRQ_WB_2_DONE 0x00000010
150#define MDP5_IRQ_INTF3_WB_ROT_COMP 0x00000008 126#define MDP5_IRQ_PING_PONG_0_DONE 0x00000100
151#define MDP5_IRQ_INTF0_WB_WFD 0x00000010 127#define MDP5_IRQ_PING_PONG_1_DONE 0x00000200
152#define MDP5_IRQ_INTF1_WB_WFD 0x00000020 128#define MDP5_IRQ_PING_PONG_2_DONE 0x00000400
153#define MDP5_IRQ_INTF2_WB_WFD 0x00000040 129#define MDP5_IRQ_PING_PONG_3_DONE 0x00000800
154#define MDP5_IRQ_INTF3_WB_WFD 0x00000080 130#define MDP5_IRQ_PING_PONG_0_RD_PTR 0x00001000
155#define MDP5_IRQ_INTF0_PING_PONG_COMP 0x00000100 131#define MDP5_IRQ_PING_PONG_1_RD_PTR 0x00002000
156#define MDP5_IRQ_INTF1_PING_PONG_COMP 0x00000200 132#define MDP5_IRQ_PING_PONG_2_RD_PTR 0x00004000
157#define MDP5_IRQ_INTF2_PING_PONG_COMP 0x00000400 133#define MDP5_IRQ_PING_PONG_3_RD_PTR 0x00008000
158#define MDP5_IRQ_INTF3_PING_PONG_COMP 0x00000800 134#define MDP5_IRQ_PING_PONG_0_WR_PTR 0x00010000
159#define MDP5_IRQ_INTF0_PING_PONG_RD_PTR 0x00001000 135#define MDP5_IRQ_PING_PONG_1_WR_PTR 0x00020000
160#define MDP5_IRQ_INTF1_PING_PONG_RD_PTR 0x00002000 136#define MDP5_IRQ_PING_PONG_2_WR_PTR 0x00040000
161#define MDP5_IRQ_INTF2_PING_PONG_RD_PTR 0x00004000 137#define MDP5_IRQ_PING_PONG_3_WR_PTR 0x00080000
162#define MDP5_IRQ_INTF3_PING_PONG_RD_PTR 0x00008000 138#define MDP5_IRQ_PING_PONG_0_AUTO_REF 0x00100000
163#define MDP5_IRQ_INTF0_PING_PONG_WR_PTR 0x00010000 139#define MDP5_IRQ_PING_PONG_1_AUTO_REF 0x00200000
164#define MDP5_IRQ_INTF1_PING_PONG_WR_PTR 0x00020000 140#define MDP5_IRQ_PING_PONG_2_AUTO_REF 0x00400000
165#define MDP5_IRQ_INTF2_PING_PONG_WR_PTR 0x00040000 141#define MDP5_IRQ_PING_PONG_3_AUTO_REF 0x00800000
166#define MDP5_IRQ_INTF3_PING_PONG_WR_PTR 0x00080000
167#define MDP5_IRQ_INTF0_PING_PONG_AUTO_REF 0x00100000
168#define MDP5_IRQ_INTF1_PING_PONG_AUTO_REF 0x00200000
169#define MDP5_IRQ_INTF2_PING_PONG_AUTO_REF 0x00400000
170#define MDP5_IRQ_INTF3_PING_PONG_AUTO_REF 0x00800000
171#define MDP5_IRQ_INTF0_UNDER_RUN 0x01000000 142#define MDP5_IRQ_INTF0_UNDER_RUN 0x01000000
172#define MDP5_IRQ_INTF0_VSYNC 0x02000000 143#define MDP5_IRQ_INTF0_VSYNC 0x02000000
173#define MDP5_IRQ_INTF1_UNDER_RUN 0x04000000 144#define MDP5_IRQ_INTF1_UNDER_RUN 0x04000000
@@ -176,136 +147,186 @@ enum mdp5_data_format {
176#define MDP5_IRQ_INTF2_VSYNC 0x20000000 147#define MDP5_IRQ_INTF2_VSYNC 0x20000000
177#define MDP5_IRQ_INTF3_UNDER_RUN 0x40000000 148#define MDP5_IRQ_INTF3_UNDER_RUN 0x40000000
178#define MDP5_IRQ_INTF3_VSYNC 0x80000000 149#define MDP5_IRQ_INTF3_VSYNC 0x80000000
179#define REG_MDP5_HW_VERSION 0x00000000 150#define REG_MDSS_HW_VERSION 0x00000000
151#define MDSS_HW_VERSION_STEP__MASK 0x0000ffff
152#define MDSS_HW_VERSION_STEP__SHIFT 0
153static inline uint32_t MDSS_HW_VERSION_STEP(uint32_t val)
154{
155 return ((val) << MDSS_HW_VERSION_STEP__SHIFT) & MDSS_HW_VERSION_STEP__MASK;
156}
157#define MDSS_HW_VERSION_MINOR__MASK 0x0fff0000
158#define MDSS_HW_VERSION_MINOR__SHIFT 16
159static inline uint32_t MDSS_HW_VERSION_MINOR(uint32_t val)
160{
161 return ((val) << MDSS_HW_VERSION_MINOR__SHIFT) & MDSS_HW_VERSION_MINOR__MASK;
162}
163#define MDSS_HW_VERSION_MAJOR__MASK 0xf0000000
164#define MDSS_HW_VERSION_MAJOR__SHIFT 28
165static inline uint32_t MDSS_HW_VERSION_MAJOR(uint32_t val)
166{
167 return ((val) << MDSS_HW_VERSION_MAJOR__SHIFT) & MDSS_HW_VERSION_MAJOR__MASK;
168}
169
170#define REG_MDSS_HW_INTR_STATUS 0x00000010
171#define MDSS_HW_INTR_STATUS_INTR_MDP 0x00000001
172#define MDSS_HW_INTR_STATUS_INTR_DSI0 0x00000010
173#define MDSS_HW_INTR_STATUS_INTR_DSI1 0x00000020
174#define MDSS_HW_INTR_STATUS_INTR_HDMI 0x00000100
175#define MDSS_HW_INTR_STATUS_INTR_EDP 0x00001000
180 176
181#define REG_MDP5_HW_INTR_STATUS 0x00000010 177static inline uint32_t __offset_MDP(uint32_t idx)
182#define MDP5_HW_INTR_STATUS_INTR_MDP 0x00000001 178{
183#define MDP5_HW_INTR_STATUS_INTR_DSI0 0x00000010 179 switch (idx) {
184#define MDP5_HW_INTR_STATUS_INTR_DSI1 0x00000020 180 case 0: return (mdp5_cfg->mdp.base[0]);
185#define MDP5_HW_INTR_STATUS_INTR_HDMI 0x00000100 181 default: return INVALID_IDX(idx);
186#define MDP5_HW_INTR_STATUS_INTR_EDP 0x00001000 182 }
183}
184static inline uint32_t REG_MDP5_MDP(uint32_t i0) { return 0x00000000 + __offset_MDP(i0); }
187 185
188#define REG_MDP5_MDP_VERSION 0x00000100 186static inline uint32_t REG_MDP5_MDP_HW_VERSION(uint32_t i0) { return 0x00000000 + __offset_MDP(i0); }
189#define MDP5_MDP_VERSION_MINOR__MASK 0x00ff0000 187#define MDP5_MDP_HW_VERSION_STEP__MASK 0x0000ffff
190#define MDP5_MDP_VERSION_MINOR__SHIFT 16 188#define MDP5_MDP_HW_VERSION_STEP__SHIFT 0
191static inline uint32_t MDP5_MDP_VERSION_MINOR(uint32_t val) 189static inline uint32_t MDP5_MDP_HW_VERSION_STEP(uint32_t val)
192{ 190{
193 return ((val) << MDP5_MDP_VERSION_MINOR__SHIFT) & MDP5_MDP_VERSION_MINOR__MASK; 191 return ((val) << MDP5_MDP_HW_VERSION_STEP__SHIFT) & MDP5_MDP_HW_VERSION_STEP__MASK;
194} 192}
195#define MDP5_MDP_VERSION_MAJOR__MASK 0xf0000000 193#define MDP5_MDP_HW_VERSION_MINOR__MASK 0x0fff0000
196#define MDP5_MDP_VERSION_MAJOR__SHIFT 28 194#define MDP5_MDP_HW_VERSION_MINOR__SHIFT 16
197static inline uint32_t MDP5_MDP_VERSION_MAJOR(uint32_t val) 195static inline uint32_t MDP5_MDP_HW_VERSION_MINOR(uint32_t val)
198{ 196{
199 return ((val) << MDP5_MDP_VERSION_MAJOR__SHIFT) & MDP5_MDP_VERSION_MAJOR__MASK; 197 return ((val) << MDP5_MDP_HW_VERSION_MINOR__SHIFT) & MDP5_MDP_HW_VERSION_MINOR__MASK;
198}
199#define MDP5_MDP_HW_VERSION_MAJOR__MASK 0xf0000000
200#define MDP5_MDP_HW_VERSION_MAJOR__SHIFT 28
201static inline uint32_t MDP5_MDP_HW_VERSION_MAJOR(uint32_t val)
202{
203 return ((val) << MDP5_MDP_HW_VERSION_MAJOR__SHIFT) & MDP5_MDP_HW_VERSION_MAJOR__MASK;
200} 204}
201 205
202#define REG_MDP5_DISP_INTF_SEL 0x00000104 206static inline uint32_t REG_MDP5_MDP_DISP_INTF_SEL(uint32_t i0) { return 0x00000004 + __offset_MDP(i0); }
203#define MDP5_DISP_INTF_SEL_INTF0__MASK 0x000000ff 207#define MDP5_MDP_DISP_INTF_SEL_INTF0__MASK 0x000000ff
204#define MDP5_DISP_INTF_SEL_INTF0__SHIFT 0 208#define MDP5_MDP_DISP_INTF_SEL_INTF0__SHIFT 0
205static inline uint32_t MDP5_DISP_INTF_SEL_INTF0(enum mdp5_intf val) 209static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF0(enum mdp5_intf_type val)
206{ 210{
207 return ((val) << MDP5_DISP_INTF_SEL_INTF0__SHIFT) & MDP5_DISP_INTF_SEL_INTF0__MASK; 211 return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF0__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF0__MASK;
208} 212}
209#define MDP5_DISP_INTF_SEL_INTF1__MASK 0x0000ff00 213#define MDP5_MDP_DISP_INTF_SEL_INTF1__MASK 0x0000ff00
210#define MDP5_DISP_INTF_SEL_INTF1__SHIFT 8 214#define MDP5_MDP_DISP_INTF_SEL_INTF1__SHIFT 8
211static inline uint32_t MDP5_DISP_INTF_SEL_INTF1(enum mdp5_intf val) 215static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF1(enum mdp5_intf_type val)
212{ 216{
213 return ((val) << MDP5_DISP_INTF_SEL_INTF1__SHIFT) & MDP5_DISP_INTF_SEL_INTF1__MASK; 217 return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF1__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF1__MASK;
214} 218}
215#define MDP5_DISP_INTF_SEL_INTF2__MASK 0x00ff0000 219#define MDP5_MDP_DISP_INTF_SEL_INTF2__MASK 0x00ff0000
216#define MDP5_DISP_INTF_SEL_INTF2__SHIFT 16 220#define MDP5_MDP_DISP_INTF_SEL_INTF2__SHIFT 16
217static inline uint32_t MDP5_DISP_INTF_SEL_INTF2(enum mdp5_intf val) 221static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF2(enum mdp5_intf_type val)
218{ 222{
219 return ((val) << MDP5_DISP_INTF_SEL_INTF2__SHIFT) & MDP5_DISP_INTF_SEL_INTF2__MASK; 223 return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF2__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF2__MASK;
220} 224}
221#define MDP5_DISP_INTF_SEL_INTF3__MASK 0xff000000 225#define MDP5_MDP_DISP_INTF_SEL_INTF3__MASK 0xff000000
222#define MDP5_DISP_INTF_SEL_INTF3__SHIFT 24 226#define MDP5_MDP_DISP_INTF_SEL_INTF3__SHIFT 24
223static inline uint32_t MDP5_DISP_INTF_SEL_INTF3(enum mdp5_intf val) 227static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF3(enum mdp5_intf_type val)
224{ 228{
225 return ((val) << MDP5_DISP_INTF_SEL_INTF3__SHIFT) & MDP5_DISP_INTF_SEL_INTF3__MASK; 229 return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF3__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF3__MASK;
226} 230}
227 231
228#define REG_MDP5_INTR_EN 0x00000110 232static inline uint32_t REG_MDP5_MDP_INTR_EN(uint32_t i0) { return 0x00000010 + __offset_MDP(i0); }
229 233
230#define REG_MDP5_INTR_STATUS 0x00000114 234static inline uint32_t REG_MDP5_MDP_INTR_STATUS(uint32_t i0) { return 0x00000014 + __offset_MDP(i0); }
231 235
232#define REG_MDP5_INTR_CLEAR 0x00000118 236static inline uint32_t REG_MDP5_MDP_INTR_CLEAR(uint32_t i0) { return 0x00000018 + __offset_MDP(i0); }
233 237
234#define REG_MDP5_HIST_INTR_EN 0x0000011c 238static inline uint32_t REG_MDP5_MDP_HIST_INTR_EN(uint32_t i0) { return 0x0000001c + __offset_MDP(i0); }
235 239
236#define REG_MDP5_HIST_INTR_STATUS 0x00000120 240static inline uint32_t REG_MDP5_MDP_HIST_INTR_STATUS(uint32_t i0) { return 0x00000020 + __offset_MDP(i0); }
237 241
238#define REG_MDP5_HIST_INTR_CLEAR 0x00000124 242static inline uint32_t REG_MDP5_MDP_HIST_INTR_CLEAR(uint32_t i0) { return 0x00000024 + __offset_MDP(i0); }
239 243
240static inline uint32_t REG_MDP5_SMP_ALLOC_W(uint32_t i0) { return 0x00000180 + 0x4*i0; } 244static inline uint32_t REG_MDP5_MDP_SPARE_0(uint32_t i0) { return 0x00000028 + __offset_MDP(i0); }
245#define MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN 0x00000001
241 246
242static inline uint32_t REG_MDP5_SMP_ALLOC_W_REG(uint32_t i0) { return 0x00000180 + 0x4*i0; } 247static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_W(uint32_t i0, uint32_t i1) { return 0x00000080 + __offset_MDP(i0) + 0x4*i1; }
243#define MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK 0x000000ff 248
244#define MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT 0 249static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_W_REG(uint32_t i0, uint32_t i1) { return 0x00000080 + __offset_MDP(i0) + 0x4*i1; }
245static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT0(enum mdp5_client_id val) 250#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__MASK 0x000000ff
251#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__SHIFT 0
252static inline uint32_t MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0(uint32_t val)
246{ 253{
247 return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK; 254 return ((val) << MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__SHIFT) & MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__MASK;
248} 255}
249#define MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK 0x0000ff00 256#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__MASK 0x0000ff00
250#define MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT 8 257#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__SHIFT 8
251static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT1(enum mdp5_client_id val) 258static inline uint32_t MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1(uint32_t val)
252{ 259{
253 return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK; 260 return ((val) << MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__SHIFT) & MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__MASK;
254} 261}
255#define MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK 0x00ff0000 262#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__MASK 0x00ff0000
256#define MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT 16 263#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__SHIFT 16
257static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT2(enum mdp5_client_id val) 264static inline uint32_t MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2(uint32_t val)
258{ 265{
259 return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK; 266 return ((val) << MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__SHIFT) & MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__MASK;
260} 267}
261 268
262static inline uint32_t REG_MDP5_SMP_ALLOC_R(uint32_t i0) { return 0x00000230 + 0x4*i0; } 269static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_R(uint32_t i0, uint32_t i1) { return 0x00000130 + __offset_MDP(i0) + 0x4*i1; }
263 270
264static inline uint32_t REG_MDP5_SMP_ALLOC_R_REG(uint32_t i0) { return 0x00000230 + 0x4*i0; } 271static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_R_REG(uint32_t i0, uint32_t i1) { return 0x00000130 + __offset_MDP(i0) + 0x4*i1; }
265#define MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK 0x000000ff 272#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__MASK 0x000000ff
266#define MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT 0 273#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__SHIFT 0
267static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT0(enum mdp5_client_id val) 274static inline uint32_t MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0(uint32_t val)
268{ 275{
269 return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK; 276 return ((val) << MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__SHIFT) & MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__MASK;
270} 277}
271#define MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK 0x0000ff00 278#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__MASK 0x0000ff00
272#define MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT 8 279#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__SHIFT 8
273static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT1(enum mdp5_client_id val) 280static inline uint32_t MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1(uint32_t val)
274{ 281{
275 return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK; 282 return ((val) << MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__SHIFT) & MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__MASK;
276} 283}
277#define MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK 0x00ff0000 284#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__MASK 0x00ff0000
278#define MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT 16 285#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__SHIFT 16
279static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT2(enum mdp5_client_id val) 286static inline uint32_t MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2(uint32_t val)
280{ 287{
281 return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK; 288 return ((val) << MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__SHIFT) & MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__MASK;
282} 289}
283 290
284static inline uint32_t __offset_IGC(enum mdp5_igc_type idx) 291static inline uint32_t __offset_IGC(enum mdp5_igc_type idx)
285{ 292{
286 switch (idx) { 293 switch (idx) {
287 case IGC_VIG: return 0x00000300; 294 case IGC_VIG: return 0x00000200;
288 case IGC_RGB: return 0x00000310; 295 case IGC_RGB: return 0x00000210;
289 case IGC_DMA: return 0x00000320; 296 case IGC_DMA: return 0x00000220;
290 case IGC_DSPP: return 0x00000400; 297 case IGC_DSPP: return 0x00000300;
291 default: return INVALID_IDX(idx); 298 default: return INVALID_IDX(idx);
292 } 299 }
293} 300}
294static inline uint32_t REG_MDP5_IGC(enum mdp5_igc_type i0) { return 0x00000000 + __offset_IGC(i0); } 301static inline uint32_t REG_MDP5_MDP_IGC(uint32_t i0, enum mdp5_igc_type i1) { return 0x00000000 + __offset_MDP(i0) + __offset_IGC(i1); }
295 302
296static inline uint32_t REG_MDP5_IGC_LUT(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; } 303static inline uint32_t REG_MDP5_MDP_IGC_LUT(uint32_t i0, enum mdp5_igc_type i1, uint32_t i2) { return 0x00000000 + __offset_MDP(i0) + __offset_IGC(i1) + 0x4*i2; }
297 304
298static inline uint32_t REG_MDP5_IGC_LUT_REG(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; } 305static inline uint32_t REG_MDP5_MDP_IGC_LUT_REG(uint32_t i0, enum mdp5_igc_type i1, uint32_t i2) { return 0x00000000 + __offset_MDP(i0) + __offset_IGC(i1) + 0x4*i2; }
299#define MDP5_IGC_LUT_REG_VAL__MASK 0x00000fff 306#define MDP5_MDP_IGC_LUT_REG_VAL__MASK 0x00000fff
300#define MDP5_IGC_LUT_REG_VAL__SHIFT 0 307#define MDP5_MDP_IGC_LUT_REG_VAL__SHIFT 0
301static inline uint32_t MDP5_IGC_LUT_REG_VAL(uint32_t val) 308static inline uint32_t MDP5_MDP_IGC_LUT_REG_VAL(uint32_t val)
302{ 309{
303 return ((val) << MDP5_IGC_LUT_REG_VAL__SHIFT) & MDP5_IGC_LUT_REG_VAL__MASK; 310 return ((val) << MDP5_MDP_IGC_LUT_REG_VAL__SHIFT) & MDP5_MDP_IGC_LUT_REG_VAL__MASK;
304} 311}
305#define MDP5_IGC_LUT_REG_INDEX_UPDATE 0x02000000 312#define MDP5_MDP_IGC_LUT_REG_INDEX_UPDATE 0x02000000
306#define MDP5_IGC_LUT_REG_DISABLE_PIPE_0 0x10000000 313#define MDP5_MDP_IGC_LUT_REG_DISABLE_PIPE_0 0x10000000
307#define MDP5_IGC_LUT_REG_DISABLE_PIPE_1 0x20000000 314#define MDP5_MDP_IGC_LUT_REG_DISABLE_PIPE_1 0x20000000
308#define MDP5_IGC_LUT_REG_DISABLE_PIPE_2 0x40000000 315#define MDP5_MDP_IGC_LUT_REG_DISABLE_PIPE_2 0x40000000
316
317#define REG_MDP5_SPLIT_DPL_EN 0x000003f4
318
319#define REG_MDP5_SPLIT_DPL_UPPER 0x000003f8
320#define MDP5_SPLIT_DPL_UPPER_SMART_PANEL 0x00000002
321#define MDP5_SPLIT_DPL_UPPER_SMART_PANEL_FREE_RUN 0x00000004
322#define MDP5_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX 0x00000010
323#define MDP5_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX 0x00000100
324
325#define REG_MDP5_SPLIT_DPL_LOWER 0x000004f0
326#define MDP5_SPLIT_DPL_LOWER_SMART_PANEL 0x00000002
327#define MDP5_SPLIT_DPL_LOWER_SMART_PANEL_FREE_RUN 0x00000004
328#define MDP5_SPLIT_DPL_LOWER_INTF1_TG_SYNC 0x00000010
329#define MDP5_SPLIT_DPL_LOWER_INTF2_TG_SYNC 0x00000100
309 330
310static inline uint32_t __offset_CTL(uint32_t idx) 331static inline uint32_t __offset_CTL(uint32_t idx)
311{ 332{
@@ -437,11 +458,19 @@ static inline uint32_t REG_MDP5_CTL_FLUSH(uint32_t i0) { return 0x00000018 + __o
437#define MDP5_CTL_FLUSH_DSPP0 0x00002000 458#define MDP5_CTL_FLUSH_DSPP0 0x00002000
438#define MDP5_CTL_FLUSH_DSPP1 0x00004000 459#define MDP5_CTL_FLUSH_DSPP1 0x00004000
439#define MDP5_CTL_FLUSH_DSPP2 0x00008000 460#define MDP5_CTL_FLUSH_DSPP2 0x00008000
461#define MDP5_CTL_FLUSH_WB 0x00010000
440#define MDP5_CTL_FLUSH_CTL 0x00020000 462#define MDP5_CTL_FLUSH_CTL 0x00020000
441#define MDP5_CTL_FLUSH_VIG3 0x00040000 463#define MDP5_CTL_FLUSH_VIG3 0x00040000
442#define MDP5_CTL_FLUSH_RGB3 0x00080000 464#define MDP5_CTL_FLUSH_RGB3 0x00080000
443#define MDP5_CTL_FLUSH_LM5 0x00100000 465#define MDP5_CTL_FLUSH_LM5 0x00100000
444#define MDP5_CTL_FLUSH_DSPP3 0x00200000 466#define MDP5_CTL_FLUSH_DSPP3 0x00200000
467#define MDP5_CTL_FLUSH_CURSOR_0 0x00400000
468#define MDP5_CTL_FLUSH_CURSOR_1 0x00800000
469#define MDP5_CTL_FLUSH_CHROMADOWN_0 0x04000000
470#define MDP5_CTL_FLUSH_TIMING_3 0x10000000
471#define MDP5_CTL_FLUSH_TIMING_2 0x20000000
472#define MDP5_CTL_FLUSH_TIMING_1 0x40000000
473#define MDP5_CTL_FLUSH_TIMING_0 0x80000000
445 474
446static inline uint32_t REG_MDP5_CTL_START(uint32_t i0) { return 0x0000001c + __offset_CTL(i0); } 475static inline uint32_t REG_MDP5_CTL_START(uint32_t i0) { return 0x0000001c + __offset_CTL(i0); }
447 476
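
A driver builds up one of these FLUSH words per CTL and writes it in a single shot; bits that a given SoC does not implement are dropped with the per-config flush_hw_mask introduced in mdp5_cfg below. A hedged sketch, assuming an mdp5_kms handle and CTL index ctl_id (mdp5_write() and the VIG0/LM0 flush bits come from elsewhere in the driver and header):

	u32 flush = MDP5_CTL_FLUSH_VIG0 | MDP5_CTL_FLUSH_LM0 |
		    MDP5_CTL_FLUSH_CTL;

	flush &= mdp5_cfg->ctl.flush_hw_mask;	/* e.g. 0x0003ffff on msm8x74 */
	mdp5_write(mdp5_kms, REG_MDP5_CTL_FLUSH(ctl_id), flush);
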
@@ -1117,6 +1146,94 @@ static inline uint32_t REG_MDP5_DSPP_GAMUT_BASE(uint32_t i0) { return 0x000002dc
1117 1146
1118static inline uint32_t REG_MDP5_DSPP_GC_BASE(uint32_t i0) { return 0x000002b0 + __offset_DSPP(i0); } 1147static inline uint32_t REG_MDP5_DSPP_GC_BASE(uint32_t i0) { return 0x000002b0 + __offset_DSPP(i0); }
1119 1148
1149static inline uint32_t __offset_PP(uint32_t idx)
1150{
1151 switch (idx) {
1152 case 0: return (mdp5_cfg->pp.base[0]);
1153 case 1: return (mdp5_cfg->pp.base[1]);
1154 case 2: return (mdp5_cfg->pp.base[2]);
1155 case 3: return (mdp5_cfg->pp.base[3]);
1156 default: return INVALID_IDX(idx);
1157 }
1158}
1159static inline uint32_t REG_MDP5_PP(uint32_t i0) { return 0x00000000 + __offset_PP(i0); }
1160
1161static inline uint32_t REG_MDP5_PP_TEAR_CHECK_EN(uint32_t i0) { return 0x00000000 + __offset_PP(i0); }
1162
1163static inline uint32_t REG_MDP5_PP_SYNC_CONFIG_VSYNC(uint32_t i0) { return 0x00000004 + __offset_PP(i0); }
1164#define MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__MASK 0x0007ffff
1165#define MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__SHIFT 0
1166static inline uint32_t MDP5_PP_SYNC_CONFIG_VSYNC_COUNT(uint32_t val)
1167{
1168 return ((val) << MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__SHIFT) & MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__MASK;
1169}
1170#define MDP5_PP_SYNC_CONFIG_VSYNC_COUNTER_EN 0x00080000
1171#define MDP5_PP_SYNC_CONFIG_VSYNC_IN_EN 0x00100000
1172
1173static inline uint32_t REG_MDP5_PP_SYNC_CONFIG_HEIGHT(uint32_t i0) { return 0x00000008 + __offset_PP(i0); }
1174
1175static inline uint32_t REG_MDP5_PP_SYNC_WRCOUNT(uint32_t i0) { return 0x0000000c + __offset_PP(i0); }
1176#define MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__MASK 0x0000ffff
1177#define MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__SHIFT 0
1178static inline uint32_t MDP5_PP_SYNC_WRCOUNT_LINE_COUNT(uint32_t val)
1179{
1180 return ((val) << MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__SHIFT) & MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__MASK;
1181}
1182#define MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__MASK 0xffff0000
1183#define MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__SHIFT 16
1184static inline uint32_t MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT(uint32_t val)
1185{
1186 return ((val) << MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__SHIFT) & MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__MASK;
1187}
1188
1189static inline uint32_t REG_MDP5_PP_VSYNC_INIT_VAL(uint32_t i0) { return 0x00000010 + __offset_PP(i0); }
1190
1191static inline uint32_t REG_MDP5_PP_INT_COUNT_VAL(uint32_t i0) { return 0x00000014 + __offset_PP(i0); }
1192#define MDP5_PP_INT_COUNT_VAL_LINE_COUNT__MASK 0x0000ffff
1193#define MDP5_PP_INT_COUNT_VAL_LINE_COUNT__SHIFT 0
1194static inline uint32_t MDP5_PP_INT_COUNT_VAL_LINE_COUNT(uint32_t val)
1195{
1196 return ((val) << MDP5_PP_INT_COUNT_VAL_LINE_COUNT__SHIFT) & MDP5_PP_INT_COUNT_VAL_LINE_COUNT__MASK;
1197}
1198#define MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__MASK 0xffff0000
1199#define MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__SHIFT 16
1200static inline uint32_t MDP5_PP_INT_COUNT_VAL_FRAME_COUNT(uint32_t val)
1201{
1202 return ((val) << MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__SHIFT) & MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__MASK;
1203}
1204
1205static inline uint32_t REG_MDP5_PP_SYNC_THRESH(uint32_t i0) { return 0x00000018 + __offset_PP(i0); }
1206#define MDP5_PP_SYNC_THRESH_START__MASK 0x0000ffff
1207#define MDP5_PP_SYNC_THRESH_START__SHIFT 0
1208static inline uint32_t MDP5_PP_SYNC_THRESH_START(uint32_t val)
1209{
1210 return ((val) << MDP5_PP_SYNC_THRESH_START__SHIFT) & MDP5_PP_SYNC_THRESH_START__MASK;
1211}
1212#define MDP5_PP_SYNC_THRESH_CONTINUE__MASK 0xffff0000
1213#define MDP5_PP_SYNC_THRESH_CONTINUE__SHIFT 16
1214static inline uint32_t MDP5_PP_SYNC_THRESH_CONTINUE(uint32_t val)
1215{
1216 return ((val) << MDP5_PP_SYNC_THRESH_CONTINUE__SHIFT) & MDP5_PP_SYNC_THRESH_CONTINUE__MASK;
1217}
1218
1219static inline uint32_t REG_MDP5_PP_START_POS(uint32_t i0) { return 0x0000001c + __offset_PP(i0); }
1220
1221static inline uint32_t REG_MDP5_PP_RD_PTR_IRQ(uint32_t i0) { return 0x00000020 + __offset_PP(i0); }
1222
1223static inline uint32_t REG_MDP5_PP_WR_PTR_IRQ(uint32_t i0) { return 0x00000024 + __offset_PP(i0); }
1224
1225static inline uint32_t REG_MDP5_PP_OUT_LINE_COUNT(uint32_t i0) { return 0x00000028 + __offset_PP(i0); }
1226
1227static inline uint32_t REG_MDP5_PP_PP_LINE_COUNT(uint32_t i0) { return 0x0000002c + __offset_PP(i0); }
1228
1229static inline uint32_t REG_MDP5_PP_AUTOREFRESH_CONFIG(uint32_t i0) { return 0x00000030 + __offset_PP(i0); }
1230
1231static inline uint32_t REG_MDP5_PP_FBC_MODE(uint32_t i0) { return 0x00000034 + __offset_PP(i0); }
1232
1233static inline uint32_t REG_MDP5_PP_FBC_BUDGET_CTL(uint32_t i0) { return 0x00000038 + __offset_PP(i0); }
1234
1235static inline uint32_t REG_MDP5_PP_FBC_LOSSY_MODE(uint32_t i0) { return 0x0000003c + __offset_PP(i0); }
1236
1120static inline uint32_t __offset_INTF(uint32_t idx) 1237static inline uint32_t __offset_INTF(uint32_t idx)
1121{ 1238{
1122 switch (idx) { 1239 switch (idx) {
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
index b0a44310cf2a..e001e6b2296a 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2014 The Linux Foundation. All rights reserved. 2 * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and 5 * it under the terms of the GNU General Public License version 2 and
@@ -24,13 +24,23 @@ const struct mdp5_cfg_hw *mdp5_cfg = NULL;
24 24
25const struct mdp5_cfg_hw msm8x74_config = { 25const struct mdp5_cfg_hw msm8x74_config = {
26 .name = "msm8x74", 26 .name = "msm8x74",
27 .mdp = {
28 .count = 1,
29 .base = { 0x00100 },
30 },
27 .smp = { 31 .smp = {
28 .mmb_count = 22, 32 .mmb_count = 22,
29 .mmb_size = 4096, 33 .mmb_size = 4096,
34 .clients = {
35 [SSPP_VIG0] = 1, [SSPP_VIG1] = 4, [SSPP_VIG2] = 7,
36 [SSPP_DMA0] = 10, [SSPP_DMA1] = 13,
37 [SSPP_RGB0] = 16, [SSPP_RGB1] = 17, [SSPP_RGB2] = 18,
38 },
30 }, 39 },
31 .ctl = { 40 .ctl = {
32 .count = 5, 41 .count = 5,
33 .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 }, 42 .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
43 .flush_hw_mask = 0x0003ffff,
34 }, 44 },
35 .pipe_vig = { 45 .pipe_vig = {
36 .count = 3, 46 .count = 3,
@@ -57,27 +67,49 @@ const struct mdp5_cfg_hw msm8x74_config = {
57 .count = 2, 67 .count = 2,
58 .base = { 0x13100, 0x13300 }, /* NOTE: no ad in v1.0 */ 68 .base = { 0x13100, 0x13300 }, /* NOTE: no ad in v1.0 */
59 }, 69 },
70 .pp = {
71 .count = 3,
72 .base = { 0x12d00, 0x12e00, 0x12f00 },
73 },
60 .intf = { 74 .intf = {
61 .count = 4, 75 .count = 4,
62 .base = { 0x12500, 0x12700, 0x12900, 0x12b00 }, 76 .base = { 0x12500, 0x12700, 0x12900, 0x12b00 },
63 }, 77 },
78 .intfs = {
79 [0] = INTF_eDP,
80 [1] = INTF_DSI,
81 [2] = INTF_DSI,
82 [3] = INTF_HDMI,
83 },
64 .max_clk = 200000000, 84 .max_clk = 200000000,
65}; 85};
66 86
67const struct mdp5_cfg_hw apq8084_config = { 87const struct mdp5_cfg_hw apq8084_config = {
68 .name = "apq8084", 88 .name = "apq8084",
89 .mdp = {
90 .count = 1,
91 .base = { 0x00100 },
92 },
69 .smp = { 93 .smp = {
70 .mmb_count = 44, 94 .mmb_count = 44,
71 .mmb_size = 8192, 95 .mmb_size = 8192,
96 .clients = {
97 [SSPP_VIG0] = 1, [SSPP_VIG1] = 4,
98 [SSPP_VIG2] = 7, [SSPP_VIG3] = 19,
99 [SSPP_DMA0] = 10, [SSPP_DMA1] = 13,
100 [SSPP_RGB0] = 16, [SSPP_RGB1] = 17,
101 [SSPP_RGB2] = 18, [SSPP_RGB3] = 22,
102 },
72 .reserved_state[0] = GENMASK(7, 0), /* first 8 MMBs */ 103 .reserved_state[0] = GENMASK(7, 0), /* first 8 MMBs */
73 .reserved[CID_RGB0] = 2, 104 .reserved = {
74 .reserved[CID_RGB1] = 2, 105 /* Two SMP blocks are statically tied to RGB pipes: */
75 .reserved[CID_RGB2] = 2, 106 [16] = 2, [17] = 2, [18] = 2, [22] = 2,
76 .reserved[CID_RGB3] = 2, 107 },
77 }, 108 },
78 .ctl = { 109 .ctl = {
79 .count = 5, 110 .count = 5,
80 .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 }, 111 .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
112 .flush_hw_mask = 0x003fffff,
81 }, 113 },
82 .pipe_vig = { 114 .pipe_vig = {
83 .count = 4, 115 .count = 4,
@@ -105,10 +137,69 @@ const struct mdp5_cfg_hw apq8084_config = {
105 .count = 3, 137 .count = 3,
106 .base = { 0x13500, 0x13700, 0x13900 }, 138 .base = { 0x13500, 0x13700, 0x13900 },
107 }, 139 },
140 .pp = {
141 .count = 4,
142 .base = { 0x12f00, 0x13000, 0x13100, 0x13200 },
143 },
108 .intf = { 144 .intf = {
109 .count = 5, 145 .count = 5,
110 .base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 }, 146 .base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 },
111 }, 147 },
148 .intfs = {
149 [0] = INTF_eDP,
150 [1] = INTF_DSI,
151 [2] = INTF_DSI,
152 [3] = INTF_HDMI,
153 },
154 .max_clk = 320000000,
155};
156
157const struct mdp5_cfg_hw msm8x16_config = {
158 .name = "msm8x16",
159 .mdp = {
160 .count = 1,
161 .base = { 0x01000 },
162 },
163 .smp = {
164 .mmb_count = 8,
165 .mmb_size = 8192,
166 .clients = {
167 [SSPP_VIG0] = 1, [SSPP_DMA0] = 4,
168 [SSPP_RGB0] = 7, [SSPP_RGB1] = 8,
169 },
170 },
171 .ctl = {
172 .count = 5,
173 .base = { 0x02000, 0x02200, 0x02400, 0x02600, 0x02800 },
174 .flush_hw_mask = 0x4003ffff,
175 },
176 .pipe_vig = {
177 .count = 1,
178 .base = { 0x05000 },
179 },
180 .pipe_rgb = {
181 .count = 2,
182 .base = { 0x15000, 0x17000 },
183 },
184 .pipe_dma = {
185 .count = 1,
186 .base = { 0x25000 },
187 },
188 .lm = {
189 .count = 2, /* LM0 and LM3 */
190 .base = { 0x45000, 0x48000 },
191 .nb_stages = 5,
192 },
193 .dspp = {
194 .count = 1,
195 .base = { 0x55000 },
196
197 },
198 .intf = {
199 .count = 1, /* INTF_1 */
200 .base = { 0x6B800 },
201 },
202 /* TODO enable .intfs[] with [1] = INTF_DSI, once DSI is implemented */
112 .max_clk = 320000000, 203 .max_clk = 320000000,
113}; 204};
114 205
@@ -116,6 +207,7 @@ static const struct mdp5_cfg_handler cfg_handlers[] = {
116 { .revision = 0, .config = { .hw = &msm8x74_config } }, 207 { .revision = 0, .config = { .hw = &msm8x74_config } },
117 { .revision = 2, .config = { .hw = &msm8x74_config } }, 208 { .revision = 2, .config = { .hw = &msm8x74_config } },
118 { .revision = 3, .config = { .hw = &apq8084_config } }, 209 { .revision = 3, .config = { .hw = &apq8084_config } },
210 { .revision = 6, .config = { .hw = &msm8x16_config } },
119}; 211};
120 212
121 213
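
mdp5_cfg_init() matches the minor hardware revision read from the VERSION register against this table to pick a config. A minimal sketch of that lookup, assuming minor holds the revision:

	const struct mdp5_cfg_hw *hw = NULL;
	int i;

	for (i = 0; i < ARRAY_SIZE(cfg_handlers); i++) {
		if (cfg_handlers[i].revision == minor) {
			hw = cfg_handlers[i].config.hw;
			break;
		}
	}
	/* e.g. minor == 6 (MDP5 v1.6) selects &msm8x16_config */
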
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
index dba4d52cceeb..3a551b0892d8 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
@@ -44,26 +44,38 @@ struct mdp5_lm_block {
44 uint32_t nb_stages; /* number of stages per blender */ 44 uint32_t nb_stages; /* number of stages per blender */
45}; 45};
46 46
47struct mdp5_ctl_block {
48 MDP5_SUB_BLOCK_DEFINITION;
49 uint32_t flush_hw_mask; /* FLUSH register's hardware mask */
50};
51
47struct mdp5_smp_block { 52struct mdp5_smp_block {
48 int mmb_count; /* number of SMP MMBs */ 53 int mmb_count; /* number of SMP MMBs */
49 int mmb_size; /* MMB: size in bytes */ 54 int mmb_size; /* MMB: size in bytes */
 55	uint32_t clients[MAX_CLIENTS]; /* SMP port allocation per pipe */
50 mdp5_smp_state_t reserved_state;/* SMP MMBs statically allocated */ 56 mdp5_smp_state_t reserved_state;/* SMP MMBs statically allocated */
51 int reserved[MAX_CLIENTS]; /* # of MMBs allocated per client */ 57 int reserved[MAX_CLIENTS]; /* # of MMBs allocated per client */
52}; 58};
53 59
60#define MDP5_INTF_NUM_MAX 5
61
54struct mdp5_cfg_hw { 62struct mdp5_cfg_hw {
55 char *name; 63 char *name;
56 64
65 struct mdp5_sub_block mdp;
57 struct mdp5_smp_block smp; 66 struct mdp5_smp_block smp;
58 struct mdp5_sub_block ctl; 67 struct mdp5_ctl_block ctl;
59 struct mdp5_sub_block pipe_vig; 68 struct mdp5_sub_block pipe_vig;
60 struct mdp5_sub_block pipe_rgb; 69 struct mdp5_sub_block pipe_rgb;
61 struct mdp5_sub_block pipe_dma; 70 struct mdp5_sub_block pipe_dma;
62 struct mdp5_lm_block lm; 71 struct mdp5_lm_block lm;
63 struct mdp5_sub_block dspp; 72 struct mdp5_sub_block dspp;
64 struct mdp5_sub_block ad; 73 struct mdp5_sub_block ad;
74 struct mdp5_sub_block pp;
65 struct mdp5_sub_block intf; 75 struct mdp5_sub_block intf;
66 76
77 u32 intfs[MDP5_INTF_NUM_MAX]; /* array of enum mdp5_intf_type */
78
67 uint32_t max_clk; 79 uint32_t max_clk;
68}; 80};
69 81
@@ -84,6 +96,10 @@ const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_hn
84struct mdp5_cfg *mdp5_cfg_get_config(struct mdp5_cfg_handler *cfg_hnd); 96struct mdp5_cfg *mdp5_cfg_get_config(struct mdp5_cfg_handler *cfg_hnd);
85int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_hnd); 97int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_hnd);
86 98
99#define mdp5_cfg_intf_is_virtual(intf_type) ({ \
100 typeof(intf_type) __val = (intf_type); \
101 (__val) >= INTF_VIRTUAL ? true : false; })
102
87struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms, 103struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
88 uint32_t major, uint32_t minor); 104 uint32_t major, uint32_t minor);
89void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_hnd); 105void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_hnd);
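
mdp5_cfg_intf_is_virtual() classifies interface types: anything at or above INTF_VIRTUAL (the out-of-band values 100/101 added to mdp5_intf_type) has no physical INTF register block behind it. A usage sketch, where hw_cfg is assumed to point at the active mdp5_cfg_hw:

	u32 intf_type = hw_cfg->intfs[i];	/* from the intfs[] table */

	if (mdp5_cfg_intf_is_virtual(intf_type))
		continue;	/* e.g. INTF_WB: no INTF registers to program */
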
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
new file mode 100644
index 000000000000..e4e89567f51d
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
@@ -0,0 +1,343 @@
1/*
2 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include "mdp5_kms.h"
15
16#include "drm_crtc.h"
17#include "drm_crtc_helper.h"
18
19struct mdp5_cmd_encoder {
20 struct drm_encoder base;
21 struct mdp5_interface intf;
22 bool enabled;
23 uint32_t bsc;
24};
25#define to_mdp5_cmd_encoder(x) container_of(x, struct mdp5_cmd_encoder, base)
26
27static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
28{
29 struct msm_drm_private *priv = encoder->dev->dev_private;
30 return to_mdp5_kms(to_mdp_kms(priv->kms));
31}
32
33#ifdef CONFIG_MSM_BUS_SCALING
34#include <mach/board.h>
35#include <linux/msm-bus.h>
36#include <linux/msm-bus-board.h>
37#define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val) \
38 { \
39 .src = MSM_BUS_MASTER_MDP_PORT0, \
40 .dst = MSM_BUS_SLAVE_EBI_CH0, \
41 .ab = (ab_val), \
42 .ib = (ib_val), \
43 }
44
45static struct msm_bus_vectors mdp_bus_vectors[] = {
46 MDP_BUS_VECTOR_ENTRY(0, 0),
47 MDP_BUS_VECTOR_ENTRY(2000000000, 2000000000),
48};
49static struct msm_bus_paths mdp_bus_usecases[] = { {
50 .num_paths = 1,
51 .vectors = &mdp_bus_vectors[0],
52}, {
53 .num_paths = 1,
54 .vectors = &mdp_bus_vectors[1],
55} };
56static struct msm_bus_scale_pdata mdp_bus_scale_table = {
57 .usecase = mdp_bus_usecases,
58 .num_usecases = ARRAY_SIZE(mdp_bus_usecases),
59 .name = "mdss_mdp",
60};
61
62static void bs_init(struct mdp5_cmd_encoder *mdp5_cmd_enc)
63{
64 mdp5_cmd_enc->bsc = msm_bus_scale_register_client(
65 &mdp_bus_scale_table);
66 DBG("bus scale client: %08x", mdp5_cmd_enc->bsc);
67}
68
69static void bs_fini(struct mdp5_cmd_encoder *mdp5_cmd_enc)
70{
71 if (mdp5_cmd_enc->bsc) {
72 msm_bus_scale_unregister_client(mdp5_cmd_enc->bsc);
73 mdp5_cmd_enc->bsc = 0;
74 }
75}
76
77static void bs_set(struct mdp5_cmd_encoder *mdp5_cmd_enc, int idx)
78{
79 if (mdp5_cmd_enc->bsc) {
80 DBG("set bus scaling: %d", idx);
81 /* HACK: scaling down, and then immediately back up
82 * seems to leave things broken (underflow).. so
83 * never disable:
84 */
85 idx = 1;
86 msm_bus_scale_client_update_request(mdp5_cmd_enc->bsc, idx);
87 }
88}
89#else
90static void bs_init(struct mdp5_cmd_encoder *mdp5_cmd_enc) {}
91static void bs_fini(struct mdp5_cmd_encoder *mdp5_cmd_enc) {}
92static void bs_set(struct mdp5_cmd_encoder *mdp5_cmd_enc, int idx) {}
93#endif
94
95#define VSYNC_CLK_RATE 19200000
96static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
97 struct drm_display_mode *mode)
98{
99 struct mdp5_kms *mdp5_kms = get_kms(encoder);
100 struct device *dev = encoder->dev->dev;
101 u32 total_lines_x100, vclks_line, cfg;
102 long vsync_clk_speed;
103 int pp_id = GET_PING_PONG_ID(mdp5_crtc_get_lm(encoder->crtc));
104
105 if (IS_ERR_OR_NULL(mdp5_kms->vsync_clk)) {
106 dev_err(dev, "vsync_clk is not initialized\n");
107 return -EINVAL;
108 }
109
110 total_lines_x100 = mode->vtotal * mode->vrefresh;
111 if (!total_lines_x100) {
112 dev_err(dev, "%s: vtotal(%d) or vrefresh(%d) is 0\n",
113 __func__, mode->vtotal, mode->vrefresh);
114 return -EINVAL;
115 }
116
117 vsync_clk_speed = clk_round_rate(mdp5_kms->vsync_clk, VSYNC_CLK_RATE);
118 if (vsync_clk_speed <= 0) {
119 dev_err(dev, "vsync_clk round rate failed %ld\n",
120 vsync_clk_speed);
121 return -EINVAL;
122 }
123 vclks_line = vsync_clk_speed * 100 / total_lines_x100;
124
125 cfg = MDP5_PP_SYNC_CONFIG_VSYNC_COUNTER_EN
126 | MDP5_PP_SYNC_CONFIG_VSYNC_IN_EN;
127 cfg |= MDP5_PP_SYNC_CONFIG_VSYNC_COUNT(vclks_line);
128
129 mdp5_write(mdp5_kms, REG_MDP5_PP_SYNC_CONFIG_VSYNC(pp_id), cfg);
130 mdp5_write(mdp5_kms,
131 REG_MDP5_PP_SYNC_CONFIG_HEIGHT(pp_id), 0xfff0);
132 mdp5_write(mdp5_kms,
133 REG_MDP5_PP_VSYNC_INIT_VAL(pp_id), mode->vdisplay);
134 mdp5_write(mdp5_kms, REG_MDP5_PP_RD_PTR_IRQ(pp_id), mode->vdisplay + 1);
135 mdp5_write(mdp5_kms, REG_MDP5_PP_START_POS(pp_id), mode->vdisplay);
136 mdp5_write(mdp5_kms, REG_MDP5_PP_SYNC_THRESH(pp_id),
137 MDP5_PP_SYNC_THRESH_START(4) |
138 MDP5_PP_SYNC_THRESH_CONTINUE(4));
139
140 return 0;
141}
142
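
A worked instance of the tear-check counter math above, assuming a 1080p-style
mode (vtotal = 1125, vrefresh = 60) and the nominal 19.2 MHz vsync clock; the
numbers only restate what pingpong_tearcheck_setup() computes.

    #include <stdio.h>

    int main(void)
    {
            long vsync_clk_speed = 19200000;        /* VSYNC_CLK_RATE */
            unsigned vtotal = 1125, vrefresh = 60;  /* assumed mode */

            unsigned total_lines_x100 = vtotal * vrefresh;  /* 67500 */
            unsigned vclks_line = vsync_clk_speed * 100 / total_lines_x100;

            /* 19200000 * 100 / 67500 = 28444, the VSYNC_COUNT value written */
            printf("VSYNC_COUNT = %u\n", vclks_line);
            return 0;
    }
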
143static int pingpong_tearcheck_enable(struct drm_encoder *encoder)
144{
145 struct mdp5_kms *mdp5_kms = get_kms(encoder);
146 int pp_id = GET_PING_PONG_ID(mdp5_crtc_get_lm(encoder->crtc));
147 int ret;
148
149 ret = clk_set_rate(mdp5_kms->vsync_clk,
150 clk_round_rate(mdp5_kms->vsync_clk, VSYNC_CLK_RATE));
151 if (ret) {
152 dev_err(encoder->dev->dev,
153 "vsync_clk clk_set_rate failed, %d\n", ret);
154 return ret;
155 }
156 ret = clk_prepare_enable(mdp5_kms->vsync_clk);
157 if (ret) {
158 dev_err(encoder->dev->dev,
159 "vsync_clk clk_prepare_enable failed, %d\n", ret);
160 return ret;
161 }
162
163 mdp5_write(mdp5_kms, REG_MDP5_PP_TEAR_CHECK_EN(pp_id), 1);
164
165 return 0;
166}
167
168static void pingpong_tearcheck_disable(struct drm_encoder *encoder)
169{
170 struct mdp5_kms *mdp5_kms = get_kms(encoder);
171 int pp_id = GET_PING_PONG_ID(mdp5_crtc_get_lm(encoder->crtc));
172
173 mdp5_write(mdp5_kms, REG_MDP5_PP_TEAR_CHECK_EN(pp_id), 0);
174 clk_disable_unprepare(mdp5_kms->vsync_clk);
175}
176
177static void mdp5_cmd_encoder_destroy(struct drm_encoder *encoder)
178{
179 struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
180 bs_fini(mdp5_cmd_enc);
181 drm_encoder_cleanup(encoder);
182 kfree(mdp5_cmd_enc);
183}
184
185static const struct drm_encoder_funcs mdp5_cmd_encoder_funcs = {
186 .destroy = mdp5_cmd_encoder_destroy,
187};
188
189static bool mdp5_cmd_encoder_mode_fixup(struct drm_encoder *encoder,
190 const struct drm_display_mode *mode,
191 struct drm_display_mode *adjusted_mode)
192{
193 return true;
194}
195
196static void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
197 struct drm_display_mode *mode,
198 struct drm_display_mode *adjusted_mode)
199{
200 struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
201
202 mode = adjusted_mode;
203
204 DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
205 mode->base.id, mode->name,
206 mode->vrefresh, mode->clock,
207 mode->hdisplay, mode->hsync_start,
208 mode->hsync_end, mode->htotal,
209 mode->vdisplay, mode->vsync_start,
210 mode->vsync_end, mode->vtotal,
211 mode->type, mode->flags);
212 pingpong_tearcheck_setup(encoder, mode);
213 mdp5_crtc_set_intf(encoder->crtc, &mdp5_cmd_enc->intf);
214}
215
216static void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
217{
218 struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
219 struct mdp5_kms *mdp5_kms = get_kms(encoder);
220 struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc);
221 struct mdp5_interface *intf = &mdp5_cmd_enc->intf;
222 int lm = mdp5_crtc_get_lm(encoder->crtc);
223
224 if (WARN_ON(!mdp5_cmd_enc->enabled))
225 return;
226
227 /* Wait for the last frame done */
228 mdp_irq_wait(&mdp5_kms->base, lm2ppdone(lm));
229 pingpong_tearcheck_disable(encoder);
230
231 mdp5_ctl_set_encoder_state(ctl, false);
232 mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf));
233
234 bs_set(mdp5_cmd_enc, 0);
235
236 mdp5_cmd_enc->enabled = false;
237}
238
239static void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
240{
241 struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
242 struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc);
243 struct mdp5_interface *intf = &mdp5_cmd_enc->intf;
244
245 if (WARN_ON(mdp5_cmd_enc->enabled))
246 return;
247
248 bs_set(mdp5_cmd_enc, 1);
249 if (pingpong_tearcheck_enable(encoder))
250 return;
251
252 mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf));
253
254 mdp5_ctl_set_encoder_state(ctl, true);
255
256 mdp5_cmd_enc->enabled = true;
257}
258
259static const struct drm_encoder_helper_funcs mdp5_cmd_encoder_helper_funcs = {
260 .mode_fixup = mdp5_cmd_encoder_mode_fixup,
261 .mode_set = mdp5_cmd_encoder_mode_set,
262 .disable = mdp5_cmd_encoder_disable,
263 .enable = mdp5_cmd_encoder_enable,
264};
265
266int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
267 struct drm_encoder *slave_encoder)
268{
269 struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
270 struct mdp5_kms *mdp5_kms;
271 int intf_num;
272 u32 data = 0;
273
274 if (!encoder || !slave_encoder)
275 return -EINVAL;
276
277 mdp5_kms = get_kms(encoder);
278 intf_num = mdp5_cmd_enc->intf.num;
279
 280	/* Switch the slave encoder's trigger MUX to use the master's
 281	 * start signal for the slave encoder.
282 */
283 if (intf_num == 1)
284 data |= MDP5_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX;
285 else if (intf_num == 2)
286 data |= MDP5_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX;
287 else
288 return -EINVAL;
289
290 /* Smart Panel, Sync mode */
291 data |= MDP5_SPLIT_DPL_UPPER_SMART_PANEL;
292
 293	/* Make sure clocks are on when connectors call this function. */
294 mdp5_enable(mdp5_kms);
295 mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, data);
296
297 mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER,
298 MDP5_SPLIT_DPL_LOWER_SMART_PANEL);
299 mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1);
300 mdp5_disable(mdp5_kms);
301
302 return 0;
303}
304
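
A hedged sketch of how a dual-DSI caller might pair two command-mode encoders;
the real caller lives in the msm DSI connector code, and this wrapper plus its
encoder pointers are hypothetical.

    /* Hypothetical caller: intf 1 drives the left tile (master), intf 2 the
     * right tile (slave); the slave's START trigger is muxed to the master,
     * so a single kickoff flips both halves in lockstep.
     */
    static int hypothetical_dual_dsi_link(struct drm_encoder *master,
                                          struct drm_encoder *slave)
    {
            /* programs SPLIT_DPL_UPPER/LOWER and enables split display */
            return mdp5_cmd_encoder_set_split_display(master, slave);
    }
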
305/* initialize command mode encoder */
306struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
307 struct mdp5_interface *intf)
308{
309 struct drm_encoder *encoder = NULL;
310 struct mdp5_cmd_encoder *mdp5_cmd_enc;
311 int ret;
312
 313	if (WARN_ON((intf->type != INTF_DSI) ||
 314			(intf->mode != MDP5_INTF_DSI_MODE_COMMAND))) {
315 ret = -EINVAL;
316 goto fail;
317 }
318
319 mdp5_cmd_enc = kzalloc(sizeof(*mdp5_cmd_enc), GFP_KERNEL);
320 if (!mdp5_cmd_enc) {
321 ret = -ENOMEM;
322 goto fail;
323 }
324
325 memcpy(&mdp5_cmd_enc->intf, intf, sizeof(mdp5_cmd_enc->intf));
326 encoder = &mdp5_cmd_enc->base;
327
328 drm_encoder_init(dev, encoder, &mdp5_cmd_encoder_funcs,
329 DRM_MODE_ENCODER_DSI);
330
331 drm_encoder_helper_add(encoder, &mdp5_cmd_encoder_helper_funcs);
332
333 bs_init(mdp5_cmd_enc);
334
335 return encoder;
336
337fail:
338 if (encoder)
339 mdp5_cmd_encoder_destroy(encoder);
340
341 return ERR_PTR(ret);
342}
343
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 2f2863cf8b45..c1530772187d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -82,8 +82,6 @@ static void request_pending(struct drm_crtc *crtc, uint32_t pending)
82 mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank); 82 mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
83} 83}
84 84
85#define mdp5_lm_get_flush(lm) mdp_ctl_flush_mask_lm(lm)
86
87static void crtc_flush(struct drm_crtc *crtc, u32 flush_mask) 85static void crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
88{ 86{
89 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 87 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
@@ -110,8 +108,8 @@ static void crtc_flush_all(struct drm_crtc *crtc)
110 drm_atomic_crtc_for_each_plane(plane, crtc) { 108 drm_atomic_crtc_for_each_plane(plane, crtc) {
111 flush_mask |= mdp5_plane_get_flush(plane); 109 flush_mask |= mdp5_plane_get_flush(plane);
112 } 110 }
113 flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl); 111
114 flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm); 112 flush_mask |= mdp_ctl_flush_mask_lm(mdp5_crtc->lm);
115 113
116 crtc_flush(crtc, flush_mask); 114 crtc_flush(crtc, flush_mask);
117} 115}
@@ -298,8 +296,6 @@ static void mdp5_crtc_enable(struct drm_crtc *crtc)
298 mdp5_enable(mdp5_kms); 296 mdp5_enable(mdp5_kms);
299 mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err); 297 mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
300 298
301 crtc_flush_all(crtc);
302
303 mdp5_crtc->enabled = true; 299 mdp5_crtc->enabled = true;
304} 300}
305 301
@@ -444,13 +440,14 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
444 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 440 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
445 struct drm_device *dev = crtc->dev; 441 struct drm_device *dev = crtc->dev;
446 struct mdp5_kms *mdp5_kms = get_kms(crtc); 442 struct mdp5_kms *mdp5_kms = get_kms(crtc);
447 struct drm_gem_object *cursor_bo, *old_bo; 443 struct drm_gem_object *cursor_bo, *old_bo = NULL;
448 uint32_t blendcfg, cursor_addr, stride; 444 uint32_t blendcfg, cursor_addr, stride;
449 int ret, bpp, lm; 445 int ret, bpp, lm;
450 unsigned int depth; 446 unsigned int depth;
451 enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL; 447 enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
452 uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0); 448 uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
453 uint32_t roi_w, roi_h; 449 uint32_t roi_w, roi_h;
450 bool cursor_enable = true;
454 unsigned long flags; 451 unsigned long flags;
455 452
456 if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) { 453 if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
@@ -463,7 +460,8 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
463 460
464 if (!handle) { 461 if (!handle) {
465 DBG("Cursor off"); 462 DBG("Cursor off");
466 return mdp5_ctl_set_cursor(mdp5_crtc->ctl, false); 463 cursor_enable = false;
464 goto set_cursor;
467 } 465 }
468 466
469 cursor_bo = drm_gem_object_lookup(dev, file, handle); 467 cursor_bo = drm_gem_object_lookup(dev, file, handle);
@@ -504,11 +502,14 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
504 502
505 spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); 503 spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
506 504
507 ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, true); 505set_cursor:
508 if (ret) 506 ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, 0, cursor_enable);
507 if (ret) {
508 dev_err(dev->dev, "failed to %sable cursor: %d\n",
509 cursor_enable ? "en" : "dis", ret);
509 goto end; 510 goto end;
511 }
510 512
511 flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
512 crtc_flush(crtc, flush_mask); 513 crtc_flush(crtc, flush_mask);
513 514
514end: 515end:
@@ -613,64 +614,39 @@ void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
613} 614}
614 615
615/* set interface for routing crtc->encoder: */ 616/* set interface for routing crtc->encoder: */
616void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf, 617void mdp5_crtc_set_intf(struct drm_crtc *crtc, struct mdp5_interface *intf)
617 enum mdp5_intf intf_id)
618{ 618{
619 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 619 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
620 struct mdp5_kms *mdp5_kms = get_kms(crtc); 620 struct mdp5_kms *mdp5_kms = get_kms(crtc);
621 uint32_t flush_mask = 0; 621 int lm = mdp5_crtc_get_lm(crtc);
622 uint32_t intf_sel;
623 unsigned long flags;
624 622
625 /* now that we know what irq's we want: */ 623 /* now that we know what irq's we want: */
626 mdp5_crtc->err.irqmask = intf2err(intf); 624 mdp5_crtc->err.irqmask = intf2err(intf->num);
627 mdp5_crtc->vblank.irqmask = intf2vblank(intf);
628 mdp_irq_update(&mdp5_kms->base);
629
630 spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
631 intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);
632
633 switch (intf) {
634 case 0:
635 intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK;
636 intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf_id);
637 break;
638 case 1:
639 intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK;
640 intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf_id);
641 break;
642 case 2:
643 intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK;
644 intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf_id);
645 break;
646 case 3:
647 intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK;
648 intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf_id);
649 break;
650 default:
651 BUG();
652 break;
653 }
654 625
655 mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel); 626 /* Register command mode Pingpong done as vblank for now,
656	spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);		 627	 * so that the atomic commit waits for it to finish.
 628	 * Ideally, in the future, we should take rd_ptr done as vblank,
 629	 * and let the atomic commit wait for pingpong done in command mode.
630 */
631 if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
632 mdp5_crtc->vblank.irqmask = lm2ppdone(lm);
633 else
634 mdp5_crtc->vblank.irqmask = intf2vblank(lm, intf);
635 mdp_irq_update(&mdp5_kms->base);
657 636
658 DBG("%s: intf_sel=%08x", mdp5_crtc->name, intf_sel);
659 mdp5_ctl_set_intf(mdp5_crtc->ctl, intf); 637 mdp5_ctl_set_intf(mdp5_crtc->ctl, intf);
660 flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
661 flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm);
662
663 crtc_flush(crtc, flush_mask);
664} 638}
665 639
666int mdp5_crtc_get_lm(struct drm_crtc *crtc) 640int mdp5_crtc_get_lm(struct drm_crtc *crtc)
667{ 641{
668 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 642 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
643 return WARN_ON(!crtc) ? -EINVAL : mdp5_crtc->lm;
644}
669 645
670 if (WARN_ON(!crtc)) 646struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
671 return -EINVAL; 647{
672 648 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
673 return mdp5_crtc->lm; 649 return WARN_ON(!crtc) ? NULL : mdp5_crtc->ctl;
674} 650}
675 651
676/* initialize crtc */ 652/* initialize crtc */
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
index 151129032d16..5488b687c8d1 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2014 The Linux Foundation. All rights reserved. 2 * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and 5 * it under the terms of the GNU General Public License version 2 and
@@ -33,23 +33,31 @@
33 * requested by the client (in mdp5_crtc_mode_set()). 33 * requested by the client (in mdp5_crtc_mode_set()).
34 */ 34 */
35 35
36struct op_mode {
37 struct mdp5_interface intf;
38
39 bool encoder_enabled;
40 uint32_t start_mask;
41};
42
36struct mdp5_ctl { 43struct mdp5_ctl {
37 struct mdp5_ctl_manager *ctlm; 44 struct mdp5_ctl_manager *ctlm;
38 45
39 u32 id; 46 u32 id;
47 int lm;
40 48
41 /* whether this CTL has been allocated or not: */ 49 /* whether this CTL has been allocated or not: */
42 bool busy; 50 bool busy;
43 51
44 /* memory output connection (@see mdp5_ctl_mode): */ 52 /* Operation Mode Configuration for the Pipeline */
45 u32 mode; 53 struct op_mode pipeline;
46 54
47 /* REG_MDP5_CTL_*(<id>) registers access info + lock: */ 55 /* REG_MDP5_CTL_*(<id>) registers access info + lock: */
48 spinlock_t hw_lock; 56 spinlock_t hw_lock;
49 u32 reg_offset; 57 u32 reg_offset;
50 58
51 /* flush mask used to commit CTL registers */ 59 /* when do CTL registers need to be flushed? (mask of trigger bits) */
52 u32 flush_mask; 60 u32 pending_ctl_trigger;
53 61
54 bool cursor_on; 62 bool cursor_on;
55 63
@@ -63,6 +71,9 @@ struct mdp5_ctl_manager {
63 u32 nlm; 71 u32 nlm;
64 u32 nctl; 72 u32 nctl;
65 73
74 /* to filter out non-present bits in the current hardware config */
75 u32 flush_hw_mask;
76
66 /* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */ 77 /* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */
67 spinlock_t pool_lock; 78 spinlock_t pool_lock;
68 struct mdp5_ctl ctls[MAX_CTL]; 79 struct mdp5_ctl ctls[MAX_CTL];
@@ -94,31 +105,172 @@ u32 ctl_read(struct mdp5_ctl *ctl, u32 reg)
94 return mdp5_read(mdp5_kms, reg); 105 return mdp5_read(mdp5_kms, reg);
95} 106}
96 107
108static void set_display_intf(struct mdp5_kms *mdp5_kms,
109 struct mdp5_interface *intf)
110{
111 unsigned long flags;
112 u32 intf_sel;
113
114 spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
115 intf_sel = mdp5_read(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0));
116
117 switch (intf->num) {
118 case 0:
119 intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF0__MASK;
120 intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF0(intf->type);
121 break;
122 case 1:
123 intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF1__MASK;
124 intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF1(intf->type);
125 break;
126 case 2:
127 intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF2__MASK;
128 intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF2(intf->type);
129 break;
130 case 3:
131 intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF3__MASK;
132 intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF3(intf->type);
133 break;
134 default:
135 BUG();
136 break;
137 }
138
139 mdp5_write(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0), intf_sel);
140 spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
141}
97 142
98int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, int intf) 143static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
99{ 144{
100 unsigned long flags; 145 unsigned long flags;
101 static const enum mdp5_intfnum intfnum[] = { 146 u32 ctl_op = 0;
102 INTF0, INTF1, INTF2, INTF3, 147
103 }; 148 if (!mdp5_cfg_intf_is_virtual(intf->type))
149 ctl_op |= MDP5_CTL_OP_INTF_NUM(INTF0 + intf->num);
150
151 switch (intf->type) {
152 case INTF_DSI:
153 if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
154 ctl_op |= MDP5_CTL_OP_CMD_MODE;
155 break;
156
157 case INTF_WB:
158 if (intf->mode == MDP5_INTF_WB_MODE_LINE)
159 ctl_op |= MDP5_CTL_OP_MODE(MODE_WB_2_LINE);
160 break;
161
162 default:
163 break;
164 }
104 165
105 spin_lock_irqsave(&ctl->hw_lock, flags); 166 spin_lock_irqsave(&ctl->hw_lock, flags);
106 ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), 167 ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), ctl_op);
107 MDP5_CTL_OP_MODE(ctl->mode) |
108 MDP5_CTL_OP_INTF_NUM(intfnum[intf]));
109 spin_unlock_irqrestore(&ctl->hw_lock, flags); 168 spin_unlock_irqrestore(&ctl->hw_lock, flags);
169}
170
171int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
172{
173 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
174 struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);
175
176 memcpy(&ctl->pipeline.intf, intf, sizeof(*intf));
177
178 ctl->pipeline.start_mask = mdp_ctl_flush_mask_lm(ctl->lm) |
179 mdp_ctl_flush_mask_encoder(intf);
180
181 /* Virtual interfaces need not set a display intf (e.g.: Writeback) */
182 if (!mdp5_cfg_intf_is_virtual(intf->type))
183 set_display_intf(mdp5_kms, intf);
184
185 set_ctl_op(ctl, intf);
110 186
111 return 0; 187 return 0;
112} 188}
113 189
114int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable) 190static bool start_signal_needed(struct mdp5_ctl *ctl)
191{
192 struct op_mode *pipeline = &ctl->pipeline;
193
194 if (!pipeline->encoder_enabled || pipeline->start_mask != 0)
195 return false;
196
197 switch (pipeline->intf.type) {
198 case INTF_WB:
199 return true;
200 case INTF_DSI:
201 return pipeline->intf.mode == MDP5_INTF_DSI_MODE_COMMAND;
202 default:
203 return false;
204 }
205}
206
207/*
208 * send_start_signal() - Overlay Processor Start Signal
209 *
 210 * For a given control operation (display pipeline), a START signal must be
 211 * issued to kick off operation and activate all layers.
212 * e.g.: DSI command mode, Writeback
213 */
214static void send_start_signal(struct mdp5_ctl *ctl)
215{
216 unsigned long flags;
217
218 spin_lock_irqsave(&ctl->hw_lock, flags);
219 ctl_write(ctl, REG_MDP5_CTL_START(ctl->id), 1);
220 spin_unlock_irqrestore(&ctl->hw_lock, flags);
221}
222
223static void refill_start_mask(struct mdp5_ctl *ctl)
224{
225 struct op_mode *pipeline = &ctl->pipeline;
226 struct mdp5_interface *intf = &ctl->pipeline.intf;
227
228 pipeline->start_mask = mdp_ctl_flush_mask_lm(ctl->lm);
229
230 /*
231 * Writeback encoder needs to program & flush
232 * address registers for each page flip..
233 */
234 if (intf->type == INTF_WB)
235 pipeline->start_mask |= mdp_ctl_flush_mask_encoder(intf);
236}
237
238/**
239 * mdp5_ctl_set_encoder_state() - set the encoder state
240 *
 241 * @enabled: true when the encoder is ready for data streaming; false otherwise.
 242 *
 243 * Note:
 244 * This encoder state is needed to trigger the START signal (data path kickoff).
245 */
246int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled)
247{
248 if (WARN_ON(!ctl))
249 return -EINVAL;
250
251 ctl->pipeline.encoder_enabled = enabled;
252 DBG("intf_%d: %s", ctl->pipeline.intf.num, enabled ? "on" : "off");
253
254 if (start_signal_needed(ctl)) {
255 send_start_signal(ctl);
256 refill_start_mask(ctl);
257 }
258
259 return 0;
260}
261
262/*
263 * Note:
264 * CTL registers need to be flushed after calling this function
265 * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
266 */
267int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable)
115{ 268{
116 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; 269 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
117 unsigned long flags; 270 unsigned long flags;
118 u32 blend_cfg; 271 u32 blend_cfg;
119 int lm; 272 int lm = ctl->lm;
120 273
121 lm = mdp5_crtc_get_lm(ctl->crtc);
122 if (unlikely(WARN_ON(lm < 0))) { 274 if (unlikely(WARN_ON(lm < 0))) {
123 dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d", 275 dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d",
124 ctl->id, lm); 276 ctl->id, lm);
@@ -138,12 +290,12 @@ int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable)
138 290
139 spin_unlock_irqrestore(&ctl->hw_lock, flags); 291 spin_unlock_irqrestore(&ctl->hw_lock, flags);
140 292
293 ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id);
141 ctl->cursor_on = enable; 294 ctl->cursor_on = enable;
142 295
143 return 0; 296 return 0;
144} 297}
145 298
146
147int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg) 299int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg)
148{ 300{
149 unsigned long flags; 301 unsigned long flags;
@@ -157,37 +309,122 @@ int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg)
157 ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg); 309 ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
158 spin_unlock_irqrestore(&ctl->hw_lock, flags); 310 spin_unlock_irqrestore(&ctl->hw_lock, flags);
159 311
312 ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(lm);
313
160 return 0; 314 return 0;
161} 315}
162 316
317u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf)
318{
319 if (intf->type == INTF_WB)
320 return MDP5_CTL_FLUSH_WB;
321
322 switch (intf->num) {
323 case 0: return MDP5_CTL_FLUSH_TIMING_0;
324 case 1: return MDP5_CTL_FLUSH_TIMING_1;
325 case 2: return MDP5_CTL_FLUSH_TIMING_2;
326 case 3: return MDP5_CTL_FLUSH_TIMING_3;
327 default: return 0;
328 }
329}
330
331u32 mdp_ctl_flush_mask_cursor(int cursor_id)
332{
333 switch (cursor_id) {
334 case 0: return MDP5_CTL_FLUSH_CURSOR_0;
335 case 1: return MDP5_CTL_FLUSH_CURSOR_1;
336 default: return 0;
337 }
338}
339
340u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe)
341{
342 switch (pipe) {
343 case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
344 case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
345 case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
346 case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
347 case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
348 case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
349 case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
350 case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
351 case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
352 case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
353 default: return 0;
354 }
355}
356
357u32 mdp_ctl_flush_mask_lm(int lm)
358{
359 switch (lm) {
360 case 0: return MDP5_CTL_FLUSH_LM0;
361 case 1: return MDP5_CTL_FLUSH_LM1;
362 case 2: return MDP5_CTL_FLUSH_LM2;
363 case 5: return MDP5_CTL_FLUSH_LM5;
364 default: return 0;
365 }
366}
367
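
Illustrative only: how a caller is expected to OR these helper masks together
before a single commit. The pipe/LM choice is arbitrary and the function is a
sketch assuming the driver context of this file.

    static void hypothetical_flush_example(struct mdp5_ctl *ctl,
                                           struct mdp5_interface *intf)
    {
            u32 flush_mask = 0;

            /* one RGB pipe blended on LM 0, scanned out through intf */
            flush_mask |= mdp_ctl_flush_mask_pipe(SSPP_RGB0); /* FLUSH_RGB0 */
            flush_mask |= mdp_ctl_flush_mask_lm(0);           /* FLUSH_LM0 */
            flush_mask |= mdp_ctl_flush_mask_encoder(intf);   /* timing engine */

            /* a single write latches all double-buffered updates at once */
            mdp5_ctl_commit(ctl, flush_mask);
    }
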
368static u32 fix_sw_flush(struct mdp5_ctl *ctl, u32 flush_mask)
369{
370 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
371 u32 sw_mask = 0;
372#define BIT_NEEDS_SW_FIX(bit) \
373 (!(ctl_mgr->flush_hw_mask & bit) && (flush_mask & bit))
374
375 /* for some targets, cursor bit is the same as LM bit */
376 if (BIT_NEEDS_SW_FIX(MDP5_CTL_FLUSH_CURSOR_0))
377 sw_mask |= mdp_ctl_flush_mask_lm(ctl->lm);
378
379 return sw_mask;
380}
381
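
A worked mask walk for fix_sw_flush(), assuming a target whose flush_hw_mask
has an LM0 bit but no dedicated CURSOR_0 bit; the bit positions below are
assumed for shape only.

    #include <stdio.h>

    #define FLUSH_LM0      (1u << 6)   /* assumed positions, demo only */
    #define FLUSH_CURSOR_0 (1u << 22)

    int main(void)
    {
            unsigned flush_hw_mask = FLUSH_LM0;    /* no cursor bit on this HW */
            unsigned flush_mask = FLUSH_CURSOR_0;  /* what the caller asked for */

            /* fix_sw_flush(): cursor requested but not implemented -> LM bit */
            if (!(flush_hw_mask & FLUSH_CURSOR_0) && (flush_mask & FLUSH_CURSOR_0))
                    flush_mask |= FLUSH_LM0;

            flush_mask &= flush_hw_mask;           /* drop non-present bits */
            printf("register write: 0x%08x\n", flush_mask); /* 0x00000040 */
            return 0;
    }
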
382/**
383 * mdp5_ctl_commit() - Register Flush
384 *
 385 * The flush register signals that a group of double-buffered registers is
 386 * fully programmed, and that the hardware may latch their shadow (back)
 387 * copies into the active configuration.
388 *
 389 * Some registers' FLUSH bits are shared when the hardware does not have
390 * dedicated bits for them; handling these is the job of fix_sw_flush().
391 *
392 * CTL registers need to be flushed in some circumstances; if that is the
393 * case, some trigger bits will be present in both flush mask and
394 * ctl->pending_ctl_trigger.
395 */
163int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask) 396int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
164{ 397{
165 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; 398 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
399 struct op_mode *pipeline = &ctl->pipeline;
166 unsigned long flags; 400 unsigned long flags;
167 401
168 if (flush_mask & MDP5_CTL_FLUSH_CURSOR_DUMMY) { 402 pipeline->start_mask &= ~flush_mask;
169 int lm = mdp5_crtc_get_lm(ctl->crtc);
170 403
171 if (unlikely(WARN_ON(lm < 0))) { 404 VERB("flush_mask=%x, start_mask=%x, trigger=%x", flush_mask,
172 dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d", 405 pipeline->start_mask, ctl->pending_ctl_trigger);
173 ctl->id, lm);
174 return -EINVAL;
175 }
176 406
177 /* for current targets, cursor bit is the same as LM bit */ 407 if (ctl->pending_ctl_trigger & flush_mask) {
178 flush_mask |= mdp_ctl_flush_mask_lm(lm); 408 flush_mask |= MDP5_CTL_FLUSH_CTL;
409 ctl->pending_ctl_trigger = 0;
179 } 410 }
180 411
181 spin_lock_irqsave(&ctl->hw_lock, flags); 412 flush_mask |= fix_sw_flush(ctl, flush_mask);
182 ctl_write(ctl, REG_MDP5_CTL_FLUSH(ctl->id), flush_mask);
183 spin_unlock_irqrestore(&ctl->hw_lock, flags);
184 413
185 return 0; 414 flush_mask &= ctl_mgr->flush_hw_mask;
186}
187 415
188u32 mdp5_ctl_get_flush(struct mdp5_ctl *ctl) 416 if (flush_mask) {
189{ 417 spin_lock_irqsave(&ctl->hw_lock, flags);
190 return ctl->flush_mask; 418 ctl_write(ctl, REG_MDP5_CTL_FLUSH(ctl->id), flush_mask);
419 spin_unlock_irqrestore(&ctl->hw_lock, flags);
420 }
421
422 if (start_signal_needed(ctl)) {
423 send_start_signal(ctl);
424 refill_start_mask(ctl);
425 }
426
427 return 0;
191} 428}
192 429
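
A hedged sequencing sketch for one DSI command-mode kickoff, assuming the
driver context above: every commit clears its bits from start_mask, and the
commit that drains the mask (with the encoder enabled) fires the START pulse.

    static void hypothetical_cmd_mode_kickoff(struct mdp5_ctl *ctl,
                                              struct mdp5_interface *intf, int lm)
    {
            mdp5_ctl_set_intf(ctl, intf);           /* start_mask = LM | encoder */
            mdp5_ctl_set_encoder_state(ctl, true);  /* ready, mask still non-empty */

            /* each commit clears its bits from start_mask ... */
            mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_lm(lm));
            /* ... and the commit that empties it also sends START */
            mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf));
    }
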
193void mdp5_ctl_release(struct mdp5_ctl *ctl) 430void mdp5_ctl_release(struct mdp5_ctl *ctl)
@@ -208,6 +445,11 @@ void mdp5_ctl_release(struct mdp5_ctl *ctl)
208 DBG("CTL %d released", ctl->id); 445 DBG("CTL %d released", ctl->id);
209} 446}
210 447
448int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl)
449{
450 return WARN_ON(!ctl) ? -EINVAL : ctl->id;
451}
452
211/* 453/*
212 * mdp5_ctl_request() - CTL dynamic allocation 454 * mdp5_ctl_request() - CTL dynamic allocation
213 * 455 *
@@ -235,8 +477,10 @@ struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
235 477
236 ctl = &ctl_mgr->ctls[c]; 478 ctl = &ctl_mgr->ctls[c];
237 479
480 ctl->lm = mdp5_crtc_get_lm(crtc);
238 ctl->crtc = crtc; 481 ctl->crtc = crtc;
239 ctl->busy = true; 482 ctl->busy = true;
483 ctl->pending_ctl_trigger = 0;
240 DBG("CTL %d allocated", ctl->id); 484 DBG("CTL %d allocated", ctl->id);
241 485
242unlock: 486unlock:
@@ -267,7 +511,7 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
267 void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg) 511 void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg)
268{ 512{
269 struct mdp5_ctl_manager *ctl_mgr; 513 struct mdp5_ctl_manager *ctl_mgr;
270 const struct mdp5_sub_block *ctl_cfg = &hw_cfg->ctl; 514 const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl;
271 unsigned long flags; 515 unsigned long flags;
272 int c, ret; 516 int c, ret;
273 517
@@ -289,6 +533,7 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
289 ctl_mgr->dev = dev; 533 ctl_mgr->dev = dev;
290 ctl_mgr->nlm = hw_cfg->lm.count; 534 ctl_mgr->nlm = hw_cfg->lm.count;
291 ctl_mgr->nctl = ctl_cfg->count; 535 ctl_mgr->nctl = ctl_cfg->count;
536 ctl_mgr->flush_hw_mask = ctl_cfg->flush_hw_mask;
292 spin_lock_init(&ctl_mgr->pool_lock); 537 spin_lock_init(&ctl_mgr->pool_lock);
293 538
294 /* initialize each CTL of the pool: */ 539 /* initialize each CTL of the pool: */
@@ -303,9 +548,7 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
303 } 548 }
304 ctl->ctlm = ctl_mgr; 549 ctl->ctlm = ctl_mgr;
305 ctl->id = c; 550 ctl->id = c;
306 ctl->mode = MODE_NONE;
307 ctl->reg_offset = ctl_cfg->base[c]; 551 ctl->reg_offset = ctl_cfg->base[c];
308 ctl->flush_mask = MDP5_CTL_FLUSH_CTL;
309 ctl->busy = false; 552 ctl->busy = false;
310 spin_lock_init(&ctl->hw_lock); 553 spin_lock_init(&ctl->hw_lock);
311 } 554 }
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
index ad48788efeea..7a62000994a1 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
@@ -33,19 +33,13 @@ void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm);
33 * which is then used to call the other mdp5_ctl_*(ctl, ...) functions. 33 * which is then used to call the other mdp5_ctl_*(ctl, ...) functions.
34 */ 34 */
35struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, struct drm_crtc *crtc); 35struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, struct drm_crtc *crtc);
36int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl);
36 37
37int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, int intf); 38struct mdp5_interface;
39int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, struct mdp5_interface *intf);
40int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled);
38 41
39int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable); 42int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable);
40
41/* @blend_cfg: see LM blender config definition below */
42int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg);
43
44/* @flush_mask: see CTL flush masks definitions below */
45int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask);
46u32 mdp5_ctl_get_flush(struct mdp5_ctl *ctl);
47
48void mdp5_ctl_release(struct mdp5_ctl *ctl);
49 43
50/* 44/*
51 * blend_cfg (LM blender config): 45 * blend_cfg (LM blender config):
@@ -72,51 +66,32 @@ static inline u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
72} 66}
73 67
74/* 68/*
75 * flush_mask (CTL flush masks): 69 * mdp5_ctl_blend() - Blend multiple layers on a Layer Mixer (LM)
70 *
71 * @blend_cfg: see LM blender config definition below
76 * 72 *
77 * The following functions allow each DRM entity to get and store 73 * Note:
78 * their own flush mask. 74 * CTL registers need to be flushed after calling this function
79 * Once stored, these masks will then be accessed through each DRM's 75 * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
80 * interface and used by the caller of mdp5_ctl_commit() to specify
81 * which block(s) need to be flushed through @flush_mask parameter.
82 */ 76 */
77int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg);
83 78
84#define MDP5_CTL_FLUSH_CURSOR_DUMMY 0x80000000 79/**
80 * mdp_ctl_flush_mask...() - Register FLUSH masks
81 *
82 * These masks are used to specify which block(s) need to be flushed
 83 * through the @flush_mask parameter of mdp5_ctl_commit(.., flush_mask).
84 */
85u32 mdp_ctl_flush_mask_lm(int lm);
86u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe);
87u32 mdp_ctl_flush_mask_cursor(int cursor_id);
88u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf);
85 89
86static inline u32 mdp_ctl_flush_mask_cursor(int cursor_id) 90/* @flush_mask: see CTL flush masks definitions below */
87{ 91int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask);
88 /* TODO: use id once multiple cursor support is present */
89 (void)cursor_id;
90 92
91 return MDP5_CTL_FLUSH_CURSOR_DUMMY; 93void mdp5_ctl_release(struct mdp5_ctl *ctl);
92}
93 94
94static inline u32 mdp_ctl_flush_mask_lm(int lm)
95{
96 switch (lm) {
97 case 0: return MDP5_CTL_FLUSH_LM0;
98 case 1: return MDP5_CTL_FLUSH_LM1;
99 case 2: return MDP5_CTL_FLUSH_LM2;
100 case 5: return MDP5_CTL_FLUSH_LM5;
101 default: return 0;
102 }
103}
104 95
105static inline u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe)
106{
107 switch (pipe) {
108 case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
109 case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
110 case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
111 case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
112 case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
113 case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
114 case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
115 case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
116 case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
117 case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
118 default: return 0;
119 }
120}
121 96
122#endif /* __MDP5_CTL_H__ */ 97#endif /* __MDP5_CTL_H__ */
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index af0e02fa4f48..1188f4bf1e60 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -23,8 +23,7 @@
23 23
24struct mdp5_encoder { 24struct mdp5_encoder {
25 struct drm_encoder base; 25 struct drm_encoder base;
26 int intf; 26 struct mdp5_interface intf;
27 enum mdp5_intf intf_id;
28 spinlock_t intf_lock; /* protect REG_MDP5_INTF_* registers */ 27 spinlock_t intf_lock; /* protect REG_MDP5_INTF_* registers */
29 bool enabled; 28 bool enabled;
30 uint32_t bsc; 29 uint32_t bsc;
@@ -126,7 +125,7 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
126 struct mdp5_kms *mdp5_kms = get_kms(encoder); 125 struct mdp5_kms *mdp5_kms = get_kms(encoder);
127 struct drm_device *dev = encoder->dev; 126 struct drm_device *dev = encoder->dev;
128 struct drm_connector *connector; 127 struct drm_connector *connector;
129 int intf = mdp5_encoder->intf; 128 int intf = mdp5_encoder->intf.num;
130 uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol; 129 uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol;
131 uint32_t display_v_start, display_v_end; 130 uint32_t display_v_start, display_v_end;
132 uint32_t hsync_start_x, hsync_end_x; 131 uint32_t hsync_start_x, hsync_end_x;
@@ -188,7 +187,7 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
188 * DISPLAY_V_START = (VBP * HCYCLE) + HBP 187 * DISPLAY_V_START = (VBP * HCYCLE) + HBP
189 * DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP 188 * DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP
190 */ 189 */
191 if (mdp5_encoder->intf_id == INTF_eDP) { 190 if (mdp5_encoder->intf.type == INTF_eDP) {
192 display_v_start += mode->htotal - mode->hsync_start; 191 display_v_start += mode->htotal - mode->hsync_start;
193 display_v_end -= mode->hsync_start - mode->hdisplay; 192 display_v_end -= mode->hsync_start - mode->hdisplay;
194 } 193 }
@@ -218,21 +217,29 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
218 mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(intf), 0x3); /* frame+line? */ 217 mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(intf), 0x3); /* frame+line? */
219 218
220 spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); 219 spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
220
221 mdp5_crtc_set_intf(encoder->crtc, &mdp5_encoder->intf);
221} 222}
222 223
223static void mdp5_encoder_disable(struct drm_encoder *encoder) 224static void mdp5_encoder_disable(struct drm_encoder *encoder)
224{ 225{
225 struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); 226 struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
226 struct mdp5_kms *mdp5_kms = get_kms(encoder); 227 struct mdp5_kms *mdp5_kms = get_kms(encoder);
227 int intf = mdp5_encoder->intf; 228 struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc);
229 int lm = mdp5_crtc_get_lm(encoder->crtc);
230 struct mdp5_interface *intf = &mdp5_encoder->intf;
231 int intfn = mdp5_encoder->intf.num;
228 unsigned long flags; 232 unsigned long flags;
229 233
230 if (WARN_ON(!mdp5_encoder->enabled)) 234 if (WARN_ON(!mdp5_encoder->enabled))
231 return; 235 return;
232 236
237 mdp5_ctl_set_encoder_state(ctl, false);
238
233 spin_lock_irqsave(&mdp5_encoder->intf_lock, flags); 239 spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
234 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 0); 240 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 0);
235 spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); 241 spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
242 mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf));
236 243
237 /* 244 /*
238 * Wait for a vsync so we know the ENABLE=0 latched before 245 * Wait for a vsync so we know the ENABLE=0 latched before
@@ -242,7 +249,7 @@ static void mdp5_encoder_disable(struct drm_encoder *encoder)
242 * the settings changes for the new modeset (like new 249 * the settings changes for the new modeset (like new
243 * scanout buffer) don't latch properly.. 250 * scanout buffer) don't latch properly..
244 */ 251 */
245 mdp_irq_wait(&mdp5_kms->base, intf2vblank(intf)); 252 mdp_irq_wait(&mdp5_kms->base, intf2vblank(lm, intf));
246 253
247 bs_set(mdp5_encoder, 0); 254 bs_set(mdp5_encoder, 0);
248 255
@@ -253,19 +260,21 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder)
253{ 260{
254 struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); 261 struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
255 struct mdp5_kms *mdp5_kms = get_kms(encoder); 262 struct mdp5_kms *mdp5_kms = get_kms(encoder);
256 int intf = mdp5_encoder->intf; 263 struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc);
264 struct mdp5_interface *intf = &mdp5_encoder->intf;
265 int intfn = mdp5_encoder->intf.num;
257 unsigned long flags; 266 unsigned long flags;
258 267
259 if (WARN_ON(mdp5_encoder->enabled)) 268 if (WARN_ON(mdp5_encoder->enabled))
260 return; 269 return;
261 270
262 mdp5_crtc_set_intf(encoder->crtc, mdp5_encoder->intf,
263 mdp5_encoder->intf_id);
264
265 bs_set(mdp5_encoder, 1); 271 bs_set(mdp5_encoder, 1);
266 spin_lock_irqsave(&mdp5_encoder->intf_lock, flags); 272 spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
267 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1); 273 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 1);
268 spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); 274 spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
275 mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf));
276
277 mdp5_ctl_set_encoder_state(ctl, true);
269 278
270 mdp5_encoder->enabled = true; 279 mdp5_encoder->enabled = true;
271} 280}
@@ -277,12 +286,51 @@ static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = {
277 .enable = mdp5_encoder_enable, 286 .enable = mdp5_encoder_enable,
278}; 287};
279 288
289int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
290 struct drm_encoder *slave_encoder)
291{
292 struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
293 struct mdp5_kms *mdp5_kms;
294 int intf_num;
295 u32 data = 0;
296
297 if (!encoder || !slave_encoder)
298 return -EINVAL;
299
300 mdp5_kms = get_kms(encoder);
301 intf_num = mdp5_encoder->intf.num;
302
 303	/* Switch the slave encoder's TimingGen Sync mode
 304	 * to use the master's enable signal for the slave encoder.
305 */
306 if (intf_num == 1)
307 data |= MDP5_SPLIT_DPL_LOWER_INTF2_TG_SYNC;
308 else if (intf_num == 2)
309 data |= MDP5_SPLIT_DPL_LOWER_INTF1_TG_SYNC;
310 else
311 return -EINVAL;
312
 313	/* Make sure clocks are on when connectors call this function. */
314 mdp5_enable(mdp5_kms);
315 mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0),
316 MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN);
317 /* Dumb Panel, Sync mode */
318 mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, 0);
319 mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER, data);
320 mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1);
321 mdp5_disable(mdp5_kms);
322
323 return 0;
324}
325
280/* initialize encoder */ 326/* initialize encoder */
281struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, int intf, 327struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
282 enum mdp5_intf intf_id) 328 struct mdp5_interface *intf)
283{ 329{
284 struct drm_encoder *encoder = NULL; 330 struct drm_encoder *encoder = NULL;
285 struct mdp5_encoder *mdp5_encoder; 331 struct mdp5_encoder *mdp5_encoder;
332 int enc_type = (intf->type == INTF_DSI) ?
333 DRM_MODE_ENCODER_DSI : DRM_MODE_ENCODER_TMDS;
286 int ret; 334 int ret;
287 335
288 mdp5_encoder = kzalloc(sizeof(*mdp5_encoder), GFP_KERNEL); 336 mdp5_encoder = kzalloc(sizeof(*mdp5_encoder), GFP_KERNEL);
@@ -291,14 +339,13 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, int intf,
291 goto fail; 339 goto fail;
292 } 340 }
293 341
294 mdp5_encoder->intf = intf; 342 memcpy(&mdp5_encoder->intf, intf, sizeof(mdp5_encoder->intf));
295 mdp5_encoder->intf_id = intf_id;
296 encoder = &mdp5_encoder->base; 343 encoder = &mdp5_encoder->base;
297 344
298 spin_lock_init(&mdp5_encoder->intf_lock); 345 spin_lock_init(&mdp5_encoder->intf_lock);
299 346
300 drm_encoder_init(dev, encoder, &mdp5_encoder_funcs, 347 drm_encoder_init(dev, encoder, &mdp5_encoder_funcs, enc_type);
301 DRM_MODE_ENCODER_TMDS); 348
302 drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs); 349 drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs);
303 350
304 bs_init(mdp5_encoder); 351 bs_init(mdp5_encoder);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
index a9407105b9b7..33bd4c6160dd 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -23,7 +23,7 @@
23 23
24void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask) 24void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask)
25{ 25{
26 mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_INTR_EN, irqmask); 26 mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_MDP_INTR_EN(0), irqmask);
27} 27}
28 28
29static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus) 29static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
@@ -35,8 +35,8 @@ void mdp5_irq_preinstall(struct msm_kms *kms)
35{ 35{
36 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 36 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
37 mdp5_enable(mdp5_kms); 37 mdp5_enable(mdp5_kms);
38 mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff); 38 mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_CLEAR(0), 0xffffffff);
39 mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000); 39 mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_EN(0), 0x00000000);
40 mdp5_disable(mdp5_kms); 40 mdp5_disable(mdp5_kms);
41} 41}
42 42
@@ -61,7 +61,7 @@ void mdp5_irq_uninstall(struct msm_kms *kms)
61{ 61{
62 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 62 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
63 mdp5_enable(mdp5_kms); 63 mdp5_enable(mdp5_kms);
64 mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000); 64 mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_EN(0), 0x00000000);
65 mdp5_disable(mdp5_kms); 65 mdp5_disable(mdp5_kms);
66} 66}
67 67
@@ -73,8 +73,8 @@ static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
73 unsigned int id; 73 unsigned int id;
74 uint32_t status; 74 uint32_t status;
75 75
76 status = mdp5_read(mdp5_kms, REG_MDP5_INTR_STATUS); 76 status = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_STATUS(0));
77 mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, status); 77 mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_CLEAR(0), status);
78 78
79 VERB("status=%08x", status); 79 VERB("status=%08x", status);
80 80
@@ -91,13 +91,13 @@ irqreturn_t mdp5_irq(struct msm_kms *kms)
91 struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms); 91 struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
92 uint32_t intr; 92 uint32_t intr;
93 93
94 intr = mdp5_read(mdp5_kms, REG_MDP5_HW_INTR_STATUS); 94 intr = mdp5_read(mdp5_kms, REG_MDSS_HW_INTR_STATUS);
95 95
96 VERB("intr=%08x", intr); 96 VERB("intr=%08x", intr);
97 97
98 if (intr & MDP5_HW_INTR_STATUS_INTR_MDP) { 98 if (intr & MDSS_HW_INTR_STATUS_INTR_MDP) {
99 mdp5_irq_mdp(mdp_kms); 99 mdp5_irq_mdp(mdp_kms);
100 intr &= ~MDP5_HW_INTR_STATUS_INTR_MDP; 100 intr &= ~MDSS_HW_INTR_STATUS_INTR_MDP;
101 } 101 }
102 102
103 while (intr) { 103 while (intr) {
@@ -128,10 +128,10 @@ void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
128 * can register to get their irq's delivered 128 * can register to get their irq's delivered
129 */ 129 */
130 130
131#define VALID_IRQS (MDP5_HW_INTR_STATUS_INTR_DSI0 | \ 131#define VALID_IRQS (MDSS_HW_INTR_STATUS_INTR_DSI0 | \
132 MDP5_HW_INTR_STATUS_INTR_DSI1 | \ 132 MDSS_HW_INTR_STATUS_INTR_DSI1 | \
133 MDP5_HW_INTR_STATUS_INTR_HDMI | \ 133 MDSS_HW_INTR_STATUS_INTR_HDMI | \
134 MDP5_HW_INTR_STATUS_INTR_EDP) 134 MDSS_HW_INTR_STATUS_INTR_EDP)
135 135
136static void mdp5_hw_mask_irq(struct irq_data *irqd) 136static void mdp5_hw_mask_irq(struct irq_data *irqd)
137{ 137{
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 92b61db5754c..dfa8beb9343a 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -58,7 +58,7 @@ static int mdp5_hw_init(struct msm_kms *kms)
58 */ 58 */
59 59
60 spin_lock_irqsave(&mdp5_kms->resource_lock, flags); 60 spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
61 mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0); 61 mdp5_write(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0), 0);
62 spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags); 62 spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
63 63
64 mdp5_ctlm_hw_reset(mdp5_kms->ctlm); 64 mdp5_ctlm_hw_reset(mdp5_kms->ctlm);
@@ -86,6 +86,18 @@ static long mdp5_round_pixclk(struct msm_kms *kms, unsigned long rate,
86 return rate; 86 return rate;
87} 87}
88 88
89static int mdp5_set_split_display(struct msm_kms *kms,
90 struct drm_encoder *encoder,
91 struct drm_encoder *slave_encoder,
92 bool is_cmd_mode)
93{
94 if (is_cmd_mode)
95 return mdp5_cmd_encoder_set_split_display(encoder,
96 slave_encoder);
97 else
98 return mdp5_encoder_set_split_display(encoder, slave_encoder);
99}
100
89static void mdp5_preclose(struct msm_kms *kms, struct drm_file *file) 101static void mdp5_preclose(struct msm_kms *kms, struct drm_file *file)
90{ 102{
91 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 103 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
@@ -131,6 +143,7 @@ static const struct mdp_kms_funcs kms_funcs = {
131 .complete_commit = mdp5_complete_commit, 143 .complete_commit = mdp5_complete_commit,
132 .get_format = mdp_get_format, 144 .get_format = mdp_get_format,
133 .round_pixclk = mdp5_round_pixclk, 145 .round_pixclk = mdp5_round_pixclk,
146 .set_split_display = mdp5_set_split_display,
134 .preclose = mdp5_preclose, 147 .preclose = mdp5_preclose,
135 .destroy = mdp5_destroy, 148 .destroy = mdp5_destroy,
136 }, 149 },
@@ -161,6 +174,134 @@ int mdp5_enable(struct mdp5_kms *mdp5_kms)
161 return 0; 174 return 0;
162} 175}
163 176
177static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
178 enum mdp5_intf_type intf_type, int intf_num,
179 enum mdp5_intf_mode intf_mode)
180{
181 struct drm_device *dev = mdp5_kms->dev;
182 struct msm_drm_private *priv = dev->dev_private;
183 struct drm_encoder *encoder;
184 struct mdp5_interface intf = {
185 .num = intf_num,
186 .type = intf_type,
187 .mode = intf_mode,
188 };
189
190 if ((intf_type == INTF_DSI) &&
191 (intf_mode == MDP5_INTF_DSI_MODE_COMMAND))
192 encoder = mdp5_cmd_encoder_init(dev, &intf);
193 else
194 encoder = mdp5_encoder_init(dev, &intf);
195
196 if (IS_ERR(encoder)) {
197 dev_err(dev->dev, "failed to construct encoder\n");
198 return encoder;
199 }
200
201 encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
202 priv->encoders[priv->num_encoders++] = encoder;
203
204 return encoder;
205}
206
207static int get_dsi_id_from_intf(const struct mdp5_cfg_hw *hw_cfg, int intf_num)
208{
209 const int intf_cnt = hw_cfg->intf.count;
210 const u32 *intfs = hw_cfg->intfs;
211 int id = 0, i;
212
213 for (i = 0; i < intf_cnt; i++) {
214 if (intfs[i] == INTF_DSI) {
215 if (intf_num == i)
216 return id;
217
218 id++;
219 }
220 }
221
222 return -EINVAL;
223}
224
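
A small standalone check of the mapping logic above, with an assumed interface
table (the real one comes from mdp5_cfg): DSI ids simply count the DSI entries
in table order.

    #include <stdio.h>

    enum { INTF_DISABLED = 0, INTF_DSI = 1, INTF_HDMI = 3 }; /* assumed subset */

    static int dsi_id_from_intf(const unsigned *intfs, int intf_cnt, int intf_num)
    {
            int id = 0, i;

            for (i = 0; i < intf_cnt; i++) {
                    if (intfs[i] == INTF_DSI) {
                            if (intf_num == i)
                                    return id;
                            id++;
                    }
            }
            return -1;
    }

    int main(void)
    {
            /* assumed msm8x16-like table: INTF1 and INTF2 are DSI */
            const unsigned intfs[] = { INTF_DISABLED, INTF_DSI, INTF_DSI, INTF_HDMI };

            printf("%d\n", dsi_id_from_intf(intfs, 4, 1)); /* 0 */
            printf("%d\n", dsi_id_from_intf(intfs, 4, 2)); /* 1 */
            printf("%d\n", dsi_id_from_intf(intfs, 4, 3)); /* -1: not a DSI intf */
            return 0;
    }
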
225static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
226{
227 struct drm_device *dev = mdp5_kms->dev;
228 struct msm_drm_private *priv = dev->dev_private;
229 const struct mdp5_cfg_hw *hw_cfg =
230 mdp5_cfg_get_hw_config(mdp5_kms->cfg);
231 enum mdp5_intf_type intf_type = hw_cfg->intfs[intf_num];
232 struct drm_encoder *encoder;
233 int ret = 0;
234
235 switch (intf_type) {
236 case INTF_DISABLED:
237 break;
238 case INTF_eDP:
239 if (!priv->edp)
240 break;
241
242 encoder = construct_encoder(mdp5_kms, INTF_eDP, intf_num,
243 MDP5_INTF_MODE_NONE);
244 if (IS_ERR(encoder)) {
245 ret = PTR_ERR(encoder);
246 break;
247 }
248
249 ret = msm_edp_modeset_init(priv->edp, dev, encoder);
250 break;
251 case INTF_HDMI:
252 if (!priv->hdmi)
253 break;
254
255 encoder = construct_encoder(mdp5_kms, INTF_HDMI, intf_num,
256 MDP5_INTF_MODE_NONE);
257 if (IS_ERR(encoder)) {
258 ret = PTR_ERR(encoder);
259 break;
260 }
261
262 ret = hdmi_modeset_init(priv->hdmi, dev, encoder);
263 break;
264 case INTF_DSI:
265 {
266 int dsi_id = get_dsi_id_from_intf(hw_cfg, intf_num);
267 struct drm_encoder *dsi_encs[MSM_DSI_ENCODER_NUM];
268 enum mdp5_intf_mode mode;
269 int i;
270
271 if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) {
272 dev_err(dev->dev, "failed to find dsi from intf %d\n",
273 intf_num);
274 ret = -EINVAL;
275 break;
276 }
277
278 if (!priv->dsi[dsi_id])
279 break;
280
281 for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) {
282 mode = (i == MSM_DSI_CMD_ENCODER_ID) ?
283 MDP5_INTF_DSI_MODE_COMMAND :
284 MDP5_INTF_DSI_MODE_VIDEO;
285 dsi_encs[i] = construct_encoder(mdp5_kms, INTF_DSI,
286 intf_num, mode);
 287			if (IS_ERR(dsi_encs[i])) {
 288				ret = PTR_ERR(dsi_encs[i]);
289 break;
290 }
291 }
292
293 ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, dsi_encs);
294 break;
295 }
296 default:
297 dev_err(dev->dev, "unknown intf: %d\n", intf_type);
298 ret = -EINVAL;
299 break;
300 }
301
302 return ret;
303}
304
164static int modeset_init(struct mdp5_kms *mdp5_kms) 305static int modeset_init(struct mdp5_kms *mdp5_kms)
165{ 306{
166 static const enum mdp5_pipe crtcs[] = { 307 static const enum mdp5_pipe crtcs[] = {
@@ -171,7 +312,6 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
171 }; 312 };
172 struct drm_device *dev = mdp5_kms->dev; 313 struct drm_device *dev = mdp5_kms->dev;
173 struct msm_drm_private *priv = dev->dev_private; 314 struct msm_drm_private *priv = dev->dev_private;
174 struct drm_encoder *encoder;
175 const struct mdp5_cfg_hw *hw_cfg; 315 const struct mdp5_cfg_hw *hw_cfg;
176 int i, ret; 316 int i, ret;
177 317
@@ -222,44 +362,13 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
222 } 362 }
223 } 363 }
224 364
225	if (priv->hdmi) {	 365	/* Construct encoders and initialize connector devices
226 /* Construct encoder for HDMI: */ 366 * for each external display interface.
227 encoder = mdp5_encoder_init(dev, 3, INTF_HDMI); 367 */
228 if (IS_ERR(encoder)) { 368 for (i = 0; i < ARRAY_SIZE(hw_cfg->intfs); i++) {
229 dev_err(dev->dev, "failed to construct encoder\n"); 369 ret = modeset_init_intf(mdp5_kms, i);
230 ret = PTR_ERR(encoder); 370 if (ret)
231 goto fail;
232 }
233
234 encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;;
235 priv->encoders[priv->num_encoders++] = encoder;
236
237 ret = hdmi_modeset_init(priv->hdmi, dev, encoder);
238 if (ret) {
239 dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
240 goto fail;
241 }
242 }
243
244 if (priv->edp) {
245 /* Construct encoder for eDP: */
246 encoder = mdp5_encoder_init(dev, 0, INTF_eDP);
247 if (IS_ERR(encoder)) {
248 dev_err(dev->dev, "failed to construct eDP encoder\n");
249 ret = PTR_ERR(encoder);
250 goto fail;
251 }
252
253 encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
254 priv->encoders[priv->num_encoders++] = encoder;
255
256 /* Construct bridge/connector for eDP: */
257 ret = msm_edp_modeset_init(priv->edp, dev, encoder);
258 if (ret) {
259 dev_err(dev->dev, "failed to initialize eDP: %d\n",
260 ret);
261 goto fail; 371 goto fail;
262 }
263 } 372 }
264 373
265 return 0; 374 return 0;
@@ -274,11 +383,11 @@ static void read_hw_revision(struct mdp5_kms *mdp5_kms,
274 uint32_t version; 383 uint32_t version;
275 384
276 mdp5_enable(mdp5_kms); 385 mdp5_enable(mdp5_kms);
277 version = mdp5_read(mdp5_kms, REG_MDP5_MDP_VERSION); 386 version = mdp5_read(mdp5_kms, REG_MDSS_HW_VERSION);
278 mdp5_disable(mdp5_kms); 387 mdp5_disable(mdp5_kms);
279 388
280 *major = FIELD(version, MDP5_MDP_VERSION_MAJOR); 389 *major = FIELD(version, MDSS_HW_VERSION_MAJOR);
281 *minor = FIELD(version, MDP5_MDP_VERSION_MINOR); 390 *minor = FIELD(version, MDSS_HW_VERSION_MINOR);
282 391
283 DBG("MDP5 version v%d.%d", *major, *minor); 392 DBG("MDP5 version v%d.%d", *major, *minor);
284} 393}
@@ -321,6 +430,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
321 430
322 mdp5_kms->dev = dev; 431 mdp5_kms->dev = dev;
323 432
433 /* mdp5_kms->mmio actually represents the MDSS base address */
324 mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5"); 434 mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
325 if (IS_ERR(mdp5_kms->mmio)) { 435 if (IS_ERR(mdp5_kms->mmio)) {
326 ret = PTR_ERR(mdp5_kms->mmio); 436 ret = PTR_ERR(mdp5_kms->mmio);
@@ -403,8 +513,12 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
403 * we don't disable): 513 * we don't disable):
404 */ 514 */
405 mdp5_enable(mdp5_kms); 515 mdp5_enable(mdp5_kms);
406 for (i = 0; i < config->hw->intf.count; i++) 516 for (i = 0; i < MDP5_INTF_NUM_MAX; i++) {
517 if (!config->hw->intf.base[i] ||
518 mdp5_cfg_intf_is_virtual(config->hw->intfs[i]))
519 continue;
407 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0); 520 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
521 }
408 mdp5_disable(mdp5_kms); 522 mdp5_disable(mdp5_kms);
409 mdelay(16); 523 mdelay(16);
410 524
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 49d011e8835b..2c0de174cc09 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -54,7 +54,7 @@ struct mdp5_kms {
54 54
55 /* 55 /*
56 * lock to protect access to global resources: ie., following register: 56 * lock to protect access to global resources: ie., following register:
57 * - REG_MDP5_DISP_INTF_SEL 57 * - REG_MDP5_MDP_DISP_INTF_SEL
58 */ 58 */
59 spinlock_t resource_lock; 59 spinlock_t resource_lock;
60 60
@@ -94,6 +94,24 @@ struct mdp5_plane_state {
94#define to_mdp5_plane_state(x) \ 94#define to_mdp5_plane_state(x) \
95 container_of(x, struct mdp5_plane_state, base) 95 container_of(x, struct mdp5_plane_state, base)
96 96
97enum mdp5_intf_mode {
98 MDP5_INTF_MODE_NONE = 0,
99
100 /* Modes used for DSI interface (INTF_DSI type): */
101 MDP5_INTF_DSI_MODE_VIDEO,
102 MDP5_INTF_DSI_MODE_COMMAND,
103
104 /* Modes used for WB interface (INTF_WB type): */
105 MDP5_INTF_WB_MODE_BLOCK,
106 MDP5_INTF_WB_MODE_LINE,
107};
108
109struct mdp5_interface {
110 int num; /* display interface number */
111 enum mdp5_intf_type type;
112 enum mdp5_intf_mode mode;
113};
114
97static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data) 115static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data)
98{ 116{
99 msm_writel(data, mdp5_kms->mmio + reg); 117 msm_writel(data, mdp5_kms->mmio + reg);
@@ -130,9 +148,9 @@ static inline int pipe2nclients(enum mdp5_pipe pipe)
 	}
 }
 
-static inline uint32_t intf2err(int intf)
+static inline uint32_t intf2err(int intf_num)
 {
-	switch (intf) {
+	switch (intf_num) {
 	case 0:  return MDP5_IRQ_INTF0_UNDER_RUN;
 	case 1:  return MDP5_IRQ_INTF1_UNDER_RUN;
 	case 2:  return MDP5_IRQ_INTF2_UNDER_RUN;
@@ -141,9 +159,23 @@ static inline uint32_t intf2err(int intf)
 	}
 }
 
-static inline uint32_t intf2vblank(int intf)
+#define GET_PING_PONG_ID(layer_mixer)	((layer_mixer == 5) ? 3 : layer_mixer)
+static inline uint32_t intf2vblank(int lm, struct mdp5_interface *intf)
 {
-	switch (intf) {
+	/*
+	 * In case of DSI Command Mode, the Ping Pong's read pointer IRQ
+	 * acts as a Vblank signal. The Ping Pong buffer used is bound to
+	 * layer mixer.
+	 */
+
+	if ((intf->type == INTF_DSI) &&
+			(intf->mode == MDP5_INTF_DSI_MODE_COMMAND))
+		return MDP5_IRQ_PING_PONG_0_RD_PTR << GET_PING_PONG_ID(lm);
+
+	if (intf->type == INTF_WB)
+		return MDP5_IRQ_WB_2_DONE;
+
+	switch (intf->num) {
 	case 0:  return MDP5_IRQ_INTF0_VSYNC;
 	case 1:  return MDP5_IRQ_INTF1_VSYNC;
 	case 2:  return MDP5_IRQ_INTF2_VSYNC;
@@ -152,6 +184,11 @@ static inline uint32_t intf2vblank(int intf)
 	}
 }
 
+static inline uint32_t lm2ppdone(int lm)
+{
+	return MDP5_IRQ_PING_PONG_0_DONE << GET_PING_PONG_ID(lm);
+}
+
 int mdp5_disable(struct mdp5_kms *mdp5_kms);
 int mdp5_enable(struct mdp5_kms *mdp5_kms);
 
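Why intf2vblank() now takes both a layer mixer and an interface: a command-mode DSI panel has no free-running timing engine, so the "vblank" event must come from the ping-pong read pointer bound to the CRTC's layer mixer. A worked example derived directly from the code above (interface number illustrative):

    struct mdp5_interface intf = {
    	.num  = 1,
    	.type = INTF_DSI,
    	.mode = MDP5_INTF_DSI_MODE_COMMAND,
    };

    /* command mode on layer mixer 0: wait on the ping-pong read
     * pointer IRQ, not on an INTF VSYNC: */
    uint32_t mask = intf2vblank(0, &intf);
    /* == MDP5_IRQ_PING_PONG_0_RD_PTR << GET_PING_PONG_ID(0) */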
@@ -197,13 +234,33 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev,
 uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
 
 int mdp5_crtc_get_lm(struct drm_crtc *crtc);
+struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc);
 void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
-void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
-		enum mdp5_intf intf_id);
+void mdp5_crtc_set_intf(struct drm_crtc *crtc, struct mdp5_interface *intf);
 struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
 		struct drm_plane *plane, int id);
 
-struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, int intf,
-		enum mdp5_intf intf_id);
+struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
+		struct mdp5_interface *intf);
+int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
+		struct drm_encoder *slave_encoder);
+
+#ifdef CONFIG_DRM_MSM_DSI
+struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
+		struct mdp5_interface *intf);
+int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
+		struct drm_encoder *slave_encoder);
+#else
+static inline struct drm_encoder *mdp5_cmd_encoder_init(
+			struct drm_device *dev, struct mdp5_interface *intf)
+{
+	return ERR_PTR(-EINVAL);
+}
+static inline int mdp5_cmd_encoder_set_split_display(
+	struct drm_encoder *encoder, struct drm_encoder *slave_encoder)
+{
+	return -EINVAL;
+}
+#endif
 
 #endif /* __MDP5_KMS_H__ */
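The net effect of the prototype changes: callers now describe what they are driving (interface number, type, mode) instead of passing a bare interface index plus an enum. A hedged sketch of constructing a video-mode DSI encoder under the new API (error handling elided; an illustration, not a quote from the merged code):

    struct mdp5_interface intf = {
    	.num  = 1,
    	.type = INTF_DSI,
    	.mode = MDP5_INTF_DSI_MODE_VIDEO,
    };
    struct drm_encoder *encoder = mdp5_encoder_init(dev, &intf);
    if (IS_ERR(encoder))
    	return PTR_ERR(encoder);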
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 6bd48e246283..18a3d203b174 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -507,8 +507,8 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
 	spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
 
 	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe),
-			MDP5_PIPE_SRC_IMG_SIZE_WIDTH(src_w) |
-			MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(src_h));
+			MDP5_PIPE_SRC_IMG_SIZE_WIDTH(fb->width) |
+			MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(fb->height));
 
 	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_SIZE(pipe),
 			MDP5_PIPE_SRC_SIZE_WIDTH(src_w) |
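This is the "PIPE source image size" fix called out in the merge summary: SRC_IMG_SIZE must describe the whole surface the pipe fetches from, while SRC_SIZE is the cropped region actually displayed, so programming both with src_w/src_h broke cropped scanout. With illustrative numbers, showing a 1280x720 window of a 1920x1080 framebuffer now programs:

    mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe),
    		MDP5_PIPE_SRC_IMG_SIZE_WIDTH(1920) |	/* fb->width  */
    		MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(1080));	/* fb->height */

    mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_SIZE(pipe),
    		MDP5_PIPE_SRC_SIZE_WIDTH(1280) |	/* src_w (crop) */
    		MDP5_PIPE_SRC_SIZE_HEIGHT(720));	/* src_h (crop) */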
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
index 1f795af89680..16702aecf0df 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
@@ -43,7 +43,7 @@
  *     set.
  *
  *  2) mdp5_smp_configure():
- *     As hw is programmed, before FLUSH, MDP5_SMP_ALLOC registers
+ *     As hw is programmed, before FLUSH, MDP5_MDP_SMP_ALLOC registers
  *     are configured for the union(pending, inuse)
  *
  *  3) mdp5_smp_commit():
@@ -74,7 +74,7 @@ struct mdp5_smp {
 	spinlock_t state_lock;
 	mdp5_smp_state_t state; /* to track smp allocation amongst pipes: */
 
-	struct mdp5_client_smp_state client_state[CID_MAX];
+	struct mdp5_client_smp_state client_state[MAX_CLIENTS];
 };
 
 static inline
@@ -85,27 +85,31 @@ struct mdp5_kms *get_kms(struct mdp5_smp *smp)
 	return to_mdp5_kms(to_mdp_kms(priv->kms));
 }
 
-static inline enum mdp5_client_id pipe2client(enum mdp5_pipe pipe, int plane)
+static inline u32 pipe2client(enum mdp5_pipe pipe, int plane)
 {
-	WARN_ON(plane >= pipe2nclients(pipe));
-	switch (pipe) {
-	case SSPP_VIG0: return CID_VIG0_Y + plane;
-	case SSPP_VIG1: return CID_VIG1_Y + plane;
-	case SSPP_VIG2: return CID_VIG2_Y + plane;
-	case SSPP_RGB0: return CID_RGB0;
-	case SSPP_RGB1: return CID_RGB1;
-	case SSPP_RGB2: return CID_RGB2;
-	case SSPP_DMA0: return CID_DMA0_Y + plane;
-	case SSPP_DMA1: return CID_DMA1_Y + plane;
-	case SSPP_VIG3: return CID_VIG3_Y + plane;
-	case SSPP_RGB3: return CID_RGB3;
-	default:	return CID_UNUSED;
-	}
+#define CID_UNUSED	0
+
+	if (WARN_ON(plane >= pipe2nclients(pipe)))
+		return CID_UNUSED;
+
+	/*
+	 * Note on SMP clients:
+	 * For ViG pipes, fetch Y/Cr/Cb-components clients are always
+	 * consecutive, and in that order.
+	 *
+	 * e.g.:
+	 * if mdp5_cfg->smp.clients[SSPP_VIG0] = N,
+	 *	Y  plane's client ID is N
+	 *	Cr plane's client ID is N + 1
+	 *	Cb plane's client ID is N + 2
+	 */
+
+	return mdp5_cfg->smp.clients[pipe] + plane;
 }
 
 /* step #1: update # of blocks pending for the client: */
 static int smp_request_block(struct mdp5_smp *smp,
-		enum mdp5_client_id cid, int nblks)
+		u32 cid, int nblks)
 {
 	struct mdp5_kms *mdp5_kms = get_kms(smp);
 	const struct mdp5_cfg_hw *hw_cfg;
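The comment above is the whole contract: SMP client IDs now come from the per-SoC config table instead of a hard-coded enum. A worked example, assuming a hypothetical config where smp.clients[SSPP_VIG0] == 1:

    pipe2client(SSPP_VIG0, 0);	/* -> 1: Y  component fetch client  */
    pipe2client(SSPP_VIG0, 1);	/* -> 2: Cr component fetch client  */
    pipe2client(SSPP_VIG0, 2);	/* -> 3: Cb component fetch client  */
    pipe2client(SSPP_VIG0, 3);	/* WARN_ON fires -> CID_UNUSED (0)  */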
@@ -227,7 +231,7 @@ void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe)
 }
 
 static void update_smp_state(struct mdp5_smp *smp,
-		enum mdp5_client_id cid, mdp5_smp_state_t *assigned)
+		u32 cid, mdp5_smp_state_t *assigned)
 {
 	struct mdp5_kms *mdp5_kms = get_kms(smp);
 	int cnt = smp->blk_cnt;
@@ -237,25 +241,25 @@ static void update_smp_state(struct mdp5_smp *smp,
 		int idx = blk / 3;
 		int fld = blk % 3;
 
-		val = mdp5_read(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx));
+		val = mdp5_read(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_W_REG(0, idx));
 
 		switch (fld) {
 		case 0:
-			val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
-			val |= MDP5_SMP_ALLOC_W_REG_CLIENT0(cid);
+			val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__MASK;
+			val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0(cid);
 			break;
 		case 1:
-			val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
-			val |= MDP5_SMP_ALLOC_W_REG_CLIENT1(cid);
+			val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__MASK;
+			val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1(cid);
 			break;
 		case 2:
-			val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
-			val |= MDP5_SMP_ALLOC_W_REG_CLIENT2(cid);
+			val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__MASK;
+			val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2(cid);
 			break;
 		}
 
-		mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx), val);
-		mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(idx), val);
+		mdp5_write(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_W_REG(0, idx), val);
+		mdp5_write(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_R_REG(0, idx), val);
 	}
 }
@@ -267,7 +271,7 @@ void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe)
 	int i;
 
 	for (i = 0; i < pipe2nclients(pipe); i++) {
-		enum mdp5_client_id cid = pipe2client(pipe, i);
+		u32 cid = pipe2client(pipe, i);
 		struct mdp5_client_smp_state *ps = &smp->client_state[cid];
 
 		bitmap_or(assigned, ps->inuse, ps->pending, cnt);
@@ -283,7 +287,7 @@ void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
 	int i;
 
 	for (i = 0; i < pipe2nclients(pipe); i++) {
-		enum mdp5_client_id cid = pipe2client(pipe, i);
+		u32 cid = pipe2client(pipe, i);
 		struct mdp5_client_smp_state *ps = &smp->client_state[cid];
 
 		/*
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index a4269119f9ea..47f4dd407671 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -182,41 +182,57 @@ static int get_mdp_ver(struct platform_device *pdev)
 	return 4;
 }
 
-static int msm_load(struct drm_device *dev, unsigned long flags)
-{
-	struct platform_device *pdev = dev->platformdev;
-	struct msm_drm_private *priv;
-	struct msm_kms *kms;
-	int ret;
-
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv) {
-		dev_err(dev->dev, "failed to allocate private data\n");
-		return -ENOMEM;
-	}
+#include <linux/of_address.h>
 
-	dev->dev_private = priv;
-
-	priv->wq = alloc_ordered_workqueue("msm", 0);
-	init_waitqueue_head(&priv->fence_event);
-	init_waitqueue_head(&priv->pending_crtcs_event);
-
-	INIT_LIST_HEAD(&priv->inactive_list);
-	INIT_LIST_HEAD(&priv->fence_cbs);
+static int msm_init_vram(struct drm_device *dev)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	unsigned long size = 0;
+	int ret = 0;
 
-	drm_mode_config_init(dev);
+#ifdef CONFIG_OF
+	/* In the device-tree world, we could have a 'memory-region'
+	 * phandle, which gives us a link to our "vram". Allocating
+	 * is all nicely abstracted behind the dma api, but we need
+	 * to know the entire size to allocate it all in one go. There
+	 * are two cases:
+	 *  1) device with no IOMMU, in which case we need exclusive
+	 *     access to a VRAM carveout big enough for all gpu
+	 *     buffers
+	 *  2) device with IOMMU, but where the bootloader puts up
+	 *     a splash screen. In this case, the VRAM carveout
+	 *     need only be large enough for fbdev fb. But we need
+	 *     exclusive access to the buffer to avoid the kernel
+	 *     using those pages for other purposes (which appears
+	 *     as corruption on screen before we have a chance to
+	 *     load and do initial modeset)
+	 */
+	struct device_node *node;
+
+	node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
+	if (node) {
+		struct resource r;
+		ret = of_address_to_resource(node, 0, &r);
+		if (ret)
+			return ret;
+		size = r.end - r.start;
+		DRM_INFO("using VRAM carveout: %lx@%08x\n", size, r.start);
+	} else
+#endif
 
 	/* if we have no IOMMU, then we need to use carveout allocator.
 	 * Grab the entire CMA chunk carved out in early startup in
 	 * mach-msm:
 	 */
 	if (!iommu_present(&platform_bus_type)) {
+		DRM_INFO("using %s VRAM carveout\n", vram);
+		size = memparse(vram, NULL);
+	}
+
+	if (size) {
 		DEFINE_DMA_ATTRS(attrs);
-		unsigned long size;
 		void *p;
 
-		DBG("using %s VRAM carveout", vram);
-		size = memparse(vram, NULL);
 		priv->vram.size = size;
 
 		drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
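One subtlety in msm_init_vram() worth noting: the `} else` directly before `#endif` is deliberate. With CONFIG_OF enabled, the following `if (!iommu_present(...))` statement becomes the else-branch, so a device-tree carveout takes priority over the legacy vram= module parameter; without CONFIG_OF the `if` simply stands alone. Roughly, after preprocessing (a sketch, bodies elided):

    /* CONFIG_OF=y */
    if (node) {
    	/* size taken from the memory-region resource */
    } else
    	if (!iommu_present(&platform_bus_type)) {
    		/* fall back to the vram= carveout */
    	}

    /* CONFIG_OF=n */
    if (!iommu_present(&platform_bus_type)) {
    	/* the vram= carveout is the only option */
    }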
@@ -232,8 +248,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
 		if (!p) {
 			dev_err(dev->dev, "failed to allocate VRAM\n");
 			priv->vram.paddr = 0;
-			ret = -ENOMEM;
-			goto fail;
+			return -ENOMEM;
 		}
 
 		dev_info(dev->dev, "VRAM: %08x->%08x\n",
@@ -241,6 +256,37 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
 				(uint32_t)(priv->vram.paddr + size));
 	}
 
+	return ret;
+}
+
+static int msm_load(struct drm_device *dev, unsigned long flags)
+{
+	struct platform_device *pdev = dev->platformdev;
+	struct msm_drm_private *priv;
+	struct msm_kms *kms;
+	int ret;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		dev_err(dev->dev, "failed to allocate private data\n");
+		return -ENOMEM;
+	}
+
+	dev->dev_private = priv;
+
+	priv->wq = alloc_ordered_workqueue("msm", 0);
+	init_waitqueue_head(&priv->fence_event);
+	init_waitqueue_head(&priv->pending_crtcs_event);
+
+	INIT_LIST_HEAD(&priv->inactive_list);
+	INIT_LIST_HEAD(&priv->fence_cbs);
+
+	drm_mode_config_init(dev);
+
+	ret = msm_init_vram(dev);
+	if (ret)
+		goto fail;
+
 	platform_set_drvdata(pdev, dev);
 
 	/* Bind all our sub-components: */
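On the legacy path retained above: vram is the driver's module parameter and memparse() is the standard kernel helper that converts a size string with an optional K/M/G suffix into bytes. For example (assuming the driver's usual "16m" default):

    size = memparse("16m", NULL);	/* == 16 * 1024 * 1024 bytes */
    /* the whole carveout is then handed to drm_mm to manage */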
@@ -1030,6 +1076,7 @@ static struct platform_driver msm_platform_driver = {
 static int __init msm_drm_register(void)
 {
 	DBG("init");
+	msm_dsi_register();
 	msm_edp_register();
 	hdmi_register();
 	adreno_register();
@@ -1043,6 +1090,7 @@ static void __exit msm_drm_unregister(void)
 	hdmi_unregister();
 	adreno_unregister();
 	msm_edp_unregister();
+	msm_dsi_unregister();
 }
 
 module_init(msm_drm_register);
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 9e8d441b61c3..04db4bd1b5b6 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -82,6 +82,9 @@ struct msm_drm_private {
 	 */
 	struct msm_edp *edp;
 
+	/* DSI is shared by mdp4 and mdp5 */
+	struct msm_dsi *dsi[2];
+
 	/* when we have more than one 'msm_gpu' these need to be an array: */
 	struct msm_gpu *gpu;
 	struct msm_file_private *lastctx;
@@ -236,6 +239,32 @@ void __exit msm_edp_unregister(void);
 int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
 		struct drm_encoder *encoder);
 
+struct msm_dsi;
+enum msm_dsi_encoder_id {
+	MSM_DSI_VIDEO_ENCODER_ID = 0,
+	MSM_DSI_CMD_ENCODER_ID = 1,
+	MSM_DSI_ENCODER_NUM = 2
+};
+#ifdef CONFIG_DRM_MSM_DSI
+void __init msm_dsi_register(void);
+void __exit msm_dsi_unregister(void);
+int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
+		struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM]);
+#else
+static inline void __init msm_dsi_register(void)
+{
+}
+static inline void __exit msm_dsi_unregister(void)
+{
+}
+static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi,
+		struct drm_device *dev,
+		struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM])
+{
+	return -EINVAL;
+}
+#endif
+
 #ifdef CONFIG_DEBUG_FS
 void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
 void msm_gem_describe_objects(struct list_head *list, struct seq_file *m);
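The two-slot encoder array exists because a DSI host may end up driving either a video-mode or a command-mode panel, and which one is not known until the panel is probed, so the caller hands over both encoders up front. A hedged sketch of the calling side (mirroring the shape of the mdp5 modeset code; dev, priv, and intf are assumed to be in scope, and error handling is elided):

    struct drm_encoder *dsi_encs[MSM_DSI_ENCODER_NUM];
    int ret;

    dsi_encs[MSM_DSI_VIDEO_ENCODER_ID] = mdp5_encoder_init(dev, &intf);
    dsi_encs[MSM_DSI_CMD_ENCODER_ID]   = mdp5_cmd_encoder_init(dev, &intf);

    ret = msm_dsi_modeset_init(priv->dsi[0], dev, dsi_encs);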
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index df60f65728ff..95f6532df02d 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -110,7 +110,8 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
 	size = mode_cmd.pitches[0] * mode_cmd.height;
 	DBG("allocating %d bytes for fb %d", size, dev->primary->index);
 	mutex_lock(&dev->struct_mutex);
-	fbdev->bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | MSM_BO_WC);
+	fbdev->bo = msm_gem_new(dev, size, MSM_BO_SCANOUT |
+			MSM_BO_WC | MSM_BO_STOLEN);
 	mutex_unlock(&dev->struct_mutex);
 	if (IS_ERR(fbdev->bo)) {
 		ret = PTR_ERR(fbdev->bo);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 49dea4fb55ac..479d8af72bcb 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -32,6 +32,12 @@ static dma_addr_t physaddr(struct drm_gem_object *obj)
 			priv->vram.paddr;
 }
 
+static bool use_pages(struct drm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	return !msm_obj->vram_node;
+}
+
 /* allocate pages from VRAM carveout, used when no IOMMU: */
 static struct page **get_pages_vram(struct drm_gem_object *obj,
 		int npages)
@@ -72,7 +78,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
 	struct page **p;
 	int npages = obj->size >> PAGE_SHIFT;
 
-	if (iommu_present(&platform_bus_type))
+	if (use_pages(obj))
 		p = drm_gem_get_pages(obj);
 	else
 		p = get_pages_vram(obj, npages);
@@ -116,7 +122,7 @@ static void put_pages(struct drm_gem_object *obj)
 		sg_free_table(msm_obj->sgt);
 		kfree(msm_obj->sgt);
 
-		if (iommu_present(&platform_bus_type))
+		if (use_pages(obj))
 			drm_gem_put_pages(obj, msm_obj->pages, true, false);
 		else {
 			drm_mm_remove_node(msm_obj->vram_node);
@@ -580,6 +586,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_gem_object *msm_obj;
 	unsigned sz;
+	bool use_vram = false;
 
 	switch (flags & MSM_BO_CACHE_MASK) {
 	case MSM_BO_UNCACHED:
@@ -592,15 +599,23 @@ static int msm_gem_new_impl(struct drm_device *dev,
 		return -EINVAL;
 	}
 
-	sz = sizeof(*msm_obj);
 	if (!iommu_present(&platform_bus_type))
+		use_vram = true;
+	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
+		use_vram = true;
+
+	if (WARN_ON(use_vram && !priv->vram.size))
+		return -EINVAL;
+
+	sz = sizeof(*msm_obj);
+	if (use_vram)
 		sz += sizeof(struct drm_mm_node);
 
 	msm_obj = kzalloc(sz, GFP_KERNEL);
 	if (!msm_obj)
 		return -ENOMEM;
 
-	if (!iommu_present(&platform_bus_type))
+	if (use_vram)
 		msm_obj->vram_node = (void *)&msm_obj[1];
 
 	msm_obj->flags = flags;
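The placement logic above reduces to a small decision table: without an IOMMU every buffer must live in the carveout; with an IOMMU only buffers explicitly flagged MSM_BO_STOLEN use it, and only when a carveout was actually reserved. Restated (this just mirrors the code above):

    /*  iommu_present | MSM_BO_STOLEN | vram.size || use_vram
     *  --------------+---------------+-----------++-----------------------
     *       no       |       -       |    > 0    ||  yes
     *       yes      |      yes      |    > 0    ||  yes
     *       yes      |      no       |     -     ||  no (shmem pages)
     *       no       |       -       |     0     ||  WARN_ON -> -EINVAL
     */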
@@ -630,7 +645,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 	if (ret)
 		goto fail;
 
-	if (iommu_present(&platform_bus_type)) {
+	if (use_pages(obj)) {
 		ret = drm_gem_object_init(dev, obj, size);
 		if (ret)
 			goto fail;
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 8fbbd0594c46..85d481e29276 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -21,6 +21,9 @@
 #include <linux/reservation.h>
 #include "msm_drv.h"
 
+/* Additional internal-use only BO flags: */
+#define MSM_BO_STOLEN        0x10000000    /* try to use stolen/splash memory */
+
 struct msm_gem_object {
 	struct drm_gem_object base;
 
@@ -59,7 +62,7 @@ struct msm_gem_object {
 	struct reservation_object _resv;
 
 	/* For physically contiguous buffers.  Used when we don't have
-	 * an IOMMU.
+	 * an IOMMU.  Also used for stolen/splashscreen buffer.
 	 */
 	struct drm_mm_node *vram_node;
 };
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 3a78cb48662b..a9f17bdb4530 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -47,6 +47,10 @@ struct msm_kms_funcs {
 	const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format);
 	long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
 			struct drm_encoder *encoder);
+	int (*set_split_display)(struct msm_kms *kms,
+			struct drm_encoder *encoder,
+			struct drm_encoder *slave_encoder,
+			bool is_cmd_mode);
 	/* cleanup: */
 	void (*preclose)(struct msm_kms *kms, struct drm_file *file);
 	void (*destroy)(struct msm_kms *kms);
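This hook closes the dual-DSI loop: the DSI manager, which knows it is driving a left/right tiled panel, asks the KMS layer to gang a slave encoder to the master, with is_cmd_mode selecting between the command-mode and video-mode encoder paths added earlier. A hedged sketch of what an mdp5-side implementation could look like (an illustration of the dispatch shape, not a quote from the merged code):

    static int mdp5_set_split_display(struct msm_kms *kms,
    		struct drm_encoder *encoder,
    		struct drm_encoder *slave_encoder,
    		bool is_cmd_mode)
    {
    	if (is_cmd_mode)
    		return mdp5_cmd_encoder_set_split_display(encoder,
    							slave_encoder);
    	else
    		return mdp5_encoder_set_split_display(encoder,
    							slave_encoder);
    }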