aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorHai Li <hali@codeaurora.org>2015-03-31 14:36:33 -0400
committerRob Clark <robdclark@gmail.com>2015-04-01 19:29:38 -0400
commita689554ba6ed81cf606c16539f6ffc2a1dcdaf8e (patch)
treeeda42f5e85c4960fcc53f723b03d2e3d4b5d70cd
parent7a6dc9550d0a17e3f24b2c13582f093193cd08ef (diff)
drm/msm: Add initial DSI connector support
This change adds DSI connector support to the msm drm driver. v1: Initial change v2: - Address comments from Archit + minor clean-ups - Rebase to not depend on msm_drm_sub_dev change [Rob's comment] v3: Fix issues when initialization fails Signed-off-by: Hai Li <hali@codeaurora.org> Signed-off-by: Rob Clark <robdclark@gmail.com>
-rw-r--r--drivers/gpu/drm/msm/Kconfig11
-rw-r--r--drivers/gpu/drm/msm/Makefile4
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.c212
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.h117
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c1993
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c705
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_phy.c352
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h29
8 files changed, 3423 insertions, 0 deletions
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index bacbbb70f679..0a6f6764a37c 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -35,3 +35,14 @@ config DRM_MSM_REGISTER_LOGGING
35 Compile in support for logging register reads/writes in a format 35 Compile in support for logging register reads/writes in a format
36 that can be parsed by envytools demsm tool. If enabled, register 36 that can be parsed by envytools demsm tool. If enabled, register
37 logging can be switched on via msm.reglog=y module param. 37 logging can be switched on via msm.reglog=y module param.
38
39config DRM_MSM_DSI
40 bool "Enable DSI support in MSM DRM driver"
41 depends on DRM_MSM
42 select DRM_PANEL
43 select DRM_MIPI_DSI
44 default y
45 help
46 Choose this option if you have a need for MIPI DSI connector
47 support.
48
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 674a132fd76e..5c144cc5f8db 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -50,5 +50,9 @@ msm-y := \
50 50
51msm-$(CONFIG_DRM_MSM_FBDEV) += msm_fbdev.o 51msm-$(CONFIG_DRM_MSM_FBDEV) += msm_fbdev.o
52msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o 52msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o
53msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
54 dsi/dsi_host.o \
55 dsi/dsi_manager.o \
56 dsi/dsi_phy.o
53 57
54obj-$(CONFIG_DRM_MSM) += msm.o 58obj-$(CONFIG_DRM_MSM) += msm.o
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
new file mode 100644
index 000000000000..28d1f95a90cc
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -0,0 +1,212 @@
1/*
2 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include "dsi.h"
15
16struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi)
17{
18 if (!msm_dsi || !msm_dsi->panel)
19 return NULL;
20
21 return (msm_dsi->panel_flags & MIPI_DSI_MODE_VIDEO) ?
22 msm_dsi->encoders[MSM_DSI_VIDEO_ENCODER_ID] :
23 msm_dsi->encoders[MSM_DSI_CMD_ENCODER_ID];
24}
25
26static void dsi_destroy(struct msm_dsi *msm_dsi)
27{
28 if (!msm_dsi)
29 return;
30
31 msm_dsi_manager_unregister(msm_dsi);
32 if (msm_dsi->host) {
33 msm_dsi_host_destroy(msm_dsi->host);
34 msm_dsi->host = NULL;
35 }
36
37 platform_set_drvdata(msm_dsi->pdev, NULL);
38}
39
40static struct msm_dsi *dsi_init(struct platform_device *pdev)
41{
42 struct msm_dsi *msm_dsi = NULL;
43 int ret;
44
45 if (!pdev) {
46 dev_err(&pdev->dev, "no dsi device\n");
47 ret = -ENXIO;
48 goto fail;
49 }
50
51 msm_dsi = devm_kzalloc(&pdev->dev, sizeof(*msm_dsi), GFP_KERNEL);
52 if (!msm_dsi) {
53 ret = -ENOMEM;
54 goto fail;
55 }
56 DBG("dsi probed=%p", msm_dsi);
57
58 msm_dsi->pdev = pdev;
59 platform_set_drvdata(pdev, msm_dsi);
60
61 /* Init dsi host */
62 ret = msm_dsi_host_init(msm_dsi);
63 if (ret)
64 goto fail;
65
66 /* Register to dsi manager */
67 ret = msm_dsi_manager_register(msm_dsi);
68 if (ret)
69 goto fail;
70
71 return msm_dsi;
72
73fail:
74 if (msm_dsi)
75 dsi_destroy(msm_dsi);
76
77 return ERR_PTR(ret);
78}
79
80static int dsi_bind(struct device *dev, struct device *master, void *data)
81{
82 struct drm_device *drm = dev_get_drvdata(master);
83 struct msm_drm_private *priv = drm->dev_private;
84 struct platform_device *pdev = to_platform_device(dev);
85 struct msm_dsi *msm_dsi;
86
87 DBG("");
88 msm_dsi = dsi_init(pdev);
89 if (IS_ERR(msm_dsi))
90 return PTR_ERR(msm_dsi);
91
92 priv->dsi[msm_dsi->id] = msm_dsi;
93
94 return 0;
95}
96
97static void dsi_unbind(struct device *dev, struct device *master,
98 void *data)
99{
100 struct drm_device *drm = dev_get_drvdata(master);
101 struct msm_drm_private *priv = drm->dev_private;
102 struct msm_dsi *msm_dsi = dev_get_drvdata(dev);
103 int id = msm_dsi->id;
104
105 if (priv->dsi[id]) {
106 dsi_destroy(msm_dsi);
107 priv->dsi[id] = NULL;
108 }
109}
110
111static const struct component_ops dsi_ops = {
112 .bind = dsi_bind,
113 .unbind = dsi_unbind,
114};
115
116static int dsi_dev_probe(struct platform_device *pdev)
117{
118 return component_add(&pdev->dev, &dsi_ops);
119}
120
121static int dsi_dev_remove(struct platform_device *pdev)
122{
123 DBG("");
124 component_del(&pdev->dev, &dsi_ops);
125 return 0;
126}
127
128static const struct of_device_id dt_match[] = {
129 { .compatible = "qcom,mdss-dsi-ctrl" },
130 {}
131};
132
133static struct platform_driver dsi_driver = {
134 .probe = dsi_dev_probe,
135 .remove = dsi_dev_remove,
136 .driver = {
137 .name = "msm_dsi",
138 .of_match_table = dt_match,
139 },
140};
141
142void __init msm_dsi_register(void)
143{
144 DBG("");
145 platform_driver_register(&dsi_driver);
146}
147
148void __exit msm_dsi_unregister(void)
149{
150 DBG("");
151 platform_driver_unregister(&dsi_driver);
152}
153
154int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
155 struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM])
156{
157 struct msm_drm_private *priv = dev->dev_private;
158 int ret, i;
159
160 if (WARN_ON(!encoders[MSM_DSI_VIDEO_ENCODER_ID] ||
161 !encoders[MSM_DSI_CMD_ENCODER_ID]))
162 return -EINVAL;
163
164 msm_dsi->dev = dev;
165
166 ret = msm_dsi_host_modeset_init(msm_dsi->host, dev);
167 if (ret) {
168 dev_err(dev->dev, "failed to modeset init host: %d\n", ret);
169 goto fail;
170 }
171
172 msm_dsi->bridge = msm_dsi_manager_bridge_init(msm_dsi->id);
173 if (IS_ERR(msm_dsi->bridge)) {
174 ret = PTR_ERR(msm_dsi->bridge);
175 dev_err(dev->dev, "failed to create dsi bridge: %d\n", ret);
176 msm_dsi->bridge = NULL;
177 goto fail;
178 }
179
180 msm_dsi->connector = msm_dsi_manager_connector_init(msm_dsi->id);
181 if (IS_ERR(msm_dsi->connector)) {
182 ret = PTR_ERR(msm_dsi->connector);
183 dev_err(dev->dev, "failed to create dsi connector: %d\n", ret);
184 msm_dsi->connector = NULL;
185 goto fail;
186 }
187
188 for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) {
189 encoders[i]->bridge = msm_dsi->bridge;
190 msm_dsi->encoders[i] = encoders[i];
191 }
192
193 priv->bridges[priv->num_bridges++] = msm_dsi->bridge;
194 priv->connectors[priv->num_connectors++] = msm_dsi->connector;
195
196 return 0;
197fail:
198 if (msm_dsi) {
199 /* bridge/connector are normally destroyed by drm: */
200 if (msm_dsi->bridge) {
201 msm_dsi_manager_bridge_destroy(msm_dsi->bridge);
202 msm_dsi->bridge = NULL;
203 }
204 if (msm_dsi->connector) {
205 msm_dsi->connector->funcs->destroy(msm_dsi->connector);
206 msm_dsi->connector = NULL;
207 }
208 }
209
210 return ret;
211}
212
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
new file mode 100644
index 000000000000..10f54d4e379a
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -0,0 +1,117 @@
1/*
2 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef __DSI_CONNECTOR_H__
15#define __DSI_CONNECTOR_H__
16
17#include <linux/platform_device.h>
18
19#include "drm_crtc.h"
20#include "drm_mipi_dsi.h"
21#include "drm_panel.h"
22
23#include "msm_drv.h"
24
25#define DSI_0 0
26#define DSI_1 1
27#define DSI_MAX 2
28
29#define DSI_CLOCK_MASTER DSI_0
30#define DSI_CLOCK_SLAVE DSI_1
31
32#define DSI_LEFT DSI_0
33#define DSI_RIGHT DSI_1
34
35/* According to the current drm framework sequence, take the encoder of
36 * DSI_1 as master encoder
37 */
38#define DSI_ENCODER_MASTER DSI_1
39#define DSI_ENCODER_SLAVE DSI_0
40
41struct msm_dsi {
42 struct drm_device *dev;
43 struct platform_device *pdev;
44
45 struct drm_connector *connector;
46 struct drm_bridge *bridge;
47
48 struct mipi_dsi_host *host;
49 struct msm_dsi_phy *phy;
50 struct drm_panel *panel;
51 unsigned long panel_flags;
52 bool phy_enabled;
53
54 /* the encoders we are hooked to (outside of dsi block) */
55 struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM];
56
57 int id;
58};
59
60/* dsi manager */
61struct drm_bridge *msm_dsi_manager_bridge_init(u8 id);
62void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge);
63struct drm_connector *msm_dsi_manager_connector_init(u8 id);
64int msm_dsi_manager_phy_enable(int id,
65 const unsigned long bit_rate, const unsigned long esc_rate,
66 u32 *clk_pre, u32 *clk_post);
67void msm_dsi_manager_phy_disable(int id);
68int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg);
69bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 iova, u32 len);
70int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
71void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi);
72
73/* msm dsi */
74struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi);
75
76/* dsi host */
77int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
78 const struct mipi_dsi_msg *msg);
79void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
80 const struct mipi_dsi_msg *msg);
81int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
82 const struct mipi_dsi_msg *msg);
83int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
84 const struct mipi_dsi_msg *msg);
85void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host,
86 u32 iova, u32 len);
87int msm_dsi_host_enable(struct mipi_dsi_host *host);
88int msm_dsi_host_disable(struct mipi_dsi_host *host);
89int msm_dsi_host_power_on(struct mipi_dsi_host *host);
90int msm_dsi_host_power_off(struct mipi_dsi_host *host);
91int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
92 struct drm_display_mode *mode);
93struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host,
94 unsigned long *panel_flags);
95int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer);
96void msm_dsi_host_unregister(struct mipi_dsi_host *host);
97void msm_dsi_host_destroy(struct mipi_dsi_host *host);
98int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
99 struct drm_device *dev);
100int msm_dsi_host_init(struct msm_dsi *msm_dsi);
101
102/* dsi phy */
103struct msm_dsi_phy;
104enum msm_dsi_phy_type {
105 MSM_DSI_PHY_UNKNOWN,
106 MSM_DSI_PHY_28NM,
107 MSM_DSI_PHY_MAX
108};
109struct msm_dsi_phy *msm_dsi_phy_init(struct platform_device *pdev,
110 enum msm_dsi_phy_type type, int id);
111int msm_dsi_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel,
112 const unsigned long bit_rate, const unsigned long esc_rate);
113int msm_dsi_phy_disable(struct msm_dsi_phy *phy);
114void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy,
115 u32 *clk_pre, u32 *clk_post);
116#endif /* __DSI_CONNECTOR_H__ */
117
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
new file mode 100644
index 000000000000..fdc54e3eff55
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -0,0 +1,1993 @@
1/*
2 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/clk.h>
15#include <linux/delay.h>
16#include <linux/err.h>
17#include <linux/gpio.h>
18#include <linux/interrupt.h>
19#include <linux/of_device.h>
20#include <linux/of_gpio.h>
21#include <linux/of_irq.h>
22#include <linux/regulator/consumer.h>
23#include <linux/spinlock.h>
24#include <video/mipi_display.h>
25
26#include "dsi.h"
27#include "dsi.xml.h"
28
29#define MSM_DSI_VER_MAJOR_V2 0x02
30#define MSM_DSI_VER_MAJOR_6G 0x03
31#define MSM_DSI_6G_VER_MINOR_V1_0 0x10000000
32#define MSM_DSI_6G_VER_MINOR_V1_1 0x10010000
33#define MSM_DSI_6G_VER_MINOR_V1_1_1 0x10010001
34#define MSM_DSI_6G_VER_MINOR_V1_2 0x10020000
35#define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001
36
37#define DSI_6G_REG_SHIFT 4
38
39#define DSI_REGULATOR_MAX 8
40struct dsi_reg_entry {
41 char name[32];
42 int min_voltage;
43 int max_voltage;
44 int enable_load;
45 int disable_load;
46};
47
48struct dsi_reg_config {
49 int num;
50 struct dsi_reg_entry regs[DSI_REGULATOR_MAX];
51};
52
53struct dsi_config {
54 u32 major;
55 u32 minor;
56 u32 io_offset;
57 enum msm_dsi_phy_type phy_type;
58 struct dsi_reg_config reg_cfg;
59};
60
61static const struct dsi_config dsi_cfgs[] = {
62 {MSM_DSI_VER_MAJOR_V2, 0, 0, MSM_DSI_PHY_UNKNOWN},
63 { /* 8974 v1 */
64 .major = MSM_DSI_VER_MAJOR_6G,
65 .minor = MSM_DSI_6G_VER_MINOR_V1_0,
66 .io_offset = DSI_6G_REG_SHIFT,
67 .phy_type = MSM_DSI_PHY_28NM,
68 .reg_cfg = {
69 .num = 4,
70 .regs = {
71 {"gdsc", -1, -1, -1, -1},
72 {"vdd", 3000000, 3000000, 150000, 100},
73 {"vdda", 1200000, 1200000, 100000, 100},
74 {"vddio", 1800000, 1800000, 100000, 100},
75 },
76 },
77 },
78 { /* 8974 v2 */
79 .major = MSM_DSI_VER_MAJOR_6G,
80 .minor = MSM_DSI_6G_VER_MINOR_V1_1,
81 .io_offset = DSI_6G_REG_SHIFT,
82 .phy_type = MSM_DSI_PHY_28NM,
83 .reg_cfg = {
84 .num = 4,
85 .regs = {
86 {"gdsc", -1, -1, -1, -1},
87 {"vdd", 3000000, 3000000, 150000, 100},
88 {"vdda", 1200000, 1200000, 100000, 100},
89 {"vddio", 1800000, 1800000, 100000, 100},
90 },
91 },
92 },
93 { /* 8974 v3 */
94 .major = MSM_DSI_VER_MAJOR_6G,
95 .minor = MSM_DSI_6G_VER_MINOR_V1_1_1,
96 .io_offset = DSI_6G_REG_SHIFT,
97 .phy_type = MSM_DSI_PHY_28NM,
98 .reg_cfg = {
99 .num = 4,
100 .regs = {
101 {"gdsc", -1, -1, -1, -1},
102 {"vdd", 3000000, 3000000, 150000, 100},
103 {"vdda", 1200000, 1200000, 100000, 100},
104 {"vddio", 1800000, 1800000, 100000, 100},
105 },
106 },
107 },
108 { /* 8084 */
109 .major = MSM_DSI_VER_MAJOR_6G,
110 .minor = MSM_DSI_6G_VER_MINOR_V1_2,
111 .io_offset = DSI_6G_REG_SHIFT,
112 .phy_type = MSM_DSI_PHY_28NM,
113 .reg_cfg = {
114 .num = 4,
115 .regs = {
116 {"gdsc", -1, -1, -1, -1},
117 {"vdd", 3000000, 3000000, 150000, 100},
118 {"vdda", 1200000, 1200000, 100000, 100},
119 {"vddio", 1800000, 1800000, 100000, 100},
120 },
121 },
122 },
123 { /* 8916 */
124 .major = MSM_DSI_VER_MAJOR_6G,
125 .minor = MSM_DSI_6G_VER_MINOR_V1_3_1,
126 .io_offset = DSI_6G_REG_SHIFT,
127 .phy_type = MSM_DSI_PHY_28NM,
128 .reg_cfg = {
129 .num = 4,
130 .regs = {
131 {"gdsc", -1, -1, -1, -1},
132 {"vdd", 2850000, 2850000, 100000, 100},
133 {"vdda", 1200000, 1200000, 100000, 100},
134 {"vddio", 1800000, 1800000, 100000, 100},
135 },
136 },
137 },
138};
139
140static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
141{
142 u32 ver;
143 u32 ver_6g;
144
145 if (!major || !minor)
146 return -EINVAL;
147
148 /* From DSI6G(v3), addition of a 6G_HW_VERSION register at offset 0
149 * makes all other registers 4-byte shifted down.
150 */
151 ver_6g = msm_readl(base + REG_DSI_6G_HW_VERSION);
152 if (ver_6g == 0) {
153 ver = msm_readl(base + REG_DSI_VERSION);
154 ver = FIELD(ver, DSI_VERSION_MAJOR);
155 if (ver <= MSM_DSI_VER_MAJOR_V2) {
156 /* old versions */
157 *major = ver;
158 *minor = 0;
159 return 0;
160 } else {
161 return -EINVAL;
162 }
163 } else {
164 ver = msm_readl(base + DSI_6G_REG_SHIFT + REG_DSI_VERSION);
165 ver = FIELD(ver, DSI_VERSION_MAJOR);
166 if (ver == MSM_DSI_VER_MAJOR_6G) {
167 /* 6G version */
168 *major = ver;
169 *minor = ver_6g;
170 return 0;
171 } else {
172 return -EINVAL;
173 }
174 }
175}
176
177#define DSI_ERR_STATE_ACK 0x0000
178#define DSI_ERR_STATE_TIMEOUT 0x0001
179#define DSI_ERR_STATE_DLN0_PHY 0x0002
180#define DSI_ERR_STATE_FIFO 0x0004
181#define DSI_ERR_STATE_MDP_FIFO_UNDERFLOW 0x0008
182#define DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION 0x0010
183#define DSI_ERR_STATE_PLL_UNLOCKED 0x0020
184
185#define DSI_CLK_CTRL_ENABLE_CLKS \
186 (DSI_CLK_CTRL_AHBS_HCLK_ON | DSI_CLK_CTRL_AHBM_SCLK_ON | \
187 DSI_CLK_CTRL_PCLK_ON | DSI_CLK_CTRL_DSICLK_ON | \
188 DSI_CLK_CTRL_BYTECLK_ON | DSI_CLK_CTRL_ESCCLK_ON | \
189 DSI_CLK_CTRL_FORCE_ON_DYN_AHBM_HCLK)
190
191struct msm_dsi_host {
192 struct mipi_dsi_host base;
193
194 struct platform_device *pdev;
195 struct drm_device *dev;
196
197 int id;
198
199 void __iomem *ctrl_base;
200 struct regulator_bulk_data supplies[DSI_REGULATOR_MAX];
201 struct clk *mdp_core_clk;
202 struct clk *ahb_clk;
203 struct clk *axi_clk;
204 struct clk *mmss_misc_ahb_clk;
205 struct clk *byte_clk;
206 struct clk *esc_clk;
207 struct clk *pixel_clk;
208 u32 byte_clk_rate;
209
210 struct gpio_desc *disp_en_gpio;
211 struct gpio_desc *te_gpio;
212
213 const struct dsi_config *cfg;
214
215 struct completion dma_comp;
216 struct completion video_comp;
217 struct mutex dev_mutex;
218 struct mutex cmd_mutex;
219 struct mutex clk_mutex;
220 spinlock_t intr_lock; /* Protect interrupt ctrl register */
221
222 u32 err_work_state;
223 struct work_struct err_work;
224 struct workqueue_struct *workqueue;
225
226 struct drm_gem_object *tx_gem_obj;
227 u8 *rx_buf;
228
229 struct drm_display_mode *mode;
230
231 /* Panel info */
232 struct device_node *panel_node;
233 unsigned int channel;
234 unsigned int lanes;
235 enum mipi_dsi_pixel_format format;
236 unsigned long mode_flags;
237
238 u32 dma_cmd_ctrl_restore;
239
240 bool registered;
241 bool power_on;
242 int irq;
243};
244
245static u32 dsi_get_bpp(const enum mipi_dsi_pixel_format fmt)
246{
247 switch (fmt) {
248 case MIPI_DSI_FMT_RGB565: return 16;
249 case MIPI_DSI_FMT_RGB666_PACKED: return 18;
250 case MIPI_DSI_FMT_RGB666:
251 case MIPI_DSI_FMT_RGB888:
252 default: return 24;
253 }
254}
255
256static inline u32 dsi_read(struct msm_dsi_host *msm_host, u32 reg)
257{
258 return msm_readl(msm_host->ctrl_base + msm_host->cfg->io_offset + reg);
259}
260static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data)
261{
262 msm_writel(data, msm_host->ctrl_base + msm_host->cfg->io_offset + reg);
263}
264
265static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host);
266static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host);
267
268static const struct dsi_config *dsi_get_config(struct msm_dsi_host *msm_host)
269{
270 const struct dsi_config *cfg;
271 struct regulator *gdsc_reg;
272 int i, ret;
273 u32 major = 0, minor = 0;
274
275 gdsc_reg = regulator_get(&msm_host->pdev->dev, "gdsc");
276 if (IS_ERR_OR_NULL(gdsc_reg)) {
277 pr_err("%s: cannot get gdsc\n", __func__);
278 goto fail;
279 }
280 ret = regulator_enable(gdsc_reg);
281 if (ret) {
282 pr_err("%s: unable to enable gdsc\n", __func__);
283 regulator_put(gdsc_reg);
284 goto fail;
285 }
286 ret = clk_prepare_enable(msm_host->ahb_clk);
287 if (ret) {
288 pr_err("%s: unable to enable ahb_clk\n", __func__);
289 regulator_disable(gdsc_reg);
290 regulator_put(gdsc_reg);
291 goto fail;
292 }
293
294 ret = dsi_get_version(msm_host->ctrl_base, &major, &minor);
295
296 clk_disable_unprepare(msm_host->ahb_clk);
297 regulator_disable(gdsc_reg);
298 regulator_put(gdsc_reg);
299 if (ret) {
300 pr_err("%s: Invalid version\n", __func__);
301 goto fail;
302 }
303
304 for (i = 0; i < ARRAY_SIZE(dsi_cfgs); i++) {
305 cfg = dsi_cfgs + i;
306 if ((cfg->major == major) && (cfg->minor == minor))
307 return cfg;
308 }
309 pr_err("%s: Version %x:%x not support\n", __func__, major, minor);
310
311fail:
312 return NULL;
313}
314
315static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host)
316{
317 return container_of(host, struct msm_dsi_host, base);
318}
319
320static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host)
321{
322 struct regulator_bulk_data *s = msm_host->supplies;
323 const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs;
324 int num = msm_host->cfg->reg_cfg.num;
325 int i;
326
327 DBG("");
328 for (i = num - 1; i >= 0; i--)
329 if (regs[i].disable_load >= 0)
330 regulator_set_optimum_mode(s[i].consumer,
331 regs[i].disable_load);
332
333 regulator_bulk_disable(num, s);
334}
335
336static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host)
337{
338 struct regulator_bulk_data *s = msm_host->supplies;
339 const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs;
340 int num = msm_host->cfg->reg_cfg.num;
341 int ret, i;
342
343 DBG("");
344 for (i = 0; i < num; i++) {
345 if (regs[i].enable_load >= 0) {
346 ret = regulator_set_optimum_mode(s[i].consumer,
347 regs[i].enable_load);
348 if (ret < 0) {
349 pr_err("regulator %d set op mode failed, %d\n",
350 i, ret);
351 goto fail;
352 }
353 }
354 }
355
356 ret = regulator_bulk_enable(num, s);
357 if (ret < 0) {
358 pr_err("regulator enable failed, %d\n", ret);
359 goto fail;
360 }
361
362 return 0;
363
364fail:
365 for (i--; i >= 0; i--)
366 regulator_set_optimum_mode(s[i].consumer, regs[i].disable_load);
367 return ret;
368}
369
370static int dsi_regulator_init(struct msm_dsi_host *msm_host)
371{
372 struct regulator_bulk_data *s = msm_host->supplies;
373 const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs;
374 int num = msm_host->cfg->reg_cfg.num;
375 int i, ret;
376
377 for (i = 0; i < num; i++)
378 s[i].supply = regs[i].name;
379
380 ret = devm_regulator_bulk_get(&msm_host->pdev->dev, num, s);
381 if (ret < 0) {
382 pr_err("%s: failed to init regulator, ret=%d\n",
383 __func__, ret);
384 return ret;
385 }
386
387 for (i = 0; i < num; i++) {
388 if ((regs[i].min_voltage >= 0) && (regs[i].max_voltage >= 0)) {
389 ret = regulator_set_voltage(s[i].consumer,
390 regs[i].min_voltage, regs[i].max_voltage);
391 if (ret < 0) {
392 pr_err("regulator %d set voltage failed, %d\n",
393 i, ret);
394 return ret;
395 }
396 }
397 }
398
399 return 0;
400}
401
402static int dsi_clk_init(struct msm_dsi_host *msm_host)
403{
404 struct device *dev = &msm_host->pdev->dev;
405 int ret = 0;
406
407 msm_host->mdp_core_clk = devm_clk_get(dev, "mdp_core_clk");
408 if (IS_ERR(msm_host->mdp_core_clk)) {
409 ret = PTR_ERR(msm_host->mdp_core_clk);
410 pr_err("%s: Unable to get mdp core clk. ret=%d\n",
411 __func__, ret);
412 goto exit;
413 }
414
415 msm_host->ahb_clk = devm_clk_get(dev, "iface_clk");
416 if (IS_ERR(msm_host->ahb_clk)) {
417 ret = PTR_ERR(msm_host->ahb_clk);
418 pr_err("%s: Unable to get mdss ahb clk. ret=%d\n",
419 __func__, ret);
420 goto exit;
421 }
422
423 msm_host->axi_clk = devm_clk_get(dev, "bus_clk");
424 if (IS_ERR(msm_host->axi_clk)) {
425 ret = PTR_ERR(msm_host->axi_clk);
426 pr_err("%s: Unable to get axi bus clk. ret=%d\n",
427 __func__, ret);
428 goto exit;
429 }
430
431 msm_host->mmss_misc_ahb_clk = devm_clk_get(dev, "core_mmss_clk");
432 if (IS_ERR(msm_host->mmss_misc_ahb_clk)) {
433 ret = PTR_ERR(msm_host->mmss_misc_ahb_clk);
434 pr_err("%s: Unable to get mmss misc ahb clk. ret=%d\n",
435 __func__, ret);
436 goto exit;
437 }
438
439 msm_host->byte_clk = devm_clk_get(dev, "byte_clk");
440 if (IS_ERR(msm_host->byte_clk)) {
441 ret = PTR_ERR(msm_host->byte_clk);
442 pr_err("%s: can't find dsi_byte_clk. ret=%d\n",
443 __func__, ret);
444 msm_host->byte_clk = NULL;
445 goto exit;
446 }
447
448 msm_host->pixel_clk = devm_clk_get(dev, "pixel_clk");
449 if (IS_ERR(msm_host->pixel_clk)) {
450 ret = PTR_ERR(msm_host->pixel_clk);
451 pr_err("%s: can't find dsi_pixel_clk. ret=%d\n",
452 __func__, ret);
453 msm_host->pixel_clk = NULL;
454 goto exit;
455 }
456
457 msm_host->esc_clk = devm_clk_get(dev, "core_clk");
458 if (IS_ERR(msm_host->esc_clk)) {
459 ret = PTR_ERR(msm_host->esc_clk);
460 pr_err("%s: can't find dsi_esc_clk. ret=%d\n",
461 __func__, ret);
462 msm_host->esc_clk = NULL;
463 goto exit;
464 }
465
466exit:
467 return ret;
468}
469
470static int dsi_bus_clk_enable(struct msm_dsi_host *msm_host)
471{
472 int ret;
473
474 DBG("id=%d", msm_host->id);
475
476 ret = clk_prepare_enable(msm_host->mdp_core_clk);
477 if (ret) {
478 pr_err("%s: failed to enable mdp_core_clock, %d\n",
479 __func__, ret);
480 goto core_clk_err;
481 }
482
483 ret = clk_prepare_enable(msm_host->ahb_clk);
484 if (ret) {
485 pr_err("%s: failed to enable ahb clock, %d\n", __func__, ret);
486 goto ahb_clk_err;
487 }
488
489 ret = clk_prepare_enable(msm_host->axi_clk);
490 if (ret) {
491 pr_err("%s: failed to enable ahb clock, %d\n", __func__, ret);
492 goto axi_clk_err;
493 }
494
495 ret = clk_prepare_enable(msm_host->mmss_misc_ahb_clk);
496 if (ret) {
497 pr_err("%s: failed to enable mmss misc ahb clk, %d\n",
498 __func__, ret);
499 goto misc_ahb_clk_err;
500 }
501
502 return 0;
503
504misc_ahb_clk_err:
505 clk_disable_unprepare(msm_host->axi_clk);
506axi_clk_err:
507 clk_disable_unprepare(msm_host->ahb_clk);
508ahb_clk_err:
509 clk_disable_unprepare(msm_host->mdp_core_clk);
510core_clk_err:
511 return ret;
512}
513
514static void dsi_bus_clk_disable(struct msm_dsi_host *msm_host)
515{
516 DBG("");
517 clk_disable_unprepare(msm_host->mmss_misc_ahb_clk);
518 clk_disable_unprepare(msm_host->axi_clk);
519 clk_disable_unprepare(msm_host->ahb_clk);
520 clk_disable_unprepare(msm_host->mdp_core_clk);
521}
522
523static int dsi_link_clk_enable(struct msm_dsi_host *msm_host)
524{
525 int ret;
526
527 DBG("Set clk rates: pclk=%d, byteclk=%d",
528 msm_host->mode->clock, msm_host->byte_clk_rate);
529
530 ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
531 if (ret) {
532 pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
533 goto error;
534 }
535
536 ret = clk_set_rate(msm_host->pixel_clk, msm_host->mode->clock * 1000);
537 if (ret) {
538 pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
539 goto error;
540 }
541
542 ret = clk_prepare_enable(msm_host->esc_clk);
543 if (ret) {
544 pr_err("%s: Failed to enable dsi esc clk\n", __func__);
545 goto error;
546 }
547
548 ret = clk_prepare_enable(msm_host->byte_clk);
549 if (ret) {
550 pr_err("%s: Failed to enable dsi byte clk\n", __func__);
551 goto byte_clk_err;
552 }
553
554 ret = clk_prepare_enable(msm_host->pixel_clk);
555 if (ret) {
556 pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
557 goto pixel_clk_err;
558 }
559
560 return 0;
561
562pixel_clk_err:
563 clk_disable_unprepare(msm_host->byte_clk);
564byte_clk_err:
565 clk_disable_unprepare(msm_host->esc_clk);
566error:
567 return ret;
568}
569
570static void dsi_link_clk_disable(struct msm_dsi_host *msm_host)
571{
572 clk_disable_unprepare(msm_host->esc_clk);
573 clk_disable_unprepare(msm_host->pixel_clk);
574 clk_disable_unprepare(msm_host->byte_clk);
575}
576
577static int dsi_clk_ctrl(struct msm_dsi_host *msm_host, bool enable)
578{
579 int ret = 0;
580
581 mutex_lock(&msm_host->clk_mutex);
582 if (enable) {
583 ret = dsi_bus_clk_enable(msm_host);
584 if (ret) {
585 pr_err("%s: Can not enable bus clk, %d\n",
586 __func__, ret);
587 goto unlock_ret;
588 }
589 ret = dsi_link_clk_enable(msm_host);
590 if (ret) {
591 pr_err("%s: Can not enable link clk, %d\n",
592 __func__, ret);
593 dsi_bus_clk_disable(msm_host);
594 goto unlock_ret;
595 }
596 } else {
597 dsi_link_clk_disable(msm_host);
598 dsi_bus_clk_disable(msm_host);
599 }
600
601unlock_ret:
602 mutex_unlock(&msm_host->clk_mutex);
603 return ret;
604}
605
606static int dsi_calc_clk_rate(struct msm_dsi_host *msm_host)
607{
608 struct drm_display_mode *mode = msm_host->mode;
609 u8 lanes = msm_host->lanes;
610 u32 bpp = dsi_get_bpp(msm_host->format);
611 u32 pclk_rate;
612
613 if (!mode) {
614 pr_err("%s: mode not set\n", __func__);
615 return -EINVAL;
616 }
617
618 pclk_rate = mode->clock * 1000;
619 if (lanes > 0) {
620 msm_host->byte_clk_rate = (pclk_rate * bpp) / (8 * lanes);
621 } else {
622 pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__);
623 msm_host->byte_clk_rate = (pclk_rate * bpp) / 8;
624 }
625
626 DBG("pclk=%d, bclk=%d", pclk_rate, msm_host->byte_clk_rate);
627
628 return 0;
629}
630
631static void dsi_phy_sw_reset(struct msm_dsi_host *msm_host)
632{
633 DBG("");
634 dsi_write(msm_host, REG_DSI_PHY_RESET, DSI_PHY_RESET_RESET);
635 /* Make sure fully reset */
636 wmb();
637 udelay(1000);
638 dsi_write(msm_host, REG_DSI_PHY_RESET, 0);
639 udelay(100);
640}
641
642static void dsi_intr_ctrl(struct msm_dsi_host *msm_host, u32 mask, int enable)
643{
644 u32 intr;
645 unsigned long flags;
646
647 spin_lock_irqsave(&msm_host->intr_lock, flags);
648 intr = dsi_read(msm_host, REG_DSI_INTR_CTRL);
649
650 if (enable)
651 intr |= mask;
652 else
653 intr &= ~mask;
654
655 DBG("intr=%x enable=%d", intr, enable);
656
657 dsi_write(msm_host, REG_DSI_INTR_CTRL, intr);
658 spin_unlock_irqrestore(&msm_host->intr_lock, flags);
659}
660
661static inline enum dsi_traffic_mode dsi_get_traffic_mode(const u32 mode_flags)
662{
663 if (mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
664 return BURST_MODE;
665 else if (mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
666 return NON_BURST_SYNCH_PULSE;
667
668 return NON_BURST_SYNCH_EVENT;
669}
670
671static inline enum dsi_vid_dst_format dsi_get_vid_fmt(
672 const enum mipi_dsi_pixel_format mipi_fmt)
673{
674 switch (mipi_fmt) {
675 case MIPI_DSI_FMT_RGB888: return VID_DST_FORMAT_RGB888;
676 case MIPI_DSI_FMT_RGB666: return VID_DST_FORMAT_RGB666_LOOSE;
677 case MIPI_DSI_FMT_RGB666_PACKED: return VID_DST_FORMAT_RGB666;
678 case MIPI_DSI_FMT_RGB565: return VID_DST_FORMAT_RGB565;
679 default: return VID_DST_FORMAT_RGB888;
680 }
681}
682
683static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
684 const enum mipi_dsi_pixel_format mipi_fmt)
685{
686 switch (mipi_fmt) {
687 case MIPI_DSI_FMT_RGB888: return CMD_DST_FORMAT_RGB888;
688 case MIPI_DSI_FMT_RGB666_PACKED:
689 case MIPI_DSI_FMT_RGB666: return VID_DST_FORMAT_RGB666;
690 case MIPI_DSI_FMT_RGB565: return CMD_DST_FORMAT_RGB565;
691 default: return CMD_DST_FORMAT_RGB888;
692 }
693}
694
/* Program the DSI controller for the attached panel (mode flags, pixel
 * format, virtual channel, lane count) plus the PHY-derived clk_pre /
 * clk_post timings, then enable it.  With enable == false the controller
 * is simply switched off and nothing else is touched.
 */
static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
				u32 clk_pre, u32 clk_post)
{
	u32 flags = msm_host->mode_flags;
	enum mipi_dsi_pixel_format mipi_fmt = msm_host->format;
	u32 data = 0;

	if (!enable) {
		dsi_write(msm_host, REG_DSI_CTRL, 0);
		return;
	}

	if (flags & MIPI_DSI_MODE_VIDEO) {
		/* translate each DRM video flag into its VID_CFG0 bit */
		if (flags & MIPI_DSI_MODE_VIDEO_HSE)
			data |= DSI_VID_CFG0_PULSE_MODE_HSA_HE;
		if (flags & MIPI_DSI_MODE_VIDEO_HFP)
			data |= DSI_VID_CFG0_HFP_POWER_STOP;
		if (flags & MIPI_DSI_MODE_VIDEO_HBP)
			data |= DSI_VID_CFG0_HBP_POWER_STOP;
		if (flags & MIPI_DSI_MODE_VIDEO_HSA)
			data |= DSI_VID_CFG0_HSA_POWER_STOP;
		/* Always set low power stop mode for BLLP
		 * to let command engine send packets
		 */
		data |= DSI_VID_CFG0_EOF_BLLP_POWER_STOP |
			DSI_VID_CFG0_BLLP_POWER_STOP;
		data |= DSI_VID_CFG0_TRAFFIC_MODE(dsi_get_traffic_mode(flags));
		data |= DSI_VID_CFG0_DST_FORMAT(dsi_get_vid_fmt(mipi_fmt));
		data |= DSI_VID_CFG0_VIRT_CHANNEL(msm_host->channel);
		dsi_write(msm_host, REG_DSI_VID_CFG0, data);

		/* Do not swap RGB colors */
		data = DSI_VID_CFG1_RGB_SWAP(SWAP_RGB);
		/* NOTE(review): 0 is written here, not 'data'; this is only
		 * equivalent if SWAP_RGB encodes to 0 — confirm against the
		 * register definitions.
		 */
		dsi_write(msm_host, REG_DSI_VID_CFG1, 0);
	} else {
		/* Do not swap RGB colors */
		data = DSI_CMD_CFG0_RGB_SWAP(SWAP_RGB);
		data |= DSI_CMD_CFG0_DST_FORMAT(dsi_get_cmd_fmt(mipi_fmt));
		dsi_write(msm_host, REG_DSI_CMD_CFG0, data);

		/* DCS commands the HW inserts when streaming frame data */
		data = DSI_CMD_CFG1_WR_MEM_START(MIPI_DCS_WRITE_MEMORY_START) |
			DSI_CMD_CFG1_WR_MEM_CONTINUE(
					MIPI_DCS_WRITE_MEMORY_CONTINUE);
		/* Always insert DCS command */
		data |= DSI_CMD_CFG1_INSERT_DCS_COMMAND;
		dsi_write(msm_host, REG_DSI_CMD_CFG1, data);
	}

	dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL,
			DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER |
			DSI_CMD_DMA_CTRL_LOW_POWER);

	data = 0;
	/* Always assume dedicated TE pin */
	data |= DSI_TRIG_CTRL_TE;
	data |= DSI_TRIG_CTRL_MDP_TRIGGER(TRIGGER_NONE);
	data |= DSI_TRIG_CTRL_DMA_TRIGGER(TRIGGER_SW);
	data |= DSI_TRIG_CTRL_STREAM(msm_host->channel);
	/* 6G v1.2+ can block command DMA while a frame is being scanned out */
	if ((msm_host->cfg->major == MSM_DSI_VER_MAJOR_6G) &&
		(msm_host->cfg->minor >= MSM_DSI_6G_VER_MINOR_V1_2))
		data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME;
	dsi_write(msm_host, REG_DSI_TRIG_CTRL, data);

	data = DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(clk_post) |
		DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(clk_pre);
	dsi_write(msm_host, REG_DSI_CLKOUT_TIMING_CTRL, data);

	/* append EOT packets unless the peripheral asked us not to */
	data = 0;
	if (!(flags & MIPI_DSI_MODE_EOT_PACKET))
		data |= DSI_EOT_PACKET_CTRL_TX_EOT_APPEND;
	dsi_write(msm_host, REG_DSI_EOT_PACKET_CTRL, data);

	/* allow only ack-err-status to generate interrupt */
	dsi_write(msm_host, REG_DSI_ERR_INT_MASK0, 0x13ff3fe0);

	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);

	dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);

	data = DSI_CTRL_CLK_EN;

	DBG("lane number=%d", msm_host->lanes);
	if (msm_host->lanes == 2) {
		data |= DSI_CTRL_LANE1 | DSI_CTRL_LANE2;
		/* swap lanes for 2-lane panel for better performance */
		dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
			DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(LANE_SWAP_1230));
	} else {
		/* Take 4 lanes as default */
		data |= DSI_CTRL_LANE0 | DSI_CTRL_LANE1 | DSI_CTRL_LANE2 |
			DSI_CTRL_LANE3;
		/* Do not swap lanes for 4-lane panel */
		dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
			DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(LANE_SWAP_0123));
	}
	data |= DSI_CTRL_ENABLE;

	dsi_write(msm_host, REG_DSI_CTRL, data);
}
794
/* Program the video- or command-mode timing registers from the display
 * mode cached in msm_host->mode.  Must run after the mode is set and the
 * controller clocks are on.
 */
static void dsi_timing_setup(struct msm_dsi_host *msm_host)
{
	struct drm_display_mode *mode = msm_host->mode;
	u32 hs_start = 0, vs_start = 0; /* take sync start as 0 */
	u32 h_total = mode->htotal;
	u32 v_total = mode->vtotal;
	u32 hs_end = mode->hsync_end - mode->hsync_start;
	u32 vs_end = mode->vsync_end - mode->vsync_start;
	/* active window measured from sync start: total - sync_start equals
	 * sync width plus back porch */
	u32 ha_start = h_total - mode->hsync_start;
	u32 ha_end = ha_start + mode->hdisplay;
	u32 va_start = v_total - mode->vsync_start;
	u32 va_end = va_start + mode->vdisplay;
	u32 wc;

	DBG("");

	if (msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) {
		dsi_write(msm_host, REG_DSI_ACTIVE_H,
			DSI_ACTIVE_H_START(ha_start) |
			DSI_ACTIVE_H_END(ha_end));
		dsi_write(msm_host, REG_DSI_ACTIVE_V,
			DSI_ACTIVE_V_START(va_start) |
			DSI_ACTIVE_V_END(va_end));
		/* totals are programmed minus one */
		dsi_write(msm_host, REG_DSI_TOTAL,
			DSI_TOTAL_H_TOTAL(h_total - 1) |
			DSI_TOTAL_V_TOTAL(v_total - 1));

		dsi_write(msm_host, REG_DSI_ACTIVE_HSYNC,
			DSI_ACTIVE_HSYNC_START(hs_start) |
			DSI_ACTIVE_HSYNC_END(hs_end));
		dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_HPOS, 0);
		dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_VPOS,
			DSI_ACTIVE_VSYNC_VPOS_START(vs_start) |
			DSI_ACTIVE_VSYNC_VPOS_END(vs_end));
	} else {		/* command mode */
		/* image data and 1 byte write_memory_start cmd */
		wc = mode->hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1;

		dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_CTRL,
			DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT(wc) |
			DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL(
					msm_host->channel) |
			DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE(
					MIPI_DSI_DCS_LONG_WRITE));

		dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_TOTAL,
			DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL(mode->hdisplay) |
			DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL(mode->vdisplay));
	}
}
845
/* Pulse the controller soft-reset bit.  The AHB/byte clocks must be
 * enabled first or the reset does not propagate, hence the CLK_CTRL
 * write and barrier before touching REG_DSI_RESET.
 */
static void dsi_sw_reset(struct msm_dsi_host *msm_host)
{
	dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
	wmb(); /* clocks need to be enabled before reset */

	dsi_write(msm_host, REG_DSI_RESET, 1);
	wmb(); /* make sure reset happen */
	dsi_write(msm_host, REG_DSI_RESET, 0);
}
855
/* Enable or disable the controller's video or command engine via
 * read-modify-write of REG_DSI_CTRL, adjusting the matching interrupt
 * masks (MDP-done for command mode, both masked off on disable).
 */
static void dsi_op_mode_config(struct msm_dsi_host *msm_host,
					bool video_mode, bool enable)
{
	u32 dsi_ctrl;

	dsi_ctrl = dsi_read(msm_host, REG_DSI_CTRL);

	if (!enable) {
		/* drop both engine enables along with the master enable */
		dsi_ctrl &= ~(DSI_CTRL_ENABLE | DSI_CTRL_VID_MODE_EN |
				DSI_CTRL_CMD_MODE_EN);
		dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE |
					DSI_IRQ_MASK_VIDEO_DONE, 0);
	} else {
		if (video_mode) {
			dsi_ctrl |= DSI_CTRL_VID_MODE_EN;
		} else {		/* command mode */
			dsi_ctrl |= DSI_CTRL_CMD_MODE_EN;
			dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE, 1);
		}
		dsi_ctrl |= DSI_CTRL_ENABLE;
	}

	dsi_write(msm_host, REG_DSI_CTRL, dsi_ctrl);
}
880
881static void dsi_set_tx_power_mode(int mode, struct msm_dsi_host *msm_host)
882{
883 u32 data;
884
885 data = dsi_read(msm_host, REG_DSI_CMD_DMA_CTRL);
886
887 if (mode == 0)
888 data &= ~DSI_CMD_DMA_CTRL_LOW_POWER;
889 else
890 data |= DSI_CMD_DMA_CTRL_LOW_POWER;
891
892 dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL, data);
893}
894
/* Wait (up to 70ms, roughly a frame plus margin) for the video engine's
 * done interrupt.  Best-effort: the timeout result is deliberately
 * ignored — callers only need a bounded delay before command DMA.
 */
static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
{
	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 1);

	reinit_completion(&msm_host->video_comp);

	wait_for_completion_timeout(&msm_host->video_comp,
			msecs_to_jiffies(70));

	/* mask the interrupt again; it is only needed for this wait */
	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0);
}
906
907static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
908{
909 if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
910 return;
911
912 if (msm_host->power_on) {
913 dsi_wait4video_done(msm_host);
914 /* delay 4 ms to skip BLLP */
915 usleep_range(2000, 4000);
916 }
917}
918
919/* dsi_cmd */
920static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
921{
922 struct drm_device *dev = msm_host->dev;
923 int ret;
924 u32 iova;
925
926 mutex_lock(&dev->struct_mutex);
927 msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED);
928 if (IS_ERR(msm_host->tx_gem_obj)) {
929 ret = PTR_ERR(msm_host->tx_gem_obj);
930 pr_err("%s: failed to allocate gem, %d\n", __func__, ret);
931 msm_host->tx_gem_obj = NULL;
932 mutex_unlock(&dev->struct_mutex);
933 return ret;
934 }
935
936 ret = msm_gem_get_iova_locked(msm_host->tx_gem_obj, 0, &iova);
937 if (ret) {
938 pr_err("%s: failed to get iova, %d\n", __func__, ret);
939 return ret;
940 }
941 mutex_unlock(&dev->struct_mutex);
942
943 if (iova & 0x07) {
944 pr_err("%s: buf NOT 8 bytes aligned\n", __func__);
945 return -EINVAL;
946 }
947
948 return 0;
949}
950
951static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
952{
953 struct drm_device *dev = msm_host->dev;
954
955 if (msm_host->tx_gem_obj) {
956 msm_gem_put_iova(msm_host->tx_gem_obj, 0);
957 mutex_lock(&dev->struct_mutex);
958 msm_gem_free_object(msm_host->tx_gem_obj);
959 msm_host->tx_gem_obj = NULL;
960 mutex_unlock(&dev->struct_mutex);
961 }
962}
963
/*
 * prepare cmd buffer to be txed
 *
 * Packs @msg into @tx_gem using the MSM controller's in-memory command
 * layout and returns the 4-byte-aligned length written, or a negative
 * errno.
 */
static int dsi_cmd_dma_add(struct drm_gem_object *tx_gem,
			const struct mipi_dsi_msg *msg)
{
	struct mipi_dsi_packet packet;
	int len;
	int ret;
	u8 *data;

	ret = mipi_dsi_create_packet(&packet, msg);
	if (ret) {
		pr_err("%s: create packet failed, %d\n", __func__, ret);
		return ret;
	}
	/* round up to a 32-bit word boundary for the DMA engine */
	len = (packet.size + 3) & (~0x3);

	if (len > tx_gem->size) {
		pr_err("%s: packet size is too big\n", __func__);
		return -EINVAL;
	}

	data = msm_gem_vaddr(tx_gem);

	if (IS_ERR(data)) {
		ret = PTR_ERR(data);
		pr_err("%s: get vaddr failed, %d\n", __func__, ret);
		return ret;
	}

	/* MSM specific command format in memory:
	 * header bytes are reordered and byte 3 carries control flags
	 */
	data[0] = packet.header[1];
	data[1] = packet.header[2];
	data[2] = packet.header[0];
	data[3] = BIT(7); /* Last packet */
	if (mipi_dsi_packet_format_is_long(msg->type))
		data[3] |= BIT(6);
	if (msg->rx_buf && msg->rx_len)
		data[3] |= BIT(5); /* expect a response from the peripheral */

	/* Long packet */
	if (packet.payload && packet.payload_length)
		memcpy(data + 4, packet.payload, packet.payload_length);

	/* Append 0xff to the end */
	if (packet.size < len)
		memset(data + packet.size, 0xff, len - packet.size);

	return len;
}
1015
1016/*
1017 * dsi_short_read1_resp: 1 parameter
1018 */
1019static int dsi_short_read1_resp(u8 *buf, const struct mipi_dsi_msg *msg)
1020{
1021 u8 *data = msg->rx_buf;
1022 if (data && (msg->rx_len >= 1)) {
1023 *data = buf[1]; /* strip out dcs type */
1024 return 1;
1025 } else {
1026 pr_err("%s: read data does not match with rx_buf len %d\n",
1027 __func__, msg->rx_len);
1028 return -EINVAL;
1029 }
1030}
1031
1032/*
1033 * dsi_short_read2_resp: 2 parameter
1034 */
1035static int dsi_short_read2_resp(u8 *buf, const struct mipi_dsi_msg *msg)
1036{
1037 u8 *data = msg->rx_buf;
1038 if (data && (msg->rx_len >= 2)) {
1039 data[0] = buf[1]; /* strip out dcs type */
1040 data[1] = buf[2];
1041 return 2;
1042 } else {
1043 pr_err("%s: read data does not match with rx_buf len %d\n",
1044 __func__, msg->rx_len);
1045 return -EINVAL;
1046 }
1047}
1048
1049static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg)
1050{
1051 /* strip out 4 byte dcs header */
1052 if (msg->rx_buf && msg->rx_len)
1053 memcpy(msg->rx_buf, buf + 4, msg->rx_len);
1054
1055 return msg->rx_len;
1056}
1057
1058
/* Kick a command DMA transfer of @len bytes from the TX gem buffer and,
 * if the manager actually triggered it on this host, wait (up to 200ms)
 * for completion.  Returns @len on success or a negative errno.
 */
static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
{
	int ret;
	u32 iova;
	bool triggered;

	ret = msm_gem_get_iova(msm_host->tx_gem_obj, 0, &iova);
	if (ret) {
		pr_err("%s: failed to get iova: %d\n", __func__, ret);
		return ret;
	}

	reinit_completion(&msm_host->dma_comp);

	/* in video mode, wait for the BLLP window first */
	dsi_wait4video_eng_busy(msm_host);

	triggered = msm_dsi_manager_cmd_xfer_trigger(
						msm_host->id, iova, len);
	if (triggered) {
		ret = wait_for_completion_timeout(&msm_host->dma_comp,
					msecs_to_jiffies(200));
		DBG("ret=%d", ret);
		if (ret == 0)
			ret = -ETIMEDOUT;
		else
			ret = len;
	} else
		/* another host drives the transfer; nothing to wait for here */
		ret = len;

	return ret;
}
1090
/* Drain up to 16 bytes of read-back data from the RDBK_DATA registers
 * into @buf, compensating for a trailing 4-byte ACK/error packet and for
 * payload bytes that repeat across successive reads of a long response.
 * Returns the number of bytes stored in @buf.
 */
static int dsi_cmd_dma_rx(struct msm_dsi_host *msm_host,
			u8 *buf, int rx_byte, int pkt_size)
{
	u32 *lp, *temp, data;
	int i, j = 0, cnt;
	bool ack_error = false;
	u32 read_cnt;
	u8 reg[16];
	int repeated_bytes = 0;
	/* how far into the caller's rx buffer this chunk lands */
	int buf_offset = buf - msm_host->rx_buf;

	lp = (u32 *)buf;
	temp = (u32 *)reg;
	cnt = (rx_byte + 3) >> 2;
	if (cnt > 4)
		cnt = 4; /* 4 x 32 bits registers only */

	/* Calculate real read data count */
	/* NOTE(review): 0x1d4 is a raw offset, presumably the read-back
	 * status register (byte count in bits 31:16) — confirm against the
	 * register spec.
	 */
	read_cnt = dsi_read(msm_host, 0x1d4) >> 16;

	ack_error = (rx_byte == 4) ?
		(read_cnt == 8) : /* short pkt + 4-byte error pkt */
		(read_cnt == (pkt_size + 6 + 4)); /* long pkt+4-byte error pkt*/

	if (ack_error)
		read_cnt -= 4; /* Remove 4 byte error pkt */

	/*
	 * In case of multiple reads from the panel, after the first read, there
	 * is possibility that there are some bytes in the payload repeating in
	 * the RDBK_DATA registers. Since we read all the parameters from the
	 * panel right from the first byte for every pass. We need to skip the
	 * repeating bytes and then append the new parameters to the rx buffer.
	 */
	if (read_cnt > 16) {
		int bytes_shifted;
		/* Any data more than 16 bytes will be shifted out.
		 * The temp read buffer should already contain these bytes.
		 * The remaining bytes in read buffer are the repeated bytes.
		 */
		bytes_shifted = read_cnt - 16;
		repeated_bytes = buf_offset - bytes_shifted;
	}

	/* registers hold network byte order; highest-index register first */
	for (i = cnt - 1; i >= 0; i--) {
		data = dsi_read(msm_host, REG_DSI_RDBK_DATA(i));
		*temp++ = ntohl(data); /* to host byte order */
		DBG("data = 0x%x and ntohl(data) = 0x%x", data, ntohl(data));
	}

	for (i = repeated_bytes; i < 16; i++)
		buf[j++] = reg[i];

	return j;
}
1146
1147static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
1148 const struct mipi_dsi_msg *msg)
1149{
1150 int len, ret;
1151 int bllp_len = msm_host->mode->hdisplay *
1152 dsi_get_bpp(msm_host->format) / 8;
1153
1154 len = dsi_cmd_dma_add(msm_host->tx_gem_obj, msg);
1155 if (!len) {
1156 pr_err("%s: failed to add cmd type = 0x%x\n",
1157 __func__, msg->type);
1158 return -EINVAL;
1159 }
1160
1161 /* for video mode, do not send cmds more than
1162 * one pixel line, since it only transmit it
1163 * during BLLP.
1164 */
1165 /* TODO: if the command is sent in LP mode, the bit rate is only
1166 * half of esc clk rate. In this case, if the video is already
1167 * actively streaming, we need to check more carefully if the
1168 * command can be fit into one BLLP.
1169 */
1170 if ((msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) && (len > bllp_len)) {
1171 pr_err("%s: cmd cannot fit into BLLP period, len=%d\n",
1172 __func__, len);
1173 return -EINVAL;
1174 }
1175
1176 ret = dsi_cmd_dma_tx(msm_host, len);
1177 if (ret < len) {
1178 pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, len=%d\n",
1179 __func__, msg->type, (*(u8 *)(msg->tx_buf)), len);
1180 return -ECOMM;
1181 }
1182
1183 return len;
1184}
1185
/* Soft-reset the controller and restore its previous CTRL register
 * value.  Used from the error worker to recover from FIFO underflow.
 * The wmb()s enforce the required ordering: disable, clocks on, reset
 * pulse, re-enable.
 */
static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host)
{
	u32 data0, data1;

	data0 = dsi_read(msm_host, REG_DSI_CTRL);
	data1 = data0;
	data1 &= ~DSI_CTRL_ENABLE;
	dsi_write(msm_host, REG_DSI_CTRL, data1);
	/*
	 * dsi controller need to be disabled before
	 * clocks turned on
	 */
	wmb();

	dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
	wmb(); /* make sure clocks enabled */

	/* dsi controller can only be reset while clocks are running */
	dsi_write(msm_host, REG_DSI_RESET, 1);
	wmb(); /* make sure reset happen */
	dsi_write(msm_host, REG_DSI_RESET, 0);
	wmb(); /* controller out of reset */
	dsi_write(msm_host, REG_DSI_CTRL, data0);
	wmb(); /* make sure dsi controller enabled again */
}
1211
/* Deferred error handler: runs with the error interrupt masked (masked
 * by dsi_error() before the work was queued), recovers from MDP FIFO
 * underflow via a reset/restore, then clears the state and re-arms the
 * error interrupt.
 */
static void dsi_err_worker(struct work_struct *work)
{
	struct msm_dsi_host *msm_host =
		container_of(work, struct msm_dsi_host, err_work);
	u32 status = msm_host->err_work_state;

	pr_err("%s: status=%x\n", __func__, status);
	if (status & DSI_ERR_STATE_MDP_FIFO_UNDERFLOW)
		dsi_sw_reset_restore(msm_host);

	/* It is safe to clear here because error irq is disabled. */
	msm_host->err_work_state = 0;

	/* enable dsi error interrupt */
	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);
}
1228
1229static void dsi_ack_err_status(struct msm_dsi_host *msm_host)
1230{
1231 u32 status;
1232
1233 status = dsi_read(msm_host, REG_DSI_ACK_ERR_STATUS);
1234
1235 if (status) {
1236 dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, status);
1237 /* Writing of an extra 0 needed to clear error bits */
1238 dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, 0);
1239 msm_host->err_work_state |= DSI_ERR_STATE_ACK;
1240 }
1241}
1242
1243static void dsi_timeout_status(struct msm_dsi_host *msm_host)
1244{
1245 u32 status;
1246
1247 status = dsi_read(msm_host, REG_DSI_TIMEOUT_STATUS);
1248
1249 if (status) {
1250 dsi_write(msm_host, REG_DSI_TIMEOUT_STATUS, status);
1251 msm_host->err_work_state |= DSI_ERR_STATE_TIMEOUT;
1252 }
1253}
1254
1255static void dsi_dln0_phy_err(struct msm_dsi_host *msm_host)
1256{
1257 u32 status;
1258
1259 status = dsi_read(msm_host, REG_DSI_DLN0_PHY_ERR);
1260
1261 if (status) {
1262 dsi_write(msm_host, REG_DSI_DLN0_PHY_ERR, status);
1263 msm_host->err_work_state |= DSI_ERR_STATE_DLN0_PHY;
1264 }
1265}
1266
1267static void dsi_fifo_status(struct msm_dsi_host *msm_host)
1268{
1269 u32 status;
1270
1271 status = dsi_read(msm_host, REG_DSI_FIFO_STATUS);
1272
1273 /* fifo underflow, overflow */
1274 if (status) {
1275 dsi_write(msm_host, REG_DSI_FIFO_STATUS, status);
1276 msm_host->err_work_state |= DSI_ERR_STATE_FIFO;
1277 if (status & DSI_FIFO_STATUS_CMD_MDP_FIFO_UNDERFLOW)
1278 msm_host->err_work_state |=
1279 DSI_ERR_STATE_MDP_FIFO_UNDERFLOW;
1280 }
1281}
1282
1283static void dsi_status(struct msm_dsi_host *msm_host)
1284{
1285 u32 status;
1286
1287 status = dsi_read(msm_host, REG_DSI_STATUS0);
1288
1289 if (status & DSI_STATUS0_INTERLEAVE_OP_CONTENTION) {
1290 dsi_write(msm_host, REG_DSI_STATUS0, status);
1291 msm_host->err_work_state |=
1292 DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION;
1293 }
1294}
1295
1296static void dsi_clk_status(struct msm_dsi_host *msm_host)
1297{
1298 u32 status;
1299
1300 status = dsi_read(msm_host, REG_DSI_CLK_STATUS);
1301
1302 if (status & DSI_CLK_STATUS_PLL_UNLOCKED) {
1303 dsi_write(msm_host, REG_DSI_CLK_STATUS, status);
1304 msm_host->err_work_state |= DSI_ERR_STATE_PLL_UNLOCKED;
1305 }
1306}
1307
/* IRQ-context error collector: masks the error interrupt, gathers all
 * pending error sources into err_work_state, then defers recovery to
 * the worker (which re-enables the interrupt when done).
 */
static void dsi_error(struct msm_dsi_host *msm_host)
{
	/* disable dsi error interrupt */
	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 0);

	dsi_clk_status(msm_host);
	dsi_fifo_status(msm_host);
	dsi_ack_err_status(msm_host);
	dsi_timeout_status(msm_host);
	dsi_status(msm_host);
	dsi_dln0_phy_err(msm_host);

	queue_work(msm_host->workqueue, &msm_host->err_work);
}
1322
/* Top-level DSI interrupt handler: read-and-ack the interrupt status
 * under intr_lock, then dispatch to error handling and complete any
 * waiters for video-done / command-DMA-done.
 */
static irqreturn_t dsi_host_irq(int irq, void *ptr)
{
	struct msm_dsi_host *msm_host = ptr;
	u32 isr;
	unsigned long flags;

	/* registers not mapped (yet/anymore) — nothing to do */
	if (!msm_host->ctrl_base)
		return IRQ_HANDLED;

	spin_lock_irqsave(&msm_host->intr_lock, flags);
	isr = dsi_read(msm_host, REG_DSI_INTR_CTRL);
	/* write back the read value to ack the latched interrupt bits */
	dsi_write(msm_host, REG_DSI_INTR_CTRL, isr);
	spin_unlock_irqrestore(&msm_host->intr_lock, flags);

	DBG("isr=0x%x, id=%d", isr, msm_host->id);

	if (isr & DSI_IRQ_ERROR)
		dsi_error(msm_host);

	if (isr & DSI_IRQ_VIDEO_DONE)
		complete(&msm_host->video_comp);

	if (isr & DSI_IRQ_CMD_DMA_DONE)
		complete(&msm_host->dma_comp);

	return IRQ_HANDLED;
}
1350
1351static int dsi_host_init_panel_gpios(struct msm_dsi_host *msm_host,
1352 struct device *panel_device)
1353{
1354 int ret;
1355
1356 msm_host->disp_en_gpio = devm_gpiod_get(panel_device,
1357 "disp-enable");
1358 if (IS_ERR(msm_host->disp_en_gpio)) {
1359 DBG("cannot get disp-enable-gpios %ld",
1360 PTR_ERR(msm_host->disp_en_gpio));
1361 msm_host->disp_en_gpio = NULL;
1362 }
1363 if (msm_host->disp_en_gpio) {
1364 ret = gpiod_direction_output(msm_host->disp_en_gpio, 0);
1365 if (ret) {
1366 pr_err("cannot set dir to disp-en-gpios %d\n", ret);
1367 return ret;
1368 }
1369 }
1370
1371 msm_host->te_gpio = devm_gpiod_get(panel_device, "disp-te");
1372 if (IS_ERR(msm_host->te_gpio)) {
1373 DBG("cannot get disp-te-gpios %ld", PTR_ERR(msm_host->te_gpio));
1374 msm_host->te_gpio = NULL;
1375 }
1376
1377 if (msm_host->te_gpio) {
1378 ret = gpiod_direction_input(msm_host->te_gpio);
1379 if (ret) {
1380 pr_err("%s: cannot set dir to disp-te-gpios, %d\n",
1381 __func__, ret);
1382 return ret;
1383 }
1384 }
1385
1386 return 0;
1387}
1388
1389static int dsi_host_attach(struct mipi_dsi_host *host,
1390 struct mipi_dsi_device *dsi)
1391{
1392 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1393 int ret;
1394
1395 msm_host->channel = dsi->channel;
1396 msm_host->lanes = dsi->lanes;
1397 msm_host->format = dsi->format;
1398 msm_host->mode_flags = dsi->mode_flags;
1399
1400 msm_host->panel_node = dsi->dev.of_node;
1401
1402 /* Some gpios defined in panel DT need to be controlled by host */
1403 ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev);
1404 if (ret)
1405 return ret;
1406
1407 DBG("id=%d", msm_host->id);
1408 if (msm_host->dev)
1409 drm_helper_hpd_irq_event(msm_host->dev);
1410
1411 return 0;
1412}
1413
1414static int dsi_host_detach(struct mipi_dsi_host *host,
1415 struct mipi_dsi_device *dsi)
1416{
1417 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1418
1419 msm_host->panel_node = NULL;
1420
1421 DBG("id=%d", msm_host->id);
1422 if (msm_host->dev)
1423 drm_helper_hpd_irq_event(msm_host->dev);
1424
1425 return 0;
1426}
1427
1428static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
1429 const struct mipi_dsi_msg *msg)
1430{
1431 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1432 int ret;
1433
1434 if (!msg || !msm_host->power_on)
1435 return -EINVAL;
1436
1437 mutex_lock(&msm_host->cmd_mutex);
1438 ret = msm_dsi_manager_cmd_xfer(msm_host->id, msg);
1439 mutex_unlock(&msm_host->cmd_mutex);
1440
1441 return ret;
1442}
1443
1444static struct mipi_dsi_host_ops dsi_host_ops = {
1445 .attach = dsi_host_attach,
1446 .detach = dsi_host_detach,
1447 .transfer = dsi_host_transfer,
1448};
1449
1450int msm_dsi_host_init(struct msm_dsi *msm_dsi)
1451{
1452 struct msm_dsi_host *msm_host = NULL;
1453 struct platform_device *pdev = msm_dsi->pdev;
1454 int ret;
1455
1456 msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL);
1457 if (!msm_host) {
1458 pr_err("%s: FAILED: cannot alloc dsi host\n",
1459 __func__);
1460 ret = -ENOMEM;
1461 goto fail;
1462 }
1463
1464 ret = of_property_read_u32(pdev->dev.of_node,
1465 "qcom,dsi-host-index", &msm_host->id);
1466 if (ret) {
1467 dev_err(&pdev->dev,
1468 "%s: host index not specified, ret=%d\n",
1469 __func__, ret);
1470 goto fail;
1471 }
1472 msm_host->pdev = pdev;
1473
1474 ret = dsi_clk_init(msm_host);
1475 if (ret) {
1476 pr_err("%s: unable to initialize dsi clks\n", __func__);
1477 goto fail;
1478 }
1479
1480 msm_host->ctrl_base = msm_ioremap(pdev, "dsi_ctrl", "DSI CTRL");
1481 if (IS_ERR(msm_host->ctrl_base)) {
1482 pr_err("%s: unable to map Dsi ctrl base\n", __func__);
1483 ret = PTR_ERR(msm_host->ctrl_base);
1484 goto fail;
1485 }
1486
1487 msm_host->cfg = dsi_get_config(msm_host);
1488 if (!msm_host->cfg) {
1489 ret = -EINVAL;
1490 pr_err("%s: get config failed\n", __func__);
1491 goto fail;
1492 }
1493
1494 ret = dsi_regulator_init(msm_host);
1495 if (ret) {
1496 pr_err("%s: regulator init failed\n", __func__);
1497 goto fail;
1498 }
1499
1500 msm_host->rx_buf = devm_kzalloc(&pdev->dev, SZ_4K, GFP_KERNEL);
1501 if (!msm_host->rx_buf) {
1502 pr_err("%s: alloc rx temp buf failed\n", __func__);
1503 goto fail;
1504 }
1505
1506 init_completion(&msm_host->dma_comp);
1507 init_completion(&msm_host->video_comp);
1508 mutex_init(&msm_host->dev_mutex);
1509 mutex_init(&msm_host->cmd_mutex);
1510 mutex_init(&msm_host->clk_mutex);
1511 spin_lock_init(&msm_host->intr_lock);
1512
1513 /* setup workqueue */
1514 msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0);
1515 INIT_WORK(&msm_host->err_work, dsi_err_worker);
1516
1517 msm_dsi->phy = msm_dsi_phy_init(pdev, msm_host->cfg->phy_type,
1518 msm_host->id);
1519 if (!msm_dsi->phy) {
1520 ret = -EINVAL;
1521 pr_err("%s: phy init failed\n", __func__);
1522 goto fail;
1523 }
1524 msm_dsi->host = &msm_host->base;
1525 msm_dsi->id = msm_host->id;
1526
1527 DBG("Dsi Host %d initialized", msm_host->id);
1528 return 0;
1529
1530fail:
1531 return ret;
1532}
1533
1534void msm_dsi_host_destroy(struct mipi_dsi_host *host)
1535{
1536 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1537
1538 DBG("");
1539 dsi_tx_buf_free(msm_host);
1540 if (msm_host->workqueue) {
1541 flush_workqueue(msm_host->workqueue);
1542 destroy_workqueue(msm_host->workqueue);
1543 msm_host->workqueue = NULL;
1544 }
1545
1546 mutex_destroy(&msm_host->clk_mutex);
1547 mutex_destroy(&msm_host->cmd_mutex);
1548 mutex_destroy(&msm_host->dev_mutex);
1549}
1550
1551int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
1552 struct drm_device *dev)
1553{
1554 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1555 struct platform_device *pdev = msm_host->pdev;
1556 int ret;
1557
1558 msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
1559 if (msm_host->irq < 0) {
1560 ret = msm_host->irq;
1561 dev_err(dev->dev, "failed to get irq: %d\n", ret);
1562 return ret;
1563 }
1564
1565 ret = devm_request_irq(&pdev->dev, msm_host->irq,
1566 dsi_host_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
1567 "dsi_isr", msm_host);
1568 if (ret < 0) {
1569 dev_err(&pdev->dev, "failed to request IRQ%u: %d\n",
1570 msm_host->irq, ret);
1571 return ret;
1572 }
1573
1574 msm_host->dev = dev;
1575 ret = dsi_tx_buf_alloc(msm_host, SZ_4K);
1576 if (ret) {
1577 pr_err("%s: alloc tx gem obj failed, %d\n", __func__, ret);
1578 return ret;
1579 }
1580
1581 return 0;
1582}
1583
1584int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer)
1585{
1586 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1587 struct device_node *node;
1588 int ret;
1589
1590 /* Register mipi dsi host */
1591 if (!msm_host->registered) {
1592 host->dev = &msm_host->pdev->dev;
1593 host->ops = &dsi_host_ops;
1594 ret = mipi_dsi_host_register(host);
1595 if (ret)
1596 return ret;
1597
1598 msm_host->registered = true;
1599
1600 /* If the panel driver has not been probed after host register,
1601 * we should defer the host's probe.
1602 * It makes sure panel is connected when fbcon detects
1603 * connector status and gets the proper display mode to
1604 * create framebuffer.
1605 */
1606 if (check_defer) {
1607 node = of_get_child_by_name(msm_host->pdev->dev.of_node,
1608 "panel");
1609 if (node) {
1610 if (!of_drm_find_panel(node))
1611 return -EPROBE_DEFER;
1612 }
1613 }
1614 }
1615
1616 return 0;
1617}
1618
1619void msm_dsi_host_unregister(struct mipi_dsi_host *host)
1620{
1621 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1622
1623 if (msm_host->registered) {
1624 mipi_dsi_host_unregister(host);
1625 host->dev = NULL;
1626 host->ops = NULL;
1627 msm_host->registered = false;
1628 }
1629}
1630
/* Prepare the controller for a command transfer: turn on clocks, pick
 * HS/LP signalling per the message flags, save the current CTRL value
 * (restored by msm_dsi_host_xfer_restore) and force command-engine mode
 * with the DMA-done interrupt armed.
 */
int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
				const struct mipi_dsi_msg *msg)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	/* TODO: make sure dsi_cmd_mdp is idle.
	 * Since DSI6G v1.2.0, we can set DSI_TRIG_CTRL.BLOCK_DMA_WITHIN_FRAME
	 * to asking H/W to wait until cmd mdp is idle. S/W wait is not needed.
	 * How to handle the old versions? Wait for mdp cmd done?
	 */

	/*
	 * mdss interrupt is generated in mdp core clock domain
	 * mdp clock need to be enabled to receive dsi interrupt
	 */
	dsi_clk_ctrl(msm_host, 1);

	/* TODO: vote for bus bandwidth */

	if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
		dsi_set_tx_power_mode(0, msm_host);

	/* remember CTRL so the restore path can put it back exactly */
	msm_host->dma_cmd_ctrl_restore = dsi_read(msm_host, REG_DSI_CTRL);
	dsi_write(msm_host, REG_DSI_CTRL,
		msm_host->dma_cmd_ctrl_restore |
		DSI_CTRL_CMD_MODE_EN |
		DSI_CTRL_ENABLE);
	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 1);

	return 0;
}
1662
/* Undo msm_dsi_host_xfer_prepare(): mask the DMA-done interrupt, restore
 * the saved CTRL register, return to LP signalling if HS was forced, and
 * drop the clock reference.
 */
void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
				const struct mipi_dsi_msg *msg)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 0);
	dsi_write(msm_host, REG_DSI_CTRL, msm_host->dma_cmd_ctrl_restore);

	if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
		dsi_set_tx_power_mode(1, msm_host);

	/* TODO: unvote for bus bandwidth */

	dsi_clk_ctrl(msm_host, 0);
}
1678
/* Transmit @msg on @host.  Thin wrapper around dsi_cmds2buf_tx();
 * returns bytes sent or a negative errno.
 */
int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
				const struct mipi_dsi_msg *msg)
{
	return dsi_cmds2buf_tx(to_msm_dsi_host(host), msg);
}
1686
1687int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
1688 const struct mipi_dsi_msg *msg)
1689{
1690 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1691 int data_byte, rx_byte, dlen, end;
1692 int short_response, diff, pkt_size, ret = 0;
1693 char cmd;
1694 int rlen = msg->rx_len;
1695 u8 *buf;
1696
1697 if (rlen <= 2) {
1698 short_response = 1;
1699 pkt_size = rlen;
1700 rx_byte = 4;
1701 } else {
1702 short_response = 0;
1703 data_byte = 10; /* first read */
1704 if (rlen < data_byte)
1705 pkt_size = rlen;
1706 else
1707 pkt_size = data_byte;
1708 rx_byte = data_byte + 6; /* 4 header + 2 crc */
1709 }
1710
1711 buf = msm_host->rx_buf;
1712 end = 0;
1713 while (!end) {
1714 u8 tx[2] = {pkt_size & 0xff, pkt_size >> 8};
1715 struct mipi_dsi_msg max_pkt_size_msg = {
1716 .channel = msg->channel,
1717 .type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
1718 .tx_len = 2,
1719 .tx_buf = tx,
1720 };
1721
1722 DBG("rlen=%d pkt_size=%d rx_byte=%d",
1723 rlen, pkt_size, rx_byte);
1724
1725 ret = dsi_cmds2buf_tx(msm_host, &max_pkt_size_msg);
1726 if (ret < 2) {
1727 pr_err("%s: Set max pkt size failed, %d\n",
1728 __func__, ret);
1729 return -EINVAL;
1730 }
1731
1732 if ((msm_host->cfg->major == MSM_DSI_VER_MAJOR_6G) &&
1733 (msm_host->cfg->minor >= MSM_DSI_6G_VER_MINOR_V1_1)) {
1734 /* Clear the RDBK_DATA registers */
1735 dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL,
1736 DSI_RDBK_DATA_CTRL_CLR);
1737 wmb(); /* make sure the RDBK registers are cleared */
1738 dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL, 0);
1739 wmb(); /* release cleared status before transfer */
1740 }
1741
1742 ret = dsi_cmds2buf_tx(msm_host, msg);
1743 if (ret < msg->tx_len) {
1744 pr_err("%s: Read cmd Tx failed, %d\n", __func__, ret);
1745 return ret;
1746 }
1747
1748 /*
1749 * once cmd_dma_done interrupt received,
1750 * return data from client is ready and stored
1751 * at RDBK_DATA register already
1752 * since rx fifo is 16 bytes, dcs header is kept at first loop,
1753 * after that dcs header lost during shift into registers
1754 */
1755 dlen = dsi_cmd_dma_rx(msm_host, buf, rx_byte, pkt_size);
1756
1757 if (dlen <= 0)
1758 return 0;
1759
1760 if (short_response)
1761 break;
1762
1763 if (rlen <= data_byte) {
1764 diff = data_byte - rlen;
1765 end = 1;
1766 } else {
1767 diff = 0;
1768 rlen -= data_byte;
1769 }
1770
1771 if (!end) {
1772 dlen -= 2; /* 2 crc */
1773 dlen -= diff;
1774 buf += dlen; /* next start position */
1775 data_byte = 14; /* NOT first read */
1776 if (rlen < data_byte)
1777 pkt_size += rlen;
1778 else
1779 pkt_size += data_byte;
1780 DBG("buf=%p dlen=%d diff=%d", buf, dlen, diff);
1781 }
1782 }
1783
1784 /*
1785 * For single Long read, if the requested rlen < 10,
1786 * we need to shift the start position of rx
1787 * data buffer to skip the bytes which are not
1788 * updated.
1789 */
1790 if (pkt_size < 10 && !short_response)
1791 buf = msm_host->rx_buf + (10 - rlen);
1792 else
1793 buf = msm_host->rx_buf;
1794
1795 cmd = buf[0];
1796 switch (cmd) {
1797 case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
1798 pr_err("%s: rx ACK_ERR_PACLAGE\n", __func__);
1799 ret = 0;
1800 case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
1801 case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
1802 ret = dsi_short_read1_resp(buf, msg);
1803 break;
1804 case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
1805 case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
1806 ret = dsi_short_read2_resp(buf, msg);
1807 break;
1808 case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
1809 case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
1810 ret = dsi_long_read_resp(buf, msg);
1811 break;
1812 default:
1813 pr_warn("%s:Invalid response cmd\n", __func__);
1814 ret = 0;
1815 }
1816
1817 return ret;
1818}
1819
/* Program the command DMA base/length and fire the software trigger.
 * Called by the manager once the staging buffer is ready.
 */
void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 iova, u32 len)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	dsi_write(msm_host, REG_DSI_DMA_BASE, iova);
	dsi_write(msm_host, REG_DSI_DMA_LEN, len);
	dsi_write(msm_host, REG_DSI_TRIG_DMA, 1);

	/* Make sure trigger happens */
	wmb();
}
1831
/* Enable the DSI controller engine.
 *
 * Video vs. command mode is derived from the mode flags reported by
 * the attached DSI device.  Always returns 0 at present.
 */
int msm_dsi_host_enable(struct mipi_dsi_host *host)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	dsi_op_mode_config(msm_host,
		!!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), true);

	/* TODO: clock should be turned off for command mode,
	 * and only turned on before MDP START.
	 * This part of code should be enabled once mdp driver support it.
	 */
	/* if (msm_panel->mode == MSM_DSI_CMD_MODE)
		dsi_clk_ctrl(msm_host, 0); */

	return 0;
}
1848
/* Disable the DSI controller engine.
 *
 * After the operation-mode config is cleared the controller is soft
 * reset (see comment below) so command transfers remain possible.
 * Always returns 0 at present.
 */
int msm_dsi_host_disable(struct mipi_dsi_host *host)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	dsi_op_mode_config(msm_host,
		!!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), false);

	/* Since we have disabled INTF, the video engine won't stop so that
	 * the cmd engine will be blocked.
	 * Reset to disable video engine so that we can send off cmd.
	 */
	dsi_sw_reset(msm_host);

	return 0;
}
1864
/* Power up the DSI host: regulators, clocks, PHY and controller timing.
 *
 * Serialized by dev_mutex and idempotent — returns 0 immediately if
 * the host is already powered on.  On failure everything that was
 * enabled is unwound via the goto ladder.
 *
 * Returns 0 on success or a negative errno.
 */
int msm_dsi_host_power_on(struct mipi_dsi_host *host)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
	u32 clk_pre = 0, clk_post = 0;
	int ret = 0;

	mutex_lock(&msm_host->dev_mutex);
	if (msm_host->power_on) {
		DBG("dsi host already on");
		goto unlock_ret;
	}

	/* Compute link clock rates for the current mode first; the PHY
	 * and link-clock enable below depend on byte_clk_rate.
	 */
	ret = dsi_calc_clk_rate(msm_host);
	if (ret) {
		pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
		goto unlock_ret;
	}

	ret = dsi_host_regulator_enable(msm_host);
	if (ret) {
		pr_err("%s:Failed to enable vregs.ret=%d\n",
			__func__, ret);
		goto unlock_ret;
	}

	/* Bus clocks are needed only while programming the PHY; they are
	 * dropped again right after msm_dsi_manager_phy_enable().
	 */
	ret = dsi_bus_clk_enable(msm_host);
	if (ret) {
		pr_err("%s: failed to enable bus clocks, %d\n", __func__, ret);
		goto fail_disable_reg;
	}

	dsi_phy_sw_reset(msm_host);
	/* bit rate = byte clock * 8; clk_pre/clk_post come back from the
	 * PHY timing calculation and feed dsi_ctrl_config() below.
	 */
	ret = msm_dsi_manager_phy_enable(msm_host->id,
					msm_host->byte_clk_rate * 8,
					clk_get_rate(msm_host->esc_clk),
					&clk_pre, &clk_post);
	dsi_bus_clk_disable(msm_host);
	if (ret) {
		pr_err("%s: failed to enable phy, %d\n", __func__, ret);
		goto fail_disable_reg;
	}

	ret = dsi_clk_ctrl(msm_host, 1);
	if (ret) {
		pr_err("%s: failed to enable clocks. ret=%d\n", __func__, ret);
		goto fail_disable_reg;
	}

	dsi_timing_setup(msm_host);
	dsi_sw_reset(msm_host);
	dsi_ctrl_config(msm_host, true, clk_pre, clk_post);

	/* Optional panel-enable GPIO, if provided in DT. */
	if (msm_host->disp_en_gpio)
		gpiod_set_value(msm_host->disp_en_gpio, 1);

	msm_host->power_on = true;
	mutex_unlock(&msm_host->dev_mutex);

	return 0;

fail_disable_reg:
	dsi_host_regulator_disable(msm_host);
unlock_ret:
	mutex_unlock(&msm_host->dev_mutex);
	return ret;
}
1931
/* Power down the DSI host, reversing msm_dsi_host_power_on().
 *
 * Serialized by dev_mutex and idempotent — a host that is already off
 * is left untouched.  Always returns 0.
 */
int msm_dsi_host_power_off(struct mipi_dsi_host *host)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	mutex_lock(&msm_host->dev_mutex);
	if (!msm_host->power_on) {
		DBG("dsi host already off");
		goto unlock_ret;
	}

	dsi_ctrl_config(msm_host, false, 0, 0);

	if (msm_host->disp_en_gpio)
		gpiod_set_value(msm_host->disp_en_gpio, 0);

	/* Teardown order mirrors power-on: PHY, link clocks, regulators. */
	msm_dsi_manager_phy_disable(msm_host->id);

	dsi_clk_ctrl(msm_host, 0);

	dsi_host_regulator_disable(msm_host);

	DBG("-");

	msm_host->power_on = false;

unlock_ret:
	mutex_unlock(&msm_host->dev_mutex);
	return 0;
}
1961
1962int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
1963 struct drm_display_mode *mode)
1964{
1965 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1966
1967 if (msm_host->mode) {
1968 drm_mode_destroy(msm_host->dev, msm_host->mode);
1969 msm_host->mode = NULL;
1970 }
1971
1972 msm_host->mode = drm_mode_duplicate(msm_host->dev, mode);
1973 if (IS_ERR(msm_host->mode)) {
1974 pr_err("%s: cannot duplicate mode\n", __func__);
1975 return PTR_ERR(msm_host->mode);
1976 }
1977
1978 return 0;
1979}
1980
/* Look up the drm_panel attached to this host via its DT node.
 * @panel_flags: optional out-param, receives the MIPI_DSI_MODE_* flags
 *               recorded when the DSI device attached.
 *
 * Returns the panel, or NULL if it has not been registered yet.
 */
struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host,
	unsigned long *panel_flags)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
	struct drm_panel *panel;

	panel = of_drm_find_panel(msm_host->panel_node);
	if (panel_flags)
			*panel_flags = msm_host->mode_flags;

	return panel;
}
1993
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
new file mode 100644
index 000000000000..ee3ebcaa33f5
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -0,0 +1,705 @@
1/*
2 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include "msm_kms.h"
15#include "dsi.h"
16
/* Global bookkeeping for up to two DSI controllers.
 * @dsi: registered DSI devices, indexed by controller id
 * @is_dual_panel: both links drive one physical panel (left/right split)
 * @is_sync_needed: commands must be mirrored to both links
 * @master_panel_id: id of the link that owns the drm_panel instance
 */
struct msm_dsi_manager {
	struct msm_dsi *dsi[DSI_MAX];

	bool is_dual_panel;
	bool is_sync_needed;
	int master_panel_id;
};

/* Single driver-wide instance; DSI hosts register into it at probe. */
static struct msm_dsi_manager msm_dsim_glb;

/* Convenience accessors for the global dual-panel state. */
#define IS_DUAL_PANEL()		(msm_dsim_glb.is_dual_panel)
#define IS_SYNC_NEEDED()	(msm_dsim_glb.is_sync_needed)
#define IS_MASTER_PANEL(id)	(msm_dsim_glb.master_panel_id == id)
30
/* Return the msm_dsi instance registered under @id (may be NULL). */
static inline struct msm_dsi *dsi_mgr_get_dsi(int id)
{
	return msm_dsim_glb.dsi[id];
}

/* Return the peer link of @id, i.e. the other half of a dual-DSI pair. */
static inline struct msm_dsi *dsi_mgr_get_other_dsi(int id)
{
	return msm_dsim_glb.dsi[(id + 1) % DSI_MAX];
}
40
/* Parse dual-panel related DT properties for controller @id into the
 * global manager state.  Always returns 0 at present.
 */
static int dsi_mgr_parse_dual_panel(struct device_node *np, int id)
{
	struct msm_dsi_manager *msm_dsim = &msm_dsim_glb;

	/* We assume 2 dsi nodes have the same information of dual-panel and
	 * sync-mode, and only one node specifies master in case of dual mode.
	 */
	if (!msm_dsim->is_dual_panel)
		msm_dsim->is_dual_panel = of_property_read_bool(
			np, "qcom,dual-panel-mode");

	if (msm_dsim->is_dual_panel) {
		if (of_property_read_bool(np, "qcom,master-panel"))
			msm_dsim->master_panel_id = id;
		if (!msm_dsim->is_sync_needed)
			msm_dsim->is_sync_needed = of_property_read_bool(
					np, "qcom,sync-dual-panel");
	}

	return 0;
}
62
/* drm_connector wrapper carrying the DSI controller id. */
struct dsi_connector {
	struct drm_connector base;
	int id;
};

/* drm_bridge wrapper carrying the DSI controller id. */
struct dsi_bridge {
	struct drm_bridge base;
	int id;
};

/* Downcast helpers from the embedded drm object to our wrappers. */
#define to_dsi_connector(x) container_of(x, struct dsi_connector, base)
#define to_dsi_bridge(x) container_of(x, struct dsi_bridge, base)
75
/* Recover the DSI controller id from a drm_connector. */
static inline int dsi_mgr_connector_get_id(struct drm_connector *connector)
{
	struct dsi_connector *dsi_connector = to_dsi_connector(connector);
	return dsi_connector->id;
}

/* Recover the DSI controller id from a drm_bridge. */
static int dsi_mgr_bridge_get_id(struct drm_bridge *bridge)
{
	struct dsi_bridge *dsi_bridge = to_dsi_bridge(bridge);
	return dsi_bridge->id;
}
87
/* .detect() callback: report "connected" once a drm_panel has been
 * found for this link.
 *
 * On first successful detection in dual-panel mode this also attaches
 * the tile property and passes the split-display layout down to the
 * kms driver (once both hosts have their panel pointer).
 */
static enum drm_connector_status dsi_mgr_connector_detect(
		struct drm_connector *connector, bool force)
{
	int id = dsi_mgr_connector_get_id(connector);
	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
	struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
	struct msm_drm_private *priv = connector->dev->dev_private;
	struct msm_kms *kms = priv->kms;

	DBG("id=%d", id);
	if (!msm_dsi->panel) {
		msm_dsi->panel = msm_dsi_host_get_panel(msm_dsi->host,
						&msm_dsi->panel_flags);

		/* There is only 1 panel in the global panel list
		 * for dual panel mode. Therefore slave dsi should get
		 * the drm_panel instance from master dsi, and
		 * keep using the panel flags got from the current DSI link.
		 */
		if (!msm_dsi->panel && IS_DUAL_PANEL() &&
			!IS_MASTER_PANEL(id) && other_dsi)
			msm_dsi->panel = msm_dsi_host_get_panel(
					other_dsi->host, NULL);

		if (msm_dsi->panel && IS_DUAL_PANEL())
			drm_object_attach_property(&connector->base,
				connector->dev->mode_config.tile_property, 0);

		/* Set split display info to kms once dual panel is connected
		 * to both hosts
		 */
		if (msm_dsi->panel && IS_DUAL_PANEL() &&
			other_dsi && other_dsi->panel) {
			bool cmd_mode = !(msm_dsi->panel_flags &
						MIPI_DSI_MODE_VIDEO);
			struct drm_encoder *encoder = msm_dsi_get_encoder(
					dsi_mgr_get_dsi(DSI_ENCODER_MASTER));
			struct drm_encoder *slave_enc = msm_dsi_get_encoder(
					dsi_mgr_get_dsi(DSI_ENCODER_SLAVE));

			if (kms->funcs->set_split_display)
				kms->funcs->set_split_display(kms, encoder,
							slave_enc, cmd_mode);
			else
				pr_err("mdp does not support dual panel\n");
		}
	}

	return msm_dsi->panel ? connector_status_connected :
		connector_status_disconnected;
}
139
/* .destroy() callback: unregister and release the drm-side state.
 * The dsi_connector memory itself is devm-managed and freed with the
 * device.
 */
static void dsi_mgr_connector_destroy(struct drm_connector *connector)
{
	DBG("");
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
}
146
/* Rewrite each probed mode to describe one half of a left/right split:
 * halve the pixel clock and all horizontal timings, then regenerate the
 * mode name.  The panel reported the full (combined) resolution.
 */
static void dsi_dual_connector_fix_modes(struct drm_connector *connector)
{
	struct drm_display_mode *mode, *m;

	/* Only support left-right mode */
	list_for_each_entry_safe(mode, m, &connector->probed_modes, head) {
		mode->clock >>= 1;
		mode->hdisplay >>= 1;
		mode->hsync_start >>= 1;
		mode->hsync_end >>= 1;
		mode->htotal >>= 1;
		drm_mode_set_name(mode);
	}
}
161
/* Populate the connector's tile information for dual-DSI so userspace
 * sees the two connectors as one 2x1 tiled monitor.
 *
 * Idempotent: returns 0 immediately if the tile group already exists.
 * Returns 0 on success, -EINVAL if there are no probed modes, or
 * -ENOMEM if the tile group cannot be created.
 */
static int dsi_dual_connector_tile_init(
			struct drm_connector *connector, int id)
{
	struct drm_display_mode *mode;
	/* Fake topology id */
	char topo_id[8] = {'M', 'S', 'M', 'D', 'U', 'D', 'S', 'I'};

	if (connector->tile_group) {
		DBG("Tile property has been initialized");
		return 0;
	}

	/* Use the first mode only for now */
	mode = list_first_entry(&connector->probed_modes,
				struct drm_display_mode,
				head);
	if (!mode)
		return -EINVAL;

	/* Both connectors share the same topo_id, so the second caller
	 * finds the group created by the first one.
	 */
	connector->tile_group = drm_mode_get_tile_group(
					connector->dev, topo_id);
	if (!connector->tile_group)
		connector->tile_group = drm_mode_create_tile_group(
					connector->dev, topo_id);
	if (!connector->tile_group) {
		pr_err("%s: failed to create tile group\n", __func__);
		return -ENOMEM;
	}

	connector->has_tile = true;
	connector->tile_is_single_monitor = true;

	/* mode has been fixed */
	connector->tile_h_size = mode->hdisplay;
	connector->tile_v_size = mode->vdisplay;

	/* Only support left-right mode */
	connector->num_h_tile = 2;
	connector->num_v_tile = 1;

	connector->tile_v_loc = 0;
	connector->tile_h_loc = (id == DSI_RIGHT) ? 1 : 0;

	return 0;
}
207
/* .get_modes() callback: fetch modes from the panel, and in dual-DSI
 * mode fix them up to half resolution and publish tile info.
 *
 * Returns the number of modes, 0 when no panel/modes are available, or
 * a negative errno on tile setup failure.
 */
static int dsi_mgr_connector_get_modes(struct drm_connector *connector)
{
	int id = dsi_mgr_connector_get_id(connector);
	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
	struct drm_panel *panel = msm_dsi->panel;
	int ret, num;

	if (!panel)
		return 0;

	/* Since we have 2 connectors, but only 1 drm_panel in dual DSI mode,
	 * panel should not attach to any connector.
	 * Only temporarily attach panel to the current connector here,
	 * to let panel set mode to this connector.
	 */
	drm_panel_attach(panel, connector);
	num = drm_panel_get_modes(panel);
	drm_panel_detach(panel);
	if (!num)
		return 0;

	if (IS_DUAL_PANEL()) {
		/* report half resolution to user */
		dsi_dual_connector_fix_modes(connector);
		ret = dsi_dual_connector_tile_init(connector, id);
		if (ret)
			return ret;
		ret = drm_mode_connector_set_tile_property(connector);
		if (ret) {
			pr_err("%s: set tile property failed, %d\n",
					__func__, ret);
			return ret;
		}
	}

	return num;
}
245
/* .mode_valid() callback: accept a mode only if the kms driver can
 * produce its pixel clock exactly.
 */
static int dsi_mgr_connector_mode_valid(struct drm_connector *connector,
				struct drm_display_mode *mode)
{
	int id = dsi_mgr_connector_get_id(connector);
	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
	struct drm_encoder *encoder = msm_dsi_get_encoder(msm_dsi);
	struct msm_drm_private *priv = connector->dev->dev_private;
	struct msm_kms *kms = priv->kms;
	long actual, requested;

	DBG("");
	/* mode->clock is in kHz, round_pixclk() wants Hz. */
	requested = 1000 * mode->clock;
	actual = kms->funcs->round_pixclk(kms, requested, encoder);

	DBG("requested=%ld, actual=%ld", requested, actual);
	if (actual != requested)
		return MODE_CLOCK_RANGE;

	return MODE_OK;
}
266
/* .best_encoder() callback: each DSI connector has exactly one encoder. */
static struct drm_encoder *
dsi_mgr_connector_best_encoder(struct drm_connector *connector)
{
	int id = dsi_mgr_connector_get_id(connector);
	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);

	DBG("");
	return msm_dsi_get_encoder(msm_dsi);
}
276
/* .pre_enable() bridge callback: power on host(s) and panel in order.
 *
 * Sequence: power on host 0, power on host 1 (dual), prepare panel,
 * enable host 0, enable host 1 (dual), enable panel.  On any failure
 * the goto ladder unwinds exactly the steps already completed, in
 * reverse order.
 *
 * In dual-panel mode DSI_1 returns immediately: DSI_0 drives the whole
 * sequence for both links.
 */
static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
{
	int id = dsi_mgr_bridge_get_id(bridge);
	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
	struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
	struct mipi_dsi_host *host = msm_dsi->host;
	struct drm_panel *panel = msm_dsi->panel;
	bool is_dual_panel = IS_DUAL_PANEL();
	int ret;

	DBG("id=%d", id);
	if (!panel || (is_dual_panel && (DSI_1 == id)))
		return;

	ret = msm_dsi_host_power_on(host);
	if (ret) {
		pr_err("%s: power on host %d failed, %d\n", __func__, id, ret);
		goto host_on_fail;
	}

	if (is_dual_panel && msm_dsi1) {
		ret = msm_dsi_host_power_on(msm_dsi1->host);
		if (ret) {
			pr_err("%s: power on host1 failed, %d\n",
							__func__, ret);
			goto host1_on_fail;
		}
	}

	/* Always call panel functions once, because even for dual panels,
	 * there is only one drm_panel instance.
	 */
	ret = drm_panel_prepare(panel);
	if (ret) {
		pr_err("%s: prepare panel %d failed, %d\n", __func__, id, ret);
		goto panel_prep_fail;
	}

	ret = msm_dsi_host_enable(host);
	if (ret) {
		pr_err("%s: enable host %d failed, %d\n", __func__, id, ret);
		goto host_en_fail;
	}

	if (is_dual_panel && msm_dsi1) {
		ret = msm_dsi_host_enable(msm_dsi1->host);
		if (ret) {
			pr_err("%s: enable host1 failed, %d\n", __func__, ret);
			goto host1_en_fail;
		}
	}

	ret = drm_panel_enable(panel);
	if (ret) {
		pr_err("%s: enable panel %d failed, %d\n", __func__, id, ret);
		goto panel_en_fail;
	}

	return;

panel_en_fail:
	if (is_dual_panel && msm_dsi1)
		msm_dsi_host_disable(msm_dsi1->host);
host1_en_fail:
	msm_dsi_host_disable(host);
host_en_fail:
	drm_panel_unprepare(panel);
panel_prep_fail:
	if (is_dual_panel && msm_dsi1)
		msm_dsi_host_power_off(msm_dsi1->host);
host1_on_fail:
	msm_dsi_host_power_off(host);
host_on_fail:
	return;
}
352
/* Nothing to do at enable time; all work happens in pre_enable. */
static void dsi_mgr_bridge_enable(struct drm_bridge *bridge)
{
	DBG("");
}

/* Nothing to do at disable time; all work happens in post_disable. */
static void dsi_mgr_bridge_disable(struct drm_bridge *bridge)
{
	DBG("");
}
362
/* .post_disable() bridge callback: reverse of pre_enable.
 *
 * Disables the panel and host(s), then unprepares the panel and powers
 * the host(s) off.  Failures are logged but the teardown continues so
 * as much state as possible is released.  DSI_1 returns immediately in
 * dual-panel mode; DSI_0 handles both links.
 */
static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
{
	int id = dsi_mgr_bridge_get_id(bridge);
	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
	struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
	struct mipi_dsi_host *host = msm_dsi->host;
	struct drm_panel *panel = msm_dsi->panel;
	bool is_dual_panel = IS_DUAL_PANEL();
	int ret;

	DBG("id=%d", id);

	if (!panel || (is_dual_panel && (DSI_1 == id)))
		return;

	ret = drm_panel_disable(panel);
	if (ret)
		pr_err("%s: Panel %d OFF failed, %d\n", __func__, id, ret);

	ret = msm_dsi_host_disable(host);
	if (ret)
		pr_err("%s: host %d disable failed, %d\n", __func__, id, ret);

	if (is_dual_panel && msm_dsi1) {
		ret = msm_dsi_host_disable(msm_dsi1->host);
		if (ret)
			pr_err("%s: host1 disable failed, %d\n", __func__, ret);
	}

	ret = drm_panel_unprepare(panel);
	if (ret)
		pr_err("%s: Panel %d unprepare failed,%d\n", __func__, id, ret);

	ret = msm_dsi_host_power_off(host);
	if (ret)
		pr_err("%s: host %d power off failed,%d\n", __func__, id, ret);

	if (is_dual_panel && msm_dsi1) {
		ret = msm_dsi_host_power_off(msm_dsi1->host);
		if (ret)
			pr_err("%s: host1 power off failed, %d\n",
								__func__, ret);
	}
}
407
/* .mode_set() bridge callback: hand the adjusted mode to the host(s).
 *
 * In dual-panel mode DSI_0 propagates the mode to both hosts and DSI_1
 * is a no-op.
 */
static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge,
		struct drm_display_mode *mode,
		struct drm_display_mode *adjusted_mode)
{
	int id = dsi_mgr_bridge_get_id(bridge);
	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
	struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
	struct mipi_dsi_host *host = msm_dsi->host;
	bool is_dual_panel = IS_DUAL_PANEL();

	DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
		mode->base.id, mode->name,
		mode->vrefresh, mode->clock,
		mode->hdisplay, mode->hsync_start,
		mode->hsync_end, mode->htotal,
		mode->vdisplay, mode->vsync_start,
		mode->vsync_end, mode->vtotal,
		mode->type, mode->flags);

	if (is_dual_panel && (DSI_1 == id))
		return;

	msm_dsi_host_set_display_mode(host, adjusted_mode);
	if (is_dual_panel && other_dsi)
		msm_dsi_host_set_display_mode(other_dsi->host, adjusted_mode);
}
434
/* Connector vtable: atomic helpers plus the DSI-specific callbacks. */
static const struct drm_connector_funcs dsi_mgr_connector_funcs = {
	.dpms = drm_atomic_helper_connector_dpms,
	.detect = dsi_mgr_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = dsi_mgr_connector_destroy,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};

/* Connector probe helpers. */
static const struct drm_connector_helper_funcs dsi_mgr_conn_helper_funcs = {
	.get_modes = dsi_mgr_connector_get_modes,
	.mode_valid = dsi_mgr_connector_mode_valid,
	.best_encoder = dsi_mgr_connector_best_encoder,
};

/* Bridge vtable; enable/disable are stubs, see pre_enable/post_disable. */
static const struct drm_bridge_funcs dsi_mgr_bridge_funcs = {
	.pre_enable = dsi_mgr_bridge_pre_enable,
	.enable = dsi_mgr_bridge_enable,
	.disable = dsi_mgr_bridge_disable,
	.post_disable = dsi_mgr_bridge_post_disable,
	.mode_set = dsi_mgr_bridge_mode_set,
};
458
459/* initialize connector */
460struct drm_connector *msm_dsi_manager_connector_init(u8 id)
461{
462 struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
463 struct drm_connector *connector = NULL;
464 struct dsi_connector *dsi_connector;
465 int ret;
466
467 dsi_connector = devm_kzalloc(msm_dsi->dev->dev,
468 sizeof(*dsi_connector), GFP_KERNEL);
469 if (!dsi_connector) {
470 ret = -ENOMEM;
471 goto fail;
472 }
473
474 dsi_connector->id = id;
475
476 connector = &dsi_connector->base;
477
478 ret = drm_connector_init(msm_dsi->dev, connector,
479 &dsi_mgr_connector_funcs, DRM_MODE_CONNECTOR_DSI);
480 if (ret)
481 goto fail;
482
483 drm_connector_helper_add(connector, &dsi_mgr_conn_helper_funcs);
484
485 /* Enable HPD to let hpd event is handled
486 * when panel is attached to the host.
487 */
488 connector->polled = DRM_CONNECTOR_POLL_HPD;
489
490 /* Display driver doesn't support interlace now. */
491 connector->interlace_allowed = 0;
492 connector->doublescan_allowed = 0;
493
494 ret = drm_connector_register(connector);
495 if (ret)
496 goto fail;
497
498 return connector;
499
500fail:
501 if (connector)
502 dsi_mgr_connector_destroy(connector);
503
504 return ERR_PTR(ret);
505}
506
/* initialize bridge */
/* Allocate and attach the drm_bridge for DSI link @id.
 *
 * Returns the bridge on success or an ERR_PTR() on failure.  Memory is
 * devm-managed; msm_dsi_manager_bridge_destroy() is currently a no-op.
 */
struct drm_bridge *msm_dsi_manager_bridge_init(u8 id)
{
	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
	struct drm_bridge *bridge = NULL;
	struct dsi_bridge *dsi_bridge;
	int ret;

	dsi_bridge = devm_kzalloc(msm_dsi->dev->dev,
				sizeof(*dsi_bridge), GFP_KERNEL);
	if (!dsi_bridge) {
		ret = -ENOMEM;
		goto fail;
	}

	dsi_bridge->id = id;

	bridge = &dsi_bridge->base;
	bridge->funcs = &dsi_mgr_bridge_funcs;

	ret = drm_bridge_attach(msm_dsi->dev, bridge);
	if (ret)
		goto fail;

	return bridge;

fail:
	if (bridge)
		msm_dsi_manager_bridge_destroy(bridge);

	return ERR_PTR(ret);
}
539
/* The bridge wrapper is devm-allocated and drm core owns the attached
 * bridge, so there is currently nothing to tear down here.
 */
void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge)
{
}
543
/* Enable the PHY for link @id at the given bit/escape clock rates.
 * @clk_pre/@clk_post: out-params, receive the calculated D-PHY
 *                     clk-pre/clk-post timing values for the controller.
 *
 * Marks the link's phy_enabled flag so the dual-panel disable logic in
 * msm_dsi_manager_phy_disable() can order the shutdown correctly.
 * Returns 0 on success or a negative errno.
 */
int msm_dsi_manager_phy_enable(int id,
		const unsigned long bit_rate, const unsigned long esc_rate,
		u32 *clk_pre, u32 *clk_post)
{
	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
	struct msm_dsi_phy *phy = msm_dsi->phy;
	int ret;

	ret = msm_dsi_phy_enable(phy, IS_DUAL_PANEL(), bit_rate, esc_rate);
	if (ret)
		return ret;

	msm_dsi->phy_enabled = true;
	msm_dsi_phy_get_clk_pre_post(phy, clk_pre, clk_post);

	return 0;
}
561
/* Disable the PHY for link @id.
 *
 * In dual-panel mode both PHYs are deferred until the second link
 * disables, then shut down slave-first then master (see comment below).
 */
void msm_dsi_manager_phy_disable(int id)
{
	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
	struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER);
	struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE);
	struct msm_dsi_phy *phy = msm_dsi->phy;

	/* disable DSI phy
	 * In dual-dsi configuration, the phy should be disabled for the
	 * first controller only when the second controller is disabled.
	 */
	msm_dsi->phy_enabled = false;
	if (IS_DUAL_PANEL() && mdsi && sdsi) {
		if (!mdsi->phy_enabled && !sdsi->phy_enabled) {
			msm_dsi_phy_disable(sdsi->phy);
			msm_dsi_phy_disable(mdsi->phy);
		}
	} else {
		msm_dsi_phy_disable(phy);
	}
}
583
/* Transfer a DSI command on link @id, handling dual-link sync.
 *
 * When sync is required, writes must be prepared on both hosts and
 * triggered together from DSI_1; DSI_0 therefore just reports success
 * (the transferred length) without touching the hardware.  Reads are
 * never synchronized.
 *
 * Returns the number of bytes transferred or a negative errno.
 */
int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg)
{
	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
	struct msm_dsi *msm_dsi0 = dsi_mgr_get_dsi(DSI_0);
	struct mipi_dsi_host *host = msm_dsi->host;
	bool is_read = (msg->rx_buf && msg->rx_len);
	bool need_sync = (IS_SYNC_NEEDED() && !is_read);
	int ret;

	if (!msg->tx_buf || !msg->tx_len)
		return 0;

	/* In dual master case, panel requires the same commands sent to
	 * both DSI links. Host issues the command trigger to both links
	 * when DSI_1 calls the cmd transfer function, no matter it happens
	 * before or after DSI_0 cmd transfer.
	 */
	if (need_sync && (id == DSI_0))
		return is_read ? msg->rx_len : msg->tx_len;

	if (need_sync && msm_dsi0) {
		ret = msm_dsi_host_xfer_prepare(msm_dsi0->host, msg);
		if (ret) {
			pr_err("%s: failed to prepare non-trigger host, %d\n",
				__func__, ret);
			return ret;
		}
	}
	ret = msm_dsi_host_xfer_prepare(host, msg);
	if (ret) {
		pr_err("%s: failed to prepare host, %d\n", __func__, ret);
		goto restore_host0;
	}

	ret = is_read ? msm_dsi_host_cmd_rx(host, msg) :
			msm_dsi_host_cmd_tx(host, msg);

	msm_dsi_host_xfer_restore(host, msg);

restore_host0:
	if (need_sync && msm_dsi0)
		msm_dsi_host_xfer_restore(msm_dsi0->host, msg);

	return ret;
}
629
/* Fire the DMA trigger for a prepared command buffer on link @id.
 *
 * In sync mode DSI_0 must not trigger on its own (returns false);
 * DSI_1 commits to both hosts.  Returns true if a trigger was issued.
 */
bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 iova, u32 len)
{
	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
	struct msm_dsi *msm_dsi0 = dsi_mgr_get_dsi(DSI_0);
	struct mipi_dsi_host *host = msm_dsi->host;

	if (IS_SYNC_NEEDED() && (id == DSI_0))
		return false;

	if (IS_SYNC_NEEDED() && msm_dsi0)
		msm_dsi_host_cmd_xfer_commit(msm_dsi0->host, iova, len);

	msm_dsi_host_cmd_xfer_commit(host, iova, len);

	return true;
}
646
647int msm_dsi_manager_register(struct msm_dsi *msm_dsi)
648{
649 struct msm_dsi_manager *msm_dsim = &msm_dsim_glb;
650 int id = msm_dsi->id;
651 struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
652 int ret;
653
654 if (id > DSI_MAX) {
655 pr_err("%s: invalid id %d\n", __func__, id);
656 return -EINVAL;
657 }
658
659 if (msm_dsim->dsi[id]) {
660 pr_err("%s: dsi%d already registered\n", __func__, id);
661 return -EBUSY;
662 }
663
664 msm_dsim->dsi[id] = msm_dsi;
665
666 ret = dsi_mgr_parse_dual_panel(msm_dsi->pdev->dev.of_node, id);
667 if (ret) {
668 pr_err("%s: failed to parse dual panel info\n", __func__);
669 return ret;
670 }
671
672 if (!IS_DUAL_PANEL()) {
673 ret = msm_dsi_host_register(msm_dsi->host, true);
674 } else if (!other_dsi) {
675 return 0;
676 } else {
677 struct msm_dsi *mdsi = IS_MASTER_PANEL(id) ?
678 msm_dsi : other_dsi;
679 struct msm_dsi *sdsi = IS_MASTER_PANEL(id) ?
680 other_dsi : msm_dsi;
681 /* Register slave host first, so that slave DSI device
682 * has a chance to probe, and do not block the master
683 * DSI device's probe.
684 * Also, do not check defer for the slave host,
685 * because only master DSI device adds the panel to global
686 * panel list. The panel's device is the master DSI device.
687 */
688 ret = msm_dsi_host_register(sdsi->host, false);
689 if (ret)
690 return ret;
691 ret = msm_dsi_host_register(mdsi->host, true);
692 }
693
694 return ret;
695}
696
/* Undo msm_dsi_manager_register(): unregister the host (if any) and
 * drop the manager's reference to this DSI device.
 */
void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi)
{
	struct msm_dsi_manager *msm_dsim = &msm_dsim_glb;

	if (msm_dsi->host)
		msm_dsi_host_unregister(msm_dsi->host);
	msm_dsim->dsi[msm_dsi->id] = NULL;
}
705
diff --git a/drivers/gpu/drm/msm/dsi/dsi_phy.c b/drivers/gpu/drm/msm/dsi/dsi_phy.c
new file mode 100644
index 000000000000..f0cea8927388
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi_phy.c
@@ -0,0 +1,352 @@
1/*
2 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include "dsi.h"
15#include "dsi.xml.h"
16
/* Thin wrappers over the msm mmio accessors for PHY registers. */
#define dsi_phy_read(offset) msm_readl((offset))
#define dsi_phy_write(offset, data) msm_writel((data), (offset))

/* Calculated MIPI D-PHY timing parameters, in register units, derived
 * from the link bit rate and escape clock rate by dsi_dphy_timing_calc().
 * Field names follow the D-PHY spec parameters (TCLK-PRE, TCLK-POST,
 * THS-ZERO, TA-GO, etc.).
 */
struct dsi_dphy_timing {
	u32 clk_pre;
	u32 clk_post;
	u32 clk_zero;
	u32 clk_trail;
	u32 clk_prepare;
	u32 hs_exit;
	u32 hs_zero;
	u32 hs_prepare;
	u32 hs_trail;
	u32 hs_rqst;
	u32 ta_go;
	u32 ta_sure;
	u32 ta_get;
};
35
/* Per-PHY state and generation-specific enable/disable operations.
 * @base: PHY register block
 * @reg_base: PHY regulator register block
 * @id: PHY index (matches the DSI controller id)
 * @timing: last computed D-PHY timings
 */
struct msm_dsi_phy {
	void __iomem *base;
	void __iomem *reg_base;
	int id;
	struct dsi_dphy_timing timing;
	int (*enable)(struct msm_dsi_phy *phy, bool is_dual_panel,
		const unsigned long bit_rate, const unsigned long esc_rate);
	int (*disable)(struct msm_dsi_phy *phy);
};
45
46#define S_DIV_ROUND_UP(n, d) \
47 (((n) >= 0) ? (((n) + (d) - 1) / (d)) : (((n) - (d) + 1) / (d)))
48
49static inline s32 linear_inter(s32 tmax, s32 tmin, s32 percent,
50 s32 min_result, bool even)
51{
52 s32 v;
53 v = (tmax - tmin) * percent;
54 v = S_DIV_ROUND_UP(v, 100) + tmin;
55 if (even && (v & 0x1))
56 return max_t(s32, min_result, v - 1);
57 else
58 return max_t(s32, min_result, v);
59}
60
/* Compute the TCLK-ZERO timing value.
 * @ui: unit interval (scaled by @coeff), @coeff: fixed-point precision
 * factor, @pcnt: interpolation percentage.
 *
 * Must run after clk_prepare and hs_rqst are set, since both feed the
 * final alignment adjustment.
 */
static void dsi_dphy_timing_calc_clk_zero(struct dsi_dphy_timing *timing,
	s32 ui, s32 coeff, s32 pcnt)
{
	s32 tmax, tmin, clk_z;
	s32 temp;

	/* reset */
	temp = 300 * coeff - ((timing->clk_prepare >> 1) + 1) * 2 * ui;
	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
	if (tmin > 255) {
		tmax = 511;
		clk_z = linear_inter(2 * tmin, tmin, pcnt, 0, true);
	} else {
		tmax = 255;
		clk_z = linear_inter(tmax, tmin, pcnt, 0, true);
	}

	/* adjust */
	/* Round the combined rqst+prepare+zero length up to a multiple of 8
	 * timing units — NOTE(review): presumed hardware alignment
	 * requirement, confirm against the PHY HW documentation.
	 */
	temp = (timing->hs_rqst + timing->clk_prepare + clk_z) & 0x7;
	timing->clk_zero = clk_z + 8 - temp;
}
82
/* Derive all D-PHY timing register values from @bit_rate (HS link) and
 * @esc_rate (escape clock), both in Hz.
 *
 * All intermediate math is fixed-point, scaled by @coeff (1000) to
 * avoid overflow while keeping precision; tmin/tmax bounds come from
 * the D-PHY spec limits for each parameter and linear_inter() picks a
 * point between them.  Returns 0 on success, -EINVAL on zero rates.
 */
static int dsi_dphy_timing_calc(struct dsi_dphy_timing *timing,
	const unsigned long bit_rate, const unsigned long esc_rate)
{
	s32 ui, lpx;
	s32 tmax, tmin;
	s32 pcnt0 = 10;
	s32 pcnt1 = (bit_rate > 1200000000) ? 15 : 10;
	s32 pcnt2 = 10;
	s32 pcnt3 = (bit_rate > 180000000) ? 10 : 40;
	s32 coeff = 1000; /* Precision, should avoid overflow */
	s32 temp;

	if (!bit_rate || !esc_rate)
		return -EINVAL;

	/* ui/lpx are one bit period resp. one escape-clock period, in
	 * nanoseconds scaled by coeff.
	 */
	ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
	lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);

	tmax = S_DIV_ROUND_UP(95 * coeff, ui) - 2;
	tmin = S_DIV_ROUND_UP(38 * coeff, ui) - 2;
	timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, true);

	temp = lpx / ui;
	if (temp & 0x1)
		timing->hs_rqst = temp;
	else
		timing->hs_rqst = max_t(s32, 0, temp - 2);

	/* Calculate clk_zero after clk_prepare and hs_rqst */
	dsi_dphy_timing_calc_clk_zero(timing, ui, coeff, pcnt2);

	temp = 105 * coeff + 12 * ui - 20 * coeff;
	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
	tmin = S_DIV_ROUND_UP(60 * coeff, ui) - 2;
	timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, true);

	temp = 85 * coeff + 6 * ui;
	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
	temp = 40 * coeff + 4 * ui;
	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
	timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, true);

	tmax = 255;
	temp = ((timing->hs_prepare >> 1) + 1) * 2 * ui + 2 * ui;
	temp = 145 * coeff + 10 * ui - temp;
	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
	timing->hs_zero = linear_inter(tmax, tmin, pcnt2, 24, true);

	temp = 105 * coeff + 12 * ui - 20 * coeff;
	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
	temp = 60 * coeff + 4 * ui;
	/* NOTE(review): plain DIV_ROUND_UP here while every other bound
	 * uses S_DIV_ROUND_UP; equivalent as long as temp is positive —
	 * confirm temp cannot go negative for supported rates.
	 */
	tmin = DIV_ROUND_UP(temp, ui) - 2;
	timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, true);

	tmax = 255;
	tmin = S_DIV_ROUND_UP(100 * coeff, ui) - 2;
	timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, true);

	tmax = 63;
	temp = ((timing->hs_exit >> 1) + 1) * 2 * ui;
	temp = 60 * coeff + 52 * ui - 24 * ui - temp;
	tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
	timing->clk_post = linear_inter(tmax, tmin, pcnt2, 0, false);

	tmax = 63;
	temp = ((timing->clk_prepare >> 1) + 1) * 2 * ui;
	temp += ((timing->clk_zero >> 1) + 1) * 2 * ui;
	temp += 8 * ui + lpx;
	tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
	if (tmin > tmax) {
		/* NOTE(review): the final assignment to temp below is a
		 * dead store (temp is not read again); kept as-is.
		 */
		temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false) >> 1;
		timing->clk_pre = temp >> 1;
		temp = (2 * tmax - tmin) * pcnt2;
	} else {
		timing->clk_pre = linear_inter(tmax, tmin, pcnt2, 0, false);
	}

	/* Fixed turnaround values. */
	timing->ta_go = 3;
	timing->ta_sure = 0;
	timing->ta_get = 4;

	DBG("PHY timings: %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
		timing->clk_pre, timing->clk_post, timing->clk_zero,
		timing->clk_trail, timing->clk_prepare, timing->hs_exit,
		timing->hs_zero, timing->hs_prepare, timing->hs_trail,
		timing->hs_rqst);

	return 0;
}
172
/* Switch the 28nm PHY LDO regulator block on or off.
 *
 * The enable write sequence and values follow the hardware programming
 * guide; disabling only clears the calibration power config register.
 */
static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
{
	void __iomem *base = phy->reg_base;

	if (!enable) {
		dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
		return;
	}

	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 1);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x3);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x9);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x7);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20);
}
191
/*
 * Enable and configure the 28nm DSI PHY for the requested link rates.
 *
 * @phy:           PHY instance to program
 * @is_dual_panel: true when two DSI hosts drive a single (dual) panel;
 *                 only the clock-master PHY then drives GLBL_TEST_CTRL
 * @bit_rate:      DSI high-speed bit clock rate, in Hz
 * @esc_rate:      DSI escape clock rate, in Hz
 *
 * Computes the D-PHY timing parameters from the requested rates and writes
 * them, together with fixed strength/lane settings, into the PHY registers.
 * The register write order below follows the hardware programming sequence
 * and should not be reordered.
 *
 * Returns 0 on success, -EINVAL if the timing calculation fails.
 */
static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel,
		const unsigned long bit_rate, const unsigned long esc_rate)
{
	struct dsi_dphy_timing *timing = &phy->timing;
	int i;
	void __iomem *base = phy->base;

	DBG("");

	/* Derive clk/hs prepare/zero/trail etc. from the link rates */
	if (dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) {
		pr_err("%s: D-PHY timing calculation failed\n", __func__);
		return -EINVAL;
	}

	dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_0, 0xff);

	/* Power up the PHY LDO regulator before touching timing registers */
	dsi_28nm_phy_regulator_ctrl(phy, true);

	dsi_phy_write(base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x00);

	/* Program the computed D-PHY timing parameters */
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_0,
		DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_1,
		DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_2,
		DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
	/* clk_zero is a 9-bit value; bit 8 overflows into TIMING_CTRL_3 */
	if (timing->clk_zero & BIT(8))
		dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_3,
			DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8);
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_4,
		DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_5,
		DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_6,
		DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_7,
		DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_8,
		DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_9,
		DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
		DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_10,
		DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_11,
		DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));

	dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_1, 0x00);
	dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);

	dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_1, 0x6);

	/* Per-data-lane configuration (4 data lanes) */
	for (i = 0; i < 4; i++) {
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_0(i), 0);
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_1(i), 0);
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_2(i), 0);
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_3(i), 0);
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_DATAPATH(i), 0);
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_DEBUG_SEL(i), 0);
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_0(i), 0x1);
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_1(i), 0x97);
	}
	/* CFG_4 takes a distinct per-lane value */
	dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(0), 0);
	dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(1), 0x5);
	dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(2), 0xa);
	dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(3), 0xf);

	/* Clock lane configuration */
	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_CFG_1, 0xc0);
	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR0, 0x1);
	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR1, 0xbb);

	dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);

	/*
	 * In dual-panel mode only the clock-master PHY asserts
	 * GLBL_TEST_CTRL; the slave PHY keeps it deasserted.
	 */
	if (is_dual_panel && (phy->id != DSI_CLOCK_MASTER))
		dsi_phy_write(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL, 0x00);
	else
		dsi_phy_write(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL, 0x01);

	return 0;
}
272
273static int dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
274{
275 dsi_phy_write(phy->base + REG_DSI_28nm_PHY_CTRL_0, 0);
276 dsi_28nm_phy_regulator_ctrl(phy, false);
277
278 /*
279 * Wait for the registers writes to complete in order to
280 * ensure that the phy is completely disabled
281 */
282 wmb();
283
284 return 0;
285}
286
/*
 * Wire up the per-generation enable/disable callbacks on a local `phy`
 * pointer by token-pasting @name into dsi_<name>_phy_{enable,disable}.
 * Expects `struct msm_dsi_phy *phy` in the caller's scope.
 */
#define dsi_phy_func_init(name)	\
	do {	\
		phy->enable = dsi_##name##_phy_enable;	\
		phy->disable = dsi_##name##_phy_disable;	\
	} while (0)
292
293struct msm_dsi_phy *msm_dsi_phy_init(struct platform_device *pdev,
294 enum msm_dsi_phy_type type, int id)
295{
296 struct msm_dsi_phy *phy;
297
298 phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL);
299 if (!phy)
300 return NULL;
301
302 phy->base = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
303 if (IS_ERR_OR_NULL(phy->base)) {
304 pr_err("%s: failed to map phy base\n", __func__);
305 return NULL;
306 }
307 phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator", "DSI_PHY_REG");
308 if (IS_ERR_OR_NULL(phy->reg_base)) {
309 pr_err("%s: failed to map phy regulator base\n", __func__);
310 return NULL;
311 }
312
313 switch (type) {
314 case MSM_DSI_PHY_28NM:
315 dsi_phy_func_init(28nm);
316 break;
317 default:
318 pr_err("%s: unsupported type, %d\n", __func__, type);
319 return NULL;
320 }
321
322 phy->id = id;
323
324 return phy;
325}
326
327int msm_dsi_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel,
328 const unsigned long bit_rate, const unsigned long esc_rate)
329{
330 if (!phy || !phy->enable)
331 return -EINVAL;
332 return phy->enable(phy, is_dual_panel, bit_rate, esc_rate);
333}
334
335int msm_dsi_phy_disable(struct msm_dsi_phy *phy)
336{
337 if (!phy || !phy->disable)
338 return -EINVAL;
339 return phy->disable(phy);
340}
341
342void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy,
343 u32 *clk_pre, u32 *clk_post)
344{
345 if (!phy)
346 return;
347 if (clk_pre)
348 *clk_pre = phy->timing.clk_pre;
349 if (clk_post)
350 *clk_post = phy->timing.clk_post;
351}
352
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 9e8d441b61c3..04db4bd1b5b6 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -82,6 +82,9 @@ struct msm_drm_private {
82 */ 82 */
83 struct msm_edp *edp; 83 struct msm_edp *edp;
84 84
85 /* DSI is shared by mdp4 and mdp5 */
86 struct msm_dsi *dsi[2];
87
85 /* when we have more than one 'msm_gpu' these need to be an array: */ 88 /* when we have more than one 'msm_gpu' these need to be an array: */
86 struct msm_gpu *gpu; 89 struct msm_gpu *gpu;
87 struct msm_file_private *lastctx; 90 struct msm_file_private *lastctx;
@@ -236,6 +239,32 @@ void __exit msm_edp_unregister(void);
236int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev, 239int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
237 struct drm_encoder *encoder); 240 struct drm_encoder *encoder);
238 241
242struct msm_dsi;
243enum msm_dsi_encoder_id {
244 MSM_DSI_VIDEO_ENCODER_ID = 0,
245 MSM_DSI_CMD_ENCODER_ID = 1,
246 MSM_DSI_ENCODER_NUM = 2
247};
248#ifdef CONFIG_DRM_MSM_DSI
249void __init msm_dsi_register(void);
250void __exit msm_dsi_unregister(void);
251int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
252 struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM]);
253#else
254static inline void __init msm_dsi_register(void)
255{
256}
257static inline void __exit msm_dsi_unregister(void)
258{
259}
260static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi,
261 struct drm_device *dev,
262 struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM])
263{
264 return -EINVAL;
265}
266#endif
267
239#ifdef CONFIG_DEBUG_FS 268#ifdef CONFIG_DEBUG_FS
240void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m); 269void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
241void msm_gem_describe_objects(struct list_head *list, struct seq_file *m); 270void msm_gem_describe_objects(struct list_head *list, struct seq_file *m);