Diffstat (limited to 'drivers/gpu/drm/msm')
-rw-r--r--  drivers/gpu/drm/msm/Kconfig                        |   1
-rw-r--r--  drivers/gpu/drm/msm/Makefile                       |   2
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c            |  17
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.c                      |   2
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_cfg.c                  |   8
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_cfg.h                  |   2
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_host.c                 |  69
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy.c              |  32
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy.h              |   2
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c         |   4
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c         |   4
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c    |   2
-rw-r--r--  drivers/gpu/drm/msm/edp/edp_connector.c            |  10
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.c                    | 117
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.h                    |  14
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_connector.c          |   8
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c               |   2
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c    |  31
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c            |  44
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h            |   2
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c |   9
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h            | 203
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c            | 113
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c    |  14
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c           |  16
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c            |  26
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c        |  10
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c            | 125
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c            | 339
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h            |  16
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c           | 235
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c            |  22
-rw-r--r--  drivers/gpu/drm/msm/msm_atomic.c                   |  39
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c                      | 270
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h                      |  24
-rw-r--r--  drivers/gpu/drm/msm/msm_fb.c                       |  12
-rw-r--r--  drivers/gpu/drm/msm/msm_fbdev.c                    |  17
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c                      | 139
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.h                      |  23
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_prime.c                |   4
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_shrinker.c             | 168
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c               |  26
-rw-r--r--  drivers/gpu/drm/msm/msm_iommu.c                    |   6
-rw-r--r--  drivers/gpu/drm/msm/msm_kms.h                      |   8
-rw-r--r--  drivers/gpu/drm/msm/msm_perf.c                     |   7
-rw-r--r--  drivers/gpu/drm/msm/msm_rd.c                       |  71
-rw-r--r--  drivers/gpu/drm/msm/msm_ringbuffer.c               |   6
47 files changed, 1590 insertions(+), 731 deletions(-)
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 167a4971f47c..7c7a0314a756 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -10,6 +10,7 @@ config DRM_MSM
 	select SHMEM
 	select TMPFS
 	select QCOM_SCM
+	select SND_SOC_HDMI_CODEC if SND_SOC
 	default y
 	help
 	  DRM/KMS driver for MSM/snapdragon.
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 60cb02624dc0..4e2806cf778c 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -35,6 +35,7 @@ msm-y := \
 	mdp/mdp5/mdp5_crtc.o \
 	mdp/mdp5/mdp5_encoder.o \
 	mdp/mdp5/mdp5_irq.o \
+	mdp/mdp5/mdp5_mdss.o \
 	mdp/mdp5/mdp5_kms.o \
 	mdp/mdp5/mdp5_plane.o \
 	mdp/mdp5/mdp5_smp.o \
@@ -45,6 +46,7 @@ msm-y := \
 	msm_fence.o \
 	msm_gem.o \
 	msm_gem_prime.o \
+	msm_gem_shrinker.o \
 	msm_gem_submit.o \
 	msm_gpu.o \
 	msm_iommu.o \
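
The new msm_gem_shrinker.o hooks msm's GEM buffers into the kernel's memory-reclaim path. For readers unfamiliar with the mechanism, this is the generic shrinker pattern it builds on -- a minimal, self-contained sketch, not the driver's actual implementation (the demo names and the purgeable-page counter are illustrative):

    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/shrinker.h>

    /* Illustrative state: a real driver tracks its purgeable GEM objects. */
    static unsigned long demo_purgeable_pages;

    static unsigned long demo_count(struct shrinker *s, struct shrink_control *sc)
    {
        /* report how many pages could be reclaimed if we were asked */
        return demo_purgeable_pages;
    }

    static unsigned long demo_scan(struct shrinker *s, struct shrink_control *sc)
    {
        unsigned long freed = min(demo_purgeable_pages, sc->nr_to_scan);

        /* a real implementation drops backing pages of purgeable BOs here */
        demo_purgeable_pages -= freed;
        return freed;
    }

    static struct shrinker demo_shrinker = {
        .count_objects = demo_count,
        .scan_objects  = demo_scan,
        .seeks         = DEFAULT_SEEKS,
    };

    static int __init demo_init(void)
    {
        /* the VM core will call count/scan under memory pressure */
        return register_shrinker(&demo_shrinker);
    }
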
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 2aec27dbb5bb..f386f463278d 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -139,7 +139,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 	struct msm_drm_private *priv = gpu->dev->dev_private;
 	struct msm_ringbuffer *ring = gpu->rb;
-	unsigned i, ibs = 0;
+	unsigned i;
 
 	for (i = 0; i < submit->nr_cmds; i++) {
 		switch (submit->cmd[i].type) {
@@ -155,18 +155,11 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 				CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
 			OUT_RING(ring, submit->cmd[i].iova);
 			OUT_RING(ring, submit->cmd[i].size);
-			ibs++;
+			OUT_PKT2(ring);
 			break;
 		}
 	}
 
-	/* on a320, at least, we seem to need to pad things out to an
-	 * even number of qwords to avoid issue w/ CP hanging on wrap-
-	 * around:
-	 */
-	if (ibs % 2)
-		OUT_PKT2(ring);
-
 	OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
 	OUT_RING(ring, submit->fence->seqno);
 
@@ -407,7 +400,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		return ret;
 	}
 
-	adreno_gpu->memptrs = msm_gem_vaddr(adreno_gpu->memptrs_bo);
+	adreno_gpu->memptrs = msm_gem_get_vaddr(adreno_gpu->memptrs_bo);
 	if (IS_ERR(adreno_gpu->memptrs)) {
 		dev_err(drm->dev, "could not vmap memptrs\n");
 		return -ENOMEM;
@@ -426,8 +419,12 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 void adreno_gpu_cleanup(struct adreno_gpu *gpu)
 {
 	if (gpu->memptrs_bo) {
+		if (gpu->memptrs)
+			msm_gem_put_vaddr(gpu->memptrs_bo);
+
 		if (gpu->memptrs_iova)
 			msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
+
 		drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
 	}
 	release_firmware(gpu->pm4);
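
Note the pattern behind the msm_gem_vaddr() -> msm_gem_get_vaddr()/msm_gem_put_vaddr() rename (also applied in dsi_host.c below): vmaps are now refcounted, so the shrinker added in this series can tell when it is safe to reclaim an object's backing pages. A sketch of the pairing discipline, assuming only the get/put contract visible in this diff (the helper itself is hypothetical):

    /* copy_to_bo() is illustrative only; it shows the get/put pairing. */
    static int copy_to_bo(struct drm_gem_object *bo, const void *src, size_t len)
    {
        void *vaddr = msm_gem_get_vaddr(bo);    /* pin the kernel mapping */

        if (IS_ERR(vaddr))
            return PTR_ERR(vaddr);

        memcpy(vaddr, src, len);                /* safe while the mapping is held */

        msm_gem_put_vaddr(bo);                  /* unpin: object may be purged later */
        return 0;
    }
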
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index 6edcd6f57e70..ec572f8389ed 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -29,7 +29,7 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
 	struct platform_device *phy_pdev;
 	struct device_node *phy_node;
 
-	phy_node = of_parse_phandle(pdev->dev.of_node, "qcom,dsi-phy", 0);
+	phy_node = of_parse_phandle(pdev->dev.of_node, "phys", 0);
 	if (!phy_node) {
 		dev_err(&pdev->dev, "cannot find phy device\n");
 		return -ENXIO;
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index 93c1ee094eac..63436d8ee470 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -29,6 +29,8 @@ static const struct msm_dsi_config apq8064_dsi_cfg = {
 	},
 	.bus_clk_names = dsi_v2_bus_clk_names,
 	.num_bus_clks = ARRAY_SIZE(dsi_v2_bus_clk_names),
+	.io_start = { 0x4700000, 0x5800000 },
+	.num_dsi = 2,
 };
 
 static const char * const dsi_6g_bus_clk_names[] = {
@@ -48,6 +50,8 @@ static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = {
 	},
 	.bus_clk_names = dsi_6g_bus_clk_names,
 	.num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names),
+	.io_start = { 0xfd922800, 0xfd922b00 },
+	.num_dsi = 2,
 };
 
 static const char * const dsi_8916_bus_clk_names[] = {
@@ -66,6 +70,8 @@ static const struct msm_dsi_config msm8916_dsi_cfg = {
 	},
 	.bus_clk_names = dsi_8916_bus_clk_names,
 	.num_bus_clks = ARRAY_SIZE(dsi_8916_bus_clk_names),
+	.io_start = { 0x1a98000 },
+	.num_dsi = 1,
 };
 
 static const struct msm_dsi_config msm8994_dsi_cfg = {
@@ -84,6 +90,8 @@ static const struct msm_dsi_config msm8994_dsi_cfg = {
 	},
 	.bus_clk_names = dsi_6g_bus_clk_names,
 	.num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names),
+	.io_start = { 0xfd998000, 0xfd9a0000 },
+	.num_dsi = 2,
 };
 
 static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
index a68c836744a3..eeacc3232494 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -34,6 +34,8 @@ struct msm_dsi_config {
 	struct dsi_reg_config reg_cfg;
 	const char * const *bus_clk_names;
 	const int num_bus_clks;
+	const resource_size_t io_start[DSI_MAX];
+	const int num_dsi;
 };
 
 struct msm_dsi_cfg_handler {
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index a3e47ad83eb3..f05ed0e1f3d6 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -1066,7 +1066,7 @@ static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
 	}
 
 	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
-		data = msm_gem_vaddr(msm_host->tx_gem_obj);
+		data = msm_gem_get_vaddr(msm_host->tx_gem_obj);
 		if (IS_ERR(data)) {
 			ret = PTR_ERR(data);
 			pr_err("%s: get vaddr failed, %d\n", __func__, ret);
@@ -1094,6 +1094,9 @@ static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
 	if (packet.size < len)
 		memset(data + packet.size, 0xff, len - packet.size);
 
+	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G)
+		msm_gem_put_vaddr(msm_host->tx_gem_obj);
+
 	return len;
 }
 
@@ -1543,7 +1546,7 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
 	u32 lane_map[4];
 	int ret, i, len, num_lanes;
 
-	prop = of_find_property(ep, "qcom,data-lane-map", &len);
+	prop = of_find_property(ep, "data-lanes", &len);
 	if (!prop) {
 		dev_dbg(dev, "failed to find data lane mapping\n");
 		return -EINVAL;
@@ -1558,7 +1561,7 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
 
 	msm_host->num_data_lanes = num_lanes;
 
-	ret = of_property_read_u32_array(ep, "qcom,data-lane-map", lane_map,
+	ret = of_property_read_u32_array(ep, "data-lanes", lane_map,
 		num_lanes);
 	if (ret) {
 		dev_err(dev, "failed to read lane data\n");
@@ -1573,8 +1576,19 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
 		const int *swap = supported_data_lane_swaps[i];
 		int j;
 
+		/*
+		 * the data-lanes array we get from DT has a logical->physical
+		 * mapping. The "data lane swap" register field represents
+		 * supported configurations in a physical->logical mapping.
+		 * Translate the DT mapping to what we understand and find a
+		 * configuration that works.
+		 */
 		for (j = 0; j < num_lanes; j++) {
-			if (swap[j] != lane_map[j])
+			if (lane_map[j] < 0 || lane_map[j] > 3)
+				dev_err(dev, "bad physical lane entry %u\n",
+					lane_map[j]);
+
+			if (swap[lane_map[j]] != j)
 				break;
 		}
 
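
To see why the comparison flips to swap[lane_map[j]] != j: suppose DT gives data-lanes = <3 0 1 2>, i.e. logical lane 0 sits on physical lane 3. A hardware table entry describing the same routing in physical->logical terms is swap = {1, 2, 3, 0}, and indeed swap[lane_map[0]] == swap[3] == 0. A minimal standalone check with these illustrative values:

    #include <stdio.h>

    /* DT's data-lanes is a logical->physical map; the hardware table is
     * physical->logical. They describe the same routing exactly when one
     * permutation is the inverse of the other. */
    int main(void)
    {
        const int lane_map[4] = { 3, 0, 1, 2 }; /* logical -> physical (DT) */
        const int swap[4]     = { 1, 2, 3, 0 }; /* physical -> logical (hw) */
        int j, ok = 1;

        for (j = 0; j < 4; j++)
            if (swap[lane_map[j]] != j)
                ok = 0;

        printf("mapping %s\n", ok ? "matches" : "does not match");
        return 0;
    }
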
@@ -1594,20 +1608,13 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
 	struct device_node *endpoint, *device_node;
 	int ret;
 
-	ret = of_property_read_u32(np, "qcom,dsi-host-index", &msm_host->id);
-	if (ret) {
-		dev_err(dev, "%s: host index not specified, ret=%d\n",
-			__func__, ret);
-		return ret;
-	}
-
 	/*
-	 * Get the first endpoint node. In our case, dsi has one output port
-	 * to which the panel is connected. Don't return an error if a port
-	 * isn't defined. It's possible that there is nothing connected to
-	 * the dsi output.
+	 * Get the endpoint of the output port of the DSI host. In our case,
+	 * this is mapped to port number with reg = 1. Don't return an error if
+	 * the remote endpoint isn't defined. It's possible that there is
+	 * nothing connected to the dsi output.
 	 */
-	endpoint = of_graph_get_next_endpoint(np, NULL);
+	endpoint = of_graph_get_endpoint_by_regs(np, 1, -1);
 	if (!endpoint) {
 		dev_dbg(dev, "%s: no endpoint\n", __func__);
 		return 0;
@@ -1648,6 +1655,25 @@ err:
 	return ret;
 }
 
+static int dsi_host_get_id(struct msm_dsi_host *msm_host)
+{
+	struct platform_device *pdev = msm_host->pdev;
+	const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
+	struct resource *res;
+	int i;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dsi_ctrl");
+	if (!res)
+		return -EINVAL;
+
+	for (i = 0; i < cfg->num_dsi; i++) {
+		if (cfg->io_start[i] == res->start)
+			return i;
+	}
+
+	return -EINVAL;
+}
+
 int msm_dsi_host_init(struct msm_dsi *msm_dsi)
 {
 	struct msm_dsi_host *msm_host = NULL;
@@ -1684,6 +1710,13 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
 		goto fail;
 	}
 
+	msm_host->id = dsi_host_get_id(msm_host);
+	if (msm_host->id < 0) {
+		ret = msm_host->id;
+		pr_err("%s: unable to identify DSI host index\n", __func__);
+		goto fail;
+	}
+
 	/* fixup base address by io offset */
 	msm_host->ctrl_base += msm_host->cfg_hnd->cfg->io_offset;
 
@@ -2245,9 +2278,9 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
 	}
 
 	msm_host->mode = drm_mode_duplicate(msm_host->dev, mode);
-	if (IS_ERR(msm_host->mode)) {
+	if (!msm_host->mode) {
 		pr_err("%s: cannot duplicate mode\n", __func__);
-		return PTR_ERR(msm_host->mode);
+		return -ENOMEM;
 	}
 
 	return 0;
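
The last hunk fixes an error-handling bug rather than a rename: drm_mode_duplicate() signals failure with a NULL return, not an ERR_PTR-encoded pointer, so the old IS_ERR()/PTR_ERR() pair could never catch the failure. A sketch contrasting the two kernel conventions, using calls already present in this diff (fragment, error paths abbreviated):

    /* NULL-on-failure allocators: test with !ptr, choose the errno yourself. */
    msm_host->mode = drm_mode_duplicate(msm_host->dev, mode);
    if (!msm_host->mode)
        return -ENOMEM;

    /* ERR_PTR-returning functions: test with IS_ERR(), decode with PTR_ERR(). */
    data = msm_gem_get_vaddr(msm_host->tx_gem_obj);
    if (IS_ERR(data))
        return PTR_ERR(data);
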
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index e2f42d8ea294..f39386ed75e4 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -271,6 +271,30 @@ static const struct of_device_id dsi_phy_dt_match[] = {
 	{}
 };
 
+/*
+ * Currently, we only support one SoC for each PHY type. When we have multiple
+ * SoCs for the same PHY, we can try to make the index searching a bit more
+ * clever.
+ */
+static int dsi_phy_get_id(struct msm_dsi_phy *phy)
+{
+	struct platform_device *pdev = phy->pdev;
+	const struct msm_dsi_phy_cfg *cfg = phy->cfg;
+	struct resource *res;
+	int i;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dsi_phy");
+	if (!res)
+		return -EINVAL;
+
+	for (i = 0; i < cfg->num_dsi_phy; i++) {
+		if (cfg->io_start[i] == res->start)
+			return i;
+	}
+
+	return -EINVAL;
+}
+
 static int dsi_phy_driver_probe(struct platform_device *pdev)
 {
 	struct msm_dsi_phy *phy;
@@ -289,10 +313,10 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
 	phy->cfg = match->data;
 	phy->pdev = pdev;
 
-	ret = of_property_read_u32(dev->of_node,
-		"qcom,dsi-phy-index", &phy->id);
-	if (ret) {
-		dev_err(dev, "%s: PHY index not specified, %d\n",
+	phy->id = dsi_phy_get_id(phy);
+	if (phy->id < 0) {
+		ret = phy->id;
+		dev_err(dev, "%s: couldn't identify PHY index, %d\n",
 			__func__, ret);
 		goto fail;
 	}
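
dsi_host_get_id() above and dsi_phy_get_id() here are the same technique: instead of a nonstandard qcom,dsi-*-index DT property, derive the instance index by matching the block's own MMIO base (from its reg resource) against the per-SoC io_start[] table, whose position doubles as the index. A generalized sketch of the lookup (the helper name and parameters are illustrative):

    static int index_from_reg_base(struct platform_device *pdev, const char *name,
                                   const resource_size_t *io_start, int num)
    {
        struct resource *res;
        int i;

        /* the MMIO base comes from the instance's own "reg" property */
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
        if (!res)
            return -EINVAL;

        for (i = 0; i < num; i++)
            if (io_start[i] == res->start)
                return i;       /* table position == instance id */

        return -EINVAL;
    }
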
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index 0d54ed00386d..f24a85439b94 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -38,6 +38,8 @@ struct msm_dsi_phy_cfg {
 	 * Fill default H/W values in illegal cells, eg. cell {0, 1}.
 	 */
 	bool src_pll_truthtable[DSI_MAX][DSI_MAX];
+	const resource_size_t io_start[DSI_MAX];
+	const int num_dsi_phy;
 };
 
 extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs;
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
index f4bc11af849a..c757e2070cac 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
@@ -145,6 +145,8 @@ const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = {
 	.ops = {
 		.enable = dsi_20nm_phy_enable,
 		.disable = dsi_20nm_phy_disable,
-	}
+	},
+	.io_start = { 0xfd998300, 0xfd9a0300 },
+	.num_dsi_phy = 2,
 };
 
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
index 96d1852af418..63d7fba31380 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
@@ -145,6 +145,8 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = {
 		.enable = dsi_28nm_phy_enable,
 		.disable = dsi_28nm_phy_disable,
 	},
+	.io_start = { 0xfd922b00, 0xfd923100 },
+	.num_dsi_phy = 2,
 };
 
 const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = {
@@ -160,5 +162,7 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = {
 		.enable = dsi_28nm_phy_enable,
 		.disable = dsi_28nm_phy_disable,
 	},
+	.io_start = { 0x1a98500 },
+	.num_dsi_phy = 1,
 };
 
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
index 213355a3e767..7bdb9de54968 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
@@ -192,4 +192,6 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs = {
 		.enable = dsi_28nm_phy_enable,
 		.disable = dsi_28nm_phy_disable,
 	},
+	.io_start = { 0x4700300, 0x5800300 },
+	.num_dsi_phy = 2,
 };
diff --git a/drivers/gpu/drm/msm/edp/edp_connector.c b/drivers/gpu/drm/msm/edp/edp_connector.c
index 72360cd038c0..5960628ceb93 100644
--- a/drivers/gpu/drm/msm/edp/edp_connector.c
+++ b/drivers/gpu/drm/msm/edp/edp_connector.c
@@ -91,15 +91,6 @@ static int edp_connector_mode_valid(struct drm_connector *connector,
 	return MODE_OK;
 }
 
-static struct drm_encoder *
-edp_connector_best_encoder(struct drm_connector *connector)
-{
-	struct edp_connector *edp_connector = to_edp_connector(connector);
-
-	DBG("");
-	return edp_connector->edp->encoder;
-}
-
 static const struct drm_connector_funcs edp_connector_funcs = {
 	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = edp_connector_detect,
@@ -113,7 +104,6 @@ static const struct drm_connector_funcs edp_connector_funcs = {
 static const struct drm_connector_helper_funcs edp_connector_helper_funcs = {
 	.get_modes = edp_connector_get_modes,
 	.mode_valid = edp_connector_mode_valid,
-	.best_encoder = edp_connector_best_encoder,
 };
 
 /* initialize connector */
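
This best_encoder removal (repeated below for HDMI and MDP4 LVDS) leans on a core fallback: for atomic drivers of this era, a NULL .best_encoder makes the helpers pick the connector's first -- and, for these connectors, only -- attached encoder, so the hand-rolled callbacks were dead weight. A simplified sketch of that fallback logic (condensed, not the exact helper source):

    /* roughly what the atomic helpers do when .best_encoder is NULL */
    static struct drm_encoder *pick_encoder(struct drm_connector *connector)
    {
        const struct drm_connector_helper_funcs *funcs =
                connector->helper_private;

        if (funcs && funcs->best_encoder)
            return funcs->best_encoder(connector);

        /* fallback: the first encoder this connector was attached with */
        return drm_encoder_find(connector->dev, connector->encoder_ids[0]);
    }
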
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 51b9ea552f97..973720792236 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -19,6 +19,7 @@
 #include <linux/of_irq.h>
 #include <linux/of_gpio.h>
 
+#include <sound/hdmi-codec.h>
 #include "hdmi.h"
 
 void msm_hdmi_set_mode(struct hdmi *hdmi, bool power_on)
@@ -434,6 +435,111 @@ static int msm_hdmi_get_gpio(struct device_node *of_node, const char *name)
 	return gpio;
 }
 
+/*
+ * HDMI audio codec callbacks
+ */
+static int msm_hdmi_audio_hw_params(struct device *dev, void *data,
+				    struct hdmi_codec_daifmt *daifmt,
+				    struct hdmi_codec_params *params)
+{
+	struct hdmi *hdmi = dev_get_drvdata(dev);
+	unsigned int chan;
+	unsigned int channel_allocation = 0;
+	unsigned int rate;
+	unsigned int level_shift = 0; /* 0dB */
+	bool down_mix = false;
+
+	dev_dbg(dev, "%u Hz, %d bit, %d channels\n", params->sample_rate,
+		params->sample_width, params->cea.channels);
+
+	switch (params->cea.channels) {
+	case 2:
+		/* FR and FL speakers */
+		channel_allocation = 0;
+		chan = MSM_HDMI_AUDIO_CHANNEL_2;
+		break;
+	case 4:
+		/* FC, LFE, FR and FL speakers */
+		channel_allocation = 0x3;
+		chan = MSM_HDMI_AUDIO_CHANNEL_4;
+		break;
+	case 6:
+		/* RR, RL, FC, LFE, FR and FL speakers */
+		channel_allocation = 0x0B;
+		chan = MSM_HDMI_AUDIO_CHANNEL_6;
+		break;
+	case 8:
+		/* FRC, FLC, RR, RL, FC, LFE, FR and FL speakers */
+		channel_allocation = 0x1F;
+		chan = MSM_HDMI_AUDIO_CHANNEL_8;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (params->sample_rate) {
+	case 32000:
+		rate = HDMI_SAMPLE_RATE_32KHZ;
+		break;
+	case 44100:
+		rate = HDMI_SAMPLE_RATE_44_1KHZ;
+		break;
+	case 48000:
+		rate = HDMI_SAMPLE_RATE_48KHZ;
+		break;
+	case 88200:
+		rate = HDMI_SAMPLE_RATE_88_2KHZ;
+		break;
+	case 96000:
+		rate = HDMI_SAMPLE_RATE_96KHZ;
+		break;
+	case 176400:
+		rate = HDMI_SAMPLE_RATE_176_4KHZ;
+		break;
+	case 192000:
+		rate = HDMI_SAMPLE_RATE_192KHZ;
+		break;
+	default:
+		dev_err(dev, "rate[%d] not supported!\n",
+			params->sample_rate);
+		return -EINVAL;
+	}
+
+	msm_hdmi_audio_set_sample_rate(hdmi, rate);
+	msm_hdmi_audio_info_setup(hdmi, 1, chan, channel_allocation,
+				  level_shift, down_mix);
+
+	return 0;
+}
+
+static void msm_hdmi_audio_shutdown(struct device *dev, void *data)
+{
+	struct hdmi *hdmi = dev_get_drvdata(dev);
+
+	msm_hdmi_audio_info_setup(hdmi, 0, 0, 0, 0, 0);
+}
+
+static const struct hdmi_codec_ops msm_hdmi_audio_codec_ops = {
+	.hw_params = msm_hdmi_audio_hw_params,
+	.audio_shutdown = msm_hdmi_audio_shutdown,
+};
+
+static struct hdmi_codec_pdata codec_data = {
+	.ops = &msm_hdmi_audio_codec_ops,
+	.max_i2s_channels = 8,
+	.i2s = 1,
+};
+
+static int msm_hdmi_register_audio_driver(struct hdmi *hdmi, struct device *dev)
+{
+	hdmi->audio_pdev = platform_device_register_data(dev,
+							 HDMI_CODEC_DRV_NAME,
+							 PLATFORM_DEVID_AUTO,
+							 &codec_data,
+							 sizeof(codec_data));
+	return PTR_ERR_OR_ZERO(hdmi->audio_pdev);
+}
+
 static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
 {
 	struct drm_device *drm = dev_get_drvdata(master);
@@ -441,7 +547,7 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
 	static struct hdmi_platform_config *hdmi_cfg;
 	struct hdmi *hdmi;
 	struct device_node *of_node = dev->of_node;
-	int i;
+	int i, err;
 
 	hdmi_cfg = (struct hdmi_platform_config *)
 			of_device_get_match_data(dev);
@@ -468,6 +574,12 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
 		return PTR_ERR(hdmi);
 	priv->hdmi = hdmi;
 
+	err = msm_hdmi_register_audio_driver(hdmi, dev);
+	if (err) {
+		DRM_ERROR("Failed to attach an audio codec %d\n", err);
+		hdmi->audio_pdev = NULL;
+	}
+
 	return 0;
 }
 
@@ -477,6 +589,9 @@ static void msm_hdmi_unbind(struct device *dev, struct device *master,
 	struct drm_device *drm = dev_get_drvdata(master);
 	struct msm_drm_private *priv = drm->dev_private;
 	if (priv->hdmi) {
+		if (priv->hdmi->audio_pdev)
+			platform_device_unregister(priv->hdmi->audio_pdev);
+
 		msm_hdmi_destroy(priv->hdmi);
 		priv->hdmi = NULL;
 	}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index bc7ba0bdee07..accc9a61611d 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -50,6 +50,7 @@ struct hdmi_hdcp_ctrl;
 struct hdmi {
 	struct drm_device *dev;
 	struct platform_device *pdev;
+	struct platform_device *audio_pdev;
 
 	const struct hdmi_platform_config *config;
 
@@ -210,6 +211,19 @@ static inline int msm_hdmi_pll_8996_init(struct platform_device *pdev)
 /*
  * audio:
  */
+/* Supported HDMI Audio channels and rates */
+#define MSM_HDMI_AUDIO_CHANNEL_2	0
+#define MSM_HDMI_AUDIO_CHANNEL_4	1
+#define MSM_HDMI_AUDIO_CHANNEL_6	2
+#define MSM_HDMI_AUDIO_CHANNEL_8	3
+
+#define HDMI_SAMPLE_RATE_32KHZ		0
+#define HDMI_SAMPLE_RATE_44_1KHZ	1
+#define HDMI_SAMPLE_RATE_48KHZ		2
+#define HDMI_SAMPLE_RATE_88_2KHZ	3
+#define HDMI_SAMPLE_RATE_96KHZ		4
+#define HDMI_SAMPLE_RATE_176_4KHZ	5
+#define HDMI_SAMPLE_RATE_192KHZ		6
 
 int msm_hdmi_audio_update(struct hdmi *hdmi);
 int msm_hdmi_audio_info_setup(struct hdmi *hdmi, bool enabled,
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index b15d72683112..a2515b466ce5 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -406,13 +406,6 @@ static int msm_hdmi_connector_mode_valid(struct drm_connector *connector,
 	return 0;
 }
 
-static struct drm_encoder *
-msm_hdmi_connector_best_encoder(struct drm_connector *connector)
-{
-	struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
-	return hdmi_connector->hdmi->encoder;
-}
-
 static const struct drm_connector_funcs hdmi_connector_funcs = {
 	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = hdmi_connector_detect,
@@ -426,7 +419,6 @@ static const struct drm_connector_funcs hdmi_connector_funcs = {
 static const struct drm_connector_helper_funcs msm_hdmi_connector_helper_funcs = {
 	.get_modes = msm_hdmi_connector_get_modes,
 	.mode_valid = msm_hdmi_connector_mode_valid,
-	.best_encoder = msm_hdmi_connector_best_encoder,
 };
 
 /* initialize connector */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
index 0baaaaabd002..6e767979aab3 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
@@ -1430,7 +1430,7 @@ struct hdmi_hdcp_ctrl *msm_hdmi_hdcp_init(struct hdmi *hdmi)
 
 void msm_hdmi_hdcp_destroy(struct hdmi *hdmi)
 {
-	if (hdmi && hdmi->hdcp_ctrl) {
+	if (hdmi) {
 		kfree(hdmi->hdcp_ctrl);
 		hdmi->hdcp_ctrl = NULL;
 	}
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
index 35ad78a1dc1c..24258e3025e3 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
@@ -23,7 +23,6 @@
 
 struct mdp4_dtv_encoder {
 	struct drm_encoder base;
-	struct clk *src_clk;
 	struct clk *hdmi_clk;
 	struct clk *mdp_clk;
 	unsigned long int pixclock;
@@ -179,7 +178,6 @@ static void mdp4_dtv_encoder_disable(struct drm_encoder *encoder)
 	 */
 	mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_EXTERNAL_VSYNC);
 
-	clk_disable_unprepare(mdp4_dtv_encoder->src_clk);
 	clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk);
 	clk_disable_unprepare(mdp4_dtv_encoder->mdp_clk);
 
@@ -208,19 +206,21 @@ static void mdp4_dtv_encoder_enable(struct drm_encoder *encoder)
 
 	bs_set(mdp4_dtv_encoder, 1);
 
-	DBG("setting src_clk=%lu", pc);
+	DBG("setting mdp_clk=%lu", pc);
 
-	ret = clk_set_rate(mdp4_dtv_encoder->src_clk, pc);
+	ret = clk_set_rate(mdp4_dtv_encoder->mdp_clk, pc);
 	if (ret)
-		dev_err(dev->dev, "failed to set src_clk to %lu: %d\n", pc, ret);
-	clk_prepare_enable(mdp4_dtv_encoder->src_clk);
-	ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk);
-	if (ret)
-		dev_err(dev->dev, "failed to enable hdmi_clk: %d\n", ret);
+		dev_err(dev->dev, "failed to set mdp_clk to %lu: %d\n",
+			pc, ret);
+
 	ret = clk_prepare_enable(mdp4_dtv_encoder->mdp_clk);
 	if (ret)
 		dev_err(dev->dev, "failed to enabled mdp_clk: %d\n", ret);
 
+	ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk);
+	if (ret)
+		dev_err(dev->dev, "failed to enable hdmi_clk: %d\n", ret);
+
 	mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 1);
 
 	mdp4_dtv_encoder->enabled = true;
@@ -235,7 +235,7 @@ static const struct drm_encoder_helper_funcs mdp4_dtv_encoder_helper_funcs = {
 long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate)
 {
 	struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
-	return clk_round_rate(mdp4_dtv_encoder->src_clk, rate);
+	return clk_round_rate(mdp4_dtv_encoder->mdp_clk, rate);
 }
 
 /* initialize encoder */
@@ -257,13 +257,6 @@ struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev)
 			DRM_MODE_ENCODER_TMDS, NULL);
 	drm_encoder_helper_add(encoder, &mdp4_dtv_encoder_helper_funcs);
 
-	mdp4_dtv_encoder->src_clk = devm_clk_get(dev->dev, "src_clk");
-	if (IS_ERR(mdp4_dtv_encoder->src_clk)) {
-		dev_err(dev->dev, "failed to get src_clk\n");
-		ret = PTR_ERR(mdp4_dtv_encoder->src_clk);
-		goto fail;
-	}
-
 	mdp4_dtv_encoder->hdmi_clk = devm_clk_get(dev->dev, "hdmi_clk");
 	if (IS_ERR(mdp4_dtv_encoder->hdmi_clk)) {
 		dev_err(dev->dev, "failed to get hdmi_clk\n");
@@ -271,9 +264,9 @@ struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev)
 		goto fail;
 	}
 
-	mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "mdp_clk");
+	mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "tv_clk");
 	if (IS_ERR(mdp4_dtv_encoder->mdp_clk)) {
-		dev_err(dev->dev, "failed to get mdp_clk\n");
+		dev_err(dev->dev, "failed to get tv_clk\n");
 		ret = PTR_ERR(mdp4_dtv_encoder->mdp_clk);
 		goto fail;
 	}
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 67442d50a6c2..7b39e89fbc2b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -106,31 +106,27 @@ out:
 static void mdp4_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
 {
 	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
-	int i, ncrtcs = state->dev->mode_config.num_crtc;
+	int i;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
 
 	mdp4_enable(mdp4_kms);
 
 	/* see 119ecb7fd */
-	for (i = 0; i < ncrtcs; i++) {
-		struct drm_crtc *crtc = state->crtcs[i];
-		if (!crtc)
-			continue;
+	for_each_crtc_in_state(state, crtc, crtc_state, i)
 		drm_crtc_vblank_get(crtc);
-	}
 }
 
 static void mdp4_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
 {
 	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
-	int i, ncrtcs = state->dev->mode_config.num_crtc;
+	int i;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
 
 	/* see 119ecb7fd */
-	for (i = 0; i < ncrtcs; i++) {
-		struct drm_crtc *crtc = state->crtcs[i];
-		if (!crtc)
-			continue;
+	for_each_crtc_in_state(state, crtc, crtc_state, i)
 		drm_crtc_vblank_put(crtc);
-	}
 
 	mdp4_disable(mdp4_kms);
 }
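
for_each_crtc_in_state() visits only the CRTCs that actually carry a state in this commit, which is why the num_crtc bound and the NULL checks disappear. A usage sketch with the 4.7-era macro (the counting function is illustrative):

    static void count_enabled_crtcs(struct drm_atomic_state *state)
    {
        struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;
        int i, enabled = 0;

        /* binds crtc, crtc_state and i on each iteration */
        for_each_crtc_in_state(state, crtc, crtc_state, i) {
            if (crtc_state->enable)
                enabled++;
        }

        DRM_DEBUG("%d enabled crtc(s) in this commit\n", enabled);
    }
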
@@ -162,6 +158,7 @@ static const char * const iommu_ports[] = {
 static void mdp4_destroy(struct msm_kms *kms)
 {
 	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+	struct device *dev = mdp4_kms->dev->dev;
 	struct msm_mmu *mmu = mdp4_kms->mmu;
 
 	if (mmu) {
@@ -171,8 +168,11 @@ static void mdp4_destroy(struct msm_kms *kms)
 
 	if (mdp4_kms->blank_cursor_iova)
 		msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
-	if (mdp4_kms->blank_cursor_bo)
-		drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
+	drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
+
+	if (mdp4_kms->rpm_enabled)
+		pm_runtime_disable(dev);
+
 	kfree(mdp4_kms);
 }
 
@@ -440,7 +440,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
 	struct mdp4_kms *mdp4_kms;
 	struct msm_kms *kms = NULL;
 	struct msm_mmu *mmu;
-	int ret;
+	int irq, ret;
 
 	mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
 	if (!mdp4_kms) {
@@ -461,6 +461,15 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
 		goto fail;
 	}
 
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		ret = irq;
+		dev_err(dev->dev, "failed to get irq: %d\n", ret);
+		goto fail;
+	}
+
+	kms->irq = irq;
+
 	/* NOTE: driver for this regulator still missing upstream.. use
 	 * _get_exclusive() and ignore the error if it does not exist
 	 * (and hope that the bootloader left it on for us)
@@ -496,7 +505,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
 		goto fail;
 	}
 
-	mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "mdp_axi_clk");
+	mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
 	if (IS_ERR(mdp4_kms->axi_clk)) {
 		dev_err(dev->dev, "failed to get axi_clk\n");
 		ret = PTR_ERR(mdp4_kms->axi_clk);
@@ -506,6 +515,9 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
 	clk_set_rate(mdp4_kms->clk, config->max_clk);
 	clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
 
+	pm_runtime_enable(dev->dev);
+	mdp4_kms->rpm_enabled = true;
+
 	/* make sure things are off before attaching iommu (bootloader could
 	 * have left things on, in which case we'll start getting faults if
 	 * we don't disable):
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index c5d045d5680d..25fb83997119 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -47,6 +47,8 @@ struct mdp4_kms {
 
 	struct mdp_irq error_handler;
 
+	bool rpm_enabled;
+
 	/* empty/blank cursor bo to use when cursor is "disabled" */
 	struct drm_gem_object *blank_cursor_bo;
 	uint32_t blank_cursor_iova;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
index 2648cd7631ef..353429b05733 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
@@ -90,14 +90,6 @@ static int mdp4_lvds_connector_mode_valid(struct drm_connector *connector,
 	return MODE_OK;
 }
 
-static struct drm_encoder *
-mdp4_lvds_connector_best_encoder(struct drm_connector *connector)
-{
-	struct mdp4_lvds_connector *mdp4_lvds_connector =
-			to_mdp4_lvds_connector(connector);
-	return mdp4_lvds_connector->encoder;
-}
-
 static const struct drm_connector_funcs mdp4_lvds_connector_funcs = {
 	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = mdp4_lvds_connector_detect,
@@ -111,7 +103,6 @@ static const struct drm_connector_funcs mdp4_lvds_connector_funcs = {
 static const struct drm_connector_helper_funcs mdp4_lvds_connector_helper_funcs = {
 	.get_modes = mdp4_lvds_connector_get_modes,
 	.mode_valid = mdp4_lvds_connector_mode_valid,
-	.best_encoder = mdp4_lvds_connector_best_encoder,
 };
 
 /* initialize connector */
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
index b275ce11b24b..ca6ca30650a0 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
@@ -8,19 +8,11 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
-
-Copyright (C) 2013-2015 by the following authors:
+- /local/mnt/workspace/source_trees/envytools/rnndb/../rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-05-10 05:06:30)
+- /local/mnt/workspace/source_trees/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-05-09 06:32:54)
+- /local/mnt/workspace/source_trees/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2016-01-07 08:45:55)
+
+Copyright (C) 2013-2016 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 - Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
@@ -198,118 +190,109 @@ static inline uint32_t MDSS_HW_VERSION_MAJOR(uint32_t val)
 #define MDSS_HW_INTR_STATUS_INTR_HDMI 0x00000100
 #define MDSS_HW_INTR_STATUS_INTR_EDP 0x00001000
 
-static inline uint32_t __offset_MDP(uint32_t idx)
-{
-	switch (idx) {
-	case 0: return (mdp5_cfg->mdp.base[0]);
-	default: return INVALID_IDX(idx);
-	}
-}
-static inline uint32_t REG_MDP5_MDP(uint32_t i0) { return 0x00000000 + __offset_MDP(i0); }
-
-static inline uint32_t REG_MDP5_MDP_HW_VERSION(uint32_t i0) { return 0x00000000 + __offset_MDP(i0); }
-#define MDP5_MDP_HW_VERSION_STEP__MASK 0x0000ffff
-#define MDP5_MDP_HW_VERSION_STEP__SHIFT 0
-static inline uint32_t MDP5_MDP_HW_VERSION_STEP(uint32_t val)
+#define REG_MDP5_HW_VERSION 0x00000000
+#define MDP5_HW_VERSION_STEP__MASK 0x0000ffff
+#define MDP5_HW_VERSION_STEP__SHIFT 0
+static inline uint32_t MDP5_HW_VERSION_STEP(uint32_t val)
 {
-	return ((val) << MDP5_MDP_HW_VERSION_STEP__SHIFT) & MDP5_MDP_HW_VERSION_STEP__MASK;
+	return ((val) << MDP5_HW_VERSION_STEP__SHIFT) & MDP5_HW_VERSION_STEP__MASK;
 }
-#define MDP5_MDP_HW_VERSION_MINOR__MASK 0x0fff0000
-#define MDP5_MDP_HW_VERSION_MINOR__SHIFT 16
-static inline uint32_t MDP5_MDP_HW_VERSION_MINOR(uint32_t val)
+#define MDP5_HW_VERSION_MINOR__MASK 0x0fff0000
+#define MDP5_HW_VERSION_MINOR__SHIFT 16
+static inline uint32_t MDP5_HW_VERSION_MINOR(uint32_t val)
 {
-	return ((val) << MDP5_MDP_HW_VERSION_MINOR__SHIFT) & MDP5_MDP_HW_VERSION_MINOR__MASK;
+	return ((val) << MDP5_HW_VERSION_MINOR__SHIFT) & MDP5_HW_VERSION_MINOR__MASK;
 }
-#define MDP5_MDP_HW_VERSION_MAJOR__MASK 0xf0000000
-#define MDP5_MDP_HW_VERSION_MAJOR__SHIFT 28
-static inline uint32_t MDP5_MDP_HW_VERSION_MAJOR(uint32_t val)
+#define MDP5_HW_VERSION_MAJOR__MASK 0xf0000000
+#define MDP5_HW_VERSION_MAJOR__SHIFT 28
+static inline uint32_t MDP5_HW_VERSION_MAJOR(uint32_t val)
 {
-	return ((val) << MDP5_MDP_HW_VERSION_MAJOR__SHIFT) & MDP5_MDP_HW_VERSION_MAJOR__MASK;
+	return ((val) << MDP5_HW_VERSION_MAJOR__SHIFT) & MDP5_HW_VERSION_MAJOR__MASK;
 }
 
-static inline uint32_t REG_MDP5_MDP_DISP_INTF_SEL(uint32_t i0) { return 0x00000004 + __offset_MDP(i0); }
-#define MDP5_MDP_DISP_INTF_SEL_INTF0__MASK 0x000000ff
-#define MDP5_MDP_DISP_INTF_SEL_INTF0__SHIFT 0
-static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF0(enum mdp5_intf_type val)
+#define REG_MDP5_DISP_INTF_SEL 0x00000004
+#define MDP5_DISP_INTF_SEL_INTF0__MASK 0x000000ff
+#define MDP5_DISP_INTF_SEL_INTF0__SHIFT 0
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF0(enum mdp5_intf_type val)
 {
-	return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF0__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF0__MASK;
+	return ((val) << MDP5_DISP_INTF_SEL_INTF0__SHIFT) & MDP5_DISP_INTF_SEL_INTF0__MASK;
 }
-#define MDP5_MDP_DISP_INTF_SEL_INTF1__MASK 0x0000ff00
-#define MDP5_MDP_DISP_INTF_SEL_INTF1__SHIFT 8
-static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF1(enum mdp5_intf_type val)
+#define MDP5_DISP_INTF_SEL_INTF1__MASK 0x0000ff00
+#define MDP5_DISP_INTF_SEL_INTF1__SHIFT 8
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF1(enum mdp5_intf_type val)
 {
-	return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF1__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF1__MASK;
+	return ((val) << MDP5_DISP_INTF_SEL_INTF1__SHIFT) & MDP5_DISP_INTF_SEL_INTF1__MASK;
 }
-#define MDP5_MDP_DISP_INTF_SEL_INTF2__MASK 0x00ff0000
-#define MDP5_MDP_DISP_INTF_SEL_INTF2__SHIFT 16
-static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF2(enum mdp5_intf_type val)
+#define MDP5_DISP_INTF_SEL_INTF2__MASK 0x00ff0000
+#define MDP5_DISP_INTF_SEL_INTF2__SHIFT 16
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF2(enum mdp5_intf_type val)
 {
-	return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF2__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF2__MASK;
+	return ((val) << MDP5_DISP_INTF_SEL_INTF2__SHIFT) & MDP5_DISP_INTF_SEL_INTF2__MASK;
 }
-#define MDP5_MDP_DISP_INTF_SEL_INTF3__MASK 0xff000000
-#define MDP5_MDP_DISP_INTF_SEL_INTF3__SHIFT 24
-static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF3(enum mdp5_intf_type val)
+#define MDP5_DISP_INTF_SEL_INTF3__MASK 0xff000000
+#define MDP5_DISP_INTF_SEL_INTF3__SHIFT 24
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF3(enum mdp5_intf_type val)
 {
-	return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF3__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF3__MASK;
+	return ((val) << MDP5_DISP_INTF_SEL_INTF3__SHIFT) & MDP5_DISP_INTF_SEL_INTF3__MASK;
 }
 
-static inline uint32_t REG_MDP5_MDP_INTR_EN(uint32_t i0) { return 0x00000010 + __offset_MDP(i0); }
+#define REG_MDP5_INTR_EN 0x00000010
 
-static inline uint32_t REG_MDP5_MDP_INTR_STATUS(uint32_t i0) { return 0x00000014 + __offset_MDP(i0); }
+#define REG_MDP5_INTR_STATUS 0x00000014
 
-static inline uint32_t REG_MDP5_MDP_INTR_CLEAR(uint32_t i0) { return 0x00000018 + __offset_MDP(i0); }
+#define REG_MDP5_INTR_CLEAR 0x00000018
 
-static inline uint32_t REG_MDP5_MDP_HIST_INTR_EN(uint32_t i0) { return 0x0000001c + __offset_MDP(i0); }
+#define REG_MDP5_HIST_INTR_EN 0x0000001c
 
-static inline uint32_t REG_MDP5_MDP_HIST_INTR_STATUS(uint32_t i0) { return 0x00000020 + __offset_MDP(i0); }
+#define REG_MDP5_HIST_INTR_STATUS 0x00000020
 
-static inline uint32_t REG_MDP5_MDP_HIST_INTR_CLEAR(uint32_t i0) { return 0x00000024 + __offset_MDP(i0); }
+#define REG_MDP5_HIST_INTR_CLEAR 0x00000024
 
-static inline uint32_t REG_MDP5_MDP_SPARE_0(uint32_t i0) { return 0x00000028 + __offset_MDP(i0); }
-#define MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN 0x00000001
+#define REG_MDP5_SPARE_0 0x00000028
+#define MDP5_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN 0x00000001
 
-static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_W(uint32_t i0, uint32_t i1) { return 0x00000080 + __offset_MDP(i0) + 0x4*i1; }
+static inline uint32_t REG_MDP5_SMP_ALLOC_W(uint32_t i0) { return 0x00000080 + 0x4*i0; }
 
-static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_W_REG(uint32_t i0, uint32_t i1) { return 0x00000080 + __offset_MDP(i0) + 0x4*i1; }
-#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__MASK 0x000000ff
-#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__SHIFT 0
-static inline uint32_t MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0(uint32_t val)
+static inline uint32_t REG_MDP5_SMP_ALLOC_W_REG(uint32_t i0) { return 0x00000080 + 0x4*i0; }
+#define MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK 0x000000ff
+#define MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT 0
+static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT0(uint32_t val)
 {
-	return ((val) << MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__SHIFT) & MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__MASK;
+	return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
 }
-#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__MASK 0x0000ff00
-#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__SHIFT 8
-static inline uint32_t MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1(uint32_t val)
+#define MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK 0x0000ff00
+#define MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT 8
+static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT1(uint32_t val)
 {
-	return ((val) << MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__SHIFT) & MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__MASK;
+	return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
 }
-#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__MASK 0x00ff0000
-#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__SHIFT 16
-static inline uint32_t MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2(uint32_t val)
+#define MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK 0x00ff0000
+#define MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT 16
+static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT2(uint32_t val)
 {
-	return ((val) << MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__SHIFT) & MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__MASK;
+	return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
 }
 
-static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_R(uint32_t i0, uint32_t i1) { return 0x00000130 + __offset_MDP(i0) + 0x4*i1; }
+static inline uint32_t REG_MDP5_SMP_ALLOC_R(uint32_t i0) { return 0x00000130 + 0x4*i0; }
 
-static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_R_REG(uint32_t i0, uint32_t i1) { return 0x00000130 + __offset_MDP(i0) + 0x4*i1; }
-#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__MASK 0x000000ff
-#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__SHIFT 0
-static inline uint32_t MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0(uint32_t val)
+static inline uint32_t REG_MDP5_SMP_ALLOC_R_REG(uint32_t i0) { return 0x00000130 + 0x4*i0; }
+#define MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK 0x000000ff
+#define MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT 0
+static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT0(uint32_t val)
 {
-	return ((val) << MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__SHIFT) & MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__MASK;
+	return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK;
 }
-#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__MASK 0x0000ff00
-#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__SHIFT 8
-static inline uint32_t MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1(uint32_t val)
+#define MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK 0x0000ff00
+#define MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT 8
+static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT1(uint32_t val)
 {
-	return ((val) << MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__SHIFT) & MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__MASK;
+	return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK;
 }
-#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__MASK 0x00ff0000
-#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__SHIFT 16
-static inline uint32_t MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2(uint32_t val)
+#define MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK 0x00ff0000
+#define MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT 16
+static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT2(uint32_t val)
 {
-	return ((val) << MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__SHIFT) & MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__MASK;
+	return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK;
 }
 
 static inline uint32_t __offset_IGC(enum mdp5_igc_type idx)
@@ -322,35 +305,35 @@ static inline uint32_t __offset_IGC(enum mdp5_igc_type idx)
322 default: return INVALID_IDX(idx); 305 default: return INVALID_IDX(idx);
323 } 306 }
324} 307}
325static inline uint32_t REG_MDP5_MDP_IGC(uint32_t i0, enum mdp5_igc_type i1) { return 0x00000000 + __offset_MDP(i0) + __offset_IGC(i1); } 308static inline uint32_t REG_MDP5_IGC(enum mdp5_igc_type i0) { return 0x00000000 + __offset_IGC(i0); }
326 309
327static inline uint32_t REG_MDP5_MDP_IGC_LUT(uint32_t i0, enum mdp5_igc_type i1, uint32_t i2) { return 0x00000000 + __offset_MDP(i0) + __offset_IGC(i1) + 0x4*i2; } 310static inline uint32_t REG_MDP5_IGC_LUT(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; }
328 311
329static inline uint32_t REG_MDP5_MDP_IGC_LUT_REG(uint32_t i0, enum mdp5_igc_type i1, uint32_t i2) { return 0x00000000 + __offset_MDP(i0) + __offset_IGC(i1) + 0x4*i2; } 312static inline uint32_t REG_MDP5_IGC_LUT_REG(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; }
330#define MDP5_MDP_IGC_LUT_REG_VAL__MASK 0x00000fff 313#define MDP5_IGC_LUT_REG_VAL__MASK 0x00000fff
331#define MDP5_MDP_IGC_LUT_REG_VAL__SHIFT 0 314#define MDP5_IGC_LUT_REG_VAL__SHIFT 0
332static inline uint32_t MDP5_MDP_IGC_LUT_REG_VAL(uint32_t val) 315static inline uint32_t MDP5_IGC_LUT_REG_VAL(uint32_t val)
333{ 316{
334 return ((val) << MDP5_MDP_IGC_LUT_REG_VAL__SHIFT) & MDP5_MDP_IGC_LUT_REG_VAL__MASK; 317 return ((val) << MDP5_IGC_LUT_REG_VAL__SHIFT) & MDP5_IGC_LUT_REG_VAL__MASK;
335} 318}
336#define MDP5_MDP_IGC_LUT_REG_INDEX_UPDATE 0x02000000 319#define MDP5_IGC_LUT_REG_INDEX_UPDATE 0x02000000
337#define MDP5_MDP_IGC_LUT_REG_DISABLE_PIPE_0 0x10000000 320#define MDP5_IGC_LUT_REG_DISABLE_PIPE_0 0x10000000
338#define MDP5_MDP_IGC_LUT_REG_DISABLE_PIPE_1 0x20000000 321#define MDP5_IGC_LUT_REG_DISABLE_PIPE_1 0x20000000
339#define MDP5_MDP_IGC_LUT_REG_DISABLE_PIPE_2 0x40000000 322#define MDP5_IGC_LUT_REG_DISABLE_PIPE_2 0x40000000
340 323
341static inline uint32_t REG_MDP5_MDP_SPLIT_DPL_EN(uint32_t i0) { return 0x000002f4 + __offset_MDP(i0); } 324#define REG_MDP5_SPLIT_DPL_EN 0x000002f4
342 325
343static inline uint32_t REG_MDP5_MDP_SPLIT_DPL_UPPER(uint32_t i0) { return 0x000002f8 + __offset_MDP(i0); } 326#define REG_MDP5_SPLIT_DPL_UPPER 0x000002f8
344#define MDP5_MDP_SPLIT_DPL_UPPER_SMART_PANEL 0x00000002 327#define MDP5_SPLIT_DPL_UPPER_SMART_PANEL 0x00000002
345#define MDP5_MDP_SPLIT_DPL_UPPER_SMART_PANEL_FREE_RUN 0x00000004 328#define MDP5_SPLIT_DPL_UPPER_SMART_PANEL_FREE_RUN 0x00000004
346#define MDP5_MDP_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX 0x00000010 329#define MDP5_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX 0x00000010
347#define MDP5_MDP_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX 0x00000100 330#define MDP5_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX 0x00000100
348 331
349static inline uint32_t REG_MDP5_MDP_SPLIT_DPL_LOWER(uint32_t i0) { return 0x000003f0 + __offset_MDP(i0); } 332#define REG_MDP5_SPLIT_DPL_LOWER 0x000003f0
350#define MDP5_MDP_SPLIT_DPL_LOWER_SMART_PANEL 0x00000002 333#define MDP5_SPLIT_DPL_LOWER_SMART_PANEL 0x00000002
351#define MDP5_MDP_SPLIT_DPL_LOWER_SMART_PANEL_FREE_RUN 0x00000004 334#define MDP5_SPLIT_DPL_LOWER_SMART_PANEL_FREE_RUN 0x00000004
352#define MDP5_MDP_SPLIT_DPL_LOWER_INTF1_TG_SYNC 0x00000010 335#define MDP5_SPLIT_DPL_LOWER_INTF1_TG_SYNC 0x00000010
353#define MDP5_MDP_SPLIT_DPL_LOWER_INTF2_TG_SYNC 0x00000100 336#define MDP5_SPLIT_DPL_LOWER_INTF2_TG_SYNC 0x00000100
354 337
355static inline uint32_t __offset_CTL(uint32_t idx) 338static inline uint32_t __offset_CTL(uint32_t idx)
356{ 339{
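
The renames above are mechanical: with the MDSS wrapper split out of the MDP5 driver, the generated header drops the per-instance __offset_MDP(i0) argument and the MDP_ infix, collapsing the helpers into flat REG_MDP5_* offsets. The mask/shift packers themselves are unchanged. A minimal sketch of how the three byte-wide client fields compose into one ALLOC word (pack_alloc_r is an illustrative name, not part of the driver):

    /* Pack three SMP client IDs into one ALLOC_R word.  Each generated
     * MDP5_SMP_ALLOC_R_REG_CLIENTn() helper shifts its value into place
     * and masks it, so neighbouring fields cannot be clobbered. */
    static uint32_t pack_alloc_r(uint32_t c0, uint32_t c1, uint32_t c2)
    {
            return MDP5_SMP_ALLOC_R_REG_CLIENT0(c0) |
                   MDP5_SMP_ALLOC_R_REG_CLIENT1(c1) |
                   MDP5_SMP_ALLOC_R_REG_CLIENT2(c2);
    }
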
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
index 57f73f0c120d..ac9e4cde1380 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -26,7 +26,6 @@ const struct mdp5_cfg_hw msm8x74v1_config = {
26 .name = "msm8x74v1", 26 .name = "msm8x74v1",
27 .mdp = { 27 .mdp = {
28 .count = 1, 28 .count = 1,
29 .base = { 0x00100 },
30 .caps = MDP_CAP_SMP | 29 .caps = MDP_CAP_SMP |
31 0, 30 0,
32 }, 31 },
@@ -41,12 +40,12 @@ const struct mdp5_cfg_hw msm8x74v1_config = {
41 }, 40 },
42 .ctl = { 41 .ctl = {
43 .count = 5, 42 .count = 5,
44 .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 }, 43 .base = { 0x00500, 0x00600, 0x00700, 0x00800, 0x00900 },
45 .flush_hw_mask = 0x0003ffff, 44 .flush_hw_mask = 0x0003ffff,
46 }, 45 },
47 .pipe_vig = { 46 .pipe_vig = {
48 .count = 3, 47 .count = 3,
49 .base = { 0x01200, 0x01600, 0x01a00 }, 48 .base = { 0x01100, 0x01500, 0x01900 },
50 .caps = MDP_PIPE_CAP_HFLIP | 49 .caps = MDP_PIPE_CAP_HFLIP |
51 MDP_PIPE_CAP_VFLIP | 50 MDP_PIPE_CAP_VFLIP |
52 MDP_PIPE_CAP_SCALE | 51 MDP_PIPE_CAP_SCALE |
@@ -55,7 +54,7 @@ const struct mdp5_cfg_hw msm8x74v1_config = {
55 }, 54 },
56 .pipe_rgb = { 55 .pipe_rgb = {
57 .count = 3, 56 .count = 3,
58 .base = { 0x01e00, 0x02200, 0x02600 }, 57 .base = { 0x01d00, 0x02100, 0x02500 },
59 .caps = MDP_PIPE_CAP_HFLIP | 58 .caps = MDP_PIPE_CAP_HFLIP |
60 MDP_PIPE_CAP_VFLIP | 59 MDP_PIPE_CAP_VFLIP |
61 MDP_PIPE_CAP_SCALE | 60 MDP_PIPE_CAP_SCALE |
@@ -63,26 +62,26 @@ const struct mdp5_cfg_hw msm8x74v1_config = {
63 }, 62 },
64 .pipe_dma = { 63 .pipe_dma = {
65 .count = 2, 64 .count = 2,
66 .base = { 0x02a00, 0x02e00 }, 65 .base = { 0x02900, 0x02d00 },
67 .caps = MDP_PIPE_CAP_HFLIP | 66 .caps = MDP_PIPE_CAP_HFLIP |
68 MDP_PIPE_CAP_VFLIP | 67 MDP_PIPE_CAP_VFLIP |
69 0, 68 0,
70 }, 69 },
71 .lm = { 70 .lm = {
72 .count = 5, 71 .count = 5,
73 .base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 }, 72 .base = { 0x03100, 0x03500, 0x03900, 0x03d00, 0x04100 },
74 .nb_stages = 5, 73 .nb_stages = 5,
75 }, 74 },
76 .dspp = { 75 .dspp = {
77 .count = 3, 76 .count = 3,
78 .base = { 0x04600, 0x04a00, 0x04e00 }, 77 .base = { 0x04500, 0x04900, 0x04d00 },
79 }, 78 },
80 .pp = { 79 .pp = {
81 .count = 3, 80 .count = 3,
82 .base = { 0x21b00, 0x21c00, 0x21d00 }, 81 .base = { 0x21a00, 0x21b00, 0x21c00 },
83 }, 82 },
84 .intf = { 83 .intf = {
85 .base = { 0x21100, 0x21300, 0x21500, 0x21700 }, 84 .base = { 0x21000, 0x21200, 0x21400, 0x21600 },
86 .connect = { 85 .connect = {
87 [0] = INTF_eDP, 86 [0] = INTF_eDP,
88 [1] = INTF_DSI, 87 [1] = INTF_DSI,
@@ -97,7 +96,6 @@ const struct mdp5_cfg_hw msm8x74v2_config = {
97 .name = "msm8x74", 96 .name = "msm8x74",
98 .mdp = { 97 .mdp = {
99 .count = 1, 98 .count = 1,
100 .base = { 0x00100 },
101 .caps = MDP_CAP_SMP | 99 .caps = MDP_CAP_SMP |
102 0, 100 0,
103 }, 101 },
@@ -112,48 +110,48 @@ const struct mdp5_cfg_hw msm8x74v2_config = {
112 }, 110 },
113 .ctl = { 111 .ctl = {
114 .count = 5, 112 .count = 5,
115 .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 }, 113 .base = { 0x00500, 0x00600, 0x00700, 0x00800, 0x00900 },
116 .flush_hw_mask = 0x0003ffff, 114 .flush_hw_mask = 0x0003ffff,
117 }, 115 },
118 .pipe_vig = { 116 .pipe_vig = {
119 .count = 3, 117 .count = 3,
120 .base = { 0x01200, 0x01600, 0x01a00 }, 118 .base = { 0x01100, 0x01500, 0x01900 },
121 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | 119 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
122 MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC | 120 MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
123 MDP_PIPE_CAP_DECIMATION, 121 MDP_PIPE_CAP_DECIMATION,
124 }, 122 },
125 .pipe_rgb = { 123 .pipe_rgb = {
126 .count = 3, 124 .count = 3,
127 .base = { 0x01e00, 0x02200, 0x02600 }, 125 .base = { 0x01d00, 0x02100, 0x02500 },
128 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | 126 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
129 MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION, 127 MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
130 }, 128 },
131 .pipe_dma = { 129 .pipe_dma = {
132 .count = 2, 130 .count = 2,
133 .base = { 0x02a00, 0x02e00 }, 131 .base = { 0x02900, 0x02d00 },
134 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP, 132 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
135 }, 133 },
136 .lm = { 134 .lm = {
137 .count = 5, 135 .count = 5,
138 .base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 }, 136 .base = { 0x03100, 0x03500, 0x03900, 0x03d00, 0x04100 },
139 .nb_stages = 5, 137 .nb_stages = 5,
140 .max_width = 2048, 138 .max_width = 2048,
141 .max_height = 0xFFFF, 139 .max_height = 0xFFFF,
142 }, 140 },
143 .dspp = { 141 .dspp = {
144 .count = 3, 142 .count = 3,
145 .base = { 0x04600, 0x04a00, 0x04e00 }, 143 .base = { 0x04500, 0x04900, 0x04d00 },
146 }, 144 },
147 .ad = { 145 .ad = {
148 .count = 2, 146 .count = 2,
149 .base = { 0x13100, 0x13300 }, 147 .base = { 0x13000, 0x13200 },
150 }, 148 },
151 .pp = { 149 .pp = {
152 .count = 3, 150 .count = 3,
153 .base = { 0x12d00, 0x12e00, 0x12f00 }, 151 .base = { 0x12c00, 0x12d00, 0x12e00 },
154 }, 152 },
155 .intf = { 153 .intf = {
156 .base = { 0x12500, 0x12700, 0x12900, 0x12b00 }, 154 .base = { 0x12400, 0x12600, 0x12800, 0x12a00 },
157 .connect = { 155 .connect = {
158 [0] = INTF_eDP, 156 [0] = INTF_eDP,
159 [1] = INTF_DSI, 157 [1] = INTF_DSI,
@@ -168,7 +166,6 @@ const struct mdp5_cfg_hw apq8084_config = {
168 .name = "apq8084", 166 .name = "apq8084",
169 .mdp = { 167 .mdp = {
170 .count = 1, 168 .count = 1,
171 .base = { 0x00100 },
172 .caps = MDP_CAP_SMP | 169 .caps = MDP_CAP_SMP |
173 0, 170 0,
174 }, 171 },
@@ -190,49 +187,49 @@ const struct mdp5_cfg_hw apq8084_config = {
190 }, 187 },
191 .ctl = { 188 .ctl = {
192 .count = 5, 189 .count = 5,
193 .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 }, 190 .base = { 0x00500, 0x00600, 0x00700, 0x00800, 0x00900 },
194 .flush_hw_mask = 0x003fffff, 191 .flush_hw_mask = 0x003fffff,
195 }, 192 },
196 .pipe_vig = { 193 .pipe_vig = {
197 .count = 4, 194 .count = 4,
198 .base = { 0x01200, 0x01600, 0x01a00, 0x01e00 }, 195 .base = { 0x01100, 0x01500, 0x01900, 0x01d00 },
199 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | 196 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
200 MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC | 197 MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
201 MDP_PIPE_CAP_DECIMATION, 198 MDP_PIPE_CAP_DECIMATION,
202 }, 199 },
203 .pipe_rgb = { 200 .pipe_rgb = {
204 .count = 4, 201 .count = 4,
205 .base = { 0x02200, 0x02600, 0x02a00, 0x02e00 }, 202 .base = { 0x02100, 0x02500, 0x02900, 0x02d00 },
206 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | 203 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
207 MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION, 204 MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
208 }, 205 },
209 .pipe_dma = { 206 .pipe_dma = {
210 .count = 2, 207 .count = 2,
211 .base = { 0x03200, 0x03600 }, 208 .base = { 0x03100, 0x03500 },
212 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP, 209 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
213 }, 210 },
214 .lm = { 211 .lm = {
215 .count = 6, 212 .count = 6,
216 .base = { 0x03a00, 0x03e00, 0x04200, 0x04600, 0x04a00, 0x04e00 }, 213 .base = { 0x03900, 0x03d00, 0x04100, 0x04500, 0x04900, 0x04d00 },
217 .nb_stages = 5, 214 .nb_stages = 5,
218 .max_width = 2048, 215 .max_width = 2048,
219 .max_height = 0xFFFF, 216 .max_height = 0xFFFF,
220 }, 217 },
221 .dspp = { 218 .dspp = {
222 .count = 4, 219 .count = 4,
223 .base = { 0x05200, 0x05600, 0x05a00, 0x05e00 }, 220 .base = { 0x05100, 0x05500, 0x05900, 0x05d00 },
224 221
225 }, 222 },
226 .ad = { 223 .ad = {
227 .count = 3, 224 .count = 3,
228 .base = { 0x13500, 0x13700, 0x13900 }, 225 .base = { 0x13400, 0x13600, 0x13800 },
229 }, 226 },
230 .pp = { 227 .pp = {
231 .count = 4, 228 .count = 4,
232 .base = { 0x12f00, 0x13000, 0x13100, 0x13200 }, 229 .base = { 0x12e00, 0x12f00, 0x13000, 0x13100 },
233 }, 230 },
234 .intf = { 231 .intf = {
235 .base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 }, 232 .base = { 0x12400, 0x12600, 0x12800, 0x12a00, 0x12c00 },
236 .connect = { 233 .connect = {
237 [0] = INTF_eDP, 234 [0] = INTF_eDP,
238 [1] = INTF_DSI, 235 [1] = INTF_DSI,
@@ -247,7 +244,7 @@ const struct mdp5_cfg_hw msm8x16_config = {
247 .name = "msm8x16", 244 .name = "msm8x16",
248 .mdp = { 245 .mdp = {
249 .count = 1, 246 .count = 1,
250 .base = { 0x01000 }, 247 .base = { 0x0 },
251 .caps = MDP_CAP_SMP | 248 .caps = MDP_CAP_SMP |
252 0, 249 0,
253 }, 250 },
@@ -261,41 +258,41 @@ const struct mdp5_cfg_hw msm8x16_config = {
261 }, 258 },
262 .ctl = { 259 .ctl = {
263 .count = 5, 260 .count = 5,
264 .base = { 0x02000, 0x02200, 0x02400, 0x02600, 0x02800 }, 261 .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 },
265 .flush_hw_mask = 0x4003ffff, 262 .flush_hw_mask = 0x4003ffff,
266 }, 263 },
267 .pipe_vig = { 264 .pipe_vig = {
268 .count = 1, 265 .count = 1,
269 .base = { 0x05000 }, 266 .base = { 0x04000 },
270 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | 267 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
271 MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC | 268 MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
272 MDP_PIPE_CAP_DECIMATION, 269 MDP_PIPE_CAP_DECIMATION,
273 }, 270 },
274 .pipe_rgb = { 271 .pipe_rgb = {
275 .count = 2, 272 .count = 2,
276 .base = { 0x15000, 0x17000 }, 273 .base = { 0x14000, 0x16000 },
277 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | 274 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
278 MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION, 275 MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
279 }, 276 },
280 .pipe_dma = { 277 .pipe_dma = {
281 .count = 1, 278 .count = 1,
282 .base = { 0x25000 }, 279 .base = { 0x24000 },
283 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP, 280 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
284 }, 281 },
285 .lm = { 282 .lm = {
286 .count = 2, /* LM0 and LM3 */ 283 .count = 2, /* LM0 and LM3 */
287 .base = { 0x45000, 0x48000 }, 284 .base = { 0x44000, 0x47000 },
288 .nb_stages = 5, 285 .nb_stages = 5,
289 .max_width = 2048, 286 .max_width = 2048,
290 .max_height = 0xFFFF, 287 .max_height = 0xFFFF,
291 }, 288 },
292 .dspp = { 289 .dspp = {
293 .count = 1, 290 .count = 1,
294 .base = { 0x55000 }, 291 .base = { 0x54000 },
295 292
296 }, 293 },
297 .intf = { 294 .intf = {
298 .base = { 0x00000, 0x6b800 }, 295 .base = { 0x00000, 0x6a800 },
299 .connect = { 296 .connect = {
300 [0] = INTF_DISABLED, 297 [0] = INTF_DISABLED,
301 [1] = INTF_DSI, 298 [1] = INTF_DSI,
@@ -308,7 +305,6 @@ const struct mdp5_cfg_hw msm8x94_config = {
308 .name = "msm8x94", 305 .name = "msm8x94",
309 .mdp = { 306 .mdp = {
310 .count = 1, 307 .count = 1,
311 .base = { 0x01000 },
312 .caps = MDP_CAP_SMP | 308 .caps = MDP_CAP_SMP |
313 0, 309 0,
314 }, 310 },
@@ -330,49 +326,49 @@ const struct mdp5_cfg_hw msm8x94_config = {
330 }, 326 },
331 .ctl = { 327 .ctl = {
332 .count = 5, 328 .count = 5,
333 .base = { 0x02000, 0x02200, 0x02400, 0x02600, 0x02800 }, 329 .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 },
334 .flush_hw_mask = 0xf0ffffff, 330 .flush_hw_mask = 0xf0ffffff,
335 }, 331 },
336 .pipe_vig = { 332 .pipe_vig = {
337 .count = 4, 333 .count = 4,
338 .base = { 0x05000, 0x07000, 0x09000, 0x0b000 }, 334 .base = { 0x04000, 0x06000, 0x08000, 0x0a000 },
339 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | 335 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
340 MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC | 336 MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
341 MDP_PIPE_CAP_DECIMATION, 337 MDP_PIPE_CAP_DECIMATION,
342 }, 338 },
343 .pipe_rgb = { 339 .pipe_rgb = {
344 .count = 4, 340 .count = 4,
345 .base = { 0x15000, 0x17000, 0x19000, 0x1b000 }, 341 .base = { 0x14000, 0x16000, 0x18000, 0x1a000 },
346 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | 342 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
347 MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION, 343 MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
348 }, 344 },
349 .pipe_dma = { 345 .pipe_dma = {
350 .count = 2, 346 .count = 2,
351 .base = { 0x25000, 0x27000 }, 347 .base = { 0x24000, 0x26000 },
352 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP, 348 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
353 }, 349 },
354 .lm = { 350 .lm = {
355 .count = 6, 351 .count = 6,
356 .base = { 0x45000, 0x46000, 0x47000, 0x48000, 0x49000, 0x4a000 }, 352 .base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 },
357 .nb_stages = 8, 353 .nb_stages = 8,
358 .max_width = 2048, 354 .max_width = 2048,
359 .max_height = 0xFFFF, 355 .max_height = 0xFFFF,
360 }, 356 },
361 .dspp = { 357 .dspp = {
362 .count = 4, 358 .count = 4,
363 .base = { 0x55000, 0x57000, 0x59000, 0x5b000 }, 359 .base = { 0x54000, 0x56000, 0x58000, 0x5a000 },
364 360
365 }, 361 },
366 .ad = { 362 .ad = {
367 .count = 3, 363 .count = 3,
368 .base = { 0x79000, 0x79800, 0x7a000 }, 364 .base = { 0x78000, 0x78800, 0x79000 },
369 }, 365 },
370 .pp = { 366 .pp = {
371 .count = 4, 367 .count = 4,
372 .base = { 0x71000, 0x71800, 0x72000, 0x72800 }, 368 .base = { 0x70000, 0x70800, 0x71000, 0x71800 },
373 }, 369 },
374 .intf = { 370 .intf = {
375 .base = { 0x6b000, 0x6b800, 0x6c000, 0x6c800, 0x6d000 }, 371 .base = { 0x6a000, 0x6a800, 0x6b000, 0x6b800, 0x6c000 },
376 .connect = { 372 .connect = {
377 [0] = INTF_DISABLED, 373 [0] = INTF_DISABLED,
378 [1] = INTF_DSI, 374 [1] = INTF_DSI,
@@ -387,19 +383,18 @@ const struct mdp5_cfg_hw msm8x96_config = {
387 .name = "msm8x96", 383 .name = "msm8x96",
388 .mdp = { 384 .mdp = {
389 .count = 1, 385 .count = 1,
390 .base = { 0x01000 },
391 .caps = MDP_CAP_DSC | 386 .caps = MDP_CAP_DSC |
392 MDP_CAP_CDM | 387 MDP_CAP_CDM |
393 0, 388 0,
394 }, 389 },
395 .ctl = { 390 .ctl = {
396 .count = 5, 391 .count = 5,
397 .base = { 0x02000, 0x02200, 0x02400, 0x02600, 0x02800 }, 392 .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 },
398 .flush_hw_mask = 0xf4ffffff, 393 .flush_hw_mask = 0xf4ffffff,
399 }, 394 },
400 .pipe_vig = { 395 .pipe_vig = {
401 .count = 4, 396 .count = 4,
402 .base = { 0x05000, 0x07000, 0x09000, 0x0b000 }, 397 .base = { 0x04000, 0x06000, 0x08000, 0x0a000 },
403 .caps = MDP_PIPE_CAP_HFLIP | 398 .caps = MDP_PIPE_CAP_HFLIP |
404 MDP_PIPE_CAP_VFLIP | 399 MDP_PIPE_CAP_VFLIP |
405 MDP_PIPE_CAP_SCALE | 400 MDP_PIPE_CAP_SCALE |
@@ -410,7 +405,7 @@ const struct mdp5_cfg_hw msm8x96_config = {
410 }, 405 },
411 .pipe_rgb = { 406 .pipe_rgb = {
412 .count = 4, 407 .count = 4,
413 .base = { 0x15000, 0x17000, 0x19000, 0x1b000 }, 408 .base = { 0x14000, 0x16000, 0x18000, 0x1a000 },
414 .caps = MDP_PIPE_CAP_HFLIP | 409 .caps = MDP_PIPE_CAP_HFLIP |
415 MDP_PIPE_CAP_VFLIP | 410 MDP_PIPE_CAP_VFLIP |
416 MDP_PIPE_CAP_SCALE | 411 MDP_PIPE_CAP_SCALE |
@@ -420,7 +415,7 @@ const struct mdp5_cfg_hw msm8x96_config = {
420 }, 415 },
421 .pipe_dma = { 416 .pipe_dma = {
422 .count = 2, 417 .count = 2,
423 .base = { 0x25000, 0x27000 }, 418 .base = { 0x24000, 0x26000 },
424 .caps = MDP_PIPE_CAP_HFLIP | 419 .caps = MDP_PIPE_CAP_HFLIP |
425 MDP_PIPE_CAP_VFLIP | 420 MDP_PIPE_CAP_VFLIP |
426 MDP_PIPE_CAP_SW_PIX_EXT | 421 MDP_PIPE_CAP_SW_PIX_EXT |
@@ -428,33 +423,33 @@ const struct mdp5_cfg_hw msm8x96_config = {
428 }, 423 },
429 .lm = { 424 .lm = {
430 .count = 6, 425 .count = 6,
431 .base = { 0x45000, 0x46000, 0x47000, 0x48000, 0x49000, 0x4a000 }, 426 .base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 },
432 .nb_stages = 8, 427 .nb_stages = 8,
433 .max_width = 2560, 428 .max_width = 2560,
434 .max_height = 0xFFFF, 429 .max_height = 0xFFFF,
435 }, 430 },
436 .dspp = { 431 .dspp = {
437 .count = 2, 432 .count = 2,
438 .base = { 0x55000, 0x57000 }, 433 .base = { 0x54000, 0x56000 },
439 }, 434 },
440 .ad = { 435 .ad = {
441 .count = 3, 436 .count = 3,
442 .base = { 0x79000, 0x79800, 0x7a000 }, 437 .base = { 0x78000, 0x78800, 0x79000 },
443 }, 438 },
444 .pp = { 439 .pp = {
445 .count = 4, 440 .count = 4,
446 .base = { 0x71000, 0x71800, 0x72000, 0x72800 }, 441 .base = { 0x70000, 0x70800, 0x71000, 0x71800 },
447 }, 442 },
448 .cdm = { 443 .cdm = {
449 .count = 1, 444 .count = 1,
450 .base = { 0x7a200 }, 445 .base = { 0x79200 },
451 }, 446 },
452 .dsc = { 447 .dsc = {
453 .count = 2, 448 .count = 2,
454 .base = { 0x81000, 0x81400 }, 449 .base = { 0x80000, 0x80400 },
455 }, 450 },
456 .intf = { 451 .intf = {
457 .base = { 0x6b000, 0x6b800, 0x6c000, 0x6c800, 0x6d000 }, 452 .base = { 0x6a000, 0x6a800, 0x6b000, 0x6b800, 0x6c000 },
458 .connect = { 453 .connect = {
459 [0] = INTF_DISABLED, 454 [0] = INTF_DISABLED,
460 [1] = INTF_DSI, 455 [1] = INTF_DSI,
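
Every base-address change in this file tells the same story: offsets are now relative to the MDP sub-block rather than the MDSS wrapper, so each block base drops by the old .mdp.base value (0x100 on msm8x74/apq8084, 0x1000 on msm8x16/msm8x94/msm8x96), and the .mdp.base entry itself goes away (or becomes 0x0 where kept). A hedged before/after sketch, with illustrative mapping pointers:

    /* msm8x16, CTL0: the register never moves, only the mapping base does. */
    void __iomem *mdss_mmio;  /* old: the mapping covered the MDSS region  */
    void __iomem *mdp_mmio;   /* new: the mapping covers the MDP block only */

    writel(val, mdss_mmio + 0x1000 + 0x1000); /* old: mdp.base + ctl.base */
    writel(val, mdp_mmio + 0x1000);           /* new: ctl.base alone      */
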
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
index 69094cb28103..c627ab6d0061 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
@@ -272,22 +272,22 @@ int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
272 * start signal for the slave encoder 272 * start signal for the slave encoder
273 */ 273 */
274 if (intf_num == 1) 274 if (intf_num == 1)
275 data |= MDP5_MDP_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX; 275 data |= MDP5_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX;
276 else if (intf_num == 2) 276 else if (intf_num == 2)
277 data |= MDP5_MDP_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX; 277 data |= MDP5_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX;
278 else 278 else
279 return -EINVAL; 279 return -EINVAL;
280 280
281 /* Smart Panel, Sync mode */ 281 /* Smart Panel, Sync mode */
282 data |= MDP5_MDP_SPLIT_DPL_UPPER_SMART_PANEL; 282 data |= MDP5_SPLIT_DPL_UPPER_SMART_PANEL;
283 283
284 /* Make sure clocks are on when connectors call this function. */ 284 /* Make sure clocks are on when connectors call this function. */
285 mdp5_enable(mdp5_kms); 285 mdp5_enable(mdp5_kms);
286 mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_UPPER(0), data); 286 mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, data);
287 287
288 mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_LOWER(0), 288 mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER,
289 MDP5_MDP_SPLIT_DPL_LOWER_SMART_PANEL); 289 MDP5_SPLIT_DPL_LOWER_SMART_PANEL);
290 mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_EN(0), 1); 290 mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1);
291 mdp5_disable(mdp5_kms); 291 mdp5_disable(mdp5_kms);
292 292
293 return 0; 293 return 0;
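
Only the macro spellings change here, but the programming order is worth noting: both halves of the split display are configured before the enable bit is set, with an explicit clock reference held across the writes. A condensed sketch of the sequence (data holds the UPPER mux and smart-panel bits built just above):

    mdp5_enable(mdp5_kms);                            /* clocks on       */
    mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, data);
    mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER,
               MDP5_SPLIT_DPL_LOWER_SMART_PANEL);     /* slave sync mode */
    mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1);   /* enable last     */
    mdp5_disable(mdp5_kms);
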
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 88fe256c1931..fa2be7ce9468 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -374,6 +374,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
374 struct drm_device *dev = crtc->dev; 374 struct drm_device *dev = crtc->dev;
375 struct plane_state pstates[STAGE_MAX + 1]; 375 struct plane_state pstates[STAGE_MAX + 1];
376 const struct mdp5_cfg_hw *hw_cfg; 376 const struct mdp5_cfg_hw *hw_cfg;
377 const struct drm_plane_state *pstate;
377 int cnt = 0, i; 378 int cnt = 0, i;
378 379
379 DBG("%s: check", mdp5_crtc->name); 380 DBG("%s: check", mdp5_crtc->name);
@@ -382,20 +383,13 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
382 * and that we don't have conflicting mixer stages: 383 * and that we don't have conflicting mixer stages:
383 */ 384 */
384 hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); 385 hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
385 drm_atomic_crtc_state_for_each_plane(plane, state) { 386 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
386 struct drm_plane_state *pstate;
387 if (cnt >= (hw_cfg->lm.nb_stages)) { 387 if (cnt >= (hw_cfg->lm.nb_stages)) {
388 dev_err(dev->dev, "too many planes!\n"); 388 dev_err(dev->dev, "too many planes!\n");
389 return -EINVAL; 389 return -EINVAL;
390 } 390 }
391 391
392 pstate = state->state->plane_states[drm_plane_index(plane)];
393 392
394 /* plane might not have changed, in which case take
395 * current state:
396 */
397 if (!pstate)
398 pstate = plane->state;
399 pstates[cnt].plane = plane; 393 pstates[cnt].plane = plane;
400 pstates[cnt].state = to_mdp5_plane_state(pstate); 394 pstates[cnt].state = to_mdp5_plane_state(pstate);
401 395
@@ -496,8 +490,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
496 struct mdp5_kms *mdp5_kms = get_kms(crtc); 490 struct mdp5_kms *mdp5_kms = get_kms(crtc);
497 struct drm_gem_object *cursor_bo, *old_bo = NULL; 491 struct drm_gem_object *cursor_bo, *old_bo = NULL;
498 uint32_t blendcfg, cursor_addr, stride; 492 uint32_t blendcfg, cursor_addr, stride;
499 int ret, bpp, lm; 493 int ret, lm;
500 unsigned int depth;
501 enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL; 494 enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
502 uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0); 495 uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
503 uint32_t roi_w, roi_h; 496 uint32_t roi_w, roi_h;
@@ -527,8 +520,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
527 return -EINVAL; 520 return -EINVAL;
528 521
529 lm = mdp5_crtc->lm; 522 lm = mdp5_crtc->lm;
530 drm_fb_get_bpp_depth(DRM_FORMAT_ARGB8888, &depth, &bpp); 523 stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0);
531 stride = width * (bpp >> 3);
532 524
533 spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); 525 spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
534 old_bo = mdp5_crtc->cursor.scanout_bo; 526 old_bo = mdp5_crtc->cursor.scanout_bo;
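
Two API migrations land in this file: drm_atomic_crtc_state_for_each_plane_state() hands back the plane state directly, so the open-coded plane_states[] lookup and its !pstate fallback disappear, and drm_format_plane_cpp() returns bytes-per-pixel for one plane of a fourcc format, replacing the removed drm_fb_get_bpp_depth(). A small sketch of the stride computation (cursor_stride is an illustrative wrapper, not driver code):

    #include <drm/drm_fourcc.h>

    /* Bytes per scanline of the ARGB8888 cursor: plane 0 is 4 bytes per
     * pixel, so no separate bpp >> 3 arithmetic is needed any more. */
    static u32 cursor_stride(u32 width)
    {
            return width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0);
    }
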
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
index 4e81ca4f964a..d021edc3b307 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
@@ -118,31 +118,31 @@ static void set_display_intf(struct mdp5_kms *mdp5_kms,
118 u32 intf_sel; 118 u32 intf_sel;
119 119
120 spin_lock_irqsave(&mdp5_kms->resource_lock, flags); 120 spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
121 intf_sel = mdp5_read(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0)); 121 intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);
122 122
123 switch (intf->num) { 123 switch (intf->num) {
124 case 0: 124 case 0:
125 intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF0__MASK; 125 intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK;
126 intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF0(intf->type); 126 intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf->type);
127 break; 127 break;
128 case 1: 128 case 1:
129 intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF1__MASK; 129 intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK;
130 intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF1(intf->type); 130 intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf->type);
131 break; 131 break;
132 case 2: 132 case 2:
133 intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF2__MASK; 133 intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK;
134 intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF2(intf->type); 134 intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf->type);
135 break; 135 break;
136 case 3: 136 case 3:
137 intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF3__MASK; 137 intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK;
138 intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF3(intf->type); 138 intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf->type);
139 break; 139 break;
140 default: 140 default:
141 BUG(); 141 BUG();
142 break; 142 break;
143 } 143 }
144 144
145 mdp5_write(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0), intf_sel); 145 mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
146 spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags); 146 spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
147} 147}
148 148
@@ -557,7 +557,7 @@ int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable)
557 if (!enable) { 557 if (!enable) {
558 ctlx->pair = NULL; 558 ctlx->pair = NULL;
559 ctly->pair = NULL; 559 ctly->pair = NULL;
560 mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0), 0); 560 mdp5_write(mdp5_kms, REG_MDP5_SPARE_0, 0);
561 return 0; 561 return 0;
562 } else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) { 562 } else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) {
563 dev_err(ctl_mgr->dev->dev, "CTLs already paired\n"); 563 dev_err(ctl_mgr->dev->dev, "CTLs already paired\n");
@@ -570,8 +570,8 @@ int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable)
570 ctlx->pair = ctly; 570 ctlx->pair = ctly;
571 ctly->pair = ctlx; 571 ctly->pair = ctlx;
572 572
573 mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0), 573 mdp5_write(mdp5_kms, REG_MDP5_SPARE_0,
574 MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN); 574 MDP5_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN);
575 575
576 return 0; 576 return 0;
577} 577}
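
DISP_INTF_SEL packs one interface-type field per INTF into a single shared word, which is why set_display_intf() does a locked read-modify-write: clear that interface's field, OR in the new type, write back. A hedged sketch of the pattern for one interface (assumes a struct mdp5_kms *mdp5_kms in scope):

    unsigned long flags;
    u32 sel;

    /* resource_lock serialises writers of the shared DISP_INTF_SEL word */
    spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
    sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);
    sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK;      /* clear INTF1's field */
    sel |= MDP5_DISP_INTF_SEL_INTF1(INTF_DSI);   /* set the new type    */
    mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, sel);
    spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
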
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index 1d95f9fd9dc7..fe0c22230883 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -322,18 +322,18 @@ int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
322 * to use the master's enable signal for the slave encoder. 322 * to use the master's enable signal for the slave encoder.
323 */ 323 */
324 if (intf_num == 1) 324 if (intf_num == 1)
325 data |= MDP5_MDP_SPLIT_DPL_LOWER_INTF2_TG_SYNC; 325 data |= MDP5_SPLIT_DPL_LOWER_INTF2_TG_SYNC;
326 else if (intf_num == 2) 326 else if (intf_num == 2)
327 data |= MDP5_MDP_SPLIT_DPL_LOWER_INTF1_TG_SYNC; 327 data |= MDP5_SPLIT_DPL_LOWER_INTF1_TG_SYNC;
328 else 328 else
329 return -EINVAL; 329 return -EINVAL;
330 330
331 /* Make sure clocks are on when connectors call this function. */ 331 /* Make sure clocks are on when connectors call this function. */
332 mdp5_enable(mdp5_kms); 332 mdp5_enable(mdp5_kms);
333 /* Dumb Panel, Sync mode */ 333 /* Dumb Panel, Sync mode */
334 mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_UPPER(0), 0); 334 mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, 0);
335 mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_LOWER(0), data); 335 mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER, data);
336 mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_EN(0), 1); 336 mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1);
337 337
338 mdp5_ctl_pair(mdp5_encoder->ctl, mdp5_slave_enc->ctl, true); 338 mdp5_ctl_pair(mdp5_encoder->ctl, mdp5_slave_enc->ctl, true);
339 339
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
index 73bc3e312fd4..d53e5510fd7c 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -15,7 +15,6 @@
15 * this program. If not, see <http://www.gnu.org/licenses/>. 15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */ 16 */
17 17
18#include <linux/irqdomain.h>
19#include <linux/irq.h> 18#include <linux/irq.h>
20 19
21#include "msm_drv.h" 20#include "msm_drv.h"
@@ -24,9 +23,9 @@
24void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask, 23void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
25 uint32_t old_irqmask) 24 uint32_t old_irqmask)
26{ 25{
27 mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_MDP_INTR_CLEAR(0), 26 mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_INTR_CLEAR,
28 irqmask ^ (irqmask & old_irqmask)); 27 irqmask ^ (irqmask & old_irqmask));
29 mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_MDP_INTR_EN(0), irqmask); 28 mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_INTR_EN, irqmask);
30} 29}
31 30
32static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus) 31static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
@@ -38,8 +37,8 @@ void mdp5_irq_preinstall(struct msm_kms *kms)
38{ 37{
39 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 38 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
40 mdp5_enable(mdp5_kms); 39 mdp5_enable(mdp5_kms);
41 mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_CLEAR(0), 0xffffffff); 40 mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff);
42 mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_EN(0), 0x00000000); 41 mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
43 mdp5_disable(mdp5_kms); 42 mdp5_disable(mdp5_kms);
44} 43}
45 44
@@ -55,7 +54,9 @@ int mdp5_irq_postinstall(struct msm_kms *kms)
55 MDP5_IRQ_INTF2_UNDER_RUN | 54 MDP5_IRQ_INTF2_UNDER_RUN |
56 MDP5_IRQ_INTF3_UNDER_RUN; 55 MDP5_IRQ_INTF3_UNDER_RUN;
57 56
57 mdp5_enable(mdp5_kms);
58 mdp_irq_register(mdp_kms, error_handler); 58 mdp_irq_register(mdp_kms, error_handler);
59 mdp5_disable(mdp5_kms);
59 60
60 return 0; 61 return 0;
61} 62}
@@ -64,21 +65,22 @@ void mdp5_irq_uninstall(struct msm_kms *kms)
64{ 65{
65 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 66 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
66 mdp5_enable(mdp5_kms); 67 mdp5_enable(mdp5_kms);
67 mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_EN(0), 0x00000000); 68 mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
68 mdp5_disable(mdp5_kms); 69 mdp5_disable(mdp5_kms);
69} 70}
70 71
71static void mdp5_irq_mdp(struct mdp_kms *mdp_kms) 72irqreturn_t mdp5_irq(struct msm_kms *kms)
72{ 73{
74 struct mdp_kms *mdp_kms = to_mdp_kms(kms);
73 struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms); 75 struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
74 struct drm_device *dev = mdp5_kms->dev; 76 struct drm_device *dev = mdp5_kms->dev;
75 struct msm_drm_private *priv = dev->dev_private; 77 struct msm_drm_private *priv = dev->dev_private;
76 unsigned int id; 78 unsigned int id;
77 uint32_t status, enable; 79 uint32_t status, enable;
78 80
79 enable = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_EN(0)); 81 enable = mdp5_read(mdp5_kms, REG_MDP5_INTR_EN);
80 status = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_STATUS(0)) & enable; 82 status = mdp5_read(mdp5_kms, REG_MDP5_INTR_STATUS) & enable;
81 mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_CLEAR(0), status); 83 mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, status);
82 84
83 VERB("status=%08x", status); 85 VERB("status=%08x", status);
84 86
@@ -87,29 +89,6 @@ static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
87 for (id = 0; id < priv->num_crtcs; id++) 89 for (id = 0; id < priv->num_crtcs; id++)
88 if (status & mdp5_crtc_vblank(priv->crtcs[id])) 90 if (status & mdp5_crtc_vblank(priv->crtcs[id]))
89 drm_handle_vblank(dev, id); 91 drm_handle_vblank(dev, id);
90}
91
92irqreturn_t mdp5_irq(struct msm_kms *kms)
93{
94 struct mdp_kms *mdp_kms = to_mdp_kms(kms);
95 struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
96 uint32_t intr;
97
98 intr = mdp5_read(mdp5_kms, REG_MDSS_HW_INTR_STATUS);
99
100 VERB("intr=%08x", intr);
101
102 if (intr & MDSS_HW_INTR_STATUS_INTR_MDP) {
103 mdp5_irq_mdp(mdp_kms);
104 intr &= ~MDSS_HW_INTR_STATUS_INTR_MDP;
105 }
106
107 while (intr) {
108 irq_hw_number_t hwirq = fls(intr) - 1;
109 generic_handle_irq(irq_find_mapping(
110 mdp5_kms->irqcontroller.domain, hwirq));
111 intr &= ~(1 << hwirq);
112 }
113 92
114 return IRQ_HANDLED; 93 return IRQ_HANDLED;
115} 94}
@@ -135,81 +114,3 @@ void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
135 mdp5_crtc_vblank(crtc), false); 114 mdp5_crtc_vblank(crtc), false);
136 mdp5_disable(mdp5_kms); 115 mdp5_disable(mdp5_kms);
137} 116}
138
139/*
140 * interrupt-controller implementation, so sub-blocks (hdmi/eDP/dsi/etc)
141 * can register to get their irq's delivered
142 */
143
144#define VALID_IRQS (MDSS_HW_INTR_STATUS_INTR_DSI0 | \
145 MDSS_HW_INTR_STATUS_INTR_DSI1 | \
146 MDSS_HW_INTR_STATUS_INTR_HDMI | \
147 MDSS_HW_INTR_STATUS_INTR_EDP)
148
149static void mdp5_hw_mask_irq(struct irq_data *irqd)
150{
151 struct mdp5_kms *mdp5_kms = irq_data_get_irq_chip_data(irqd);
152 smp_mb__before_atomic();
153 clear_bit(irqd->hwirq, &mdp5_kms->irqcontroller.enabled_mask);
154 smp_mb__after_atomic();
155}
156
157static void mdp5_hw_unmask_irq(struct irq_data *irqd)
158{
159 struct mdp5_kms *mdp5_kms = irq_data_get_irq_chip_data(irqd);
160 smp_mb__before_atomic();
161 set_bit(irqd->hwirq, &mdp5_kms->irqcontroller.enabled_mask);
162 smp_mb__after_atomic();
163}
164
165static struct irq_chip mdp5_hw_irq_chip = {
166 .name = "mdp5",
167 .irq_mask = mdp5_hw_mask_irq,
168 .irq_unmask = mdp5_hw_unmask_irq,
169};
170
171static int mdp5_hw_irqdomain_map(struct irq_domain *d,
172 unsigned int irq, irq_hw_number_t hwirq)
173{
174 struct mdp5_kms *mdp5_kms = d->host_data;
175
176 if (!(VALID_IRQS & (1 << hwirq)))
177 return -EPERM;
178
179 irq_set_chip_and_handler(irq, &mdp5_hw_irq_chip, handle_level_irq);
180 irq_set_chip_data(irq, mdp5_kms);
181
182 return 0;
183}
184
185static struct irq_domain_ops mdp5_hw_irqdomain_ops = {
186 .map = mdp5_hw_irqdomain_map,
187 .xlate = irq_domain_xlate_onecell,
188};
189
190
191int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms)
192{
193 struct device *dev = mdp5_kms->dev->dev;
194 struct irq_domain *d;
195
196 d = irq_domain_add_linear(dev->of_node, 32,
197 &mdp5_hw_irqdomain_ops, mdp5_kms);
198 if (!d) {
199 dev_err(dev, "mdp5 irq domain add failed\n");
200 return -ENXIO;
201 }
202
203 mdp5_kms->irqcontroller.enabled_mask = 0;
204 mdp5_kms->irqcontroller.domain = d;
205
206 return 0;
207}
208
209void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms)
210{
211 if (mdp5_kms->irqcontroller.domain) {
212 irq_domain_remove(mdp5_kms->irqcontroller.domain);
213 mdp5_kms->irqcontroller.domain = NULL;
214 }
215}
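
With the MDSS-level demultiplexing gone (it reappears in mdp5_mdss.c below), mdp5_irq() becomes a plain MDP block handler: read the enable mask, AND it with the raw status, ack, then dispatch vblanks. A condensed sketch of the ack sequence (mdp5_ack_pending is an illustrative name, not driver code):

    /* Only enabled bits are serviced; INTR_CLEAR is write-1-to-clear,
     * so writing back the serviced bits acks exactly those and no more. */
    static u32 mdp5_ack_pending(struct mdp5_kms *mdp5_kms)
    {
            u32 enable = mdp5_read(mdp5_kms, REG_MDP5_INTR_EN);
            u32 status = mdp5_read(mdp5_kms, REG_MDP5_INTR_STATUS) & enable;

            mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, status);
            return status;
    }
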
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 484b4d15e71d..ed7143d35b25 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -16,6 +16,7 @@
16 * this program. If not, see <http://www.gnu.org/licenses/>. 16 * this program. If not, see <http://www.gnu.org/licenses/>.
17 */ 17 */
18 18
19#include <linux/of_irq.h>
19 20
20#include "msm_drv.h" 21#include "msm_drv.h"
21#include "msm_mmu.h" 22#include "msm_mmu.h"
@@ -28,10 +29,11 @@ static const char *iommu_ports[] = {
28static int mdp5_hw_init(struct msm_kms *kms) 29static int mdp5_hw_init(struct msm_kms *kms)
29{ 30{
30 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 31 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
31 struct drm_device *dev = mdp5_kms->dev; 32 struct platform_device *pdev = mdp5_kms->pdev;
32 unsigned long flags; 33 unsigned long flags;
33 34
34 pm_runtime_get_sync(dev->dev); 35 pm_runtime_get_sync(&pdev->dev);
36 mdp5_enable(mdp5_kms);
35 37
36 /* Magic unknown register writes: 38 /* Magic unknown register writes:
37 * 39 *
@@ -58,12 +60,13 @@ static int mdp5_hw_init(struct msm_kms *kms)
58 */ 60 */
59 61
60 spin_lock_irqsave(&mdp5_kms->resource_lock, flags); 62 spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
61 mdp5_write(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0), 0); 63 mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
62 spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags); 64 spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
63 65
64 mdp5_ctlm_hw_reset(mdp5_kms->ctlm); 66 mdp5_ctlm_hw_reset(mdp5_kms->ctlm);
65 67
66 pm_runtime_put_sync(dev->dev); 68 mdp5_disable(mdp5_kms);
69 pm_runtime_put_sync(&pdev->dev);
67 70
68 return 0; 71 return 0;
69} 72}
@@ -78,17 +81,11 @@ static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *s
78{ 81{
79 int i; 82 int i;
80 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 83 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
81 int nplanes = mdp5_kms->dev->mode_config.num_total_plane; 84 struct drm_plane *plane;
82 85 struct drm_plane_state *plane_state;
83 for (i = 0; i < nplanes; i++) {
84 struct drm_plane *plane = state->planes[i];
85 struct drm_plane_state *plane_state = state->plane_states[i];
86
87 if (!plane)
88 continue;
89 86
87 for_each_plane_in_state(state, plane, plane_state, i)
90 mdp5_plane_complete_commit(plane, plane_state); 88 mdp5_plane_complete_commit(plane, plane_state);
91 }
92 89
93 mdp5_disable(mdp5_kms); 90 mdp5_disable(mdp5_kms);
94} 91}
@@ -117,26 +114,15 @@ static int mdp5_set_split_display(struct msm_kms *kms,
117 return mdp5_encoder_set_split_display(encoder, slave_encoder); 114 return mdp5_encoder_set_split_display(encoder, slave_encoder);
118} 115}
119 116
120static void mdp5_destroy(struct msm_kms *kms) 117static void mdp5_kms_destroy(struct msm_kms *kms)
121{ 118{
122 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 119 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
123 struct msm_mmu *mmu = mdp5_kms->mmu; 120 struct msm_mmu *mmu = mdp5_kms->mmu;
124 121
125 mdp5_irq_domain_fini(mdp5_kms);
126
127 if (mmu) { 122 if (mmu) {
128 mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports)); 123 mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
129 mmu->funcs->destroy(mmu); 124 mmu->funcs->destroy(mmu);
130 } 125 }
131
132 if (mdp5_kms->ctlm)
133 mdp5_ctlm_destroy(mdp5_kms->ctlm);
134 if (mdp5_kms->smp)
135 mdp5_smp_destroy(mdp5_kms->smp);
136 if (mdp5_kms->cfg)
137 mdp5_cfg_destroy(mdp5_kms->cfg);
138
139 kfree(mdp5_kms);
140} 126}
141 127
142static const struct mdp_kms_funcs kms_funcs = { 128static const struct mdp_kms_funcs kms_funcs = {
@@ -154,7 +140,7 @@ static const struct mdp_kms_funcs kms_funcs = {
154 .get_format = mdp_get_format, 140 .get_format = mdp_get_format,
155 .round_pixclk = mdp5_round_pixclk, 141 .round_pixclk = mdp5_round_pixclk,
156 .set_split_display = mdp5_set_split_display, 142 .set_split_display = mdp5_set_split_display,
157 .destroy = mdp5_destroy, 143 .destroy = mdp5_kms_destroy,
158 }, 144 },
159 .set_irqmask = mdp5_set_irqmask, 145 .set_irqmask = mdp5_set_irqmask,
160}; 146};
@@ -351,13 +337,6 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
351 337
352 hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); 338 hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
353 339
354 /* register our interrupt-controller for hdmi/eDP/dsi/etc
355 * to use for irqs routed through mdp:
356 */
357 ret = mdp5_irq_domain_init(mdp5_kms);
358 if (ret)
359 goto fail;
360
361 /* construct CRTCs and their private planes: */ 340 /* construct CRTCs and their private planes: */
362 for (i = 0; i < hw_cfg->pipe_rgb.count; i++) { 341 for (i = 0; i < hw_cfg->pipe_rgb.count; i++) {
363 struct drm_plane *plane; 342 struct drm_plane *plane;
@@ -425,17 +404,17 @@ fail:
425 return ret; 404 return ret;
426} 405}
427 406
428static void read_hw_revision(struct mdp5_kms *mdp5_kms, 407static void read_mdp_hw_revision(struct mdp5_kms *mdp5_kms,
429 uint32_t *major, uint32_t *minor) 408 u32 *major, u32 *minor)
430{ 409{
431 uint32_t version; 410 u32 version;
432 411
433 mdp5_enable(mdp5_kms); 412 mdp5_enable(mdp5_kms);
434 version = mdp5_read(mdp5_kms, REG_MDSS_HW_VERSION); 413 version = mdp5_read(mdp5_kms, REG_MDP5_HW_VERSION);
435 mdp5_disable(mdp5_kms); 414 mdp5_disable(mdp5_kms);
436 415
437 *major = FIELD(version, MDSS_HW_VERSION_MAJOR); 416 *major = FIELD(version, MDP5_HW_VERSION_MAJOR);
438 *minor = FIELD(version, MDSS_HW_VERSION_MINOR); 417 *minor = FIELD(version, MDP5_HW_VERSION_MINOR);
439 418
440 DBG("MDP5 version v%d.%d", *major, *minor); 419 DBG("MDP5 version v%d.%d", *major, *minor);
441} 420}
@@ -580,51 +559,146 @@ static u32 mdp5_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
580 559
581struct msm_kms *mdp5_kms_init(struct drm_device *dev) 560struct msm_kms *mdp5_kms_init(struct drm_device *dev)
582{ 561{
583 struct platform_device *pdev = dev->platformdev; 562 struct msm_drm_private *priv = dev->dev_private;
584 struct mdp5_cfg *config; 563 struct platform_device *pdev;
585 struct mdp5_kms *mdp5_kms; 564 struct mdp5_kms *mdp5_kms;
586 struct msm_kms *kms = NULL; 565 struct mdp5_cfg *config;
566 struct msm_kms *kms;
587 struct msm_mmu *mmu; 567 struct msm_mmu *mmu;
588 uint32_t major, minor; 568 int irq, i, ret;
589 int i, ret;
590 569
591 mdp5_kms = kzalloc(sizeof(*mdp5_kms), GFP_KERNEL); 570 /* priv->kms would have been populated by the MDP5 driver */
592 if (!mdp5_kms) { 571 kms = priv->kms;
593 dev_err(dev->dev, "failed to allocate kms\n"); 572 if (!kms)
594 ret = -ENOMEM; 573 return NULL;
574
575 mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
576
577 mdp_kms_init(&mdp5_kms->base, &kms_funcs);
578
579 pdev = mdp5_kms->pdev;
580
581 irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
582 if (irq < 0) {
583 ret = irq;
584 dev_err(&pdev->dev, "failed to get irq: %d\n", ret);
595 goto fail; 585 goto fail;
596 } 586 }
597 587
598 spin_lock_init(&mdp5_kms->resource_lock); 588 kms->irq = irq;
599 589
600 mdp_kms_init(&mdp5_kms->base, &kms_funcs); 590 config = mdp5_cfg_get_config(mdp5_kms->cfg);
601 591
602 kms = &mdp5_kms->base.base; 592 /* make sure things are off before attaching iommu (bootloader could
593 * have left things on, in which case we'll start getting faults if
594 * we don't disable):
595 */
596 mdp5_enable(mdp5_kms);
597 for (i = 0; i < MDP5_INTF_NUM_MAX; i++) {
598 if (mdp5_cfg_intf_is_virtual(config->hw->intf.connect[i]) ||
599 !config->hw->intf.base[i])
600 continue;
601 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
603 602
604 mdp5_kms->dev = dev; 603 mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(i), 0x3);
604 }
605 mdp5_disable(mdp5_kms);
606 mdelay(16);
605 607
606 /* mdp5_kms->mmio actually represents the MDSS base address */ 608 if (config->platform.iommu) {
607 mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5"); 609 mmu = msm_iommu_new(&pdev->dev, config->platform.iommu);
608 if (IS_ERR(mdp5_kms->mmio)) { 610 if (IS_ERR(mmu)) {
609 ret = PTR_ERR(mdp5_kms->mmio); 611 ret = PTR_ERR(mmu);
612 dev_err(&pdev->dev, "failed to init iommu: %d\n", ret);
613 iommu_domain_free(config->platform.iommu);
614 goto fail;
615 }
616
617 ret = mmu->funcs->attach(mmu, iommu_ports,
618 ARRAY_SIZE(iommu_ports));
619 if (ret) {
620 dev_err(&pdev->dev, "failed to attach iommu: %d\n",
621 ret);
622 mmu->funcs->destroy(mmu);
623 goto fail;
624 }
625 } else {
626 dev_info(&pdev->dev,
627 "no iommu, fallback to phys contig buffers for scanout\n");
628 mmu = NULL;
629 }
630 mdp5_kms->mmu = mmu;
631
632 mdp5_kms->id = msm_register_mmu(dev, mmu);
633 if (mdp5_kms->id < 0) {
634 ret = mdp5_kms->id;
635 dev_err(&pdev->dev, "failed to register mdp5 iommu: %d\n", ret);
610 goto fail; 636 goto fail;
611 } 637 }
612 638
613 mdp5_kms->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF"); 639 ret = modeset_init(mdp5_kms);
614 if (IS_ERR(mdp5_kms->vbif)) { 640 if (ret) {
615 ret = PTR_ERR(mdp5_kms->vbif); 641 dev_err(&pdev->dev, "modeset_init failed: %d\n", ret);
616 goto fail; 642 goto fail;
617 } 643 }
618 644
619 mdp5_kms->vdd = devm_regulator_get(&pdev->dev, "vdd"); 645 dev->mode_config.min_width = 0;
620 if (IS_ERR(mdp5_kms->vdd)) { 646 dev->mode_config.min_height = 0;
621 ret = PTR_ERR(mdp5_kms->vdd); 647 dev->mode_config.max_width = config->hw->lm.max_width;
648 dev->mode_config.max_height = config->hw->lm.max_height;
649
650 dev->driver->get_vblank_timestamp = mdp5_get_vblank_timestamp;
651 dev->driver->get_scanout_position = mdp5_get_scanoutpos;
652 dev->driver->get_vblank_counter = mdp5_get_vblank_counter;
653 dev->max_vblank_count = 0xffffffff;
654 dev->vblank_disable_immediate = true;
655
656 return kms;
657fail:
658 if (kms)
659 mdp5_kms_destroy(kms);
660 return ERR_PTR(ret);
661}
662
663static void mdp5_destroy(struct platform_device *pdev)
664{
665 struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev);
666
667 if (mdp5_kms->ctlm)
668 mdp5_ctlm_destroy(mdp5_kms->ctlm);
669 if (mdp5_kms->smp)
670 mdp5_smp_destroy(mdp5_kms->smp);
671 if (mdp5_kms->cfg)
672 mdp5_cfg_destroy(mdp5_kms->cfg);
673
674 if (mdp5_kms->rpm_enabled)
675 pm_runtime_disable(&pdev->dev);
676}
677
678static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
679{
680 struct msm_drm_private *priv = dev->dev_private;
681 struct mdp5_kms *mdp5_kms;
682 struct mdp5_cfg *config;
683 u32 major, minor;
684 int ret;
685
686 mdp5_kms = devm_kzalloc(&pdev->dev, sizeof(*mdp5_kms), GFP_KERNEL);
687 if (!mdp5_kms) {
688 ret = -ENOMEM;
622 goto fail; 689 goto fail;
623 } 690 }
624 691
625 ret = regulator_enable(mdp5_kms->vdd); 692 platform_set_drvdata(pdev, mdp5_kms);
626 if (ret) { 693
627 dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret); 694 spin_lock_init(&mdp5_kms->resource_lock);
695
696 mdp5_kms->dev = dev;
697 mdp5_kms->pdev = pdev;
698
699 mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
700 if (IS_ERR(mdp5_kms->mmio)) {
701 ret = PTR_ERR(mdp5_kms->mmio);
628 goto fail; 702 goto fail;
629 } 703 }
630 704
@@ -635,9 +709,6 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
635 ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk", true); 709 ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk", true);
636 if (ret) 710 if (ret)
637 goto fail; 711 goto fail;
638 ret = get_clk(pdev, &mdp5_kms->src_clk, "core_clk_src", true);
639 if (ret)
640 goto fail;
641 ret = get_clk(pdev, &mdp5_kms->core_clk, "core_clk", true); 712 ret = get_clk(pdev, &mdp5_kms->core_clk, "core_clk", true);
642 if (ret) 713 if (ret)
643 goto fail; 714 goto fail;
@@ -652,9 +723,12 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
652 * rate first, then figure out hw revision, and then set a 723 * rate first, then figure out hw revision, and then set a
653 * more optimal rate: 724 * more optimal rate:
654 */ 725 */
655 clk_set_rate(mdp5_kms->src_clk, 200000000); 726 clk_set_rate(mdp5_kms->core_clk, 200000000);
656 727
657 read_hw_revision(mdp5_kms, &major, &minor); 728 pm_runtime_enable(&pdev->dev);
729 mdp5_kms->rpm_enabled = true;
730
731 read_mdp_hw_revision(mdp5_kms, &major, &minor);
658 732
659 mdp5_kms->cfg = mdp5_cfg_init(mdp5_kms, major, minor); 733 mdp5_kms->cfg = mdp5_cfg_init(mdp5_kms, major, minor);
660 if (IS_ERR(mdp5_kms->cfg)) { 734 if (IS_ERR(mdp5_kms->cfg)) {
@@ -667,7 +741,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
667 mdp5_kms->caps = config->hw->mdp.caps; 741 mdp5_kms->caps = config->hw->mdp.caps;
668 742
669 /* TODO: compute core clock rate at runtime */ 743 /* TODO: compute core clock rate at runtime */
670 clk_set_rate(mdp5_kms->src_clk, config->hw->max_clk); 744 clk_set_rate(mdp5_kms->core_clk, config->hw->max_clk);
671 745
672 /* 746 /*
673 * Some chipsets have a Shared Memory Pool (SMP), while others 747 * Some chipsets have a Shared Memory Pool (SMP), while others
@@ -690,73 +764,76 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
690 goto fail; 764 goto fail;
691 } 765 }
692 766
693 /* make sure things are off before attaching iommu (bootloader could 767 /* set uninit-ed kms */
694 * have left things on, in which case we'll start getting faults if 768 priv->kms = &mdp5_kms->base.base;
695 * we don't disable):
696 */
697 mdp5_enable(mdp5_kms);
698 for (i = 0; i < MDP5_INTF_NUM_MAX; i++) {
699 if (mdp5_cfg_intf_is_virtual(config->hw->intf.connect[i]) ||
700 !config->hw->intf.base[i])
701 continue;
702 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
703 769
704 mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(i), 0x3); 770 return 0;
705 } 771fail:
706 mdp5_disable(mdp5_kms); 772 mdp5_destroy(pdev);
707 mdelay(16); 773 return ret;
774}
708 775
709 if (config->platform.iommu) { 776static int mdp5_bind(struct device *dev, struct device *master, void *data)
710 mmu = msm_iommu_new(&pdev->dev, config->platform.iommu); 777{
711 if (IS_ERR(mmu)) { 778 struct drm_device *ddev = dev_get_drvdata(master);
712 ret = PTR_ERR(mmu); 779 struct platform_device *pdev = to_platform_device(dev);
713 dev_err(dev->dev, "failed to init iommu: %d\n", ret);
714 iommu_domain_free(config->platform.iommu);
715 goto fail;
716 }
717 780
718 ret = mmu->funcs->attach(mmu, iommu_ports, 781 DBG("");
719 ARRAY_SIZE(iommu_ports));
720 if (ret) {
721 dev_err(dev->dev, "failed to attach iommu: %d\n", ret);
722 mmu->funcs->destroy(mmu);
723 goto fail;
724 }
725 } else {
726 dev_info(dev->dev, "no iommu, fallback to phys "
727 "contig buffers for scanout\n");
728 mmu = NULL;
729 }
730 mdp5_kms->mmu = mmu;
731 782
732 mdp5_kms->id = msm_register_mmu(dev, mmu); 783 return mdp5_init(pdev, ddev);
733 if (mdp5_kms->id < 0) { 784}
734 ret = mdp5_kms->id;
735 dev_err(dev->dev, "failed to register mdp5 iommu: %d\n", ret);
736 goto fail;
737 }
738 785
739 ret = modeset_init(mdp5_kms); 786static void mdp5_unbind(struct device *dev, struct device *master,
740 if (ret) { 787 void *data)
741 dev_err(dev->dev, "modeset_init failed: %d\n", ret); 788{
742 goto fail; 789 struct platform_device *pdev = to_platform_device(dev);
743 }
744 790
745 dev->mode_config.min_width = 0; 791 mdp5_destroy(pdev);
746 dev->mode_config.min_height = 0; 792}
747 dev->mode_config.max_width = config->hw->lm.max_width;
748 dev->mode_config.max_height = config->hw->lm.max_height;
749 793
750 dev->driver->get_vblank_timestamp = mdp5_get_vblank_timestamp; 794static const struct component_ops mdp5_ops = {
751 dev->driver->get_scanout_position = mdp5_get_scanoutpos; 795 .bind = mdp5_bind,
752 dev->driver->get_vblank_counter = mdp5_get_vblank_counter; 796 .unbind = mdp5_unbind,
753 dev->max_vblank_count = 0xffffffff; 797};
754 dev->vblank_disable_immediate = true;
755 798
756 return kms; 799static int mdp5_dev_probe(struct platform_device *pdev)
800{
801 DBG("");
802 return component_add(&pdev->dev, &mdp5_ops);
803}
757 804
758fail: 805static int mdp5_dev_remove(struct platform_device *pdev)
759 if (kms) 806{
760 mdp5_destroy(kms); 807 DBG("");
761 return ERR_PTR(ret); 808 component_del(&pdev->dev, &mdp5_ops);
809 return 0;
810}
811
812static const struct of_device_id mdp5_dt_match[] = {
813 { .compatible = "qcom,mdp5", },
814 /* to support downstream DT files */
815 { .compatible = "qcom,mdss_mdp", },
816 {}
817};
818MODULE_DEVICE_TABLE(of, mdp5_dt_match);
819
820static struct platform_driver mdp5_driver = {
821 .probe = mdp5_dev_probe,
822 .remove = mdp5_dev_remove,
823 .driver = {
824 .name = "msm_mdp",
825 .of_match_table = mdp5_dt_match,
826 },
827};
828
829void __init msm_mdp_register(void)
830{
831 DBG("");
832 platform_driver_register(&mdp5_driver);
833}
834
835void __exit msm_mdp_unregister(void)
836{
837 DBG("");
838 platform_driver_unregister(&mdp5_driver);
762} 839}
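
The structural change in this file is the move to the component framework: mdp5 becomes its own platform driver whose probe only registers a component, deferring real initialisation to bind(), when the top-level msm master assembles its sub-devices. A minimal skeleton of that slave-side pattern (example_* names are illustrative, not driver code):

    #include <drm/drmP.h>
    #include <linux/component.h>
    #include <linux/platform_device.h>

    static int example_bind(struct device *dev, struct device *master,
                            void *data)
    {
            /* the master has already set its drvdata to the drm_device */
            struct drm_device *ddev = dev_get_drvdata(master);

            return ddev ? 0 : -ENODEV;  /* real device init would go here */
    }

    static void example_unbind(struct device *dev, struct device *master,
                               void *data)
    {
            /* mirror of bind(): release whatever bind() set up */
    }

    static const struct component_ops example_comp_ops = {
            .bind = example_bind,
            .unbind = example_unbind,
    };

    static int example_probe(struct platform_device *pdev)
    {
            /* probe registers the component; init waits for bind() */
            return component_add(&pdev->dev, &example_comp_ops);
    }
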
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 9a25898239d3..03738927be10 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -31,6 +31,8 @@ struct mdp5_kms {
31 31
32 struct drm_device *dev; 32 struct drm_device *dev;
33 33
34 struct platform_device *pdev;
35
34 struct mdp5_cfg_handler *cfg; 36 struct mdp5_cfg_handler *cfg;
35 uint32_t caps; /* MDP capabilities (MDP_CAP_XXX bits) */ 37 uint32_t caps; /* MDP capabilities (MDP_CAP_XXX bits) */
36 38
@@ -43,29 +45,23 @@ struct mdp5_kms {
43 struct mdp5_ctl_manager *ctlm; 45 struct mdp5_ctl_manager *ctlm;
44 46
45 /* io/register spaces: */ 47 /* io/register spaces: */
46 void __iomem *mmio, *vbif; 48 void __iomem *mmio;
47
48 struct regulator *vdd;
49 49
50 struct clk *axi_clk; 50 struct clk *axi_clk;
51 struct clk *ahb_clk; 51 struct clk *ahb_clk;
52 struct clk *src_clk;
53 struct clk *core_clk; 52 struct clk *core_clk;
54 struct clk *lut_clk; 53 struct clk *lut_clk;
55 struct clk *vsync_clk; 54 struct clk *vsync_clk;
56 55
57 /* 56 /*
58 * lock to protect access to global resources: ie., following register: 57 * lock to protect access to global resources: ie., following register:
59 * - REG_MDP5_MDP_DISP_INTF_SEL 58 * - REG_MDP5_DISP_INTF_SEL
60 */ 59 */
61 spinlock_t resource_lock; 60 spinlock_t resource_lock;
62 61
63 struct mdp_irq error_handler; 62 bool rpm_enabled;
64 63
65 struct { 64 struct mdp_irq error_handler;
66 volatile unsigned long enabled_mask;
67 struct irq_domain *domain;
68 } irqcontroller;
69}; 65};
70#define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base) 66#define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base)
71 67
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c
new file mode 100644
index 000000000000..d444a6901fff
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c
@@ -0,0 +1,235 @@
1/*
2 * Copyright (c) 2016, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 as published by
6 * the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#include <linux/irqdomain.h>
18#include <linux/irq.h>
19
20#include "msm_drv.h"
21#include "mdp5_kms.h"
22
23/*
24 * If needed, this can become more specific: something like struct mdp5_mdss,
25 * which contains a 'struct msm_mdss base' member.
26 */
27struct msm_mdss {
28 struct drm_device *dev;
29
30 void __iomem *mmio, *vbif;
31
32 struct regulator *vdd;
33
34 struct {
35 volatile unsigned long enabled_mask;
36 struct irq_domain *domain;
37 } irqcontroller;
38};
39
40static inline void mdss_write(struct msm_mdss *mdss, u32 reg, u32 data)
41{
42 msm_writel(data, mdss->mmio + reg);
43}
44
45static inline u32 mdss_read(struct msm_mdss *mdss, u32 reg)
46{
47 return msm_readl(mdss->mmio + reg);
48}
49
50static irqreturn_t mdss_irq(int irq, void *arg)
51{
52 struct msm_mdss *mdss = arg;
53 u32 intr;
54
55 intr = mdss_read(mdss, REG_MDSS_HW_INTR_STATUS);
56
57 VERB("intr=%08x", intr);
58
59 while (intr) {
60 irq_hw_number_t hwirq = fls(intr) - 1;
61
62 generic_handle_irq(irq_find_mapping(
63 mdss->irqcontroller.domain, hwirq));
64 intr &= ~(1 << hwirq);
65 }
66
67 return IRQ_HANDLED;
68}
69
70/*
71 * interrupt-controller implementation, so sub-blocks (MDP/HDMI/eDP/DSI/etc)
 72 * can register to get their irqs delivered
73 */
74
75#define VALID_IRQS (MDSS_HW_INTR_STATUS_INTR_MDP | \
76 MDSS_HW_INTR_STATUS_INTR_DSI0 | \
77 MDSS_HW_INTR_STATUS_INTR_DSI1 | \
78 MDSS_HW_INTR_STATUS_INTR_HDMI | \
79 MDSS_HW_INTR_STATUS_INTR_EDP)
80
81static void mdss_hw_mask_irq(struct irq_data *irqd)
82{
83 struct msm_mdss *mdss = irq_data_get_irq_chip_data(irqd);
84
85 smp_mb__before_atomic();
86 clear_bit(irqd->hwirq, &mdss->irqcontroller.enabled_mask);
87 smp_mb__after_atomic();
88}
89
90static void mdss_hw_unmask_irq(struct irq_data *irqd)
91{
92 struct msm_mdss *mdss = irq_data_get_irq_chip_data(irqd);
93
94 smp_mb__before_atomic();
95 set_bit(irqd->hwirq, &mdss->irqcontroller.enabled_mask);
96 smp_mb__after_atomic();
97}
98
99static struct irq_chip mdss_hw_irq_chip = {
100 .name = "mdss",
101 .irq_mask = mdss_hw_mask_irq,
102 .irq_unmask = mdss_hw_unmask_irq,
103};
104
105static int mdss_hw_irqdomain_map(struct irq_domain *d, unsigned int irq,
106 irq_hw_number_t hwirq)
107{
108 struct msm_mdss *mdss = d->host_data;
109
110 if (!(VALID_IRQS & (1 << hwirq)))
111 return -EPERM;
112
113 irq_set_chip_and_handler(irq, &mdss_hw_irq_chip, handle_level_irq);
114 irq_set_chip_data(irq, mdss);
115
116 return 0;
117}
118
119static struct irq_domain_ops mdss_hw_irqdomain_ops = {
120 .map = mdss_hw_irqdomain_map,
121 .xlate = irq_domain_xlate_onecell,
122};
123
124
125static int mdss_irq_domain_init(struct msm_mdss *mdss)
126{
127 struct device *dev = mdss->dev->dev;
128 struct irq_domain *d;
129
130 d = irq_domain_add_linear(dev->of_node, 32, &mdss_hw_irqdomain_ops,
131 mdss);
132 if (!d) {
133 dev_err(dev, "mdss irq domain add failed\n");
134 return -ENXIO;
135 }
136
137 mdss->irqcontroller.enabled_mask = 0;
138 mdss->irqcontroller.domain = d;
139
140 return 0;
141}
142
143void msm_mdss_destroy(struct drm_device *dev)
144{
145 struct msm_drm_private *priv = dev->dev_private;
146 struct msm_mdss *mdss = priv->mdss;
147
148 if (!mdss)
149 return;
150
151 irq_domain_remove(mdss->irqcontroller.domain);
152 mdss->irqcontroller.domain = NULL;
153
154 regulator_disable(mdss->vdd);
155
156 pm_runtime_put_sync(dev->dev);
157
158 pm_runtime_disable(dev->dev);
159}
160
161int msm_mdss_init(struct drm_device *dev)
162{
163 struct platform_device *pdev = dev->platformdev;
164 struct msm_drm_private *priv = dev->dev_private;
165 struct msm_mdss *mdss;
166 int ret;
167
168 DBG("");
169
170 if (!of_device_is_compatible(dev->dev->of_node, "qcom,mdss"))
171 return 0;
172
173 mdss = devm_kzalloc(dev->dev, sizeof(*mdss), GFP_KERNEL);
174 if (!mdss) {
175 ret = -ENOMEM;
176 goto fail;
177 }
178
179 mdss->dev = dev;
180
181 mdss->mmio = msm_ioremap(pdev, "mdss_phys", "MDSS");
182 if (IS_ERR(mdss->mmio)) {
183 ret = PTR_ERR(mdss->mmio);
184 goto fail;
185 }
186
187 mdss->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF");
188 if (IS_ERR(mdss->vbif)) {
189 ret = PTR_ERR(mdss->vbif);
190 goto fail;
191 }
192
193 /* Regulator to enable GDSCs in downstream kernels */
194 mdss->vdd = devm_regulator_get(dev->dev, "vdd");
195 if (IS_ERR(mdss->vdd)) {
196 ret = PTR_ERR(mdss->vdd);
197 goto fail;
198 }
199
200 ret = regulator_enable(mdss->vdd);
201 if (ret) {
202 dev_err(dev->dev, "failed to enable regulator vdd: %d\n",
203 ret);
204 goto fail;
205 }
206
207 ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
208 mdss_irq, 0, "mdss_isr", mdss);
209 if (ret) {
210 dev_err(dev->dev, "failed to init irq: %d\n", ret);
211 goto fail_irq;
212 }
213
214 ret = mdss_irq_domain_init(mdss);
215 if (ret) {
216 dev_err(dev->dev, "failed to init sub-block irqs: %d\n", ret);
217 goto fail_irq;
218 }
219
220 priv->mdss = mdss;
221
222 pm_runtime_enable(dev->dev);
223
224 /*
225 * TODO: This is needed as the MDSS GDSC is only tied to MDSS's power
226 * domain. Remove this once runtime PM is adapted for all the devices.
227 */
228 pm_runtime_get_sync(dev->dev);
229
230 return 0;
231fail_irq:
232 regulator_disable(mdss->vdd);
233fail:
234 return ret;
235}
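
For readers new to the irq_domain pattern above: once the MDSS node is the DT interrupt-parent of its sub-blocks, a child driver needs no knowledge of the wrapper at all. A minimal sketch of a consumer, assuming a hypothetical child_isr handler and that the child's interrupt resolves through mdss_hw_irqdomain_ops:

    static irqreturn_t child_isr(int irq, void *arg)
    {
            /* read/ack the sub-block's own interrupt status here */
            return IRQ_HANDLED;
    }

    static int child_probe(struct platform_device *pdev)
    {
            /* mapped through the MDSS irq domain registered above */
            int irq = platform_get_irq(pdev, 0);

            if (irq < 0)
                    return irq;

            return devm_request_irq(&pdev->dev, irq, child_isr, 0,
                                    dev_name(&pdev->dev), NULL);
    }
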
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
index 6f425c25d9fe..27d7b55b52c9 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
@@ -42,7 +42,7 @@
42 * 42 *
43 * configured: 43 * configured:
44 * The block is allocated to some client, and assigned to that 44 * The block is allocated to some client, and assigned to that
45 * client in MDP5_MDP_SMP_ALLOC registers. 45 * client in MDP5_SMP_ALLOC registers.
46 * 46 *
47 * inuse: 47 * inuse:
48 * The block is being actively used by a client. 48 * The block is being actively used by a client.
@@ -59,7 +59,7 @@
59 * mdp5_smp_commit. 59 * mdp5_smp_commit.
60 * 60 *
61 * 2) mdp5_smp_configure(): 61 * 2) mdp5_smp_configure():
62 * As hw is programmed, before FLUSH, MDP5_MDP_SMP_ALLOC registers 62 * As hw is programmed, before FLUSH, MDP5_SMP_ALLOC registers
63 * are configured for the union(pending, inuse) 63 * are configured for the union(pending, inuse)
64 * Current pending is copied to configured. 64 * Current pending is copied to configured.
65 * It is assumed that mdp5_smp_request and mdp5_smp_configure not run 65 * It is assumed that mdp5_smp_request and mdp5_smp_configure not run
@@ -311,25 +311,25 @@ static void update_smp_state(struct mdp5_smp *smp,
311 int idx = blk / 3; 311 int idx = blk / 3;
312 int fld = blk % 3; 312 int fld = blk % 3;
313 313
314 val = mdp5_read(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_W_REG(0, idx)); 314 val = mdp5_read(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx));
315 315
316 switch (fld) { 316 switch (fld) {
317 case 0: 317 case 0:
318 val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__MASK; 318 val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
319 val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0(cid); 319 val |= MDP5_SMP_ALLOC_W_REG_CLIENT0(cid);
320 break; 320 break;
321 case 1: 321 case 1:
322 val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__MASK; 322 val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
323 val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1(cid); 323 val |= MDP5_SMP_ALLOC_W_REG_CLIENT1(cid);
324 break; 324 break;
325 case 2: 325 case 2:
326 val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__MASK; 326 val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
327 val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2(cid); 327 val |= MDP5_SMP_ALLOC_W_REG_CLIENT2(cid);
328 break; 328 break;
329 } 329 }
330 330
331 mdp5_write(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_W_REG(0, idx), val); 331 mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx), val);
332 mdp5_write(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_R_REG(0, idx), val); 332 mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(idx), val);
333 } 333 }
334} 334}
335 335
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index e3892c263f27..4a8a6f1f1151 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -84,17 +84,12 @@ static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
84 struct drm_atomic_state *old_state) 84 struct drm_atomic_state *old_state)
85{ 85{
86 struct drm_crtc *crtc; 86 struct drm_crtc *crtc;
87 struct drm_crtc_state *crtc_state;
87 struct msm_drm_private *priv = old_state->dev->dev_private; 88 struct msm_drm_private *priv = old_state->dev->dev_private;
88 struct msm_kms *kms = priv->kms; 89 struct msm_kms *kms = priv->kms;
89 int ncrtcs = old_state->dev->mode_config.num_crtc;
90 int i; 90 int i;
91 91
92 for (i = 0; i < ncrtcs; i++) { 92 for_each_crtc_in_state(old_state, crtc, crtc_state, i) {
93 crtc = old_state->crtcs[i];
94
95 if (!crtc)
96 continue;
97
98 if (!crtc->state->enable) 93 if (!crtc->state->enable)
99 continue; 94 continue;
100 95
@@ -192,9 +187,11 @@ int msm_atomic_commit(struct drm_device *dev,
192 struct drm_atomic_state *state, bool nonblock) 187 struct drm_atomic_state *state, bool nonblock)
193{ 188{
194 struct msm_drm_private *priv = dev->dev_private; 189 struct msm_drm_private *priv = dev->dev_private;
195 int nplanes = dev->mode_config.num_total_plane;
196 int ncrtcs = dev->mode_config.num_crtc;
197 struct msm_commit *c; 190 struct msm_commit *c;
191 struct drm_crtc *crtc;
192 struct drm_crtc_state *crtc_state;
193 struct drm_plane *plane;
194 struct drm_plane_state *plane_state;
198 int i, ret; 195 int i, ret;
199 196
200 ret = drm_atomic_helper_prepare_planes(dev, state); 197 ret = drm_atomic_helper_prepare_planes(dev, state);
@@ -210,28 +207,18 @@ int msm_atomic_commit(struct drm_device *dev,
210 /* 207 /*
211 * Figure out what crtcs we have: 208 * Figure out what crtcs we have:
212 */ 209 */
213 for (i = 0; i < ncrtcs; i++) { 210 for_each_crtc_in_state(state, crtc, crtc_state, i)
214 struct drm_crtc *crtc = state->crtcs[i]; 211 c->crtc_mask |= drm_crtc_mask(crtc);
215 if (!crtc)
216 continue;
217 c->crtc_mask |= (1 << drm_crtc_index(crtc));
218 }
219 212
220 /* 213 /*
221 * Figure out what fence to wait for: 214 * Figure out what fence to wait for:
222 */ 215 */
223 for (i = 0; i < nplanes; i++) { 216 for_each_plane_in_state(state, plane, plane_state, i) {
224 struct drm_plane *plane = state->planes[i]; 217 if ((plane->state->fb != plane_state->fb) && plane_state->fb) {
225 struct drm_plane_state *new_state = state->plane_states[i]; 218 struct drm_gem_object *obj = msm_framebuffer_bo(plane_state->fb, 0);
226
227 if (!plane)
228 continue;
229
230 if ((plane->state->fb != new_state->fb) && new_state->fb) {
231 struct drm_gem_object *obj = msm_framebuffer_bo(new_state->fb, 0);
232 struct msm_gem_object *msm_obj = to_msm_bo(obj); 219 struct msm_gem_object *msm_obj = to_msm_bo(obj);
233 220
234 new_state->fence = reservation_object_get_excl_rcu(msm_obj->resv); 221 plane_state->fence = reservation_object_get_excl_rcu(msm_obj->resv);
235 } 222 }
236 } 223 }
237 224
@@ -251,7 +238,7 @@ int msm_atomic_commit(struct drm_device *dev,
251 * the software side now. 238 * the software side now.
252 */ 239 */
253 240
254 drm_atomic_helper_swap_state(dev, state); 241 drm_atomic_helper_swap_state(state, true);
255 242
256 /* 243 /*
257 * Everything below can be run asynchronously without the need to grab 244 * Everything below can be run asynchronously without the need to grab
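
The loops above move from indexing the mode_config arrays by hand to the for_each_crtc_in_state()/for_each_plane_in_state() helpers from drm_atomic.h, which visit only the objects that actually carry state in this commit. A minimal sketch of the idiom (handle_crtc() is a placeholder):

    /* given a struct drm_atomic_state *state: */
    struct drm_crtc *crtc;
    struct drm_crtc_state *crtc_state;
    int i;

    for_each_crtc_in_state(state, crtc, crtc_state, i)
            handle_crtc(crtc, crtc_state); /* empty slots are skipped */
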
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 9c654092ef78..26f859ec24b3 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -21,6 +21,16 @@
21#include "msm_gpu.h" 21#include "msm_gpu.h"
22#include "msm_kms.h" 22#include "msm_kms.h"
23 23
24
25/*
26 * MSM driver version:
27 * - 1.0.0 - initial interface
28 * - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers
29 */
30#define MSM_VERSION_MAJOR 1
31#define MSM_VERSION_MINOR 1
32#define MSM_VERSION_PATCHLEVEL 0
33
24static void msm_fb_output_poll_changed(struct drm_device *dev) 34static void msm_fb_output_poll_changed(struct drm_device *dev)
25{ 35{
26 struct msm_drm_private *priv = dev->dev_private; 36 struct msm_drm_private *priv = dev->dev_private;
@@ -195,9 +205,9 @@ static int msm_drm_uninit(struct device *dev)
195 kfree(vbl_ev); 205 kfree(vbl_ev);
196 } 206 }
197 207
198 drm_kms_helper_poll_fini(ddev); 208 msm_gem_shrinker_cleanup(ddev);
199 209
200 drm_connector_unregister_all(ddev); 210 drm_kms_helper_poll_fini(ddev);
201 211
202 drm_dev_unregister(ddev); 212 drm_dev_unregister(ddev);
203 213
@@ -217,10 +227,8 @@ static int msm_drm_uninit(struct device *dev)
217 flush_workqueue(priv->atomic_wq); 227 flush_workqueue(priv->atomic_wq);
218 destroy_workqueue(priv->atomic_wq); 228 destroy_workqueue(priv->atomic_wq);
219 229
220 if (kms) { 230 if (kms)
221 pm_runtime_disable(dev);
222 kms->funcs->destroy(kms); 231 kms->funcs->destroy(kms);
223 }
224 232
225 if (gpu) { 233 if (gpu) {
226 mutex_lock(&ddev->struct_mutex); 234 mutex_lock(&ddev->struct_mutex);
@@ -239,6 +247,8 @@ static int msm_drm_uninit(struct device *dev)
239 247
240 component_unbind_all(dev, ddev); 248 component_unbind_all(dev, ddev);
241 249
250 msm_mdss_destroy(ddev);
251
242 ddev->dev_private = NULL; 252 ddev->dev_private = NULL;
243 drm_dev_unref(ddev); 253 drm_dev_unref(ddev);
244 254
@@ -284,6 +294,7 @@ static int msm_init_vram(struct drm_device *dev)
284 if (node) { 294 if (node) {
285 struct resource r; 295 struct resource r;
286 ret = of_address_to_resource(node, 0, &r); 296 ret = of_address_to_resource(node, 0, &r);
297 of_node_put(node);
287 if (ret) 298 if (ret)
288 return ret; 299 return ret;
289 size = r.end - r.start; 300 size = r.end - r.start;
@@ -352,6 +363,14 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
352 } 363 }
353 364
354 ddev->dev_private = priv; 365 ddev->dev_private = priv;
366 priv->dev = ddev;
367
368 ret = msm_mdss_init(ddev);
369 if (ret) {
370 kfree(priv);
371 drm_dev_unref(ddev);
372 return ret;
373 }
355 374
356 priv->wq = alloc_ordered_workqueue("msm", 0); 375 priv->wq = alloc_ordered_workqueue("msm", 0);
357 priv->atomic_wq = alloc_ordered_workqueue("msm:atomic", 0); 376 priv->atomic_wq = alloc_ordered_workqueue("msm:atomic", 0);
@@ -367,6 +386,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
367 /* Bind all our sub-components: */ 386 /* Bind all our sub-components: */
368 ret = component_bind_all(dev, ddev); 387 ret = component_bind_all(dev, ddev);
369 if (ret) { 388 if (ret) {
389 msm_mdss_destroy(ddev);
370 kfree(priv); 390 kfree(priv);
371 drm_dev_unref(ddev); 391 drm_dev_unref(ddev);
372 return ret; 392 return ret;
@@ -376,9 +396,12 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
376 if (ret) 396 if (ret)
377 goto fail; 397 goto fail;
378 398
399 msm_gem_shrinker_init(ddev);
400
379 switch (get_mdp_ver(pdev)) { 401 switch (get_mdp_ver(pdev)) {
380 case 4: 402 case 4:
381 kms = mdp4_kms_init(ddev); 403 kms = mdp4_kms_init(ddev);
404 priv->kms = kms;
382 break; 405 break;
383 case 5: 406 case 5:
384 kms = mdp5_kms_init(ddev); 407 kms = mdp5_kms_init(ddev);
@@ -400,10 +423,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
400 goto fail; 423 goto fail;
401 } 424 }
402 425
403 priv->kms = kms;
404
405 if (kms) { 426 if (kms) {
406 pm_runtime_enable(dev);
407 ret = kms->funcs->hw_init(kms); 427 ret = kms->funcs->hw_init(kms);
408 if (ret) { 428 if (ret) {
409 dev_err(dev, "kms hw init failed: %d\n", ret); 429 dev_err(dev, "kms hw init failed: %d\n", ret);
@@ -419,24 +439,20 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
419 goto fail; 439 goto fail;
420 } 440 }
421 441
422 pm_runtime_get_sync(dev); 442 if (kms) {
423 ret = drm_irq_install(ddev, platform_get_irq(pdev, 0)); 443 pm_runtime_get_sync(dev);
424 pm_runtime_put_sync(dev); 444 ret = drm_irq_install(ddev, kms->irq);
425 if (ret < 0) { 445 pm_runtime_put_sync(dev);
426 dev_err(dev, "failed to install IRQ handler\n"); 446 if (ret < 0) {
427 goto fail; 447 dev_err(dev, "failed to install IRQ handler\n");
448 goto fail;
449 }
428 } 450 }
429 451
430 ret = drm_dev_register(ddev, 0); 452 ret = drm_dev_register(ddev, 0);
431 if (ret) 453 if (ret)
432 goto fail; 454 goto fail;
433 455
434 ret = drm_connector_register_all(ddev);
435 if (ret) {
436 dev_err(dev, "failed to register connectors\n");
437 goto fail;
438 }
439
440 drm_mode_config_reset(ddev); 456 drm_mode_config_reset(ddev);
441 457
442#ifdef CONFIG_DRM_FBDEV_EMULATION 458#ifdef CONFIG_DRM_FBDEV_EMULATION
@@ -690,6 +706,44 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
690 return msm_wait_fence(priv->gpu->fctx, args->fence, &timeout, true); 706 return msm_wait_fence(priv->gpu->fctx, args->fence, &timeout, true);
691} 707}
692 708
709static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
710 struct drm_file *file)
711{
712 struct drm_msm_gem_madvise *args = data;
713 struct drm_gem_object *obj;
714 int ret;
715
716 switch (args->madv) {
717 case MSM_MADV_DONTNEED:
718 case MSM_MADV_WILLNEED:
719 break;
720 default:
721 return -EINVAL;
722 }
723
724 ret = mutex_lock_interruptible(&dev->struct_mutex);
725 if (ret)
726 return ret;
727
728 obj = drm_gem_object_lookup(file, args->handle);
729 if (!obj) {
730 ret = -ENOENT;
731 goto unlock;
732 }
733
734 ret = msm_gem_madvise(obj, args->madv);
735 if (ret >= 0) {
736 args->retained = ret;
737 ret = 0;
738 }
739
740 drm_gem_object_unreference(obj);
741
742unlock:
743 mutex_unlock(&dev->struct_mutex);
744 return ret;
745}
746
693static const struct drm_ioctl_desc msm_ioctls[] = { 747static const struct drm_ioctl_desc msm_ioctls[] = {
694 DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_AUTH|DRM_RENDER_ALLOW), 748 DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_AUTH|DRM_RENDER_ALLOW),
695 DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_AUTH|DRM_RENDER_ALLOW), 749 DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_AUTH|DRM_RENDER_ALLOW),
@@ -698,6 +752,7 @@ static const struct drm_ioctl_desc msm_ioctls[] = {
698 DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW), 752 DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
699 DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_AUTH|DRM_RENDER_ALLOW), 753 DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_AUTH|DRM_RENDER_ALLOW),
700 DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_AUTH|DRM_RENDER_ALLOW), 754 DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_AUTH|DRM_RENDER_ALLOW),
755 DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE, msm_ioctl_gem_madvise, DRM_AUTH|DRM_RENDER_ALLOW),
701}; 756};
702 757
703static const struct vm_operations_struct vm_ops = { 758static const struct vm_operations_struct vm_ops = {
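
From userspace, the new madvise ioctl is the hint half of the shrinker work added later in this series. A rough usage sketch, assuming libdrm's generic command wrappers and the matching uapi additions (struct drm_msm_gem_madvise, DRM_MSM_GEM_MADVISE); bo_handle and reupload() are placeholders:

    #include <xf86drm.h>
    #include "msm_drm.h"   /* uapi header with the MADVISE bits */

    struct drm_msm_gem_madvise req = {
            .handle = bo_handle,
            .madv   = MSM_MADV_DONTNEED,   /* idle: kernel may purge */
    };
    drmCommandWriteRead(fd, DRM_MSM_GEM_MADVISE, &req, sizeof(req));

    /* ... before touching the buffer again ... */
    req.madv = MSM_MADV_WILLNEED;
    drmCommandWriteRead(fd, DRM_MSM_GEM_MADVISE, &req, sizeof(req));
    if (!req.retained)
            reupload(bo_handle);           /* contents were purged */
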
@@ -730,7 +785,6 @@ static struct drm_driver msm_driver = {
730 .open = msm_open, 785 .open = msm_open,
731 .preclose = msm_preclose, 786 .preclose = msm_preclose,
732 .lastclose = msm_lastclose, 787 .lastclose = msm_lastclose,
733 .set_busid = drm_platform_set_busid,
734 .irq_handler = msm_irq, 788 .irq_handler = msm_irq,
735 .irq_preinstall = msm_irq_preinstall, 789 .irq_preinstall = msm_irq_preinstall,
736 .irq_postinstall = msm_irq_postinstall, 790 .irq_postinstall = msm_irq_postinstall,
@@ -764,8 +818,9 @@ static struct drm_driver msm_driver = {
764 .name = "msm", 818 .name = "msm",
765 .desc = "MSM Snapdragon DRM", 819 .desc = "MSM Snapdragon DRM",
766 .date = "20130625", 820 .date = "20130625",
767 .major = 1, 821 .major = MSM_VERSION_MAJOR,
768 .minor = 0, 822 .minor = MSM_VERSION_MINOR,
823 .patchlevel = MSM_VERSION_PATCHLEVEL,
769}; 824};
770 825
771#ifdef CONFIG_PM_SLEEP 826#ifdef CONFIG_PM_SLEEP
@@ -805,22 +860,146 @@ static int compare_of(struct device *dev, void *data)
805 return dev->of_node == data; 860 return dev->of_node == data;
806} 861}
807 862
808static int add_components(struct device *dev, struct component_match **matchptr, 863/*
809 const char *name) 864 * Identify what components need to be added by parsing what remote-endpoints
865 * our MDP output ports are connected to. In the case of LVDS on MDP4, there
866 * is no external component that we need to add since LVDS is within MDP4
867 * itself.
868 */
869static int add_components_mdp(struct device *mdp_dev,
870 struct component_match **matchptr)
810{ 871{
811 struct device_node *np = dev->of_node; 872 struct device_node *np = mdp_dev->of_node;
812 unsigned i; 873 struct device_node *ep_node;
874 struct device *master_dev;
875
876 /*
877 * on MDP4 based platforms, the MDP platform device is the component
878 * master that adds other display interface components to itself.
879 *
880 * on MDP5 based platforms, the MDSS platform device is the component
881 * master that adds MDP5 and other display interface components to
882 * itself.
883 */
884 if (of_device_is_compatible(np, "qcom,mdp4"))
885 master_dev = mdp_dev;
886 else
887 master_dev = mdp_dev->parent;
813 888
814 for (i = 0; ; i++) { 889 for_each_endpoint_of_node(np, ep_node) {
815 struct device_node *node; 890 struct device_node *intf;
891 struct of_endpoint ep;
892 int ret;
816 893
817 node = of_parse_phandle(np, name, i); 894 ret = of_graph_parse_endpoint(ep_node, &ep);
818 if (!node) 895 if (ret) {
819 break; 896 dev_err(mdp_dev, "unable to parse port endpoint\n");
897 of_node_put(ep_node);
898 return ret;
899 }
900
901 /*
 902 * The LCDC/LVDS port on MDP4 is a special case where the
903 * remote-endpoint isn't a component that we need to add
904 */
905 if (of_device_is_compatible(np, "qcom,mdp4") &&
906 ep.port == 0) {
907 of_node_put(ep_node);
908 continue;
909 }
910
911 /*
912 * It's okay if some of the ports don't have a remote endpoint
913 * specified. It just means that the port isn't connected to
914 * any external interface.
915 */
916 intf = of_graph_get_remote_port_parent(ep_node);
917 if (!intf) {
918 of_node_put(ep_node);
919 continue;
920 }
921
922 component_match_add(master_dev, matchptr, compare_of, intf);
923
924 of_node_put(intf);
925 of_node_put(ep_node);
926 }
927
928 return 0;
929}
930
931static int compare_name_mdp(struct device *dev, void *data)
932{
933 return (strstr(dev_name(dev), "mdp") != NULL);
934}
935
936static int add_display_components(struct device *dev,
937 struct component_match **matchptr)
938{
939 struct device *mdp_dev;
940 int ret;
941
942 /*
943 * MDP5 based devices don't have a flat hierarchy. There is a top level
944 * parent: MDSS, and children: MDP5, DSI, HDMI, eDP etc. Populate the
945 * children devices, find the MDP5 node, and then add the interfaces
946 * to our components list.
947 */
948 if (of_device_is_compatible(dev->of_node, "qcom,mdss")) {
949 ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
950 if (ret) {
951 dev_err(dev, "failed to populate children devices\n");
952 return ret;
953 }
954
955 mdp_dev = device_find_child(dev, NULL, compare_name_mdp);
956 if (!mdp_dev) {
957 dev_err(dev, "failed to find MDSS MDP node\n");
958 of_platform_depopulate(dev);
959 return -ENODEV;
960 }
961
962 put_device(mdp_dev);
820 963
821 component_match_add(dev, matchptr, compare_of, node); 964 /* add the MDP component itself */
965 component_match_add(dev, matchptr, compare_of,
966 mdp_dev->of_node);
967 } else {
968 /* MDP4 */
969 mdp_dev = dev;
822 } 970 }
823 971
972 ret = add_components_mdp(mdp_dev, matchptr);
973 if (ret)
974 of_platform_depopulate(dev);
975
976 return ret;
977}
978
979/*
 980 * We don't know what the best binding is to link the gpu with the drm device.
 981 * For now, we just hunt for all the possible gpus that we support, and add them
982 * as components.
983 */
984static const struct of_device_id msm_gpu_match[] = {
985 { .compatible = "qcom,adreno-3xx" },
986 { .compatible = "qcom,kgsl-3d0" },
987 { },
988};
989
990static int add_gpu_components(struct device *dev,
991 struct component_match **matchptr)
992{
993 struct device_node *np;
994
995 np = of_find_matching_node(NULL, msm_gpu_match);
996 if (!np)
997 return 0;
998
999 component_match_add(dev, matchptr, compare_of, np);
1000
1001 of_node_put(np);
1002
824 return 0; 1003 return 0;
825} 1004}
826 1005
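
Putting the two helpers together, the probe-time flow on an MDP5 platform looks roughly like this (a sketch of the call graph, not code from the patch):

    /*
     * msm_pdev_probe("qcom,mdss")
     *   add_display_components()
     *     of_platform_populate()      -> creates MDP5/DSI/HDMI/eDP children
     *     device_find_child(...)      -> locates the MDP5 device
     *     component_match_add(mdp5)   -> MDP itself becomes a component
     *     add_components_mdp()        -> walks MDP output-port endpoints and
     *                                    adds each remote interface device
     *   add_gpu_components()          -> adds the adreno node, if present
     *   component_master_add_with_match()
     */
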
@@ -846,9 +1025,15 @@ static const struct component_master_ops msm_drm_ops = {
846static int msm_pdev_probe(struct platform_device *pdev) 1025static int msm_pdev_probe(struct platform_device *pdev)
847{ 1026{
848 struct component_match *match = NULL; 1027 struct component_match *match = NULL;
1028 int ret;
849 1029
850 add_components(&pdev->dev, &match, "connectors"); 1030 ret = add_display_components(&pdev->dev, &match);
851 add_components(&pdev->dev, &match, "gpus"); 1031 if (ret)
1032 return ret;
1033
1034 ret = add_gpu_components(&pdev->dev, &match);
1035 if (ret)
1036 return ret;
852 1037
853 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); 1038 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
854 return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match); 1039 return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
@@ -857,20 +1042,14 @@ static int msm_pdev_probe(struct platform_device *pdev)
857static int msm_pdev_remove(struct platform_device *pdev) 1042static int msm_pdev_remove(struct platform_device *pdev)
858{ 1043{
859 component_master_del(&pdev->dev, &msm_drm_ops); 1044 component_master_del(&pdev->dev, &msm_drm_ops);
1045 of_platform_depopulate(&pdev->dev);
860 1046
861 return 0; 1047 return 0;
862} 1048}
863 1049
864static const struct platform_device_id msm_id[] = {
865 { "mdp", 0 },
866 { }
867};
868
869static const struct of_device_id dt_match[] = { 1050static const struct of_device_id dt_match[] = {
870 { .compatible = "qcom,mdp4", .data = (void *) 4 }, /* mdp4 */ 1051 { .compatible = "qcom,mdp4", .data = (void *)4 }, /* MDP4 */
871 { .compatible = "qcom,mdp5", .data = (void *) 5 }, /* mdp5 */ 1052 { .compatible = "qcom,mdss", .data = (void *)5 }, /* MDP5 MDSS */
872 /* to support downstream DT files */
873 { .compatible = "qcom,mdss_mdp", .data = (void *) 5 }, /* mdp5 */
874 {} 1053 {}
875}; 1054};
876MODULE_DEVICE_TABLE(of, dt_match); 1055MODULE_DEVICE_TABLE(of, dt_match);
@@ -883,12 +1062,12 @@ static struct platform_driver msm_platform_driver = {
883 .of_match_table = dt_match, 1062 .of_match_table = dt_match,
884 .pm = &msm_pm_ops, 1063 .pm = &msm_pm_ops,
885 }, 1064 },
886 .id_table = msm_id,
887}; 1065};
888 1066
889static int __init msm_drm_register(void) 1067static int __init msm_drm_register(void)
890{ 1068{
891 DBG("init"); 1069 DBG("init");
1070 msm_mdp_register();
892 msm_dsi_register(); 1071 msm_dsi_register();
893 msm_edp_register(); 1072 msm_edp_register();
894 msm_hdmi_register(); 1073 msm_hdmi_register();
@@ -904,6 +1083,7 @@ static void __exit msm_drm_unregister(void)
904 adreno_unregister(); 1083 adreno_unregister();
905 msm_edp_unregister(); 1084 msm_edp_unregister();
906 msm_dsi_unregister(); 1085 msm_dsi_unregister();
1086 msm_mdp_unregister();
907} 1087}
908 1088
909module_init(msm_drm_register); 1089module_init(msm_drm_register);
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 5b2963f32291..b4bc7f1ef717 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -46,6 +46,7 @@
46struct msm_kms; 46struct msm_kms;
47struct msm_gpu; 47struct msm_gpu;
48struct msm_mmu; 48struct msm_mmu;
49struct msm_mdss;
49struct msm_rd_state; 50struct msm_rd_state;
50struct msm_perf_state; 51struct msm_perf_state;
51struct msm_gem_submit; 52struct msm_gem_submit;
@@ -77,11 +78,16 @@ struct msm_vblank_ctrl {
77 78
78struct msm_drm_private { 79struct msm_drm_private {
79 80
81 struct drm_device *dev;
82
80 struct msm_kms *kms; 83 struct msm_kms *kms;
81 84
82 /* subordinate devices, if present: */ 85 /* subordinate devices, if present: */
83 struct platform_device *gpu_pdev; 86 struct platform_device *gpu_pdev;
84 87
88 /* top level MDSS wrapper device (for MDP5 only) */
89 struct msm_mdss *mdss;
90
85 /* possibly this should be in the kms component, but it is 91 /* possibly this should be in the kms component, but it is
86 * shared by both mdp4 and mdp5.. 92 * shared by both mdp4 and mdp5..
87 */ 93 */
@@ -147,6 +153,9 @@ struct msm_drm_private {
147 struct drm_mm mm; 153 struct drm_mm mm;
148 } vram; 154 } vram;
149 155
156 struct notifier_block vmap_notifier;
157 struct shrinker shrinker;
158
150 struct msm_vblank_ctrl vblank_ctrl; 159 struct msm_vblank_ctrl vblank_ctrl;
151}; 160};
152 161
@@ -165,6 +174,9 @@ void msm_gem_submit_free(struct msm_gem_submit *submit);
165int msm_ioctl_gem_submit(struct drm_device *dev, void *data, 174int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
166 struct drm_file *file); 175 struct drm_file *file);
167 176
177void msm_gem_shrinker_init(struct drm_device *dev);
178void msm_gem_shrinker_cleanup(struct drm_device *dev);
179
168int msm_gem_mmap_obj(struct drm_gem_object *obj, 180int msm_gem_mmap_obj(struct drm_gem_object *obj,
169 struct vm_area_struct *vma); 181 struct vm_area_struct *vma);
170int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma); 182int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
@@ -189,8 +201,13 @@ struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
189 struct dma_buf_attachment *attach, struct sg_table *sg); 201 struct dma_buf_attachment *attach, struct sg_table *sg);
190int msm_gem_prime_pin(struct drm_gem_object *obj); 202int msm_gem_prime_pin(struct drm_gem_object *obj);
191void msm_gem_prime_unpin(struct drm_gem_object *obj); 203void msm_gem_prime_unpin(struct drm_gem_object *obj);
192void *msm_gem_vaddr_locked(struct drm_gem_object *obj); 204void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj);
193void *msm_gem_vaddr(struct drm_gem_object *obj); 205void *msm_gem_get_vaddr(struct drm_gem_object *obj);
206void msm_gem_put_vaddr_locked(struct drm_gem_object *obj);
207void msm_gem_put_vaddr(struct drm_gem_object *obj);
208int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
209void msm_gem_purge(struct drm_gem_object *obj);
210void msm_gem_vunmap(struct drm_gem_object *obj);
194int msm_gem_sync_object(struct drm_gem_object *obj, 211int msm_gem_sync_object(struct drm_gem_object *obj,
195 struct msm_fence_context *fctx, bool exclusive); 212 struct msm_fence_context *fctx, bool exclusive);
196void msm_gem_move_to_active(struct drm_gem_object *obj, 213void msm_gem_move_to_active(struct drm_gem_object *obj,
@@ -257,6 +274,9 @@ static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi,
257} 274}
258#endif 275#endif
259 276
277void __init msm_mdp_register(void);
278void __exit msm_mdp_unregister(void);
279
260#ifdef CONFIG_DEBUG_FS 280#ifdef CONFIG_DEBUG_FS
261void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m); 281void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
262void msm_gem_describe_objects(struct list_head *list, struct seq_file *m); 282void msm_gem_describe_objects(struct list_head *list, struct seq_file *m);
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 461dc8b873f0..95cf8fe72ee5 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -49,24 +49,16 @@ static void msm_framebuffer_destroy(struct drm_framebuffer *fb)
49 49
50 for (i = 0; i < n; i++) { 50 for (i = 0; i < n; i++) {
51 struct drm_gem_object *bo = msm_fb->planes[i]; 51 struct drm_gem_object *bo = msm_fb->planes[i];
52 if (bo) 52
53 drm_gem_object_unreference_unlocked(bo); 53 drm_gem_object_unreference_unlocked(bo);
54 } 54 }
55 55
56 kfree(msm_fb); 56 kfree(msm_fb);
57} 57}
58 58
59static int msm_framebuffer_dirty(struct drm_framebuffer *fb,
60 struct drm_file *file_priv, unsigned flags, unsigned color,
61 struct drm_clip_rect *clips, unsigned num_clips)
62{
63 return 0;
64}
65
66static const struct drm_framebuffer_funcs msm_framebuffer_funcs = { 59static const struct drm_framebuffer_funcs msm_framebuffer_funcs = {
67 .create_handle = msm_framebuffer_create_handle, 60 .create_handle = msm_framebuffer_create_handle,
68 .destroy = msm_framebuffer_destroy, 61 .destroy = msm_framebuffer_destroy,
69 .dirty = msm_framebuffer_dirty,
70}; 62};
71 63
72#ifdef CONFIG_DEBUG_FS 64#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index c6cf837c5193..ffd4a338ca12 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -158,7 +158,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
158 158
159 dev->mode_config.fb_base = paddr; 159 dev->mode_config.fb_base = paddr;
160 160
161 fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo); 161 fbi->screen_base = msm_gem_get_vaddr_locked(fbdev->bo);
162 if (IS_ERR(fbi->screen_base)) { 162 if (IS_ERR(fbi->screen_base)) {
163 ret = PTR_ERR(fbi->screen_base); 163 ret = PTR_ERR(fbi->screen_base);
164 goto fail_unlock; 164 goto fail_unlock;
@@ -188,21 +188,7 @@ fail:
188 return ret; 188 return ret;
189} 189}
190 190
191static void msm_crtc_fb_gamma_set(struct drm_crtc *crtc,
192 u16 red, u16 green, u16 blue, int regno)
193{
194 DBG("fbdev: set gamma");
195}
196
197static void msm_crtc_fb_gamma_get(struct drm_crtc *crtc,
198 u16 *red, u16 *green, u16 *blue, int regno)
199{
200 DBG("fbdev: get gamma");
201}
202
203static const struct drm_fb_helper_funcs msm_fb_helper_funcs = { 191static const struct drm_fb_helper_funcs msm_fb_helper_funcs = {
204 .gamma_set = msm_crtc_fb_gamma_set,
205 .gamma_get = msm_crtc_fb_gamma_get,
206 .fb_probe = msm_fbdev_create, 192 .fb_probe = msm_fbdev_create,
207}; 193};
208 194
@@ -265,6 +251,7 @@ void msm_fbdev_free(struct drm_device *dev)
265 251
266 /* this will free the backing object */ 252 /* this will free the backing object */
267 if (fbdev->fb) { 253 if (fbdev->fb) {
254 msm_gem_put_vaddr(fbdev->bo);
268 drm_framebuffer_unregister_private(fbdev->fb); 255 drm_framebuffer_unregister_private(fbdev->fb);
269 drm_framebuffer_remove(fbdev->fb); 256 drm_framebuffer_remove(fbdev->fb);
270 } 257 }
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 69836f5685b1..6cd4af443139 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -276,6 +276,26 @@ uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
276 return offset; 276 return offset;
277} 277}
278 278
279static void
280put_iova(struct drm_gem_object *obj)
281{
282 struct drm_device *dev = obj->dev;
283 struct msm_drm_private *priv = obj->dev->dev_private;
284 struct msm_gem_object *msm_obj = to_msm_bo(obj);
285 int id;
286
287 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
288
289 for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
290 struct msm_mmu *mmu = priv->mmus[id];
291 if (mmu && msm_obj->domain[id].iova) {
292 uint32_t offset = msm_obj->domain[id].iova;
293 mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
294 msm_obj->domain[id].iova = 0;
295 }
296 }
297}
298
279/* should be called under struct_mutex.. although it can be called 299/* should be called under struct_mutex.. although it can be called
280 * from atomic context without struct_mutex to acquire an extra 300 * from atomic context without struct_mutex to acquire an extra
281 * iova ref if you know one is already held. 301 * iova ref if you know one is already held.
@@ -388,7 +408,7 @@ fail:
388 return ret; 408 return ret;
389} 409}
390 410
391void *msm_gem_vaddr_locked(struct drm_gem_object *obj) 411void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
392{ 412{
393 struct msm_gem_object *msm_obj = to_msm_bo(obj); 413 struct msm_gem_object *msm_obj = to_msm_bo(obj);
394 WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); 414 WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
@@ -401,18 +421,91 @@ void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
401 if (msm_obj->vaddr == NULL) 421 if (msm_obj->vaddr == NULL)
402 return ERR_PTR(-ENOMEM); 422 return ERR_PTR(-ENOMEM);
403 } 423 }
424 msm_obj->vmap_count++;
404 return msm_obj->vaddr; 425 return msm_obj->vaddr;
405} 426}
406 427
407void *msm_gem_vaddr(struct drm_gem_object *obj) 428void *msm_gem_get_vaddr(struct drm_gem_object *obj)
408{ 429{
409 void *ret; 430 void *ret;
410 mutex_lock(&obj->dev->struct_mutex); 431 mutex_lock(&obj->dev->struct_mutex);
411 ret = msm_gem_vaddr_locked(obj); 432 ret = msm_gem_get_vaddr_locked(obj);
412 mutex_unlock(&obj->dev->struct_mutex); 433 mutex_unlock(&obj->dev->struct_mutex);
413 return ret; 434 return ret;
414} 435}
415 436
437void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
438{
439 struct msm_gem_object *msm_obj = to_msm_bo(obj);
440 WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
441 WARN_ON(msm_obj->vmap_count < 1);
442 msm_obj->vmap_count--;
443}
444
445void msm_gem_put_vaddr(struct drm_gem_object *obj)
446{
447 mutex_lock(&obj->dev->struct_mutex);
448 msm_gem_put_vaddr_locked(obj);
449 mutex_unlock(&obj->dev->struct_mutex);
450}
451
452/* Update madvise status, returns true if not purged, else
453 * false or -errno.
454 */
455int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
456{
457 struct msm_gem_object *msm_obj = to_msm_bo(obj);
458
459 WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
460
461 if (msm_obj->madv != __MSM_MADV_PURGED)
462 msm_obj->madv = madv;
463
464 return (msm_obj->madv != __MSM_MADV_PURGED);
465}
466
467void msm_gem_purge(struct drm_gem_object *obj)
468{
469 struct drm_device *dev = obj->dev;
470 struct msm_gem_object *msm_obj = to_msm_bo(obj);
471
472 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
473 WARN_ON(!is_purgeable(msm_obj));
474 WARN_ON(obj->import_attach);
475
476 put_iova(obj);
477
478 msm_gem_vunmap(obj);
479
480 put_pages(obj);
481
482 msm_obj->madv = __MSM_MADV_PURGED;
483
484 drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
485 drm_gem_free_mmap_offset(obj);
486
487 /* Our goal here is to return as much of the memory as
 488 * possible back to the system, as we are called from OOM.
489 * To do this we must instruct the shmfs to drop all of its
490 * backing pages, *now*.
491 */
492 shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
493
494 invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
495 0, (loff_t)-1);
496}
497
498void msm_gem_vunmap(struct drm_gem_object *obj)
499{
500 struct msm_gem_object *msm_obj = to_msm_bo(obj);
501
502 if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
503 return;
504
505 vunmap(msm_obj->vaddr);
506 msm_obj->vaddr = NULL;
507}
508
416/* must be called before _move_to_active().. */ 509/* must be called before _move_to_active().. */
417int msm_gem_sync_object(struct drm_gem_object *obj, 510int msm_gem_sync_object(struct drm_gem_object *obj,
418 struct msm_fence_context *fctx, bool exclusive) 511 struct msm_fence_context *fctx, bool exclusive)
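
With vmap_count in play, the kernel-mapping API becomes strictly get/put paired: the mapping may only be torn down (msm_gem_vunmap(), driven by the vmap-purge notifier added in this series) once the count returns to zero. A minimal usage sketch (fill_buffer() is a placeholder):

    void *ptr = msm_gem_get_vaddr(obj);

    if (IS_ERR(ptr))
            return PTR_ERR(ptr);

    fill_buffer(ptr, obj->size);   /* use the kernel mapping */
    msm_gem_put_vaddr(obj);        /* drop vmap_count; mapping is now
                                    * reclaimable under vmalloc pressure */
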
@@ -464,6 +557,7 @@ void msm_gem_move_to_active(struct drm_gem_object *obj,
464 struct msm_gpu *gpu, bool exclusive, struct fence *fence) 557 struct msm_gpu *gpu, bool exclusive, struct fence *fence)
465{ 558{
466 struct msm_gem_object *msm_obj = to_msm_bo(obj); 559 struct msm_gem_object *msm_obj = to_msm_bo(obj);
560 WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
467 msm_obj->gpu = gpu; 561 msm_obj->gpu = gpu;
468 if (exclusive) 562 if (exclusive)
469 reservation_object_add_excl_fence(msm_obj->resv, fence); 563 reservation_object_add_excl_fence(msm_obj->resv, fence);
@@ -532,13 +626,27 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
532 struct reservation_object_list *fobj; 626 struct reservation_object_list *fobj;
533 struct fence *fence; 627 struct fence *fence;
534 uint64_t off = drm_vma_node_start(&obj->vma_node); 628 uint64_t off = drm_vma_node_start(&obj->vma_node);
629 const char *madv;
535 630
536 WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); 631 WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
537 632
538 seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu\n", 633 switch (msm_obj->madv) {
634 case __MSM_MADV_PURGED:
635 madv = " purged";
636 break;
637 case MSM_MADV_DONTNEED:
638 madv = " purgeable";
639 break;
640 case MSM_MADV_WILLNEED:
641 default:
642 madv = "";
643 break;
644 }
645
646 seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu%s\n",
539 msm_obj->flags, is_active(msm_obj) ? 'A' : 'I', 647 msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
540 obj->name, obj->refcount.refcount.counter, 648 obj->name, obj->refcount.refcount.counter,
541 off, msm_obj->vaddr, obj->size); 649 off, msm_obj->vaddr, obj->size, madv);
542 650
543 rcu_read_lock(); 651 rcu_read_lock();
544 fobj = rcu_dereference(robj->fence); 652 fobj = rcu_dereference(robj->fence);
@@ -578,9 +686,7 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
578void msm_gem_free_object(struct drm_gem_object *obj) 686void msm_gem_free_object(struct drm_gem_object *obj)
579{ 687{
580 struct drm_device *dev = obj->dev; 688 struct drm_device *dev = obj->dev;
581 struct msm_drm_private *priv = obj->dev->dev_private;
582 struct msm_gem_object *msm_obj = to_msm_bo(obj); 689 struct msm_gem_object *msm_obj = to_msm_bo(obj);
583 int id;
584 690
585 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 691 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
586 692
@@ -589,13 +695,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
589 695
590 list_del(&msm_obj->mm_list); 696 list_del(&msm_obj->mm_list);
591 697
592 for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) { 698 put_iova(obj);
593 struct msm_mmu *mmu = priv->mmus[id];
594 if (mmu && msm_obj->domain[id].iova) {
595 uint32_t offset = msm_obj->domain[id].iova;
596 mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
597 }
598 }
599 699
600 if (obj->import_attach) { 700 if (obj->import_attach) {
601 if (msm_obj->vaddr) 701 if (msm_obj->vaddr)
@@ -609,7 +709,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
609 709
610 drm_prime_gem_destroy(obj, msm_obj->sgt); 710 drm_prime_gem_destroy(obj, msm_obj->sgt);
611 } else { 711 } else {
612 vunmap(msm_obj->vaddr); 712 msm_gem_vunmap(obj);
613 put_pages(obj); 713 put_pages(obj);
614 } 714 }
615 715
@@ -688,6 +788,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
688 msm_obj->vram_node = (void *)&msm_obj[1]; 788 msm_obj->vram_node = (void *)&msm_obj[1];
689 789
690 msm_obj->flags = flags; 790 msm_obj->flags = flags;
791 msm_obj->madv = MSM_MADV_WILLNEED;
691 792
692 if (resv) { 793 if (resv) {
693 msm_obj->resv = resv; 794 msm_obj->resv = resv;
@@ -729,9 +830,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
729 return obj; 830 return obj;
730 831
731fail: 832fail:
732 if (obj) 833 drm_gem_object_unreference(obj);
733 drm_gem_object_unreference(obj);
734
735 return ERR_PTR(ret); 834 return ERR_PTR(ret);
736} 835}
737 836
@@ -774,8 +873,6 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
774 return obj; 873 return obj;
775 874
776fail: 875fail:
777 if (obj) 876 drm_gem_object_unreference_unlocked(obj);
778 drm_gem_object_unreference_unlocked(obj);
779
780 return ERR_PTR(ret); 877 return ERR_PTR(ret);
781} 878}
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 9facd4b6ffd9..b2f13cfe945e 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -29,6 +29,16 @@ struct msm_gem_object {
29 29
30 uint32_t flags; 30 uint32_t flags;
31 31
32 /**
33 * Advice: are the backing pages purgeable?
34 */
35 uint8_t madv;
36
37 /**
38 * count of active vmap'ing
39 */
40 uint8_t vmap_count;
41
32 /* And object is either: 42 /* And object is either:
33 * inactive - on priv->inactive_list 43 * inactive - on priv->inactive_list
34 * active - on one one of the gpu's active_list.. well, at 44 * active - on one one of the gpu's active_list.. well, at
@@ -72,7 +82,16 @@ static inline bool is_active(struct msm_gem_object *msm_obj)
72 return msm_obj->gpu != NULL; 82 return msm_obj->gpu != NULL;
73} 83}
74 84
75#define MAX_CMDS 4 85static inline bool is_purgeable(struct msm_gem_object *msm_obj)
86{
87 return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt &&
88 !msm_obj->base.dma_buf && !msm_obj->base.import_attach;
89}
90
91static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
92{
93 return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
94}
76 95
77/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc, 96/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
78 * associated with the cmdstream submission for synchronization (and 97 * associated with the cmdstream submission for synchronization (and
@@ -95,7 +114,7 @@ struct msm_gem_submit {
95 uint32_t size; /* in dwords */ 114 uint32_t size; /* in dwords */
96 uint32_t iova; 115 uint32_t iova;
97 uint32_t idx; /* cmdstream buffer idx in bos[] */ 116 uint32_t idx; /* cmdstream buffer idx in bos[] */
98 } cmd[MAX_CMDS]; 117 } *cmd; /* array of size nr_cmds */
99 struct { 118 struct {
100 uint32_t flags; 119 uint32_t flags;
101 struct msm_gem_object *obj; 120 struct msm_gem_object *obj;
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index 6b90890faffe..60bb290700ce 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -33,12 +33,12 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
33 33
34void *msm_gem_prime_vmap(struct drm_gem_object *obj) 34void *msm_gem_prime_vmap(struct drm_gem_object *obj)
35{ 35{
36 return msm_gem_vaddr(obj); 36 return msm_gem_get_vaddr(obj);
37} 37}
38 38
39void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) 39void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
40{ 40{
41 /* TODO msm_gem_vunmap() */ 41 msm_gem_put_vaddr(obj);
42} 42}
43 43
44int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) 44int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
new file mode 100644
index 000000000000..283d2841ba58
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -0,0 +1,168 @@
1/*
2 * Copyright (C) 2016 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "msm_drv.h"
19#include "msm_gem.h"
20
21static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
22{
23 if (!mutex_is_locked(mutex))
24 return false;
25
26#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
27 return mutex->owner == task;
28#else
29 /* Since UP may be pre-empted, we cannot assume that we own the lock */
30 return false;
31#endif
32}
33
34static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
35{
36 if (!mutex_trylock(&dev->struct_mutex)) {
37 if (!mutex_is_locked_by(&dev->struct_mutex, current))
38 return false;
39 *unlock = false;
40 } else {
41 *unlock = true;
42 }
43
44 return true;
45}
46
47
48static unsigned long
49msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
50{
51 struct msm_drm_private *priv =
52 container_of(shrinker, struct msm_drm_private, shrinker);
53 struct drm_device *dev = priv->dev;
54 struct msm_gem_object *msm_obj;
55 unsigned long count = 0;
56 bool unlock;
57
58 if (!msm_gem_shrinker_lock(dev, &unlock))
59 return 0;
60
61 list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
62 if (is_purgeable(msm_obj))
63 count += msm_obj->base.size >> PAGE_SHIFT;
64 }
65
66 if (unlock)
67 mutex_unlock(&dev->struct_mutex);
68
69 return count;
70}
71
72static unsigned long
73msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
74{
75 struct msm_drm_private *priv =
76 container_of(shrinker, struct msm_drm_private, shrinker);
77 struct drm_device *dev = priv->dev;
78 struct msm_gem_object *msm_obj;
79 unsigned long freed = 0;
80 bool unlock;
81
82 if (!msm_gem_shrinker_lock(dev, &unlock))
83 return SHRINK_STOP;
84
85 list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
86 if (freed >= sc->nr_to_scan)
87 break;
88 if (is_purgeable(msm_obj)) {
89 msm_gem_purge(&msm_obj->base);
90 freed += msm_obj->base.size >> PAGE_SHIFT;
91 }
92 }
93
94 if (unlock)
95 mutex_unlock(&dev->struct_mutex);
96
97 if (freed > 0)
98 pr_info_ratelimited("Purging %lu bytes\n", freed << PAGE_SHIFT);
99
100 return freed;
101}
102
103static int
104msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
105{
106 struct msm_drm_private *priv =
107 container_of(nb, struct msm_drm_private, vmap_notifier);
108 struct drm_device *dev = priv->dev;
109 struct msm_gem_object *msm_obj;
110 unsigned unmapped = 0;
111 bool unlock;
112
113 if (!msm_gem_shrinker_lock(dev, &unlock))
114 return NOTIFY_DONE;
115
116 list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
117 if (is_vunmapable(msm_obj)) {
118 msm_gem_vunmap(&msm_obj->base);
 119 /* since we don't know any better, let's bail after a few
120 * and if necessary the shrinker will be invoked again.
121 * Seems better than unmapping *everything*
122 */
123 if (++unmapped >= 15)
124 break;
125 }
126 }
127
128 if (unlock)
129 mutex_unlock(&dev->struct_mutex);
130
131 *(unsigned long *)ptr += unmapped;
132
133 if (unmapped > 0)
134 pr_info_ratelimited("Purging %u vmaps\n", unmapped);
135
136 return NOTIFY_DONE;
137}
138
139/**
140 * msm_gem_shrinker_init - Initialize msm shrinker
141 * @dev_priv: msm device
142 *
143 * This function registers and sets up the msm shrinker.
144 */
145void msm_gem_shrinker_init(struct drm_device *dev)
146{
147 struct msm_drm_private *priv = dev->dev_private;
148 priv->shrinker.count_objects = msm_gem_shrinker_count;
149 priv->shrinker.scan_objects = msm_gem_shrinker_scan;
150 priv->shrinker.seeks = DEFAULT_SEEKS;
151 WARN_ON(register_shrinker(&priv->shrinker));
152
153 priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
154 WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
155}
156
157/**
158 * msm_gem_shrinker_cleanup - Clean up msm shrinker
159 * @dev_priv: msm device
160 *
161 * This function unregisters the msm shrinker.
162 */
163void msm_gem_shrinker_cleanup(struct drm_device *dev)
164{
165 struct msm_drm_private *priv = dev->dev_private;
166 WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
167 unregister_shrinker(&priv->shrinker);
168}
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index eb4bb8b2f3a5..9766f9ae4b7d 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -29,10 +29,11 @@
29#define BO_PINNED 0x2000 29#define BO_PINNED 0x2000
30 30
31static struct msm_gem_submit *submit_create(struct drm_device *dev, 31static struct msm_gem_submit *submit_create(struct drm_device *dev,
32 struct msm_gpu *gpu, int nr) 32 struct msm_gpu *gpu, int nr_bos, int nr_cmds)
33{ 33{
34 struct msm_gem_submit *submit; 34 struct msm_gem_submit *submit;
35 int sz = sizeof(*submit) + (nr * sizeof(submit->bos[0])); 35 int sz = sizeof(*submit) + (nr_bos * sizeof(submit->bos[0])) +
36 (nr_cmds * sizeof(*submit->cmd));
36 37
37 submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); 38 submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
38 if (!submit) 39 if (!submit)
@@ -42,6 +43,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
42 submit->gpu = gpu; 43 submit->gpu = gpu;
43 submit->fence = NULL; 44 submit->fence = NULL;
44 submit->pid = get_pid(task_pid(current)); 45 submit->pid = get_pid(task_pid(current));
46 submit->cmd = (void *)&submit->bos[nr_bos];
45 47
46 /* initially, until copy_from_user() and bo lookup succeeds: */ 48 /* initially, until copy_from_user() and bo lookup succeeds: */
47 submit->nr_bos = 0; 49 submit->nr_bos = 0;
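
The submit_create() change above replaces the fixed MAX_CMDS array with a second variable-length tail in the same allocation. A sketch of the resulting layout (sizes as computed in submit_create()):

    /*
     *   +-----------------------+--------------+--------------+
     *   | struct msm_gem_submit | bos[nr_bos]  | cmd[nr_cmds] |
     *   +-----------------------+--------------+--------------+
     *                           ^ submit->bos  ^ submit->cmd ==
     *                                            (void *)&submit->bos[nr_bos]
     */
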
@@ -279,7 +281,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
279 /* For now, just map the entire thing. Eventually we probably 281 /* For now, just map the entire thing. Eventually we probably
280 * to do it page-by-page, w/ kmap() if not vmap()d.. 282 * to do it page-by-page, w/ kmap() if not vmap()d..
281 */ 283 */
282 ptr = msm_gem_vaddr_locked(&obj->base); 284 ptr = msm_gem_get_vaddr_locked(&obj->base);
283 285
284 if (IS_ERR(ptr)) { 286 if (IS_ERR(ptr)) {
285 ret = PTR_ERR(ptr); 287 ret = PTR_ERR(ptr);
@@ -332,6 +334,8 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
332 last_offset = off; 334 last_offset = off;
333 } 335 }
334 336
337 msm_gem_put_vaddr_locked(&obj->base);
338
335 return 0; 339 return 0;
336} 340}
337 341
@@ -369,14 +373,15 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
369 if (args->pipe != MSM_PIPE_3D0) 373 if (args->pipe != MSM_PIPE_3D0)
370 return -EINVAL; 374 return -EINVAL;
371 375
372 if (args->nr_cmds > MAX_CMDS) 376 ret = mutex_lock_interruptible(&dev->struct_mutex);
373 return -EINVAL; 377 if (ret)
374 378 return ret;
375 submit = submit_create(dev, gpu, args->nr_bos);
376 if (!submit)
377 return -ENOMEM;
378 379
379 mutex_lock(&dev->struct_mutex); 380 submit = submit_create(dev, gpu, args->nr_bos, args->nr_cmds);
381 if (!submit) {
382 ret = -ENOMEM;
383 goto out_unlock;
384 }
380 385
381 ret = submit_lookup_objects(submit, args, file); 386 ret = submit_lookup_objects(submit, args, file);
382 if (ret) 387 if (ret)
@@ -462,6 +467,7 @@ out:
462 submit_cleanup(submit); 467 submit_cleanup(submit);
463 if (ret) 468 if (ret)
464 msm_gem_submit_free(submit); 469 msm_gem_submit_free(submit);
470out_unlock:
465 mutex_unlock(&dev->struct_mutex); 471 mutex_unlock(&dev->struct_mutex);
466 return ret; 472 return ret;
467} 473}
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index a7a0b6d9b057..3a294d0da3a0 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -59,10 +59,10 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
59 return -EINVAL; 59 return -EINVAL;
60 60
61 for_each_sg(sgt->sgl, sg, sgt->nents, i) { 61 for_each_sg(sgt->sgl, sg, sgt->nents, i) {
62 u32 pa = sg_phys(sg) - sg->offset; 62 dma_addr_t pa = sg_phys(sg) - sg->offset;
63 size_t bytes = sg->length + sg->offset; 63 size_t bytes = sg->length + sg->offset;
64 64
65 VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes); 65 VERB("map[%d]: %08x %08lx(%zx)", i, da, (unsigned long)pa, bytes);
66 66
67 ret = iommu_map(domain, da, pa, bytes, prot); 67 ret = iommu_map(domain, da, pa, bytes, prot);
68 if (ret) 68 if (ret)
@@ -101,7 +101,7 @@ static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
101 if (unmapped < bytes) 101 if (unmapped < bytes)
102 return unmapped; 102 return unmapped;
103 103
104 VERB("unmap[%d]: %08x(%zx)", i, iova, bytes); 104 VERB("unmap[%d]: %08x(%zx)", i, da, bytes);
105 105
106 BUG_ON(!PAGE_ALIGNED(bytes)); 106 BUG_ON(!PAGE_ALIGNED(bytes));
107 107
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index e32222c3d44f..40e41e5cdbc6 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -61,10 +61,8 @@ struct msm_kms_funcs {
61struct msm_kms { 61struct msm_kms {
62 const struct msm_kms_funcs *funcs; 62 const struct msm_kms_funcs *funcs;
63 63
64 /* irq handling: */ 64 /* irq number to be passed on to drm_irq_install */
65 bool in_irq; 65 int irq;
66 struct list_head irq_list; /* list of mdp4_irq */
67 uint32_t vblank_mask; /* irq bits set for userspace vblank */
68}; 66};
69 67
70static inline void msm_kms_init(struct msm_kms *kms, 68static inline void msm_kms_init(struct msm_kms *kms,
@@ -75,5 +73,7 @@ static inline void msm_kms_init(struct msm_kms *kms,
75 73
76struct msm_kms *mdp4_kms_init(struct drm_device *dev); 74struct msm_kms *mdp4_kms_init(struct drm_device *dev);
77struct msm_kms *mdp5_kms_init(struct drm_device *dev); 75struct msm_kms *mdp5_kms_init(struct drm_device *dev);
76int msm_mdss_init(struct drm_device *dev);
77void msm_mdss_destroy(struct drm_device *dev);
78 78
79#endif /* __MSM_KMS_H__ */ 79#endif /* __MSM_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/msm_perf.c b/drivers/gpu/drm/msm/msm_perf.c
index 830857c47c86..17fe4e53e0d1 100644
--- a/drivers/gpu/drm/msm/msm_perf.c
+++ b/drivers/gpu/drm/msm/msm_perf.c
@@ -132,7 +132,7 @@ static ssize_t perf_read(struct file *file, char __user *buf,
132 size_t sz, loff_t *ppos) 132 size_t sz, loff_t *ppos)
133{ 133{
134 struct msm_perf_state *perf = file->private_data; 134 struct msm_perf_state *perf = file->private_data;
135 int n = 0, ret; 135 int n = 0, ret = 0;
136 136
137 mutex_lock(&perf->read_lock); 137 mutex_lock(&perf->read_lock);
138 138
@@ -143,9 +143,10 @@ static ssize_t perf_read(struct file *file, char __user *buf,
143 } 143 }
144 144
145 n = min((int)sz, perf->buftot - perf->bufpos); 145 n = min((int)sz, perf->buftot - perf->bufpos);
146 ret = copy_to_user(buf, &perf->buf[perf->bufpos], n); 146 if (copy_to_user(buf, &perf->buf[perf->bufpos], n)) {
147 if (ret) 147 ret = -EFAULT;
148 goto out; 148 goto out;
149 }
149 150
150 perf->bufpos += n; 151 perf->bufpos += n;
151 *ppos += n; 152 *ppos += n;
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 0857710c2ff2..3a5fdfcd67ae 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -27,6 +27,11 @@
27 * This bypasses drm_debugfs_create_files() mainly because we need to use 27 * This bypasses drm_debugfs_create_files() mainly because we need to use
28 * our own fops for a bit more control. In particular, we don't want to 28 * our own fops for a bit more control. In particular, we don't want to
29 * do anything if userspace doesn't have the debugfs file open. 29 * do anything if userspace doesn't have the debugfs file open.
30 *
31 * The module-param "rd_full", which defaults to false, enables snapshotting
32 * all (non-written) buffers in the submit, rather than just cmdstream bo's.
33 * This is useful to capture the contents of (for example) vbo's or textures,
34 * or shader programs (if not emitted inline in cmdstream).
30 */ 35 */
31 36
32#ifdef CONFIG_DEBUG_FS 37#ifdef CONFIG_DEBUG_FS
@@ -40,6 +45,10 @@
40#include "msm_gpu.h" 45#include "msm_gpu.h"
41#include "msm_gem.h" 46#include "msm_gem.h"
42 47
48static bool rd_full = false;
49MODULE_PARM_DESC(rd_full, "If true, $debugfs/.../rd will snapshot all buffer contents");
50module_param_named(rd_full, rd_full, bool, 0600);
51
43enum rd_sect_type { 52enum rd_sect_type {
44 RD_NONE, 53 RD_NONE,
45 RD_TEST, /* ascii text */ 54 RD_TEST, /* ascii text */
@@ -140,9 +149,10 @@ static ssize_t rd_read(struct file *file, char __user *buf,
140 goto out; 149 goto out;
141 150
142 n = min_t(int, sz, circ_count_to_end(&rd->fifo)); 151 n = min_t(int, sz, circ_count_to_end(&rd->fifo));
143 ret = copy_to_user(buf, fptr, n); 152 if (copy_to_user(buf, fptr, n)) {
144 if (ret) 153 ret = -EFAULT;
145 goto out; 154 goto out;
155 }
146 156
147 fifo->tail = (fifo->tail + n) & (BUF_SZ - 1); 157 fifo->tail = (fifo->tail + n) & (BUF_SZ - 1);
148 *ppos += n; 158 *ppos += n;
@@ -277,6 +287,31 @@ void msm_rd_debugfs_cleanup(struct drm_minor *minor)
277 kfree(rd); 287 kfree(rd);
278} 288}
279 289
290static void snapshot_buf(struct msm_rd_state *rd,
291 struct msm_gem_submit *submit, int idx,
292 uint32_t iova, uint32_t size)
293{
294 struct msm_gem_object *obj = submit->bos[idx].obj;
295 const char *buf;
296
297 buf = msm_gem_get_vaddr_locked(&obj->base);
298 if (IS_ERR(buf))
299 return;
300
301 if (iova) {
302 buf += iova - submit->bos[idx].iova;
303 } else {
304 iova = submit->bos[idx].iova;
305 size = obj->base.size;
306 }
307
308 rd_write_section(rd, RD_GPUADDR,
309 (uint32_t[2]){ iova, size }, 8);
310 rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size);
311
312 msm_gem_put_vaddr_locked(&obj->base);
313}
314
280/* called under struct_mutex */ 315/* called under struct_mutex */
281void msm_rd_dump_submit(struct msm_gem_submit *submit) 316void msm_rd_dump_submit(struct msm_gem_submit *submit)
282{ 317{
@@ -300,27 +335,27 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
300 335
301 rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4)); 336 rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
302 337
303 /* could be nice to have an option (module-param?) to snapshot 338 if (rd_full) {
304 * all the bo's associated with the submit. Handy to see vtx 339 for (i = 0; i < submit->nr_bos; i++) {
305 * buffers, etc. For now just the cmdstream bo's is enough. 340 /* buffers that are written to probably don't start out
306 */ 341 * with anything interesting:
342 */
343 if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
344 continue;
345
346 snapshot_buf(rd, submit, i, 0, 0);
347 }
348 }
307 349
308 for (i = 0; i < submit->nr_cmds; i++) { 350 for (i = 0; i < submit->nr_cmds; i++) {
309 uint32_t idx = submit->cmd[i].idx;
310 uint32_t iova = submit->cmd[i].iova; 351 uint32_t iova = submit->cmd[i].iova;
311 uint32_t szd = submit->cmd[i].size; /* in dwords */ 352 uint32_t szd = submit->cmd[i].size; /* in dwords */
312 struct msm_gem_object *obj = submit->bos[idx].obj;
313 const char *buf = msm_gem_vaddr_locked(&obj->base);
314
315 if (IS_ERR(buf))
316 continue;
317 353
318 buf += iova - submit->bos[idx].iova; 354 /* snapshot cmdstream bo's (if we haven't already): */
319 355 if (!rd_full) {
320 rd_write_section(rd, RD_GPUADDR, 356 snapshot_buf(rd, submit, submit->cmd[i].idx,
321 (uint32_t[2]){ iova, szd * 4 }, 8); 357 submit->cmd[i].iova, szd * 4);
322 rd_write_section(rd, RD_BUFFER_CONTENTS, 358 }
323 buf, szd * 4);
324 359
325 switch (submit->cmd[i].type) { 360 switch (submit->cmd[i].type) {
326 case MSM_SUBMIT_CMD_IB_TARGET_BUF: 361 case MSM_SUBMIT_CMD_IB_TARGET_BUF:
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 42f5359cf988..f326cf6a32e6 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -39,7 +39,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
39 goto fail; 39 goto fail;
40 } 40 }
41 41
42 ring->start = msm_gem_vaddr_locked(ring->bo); 42 ring->start = msm_gem_get_vaddr_locked(ring->bo);
43 if (IS_ERR(ring->start)) { 43 if (IS_ERR(ring->start)) {
44 ret = PTR_ERR(ring->start); 44 ret = PTR_ERR(ring->start);
45 goto fail; 45 goto fail;
@@ -59,7 +59,9 @@ fail:
59 59
60void msm_ringbuffer_destroy(struct msm_ringbuffer *ring) 60void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
61{ 61{
62 if (ring->bo) 62 if (ring->bo) {
63 msm_gem_put_vaddr(ring->bo);
63 drm_gem_object_unreference_unlocked(ring->bo); 64 drm_gem_object_unreference_unlocked(ring->bo);
65 }
64 kfree(ring); 66 kfree(ring);
65} 67}