author		Dave Airlie <airlied@redhat.com>	2014-12-07 22:45:18 -0500
committer	Dave Airlie <airlied@redhat.com>	2014-12-07 22:45:18 -0500
commit		b75478d1c7e4758abcf15c8494d70d4dc4dcb553 (patch)
tree		fcb297f559a1d95cc94a5eb13eb1055b5bed3e9e
parent		b00ff043d164bba5d43cbac42bdf0aeeb43fbda8 (diff)
parent		5ac4837b12f533de5d9f8f66b45494c58e805536 (diff)
Merge branch 'drm_iommu_v15' of https://github.com/markyzq/kernel-drm-rockchip into drm-next
Merge rockchip GPU support.
This has a branch in common with the iommu tree, hopefully the
process works.
* 'drm_iommu_v15' of https://github.com/markyzq/kernel-drm-rockchip:
dt-bindings: video: Add documentation for rockchip vop
dt-bindings: video: Add for rockchip display subsytem
drm: rockchip: Add basic drm driver
dt-bindings: iommu: Add documentation for rockchip iommu
iommu/rockchip: rk3288 iommu driver
20 files changed, 4265 insertions, 0 deletions
diff --git a/Documentation/devicetree/bindings/iommu/rockchip,iommu.txt b/Documentation/devicetree/bindings/iommu/rockchip,iommu.txt new file mode 100644 index 000000000000..9a55ac3735e5 --- /dev/null +++ b/Documentation/devicetree/bindings/iommu/rockchip,iommu.txt | |||
@@ -0,0 +1,26 @@ | |||
1 | Rockchip IOMMU | ||
2 | ============== | ||
3 | |||
4 | A Rockchip DRM iommu translates io virtual addresses to physical addresses for | ||
5 | its master device. Each slave device is bound to a single master device, and | ||
6 | shares its clocks, power domain and irq. | ||
7 | |||
8 | Required properties: | ||
9 | - compatible : Should be "rockchip,iommu" | ||
10 | - reg : Address space for the configuration registers | ||
11 | - interrupts : Interrupt specifier for the IOMMU instance | ||
12 | - interrupt-names : Interrupt name for the IOMMU instance | ||
13 | - #iommu-cells : Should be <0>. This indicates the iommu is a | ||
14 | "single-master" device, and needs no additional information | ||
15 | to associate with its master device. See: | ||
16 | Documentation/devicetree/bindings/iommu/iommu.txt | ||
17 | |||
18 | Example: | ||
19 | |||
20 | vopl_mmu: iommu@ff940300 { | ||
21 | compatible = "rockchip,iommu"; | ||
22 | reg = <0xff940300 0x100>; | ||
23 | interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>; | ||
24 | interrupt-names = "vopl_mmu"; | ||
25 | #iommu-cells = <0>; | ||
26 | }; | ||
diff --git a/Documentation/devicetree/bindings/video/rockchip-drm.txt b/Documentation/devicetree/bindings/video/rockchip-drm.txt new file mode 100644 index 000000000000..7fff582495a2 --- /dev/null +++ b/Documentation/devicetree/bindings/video/rockchip-drm.txt | |||
@@ -0,0 +1,19 @@ | |||
1 | Rockchip DRM master device | ||
2 | ================================ | ||
3 | |||
4 | The Rockchip DRM master device is a virtual device needed to list all | ||
5 | vop devices or other display interface nodes that comprise the | ||
6 | graphics subsystem. | ||
7 | |||
8 | Required properties: | ||
9 | - compatible: Should be "rockchip,display-subsystem" | ||
10 | - ports: Should contain a list of phandles pointing to the display interface | ||
11 | ports of vop devices. vop definitions are as described in | ||
12 | Documentation/devicetree/bindings/video/rockchip-vop.txt | ||
13 | |||
14 | Example: | ||
15 | |||
16 | display-subsystem { | ||
17 | compatible = "rockchip,display-subsystem"; | ||
18 | ports = <&vopl_out>, <&vopb_out>; | ||
19 | }; | ||
diff --git a/Documentation/devicetree/bindings/video/rockchip-vop.txt b/Documentation/devicetree/bindings/video/rockchip-vop.txt new file mode 100644 index 000000000000..d15351f2313d --- /dev/null +++ b/Documentation/devicetree/bindings/video/rockchip-vop.txt | |||
@@ -0,0 +1,58 @@ | |||
1 | Device-tree bindings for the Rockchip SoC display controller (VOP) | ||
2 | |||
3 | VOP (Visual Output Processor) is the display controller for the Rockchip | ||
4 | series of SoCs; it transfers image data from a video memory buffer to an | ||
5 | external LCD interface. | ||
6 | |||
7 | Required properties: | ||
8 | - compatible: value should be one of the following | ||
9 | "rockchip,rk3288-vop"; | ||
10 | |||
11 | - interrupts: should contain a list of all VOP IP block interrupts in the | ||
12 | order: VSYNC, LCD_SYSTEM. The interrupt specifier | ||
13 | format depends on the interrupt controller used. | ||
14 | |||
15 | - clocks: must include clock specifiers corresponding to entries in the | ||
16 | clock-names property. | ||
17 | |||
18 | - clock-names: Must contain | ||
19 | aclk_vop: for ddr buffer transfer. | ||
20 | hclk_vop: for ahb bus to R/W the phy regs. | ||
21 | dclk_vop: pixel clock. | ||
22 | |||
23 | - resets: Must contain an entry for each entry in reset-names. | ||
24 | See ../reset/reset.txt for details. | ||
25 | - reset-names: Must include the following entries: | ||
26 | - axi | ||
27 | - ahb | ||
28 | - dclk | ||
29 | |||
30 | - iommus: phandle to the iommu node attached to this VOP | ||
31 | |||
32 | - port: A port node with endpoint definitions as defined in | ||
33 | Documentation/devicetree/bindings/media/video-interfaces.txt. | ||
34 | |||
35 | Example: | ||
36 | SoC specific DT entry: | ||
37 | vopb: vopb@ff930000 { | ||
38 | compatible = "rockchip,rk3288-vop"; | ||
39 | reg = <0xff930000 0x19c>; | ||
40 | interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>; | ||
41 | clocks = <&cru ACLK_VOP0>, <&cru DCLK_VOP0>, <&cru HCLK_VOP0>; | ||
42 | clock-names = "aclk_vop", "dclk_vop", "hclk_vop"; | ||
43 | resets = <&cru SRST_LCDC1_AXI>, <&cru SRST_LCDC1_AHB>, <&cru SRST_LCDC1_DCLK>; | ||
44 | reset-names = "axi", "ahb", "dclk"; | ||
45 | iommus = <&vopb_mmu>; | ||
46 | vopb_out: port { | ||
47 | #address-cells = <1>; | ||
48 | #size-cells = <0>; | ||
49 | vopb_out_edp: endpoint@0 { | ||
50 | reg = <0>; | ||
51 | remote-endpoint=<&edp_in_vopb>; | ||
52 | }; | ||
53 | vopb_out_hdmi: endpoint@1 { | ||
54 | reg = <1>; | ||
55 | remote-endpoint=<&hdmi_in_vopb>; | ||
56 | }; | ||
57 | }; | ||
58 | }; | ||
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 24c2d7caedd5..c3413b6adb17 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig | |||
@@ -167,6 +167,8 @@ config DRM_SAVAGE | |||
167 | 167 | ||
168 | source "drivers/gpu/drm/exynos/Kconfig" | 168 | source "drivers/gpu/drm/exynos/Kconfig" |
169 | 169 | ||
170 | source "drivers/gpu/drm/rockchip/Kconfig" | ||
171 | |||
170 | source "drivers/gpu/drm/vmwgfx/Kconfig" | 172 | source "drivers/gpu/drm/vmwgfx/Kconfig" |
171 | 173 | ||
172 | source "drivers/gpu/drm/gma500/Kconfig" | 174 | source "drivers/gpu/drm/gma500/Kconfig" |
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 47d89869c5df..66e40398b3d3 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile | |||
@@ -49,6 +49,7 @@ obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/ | |||
49 | obj-$(CONFIG_DRM_VIA) +=via/ | 49 | obj-$(CONFIG_DRM_VIA) +=via/ |
50 | obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/ | 50 | obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/ |
51 | obj-$(CONFIG_DRM_EXYNOS) +=exynos/ | 51 | obj-$(CONFIG_DRM_EXYNOS) +=exynos/ |
52 | obj-$(CONFIG_DRM_ROCKCHIP) +=rockchip/ | ||
52 | obj-$(CONFIG_DRM_GMA500) += gma500/ | 53 | obj-$(CONFIG_DRM_GMA500) += gma500/ |
53 | obj-$(CONFIG_DRM_UDL) += udl/ | 54 | obj-$(CONFIG_DRM_UDL) += udl/ |
54 | obj-$(CONFIG_DRM_AST) += ast/ | 55 | obj-$(CONFIG_DRM_AST) += ast/ |
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig new file mode 100644 index 000000000000..ca9f085efa92 --- /dev/null +++ b/drivers/gpu/drm/rockchip/Kconfig | |||
@@ -0,0 +1,17 @@ | |||
1 | config DRM_ROCKCHIP | ||
2 | tristate "DRM Support for Rockchip" | ||
3 | depends on DRM && ROCKCHIP_IOMMU | ||
4 | select DRM_KMS_HELPER | ||
5 | select DRM_KMS_FB_HELPER | ||
6 | select DRM_PANEL | ||
7 | select FB_CFB_FILLRECT | ||
8 | select FB_CFB_COPYAREA | ||
9 | select FB_CFB_IMAGEBLIT | ||
10 | select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE | ||
11 | select VIDEOMODE_HELPERS | ||
12 | help | ||
13 | Choose this option if you have a Rockchip SoC chipset. | ||
14 | This driver provides kernel mode setting and buffer | ||
15 | management to userspace. This driver does not provide | ||
16 | 2D or 3D acceleration; acceleration is performed by other | ||
17 | IP found on the SoC. | ||
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile new file mode 100644 index 000000000000..2cb0672f57ed --- /dev/null +++ b/drivers/gpu/drm/rockchip/Makefile | |||
@@ -0,0 +1,8 @@ | |||
1 | # | ||
2 | # Makefile for the drm device driver. This driver provides support for the | ||
3 | # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. | ||
4 | |||
5 | rockchipdrm-y := rockchip_drm_drv.o rockchip_drm_fb.o rockchip_drm_fbdev.o \ | ||
6 | rockchip_drm_gem.o | ||
7 | |||
8 | obj-$(CONFIG_DRM_ROCKCHIP) += rockchipdrm.o rockchip_drm_vop.o | ||
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c new file mode 100644 index 000000000000..a798c7c71f91 --- /dev/null +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c | |||
@@ -0,0 +1,551 @@ | |||
1 | /* | ||
2 | * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd | ||
3 | * Author:Mark Yao <mark.yao@rock-chips.com> | ||
4 | * | ||
5 | * based on exynos_drm_drv.c | ||
6 | * | ||
7 | * This software is licensed under the terms of the GNU General Public | ||
8 | * License version 2, as published by the Free Software Foundation, and | ||
9 | * may be copied, distributed, and modified under those terms. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | */ | ||
16 | |||
17 | #include <asm/dma-iommu.h> | ||
18 | |||
19 | #include <drm/drmP.h> | ||
20 | #include <drm/drm_crtc_helper.h> | ||
21 | #include <drm/drm_fb_helper.h> | ||
22 | #include <linux/dma-mapping.h> | ||
23 | #include <linux/pm_runtime.h> | ||
24 | #include <linux/of_graph.h> | ||
25 | #include <linux/component.h> | ||
26 | |||
27 | #include "rockchip_drm_drv.h" | ||
28 | #include "rockchip_drm_fb.h" | ||
29 | #include "rockchip_drm_fbdev.h" | ||
30 | #include "rockchip_drm_gem.h" | ||
31 | |||
32 | #define DRIVER_NAME "rockchip" | ||
33 | #define DRIVER_DESC "RockChip Soc DRM" | ||
34 | #define DRIVER_DATE "20140818" | ||
35 | #define DRIVER_MAJOR 1 | ||
36 | #define DRIVER_MINOR 0 | ||
37 | |||
38 | /* | ||
39 | * Attach a (component) device to the shared drm dma mapping from master drm | ||
40 | * device. This is used by the VOPs to map GEM buffers to a common DMA | ||
41 | * mapping. | ||
42 | */ | ||
43 | int rockchip_drm_dma_attach_device(struct drm_device *drm_dev, | ||
44 | struct device *dev) | ||
45 | { | ||
46 | struct dma_iommu_mapping *mapping = drm_dev->dev->archdata.mapping; | ||
47 | int ret; | ||
48 | |||
49 | ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); | ||
50 | if (ret) | ||
51 | return ret; | ||
52 | |||
53 | dma_set_max_seg_size(dev, DMA_BIT_MASK(32)); | ||
54 | |||
55 | return arm_iommu_attach_device(dev, mapping); | ||
56 | } | ||
57 | EXPORT_SYMBOL_GPL(rockchip_drm_dma_attach_device); | ||
58 | |||
59 | void rockchip_drm_dma_detach_device(struct drm_device *drm_dev, | ||
60 | struct device *dev) | ||
61 | { | ||
62 | arm_iommu_detach_device(dev); | ||
63 | } | ||
64 | EXPORT_SYMBOL_GPL(rockchip_drm_dma_detach_device); | ||
65 | |||
66 | int rockchip_register_crtc_funcs(struct drm_device *dev, | ||
67 | const struct rockchip_crtc_funcs *crtc_funcs, | ||
68 | int pipe) | ||
69 | { | ||
70 | struct rockchip_drm_private *priv = dev->dev_private; | ||
71 | |||
72 | if (pipe >= ROCKCHIP_MAX_CRTC) | ||
73 | return -EINVAL; | ||
74 | |||
75 | priv->crtc_funcs[pipe] = crtc_funcs; | ||
76 | |||
77 | return 0; | ||
78 | } | ||
79 | EXPORT_SYMBOL_GPL(rockchip_register_crtc_funcs); | ||
80 | |||
81 | void rockchip_unregister_crtc_funcs(struct drm_device *dev, int pipe) | ||
82 | { | ||
83 | struct rockchip_drm_private *priv = dev->dev_private; | ||
84 | |||
86 | if (pipe >= ROCKCHIP_MAX_CRTC) | ||
86 | return; | ||
87 | |||
88 | priv->crtc_funcs[pipe] = NULL; | ||
89 | } | ||
90 | EXPORT_SYMBOL_GPL(rockchip_unregister_crtc_funcs); | ||
91 | |||
92 | static struct drm_crtc *rockchip_crtc_from_pipe(struct drm_device *drm, | ||
93 | int pipe) | ||
94 | { | ||
95 | struct drm_crtc *crtc; | ||
96 | int i = 0; | ||
97 | |||
98 | list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) | ||
99 | if (i++ == pipe) | ||
100 | return crtc; | ||
101 | |||
102 | return NULL; | ||
103 | } | ||
104 | |||
105 | static int rockchip_drm_crtc_enable_vblank(struct drm_device *dev, int pipe) | ||
106 | { | ||
107 | struct rockchip_drm_private *priv = dev->dev_private; | ||
108 | struct drm_crtc *crtc = rockchip_crtc_from_pipe(dev, pipe); | ||
109 | |||
110 | if (crtc && priv->crtc_funcs[pipe] && | ||
111 | priv->crtc_funcs[pipe]->enable_vblank) | ||
112 | return priv->crtc_funcs[pipe]->enable_vblank(crtc); | ||
113 | |||
114 | return 0; | ||
115 | } | ||
116 | |||
117 | static void rockchip_drm_crtc_disable_vblank(struct drm_device *dev, int pipe) | ||
118 | { | ||
119 | struct rockchip_drm_private *priv = dev->dev_private; | ||
120 | struct drm_crtc *crtc = rockchip_crtc_from_pipe(dev, pipe); | ||
121 | |||
122 | if (crtc && priv->crtc_funcs[pipe] && | ||
123 | priv->crtc_funcs[pipe]->disable_vblank) | ||
124 | priv->crtc_funcs[pipe]->disable_vblank(crtc); | ||
125 | } | ||
126 | |||
127 | static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags) | ||
128 | { | ||
129 | struct rockchip_drm_private *private; | ||
130 | struct dma_iommu_mapping *mapping; | ||
131 | struct device *dev = drm_dev->dev; | ||
132 | int ret; | ||
133 | |||
134 | private = devm_kzalloc(drm_dev->dev, sizeof(*private), GFP_KERNEL); | ||
135 | if (!private) | ||
136 | return -ENOMEM; | ||
137 | |||
138 | drm_dev->dev_private = private; | ||
139 | |||
140 | drm_mode_config_init(drm_dev); | ||
141 | |||
142 | rockchip_drm_mode_config_init(drm_dev); | ||
143 | |||
144 | dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), | ||
145 | GFP_KERNEL); | ||
146 | if (!dev->dma_parms) { | ||
147 | ret = -ENOMEM; | ||
148 | goto err_config_cleanup; | ||
149 | } | ||
150 | |||
151 | /* TODO(djkurtz): fetch the mapping start/size from somewhere */ | ||
152 | mapping = arm_iommu_create_mapping(&platform_bus_type, 0x00000000, | ||
153 | SZ_2G); | ||
154 | if (IS_ERR(mapping)) { | ||
155 | ret = PTR_ERR(mapping); | ||
156 | goto err_config_cleanup; | ||
157 | } | ||
158 | |||
159 | ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); | ||
160 | if (ret) | ||
161 | goto err_release_mapping; | ||
162 | |||
163 | dma_set_max_seg_size(dev, DMA_BIT_MASK(32)); | ||
164 | |||
165 | ret = arm_iommu_attach_device(dev, mapping); | ||
166 | if (ret) | ||
167 | goto err_release_mapping; | ||
168 | |||
169 | /* Try to bind all sub drivers. */ | ||
170 | ret = component_bind_all(dev, drm_dev); | ||
171 | if (ret) | ||
172 | goto err_detach_device; | ||
173 | |||
174 | /* init kms poll for handling hpd */ | ||
175 | drm_kms_helper_poll_init(drm_dev); | ||
176 | |||
177 | /* | ||
178 | * enable drm irq mode. | ||
179 | * - with irq_enabled = true, we can use the vblank feature. | ||
180 | */ | ||
181 | drm_dev->irq_enabled = true; | ||
182 | |||
183 | ret = drm_vblank_init(drm_dev, ROCKCHIP_MAX_CRTC); | ||
184 | if (ret) | ||
185 | goto err_kms_helper_poll_fini; | ||
186 | |||
187 | /* | ||
188 | * with vblank_disable_allowed = true, the vblank interrupt will be | ||
189 | * disabled by the drm timer once the current process gives up ownership | ||
190 | * of the vblank event (i.e. after drm_vblank_put() is called). | ||
191 | */ | ||
192 | drm_dev->vblank_disable_allowed = true; | ||
193 | |||
194 | ret = rockchip_drm_fbdev_init(drm_dev); | ||
195 | if (ret) | ||
196 | goto err_vblank_cleanup; | ||
197 | |||
198 | return 0; | ||
199 | err_vblank_cleanup: | ||
200 | drm_vblank_cleanup(drm_dev); | ||
201 | err_kms_helper_poll_fini: | ||
202 | drm_kms_helper_poll_fini(drm_dev); | ||
203 | component_unbind_all(dev, drm_dev); | ||
204 | err_detach_device: | ||
205 | arm_iommu_detach_device(dev); | ||
206 | err_release_mapping: | ||
207 | arm_iommu_release_mapping(dev->archdata.mapping); | ||
208 | err_config_cleanup: | ||
209 | drm_mode_config_cleanup(drm_dev); | ||
210 | drm_dev->dev_private = NULL; | ||
211 | return ret; | ||
212 | } | ||
213 | |||
214 | static int rockchip_drm_unload(struct drm_device *drm_dev) | ||
215 | { | ||
216 | struct device *dev = drm_dev->dev; | ||
217 | |||
218 | rockchip_drm_fbdev_fini(drm_dev); | ||
219 | drm_vblank_cleanup(drm_dev); | ||
220 | drm_kms_helper_poll_fini(drm_dev); | ||
221 | component_unbind_all(dev, drm_dev); | ||
222 | arm_iommu_detach_device(dev); | ||
223 | arm_iommu_release_mapping(dev->archdata.mapping); | ||
224 | drm_mode_config_cleanup(drm_dev); | ||
225 | drm_dev->dev_private = NULL; | ||
226 | |||
227 | return 0; | ||
228 | } | ||
229 | |||
230 | void rockchip_drm_lastclose(struct drm_device *dev) | ||
231 | { | ||
232 | struct rockchip_drm_private *priv = dev->dev_private; | ||
233 | |||
234 | drm_fb_helper_restore_fbdev_mode_unlocked(&priv->fbdev_helper); | ||
235 | } | ||
236 | |||
237 | static const struct file_operations rockchip_drm_driver_fops = { | ||
238 | .owner = THIS_MODULE, | ||
239 | .open = drm_open, | ||
240 | .mmap = rockchip_gem_mmap, | ||
241 | .poll = drm_poll, | ||
242 | .read = drm_read, | ||
243 | .unlocked_ioctl = drm_ioctl, | ||
244 | #ifdef CONFIG_COMPAT | ||
245 | .compat_ioctl = drm_compat_ioctl, | ||
246 | #endif | ||
247 | .release = drm_release, | ||
248 | }; | ||
249 | |||
250 | const struct vm_operations_struct rockchip_drm_vm_ops = { | ||
251 | .open = drm_gem_vm_open, | ||
252 | .close = drm_gem_vm_close, | ||
253 | }; | ||
254 | |||
255 | static struct drm_driver rockchip_drm_driver = { | ||
256 | .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME, | ||
257 | .load = rockchip_drm_load, | ||
258 | .unload = rockchip_drm_unload, | ||
259 | .lastclose = rockchip_drm_lastclose, | ||
260 | .get_vblank_counter = drm_vblank_count, | ||
261 | .enable_vblank = rockchip_drm_crtc_enable_vblank, | ||
262 | .disable_vblank = rockchip_drm_crtc_disable_vblank, | ||
263 | .gem_vm_ops = &rockchip_drm_vm_ops, | ||
264 | .gem_free_object = rockchip_gem_free_object, | ||
265 | .dumb_create = rockchip_gem_dumb_create, | ||
266 | .dumb_map_offset = rockchip_gem_dumb_map_offset, | ||
267 | .dumb_destroy = drm_gem_dumb_destroy, | ||
268 | .prime_handle_to_fd = drm_gem_prime_handle_to_fd, | ||
269 | .prime_fd_to_handle = drm_gem_prime_fd_to_handle, | ||
270 | .gem_prime_import = drm_gem_prime_import, | ||
271 | .gem_prime_export = drm_gem_prime_export, | ||
272 | .gem_prime_get_sg_table = rockchip_gem_prime_get_sg_table, | ||
273 | .gem_prime_vmap = rockchip_gem_prime_vmap, | ||
274 | .gem_prime_vunmap = rockchip_gem_prime_vunmap, | ||
275 | .gem_prime_mmap = rockchip_gem_mmap_buf, | ||
276 | .fops = &rockchip_drm_driver_fops, | ||
277 | .name = DRIVER_NAME, | ||
278 | .desc = DRIVER_DESC, | ||
279 | .date = DRIVER_DATE, | ||
280 | .major = DRIVER_MAJOR, | ||
281 | .minor = DRIVER_MINOR, | ||
282 | }; | ||
283 | |||
284 | #ifdef CONFIG_PM_SLEEP | ||
285 | static int rockchip_drm_sys_suspend(struct device *dev) | ||
286 | { | ||
287 | struct drm_device *drm = dev_get_drvdata(dev); | ||
288 | struct drm_connector *connector; | ||
289 | |||
290 | if (!drm) | ||
291 | return 0; | ||
292 | |||
293 | drm_modeset_lock_all(drm); | ||
294 | list_for_each_entry(connector, &drm->mode_config.connector_list, head) { | ||
295 | int old_dpms = connector->dpms; | ||
296 | |||
297 | if (connector->funcs->dpms) | ||
298 | connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF); | ||
299 | |||
300 | /* Set the old mode back to the connector for resume */ | ||
301 | connector->dpms = old_dpms; | ||
302 | } | ||
303 | drm_modeset_unlock_all(drm); | ||
304 | |||
305 | return 0; | ||
306 | } | ||
307 | |||
308 | static int rockchip_drm_sys_resume(struct device *dev) | ||
309 | { | ||
310 | struct drm_device *drm = dev_get_drvdata(dev); | ||
311 | struct drm_connector *connector; | ||
312 | enum drm_connector_status status; | ||
313 | bool changed = false; | ||
314 | |||
315 | if (!drm) | ||
316 | return 0; | ||
317 | |||
318 | drm_modeset_lock_all(drm); | ||
319 | list_for_each_entry(connector, &drm->mode_config.connector_list, head) { | ||
320 | int desired_mode = connector->dpms; | ||
321 | |||
322 | /* | ||
323 | * at suspend time we forced the connector off but saved the desired | ||
324 | * mode back into connector->dpms, so right now the actual hardware | ||
325 | * state is DRM_MODE_DPMS_OFF. | ||
326 | */ | ||
327 | connector->dpms = DRM_MODE_DPMS_OFF; | ||
328 | |||
329 | /* | ||
330 | * If the connector has been disconnected during suspend, | ||
331 | * disconnect it from the encoder and leave it off. We'll notify | ||
332 | * userspace at the end. | ||
333 | */ | ||
334 | if (desired_mode == DRM_MODE_DPMS_ON) { | ||
335 | status = connector->funcs->detect(connector, true); | ||
336 | if (status == connector_status_disconnected) { | ||
337 | connector->encoder = NULL; | ||
338 | connector->status = status; | ||
339 | changed = true; | ||
340 | continue; | ||
341 | } | ||
342 | } | ||
343 | if (connector->funcs->dpms) | ||
344 | connector->funcs->dpms(connector, desired_mode); | ||
345 | } | ||
346 | drm_modeset_unlock_all(drm); | ||
347 | |||
348 | drm_helper_resume_force_mode(drm); | ||
349 | |||
350 | if (changed) | ||
351 | drm_kms_helper_hotplug_event(drm); | ||
352 | |||
353 | return 0; | ||
354 | } | ||
355 | #endif | ||
356 | |||
357 | static const struct dev_pm_ops rockchip_drm_pm_ops = { | ||
358 | SET_SYSTEM_SLEEP_PM_OPS(rockchip_drm_sys_suspend, | ||
359 | rockchip_drm_sys_resume) | ||
360 | }; | ||
361 | |||
362 | /* | ||
363 | * @node: device tree node containing encoder input ports | ||
364 | * @encoder: drm_encoder | ||
365 | */ | ||
366 | int rockchip_drm_encoder_get_mux_id(struct device_node *node, | ||
367 | struct drm_encoder *encoder) | ||
368 | { | ||
369 | struct device_node *ep = NULL; | ||
370 | struct drm_crtc *crtc = encoder->crtc; | ||
371 | struct of_endpoint endpoint; | ||
372 | struct device_node *port; | ||
373 | int ret; | ||
374 | |||
375 | if (!node || !crtc) | ||
376 | return -EINVAL; | ||
377 | |||
378 | do { | ||
379 | ep = of_graph_get_next_endpoint(node, ep); | ||
380 | if (!ep) | ||
381 | break; | ||
382 | |||
383 | port = of_graph_get_remote_port(ep); | ||
384 | of_node_put(port); | ||
385 | if (port == crtc->port) { | ||
386 | ret = of_graph_parse_endpoint(ep, &endpoint); | ||
387 | return ret ?: endpoint.id; | ||
388 | } | ||
389 | } while (ep); | ||
390 | |||
391 | return -EINVAL; | ||
392 | } | ||
393 | |||
394 | static int compare_of(struct device *dev, void *data) | ||
395 | { | ||
396 | struct device_node *np = data; | ||
397 | |||
398 | return dev->of_node == np; | ||
399 | } | ||
400 | |||
401 | static void rockchip_add_endpoints(struct device *dev, | ||
402 | struct component_match **match, | ||
403 | struct device_node *port) | ||
404 | { | ||
405 | struct device_node *ep, *remote; | ||
406 | |||
407 | for_each_child_of_node(port, ep) { | ||
408 | remote = of_graph_get_remote_port_parent(ep); | ||
409 | if (!remote || !of_device_is_available(remote)) { | ||
410 | of_node_put(remote); | ||
411 | continue; | ||
412 | } else if (!of_device_is_available(remote->parent)) { | ||
413 | dev_warn(dev, "parent device of %s is not available\n", | ||
414 | remote->full_name); | ||
415 | of_node_put(remote); | ||
416 | continue; | ||
417 | } | ||
418 | |||
419 | component_match_add(dev, match, compare_of, remote); | ||
420 | of_node_put(remote); | ||
421 | } | ||
422 | } | ||
423 | |||
424 | static int rockchip_drm_bind(struct device *dev) | ||
425 | { | ||
426 | struct drm_device *drm; | ||
427 | int ret; | ||
428 | |||
429 | drm = drm_dev_alloc(&rockchip_drm_driver, dev); | ||
430 | if (!drm) | ||
431 | return -ENOMEM; | ||
432 | |||
433 | ret = drm_dev_set_unique(drm, "%s", dev_name(dev)); | ||
434 | if (ret) | ||
435 | goto err_free; | ||
436 | |||
437 | ret = drm_dev_register(drm, 0); | ||
438 | if (ret) | ||
439 | goto err_free; | ||
440 | |||
441 | dev_set_drvdata(dev, drm); | ||
442 | |||
443 | return 0; | ||
444 | |||
445 | err_free: | ||
446 | drm_dev_unref(drm); | ||
447 | return ret; | ||
448 | } | ||
449 | |||
450 | static void rockchip_drm_unbind(struct device *dev) | ||
451 | { | ||
452 | struct drm_device *drm = dev_get_drvdata(dev); | ||
453 | |||
454 | drm_dev_unregister(drm); | ||
455 | drm_dev_unref(drm); | ||
456 | dev_set_drvdata(dev, NULL); | ||
457 | } | ||
458 | |||
459 | static const struct component_master_ops rockchip_drm_ops = { | ||
460 | .bind = rockchip_drm_bind, | ||
461 | .unbind = rockchip_drm_unbind, | ||
462 | }; | ||
463 | |||
464 | static int rockchip_drm_platform_probe(struct platform_device *pdev) | ||
465 | { | ||
466 | struct device *dev = &pdev->dev; | ||
467 | struct component_match *match = NULL; | ||
468 | struct device_node *np = dev->of_node; | ||
469 | struct device_node *port; | ||
470 | int i; | ||
471 | |||
472 | if (!np) | ||
473 | return -ENODEV; | ||
474 | /* | ||
475 | * Bind the crtc ports first, so that | ||
476 | * drm_of_find_possible_crtcs called from encoder .bind callbacks | ||
477 | * works as expected. | ||
478 | */ | ||
479 | for (i = 0;; i++) { | ||
480 | port = of_parse_phandle(np, "ports", i); | ||
481 | if (!port) | ||
482 | break; | ||
483 | |||
484 | if (!of_device_is_available(port->parent)) { | ||
485 | of_node_put(port); | ||
486 | continue; | ||
487 | } | ||
488 | |||
489 | component_match_add(dev, &match, compare_of, port->parent); | ||
490 | of_node_put(port); | ||
491 | } | ||
492 | |||
493 | if (i == 0) { | ||
494 | dev_err(dev, "missing 'ports' property\n"); | ||
495 | return -ENODEV; | ||
496 | } | ||
497 | |||
498 | if (!match) { | ||
499 | dev_err(dev, "No available vop found for display-subsystem.\n"); | ||
500 | return -ENODEV; | ||
501 | } | ||
502 | /* | ||
503 | * For each bound crtc, bind the encoders attached to its | ||
504 | * remote endpoint. | ||
505 | */ | ||
506 | for (i = 0;; i++) { | ||
507 | port = of_parse_phandle(np, "ports", i); | ||
508 | if (!port) | ||
509 | break; | ||
510 | |||
511 | if (!of_device_is_available(port->parent)) { | ||
512 | of_node_put(port); | ||
513 | continue; | ||
514 | } | ||
515 | |||
516 | rockchip_add_endpoints(dev, &match, port); | ||
517 | of_node_put(port); | ||
518 | } | ||
519 | |||
520 | return component_master_add_with_match(dev, &rockchip_drm_ops, match); | ||
521 | } | ||
522 | |||
523 | static int rockchip_drm_platform_remove(struct platform_device *pdev) | ||
524 | { | ||
525 | component_master_del(&pdev->dev, &rockchip_drm_ops); | ||
526 | |||
527 | return 0; | ||
528 | } | ||
529 | |||
530 | static const struct of_device_id rockchip_drm_dt_ids[] = { | ||
531 | { .compatible = "rockchip,display-subsystem", }, | ||
532 | { /* sentinel */ }, | ||
533 | }; | ||
534 | MODULE_DEVICE_TABLE(of, rockchip_drm_dt_ids); | ||
535 | |||
536 | static struct platform_driver rockchip_drm_platform_driver = { | ||
537 | .probe = rockchip_drm_platform_probe, | ||
538 | .remove = rockchip_drm_platform_remove, | ||
539 | .driver = { | ||
540 | .owner = THIS_MODULE, | ||
541 | .name = "rockchip-drm", | ||
542 | .of_match_table = rockchip_drm_dt_ids, | ||
543 | .pm = &rockchip_drm_pm_ops, | ||
544 | }, | ||
545 | }; | ||
546 | |||
547 | module_platform_driver(rockchip_drm_platform_driver); | ||
548 | |||
549 | MODULE_AUTHOR("Mark Yao <mark.yao@rock-chips.com>"); | ||
550 | MODULE_DESCRIPTION("ROCKCHIP DRM Driver"); | ||
551 | MODULE_LICENSE("GPL v2"); | ||
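The VOP driver that plugs into this component framework is added later in the series and is not part of this file; purely as an illustration (hypothetical demo_* names, not the actual rockchip_drm_vop.c code, assuming <linux/component.h>, <linux/platform_device.h> and the rockchip_drm_drv.h header above), a client component would hook in roughly like this:

	/* Illustrative only: how a display component might use the helpers above. */
	static int demo_enable_vblank(struct drm_crtc *crtc)
	{
		/* unmask the controller's VSYNC interrupt here */
		return 0;
	}

	static void demo_disable_vblank(struct drm_crtc *crtc)
	{
		/* mask the VSYNC interrupt again */
	}

	static const struct rockchip_crtc_funcs demo_crtc_funcs = {
		.enable_vblank = demo_enable_vblank,
		.disable_vblank = demo_disable_vblank,
	};

	static int demo_bind(struct device *dev, struct device *master, void *data)
	{
		struct drm_device *drm_dev = data;
		int ret;

		/* join the shared IOMMU mapping created in rockchip_drm_load() */
		ret = rockchip_drm_dma_attach_device(drm_dev, dev);
		if (ret)
			return ret;

		/* ...set up the drm_crtc here, then expose its vblank hooks... */
		return rockchip_register_crtc_funcs(drm_dev, &demo_crtc_funcs, 0);
	}

	static void demo_unbind(struct device *dev, struct device *master, void *data)
	{
		struct drm_device *drm_dev = data;

		rockchip_unregister_crtc_funcs(drm_dev, 0);
		rockchip_drm_dma_detach_device(drm_dev, dev);
	}

	static const struct component_ops demo_component_ops = {
		.bind = demo_bind,
		.unbind = demo_unbind,
	};

	static int demo_platform_probe(struct platform_device *pdev)
	{
		/* matched against the master's component list via compare_of() */
		return component_add(&pdev->dev, &demo_component_ops);
	}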
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h new file mode 100644 index 000000000000..dc4e5f03ac79 --- /dev/null +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h | |||
@@ -0,0 +1,68 @@ | |||
1 | /* | ||
2 | * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd | ||
3 | * Author:Mark Yao <mark.yao@rock-chips.com> | ||
4 | * | ||
5 | * based on exynos_drm_drv.h | ||
6 | * | ||
7 | * This software is licensed under the terms of the GNU General Public | ||
8 | * License version 2, as published by the Free Software Foundation, and | ||
9 | * may be copied, distributed, and modified under those terms. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | */ | ||
16 | |||
17 | #ifndef _ROCKCHIP_DRM_DRV_H | ||
18 | #define _ROCKCHIP_DRM_DRV_H | ||
19 | |||
20 | #include <drm/drm_fb_helper.h> | ||
21 | #include <drm/drm_gem.h> | ||
22 | |||
23 | #include <linux/module.h> | ||
24 | #include <linux/component.h> | ||
25 | |||
26 | #define ROCKCHIP_MAX_FB_BUFFER 3 | ||
27 | #define ROCKCHIP_MAX_CONNECTOR 2 | ||
28 | #define ROCKCHIP_MAX_CRTC 2 | ||
29 | |||
30 | struct drm_device; | ||
31 | struct drm_connector; | ||
32 | |||
33 | /* | ||
34 | * Rockchip drm private crtc funcs. | ||
35 | * @enable_vblank: enable crtc vblank irq. | ||
36 | * @disable_vblank: disable crtc vblank irq. | ||
37 | */ | ||
38 | struct rockchip_crtc_funcs { | ||
39 | int (*enable_vblank)(struct drm_crtc *crtc); | ||
40 | void (*disable_vblank)(struct drm_crtc *crtc); | ||
41 | }; | ||
42 | |||
43 | /* | ||
44 | * Rockchip drm private structure. | ||
45 | * | ||
46 | * @crtc: array of enabled CRTCs, used to map from "pipe" to drm_crtc. | ||
47 | * @num_pipe: number of pipes for this device. | ||
48 | */ | ||
49 | struct rockchip_drm_private { | ||
50 | struct drm_fb_helper fbdev_helper; | ||
51 | struct drm_gem_object *fbdev_bo; | ||
52 | const struct rockchip_crtc_funcs *crtc_funcs[ROCKCHIP_MAX_CRTC]; | ||
53 | }; | ||
54 | |||
55 | int rockchip_register_crtc_funcs(struct drm_device *dev, | ||
56 | const struct rockchip_crtc_funcs *crtc_funcs, | ||
57 | int pipe); | ||
58 | void rockchip_unregister_crtc_funcs(struct drm_device *dev, int pipe); | ||
59 | int rockchip_drm_encoder_get_mux_id(struct device_node *node, | ||
60 | struct drm_encoder *encoder); | ||
61 | int rockchip_drm_crtc_mode_config(struct drm_crtc *crtc, int connector_type, | ||
62 | int out_mode); | ||
63 | int rockchip_drm_dma_attach_device(struct drm_device *drm_dev, | ||
64 | struct device *dev); | ||
65 | void rockchip_drm_dma_detach_device(struct drm_device *drm_dev, | ||
66 | struct device *dev); | ||
67 | |||
68 | #endif /* _ROCKCHIP_DRM_DRV_H_ */ | ||
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c new file mode 100644 index 000000000000..77d52893d40f --- /dev/null +++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c | |||
@@ -0,0 +1,201 @@ | |||
1 | /* | ||
2 | * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd | ||
3 | * Author:Mark Yao <mark.yao@rock-chips.com> | ||
4 | * | ||
5 | * This software is licensed under the terms of the GNU General Public | ||
6 | * License version 2, as published by the Free Software Foundation, and | ||
7 | * may be copied, distributed, and modified under those terms. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/kernel.h> | ||
16 | #include <drm/drm.h> | ||
17 | #include <drm/drmP.h> | ||
18 | #include <drm/drm_fb_helper.h> | ||
19 | #include <drm/drm_crtc_helper.h> | ||
20 | |||
21 | #include "rockchip_drm_drv.h" | ||
22 | #include "rockchip_drm_gem.h" | ||
23 | |||
24 | #define to_rockchip_fb(x) container_of(x, struct rockchip_drm_fb, fb) | ||
25 | |||
26 | struct rockchip_drm_fb { | ||
27 | struct drm_framebuffer fb; | ||
28 | struct drm_gem_object *obj[ROCKCHIP_MAX_FB_BUFFER]; | ||
29 | }; | ||
30 | |||
31 | struct drm_gem_object *rockchip_fb_get_gem_obj(struct drm_framebuffer *fb, | ||
32 | unsigned int plane) | ||
33 | { | ||
34 | struct rockchip_drm_fb *rk_fb = to_rockchip_fb(fb); | ||
35 | |||
36 | if (plane >= ROCKCHIP_MAX_FB_BUFFER) | ||
37 | return NULL; | ||
38 | |||
39 | return rk_fb->obj[plane]; | ||
40 | } | ||
41 | EXPORT_SYMBOL_GPL(rockchip_fb_get_gem_obj); | ||
42 | |||
43 | static void rockchip_drm_fb_destroy(struct drm_framebuffer *fb) | ||
44 | { | ||
45 | struct rockchip_drm_fb *rockchip_fb = to_rockchip_fb(fb); | ||
46 | struct drm_gem_object *obj; | ||
47 | int i; | ||
48 | |||
49 | for (i = 0; i < ROCKCHIP_MAX_FB_BUFFER; i++) { | ||
50 | obj = rockchip_fb->obj[i]; | ||
51 | if (obj) | ||
52 | drm_gem_object_unreference_unlocked(obj); | ||
53 | } | ||
54 | |||
55 | drm_framebuffer_cleanup(fb); | ||
56 | kfree(rockchip_fb); | ||
57 | } | ||
58 | |||
59 | static int rockchip_drm_fb_create_handle(struct drm_framebuffer *fb, | ||
60 | struct drm_file *file_priv, | ||
61 | unsigned int *handle) | ||
62 | { | ||
63 | struct rockchip_drm_fb *rockchip_fb = to_rockchip_fb(fb); | ||
64 | |||
65 | return drm_gem_handle_create(file_priv, | ||
66 | rockchip_fb->obj[0], handle); | ||
67 | } | ||
68 | |||
69 | static struct drm_framebuffer_funcs rockchip_drm_fb_funcs = { | ||
70 | .destroy = rockchip_drm_fb_destroy, | ||
71 | .create_handle = rockchip_drm_fb_create_handle, | ||
72 | }; | ||
73 | |||
74 | static struct rockchip_drm_fb * | ||
75 | rockchip_fb_alloc(struct drm_device *dev, struct drm_mode_fb_cmd2 *mode_cmd, | ||
76 | struct drm_gem_object **obj, unsigned int num_planes) | ||
77 | { | ||
78 | struct rockchip_drm_fb *rockchip_fb; | ||
79 | int ret; | ||
80 | int i; | ||
81 | |||
82 | rockchip_fb = kzalloc(sizeof(*rockchip_fb), GFP_KERNEL); | ||
83 | if (!rockchip_fb) | ||
84 | return ERR_PTR(-ENOMEM); | ||
85 | |||
86 | drm_helper_mode_fill_fb_struct(&rockchip_fb->fb, mode_cmd); | ||
87 | |||
88 | for (i = 0; i < num_planes; i++) | ||
89 | rockchip_fb->obj[i] = obj[i]; | ||
90 | |||
91 | ret = drm_framebuffer_init(dev, &rockchip_fb->fb, | ||
92 | &rockchip_drm_fb_funcs); | ||
93 | if (ret) { | ||
94 | dev_err(dev->dev, "Failed to initialize framebuffer: %d\n", | ||
95 | ret); | ||
96 | kfree(rockchip_fb); | ||
97 | return ERR_PTR(ret); | ||
98 | } | ||
99 | |||
100 | return rockchip_fb; | ||
101 | } | ||
102 | |||
103 | static struct drm_framebuffer * | ||
104 | rockchip_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, | ||
105 | struct drm_mode_fb_cmd2 *mode_cmd) | ||
106 | { | ||
107 | struct rockchip_drm_fb *rockchip_fb; | ||
108 | struct drm_gem_object *objs[ROCKCHIP_MAX_FB_BUFFER]; | ||
109 | struct drm_gem_object *obj; | ||
110 | unsigned int hsub; | ||
111 | unsigned int vsub; | ||
112 | int num_planes; | ||
113 | int ret; | ||
114 | int i; | ||
115 | |||
116 | hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format); | ||
117 | vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format); | ||
118 | num_planes = min(drm_format_num_planes(mode_cmd->pixel_format), | ||
119 | ROCKCHIP_MAX_FB_BUFFER); | ||
120 | |||
121 | for (i = 0; i < num_planes; i++) { | ||
122 | unsigned int width = mode_cmd->width / (i ? hsub : 1); | ||
123 | unsigned int height = mode_cmd->height / (i ? vsub : 1); | ||
124 | unsigned int min_size; | ||
125 | |||
126 | obj = drm_gem_object_lookup(dev, file_priv, | ||
127 | mode_cmd->handles[i]); | ||
128 | if (!obj) { | ||
129 | dev_err(dev->dev, "Failed to lookup GEM object\n"); | ||
130 | ret = -ENXIO; | ||
131 | goto err_gem_object_unreference; | ||
132 | } | ||
133 | |||
134 | min_size = (height - 1) * mode_cmd->pitches[i] + | ||
135 | mode_cmd->offsets[i] + | ||
136 | width * drm_format_plane_cpp(mode_cmd->pixel_format, i); | ||
137 | |||
138 | if (obj->size < min_size) { | ||
139 | drm_gem_object_unreference_unlocked(obj); | ||
140 | ret = -EINVAL; | ||
141 | goto err_gem_object_unreference; | ||
142 | } | ||
143 | objs[i] = obj; | ||
144 | } | ||
145 | |||
146 | rockchip_fb = rockchip_fb_alloc(dev, mode_cmd, objs, i); | ||
147 | if (IS_ERR(rockchip_fb)) { | ||
148 | ret = PTR_ERR(rockchip_fb); | ||
149 | goto err_gem_object_unreference; | ||
150 | } | ||
151 | |||
152 | return &rockchip_fb->fb; | ||
153 | |||
154 | err_gem_object_unreference: | ||
155 | for (i--; i >= 0; i--) | ||
156 | drm_gem_object_unreference_unlocked(objs[i]); | ||
157 | return ERR_PTR(ret); | ||
158 | } | ||
159 | |||
160 | static void rockchip_drm_output_poll_changed(struct drm_device *dev) | ||
161 | { | ||
162 | struct rockchip_drm_private *private = dev->dev_private; | ||
163 | struct drm_fb_helper *fb_helper = &private->fbdev_helper; | ||
164 | |||
165 | drm_fb_helper_hotplug_event(fb_helper); | ||
166 | } | ||
167 | |||
168 | static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = { | ||
169 | .fb_create = rockchip_user_fb_create, | ||
170 | .output_poll_changed = rockchip_drm_output_poll_changed, | ||
171 | }; | ||
172 | |||
173 | struct drm_framebuffer * | ||
174 | rockchip_drm_framebuffer_init(struct drm_device *dev, | ||
175 | struct drm_mode_fb_cmd2 *mode_cmd, | ||
176 | struct drm_gem_object *obj) | ||
177 | { | ||
178 | struct rockchip_drm_fb *rockchip_fb; | ||
179 | |||
180 | rockchip_fb = rockchip_fb_alloc(dev, mode_cmd, &obj, 1); | ||
181 | if (IS_ERR(rockchip_fb)) | ||
182 | return NULL; | ||
183 | |||
184 | return &rockchip_fb->fb; | ||
185 | } | ||
186 | |||
187 | void rockchip_drm_mode_config_init(struct drm_device *dev) | ||
188 | { | ||
189 | dev->mode_config.min_width = 0; | ||
190 | dev->mode_config.min_height = 0; | ||
191 | |||
192 | /* | ||
193 | * set max width and height as default value(4096x4096). | ||
194 | * this value would be used to check framebuffer size limitation | ||
195 | * at drm_mode_addfb(). | ||
196 | */ | ||
197 | dev->mode_config.max_width = 4096; | ||
198 | dev->mode_config.max_height = 4096; | ||
199 | |||
200 | dev->mode_config.funcs = &rockchip_drm_mode_config_funcs; | ||
201 | } | ||
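The .fb_create hook above is what userspace reaches through the ADDFB2 ioctl; a minimal libdrm sketch (assuming libdrm's xf86drmMode.h and drm_fourcc.h, a single-plane XRGB8888 buffer whose GEM handle already exists, and an illustrative helper name) would be:

	#include <stdint.h>
	#include <xf86drmMode.h>
	#include <drm_fourcc.h>

	/* Wrap an existing GEM handle into a framebuffer; the kernel side of this
	 * call is rockchip_user_fb_create(), which looks up the handle and checks
	 * that the object covers (height - 1) * pitch + width * cpp.
	 */
	static int add_xrgb8888_fb(int fd, uint32_t width, uint32_t height,
				   uint32_t pitch, uint32_t gem_handle,
				   uint32_t *fb_id)
	{
		uint32_t handles[4] = { gem_handle };
		uint32_t pitches[4] = { pitch };
		uint32_t offsets[4] = { 0 };

		return drmModeAddFB2(fd, width, height, DRM_FORMAT_XRGB8888,
				     handles, pitches, offsets, fb_id, 0);
	}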
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.h b/drivers/gpu/drm/rockchip/rockchip_drm_fb.h new file mode 100644 index 000000000000..09574d48226f --- /dev/null +++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.h | |||
@@ -0,0 +1,28 @@ | |||
1 | /* | ||
2 | * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd | ||
3 | * Author:Mark Yao <mark.yao@rock-chips.com> | ||
4 | * | ||
5 | * This software is licensed under the terms of the GNU General Public | ||
6 | * License version 2, as published by the Free Software Foundation, and | ||
7 | * may be copied, distributed, and modified under those terms. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ROCKCHIP_DRM_FB_H | ||
16 | #define _ROCKCHIP_DRM_FB_H | ||
17 | |||
18 | struct drm_framebuffer * | ||
19 | rockchip_drm_framebuffer_init(struct drm_device *dev, | ||
20 | struct drm_mode_fb_cmd2 *mode_cmd, | ||
21 | struct drm_gem_object *obj); | ||
22 | void rockchip_drm_framebuffer_fini(struct drm_framebuffer *fb); | ||
23 | |||
24 | void rockchip_drm_mode_config_init(struct drm_device *dev); | ||
25 | |||
26 | struct drm_gem_object *rockchip_fb_get_gem_obj(struct drm_framebuffer *fb, | ||
27 | unsigned int plane); | ||
28 | #endif /* _ROCKCHIP_DRM_FB_H */ | ||
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c new file mode 100644 index 000000000000..a5d889a8716b --- /dev/null +++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c | |||
@@ -0,0 +1,210 @@ | |||
1 | /* | ||
2 | * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd | ||
3 | * Author:Mark Yao <mark.yao@rock-chips.com> | ||
4 | * | ||
5 | * This software is licensed under the terms of the GNU General Public | ||
6 | * License version 2, as published by the Free Software Foundation, and | ||
7 | * may be copied, distributed, and modified under those terms. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | */ | ||
14 | |||
15 | #include <drm/drm.h> | ||
16 | #include <drm/drmP.h> | ||
17 | #include <drm/drm_fb_helper.h> | ||
18 | #include <drm/drm_crtc_helper.h> | ||
19 | |||
20 | #include "rockchip_drm_drv.h" | ||
21 | #include "rockchip_drm_gem.h" | ||
22 | #include "rockchip_drm_fb.h" | ||
23 | |||
24 | #define PREFERRED_BPP 32 | ||
25 | #define to_drm_private(x) \ | ||
26 | container_of(x, struct rockchip_drm_private, fbdev_helper) | ||
27 | |||
28 | static int rockchip_fbdev_mmap(struct fb_info *info, | ||
29 | struct vm_area_struct *vma) | ||
30 | { | ||
31 | struct drm_fb_helper *helper = info->par; | ||
32 | struct rockchip_drm_private *private = to_drm_private(helper); | ||
33 | |||
34 | return rockchip_gem_mmap_buf(private->fbdev_bo, vma); | ||
35 | } | ||
36 | |||
37 | static struct fb_ops rockchip_drm_fbdev_ops = { | ||
38 | .owner = THIS_MODULE, | ||
39 | .fb_mmap = rockchip_fbdev_mmap, | ||
40 | .fb_fillrect = cfb_fillrect, | ||
41 | .fb_copyarea = cfb_copyarea, | ||
42 | .fb_imageblit = cfb_imageblit, | ||
43 | .fb_check_var = drm_fb_helper_check_var, | ||
44 | .fb_set_par = drm_fb_helper_set_par, | ||
45 | .fb_blank = drm_fb_helper_blank, | ||
46 | .fb_pan_display = drm_fb_helper_pan_display, | ||
47 | .fb_setcmap = drm_fb_helper_setcmap, | ||
48 | }; | ||
49 | |||
50 | static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper, | ||
51 | struct drm_fb_helper_surface_size *sizes) | ||
52 | { | ||
53 | struct rockchip_drm_private *private = to_drm_private(helper); | ||
54 | struct drm_mode_fb_cmd2 mode_cmd = { 0 }; | ||
55 | struct drm_device *dev = helper->dev; | ||
56 | struct rockchip_gem_object *rk_obj; | ||
57 | struct drm_framebuffer *fb; | ||
58 | unsigned int bytes_per_pixel; | ||
59 | unsigned long offset; | ||
60 | struct fb_info *fbi; | ||
61 | size_t size; | ||
62 | int ret; | ||
63 | |||
64 | bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8); | ||
65 | |||
66 | mode_cmd.width = sizes->surface_width; | ||
67 | mode_cmd.height = sizes->surface_height; | ||
68 | mode_cmd.pitches[0] = sizes->surface_width * bytes_per_pixel; | ||
69 | mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, | ||
70 | sizes->surface_depth); | ||
71 | |||
72 | size = mode_cmd.pitches[0] * mode_cmd.height; | ||
73 | |||
74 | rk_obj = rockchip_gem_create_object(dev, size); | ||
75 | if (IS_ERR(rk_obj)) | ||
76 | return -ENOMEM; | ||
77 | |||
78 | private->fbdev_bo = &rk_obj->base; | ||
79 | |||
80 | fbi = framebuffer_alloc(0, dev->dev); | ||
81 | if (!fbi) { | ||
82 | dev_err(dev->dev, "Failed to allocate framebuffer info.\n"); | ||
83 | ret = -ENOMEM; | ||
84 | goto err_rockchip_gem_free_object; | ||
85 | } | ||
86 | |||
87 | helper->fb = rockchip_drm_framebuffer_init(dev, &mode_cmd, | ||
88 | private->fbdev_bo); | ||
89 | if (IS_ERR(helper->fb)) { | ||
90 | dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n"); | ||
91 | ret = PTR_ERR(helper->fb); | ||
92 | goto err_framebuffer_release; | ||
93 | } | ||
94 | |||
95 | helper->fbdev = fbi; | ||
96 | |||
97 | fbi->par = helper; | ||
98 | fbi->flags = FBINFO_FLAG_DEFAULT; | ||
99 | fbi->fbops = &rockchip_drm_fbdev_ops; | ||
100 | |||
101 | ret = fb_alloc_cmap(&fbi->cmap, 256, 0); | ||
102 | if (ret) { | ||
103 | dev_err(dev->dev, "Failed to allocate color map.\n"); | ||
104 | goto err_drm_framebuffer_unref; | ||
105 | } | ||
106 | |||
107 | fb = helper->fb; | ||
108 | drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth); | ||
109 | drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height); | ||
110 | |||
111 | offset = fbi->var.xoffset * bytes_per_pixel; | ||
112 | offset += fbi->var.yoffset * fb->pitches[0]; | ||
113 | |||
114 | dev->mode_config.fb_base = 0; | ||
115 | fbi->screen_base = rk_obj->kvaddr + offset; | ||
116 | fbi->screen_size = rk_obj->base.size; | ||
117 | fbi->fix.smem_len = rk_obj->base.size; | ||
118 | |||
119 | DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%p offset=%ld size=%d\n", | ||
120 | fb->width, fb->height, fb->depth, rk_obj->kvaddr, | ||
121 | offset, size); | ||
122 | return 0; | ||
123 | |||
124 | err_drm_framebuffer_unref: | ||
125 | drm_framebuffer_unreference(helper->fb); | ||
126 | err_framebuffer_release: | ||
127 | framebuffer_release(fbi); | ||
128 | err_rockchip_gem_free_object: | ||
129 | rockchip_gem_free_object(&rk_obj->base); | ||
130 | return ret; | ||
131 | } | ||
132 | |||
133 | static const struct drm_fb_helper_funcs rockchip_drm_fb_helper_funcs = { | ||
134 | .fb_probe = rockchip_drm_fbdev_create, | ||
135 | }; | ||
136 | |||
137 | int rockchip_drm_fbdev_init(struct drm_device *dev) | ||
138 | { | ||
139 | struct rockchip_drm_private *private = dev->dev_private; | ||
140 | struct drm_fb_helper *helper; | ||
141 | unsigned int num_crtc; | ||
142 | int ret; | ||
143 | |||
144 | if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector) | ||
145 | return -EINVAL; | ||
146 | |||
147 | num_crtc = dev->mode_config.num_crtc; | ||
148 | |||
149 | helper = &private->fbdev_helper; | ||
150 | |||
151 | drm_fb_helper_prepare(dev, helper, &rockchip_drm_fb_helper_funcs); | ||
152 | |||
153 | ret = drm_fb_helper_init(dev, helper, num_crtc, ROCKCHIP_MAX_CONNECTOR); | ||
154 | if (ret < 0) { | ||
155 | dev_err(dev->dev, "Failed to initialize drm fb helper - %d.\n", | ||
156 | ret); | ||
157 | return ret; | ||
158 | } | ||
159 | |||
160 | ret = drm_fb_helper_single_add_all_connectors(helper); | ||
161 | if (ret < 0) { | ||
162 | dev_err(dev->dev, "Failed to add connectors - %d.\n", ret); | ||
163 | goto err_drm_fb_helper_fini; | ||
164 | } | ||
165 | |||
166 | /* disable all the possible outputs/crtcs before entering KMS mode */ | ||
167 | drm_helper_disable_unused_functions(dev); | ||
168 | |||
169 | ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP); | ||
170 | if (ret < 0) { | ||
171 | dev_err(dev->dev, "Failed to set initial hw config - %d.\n", | ||
172 | ret); | ||
173 | goto err_drm_fb_helper_fini; | ||
174 | } | ||
175 | |||
176 | return 0; | ||
177 | |||
178 | err_drm_fb_helper_fini: | ||
179 | drm_fb_helper_fini(helper); | ||
180 | return ret; | ||
181 | } | ||
182 | |||
183 | void rockchip_drm_fbdev_fini(struct drm_device *dev) | ||
184 | { | ||
185 | struct rockchip_drm_private *private = dev->dev_private; | ||
186 | struct drm_fb_helper *helper; | ||
187 | |||
188 | helper = &private->fbdev_helper; | ||
189 | |||
190 | if (helper->fbdev) { | ||
191 | struct fb_info *info; | ||
192 | int ret; | ||
193 | |||
194 | info = helper->fbdev; | ||
195 | ret = unregister_framebuffer(info); | ||
196 | if (ret < 0) | ||
197 | DRM_DEBUG_KMS("failed unregister_framebuffer() - %d\n", | ||
198 | ret); | ||
199 | |||
200 | if (info->cmap.len) | ||
201 | fb_dealloc_cmap(&info->cmap); | ||
202 | |||
203 | framebuffer_release(info); | ||
204 | } | ||
205 | |||
206 | if (helper->fb) | ||
207 | drm_framebuffer_unreference(helper->fb); | ||
208 | |||
209 | drm_fb_helper_fini(helper); | ||
210 | } | ||
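The emulated fbdev registered above can be driven through the ordinary fbdev interface; a small userspace sketch (assuming the emulated console shows up as /dev/fb0) is:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <linux/fb.h>

	int main(void)
	{
		struct fb_var_screeninfo var;
		struct fb_fix_screeninfo fix;
		void *map;
		int fd = open("/dev/fb0", O_RDWR);

		if (fd < 0 || ioctl(fd, FBIOGET_VSCREENINFO, &var) ||
		    ioctl(fd, FBIOGET_FSCREENINFO, &fix))
			return 1;

		/* smem_len is the GEM object size set in rockchip_drm_fbdev_create();
		 * the mmap is served by rockchip_fbdev_mmap() -> rockchip_gem_mmap_buf().
		 */
		map = mmap(NULL, fix.smem_len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
		if (map == MAP_FAILED)
			return 1;

		printf("%ux%u-%u, pitch %u bytes\n", var.xres, var.yres,
		       var.bits_per_pixel, fix.line_length);
		return 0;
	}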
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h new file mode 100644 index 000000000000..50432e9b5b37 --- /dev/null +++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h | |||
@@ -0,0 +1,21 @@ | |||
1 | /* | ||
2 | * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd | ||
3 | * Author:Mark Yao <mark.yao@rock-chips.com> | ||
4 | * | ||
5 | * This software is licensed under the terms of the GNU General Public | ||
6 | * License version 2, as published by the Free Software Foundation, and | ||
7 | * may be copied, distributed, and modified under those terms. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ROCKCHIP_DRM_FBDEV_H | ||
16 | #define _ROCKCHIP_DRM_FBDEV_H | ||
17 | |||
18 | int rockchip_drm_fbdev_init(struct drm_device *dev); | ||
19 | void rockchip_drm_fbdev_fini(struct drm_device *dev); | ||
20 | |||
21 | #endif /* _ROCKCHIP_DRM_FBDEV_H */ | ||
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c new file mode 100644 index 000000000000..bc98a227dc76 --- /dev/null +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c | |||
@@ -0,0 +1,294 @@ | |||
1 | /* | ||
2 | * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd | ||
3 | * Author:Mark Yao <mark.yao@rock-chips.com> | ||
4 | * | ||
5 | * This software is licensed under the terms of the GNU General Public | ||
6 | * License version 2, as published by the Free Software Foundation, and | ||
7 | * may be copied, distributed, and modified under those terms. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | */ | ||
14 | |||
15 | #include <drm/drm.h> | ||
16 | #include <drm/drmP.h> | ||
17 | #include <drm/drm_gem.h> | ||
18 | #include <drm/drm_vma_manager.h> | ||
19 | |||
20 | #include <linux/dma-attrs.h> | ||
21 | |||
22 | #include "rockchip_drm_drv.h" | ||
23 | #include "rockchip_drm_gem.h" | ||
24 | |||
25 | static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj) | ||
26 | { | ||
27 | struct drm_gem_object *obj = &rk_obj->base; | ||
28 | struct drm_device *drm = obj->dev; | ||
29 | |||
30 | init_dma_attrs(&rk_obj->dma_attrs); | ||
31 | dma_set_attr(DMA_ATTR_WRITE_COMBINE, &rk_obj->dma_attrs); | ||
32 | |||
33 | /* TODO(djkurtz): Use DMA_ATTR_NO_KERNEL_MAPPING except for fbdev */ | ||
34 | rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size, | ||
35 | &rk_obj->dma_addr, GFP_KERNEL, | ||
36 | &rk_obj->dma_attrs); | ||
37 | if (IS_ERR(rk_obj->kvaddr)) { | ||
38 | int ret = PTR_ERR(rk_obj->kvaddr); | ||
39 | |||
40 | DRM_ERROR("failed to allocate %#x byte dma buffer, %d", | ||
41 | obj->size, ret); | ||
42 | return ret; | ||
43 | } | ||
44 | |||
45 | return 0; | ||
46 | } | ||
47 | |||
48 | static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj) | ||
49 | { | ||
50 | struct drm_gem_object *obj = &rk_obj->base; | ||
51 | struct drm_device *drm = obj->dev; | ||
52 | |||
53 | dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr, rk_obj->dma_addr, | ||
54 | &rk_obj->dma_attrs); | ||
55 | } | ||
56 | |||
57 | int rockchip_gem_mmap_buf(struct drm_gem_object *obj, | ||
58 | struct vm_area_struct *vma) | ||
59 | { | ||
60 | struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); | ||
61 | struct drm_device *drm = obj->dev; | ||
62 | unsigned long vm_size; | ||
63 | |||
64 | vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; | ||
65 | vm_size = vma->vm_end - vma->vm_start; | ||
66 | |||
67 | if (vm_size > obj->size) | ||
68 | return -EINVAL; | ||
69 | |||
70 | return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr, | ||
71 | obj->size, &rk_obj->dma_attrs); | ||
72 | } | ||
73 | |||
74 | /* drm driver mmap file operations */ | ||
75 | int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma) | ||
76 | { | ||
77 | struct drm_file *priv = filp->private_data; | ||
78 | struct drm_device *dev = priv->minor->dev; | ||
79 | struct drm_gem_object *obj; | ||
80 | struct drm_vma_offset_node *node; | ||
81 | int ret; | ||
82 | |||
83 | if (drm_device_is_unplugged(dev)) | ||
84 | return -ENODEV; | ||
85 | |||
86 | mutex_lock(&dev->struct_mutex); | ||
87 | |||
88 | node = drm_vma_offset_exact_lookup(dev->vma_offset_manager, | ||
89 | vma->vm_pgoff, | ||
90 | vma_pages(vma)); | ||
91 | if (!node) { | ||
92 | mutex_unlock(&dev->struct_mutex); | ||
93 | DRM_ERROR("failed to find vma node.\n"); | ||
94 | return -EINVAL; | ||
95 | } else if (!drm_vma_node_is_allowed(node, filp)) { | ||
96 | mutex_unlock(&dev->struct_mutex); | ||
97 | return -EACCES; | ||
98 | } | ||
99 | |||
100 | obj = container_of(node, struct drm_gem_object, vma_node); | ||
101 | ret = rockchip_gem_mmap_buf(obj, vma); | ||
102 | |||
103 | mutex_unlock(&dev->struct_mutex); | ||
104 | |||
105 | return ret; | ||
106 | } | ||
107 | |||
108 | struct rockchip_gem_object * | ||
109 | rockchip_gem_create_object(struct drm_device *drm, unsigned int size) | ||
110 | { | ||
111 | struct rockchip_gem_object *rk_obj; | ||
112 | struct drm_gem_object *obj; | ||
113 | int ret; | ||
114 | |||
115 | size = round_up(size, PAGE_SIZE); | ||
116 | |||
117 | rk_obj = kzalloc(sizeof(*rk_obj), GFP_KERNEL); | ||
118 | if (!rk_obj) | ||
119 | return ERR_PTR(-ENOMEM); | ||
120 | |||
121 | obj = &rk_obj->base; | ||
122 | |||
123 | drm_gem_private_object_init(drm, obj, size); | ||
124 | |||
125 | ret = rockchip_gem_alloc_buf(rk_obj); | ||
126 | if (ret) | ||
127 | goto err_free_rk_obj; | ||
128 | |||
129 | return rk_obj; | ||
130 | |||
131 | err_free_rk_obj: | ||
132 | kfree(rk_obj); | ||
133 | return ERR_PTR(ret); | ||
134 | } | ||
135 | |||
136 | /* | ||
137 | * rockchip_gem_free_object - (struct drm_driver)->gem_free_object callback | ||
138 | * function | ||
139 | */ | ||
140 | void rockchip_gem_free_object(struct drm_gem_object *obj) | ||
141 | { | ||
142 | struct rockchip_gem_object *rk_obj; | ||
143 | |||
144 | drm_gem_free_mmap_offset(obj); | ||
145 | |||
146 | rk_obj = to_rockchip_obj(obj); | ||
147 | |||
148 | rockchip_gem_free_buf(rk_obj); | ||
149 | |||
150 | kfree(rk_obj); | ||
151 | } | ||
152 | |||
153 | /* | ||
154 | * rockchip_gem_create_with_handle - allocate an object with the given | ||
155 | * size and create a gem handle on it | ||
156 | * | ||
157 | * returns a struct rockchip_gem_object* on success or ERR_PTR values | ||
158 | * on failure. | ||
159 | */ | ||
160 | static struct rockchip_gem_object * | ||
161 | rockchip_gem_create_with_handle(struct drm_file *file_priv, | ||
162 | struct drm_device *drm, unsigned int size, | ||
163 | unsigned int *handle) | ||
164 | { | ||
165 | struct rockchip_gem_object *rk_obj; | ||
166 | struct drm_gem_object *obj; | ||
167 | int ret; | ||
168 | |||
169 | rk_obj = rockchip_gem_create_object(drm, size); | ||
170 | if (IS_ERR(rk_obj)) | ||
171 | return ERR_CAST(rk_obj); | ||
172 | |||
173 | obj = &rk_obj->base; | ||
174 | |||
175 | /* | ||
176 | * allocate an id in the idr table where the obj is registered; | ||
177 | * the returned handle is the id userspace sees. | ||
178 | */ | ||
179 | ret = drm_gem_handle_create(file_priv, obj, handle); | ||
180 | if (ret) | ||
181 | goto err_handle_create; | ||
182 | |||
183 | /* drop reference from allocate - handle holds it now. */ | ||
184 | drm_gem_object_unreference_unlocked(obj); | ||
185 | |||
186 | return rk_obj; | ||
187 | |||
188 | err_handle_create: | ||
189 | rockchip_gem_free_object(obj); | ||
190 | |||
191 | return ERR_PTR(ret); | ||
192 | } | ||
193 | |||
194 | int rockchip_gem_dumb_map_offset(struct drm_file *file_priv, | ||
195 | struct drm_device *dev, uint32_t handle, | ||
196 | uint64_t *offset) | ||
197 | { | ||
198 | struct drm_gem_object *obj; | ||
199 | int ret; | ||
200 | |||
201 | mutex_lock(&dev->struct_mutex); | ||
202 | |||
203 | obj = drm_gem_object_lookup(dev, file_priv, handle); | ||
204 | if (!obj) { | ||
205 | DRM_ERROR("failed to lookup gem object.\n"); | ||
206 | ret = -EINVAL; | ||
207 | goto unlock; | ||
208 | } | ||
209 | |||
210 | ret = drm_gem_create_mmap_offset(obj); | ||
211 | if (ret) | ||
212 | goto out; | ||
213 | |||
214 | *offset = drm_vma_node_offset_addr(&obj->vma_node); | ||
215 | DRM_DEBUG_KMS("offset = 0x%llx\n", *offset); | ||
216 | |||
217 | out: | ||
218 | drm_gem_object_unreference(obj); | ||
219 | unlock: | ||
220 | mutex_unlock(&dev->struct_mutex); | ||
221 | return ret; | ||
222 | } | ||
223 | |||
224 | /* | ||
225 | * rockchip_gem_dumb_create - (struct drm_driver)->dumb_create callback | ||
226 | * function | ||
227 | * | ||
228 | * This aligns the pitch and size arguments to the minimum required. wrap | ||
229 | * this into your own function if you need bigger alignment. | ||
230 | */ | ||
231 | int rockchip_gem_dumb_create(struct drm_file *file_priv, | ||
232 | struct drm_device *dev, | ||
233 | struct drm_mode_create_dumb *args) | ||
234 | { | ||
235 | struct rockchip_gem_object *rk_obj; | ||
236 | int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8); | ||
237 | |||
238 | /* | ||
239 | * align to 64 bytes since Mali requires it. | ||
240 | */ | ||
241 | min_pitch = ALIGN(min_pitch, 64); | ||
242 | |||
243 | if (args->pitch < min_pitch) | ||
244 | args->pitch = min_pitch; | ||
245 | |||
246 | if (args->size < args->pitch * args->height) | ||
247 | args->size = args->pitch * args->height; | ||
248 | |||
249 | rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size, | ||
250 | &args->handle); | ||
251 | |||
252 | return PTR_ERR_OR_ZERO(rk_obj); | ||
253 | } | ||
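
An aside on the alignment above: rockchip_gem_dumb_create() rounds the minimum
pitch up to the next 64-byte boundary and then sizes the buffer as pitch *
height. A minimal standalone sketch of the same arithmetic, assuming a
1920x1080, 32 bpp mode purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Round up to the next multiple of 64, as the driver does for the pitch. */
static uint32_t align64(uint32_t x)
{
	return (x + 63) & ~(uint32_t)63;
}

int main(void)
{
	uint32_t width = 1920, height = 1080, bpp = 32;	/* assumed mode */
	uint32_t pitch = align64((width * bpp + 7) / 8);	/* DIV_ROUND_UP + ALIGN */
	uint64_t size = (uint64_t)pitch * height;

	/* 1920 * 4 bytes = 7680 is already 64-byte aligned, so pitch stays 7680. */
	printf("pitch=%u bytes, size=%llu bytes\n", pitch, (unsigned long long)size);
	return 0;
}
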
254 | |||
255 | /* | ||
256 | * Allocate a sg_table for this GEM object. | ||
257 | * Note: Both the table's contents, and the sg_table itself must be freed by | ||
258 | * the caller. | ||
259 | * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error. | ||
260 | */ | ||
261 | struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj) | ||
262 | { | ||
263 | struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); | ||
264 | struct drm_device *drm = obj->dev; | ||
265 | struct sg_table *sgt; | ||
266 | int ret; | ||
267 | |||
268 | sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); | ||
269 | if (!sgt) | ||
270 | return ERR_PTR(-ENOMEM); | ||
271 | |||
272 | ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr, | ||
273 | rk_obj->dma_addr, obj->size, | ||
274 | &rk_obj->dma_attrs); | ||
275 | if (ret) { | ||
276 | DRM_ERROR("failed to allocate sgt, %d\n", ret); | ||
277 | kfree(sgt); | ||
278 | return ERR_PTR(ret); | ||
279 | } | ||
280 | |||
281 | return sgt; | ||
282 | } | ||
283 | |||
284 | void *rockchip_gem_prime_vmap(struct drm_gem_object *obj) | ||
285 | { | ||
286 | struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); | ||
287 | |||
288 | return rk_obj->kvaddr; | ||
289 | } | ||
290 | |||
291 | void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) | ||
292 | { | ||
293 | /* Nothing to do */ | ||
294 | } | ||
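
For context on how the dumb-buffer and mmap-offset callbacks in this file are
exercised, here is a hedged userspace sketch built on the generic DRM
dumb-buffer ioctls; the /dev/dri/card0 path and the mode parameters are
assumptions, and the uapi headers may live under a different include prefix
(e.g. libdrm/) depending on the distribution:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <drm/drm.h>

int main(void)
{
	struct drm_mode_create_dumb creq;
	struct drm_mode_map_dumb mreq;
	void *map;
	int fd;

	fd = open("/dev/dri/card0", O_RDWR);	/* assumed device node */
	if (fd < 0)
		return 1;

	/* Serviced by rockchip_gem_dumb_create(), which aligns pitch and size. */
	memset(&creq, 0, sizeof(creq));
	creq.width = 1920;
	creq.height = 1080;
	creq.bpp = 32;
	if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq))
		return 1;

	/* Serviced by rockchip_gem_dumb_map_offset(), which returns a fake offset. */
	memset(&mreq, 0, sizeof(mreq));
	mreq.handle = creq.handle;
	if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq))
		return 1;

	/* The mmap itself lands in rockchip_gem_mmap()/rockchip_gem_mmap_buf(). */
	map = mmap(NULL, creq.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, mreq.offset);
	if (map == MAP_FAILED)
		return 1;

	memset(map, 0, creq.size);	/* clear the new scanout buffer */
	munmap(map, creq.size);
	close(fd);
	return 0;
}
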
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h new file mode 100644 index 000000000000..67bcebe90003 --- /dev/null +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h | |||
@@ -0,0 +1,54 @@ | |||
1 | /* | ||
2 | * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd | ||
3 | * Author:Mark Yao <mark.yao@rock-chips.com> | ||
4 | * | ||
5 | * This software is licensed under the terms of the GNU General Public | ||
6 | * License version 2, as published by the Free Software Foundation, and | ||
7 | * may be copied, distributed, and modified under those terms. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ROCKCHIP_DRM_GEM_H | ||
16 | #define _ROCKCHIP_DRM_GEM_H | ||
17 | |||
18 | #define to_rockchip_obj(x) container_of(x, struct rockchip_gem_object, base) | ||
19 | |||
20 | struct rockchip_gem_object { | ||
21 | struct drm_gem_object base; | ||
22 | unsigned int flags; | ||
23 | |||
24 | void *kvaddr; | ||
25 | dma_addr_t dma_addr; | ||
26 | struct dma_attrs dma_attrs; | ||
27 | }; | ||
28 | |||
29 | struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj); | ||
30 | struct drm_gem_object * | ||
31 | rockchip_gem_prime_import_sg_table(struct drm_device *dev, size_t size, | ||
32 | struct sg_table *sgt); | ||
33 | void *rockchip_gem_prime_vmap(struct drm_gem_object *obj); | ||
34 | void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); | ||
35 | |||
36 | /* drm driver mmap file operations */ | ||
37 | int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma); | ||
38 | |||
39 | /* mmap a gem object to userspace. */ | ||
40 | int rockchip_gem_mmap_buf(struct drm_gem_object *obj, | ||
41 | struct vm_area_struct *vma); | ||
42 | |||
43 | struct rockchip_gem_object * | ||
44 | rockchip_gem_create_object(struct drm_device *drm, unsigned int size); | ||
45 | |||
46 | void rockchip_gem_free_object(struct drm_gem_object *obj); | ||
47 | |||
48 | int rockchip_gem_dumb_create(struct drm_file *file_priv, | ||
49 | struct drm_device *dev, | ||
50 | struct drm_mode_create_dumb *args); | ||
51 | int rockchip_gem_dumb_map_offset(struct drm_file *file_priv, | ||
52 | struct drm_device *dev, uint32_t handle, | ||
53 | uint64_t *offset); | ||
54 | #endif /* _ROCKCHIP_DRM_GEM_H */ | ||
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c new file mode 100644 index 000000000000..e7ca25b3fb38 --- /dev/null +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c | |||
@@ -0,0 +1,1455 @@ | |||
1 | /* | ||
2 | * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd | ||
3 | * Author:Mark Yao <mark.yao@rock-chips.com> | ||
4 | * | ||
5 | * This software is licensed under the terms of the GNU General Public | ||
6 | * License version 2, as published by the Free Software Foundation, and | ||
7 | * may be copied, distributed, and modified under those terms. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | */ | ||
14 | |||
15 | #include <drm/drm.h> | ||
16 | #include <drm/drmP.h> | ||
17 | #include <drm/drm_crtc.h> | ||
18 | #include <drm/drm_crtc_helper.h> | ||
19 | #include <drm/drm_plane_helper.h> | ||
20 | |||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/platform_device.h> | ||
23 | #include <linux/clk.h> | ||
24 | #include <linux/of.h> | ||
25 | #include <linux/of_device.h> | ||
26 | #include <linux/pm_runtime.h> | ||
27 | #include <linux/component.h> | ||
28 | |||
29 | #include <linux/reset.h> | ||
30 | #include <linux/delay.h> | ||
31 | |||
32 | #include "rockchip_drm_drv.h" | ||
33 | #include "rockchip_drm_gem.h" | ||
34 | #include "rockchip_drm_fb.h" | ||
35 | #include "rockchip_drm_vop.h" | ||
36 | |||
37 | #define VOP_REG(off, _mask, s) \ | ||
38 | {.offset = off, \ | ||
39 | .mask = _mask, \ | ||
40 | .shift = s,} | ||
41 | |||
42 | #define __REG_SET_RELAXED(x, off, mask, shift, v) \ | ||
43 | vop_mask_write_relaxed(x, off, (mask) << shift, (v) << shift) | ||
44 | #define __REG_SET_NORMAL(x, off, mask, shift, v) \ | ||
45 | vop_mask_write(x, off, (mask) << shift, (v) << shift) | ||
46 | |||
47 | #define REG_SET(x, base, reg, v, mode) \ | ||
48 | __REG_SET_##mode(x, base + reg.offset, reg.mask, reg.shift, v) | ||
49 | |||
50 | #define VOP_WIN_SET(x, win, name, v) \ | ||
51 | REG_SET(x, win->base, win->phy->name, v, RELAXED) | ||
52 | #define VOP_CTRL_SET(x, name, v) \ | ||
53 | REG_SET(x, 0, (x)->data->ctrl->name, v, NORMAL) | ||
54 | |||
55 | #define VOP_WIN_GET(x, win, name) \ | ||
56 | vop_read_reg(x, win->base, &win->phy->name) | ||
57 | |||
58 | #define VOP_WIN_GET_YRGBADDR(vop, win) \ | ||
59 | vop_readl(vop, win->base + win->phy->yrgb_mst.offset) | ||
60 | |||
61 | #define to_vop(x) container_of(x, struct vop, crtc) | ||
62 | #define to_vop_win(x) container_of(x, struct vop_win, base) | ||
63 | |||
64 | struct vop_win_state { | ||
65 | struct list_head head; | ||
66 | struct drm_framebuffer *fb; | ||
67 | dma_addr_t yrgb_mst; | ||
68 | struct drm_pending_vblank_event *event; | ||
69 | }; | ||
70 | |||
71 | struct vop_win { | ||
72 | struct drm_plane base; | ||
73 | const struct vop_win_data *data; | ||
74 | struct vop *vop; | ||
75 | |||
76 | struct list_head pending; | ||
77 | struct vop_win_state *active; | ||
78 | }; | ||
79 | |||
80 | struct vop { | ||
81 | struct drm_crtc crtc; | ||
82 | struct device *dev; | ||
83 | struct drm_device *drm_dev; | ||
84 | unsigned int dpms; | ||
85 | |||
86 | int connector_type; | ||
87 | int connector_out_mode; | ||
88 | |||
89 | /* mutex protecting the vsync work state */ | ||
90 | struct mutex vsync_mutex; | ||
91 | bool vsync_work_pending; | ||
92 | |||
93 | const struct vop_data *data; | ||
94 | |||
95 | uint32_t *regsbak; | ||
96 | void __iomem *regs; | ||
97 | |||
98 | /* length of the mapped vop register region */ | ||
99 | uint32_t len; | ||
100 | |||
101 | /* only one process at a time is allowed to configure the registers */ | ||
102 | spinlock_t reg_lock; | ||
103 | /* lock vop irq reg */ | ||
104 | spinlock_t irq_lock; | ||
105 | |||
106 | unsigned int irq; | ||
107 | |||
108 | /* vop AHB clk */ | ||
109 | struct clk *hclk; | ||
110 | /* vop dclk */ | ||
111 | struct clk *dclk; | ||
112 | /* vop shared memory clk */ | ||
113 | struct clk *aclk; | ||
114 | |||
115 | /* vop dclk reset */ | ||
116 | struct reset_control *dclk_rst; | ||
117 | |||
118 | int pipe; | ||
119 | |||
120 | struct vop_win win[]; | ||
121 | }; | ||
122 | |||
123 | enum vop_data_format { | ||
124 | VOP_FMT_ARGB8888 = 0, | ||
125 | VOP_FMT_RGB888, | ||
126 | VOP_FMT_RGB565, | ||
127 | VOP_FMT_YUV420SP = 4, | ||
128 | VOP_FMT_YUV422SP, | ||
129 | VOP_FMT_YUV444SP, | ||
130 | }; | ||
131 | |||
132 | struct vop_reg_data { | ||
133 | uint32_t offset; | ||
134 | uint32_t value; | ||
135 | }; | ||
136 | |||
137 | struct vop_reg { | ||
138 | uint32_t offset; | ||
139 | uint32_t shift; | ||
140 | uint32_t mask; | ||
141 | }; | ||
142 | |||
143 | struct vop_ctrl { | ||
144 | struct vop_reg standby; | ||
145 | struct vop_reg data_blank; | ||
146 | struct vop_reg gate_en; | ||
147 | struct vop_reg mmu_en; | ||
148 | struct vop_reg rgb_en; | ||
149 | struct vop_reg edp_en; | ||
150 | struct vop_reg hdmi_en; | ||
151 | struct vop_reg mipi_en; | ||
152 | struct vop_reg out_mode; | ||
153 | struct vop_reg dither_down; | ||
154 | struct vop_reg dither_up; | ||
155 | struct vop_reg pin_pol; | ||
156 | |||
157 | struct vop_reg htotal_pw; | ||
158 | struct vop_reg hact_st_end; | ||
159 | struct vop_reg vtotal_pw; | ||
160 | struct vop_reg vact_st_end; | ||
161 | struct vop_reg hpost_st_end; | ||
162 | struct vop_reg vpost_st_end; | ||
163 | }; | ||
164 | |||
165 | struct vop_win_phy { | ||
166 | const uint32_t *data_formats; | ||
167 | uint32_t nformats; | ||
168 | |||
169 | struct vop_reg enable; | ||
170 | struct vop_reg format; | ||
171 | struct vop_reg act_info; | ||
172 | struct vop_reg dsp_info; | ||
173 | struct vop_reg dsp_st; | ||
174 | struct vop_reg yrgb_mst; | ||
175 | struct vop_reg uv_mst; | ||
176 | struct vop_reg yrgb_vir; | ||
177 | struct vop_reg uv_vir; | ||
178 | |||
179 | struct vop_reg dst_alpha_ctl; | ||
180 | struct vop_reg src_alpha_ctl; | ||
181 | }; | ||
182 | |||
183 | struct vop_win_data { | ||
184 | uint32_t base; | ||
185 | const struct vop_win_phy *phy; | ||
186 | enum drm_plane_type type; | ||
187 | }; | ||
188 | |||
189 | struct vop_data { | ||
190 | const struct vop_reg_data *init_table; | ||
191 | unsigned int table_size; | ||
192 | const struct vop_ctrl *ctrl; | ||
193 | const struct vop_win_data *win; | ||
194 | unsigned int win_size; | ||
195 | }; | ||
196 | |||
197 | static const uint32_t formats_01[] = { | ||
198 | DRM_FORMAT_XRGB8888, | ||
199 | DRM_FORMAT_ARGB8888, | ||
200 | DRM_FORMAT_RGB888, | ||
201 | DRM_FORMAT_RGB565, | ||
202 | DRM_FORMAT_NV12, | ||
203 | DRM_FORMAT_NV16, | ||
204 | DRM_FORMAT_NV24, | ||
205 | }; | ||
206 | |||
207 | static const uint32_t formats_234[] = { | ||
208 | DRM_FORMAT_XRGB8888, | ||
209 | DRM_FORMAT_ARGB8888, | ||
210 | DRM_FORMAT_RGB888, | ||
211 | DRM_FORMAT_RGB565, | ||
212 | }; | ||
213 | |||
214 | static const struct vop_win_phy win01_data = { | ||
215 | .data_formats = formats_01, | ||
216 | .nformats = ARRAY_SIZE(formats_01), | ||
217 | .enable = VOP_REG(WIN0_CTRL0, 0x1, 0), | ||
218 | .format = VOP_REG(WIN0_CTRL0, 0x7, 1), | ||
219 | .act_info = VOP_REG(WIN0_ACT_INFO, 0x1fff1fff, 0), | ||
220 | .dsp_info = VOP_REG(WIN0_DSP_INFO, 0x0fff0fff, 0), | ||
221 | .dsp_st = VOP_REG(WIN0_DSP_ST, 0x1fff1fff, 0), | ||
222 | .yrgb_mst = VOP_REG(WIN0_YRGB_MST, 0xffffffff, 0), | ||
223 | .uv_mst = VOP_REG(WIN0_CBR_MST, 0xffffffff, 0), | ||
224 | .yrgb_vir = VOP_REG(WIN0_VIR, 0x3fff, 0), | ||
225 | .uv_vir = VOP_REG(WIN0_VIR, 0x3fff, 16), | ||
226 | .src_alpha_ctl = VOP_REG(WIN0_SRC_ALPHA_CTRL, 0xff, 0), | ||
227 | .dst_alpha_ctl = VOP_REG(WIN0_DST_ALPHA_CTRL, 0xff, 0), | ||
228 | }; | ||
229 | |||
230 | static const struct vop_win_phy win23_data = { | ||
231 | .data_formats = formats_234, | ||
232 | .nformats = ARRAY_SIZE(formats_234), | ||
233 | .enable = VOP_REG(WIN2_CTRL0, 0x1, 0), | ||
234 | .format = VOP_REG(WIN2_CTRL0, 0x7, 1), | ||
235 | .dsp_info = VOP_REG(WIN2_DSP_INFO0, 0x0fff0fff, 0), | ||
236 | .dsp_st = VOP_REG(WIN2_DSP_ST0, 0x1fff1fff, 0), | ||
237 | .yrgb_mst = VOP_REG(WIN2_MST0, 0xffffffff, 0), | ||
238 | .yrgb_vir = VOP_REG(WIN2_VIR0_1, 0x1fff, 0), | ||
239 | .src_alpha_ctl = VOP_REG(WIN2_SRC_ALPHA_CTRL, 0xff, 0), | ||
240 | .dst_alpha_ctl = VOP_REG(WIN2_DST_ALPHA_CTRL, 0xff, 0), | ||
241 | }; | ||
242 | |||
243 | static const struct vop_win_phy cursor_data = { | ||
244 | .data_formats = formats_234, | ||
245 | .nformats = ARRAY_SIZE(formats_234), | ||
246 | .enable = VOP_REG(HWC_CTRL0, 0x1, 0), | ||
247 | .format = VOP_REG(HWC_CTRL0, 0x7, 1), | ||
248 | .dsp_st = VOP_REG(HWC_DSP_ST, 0x1fff1fff, 0), | ||
249 | .yrgb_mst = VOP_REG(HWC_MST, 0xffffffff, 0), | ||
250 | }; | ||
251 | |||
252 | static const struct vop_ctrl ctrl_data = { | ||
253 | .standby = VOP_REG(SYS_CTRL, 0x1, 22), | ||
254 | .gate_en = VOP_REG(SYS_CTRL, 0x1, 23), | ||
255 | .mmu_en = VOP_REG(SYS_CTRL, 0x1, 20), | ||
256 | .rgb_en = VOP_REG(SYS_CTRL, 0x1, 12), | ||
257 | .hdmi_en = VOP_REG(SYS_CTRL, 0x1, 13), | ||
258 | .edp_en = VOP_REG(SYS_CTRL, 0x1, 14), | ||
259 | .mipi_en = VOP_REG(SYS_CTRL, 0x1, 15), | ||
260 | .dither_down = VOP_REG(DSP_CTRL1, 0xf, 1), | ||
261 | .dither_up = VOP_REG(DSP_CTRL1, 0x1, 6), | ||
262 | .data_blank = VOP_REG(DSP_CTRL0, 0x1, 19), | ||
263 | .out_mode = VOP_REG(DSP_CTRL0, 0xf, 0), | ||
264 | .pin_pol = VOP_REG(DSP_CTRL0, 0xf, 4), | ||
265 | .htotal_pw = VOP_REG(DSP_HTOTAL_HS_END, 0x1fff1fff, 0), | ||
266 | .hact_st_end = VOP_REG(DSP_HACT_ST_END, 0x1fff1fff, 0), | ||
267 | .vtotal_pw = VOP_REG(DSP_VTOTAL_VS_END, 0x1fff1fff, 0), | ||
268 | .vact_st_end = VOP_REG(DSP_VACT_ST_END, 0x1fff1fff, 0), | ||
269 | .hpost_st_end = VOP_REG(POST_DSP_HACT_INFO, 0x1fff1fff, 0), | ||
270 | .vpost_st_end = VOP_REG(POST_DSP_VACT_INFO, 0x1fff1fff, 0), | ||
271 | }; | ||
272 | |||
273 | static const struct vop_reg_data vop_init_reg_table[] = { | ||
274 | {SYS_CTRL, 0x00c00000}, | ||
275 | {DSP_CTRL0, 0x00000000}, | ||
276 | {WIN0_CTRL0, 0x00000080}, | ||
277 | {WIN1_CTRL0, 0x00000080}, | ||
278 | }; | ||
279 | |||
280 | /* | ||
281 | * Note: rk3288 has a dedicated 'cursor' window; however, that window requires | ||
282 | * special support to get alpha blending working. For now, just use overlay | ||
283 | * window 1 for the drm cursor. | ||
284 | */ | ||
285 | static const struct vop_win_data rk3288_vop_win_data[] = { | ||
286 | { .base = 0x00, .phy = &win01_data, .type = DRM_PLANE_TYPE_PRIMARY }, | ||
287 | { .base = 0x40, .phy = &win01_data, .type = DRM_PLANE_TYPE_CURSOR }, | ||
288 | { .base = 0x00, .phy = &win23_data, .type = DRM_PLANE_TYPE_OVERLAY }, | ||
289 | { .base = 0x50, .phy = &win23_data, .type = DRM_PLANE_TYPE_OVERLAY }, | ||
290 | { .base = 0x00, .phy = &cursor_data, .type = DRM_PLANE_TYPE_OVERLAY }, | ||
291 | }; | ||
292 | |||
293 | static const struct vop_data rk3288_vop = { | ||
294 | .init_table = vop_init_reg_table, | ||
295 | .table_size = ARRAY_SIZE(vop_init_reg_table), | ||
296 | .ctrl = &ctrl_data, | ||
297 | .win = rk3288_vop_win_data, | ||
298 | .win_size = ARRAY_SIZE(rk3288_vop_win_data), | ||
299 | }; | ||
300 | |||
301 | static const struct of_device_id vop_driver_dt_match[] = { | ||
302 | { .compatible = "rockchip,rk3288-vop", | ||
303 | .data = &rk3288_vop }, | ||
304 | {}, | ||
305 | }; | ||
306 | |||
307 | static inline void vop_writel(struct vop *vop, uint32_t offset, uint32_t v) | ||
308 | { | ||
309 | writel(v, vop->regs + offset); | ||
310 | vop->regsbak[offset >> 2] = v; | ||
311 | } | ||
312 | |||
313 | static inline uint32_t vop_readl(struct vop *vop, uint32_t offset) | ||
314 | { | ||
315 | return readl(vop->regs + offset); | ||
316 | } | ||
317 | |||
318 | static inline uint32_t vop_read_reg(struct vop *vop, uint32_t base, | ||
319 | const struct vop_reg *reg) | ||
320 | { | ||
321 | return (vop_readl(vop, base + reg->offset) >> reg->shift) & reg->mask; | ||
322 | } | ||
323 | |||
324 | static inline void vop_cfg_done(struct vop *vop) | ||
325 | { | ||
326 | writel(0x01, vop->regs + REG_CFG_DONE); | ||
327 | } | ||
328 | |||
329 | static inline void vop_mask_write(struct vop *vop, uint32_t offset, | ||
330 | uint32_t mask, uint32_t v) | ||
331 | { | ||
332 | if (mask) { | ||
333 | uint32_t cached_val = vop->regsbak[offset >> 2]; | ||
334 | |||
335 | cached_val = (cached_val & ~mask) | v; | ||
336 | writel(cached_val, vop->regs + offset); | ||
337 | vop->regsbak[offset >> 2] = cached_val; | ||
338 | } | ||
339 | } | ||
340 | |||
341 | static inline void vop_mask_write_relaxed(struct vop *vop, uint32_t offset, | ||
342 | uint32_t mask, uint32_t v) | ||
343 | { | ||
344 | if (mask) { | ||
345 | uint32_t cached_val = vop->regsbak[offset >> 2]; | ||
346 | |||
347 | cached_val = (cached_val & ~mask) | v; | ||
348 | writel_relaxed(cached_val, vop->regs + offset); | ||
349 | vop->regsbak[offset >> 2] = cached_val; | ||
350 | } | ||
351 | } | ||
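
The VOP_REG()/REG_SET() macros above, together with vop_mask_write() and
vop_mask_write_relaxed(), implement a read-modify-write over a cached copy of
the register file (vop->regsbak), so individual bit-fields can be updated
without reading the hardware registers back. A standalone sketch of that idiom,
with an ordinary array standing in for the MMIO block and purely illustrative
field values:

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[16];	/* stands in for the MMIO block */
static uint32_t cache[16];	/* stands in for vop->regsbak */

static void mask_write(uint32_t offset, uint32_t mask, uint32_t v)
{
	uint32_t val = (cache[offset >> 2] & ~mask) | (v & mask);

	regs[offset >> 2] = val;	/* writel()/writel_relaxed() in the driver */
	cache[offset >> 2] = val;
}

/* Field-level helper comparable to REG_SET(): shift mask and value into place. */
static void set_field(uint32_t offset, uint32_t mask, uint32_t shift, uint32_t v)
{
	mask_write(offset, mask << shift, v << shift);
}

int main(void)
{
	set_field(0x08, 0x1, 22, 1);	/* e.g. the standby bit of SYS_CTRL (0x0008) */
	set_field(0x08, 0x1, 22, 0);	/* clear it again without a register read */
	printf("reg 0x08 = %#010x\n", regs[0x08 >> 2]);
	return 0;
}
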
352 | |||
353 | static enum vop_data_format vop_convert_format(uint32_t format) | ||
354 | { | ||
355 | switch (format) { | ||
356 | case DRM_FORMAT_XRGB8888: | ||
357 | case DRM_FORMAT_ARGB8888: | ||
358 | return VOP_FMT_ARGB8888; | ||
359 | case DRM_FORMAT_RGB888: | ||
360 | return VOP_FMT_RGB888; | ||
361 | case DRM_FORMAT_RGB565: | ||
362 | return VOP_FMT_RGB565; | ||
363 | case DRM_FORMAT_NV12: | ||
364 | return VOP_FMT_YUV420SP; | ||
365 | case DRM_FORMAT_NV16: | ||
366 | return VOP_FMT_YUV422SP; | ||
367 | case DRM_FORMAT_NV24: | ||
368 | return VOP_FMT_YUV444SP; | ||
369 | default: | ||
370 | DRM_ERROR("unsupport format[%08x]\n", format); | ||
371 | return -EINVAL; | ||
372 | } | ||
373 | } | ||
374 | |||
375 | static bool is_alpha_support(uint32_t format) | ||
376 | { | ||
377 | switch (format) { | ||
378 | case DRM_FORMAT_ARGB8888: | ||
379 | return true; | ||
380 | default: | ||
381 | return false; | ||
382 | } | ||
383 | } | ||
384 | |||
385 | static void vop_enable(struct drm_crtc *crtc) | ||
386 | { | ||
387 | struct vop *vop = to_vop(crtc); | ||
388 | int ret; | ||
389 | |||
390 | ret = clk_enable(vop->hclk); | ||
391 | if (ret < 0) { | ||
392 | dev_err(vop->dev, "failed to enable hclk - %d\n", ret); | ||
393 | return; | ||
394 | } | ||
395 | |||
396 | ret = clk_enable(vop->dclk); | ||
397 | if (ret < 0) { | ||
398 | dev_err(vop->dev, "failed to enable dclk - %d\n", ret); | ||
399 | goto err_disable_hclk; | ||
400 | } | ||
401 | |||
402 | ret = clk_enable(vop->aclk); | ||
403 | if (ret < 0) { | ||
404 | dev_err(vop->dev, "failed to enable aclk - %d\n", ret); | ||
405 | goto err_disable_dclk; | ||
406 | } | ||
407 | |||
408 | /* | ||
409 | * Slave iommu shares power, irq and clock with vop. It was associated | ||
410 | * automatically with this master device via common driver code. | ||
411 | * Now that we have enabled the clock we attach it to the shared drm | ||
412 | * mapping. | ||
413 | */ | ||
414 | ret = rockchip_drm_dma_attach_device(vop->drm_dev, vop->dev); | ||
415 | if (ret) { | ||
416 | dev_err(vop->dev, "failed to attach dma mapping, %d\n", ret); | ||
417 | goto err_disable_aclk; | ||
418 | } | ||
419 | |||
420 | spin_lock(&vop->reg_lock); | ||
421 | |||
422 | VOP_CTRL_SET(vop, standby, 0); | ||
423 | |||
424 | spin_unlock(&vop->reg_lock); | ||
425 | |||
426 | enable_irq(vop->irq); | ||
427 | |||
428 | drm_vblank_on(vop->drm_dev, vop->pipe); | ||
429 | |||
430 | return; | ||
431 | |||
432 | err_disable_aclk: | ||
433 | clk_disable(vop->aclk); | ||
434 | err_disable_dclk: | ||
435 | clk_disable(vop->dclk); | ||
436 | err_disable_hclk: | ||
437 | clk_disable(vop->hclk); | ||
438 | } | ||
439 | |||
440 | static void vop_disable(struct drm_crtc *crtc) | ||
441 | { | ||
442 | struct vop *vop = to_vop(crtc); | ||
443 | |||
444 | drm_vblank_off(crtc->dev, vop->pipe); | ||
445 | |||
446 | disable_irq(vop->irq); | ||
447 | |||
448 | /* | ||
449 | * TODO: Since standby doesn't take effect until the next vblank, | ||
450 | * when we turn off dclk below, the vop is probably still active. | ||
451 | */ | ||
452 | spin_lock(&vop->reg_lock); | ||
453 | |||
454 | VOP_CTRL_SET(vop, standby, 1); | ||
455 | |||
456 | spin_unlock(&vop->reg_lock); | ||
457 | /* | ||
458 | * Disable dclk to stop frame scanout, so that we can safely detach the iommu. | ||
459 | */ | ||
460 | clk_disable(vop->dclk); | ||
461 | |||
462 | rockchip_drm_dma_detach_device(vop->drm_dev, vop->dev); | ||
463 | |||
464 | clk_disable(vop->aclk); | ||
465 | clk_disable(vop->hclk); | ||
466 | } | ||
467 | |||
468 | /* | ||
469 | * Caller must hold vsync_mutex. | ||
470 | */ | ||
471 | static struct drm_framebuffer *vop_win_last_pending_fb(struct vop_win *vop_win) | ||
472 | { | ||
473 | struct vop_win_state *last; | ||
474 | struct vop_win_state *active = vop_win->active; | ||
475 | |||
476 | if (list_empty(&vop_win->pending)) | ||
477 | return active ? active->fb : NULL; | ||
478 | |||
479 | last = list_last_entry(&vop_win->pending, struct vop_win_state, head); | ||
480 | return last ? last->fb : NULL; | ||
481 | } | ||
482 | |||
483 | /* | ||
484 | * Caller must hold vsync_mutex. | ||
485 | */ | ||
486 | static int vop_win_queue_fb(struct vop_win *vop_win, | ||
487 | struct drm_framebuffer *fb, dma_addr_t yrgb_mst, | ||
488 | struct drm_pending_vblank_event *event) | ||
489 | { | ||
490 | struct vop_win_state *state; | ||
491 | |||
492 | state = kzalloc(sizeof(*state), GFP_KERNEL); | ||
493 | if (!state) | ||
494 | return -ENOMEM; | ||
495 | |||
496 | state->fb = fb; | ||
497 | state->yrgb_mst = yrgb_mst; | ||
498 | state->event = event; | ||
499 | |||
500 | list_add_tail(&state->head, &vop_win->pending); | ||
501 | |||
502 | return 0; | ||
503 | } | ||
504 | |||
505 | static int vop_update_plane_event(struct drm_plane *plane, | ||
506 | struct drm_crtc *crtc, | ||
507 | struct drm_framebuffer *fb, int crtc_x, | ||
508 | int crtc_y, unsigned int crtc_w, | ||
509 | unsigned int crtc_h, uint32_t src_x, | ||
510 | uint32_t src_y, uint32_t src_w, | ||
511 | uint32_t src_h, | ||
512 | struct drm_pending_vblank_event *event) | ||
513 | { | ||
514 | struct vop_win *vop_win = to_vop_win(plane); | ||
515 | const struct vop_win_data *win = vop_win->data; | ||
516 | struct vop *vop = to_vop(crtc); | ||
517 | struct drm_gem_object *obj; | ||
518 | struct rockchip_gem_object *rk_obj; | ||
519 | unsigned long offset; | ||
520 | unsigned int actual_w; | ||
521 | unsigned int actual_h; | ||
522 | unsigned int dsp_stx; | ||
523 | unsigned int dsp_sty; | ||
524 | unsigned int y_vir_stride; | ||
525 | dma_addr_t yrgb_mst; | ||
526 | enum vop_data_format format; | ||
527 | uint32_t val; | ||
528 | bool is_alpha; | ||
529 | bool visible; | ||
530 | int ret; | ||
531 | struct drm_rect dest = { | ||
532 | .x1 = crtc_x, | ||
533 | .y1 = crtc_y, | ||
534 | .x2 = crtc_x + crtc_w, | ||
535 | .y2 = crtc_y + crtc_h, | ||
536 | }; | ||
537 | struct drm_rect src = { | ||
538 | /* 16.16 fixed point */ | ||
539 | .x1 = src_x, | ||
540 | .y1 = src_y, | ||
541 | .x2 = src_x + src_w, | ||
542 | .y2 = src_y + src_h, | ||
543 | }; | ||
544 | const struct drm_rect clip = { | ||
545 | .x2 = crtc->mode.hdisplay, | ||
546 | .y2 = crtc->mode.vdisplay, | ||
547 | }; | ||
548 | bool can_position = plane->type != DRM_PLANE_TYPE_PRIMARY; | ||
549 | |||
550 | ret = drm_plane_helper_check_update(plane, crtc, fb, | ||
551 | &src, &dest, &clip, | ||
552 | DRM_PLANE_HELPER_NO_SCALING, | ||
553 | DRM_PLANE_HELPER_NO_SCALING, | ||
554 | can_position, false, &visible); | ||
555 | if (ret) | ||
556 | return ret; | ||
557 | |||
558 | if (!visible) | ||
559 | return 0; | ||
560 | |||
561 | is_alpha = is_alpha_support(fb->pixel_format); | ||
562 | format = vop_convert_format(fb->pixel_format); | ||
563 | if (format < 0) | ||
564 | return format; | ||
565 | |||
566 | obj = rockchip_fb_get_gem_obj(fb, 0); | ||
567 | if (!obj) { | ||
568 | DRM_ERROR("fail to get rockchip gem object from framebuffer\n"); | ||
569 | return -EINVAL; | ||
570 | } | ||
571 | |||
572 | rk_obj = to_rockchip_obj(obj); | ||
573 | |||
574 | actual_w = (src.x2 - src.x1) >> 16; | ||
575 | actual_h = (src.y2 - src.y1) >> 16; | ||
576 | crtc_x = max(0, crtc_x); | ||
577 | crtc_y = max(0, crtc_y); | ||
578 | |||
579 | dsp_stx = crtc_x + crtc->mode.htotal - crtc->mode.hsync_start; | ||
580 | dsp_sty = crtc_y + crtc->mode.vtotal - crtc->mode.vsync_start; | ||
581 | |||
582 | offset = (src.x1 >> 16) * (fb->bits_per_pixel >> 3); | ||
583 | offset += (src.y1 >> 16) * fb->pitches[0]; | ||
584 | yrgb_mst = rk_obj->dma_addr + offset; | ||
585 | |||
586 | y_vir_stride = fb->pitches[0] / (fb->bits_per_pixel >> 3); | ||
587 | |||
588 | /* | ||
589 | * If this plane update changes the plane's framebuffer (or, more | ||
590 | * precisely, if this update has a different framebuffer than the last | ||
591 | * update), enqueue it so we can track when it completes. | ||
592 | * | ||
593 | * Only once we discover that this update has completed can we | ||
594 | * unreference any previous framebuffers. | ||
595 | */ | ||
596 | mutex_lock(&vop->vsync_mutex); | ||
597 | if (fb != vop_win_last_pending_fb(vop_win)) { | ||
598 | ret = drm_vblank_get(plane->dev, vop->pipe); | ||
599 | if (ret) { | ||
600 | DRM_ERROR("failed to get vblank, %d\n", ret); | ||
601 | mutex_unlock(&vop->vsync_mutex); | ||
602 | return ret; | ||
603 | } | ||
604 | |||
605 | drm_framebuffer_reference(fb); | ||
606 | |||
607 | ret = vop_win_queue_fb(vop_win, fb, yrgb_mst, event); | ||
608 | if (ret) { | ||
609 | drm_vblank_put(plane->dev, vop->pipe); | ||
610 | mutex_unlock(&vop->vsync_mutex); | ||
611 | return ret; | ||
612 | } | ||
613 | |||
614 | vop->vsync_work_pending = true; | ||
615 | } | ||
616 | mutex_unlock(&vop->vsync_mutex); | ||
617 | |||
618 | spin_lock(&vop->reg_lock); | ||
619 | |||
620 | VOP_WIN_SET(vop, win, format, format); | ||
621 | VOP_WIN_SET(vop, win, yrgb_vir, y_vir_stride); | ||
622 | VOP_WIN_SET(vop, win, yrgb_mst, yrgb_mst); | ||
623 | val = (actual_h - 1) << 16; | ||
624 | val |= (actual_w - 1) & 0xffff; | ||
625 | VOP_WIN_SET(vop, win, act_info, val); | ||
626 | VOP_WIN_SET(vop, win, dsp_info, val); | ||
627 | val = (dsp_sty - 1) << 16; | ||
628 | val |= (dsp_stx - 1) & 0xffff; | ||
629 | VOP_WIN_SET(vop, win, dsp_st, val); | ||
630 | |||
631 | if (is_alpha) { | ||
632 | VOP_WIN_SET(vop, win, dst_alpha_ctl, | ||
633 | DST_FACTOR_M0(ALPHA_SRC_INVERSE)); | ||
634 | val = SRC_ALPHA_EN(1) | SRC_COLOR_M0(ALPHA_SRC_PRE_MUL) | | ||
635 | SRC_ALPHA_M0(ALPHA_STRAIGHT) | | ||
636 | SRC_BLEND_M0(ALPHA_PER_PIX) | | ||
637 | SRC_ALPHA_CAL_M0(ALPHA_NO_SATURATION) | | ||
638 | SRC_FACTOR_M0(ALPHA_ONE); | ||
639 | VOP_WIN_SET(vop, win, src_alpha_ctl, val); | ||
640 | } else { | ||
641 | VOP_WIN_SET(vop, win, src_alpha_ctl, SRC_ALPHA_EN(0)); | ||
642 | } | ||
643 | |||
644 | VOP_WIN_SET(vop, win, enable, 1); | ||
645 | |||
646 | vop_cfg_done(vop); | ||
647 | spin_unlock(&vop->reg_lock); | ||
648 | |||
649 | return 0; | ||
650 | } | ||
651 | |||
652 | static int vop_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | ||
653 | struct drm_framebuffer *fb, int crtc_x, int crtc_y, | ||
654 | unsigned int crtc_w, unsigned int crtc_h, | ||
655 | uint32_t src_x, uint32_t src_y, uint32_t src_w, | ||
656 | uint32_t src_h) | ||
657 | { | ||
658 | return vop_update_plane_event(plane, crtc, fb, crtc_x, crtc_y, crtc_w, | ||
659 | crtc_h, src_x, src_y, src_w, src_h, | ||
660 | NULL); | ||
661 | } | ||
662 | |||
663 | static int vop_update_primary_plane(struct drm_crtc *crtc, | ||
664 | struct drm_pending_vblank_event *event) | ||
665 | { | ||
666 | unsigned int crtc_w, crtc_h; | ||
667 | |||
668 | crtc_w = crtc->primary->fb->width - crtc->x; | ||
669 | crtc_h = crtc->primary->fb->height - crtc->y; | ||
670 | |||
671 | return vop_update_plane_event(crtc->primary, crtc, crtc->primary->fb, | ||
672 | 0, 0, crtc_w, crtc_h, crtc->x << 16, | ||
673 | crtc->y << 16, crtc_w << 16, | ||
674 | crtc_h << 16, event); | ||
675 | } | ||
676 | |||
677 | static int vop_disable_plane(struct drm_plane *plane) | ||
678 | { | ||
679 | struct vop_win *vop_win = to_vop_win(plane); | ||
680 | const struct vop_win_data *win = vop_win->data; | ||
681 | struct vop *vop; | ||
682 | int ret; | ||
683 | |||
684 | if (!plane->crtc) | ||
685 | return 0; | ||
686 | |||
687 | vop = to_vop(plane->crtc); | ||
688 | |||
689 | ret = drm_vblank_get(plane->dev, vop->pipe); | ||
690 | if (ret) { | ||
691 | DRM_ERROR("failed to get vblank, %d\n", ret); | ||
692 | return ret; | ||
693 | } | ||
694 | |||
695 | mutex_lock(&vop->vsync_mutex); | ||
696 | |||
697 | ret = vop_win_queue_fb(vop_win, NULL, 0, NULL); | ||
698 | if (ret) { | ||
699 | drm_vblank_put(plane->dev, vop->pipe); | ||
700 | mutex_unlock(&vop->vsync_mutex); | ||
701 | return ret; | ||
702 | } | ||
703 | |||
704 | vop->vsync_work_pending = true; | ||
705 | mutex_unlock(&vop->vsync_mutex); | ||
706 | |||
707 | spin_lock(&vop->reg_lock); | ||
708 | VOP_WIN_SET(vop, win, enable, 0); | ||
709 | vop_cfg_done(vop); | ||
710 | spin_unlock(&vop->reg_lock); | ||
711 | |||
712 | return 0; | ||
713 | } | ||
714 | |||
715 | static void vop_plane_destroy(struct drm_plane *plane) | ||
716 | { | ||
717 | vop_disable_plane(plane); | ||
718 | drm_plane_cleanup(plane); | ||
719 | } | ||
720 | |||
721 | static const struct drm_plane_funcs vop_plane_funcs = { | ||
722 | .update_plane = vop_update_plane, | ||
723 | .disable_plane = vop_disable_plane, | ||
724 | .destroy = vop_plane_destroy, | ||
725 | }; | ||
726 | |||
727 | int rockchip_drm_crtc_mode_config(struct drm_crtc *crtc, | ||
728 | int connector_type, | ||
729 | int out_mode) | ||
730 | { | ||
731 | struct vop *vop = to_vop(crtc); | ||
732 | |||
733 | vop->connector_type = connector_type; | ||
734 | vop->connector_out_mode = out_mode; | ||
735 | |||
736 | return 0; | ||
737 | } | ||
738 | |||
739 | static int vop_crtc_enable_vblank(struct drm_crtc *crtc) | ||
740 | { | ||
741 | struct vop *vop = to_vop(crtc); | ||
742 | unsigned long flags; | ||
743 | |||
744 | if (vop->dpms != DRM_MODE_DPMS_ON) | ||
745 | return -EPERM; | ||
746 | |||
747 | spin_lock_irqsave(&vop->irq_lock, flags); | ||
748 | |||
749 | vop_mask_write(vop, INTR_CTRL0, FS_INTR_MASK, FS_INTR_EN(1)); | ||
750 | |||
751 | spin_unlock_irqrestore(&vop->irq_lock, flags); | ||
752 | |||
753 | return 0; | ||
754 | } | ||
755 | |||
756 | static void vop_crtc_disable_vblank(struct drm_crtc *crtc) | ||
757 | { | ||
758 | struct vop *vop = to_vop(crtc); | ||
759 | unsigned long flags; | ||
760 | |||
761 | if (vop->dpms != DRM_MODE_DPMS_ON) | ||
762 | return; | ||
763 | spin_lock_irqsave(&vop->irq_lock, flags); | ||
764 | vop_mask_write(vop, INTR_CTRL0, FS_INTR_MASK, FS_INTR_EN(0)); | ||
765 | spin_unlock_irqrestore(&vop->irq_lock, flags); | ||
766 | } | ||
767 | |||
768 | static const struct rockchip_crtc_funcs private_crtc_funcs = { | ||
769 | .enable_vblank = vop_crtc_enable_vblank, | ||
770 | .disable_vblank = vop_crtc_disable_vblank, | ||
771 | }; | ||
772 | |||
773 | static void vop_crtc_dpms(struct drm_crtc *crtc, int mode) | ||
774 | { | ||
775 | struct vop *vop = to_vop(crtc); | ||
776 | |||
777 | DRM_DEBUG_KMS("crtc[%d] mode[%d]\n", crtc->base.id, mode); | ||
778 | |||
779 | if (vop->dpms == mode) { | ||
780 | DRM_DEBUG_KMS("desired dpms mode is same as previous one.\n"); | ||
781 | return; | ||
782 | } | ||
783 | |||
784 | switch (mode) { | ||
785 | case DRM_MODE_DPMS_ON: | ||
786 | vop_enable(crtc); | ||
787 | break; | ||
788 | case DRM_MODE_DPMS_STANDBY: | ||
789 | case DRM_MODE_DPMS_SUSPEND: | ||
790 | case DRM_MODE_DPMS_OFF: | ||
791 | vop_disable(crtc); | ||
792 | break; | ||
793 | default: | ||
794 | DRM_DEBUG_KMS("unspecified mode %d\n", mode); | ||
795 | break; | ||
796 | } | ||
797 | |||
798 | vop->dpms = mode; | ||
799 | } | ||
800 | |||
801 | static void vop_crtc_prepare(struct drm_crtc *crtc) | ||
802 | { | ||
803 | vop_crtc_dpms(crtc, DRM_MODE_DPMS_ON); | ||
804 | } | ||
805 | |||
806 | static bool vop_crtc_mode_fixup(struct drm_crtc *crtc, | ||
807 | const struct drm_display_mode *mode, | ||
808 | struct drm_display_mode *adjusted_mode) | ||
809 | { | ||
810 | if (adjusted_mode->htotal == 0 || adjusted_mode->vtotal == 0) | ||
811 | return false; | ||
812 | |||
813 | return true; | ||
814 | } | ||
815 | |||
816 | static int vop_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, | ||
817 | struct drm_framebuffer *old_fb) | ||
818 | { | ||
819 | int ret; | ||
820 | |||
821 | crtc->x = x; | ||
822 | crtc->y = y; | ||
823 | |||
824 | ret = vop_update_primary_plane(crtc, NULL); | ||
825 | if (ret < 0) { | ||
826 | DRM_ERROR("fail to update plane\n"); | ||
827 | return ret; | ||
828 | } | ||
829 | |||
830 | return 0; | ||
831 | } | ||
832 | |||
833 | static int vop_crtc_mode_set(struct drm_crtc *crtc, | ||
834 | struct drm_display_mode *mode, | ||
835 | struct drm_display_mode *adjusted_mode, | ||
836 | int x, int y, struct drm_framebuffer *fb) | ||
837 | { | ||
838 | struct vop *vop = to_vop(crtc); | ||
839 | u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start; | ||
840 | u16 hdisplay = adjusted_mode->hdisplay; | ||
841 | u16 htotal = adjusted_mode->htotal; | ||
842 | u16 hact_st = adjusted_mode->htotal - adjusted_mode->hsync_start; | ||
843 | u16 hact_end = hact_st + hdisplay; | ||
844 | u16 vdisplay = adjusted_mode->vdisplay; | ||
845 | u16 vtotal = adjusted_mode->vtotal; | ||
846 | u16 vsync_len = adjusted_mode->vsync_end - adjusted_mode->vsync_start; | ||
847 | u16 vact_st = adjusted_mode->vtotal - adjusted_mode->vsync_start; | ||
848 | u16 vact_end = vact_st + vdisplay; | ||
849 | int ret; | ||
850 | uint32_t val; | ||
851 | |||
852 | /* | ||
853 | * Disable dclk to stop frame scanout, so that we can safely configure the | ||
854 | * mode and enable the iommu. | ||
855 | */ | ||
856 | clk_disable(vop->dclk); | ||
857 | |||
858 | switch (vop->connector_type) { | ||
859 | case DRM_MODE_CONNECTOR_LVDS: | ||
860 | VOP_CTRL_SET(vop, rgb_en, 1); | ||
861 | break; | ||
862 | case DRM_MODE_CONNECTOR_eDP: | ||
863 | VOP_CTRL_SET(vop, edp_en, 1); | ||
864 | break; | ||
865 | case DRM_MODE_CONNECTOR_HDMIA: | ||
866 | VOP_CTRL_SET(vop, hdmi_en, 1); | ||
867 | break; | ||
868 | default: | ||
869 | DRM_ERROR("unsupport connector_type[%d]\n", | ||
870 | vop->connector_type); | ||
871 | return -EINVAL; | ||
872 | } | ||
873 | VOP_CTRL_SET(vop, out_mode, vop->connector_out_mode); | ||
874 | |||
875 | val = 0x8; | ||
876 | val |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0; | ||
877 | val |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ? (1 << 1) : 0; | ||
878 | VOP_CTRL_SET(vop, pin_pol, val); | ||
879 | |||
880 | VOP_CTRL_SET(vop, htotal_pw, (htotal << 16) | hsync_len); | ||
881 | val = hact_st << 16; | ||
882 | val |= hact_end; | ||
883 | VOP_CTRL_SET(vop, hact_st_end, val); | ||
884 | VOP_CTRL_SET(vop, hpost_st_end, val); | ||
885 | |||
886 | VOP_CTRL_SET(vop, vtotal_pw, (vtotal << 16) | vsync_len); | ||
887 | val = vact_st << 16; | ||
888 | val |= vact_end; | ||
889 | VOP_CTRL_SET(vop, vact_st_end, val); | ||
890 | VOP_CTRL_SET(vop, vpost_st_end, val); | ||
891 | |||
892 | ret = vop_crtc_mode_set_base(crtc, x, y, fb); | ||
893 | if (ret) | ||
894 | return ret; | ||
895 | |||
896 | /* | ||
897 | * Reset dclk so that all of the mode configuration takes effect and the | ||
898 | * clock starts scanning out a correct frame. | ||
899 | */ | ||
900 | reset_control_assert(vop->dclk_rst); | ||
901 | usleep_range(10, 20); | ||
902 | reset_control_deassert(vop->dclk_rst); | ||
903 | |||
904 | clk_set_rate(vop->dclk, adjusted_mode->clock * 1000); | ||
905 | ret = clk_enable(vop->dclk); | ||
906 | if (ret < 0) { | ||
907 | dev_err(vop->dev, "failed to enable dclk - %d\n", ret); | ||
908 | return ret; | ||
909 | } | ||
910 | |||
911 | return 0; | ||
912 | } | ||
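
To make the timing values programmed above concrete, the sketch below runs the
same arithmetic for the standard CEA-861 1920x1080@60 mode (148.5 MHz pixel
clock); the mode numbers are taken from that standard timing as an assumed
example, not from this driver:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* CEA-861 1920x1080@60 horizontal/vertical timing (assumed example). */
	uint32_t hdisplay = 1920, hsync_start = 2008, hsync_end = 2052, htotal = 2200;
	uint32_t vdisplay = 1080, vsync_start = 1084, vsync_end = 1089, vtotal = 1125;

	uint32_t hsync_len = hsync_end - hsync_start;	/* 44 */
	uint32_t hact_st = htotal - hsync_start;	/* 192 = sync + back porch */
	uint32_t hact_end = hact_st + hdisplay;		/* 2112 */
	uint32_t vsync_len = vsync_end - vsync_start;	/* 5 */
	uint32_t vact_st = vtotal - vsync_start;	/* 41 */
	uint32_t vact_end = vact_st + vdisplay;		/* 1121 */

	printf("htotal_pw   = %#010x\n", htotal << 16 | hsync_len);
	printf("hact_st_end = %#010x\n", hact_st << 16 | hact_end);
	printf("vtotal_pw   = %#010x\n", vtotal << 16 | vsync_len);
	printf("vact_st_end = %#010x\n", vact_st << 16 | vact_end);
	return 0;
}
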
913 | |||
914 | static void vop_crtc_commit(struct drm_crtc *crtc) | ||
915 | { | ||
916 | } | ||
917 | |||
918 | static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = { | ||
919 | .dpms = vop_crtc_dpms, | ||
920 | .prepare = vop_crtc_prepare, | ||
921 | .mode_fixup = vop_crtc_mode_fixup, | ||
922 | .mode_set = vop_crtc_mode_set, | ||
923 | .mode_set_base = vop_crtc_mode_set_base, | ||
924 | .commit = vop_crtc_commit, | ||
925 | }; | ||
926 | |||
927 | static int vop_crtc_page_flip(struct drm_crtc *crtc, | ||
928 | struct drm_framebuffer *fb, | ||
929 | struct drm_pending_vblank_event *event, | ||
930 | uint32_t page_flip_flags) | ||
931 | { | ||
932 | struct vop *vop = to_vop(crtc); | ||
933 | struct drm_framebuffer *old_fb = crtc->primary->fb; | ||
934 | int ret; | ||
935 | |||
936 | /* when the page flip is requested, crtc's dpms should be on */ | ||
937 | if (vop->dpms > DRM_MODE_DPMS_ON) { | ||
938 | DRM_DEBUG("failed page flip request at dpms[%d].\n", vop->dpms); | ||
939 | return 0; | ||
940 | } | ||
941 | |||
942 | crtc->primary->fb = fb; | ||
943 | |||
944 | ret = vop_update_primary_plane(crtc, event); | ||
945 | if (ret) | ||
946 | crtc->primary->fb = old_fb; | ||
947 | |||
948 | return ret; | ||
949 | } | ||
950 | |||
951 | static void vop_win_state_complete(struct vop_win *vop_win, | ||
952 | struct vop_win_state *state) | ||
953 | { | ||
954 | struct vop *vop = vop_win->vop; | ||
955 | struct drm_crtc *crtc = &vop->crtc; | ||
956 | struct drm_device *drm = crtc->dev; | ||
957 | unsigned long flags; | ||
958 | |||
959 | if (state->event) { | ||
960 | spin_lock_irqsave(&drm->event_lock, flags); | ||
961 | drm_send_vblank_event(drm, -1, state->event); | ||
962 | spin_unlock_irqrestore(&drm->event_lock, flags); | ||
963 | } | ||
964 | |||
965 | list_del(&state->head); | ||
966 | drm_vblank_put(crtc->dev, vop->pipe); | ||
967 | } | ||
968 | |||
969 | static void vop_crtc_destroy(struct drm_crtc *crtc) | ||
970 | { | ||
971 | drm_crtc_cleanup(crtc); | ||
972 | } | ||
973 | |||
974 | static const struct drm_crtc_funcs vop_crtc_funcs = { | ||
975 | .set_config = drm_crtc_helper_set_config, | ||
976 | .page_flip = vop_crtc_page_flip, | ||
977 | .destroy = vop_crtc_destroy, | ||
978 | }; | ||
979 | |||
980 | static bool vop_win_state_is_active(struct vop_win *vop_win, | ||
981 | struct vop_win_state *state) | ||
982 | { | ||
983 | bool active = false; | ||
984 | |||
985 | if (state->fb) { | ||
986 | dma_addr_t yrgb_mst; | ||
987 | |||
988 | /* check yrgb_mst to tell if pending_fb is now front */ | ||
989 | yrgb_mst = VOP_WIN_GET_YRGBADDR(vop_win->vop, vop_win->data); | ||
990 | |||
991 | active = (yrgb_mst == state->yrgb_mst); | ||
992 | } else { | ||
993 | bool enabled; | ||
994 | |||
995 | /* if enable bit is clear, plane is now disabled */ | ||
996 | enabled = VOP_WIN_GET(vop_win->vop, vop_win->data, enable); | ||
997 | |||
998 | active = (enabled == 0); | ||
999 | } | ||
1000 | |||
1001 | return active; | ||
1002 | } | ||
1003 | |||
1004 | static void vop_win_state_destroy(struct vop_win_state *state) | ||
1005 | { | ||
1006 | struct drm_framebuffer *fb = state->fb; | ||
1007 | |||
1008 | if (fb) | ||
1009 | drm_framebuffer_unreference(fb); | ||
1010 | |||
1011 | kfree(state); | ||
1012 | } | ||
1013 | |||
1014 | static void vop_win_update_state(struct vop_win *vop_win) | ||
1015 | { | ||
1016 | struct vop_win_state *state, *n, *new_active = NULL; | ||
1017 | |||
1018 | /* Check if any pending states are now active */ | ||
1019 | list_for_each_entry(state, &vop_win->pending, head) | ||
1020 | if (vop_win_state_is_active(vop_win, state)) { | ||
1021 | new_active = state; | ||
1022 | break; | ||
1023 | } | ||
1024 | |||
1025 | if (!new_active) | ||
1026 | return; | ||
1027 | |||
1028 | /* | ||
1029 | * Destroy any 'skipped' pending states - states that were queued | ||
1030 | * before the newly active state. | ||
1031 | */ | ||
1032 | list_for_each_entry_safe(state, n, &vop_win->pending, head) { | ||
1033 | if (state == new_active) | ||
1034 | break; | ||
1035 | vop_win_state_complete(vop_win, state); | ||
1036 | vop_win_state_destroy(state); | ||
1037 | } | ||
1038 | |||
1039 | vop_win_state_complete(vop_win, new_active); | ||
1040 | |||
1041 | if (vop_win->active) | ||
1042 | vop_win_state_destroy(vop_win->active); | ||
1043 | vop_win->active = new_active; | ||
1044 | } | ||
1045 | |||
1046 | static bool vop_win_has_pending_state(struct vop_win *vop_win) | ||
1047 | { | ||
1048 | return !list_empty(&vop_win->pending); | ||
1049 | } | ||
1050 | |||
1051 | static irqreturn_t vop_isr_thread(int irq, void *data) | ||
1052 | { | ||
1053 | struct vop *vop = data; | ||
1054 | const struct vop_data *vop_data = vop->data; | ||
1055 | unsigned int i; | ||
1056 | |||
1057 | mutex_lock(&vop->vsync_mutex); | ||
1058 | |||
1059 | if (!vop->vsync_work_pending) | ||
1060 | goto done; | ||
1061 | |||
1062 | vop->vsync_work_pending = false; | ||
1063 | |||
1064 | for (i = 0; i < vop_data->win_size; i++) { | ||
1065 | struct vop_win *vop_win = &vop->win[i]; | ||
1066 | |||
1067 | vop_win_update_state(vop_win); | ||
1068 | if (vop_win_has_pending_state(vop_win)) | ||
1069 | vop->vsync_work_pending = true; | ||
1070 | } | ||
1071 | |||
1072 | done: | ||
1073 | mutex_unlock(&vop->vsync_mutex); | ||
1074 | |||
1075 | return IRQ_HANDLED; | ||
1076 | } | ||
1077 | |||
1078 | static irqreturn_t vop_isr(int irq, void *data) | ||
1079 | { | ||
1080 | struct vop *vop = data; | ||
1081 | uint32_t intr0_reg, active_irqs; | ||
1082 | unsigned long flags; | ||
1083 | |||
1084 | /* | ||
1085 | * The INTR_CTRL0 register has interrupt status, enable and clear bits; we | ||
1086 | * must hold irq_lock to avoid a race with enable/disable_vblank(). | ||
1087 | */ | ||
1088 | spin_lock_irqsave(&vop->irq_lock, flags); | ||
1089 | intr0_reg = vop_readl(vop, INTR_CTRL0); | ||
1090 | active_irqs = intr0_reg & INTR_MASK; | ||
1091 | /* Clear all active interrupt sources */ | ||
1092 | if (active_irqs) | ||
1093 | vop_writel(vop, INTR_CTRL0, | ||
1094 | intr0_reg | (active_irqs << INTR_CLR_SHIFT)); | ||
1095 | spin_unlock_irqrestore(&vop->irq_lock, flags); | ||
1096 | |||
1097 | /* This is expected for vop iommu irqs, since the irq is shared */ | ||
1098 | if (!active_irqs) | ||
1099 | return IRQ_NONE; | ||
1100 | |||
1101 | /* Only Frame Start Interrupt is enabled; other irqs are spurious. */ | ||
1102 | if (!(active_irqs & FS_INTR)) { | ||
1103 | DRM_ERROR("Unknown VOP IRQs: %#02x\n", active_irqs); | ||
1104 | return IRQ_NONE; | ||
1105 | } | ||
1106 | |||
1107 | drm_handle_vblank(vop->drm_dev, vop->pipe); | ||
1108 | |||
1109 | return (vop->vsync_work_pending) ? IRQ_WAKE_THREAD : IRQ_HANDLED; | ||
1110 | } | ||
1111 | |||
1112 | static int vop_create_crtc(struct vop *vop) | ||
1113 | { | ||
1114 | const struct vop_data *vop_data = vop->data; | ||
1115 | struct device *dev = vop->dev; | ||
1116 | struct drm_device *drm_dev = vop->drm_dev; | ||
1117 | struct drm_plane *primary = NULL, *cursor = NULL, *plane; | ||
1118 | struct drm_crtc *crtc = &vop->crtc; | ||
1119 | struct device_node *port; | ||
1120 | int ret; | ||
1121 | int i; | ||
1122 | |||
1123 | /* | ||
1124 | * Create drm_plane for primary and cursor planes first, since we need | ||
1125 | * to pass them to drm_crtc_init_with_planes, which sets the | ||
1126 | * "possible_crtcs" to the newly initialized crtc. | ||
1127 | */ | ||
1128 | for (i = 0; i < vop_data->win_size; i++) { | ||
1129 | struct vop_win *vop_win = &vop->win[i]; | ||
1130 | const struct vop_win_data *win_data = vop_win->data; | ||
1131 | |||
1132 | if (win_data->type != DRM_PLANE_TYPE_PRIMARY && | ||
1133 | win_data->type != DRM_PLANE_TYPE_CURSOR) | ||
1134 | continue; | ||
1135 | |||
1136 | ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base, | ||
1137 | 0, &vop_plane_funcs, | ||
1138 | win_data->phy->data_formats, | ||
1139 | win_data->phy->nformats, | ||
1140 | win_data->type); | ||
1141 | if (ret) { | ||
1142 | DRM_ERROR("failed to initialize plane\n"); | ||
1143 | goto err_cleanup_planes; | ||
1144 | } | ||
1145 | |||
1146 | plane = &vop_win->base; | ||
1147 | if (plane->type == DRM_PLANE_TYPE_PRIMARY) | ||
1148 | primary = plane; | ||
1149 | else if (plane->type == DRM_PLANE_TYPE_CURSOR) | ||
1150 | cursor = plane; | ||
1151 | } | ||
1152 | |||
1153 | ret = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor, | ||
1154 | &vop_crtc_funcs); | ||
1155 | if (ret) | ||
1156 | return ret; | ||
1157 | |||
1158 | drm_crtc_helper_add(crtc, &vop_crtc_helper_funcs); | ||
1159 | |||
1160 | /* | ||
1161 | * Create drm_planes for overlay windows with possible_crtcs restricted | ||
1162 | * to the newly created crtc. | ||
1163 | */ | ||
1164 | for (i = 0; i < vop_data->win_size; i++) { | ||
1165 | struct vop_win *vop_win = &vop->win[i]; | ||
1166 | const struct vop_win_data *win_data = vop_win->data; | ||
1167 | unsigned long possible_crtcs = 1 << drm_crtc_index(crtc); | ||
1168 | |||
1169 | if (win_data->type != DRM_PLANE_TYPE_OVERLAY) | ||
1170 | continue; | ||
1171 | |||
1172 | ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base, | ||
1173 | possible_crtcs, | ||
1174 | &vop_plane_funcs, | ||
1175 | win_data->phy->data_formats, | ||
1176 | win_data->phy->nformats, | ||
1177 | win_data->type); | ||
1178 | if (ret) { | ||
1179 | DRM_ERROR("failed to initialize overlay plane\n"); | ||
1180 | goto err_cleanup_crtc; | ||
1181 | } | ||
1182 | } | ||
1183 | |||
1184 | port = of_get_child_by_name(dev->of_node, "port"); | ||
1185 | if (!port) { | ||
1186 | DRM_ERROR("no port node found in %s\n", | ||
1187 | dev->of_node->full_name); | ||
1188 | goto err_cleanup_crtc; | ||
1189 | } | ||
1190 | |||
1191 | crtc->port = port; | ||
1192 | vop->pipe = drm_crtc_index(crtc); | ||
1193 | rockchip_register_crtc_funcs(drm_dev, &private_crtc_funcs, vop->pipe); | ||
1194 | |||
1195 | return 0; | ||
1196 | |||
1197 | err_cleanup_crtc: | ||
1198 | drm_crtc_cleanup(crtc); | ||
1199 | err_cleanup_planes: | ||
1200 | list_for_each_entry(plane, &drm_dev->mode_config.plane_list, head) | ||
1201 | drm_plane_cleanup(plane); | ||
1202 | return ret; | ||
1203 | } | ||
1204 | |||
1205 | static void vop_destroy_crtc(struct vop *vop) | ||
1206 | { | ||
1207 | struct drm_crtc *crtc = &vop->crtc; | ||
1208 | |||
1209 | rockchip_unregister_crtc_funcs(vop->drm_dev, vop->pipe); | ||
1210 | of_node_put(crtc->port); | ||
1211 | drm_crtc_cleanup(crtc); | ||
1212 | } | ||
1213 | |||
1214 | static int vop_initial(struct vop *vop) | ||
1215 | { | ||
1216 | const struct vop_data *vop_data = vop->data; | ||
1217 | const struct vop_reg_data *init_table = vop_data->init_table; | ||
1218 | struct reset_control *ahb_rst; | ||
1219 | int i, ret; | ||
1220 | |||
1221 | vop->hclk = devm_clk_get(vop->dev, "hclk_vop"); | ||
1222 | if (IS_ERR(vop->hclk)) { | ||
1223 | dev_err(vop->dev, "failed to get hclk source\n"); | ||
1224 | return PTR_ERR(vop->hclk); | ||
1225 | } | ||
1226 | vop->aclk = devm_clk_get(vop->dev, "aclk_vop"); | ||
1227 | if (IS_ERR(vop->aclk)) { | ||
1228 | dev_err(vop->dev, "failed to get aclk source\n"); | ||
1229 | return PTR_ERR(vop->aclk); | ||
1230 | } | ||
1231 | vop->dclk = devm_clk_get(vop->dev, "dclk_vop"); | ||
1232 | if (IS_ERR(vop->dclk)) { | ||
1233 | dev_err(vop->dev, "failed to get dclk source\n"); | ||
1234 | return PTR_ERR(vop->dclk); | ||
1235 | } | ||
1236 | |||
1237 | ret = clk_prepare(vop->hclk); | ||
1238 | if (ret < 0) { | ||
1239 | dev_err(vop->dev, "failed to prepare hclk\n"); | ||
1240 | return ret; | ||
1241 | } | ||
1242 | |||
1243 | ret = clk_prepare(vop->dclk); | ||
1244 | if (ret < 0) { | ||
1245 | dev_err(vop->dev, "failed to prepare dclk\n"); | ||
1246 | goto err_unprepare_hclk; | ||
1247 | } | ||
1248 | |||
1249 | ret = clk_prepare(vop->aclk); | ||
1250 | if (ret < 0) { | ||
1251 | dev_err(vop->dev, "failed to prepare aclk\n"); | ||
1252 | goto err_unprepare_dclk; | ||
1253 | } | ||
1254 | |||
1255 | /* | ||
1256 | * Enable hclk so that we can configure the vop registers. | ||
1257 | */ | ||
1258 | ret = clk_enable(vop->hclk); | ||
1259 | if (ret < 0) { | ||
1260 | dev_err(vop->dev, "failed to prepare aclk\n"); | ||
1261 | goto err_unprepare_aclk; | ||
1262 | } | ||
1263 | /* | ||
1264 | * Do an ahb (hclk) reset to reset all of the vop registers. | ||
1265 | */ | ||
1266 | ahb_rst = devm_reset_control_get(vop->dev, "ahb"); | ||
1267 | if (IS_ERR(ahb_rst)) { | ||
1268 | dev_err(vop->dev, "failed to get ahb reset\n"); | ||
1269 | ret = PTR_ERR(ahb_rst); | ||
1270 | goto err_disable_hclk; | ||
1271 | } | ||
1272 | reset_control_assert(ahb_rst); | ||
1273 | usleep_range(10, 20); | ||
1274 | reset_control_deassert(ahb_rst); | ||
1275 | |||
1276 | memcpy(vop->regsbak, vop->regs, vop->len); | ||
1277 | |||
1278 | for (i = 0; i < vop_data->table_size; i++) | ||
1279 | vop_writel(vop, init_table[i].offset, init_table[i].value); | ||
1280 | |||
1281 | for (i = 0; i < vop_data->win_size; i++) { | ||
1282 | const struct vop_win_data *win = &vop_data->win[i]; | ||
1283 | |||
1284 | VOP_WIN_SET(vop, win, enable, 0); | ||
1285 | } | ||
1286 | |||
1287 | vop_cfg_done(vop); | ||
1288 | |||
1289 | /* | ||
1290 | * Do a dclk reset so that all of the configuration above takes effect. | ||
1291 | */ | ||
1292 | vop->dclk_rst = devm_reset_control_get(vop->dev, "dclk"); | ||
1293 | if (IS_ERR(vop->dclk_rst)) { | ||
1294 | dev_err(vop->dev, "failed to get dclk reset\n"); | ||
1295 | ret = PTR_ERR(vop->dclk_rst); | ||
1296 | goto err_unprepare_aclk; | ||
1297 | } | ||
1298 | reset_control_assert(vop->dclk_rst); | ||
1299 | usleep_range(10, 20); | ||
1300 | reset_control_deassert(vop->dclk_rst); | ||
1301 | |||
1302 | clk_disable(vop->hclk); | ||
1303 | |||
1304 | vop->dpms = DRM_MODE_DPMS_OFF; | ||
1305 | |||
1306 | return 0; | ||
1307 | |||
1308 | err_disable_hclk: | ||
1309 | clk_disable(vop->hclk); | ||
1310 | err_unprepare_aclk: | ||
1311 | clk_unprepare(vop->aclk); | ||
1312 | err_unprepare_dclk: | ||
1313 | clk_unprepare(vop->dclk); | ||
1314 | err_unprepare_hclk: | ||
1315 | clk_unprepare(vop->hclk); | ||
1316 | return ret; | ||
1317 | } | ||
1318 | |||
1319 | /* | ||
1320 | * Initialize the vop->win array elements. | ||
1321 | */ | ||
1322 | static void vop_win_init(struct vop *vop) | ||
1323 | { | ||
1324 | const struct vop_data *vop_data = vop->data; | ||
1325 | unsigned int i; | ||
1326 | |||
1327 | for (i = 0; i < vop_data->win_size; i++) { | ||
1328 | struct vop_win *vop_win = &vop->win[i]; | ||
1329 | const struct vop_win_data *win_data = &vop_data->win[i]; | ||
1330 | |||
1331 | vop_win->data = win_data; | ||
1332 | vop_win->vop = vop; | ||
1333 | INIT_LIST_HEAD(&vop_win->pending); | ||
1334 | } | ||
1335 | } | ||
1336 | |||
1337 | static int vop_bind(struct device *dev, struct device *master, void *data) | ||
1338 | { | ||
1339 | struct platform_device *pdev = to_platform_device(dev); | ||
1340 | const struct of_device_id *of_id; | ||
1341 | const struct vop_data *vop_data; | ||
1342 | struct drm_device *drm_dev = data; | ||
1343 | struct vop *vop; | ||
1344 | struct resource *res; | ||
1345 | size_t alloc_size; | ||
1346 | int ret; | ||
1347 | |||
1348 | of_id = of_match_device(vop_driver_dt_match, dev); | ||
1349 | vop_data = of_id->data; | ||
1350 | if (!vop_data) | ||
1351 | return -ENODEV; | ||
1352 | |||
1353 | /* Allocate vop struct and its vop_win array */ | ||
1354 | alloc_size = sizeof(*vop) + sizeof(*vop->win) * vop_data->win_size; | ||
1355 | vop = devm_kzalloc(dev, alloc_size, GFP_KERNEL); | ||
1356 | if (!vop) | ||
1357 | return -ENOMEM; | ||
1358 | |||
1359 | vop->dev = dev; | ||
1360 | vop->data = vop_data; | ||
1361 | vop->drm_dev = drm_dev; | ||
1362 | dev_set_drvdata(dev, vop); | ||
1363 | |||
1364 | vop_win_init(vop); | ||
1365 | |||
1366 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1367 | vop->len = resource_size(res); | ||
1368 | vop->regs = devm_ioremap_resource(dev, res); | ||
1369 | if (IS_ERR(vop->regs)) | ||
1370 | return PTR_ERR(vop->regs); | ||
1371 | |||
1372 | vop->regsbak = devm_kzalloc(dev, vop->len, GFP_KERNEL); | ||
1373 | if (!vop->regsbak) | ||
1374 | return -ENOMEM; | ||
1375 | |||
1376 | ret = vop_initial(vop); | ||
1377 | if (ret < 0) { | ||
1378 | dev_err(&pdev->dev, "cannot initialize vop device - err %d\n", ret); | ||
1379 | return ret; | ||
1380 | } | ||
1381 | |||
1382 | vop->irq = platform_get_irq(pdev, 0); | ||
1383 | if (vop->irq < 0) { | ||
1384 | dev_err(dev, "cannot find irq for vop\n"); | ||
1385 | return vop->irq; | ||
1386 | } | ||
1387 | |||
1388 | spin_lock_init(&vop->reg_lock); | ||
1389 | spin_lock_init(&vop->irq_lock); | ||
1390 | |||
1391 | mutex_init(&vop->vsync_mutex); | ||
1392 | |||
1393 | ret = devm_request_threaded_irq(dev, vop->irq, vop_isr, vop_isr_thread, | ||
1394 | IRQF_SHARED, dev_name(dev), vop); | ||
1395 | if (ret) | ||
1396 | return ret; | ||
1397 | |||
1398 | /* IRQ is initially disabled; it gets enabled in vop_enable() */ | ||
1399 | disable_irq(vop->irq); | ||
1400 | |||
1401 | ret = vop_create_crtc(vop); | ||
1402 | if (ret) | ||
1403 | return ret; | ||
1404 | |||
1405 | pm_runtime_enable(&pdev->dev); | ||
1406 | return 0; | ||
1407 | } | ||
1408 | |||
1409 | static void vop_unbind(struct device *dev, struct device *master, void *data) | ||
1410 | { | ||
1411 | struct vop *vop = dev_get_drvdata(dev); | ||
1412 | |||
1413 | pm_runtime_disable(dev); | ||
1414 | vop_destroy_crtc(vop); | ||
1415 | } | ||
1416 | |||
1417 | static const struct component_ops vop_component_ops = { | ||
1418 | .bind = vop_bind, | ||
1419 | .unbind = vop_unbind, | ||
1420 | }; | ||
1421 | |||
1422 | static int vop_probe(struct platform_device *pdev) | ||
1423 | { | ||
1424 | struct device *dev = &pdev->dev; | ||
1425 | |||
1426 | if (!dev->of_node) { | ||
1427 | dev_err(dev, "can't find vop devices\n"); | ||
1428 | return -ENODEV; | ||
1429 | } | ||
1430 | |||
1431 | return component_add(dev, &vop_component_ops); | ||
1432 | } | ||
1433 | |||
1434 | static int vop_remove(struct platform_device *pdev) | ||
1435 | { | ||
1436 | component_del(&pdev->dev, &vop_component_ops); | ||
1437 | |||
1438 | return 0; | ||
1439 | } | ||
1440 | |||
1441 | struct platform_driver vop_platform_driver = { | ||
1442 | .probe = vop_probe, | ||
1443 | .remove = vop_remove, | ||
1444 | .driver = { | ||
1445 | .name = "rockchip-vop", | ||
1446 | .owner = THIS_MODULE, | ||
1447 | .of_match_table = of_match_ptr(vop_driver_dt_match), | ||
1448 | }, | ||
1449 | }; | ||
1450 | |||
1451 | module_platform_driver(vop_platform_driver); | ||
1452 | |||
1453 | MODULE_AUTHOR("Mark Yao <mark.yao@rock-chips.com>"); | ||
1454 | MODULE_DESCRIPTION("ROCKCHIP VOP Driver"); | ||
1455 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h new file mode 100644 index 000000000000..63e9b3a084c5 --- /dev/null +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h | |||
@@ -0,0 +1,201 @@ | |||
1 | /* | ||
2 | * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd | ||
3 | * Author:Mark Yao <mark.yao@rock-chips.com> | ||
4 | * | ||
5 | * This software is licensed under the terms of the GNU General Public | ||
6 | * License version 2, as published by the Free Software Foundation, and | ||
7 | * may be copied, distributed, and modified under those terms. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ROCKCHIP_DRM_VOP_H | ||
16 | #define _ROCKCHIP_DRM_VOP_H | ||
17 | |||
18 | /* register definition */ | ||
19 | #define REG_CFG_DONE 0x0000 | ||
20 | #define VERSION_INFO 0x0004 | ||
21 | #define SYS_CTRL 0x0008 | ||
22 | #define SYS_CTRL1 0x000c | ||
23 | #define DSP_CTRL0 0x0010 | ||
24 | #define DSP_CTRL1 0x0014 | ||
25 | #define DSP_BG 0x0018 | ||
26 | #define MCU_CTRL 0x001c | ||
27 | #define INTR_CTRL0 0x0020 | ||
28 | #define INTR_CTRL1 0x0024 | ||
29 | #define WIN0_CTRL0 0x0030 | ||
30 | #define WIN0_CTRL1 0x0034 | ||
31 | #define WIN0_COLOR_KEY 0x0038 | ||
32 | #define WIN0_VIR 0x003c | ||
33 | #define WIN0_YRGB_MST 0x0040 | ||
34 | #define WIN0_CBR_MST 0x0044 | ||
35 | #define WIN0_ACT_INFO 0x0048 | ||
36 | #define WIN0_DSP_INFO 0x004c | ||
37 | #define WIN0_DSP_ST 0x0050 | ||
38 | #define WIN0_SCL_FACTOR_YRGB 0x0054 | ||
39 | #define WIN0_SCL_FACTOR_CBR 0x0058 | ||
40 | #define WIN0_SCL_OFFSET 0x005c | ||
41 | #define WIN0_SRC_ALPHA_CTRL 0x0060 | ||
42 | #define WIN0_DST_ALPHA_CTRL 0x0064 | ||
43 | #define WIN0_FADING_CTRL 0x0068 | ||
44 | /* win1 register */ | ||
45 | #define WIN1_CTRL0 0x0070 | ||
46 | #define WIN1_CTRL1 0x0074 | ||
47 | #define WIN1_COLOR_KEY 0x0078 | ||
48 | #define WIN1_VIR 0x007c | ||
49 | #define WIN1_YRGB_MST 0x0080 | ||
50 | #define WIN1_CBR_MST 0x0084 | ||
51 | #define WIN1_ACT_INFO 0x0088 | ||
52 | #define WIN1_DSP_INFO 0x008c | ||
53 | #define WIN1_DSP_ST 0x0090 | ||
54 | #define WIN1_SCL_FACTOR_YRGB 0x0094 | ||
55 | #define WIN1_SCL_FACTOR_CBR 0x0098 | ||
56 | #define WIN1_SCL_OFFSET 0x009c | ||
57 | #define WIN1_SRC_ALPHA_CTRL 0x00a0 | ||
58 | #define WIN1_DST_ALPHA_CTRL 0x00a4 | ||
59 | #define WIN1_FADING_CTRL 0x00a8 | ||
60 | /* win2 register */ | ||
61 | #define WIN2_CTRL0 0x00b0 | ||
62 | #define WIN2_CTRL1 0x00b4 | ||
63 | #define WIN2_VIR0_1 0x00b8 | ||
64 | #define WIN2_VIR2_3 0x00bc | ||
65 | #define WIN2_MST0 0x00c0 | ||
66 | #define WIN2_DSP_INFO0 0x00c4 | ||
67 | #define WIN2_DSP_ST0 0x00c8 | ||
68 | #define WIN2_COLOR_KEY 0x00cc | ||
69 | #define WIN2_MST1 0x00d0 | ||
70 | #define WIN2_DSP_INFO1 0x00d4 | ||
71 | #define WIN2_DSP_ST1 0x00d8 | ||
72 | #define WIN2_SRC_ALPHA_CTRL 0x00dc | ||
73 | #define WIN2_MST2 0x00e0 | ||
74 | #define WIN2_DSP_INFO2 0x00e4 | ||
75 | #define WIN2_DSP_ST2 0x00e8 | ||
76 | #define WIN2_DST_ALPHA_CTRL 0x00ec | ||
77 | #define WIN2_MST3 0x00f0 | ||
78 | #define WIN2_DSP_INFO3 0x00f4 | ||
79 | #define WIN2_DSP_ST3 0x00f8 | ||
80 | #define WIN2_FADING_CTRL 0x00fc | ||
81 | /* win3 register */ | ||
82 | #define WIN3_CTRL0 0x0100 | ||
83 | #define WIN3_CTRL1 0x0104 | ||
84 | #define WIN3_VIR0_1 0x0108 | ||
85 | #define WIN3_VIR2_3 0x010c | ||
86 | #define WIN3_MST0 0x0110 | ||
87 | #define WIN3_DSP_INFO0 0x0114 | ||
88 | #define WIN3_DSP_ST0 0x0118 | ||
89 | #define WIN3_COLOR_KEY 0x011c | ||
90 | #define WIN3_MST1 0x0120 | ||
91 | #define WIN3_DSP_INFO1 0x0124 | ||
92 | #define WIN3_DSP_ST1 0x0128 | ||
93 | #define WIN3_SRC_ALPHA_CTRL 0x012c | ||
94 | #define WIN3_MST2 0x0130 | ||
95 | #define WIN3_DSP_INFO2 0x0134 | ||
96 | #define WIN3_DSP_ST2 0x0138 | ||
97 | #define WIN3_DST_ALPHA_CTRL 0x013c | ||
98 | #define WIN3_MST3 0x0140 | ||
99 | #define WIN3_DSP_INFO3 0x0144 | ||
100 | #define WIN3_DSP_ST3 0x0148 | ||
101 | #define WIN3_FADING_CTRL 0x014c | ||
102 | /* hwc register */ | ||
103 | #define HWC_CTRL0 0x0150 | ||
104 | #define HWC_CTRL1 0x0154 | ||
105 | #define HWC_MST 0x0158 | ||
106 | #define HWC_DSP_ST 0x015c | ||
107 | #define HWC_SRC_ALPHA_CTRL 0x0160 | ||
108 | #define HWC_DST_ALPHA_CTRL 0x0164 | ||
109 | #define HWC_FADING_CTRL 0x0168 | ||
110 | /* post process register */ | ||
111 | #define POST_DSP_HACT_INFO 0x0170 | ||
112 | #define POST_DSP_VACT_INFO 0x0174 | ||
113 | #define POST_SCL_FACTOR_YRGB 0x0178 | ||
114 | #define POST_SCL_CTRL 0x0180 | ||
115 | #define POST_DSP_VACT_INFO_F1 0x0184 | ||
116 | #define DSP_HTOTAL_HS_END 0x0188 | ||
117 | #define DSP_HACT_ST_END 0x018c | ||
118 | #define DSP_VTOTAL_VS_END 0x0190 | ||
119 | #define DSP_VACT_ST_END 0x0194 | ||
120 | #define DSP_VS_ST_END_F1 0x0198 | ||
121 | #define DSP_VACT_ST_END_F1 0x019c | ||
122 | /* register definition end */ | ||
123 | |||
124 | /* interrupt define */ | ||
125 | #define DSP_HOLD_VALID_INTR (1 << 0) | ||
126 | #define FS_INTR (1 << 1) | ||
127 | #define LINE_FLAG_INTR (1 << 2) | ||
128 | #define BUS_ERROR_INTR (1 << 3) | ||
129 | |||
130 | #define INTR_MASK (DSP_HOLD_VALID_INTR | FS_INTR | \ | ||
131 | LINE_FLAG_INTR | BUS_ERROR_INTR) | ||
132 | |||
133 | #define DSP_HOLD_VALID_INTR_EN(x) ((x) << 4) | ||
134 | #define FS_INTR_EN(x) ((x) << 5) | ||
135 | #define LINE_FLAG_INTR_EN(x) ((x) << 6) | ||
136 | #define BUS_ERROR_INTR_EN(x) ((x) << 7) | ||
137 | #define DSP_HOLD_VALID_INTR_MASK (1 << 4) | ||
138 | #define FS_INTR_MASK (1 << 5) | ||
139 | #define LINE_FLAG_INTR_MASK (1 << 6) | ||
140 | #define BUS_ERROR_INTR_MASK (1 << 7) | ||
141 | |||
142 | #define INTR_CLR_SHIFT 8 | ||
143 | #define DSP_HOLD_VALID_INTR_CLR (1 << (INTR_CLR_SHIFT + 0)) | ||
144 | #define FS_INTR_CLR (1 << (INTR_CLR_SHIFT + 1)) | ||
145 | #define LINE_FLAG_INTR_CLR (1 << (INTR_CLR_SHIFT + 2)) | ||
146 | #define BUS_ERROR_INTR_CLR (1 << (INTR_CLR_SHIFT + 3)) | ||
147 | |||
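Reading the three groups of defines above together (an illustrative note, not part of the patch): each interrupt source occupies one bit in the low nybble for raw status, the same bit shifted up by 4 for its enable/mask, and shifted up by 8 for its write-one-to-clear bit. For example, FS_INTR = 0x002, FS_INTR_EN(1) = 0x020 and FS_INTR_CLR = 0x200.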
148 | #define DSP_LINE_NUM(x) (((x) & 0x1fff) << 12) | ||
149 | #define DSP_LINE_NUM_MASK (0x1fff << 12) | ||
150 | |||
151 | /* src alpha ctrl define */ | ||
152 | #define SRC_FADING_VALUE(x) (((x) & 0xff) << 24) | ||
153 | #define SRC_GLOBAL_ALPHA(x) (((x) & 0xff) << 16) | ||
154 | #define SRC_FACTOR_M0(x) (((x) & 0x7) << 6) | ||
155 | #define SRC_ALPHA_CAL_M0(x) (((x) & 0x1) << 5) | ||
156 | #define SRC_BLEND_M0(x) (((x) & 0x3) << 3) | ||
157 | #define SRC_ALPHA_M0(x) (((x) & 0x1) << 2) | ||
158 | #define SRC_COLOR_M0(x) (((x) & 0x1) << 1) | ||
159 | #define SRC_ALPHA_EN(x) (((x) & 0x1) << 0) | ||
160 | /* dst alpha ctrl define */ | ||
161 | #define DST_FACTOR_M0(x) (((x) & 0x7) << 6) | ||
162 | |||
163 | /* | ||
164 | * display output interfaces supported by the rockchip lcdc | ||
165 | */ | ||
166 | #define ROCKCHIP_OUT_MODE_P888 0 | ||
167 | #define ROCKCHIP_OUT_MODE_P666 1 | ||
168 | #define ROCKCHIP_OUT_MODE_P565 2 | ||
169 | /* for use with a special output interface */ | ||
170 | #define ROCKCHIP_OUT_MODE_AAAA 15 | ||
171 | |||
172 | enum alpha_mode { | ||
173 | ALPHA_STRAIGHT, | ||
174 | ALPHA_INVERSE, | ||
175 | }; | ||
176 | |||
177 | enum global_blend_mode { | ||
178 | ALPHA_GLOBAL, | ||
179 | ALPHA_PER_PIX, | ||
180 | ALPHA_PER_PIX_GLOBAL, | ||
181 | }; | ||
182 | |||
183 | enum alpha_cal_mode { | ||
184 | ALPHA_SATURATION, | ||
185 | ALPHA_NO_SATURATION, | ||
186 | }; | ||
187 | |||
188 | enum color_mode { | ||
189 | ALPHA_SRC_PRE_MUL, | ||
190 | ALPHA_SRC_NO_PRE_MUL, | ||
191 | }; | ||
192 | |||
193 | enum factor_mode { | ||
194 | ALPHA_ZERO, | ||
195 | ALPHA_ONE, | ||
196 | ALPHA_SRC, | ||
197 | ALPHA_SRC_INVERSE, | ||
198 | ALPHA_SRC_GLOBAL, | ||
199 | }; | ||
200 | |||
201 | #endif /* _ROCKCHIP_DRM_VOP_H */ | ||
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index dd5112265cc9..d0a1261eb1ba 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig | |||
@@ -152,6 +152,18 @@ config OMAP_IOMMU_DEBUG | |||
152 | 152 | ||
153 | Say N unless you know you need this. | 153 | Say N unless you know you need this. |
154 | 154 | ||
155 | config ROCKCHIP_IOMMU | ||
156 | bool "Rockchip IOMMU Support" | ||
157 | depends on ARCH_ROCKCHIP | ||
158 | select IOMMU_API | ||
159 | select ARM_DMA_USE_IOMMU | ||
160 | help | ||
161 | Support for IOMMUs found on Rockchip rk32xx SoCs. | ||
162 | These IOMMUs allow virtualization of the address space used by most | ||
163 | cores within the multimedia subsystem. | ||
164 | Say Y here if you are using a Rockchip SoC that includes an IOMMU | ||
165 | device. | ||
166 | |||
155 | config TEGRA_IOMMU_GART | 167 | config TEGRA_IOMMU_GART |
156 | bool "Tegra GART IOMMU Support" | 168 | bool "Tegra GART IOMMU Support" |
157 | depends on ARCH_TEGRA_2x_SOC | 169 | depends on ARCH_TEGRA_2x_SOC |
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index 16edef74b8ee..3e47ef35a35f 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile | |||
@@ -13,6 +13,7 @@ obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o | |||
13 | obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o | 13 | obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o |
14 | obj-$(CONFIG_OMAP_IOMMU) += omap-iommu2.o | 14 | obj-$(CONFIG_OMAP_IOMMU) += omap-iommu2.o |
15 | obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o | 15 | obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o |
16 | obj-$(CONFIG_ROCKCHIP_IOMMU) += rockchip-iommu.o | ||
16 | obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o | 17 | obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o |
17 | obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o | 18 | obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o |
18 | obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o | 19 | obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o |
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c new file mode 100644 index 000000000000..b2023af384b9 --- /dev/null +++ b/drivers/iommu/rockchip-iommu.c | |||
@@ -0,0 +1,1038 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License version 2 as | ||
4 | * published by the Free Software Foundation. | ||
5 | */ | ||
6 | |||
7 | #include <asm/cacheflush.h> | ||
8 | #include <asm/pgtable.h> | ||
9 | #include <linux/compiler.h> | ||
10 | #include <linux/delay.h> | ||
11 | #include <linux/device.h> | ||
12 | #include <linux/errno.h> | ||
13 | #include <linux/interrupt.h> | ||
14 | #include <linux/io.h> | ||
15 | #include <linux/iommu.h> | ||
16 | #include <linux/jiffies.h> | ||
17 | #include <linux/list.h> | ||
18 | #include <linux/mm.h> | ||
19 | #include <linux/module.h> | ||
20 | #include <linux/of.h> | ||
21 | #include <linux/of_platform.h> | ||
22 | #include <linux/platform_device.h> | ||
23 | #include <linux/slab.h> | ||
24 | #include <linux/spinlock.h> | ||
25 | |||
26 | /* MMU register offsets */ | ||
27 | #define RK_MMU_DTE_ADDR 0x00 /* Directory table address */ | ||
28 | #define RK_MMU_STATUS 0x04 | ||
29 | #define RK_MMU_COMMAND 0x08 | ||
30 | #define RK_MMU_PAGE_FAULT_ADDR 0x0C /* IOVA of last page fault */ | ||
31 | #define RK_MMU_ZAP_ONE_LINE 0x10 /* Shootdown one IOTLB entry */ | ||
32 | #define RK_MMU_INT_RAWSTAT 0x14 /* IRQ status ignoring mask */ | ||
33 | #define RK_MMU_INT_CLEAR 0x18 /* Acknowledge and re-arm irq */ | ||
34 | #define RK_MMU_INT_MASK 0x1C /* IRQ enable */ | ||
35 | #define RK_MMU_INT_STATUS 0x20 /* IRQ status after masking */ | ||
36 | #define RK_MMU_AUTO_GATING 0x24 | ||
37 | |||
38 | #define DTE_ADDR_DUMMY 0xCAFEBABE | ||
39 | #define FORCE_RESET_TIMEOUT 100 /* ms */ | ||
40 | |||
41 | /* RK_MMU_STATUS fields */ | ||
42 | #define RK_MMU_STATUS_PAGING_ENABLED BIT(0) | ||
43 | #define RK_MMU_STATUS_PAGE_FAULT_ACTIVE BIT(1) | ||
44 | #define RK_MMU_STATUS_STALL_ACTIVE BIT(2) | ||
45 | #define RK_MMU_STATUS_IDLE BIT(3) | ||
46 | #define RK_MMU_STATUS_REPLAY_BUFFER_EMPTY BIT(4) | ||
47 | #define RK_MMU_STATUS_PAGE_FAULT_IS_WRITE BIT(5) | ||
48 | #define RK_MMU_STATUS_STALL_NOT_ACTIVE BIT(31) | ||
49 | |||
50 | /* RK_MMU_COMMAND command values */ | ||
51 | #define RK_MMU_CMD_ENABLE_PAGING 0 /* Enable memory translation */ | ||
52 | #define RK_MMU_CMD_DISABLE_PAGING 1 /* Disable memory translation */ | ||
53 | #define RK_MMU_CMD_ENABLE_STALL 2 /* Stall paging to allow other cmds */ | ||
54 | #define RK_MMU_CMD_DISABLE_STALL 3 /* Stop stalling; re-enables paging */ | ||
55 | #define RK_MMU_CMD_ZAP_CACHE 4 /* Shoot down entire IOTLB */ | ||
56 | #define RK_MMU_CMD_PAGE_FAULT_DONE 5 /* Clear page fault */ | ||
57 | #define RK_MMU_CMD_FORCE_RESET 6 /* Reset all registers */ | ||
58 | |||
59 | /* RK_MMU_INT_* register fields */ | ||
60 | #define RK_MMU_IRQ_PAGE_FAULT 0x01 /* page fault */ | ||
61 | #define RK_MMU_IRQ_BUS_ERROR 0x02 /* bus read error */ | ||
62 | #define RK_MMU_IRQ_MASK (RK_MMU_IRQ_PAGE_FAULT | RK_MMU_IRQ_BUS_ERROR) | ||
63 | |||
64 | #define NUM_DT_ENTRIES 1024 | ||
65 | #define NUM_PT_ENTRIES 1024 | ||
66 | |||
67 | #define SPAGE_ORDER 12 | ||
68 | #define SPAGE_SIZE (1 << SPAGE_ORDER) | ||
69 | |||
70 | /* | ||
71 | * Support mapping any size that fits in one page table: | ||
72 | * 4 KiB to 4 MiB | ||
73 | */ | ||
74 | #define RK_IOMMU_PGSIZE_BITMAP 0x007ff000 | ||
75 | |||
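For illustration only (not part of the patch): 0x007ff000 has bits 12 through 22 set, so the IOMMU core will only ever ask this driver to map power-of-two sizes from 4 KiB (1 << 12) up to 4 MiB (1 << 22), i.e. ranges that never span more than one page table. A throwaway userspace check of that reading:

    #include <stdio.h>

    int main(void)
    {
        unsigned long bitmap = 0x007ff000UL;    /* same value as the define above */
        for (int bit = 0; bit < 32; bit++)
            if (bitmap & (1UL << bit))
                printf("supported mapping size: %lu KiB\n", (1UL << bit) >> 10);
        return 0;
    }

This prints the eleven sizes 4 KiB, 8 KiB, ..., 4096 KiB.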
76 | #define IOMMU_REG_POLL_COUNT_FAST 1000 | ||
77 | |||
78 | struct rk_iommu_domain { | ||
79 | struct list_head iommus; | ||
80 | u32 *dt; /* page directory table */ | ||
81 | spinlock_t iommus_lock; /* lock for iommus list */ | ||
82 | spinlock_t dt_lock; /* lock for modifying page directory table */ | ||
83 | }; | ||
84 | |||
85 | struct rk_iommu { | ||
86 | struct device *dev; | ||
87 | void __iomem *base; | ||
88 | int irq; | ||
89 | struct list_head node; /* entry in rk_iommu_domain.iommus */ | ||
90 | struct iommu_domain *domain; /* domain to which iommu is attached */ | ||
91 | }; | ||
92 | |||
93 | static inline void rk_table_flush(u32 *va, unsigned int count) | ||
94 | { | ||
95 | phys_addr_t pa_start = virt_to_phys(va); | ||
96 | phys_addr_t pa_end = virt_to_phys(va + count); | ||
97 | size_t size = pa_end - pa_start; | ||
98 | |||
99 | __cpuc_flush_dcache_area(va, size); | ||
100 | outer_flush_range(pa_start, pa_end); | ||
101 | } | ||
102 | |||
103 | /** | ||
104 | * Inspired by _wait_for in intel_drv.h | ||
105 | * This is NOT safe for use in interrupt context. | ||
106 | * | ||
107 | * Note that it's important that we check the condition again after having | ||
108 | * timed out, since the timeout could be due to preemption or similar and | ||
109 | * we've never had a chance to check the condition before the timeout. | ||
110 | */ | ||
111 | #define rk_wait_for(COND, MS) ({ \ | ||
112 | unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \ | ||
113 | int ret__ = 0; \ | ||
114 | while (!(COND)) { \ | ||
115 | if (time_after(jiffies, timeout__)) { \ | ||
116 | ret__ = (COND) ? 0 : -ETIMEDOUT; \ | ||
117 | break; \ | ||
118 | } \ | ||
119 | usleep_range(50, 100); \ | ||
120 | } \ | ||
121 | ret__; \ | ||
122 | }) | ||
123 | |||
124 | /* | ||
125 | * The Rockchip rk3288 iommu uses a 2-level page table. | ||
126 | * The first level is the "Directory Table" (DT). | ||
127 | * The DT consists of 1024 4-byte Directory Table Entries (DTEs), each pointing | ||
128 | * to a "Page Table". | ||
129 | * The second level is the 1024 Page Tables (PT). | ||
130 | * Each PT consists of 1024 4-byte Page Table Entries (PTEs), each pointing to | ||
131 | * a 4 KB page of physical memory. | ||
132 | * | ||
133 | * The DT and each PT fit in a single 4 KB page (1024 entries * 4 bytes). | ||
134 | * Each iommu device has a MMU_DTE_ADDR register that contains the physical | ||
135 | * address of the start of the DT page. | ||
136 | * | ||
137 | * The structure of the page table is as follows: | ||
138 | * | ||
139 | * DT | ||
140 | * MMU_DTE_ADDR -> +-----+ | ||
141 | * | | | ||
142 | * +-----+ PT | ||
143 | * | DTE | -> +-----+ | ||
144 | * +-----+ | | Memory | ||
145 | * | | +-----+ Page | ||
146 | * | | | PTE | -> +-----+ | ||
147 | * +-----+ +-----+ | | | ||
148 | * | | | | | ||
149 | * | | | | | ||
150 | * +-----+ | | | ||
151 | * | | | ||
152 | * | | | ||
153 | * +-----+ | ||
154 | */ | ||
155 | |||
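A quick sanity check on the numbers in the comment above (illustrative arithmetic, not part of the patch): 1024 DTEs x 1024 PTEs x 4 KiB pages = 4 GiB of addressable IOVA space, and the DT and each PT occupy exactly one page, since 1024 entries x 4 bytes = 4 KiB. That is why a 32-bit IOVA splits cleanly into 10 + 10 + 12 bits, as described further below.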
156 | /* | ||
157 | * Each DTE has a PT address and a valid bit: | ||
158 | * +---------------------+-----------+-+ | ||
159 | * | PT address | Reserved |V| | ||
160 | * +---------------------+-----------+-+ | ||
161 | * 31:12 - PT address (PTs always start on a 4 KB boundary) | ||
162 | * 11: 1 - Reserved | ||
163 | * 0 - 1 if PT @ PT address is valid | ||
164 | */ | ||
165 | #define RK_DTE_PT_ADDRESS_MASK 0xfffff000 | ||
166 | #define RK_DTE_PT_VALID BIT(0) | ||
167 | |||
168 | static inline phys_addr_t rk_dte_pt_address(u32 dte) | ||
169 | { | ||
170 | return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK; | ||
171 | } | ||
172 | |||
173 | static inline bool rk_dte_is_pt_valid(u32 dte) | ||
174 | { | ||
175 | return dte & RK_DTE_PT_VALID; | ||
176 | } | ||
177 | |||
178 | static u32 rk_mk_dte(u32 *pt) | ||
179 | { | ||
180 | phys_addr_t pt_phys = virt_to_phys(pt); | ||
181 | return (pt_phys & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID; | ||
182 | } | ||
183 | |||
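As a worked example (the physical address is hypothetical, chosen only for illustration): a page table allocated at 0x10000000 would yield the DTE 0x10000001, i.e. the 4 KiB-aligned PT address in bits 31:12 with the valid bit set:

    u32 pt_phys = 0x10000000;  /* hypothetical, 4 KiB aligned PT address */
    u32 dte = (pt_phys & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;  /* 0x10000001 */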
184 | /* | ||
185 | * Each PTE has a Page address, some flags and a valid bit: | ||
186 | * +---------------------+---+-------+-+ | ||
187 | * | Page address |Rsv| Flags |V| | ||
188 | * +---------------------+---+-------+-+ | ||
189 | * 31:12 - Page address (Pages always start on a 4 KB boundary) | ||
190 | * 11: 9 - Reserved | ||
191 | * 8: 1 - Flags | ||
192 | * 8 - Read allocate - allocate cache space on read misses | ||
193 | * 7 - Read cache - enable cache & prefetch of data | ||
194 | * 6 - Write buffer - enable delaying writes on their way to memory | ||
195 | * 5 - Write allocate - allocate cache space on write misses | ||
196 | * 4 - Write cache - different writes can be merged together | ||
197 | * 3 - Override cache attributes | ||
198 | * if 1, bits 4-8 control cache attributes | ||
199 | * if 0, the system bus defaults are used | ||
200 | * 2 - Writable | ||
201 | * 1 - Readable | ||
202 | * 0 - 1 if Page @ Page address is valid | ||
203 | */ | ||
204 | #define RK_PTE_PAGE_ADDRESS_MASK 0xfffff000 | ||
205 | #define RK_PTE_PAGE_FLAGS_MASK 0x000001fe | ||
206 | #define RK_PTE_PAGE_WRITABLE BIT(2) | ||
207 | #define RK_PTE_PAGE_READABLE BIT(1) | ||
208 | #define RK_PTE_PAGE_VALID BIT(0) | ||
209 | |||
210 | static inline phys_addr_t rk_pte_page_address(u32 pte) | ||
211 | { | ||
212 | return (phys_addr_t)pte & RK_PTE_PAGE_ADDRESS_MASK; | ||
213 | } | ||
214 | |||
215 | static inline bool rk_pte_is_page_valid(u32 pte) | ||
216 | { | ||
217 | return pte & RK_PTE_PAGE_VALID; | ||
218 | } | ||
219 | |||
220 | /* TODO: set cache flags per prot IOMMU_CACHE */ | ||
221 | static u32 rk_mk_pte(phys_addr_t page, int prot) | ||
222 | { | ||
223 | u32 flags = 0; | ||
224 | flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0; | ||
225 | flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0; | ||
226 | page &= RK_PTE_PAGE_ADDRESS_MASK; | ||
227 | return page | flags | RK_PTE_PAGE_VALID; | ||
228 | } | ||
229 | |||
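For example (hypothetical page address, illustration only), rk_mk_pte(0x80000000, IOMMU_READ | IOMMU_WRITE) evaluates to 0x80000007: the page address in bits 31:12 plus the readable, writable and valid bits:

    u32 pte = (0x80000000 & RK_PTE_PAGE_ADDRESS_MASK)  /* page address */
            | RK_PTE_PAGE_WRITABLE                     /* 1 << 2 */
            | RK_PTE_PAGE_READABLE                     /* 1 << 1 */
            | RK_PTE_PAGE_VALID;                       /* result: 0x80000007 */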
230 | static u32 rk_mk_pte_invalid(u32 pte) | ||
231 | { | ||
232 | return pte & ~RK_PTE_PAGE_VALID; | ||
233 | } | ||
234 | |||
235 | /* | ||
236 | * rk3288 iova (IOMMU Virtual Address) format | ||
237 | * 31 22.21 12.11 0 | ||
238 | * +-----------+-----------+-------------+ | ||
239 | * | DTE index | PTE index | Page offset | | ||
240 | * +-----------+-----------+-------------+ | ||
241 | * 31:22 - DTE index - index of DTE in DT | ||
242 | * 21:12 - PTE index - index of PTE in PT @ DTE.pt_address | ||
243 | * 11: 0 - Page offset - offset into page @ PTE.page_address | ||
244 | */ | ||
245 | #define RK_IOVA_DTE_MASK 0xffc00000 | ||
246 | #define RK_IOVA_DTE_SHIFT 22 | ||
247 | #define RK_IOVA_PTE_MASK 0x003ff000 | ||
248 | #define RK_IOVA_PTE_SHIFT 12 | ||
249 | #define RK_IOVA_PAGE_MASK 0x00000fff | ||
250 | #define RK_IOVA_PAGE_SHIFT 0 | ||
251 | |||
252 | static u32 rk_iova_dte_index(dma_addr_t iova) | ||
253 | { | ||
254 | return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT; | ||
255 | } | ||
256 | |||
257 | static u32 rk_iova_pte_index(dma_addr_t iova) | ||
258 | { | ||
259 | return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT; | ||
260 | } | ||
261 | |||
262 | static u32 rk_iova_page_offset(dma_addr_t iova) | ||
263 | { | ||
264 | return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT; | ||
265 | } | ||
266 | |||
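As a worked decomposition (the IOVA value is hypothetical, for illustration only): with iova = 0x12345678 the helpers above return dte_index = 0x048 (bits 31:22), pte_index = 0x345 (bits 21:12) and page_offset = 0x678 (bits 11:0):

    dma_addr_t iova = 0x12345678;                 /* hypothetical IOVA */
    u32 dte_index   = rk_iova_dte_index(iova);    /* 0x048 */
    u32 pte_index   = rk_iova_pte_index(iova);    /* 0x345 */
    u32 page_offset = rk_iova_page_offset(iova);  /* 0x678 */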
267 | static u32 rk_iommu_read(struct rk_iommu *iommu, u32 offset) | ||
268 | { | ||
269 | return readl(iommu->base + offset); | ||
270 | } | ||
271 | |||
272 | static void rk_iommu_write(struct rk_iommu *iommu, u32 offset, u32 value) | ||
273 | { | ||
274 | writel(value, iommu->base + offset); | ||
275 | } | ||
276 | |||
277 | static void rk_iommu_command(struct rk_iommu *iommu, u32 command) | ||
278 | { | ||
279 | writel(command, iommu->base + RK_MMU_COMMAND); | ||
280 | } | ||
281 | |||
282 | static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova, | ||
283 | size_t size) | ||
284 | { | ||
285 | dma_addr_t iova_end = iova + size; | ||
286 | /* | ||
287 | * TODO(djkurtz): Figure out when it is more efficient to shootdown the | ||
288 | * entire iotlb rather than iterate over individual iovas. | ||
289 | */ | ||
290 | for (; iova < iova_end; iova += SPAGE_SIZE) | ||
291 | rk_iommu_write(iommu, RK_MMU_ZAP_ONE_LINE, iova); | ||
292 | } | ||
293 | |||
294 | static bool rk_iommu_is_stall_active(struct rk_iommu *iommu) | ||
295 | { | ||
296 | return rk_iommu_read(iommu, RK_MMU_STATUS) & RK_MMU_STATUS_STALL_ACTIVE; | ||
297 | } | ||
298 | |||
299 | static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu) | ||
300 | { | ||
301 | return rk_iommu_read(iommu, RK_MMU_STATUS) & | ||
302 | RK_MMU_STATUS_PAGING_ENABLED; | ||
303 | } | ||
304 | |||
305 | static int rk_iommu_enable_stall(struct rk_iommu *iommu) | ||
306 | { | ||
307 | int ret; | ||
308 | |||
309 | if (rk_iommu_is_stall_active(iommu)) | ||
310 | return 0; | ||
311 | |||
312 | /* Stall can only be enabled if paging is enabled */ | ||
313 | if (!rk_iommu_is_paging_enabled(iommu)) | ||
314 | return 0; | ||
315 | |||
316 | rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL); | ||
317 | |||
318 | ret = rk_wait_for(rk_iommu_is_stall_active(iommu), 1); | ||
319 | if (ret) | ||
320 | dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n", | ||
321 | rk_iommu_read(iommu, RK_MMU_STATUS)); | ||
322 | |||
323 | return ret; | ||
324 | } | ||
325 | |||
326 | static int rk_iommu_disable_stall(struct rk_iommu *iommu) | ||
327 | { | ||
328 | int ret; | ||
329 | |||
330 | if (!rk_iommu_is_stall_active(iommu)) | ||
331 | return 0; | ||
332 | |||
333 | rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL); | ||
334 | |||
335 | ret = rk_wait_for(!rk_iommu_is_stall_active(iommu), 1); | ||
336 | if (ret) | ||
337 | dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n", | ||
338 | rk_iommu_read(iommu, RK_MMU_STATUS)); | ||
339 | |||
340 | return ret; | ||
341 | } | ||
342 | |||
343 | static int rk_iommu_enable_paging(struct rk_iommu *iommu) | ||
344 | { | ||
345 | int ret; | ||
346 | |||
347 | if (rk_iommu_is_paging_enabled(iommu)) | ||
348 | return 0; | ||
349 | |||
350 | rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING); | ||
351 | |||
352 | ret = rk_wait_for(rk_iommu_is_paging_enabled(iommu), 1); | ||
353 | if (ret) | ||
354 | dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n", | ||
355 | rk_iommu_read(iommu, RK_MMU_STATUS)); | ||
356 | |||
357 | return ret; | ||
358 | } | ||
359 | |||
360 | static int rk_iommu_disable_paging(struct rk_iommu *iommu) | ||
361 | { | ||
362 | int ret; | ||
363 | |||
364 | if (!rk_iommu_is_paging_enabled(iommu)) | ||
365 | return 0; | ||
366 | |||
367 | rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING); | ||
368 | |||
369 | ret = rk_wait_for(!rk_iommu_is_paging_enabled(iommu), 1); | ||
370 | if (ret) | ||
371 | dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n", | ||
372 | rk_iommu_read(iommu, RK_MMU_STATUS)); | ||
373 | |||
374 | return ret; | ||
375 | } | ||
376 | |||
377 | static int rk_iommu_force_reset(struct rk_iommu *iommu) | ||
378 | { | ||
379 | int ret; | ||
380 | u32 dte_addr; | ||
381 | |||
382 | /* | ||
383 | * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY | ||
384 | * and verifying that the upper 5 nybbles are read back. | ||
385 | */ | ||
386 | rk_iommu_write(iommu, RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY); | ||
387 | |||
388 | dte_addr = rk_iommu_read(iommu, RK_MMU_DTE_ADDR); | ||
389 | if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) { | ||
390 | dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n"); | ||
391 | return -EFAULT; | ||
392 | } | ||
393 | |||
394 | rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET); | ||
395 | |||
396 | ret = rk_wait_for(rk_iommu_read(iommu, RK_MMU_DTE_ADDR) == 0x00000000, | ||
397 | FORCE_RESET_TIMEOUT); | ||
398 | if (ret) | ||
399 | dev_err(iommu->dev, "FORCE_RESET command timed out\n"); | ||
400 | |||
401 | return ret; | ||
402 | } | ||
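A quick check of the sanity test above (illustrative arithmetic only): DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK = 0xCAFEBABE & 0xfffff000 = 0xCAFEB000, so only the upper five nybbles ("CAFEB") are expected to survive the round trip, which is consistent with MMU_DTE_ADDR holding a page-aligned address.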
403 | |||
404 | static void log_iova(struct rk_iommu *iommu, dma_addr_t iova) | ||
405 | { | ||
406 | u32 dte_index, pte_index, page_offset; | ||
407 | u32 mmu_dte_addr; | ||
408 | phys_addr_t mmu_dte_addr_phys, dte_addr_phys; | ||
409 | u32 *dte_addr; | ||
410 | u32 dte; | ||
411 | phys_addr_t pte_addr_phys = 0; | ||
412 | u32 *pte_addr = NULL; | ||
413 | u32 pte = 0; | ||
414 | phys_addr_t page_addr_phys = 0; | ||
415 | u32 page_flags = 0; | ||
416 | |||
417 | dte_index = rk_iova_dte_index(iova); | ||
418 | pte_index = rk_iova_pte_index(iova); | ||
419 | page_offset = rk_iova_page_offset(iova); | ||
420 | |||
421 | mmu_dte_addr = rk_iommu_read(iommu, RK_MMU_DTE_ADDR); | ||
422 | mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr; | ||
423 | |||
424 | dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index); | ||
425 | dte_addr = phys_to_virt(dte_addr_phys); | ||
426 | dte = *dte_addr; | ||
427 | |||
428 | if (!rk_dte_is_pt_valid(dte)) | ||
429 | goto print_it; | ||
430 | |||
431 | pte_addr_phys = rk_dte_pt_address(dte) + (pte_index * 4); | ||
432 | pte_addr = phys_to_virt(pte_addr_phys); | ||
433 | pte = *pte_addr; | ||
434 | |||
435 | if (!rk_pte_is_page_valid(pte)) | ||
436 | goto print_it; | ||
437 | |||
438 | page_addr_phys = rk_pte_page_address(pte) + page_offset; | ||
439 | page_flags = pte & RK_PTE_PAGE_FLAGS_MASK; | ||
440 | |||
441 | print_it: | ||
442 | dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n", | ||
443 | &iova, dte_index, pte_index, page_offset); | ||
444 | dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n", | ||
445 | &mmu_dte_addr_phys, &dte_addr_phys, dte, | ||
446 | rk_dte_is_pt_valid(dte), &pte_addr_phys, pte, | ||
447 | rk_pte_is_page_valid(pte), &page_addr_phys, page_flags); | ||
448 | } | ||
449 | |||
450 | static irqreturn_t rk_iommu_irq(int irq, void *dev_id) | ||
451 | { | ||
452 | struct rk_iommu *iommu = dev_id; | ||
453 | u32 status; | ||
454 | u32 int_status; | ||
455 | dma_addr_t iova; | ||
456 | |||
457 | int_status = rk_iommu_read(iommu, RK_MMU_INT_STATUS); | ||
458 | if (int_status == 0) | ||
459 | return IRQ_NONE; | ||
460 | |||
461 | iova = rk_iommu_read(iommu, RK_MMU_PAGE_FAULT_ADDR); | ||
462 | |||
463 | if (int_status & RK_MMU_IRQ_PAGE_FAULT) { | ||
464 | int flags; | ||
465 | |||
466 | status = rk_iommu_read(iommu, RK_MMU_STATUS); | ||
467 | flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ? | ||
468 | IOMMU_FAULT_WRITE : IOMMU_FAULT_READ; | ||
469 | |||
470 | dev_err(iommu->dev, "Page fault at %pad of type %s\n", | ||
471 | &iova, | ||
472 | (flags == IOMMU_FAULT_WRITE) ? "write" : "read"); | ||
473 | |||
474 | log_iova(iommu, iova); | ||
475 | |||
476 | /* | ||
477 | * Report page fault to any installed handlers. | ||
478 | * Ignore the return code, though, since we always zap cache | ||
479 | * and clear the page fault anyway. | ||
480 | */ | ||
481 | if (iommu->domain) | ||
482 | report_iommu_fault(iommu->domain, iommu->dev, iova, | ||
483 | flags); | ||
484 | else | ||
485 | dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n"); | ||
486 | |||
487 | rk_iommu_command(iommu, RK_MMU_CMD_ZAP_CACHE); | ||
488 | rk_iommu_command(iommu, RK_MMU_CMD_PAGE_FAULT_DONE); | ||
489 | } | ||
490 | |||
491 | if (int_status & RK_MMU_IRQ_BUS_ERROR) | ||
492 | dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova); | ||
493 | |||
494 | if (int_status & ~RK_MMU_IRQ_MASK) | ||
495 | dev_err(iommu->dev, "unexpected int_status: %#08x\n", | ||
496 | int_status); | ||
497 | |||
498 | rk_iommu_write(iommu, RK_MMU_INT_CLEAR, int_status); | ||
499 | |||
500 | return IRQ_HANDLED; | ||
501 | } | ||
502 | |||
503 | static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain, | ||
504 | dma_addr_t iova) | ||
505 | { | ||
506 | struct rk_iommu_domain *rk_domain = domain->priv; | ||
507 | unsigned long flags; | ||
508 | phys_addr_t pt_phys, phys = 0; | ||
509 | u32 dte, pte; | ||
510 | u32 *page_table; | ||
511 | |||
512 | spin_lock_irqsave(&rk_domain->dt_lock, flags); | ||
513 | |||
514 | dte = rk_domain->dt[rk_iova_dte_index(iova)]; | ||
515 | if (!rk_dte_is_pt_valid(dte)) | ||
516 | goto out; | ||
517 | |||
518 | pt_phys = rk_dte_pt_address(dte); | ||
519 | page_table = (u32 *)phys_to_virt(pt_phys); | ||
520 | pte = page_table[rk_iova_pte_index(iova)]; | ||
521 | if (!rk_pte_is_page_valid(pte)) | ||
522 | goto out; | ||
523 | |||
524 | phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova); | ||
525 | out: | ||
526 | spin_unlock_irqrestore(&rk_domain->dt_lock, flags); | ||
527 | |||
528 | return phys; | ||
529 | } | ||
530 | |||
531 | static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain, | ||
532 | dma_addr_t iova, size_t size) | ||
533 | { | ||
534 | struct list_head *pos; | ||
535 | unsigned long flags; | ||
536 | |||
537 | /* shootdown these iova from all iommus using this domain */ | ||
538 | spin_lock_irqsave(&rk_domain->iommus_lock, flags); | ||
539 | list_for_each(pos, &rk_domain->iommus) { | ||
540 | struct rk_iommu *iommu; | ||
541 | iommu = list_entry(pos, struct rk_iommu, node); | ||
542 | rk_iommu_zap_lines(iommu, iova, size); | ||
543 | } | ||
544 | spin_unlock_irqrestore(&rk_domain->iommus_lock, flags); | ||
545 | } | ||
546 | |||
547 | static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain, | ||
548 | dma_addr_t iova) | ||
549 | { | ||
550 | u32 *page_table, *dte_addr; | ||
551 | u32 dte; | ||
552 | phys_addr_t pt_phys; | ||
553 | |||
554 | assert_spin_locked(&rk_domain->dt_lock); | ||
555 | |||
556 | dte_addr = &rk_domain->dt[rk_iova_dte_index(iova)]; | ||
557 | dte = *dte_addr; | ||
558 | if (rk_dte_is_pt_valid(dte)) | ||
559 | goto done; | ||
560 | |||
561 | page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32); | ||
562 | if (!page_table) | ||
563 | return ERR_PTR(-ENOMEM); | ||
564 | |||
565 | dte = rk_mk_dte(page_table); | ||
566 | *dte_addr = dte; | ||
567 | |||
568 | rk_table_flush(page_table, NUM_PT_ENTRIES); | ||
569 | rk_table_flush(dte_addr, 1); | ||
570 | |||
571 | /* | ||
572 | * Zap the first iova of the newly allocated page table so the iommu | ||
573 | * evicts any stale dte value it may have cached for this slot in its iotlb. | ||
574 | */ | ||
575 | rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE); | ||
576 | |||
577 | done: | ||
578 | pt_phys = rk_dte_pt_address(dte); | ||
579 | return (u32 *)phys_to_virt(pt_phys); | ||
580 | } | ||
581 | |||
582 | static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain, | ||
583 | u32 *pte_addr, dma_addr_t iova, size_t size) | ||
584 | { | ||
585 | unsigned int pte_count; | ||
586 | unsigned int pte_total = size / SPAGE_SIZE; | ||
587 | |||
588 | assert_spin_locked(&rk_domain->dt_lock); | ||
589 | |||
590 | for (pte_count = 0; pte_count < pte_total; pte_count++) { | ||
591 | u32 pte = pte_addr[pte_count]; | ||
592 | if (!rk_pte_is_page_valid(pte)) | ||
593 | break; | ||
594 | |||
595 | pte_addr[pte_count] = rk_mk_pte_invalid(pte); | ||
596 | } | ||
597 | |||
598 | rk_table_flush(pte_addr, pte_count); | ||
599 | |||
600 | return pte_count * SPAGE_SIZE; | ||
601 | } | ||
602 | |||
603 | static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr, | ||
604 | dma_addr_t iova, phys_addr_t paddr, size_t size, | ||
605 | int prot) | ||
606 | { | ||
607 | unsigned int pte_count; | ||
608 | unsigned int pte_total = size / SPAGE_SIZE; | ||
609 | phys_addr_t page_phys; | ||
610 | |||
611 | assert_spin_locked(&rk_domain->dt_lock); | ||
612 | |||
613 | for (pte_count = 0; pte_count < pte_total; pte_count++) { | ||
614 | u32 pte = pte_addr[pte_count]; | ||
615 | |||
616 | if (rk_pte_is_page_valid(pte)) | ||
617 | goto unwind; | ||
618 | |||
619 | pte_addr[pte_count] = rk_mk_pte(paddr, prot); | ||
620 | |||
621 | paddr += SPAGE_SIZE; | ||
622 | } | ||
623 | |||
624 | rk_table_flush(pte_addr, pte_count); | ||
625 | |||
626 | return 0; | ||
627 | unwind: | ||
628 | /* Unmap the range of iovas that we just mapped */ | ||
629 | rk_iommu_unmap_iova(rk_domain, pte_addr, iova, pte_count * SPAGE_SIZE); | ||
630 | |||
631 | iova += pte_count * SPAGE_SIZE; | ||
632 | page_phys = rk_pte_page_address(pte_addr[pte_count]); | ||
633 | pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n", | ||
634 | &iova, &page_phys, &paddr, prot); | ||
635 | |||
636 | return -EADDRINUSE; | ||
637 | } | ||
638 | |||
639 | static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova, | ||
640 | phys_addr_t paddr, size_t size, int prot) | ||
641 | { | ||
642 | struct rk_iommu_domain *rk_domain = domain->priv; | ||
643 | unsigned long flags; | ||
644 | dma_addr_t iova = (dma_addr_t)_iova; | ||
645 | u32 *page_table, *pte_addr; | ||
646 | int ret; | ||
647 | |||
648 | spin_lock_irqsave(&rk_domain->dt_lock, flags); | ||
649 | |||
650 | /* | ||
651 | * pgsize_bitmap specifies iova sizes that fit in one page table | ||
652 | * (1024 4-KiB pages = 4 MiB). | ||
653 | * So, size will always be 4096 <= size <= 4194304. | ||
654 | * Since iommu_map() guarantees that both iova and size will be | ||
655 | * aligned, we will always only be mapping from a single dte here. | ||
656 | */ | ||
657 | page_table = rk_dte_get_page_table(rk_domain, iova); | ||
658 | if (IS_ERR(page_table)) { | ||
659 | spin_unlock_irqrestore(&rk_domain->dt_lock, flags); | ||
660 | return PTR_ERR(page_table); | ||
661 | } | ||
662 | |||
663 | pte_addr = &page_table[rk_iova_pte_index(iova)]; | ||
664 | ret = rk_iommu_map_iova(rk_domain, pte_addr, iova, paddr, size, prot); | ||
665 | spin_unlock_irqrestore(&rk_domain->dt_lock, flags); | ||
666 | |||
667 | return ret; | ||
668 | } | ||
669 | |||
670 | static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova, | ||
671 | size_t size) | ||
672 | { | ||
673 | struct rk_iommu_domain *rk_domain = domain->priv; | ||
674 | unsigned long flags; | ||
675 | dma_addr_t iova = (dma_addr_t)_iova; | ||
676 | phys_addr_t pt_phys; | ||
677 | u32 dte; | ||
678 | u32 *pte_addr; | ||
679 | size_t unmap_size; | ||
680 | |||
681 | spin_lock_irqsave(&rk_domain->dt_lock, flags); | ||
682 | |||
683 | /* | ||
684 | * pgsize_bitmap specifies iova sizes that fit in one page table | ||
685 | * (1024 4-KiB pages = 4 MiB). | ||
686 | * So, size will always be 4096 <= size <= 4194304. | ||
687 | * Since iommu_unmap() guarantees that both iova and size will be | ||
688 | * aligned, we will always only be unmapping from a single dte here. | ||
689 | */ | ||
690 | dte = rk_domain->dt[rk_iova_dte_index(iova)]; | ||
691 | /* Just return 0 if iova is unmapped */ | ||
692 | if (!rk_dte_is_pt_valid(dte)) { | ||
693 | spin_unlock_irqrestore(&rk_domain->dt_lock, flags); | ||
694 | return 0; | ||
695 | } | ||
696 | |||
697 | pt_phys = rk_dte_pt_address(dte); | ||
698 | pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova); | ||
699 | unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, iova, size); | ||
700 | |||
701 | spin_unlock_irqrestore(&rk_domain->dt_lock, flags); | ||
702 | |||
703 | /* Shootdown iotlb entries for iova range that was just unmapped */ | ||
704 | rk_iommu_zap_iova(rk_domain, iova, unmap_size); | ||
705 | |||
706 | return unmap_size; | ||
707 | } | ||
708 | |||
709 | static struct rk_iommu *rk_iommu_from_dev(struct device *dev) | ||
710 | { | ||
711 | struct iommu_group *group; | ||
712 | struct device *iommu_dev; | ||
713 | struct rk_iommu *rk_iommu; | ||
714 | |||
715 | group = iommu_group_get(dev); | ||
716 | if (!group) | ||
717 | return NULL; | ||
718 | iommu_dev = iommu_group_get_iommudata(group); | ||
719 | rk_iommu = dev_get_drvdata(iommu_dev); | ||
720 | iommu_group_put(group); | ||
721 | |||
722 | return rk_iommu; | ||
723 | } | ||
724 | |||
725 | static int rk_iommu_attach_device(struct iommu_domain *domain, | ||
726 | struct device *dev) | ||
727 | { | ||
728 | struct rk_iommu *iommu; | ||
729 | struct rk_iommu_domain *rk_domain = domain->priv; | ||
730 | unsigned long flags; | ||
731 | int ret; | ||
732 | phys_addr_t dte_addr; | ||
733 | |||
734 | /* | ||
735 | * Allow 'virtual devices' (e.g., drm) to attach to domain. | ||
736 | * Such a device does not belong to an iommu group. | ||
737 | */ | ||
738 | iommu = rk_iommu_from_dev(dev); | ||
739 | if (!iommu) | ||
740 | return 0; | ||
741 | |||
742 | ret = rk_iommu_enable_stall(iommu); | ||
743 | if (ret) | ||
744 | return ret; | ||
745 | |||
746 | ret = rk_iommu_force_reset(iommu); | ||
747 | if (ret) | ||
748 | return ret; | ||
749 | |||
750 | iommu->domain = domain; | ||
751 | |||
752 | ret = devm_request_irq(dev, iommu->irq, rk_iommu_irq, | ||
753 | IRQF_SHARED, dev_name(dev), iommu); | ||
754 | if (ret) | ||
755 | return ret; | ||
756 | |||
757 | dte_addr = virt_to_phys(rk_domain->dt); | ||
758 | rk_iommu_write(iommu, RK_MMU_DTE_ADDR, dte_addr); | ||
759 | rk_iommu_command(iommu, RK_MMU_CMD_ZAP_CACHE); | ||
760 | rk_iommu_write(iommu, RK_MMU_INT_MASK, RK_MMU_IRQ_MASK); | ||
761 | |||
762 | ret = rk_iommu_enable_paging(iommu); | ||
763 | if (ret) | ||
764 | return ret; | ||
765 | |||
766 | spin_lock_irqsave(&rk_domain->iommus_lock, flags); | ||
767 | list_add_tail(&iommu->node, &rk_domain->iommus); | ||
768 | spin_unlock_irqrestore(&rk_domain->iommus_lock, flags); | ||
769 | |||
770 | dev_info(dev, "Attached to iommu domain\n"); | ||
771 | |||
772 | rk_iommu_disable_stall(iommu); | ||
773 | |||
774 | return 0; | ||
775 | } | ||
776 | |||
777 | static void rk_iommu_detach_device(struct iommu_domain *domain, | ||
778 | struct device *dev) | ||
779 | { | ||
780 | struct rk_iommu *iommu; | ||
781 | struct rk_iommu_domain *rk_domain = domain->priv; | ||
782 | unsigned long flags; | ||
783 | |||
784 | /* Allow 'virtual devices' (e.g., drm) to detach from domain */ | ||
785 | iommu = rk_iommu_from_dev(dev); | ||
786 | if (!iommu) | ||
787 | return; | ||
788 | |||
789 | spin_lock_irqsave(&rk_domain->iommus_lock, flags); | ||
790 | list_del_init(&iommu->node); | ||
791 | spin_unlock_irqrestore(&rk_domain->iommus_lock, flags); | ||
792 | |||
793 | /* Ignore error while disabling, just keep going */ | ||
794 | rk_iommu_enable_stall(iommu); | ||
795 | rk_iommu_disable_paging(iommu); | ||
796 | rk_iommu_write(iommu, RK_MMU_INT_MASK, 0); | ||
797 | rk_iommu_write(iommu, RK_MMU_DTE_ADDR, 0); | ||
798 | rk_iommu_disable_stall(iommu); | ||
799 | |||
800 | devm_free_irq(dev, iommu->irq, iommu); | ||
801 | |||
802 | iommu->domain = NULL; | ||
803 | |||
804 | dev_info(dev, "Detached from iommu domain\n"); | ||
805 | } | ||
806 | |||
807 | static int rk_iommu_domain_init(struct iommu_domain *domain) | ||
808 | { | ||
809 | struct rk_iommu_domain *rk_domain; | ||
810 | |||
811 | rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL); | ||
812 | if (!rk_domain) | ||
813 | return -ENOMEM; | ||
814 | |||
815 | /* | ||
816 | * rk32xx iommus use a 2-level pagetable. | ||
817 | * Each level 1 (dt) and level 2 (pt) table has 1024 4-byte entries. | ||
818 | * Allocate one 4 KiB page for each table. | ||
819 | */ | ||
820 | rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32); | ||
821 | if (!rk_domain->dt) | ||
822 | goto err_dt; | ||
823 | |||
824 | rk_table_flush(rk_domain->dt, NUM_DT_ENTRIES); | ||
825 | |||
826 | spin_lock_init(&rk_domain->iommus_lock); | ||
827 | spin_lock_init(&rk_domain->dt_lock); | ||
828 | INIT_LIST_HEAD(&rk_domain->iommus); | ||
829 | |||
830 | domain->priv = rk_domain; | ||
831 | |||
832 | return 0; | ||
833 | err_dt: | ||
834 | kfree(rk_domain); | ||
835 | return -ENOMEM; | ||
836 | } | ||
837 | |||
838 | static void rk_iommu_domain_destroy(struct iommu_domain *domain) | ||
839 | { | ||
840 | struct rk_iommu_domain *rk_domain = domain->priv; | ||
841 | int i; | ||
842 | |||
843 | WARN_ON(!list_empty(&rk_domain->iommus)); | ||
844 | |||
845 | for (i = 0; i < NUM_DT_ENTRIES; i++) { | ||
846 | u32 dte = rk_domain->dt[i]; | ||
847 | if (rk_dte_is_pt_valid(dte)) { | ||
848 | phys_addr_t pt_phys = rk_dte_pt_address(dte); | ||
849 | u32 *page_table = phys_to_virt(pt_phys); | ||
850 | free_page((unsigned long)page_table); | ||
851 | } | ||
852 | } | ||
853 | |||
854 | free_page((unsigned long)rk_domain->dt); | ||
855 | kfree(domain->priv); | ||
856 | domain->priv = NULL; | ||
857 | } | ||
858 | |||
859 | static bool rk_iommu_is_dev_iommu_master(struct device *dev) | ||
860 | { | ||
861 | struct device_node *np = dev->of_node; | ||
862 | int ret; | ||
863 | |||
864 | /* | ||
865 | * An iommu master has an iommus property containing a list of phandles | ||
866 | * to iommu nodes, each with an #iommu-cells property with value 0. | ||
867 | */ | ||
868 | ret = of_count_phandle_with_args(np, "iommus", "#iommu-cells"); | ||
869 | return (ret > 0); | ||
870 | } | ||
871 | |||
872 | static int rk_iommu_group_set_iommudata(struct iommu_group *group, | ||
873 | struct device *dev) | ||
874 | { | ||
875 | struct device_node *np = dev->of_node; | ||
876 | struct platform_device *pd; | ||
877 | int ret; | ||
878 | struct of_phandle_args args; | ||
879 | |||
880 | /* | ||
881 | * An iommu master has an iommus property containing a list of phandles | ||
882 | * to iommu nodes, each with an #iommu-cells property with value 0. | ||
883 | */ | ||
884 | ret = of_parse_phandle_with_args(np, "iommus", "#iommu-cells", 0, | ||
885 | &args); | ||
886 | if (ret) { | ||
887 | dev_err(dev, "of_parse_phandle_with_args(%s) => %d\n", | ||
888 | np->full_name, ret); | ||
889 | return ret; | ||
890 | } | ||
891 | if (args.args_count != 0) { | ||
892 | dev_err(dev, "incorrect number of iommu params found for %s (found %d, expected 0)\n", | ||
893 | args.np->full_name, args.args_count); | ||
894 | return -EINVAL; | ||
895 | } | ||
896 | |||
897 | pd = of_find_device_by_node(args.np); | ||
898 | of_node_put(args.np); | ||
899 | if (!pd) { | ||
900 | dev_err(dev, "iommu %s not found\n", args.np->full_name); | ||
901 | return -EPROBE_DEFER; | ||
902 | } | ||
903 | |||
904 | /* TODO(djkurtz): handle multiple slave iommus for a single master */ | ||
905 | iommu_group_set_iommudata(group, &pd->dev, NULL); | ||
906 | |||
907 | return 0; | ||
908 | } | ||
909 | |||
910 | static int rk_iommu_add_device(struct device *dev) | ||
911 | { | ||
912 | struct iommu_group *group; | ||
913 | int ret; | ||
914 | |||
915 | if (!rk_iommu_is_dev_iommu_master(dev)) | ||
916 | return -ENODEV; | ||
917 | |||
918 | group = iommu_group_get(dev); | ||
919 | if (!group) { | ||
920 | group = iommu_group_alloc(); | ||
921 | if (IS_ERR(group)) { | ||
922 | dev_err(dev, "Failed to allocate IOMMU group\n"); | ||
923 | return PTR_ERR(group); | ||
924 | } | ||
925 | } | ||
926 | |||
927 | ret = iommu_group_add_device(group, dev); | ||
928 | if (ret) | ||
929 | goto err_put_group; | ||
930 | |||
931 | ret = rk_iommu_group_set_iommudata(group, dev); | ||
932 | if (ret) | ||
933 | goto err_remove_device; | ||
934 | |||
935 | iommu_group_put(group); | ||
936 | |||
937 | return 0; | ||
938 | |||
939 | err_remove_device: | ||
940 | iommu_group_remove_device(dev); | ||
941 | err_put_group: | ||
942 | iommu_group_put(group); | ||
943 | return ret; | ||
944 | } | ||
945 | |||
946 | static void rk_iommu_remove_device(struct device *dev) | ||
947 | { | ||
948 | if (!rk_iommu_is_dev_iommu_master(dev)) | ||
949 | return; | ||
950 | |||
951 | iommu_group_remove_device(dev); | ||
952 | } | ||
953 | |||
954 | static const struct iommu_ops rk_iommu_ops = { | ||
955 | .domain_init = rk_iommu_domain_init, | ||
956 | .domain_destroy = rk_iommu_domain_destroy, | ||
957 | .attach_dev = rk_iommu_attach_device, | ||
958 | .detach_dev = rk_iommu_detach_device, | ||
959 | .map = rk_iommu_map, | ||
960 | .unmap = rk_iommu_unmap, | ||
961 | .add_device = rk_iommu_add_device, | ||
962 | .remove_device = rk_iommu_remove_device, | ||
963 | .iova_to_phys = rk_iommu_iova_to_phys, | ||
964 | .pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP, | ||
965 | }; | ||
966 | |||
967 | static int rk_iommu_probe(struct platform_device *pdev) | ||
968 | { | ||
969 | struct device *dev = &pdev->dev; | ||
970 | struct rk_iommu *iommu; | ||
971 | struct resource *res; | ||
972 | |||
973 | iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL); | ||
974 | if (!iommu) | ||
975 | return -ENOMEM; | ||
976 | |||
977 | platform_set_drvdata(pdev, iommu); | ||
978 | iommu->dev = dev; | ||
979 | |||
980 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
981 | iommu->base = devm_ioremap_resource(&pdev->dev, res); | ||
982 | if (IS_ERR(iommu->base)) | ||
983 | return PTR_ERR(iommu->base); | ||
984 | |||
985 | iommu->irq = platform_get_irq(pdev, 0); | ||
986 | if (iommu->irq < 0) { | ||
987 | dev_err(dev, "Failed to get IRQ, %d\n", iommu->irq); | ||
988 | return -ENXIO; | ||
989 | } | ||
990 | |||
991 | return 0; | ||
992 | } | ||
993 | |||
994 | static int rk_iommu_remove(struct platform_device *pdev) | ||
995 | { | ||
996 | return 0; | ||
997 | } | ||
998 | |||
999 | #ifdef CONFIG_OF | ||
1000 | static const struct of_device_id rk_iommu_dt_ids[] = { | ||
1001 | { .compatible = "rockchip,iommu" }, | ||
1002 | { /* sentinel */ } | ||
1003 | }; | ||
1004 | MODULE_DEVICE_TABLE(of, rk_iommu_dt_ids); | ||
1005 | #endif | ||
1006 | |||
1007 | static struct platform_driver rk_iommu_driver = { | ||
1008 | .probe = rk_iommu_probe, | ||
1009 | .remove = rk_iommu_remove, | ||
1010 | .driver = { | ||
1011 | .name = "rk_iommu", | ||
1012 | .owner = THIS_MODULE, | ||
1013 | .of_match_table = of_match_ptr(rk_iommu_dt_ids), | ||
1014 | }, | ||
1015 | }; | ||
1016 | |||
1017 | static int __init rk_iommu_init(void) | ||
1018 | { | ||
1019 | int ret; | ||
1020 | |||
1021 | ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops); | ||
1022 | if (ret) | ||
1023 | return ret; | ||
1024 | |||
1025 | return platform_driver_register(&rk_iommu_driver); | ||
1026 | } | ||
1027 | static void __exit rk_iommu_exit(void) | ||
1028 | { | ||
1029 | platform_driver_unregister(&rk_iommu_driver); | ||
1030 | } | ||
1031 | |||
1032 | subsys_initcall(rk_iommu_init); | ||
1033 | module_exit(rk_iommu_exit); | ||
1034 | |||
1035 | MODULE_DESCRIPTION("IOMMU API for Rockchip"); | ||
1036 | MODULE_AUTHOR("Simon Xue <xxm@rock-chips.com> and Daniel Kurtz <djkurtz@chromium.org>"); | ||
1037 | MODULE_ALIAS("platform:rockchip-iommu"); | ||
1038 | MODULE_LICENSE("GPL v2"); | ||