Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/Kconfig | 8
-rw-r--r--  drivers/gpu/drm/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 2
-rw-r--r--  drivers/gpu/drm/arm/malidp_drv.c | 48
-rw-r--r--  drivers/gpu/drm/arm/malidp_drv.h | 6
-rw-r--r--  drivers/gpu/drm/arm/malidp_hw.c | 249
-rw-r--r--  drivers/gpu/drm/arm/malidp_hw.h | 31
-rw-r--r--  drivers/gpu/drm/arm/malidp_mw.c | 7
-rw-r--r--  drivers/gpu/drm/arm/malidp_planes.c | 271
-rw-r--r--  drivers/gpu/drm/arm/malidp_regs.h | 20
-rw-r--r--  drivers/gpu/drm/bochs/bochs_kms.c | 11
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 4
-rw-r--r--  drivers/gpu/drm/drm_atomic_uapi.c | 4
-rw-r--r--  drivers/gpu/drm/drm_connector.c | 91
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 1
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 206
-rw-r--r--  drivers/gpu/drm/drm_dsc.c | 269
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 76
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 6
-rw-r--r--  drivers/gpu/drm/drm_file.c | 7
-rw-r--r--  drivers/gpu/drm/drm_fourcc.c | 27
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 121
-rw-r--r--  drivers/gpu/drm/drm_gem_shmem_helper.c | 625
-rw-r--r--  drivers/gpu/drm/drm_kms_helper_common.c | 2
-rw-r--r--  drivers/gpu/drm/drm_memory.c | 19
-rw-r--r--  drivers/gpu/drm/drm_panel_orientation_quirks.c | 13
-rw-r--r--  drivers/gpu/drm/drm_prime.c | 1
-rw-r--r--  drivers/gpu/drm/drm_syncobj.c | 3
-rw-r--r--  drivers/gpu/drm/drm_vm.c | 4
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_drv.c | 1
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_drv.h | 1
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.c | 16
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.h | 4
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c | 7
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 22
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic_plane.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_connector.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 86
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 13
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 184
-rw-r--r--  drivers/gpu/drm/i915/intel_vdsc.c | 133
-rw-r--r--  drivers/gpu/drm/imx/imx-drm-core.c | 2
-rw-r--r--  drivers/gpu/drm/meson/Makefile | 2
-rw-r--r--  drivers/gpu/drm/meson/meson_canvas.c | 73
-rw-r--r--  drivers/gpu/drm/meson/meson_canvas.h | 51
-rw-r--r--  drivers/gpu/drm/meson/meson_crtc.c | 84
-rw-r--r--  drivers/gpu/drm/meson/meson_drv.c | 68
-rw-r--r--  drivers/gpu/drm/meson/meson_drv.h | 1
-rw-r--r--  drivers/gpu/drm/meson/meson_overlay.c | 8
-rw-r--r--  drivers/gpu/drm/meson/meson_plane.c | 6
-rw-r--r--  drivers/gpu/drm/meson/meson_viu.c | 1
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 1
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h | 1
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 27
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_prime.c | 7
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c | 8
-rw-r--r--  drivers/gpu/drm/panel/Kconfig | 9
-rw-r--r--  drivers/gpu/drm/panel/Makefile | 1
-rw-r--r--  drivers/gpu/drm/panel/panel-ronbo-rb070d30.c | 258
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 2
-rw-r--r--  drivers/gpu/drm/selftests/test-drm_mm.c | 12
-rw-r--r--  drivers/gpu/drm/stm/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_backend.c | 35
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_lvds.c | 29
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_rgb.c | 74
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_tcon.c | 4
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_tcon.h | 2
-rw-r--r--  drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c | 179
-rw-r--r--  drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h | 2
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_mixer.c | 49
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_mixer.h | 2
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_vi_layer.c | 54
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_vi_layer.h | 11
-rw-r--r--  drivers/gpu/drm/tinydrm/core/Makefile | 2
-rw-r--r--  drivers/gpu/drm/tinydrm/core/tinydrm-core.c | 183
-rw-r--r--  drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c | 2
-rw-r--r--  drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c | 24
-rw-r--r--  drivers/gpu/drm/tinydrm/hx8357d.c | 59
-rw-r--r--  drivers/gpu/drm/tinydrm/ili9225.c | 87
-rw-r--r--  drivers/gpu/drm/tinydrm/ili9341.c | 59
-rw-r--r--  drivers/gpu/drm/tinydrm/mi0283qt.c | 67
-rw-r--r--  drivers/gpu/drm/tinydrm/mipi-dbi.c | 178
-rw-r--r--  drivers/gpu/drm/tinydrm/repaper.c | 144
-rw-r--r--  drivers/gpu/drm/tinydrm/st7586.c | 145
-rw-r--r--  drivers/gpu/drm/tinydrm/st7735r.c | 59
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.c | 1
-rw-r--r--  drivers/gpu/drm/v3d/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/v3d/v3d_bo.c | 314
-rw-r--r--  drivers/gpu/drm/v3d/v3d_debugfs.c | 8
-rw-r--r--  drivers/gpu/drm/v3d/v3d_drv.c | 63
-rw-r--r--  drivers/gpu/drm/v3d/v3d_drv.h | 33
-rw-r--r--  drivers/gpu/drm/v3d/v3d_gem.c | 104
-rw-r--r--  drivers/gpu/drm/v3d/v3d_irq.c | 58
-rw-r--r--  drivers/gpu/drm/v3d/v3d_mmu.c | 11
-rw-r--r--  drivers/gpu/drm/v3d/v3d_regs.h | 2
-rw-r--r--  drivers/gpu/drm/v3d/v3d_sched.c | 13
-rw-r--r--  drivers/gpu/drm/vboxvideo/Kconfig | 15
-rw-r--r--  drivers/gpu/drm/vboxvideo/Makefile | 6
-rw-r--r--  drivers/gpu/drm/vboxvideo/hgsmi_base.c | 207
-rw-r--r--  drivers/gpu/drm/vboxvideo/hgsmi_ch_setup.h | 32
-rw-r--r--  drivers/gpu/drm/vboxvideo/hgsmi_channels.h | 34
-rw-r--r--  drivers/gpu/drm/vboxvideo/hgsmi_defs.h | 73
-rw-r--r--  drivers/gpu/drm/vboxvideo/modesetting.c | 123
-rw-r--r--  drivers/gpu/drm/vboxvideo/vbox_drv.c | 258
-rw-r--r--  drivers/gpu/drm/vboxvideo/vbox_drv.h | 273
-rw-r--r--  drivers/gpu/drm/vboxvideo/vbox_fb.c | 155
-rw-r--r--  drivers/gpu/drm/vboxvideo/vbox_hgsmi.c | 95
-rw-r--r--  drivers/gpu/drm/vboxvideo/vbox_irq.c | 183
-rw-r--r--  drivers/gpu/drm/vboxvideo/vbox_main.c | 361
-rw-r--r--  drivers/gpu/drm/vboxvideo/vbox_mode.c | 940
-rw-r--r--  drivers/gpu/drm/vboxvideo/vbox_prime.c | 56
-rw-r--r--  drivers/gpu/drm/vboxvideo/vbox_ttm.c | 394
-rw-r--r--  drivers/gpu/drm/vboxvideo/vboxvideo.h | 442
-rw-r--r--  drivers/gpu/drm/vboxvideo/vboxvideo_guest.h | 61
-rw-r--r--  drivers/gpu/drm/vboxvideo/vboxvideo_vbe.h | 54
-rw-r--r--  drivers/gpu/drm/vboxvideo/vbva_base.c | 214
-rw-r--r--  drivers/gpu/drm/vc4/vc4_bo.c | 15
-rw-r--r--  drivers/gpu/drm/vc4/vc4_crtc.c | 8
-rw-r--r--  drivers/gpu/drm/vc4/vc4_debugfs.c | 10
-rw-r--r--  drivers/gpu/drm/vc4/vc4_drv.c | 4
-rw-r--r--  drivers/gpu/drm/vc4/vc4_drv.h | 30
-rw-r--r--  drivers/gpu/drm/vc4/vc4_gem.c | 18
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hvs.c | 95
-rw-r--r--  drivers/gpu/drm/vc4/vc4_kms.c | 122
-rw-r--r--  drivers/gpu/drm/vc4/vc4_plane.c | 59
-rw-r--r--  drivers/gpu/drm/vc4/vc4_regs.h | 51
-rw-r--r--  drivers/gpu/drm/vc4/vc4_txp.c | 3
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.c | 4
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.h | 3
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_prime.c | 22
-rw-r--r--  drivers/gpu/drm/xen/xen_drm_front.c | 1
140 files changed, 8058 insertions(+), 1707 deletions(-)
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index bd943a71756c..5e1bc630b885 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -173,6 +173,12 @@ config DRM_KMS_CMA_HELPER
 	help
 	  Choose this if you need the KMS CMA helper functions
 
+config DRM_GEM_SHMEM_HELPER
+	bool
+	depends on DRM
+	help
+	  Choose this if you need the GEM shmem helper functions
+
 config DRM_VM
 	bool
 	depends on DRM && MMU
@@ -329,6 +335,8 @@ source "drivers/gpu/drm/tve200/Kconfig"
 
 source "drivers/gpu/drm/xen/Kconfig"
 
+source "drivers/gpu/drm/vboxvideo/Kconfig"
+
 # Keep legacy drivers last
 
 menuconfig DRM_LEGACY
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 1ac55c65eac0..e630eccb951c 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -25,6 +25,7 @@ drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
 drm-$(CONFIG_DRM_VM) += drm_vm.o
 drm-$(CONFIG_COMPAT) += drm_ioc32.o
 drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
+drm-$(CONFIG_DRM_GEM_SHMEM_HELPER) += drm_gem_shmem_helper.o
 drm-$(CONFIG_PCI) += ati_pcigart.o
 drm-$(CONFIG_DRM_PANEL) += drm_panel.o
 drm-$(CONFIG_OF) += drm_of.o
@@ -109,3 +110,4 @@ obj-$(CONFIG_DRM_TINYDRM) += tinydrm/
 obj-$(CONFIG_DRM_PL111) += pl111/
 obj-$(CONFIG_DRM_TVE200) += tve200/
 obj-$(CONFIG_DRM_XEN) += xen/
+obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 7419ea8a388b..8a0732088640 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -974,6 +974,7 @@ amdgpu_pci_remove(struct pci_dev *pdev)
 
 	DRM_ERROR("Device removal is currently not supported outside of fbcon\n");
 	drm_dev_unplug(dev);
+	drm_dev_put(dev);
 	pci_disable_device(pdev);
 	pci_set_drvdata(pdev, NULL);
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 9fc3296592fe..98fd9208877f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -886,7 +886,7 @@ static int gmc_v6_0_sw_init(void *handle)
 		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
 		dev_warn(adev->dev, "amdgpu: No coherent DMA available.\n");
 	}
-	adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
+	adev->need_swiotlb = drm_need_swiotlb(dma_bits);
 
 	r = gmc_v6_0_init_microcode(adev);
 	if (r) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 761dcfb2fec0..3e9c5034febe 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -1030,7 +1030,7 @@ static int gmc_v7_0_sw_init(void *handle)
 		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
 		pr_warn("amdgpu: No coherent DMA available\n");
 	}
-	adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
+	adev->need_swiotlb = drm_need_swiotlb(dma_bits);
 
 	r = gmc_v7_0_init_microcode(adev);
 	if (r) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 34440672f938..29dde64bf2e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1155,7 +1155,7 @@ static int gmc_v8_0_sw_init(void *handle)
 		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
 		pr_warn("amdgpu: No coherent DMA available\n");
 	}
-	adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
+	adev->need_swiotlb = drm_need_swiotlb(dma_bits);
 
 	r = gmc_v8_0_init_microcode(adev);
 	if (r) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 2fe8397241ea..53327498efbf 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1011,7 +1011,7 @@ static int gmc_v9_0_sw_init(void *handle)
 		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
 		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
 	}
-	adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
+	adev->need_swiotlb = drm_need_swiotlb(dma_bits);
 
 	if (adev->gmc.xgmi.supported) {
 		r = gfxhub_v1_1_get_xgmi_info(adev);
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index ab50ad06e271..21725c9b9f5e 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -264,37 +264,17 @@ static bool
 malidp_verify_afbc_framebuffer_caps(struct drm_device *dev,
 				    const struct drm_mode_fb_cmd2 *mode_cmd)
 {
-	const struct drm_format_info *info;
-
-	if ((mode_cmd->modifier[0] >> 56) != DRM_FORMAT_MOD_VENDOR_ARM) {
-		DRM_DEBUG_KMS("Unknown modifier (not Arm)\n");
-		return false;
-	}
-
-	if (mode_cmd->modifier[0] &
-	    ~DRM_FORMAT_MOD_ARM_AFBC(AFBC_MOD_VALID_BITS)) {
-		DRM_DEBUG_KMS("Unsupported modifiers\n");
-		return false;
-	}
-
-	info = drm_get_format_info(dev, mode_cmd);
-	if (!info) {
-		DRM_DEBUG_KMS("Unable to get the format information\n");
+	if (malidp_format_mod_supported(dev, mode_cmd->pixel_format,
+					mode_cmd->modifier[0]) == false)
 		return false;
-	}
-
-	if (info->num_planes != 1) {
-		DRM_DEBUG_KMS("AFBC buffers expect one plane\n");
-		return false;
-	}
 
 	if (mode_cmd->offsets[0] != 0) {
 		DRM_DEBUG_KMS("AFBC buffers' plane offset should be 0\n");
 		return false;
 	}
 
-	switch (mode_cmd->modifier[0] & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK) {
-	case AFBC_FORMAT_MOD_BLOCK_SIZE_16x16:
+	switch (mode_cmd->modifier[0] & AFBC_SIZE_MASK) {
+	case AFBC_SIZE_16X16:
 		if ((mode_cmd->width % 16) || (mode_cmd->height % 16)) {
 			DRM_DEBUG_KMS("AFBC buffers must be aligned to 16 pixels\n");
 			return false;
@@ -318,9 +298,10 @@ malidp_verify_afbc_framebuffer_size(struct drm_device *dev,
 	struct drm_gem_object *objs = NULL;
 	u32 afbc_superblock_size = 0, afbc_superblock_height = 0;
 	u32 afbc_superblock_width = 0, afbc_size = 0;
+	int bpp = 0;
 
-	switch (mode_cmd->modifier[0] & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK) {
-	case AFBC_FORMAT_MOD_BLOCK_SIZE_16x16:
+	switch (mode_cmd->modifier[0] & AFBC_SIZE_MASK) {
+	case AFBC_SIZE_16X16:
 		afbc_superblock_height = 16;
 		afbc_superblock_width = 16;
 		break;
@@ -334,15 +315,19 @@ malidp_verify_afbc_framebuffer_size(struct drm_device *dev,
 	n_superblocks = (mode_cmd->width / afbc_superblock_width) *
 			(mode_cmd->height / afbc_superblock_height);
 
-	afbc_superblock_size = info->cpp[0] * afbc_superblock_width *
-			       afbc_superblock_height;
+	bpp = malidp_format_get_bpp(info->format);
+
+	afbc_superblock_size = (bpp * afbc_superblock_width * afbc_superblock_height)
+			       / BITS_PER_BYTE;
 
 	afbc_size = ALIGN(n_superblocks * AFBC_HEADER_SIZE, AFBC_SUPERBLK_ALIGNMENT);
 	afbc_size += n_superblocks * ALIGN(afbc_superblock_size, AFBC_SUPERBLK_ALIGNMENT);
 
-	if (mode_cmd->width * info->cpp[0] != mode_cmd->pitches[0]) {
-		DRM_DEBUG_KMS("Invalid value of pitch (=%u) should be same as width (=%u) * cpp (=%u)\n",
-			      mode_cmd->pitches[0], mode_cmd->width, info->cpp[0]);
+	if ((mode_cmd->width * bpp) != (mode_cmd->pitches[0] * BITS_PER_BYTE)) {
+		DRM_DEBUG_KMS("Invalid value of (pitch * BITS_PER_BYTE) (=%u) "
+			      "should be same as width (=%u) * bpp (=%u)\n",
+			      (mode_cmd->pitches[0] * BITS_PER_BYTE),
+			      mode_cmd->width, bpp);
 		return false;
 	}
 
@@ -406,6 +391,7 @@ static int malidp_init(struct drm_device *drm)
 	drm->mode_config.max_height = hwdev->max_line_size;
 	drm->mode_config.funcs = &malidp_mode_config_funcs;
 	drm->mode_config.helper_private = &malidp_mode_config_helpers;
+	drm->mode_config.allow_fb_modifiers = true;
 
 	ret = malidp_crtc_init(drm);
 	if (ret)
diff --git a/drivers/gpu/drm/arm/malidp_drv.h b/drivers/gpu/drm/arm/malidp_drv.h
index b76c86f18a56..019a682b2716 100644
--- a/drivers/gpu/drm/arm/malidp_drv.h
+++ b/drivers/gpu/drm/arm/malidp_drv.h
@@ -90,6 +90,12 @@ struct malidp_crtc_state {
 int malidp_de_planes_init(struct drm_device *drm);
 int malidp_crtc_init(struct drm_device *drm);
 
+bool malidp_hw_format_is_linear_only(u32 format);
+bool malidp_hw_format_is_afbc_only(u32 format);
+
+bool malidp_format_mod_supported(struct drm_device *drm,
+				 u32 format, u64 modifier);
+
 #ifdef CONFIG_DEBUG_FS
 void malidp_error(struct malidp_drm *malidp,
 		  struct malidp_error_stats *error_stats, u32 status,
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index b9bed1138fa3..8df12e9a33bb 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -49,11 +49,19 @@ static const struct malidp_format_id malidp500_de_formats[] = {
 	{ DRM_FORMAT_YUYV, DE_VIDEO1, 13 },
 	{ DRM_FORMAT_NV12, DE_VIDEO1 | SE_MEMWRITE, 14 },
 	{ DRM_FORMAT_YUV420, DE_VIDEO1, 15 },
+	{ DRM_FORMAT_XYUV8888, DE_VIDEO1, 16 },
+	/* These are supported with AFBC only */
+	{ DRM_FORMAT_YUV420_8BIT, DE_VIDEO1, 14 },
+	{ DRM_FORMAT_VUY888, DE_VIDEO1, 16 },
+	{ DRM_FORMAT_VUY101010, DE_VIDEO1, 17 },
+	{ DRM_FORMAT_YUV420_10BIT, DE_VIDEO1, 18 }
 };
 
 #define MALIDP_ID(__group, __format) \
 	((((__group) & 0x7) << 3) | ((__format) & 0x7))
 
+#define AFBC_YUV_422_FORMAT_ID	MALIDP_ID(5, 1)
+
 #define MALIDP_COMMON_FORMATS \
 	/* fourcc, layers supporting the format, internal id */ \
 	{ DRM_FORMAT_ARGB2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(0, 0) }, \
@@ -74,11 +82,25 @@ static const struct malidp_format_id malidp500_de_formats[] = {
 	{ DRM_FORMAT_ABGR1555, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 1) }, \
 	{ DRM_FORMAT_RGB565, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 2) }, \
 	{ DRM_FORMAT_BGR565, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 3) }, \
+	/* This is only supported with linear modifier */ \
+	{ DRM_FORMAT_XYUV8888, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 0) }, \
+	/* This is only supported with AFBC modifier */ \
+	{ DRM_FORMAT_VUY888, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 0) }, \
 	{ DRM_FORMAT_YUYV, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 2) }, \
+	/* This is only supported with linear modifier */ \
 	{ DRM_FORMAT_UYVY, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 3) }, \
 	{ DRM_FORMAT_NV12, DE_VIDEO1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(5, 6) }, \
+	/* This is only supported with AFBC modifier */ \
+	{ DRM_FORMAT_YUV420_8BIT, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 6) }, \
 	{ DRM_FORMAT_YUV420, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 7) }, \
-	{ DRM_FORMAT_X0L2, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 6)}
+	/* This is only supported with linear modifier */ \
+	{ DRM_FORMAT_XVYU2101010, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 0)}, \
+	/* This is only supported with AFBC modifier */ \
+	{ DRM_FORMAT_VUY101010, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 0)}, \
+	{ DRM_FORMAT_X0L2, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 6)}, \
+	/* This is only supported with AFBC modifier */ \
+	{ DRM_FORMAT_YUV420_10BIT, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 7)}, \
+	{ DRM_FORMAT_P010, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 7)}
 
 static const struct malidp_format_id malidp550_de_formats[] = {
 	MALIDP_COMMON_FORMATS,
@@ -94,11 +116,14 @@ static const struct malidp_layer malidp500_layers[] = {
 	 * yuv2rgb matrix offset, mmu control register offset, rotation_features
 	 */
 	{ DE_VIDEO1, MALIDP500_DE_LV_BASE, MALIDP500_DE_LV_PTR_BASE,
-		MALIDP_DE_LV_STRIDE0, MALIDP500_LV_YUV2RGB, 0, ROTATE_ANY },
+		MALIDP_DE_LV_STRIDE0, MALIDP500_LV_YUV2RGB, 0, ROTATE_ANY,
+		MALIDP500_DE_LV_AD_CTRL },
 	{ DE_GRAPHICS1, MALIDP500_DE_LG1_BASE, MALIDP500_DE_LG1_PTR_BASE,
-		MALIDP_DE_LG_STRIDE, 0, 0, ROTATE_ANY },
+		MALIDP_DE_LG_STRIDE, 0, 0, ROTATE_ANY,
+		MALIDP500_DE_LG1_AD_CTRL },
 	{ DE_GRAPHICS2, MALIDP500_DE_LG2_BASE, MALIDP500_DE_LG2_PTR_BASE,
-		MALIDP_DE_LG_STRIDE, 0, 0, ROTATE_ANY },
+		MALIDP_DE_LG_STRIDE, 0, 0, ROTATE_ANY,
+		MALIDP500_DE_LG2_AD_CTRL },
 };
 
 static const struct malidp_layer malidp550_layers[] = {
@@ -106,13 +131,16 @@ static const struct malidp_layer malidp550_layers[] = {
 	 * yuv2rgb matrix offset, mmu control register offset, rotation_features
 	 */
 	{ DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE,
-		MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB, 0, ROTATE_ANY },
+		MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB, 0, ROTATE_ANY,
+		MALIDP550_DE_LV1_AD_CTRL },
 	{ DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE,
-		MALIDP_DE_LG_STRIDE, 0, 0, ROTATE_ANY },
+		MALIDP_DE_LG_STRIDE, 0, 0, ROTATE_ANY,
+		MALIDP550_DE_LG_AD_CTRL },
 	{ DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE,
-		MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB, 0, ROTATE_ANY },
+		MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB, 0, ROTATE_ANY,
+		MALIDP550_DE_LV2_AD_CTRL },
 	{ DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE,
-		MALIDP550_DE_LS_R1_STRIDE, 0, 0, ROTATE_NONE },
+		MALIDP550_DE_LS_R1_STRIDE, 0, 0, ROTATE_NONE, 0 },
 };
 
 static const struct malidp_layer malidp650_layers[] = {
@@ -122,16 +150,44 @@ static const struct malidp_layer malidp650_layers[] = {
 	 */
 	{ DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE,
 		MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB,
-		MALIDP650_DE_LV_MMU_CTRL, ROTATE_ANY },
+		MALIDP650_DE_LV_MMU_CTRL, ROTATE_ANY,
+		MALIDP550_DE_LV1_AD_CTRL },
 	{ DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE,
 		MALIDP_DE_LG_STRIDE, 0, MALIDP650_DE_LG_MMU_CTRL,
-		ROTATE_COMPRESSED },
+		ROTATE_COMPRESSED, MALIDP550_DE_LG_AD_CTRL },
 	{ DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE,
 		MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB,
-		MALIDP650_DE_LV_MMU_CTRL, ROTATE_ANY },
+		MALIDP650_DE_LV_MMU_CTRL, ROTATE_ANY,
+		MALIDP550_DE_LV2_AD_CTRL },
 	{ DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE,
 		MALIDP550_DE_LS_R1_STRIDE, 0, MALIDP650_DE_LS_MMU_CTRL,
-		ROTATE_NONE },
+		ROTATE_NONE, 0 },
+};
+
+const u64 malidp_format_modifiers[] = {
+	/* All RGB formats (except XRGB, RGBX, XBGR, BGRX) */
+	DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_YTR | AFBC_SPARSE),
+	DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_YTR),
+
+	/* All RGB formats > 16bpp (except XRGB, RGBX, XBGR, BGRX) */
+	DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_YTR | AFBC_SPARSE | AFBC_SPLIT),
+
+	/* All 8 or 10 bit YUV 444 formats. */
+	/* In DP550, 10 bit YUV 420 format also supported */
+	DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_SPARSE | AFBC_SPLIT),
+
+	/* YUV 420, 422 P1 8 bit and YUV 444 8 bit/10 bit formats */
+	DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_SPARSE),
+	DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16),
+
+	/* YUV 420, 422 P1 8, 10 bit formats */
+	DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_CBR | AFBC_SPARSE),
+	DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_CBR),
+
+	/* All formats */
+	DRM_FORMAT_MOD_LINEAR,
+
+	DRM_FORMAT_MOD_INVALID
 };
 
 #define SE_N_SCALING_COEFFS	96
@@ -324,14 +380,39 @@ static void malidp500_modeset(struct malidp_hw_device *hwdev, struct videomode *
 	malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC);
 }
 
-static int malidp500_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt)
+int malidp_format_get_bpp(u32 fmt)
+{
+	int bpp = drm_format_plane_cpp(fmt, 0) * 8;
+
+	if (bpp == 0) {
+		switch (fmt) {
+		case DRM_FORMAT_VUY101010:
+			bpp = 30;
+			break;
+		case DRM_FORMAT_YUV420_10BIT:
+			bpp = 15;
+			break;
+		case DRM_FORMAT_YUV420_8BIT:
+			bpp = 12;
+			break;
+		default:
+			bpp = 0;
+		}
+	}
+
+	return bpp;
+}
+
+static int malidp500_rotmem_required(struct malidp_hw_device *hwdev, u16 w,
+				     u16 h, u32 fmt, bool has_modifier)
 {
 	/*
 	 * Each layer needs enough rotation memory to fit 8 lines
 	 * worth of pixel data. Required size is then:
 	 *    size = rotated_width * (bpp / 8) * 8;
 	 */
-	return w * drm_format_plane_cpp(fmt, 0) * 8;
+	int bpp = malidp_format_get_bpp(fmt);
+
+	return w * bpp;
 }
 
 static void malidp500_se_write_pp_coefftab(struct malidp_hw_device *hwdev,
@@ -609,9 +690,9 @@ static void malidp550_modeset(struct malidp_hw_device *hwdev, struct videomode *
 	malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC);
 }
 
-static int malidp550_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt)
+static int malidpx50_get_bytes_per_column(u32 fmt)
 {
-	u32 bytes_per_col;
+	u32 bytes_per_column;
 
 	switch (fmt) {
 	/* 8 lines at 4 bytes per pixel */
@@ -637,19 +718,77 @@ static int malidp550_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16
 	case DRM_FORMAT_UYVY:
 	case DRM_FORMAT_YUYV:
 	case DRM_FORMAT_X0L0:
-	case DRM_FORMAT_X0L2:
-		bytes_per_col = 32;
+		bytes_per_column = 32;
 		break;
 	/* 16 lines at 1.5 bytes per pixel */
 	case DRM_FORMAT_NV12:
 	case DRM_FORMAT_YUV420:
-		bytes_per_col = 24;
+	/* 8 lines at 3 bytes per pixel */
+	case DRM_FORMAT_VUY888:
+	/* 16 lines at 12 bits per pixel */
+	case DRM_FORMAT_YUV420_8BIT:
+	/* 8 lines at 3 bytes per pixel */
+	case DRM_FORMAT_P010:
+		bytes_per_column = 24;
+		break;
+	/* 8 lines at 30 bits per pixel */
+	case DRM_FORMAT_VUY101010:
+	/* 16 lines at 15 bits per pixel */
+	case DRM_FORMAT_YUV420_10BIT:
+		bytes_per_column = 30;
 		break;
 	default:
 		return -EINVAL;
 	}
 
-	return w * bytes_per_col;
+	return bytes_per_column;
+}
+
+static int malidp550_rotmem_required(struct malidp_hw_device *hwdev, u16 w,
+				     u16 h, u32 fmt, bool has_modifier)
+{
+	int bytes_per_column = 0;
+
+	switch (fmt) {
+	/* 8 lines at 15 bits per pixel */
+	case DRM_FORMAT_YUV420_10BIT:
+		bytes_per_column = 15;
+		break;
+	/* Uncompressed YUV 420 10 bit single plane cannot be rotated */
+	case DRM_FORMAT_X0L2:
+		if (has_modifier)
+			bytes_per_column = 8;
+		else
+			return -EINVAL;
+		break;
+	default:
+		bytes_per_column = malidpx50_get_bytes_per_column(fmt);
+	}
+
+	if (bytes_per_column == -EINVAL)
+		return bytes_per_column;
+
+	return w * bytes_per_column;
+}
+
+static int malidp650_rotmem_required(struct malidp_hw_device *hwdev, u16 w,
+				     u16 h, u32 fmt, bool has_modifier)
+{
+	int bytes_per_column = 0;
+
+	switch (fmt) {
+	/* 16 lines at 2 bytes per pixel */
+	case DRM_FORMAT_X0L2:
+		bytes_per_column = 32;
+		break;
+	default:
+		bytes_per_column = malidpx50_get_bytes_per_column(fmt);
+	}
+
+	if (bytes_per_column == -EINVAL)
+		return bytes_per_column;
+
+	return w * bytes_per_column;
 }
 
 static int malidp550_se_set_scaling_coeffs(struct malidp_hw_device *hwdev,
@@ -838,7 +977,10 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
 		.se_base = MALIDP550_SE_BASE,
 		.dc_base = MALIDP550_DC_BASE,
 		.out_depth_base = MALIDP550_DE_OUTPUT_DEPTH,
-		.features = MALIDP_REGMAP_HAS_CLEARIRQ,
+		.features = MALIDP_REGMAP_HAS_CLEARIRQ |
+			    MALIDP_DEVICE_AFBC_SUPPORT_SPLIT |
+			    MALIDP_DEVICE_AFBC_YUV_420_10_SUPPORT_SPLIT |
+			    MALIDP_DEVICE_AFBC_YUYV_USE_422_P2,
 		.n_layers = ARRAY_SIZE(malidp550_layers),
 		.layers = malidp550_layers,
 		.de_irq_map = {
@@ -884,7 +1026,9 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
 		.se_base = MALIDP550_SE_BASE,
 		.dc_base = MALIDP550_DC_BASE,
 		.out_depth_base = MALIDP550_DE_OUTPUT_DEPTH,
-		.features = MALIDP_REGMAP_HAS_CLEARIRQ,
+		.features = MALIDP_REGMAP_HAS_CLEARIRQ |
+			    MALIDP_DEVICE_AFBC_SUPPORT_SPLIT |
+			    MALIDP_DEVICE_AFBC_YUYV_USE_422_P2,
 		.n_layers = ARRAY_SIZE(malidp650_layers),
 		.layers = malidp650_layers,
 		.de_irq_map = {
@@ -923,7 +1067,7 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
 		.in_config_mode = malidp550_in_config_mode,
 		.set_config_valid = malidp550_set_config_valid,
 		.modeset = malidp550_modeset,
-		.rotmem_required = malidp550_rotmem_required,
+		.rotmem_required = malidp650_rotmem_required,
 		.se_set_scaling_coeffs = malidp550_se_set_scaling_coeffs,
 		.se_calc_mclk = malidp550_se_calc_mclk,
 		.enable_memwrite = malidp550_enable_memwrite,
@@ -933,19 +1077,72 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
 };
 
 u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map,
-			   u8 layer_id, u32 format)
+			   u8 layer_id, u32 format, bool has_modifier)
 {
 	unsigned int i;
 
 	for (i = 0; i < map->n_pixel_formats; i++) {
 		if (((map->pixel_formats[i].layer & layer_id) == layer_id) &&
-		    (map->pixel_formats[i].format == format))
-			return map->pixel_formats[i].id;
+		    (map->pixel_formats[i].format == format)) {
+			/*
+			 * In some DP550 and DP650, DRM_FORMAT_YUYV + AFBC modifier
+			 * is supported by a different h/w format id than
+			 * DRM_FORMAT_YUYV (only).
+			 */
+			if (format == DRM_FORMAT_YUYV &&
+			    (has_modifier) &&
+			    (map->features & MALIDP_DEVICE_AFBC_YUYV_USE_422_P2))
+				return AFBC_YUV_422_FORMAT_ID;
+			else
+				return map->pixel_formats[i].id;
+		}
 	}
 
 	return MALIDP_INVALID_FORMAT_ID;
 }
 
+bool malidp_hw_format_is_linear_only(u32 format)
+{
+	switch (format) {
+	case DRM_FORMAT_ARGB2101010:
+	case DRM_FORMAT_RGBA1010102:
+	case DRM_FORMAT_BGRA1010102:
+	case DRM_FORMAT_ARGB8888:
+	case DRM_FORMAT_RGBA8888:
+	case DRM_FORMAT_BGRA8888:
+	case DRM_FORMAT_XBGR8888:
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_RGBX8888:
+	case DRM_FORMAT_BGRX8888:
+	case DRM_FORMAT_RGB888:
+	case DRM_FORMAT_RGB565:
+	case DRM_FORMAT_ARGB1555:
+	case DRM_FORMAT_RGBA5551:
+	case DRM_FORMAT_BGRA5551:
+	case DRM_FORMAT_UYVY:
+	case DRM_FORMAT_XYUV8888:
+	case DRM_FORMAT_XVYU2101010:
+	case DRM_FORMAT_X0L2:
+	case DRM_FORMAT_X0L0:
+		return true;
+	default:
+		return false;
+	}
+}
+
+bool malidp_hw_format_is_afbc_only(u32 format)
+{
+	switch (format) {
+	case DRM_FORMAT_VUY888:
+	case DRM_FORMAT_VUY101010:
+	case DRM_FORMAT_YUV420_8BIT:
+	case DRM_FORMAT_YUV420_10BIT:
+		return true;
+	default:
+		return false;
+	}
+}
+
 static void malidp_hw_clear_irq(struct malidp_hw_device *hwdev, u8 block, u32 irq)
 {
 	u32 base = malidp_get_block_base(hwdev, block);
diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h
index 40155e2ea9d9..207c3ce52f1a 100644
--- a/drivers/gpu/drm/arm/malidp_hw.h
+++ b/drivers/gpu/drm/arm/malidp_hw.h
@@ -70,6 +70,8 @@ struct malidp_layer {
 	s16 yuv2rgb_offset;	/* offset to the YUV->RGB matrix entries */
 	u16 mmu_ctrl_offset;	/* offset to the MMU control register */
 	enum rotation_features rot;	/* type of rotation supported */
+	/* address offset for the AFBC decoder registers */
+	u16 afbc_decoder_offset;
 };
 
 enum malidp_scaling_coeff_set {
74 76
75enum malidp_scaling_coeff_set { 77enum malidp_scaling_coeff_set {
@@ -93,7 +95,10 @@ struct malidp_se_config {
 };
 
 /* regmap features */
-#define MALIDP_REGMAP_HAS_CLEARIRQ	(1 << 0)
+#define MALIDP_REGMAP_HAS_CLEARIRQ				BIT(0)
+#define MALIDP_DEVICE_AFBC_SUPPORT_SPLIT			BIT(1)
+#define MALIDP_DEVICE_AFBC_YUV_420_10_SUPPORT_SPLIT		BIT(2)
+#define MALIDP_DEVICE_AFBC_YUYV_USE_422_P2			BIT(3)
 
 struct malidp_hw_regmap {
 	/* address offset of the DE register bank */
@@ -179,7 +184,8 @@ struct malidp_hw {
 	 * Calculate the required rotation memory given the active area
 	 * and the buffer format.
 	 */
-	int (*rotmem_required)(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt);
+	int (*rotmem_required)(struct malidp_hw_device *hwdev, u16 w, u16 h,
+			       u32 fmt, bool has_modifier);
 
 	int (*se_set_scaling_coeffs)(struct malidp_hw_device *hwdev,
 				     struct malidp_se_config *se_config,
@@ -319,7 +325,9 @@ int malidp_se_irq_init(struct drm_device *drm, int irq);
 void malidp_se_irq_fini(struct malidp_hw_device *hwdev);
 
 u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map,
-			   u8 layer_id, u32 format);
+			   u8 layer_id, u32 format, bool has_modifier);
+
+int malidp_format_get_bpp(u32 fmt);
 
 static inline u8 malidp_hw_get_pitch_align(struct malidp_hw_device *hwdev, bool rotated)
 {
@@ -388,9 +396,18 @@ static inline void malidp_se_set_enh_coeffs(struct malidp_hw_device *hwdev)
 
 #define MALIDP_GAMMA_LUT_SIZE		4096
 
-#define AFBC_MOD_VALID_BITS (AFBC_FORMAT_MOD_BLOCK_SIZE_MASK | \
-			AFBC_FORMAT_MOD_YTR | AFBC_FORMAT_MOD_SPLIT | \
-			AFBC_FORMAT_MOD_SPARSE | AFBC_FORMAT_MOD_CBR | \
-			AFBC_FORMAT_MOD_TILED | AFBC_FORMAT_MOD_SC)
+#define AFBC_SIZE_MASK		AFBC_FORMAT_MOD_BLOCK_SIZE_MASK
+#define AFBC_SIZE_16X16		AFBC_FORMAT_MOD_BLOCK_SIZE_16x16
+#define AFBC_YTR		AFBC_FORMAT_MOD_YTR
+#define AFBC_SPARSE		AFBC_FORMAT_MOD_SPARSE
+#define AFBC_CBR		AFBC_FORMAT_MOD_CBR
+#define AFBC_SPLIT		AFBC_FORMAT_MOD_SPLIT
+#define AFBC_TILED		AFBC_FORMAT_MOD_TILED
+#define AFBC_SC			AFBC_FORMAT_MOD_SC
+
+#define AFBC_MOD_VALID_BITS	(AFBC_SIZE_MASK | AFBC_YTR | AFBC_SPLIT | \
+				 AFBC_SPARSE | AFBC_CBR | AFBC_TILED | AFBC_SC)
+
+extern const u64 malidp_format_modifiers[];
 
 #endif  /* __MALIDP_HW_H__ */
diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c
index 87627219ce3b..5f102bdaf841 100644
--- a/drivers/gpu/drm/arm/malidp_mw.c
+++ b/drivers/gpu/drm/arm/malidp_mw.c
@@ -141,9 +141,14 @@ malidp_mw_encoder_atomic_check(struct drm_encoder *encoder,
 		return -EINVAL;
 	}
 
+	if (fb->modifier) {
+		DRM_DEBUG_KMS("Writeback framebuffer does not support modifiers\n");
+		return -EINVAL;
+	}
+
 	mw_state->format =
 		malidp_hw_get_format_id(&malidp->dev->hw->map, SE_MEMWRITE,
-					fb->format->format);
+					fb->format->format, !!fb->modifier);
 	if (mw_state->format == MALIDP_INVALID_FORMAT_ID) {
 		struct drm_format_name_buf format_name;
 
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index c9a6d3e0cada..d42e0ea9a303 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -52,6 +52,8 @@
 #define   MALIDP550_LS_ENABLE		0x01c
 #define   MALIDP550_LS_R1_IN_SIZE	0x020
 
+#define MODIFIERS_COUNT_MAX		15
+
 /*
  * This 4-entry look-up-table is used to determine the full 8-bit alpha value
  * for formats with 1- or 2-bit alpha channels.
@@ -145,6 +147,119 @@ static void malidp_plane_atomic_print_state(struct drm_printer *p,
 	drm_printf(p, "\tmmu_prefetch_pgsize=%d\n", ms->mmu_prefetch_pgsize);
 }
 
+bool malidp_format_mod_supported(struct drm_device *drm,
+				 u32 format, u64 modifier)
+{
+	const struct drm_format_info *info;
+	const u64 *modifiers;
+	struct malidp_drm *malidp = drm->dev_private;
+	const struct malidp_hw_regmap *map = &malidp->dev->hw->map;
+
+	if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID))
+		return false;
+
+	/* Some pixel formats are supported without any modifier */
+	if (modifier == DRM_FORMAT_MOD_LINEAR) {
+		/*
+		 * However these pixel formats need to be supported with
+		 * modifiers only
+		 */
+		return !malidp_hw_format_is_afbc_only(format);
+	}
+
+	if ((modifier >> 56) != DRM_FORMAT_MOD_VENDOR_ARM) {
+		DRM_ERROR("Unknown modifier (not Arm)\n");
+		return false;
+	}
+
+	if (modifier &
+	    ~DRM_FORMAT_MOD_ARM_AFBC(AFBC_MOD_VALID_BITS)) {
+		DRM_DEBUG_KMS("Unsupported modifiers\n");
+		return false;
+	}
+
+	modifiers = malidp_format_modifiers;
+
+	/* SPLIT buffers must use SPARSE layout */
+	if (WARN_ON_ONCE((modifier & AFBC_SPLIT) && !(modifier & AFBC_SPARSE)))
+		return false;
+
+	/* CBR only applies to YUV formats, where YTR should always be 0 */
+	if (WARN_ON_ONCE((modifier & AFBC_CBR) && (modifier & AFBC_YTR)))
+		return false;
+
+	while (*modifiers != DRM_FORMAT_MOD_INVALID) {
+		if (*modifiers == modifier)
+			break;
+
+		modifiers++;
+	}
+
+	/* return false, if the modifier was not found */
+	if (*modifiers == DRM_FORMAT_MOD_INVALID) {
+		DRM_DEBUG_KMS("Unsupported modifier\n");
+		return false;
+	}
+
+	info = drm_format_info(format);
+
+	if (info->num_planes != 1) {
+		DRM_DEBUG_KMS("AFBC buffers expect one plane\n");
+		return false;
+	}
+
+	if (malidp_hw_format_is_linear_only(format) == true) {
+		DRM_DEBUG_KMS("Given format (0x%x) is supported in linear mode only\n",
+			      format);
+		return false;
+	}
+
+	/*
+	 * RGB formats need to provide YTR modifier and YUV formats should not
+	 * provide YTR modifier.
+	 */
+	if (!(info->is_yuv) != !!(modifier & AFBC_FORMAT_MOD_YTR)) {
+		DRM_DEBUG_KMS("AFBC_FORMAT_MOD_YTR is %s for %s formats\n",
+			      info->is_yuv ? "disallowed" : "mandatory",
+			      info->is_yuv ? "YUV" : "RGB");
+		return false;
+	}
+
+	if (modifier & AFBC_SPLIT) {
+		if (!info->is_yuv) {
+			if (drm_format_plane_cpp(format, 0) <= 2) {
+				DRM_DEBUG_KMS("RGB formats <= 16bpp are not supported with SPLIT\n");
+				return false;
+			}
+		}
+
+		if ((drm_format_horz_chroma_subsampling(format) != 1) ||
+		    (drm_format_vert_chroma_subsampling(format) != 1)) {
+			if (!(format == DRM_FORMAT_YUV420_10BIT &&
+			      (map->features & MALIDP_DEVICE_AFBC_YUV_420_10_SUPPORT_SPLIT))) {
+				DRM_DEBUG_KMS("Formats which are sub-sampled should never be split\n");
+				return false;
+			}
+		}
+	}
+
+	if (modifier & AFBC_CBR) {
+		if ((drm_format_horz_chroma_subsampling(format) == 1) ||
+		    (drm_format_vert_chroma_subsampling(format) == 1)) {
+			DRM_DEBUG_KMS("Formats which are not sub-sampled should not have CBR set\n");
+			return false;
+		}
+	}
+
+	return true;
+}
+
+static bool malidp_format_mod_supported_per_plane(struct drm_plane *plane,
+						  u32 format, u64 modifier)
+{
+	return malidp_format_mod_supported(plane->dev, format, modifier);
+}
+
 static const struct drm_plane_funcs malidp_de_plane_funcs = {
 	.update_plane = drm_atomic_helper_update_plane,
 	.disable_plane = drm_atomic_helper_disable_plane,
@@ -153,6 +268,7 @@ static const struct drm_plane_funcs malidp_de_plane_funcs = {
 	.atomic_duplicate_state = malidp_duplicate_plane_state,
 	.atomic_destroy_state = malidp_destroy_plane_state,
 	.atomic_print_state = malidp_plane_atomic_print_state,
+	.format_mod_supported = malidp_format_mod_supported_per_plane,
 };
 
 static int malidp_se_check_scaling(struct malidp_plane *mp,
@@ -406,8 +522,8 @@ static int malidp_de_plane_check(struct drm_plane *plane,
 	fb = state->fb;
 
 	ms->format = malidp_hw_get_format_id(&mp->hwdev->hw->map,
-					     mp->layer->id,
-					     fb->format->format);
+					     mp->layer->id, fb->format->format,
+					     !!fb->modifier);
 	if (ms->format == MALIDP_INVALID_FORMAT_ID)
 		return -EINVAL;
 
@@ -415,8 +531,8 @@ static int malidp_de_plane_check(struct drm_plane *plane,
 	for (i = 0; i < ms->n_planes; i++) {
 		u8 alignment = malidp_hw_get_pitch_align(mp->hwdev, rotated);
 
-		if ((fb->pitches[i] * drm_format_info_block_height(fb->format, i))
-				& (alignment - 1)) {
+		if (((fb->pitches[i] * drm_format_info_block_height(fb->format, i))
+				& (alignment - 1)) && !(fb->modifier)) {
 			DRM_DEBUG_KMS("Invalid pitch %u for plane %d\n",
 				      fb->pitches[i], i);
 			return -EINVAL;
@@ -469,13 +585,20 @@ static int malidp_de_plane_check(struct drm_plane *plane,
 		return -EINVAL;
 	}
 
+	/* SMART layer does not support AFBC */
+	if (mp->layer->id == DE_SMART && fb->modifier) {
+		DRM_ERROR("AFBC framebuffer not supported in SMART layer");
+		return -EINVAL;
+	}
+
 	ms->rotmem_size = 0;
 	if (state->rotation & MALIDP_ROTATED_MASK) {
 		int val;
 
 		val = mp->hwdev->hw->rotmem_required(mp->hwdev, state->crtc_w,
 						     state->crtc_h,
-						     fb->format->format);
+						     fb->format->format,
+						     !!(fb->modifier));
 		if (val < 0)
 			return val;
 
@@ -592,6 +715,83 @@ static void malidp_de_set_mmu_control(struct malidp_plane *mp,
 			 mp->layer->base + mp->layer->mmu_ctrl_offset);
 }
 
+static void malidp_set_plane_base_addr(struct drm_framebuffer *fb,
+				       struct malidp_plane *mp,
+				       int plane_index)
+{
+	dma_addr_t paddr;
+	u16 ptr;
+	struct drm_plane *plane = &mp->base;
+	bool afbc = fb->modifier ? true : false;
+
+	ptr = mp->layer->ptr + (plane_index << 4);
+
+	/*
+	 * drm_fb_cma_get_gem_addr() alters the physical base address of the
+	 * framebuffer as per the plane's src_x, src_y co-ordinates (ie to
+	 * take care of source cropping).
+	 * For AFBC, this is not needed as the cropping is handled by _AD_CROP_H
+	 * and _AD_CROP_V registers.
+	 */
+	if (!afbc) {
+		paddr = drm_fb_cma_get_gem_addr(fb, plane->state,
+						plane_index);
+	} else {
+		struct drm_gem_cma_object *obj;
+
+		obj = drm_fb_cma_get_gem_obj(fb, plane_index);
+
+		if (WARN_ON(!obj))
+			return;
+		paddr = obj->paddr;
+	}
+
+	malidp_hw_write(mp->hwdev, lower_32_bits(paddr), ptr);
+	malidp_hw_write(mp->hwdev, upper_32_bits(paddr), ptr + 4);
+}
+
+static void malidp_de_set_plane_afbc(struct drm_plane *plane)
+{
+	struct malidp_plane *mp;
+	u32 src_w, src_h, val = 0, src_x, src_y;
+	struct drm_framebuffer *fb = plane->state->fb;
+
+	mp = to_malidp_plane(plane);
+
+	/* no afbc_decoder_offset means AFBC is not supported on this plane */
+	if (!mp->layer->afbc_decoder_offset)
+		return;
+
+	if (!fb->modifier) {
+		malidp_hw_write(mp->hwdev, 0, mp->layer->afbc_decoder_offset);
+		return;
+	}
+
+	/* convert src values from Q16 fixed point to integer */
+	src_w = plane->state->src_w >> 16;
+	src_h = plane->state->src_h >> 16;
+	src_x = plane->state->src_x >> 16;
+	src_y = plane->state->src_y >> 16;
+
+	val = ((fb->width - (src_x + src_w)) << MALIDP_AD_CROP_RIGHT_OFFSET) |
+	      src_x;
+	malidp_hw_write(mp->hwdev, val,
+			mp->layer->afbc_decoder_offset + MALIDP_AD_CROP_H);
+
+	val = ((fb->height - (src_y + src_h)) << MALIDP_AD_CROP_BOTTOM_OFFSET) |
+	      src_y;
+	malidp_hw_write(mp->hwdev, val,
+			mp->layer->afbc_decoder_offset + MALIDP_AD_CROP_V);
+
+	val = MALIDP_AD_EN;
+	if (fb->modifier & AFBC_FORMAT_MOD_SPLIT)
+		val |= MALIDP_AD_BS;
+	if (fb->modifier & AFBC_FORMAT_MOD_YTR)
+		val |= MALIDP_AD_YTR;
+
+	malidp_hw_write(mp->hwdev, val, mp->layer->afbc_decoder_offset);
+}
+
 static void malidp_de_plane_update(struct drm_plane *plane,
 				   struct drm_plane_state *old_state)
 {
@@ -602,12 +802,23 @@ static void malidp_de_plane_update(struct drm_plane *plane,
 	u8 plane_alpha = state->alpha >> 8;
 	u32 src_w, src_h, dest_w, dest_h, val;
 	int i;
+	struct drm_framebuffer *fb = plane->state->fb;
 
 	mp = to_malidp_plane(plane);
 
-	/* convert src values from Q16 fixed point to integer */
-	src_w = state->src_w >> 16;
-	src_h = state->src_h >> 16;
+	/*
+	 * For AFBC framebuffer, use the framebuffer width and height for
+	 * configuring layer input size register.
+	 */
+	if (fb->modifier) {
+		src_w = fb->width;
+		src_h = fb->height;
+	} else {
+		/* convert src values from Q16 fixed point to integer */
+		src_w = state->src_w >> 16;
+		src_h = state->src_h >> 16;
+	}
+
 	dest_w = state->crtc_w;
 	dest_h = state->crtc_h;
 
@@ -615,15 +826,8 @@ static void malidp_de_plane_update(struct drm_plane *plane,
 	val = (val & ~LAYER_FORMAT_MASK) | ms->format;
 	malidp_hw_write(mp->hwdev, val, mp->layer->base);
 
-	for (i = 0; i < ms->n_planes; i++) {
-		/* calculate the offset for the layer's plane registers */
-		u16 ptr = mp->layer->ptr + (i << 4);
-		dma_addr_t fb_addr = drm_fb_cma_get_gem_addr(state->fb,
-							     state, i);
-
-		malidp_hw_write(mp->hwdev, lower_32_bits(fb_addr), ptr);
-		malidp_hw_write(mp->hwdev, upper_32_bits(fb_addr), ptr + 4);
-	}
+	for (i = 0; i < ms->n_planes; i++)
+		malidp_set_plane_base_addr(fb, mp, i);
 
 	malidp_de_set_mmu_control(mp, ms);
 
@@ -657,6 +861,8 @@ static void malidp_de_plane_update(struct drm_plane *plane,
 			mp->layer->base + MALIDP550_LS_R1_IN_SIZE);
 	}
 
+	malidp_de_set_plane_afbc(plane);
+
 	/* first clear the rotation bits */
 	val = malidp_hw_read(mp->hwdev, mp->layer->base + MALIDP_LAYER_CONTROL);
 	val &= ~LAYER_ROT_MASK;
@@ -733,7 +939,26 @@ int malidp_de_planes_init(struct drm_device *drm)
 			BIT(DRM_MODE_BLEND_PREMULTI)   |
 			BIT(DRM_MODE_BLEND_COVERAGE);
 	u32 *formats;
-	int ret, i, j, n;
+	int ret, i = 0, j = 0, n;
+	u64 supported_modifiers[MODIFIERS_COUNT_MAX];
+	const u64 *modifiers;
+
+	modifiers = malidp_format_modifiers;
+
+	if (!(map->features & MALIDP_DEVICE_AFBC_SUPPORT_SPLIT)) {
+		/*
+		 * The hardware does not support SPLIT, so build the list
+		 * of supported modifiers excluding the SPLIT ones.
+		 */
+		while (*modifiers != DRM_FORMAT_MOD_INVALID) {
+			if (!(*modifiers & AFBC_SPLIT))
+				supported_modifiers[j++] = *modifiers;
+
+			modifiers++;
+		}
+		supported_modifiers[j++] = DRM_FORMAT_MOD_INVALID;
+		modifiers = supported_modifiers;
+	}
 
 	formats = kcalloc(map->n_pixel_formats, sizeof(*formats), GFP_KERNEL);
 	if (!formats) {
@@ -758,9 +983,15 @@ int malidp_de_planes_init(struct drm_device *drm)
 
 		plane_type = (i == 0) ? DRM_PLANE_TYPE_PRIMARY :
 					DRM_PLANE_TYPE_OVERLAY;
+
+		/*
+		 * All layers except the smart layer support AFBC modifiers.
+		 */
 		ret = drm_universal_plane_init(drm, &plane->base, crtcs,
-					       &malidp_de_plane_funcs, formats,
-					       n, NULL, plane_type, NULL);
+				&malidp_de_plane_funcs, formats, n,
+				(id == DE_SMART) ? NULL : modifiers, plane_type,
+				NULL);
+
 		if (ret < 0)
 			goto cleanup;
 
diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h
index 7ce3e141464d..a0dd6e1676a8 100644
--- a/drivers/gpu/drm/arm/malidp_regs.h
+++ b/drivers/gpu/drm/arm/malidp_regs.h
@@ -198,10 +198,13 @@
 #define MALIDP500_LV_YUV2RGB		((s16)(-0xB8))
 #define MALIDP500_DE_LV_BASE		0x00100
 #define MALIDP500_DE_LV_PTR_BASE	0x00124
+#define MALIDP500_DE_LV_AD_CTRL		0x00400
 #define MALIDP500_DE_LG1_BASE		0x00200
 #define MALIDP500_DE_LG1_PTR_BASE	0x0021c
+#define MALIDP500_DE_LG1_AD_CTRL	0x0040c
 #define MALIDP500_DE_LG2_BASE		0x00300
 #define MALIDP500_DE_LG2_PTR_BASE	0x0031c
+#define MALIDP500_DE_LG2_AD_CTRL	0x00418
 #define MALIDP500_SE_BASE		0x00c00
 #define MALIDP500_SE_CONTROL		0x00c0c
 #define MALIDP500_SE_MEMWRITE_OUT_SIZE	0x00c2c
@@ -228,10 +231,13 @@
 #define MALIDP550_LV_YUV2RGB		0x00084
 #define MALIDP550_DE_LV1_BASE		0x00100
 #define MALIDP550_DE_LV1_PTR_BASE	0x00124
+#define MALIDP550_DE_LV1_AD_CTRL	0x001B8
 #define MALIDP550_DE_LV2_BASE		0x00200
 #define MALIDP550_DE_LV2_PTR_BASE	0x00224
+#define MALIDP550_DE_LV2_AD_CTRL	0x002B8
 #define MALIDP550_DE_LG_BASE		0x00300
 #define MALIDP550_DE_LG_PTR_BASE	0x0031c
+#define MALIDP550_DE_LG_AD_CTRL		0x00330
 #define MALIDP550_DE_LS_BASE		0x00400
 #define MALIDP550_DE_LS_PTR_BASE	0x0042c
 #define MALIDP550_DE_PERF_BASE		0x00500
@@ -258,6 +264,20 @@
258#define MALIDP_MMU_CTRL_PX_PS(x) (1 << (8 + (x))) 264#define MALIDP_MMU_CTRL_PX_PS(x) (1 << (8 + (x)))
259#define MALIDP_MMU_CTRL_PP_NUM_REQ(x) (((x) & 0x7f) << 12) 265#define MALIDP_MMU_CTRL_PP_NUM_REQ(x) (((x) & 0x7f) << 12)
260 266
267/* AFBC register offsets relative to MALIDPXXX_DE_LX_AD_CTRL */
268/* The following register offsets are common for DP500, DP550 and DP650 */
269#define MALIDP_AD_CROP_H 0x4
270#define MALIDP_AD_CROP_V 0x8
271#define MALIDP_AD_END_PTR_LOW 0xc
272#define MALIDP_AD_END_PTR_HIGH 0x10
273
274/* AFBC decoder Registers */
275#define MALIDP_AD_EN BIT(0)
276#define MALIDP_AD_YTR BIT(4)
277#define MALIDP_AD_BS BIT(8)
278#define MALIDP_AD_CROP_RIGHT_OFFSET 16
279#define MALIDP_AD_CROP_BOTTOM_OFFSET 16
280
261/* 281/*
262 * Starting with DP550 the register map blocks has been standardised to the 282 * Starting with DP550 the register map blocks has been standardised to the
263 * following layout: 283 * following layout:
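For orientation, a hedged sketch of how these offsets and bits are meant to combine when programming a per-layer AFBC decoder block; malidp_hw_write() is the driver's existing register accessor, and the crop values are illustrative:

/* Sketch only: enable AFBC decoding on one layer and set its crop. */
static void sketch_afbc_program(struct malidp_hw_device *hwdev,
				u32 ad_ctrl_base, u32 crop_right, u32 crop_bottom)
{
	/* whether MALIDP_AD_YTR (YUV transform) applies depends on the format */
	malidp_hw_write(hwdev, MALIDP_AD_EN | MALIDP_AD_YTR, ad_ctrl_base);
	malidp_hw_write(hwdev, crop_right << MALIDP_AD_CROP_RIGHT_OFFSET,
			ad_ctrl_base + MALIDP_AD_CROP_H);
	malidp_hw_write(hwdev, crop_bottom << MALIDP_AD_CROP_BOTTOM_OFFSET,
			ad_ctrl_base + MALIDP_AD_CROP_V);
}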
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 9cd82e3631fb..9e7cd6b34106 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -214,20 +214,9 @@ static enum drm_mode_status bochs_connector_mode_valid(struct drm_connector *con
214 return MODE_OK; 214 return MODE_OK;
215} 215}
216 216
217static struct drm_encoder *
218bochs_connector_best_encoder(struct drm_connector *connector)
219{
220 int enc_id = connector->encoder_ids[0];
221 /* pick the encoder ids */
222 if (enc_id)
223 return drm_encoder_find(connector->dev, NULL, enc_id);
224 return NULL;
225}
226
227static const struct drm_connector_helper_funcs bochs_connector_connector_helper_funcs = { 217static const struct drm_connector_helper_funcs bochs_connector_connector_helper_funcs = {
228 .get_modes = bochs_connector_get_modes, 218 .get_modes = bochs_connector_get_modes,
229 .mode_valid = bochs_connector_mode_valid, 219 .mode_valid = bochs_connector_mode_valid,
230 .best_encoder = bochs_connector_best_encoder,
231}; 220};
232 221
233static const struct drm_connector_funcs bochs_connector_connector_funcs = { 222static const struct drm_connector_funcs bochs_connector_connector_funcs = {
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index dc8ae98071b4..86efd2da37f9 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1752,7 +1752,7 @@ int drm_atomic_helper_commit(struct drm_device *dev,
1752 * 1752 *
1753 * NOTE: Commit work has multiple phases, first hardware commit, then 1753 * NOTE: Commit work has multiple phases, first hardware commit, then
1754 * cleanup. We want them to overlap, hence need system_unbound_wq to 1754 * cleanup. We want them to overlap, hence need system_unbound_wq to
1755 * make sure work items don't artifically stall on each another. 1755 * make sure work items don't artificially stall on one another.
1756 */ 1756 */
1757 1757
1758 drm_atomic_state_get(state); 1758 drm_atomic_state_get(state);
@@ -1786,7 +1786,7 @@ EXPORT_SYMBOL(drm_atomic_helper_commit);
1786 * 1786 *
1787 * Asynchronous workers need to have sufficient parallelism to be able to run 1787 * Asynchronous workers need to have sufficient parallelism to be able to run
1788 * different atomic commits on different CRTCs in parallel. The simplest way to 1788 * different atomic commits on different CRTCs in parallel. The simplest way to
1789 * achive this is by running them on the &system_unbound_wq work queue. Note 1789 * achieve this is by running them on the &system_unbound_wq work queue. Note
1790 * that drivers are not required to split up atomic commits and run an 1790 * that drivers are not required to split up atomic commits and run an
1791 * individual commit in parallel - userspace is supposed to do that if it cares. 1791 * individual commit in parallel - userspace is supposed to do that if it cares.
1792 * But it might be beneficial to do that for modesets, since those necessarily 1792 * But it might be beneficial to do that for modesets, since those necessarily
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index 8fa77def577f..ea797d4c82ee 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -733,6 +733,8 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
733 return -EINVAL; 733 return -EINVAL;
734 } 734 }
735 state->content_protection = val; 735 state->content_protection = val;
736 } else if (property == connector->colorspace_property) {
737 state->colorspace = val;
736 } else if (property == config->writeback_fb_id_property) { 738 } else if (property == config->writeback_fb_id_property) {
737 struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val); 739 struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val);
738 int ret = drm_atomic_set_writeback_fb_for_connector(state, fb); 740 int ret = drm_atomic_set_writeback_fb_for_connector(state, fb);
@@ -801,6 +803,8 @@ drm_atomic_connector_get_property(struct drm_connector *connector,
801 *val = state->picture_aspect_ratio; 803 *val = state->picture_aspect_ratio;
802 } else if (property == config->content_type_property) { 804 } else if (property == config->content_type_property) {
803 *val = state->content_type; 805 *val = state->content_type;
806 } else if (property == connector->colorspace_property) {
807 *val = state->colorspace;
804 } else if (property == connector->scaling_mode_property) { 808 } else if (property == connector->scaling_mode_property) {
805 *val = state->scaling_mode; 809 *val = state->scaling_mode;
806 } else if (property == connector->content_protection_property) { 810 } else if (property == connector->content_protection_property) {
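From userspace, the new connector property is driven through the standard property interface. A hedged libdrm sketch, assuming the "Colorspace" property id and the desired enum value were already discovered via drmModeObjectGetProperties()/drmModeGetProperty():

#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Sketch only: request e.g. the "BT2020_RGB" enum value on a connector. */
static int set_colorspace(int fd, uint32_t connector_id,
			  uint32_t prop_id, uint64_t enum_value)
{
	return drmModeObjectSetProperty(fd, connector_id,
					DRM_MODE_OBJECT_CONNECTOR,
					prop_id, enum_value);
}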
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index dd40eff0911c..2355124849db 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -245,6 +245,7 @@ int drm_connector_init(struct drm_device *dev,
245 INIT_LIST_HEAD(&connector->modes); 245 INIT_LIST_HEAD(&connector->modes);
246 mutex_init(&connector->mutex); 246 mutex_init(&connector->mutex);
247 connector->edid_blob_ptr = NULL; 247 connector->edid_blob_ptr = NULL;
248 connector->tile_blob_ptr = NULL;
248 connector->status = connector_status_unknown; 249 connector->status = connector_status_unknown;
249 connector->display_info.panel_orientation = 250 connector->display_info.panel_orientation =
250 DRM_MODE_PANEL_ORIENTATION_UNKNOWN; 251 DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
@@ -272,6 +273,9 @@ int drm_connector_init(struct drm_device *dev,
272 drm_object_attach_property(&connector->base, 273 drm_object_attach_property(&connector->base,
273 config->non_desktop_property, 274 config->non_desktop_property,
274 0); 275 0);
276 drm_object_attach_property(&connector->base,
277 config->tile_property,
278 0);
275 279
276 if (drm_core_check_feature(dev, DRIVER_ATOMIC)) { 280 if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
277 drm_object_attach_property(&connector->base, config->prop_crtc_id, 0); 281 drm_object_attach_property(&connector->base, config->prop_crtc_id, 0);
@@ -826,6 +830,33 @@ static struct drm_prop_enum_list drm_cp_enum_list[] = {
826}; 830};
827DRM_ENUM_NAME_FN(drm_get_content_protection_name, drm_cp_enum_list) 831DRM_ENUM_NAME_FN(drm_get_content_protection_name, drm_cp_enum_list)
828 832
833static const struct drm_prop_enum_list hdmi_colorspaces[] = {
834 /* For Default case, driver will set the colorspace */
835 { DRM_MODE_COLORIMETRY_DEFAULT, "Default" },
836 /* Standard Definition Colorimetry based on CEA 861 */
837 { DRM_MODE_COLORIMETRY_SMPTE_170M_YCC, "SMPTE_170M_YCC" },
838 { DRM_MODE_COLORIMETRY_BT709_YCC, "BT709_YCC" },
839 /* Standard Definition Colorimetry based on IEC 61966-2-4 */
840 { DRM_MODE_COLORIMETRY_XVYCC_601, "XVYCC_601" },
841 /* High Definition Colorimetry based on IEC 61966-2-4 */
842 { DRM_MODE_COLORIMETRY_XVYCC_709, "XVYCC_709" },
843 /* Colorimetry based on IEC 61966-2-1/Amendment 1 */
844 { DRM_MODE_COLORIMETRY_SYCC_601, "SYCC_601" },
845 /* Colorimetry based on IEC 61966-2-5 [33] */
846 { DRM_MODE_COLORIMETRY_OPYCC_601, "opYCC_601" },
847 /* Colorimetry based on IEC 61966-2-5 */
848 { DRM_MODE_COLORIMETRY_OPRGB, "opRGB" },
849 /* Colorimetry based on ITU-R BT.2020 */
850 { DRM_MODE_COLORIMETRY_BT2020_CYCC, "BT2020_CYCC" },
851 /* Colorimetry based on ITU-R BT.2020 */
852 { DRM_MODE_COLORIMETRY_BT2020_RGB, "BT2020_RGB" },
853 /* Colorimetry based on ITU-R BT.2020 */
854 { DRM_MODE_COLORIMETRY_BT2020_YCC, "BT2020_YCC" },
855 /* Added as part of Additional Colorimetry Extension in 861.G */
856 { DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65, "DCI-P3_RGB_D65" },
857 { DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER, "DCI-P3_RGB_Theater" },
858};
859
829/** 860/**
830 * DOC: standard connector properties 861 * DOC: standard connector properties
831 * 862 *
@@ -1548,6 +1579,57 @@ int drm_mode_create_aspect_ratio_property(struct drm_device *dev)
1548EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property); 1579EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property);
1549 1580
1550/** 1581/**
1582 * DOC: standard connector properties
1583 *
1584 * Colorspace:
1585 * drm_mode_create_colorspace_property - create colorspace property
1586 * This property helps select a suitable colorspace based on the sink
1587 * capability. Modern sink devices support wider gamut like BT2020.
 1588 * This allows switching to BT2020 mode when a BT2020-encoded video
 1589 * stream is being played by the user, and likewise for any other
 1590 * colorspace, thereby giving users a good visual experience.
1591 *
 1592 * The expectation from userspace is that it parses the EDID to
 1593 * discover the colorspaces the sink supports, then uses this property
 1594 * to switch to one of them. The driver does not explicitly expose the
 1595 * sink's supported colorspaces; userspace retrieves them from the EDID.
1596 *
1597 * Basically the expectation from userspace is:
1598 * - Set up CRTC DEGAMMA/CTM/GAMMA to convert to some sink
1599 * colorspace
1600 * - Set this new property to let the sink know what it
1601 * converted the CRTC output to.
 1602 * - This property is just to inform the sink which colorspace
 1603 * the source is trying to drive.
1604 *
 1605 * Called by a driver the first time it's needed; must be attached to the
 1606 * desired connectors.
1607 */
1608int drm_mode_create_colorspace_property(struct drm_connector *connector)
1609{
1610 struct drm_device *dev = connector->dev;
1611 struct drm_property *prop;
1612
1613 if (connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
1614 connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
1615 prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
1616 "Colorspace",
1617 hdmi_colorspaces,
1618 ARRAY_SIZE(hdmi_colorspaces));
1619 if (!prop)
1620 return -ENOMEM;
1621 } else {
1622 DRM_DEBUG_KMS("Colorspace property not supported\n");
1623 return 0;
1624 }
1625
1626 connector->colorspace_property = prop;
1627
1628 return 0;
1629}
1630EXPORT_SYMBOL(drm_mode_create_colorspace_property);
1631
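A hedged sketch of the expected driver-side usage, creating the property once and attaching it with the Default value (error handling trimmed):

static void sketch_attach_colorspace(struct drm_connector *connector)
{
	if (drm_mode_create_colorspace_property(connector))
		return;

	if (connector->colorspace_property)
		drm_object_attach_property(&connector->base,
					   connector->colorspace_property,
					   DRM_MODE_COLORIMETRY_DEFAULT);
}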
1632/**
1551 * drm_mode_create_content_type_property - create content type property 1633 * drm_mode_create_content_type_property - create content type property
1552 * @dev: DRM device 1634 * @dev: DRM device
1553 * 1635 *
@@ -1634,6 +1716,8 @@ EXPORT_SYMBOL(drm_connector_set_path_property);
1634 * This looks up the tile information for a connector, and creates a 1716 * This looks up the tile information for a connector, and creates a
1635 * property for userspace to parse if it exists. The property is of 1717 * property for userspace to parse if it exists. The property is of
1636 * the form of 8 integers using ':' as a separator. 1718 * the form of 8 integers using ':' as a separator.
1719 * This is used for dual port tiled displays with DisplayPort SST
1720 * or DisplayPort MST connectors.
1637 * 1721 *
1638 * Returns: 1722 * Returns:
1639 * Zero on success, errno on failure. 1723 * Zero on success, errno on failure.
@@ -1677,6 +1761,9 @@ EXPORT_SYMBOL(drm_connector_set_tile_property);
1677 * 1761 *
1678 * This function creates a new blob modeset object and assigns its id to the 1762 * This function creates a new blob modeset object and assigns its id to the
1679 * connector's edid property. 1763 * connector's edid property.
 1764 * Since we also parse tile information from the EDID's DisplayID block, the
 1765 * connector's tile property is set here as well. See drm_connector_set_tile_property()
1766 * for more details.
1680 * 1767 *
1681 * Returns: 1768 * Returns:
1682 * Zero on success, negative errno on failure. 1769 * Zero on success, negative errno on failure.
@@ -1718,7 +1805,9 @@ int drm_connector_update_edid_property(struct drm_connector *connector,
1718 edid, 1805 edid,
1719 &connector->base, 1806 &connector->base,
1720 dev->mode_config.edid_property); 1807 dev->mode_config.edid_property);
1721 return ret; 1808 if (ret)
1809 return ret;
1810 return drm_connector_set_tile_property(connector);
1722} 1811}
1723EXPORT_SYMBOL(drm_connector_update_edid_property); 1812EXPORT_SYMBOL(drm_connector_update_edid_property);
1724 1813
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index dc7ac0c60547..c630ed157994 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -3022,7 +3022,6 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
3022 edid = drm_edid_duplicate(port->cached_edid); 3022 edid = drm_edid_duplicate(port->cached_edid);
3023 else { 3023 else {
3024 edid = drm_get_edid(connector, &port->aux.ddc); 3024 edid = drm_get_edid(connector, &port->aux.ddc);
3025 drm_connector_set_tile_property(connector);
3026 } 3025 }
3027 port->has_audio = drm_detect_monitor_audio(edid); 3026 port->has_audio = drm_detect_monitor_audio(edid);
3028 drm_dp_mst_topology_put_port(port); 3027 drm_dp_mst_topology_put_port(port);
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 381581b01d48..50d849d1bc6e 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -286,6 +286,138 @@ void drm_minor_release(struct drm_minor *minor)
286 * Note that the lifetime rules for &drm_device instance has still a lot of 286 * Note that the lifetime rules for &drm_device instance has still a lot of
287 * historical baggage. Hence use the reference counting provided by 287 * historical baggage. Hence use the reference counting provided by
288 * drm_dev_get() and drm_dev_put() only carefully. 288 * drm_dev_get() and drm_dev_put() only carefully.
289 *
290 * Display driver example
291 * ~~~~~~~~~~~~~~~~~~~~~~
292 *
293 * The following example shows a typical structure of a DRM display driver.
 294 * The example focuses on the probe() function and the other functions that
 295 * are almost always present, and serves as a demonstration of devm_drm_dev_init()
296 * usage with its accompanying drm_driver->release callback.
297 *
298 * .. code-block:: c
299 *
300 * struct driver_device {
301 * struct drm_device drm;
302 * void *userspace_facing;
303 * struct clk *pclk;
304 * };
305 *
306 * static void driver_drm_release(struct drm_device *drm)
307 * {
308 * struct driver_device *priv = container_of(...);
309 *
310 * drm_mode_config_cleanup(drm);
311 * drm_dev_fini(drm);
312 * kfree(priv->userspace_facing);
313 * kfree(priv);
314 * }
315 *
316 * static struct drm_driver driver_drm_driver = {
317 * [...]
318 * .release = driver_drm_release,
319 * };
320 *
321 * static int driver_probe(struct platform_device *pdev)
322 * {
323 * struct driver_device *priv;
324 * struct drm_device *drm;
325 * int ret;
326 *
327 * [
328 * devm_kzalloc() can't be used here because the drm_device
329 * lifetime can exceed the device lifetime if driver unbind
330 * happens when userspace still has open file descriptors.
331 * ]
332 * priv = kzalloc(sizeof(*priv), GFP_KERNEL);
333 * if (!priv)
334 * return -ENOMEM;
335 *
336 * drm = &priv->drm;
337 *
338 * ret = devm_drm_dev_init(&pdev->dev, drm, &driver_drm_driver);
339 * if (ret) {
 340 * kfree(priv);
341 * return ret;
342 * }
343 *
344 * drm_mode_config_init(drm);
345 *
346 * priv->userspace_facing = kzalloc(..., GFP_KERNEL);
347 * if (!priv->userspace_facing)
348 * return -ENOMEM;
349 *
 350 * priv->pclk = devm_clk_get(&pdev->dev, "PCLK");
351 * if (IS_ERR(priv->pclk))
352 * return PTR_ERR(priv->pclk);
353 *
354 * [ Further setup, display pipeline etc ]
355 *
356 * platform_set_drvdata(pdev, drm);
357 *
358 * drm_mode_config_reset(drm);
359 *
 360 * ret = drm_dev_register(drm, 0);
361 * if (ret)
362 * return ret;
363 *
364 * drm_fbdev_generic_setup(drm, 32);
365 *
366 * return 0;
367 * }
368 *
369 * [ This function is called before the devm_ resources are released ]
370 * static int driver_remove(struct platform_device *pdev)
371 * {
372 * struct drm_device *drm = platform_get_drvdata(pdev);
373 *
374 * drm_dev_unregister(drm);
 375 * drm_atomic_helper_shutdown(drm);
376 *
377 * return 0;
378 * }
379 *
380 * [ This function is called on kernel restart and shutdown ]
381 * static void driver_shutdown(struct platform_device *pdev)
382 * {
383 * drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
384 * }
385 *
386 * static int __maybe_unused driver_pm_suspend(struct device *dev)
387 * {
388 * return drm_mode_config_helper_suspend(dev_get_drvdata(dev));
389 * }
390 *
391 * static int __maybe_unused driver_pm_resume(struct device *dev)
392 * {
393 * drm_mode_config_helper_resume(dev_get_drvdata(dev));
394 *
395 * return 0;
396 * }
397 *
398 * static const struct dev_pm_ops driver_pm_ops = {
399 * SET_SYSTEM_SLEEP_PM_OPS(driver_pm_suspend, driver_pm_resume)
400 * };
401 *
402 * static struct platform_driver driver_driver = {
403 * .driver = {
404 * [...]
405 * .pm = &driver_pm_ops,
406 * },
407 * .probe = driver_probe,
408 * .remove = driver_remove,
409 * .shutdown = driver_shutdown,
410 * };
411 * module_platform_driver(driver_driver);
412 *
413 * Drivers that want to support device unplugging (USB, DT overlay unload) should
414 * use drm_dev_unplug() instead of drm_dev_unregister(). The driver must protect
 415 * regions that are accessing device resources to prevent use after they're
 416 * released. This is done using drm_dev_enter() and drm_dev_exit() (see the
 417 * sketch below). There is one shortcoming, however: drm_dev_unplug() marks the
 418 * drm_device as unplugged before drm_atomic_helper_shutdown() is called. This
 419 * means that if the disable code paths are protected, they will not run on
 420 * regular driver module unload, possibly leaving the hardware enabled.
289 */ 421 */
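A hedged sketch of the drm_dev_enter()/drm_dev_exit() pattern referenced above; hw_disable() is a hypothetical stand-in for code that touches device resources:

static void sketch_protected_disable(struct drm_device *drm)
{
	int idx;

	if (!drm_dev_enter(drm, &idx))
		return;		/* device unplugged, skip register access */

	hw_disable(drm);	/* hypothetical hardware access */

	drm_dev_exit(idx);
}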
290 422
291/** 423/**
@@ -376,11 +508,6 @@ void drm_dev_unplug(struct drm_device *dev)
376 synchronize_srcu(&drm_unplug_srcu); 508 synchronize_srcu(&drm_unplug_srcu);
377 509
378 drm_dev_unregister(dev); 510 drm_dev_unregister(dev);
379
380 mutex_lock(&drm_global_mutex);
381 if (dev->open_count == 0)
382 drm_dev_put(dev);
383 mutex_unlock(&drm_global_mutex);
384} 511}
385EXPORT_SYMBOL(drm_dev_unplug); 512EXPORT_SYMBOL(drm_dev_unplug);
386 513
@@ -457,6 +584,31 @@ static void drm_fs_inode_free(struct inode *inode)
457} 584}
458 585
459/** 586/**
587 * DOC: component helper usage recommendations
588 *
589 * DRM drivers that drive hardware where a logical device consists of a pile of
590 * independent hardware blocks are recommended to use the :ref:`component helper
591 * library<component>`. For consistency and better options for code reuse the
592 * following guidelines apply:
593 *
594 * - The entire device initialization procedure should be run from the
595 * &component_master_ops.master_bind callback, starting with drm_dev_init(),
596 * then binding all components with component_bind_all() and finishing with
 597 * drm_dev_register(), as sketched below.
598 *
599 * - The opaque pointer passed to all components through component_bind_all()
600 * should point at &struct drm_device of the device instance, not some driver
601 * specific private structure.
602 *
603 * - The component helper fills the niche where further standardization of
 604 * interfaces is not practical. When a standardized interface like
 605 * &drm_bridge or &drm_panel already exists, or will exist, and provides its
 606 * own functions for finding such components at driver load time, like
 607 * drm_of_find_panel_or_bridge(), the component helper should not be
 608 * used.
609 */
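A hedged sketch of a &component_master_ops.master_bind callback following the first guideline; struct driver_device and sketch_driver are hypothetical, and error unwinding is trimmed:

static int sketch_master_bind(struct device *dev)
{
	struct driver_device *priv = dev_get_drvdata(dev);
	int ret;

	ret = drm_dev_init(&priv->drm, &sketch_driver, dev);
	if (ret)
		return ret;

	/* the opaque pointer handed to all components is the &drm_device */
	ret = component_bind_all(dev, &priv->drm);
	if (ret)
		return ret;

	return drm_dev_register(&priv->drm, 0);
}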
610
611/**
460 * drm_dev_init - Initialise new DRM device 612 * drm_dev_init - Initialise new DRM device
461 * @dev: DRM device 613 * @dev: DRM device
462 * @driver: DRM driver 614 * @driver: DRM driver
@@ -501,7 +653,7 @@ int drm_dev_init(struct drm_device *dev,
501 BUG_ON(!parent); 653 BUG_ON(!parent);
502 654
503 kref_init(&dev->ref); 655 kref_init(&dev->ref);
504 dev->dev = parent; 656 dev->dev = get_device(parent);
505 dev->driver = driver; 657 dev->driver = driver;
506 658
507 /* no per-device feature limits by default */ 659 /* no per-device feature limits by default */
@@ -571,6 +723,7 @@ err_minors:
571 drm_minor_free(dev, DRM_MINOR_RENDER); 723 drm_minor_free(dev, DRM_MINOR_RENDER);
572 drm_fs_inode_free(dev->anon_inode); 724 drm_fs_inode_free(dev->anon_inode);
573err_free: 725err_free:
726 put_device(dev->dev);
574 mutex_destroy(&dev->master_mutex); 727 mutex_destroy(&dev->master_mutex);
575 mutex_destroy(&dev->ctxlist_mutex); 728 mutex_destroy(&dev->ctxlist_mutex);
576 mutex_destroy(&dev->clientlist_mutex); 729 mutex_destroy(&dev->clientlist_mutex);
@@ -580,6 +733,45 @@ err_free:
580} 733}
581EXPORT_SYMBOL(drm_dev_init); 734EXPORT_SYMBOL(drm_dev_init);
582 735
736static void devm_drm_dev_init_release(void *data)
737{
738 drm_dev_put(data);
739}
740
741/**
742 * devm_drm_dev_init - Resource managed drm_dev_init()
743 * @parent: Parent device object
744 * @dev: DRM device
745 * @driver: DRM driver
746 *
747 * Managed drm_dev_init(). The DRM device initialized with this function is
748 * automatically put on driver detach using drm_dev_put(). You must supply a
749 * &drm_driver.release callback to control the finalization explicitly.
750 *
751 * RETURNS:
752 * 0 on success, or error code on failure.
753 */
754int devm_drm_dev_init(struct device *parent,
755 struct drm_device *dev,
756 struct drm_driver *driver)
757{
758 int ret;
759
760 if (WARN_ON(!parent || !driver->release))
761 return -EINVAL;
762
763 ret = drm_dev_init(dev, driver, parent);
764 if (ret)
765 return ret;
766
767 ret = devm_add_action(parent, devm_drm_dev_init_release, dev);
768 if (ret)
769 devm_drm_dev_init_release(dev);
770
771 return ret;
772}
773EXPORT_SYMBOL(devm_drm_dev_init);
774
583/** 775/**
584 * drm_dev_fini - Finalize a dead DRM device 776 * drm_dev_fini - Finalize a dead DRM device
585 * @dev: DRM device 777 * @dev: DRM device
@@ -606,6 +798,8 @@ void drm_dev_fini(struct drm_device *dev)
606 drm_minor_free(dev, DRM_MINOR_PRIMARY); 798 drm_minor_free(dev, DRM_MINOR_PRIMARY);
607 drm_minor_free(dev, DRM_MINOR_RENDER); 799 drm_minor_free(dev, DRM_MINOR_RENDER);
608 800
801 put_device(dev->dev);
802
609 mutex_destroy(&dev->master_mutex); 803 mutex_destroy(&dev->master_mutex);
610 mutex_destroy(&dev->ctxlist_mutex); 804 mutex_destroy(&dev->ctxlist_mutex);
611 mutex_destroy(&dev->clientlist_mutex); 805 mutex_destroy(&dev->clientlist_mutex);
diff --git a/drivers/gpu/drm/drm_dsc.c b/drivers/gpu/drm/drm_dsc.c
index bce99f95c1a3..77f4e5ae4197 100644
--- a/drivers/gpu/drm/drm_dsc.c
+++ b/drivers/gpu/drm/drm_dsc.c
@@ -11,6 +11,7 @@
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/errno.h> 12#include <linux/errno.h>
13#include <linux/byteorder/generic.h> 13#include <linux/byteorder/generic.h>
14#include <drm/drm_print.h>
14#include <drm/drm_dp_helper.h> 15#include <drm/drm_dp_helper.h>
15#include <drm/drm_dsc.h> 16#include <drm/drm_dsc.h>
16 17
@@ -31,75 +32,74 @@
31/** 32/**
32 * drm_dsc_dp_pps_header_init() - Initializes the PPS Header 33 * drm_dsc_dp_pps_header_init() - Initializes the PPS Header
33 * for DisplayPort as per the DP 1.4 spec. 34 * for DisplayPort as per the DP 1.4 spec.
34 * @pps_sdp: Secondary data packet for DSC Picture Parameter Set 35 * @pps_header: Secondary data packet header for DSC Picture
35 * as defined in &struct drm_dsc_pps_infoframe 36 * Parameter Set as defined in &struct dp_sdp_header
36 * 37 *
37 * DP 1.4 spec defines the secondary data packet for sending the 38 * DP 1.4 spec defines the secondary data packet for sending the
38 * picture parameter infoframes from the source to the sink. 39 * picture parameter infoframes from the source to the sink.
39 * This function populates the pps header defined in 40 * This function populates the SDP header defined in
40 * &struct drm_dsc_pps_infoframe as per the header bytes defined 41 * &struct dp_sdp_header.
41 * in &struct dp_sdp_header.
42 */ 42 */
43void drm_dsc_dp_pps_header_init(struct drm_dsc_pps_infoframe *pps_sdp) 43void drm_dsc_dp_pps_header_init(struct dp_sdp_header *pps_header)
44{ 44{
45 memset(&pps_sdp->pps_header, 0, sizeof(pps_sdp->pps_header)); 45 memset(pps_header, 0, sizeof(*pps_header));
46 46
47 pps_sdp->pps_header.HB1 = DP_SDP_PPS; 47 pps_header->HB1 = DP_SDP_PPS;
48 pps_sdp->pps_header.HB2 = DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1; 48 pps_header->HB2 = DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1;
49} 49}
50EXPORT_SYMBOL(drm_dsc_dp_pps_header_init); 50EXPORT_SYMBOL(drm_dsc_dp_pps_header_init);
51 51
52/** 52/**
53 * drm_dsc_pps_infoframe_pack() - Populates the DSC PPS infoframe 53 * drm_dsc_pps_payload_pack() - Populates the DSC PPS
54 * 54 *
55 * @pps_sdp: 55 * @pps_payload:
56 * Secondary data packet for DSC Picture Parameter Set. This is defined 56 * Bitwise struct for DSC Picture Parameter Set. This is defined
57 * by &struct drm_dsc_pps_infoframe 57 * by &struct drm_dsc_picture_parameter_set
58 * @dsc_cfg: 58 * @dsc_cfg:
59 * DSC Configuration data filled by driver as defined by 59 * DSC Configuration data filled by driver as defined by
60 * &struct drm_dsc_config 60 * &struct drm_dsc_config
61 * 61 *
62 * DSC source device sends a secondary data packet filled with all the 62 * DSC source device sends a picture parameter set (PPS) containing the
63 * picture parameter set (PPS) information required by the sink to decode 63 * information required by the sink to decode the compressed frame. Driver
64 * the compressed frame. Driver populates the dsC PPS infoframe using the DSC 64 * populates the DSC PPS struct using the DSC configuration parameters in
65 * configuration parameters in the order expected by the DSC Display Sink 65 * the order expected by the DSC Display Sink device. For the DSC, the sink
66 * device. For the DSC, the sink device expects the PPS payload in the big 66 * device expects the PPS payload in big endian format for fields
67 * endian format for the fields that span more than 1 byte. 67 * that span more than 1 byte.
68 */ 68 */
69void drm_dsc_pps_infoframe_pack(struct drm_dsc_pps_infoframe *pps_sdp, 69void drm_dsc_pps_payload_pack(struct drm_dsc_picture_parameter_set *pps_payload,
70 const struct drm_dsc_config *dsc_cfg) 70 const struct drm_dsc_config *dsc_cfg)
71{ 71{
72 int i; 72 int i;
73 73
 74 /* Protect against someone accidentally changing struct size */ 74 /* Protect against someone accidentally changing struct size */
75 BUILD_BUG_ON(sizeof(pps_sdp->pps_payload) != 75 BUILD_BUG_ON(sizeof(*pps_payload) !=
76 DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1 + 1); 76 DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1 + 1);
77 77
78 memset(&pps_sdp->pps_payload, 0, sizeof(pps_sdp->pps_payload)); 78 memset(pps_payload, 0, sizeof(*pps_payload));
79 79
80 /* PPS 0 */ 80 /* PPS 0 */
81 pps_sdp->pps_payload.dsc_version = 81 pps_payload->dsc_version =
82 dsc_cfg->dsc_version_minor | 82 dsc_cfg->dsc_version_minor |
83 dsc_cfg->dsc_version_major << DSC_PPS_VERSION_MAJOR_SHIFT; 83 dsc_cfg->dsc_version_major << DSC_PPS_VERSION_MAJOR_SHIFT;
84 84
85 /* PPS 1, 2 is 0 */ 85 /* PPS 1, 2 is 0 */
86 86
87 /* PPS 3 */ 87 /* PPS 3 */
88 pps_sdp->pps_payload.pps_3 = 88 pps_payload->pps_3 =
89 dsc_cfg->line_buf_depth | 89 dsc_cfg->line_buf_depth |
90 dsc_cfg->bits_per_component << DSC_PPS_BPC_SHIFT; 90 dsc_cfg->bits_per_component << DSC_PPS_BPC_SHIFT;
91 91
92 /* PPS 4 */ 92 /* PPS 4 */
93 pps_sdp->pps_payload.pps_4 = 93 pps_payload->pps_4 =
94 ((dsc_cfg->bits_per_pixel & DSC_PPS_BPP_HIGH_MASK) >> 94 ((dsc_cfg->bits_per_pixel & DSC_PPS_BPP_HIGH_MASK) >>
95 DSC_PPS_MSB_SHIFT) | 95 DSC_PPS_MSB_SHIFT) |
96 dsc_cfg->vbr_enable << DSC_PPS_VBR_EN_SHIFT | 96 dsc_cfg->vbr_enable << DSC_PPS_VBR_EN_SHIFT |
97 dsc_cfg->enable422 << DSC_PPS_SIMPLE422_SHIFT | 97 dsc_cfg->simple_422 << DSC_PPS_SIMPLE422_SHIFT |
98 dsc_cfg->convert_rgb << DSC_PPS_CONVERT_RGB_SHIFT | 98 dsc_cfg->convert_rgb << DSC_PPS_CONVERT_RGB_SHIFT |
99 dsc_cfg->block_pred_enable << DSC_PPS_BLOCK_PRED_EN_SHIFT; 99 dsc_cfg->block_pred_enable << DSC_PPS_BLOCK_PRED_EN_SHIFT;
100 100
101 /* PPS 5 */ 101 /* PPS 5 */
102 pps_sdp->pps_payload.bits_per_pixel_low = 102 pps_payload->bits_per_pixel_low =
103 (dsc_cfg->bits_per_pixel & DSC_PPS_LSB_MASK); 103 (dsc_cfg->bits_per_pixel & DSC_PPS_LSB_MASK);
104 104
105 /* 105 /*
@@ -110,103 +110,103 @@ void drm_dsc_pps_infoframe_pack(struct drm_dsc_pps_infoframe *pps_sdp,
110 */ 110 */
111 111
112 /* PPS 6, 7 */ 112 /* PPS 6, 7 */
113 pps_sdp->pps_payload.pic_height = cpu_to_be16(dsc_cfg->pic_height); 113 pps_payload->pic_height = cpu_to_be16(dsc_cfg->pic_height);
114 114
115 /* PPS 8, 9 */ 115 /* PPS 8, 9 */
116 pps_sdp->pps_payload.pic_width = cpu_to_be16(dsc_cfg->pic_width); 116 pps_payload->pic_width = cpu_to_be16(dsc_cfg->pic_width);
117 117
118 /* PPS 10, 11 */ 118 /* PPS 10, 11 */
119 pps_sdp->pps_payload.slice_height = cpu_to_be16(dsc_cfg->slice_height); 119 pps_payload->slice_height = cpu_to_be16(dsc_cfg->slice_height);
120 120
121 /* PPS 12, 13 */ 121 /* PPS 12, 13 */
122 pps_sdp->pps_payload.slice_width = cpu_to_be16(dsc_cfg->slice_width); 122 pps_payload->slice_width = cpu_to_be16(dsc_cfg->slice_width);
123 123
124 /* PPS 14, 15 */ 124 /* PPS 14, 15 */
125 pps_sdp->pps_payload.chunk_size = cpu_to_be16(dsc_cfg->slice_chunk_size); 125 pps_payload->chunk_size = cpu_to_be16(dsc_cfg->slice_chunk_size);
126 126
127 /* PPS 16 */ 127 /* PPS 16 */
128 pps_sdp->pps_payload.initial_xmit_delay_high = 128 pps_payload->initial_xmit_delay_high =
129 ((dsc_cfg->initial_xmit_delay & 129 ((dsc_cfg->initial_xmit_delay &
130 DSC_PPS_INIT_XMIT_DELAY_HIGH_MASK) >> 130 DSC_PPS_INIT_XMIT_DELAY_HIGH_MASK) >>
131 DSC_PPS_MSB_SHIFT); 131 DSC_PPS_MSB_SHIFT);
132 132
133 /* PPS 17 */ 133 /* PPS 17 */
134 pps_sdp->pps_payload.initial_xmit_delay_low = 134 pps_payload->initial_xmit_delay_low =
135 (dsc_cfg->initial_xmit_delay & DSC_PPS_LSB_MASK); 135 (dsc_cfg->initial_xmit_delay & DSC_PPS_LSB_MASK);
136 136
137 /* PPS 18, 19 */ 137 /* PPS 18, 19 */
138 pps_sdp->pps_payload.initial_dec_delay = 138 pps_payload->initial_dec_delay =
139 cpu_to_be16(dsc_cfg->initial_dec_delay); 139 cpu_to_be16(dsc_cfg->initial_dec_delay);
140 140
141 /* PPS 20 is 0 */ 141 /* PPS 20 is 0 */
142 142
143 /* PPS 21 */ 143 /* PPS 21 */
144 pps_sdp->pps_payload.initial_scale_value = 144 pps_payload->initial_scale_value =
145 dsc_cfg->initial_scale_value; 145 dsc_cfg->initial_scale_value;
146 146
147 /* PPS 22, 23 */ 147 /* PPS 22, 23 */
148 pps_sdp->pps_payload.scale_increment_interval = 148 pps_payload->scale_increment_interval =
149 cpu_to_be16(dsc_cfg->scale_increment_interval); 149 cpu_to_be16(dsc_cfg->scale_increment_interval);
150 150
151 /* PPS 24 */ 151 /* PPS 24 */
152 pps_sdp->pps_payload.scale_decrement_interval_high = 152 pps_payload->scale_decrement_interval_high =
153 ((dsc_cfg->scale_decrement_interval & 153 ((dsc_cfg->scale_decrement_interval &
154 DSC_PPS_SCALE_DEC_INT_HIGH_MASK) >> 154 DSC_PPS_SCALE_DEC_INT_HIGH_MASK) >>
155 DSC_PPS_MSB_SHIFT); 155 DSC_PPS_MSB_SHIFT);
156 156
157 /* PPS 25 */ 157 /* PPS 25 */
158 pps_sdp->pps_payload.scale_decrement_interval_low = 158 pps_payload->scale_decrement_interval_low =
159 (dsc_cfg->scale_decrement_interval & DSC_PPS_LSB_MASK); 159 (dsc_cfg->scale_decrement_interval & DSC_PPS_LSB_MASK);
160 160
161 /* PPS 26[7:0], PPS 27[7:5] RESERVED */ 161 /* PPS 26[7:0], PPS 27[7:5] RESERVED */
162 162
163 /* PPS 27 */ 163 /* PPS 27 */
164 pps_sdp->pps_payload.first_line_bpg_offset = 164 pps_payload->first_line_bpg_offset =
165 dsc_cfg->first_line_bpg_offset; 165 dsc_cfg->first_line_bpg_offset;
166 166
167 /* PPS 28, 29 */ 167 /* PPS 28, 29 */
168 pps_sdp->pps_payload.nfl_bpg_offset = 168 pps_payload->nfl_bpg_offset =
169 cpu_to_be16(dsc_cfg->nfl_bpg_offset); 169 cpu_to_be16(dsc_cfg->nfl_bpg_offset);
170 170
171 /* PPS 30, 31 */ 171 /* PPS 30, 31 */
172 pps_sdp->pps_payload.slice_bpg_offset = 172 pps_payload->slice_bpg_offset =
173 cpu_to_be16(dsc_cfg->slice_bpg_offset); 173 cpu_to_be16(dsc_cfg->slice_bpg_offset);
174 174
175 /* PPS 32, 33 */ 175 /* PPS 32, 33 */
176 pps_sdp->pps_payload.initial_offset = 176 pps_payload->initial_offset =
177 cpu_to_be16(dsc_cfg->initial_offset); 177 cpu_to_be16(dsc_cfg->initial_offset);
178 178
179 /* PPS 34, 35 */ 179 /* PPS 34, 35 */
180 pps_sdp->pps_payload.final_offset = cpu_to_be16(dsc_cfg->final_offset); 180 pps_payload->final_offset = cpu_to_be16(dsc_cfg->final_offset);
181 181
182 /* PPS 36 */ 182 /* PPS 36 */
183 pps_sdp->pps_payload.flatness_min_qp = dsc_cfg->flatness_min_qp; 183 pps_payload->flatness_min_qp = dsc_cfg->flatness_min_qp;
184 184
185 /* PPS 37 */ 185 /* PPS 37 */
186 pps_sdp->pps_payload.flatness_max_qp = dsc_cfg->flatness_max_qp; 186 pps_payload->flatness_max_qp = dsc_cfg->flatness_max_qp;
187 187
188 /* PPS 38, 39 */ 188 /* PPS 38, 39 */
189 pps_sdp->pps_payload.rc_model_size = 189 pps_payload->rc_model_size =
190 cpu_to_be16(DSC_RC_MODEL_SIZE_CONST); 190 cpu_to_be16(DSC_RC_MODEL_SIZE_CONST);
191 191
192 /* PPS 40 */ 192 /* PPS 40 */
193 pps_sdp->pps_payload.rc_edge_factor = DSC_RC_EDGE_FACTOR_CONST; 193 pps_payload->rc_edge_factor = DSC_RC_EDGE_FACTOR_CONST;
194 194
195 /* PPS 41 */ 195 /* PPS 41 */
196 pps_sdp->pps_payload.rc_quant_incr_limit0 = 196 pps_payload->rc_quant_incr_limit0 =
197 dsc_cfg->rc_quant_incr_limit0; 197 dsc_cfg->rc_quant_incr_limit0;
198 198
199 /* PPS 42 */ 199 /* PPS 42 */
200 pps_sdp->pps_payload.rc_quant_incr_limit1 = 200 pps_payload->rc_quant_incr_limit1 =
201 dsc_cfg->rc_quant_incr_limit1; 201 dsc_cfg->rc_quant_incr_limit1;
202 202
203 /* PPS 43 */ 203 /* PPS 43 */
204 pps_sdp->pps_payload.rc_tgt_offset = DSC_RC_TGT_OFFSET_LO_CONST | 204 pps_payload->rc_tgt_offset = DSC_RC_TGT_OFFSET_LO_CONST |
205 DSC_RC_TGT_OFFSET_HI_CONST << DSC_PPS_RC_TGT_OFFSET_HI_SHIFT; 205 DSC_RC_TGT_OFFSET_HI_CONST << DSC_PPS_RC_TGT_OFFSET_HI_SHIFT;
206 206
207 /* PPS 44 - 57 */ 207 /* PPS 44 - 57 */
208 for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++) 208 for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++)
209 pps_sdp->pps_payload.rc_buf_thresh[i] = 209 pps_payload->rc_buf_thresh[i] =
210 dsc_cfg->rc_buf_thresh[i]; 210 dsc_cfg->rc_buf_thresh[i];
211 211
212 /* PPS 58 - 87 */ 212 /* PPS 58 - 87 */
@@ -215,32 +215,181 @@ void drm_dsc_pps_infoframe_pack(struct drm_dsc_pps_infoframe *pps_sdp,
215 * are as follows: Min_qp[15:11], max_qp[10:6], offset[5:0] 215 * are as follows: Min_qp[15:11], max_qp[10:6], offset[5:0]
216 */ 216 */
217 for (i = 0; i < DSC_NUM_BUF_RANGES; i++) { 217 for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
218 pps_sdp->pps_payload.rc_range_parameters[i] = 218 pps_payload->rc_range_parameters[i] =
219 ((dsc_cfg->rc_range_params[i].range_min_qp << 219 ((dsc_cfg->rc_range_params[i].range_min_qp <<
220 DSC_PPS_RC_RANGE_MINQP_SHIFT) | 220 DSC_PPS_RC_RANGE_MINQP_SHIFT) |
221 (dsc_cfg->rc_range_params[i].range_max_qp << 221 (dsc_cfg->rc_range_params[i].range_max_qp <<
222 DSC_PPS_RC_RANGE_MAXQP_SHIFT) | 222 DSC_PPS_RC_RANGE_MAXQP_SHIFT) |
223 (dsc_cfg->rc_range_params[i].range_bpg_offset)); 223 (dsc_cfg->rc_range_params[i].range_bpg_offset));
224 pps_sdp->pps_payload.rc_range_parameters[i] = 224 pps_payload->rc_range_parameters[i] =
225 cpu_to_be16(pps_sdp->pps_payload.rc_range_parameters[i]); 225 cpu_to_be16(pps_payload->rc_range_parameters[i]);
226 } 226 }
227 227
228 /* PPS 88 */ 228 /* PPS 88 */
229 pps_sdp->pps_payload.native_422_420 = dsc_cfg->native_422 | 229 pps_payload->native_422_420 = dsc_cfg->native_422 |
230 dsc_cfg->native_420 << DSC_PPS_NATIVE_420_SHIFT; 230 dsc_cfg->native_420 << DSC_PPS_NATIVE_420_SHIFT;
231 231
232 /* PPS 89 */ 232 /* PPS 89 */
233 pps_sdp->pps_payload.second_line_bpg_offset = 233 pps_payload->second_line_bpg_offset =
234 dsc_cfg->second_line_bpg_offset; 234 dsc_cfg->second_line_bpg_offset;
235 235
236 /* PPS 90, 91 */ 236 /* PPS 90, 91 */
237 pps_sdp->pps_payload.nsl_bpg_offset = 237 pps_payload->nsl_bpg_offset =
238 cpu_to_be16(dsc_cfg->nsl_bpg_offset); 238 cpu_to_be16(dsc_cfg->nsl_bpg_offset);
239 239
240 /* PPS 92, 93 */ 240 /* PPS 92, 93 */
241 pps_sdp->pps_payload.second_line_offset_adj = 241 pps_payload->second_line_offset_adj =
242 cpu_to_be16(dsc_cfg->second_line_offset_adj); 242 cpu_to_be16(dsc_cfg->second_line_offset_adj);
243 243
 244 /* PPS 94 - 127 are 0 */ 244 /* PPS 94 - 127 are 0 */
245} 245}
246EXPORT_SYMBOL(drm_dsc_pps_infoframe_pack); 246EXPORT_SYMBOL(drm_dsc_pps_payload_pack);
247
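A hedged sketch of how a DP source driver combines the two helpers; after this change, struct drm_dsc_pps_infoframe still bundles the SDP header and the PPS payload:

static void sketch_fill_pps_sdp(struct drm_dsc_pps_infoframe *pps_sdp,
				const struct drm_dsc_config *dsc_cfg)
{
	/* header first, then the 128-byte big-endian PPS payload */
	drm_dsc_dp_pps_header_init(&pps_sdp->pps_header);
	drm_dsc_pps_payload_pack(&pps_sdp->pps_payload, dsc_cfg);
}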
248/**
249 * drm_dsc_compute_rc_parameters() - Write rate control
 250 * parameters to the DSC configuration defined in
251 * &struct drm_dsc_config in accordance with the DSC 1.2
252 * specification. Some configuration fields must be present
253 * beforehand.
254 *
255 * @vdsc_cfg:
256 * DSC Configuration data partially filled by driver
257 */
258int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg)
259{
260 unsigned long groups_per_line = 0;
261 unsigned long groups_total = 0;
262 unsigned long num_extra_mux_bits = 0;
263 unsigned long slice_bits = 0;
264 unsigned long hrd_delay = 0;
265 unsigned long final_scale = 0;
266 unsigned long rbs_min = 0;
267
268 if (vdsc_cfg->native_420 || vdsc_cfg->native_422) {
269 /* Number of groups used to code each line of a slice */
270 groups_per_line = DIV_ROUND_UP(vdsc_cfg->slice_width / 2,
271 DSC_RC_PIXELS_PER_GROUP);
272
273 /* chunksize in Bytes */
274 vdsc_cfg->slice_chunk_size = DIV_ROUND_UP(vdsc_cfg->slice_width / 2 *
275 vdsc_cfg->bits_per_pixel,
276 (8 * 16));
277 } else {
278 /* Number of groups used to code each line of a slice */
279 groups_per_line = DIV_ROUND_UP(vdsc_cfg->slice_width,
280 DSC_RC_PIXELS_PER_GROUP);
281
282 /* chunksize in Bytes */
283 vdsc_cfg->slice_chunk_size = DIV_ROUND_UP(vdsc_cfg->slice_width *
284 vdsc_cfg->bits_per_pixel,
285 (8 * 16));
286 }
287
288 if (vdsc_cfg->convert_rgb)
289 num_extra_mux_bits = 3 * (vdsc_cfg->mux_word_size +
290 (4 * vdsc_cfg->bits_per_component + 4)
291 - 2);
292 else if (vdsc_cfg->native_422)
293 num_extra_mux_bits = 4 * vdsc_cfg->mux_word_size +
294 (4 * vdsc_cfg->bits_per_component + 4) +
295 3 * (4 * vdsc_cfg->bits_per_component) - 2;
296 else
297 num_extra_mux_bits = 3 * vdsc_cfg->mux_word_size +
298 (4 * vdsc_cfg->bits_per_component + 4) +
299 2 * (4 * vdsc_cfg->bits_per_component) - 2;
300 /* Number of bits in one Slice */
301 slice_bits = 8 * vdsc_cfg->slice_chunk_size * vdsc_cfg->slice_height;
302
303 while ((num_extra_mux_bits > 0) &&
304 ((slice_bits - num_extra_mux_bits) % vdsc_cfg->mux_word_size))
305 num_extra_mux_bits--;
306
307 if (groups_per_line < vdsc_cfg->initial_scale_value - 8)
308 vdsc_cfg->initial_scale_value = groups_per_line + 8;
309
310 /* scale_decrement_interval calculation according to DSC spec 1.11 */
311 if (vdsc_cfg->initial_scale_value > 8)
312 vdsc_cfg->scale_decrement_interval = groups_per_line /
313 (vdsc_cfg->initial_scale_value - 8);
314 else
315 vdsc_cfg->scale_decrement_interval = DSC_SCALE_DECREMENT_INTERVAL_MAX;
316
317 vdsc_cfg->final_offset = vdsc_cfg->rc_model_size -
318 (vdsc_cfg->initial_xmit_delay *
319 vdsc_cfg->bits_per_pixel + 8) / 16 + num_extra_mux_bits;
320
321 if (vdsc_cfg->final_offset >= vdsc_cfg->rc_model_size) {
 322 DRM_DEBUG_KMS("FinalOfs < RcModelSize for this InitialXmitDelay\n");
323 return -ERANGE;
324 }
325
326 final_scale = (vdsc_cfg->rc_model_size * 8) /
327 (vdsc_cfg->rc_model_size - vdsc_cfg->final_offset);
328 if (vdsc_cfg->slice_height > 1)
329 /*
330 * NflBpgOffset is 16 bit value with 11 fractional bits
331 * hence we multiply by 2^11 for preserving the
332 * fractional part
333 */
334 vdsc_cfg->nfl_bpg_offset = DIV_ROUND_UP((vdsc_cfg->first_line_bpg_offset << 11),
335 (vdsc_cfg->slice_height - 1));
336 else
337 vdsc_cfg->nfl_bpg_offset = 0;
338
339 /* 2^16 - 1 */
340 if (vdsc_cfg->nfl_bpg_offset > 65535) {
341 DRM_DEBUG_KMS("NflBpgOffset is too large for this slice height\n");
342 return -ERANGE;
343 }
344
345 /* Number of groups used to code the entire slice */
346 groups_total = groups_per_line * vdsc_cfg->slice_height;
347
348 /* slice_bpg_offset is 16 bit value with 11 fractional bits */
349 vdsc_cfg->slice_bpg_offset = DIV_ROUND_UP(((vdsc_cfg->rc_model_size -
350 vdsc_cfg->initial_offset +
351 num_extra_mux_bits) << 11),
352 groups_total);
353
354 if (final_scale > 9) {
355 /*
356 * ScaleIncrementInterval =
357 * finaloffset/((NflBpgOffset + SliceBpgOffset)*8(finalscale - 1.125))
358 * as (NflBpgOffset + SliceBpgOffset) has 11 bit fractional value,
359 * we need divide by 2^11 from pstDscCfg values
360 */
361 vdsc_cfg->scale_increment_interval =
362 (vdsc_cfg->final_offset * (1 << 11)) /
363 ((vdsc_cfg->nfl_bpg_offset +
364 vdsc_cfg->slice_bpg_offset) *
365 (final_scale - 9));
366 } else {
367 /*
368 * If finalScaleValue is less than or equal to 9, a value of 0 should
369 * be used to disable the scale increment at the end of the slice
370 */
371 vdsc_cfg->scale_increment_interval = 0;
372 }
373
374 if (vdsc_cfg->scale_increment_interval > 65535) {
 375 DRM_DEBUG_KMS("ScaleIncrementInterval is too large for this slice height\n");
376 return -ERANGE;
377 }
378
379 /*
380 * DSC spec mentions that bits_per_pixel specifies the target
381 * bits/pixel (bpp) rate that is used by the encoder,
382 * in steps of 1/16 of a bit per pixel
383 */
384 rbs_min = vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset +
385 DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay *
386 vdsc_cfg->bits_per_pixel, 16) +
387 groups_per_line * vdsc_cfg->first_line_bpg_offset;
388
389 hrd_delay = DIV_ROUND_UP((rbs_min * 16), vdsc_cfg->bits_per_pixel);
390 vdsc_cfg->rc_bits = (hrd_delay * vdsc_cfg->bits_per_pixel) / 16;
391 vdsc_cfg->initial_dec_delay = hrd_delay - vdsc_cfg->initial_xmit_delay;
392
393 return 0;
394}
395EXPORT_SYMBOL(drm_dsc_compute_rc_parameters);
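A hedged usage sketch: the caller seeds the timing, format and rate-control model fields, then lets the helper derive the rest. The values below are purely illustrative, not recommended settings:

static int sketch_compute_rc(struct drm_dsc_config *cfg)
{
	cfg->pic_width = 3840;
	cfg->pic_height = 2160;
	cfg->slice_width = 1920;
	cfg->slice_height = 8;
	cfg->bits_per_component = 8;
	cfg->bits_per_pixel = 8 << 4;	/* 8 bpp, in 1/16 bpp units */
	cfg->convert_rgb = true;
	cfg->mux_word_size = 48;
	cfg->rc_model_size = 8192;
	cfg->initial_xmit_delay = 512;
	cfg->initial_offset = 6144;
	cfg->initial_scale_value = 32;
	cfg->first_line_bpg_offset = 12;

	return drm_dsc_compute_rc_parameters(cfg);	/* 0 or -ERANGE */
}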
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 990b1909f9d7..fa39592ebc0a 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -193,6 +193,12 @@ static const struct edid_quirk {
193 193
194 /* Sony PlayStation VR Headset */ 194 /* Sony PlayStation VR Headset */
195 { "SNY", 0x0704, EDID_QUIRK_NON_DESKTOP }, 195 { "SNY", 0x0704, EDID_QUIRK_NON_DESKTOP },
196
197 /* Sensics VR Headsets */
198 { "SEN", 0x1019, EDID_QUIRK_NON_DESKTOP },
199
200 /* OSVR HDK and HDK2 VR Headsets */
201 { "SVR", 0x1019, EDID_QUIRK_NON_DESKTOP },
196}; 202};
197 203
198/* 204/*
@@ -4924,6 +4930,76 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
4924} 4930}
4925EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode); 4931EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode);
4926 4932
4933/* HDMI Colorspace Spec Definitions */
4934#define FULL_COLORIMETRY_MASK 0x1FF
4935#define NORMAL_COLORIMETRY_MASK 0x3
4936#define EXTENDED_COLORIMETRY_MASK 0x7
4937#define EXTENDED_ACE_COLORIMETRY_MASK 0xF
4938
4939#define C(x) ((x) << 0)
4940#define EC(x) ((x) << 2)
4941#define ACE(x) ((x) << 5)
4942
4943#define HDMI_COLORIMETRY_NO_DATA 0x0
4944#define HDMI_COLORIMETRY_SMPTE_170M_YCC (C(1) | EC(0) | ACE(0))
4945#define HDMI_COLORIMETRY_BT709_YCC (C(2) | EC(0) | ACE(0))
4946#define HDMI_COLORIMETRY_XVYCC_601 (C(3) | EC(0) | ACE(0))
4947#define HDMI_COLORIMETRY_XVYCC_709 (C(3) | EC(1) | ACE(0))
4948#define HDMI_COLORIMETRY_SYCC_601 (C(3) | EC(2) | ACE(0))
4949#define HDMI_COLORIMETRY_OPYCC_601 (C(3) | EC(3) | ACE(0))
4950#define HDMI_COLORIMETRY_OPRGB (C(3) | EC(4) | ACE(0))
4951#define HDMI_COLORIMETRY_BT2020_CYCC (C(3) | EC(5) | ACE(0))
4952#define HDMI_COLORIMETRY_BT2020_RGB (C(3) | EC(6) | ACE(0))
4953#define HDMI_COLORIMETRY_BT2020_YCC (C(3) | EC(6) | ACE(0))
4954#define HDMI_COLORIMETRY_DCI_P3_RGB_D65 (C(3) | EC(7) | ACE(0))
4955#define HDMI_COLORIMETRY_DCI_P3_RGB_THEATER (C(3) | EC(7) | ACE(1))
4956
4957static const u32 hdmi_colorimetry_val[] = {
4958 [DRM_MODE_COLORIMETRY_NO_DATA] = HDMI_COLORIMETRY_NO_DATA,
4959 [DRM_MODE_COLORIMETRY_SMPTE_170M_YCC] = HDMI_COLORIMETRY_SMPTE_170M_YCC,
4960 [DRM_MODE_COLORIMETRY_BT709_YCC] = HDMI_COLORIMETRY_BT709_YCC,
4961 [DRM_MODE_COLORIMETRY_XVYCC_601] = HDMI_COLORIMETRY_XVYCC_601,
4962 [DRM_MODE_COLORIMETRY_XVYCC_709] = HDMI_COLORIMETRY_XVYCC_709,
4963 [DRM_MODE_COLORIMETRY_SYCC_601] = HDMI_COLORIMETRY_SYCC_601,
4964 [DRM_MODE_COLORIMETRY_OPYCC_601] = HDMI_COLORIMETRY_OPYCC_601,
4965 [DRM_MODE_COLORIMETRY_OPRGB] = HDMI_COLORIMETRY_OPRGB,
4966 [DRM_MODE_COLORIMETRY_BT2020_CYCC] = HDMI_COLORIMETRY_BT2020_CYCC,
4967 [DRM_MODE_COLORIMETRY_BT2020_RGB] = HDMI_COLORIMETRY_BT2020_RGB,
4968 [DRM_MODE_COLORIMETRY_BT2020_YCC] = HDMI_COLORIMETRY_BT2020_YCC,
4969};
4970
4971#undef C
4972#undef EC
4973#undef ACE
4974
4975/**
4976 * drm_hdmi_avi_infoframe_colorspace() - fill the HDMI AVI infoframe
4977 * colorspace information
4978 * @frame: HDMI AVI infoframe
4979 * @conn_state: connector state
4980 */
4981void
4982drm_hdmi_avi_infoframe_colorspace(struct hdmi_avi_infoframe *frame,
4983 const struct drm_connector_state *conn_state)
4984{
4985 u32 colorimetry_val;
4986 u32 colorimetry_index = conn_state->colorspace & FULL_COLORIMETRY_MASK;
4987
4988 if (colorimetry_index >= ARRAY_SIZE(hdmi_colorimetry_val))
4989 colorimetry_val = HDMI_COLORIMETRY_NO_DATA;
4990 else
4991 colorimetry_val = hdmi_colorimetry_val[colorimetry_index];
4992
4993 frame->colorimetry = colorimetry_val & NORMAL_COLORIMETRY_MASK;
4994 /*
4995 * ToDo: Extend it for ACE formats as well. Modify the infoframe
4996 * structure and extend it in drivers/video/hdmi
4997 */
4998 frame->extended_colorimetry = (colorimetry_val >> 2) &
4999 EXTENDED_COLORIMETRY_MASK;
5000}
5001EXPORT_SYMBOL(drm_hdmi_avi_infoframe_colorspace);
5002
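A hedged sketch of the expected call site in a driver's infoframe setup, pairing the new helper with the existing mode-based one:

static void sketch_fill_avi(struct drm_connector *connector,
			    const struct drm_connector_state *conn_state,
			    const struct drm_display_mode *mode)
{
	struct hdmi_avi_infoframe frame;

	if (drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode))
		return;

	/* maps conn_state->colorspace into (extended_)colorimetry */
	drm_hdmi_avi_infoframe_colorspace(&frame, conn_state);
}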
4927/** 5003/**
4928 * drm_hdmi_avi_infoframe_quant_range() - fill the HDMI AVI infoframe 5004 * drm_hdmi_avi_infoframe_quant_range() - fill the HDMI AVI infoframe
4929 * quantization range information 5005 * quantization range information
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 0e9349ff2d16..04d23cb430bf 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -3024,7 +3024,8 @@ static int drm_fbdev_fb_open(struct fb_info *info, int user)
3024{ 3024{
3025 struct drm_fb_helper *fb_helper = info->par; 3025 struct drm_fb_helper *fb_helper = info->par;
3026 3026
3027 if (!try_module_get(fb_helper->dev->driver->fops->owner)) 3027 /* No need to take a ref for fbcon because it unbinds on unregister */
3028 if (user && !try_module_get(fb_helper->dev->driver->fops->owner))
3028 return -ENODEV; 3029 return -ENODEV;
3029 3030
3030 return 0; 3031 return 0;
@@ -3034,7 +3035,8 @@ static int drm_fbdev_fb_release(struct fb_info *info, int user)
3034{ 3035{
3035 struct drm_fb_helper *fb_helper = info->par; 3036 struct drm_fb_helper *fb_helper = info->par;
3036 3037
3037 module_put(fb_helper->dev->driver->fops->owner); 3038 if (user)
3039 module_put(fb_helper->dev->driver->fops->owner);
3038 3040
3039 return 0; 3041 return 0;
3040} 3042}
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 83a5bbca6e7e..9701469a6e93 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -489,11 +489,9 @@ int drm_release(struct inode *inode, struct file *filp)
489 489
490 drm_close_helper(filp); 490 drm_close_helper(filp);
491 491
492 if (!--dev->open_count) { 492 if (!--dev->open_count)
493 drm_lastclose(dev); 493 drm_lastclose(dev);
494 if (drm_dev_is_unplugged(dev)) 494
495 drm_put_dev(dev);
496 }
497 mutex_unlock(&drm_global_mutex); 495 mutex_unlock(&drm_global_mutex);
498 496
499 drm_minor_release(minor); 497 drm_minor_release(minor);
@@ -579,6 +577,7 @@ put_back_event:
579 file_priv->event_space -= length; 577 file_priv->event_space -= length;
580 list_add(&e->link, &file_priv->event_list); 578 list_add(&e->link, &file_priv->event_list);
581 spin_unlock_irq(&dev->event_lock); 579 spin_unlock_irq(&dev->event_lock);
580 wake_up_interruptible(&file_priv->event_wait);
582 break; 581 break;
583 } 582 }
584 583
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index ba7e19d4336c..6ea55fb4526d 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -198,6 +198,10 @@ const struct drm_format_info *__drm_format_info(u32 format)
198 { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, 198 { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
199 { .format = DRM_FORMAT_RGBA8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, 199 { .format = DRM_FORMAT_RGBA8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
200 { .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, 200 { .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
201 { .format = DRM_FORMAT_XRGB16161616F, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1 },
202 { .format = DRM_FORMAT_XBGR16161616F, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1 },
203 { .format = DRM_FORMAT_ARGB16161616F, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
204 { .format = DRM_FORMAT_ABGR16161616F, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
201 { .format = DRM_FORMAT_RGB888_A8, .depth = 32, .num_planes = 2, .cpp = { 3, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, 205 { .format = DRM_FORMAT_RGB888_A8, .depth = 32, .num_planes = 2, .cpp = { 3, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
202 { .format = DRM_FORMAT_BGR888_A8, .depth = 32, .num_planes = 2, .cpp = { 3, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, 206 { .format = DRM_FORMAT_BGR888_A8, .depth = 32, .num_planes = 2, .cpp = { 3, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
203 { .format = DRM_FORMAT_XRGB8888_A8, .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, 207 { .format = DRM_FORMAT_XRGB8888_A8, .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
@@ -225,7 +229,17 @@ const struct drm_format_info *__drm_format_info(u32 format)
225 { .format = DRM_FORMAT_UYVY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true }, 229 { .format = DRM_FORMAT_UYVY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
226 { .format = DRM_FORMAT_VYUY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true }, 230 { .format = DRM_FORMAT_VYUY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
227 { .format = DRM_FORMAT_XYUV8888, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true }, 231 { .format = DRM_FORMAT_XYUV8888, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
232 { .format = DRM_FORMAT_VUY888, .depth = 0, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
228 { .format = DRM_FORMAT_AYUV, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true }, 233 { .format = DRM_FORMAT_AYUV, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true },
234 { .format = DRM_FORMAT_Y210, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
235 { .format = DRM_FORMAT_Y212, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
236 { .format = DRM_FORMAT_Y216, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
237 { .format = DRM_FORMAT_Y410, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true },
238 { .format = DRM_FORMAT_Y412, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true },
239 { .format = DRM_FORMAT_Y416, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true },
240 { .format = DRM_FORMAT_XVYU2101010, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
241 { .format = DRM_FORMAT_XVYU12_16161616, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
242 { .format = DRM_FORMAT_XVYU16161616, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
229 { .format = DRM_FORMAT_Y0L0, .depth = 0, .num_planes = 1, 243 { .format = DRM_FORMAT_Y0L0, .depth = 0, .num_planes = 1,
230 .char_per_block = { 8, 0, 0 }, .block_w = { 2, 0, 0 }, .block_h = { 2, 0, 0 }, 244 .char_per_block = { 8, 0, 0 }, .block_w = { 2, 0, 0 }, .block_h = { 2, 0, 0 },
231 .hsub = 2, .vsub = 2, .has_alpha = true, .is_yuv = true }, 245 .hsub = 2, .vsub = 2, .has_alpha = true, .is_yuv = true },
@@ -247,6 +261,19 @@ const struct drm_format_info *__drm_format_info(u32 format)
247 { .format = DRM_FORMAT_P016, .depth = 0, .num_planes = 2, 261 { .format = DRM_FORMAT_P016, .depth = 0, .num_planes = 2,
248 .char_per_block = { 2, 4, 0 }, .block_w = { 1, 0, 0 }, .block_h = { 1, 0, 0 }, 262 .char_per_block = { 2, 4, 0 }, .block_w = { 1, 0, 0 }, .block_h = { 1, 0, 0 },
249 .hsub = 2, .vsub = 2, .is_yuv = true}, 263 .hsub = 2, .vsub = 2, .is_yuv = true},
264 { .format = DRM_FORMAT_P210, .depth = 0,
265 .num_planes = 2, .char_per_block = { 2, 4, 0 },
266 .block_w = { 1, 0, 0 }, .block_h = { 1, 0, 0 }, .hsub = 2,
267 .vsub = 1, .is_yuv = true },
268 { .format = DRM_FORMAT_VUY101010, .depth = 0,
269 .num_planes = 1, .cpp = { 0, 0, 0 }, .hsub = 1, .vsub = 1,
270 .is_yuv = true },
271 { .format = DRM_FORMAT_YUV420_8BIT, .depth = 0,
272 .num_planes = 1, .cpp = { 0, 0, 0 }, .hsub = 2, .vsub = 2,
273 .is_yuv = true },
274 { .format = DRM_FORMAT_YUV420_10BIT, .depth = 0,
275 .num_planes = 1, .cpp = { 0, 0, 0 }, .hsub = 2, .vsub = 2,
276 .is_yuv = true },
250 }; 277 };
251 278
252 unsigned int i; 279 unsigned int i;
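The new entries are reachable through the usual lookup helper; a small hedged sketch:

/* Sketch only: the FP16 formats are single-plane, 8 bytes per pixel. */
static bool sketch_check_fp16(void)
{
	const struct drm_format_info *info =
		drm_format_info(DRM_FORMAT_XRGB16161616F);

	return info && info->num_planes == 1 && info->cpp[0] == 8;
}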
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index d0b9f6a9953f..388b3742e562 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -171,6 +171,10 @@ void drm_gem_private_object_init(struct drm_device *dev,
171 kref_init(&obj->refcount); 171 kref_init(&obj->refcount);
172 obj->handle_count = 0; 172 obj->handle_count = 0;
173 obj->size = size; 173 obj->size = size;
174 reservation_object_init(&obj->_resv);
175 if (!obj->resv)
176 obj->resv = &obj->_resv;
177
174 drm_vma_node_reset(&obj->vma_node); 178 drm_vma_node_reset(&obj->vma_node);
175} 179}
176EXPORT_SYMBOL(drm_gem_private_object_init); 180EXPORT_SYMBOL(drm_gem_private_object_init);
@@ -688,6 +692,44 @@ drm_gem_object_lookup(struct drm_file *filp, u32 handle)
688EXPORT_SYMBOL(drm_gem_object_lookup); 692EXPORT_SYMBOL(drm_gem_object_lookup);
689 693
690/** 694/**
 695 * drm_gem_reservation_object_wait - Wait on a GEM object's reservation
 696 * object's shared and/or exclusive fences.
 697 * @filep: DRM file private data
698 * @handle: userspace handle
699 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
700 * @timeout: timeout value in jiffies or zero to return immediately
701 *
702 * Returns:
703 *
 704 * -ERESTARTSYS if interrupted, -ETIME if the wait timed out, -EINVAL if
 705 * the handle lookup failed, or zero on success.
706 */
707long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle,
708 bool wait_all, unsigned long timeout)
709{
710 long ret;
711 struct drm_gem_object *obj;
712
713 obj = drm_gem_object_lookup(filep, handle);
714 if (!obj) {
715 DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
716 return -EINVAL;
717 }
718
719 ret = reservation_object_wait_timeout_rcu(obj->resv, wait_all,
720 true, timeout);
721 if (ret == 0)
722 ret = -ETIME;
723 else if (ret > 0)
724 ret = 0;
725
726 drm_gem_object_put_unlocked(obj);
727
728 return ret;
729}
730EXPORT_SYMBOL(drm_gem_reservation_object_wait);
731
732/**
691 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl 733 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
692 * @dev: drm_device 734 * @dev: drm_device
693 * @data: ioctl data 735 * @data: ioctl data
@@ -851,6 +893,7 @@ drm_gem_object_release(struct drm_gem_object *obj)
851 if (obj->filp) 893 if (obj->filp)
852 fput(obj->filp); 894 fput(obj->filp);
853 895
896 reservation_object_fini(&obj->_resv);
854 drm_gem_free_mmap_offset(obj); 897 drm_gem_free_mmap_offset(obj);
855} 898}
856EXPORT_SYMBOL(drm_gem_object_release); 899EXPORT_SYMBOL(drm_gem_object_release);
@@ -1190,3 +1233,81 @@ void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr)
1190 obj->dev->driver->gem_prime_vunmap(obj, vaddr); 1233 obj->dev->driver->gem_prime_vunmap(obj, vaddr);
1191} 1234}
1192EXPORT_SYMBOL(drm_gem_vunmap); 1235EXPORT_SYMBOL(drm_gem_vunmap);
1236
1237/**
1238 * drm_gem_lock_reservations - Sets up the ww context and acquires
1239 * the lock on an array of GEM objects.
1240 *
1241 * Once you've locked your reservations, you'll want to set up space
 1242 * for your shared fences (if applicable), submit your job, then call
 1243 * drm_gem_unlock_reservations().
1244 *
1245 * @objs: drm_gem_objects to lock
1246 * @count: Number of objects in @objs
1247 * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
1248 * part of tracking this set of locked reservations.
1249 */
1250int
1251drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
1252 struct ww_acquire_ctx *acquire_ctx)
1253{
1254 int contended = -1;
1255 int i, ret;
1256
1257 ww_acquire_init(acquire_ctx, &reservation_ww_class);
1258
1259retry:
1260 if (contended != -1) {
1261 struct drm_gem_object *obj = objs[contended];
1262
1263 ret = ww_mutex_lock_slow_interruptible(&obj->resv->lock,
1264 acquire_ctx);
1265 if (ret) {
1266 ww_acquire_done(acquire_ctx);
1267 return ret;
1268 }
1269 }
1270
1271 for (i = 0; i < count; i++) {
1272 if (i == contended)
1273 continue;
1274
1275 ret = ww_mutex_lock_interruptible(&objs[i]->resv->lock,
1276 acquire_ctx);
1277 if (ret) {
1278 int j;
1279
1280 for (j = 0; j < i; j++)
1281 ww_mutex_unlock(&objs[j]->resv->lock);
1282
1283 if (contended != -1 && contended >= i)
1284 ww_mutex_unlock(&objs[contended]->resv->lock);
1285
1286 if (ret == -EDEADLK) {
1287 contended = i;
1288 goto retry;
1289 }
1290
1291 ww_acquire_done(acquire_ctx);
1292 return ret;
1293 }
1294 }
1295
1296 ww_acquire_done(acquire_ctx);
1297
1298 return 0;
1299}
1300EXPORT_SYMBOL(drm_gem_lock_reservations);
1301
1302void
1303drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
1304 struct ww_acquire_ctx *acquire_ctx)
1305{
1306 int i;
1307
1308 for (i = 0; i < count; i++)
1309 ww_mutex_unlock(&objs[i]->resv->lock);
1310
1311 ww_acquire_fini(acquire_ctx);
1312}
1313EXPORT_SYMBOL(drm_gem_unlock_reservations);
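Taken together with drm_gem_reservation_object_wait() above, these helpers let a driver's submit path drop its hand-rolled ww-mutex loop (compare the etnaviv hunks below). A minimal sketch of the intended calling pattern, with example_submit() and the job-queueing step purely illustrative; the reservation_object_* calls match the API used elsewhere in this series:

	static int example_submit(struct drm_gem_object **objs, int count,
				  struct dma_fence *out_fence)
	{
		struct ww_acquire_ctx ctx;
		int i, ret;

		ret = drm_gem_lock_reservations(objs, count, &ctx);
		if (ret)
			return ret;

		/* Reserve fence slots while everything is locked. */
		for (i = 0; i < count; i++) {
			ret = reservation_object_reserve_shared(objs[i]->resv, 1);
			if (ret)
				goto unlock;
		}

		/* ... queue the job to the hardware here ... */

		for (i = 0; i < count; i++)
			reservation_object_add_shared_fence(objs[i]->resv, out_fence);

	unlock:
		drm_gem_unlock_reservations(objs, count, &ctx);
		return ret;
	}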
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
new file mode 100644
index 000000000000..3750a982aaf6
--- /dev/null
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -0,0 +1,625 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright 2018 Noralf Trønnes
4 */
5
6#include <linux/dma-buf.h>
7#include <linux/export.h>
8#include <linux/mutex.h>
9#include <linux/shmem_fs.h>
10#include <linux/slab.h>
11#include <linux/vmalloc.h>
12
13#include <drm/drm_device.h>
14#include <drm/drm_drv.h>
15#include <drm/drm_gem_shmem_helper.h>
16#include <drm/drm_prime.h>
17#include <drm/drm_print.h>
18
19/**
20 * DOC: overview
21 *
22 * This library provides helpers for GEM objects backed by shmem buffers
23 * allocated using anonymous pageable memory.
24 */
25
26static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
27 .free = drm_gem_shmem_free_object,
28 .print_info = drm_gem_shmem_print_info,
29 .pin = drm_gem_shmem_pin,
30 .unpin = drm_gem_shmem_unpin,
31 .get_sg_table = drm_gem_shmem_get_sg_table,
32 .vmap = drm_gem_shmem_vmap,
33 .vunmap = drm_gem_shmem_vunmap,
34 .vm_ops = &drm_gem_shmem_vm_ops,
35};
36
37/**
38 * drm_gem_shmem_create - Allocate an object with the given size
39 * @dev: DRM device
40 * @size: Size of the object to allocate
41 *
42 * This function creates a shmem GEM object.
43 *
44 * Returns:
45 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
46 * error code on failure.
47 */
48struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
49{
50 struct drm_gem_shmem_object *shmem;
51 struct drm_gem_object *obj;
52 int ret;
53
54 size = PAGE_ALIGN(size);
55
56 if (dev->driver->gem_create_object)
57 obj = dev->driver->gem_create_object(dev, size);
58 else
59 obj = kzalloc(sizeof(*shmem), GFP_KERNEL);
60 if (!obj)
61 return ERR_PTR(-ENOMEM);
62
63 if (!obj->funcs)
64 obj->funcs = &drm_gem_shmem_funcs;
65
66 ret = drm_gem_object_init(dev, obj, size);
67 if (ret)
68 goto err_free;
69
70 ret = drm_gem_create_mmap_offset(obj);
71 if (ret)
72 goto err_release;
73
74 shmem = to_drm_gem_shmem_obj(obj);
75 mutex_init(&shmem->pages_lock);
76 mutex_init(&shmem->vmap_lock);
77
78 /*
79 * Our buffers are kept pinned, so allocating them
80 * from the MOVABLE zone is a really bad idea, and
 81 * conflicts with CMA. See the comments above new_inode()
 82 * for why this is required _and_ expected if you're
83 * going to pin these pages.
84 */
85 mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
86 __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
87
88 return shmem;
89
90err_release:
91 drm_gem_object_release(obj);
92err_free:
93 kfree(obj);
94
95 return ERR_PTR(ret);
96}
97EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
98
99/**
100 * drm_gem_shmem_free_object - Free resources associated with a shmem GEM object
101 * @obj: GEM object to free
102 *
103 * This function cleans up the GEM object state and frees the memory used to
104 * store the object itself.
105 */
106void drm_gem_shmem_free_object(struct drm_gem_object *obj)
107{
108 struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
109
110 WARN_ON(shmem->vmap_use_count);
111
112 if (obj->import_attach) {
113 shmem->pages_use_count--;
114 drm_prime_gem_destroy(obj, shmem->sgt);
115 kvfree(shmem->pages);
116 } else {
117 if (shmem->sgt) {
118 dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
119 shmem->sgt->nents, DMA_BIDIRECTIONAL);
120
121 drm_gem_shmem_put_pages(shmem);
122 sg_free_table(shmem->sgt);
123 kfree(shmem->sgt);
124 }
125 }
126
127 WARN_ON(shmem->pages_use_count);
128
129 drm_gem_object_release(obj);
130 mutex_destroy(&shmem->pages_lock);
131 mutex_destroy(&shmem->vmap_lock);
132 kfree(shmem);
133}
134EXPORT_SYMBOL_GPL(drm_gem_shmem_free_object);
135
136static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
137{
138 struct drm_gem_object *obj = &shmem->base;
139 struct page **pages;
140
141 if (shmem->pages_use_count++ > 0)
142 return 0;
143
144 pages = drm_gem_get_pages(obj);
145 if (IS_ERR(pages)) {
146 DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
147 shmem->pages_use_count = 0;
148 return PTR_ERR(pages);
149 }
150
151 shmem->pages = pages;
152
153 return 0;
154}
155
156/*
157 * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
158 * @shmem: shmem GEM object
159 *
 160 * This function makes sure that backing pages exist for the shmem GEM object
161 * and increases the use count.
162 *
163 * Returns:
164 * 0 on success or a negative error code on failure.
165 */
166int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
167{
168 int ret;
169
170 ret = mutex_lock_interruptible(&shmem->pages_lock);
171 if (ret)
172 return ret;
173 ret = drm_gem_shmem_get_pages_locked(shmem);
174 mutex_unlock(&shmem->pages_lock);
175
176 return ret;
177}
178EXPORT_SYMBOL(drm_gem_shmem_get_pages);
179
180static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
181{
182 struct drm_gem_object *obj = &shmem->base;
183
184 if (WARN_ON_ONCE(!shmem->pages_use_count))
185 return;
186
187 if (--shmem->pages_use_count > 0)
188 return;
189
190 drm_gem_put_pages(obj, shmem->pages,
191 shmem->pages_mark_dirty_on_put,
192 shmem->pages_mark_accessed_on_put);
193 shmem->pages = NULL;
194}
195
196/*
197 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
198 * @shmem: shmem GEM object
199 *
 200 * This function decreases the use count and puts the backing pages when the use count drops to zero.
201 */
202void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
203{
204 mutex_lock(&shmem->pages_lock);
205 drm_gem_shmem_put_pages_locked(shmem);
206 mutex_unlock(&shmem->pages_lock);
207}
208EXPORT_SYMBOL(drm_gem_shmem_put_pages);
209
210/**
211 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
212 * @obj: GEM object
213 *
214 * This function makes sure the backing pages are pinned in memory while the
215 * buffer is exported.
216 *
217 * Returns:
218 * 0 on success or a negative error code on failure.
219 */
220int drm_gem_shmem_pin(struct drm_gem_object *obj)
221{
222 struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
223
224 return drm_gem_shmem_get_pages(shmem);
225}
226EXPORT_SYMBOL(drm_gem_shmem_pin);
227
228/**
229 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
230 * @obj: GEM object
231 *
232 * This function removes the requirement that the backing pages are pinned in
233 * memory.
234 */
235void drm_gem_shmem_unpin(struct drm_gem_object *obj)
236{
237 struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
238
239 drm_gem_shmem_put_pages(shmem);
240}
241EXPORT_SYMBOL(drm_gem_shmem_unpin);
242
243static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
244{
245 struct drm_gem_object *obj = &shmem->base;
246 int ret;
247
248 if (shmem->vmap_use_count++ > 0)
249 return shmem->vaddr;
250
251 ret = drm_gem_shmem_get_pages(shmem);
252 if (ret)
253 goto err_zero_use;
254
255 if (obj->import_attach)
256 shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
257 else
258 shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT, VM_MAP, PAGE_KERNEL);
259
260 if (!shmem->vaddr) {
261 DRM_DEBUG_KMS("Failed to vmap pages\n");
262 ret = -ENOMEM;
263 goto err_put_pages;
264 }
265
266 return shmem->vaddr;
267
268err_put_pages:
269 drm_gem_shmem_put_pages(shmem);
270err_zero_use:
271 shmem->vmap_use_count = 0;
272
273 return ERR_PTR(ret);
274}
275
276/*
277 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
278 * @shmem: shmem GEM object
279 *
280 * This function makes sure that a virtual address exists for the buffer backing
281 * the shmem GEM object.
282 *
283 * Returns:
 284 * The kernel virtual address on success or an ERR_PTR()-encoded negative error code on failure.
285 */
286void *drm_gem_shmem_vmap(struct drm_gem_object *obj)
287{
288 struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
289 void *vaddr;
290 int ret;
291
292 ret = mutex_lock_interruptible(&shmem->vmap_lock);
293 if (ret)
294 return ERR_PTR(ret);
295 vaddr = drm_gem_shmem_vmap_locked(shmem);
296 mutex_unlock(&shmem->vmap_lock);
297
298 return vaddr;
299}
300EXPORT_SYMBOL(drm_gem_shmem_vmap);
301
302static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem)
303{
304 struct drm_gem_object *obj = &shmem->base;
305
306 if (WARN_ON_ONCE(!shmem->vmap_use_count))
307 return;
308
309 if (--shmem->vmap_use_count > 0)
310 return;
311
312 if (obj->import_attach)
313 dma_buf_vunmap(obj->import_attach->dmabuf, shmem->vaddr);
314 else
315 vunmap(shmem->vaddr);
316
317 shmem->vaddr = NULL;
318 drm_gem_shmem_put_pages(shmem);
319}
320
321/*
 322 * drm_gem_shmem_vunmap - Unmap a virtual mapping of a shmem GEM object
323 * @shmem: shmem GEM object
324 *
325 * This function removes the virtual address when use count drops to zero.
326 */
327void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr)
328{
329 struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
330
331 mutex_lock(&shmem->vmap_lock);
332 drm_gem_shmem_vunmap_locked(shmem);
333 mutex_unlock(&shmem->vmap_lock);
334}
335EXPORT_SYMBOL(drm_gem_shmem_vunmap);
336
337struct drm_gem_shmem_object *
338drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
339 struct drm_device *dev, size_t size,
340 uint32_t *handle)
341{
342 struct drm_gem_shmem_object *shmem;
343 int ret;
344
345 shmem = drm_gem_shmem_create(dev, size);
346 if (IS_ERR(shmem))
347 return shmem;
348
349 /*
 350 * Allocate an ID from the IDR table where the object is registered;
 351 * the handle holds the ID that userspace sees.
352 */
353 ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
354 /* drop reference from allocate - handle holds it now. */
355 drm_gem_object_put_unlocked(&shmem->base);
356 if (ret)
357 return ERR_PTR(ret);
358
359 return shmem;
360}
361EXPORT_SYMBOL(drm_gem_shmem_create_with_handle);
362
363/**
364 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
365 * @file: DRM file structure to create the dumb buffer for
366 * @dev: DRM device
367 * @args: IOCTL data
368 *
369 * This function computes the pitch of the dumb buffer and rounds it up to an
370 * integer number of bytes per pixel. Drivers for hardware that doesn't have
371 * any additional restrictions on the pitch can directly use this function as
372 * their &drm_driver.dumb_create callback.
373 *
374 * For hardware with additional restrictions, drivers can adjust the fields
375 * set up by userspace before calling into this function.
376 *
377 * Returns:
378 * 0 on success or a negative error code on failure.
379 */
380int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
381 struct drm_mode_create_dumb *args)
382{
383 u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
384 struct drm_gem_shmem_object *shmem;
385
386 if (!args->pitch || !args->size) {
387 args->pitch = min_pitch;
388 args->size = args->pitch * args->height;
389 } else {
390 /* ensure sane minimum values */
391 if (args->pitch < min_pitch)
392 args->pitch = min_pitch;
393 if (args->size < args->pitch * args->height)
394 args->size = args->pitch * args->height;
395 }
396
397 shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
398
399 return PTR_ERR_OR_ZERO(shmem);
400}
401EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
402
403static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
404{
405 struct vm_area_struct *vma = vmf->vma;
406 struct drm_gem_object *obj = vma->vm_private_data;
407 struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
408 loff_t num_pages = obj->size >> PAGE_SHIFT;
409 struct page *page;
410
 411 if (vmf->pgoff >= num_pages || WARN_ON_ONCE(!shmem->pages))
412 return VM_FAULT_SIGBUS;
413
414 page = shmem->pages[vmf->pgoff];
415
416 return vmf_insert_page(vma, vmf->address, page);
417}
418
419static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
420{
421 struct drm_gem_object *obj = vma->vm_private_data;
422 struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
423 int ret;
424
425 ret = drm_gem_shmem_get_pages(shmem);
426 WARN_ON_ONCE(ret != 0);
427
428 drm_gem_vm_open(vma);
429}
430
431static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
432{
433 struct drm_gem_object *obj = vma->vm_private_data;
434 struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
435
436 drm_gem_shmem_put_pages(shmem);
437 drm_gem_vm_close(vma);
438}
439
440const struct vm_operations_struct drm_gem_shmem_vm_ops = {
441 .fault = drm_gem_shmem_fault,
442 .open = drm_gem_shmem_vm_open,
443 .close = drm_gem_shmem_vm_close,
444};
445EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
446
447/**
448 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
449 * @filp: File object
450 * @vma: VMA for the area to be mapped
451 *
452 * This function implements an augmented version of the GEM DRM file mmap
 453 * operation for shmem objects. Drivers that employ the shmem helpers should
 454 * use this function as the &file_operations.mmap handler of the DRM
 455 * device file.
456 *
457 * Instead of directly referencing this function, drivers should use the
458 * DEFINE_DRM_GEM_SHMEM_FOPS() macro.
459 *
460 * Returns:
461 * 0 on success or a negative error code on failure.
462 */
463int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma)
464{
465 struct drm_gem_shmem_object *shmem;
466 int ret;
467
468 ret = drm_gem_mmap(filp, vma);
469 if (ret)
470 return ret;
471
472 shmem = to_drm_gem_shmem_obj(vma->vm_private_data);
473
474 ret = drm_gem_shmem_get_pages(shmem);
475 if (ret) {
476 drm_gem_vm_close(vma);
477 return ret;
478 }
479
480 /* VM_PFNMAP was set by drm_gem_mmap() */
481 vma->vm_flags &= ~VM_PFNMAP;
482 vma->vm_flags |= VM_MIXEDMAP;
483
484 /* Remove the fake offset */
485 vma->vm_pgoff -= drm_vma_node_start(&shmem->base.vma_node);
486
487 return 0;
488}
489EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
490
491/**
492 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
493 * @p: DRM printer
494 * @indent: Tab indentation level
495 * @obj: GEM object
496 */
497void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
498 const struct drm_gem_object *obj)
499{
500 const struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
501
502 drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
503 drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
504 drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
505}
506EXPORT_SYMBOL(drm_gem_shmem_print_info);
507
508/**
509 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
510 * pages for a shmem GEM object
511 * @obj: GEM object
512 *
513 * This function exports a scatter/gather table suitable for PRIME usage by
514 * calling the standard DMA mapping API.
515 *
516 * Returns:
 517 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded error on failure.
518 */
519struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj)
520{
521 struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
522
523 return drm_prime_pages_to_sg(shmem->pages, obj->size >> PAGE_SHIFT);
524}
525EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);
526
527/**
528 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
529 * scatter/gather table for a shmem GEM object.
530 * @obj: GEM object
531 *
532 * This function returns a scatter/gather table suitable for driver usage. If
 533 * the sg table doesn't exist, the pages are pinned, dma-mapped, and an sg
 534 * table is created.
535 *
536 * Returns:
 537 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded error code on failure.
538 */
539struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj)
540{
541 int ret;
542 struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
543 struct sg_table *sgt;
544
545 if (shmem->sgt)
546 return shmem->sgt;
547
548 WARN_ON(obj->import_attach);
549
550 ret = drm_gem_shmem_get_pages(shmem);
551 if (ret)
552 return ERR_PTR(ret);
553
554 sgt = drm_gem_shmem_get_sg_table(&shmem->base);
555 if (IS_ERR(sgt)) {
556 ret = PTR_ERR(sgt);
557 goto err_put_pages;
558 }
559 /* Map the pages for use by the h/w. */
560 dma_map_sg(obj->dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
561
562 shmem->sgt = sgt;
563
564 return sgt;
565
566err_put_pages:
567 drm_gem_shmem_put_pages(shmem);
568 return ERR_PTR(ret);
569}
570EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
571
572/**
573 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
574 * another driver's scatter/gather table of pinned pages
575 * @dev: Device to import into
576 * @attach: DMA-BUF attachment
577 * @sgt: Scatter/gather table of pinned pages
578 *
579 * This function imports a scatter/gather table exported via DMA-BUF by
580 * another driver. Drivers that use the shmem helpers should set this as their
581 * &drm_driver.gem_prime_import_sg_table callback.
582 *
583 * Returns:
584 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
585 * error code on failure.
586 */
587struct drm_gem_object *
588drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
589 struct dma_buf_attachment *attach,
590 struct sg_table *sgt)
591{
592 size_t size = PAGE_ALIGN(attach->dmabuf->size);
593 size_t npages = size >> PAGE_SHIFT;
594 struct drm_gem_shmem_object *shmem;
595 int ret;
596
597 shmem = drm_gem_shmem_create(dev, size);
598 if (IS_ERR(shmem))
599 return ERR_CAST(shmem);
600
601 shmem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
602 if (!shmem->pages) {
603 ret = -ENOMEM;
604 goto err_free_gem;
605 }
606
607 ret = drm_prime_sg_to_page_addr_arrays(sgt, shmem->pages, NULL, npages);
608 if (ret < 0)
609 goto err_free_array;
610
611 shmem->sgt = sgt;
612 shmem->pages_use_count = 1; /* Permanently pinned from our point of view */
613
614 DRM_DEBUG_PRIME("size = %zu\n", size);
615
616 return &shmem->base;
617
618err_free_array:
619 kvfree(shmem->pages);
620err_free_gem:
621 drm_gem_object_put_unlocked(&shmem->base);
622
623 return ERR_PTR(ret);
624}
625EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
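For a driver adopting this helper, the expected glue is small: the fops come from the DEFINE_DRM_GEM_SHMEM_FOPS() macro referenced in the drm_gem_shmem_mmap() kerneldoc above, and the dumb-buffer and PRIME hooks point at the new helpers. A hedged sketch, where all "example" names are hypothetical and the exact macro contents are assumed from the kerneldoc:

	DEFINE_DRM_GEM_SHMEM_FOPS(example_fops);

	static struct drm_driver example_driver = {
		.driver_features	   = DRIVER_GEM | DRIVER_PRIME,
		.fops			   = &example_fops,
		.dumb_create		   = drm_gem_shmem_dumb_create,
		.prime_handle_to_fd	   = drm_gem_prime_handle_to_fd,
		.prime_fd_to_handle	   = drm_gem_prime_fd_to_handle,
		.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
		.gem_prime_mmap		   = drm_gem_prime_mmap,
		.name			   = "example",
	};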
diff --git a/drivers/gpu/drm/drm_kms_helper_common.c b/drivers/gpu/drm/drm_kms_helper_common.c
index 93e2b30fe1a5..9c5ae825c507 100644
--- a/drivers/gpu/drm/drm_kms_helper_common.c
+++ b/drivers/gpu/drm/drm_kms_helper_common.c
@@ -39,7 +39,7 @@ MODULE_LICENSE("GPL and additional rights");
39/* Backward compatibility for drm_kms_helper.edid_firmware */ 39/* Backward compatibility for drm_kms_helper.edid_firmware */
40static int edid_firmware_set(const char *val, const struct kernel_param *kp) 40static int edid_firmware_set(const char *val, const struct kernel_param *kp)
41{ 41{
42 DRM_NOTE("drm_kms_firmware.edid_firmware is deprecated, please use drm.edid_firmware intead.\n"); 42 DRM_NOTE("drm_kms_firmware.edid_firmware is deprecated, please use drm.edid_firmware instead.\n");
43 43
44 return __drm_set_edid_firmware_path(val); 44 return __drm_set_edid_firmware_path(val);
45} 45}
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index 40c4349cb939..8dbcdc77f6bf 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -35,6 +35,7 @@
35 35
36#include <linux/highmem.h> 36#include <linux/highmem.h>
37#include <linux/export.h> 37#include <linux/export.h>
38#include <xen/xen.h>
38#include <drm/drmP.h> 39#include <drm/drmP.h>
39#include "drm_legacy.h" 40#include "drm_legacy.h"
40 41
@@ -150,15 +151,27 @@ void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
150} 151}
151EXPORT_SYMBOL(drm_legacy_ioremapfree); 152EXPORT_SYMBOL(drm_legacy_ioremapfree);
152 153
153u64 drm_get_max_iomem(void) 154bool drm_need_swiotlb(int dma_bits)
154{ 155{
155 struct resource *tmp; 156 struct resource *tmp;
156 resource_size_t max_iomem = 0; 157 resource_size_t max_iomem = 0;
157 158
159 /*
160 * Xen paravirtual hosts require swiotlb regardless of requested dma
161 * transfer size.
162 *
163 * NOTE: Really, what it requires is use of the dma_alloc_coherent
164 * allocator used in ttm_dma_populate() instead of
 165 * ttm_populate_and_map_pages(), which bounce-buffers so heavily
 166 * under Xen that it exhausts the swiotlb buffer.
167 */
168 if (xen_pv_domain())
169 return true;
170
158 for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling) { 171 for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling) {
159 max_iomem = max(max_iomem, tmp->end); 172 max_iomem = max(max_iomem, tmp->end);
160 } 173 }
161 174
162 return max_iomem; 175 return max_iomem > ((u64)1 << dma_bits);
163} 176}
164EXPORT_SYMBOL(drm_get_max_iomem); 177EXPORT_SYMBOL(drm_need_swiotlb);
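The rename moves the swiotlb decision into the helper itself: callers that previously compared drm_get_max_iomem() against their DMA mask now ask the question directly, which is what the two-line amdgpu and radeon hunks in this series' diffstat do. A sketch of the before/after at a call site (example_need_swiotlb() is illustrative):

	static bool example_need_swiotlb(int dma_bits)
	{
		/* Before: callers open-coded the comparison. */
		/* return drm_get_max_iomem() > ((u64)1 << dma_bits); */

		/* After: the helper also covers the Xen PV case internally. */
		return drm_need_swiotlb(dma_bits);
	}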
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index 52e445bb1aa5..521aff99b08a 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -80,6 +80,12 @@ static const struct drm_dmi_panel_orientation_data lcd800x1280_rightside_up = {
80 .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, 80 .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
81}; 81};
82 82
83static const struct drm_dmi_panel_orientation_data lcd1200x1920_rightside_up = {
84 .width = 1200,
85 .height = 1920,
86 .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
87};
88
83static const struct dmi_system_id orientation_data[] = { 89static const struct dmi_system_id orientation_data[] = {
84 { /* Acer One 10 (S1003) */ 90 { /* Acer One 10 (S1003) */
85 .matches = { 91 .matches = {
@@ -148,6 +154,13 @@ static const struct dmi_system_id orientation_data[] = {
148 DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"), 154 DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"),
149 }, 155 },
150 .driver_data = (void *)&lcd800x1280_rightside_up, 156 .driver_data = (void *)&lcd800x1280_rightside_up,
157 }, { /* Lenovo Ideapad D330 */
158 .matches = {
159 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
160 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "81H3"),
161 DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"),
162 },
163 .driver_data = (void *)&lcd1200x1920_rightside_up,
151 }, { /* VIOS LTH17 */ 164 }, { /* VIOS LTH17 */
152 .matches = { 165 .matches = {
153 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"), 166 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"),
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 231e3f6d5f41..dc079efb3b0f 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -504,6 +504,7 @@ struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
504 .size = obj->size, 504 .size = obj->size,
505 .flags = flags, 505 .flags = flags,
506 .priv = obj, 506 .priv = obj,
507 .resv = obj->resv,
507 }; 508 };
508 509
509 if (dev->driver->gem_prime_res_obj) 510 if (dev->driver->gem_prime_res_obj)
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index e19525af0cce..5329e66598c6 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -731,7 +731,7 @@ cleanup_entries:
731 * 731 *
732 * Calculate the timeout in jiffies from an absolute time in sec/nsec. 732 * Calculate the timeout in jiffies from an absolute time in sec/nsec.
733 */ 733 */
734static signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec) 734signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
735{ 735{
736 ktime_t abs_timeout, now; 736 ktime_t abs_timeout, now;
737 u64 timeout_ns, timeout_jiffies64; 737 u64 timeout_ns, timeout_jiffies64;
@@ -755,6 +755,7 @@ static signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
755 755
756 return timeout_jiffies64 + 1; 756 return timeout_jiffies64 + 1;
757} 757}
758EXPORT_SYMBOL(drm_timeout_abs_to_jiffies);
758 759
759static int drm_syncobj_array_wait(struct drm_device *dev, 760static int drm_syncobj_array_wait(struct drm_device *dev,
760 struct drm_file *file_private, 761 struct drm_file *file_private,
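Exporting drm_timeout_abs_to_jiffies() pairs naturally with the new drm_gem_reservation_object_wait() from drm_gem.c: together they are enough to implement a simple BO-wait ioctl. A hedged sketch, where struct example_wait_args and its fields are hypothetical UAPI:

	struct example_wait_args {
		__u32 handle;		/* GEM handle to wait on */
		__s64 timeout_ns;	/* absolute timeout, CLOCK_MONOTONIC */
	};

	static int example_wait_bo_ioctl(struct drm_device *dev, void *data,
					 struct drm_file *file_priv)
	{
		struct example_wait_args *args = data;
		signed long timeout;

		timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);

		/* Wait on all fences; returns 0, -ETIME or -ERESTARTSYS. */
		return drm_gem_reservation_object_wait(file_priv, args->handle,
						       true, timeout);
	}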
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index c3301046dfaa..8987501f53b2 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -584,8 +584,8 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
584 vma->vm_ops = &drm_vm_ops; 584 vma->vm_ops = &drm_vm_ops;
585 break; 585 break;
586 } 586 }
587 /* fall through to _DRM_FRAME_BUFFER... */
588#endif 587#endif
588 /* fall through - to _DRM_FRAME_BUFFER... */
589 case _DRM_FRAME_BUFFER: 589 case _DRM_FRAME_BUFFER:
590 case _DRM_REGISTERS: 590 case _DRM_REGISTERS:
591 offset = drm_core_get_reg_ofs(dev); 591 offset = drm_core_get_reg_ofs(dev);
@@ -610,7 +610,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
610 vma->vm_end - vma->vm_start, vma->vm_page_prot)) 610 vma->vm_end - vma->vm_start, vma->vm_page_prot))
611 return -EAGAIN; 611 return -EAGAIN;
612 vma->vm_page_prot = drm_dma_prot(map->type, vma); 612 vma->vm_page_prot = drm_dma_prot(map->type, vma);
613 /* fall through to _DRM_SHM */ 613 /* fall through - to _DRM_SHM */
614 case _DRM_SHM: 614 case _DRM_SHM:
615 vma->vm_ops = &drm_vm_shm_ops; 615 vma->vm_ops = &drm_vm_shm_ops;
616 vma->vm_private_data = (void *)map; 616 vma->vm_private_data = (void *)map;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 18c27f795cf6..9f42f7538236 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -473,7 +473,6 @@ static struct drm_driver etnaviv_drm_driver = {
473 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 473 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
474 .gem_prime_export = drm_gem_prime_export, 474 .gem_prime_export = drm_gem_prime_export,
475 .gem_prime_import = drm_gem_prime_import, 475 .gem_prime_import = drm_gem_prime_import,
476 .gem_prime_res_obj = etnaviv_gem_prime_res_obj,
477 .gem_prime_pin = etnaviv_gem_prime_pin, 476 .gem_prime_pin = etnaviv_gem_prime_pin,
478 .gem_prime_unpin = etnaviv_gem_prime_unpin, 477 .gem_prime_unpin = etnaviv_gem_prime_unpin,
479 .gem_prime_get_sg_table = etnaviv_gem_prime_get_sg_table, 478 .gem_prime_get_sg_table = etnaviv_gem_prime_get_sg_table,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index a6a7ded37ef1..6044ace6bb3e 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -60,7 +60,6 @@ void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj);
60void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); 60void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
61int etnaviv_gem_prime_mmap(struct drm_gem_object *obj, 61int etnaviv_gem_prime_mmap(struct drm_gem_object *obj,
62 struct vm_area_struct *vma); 62 struct vm_area_struct *vma);
63struct reservation_object *etnaviv_gem_prime_res_obj(struct drm_gem_object *obj);
64struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev, 63struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
65 struct dma_buf_attachment *attach, struct sg_table *sg); 64 struct dma_buf_attachment *attach, struct sg_table *sg);
66int etnaviv_gem_prime_pin(struct drm_gem_object *obj); 65int etnaviv_gem_prime_pin(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 5c48915f492d..c60752ef7324 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -397,13 +397,13 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
397 } 397 }
398 398
399 if (op & ETNA_PREP_NOSYNC) { 399 if (op & ETNA_PREP_NOSYNC) {
400 if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv, 400 if (!reservation_object_test_signaled_rcu(obj->resv,
401 write)) 401 write))
402 return -EBUSY; 402 return -EBUSY;
403 } else { 403 } else {
404 unsigned long remain = etnaviv_timeout_to_jiffies(timeout); 404 unsigned long remain = etnaviv_timeout_to_jiffies(timeout);
405 405
406 ret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv, 406 ret = reservation_object_wait_timeout_rcu(obj->resv,
407 write, true, remain); 407 write, true, remain);
408 if (ret <= 0) 408 if (ret <= 0)
409 return ret == 0 ? -ETIMEDOUT : ret; 409 return ret == 0 ? -ETIMEDOUT : ret;
@@ -459,7 +459,7 @@ static void etnaviv_gem_describe_fence(struct dma_fence *fence,
459static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m) 459static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
460{ 460{
461 struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); 461 struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
462 struct reservation_object *robj = etnaviv_obj->resv; 462 struct reservation_object *robj = obj->resv;
463 struct reservation_object_list *fobj; 463 struct reservation_object_list *fobj;
464 struct dma_fence *fence; 464 struct dma_fence *fence;
465 unsigned long off = drm_vma_node_start(&obj->vma_node); 465 unsigned long off = drm_vma_node_start(&obj->vma_node);
@@ -549,8 +549,6 @@ void etnaviv_gem_free_object(struct drm_gem_object *obj)
549 549
550 drm_gem_free_mmap_offset(obj); 550 drm_gem_free_mmap_offset(obj);
551 etnaviv_obj->ops->release(etnaviv_obj); 551 etnaviv_obj->ops->release(etnaviv_obj);
552 if (etnaviv_obj->resv == &etnaviv_obj->_resv)
553 reservation_object_fini(&etnaviv_obj->_resv);
554 drm_gem_object_release(obj); 552 drm_gem_object_release(obj);
555 553
556 kfree(etnaviv_obj); 554 kfree(etnaviv_obj);
@@ -596,12 +594,8 @@ static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
596 594
597 etnaviv_obj->flags = flags; 595 etnaviv_obj->flags = flags;
598 etnaviv_obj->ops = ops; 596 etnaviv_obj->ops = ops;
599 if (robj) { 597 if (robj)
600 etnaviv_obj->resv = robj; 598 etnaviv_obj->base.resv = robj;
601 } else {
602 etnaviv_obj->resv = &etnaviv_obj->_resv;
603 reservation_object_init(&etnaviv_obj->_resv);
604 }
605 599
606 mutex_init(&etnaviv_obj->lock); 600 mutex_init(&etnaviv_obj->lock);
607 INIT_LIST_HEAD(&etnaviv_obj->vram_list); 601 INIT_LIST_HEAD(&etnaviv_obj->vram_list);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index f0abb744ef95..753c458497d0 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -47,10 +47,6 @@ struct etnaviv_gem_object {
47 struct sg_table *sgt; 47 struct sg_table *sgt;
48 void *vaddr; 48 void *vaddr;
49 49
50 /* normally (resv == &_resv) except for imported bo's */
51 struct reservation_object *resv;
52 struct reservation_object _resv;
53
54 struct list_head vram_list; 50 struct list_head vram_list;
55 51
56 /* cache maintenance */ 52 /* cache maintenance */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index f21529e635e3..00e8b6a817e3 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -139,10 +139,3 @@ fail:
139 139
140 return ERR_PTR(ret); 140 return ERR_PTR(ret);
141} 141}
142
143struct reservation_object *etnaviv_gem_prime_res_obj(struct drm_gem_object *obj)
144{
145 struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
146
147 return etnaviv_obj->resv;
148}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index b2fe3446bfbc..e054f09ac828 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -108,9 +108,9 @@ out_unlock:
108static void submit_unlock_object(struct etnaviv_gem_submit *submit, int i) 108static void submit_unlock_object(struct etnaviv_gem_submit *submit, int i)
109{ 109{
110 if (submit->bos[i].flags & BO_LOCKED) { 110 if (submit->bos[i].flags & BO_LOCKED) {
111 struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj; 111 struct drm_gem_object *obj = &submit->bos[i].obj->base;
112 112
113 ww_mutex_unlock(&etnaviv_obj->resv->lock); 113 ww_mutex_unlock(&obj->resv->lock);
114 submit->bos[i].flags &= ~BO_LOCKED; 114 submit->bos[i].flags &= ~BO_LOCKED;
115 } 115 }
116} 116}
@@ -122,7 +122,7 @@ static int submit_lock_objects(struct etnaviv_gem_submit *submit,
122 122
123retry: 123retry:
124 for (i = 0; i < submit->nr_bos; i++) { 124 for (i = 0; i < submit->nr_bos; i++) {
125 struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj; 125 struct drm_gem_object *obj = &submit->bos[i].obj->base;
126 126
127 if (slow_locked == i) 127 if (slow_locked == i)
128 slow_locked = -1; 128 slow_locked = -1;
@@ -130,7 +130,7 @@ retry:
130 contended = i; 130 contended = i;
131 131
132 if (!(submit->bos[i].flags & BO_LOCKED)) { 132 if (!(submit->bos[i].flags & BO_LOCKED)) {
133 ret = ww_mutex_lock_interruptible(&etnaviv_obj->resv->lock, 133 ret = ww_mutex_lock_interruptible(&obj->resv->lock,
134 ticket); 134 ticket);
135 if (ret == -EALREADY) 135 if (ret == -EALREADY)
136 DRM_ERROR("BO at index %u already on submit list\n", 136 DRM_ERROR("BO at index %u already on submit list\n",
@@ -153,12 +153,12 @@ fail:
153 submit_unlock_object(submit, slow_locked); 153 submit_unlock_object(submit, slow_locked);
154 154
155 if (ret == -EDEADLK) { 155 if (ret == -EDEADLK) {
156 struct etnaviv_gem_object *etnaviv_obj; 156 struct drm_gem_object *obj;
157 157
158 etnaviv_obj = submit->bos[contended].obj; 158 obj = &submit->bos[contended].obj->base;
159 159
160 /* we lost out in a seqno race, lock and retry.. */ 160 /* we lost out in a seqno race, lock and retry.. */
161 ret = ww_mutex_lock_slow_interruptible(&etnaviv_obj->resv->lock, 161 ret = ww_mutex_lock_slow_interruptible(&obj->resv->lock,
162 ticket); 162 ticket);
163 if (!ret) { 163 if (!ret) {
164 submit->bos[contended].flags |= BO_LOCKED; 164 submit->bos[contended].flags |= BO_LOCKED;
@@ -176,7 +176,7 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
176 176
177 for (i = 0; i < submit->nr_bos; i++) { 177 for (i = 0; i < submit->nr_bos; i++) {
178 struct etnaviv_gem_submit_bo *bo = &submit->bos[i]; 178 struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
179 struct reservation_object *robj = bo->obj->resv; 179 struct reservation_object *robj = bo->obj->base.resv;
180 180
181 if (!(bo->flags & ETNA_SUBMIT_BO_WRITE)) { 181 if (!(bo->flags & ETNA_SUBMIT_BO_WRITE)) {
182 ret = reservation_object_reserve_shared(robj, 1); 182 ret = reservation_object_reserve_shared(robj, 1);
@@ -207,13 +207,13 @@ static void submit_attach_object_fences(struct etnaviv_gem_submit *submit)
207 int i; 207 int i;
208 208
209 for (i = 0; i < submit->nr_bos; i++) { 209 for (i = 0; i < submit->nr_bos; i++) {
210 struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj; 210 struct drm_gem_object *obj = &submit->bos[i].obj->base;
211 211
212 if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE) 212 if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
213 reservation_object_add_excl_fence(etnaviv_obj->resv, 213 reservation_object_add_excl_fence(obj->resv,
214 submit->out_fence); 214 submit->out_fence);
215 else 215 else
216 reservation_object_add_shared_fence(etnaviv_obj->resv, 216 reservation_object_add_shared_fence(obj->resv,
217 submit->out_fence); 217 submit->out_fence);
218 218
219 submit_unlock_object(submit, i); 219 submit_unlock_object(submit, i);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 638a586469f9..cf80c5d8af34 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -6557,13 +6557,22 @@ enum {
6557#define PLANE_CTL_FORMAT_YUV422 (0 << 24) 6557#define PLANE_CTL_FORMAT_YUV422 (0 << 24)
6558#define PLANE_CTL_FORMAT_NV12 (1 << 24) 6558#define PLANE_CTL_FORMAT_NV12 (1 << 24)
6559#define PLANE_CTL_FORMAT_XRGB_2101010 (2 << 24) 6559#define PLANE_CTL_FORMAT_XRGB_2101010 (2 << 24)
6560#define PLANE_CTL_FORMAT_P010 (3 << 24)
6560#define PLANE_CTL_FORMAT_XRGB_8888 (4 << 24) 6561#define PLANE_CTL_FORMAT_XRGB_8888 (4 << 24)
6562#define PLANE_CTL_FORMAT_P012 (5 << 24)
6561#define PLANE_CTL_FORMAT_XRGB_16161616F (6 << 24) 6563#define PLANE_CTL_FORMAT_XRGB_16161616F (6 << 24)
6564#define PLANE_CTL_FORMAT_P016 (7 << 24)
6562#define PLANE_CTL_FORMAT_AYUV (8 << 24) 6565#define PLANE_CTL_FORMAT_AYUV (8 << 24)
6563#define PLANE_CTL_FORMAT_INDEXED (12 << 24) 6566#define PLANE_CTL_FORMAT_INDEXED (12 << 24)
6564#define PLANE_CTL_FORMAT_RGB_565 (14 << 24) 6567#define PLANE_CTL_FORMAT_RGB_565 (14 << 24)
6565#define ICL_PLANE_CTL_FORMAT_MASK (0x1f << 23) 6568#define ICL_PLANE_CTL_FORMAT_MASK (0x1f << 23)
6566#define PLANE_CTL_PIPE_CSC_ENABLE (1 << 23) /* Pre-GLK */ 6569#define PLANE_CTL_PIPE_CSC_ENABLE (1 << 23) /* Pre-GLK */
6570#define PLANE_CTL_FORMAT_Y210 (1 << 23)
6571#define PLANE_CTL_FORMAT_Y212 (3 << 23)
6572#define PLANE_CTL_FORMAT_Y216 (5 << 23)
6573#define PLANE_CTL_FORMAT_Y410 (7 << 23)
6574#define PLANE_CTL_FORMAT_Y412 (9 << 23)
6575#define PLANE_CTL_FORMAT_Y416 (0xb << 23)
6567#define PLANE_CTL_KEY_ENABLE_MASK (0x3 << 21) 6576#define PLANE_CTL_KEY_ENABLE_MASK (0x3 << 21)
6568#define PLANE_CTL_KEY_ENABLE_SOURCE (1 << 21) 6577#define PLANE_CTL_KEY_ENABLE_SOURCE (1 << 21)
6569#define PLANE_CTL_KEY_ENABLE_DESTINATION (2 << 21) 6578#define PLANE_CTL_KEY_ENABLE_DESTINATION (2 << 21)
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index 7cf9290ea34a..b844e8840c6f 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -126,6 +126,7 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn,
126 */ 126 */
127 if (new_conn_state->force_audio != old_conn_state->force_audio || 127 if (new_conn_state->force_audio != old_conn_state->force_audio ||
128 new_conn_state->broadcast_rgb != old_conn_state->broadcast_rgb || 128 new_conn_state->broadcast_rgb != old_conn_state->broadcast_rgb ||
129 new_conn_state->base.colorspace != old_conn_state->base.colorspace ||
129 new_conn_state->base.picture_aspect_ratio != old_conn_state->base.picture_aspect_ratio || 130 new_conn_state->base.picture_aspect_ratio != old_conn_state->base.picture_aspect_ratio ||
130 new_conn_state->base.content_type != old_conn_state->base.content_type || 131 new_conn_state->base.content_type != old_conn_state->base.content_type ||
131 new_conn_state->base.scaling_mode != old_conn_state->base.scaling_mode) 132 new_conn_state->base.scaling_mode != old_conn_state->base.scaling_mode)
@@ -234,10 +235,11 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_sta
234 if (plane_state && plane_state->base.fb && 235 if (plane_state && plane_state->base.fb &&
235 plane_state->base.fb->format->is_yuv && 236 plane_state->base.fb->format->is_yuv &&
236 plane_state->base.fb->format->num_planes > 1) { 237 plane_state->base.fb->format->num_planes > 1) {
238 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
237 if (IS_GEN(dev_priv, 9) && 239 if (IS_GEN(dev_priv, 9) &&
238 !IS_GEMINILAKE(dev_priv)) { 240 !IS_GEMINILAKE(dev_priv)) {
239 mode = SKL_PS_SCALER_MODE_NV12; 241 mode = SKL_PS_SCALER_MODE_NV12;
240 } else if (icl_is_hdr_plane(to_intel_plane(plane_state->base.plane))) { 242 } else if (icl_is_hdr_plane(dev_priv, plane->id)) {
241 /* 243 /*
242 * On gen11+'s HDR planes we only use the scaler for 244 * On gen11+'s HDR planes we only use the scaler for
243 * scaling. They have a dedicated chroma upsampler, so 245 * scaling. They have a dedicated chroma upsampler, so
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index db0965904439..dd6c09699237 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -135,7 +135,7 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
135 new_crtc_state->active_planes |= BIT(plane->id); 135 new_crtc_state->active_planes |= BIT(plane->id);
136 136
137 if (new_plane_state->base.visible && 137 if (new_plane_state->base.visible &&
138 new_plane_state->base.fb->format->format == DRM_FORMAT_NV12) 138 is_planar_yuv_format(new_plane_state->base.fb->format->format))
139 new_crtc_state->nv12_planes |= BIT(plane->id); 139 new_crtc_state->nv12_planes |= BIT(plane->id);
140 140
141 if (new_plane_state->base.visible || old_plane_state->base.visible) 141 if (new_plane_state->base.visible || old_plane_state->base.visible)
diff --git a/drivers/gpu/drm/i915/intel_connector.c b/drivers/gpu/drm/i915/intel_connector.c
index ee16758747c5..8352d0bd8813 100644
--- a/drivers/gpu/drm/i915/intel_connector.c
+++ b/drivers/gpu/drm/i915/intel_connector.c
@@ -265,3 +265,11 @@ intel_attach_aspect_ratio_property(struct drm_connector *connector)
265 connector->dev->mode_config.aspect_ratio_property, 265 connector->dev->mode_config.aspect_ratio_property,
266 DRM_MODE_PICTURE_ASPECT_NONE); 266 DRM_MODE_PICTURE_ASPECT_NONE);
267} 267}
268
269void
270intel_attach_colorspace_property(struct drm_connector *connector)
271{
272 if (!drm_mode_create_colorspace_property(connector))
273 drm_object_attach_property(&connector->base,
274 connector->colorspace_property, 0);
275}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index ccb616351bba..94496488641c 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2677,6 +2677,24 @@ int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2677 return DRM_FORMAT_RGB565; 2677 return DRM_FORMAT_RGB565;
2678 case PLANE_CTL_FORMAT_NV12: 2678 case PLANE_CTL_FORMAT_NV12:
2679 return DRM_FORMAT_NV12; 2679 return DRM_FORMAT_NV12;
2680 case PLANE_CTL_FORMAT_P010:
2681 return DRM_FORMAT_P010;
2682 case PLANE_CTL_FORMAT_P012:
2683 return DRM_FORMAT_P012;
2684 case PLANE_CTL_FORMAT_P016:
2685 return DRM_FORMAT_P016;
2686 case PLANE_CTL_FORMAT_Y210:
2687 return DRM_FORMAT_Y210;
2688 case PLANE_CTL_FORMAT_Y212:
2689 return DRM_FORMAT_Y212;
2690 case PLANE_CTL_FORMAT_Y216:
2691 return DRM_FORMAT_Y216;
2692 case PLANE_CTL_FORMAT_Y410:
2693 return DRM_FORMAT_XVYU2101010;
2694 case PLANE_CTL_FORMAT_Y412:
2695 return DRM_FORMAT_XVYU12_16161616;
2696 case PLANE_CTL_FORMAT_Y416:
2697 return DRM_FORMAT_XVYU16161616;
2680 default: 2698 default:
2681 case PLANE_CTL_FORMAT_XRGB_8888: 2699 case PLANE_CTL_FORMAT_XRGB_8888:
2682 if (rgb_order) { 2700 if (rgb_order) {
@@ -2695,6 +2713,18 @@ int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2695 return DRM_FORMAT_XBGR2101010; 2713 return DRM_FORMAT_XBGR2101010;
2696 else 2714 else
2697 return DRM_FORMAT_XRGB2101010; 2715 return DRM_FORMAT_XRGB2101010;
2716 case PLANE_CTL_FORMAT_XRGB_16161616F:
2717 if (rgb_order) {
2718 if (alpha)
2719 return DRM_FORMAT_ABGR16161616F;
2720 else
2721 return DRM_FORMAT_XBGR16161616F;
2722 } else {
2723 if (alpha)
2724 return DRM_FORMAT_ARGB16161616F;
2725 else
2726 return DRM_FORMAT_XRGB16161616F;
2727 }
2698 } 2728 }
2699} 2729}
2700 2730
@@ -3176,7 +3206,7 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
3176 * Handle the AUX surface first since 3206 * Handle the AUX surface first since
3177 * the main surface setup depends on it. 3207 * the main surface setup depends on it.
3178 */ 3208 */
3179 if (fb->format->format == DRM_FORMAT_NV12) { 3209 if (is_planar_yuv_format(fb->format->format)) {
3180 ret = skl_check_nv12_aux_surface(plane_state); 3210 ret = skl_check_nv12_aux_surface(plane_state);
3181 if (ret) 3211 if (ret)
3182 return ret; 3212 return ret;
@@ -3590,6 +3620,12 @@ static u32 skl_plane_ctl_format(u32 pixel_format)
3590 return PLANE_CTL_FORMAT_XRGB_2101010; 3620 return PLANE_CTL_FORMAT_XRGB_2101010;
3591 case DRM_FORMAT_XBGR2101010: 3621 case DRM_FORMAT_XBGR2101010:
3592 return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010; 3622 return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
3623 case DRM_FORMAT_XBGR16161616F:
3624 case DRM_FORMAT_ABGR16161616F:
3625 return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
3626 case DRM_FORMAT_XRGB16161616F:
3627 case DRM_FORMAT_ARGB16161616F:
3628 return PLANE_CTL_FORMAT_XRGB_16161616F;
3593 case DRM_FORMAT_YUYV: 3629 case DRM_FORMAT_YUYV:
3594 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV; 3630 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
3595 case DRM_FORMAT_YVYU: 3631 case DRM_FORMAT_YVYU:
@@ -3600,6 +3636,24 @@ static u32 skl_plane_ctl_format(u32 pixel_format)
3600 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY; 3636 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
3601 case DRM_FORMAT_NV12: 3637 case DRM_FORMAT_NV12:
3602 return PLANE_CTL_FORMAT_NV12; 3638 return PLANE_CTL_FORMAT_NV12;
3639 case DRM_FORMAT_P010:
3640 return PLANE_CTL_FORMAT_P010;
3641 case DRM_FORMAT_P012:
3642 return PLANE_CTL_FORMAT_P012;
3643 case DRM_FORMAT_P016:
3644 return PLANE_CTL_FORMAT_P016;
3645 case DRM_FORMAT_Y210:
3646 return PLANE_CTL_FORMAT_Y210;
3647 case DRM_FORMAT_Y212:
3648 return PLANE_CTL_FORMAT_Y212;
3649 case DRM_FORMAT_Y216:
3650 return PLANE_CTL_FORMAT_Y216;
3651 case DRM_FORMAT_XVYU2101010:
3652 return PLANE_CTL_FORMAT_Y410;
3653 case DRM_FORMAT_XVYU12_16161616:
3654 return PLANE_CTL_FORMAT_Y412;
3655 case DRM_FORMAT_XVYU16161616:
3656 return PLANE_CTL_FORMAT_Y416;
3603 default: 3657 default:
3604 MISSING_CASE(pixel_format); 3658 MISSING_CASE(pixel_format);
3605 } 3659 }
@@ -3772,6 +3826,8 @@ u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
3772u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state, 3826u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
3773 const struct intel_plane_state *plane_state) 3827 const struct intel_plane_state *plane_state)
3774{ 3828{
3829 struct drm_i915_private *dev_priv =
3830 to_i915(plane_state->base.plane->dev);
3775 const struct drm_framebuffer *fb = plane_state->base.fb; 3831 const struct drm_framebuffer *fb = plane_state->base.fb;
3776 struct intel_plane *plane = to_intel_plane(plane_state->base.plane); 3832 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
3777 u32 plane_color_ctl = 0; 3833 u32 plane_color_ctl = 0;
@@ -3779,7 +3835,7 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
3779 plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE; 3835 plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
3780 plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state); 3836 plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);
3781 3837
3782 if (fb->format->is_yuv && !icl_is_hdr_plane(plane)) { 3838 if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
3783 if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709) 3839 if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
3784 plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709; 3840 plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
3785 else 3841 else
@@ -5036,9 +5092,9 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
5036 return 0; 5092 return 0;
5037 } 5093 }
5038 5094
5039 if (format && format->format == DRM_FORMAT_NV12 && 5095 if (format && is_planar_yuv_format(format->format) &&
5040 (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) { 5096 (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
5041 DRM_DEBUG_KMS("NV12: src dimensions not met\n"); 5097 DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n");
5042 return -EINVAL; 5098 return -EINVAL;
5043 } 5099 }
5044 5100
@@ -5105,14 +5161,15 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
5105{ 5161{
5106 struct intel_plane *intel_plane = 5162 struct intel_plane *intel_plane =
5107 to_intel_plane(plane_state->base.plane); 5163 to_intel_plane(plane_state->base.plane);
5164 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
5108 struct drm_framebuffer *fb = plane_state->base.fb; 5165 struct drm_framebuffer *fb = plane_state->base.fb;
5109 int ret; 5166 int ret;
5110 bool force_detach = !fb || !plane_state->base.visible; 5167 bool force_detach = !fb || !plane_state->base.visible;
5111 bool need_scaler = false; 5168 bool need_scaler = false;
5112 5169
5113 /* Pre-gen11 and SDR planes always need a scaler for planar formats. */ 5170 /* Pre-gen11 and SDR planes always need a scaler for planar formats. */
5114 if (!icl_is_hdr_plane(intel_plane) && 5171 if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
5115 fb && fb->format->format == DRM_FORMAT_NV12) 5172 fb && is_planar_yuv_format(fb->format->format))
5116 need_scaler = true; 5173 need_scaler = true;
5117 5174
5118 ret = skl_update_scaler(crtc_state, force_detach, 5175 ret = skl_update_scaler(crtc_state, force_detach,
@@ -5144,11 +5201,24 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
5144 case DRM_FORMAT_ARGB8888: 5201 case DRM_FORMAT_ARGB8888:
5145 case DRM_FORMAT_XRGB2101010: 5202 case DRM_FORMAT_XRGB2101010:
5146 case DRM_FORMAT_XBGR2101010: 5203 case DRM_FORMAT_XBGR2101010:
5204 case DRM_FORMAT_XBGR16161616F:
5205 case DRM_FORMAT_ABGR16161616F:
5206 case DRM_FORMAT_XRGB16161616F:
5207 case DRM_FORMAT_ARGB16161616F:
5147 case DRM_FORMAT_YUYV: 5208 case DRM_FORMAT_YUYV:
5148 case DRM_FORMAT_YVYU: 5209 case DRM_FORMAT_YVYU:
5149 case DRM_FORMAT_UYVY: 5210 case DRM_FORMAT_UYVY:
5150 case DRM_FORMAT_VYUY: 5211 case DRM_FORMAT_VYUY:
5151 case DRM_FORMAT_NV12: 5212 case DRM_FORMAT_NV12:
5213 case DRM_FORMAT_P010:
5214 case DRM_FORMAT_P012:
5215 case DRM_FORMAT_P016:
5216 case DRM_FORMAT_Y210:
5217 case DRM_FORMAT_Y212:
5218 case DRM_FORMAT_Y216:
5219 case DRM_FORMAT_XVYU2101010:
5220 case DRM_FORMAT_XVYU12_16161616:
5221 case DRM_FORMAT_XVYU16161616:
5152 break; 5222 break;
5153 default: 5223 default:
5154 DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n", 5224 DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
@@ -11134,7 +11204,7 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
11134 } 11204 }
11135 11205
11136 if (!linked_state) { 11206 if (!linked_state) {
11137 DRM_DEBUG_KMS("Need %d free Y planes for NV12\n", 11207 DRM_DEBUG_KMS("Need %d free Y planes for planar YUV\n",
11138 hweight8(crtc_state->nv12_planes)); 11208 hweight8(crtc_state->nv12_planes));
11139 11209
11140 return -EINVAL; 11210 return -EINVAL;
@@ -13767,7 +13837,7 @@ skl_max_scale(const struct intel_crtc_state *crtc_state,
13767 * or 13837 * or
13768 * cdclk/crtc_clock 13838 * cdclk/crtc_clock
13769 */ 13839 */
13770 mult = pixel_format == DRM_FORMAT_NV12 ? 2 : 3; 13840 mult = is_planar_yuv_format(pixel_format) ? 2 : 3;
13771 tmpclk1 = (1 << 16) * mult - 1; 13841 tmpclk1 = (1 << 16) * mult - 1;
13772 tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock); 13842 tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock);
13773 max_scale = min(tmpclk1, tmpclk2); 13843 max_scale = min(tmpclk1, tmpclk2);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 15db41394b9e..375f51d14dda 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1796,6 +1796,7 @@ int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
1796void intel_attach_force_audio_property(struct drm_connector *connector); 1796void intel_attach_force_audio_property(struct drm_connector *connector);
1797void intel_attach_broadcast_rgb_property(struct drm_connector *connector); 1797void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
1798void intel_attach_aspect_ratio_property(struct drm_connector *connector); 1798void intel_attach_aspect_ratio_property(struct drm_connector *connector);
1799void intel_attach_colorspace_property(struct drm_connector *connector);
1799 1800
1800/* intel_csr.c */ 1801/* intel_csr.c */
1801void intel_csr_ucode_init(struct drm_i915_private *); 1802void intel_csr_ucode_init(struct drm_i915_private *);
@@ -2300,6 +2301,7 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
2300 2301
2301 2302
2302/* intel_sprite.c */ 2303/* intel_sprite.c */
2304bool is_planar_yuv_format(u32 pixelformat);
2303int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, 2305int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
2304 int usecs); 2306 int usecs);
2305struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv, 2307struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
@@ -2324,12 +2326,13 @@ static inline bool icl_is_nv12_y_plane(enum plane_id id)
2324 return false; 2326 return false;
2325} 2327}
2326 2328
2327static inline bool icl_is_hdr_plane(struct intel_plane *plane) 2329static inline bool icl_is_hdr_plane(struct drm_i915_private *dev_priv,
2330 enum plane_id plane_id)
2328{ 2331{
2329 if (INTEL_GEN(to_i915(plane->base.dev)) < 11) 2332 if (INTEL_GEN(dev_priv) < 11)
2330 return false; 2333 return false;
2331 2334
2332 return plane->id < PLANE_SPRITE2; 2335 return plane_id < PLANE_SPRITE2;
2333} 2336}
2334 2337
2335/* intel_tv.c */ 2338/* intel_tv.c */
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index f125a62eba8c..765718b606d8 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -498,6 +498,8 @@ static void intel_hdmi_set_avi_infoframe(struct intel_encoder *encoder,
498 else 498 else
499 frame.avi.colorspace = HDMI_COLORSPACE_RGB; 499 frame.avi.colorspace = HDMI_COLORSPACE_RGB;
500 500
501 drm_hdmi_avi_infoframe_colorspace(&frame.avi, conn_state);
502
501 drm_hdmi_avi_infoframe_quant_range(&frame.avi, 503 drm_hdmi_avi_infoframe_quant_range(&frame.avi,
502 conn_state->connector, 504 conn_state->connector,
503 adjusted_mode, 505 adjusted_mode,
@@ -2143,10 +2145,21 @@ static void
2143intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector) 2145intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
2144{ 2146{
2145 struct drm_i915_private *dev_priv = to_i915(connector->dev); 2147 struct drm_i915_private *dev_priv = to_i915(connector->dev);
2148 struct intel_digital_port *intel_dig_port =
2149 hdmi_to_dig_port(intel_hdmi);
2146 2150
2147 intel_attach_force_audio_property(connector); 2151 intel_attach_force_audio_property(connector);
2148 intel_attach_broadcast_rgb_property(connector); 2152 intel_attach_broadcast_rgb_property(connector);
2149 intel_attach_aspect_ratio_property(connector); 2153 intel_attach_aspect_ratio_property(connector);
2154
2155 /*
 2156 * Attach the colorspace property for non-LSPCON based devices.
 2157 * TODO: this needs to be extended to the LSPCON implementation
 2158 * as well; that will be done separately.
2159 */
2160 if (!intel_dig_port->lspcon.active)
2161 intel_attach_colorspace_property(connector);
2162
2150 drm_connector_attach_content_type_property(connector); 2163 drm_connector_attach_content_type_property(connector);
2151 connector->state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE; 2164 connector->state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
2152 2165
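
With the property attached, userspace can request a colorimetry that then flows into the AVI infoframe via drm_hdmi_avi_infoframe_colorspace(). A hypothetical userspace sketch using libdrm; the property id and enum value are assumptions that would come from enumerating the connector's properties:

#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Hypothetical: set a connector property such as the new colorspace
 * one, once its property id and desired enum value are known. */
static int set_connector_prop(int fd, uint32_t connector_id,
			      uint32_t prop_id, uint64_t value)
{
	return drmModeObjectSetProperty(fd, connector_id,
					DRM_MODE_OBJECT_CONNECTOR,
					prop_id, value);
}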
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 54307f1df6cf..14ac31888c67 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3970,7 +3970,7 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
3970 val = I915_READ(PLANE_BUF_CFG(pipe, plane_id)); 3970 val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
3971 val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id)); 3971 val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));
3972 3972
3973 if (fourcc == DRM_FORMAT_NV12) 3973 if (is_planar_yuv_format(fourcc))
3974 swap(val, val2); 3974 swap(val, val2);
3975 3975
3976 skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val); 3976 skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
@@ -4180,7 +4180,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
4180 4180
4181 if (intel_plane->id == PLANE_CURSOR) 4181 if (intel_plane->id == PLANE_CURSOR)
4182 return 0; 4182 return 0;
4183 if (plane == 1 && format != DRM_FORMAT_NV12) 4183 if (plane == 1 && !is_planar_yuv_format(format))
4184 return 0; 4184 return 0;
4185 4185
4186 /* 4186 /*
@@ -4192,7 +4192,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
4192 height = drm_rect_height(&intel_pstate->base.src) >> 16; 4192 height = drm_rect_height(&intel_pstate->base.src) >> 16;
4193 4193
4194 /* UV plane does 1/2 pixel sub-sampling */ 4194 /* UV plane does 1/2 pixel sub-sampling */
4195 if (plane == 1 && format == DRM_FORMAT_NV12) { 4195 if (plane == 1 && is_planar_yuv_format(format)) {
4196 width /= 2; 4196 width /= 2;
4197 height /= 2; 4197 height /= 2;
4198 } 4198 }
@@ -4578,9 +4578,9 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *cstate,
4578 const struct drm_framebuffer *fb = pstate->fb; 4578 const struct drm_framebuffer *fb = pstate->fb;
4579 u32 interm_pbpl; 4579 u32 interm_pbpl;
4580 4580
 4581 /* only NV12 format has two planes */ 4581 /* only planar formats have two planes */
 4582 if (color_plane == 1 && fb->format->format != DRM_FORMAT_NV12) { 4582 if (color_plane == 1 && !is_planar_yuv_format(fb->format->format)) {
 4583 DRM_DEBUG_KMS("Non NV12 format have single plane\n"); 4583 DRM_DEBUG_KMS("Non-planar formats have a single plane\n");
4584 return -EINVAL; 4584 return -EINVAL;
4585 } 4585 }
4586 4586
@@ -4591,7 +4591,7 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *cstate,
4591 wp->x_tiled = fb->modifier == I915_FORMAT_MOD_X_TILED; 4591 wp->x_tiled = fb->modifier == I915_FORMAT_MOD_X_TILED;
4592 wp->rc_surface = fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || 4592 wp->rc_surface = fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
4593 fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS; 4593 fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
4594 wp->is_planar = fb->format->format == DRM_FORMAT_NV12; 4594 wp->is_planar = is_planar_yuv_format(fb->format->format);
4595 4595
4596 if (plane->id == PLANE_CURSOR) { 4596 if (plane->id == PLANE_CURSOR) {
4597 wp->width = intel_pstate->base.crtc_w; 4597 wp->width = intel_pstate->base.crtc_w;
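
All four watermark hunks make the same substitution: the Y/UV DDB swap, the data-rate accounting, and the is_planar flag now apply to P010/P012/P016 as well as NV12. A standalone sketch of the data-rate rule, assuming 2x2 chroma subsampling for all of these formats:

#include <stdbool.h>
#include <stdint.h>

/* Relative data rate of one color plane: zero for the nonexistent UV
 * plane of single-plane formats, quartered for the subsampled UV plane
 * of planar YUV, otherwise width * height * bytes-per-pixel. */
static uint64_t plane_data_rate_sketch(bool planar_yuv, int color_plane,
				       uint32_t w, uint32_t h, uint32_t cpp)
{
	if (color_plane == 1) {
		if (!planar_yuv)
			return 0;
		w /= 2;		/* UV plane is 1/2 x 1/2 subsampled */
		h /= 2;
	}
	return (uint64_t)w * h * cpp;
}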
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index b56a1a9ad01d..53174d579574 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -41,6 +41,19 @@
41#include "i915_drv.h" 41#include "i915_drv.h"
42#include <drm/drm_color_mgmt.h> 42#include <drm/drm_color_mgmt.h>
43 43
44bool is_planar_yuv_format(u32 pixelformat)
45{
46 switch (pixelformat) {
47 case DRM_FORMAT_NV12:
48 case DRM_FORMAT_P010:
49 case DRM_FORMAT_P012:
50 case DRM_FORMAT_P016:
51 return true;
52 default:
53 return false;
54 }
55}
56
44int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, 57int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
45 int usecs) 58 int usecs)
46{ 59{
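
is_planar_yuv_format() above groups NV12 with the P01x family. All four share the same two-plane layout; the P01x variants just widen the samples. A layout sketch for DRM_FORMAT_P010, derived from the drm_fourcc definitions (the struct is a local illustration, not a kernel type):

#include <stdint.h>

/* DRM_FORMAT_P010 sketch: plane 0 is full-resolution Y, plane 1 is a
 * half-resolution interleaved CbCr plane; each sample occupies a
 * little-endian 16-bit word with 10 significant bits in the high bits.
 * P012/P016 use 12 and 16 significant bits in the same layout. */
struct p010_frame_sketch {
	uint16_t *y;		/* width x height luma samples */
	uint16_t *uv;		/* (width/2) x (height/2) CbCr pairs */
	uint32_t y_stride;	/* bytes per luma row, >= 2 * width */
	uint32_t uv_stride;	/* bytes per chroma row */
};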
@@ -335,8 +348,8 @@ skl_program_scaler(struct intel_plane *plane,
335 0, INT_MAX); 348 0, INT_MAX);
336 349
337 /* TODO: handle sub-pixel coordinates */ 350 /* TODO: handle sub-pixel coordinates */
338 if (plane_state->base.fb->format->format == DRM_FORMAT_NV12 && 351 if (is_planar_yuv_format(plane_state->base.fb->format->format) &&
339 !icl_is_hdr_plane(plane)) { 352 !icl_is_hdr_plane(dev_priv, plane->id)) {
340 y_hphase = skl_scaler_calc_phase(1, hscale, false); 353 y_hphase = skl_scaler_calc_phase(1, hscale, false);
341 y_vphase = skl_scaler_calc_phase(1, vscale, false); 354 y_vphase = skl_scaler_calc_phase(1, vscale, false);
342 355
@@ -518,7 +531,7 @@ skl_program_plane(struct intel_plane *plane,
518 I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id), 531 I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id),
519 (plane_state->color_plane[1].offset - surf_addr) | aux_stride); 532 (plane_state->color_plane[1].offset - surf_addr) | aux_stride);
520 533
521 if (icl_is_hdr_plane(plane)) { 534 if (icl_is_hdr_plane(dev_priv, plane_id)) {
522 u32 cus_ctl = 0; 535 u32 cus_ctl = 0;
523 536
524 if (linked) { 537 if (linked) {
@@ -542,7 +555,7 @@ skl_program_plane(struct intel_plane *plane,
542 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) 555 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
543 I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl); 556 I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl);
544 557
545 if (fb->format->is_yuv && icl_is_hdr_plane(plane)) 558 if (fb->format->is_yuv && icl_is_hdr_plane(dev_priv, plane_id))
546 icl_program_input_csc(plane, crtc_state, plane_state); 559 icl_program_input_csc(plane, crtc_state, plane_state);
547 560
548 skl_write_plane_wm(plane, crtc_state); 561 skl_write_plane_wm(plane, crtc_state);
@@ -1482,8 +1495,6 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
1482 /* 1495 /*
1483 * 90/270 is not allowed with RGB64 16:16:16:16 and 1496 * 90/270 is not allowed with RGB64 16:16:16:16 and
1484 * Indexed 8-bit. RGB 16-bit 5:6:5 is allowed gen11 onwards. 1497 * Indexed 8-bit. RGB 16-bit 5:6:5 is allowed gen11 onwards.
1485 * TBD: Add RGB64 case once its added in supported format
1486 * list.
1487 */ 1498 */
1488 switch (fb->format->format) { 1499 switch (fb->format->format) {
1489 case DRM_FORMAT_RGB565: 1500 case DRM_FORMAT_RGB565:
@@ -1491,6 +1502,10 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
1491 break; 1502 break;
1492 /* fall through */ 1503 /* fall through */
1493 case DRM_FORMAT_C8: 1504 case DRM_FORMAT_C8:
1505 case DRM_FORMAT_XRGB16161616F:
1506 case DRM_FORMAT_XBGR16161616F:
1507 case DRM_FORMAT_ARGB16161616F:
1508 case DRM_FORMAT_ABGR16161616F:
1494 DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n", 1509 DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n",
1495 drm_get_format_name(fb->format->format, 1510 drm_get_format_name(fb->format->format,
1496 &format_name)); 1511 &format_name));
@@ -1551,10 +1566,10 @@ static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_s
1551 int src_w = drm_rect_width(&plane_state->base.src) >> 16; 1566 int src_w = drm_rect_width(&plane_state->base.src) >> 16;
1552 1567
1553 /* Display WA #1106 */ 1568 /* Display WA #1106 */
1554 if (fb->format->format == DRM_FORMAT_NV12 && src_w & 3 && 1569 if (is_planar_yuv_format(fb->format->format) && src_w & 3 &&
1555 (rotation == DRM_MODE_ROTATE_270 || 1570 (rotation == DRM_MODE_ROTATE_270 ||
1556 rotation == (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90))) { 1571 rotation == (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90))) {
1557 DRM_DEBUG_KMS("src width must be multiple of 4 for rotated NV12\n"); 1572 DRM_DEBUG_KMS("src width must be multiple of 4 for rotated planar YUV\n");
1558 return -EINVAL; 1573 return -EINVAL;
1559 } 1574 }
1560 1575
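
Two rotation rules change in step here: the new FP16 formats join C8 on the no-90/270 list, and Display WA #1106 (source width must be a multiple of 4 when rotated) now covers every planar YUV format rather than NV12 only. A standalone restatement of the workaround check (the multiple-of-4 rule is from the diff; the rationale lives in the workaround list):

#include <stdbool.h>
#include <stdint.h>

/* Display WA #1106 sketch: reject 90/270-degree scanout of planar YUV
 * when the source width is not a multiple of 4. */
static bool wa_1106_rejects(bool planar_yuv, uint32_t src_w, bool rot_90_270)
{
	return planar_yuv && (src_w & 3) && rot_90_270;
}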
@@ -1790,6 +1805,52 @@ static const u32 skl_plane_formats[] = {
1790 DRM_FORMAT_VYUY, 1805 DRM_FORMAT_VYUY,
1791}; 1806};
1792 1807
1808static const uint32_t icl_plane_formats[] = {
1809 DRM_FORMAT_C8,
1810 DRM_FORMAT_RGB565,
1811 DRM_FORMAT_XRGB8888,
1812 DRM_FORMAT_XBGR8888,
1813 DRM_FORMAT_ARGB8888,
1814 DRM_FORMAT_ABGR8888,
1815 DRM_FORMAT_XRGB2101010,
1816 DRM_FORMAT_XBGR2101010,
1817 DRM_FORMAT_YUYV,
1818 DRM_FORMAT_YVYU,
1819 DRM_FORMAT_UYVY,
1820 DRM_FORMAT_VYUY,
1821 DRM_FORMAT_Y210,
1822 DRM_FORMAT_Y212,
1823 DRM_FORMAT_Y216,
1824 DRM_FORMAT_XVYU2101010,
1825 DRM_FORMAT_XVYU12_16161616,
1826 DRM_FORMAT_XVYU16161616,
1827};
1828
1829static const uint32_t icl_hdr_plane_formats[] = {
1830 DRM_FORMAT_C8,
1831 DRM_FORMAT_RGB565,
1832 DRM_FORMAT_XRGB8888,
1833 DRM_FORMAT_XBGR8888,
1834 DRM_FORMAT_ARGB8888,
1835 DRM_FORMAT_ABGR8888,
1836 DRM_FORMAT_XRGB2101010,
1837 DRM_FORMAT_XBGR2101010,
1838 DRM_FORMAT_XRGB16161616F,
1839 DRM_FORMAT_XBGR16161616F,
1840 DRM_FORMAT_ARGB16161616F,
1841 DRM_FORMAT_ABGR16161616F,
1842 DRM_FORMAT_YUYV,
1843 DRM_FORMAT_YVYU,
1844 DRM_FORMAT_UYVY,
1845 DRM_FORMAT_VYUY,
1846 DRM_FORMAT_Y210,
1847 DRM_FORMAT_Y212,
1848 DRM_FORMAT_Y216,
1849 DRM_FORMAT_XVYU2101010,
1850 DRM_FORMAT_XVYU12_16161616,
1851 DRM_FORMAT_XVYU16161616,
1852};
1853
1793static const u32 skl_planar_formats[] = { 1854static const u32 skl_planar_formats[] = {
1794 DRM_FORMAT_C8, 1855 DRM_FORMAT_C8,
1795 DRM_FORMAT_RGB565, 1856 DRM_FORMAT_RGB565,
@@ -1806,6 +1867,79 @@ static const u32 skl_planar_formats[] = {
1806 DRM_FORMAT_NV12, 1867 DRM_FORMAT_NV12,
1807}; 1868};
1808 1869
1870static const uint32_t glk_planar_formats[] = {
1871 DRM_FORMAT_C8,
1872 DRM_FORMAT_RGB565,
1873 DRM_FORMAT_XRGB8888,
1874 DRM_FORMAT_XBGR8888,
1875 DRM_FORMAT_ARGB8888,
1876 DRM_FORMAT_ABGR8888,
1877 DRM_FORMAT_XRGB2101010,
1878 DRM_FORMAT_XBGR2101010,
1879 DRM_FORMAT_YUYV,
1880 DRM_FORMAT_YVYU,
1881 DRM_FORMAT_UYVY,
1882 DRM_FORMAT_VYUY,
1883 DRM_FORMAT_NV12,
1884 DRM_FORMAT_P010,
1885 DRM_FORMAT_P012,
1886 DRM_FORMAT_P016,
1887};
1888
1889static const uint32_t icl_planar_formats[] = {
1890 DRM_FORMAT_C8,
1891 DRM_FORMAT_RGB565,
1892 DRM_FORMAT_XRGB8888,
1893 DRM_FORMAT_XBGR8888,
1894 DRM_FORMAT_ARGB8888,
1895 DRM_FORMAT_ABGR8888,
1896 DRM_FORMAT_XRGB2101010,
1897 DRM_FORMAT_XBGR2101010,
1898 DRM_FORMAT_YUYV,
1899 DRM_FORMAT_YVYU,
1900 DRM_FORMAT_UYVY,
1901 DRM_FORMAT_VYUY,
1902 DRM_FORMAT_NV12,
1903 DRM_FORMAT_P010,
1904 DRM_FORMAT_P012,
1905 DRM_FORMAT_P016,
1906 DRM_FORMAT_Y210,
1907 DRM_FORMAT_Y212,
1908 DRM_FORMAT_Y216,
1909 DRM_FORMAT_XVYU2101010,
1910 DRM_FORMAT_XVYU12_16161616,
1911 DRM_FORMAT_XVYU16161616,
1912};
1913
1914static const uint32_t icl_hdr_planar_formats[] = {
1915 DRM_FORMAT_C8,
1916 DRM_FORMAT_RGB565,
1917 DRM_FORMAT_XRGB8888,
1918 DRM_FORMAT_XBGR8888,
1919 DRM_FORMAT_ARGB8888,
1920 DRM_FORMAT_ABGR8888,
1921 DRM_FORMAT_XRGB2101010,
1922 DRM_FORMAT_XBGR2101010,
1923 DRM_FORMAT_XRGB16161616F,
1924 DRM_FORMAT_XBGR16161616F,
1925 DRM_FORMAT_ARGB16161616F,
1926 DRM_FORMAT_ABGR16161616F,
1927 DRM_FORMAT_YUYV,
1928 DRM_FORMAT_YVYU,
1929 DRM_FORMAT_UYVY,
1930 DRM_FORMAT_VYUY,
1931 DRM_FORMAT_NV12,
1932 DRM_FORMAT_P010,
1933 DRM_FORMAT_P012,
1934 DRM_FORMAT_P016,
1935 DRM_FORMAT_Y210,
1936 DRM_FORMAT_Y212,
1937 DRM_FORMAT_Y216,
1938 DRM_FORMAT_XVYU2101010,
1939 DRM_FORMAT_XVYU12_16161616,
1940 DRM_FORMAT_XVYU16161616,
1941};
1942
1809static const u64 skl_plane_format_modifiers_noccs[] = { 1943static const u64 skl_plane_format_modifiers_noccs[] = {
1810 I915_FORMAT_MOD_Yf_TILED, 1944 I915_FORMAT_MOD_Yf_TILED,
1811 I915_FORMAT_MOD_Y_TILED, 1945 I915_FORMAT_MOD_Y_TILED,
@@ -1945,10 +2079,23 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
1945 case DRM_FORMAT_UYVY: 2079 case DRM_FORMAT_UYVY:
1946 case DRM_FORMAT_VYUY: 2080 case DRM_FORMAT_VYUY:
1947 case DRM_FORMAT_NV12: 2081 case DRM_FORMAT_NV12:
2082 case DRM_FORMAT_P010:
2083 case DRM_FORMAT_P012:
2084 case DRM_FORMAT_P016:
2085 case DRM_FORMAT_Y210:
2086 case DRM_FORMAT_Y212:
2087 case DRM_FORMAT_Y216:
2088 case DRM_FORMAT_XVYU2101010:
2089 case DRM_FORMAT_XVYU12_16161616:
2090 case DRM_FORMAT_XVYU16161616:
1948 if (modifier == I915_FORMAT_MOD_Yf_TILED) 2091 if (modifier == I915_FORMAT_MOD_Yf_TILED)
1949 return true; 2092 return true;
1950 /* fall through */ 2093 /* fall through */
1951 case DRM_FORMAT_C8: 2094 case DRM_FORMAT_C8:
2095 case DRM_FORMAT_XBGR16161616F:
2096 case DRM_FORMAT_ABGR16161616F:
2097 case DRM_FORMAT_XRGB16161616F:
2098 case DRM_FORMAT_ARGB16161616F:
1952 if (modifier == DRM_FORMAT_MOD_LINEAR || 2099 if (modifier == DRM_FORMAT_MOD_LINEAR ||
1953 modifier == I915_FORMAT_MOD_X_TILED || 2100 modifier == I915_FORMAT_MOD_X_TILED ||
1954 modifier == I915_FORMAT_MOD_Y_TILED) 2101 modifier == I915_FORMAT_MOD_Y_TILED)
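
The modifier matrix implied by this hunk: all the packed and planar YUV additions (P01x, Y21x, XVYU*) get Yf tiling on top of linear/X/Y, while the FP16 RGB formats fall through to the C8 case and stay limited to linear, X- and Y-tiled. A sketch of the FP16 rule, using local stand-ins for the DRM/i915 modifier constants:

#include <stdbool.h>
#include <stdint.h>

#define MOD_LINEAR	0	/* stand-ins for the real modifier values */
#define MOD_X_TILED	1
#define MOD_Y_TILED	2
#define MOD_Yf_TILED	3

/* FP16 scanout supports no Yf tiling (and no CCS); sketch only. */
static bool fp16_mod_supported_sketch(uint64_t modifier)
{
	return modifier == MOD_LINEAR ||
	       modifier == MOD_X_TILED ||
	       modifier == MOD_Y_TILED;
}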
@@ -2085,8 +2232,25 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
2085 plane->update_slave = icl_update_slave; 2232 plane->update_slave = icl_update_slave;
2086 2233
2087 if (skl_plane_has_planar(dev_priv, pipe, plane_id)) { 2234 if (skl_plane_has_planar(dev_priv, pipe, plane_id)) {
2088 formats = skl_planar_formats; 2235 if (icl_is_hdr_plane(dev_priv, plane_id)) {
2089 num_formats = ARRAY_SIZE(skl_planar_formats); 2236 formats = icl_hdr_planar_formats;
2237 num_formats = ARRAY_SIZE(icl_hdr_planar_formats);
2238 } else if (INTEL_GEN(dev_priv) >= 11) {
2239 formats = icl_planar_formats;
2240 num_formats = ARRAY_SIZE(icl_planar_formats);
2241 } else if (INTEL_GEN(dev_priv) == 10 || IS_GEMINILAKE(dev_priv)) {
2242 formats = glk_planar_formats;
2243 num_formats = ARRAY_SIZE(glk_planar_formats);
2244 } else {
2245 formats = skl_planar_formats;
2246 num_formats = ARRAY_SIZE(skl_planar_formats);
2247 }
2248 } else if (icl_is_hdr_plane(dev_priv, plane_id)) {
2249 formats = icl_hdr_plane_formats;
2250 num_formats = ARRAY_SIZE(icl_hdr_plane_formats);
2251 } else if (INTEL_GEN(dev_priv) >= 11) {
2252 formats = icl_plane_formats;
2253 num_formats = ARRAY_SIZE(icl_plane_formats);
2090 } else { 2254 } else {
2091 formats = skl_plane_formats; 2255 formats = skl_plane_formats;
2092 num_formats = ARRAY_SIZE(skl_plane_formats); 2256 num_formats = ARRAY_SIZE(skl_plane_formats);
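
The selection above is a two-axis decision: planar capability of the pipe/plane crossed with plane generation. A standalone sketch of the same logic; the format-list names are the arrays defined earlier in this file, while the selector function itself is illustrative:

#include <stdbool.h>
#include <stdint.h>

/* Stand-in declarations; the real arrays are defined earlier in the diff. */
extern const uint32_t icl_hdr_planar_formats[], icl_planar_formats[],
	glk_planar_formats[], skl_planar_formats[],
	icl_hdr_plane_formats[], icl_plane_formats[], skl_plane_formats[];

static const uint32_t *pick_formats_sketch(bool planar, bool hdr_plane,
					   int gen, bool is_glk)
{
	if (planar) {
		if (hdr_plane)
			return icl_hdr_planar_formats;
		if (gen >= 11)
			return icl_planar_formats;
		if (gen == 10 || is_glk)
			return glk_planar_formats;
		return skl_planar_formats;
	}
	if (hdr_plane)
		return icl_hdr_plane_formats;
	if (gen >= 11)
		return icl_plane_formats;
	return skl_plane_formats;
}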
diff --git a/drivers/gpu/drm/i915/intel_vdsc.c b/drivers/gpu/drm/i915/intel_vdsc.c
index 23abf03736e7..3f9921ba4a76 100644
--- a/drivers/gpu/drm/i915/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/intel_vdsc.c
@@ -317,129 +317,6 @@ static int get_column_index_for_rc_params(u8 bits_per_component)
317 } 317 }
318} 318}
319 319
320static int intel_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg)
321{
322 unsigned long groups_per_line = 0;
323 unsigned long groups_total = 0;
324 unsigned long num_extra_mux_bits = 0;
325 unsigned long slice_bits = 0;
326 unsigned long hrd_delay = 0;
327 unsigned long final_scale = 0;
328 unsigned long rbs_min = 0;
329
330 /* Number of groups used to code each line of a slice */
331 groups_per_line = DIV_ROUND_UP(vdsc_cfg->slice_width,
332 DSC_RC_PIXELS_PER_GROUP);
333
334 /* chunksize in Bytes */
335 vdsc_cfg->slice_chunk_size = DIV_ROUND_UP(vdsc_cfg->slice_width *
336 vdsc_cfg->bits_per_pixel,
337 (8 * 16));
338
339 if (vdsc_cfg->convert_rgb)
340 num_extra_mux_bits = 3 * (vdsc_cfg->mux_word_size +
341 (4 * vdsc_cfg->bits_per_component + 4)
342 - 2);
343 else
344 num_extra_mux_bits = 3 * vdsc_cfg->mux_word_size +
345 (4 * vdsc_cfg->bits_per_component + 4) +
346 2 * (4 * vdsc_cfg->bits_per_component) - 2;
347 /* Number of bits in one Slice */
348 slice_bits = 8 * vdsc_cfg->slice_chunk_size * vdsc_cfg->slice_height;
349
350 while ((num_extra_mux_bits > 0) &&
351 ((slice_bits - num_extra_mux_bits) % vdsc_cfg->mux_word_size))
352 num_extra_mux_bits--;
353
354 if (groups_per_line < vdsc_cfg->initial_scale_value - 8)
355 vdsc_cfg->initial_scale_value = groups_per_line + 8;
356
357 /* scale_decrement_interval calculation according to DSC spec 1.11 */
358 if (vdsc_cfg->initial_scale_value > 8)
359 vdsc_cfg->scale_decrement_interval = groups_per_line /
360 (vdsc_cfg->initial_scale_value - 8);
361 else
362 vdsc_cfg->scale_decrement_interval = DSC_SCALE_DECREMENT_INTERVAL_MAX;
363
364 vdsc_cfg->final_offset = vdsc_cfg->rc_model_size -
365 (vdsc_cfg->initial_xmit_delay *
366 vdsc_cfg->bits_per_pixel + 8) / 16 + num_extra_mux_bits;
367
368 if (vdsc_cfg->final_offset >= vdsc_cfg->rc_model_size) {
369 DRM_DEBUG_KMS("FinalOfs < RcModelSze for this InitialXmitDelay\n");
370 return -ERANGE;
371 }
372
373 final_scale = (vdsc_cfg->rc_model_size * 8) /
374 (vdsc_cfg->rc_model_size - vdsc_cfg->final_offset);
375 if (vdsc_cfg->slice_height > 1)
376 /*
377 * NflBpgOffset is 16 bit value with 11 fractional bits
378 * hence we multiply by 2^11 for preserving the
379 * fractional part
380 */
381 vdsc_cfg->nfl_bpg_offset = DIV_ROUND_UP((vdsc_cfg->first_line_bpg_offset << 11),
382 (vdsc_cfg->slice_height - 1));
383 else
384 vdsc_cfg->nfl_bpg_offset = 0;
385
386 /* 2^16 - 1 */
387 if (vdsc_cfg->nfl_bpg_offset > 65535) {
388 DRM_DEBUG_KMS("NflBpgOffset is too large for this slice height\n");
389 return -ERANGE;
390 }
391
392 /* Number of groups used to code the entire slice */
393 groups_total = groups_per_line * vdsc_cfg->slice_height;
394
395 /* slice_bpg_offset is 16 bit value with 11 fractional bits */
396 vdsc_cfg->slice_bpg_offset = DIV_ROUND_UP(((vdsc_cfg->rc_model_size -
397 vdsc_cfg->initial_offset +
398 num_extra_mux_bits) << 11),
399 groups_total);
400
401 if (final_scale > 9) {
402 /*
403 * ScaleIncrementInterval =
404 * finaloffset/((NflBpgOffset + SliceBpgOffset)*8(finalscale - 1.125))
405 * as (NflBpgOffset + SliceBpgOffset) has 11 bit fractional value,
406 * we need divide by 2^11 from pstDscCfg values
407 */
408 vdsc_cfg->scale_increment_interval =
409 (vdsc_cfg->final_offset * (1 << 11)) /
410 ((vdsc_cfg->nfl_bpg_offset +
411 vdsc_cfg->slice_bpg_offset) *
412 (final_scale - 9));
413 } else {
414 /*
415 * If finalScaleValue is less than or equal to 9, a value of 0 should
416 * be used to disable the scale increment at the end of the slice
417 */
418 vdsc_cfg->scale_increment_interval = 0;
419 }
420
421 if (vdsc_cfg->scale_increment_interval > 65535) {
422 DRM_DEBUG_KMS("ScaleIncrementInterval is large for slice height\n");
423 return -ERANGE;
424 }
425
426 /*
427 * DSC spec mentions that bits_per_pixel specifies the target
428 * bits/pixel (bpp) rate that is used by the encoder,
429 * in steps of 1/16 of a bit per pixel
430 */
431 rbs_min = vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset +
432 DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay *
433 vdsc_cfg->bits_per_pixel, 16) +
434 groups_per_line * vdsc_cfg->first_line_bpg_offset;
435
436 hrd_delay = DIV_ROUND_UP((rbs_min * 16), vdsc_cfg->bits_per_pixel);
437 vdsc_cfg->rc_bits = (hrd_delay * vdsc_cfg->bits_per_pixel) / 16;
438 vdsc_cfg->initial_dec_delay = hrd_delay - vdsc_cfg->initial_xmit_delay;
439
440 return 0;
441}
442
443int intel_dp_compute_dsc_params(struct intel_dp *intel_dp, 320int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
444 struct intel_crtc_state *pipe_config) 321 struct intel_crtc_state *pipe_config)
445{ 322{
@@ -491,7 +368,7 @@ int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
491 DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth; 368 DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth;
492 369
493 /* Gen 11 does not support YCbCr */ 370 /* Gen 11 does not support YCbCr */
494 vdsc_cfg->enable422 = false; 371 vdsc_cfg->simple_422 = false;
495 /* Gen 11 does not support VBR */ 372 /* Gen 11 does not support VBR */
496 vdsc_cfg->vbr_enable = false; 373 vdsc_cfg->vbr_enable = false;
497 vdsc_cfg->block_pred_enable = 374 vdsc_cfg->block_pred_enable =
@@ -574,7 +451,7 @@ int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
574 vdsc_cfg->initial_scale_value = (vdsc_cfg->rc_model_size << 3) / 451 vdsc_cfg->initial_scale_value = (vdsc_cfg->rc_model_size << 3) /
575 (vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset); 452 (vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset);
576 453
577 return intel_compute_rc_parameters(vdsc_cfg); 454 return drm_dsc_compute_rc_parameters(vdsc_cfg);
578} 455}
579 456
580enum intel_display_power_domain 457enum intel_display_power_domain
@@ -618,7 +495,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
618 pps_val |= DSC_BLOCK_PREDICTION; 495 pps_val |= DSC_BLOCK_PREDICTION;
619 if (vdsc_cfg->convert_rgb) 496 if (vdsc_cfg->convert_rgb)
620 pps_val |= DSC_COLOR_SPACE_CONVERSION; 497 pps_val |= DSC_COLOR_SPACE_CONVERSION;
621 if (vdsc_cfg->enable422) 498 if (vdsc_cfg->simple_422)
622 pps_val |= DSC_422_ENABLE; 499 pps_val |= DSC_422_ENABLE;
623 if (vdsc_cfg->vbr_enable) 500 if (vdsc_cfg->vbr_enable)
624 pps_val |= DSC_VBR_ENABLE; 501 pps_val |= DSC_VBR_ENABLE;
@@ -1004,10 +881,10 @@ static void intel_dp_write_dsc_pps_sdp(struct intel_encoder *encoder,
1004 struct drm_dsc_pps_infoframe dp_dsc_pps_sdp; 881 struct drm_dsc_pps_infoframe dp_dsc_pps_sdp;
1005 882
1006 /* Prepare DP SDP PPS header as per DP 1.4 spec, Table 2-123 */ 883 /* Prepare DP SDP PPS header as per DP 1.4 spec, Table 2-123 */
1007 drm_dsc_dp_pps_header_init(&dp_dsc_pps_sdp); 884 drm_dsc_dp_pps_header_init(&dp_dsc_pps_sdp.pps_header);
1008 885
1009 /* Fill the PPS payload bytes as per DSC spec 1.2 Table 4-1 */ 886 /* Fill the PPS payload bytes as per DSC spec 1.2 Table 4-1 */
1010 drm_dsc_pps_infoframe_pack(&dp_dsc_pps_sdp, vdsc_cfg); 887 drm_dsc_pps_payload_pack(&dp_dsc_pps_sdp.pps_payload, vdsc_cfg);
1011 888
1012 intel_dig_port->write_infoframe(encoder, crtc_state, 889 intel_dig_port->write_infoframe(encoder, crtc_state,
1013 DP_SDP_PPS, &dp_dsc_pps_sdp, 890 DP_SDP_PPS, &dp_dsc_pps_sdp,
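
The net effect of this file: intel_compute_rc_parameters() moves into the shared DRM DSC helpers as drm_dsc_compute_rc_parameters(), enable422 is renamed simple_422, and the PPS helpers now take the SDP header and payload separately. The tail of the RC computation, as it appeared in the removed helper, shows why bits_per_pixel is handled in 1/16-of-a-bit units throughout:

	/* Excerpt from the removed helper (now shared core code):
	 * bits_per_pixel is in 1/16-bpp units, hence the *16 and /16. */
	rbs_min = vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset +
		  DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay *
			       vdsc_cfg->bits_per_pixel, 16) +
		  groups_per_line * vdsc_cfg->first_line_bpg_offset;

	hrd_delay = DIV_ROUND_UP(rbs_min * 16, vdsc_cfg->bits_per_pixel);
	vdsc_cfg->rc_bits = (hrd_delay * vdsc_cfg->bits_per_pixel) / 16;
	vdsc_cfg->initial_dec_delay = hrd_delay - vdsc_cfg->initial_xmit_delay;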
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index c935cbe059a7..3e8bece620df 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -185,7 +185,7 @@ static int compare_of(struct device *dev, void *data)
185 } 185 }
186 186
187 /* Special case for LDB, one device for two channels */ 187 /* Special case for LDB, one device for two channels */
188 if (of_node_cmp(np->name, "lvds-channel") == 0) { 188 if (of_node_name_eq(np, "lvds-channel")) {
189 np = of_get_parent(np); 189 np = of_get_parent(np);
190 of_node_put(np); 190 of_node_put(np);
191 } 191 }
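
A small modernization: of_node_name_eq() compares a node's name while ignoring any "@unit-address" suffix, replacing the open-coded of_node_cmp() on np->name. Sketch of the idiom:

#include <linux/of.h>

/* Matches "lvds-channel" as well as "lvds-channel@0" etc. (sketch). */
static bool is_ldb_channel(const struct device_node *np)
{
	return of_node_name_eq(np, "lvds-channel");
}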
diff --git a/drivers/gpu/drm/meson/Makefile b/drivers/gpu/drm/meson/Makefile
index 7709f2fbb9f7..d4ea82fc493b 100644
--- a/drivers/gpu/drm/meson/Makefile
+++ b/drivers/gpu/drm/meson/Makefile
@@ -1,5 +1,5 @@
1meson-drm-y := meson_drv.o meson_plane.o meson_crtc.o meson_venc_cvbs.o 1meson-drm-y := meson_drv.o meson_plane.o meson_crtc.o meson_venc_cvbs.o
2meson-drm-y += meson_viu.o meson_vpp.o meson_venc.o meson_vclk.o meson_canvas.o meson_overlay.o 2meson-drm-y += meson_viu.o meson_vpp.o meson_venc.o meson_vclk.o meson_overlay.o
3 3
4obj-$(CONFIG_DRM_MESON) += meson-drm.o 4obj-$(CONFIG_DRM_MESON) += meson-drm.o
5obj-$(CONFIG_DRM_MESON_DW_HDMI) += meson_dw_hdmi.o 5obj-$(CONFIG_DRM_MESON_DW_HDMI) += meson_dw_hdmi.o
diff --git a/drivers/gpu/drm/meson/meson_canvas.c b/drivers/gpu/drm/meson/meson_canvas.c
deleted file mode 100644
index 5de11aa7c775..000000000000
--- a/drivers/gpu/drm/meson/meson_canvas.c
+++ /dev/null
@@ -1,73 +0,0 @@
1/*
2 * Copyright (C) 2016 BayLibre, SAS
3 * Author: Neil Armstrong <narmstrong@baylibre.com>
4 * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
5 * Copyright (C) 2014 Endless Mobile
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation; either version 2 of the
10 * License, or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include "meson_drv.h"
24#include "meson_canvas.h"
25#include "meson_registers.h"
26
27/**
28 * DOC: Canvas
29 *
30 * CANVAS is a memory zone where physical memory frames information
31 * are stored for the VIU to scanout.
32 */
33
34/* DMC Registers */
35#define DMC_CAV_LUT_DATAL 0x48 /* 0x12 offset in data sheet */
36#define CANVAS_WIDTH_LBIT 29
37#define CANVAS_WIDTH_LWID 3
38#define DMC_CAV_LUT_DATAH 0x4c /* 0x13 offset in data sheet */
39#define CANVAS_WIDTH_HBIT 0
40#define CANVAS_HEIGHT_BIT 9
41#define CANVAS_BLKMODE_BIT 24
42#define CANVAS_ENDIAN_BIT 26
43#define DMC_CAV_LUT_ADDR 0x50 /* 0x14 offset in data sheet */
44#define CANVAS_LUT_WR_EN (0x2 << 8)
45#define CANVAS_LUT_RD_EN (0x1 << 8)
46
47void meson_canvas_setup(struct meson_drm *priv,
48 uint32_t canvas_index, uint32_t addr,
49 uint32_t stride, uint32_t height,
50 unsigned int wrap,
51 unsigned int blkmode,
52 unsigned int endian)
53{
54 unsigned int val;
55
56 regmap_write(priv->dmc, DMC_CAV_LUT_DATAL,
57 (((addr + 7) >> 3)) |
58 (((stride + 7) >> 3) << CANVAS_WIDTH_LBIT));
59
60 regmap_write(priv->dmc, DMC_CAV_LUT_DATAH,
61 ((((stride + 7) >> 3) >> CANVAS_WIDTH_LWID) <<
62 CANVAS_WIDTH_HBIT) |
63 (height << CANVAS_HEIGHT_BIT) |
64 (wrap << 22) |
65 (blkmode << CANVAS_BLKMODE_BIT) |
66 (endian << CANVAS_ENDIAN_BIT));
67
68 regmap_write(priv->dmc, DMC_CAV_LUT_ADDR,
69 CANVAS_LUT_WR_EN | canvas_index);
70
71 /* Force a read-back to make sure everything is flushed. */
72 regmap_read(priv->dmc, DMC_CAV_LUT_DATAH, &val);
73}
diff --git a/drivers/gpu/drm/meson/meson_canvas.h b/drivers/gpu/drm/meson/meson_canvas.h
deleted file mode 100644
index 85dbf26e2826..000000000000
--- a/drivers/gpu/drm/meson/meson_canvas.h
+++ /dev/null
@@ -1,51 +0,0 @@
1/*
2 * Copyright (C) 2016 BayLibre, SAS
3 * Author: Neil Armstrong <narmstrong@baylibre.com>
4 * Copyright (C) 2014 Endless Mobile
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation; either version 2 of the
9 * License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20/* Canvas LUT Memory */
21
22#ifndef __MESON_CANVAS_H
23#define __MESON_CANVAS_H
24
25#define MESON_CANVAS_ID_OSD1 0x4e
26#define MESON_CANVAS_ID_VD1_0 0x60
27#define MESON_CANVAS_ID_VD1_1 0x61
28#define MESON_CANVAS_ID_VD1_2 0x62
29
30/* Canvas configuration. */
31#define MESON_CANVAS_WRAP_NONE 0x00
32#define MESON_CANVAS_WRAP_X 0x01
33#define MESON_CANVAS_WRAP_Y 0x02
34
35#define MESON_CANVAS_BLKMODE_LINEAR 0x00
36#define MESON_CANVAS_BLKMODE_32x32 0x01
37#define MESON_CANVAS_BLKMODE_64x64 0x02
38
39#define MESON_CANVAS_ENDIAN_SWAP16 0x1
40#define MESON_CANVAS_ENDIAN_SWAP32 0x3
41#define MESON_CANVAS_ENDIAN_SWAP64 0x7
42#define MESON_CANVAS_ENDIAN_SWAP128 0xf
43
44void meson_canvas_setup(struct meson_drm *priv,
45 uint32_t canvas_index, uint32_t addr,
46 uint32_t stride, uint32_t height,
47 unsigned int wrap,
48 unsigned int blkmode,
49 unsigned int endian);
50
51#endif /* __MESON_CANVAS_H */
diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c
index 43e29984f8b1..6d9311e254ef 100644
--- a/drivers/gpu/drm/meson/meson_crtc.c
+++ b/drivers/gpu/drm/meson/meson_crtc.c
@@ -37,7 +37,6 @@
37#include "meson_venc.h" 37#include "meson_venc.h"
38#include "meson_vpp.h" 38#include "meson_vpp.h"
39#include "meson_viu.h" 39#include "meson_viu.h"
40#include "meson_canvas.h"
41#include "meson_registers.h" 40#include "meson_registers.h"
42 41
43/* CRTC definition */ 42/* CRTC definition */
@@ -214,13 +213,7 @@ void meson_crtc_irq(struct meson_drm *priv)
214 writel_relaxed(priv->viu.osd_sc_v_ctrl0, 213 writel_relaxed(priv->viu.osd_sc_v_ctrl0,
215 priv->io_base + _REG(VPP_OSD_VSC_CTRL0)); 214 priv->io_base + _REG(VPP_OSD_VSC_CTRL0));
216 215
217 if (priv->canvas) 216 meson_canvas_config(priv->canvas, priv->canvas_id_osd1,
218 meson_canvas_config(priv->canvas, priv->canvas_id_osd1,
219 priv->viu.osd1_addr, priv->viu.osd1_stride,
220 priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE,
221 MESON_CANVAS_BLKMODE_LINEAR, 0);
222 else
223 meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1,
224 priv->viu.osd1_addr, priv->viu.osd1_stride, 217 priv->viu.osd1_addr, priv->viu.osd1_stride,
225 priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE, 218 priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE,
226 MESON_CANVAS_BLKMODE_LINEAR, 0); 219 MESON_CANVAS_BLKMODE_LINEAR, 0);
@@ -237,61 +230,34 @@ void meson_crtc_irq(struct meson_drm *priv)
237 230
238 switch (priv->viu.vd1_planes) { 231 switch (priv->viu.vd1_planes) {
239 case 3: 232 case 3:
240 if (priv->canvas) 233 meson_canvas_config(priv->canvas,
241 meson_canvas_config(priv->canvas, 234 priv->canvas_id_vd1_2,
242 priv->canvas_id_vd1_2, 235 priv->viu.vd1_addr2,
243 priv->viu.vd1_addr2, 236 priv->viu.vd1_stride2,
244 priv->viu.vd1_stride2, 237 priv->viu.vd1_height2,
245 priv->viu.vd1_height2, 238 MESON_CANVAS_WRAP_NONE,
246 MESON_CANVAS_WRAP_NONE, 239 MESON_CANVAS_BLKMODE_LINEAR,
247 MESON_CANVAS_BLKMODE_LINEAR, 240 MESON_CANVAS_ENDIAN_SWAP64);
248 MESON_CANVAS_ENDIAN_SWAP64);
249 else
250 meson_canvas_setup(priv, MESON_CANVAS_ID_VD1_2,
251 priv->viu.vd1_addr2,
252 priv->viu.vd1_stride2,
253 priv->viu.vd1_height2,
254 MESON_CANVAS_WRAP_NONE,
255 MESON_CANVAS_BLKMODE_LINEAR,
256 MESON_CANVAS_ENDIAN_SWAP64);
257 /* fallthrough */ 241 /* fallthrough */
258 case 2: 242 case 2:
259 if (priv->canvas) 243 meson_canvas_config(priv->canvas,
260 meson_canvas_config(priv->canvas, 244 priv->canvas_id_vd1_1,
261 priv->canvas_id_vd1_1, 245 priv->viu.vd1_addr1,
262 priv->viu.vd1_addr1, 246 priv->viu.vd1_stride1,
263 priv->viu.vd1_stride1, 247 priv->viu.vd1_height1,
264 priv->viu.vd1_height1, 248 MESON_CANVAS_WRAP_NONE,
265 MESON_CANVAS_WRAP_NONE, 249 MESON_CANVAS_BLKMODE_LINEAR,
266 MESON_CANVAS_BLKMODE_LINEAR, 250 MESON_CANVAS_ENDIAN_SWAP64);
267 MESON_CANVAS_ENDIAN_SWAP64);
268 else
269 meson_canvas_setup(priv, MESON_CANVAS_ID_VD1_1,
270 priv->viu.vd1_addr2,
271 priv->viu.vd1_stride2,
272 priv->viu.vd1_height2,
273 MESON_CANVAS_WRAP_NONE,
274 MESON_CANVAS_BLKMODE_LINEAR,
275 MESON_CANVAS_ENDIAN_SWAP64);
276 /* fallthrough */ 251 /* fallthrough */
277 case 1: 252 case 1:
278 if (priv->canvas) 253 meson_canvas_config(priv->canvas,
279 meson_canvas_config(priv->canvas, 254 priv->canvas_id_vd1_0,
280 priv->canvas_id_vd1_0, 255 priv->viu.vd1_addr0,
281 priv->viu.vd1_addr0, 256 priv->viu.vd1_stride0,
282 priv->viu.vd1_stride0, 257 priv->viu.vd1_height0,
283 priv->viu.vd1_height0, 258 MESON_CANVAS_WRAP_NONE,
284 MESON_CANVAS_WRAP_NONE, 259 MESON_CANVAS_BLKMODE_LINEAR,
285 MESON_CANVAS_BLKMODE_LINEAR, 260 MESON_CANVAS_ENDIAN_SWAP64);
286 MESON_CANVAS_ENDIAN_SWAP64);
287 else
288 meson_canvas_setup(priv, MESON_CANVAS_ID_VD1_0,
289 priv->viu.vd1_addr2,
290 priv->viu.vd1_stride2,
291 priv->viu.vd1_height2,
292 MESON_CANVAS_WRAP_NONE,
293 MESON_CANVAS_BLKMODE_LINEAR,
294 MESON_CANVAS_ENDIAN_SWAP64);
295 }; 261 };
296 262
297 writel_relaxed(priv->viu.vd1_if0_gen_reg, 263 writel_relaxed(priv->viu.vd1_if0_gen_reg,
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 2281ed3eb774..70f9d7b85e8e 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -48,7 +48,6 @@
48#include "meson_vpp.h" 48#include "meson_vpp.h"
49#include "meson_viu.h" 49#include "meson_viu.h"
50#include "meson_venc.h" 50#include "meson_venc.h"
51#include "meson_canvas.h"
52#include "meson_registers.h" 51#include "meson_registers.h"
53 52
54#define DRIVER_NAME "meson" 53#define DRIVER_NAME "meson"
@@ -231,50 +230,31 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
231 } 230 }
232 231
233 priv->canvas = meson_canvas_get(dev); 232 priv->canvas = meson_canvas_get(dev);
234 if (!IS_ERR(priv->canvas)) { 233 if (IS_ERR(priv->canvas)) {
235 ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_osd1); 234 ret = PTR_ERR(priv->canvas);
236 if (ret) 235 goto free_drm;
237 goto free_drm; 236 }
238 ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_0);
239 if (ret) {
240 meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
241 goto free_drm;
242 }
243 ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_1);
244 if (ret) {
245 meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
246 meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0);
247 goto free_drm;
248 }
249 ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_2);
250 if (ret) {
251 meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
252 meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0);
253 meson_canvas_free(priv->canvas, priv->canvas_id_vd1_1);
254 goto free_drm;
255 }
256 } else {
257 priv->canvas = NULL;
258
259 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmc");
260 if (!res) {
261 ret = -EINVAL;
262 goto free_drm;
263 }
264 /* Simply ioremap since it may be a shared register zone */
265 regs = devm_ioremap(dev, res->start, resource_size(res));
266 if (!regs) {
267 ret = -EADDRNOTAVAIL;
268 goto free_drm;
269 }
270 237
271 priv->dmc = devm_regmap_init_mmio(dev, regs, 238 ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_osd1);
272 &meson_regmap_config); 239 if (ret)
273 if (IS_ERR(priv->dmc)) { 240 goto free_drm;
274 dev_err(&pdev->dev, "Couldn't create the DMC regmap\n"); 241 ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_0);
275 ret = PTR_ERR(priv->dmc); 242 if (ret) {
276 goto free_drm; 243 meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
277 } 244 goto free_drm;
245 }
246 ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_1);
247 if (ret) {
248 meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
249 meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0);
250 goto free_drm;
251 }
252 ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_2);
253 if (ret) {
254 meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
255 meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0);
256 meson_canvas_free(priv->canvas, priv->canvas_id_vd1_1);
257 goto free_drm;
278 } 258 }
279 259
280 priv->vsync_irq = platform_get_irq(pdev, 0); 260 priv->vsync_irq = platform_get_irq(pdev, 0);
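
With the DMC regmap fallback gone, the driver depends entirely on the shared canvas provider; probe now fails outright if meson_canvas_get() errors. The full allocate/configure cycle, using only calls that appear in this diff (error unwinding trimmed to a single canvas for brevity):

static int canvas_setup_sketch(struct device *dev, struct meson_drm *priv)
{
	int ret;

	priv->canvas = meson_canvas_get(dev);
	if (IS_ERR(priv->canvas))
		return PTR_ERR(priv->canvas);

	/* Reserve a canvas id, then point it at the OSD1 framebuffer. */
	ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_osd1);
	if (ret)
		return ret;

	meson_canvas_config(priv->canvas, priv->canvas_id_osd1,
			    priv->viu.osd1_addr, priv->viu.osd1_stride,
			    priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE,
			    MESON_CANVAS_BLKMODE_LINEAR, 0);
	return 0;
}

/* Teardown releases the id again: meson_canvas_free(priv->canvas, priv->canvas_id_osd1); */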
diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h
index 4dccf4cd042a..214a7cb18ce2 100644
--- a/drivers/gpu/drm/meson/meson_drv.h
+++ b/drivers/gpu/drm/meson/meson_drv.h
@@ -29,7 +29,6 @@ struct meson_drm {
29 struct device *dev; 29 struct device *dev;
30 void __iomem *io_base; 30 void __iomem *io_base;
31 struct regmap *hhi; 31 struct regmap *hhi;
32 struct regmap *dmc;
33 int vsync_irq; 32 int vsync_irq;
34 33
35 struct meson_canvas *canvas; 34 struct meson_canvas *canvas;
diff --git a/drivers/gpu/drm/meson/meson_overlay.c b/drivers/gpu/drm/meson/meson_overlay.c
index 691a9fd16b36..b54a22e483b9 100644
--- a/drivers/gpu/drm/meson/meson_overlay.c
+++ b/drivers/gpu/drm/meson/meson_overlay.c
@@ -22,7 +22,6 @@
22#include "meson_overlay.h" 22#include "meson_overlay.h"
23#include "meson_vpp.h" 23#include "meson_vpp.h"
24#include "meson_viu.h" 24#include "meson_viu.h"
25#include "meson_canvas.h"
26#include "meson_registers.h" 25#include "meson_registers.h"
27 26
28/* VD1_IF0_GEN_REG */ 27/* VD1_IF0_GEN_REG */
@@ -350,13 +349,6 @@ static void meson_overlay_atomic_update(struct drm_plane *plane,
350 349
351 DRM_DEBUG_DRIVER("\n"); 350 DRM_DEBUG_DRIVER("\n");
352 351
 353 /* Fallback if canvas provider is not available */
354 if (!priv->canvas) {
355 priv->canvas_id_vd1_0 = MESON_CANVAS_ID_VD1_0;
356 priv->canvas_id_vd1_1 = MESON_CANVAS_ID_VD1_1;
357 priv->canvas_id_vd1_2 = MESON_CANVAS_ID_VD1_2;
358 }
359
360 interlace_mode = state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE; 352 interlace_mode = state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE;
361 353
362 spin_lock_irqsave(&priv->drm->event_lock, flags); 354 spin_lock_irqsave(&priv->drm->event_lock, flags);
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
index 6119a0224278..b7786218cb10 100644
--- a/drivers/gpu/drm/meson/meson_plane.c
+++ b/drivers/gpu/drm/meson/meson_plane.c
@@ -38,7 +38,6 @@
38#include "meson_plane.h" 38#include "meson_plane.h"
39#include "meson_vpp.h" 39#include "meson_vpp.h"
40#include "meson_viu.h" 40#include "meson_viu.h"
41#include "meson_canvas.h"
42#include "meson_registers.h" 41#include "meson_registers.h"
43 42
44/* OSD_SCI_WH_M1 */ 43/* OSD_SCI_WH_M1 */
@@ -148,10 +147,7 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
148 (0xFF << OSD_GLOBAL_ALPHA_SHIFT) | 147 (0xFF << OSD_GLOBAL_ALPHA_SHIFT) |
149 OSD_BLK0_ENABLE; 148 OSD_BLK0_ENABLE;
150 149
151 if (priv->canvas) 150 canvas_id_osd1 = priv->canvas_id_osd1;
152 canvas_id_osd1 = priv->canvas_id_osd1;
153 else
154 canvas_id_osd1 = MESON_CANVAS_ID_OSD1;
155 151
156 /* Set up BLK0 to point to the right canvas */ 152 /* Set up BLK0 to point to the right canvas */
157 priv->viu.osd1_blk0_cfg[0] = ((canvas_id_osd1 << OSD_CANVAS_SEL) | 153 priv->viu.osd1_blk0_cfg[0] = ((canvas_id_osd1 << OSD_CANVAS_SEL) |
diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c
index e46e05f50bad..ac0f3687e09a 100644
--- a/drivers/gpu/drm/meson/meson_viu.c
+++ b/drivers/gpu/drm/meson/meson_viu.c
@@ -25,7 +25,6 @@
25#include "meson_viu.h" 25#include "meson_viu.h"
26#include "meson_vpp.h" 26#include "meson_vpp.h"
27#include "meson_venc.h" 27#include "meson_venc.h"
28#include "meson_canvas.h"
29#include "meson_registers.h" 28#include "meson_registers.h"
30 29
31/** 30/**
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 0bdd93648761..4697d854b827 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -1027,7 +1027,6 @@ static struct drm_driver msm_driver = {
1027 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 1027 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
1028 .gem_prime_export = drm_gem_prime_export, 1028 .gem_prime_export = drm_gem_prime_export,
1029 .gem_prime_import = drm_gem_prime_import, 1029 .gem_prime_import = drm_gem_prime_import,
1030 .gem_prime_res_obj = msm_gem_prime_res_obj,
1031 .gem_prime_pin = msm_gem_prime_pin, 1030 .gem_prime_pin = msm_gem_prime_pin,
1032 .gem_prime_unpin = msm_gem_prime_unpin, 1031 .gem_prime_unpin = msm_gem_prime_unpin,
1033 .gem_prime_get_sg_table = msm_gem_prime_get_sg_table, 1032 .gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index c56dade2c1dc..163e24d2ab99 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -292,7 +292,6 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
292void *msm_gem_prime_vmap(struct drm_gem_object *obj); 292void *msm_gem_prime_vmap(struct drm_gem_object *obj);
293void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); 293void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
294int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); 294int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
295struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj);
296struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev, 295struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
297 struct dma_buf_attachment *attach, struct sg_table *sg); 296 struct dma_buf_attachment *attach, struct sg_table *sg);
298int msm_gem_prime_pin(struct drm_gem_object *obj); 297int msm_gem_prime_pin(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 18ca651ab942..a72c648ba6e7 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -672,14 +672,13 @@ void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
672int msm_gem_sync_object(struct drm_gem_object *obj, 672int msm_gem_sync_object(struct drm_gem_object *obj,
673 struct msm_fence_context *fctx, bool exclusive) 673 struct msm_fence_context *fctx, bool exclusive)
674{ 674{
675 struct msm_gem_object *msm_obj = to_msm_bo(obj);
676 struct reservation_object_list *fobj; 675 struct reservation_object_list *fobj;
677 struct dma_fence *fence; 676 struct dma_fence *fence;
678 int i, ret; 677 int i, ret;
679 678
680 fobj = reservation_object_get_list(msm_obj->resv); 679 fobj = reservation_object_get_list(obj->resv);
681 if (!fobj || (fobj->shared_count == 0)) { 680 if (!fobj || (fobj->shared_count == 0)) {
682 fence = reservation_object_get_excl(msm_obj->resv); 681 fence = reservation_object_get_excl(obj->resv);
683 /* don't need to wait on our own fences, since ring is fifo */ 682 /* don't need to wait on our own fences, since ring is fifo */
684 if (fence && (fence->context != fctx->context)) { 683 if (fence && (fence->context != fctx->context)) {
685 ret = dma_fence_wait(fence, true); 684 ret = dma_fence_wait(fence, true);
@@ -693,7 +692,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
693 692
694 for (i = 0; i < fobj->shared_count; i++) { 693 for (i = 0; i < fobj->shared_count; i++) {
695 fence = rcu_dereference_protected(fobj->shared[i], 694 fence = rcu_dereference_protected(fobj->shared[i],
696 reservation_object_held(msm_obj->resv)); 695 reservation_object_held(obj->resv));
697 if (fence->context != fctx->context) { 696 if (fence->context != fctx->context) {
698 ret = dma_fence_wait(fence, true); 697 ret = dma_fence_wait(fence, true);
699 if (ret) 698 if (ret)
@@ -711,9 +710,9 @@ void msm_gem_move_to_active(struct drm_gem_object *obj,
711 WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED); 710 WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
712 msm_obj->gpu = gpu; 711 msm_obj->gpu = gpu;
713 if (exclusive) 712 if (exclusive)
714 reservation_object_add_excl_fence(msm_obj->resv, fence); 713 reservation_object_add_excl_fence(obj->resv, fence);
715 else 714 else
716 reservation_object_add_shared_fence(msm_obj->resv, fence); 715 reservation_object_add_shared_fence(obj->resv, fence);
717 list_del_init(&msm_obj->mm_list); 716 list_del_init(&msm_obj->mm_list);
718 list_add_tail(&msm_obj->mm_list, &gpu->active_list); 717 list_add_tail(&msm_obj->mm_list, &gpu->active_list);
719} 718}
@@ -733,13 +732,12 @@ void msm_gem_move_to_inactive(struct drm_gem_object *obj)
733 732
734int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout) 733int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
735{ 734{
736 struct msm_gem_object *msm_obj = to_msm_bo(obj);
737 bool write = !!(op & MSM_PREP_WRITE); 735 bool write = !!(op & MSM_PREP_WRITE);
738 unsigned long remain = 736 unsigned long remain =
739 op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout); 737 op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
740 long ret; 738 long ret;
741 739
742 ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write, 740 ret = reservation_object_wait_timeout_rcu(obj->resv, write,
743 true, remain); 741 true, remain);
744 if (ret == 0) 742 if (ret == 0)
745 return remain == 0 ? -EBUSY : -ETIMEDOUT; 743 return remain == 0 ? -EBUSY : -ETIMEDOUT;
@@ -771,7 +769,7 @@ static void describe_fence(struct dma_fence *fence, const char *type,
771void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m) 769void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
772{ 770{
773 struct msm_gem_object *msm_obj = to_msm_bo(obj); 771 struct msm_gem_object *msm_obj = to_msm_bo(obj);
774 struct reservation_object *robj = msm_obj->resv; 772 struct reservation_object *robj = obj->resv;
775 struct reservation_object_list *fobj; 773 struct reservation_object_list *fobj;
776 struct dma_fence *fence; 774 struct dma_fence *fence;
777 struct msm_gem_vma *vma; 775 struct msm_gem_vma *vma;
@@ -883,9 +881,6 @@ void msm_gem_free_object(struct drm_gem_object *obj)
883 put_pages(obj); 881 put_pages(obj);
884 } 882 }
885 883
886 if (msm_obj->resv == &msm_obj->_resv)
887 reservation_object_fini(msm_obj->resv);
888
889 drm_gem_object_release(obj); 884 drm_gem_object_release(obj);
890 885
891 mutex_unlock(&msm_obj->lock); 886 mutex_unlock(&msm_obj->lock);
@@ -945,12 +940,8 @@ static int msm_gem_new_impl(struct drm_device *dev,
945 msm_obj->flags = flags; 940 msm_obj->flags = flags;
946 msm_obj->madv = MSM_MADV_WILLNEED; 941 msm_obj->madv = MSM_MADV_WILLNEED;
947 942
948 if (resv) { 943 if (resv)
949 msm_obj->resv = resv; 944 msm_obj->base.resv = resv;
950 } else {
951 msm_obj->resv = &msm_obj->_resv;
952 reservation_object_init(msm_obj->resv);
953 }
954 945
955 INIT_LIST_HEAD(&msm_obj->submit_entry); 946 INIT_LIST_HEAD(&msm_obj->submit_entry);
956 INIT_LIST_HEAD(&msm_obj->vmas); 947 INIT_LIST_HEAD(&msm_obj->vmas);
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index 13403c6da6c7..60bb290700ce 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -70,10 +70,3 @@ void msm_gem_prime_unpin(struct drm_gem_object *obj)
70 if (!obj->import_attach) 70 if (!obj->import_attach)
71 msm_gem_put_pages(obj); 71 msm_gem_put_pages(obj);
72} 72}
73
74struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj)
75{
76 struct msm_gem_object *msm_obj = to_msm_bo(obj);
77
78 return msm_obj->resv;
79}
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 12b983fc0b56..df302521ec74 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -173,7 +173,7 @@ static void submit_unlock_unpin_bo(struct msm_gem_submit *submit,
173 msm_gem_unpin_iova(&msm_obj->base, submit->gpu->aspace); 173 msm_gem_unpin_iova(&msm_obj->base, submit->gpu->aspace);
174 174
175 if (submit->bos[i].flags & BO_LOCKED) 175 if (submit->bos[i].flags & BO_LOCKED)
176 ww_mutex_unlock(&msm_obj->resv->lock); 176 ww_mutex_unlock(&msm_obj->base.resv->lock);
177 177
178 if (backoff && !(submit->bos[i].flags & BO_VALID)) 178 if (backoff && !(submit->bos[i].flags & BO_VALID))
179 submit->bos[i].iova = 0; 179 submit->bos[i].iova = 0;
@@ -196,7 +196,7 @@ retry:
196 contended = i; 196 contended = i;
197 197
198 if (!(submit->bos[i].flags & BO_LOCKED)) { 198 if (!(submit->bos[i].flags & BO_LOCKED)) {
199 ret = ww_mutex_lock_interruptible(&msm_obj->resv->lock, 199 ret = ww_mutex_lock_interruptible(&msm_obj->base.resv->lock,
200 &submit->ticket); 200 &submit->ticket);
201 if (ret) 201 if (ret)
202 goto fail; 202 goto fail;
@@ -218,7 +218,7 @@ fail:
218 if (ret == -EDEADLK) { 218 if (ret == -EDEADLK) {
219 struct msm_gem_object *msm_obj = submit->bos[contended].obj; 219 struct msm_gem_object *msm_obj = submit->bos[contended].obj;
220 /* we lost out in a seqno race, lock and retry.. */ 220 /* we lost out in a seqno race, lock and retry.. */
221 ret = ww_mutex_lock_slow_interruptible(&msm_obj->resv->lock, 221 ret = ww_mutex_lock_slow_interruptible(&msm_obj->base.resv->lock,
222 &submit->ticket); 222 &submit->ticket);
223 if (!ret) { 223 if (!ret) {
224 submit->bos[contended].flags |= BO_LOCKED; 224 submit->bos[contended].flags |= BO_LOCKED;
@@ -244,7 +244,7 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
244 * strange place to call it. OTOH this is a 244 * strange place to call it. OTOH this is a
245 * convenient can-fail point to hook it in. 245 * convenient can-fail point to hook it in.
246 */ 246 */
247 ret = reservation_object_reserve_shared(msm_obj->resv, 247 ret = reservation_object_reserve_shared(msm_obj->base.resv,
248 1); 248 1);
249 if (ret) 249 if (ret)
250 return ret; 250 return ret;
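
The msm hunks are one logical change: the reservation object moves from the driver-private msm_obj->resv to the resv embedded in the core drm_gem_object, so the .gem_prime_res_obj hook, the private init/fini, and the to_msm_bo() detours all disappear. The remaining to_msm_bo() users in msm_gem_submit.c just reach the core field via msm_obj->base.resv. A sketch of the simplified wait path, mirroring msm_gem_cpu_prep() after this change:

/* With the reservation object in the core GEM object, a CPU-access
 * wait needs no driver-private state at all. Returns the raw value of
 * the core wait: 0 on timeout, >0 remaining jiffies, <0 on error. */
static long wait_bo_idle_sketch(struct drm_gem_object *obj, bool write,
				unsigned long timeout_jiffies)
{
	return reservation_object_wait_timeout_rcu(obj->resv, write,
						   true, timeout_jiffies);
}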
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 3e070153ef21..f53f817356db 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -149,6 +149,15 @@ config DRM_PANEL_RAYDIUM_RM68200
149 Say Y here if you want to enable support for Raydium RM68200 149 Say Y here if you want to enable support for Raydium RM68200
150 720x1280 DSI video mode panel. 150 720x1280 DSI video mode panel.
151 151
152config DRM_PANEL_RONBO_RB070D30
153 tristate "Ronbo Electronics RB070D30 panel"
154 depends on OF
155 depends on DRM_MIPI_DSI
156 depends on BACKLIGHT_CLASS_DEVICE
157 help
158 Say Y here if you want to enable support for Ronbo Electronics
159 RB070D30 1024x600 DSI panel.
160
152config DRM_PANEL_SAMSUNG_S6D16D0 161config DRM_PANEL_SAMSUNG_S6D16D0
153 tristate "Samsung S6D16D0 DSI video mode panel" 162 tristate "Samsung S6D16D0 DSI video mode panel"
154 depends on OF 163 depends on OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index e7ab71968bbf..7834947a53b0 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_DRM_PANEL_ORISETECH_OTM8009A) += panel-orisetech-otm8009a.o
13obj-$(CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00) += panel-panasonic-vvx10f034n00.o 13obj-$(CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00) += panel-panasonic-vvx10f034n00.o
14obj-$(CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN) += panel-raspberrypi-touchscreen.o 14obj-$(CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN) += panel-raspberrypi-touchscreen.o
15obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM68200) += panel-raydium-rm68200.o 15obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM68200) += panel-raydium-rm68200.o
16obj-$(CONFIG_DRM_PANEL_RONBO_RB070D30) += panel-ronbo-rb070d30.o
16obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o 17obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o
17obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D16D0) += panel-samsung-s6d16d0.o 18obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D16D0) += panel-samsung-s6d16d0.o
18obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o 19obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o
diff --git a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
new file mode 100644
index 000000000000..3c15764f0c03
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
@@ -0,0 +1,258 @@
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Copyright (C) 2018-2019, Bridge Systems BV
4 * Copyright (C) 2018-2019, Bootlin
5 * Copyright (C) 2017, Free Electrons
6 *
 7 * This file is based on panel-ilitek-ili9881c.c
8 */
9
10#include <linux/backlight.h>
11#include <linux/delay.h>
12#include <linux/device.h>
13#include <linux/err.h>
14#include <linux/errno.h>
15#include <linux/fb.h>
16#include <linux/kernel.h>
17#include <linux/media-bus-format.h>
18#include <linux/module.h>
19
20#include <linux/gpio/consumer.h>
21#include <linux/regulator/consumer.h>
22
23#include <drm/drm_connector.h>
24#include <drm/drm_mipi_dsi.h>
25#include <drm/drm_modes.h>
26#include <drm/drm_panel.h>
27#include <drm/drm_print.h>
28
29struct rb070d30_panel {
30 struct drm_panel panel;
31 struct mipi_dsi_device *dsi;
32 struct backlight_device *backlight;
33 struct regulator *supply;
34
35 struct {
36 struct gpio_desc *power;
37 struct gpio_desc *reset;
38 struct gpio_desc *updn;
39 struct gpio_desc *shlr;
40 } gpios;
41};
42
43static inline struct rb070d30_panel *panel_to_rb070d30_panel(struct drm_panel *panel)
44{
45 return container_of(panel, struct rb070d30_panel, panel);
46}
47
48static int rb070d30_panel_prepare(struct drm_panel *panel)
49{
50 struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel);
51 int ret;
52
53 ret = regulator_enable(ctx->supply);
54 if (ret < 0) {
55 DRM_DEV_ERROR(&ctx->dsi->dev, "Failed to enable supply: %d\n", ret);
56 return ret;
57 }
58
59 msleep(20);
60 gpiod_set_value(ctx->gpios.power, 1);
61 msleep(20);
62 gpiod_set_value(ctx->gpios.reset, 1);
63 msleep(20);
64 return 0;
65}
66
67static int rb070d30_panel_unprepare(struct drm_panel *panel)
68{
69 struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel);
70
71 gpiod_set_value(ctx->gpios.reset, 0);
72 gpiod_set_value(ctx->gpios.power, 0);
73 regulator_disable(ctx->supply);
74
75 return 0;
76}
77
78static int rb070d30_panel_enable(struct drm_panel *panel)
79{
80 struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel);
81 int ret;
82
83 ret = mipi_dsi_dcs_exit_sleep_mode(ctx->dsi);
84 if (ret)
85 return ret;
86
87 ret = backlight_enable(ctx->backlight);
88 if (ret)
89 goto out;
90
91 return 0;
92
93out:
94 mipi_dsi_dcs_enter_sleep_mode(ctx->dsi);
95 return ret;
96}
97
98static int rb070d30_panel_disable(struct drm_panel *panel)
99{
100 struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel);
101
102 backlight_disable(ctx->backlight);
103 return mipi_dsi_dcs_enter_sleep_mode(ctx->dsi);
104}
105
106/* Default timings */
107static const struct drm_display_mode default_mode = {
108 .clock = 51206,
109 .hdisplay = 1024,
110 .hsync_start = 1024 + 160,
111 .hsync_end = 1024 + 160 + 80,
112 .htotal = 1024 + 160 + 80 + 80,
113 .vdisplay = 600,
114 .vsync_start = 600 + 12,
115 .vsync_end = 600 + 12 + 10,
116 .vtotal = 600 + 12 + 10 + 13,
117 .vrefresh = 60,
118
119 .width_mm = 154,
120 .height_mm = 85,
121};
122
123static int rb070d30_panel_get_modes(struct drm_panel *panel)
124{
125 struct drm_connector *connector = panel->connector;
126 struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel);
127 struct drm_display_mode *mode;
128 static const u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
129
130 mode = drm_mode_duplicate(panel->drm, &default_mode);
131 if (!mode) {
132 DRM_DEV_ERROR(&ctx->dsi->dev,
133 "Failed to add mode " DRM_MODE_FMT "\n",
134 DRM_MODE_ARG(&default_mode));
135 return -EINVAL;
136 }
137
138 drm_mode_set_name(mode);
139
140 mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
141 drm_mode_probed_add(connector, mode);
142
143 panel->connector->display_info.bpc = 8;
144 panel->connector->display_info.width_mm = mode->width_mm;
145 panel->connector->display_info.height_mm = mode->height_mm;
146 drm_display_info_set_bus_formats(&connector->display_info,
147 &bus_format, 1);
148
149 return 1;
150}
151
152static const struct drm_panel_funcs rb070d30_panel_funcs = {
153 .get_modes = rb070d30_panel_get_modes,
154 .prepare = rb070d30_panel_prepare,
155 .enable = rb070d30_panel_enable,
156 .disable = rb070d30_panel_disable,
157 .unprepare = rb070d30_panel_unprepare,
158};
159
160static int rb070d30_panel_dsi_probe(struct mipi_dsi_device *dsi)
161{
162 struct rb070d30_panel *ctx;
163 int ret;
164
165 ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
166 if (!ctx)
167 return -ENOMEM;
168
169 ctx->supply = devm_regulator_get(&dsi->dev, "vcc-lcd");
170 if (IS_ERR(ctx->supply))
171 return PTR_ERR(ctx->supply);
172
173 mipi_dsi_set_drvdata(dsi, ctx);
174 ctx->dsi = dsi;
175
176 drm_panel_init(&ctx->panel);
177 ctx->panel.dev = &dsi->dev;
178 ctx->panel.funcs = &rb070d30_panel_funcs;
179
180 ctx->gpios.reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
181 if (IS_ERR(ctx->gpios.reset)) {
182 DRM_DEV_ERROR(&dsi->dev, "Couldn't get our reset GPIO\n");
183 return PTR_ERR(ctx->gpios.reset);
184 }
185
186 ctx->gpios.power = devm_gpiod_get(&dsi->dev, "power", GPIOD_OUT_LOW);
187 if (IS_ERR(ctx->gpios.power)) {
188 DRM_DEV_ERROR(&dsi->dev, "Couldn't get our power GPIO\n");
189 return PTR_ERR(ctx->gpios.power);
190 }
191
192 /*
193 * We don't change the state of that GPIO later on but we need
194 * to force it into a low state.
195 */
196 ctx->gpios.updn = devm_gpiod_get(&dsi->dev, "updn", GPIOD_OUT_LOW);
197 if (IS_ERR(ctx->gpios.updn)) {
198 DRM_DEV_ERROR(&dsi->dev, "Couldn't get our updn GPIO\n");
199 return PTR_ERR(ctx->gpios.updn);
200 }
201
202 /*
203 * We don't change the state of that GPIO later on but we need
204 * to force it into a low state.
205 */
206 ctx->gpios.shlr = devm_gpiod_get(&dsi->dev, "shlr", GPIOD_OUT_LOW);
207 if (IS_ERR(ctx->gpios.shlr)) {
208 DRM_DEV_ERROR(&dsi->dev, "Couldn't get our shlr GPIO\n");
209 return PTR_ERR(ctx->gpios.shlr);
210 }
211
212 ctx->backlight = devm_of_find_backlight(&dsi->dev);
213 if (IS_ERR(ctx->backlight)) {
214 DRM_DEV_ERROR(&dsi->dev, "Couldn't get our backlight\n");
215 return PTR_ERR(ctx->backlight);
216 }
217
218 ret = drm_panel_add(&ctx->panel);
219 if (ret < 0)
220 return ret;
221
222 dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | MIPI_DSI_MODE_LPM;
223 dsi->format = MIPI_DSI_FMT_RGB888;
224 dsi->lanes = 4;
225
226 return mipi_dsi_attach(dsi);
227}
228
229static int rb070d30_panel_dsi_remove(struct mipi_dsi_device *dsi)
230{
231 struct rb070d30_panel *ctx = mipi_dsi_get_drvdata(dsi);
232
233 mipi_dsi_detach(dsi);
234 drm_panel_remove(&ctx->panel);
235
236 return 0;
237}
238
239static const struct of_device_id rb070d30_panel_of_match[] = {
240 { .compatible = "ronbo,rb070d30" },
241 { /* sentinel */ },
242};
243MODULE_DEVICE_TABLE(of, rb070d30_panel_of_match);
244
245static struct mipi_dsi_driver rb070d30_panel_driver = {
246 .probe = rb070d30_panel_dsi_probe,
247 .remove = rb070d30_panel_dsi_remove,
248 .driver = {
249 .name = "panel-ronbo-rb070d30",
250 .of_match_table = rb070d30_panel_of_match,
251 },
252};
253module_mipi_dsi_driver(rb070d30_panel_driver);
254
255MODULE_AUTHOR("Boris Brezillon <boris.brezillon@bootlin.com>");
256MODULE_AUTHOR("Konstantin Sudakov <k.sudakov@integrasources.com>");
257MODULE_DESCRIPTION("Ronbo RB070D30 Panel Driver");
258MODULE_LICENSE("GPL");
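A quick sanity check on the default_mode timings above: the pixel clock in kHz should equal htotal * vtotal * vrefresh / 1000. A standalone sketch in plain C (not driver code) confirms the values are self-consistent for 60 Hz:

#include <stdio.h>

int main(void)
{
	unsigned int htotal = 1024 + 160 + 80 + 80;	/* 1344 */
	unsigned int vtotal = 600 + 12 + 10 + 13;	/* 635 */
	unsigned int vrefresh = 60;

	/* 1344 * 635 * 60 / 1000 = 51206, matching .clock above */
	printf("clock = %u kHz\n", htotal * vtotal * vrefresh / 1000);
	return 0;
}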
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 08c725544a2f..8b319ebbb0fb 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -535,7 +535,7 @@ static void qxl_primary_atomic_update(struct drm_plane *plane,
535{ 535{
536 struct qxl_device *qdev = plane->dev->dev_private; 536 struct qxl_device *qdev = plane->dev->dev_private;
537 struct qxl_bo *bo = gem_to_qxl_bo(plane->state->fb->obj[0]); 537 struct qxl_bo *bo = gem_to_qxl_bo(plane->state->fb->obj[0]);
538 struct qxl_bo *bo_old, *primary; 538 struct qxl_bo *primary;
539 struct drm_clip_rect norect = { 539 struct drm_clip_rect norect = {
540 .x1 = 0, 540 .x1 = 0,
541 .y1 = 0, 541 .y1 = 0,
@@ -544,12 +544,6 @@ static void qxl_primary_atomic_update(struct drm_plane *plane,
544 }; 544 };
545 uint32_t dumb_shadow_offset = 0; 545 uint32_t dumb_shadow_offset = 0;
546 546
547 if (old_state->fb) {
548 bo_old = gem_to_qxl_bo(old_state->fb->obj[0]);
549 } else {
550 bo_old = NULL;
551 }
552
553 primary = bo->shadow ? bo->shadow : bo; 547 primary = bo->shadow ? bo->shadow : bo;
554 548
555 if (!primary->is_primary) { 549 if (!primary->is_primary) {
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 53f29a115104..0a9312ea250a 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1388,7 +1388,7 @@ int radeon_device_init(struct radeon_device *rdev,
1388 pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32)); 1388 pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
1389 pr_warn("radeon: No coherent DMA available\n"); 1389 pr_warn("radeon: No coherent DMA available\n");
1390 } 1390 }
1391 rdev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits); 1391 rdev->need_swiotlb = drm_need_swiotlb(dma_bits);
1392 1392
1393 /* Registers mapping */ 1393 /* Registers mapping */
1394 /* TODO: block userspace mapping of io register */ 1394 /* TODO: block userspace mapping of io register */
diff --git a/drivers/gpu/drm/selftests/test-drm_mm.c b/drivers/gpu/drm/selftests/test-drm_mm.c
index fbed2c90fd51..286a0eeefcb6 100644
--- a/drivers/gpu/drm/selftests/test-drm_mm.c
+++ b/drivers/gpu/drm/selftests/test-drm_mm.c
@@ -1615,7 +1615,7 @@ static int igt_topdown(void *ignored)
1615 DRM_RND_STATE(prng, random_seed); 1615 DRM_RND_STATE(prng, random_seed);
1616 const unsigned int count = 8192; 1616 const unsigned int count = 8192;
1617 unsigned int size; 1617 unsigned int size;
1618 unsigned long *bitmap = NULL; 1618 unsigned long *bitmap;
1619 struct drm_mm mm; 1619 struct drm_mm mm;
1620 struct drm_mm_node *nodes, *node, *next; 1620 struct drm_mm_node *nodes, *node, *next;
1621 unsigned int *order, n, m, o = 0; 1621 unsigned int *order, n, m, o = 0;
@@ -1631,8 +1631,7 @@ static int igt_topdown(void *ignored)
1631 if (!nodes) 1631 if (!nodes)
1632 goto err; 1632 goto err;
1633 1633
1634 bitmap = kcalloc(count / BITS_PER_LONG, sizeof(unsigned long), 1634 bitmap = bitmap_zalloc(count, GFP_KERNEL);
1635 GFP_KERNEL);
1636 if (!bitmap) 1635 if (!bitmap)
1637 goto err_nodes; 1636 goto err_nodes;
1638 1637
@@ -1717,7 +1716,7 @@ out:
1717 drm_mm_takedown(&mm); 1716 drm_mm_takedown(&mm);
1718 kfree(order); 1717 kfree(order);
1719err_bitmap: 1718err_bitmap:
1720 kfree(bitmap); 1719 bitmap_free(bitmap);
1721err_nodes: 1720err_nodes:
1722 vfree(nodes); 1721 vfree(nodes);
1723err: 1722err:
@@ -1745,8 +1744,7 @@ static int igt_bottomup(void *ignored)
1745 if (!nodes) 1744 if (!nodes)
1746 goto err; 1745 goto err;
1747 1746
1748 bitmap = kcalloc(count / BITS_PER_LONG, sizeof(unsigned long), 1747 bitmap = bitmap_zalloc(count, GFP_KERNEL);
1749 GFP_KERNEL);
1750 if (!bitmap) 1748 if (!bitmap)
1751 goto err_nodes; 1749 goto err_nodes;
1752 1750
@@ -1818,7 +1816,7 @@ out:
1818 drm_mm_takedown(&mm); 1816 drm_mm_takedown(&mm);
1819 kfree(order); 1817 kfree(order);
1820err_bitmap: 1818err_bitmap:
1821 kfree(bitmap); 1819 bitmap_free(bitmap);
1822err_nodes: 1820err_nodes:
1823 vfree(nodes); 1821 vfree(nodes);
1824err: 1822err:
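The bitmap_zalloc()/bitmap_free() conversion above is more than cosmetic: the old kcalloc(count / BITS_PER_LONG, ...) rounds the size down, so it would under-allocate whenever count were not a multiple of BITS_PER_LONG, while bitmap_zalloc() rounds up. With count = 8192 the two happen to agree, as this sketch (kernel macros re-derived for illustration) shows:

#include <stdio.h>

#define BITS_PER_LONG		(8 * sizeof(long))
#define BITS_TO_LONGS(n)	(((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	unsigned int count = 8192;	/* as in igt_topdown()/igt_bottomup() */

	/* bitmap_zalloc() allocates BITS_TO_LONGS(count) zeroed longs */
	printf("new: %zu longs, old: %zu longs\n",
	       BITS_TO_LONGS(count), count / BITS_PER_LONG);
	return 0;
}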
diff --git a/drivers/gpu/drm/stm/Kconfig b/drivers/gpu/drm/stm/Kconfig
index 35367ada3bc1..d15b10de1da6 100644
--- a/drivers/gpu/drm/stm/Kconfig
+++ b/drivers/gpu/drm/stm/Kconfig
@@ -6,7 +6,7 @@ config DRM_STM
6 select DRM_KMS_CMA_HELPER 6 select DRM_KMS_CMA_HELPER
7 select DRM_PANEL_BRIDGE 7 select DRM_PANEL_BRIDGE
8 select VIDEOMODE_HELPERS 8 select VIDEOMODE_HELPERS
9 select FB_PROVIDE_GET_FB_UNMAPPED_AREA 9 select FB_PROVIDE_GET_FB_UNMAPPED_AREA if FB
10 10
11 help 11 help
12 Enable support for the on-chip display controller on 12 Enable support for the on-chip display controller on
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index 4c0d51f73237..ee59da4a0172 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -720,33 +720,22 @@ static int sun4i_backend_free_sat(struct device *dev) {
720 */ 720 */
721static int sun4i_backend_of_get_id(struct device_node *node) 721static int sun4i_backend_of_get_id(struct device_node *node)
722{ 722{
723 struct device_node *port, *ep; 723 struct device_node *ep, *remote;
724 int ret = -EINVAL; 724 struct of_endpoint of_ep;
725 725
726 /* input is port 0 */ 726 /* Input port is 0, and we want the first endpoint. */
727 port = of_graph_get_port_by_id(node, 0); 727 ep = of_graph_get_endpoint_by_regs(node, 0, -1);
728 if (!port) 728 if (!ep)
729 return -EINVAL; 729 return -EINVAL;
730 730
731 /* try finding an upstream endpoint */ 731 remote = of_graph_get_remote_endpoint(ep);
732 for_each_available_child_of_node(port, ep) { 732 of_node_put(ep);
733 struct device_node *remote; 733 if (!remote)
734 u32 reg; 734 return -EINVAL;
735
736 remote = of_graph_get_remote_endpoint(ep);
737 if (!remote)
738 continue;
739
740 ret = of_property_read_u32(remote, "reg", &reg);
741 if (ret)
742 continue;
743
744 ret = reg;
745 }
746
747 of_node_put(port);
748 735
749 return ret; 736 of_graph_parse_endpoint(remote, &of_ep);
737 of_node_put(remote);
738 return of_ep.id;
750} 739}
751 740
752/* TODO: This needs to take multiple pipelines into account */ 741/* TODO: This needs to take multiple pipelines into account */
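sun8i_mixer.c gets the same refactor further down; the shared pattern, as a sketch (the helper name is illustrative, the calls are the ones the diff introduces), is: grab the first endpoint on the given port (reg == -1 matches any endpoint), hop to the remote endpoint, and return its id from the parsed of_endpoint:

static int example_of_get_remote_id(struct device_node *node, int port)
{
	struct device_node *ep, *remote;
	struct of_endpoint of_ep;

	ep = of_graph_get_endpoint_by_regs(node, port, -1);
	if (!ep)
		return -EINVAL;

	remote = of_graph_get_remote_endpoint(ep);
	of_node_put(ep);
	if (!remote)
		return -EINVAL;

	of_graph_parse_endpoint(remote, &of_ep);
	of_node_put(remote);
	return of_ep.id;
}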
diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.c b/drivers/gpu/drm/sun4i/sun4i_lvds.c
index 147b97ed1a09..3a3ba99fed22 100644
--- a/drivers/gpu/drm/sun4i/sun4i_lvds.c
+++ b/drivers/gpu/drm/sun4i/sun4i_lvds.c
@@ -20,7 +20,7 @@ struct sun4i_lvds {
20 struct drm_connector connector; 20 struct drm_connector connector;
21 struct drm_encoder encoder; 21 struct drm_encoder encoder;
22 22
23 struct sun4i_tcon *tcon; 23 struct drm_panel *panel;
24}; 24};
25 25
26static inline struct sun4i_lvds * 26static inline struct sun4i_lvds *
@@ -41,9 +41,8 @@ static int sun4i_lvds_get_modes(struct drm_connector *connector)
41{ 41{
42 struct sun4i_lvds *lvds = 42 struct sun4i_lvds *lvds =
43 drm_connector_to_sun4i_lvds(connector); 43 drm_connector_to_sun4i_lvds(connector);
44 struct sun4i_tcon *tcon = lvds->tcon;
45 44
46 return drm_panel_get_modes(tcon->panel); 45 return drm_panel_get_modes(lvds->panel);
47} 46}
48 47
49static struct drm_connector_helper_funcs sun4i_lvds_con_helper_funcs = { 48static struct drm_connector_helper_funcs sun4i_lvds_con_helper_funcs = {
@@ -54,9 +53,8 @@ static void
54sun4i_lvds_connector_destroy(struct drm_connector *connector) 53sun4i_lvds_connector_destroy(struct drm_connector *connector)
55{ 54{
56 struct sun4i_lvds *lvds = drm_connector_to_sun4i_lvds(connector); 55 struct sun4i_lvds *lvds = drm_connector_to_sun4i_lvds(connector);
57 struct sun4i_tcon *tcon = lvds->tcon;
58 56
59 drm_panel_detach(tcon->panel); 57 drm_panel_detach(lvds->panel);
60 drm_connector_cleanup(connector); 58 drm_connector_cleanup(connector);
61} 59}
62 60
@@ -71,26 +69,24 @@ static const struct drm_connector_funcs sun4i_lvds_con_funcs = {
71static void sun4i_lvds_encoder_enable(struct drm_encoder *encoder) 69static void sun4i_lvds_encoder_enable(struct drm_encoder *encoder)
72{ 70{
73 struct sun4i_lvds *lvds = drm_encoder_to_sun4i_lvds(encoder); 71 struct sun4i_lvds *lvds = drm_encoder_to_sun4i_lvds(encoder);
74 struct sun4i_tcon *tcon = lvds->tcon;
75 72
76 DRM_DEBUG_DRIVER("Enabling LVDS output\n"); 73 DRM_DEBUG_DRIVER("Enabling LVDS output\n");
77 74
78 if (tcon->panel) { 75 if (lvds->panel) {
79 drm_panel_prepare(tcon->panel); 76 drm_panel_prepare(lvds->panel);
80 drm_panel_enable(tcon->panel); 77 drm_panel_enable(lvds->panel);
81 } 78 }
82} 79}
83 80
84static void sun4i_lvds_encoder_disable(struct drm_encoder *encoder) 81static void sun4i_lvds_encoder_disable(struct drm_encoder *encoder)
85{ 82{
86 struct sun4i_lvds *lvds = drm_encoder_to_sun4i_lvds(encoder); 83 struct sun4i_lvds *lvds = drm_encoder_to_sun4i_lvds(encoder);
87 struct sun4i_tcon *tcon = lvds->tcon;
88 84
89 DRM_DEBUG_DRIVER("Disabling LVDS output\n"); 85 DRM_DEBUG_DRIVER("Disabling LVDS output\n");
90 86
91 if (tcon->panel) { 87 if (lvds->panel) {
92 drm_panel_disable(tcon->panel); 88 drm_panel_disable(lvds->panel);
93 drm_panel_unprepare(tcon->panel); 89 drm_panel_unprepare(lvds->panel);
94 } 90 }
95} 91}
96 92
@@ -113,11 +109,10 @@ int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon)
113 lvds = devm_kzalloc(drm->dev, sizeof(*lvds), GFP_KERNEL); 109 lvds = devm_kzalloc(drm->dev, sizeof(*lvds), GFP_KERNEL);
114 if (!lvds) 110 if (!lvds)
115 return -ENOMEM; 111 return -ENOMEM;
116 lvds->tcon = tcon;
117 encoder = &lvds->encoder; 112 encoder = &lvds->encoder;
118 113
119 ret = drm_of_find_panel_or_bridge(tcon->dev->of_node, 1, 0, 114 ret = drm_of_find_panel_or_bridge(tcon->dev->of_node, 1, 0,
120 &tcon->panel, &bridge); 115 &lvds->panel, &bridge);
121 if (ret) { 116 if (ret) {
122 dev_info(drm->dev, "No panel or bridge found... LVDS output disabled\n"); 117 dev_info(drm->dev, "No panel or bridge found... LVDS output disabled\n");
123 return 0; 118 return 0;
@@ -138,7 +133,7 @@ int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon)
138 /* The LVDS encoder can only work with the TCON channel 0 */ 133 /* The LVDS encoder can only work with the TCON channel 0 */
139 lvds->encoder.possible_crtcs = drm_crtc_mask(&tcon->crtc->crtc); 134 lvds->encoder.possible_crtcs = drm_crtc_mask(&tcon->crtc->crtc);
140 135
141 if (tcon->panel) { 136 if (lvds->panel) {
142 drm_connector_helper_add(&lvds->connector, 137 drm_connector_helper_add(&lvds->connector,
143 &sun4i_lvds_con_helper_funcs); 138 &sun4i_lvds_con_helper_funcs);
144 ret = drm_connector_init(drm, &lvds->connector, 139 ret = drm_connector_init(drm, &lvds->connector,
@@ -152,7 +147,7 @@ int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon)
152 drm_connector_attach_encoder(&lvds->connector, 147 drm_connector_attach_encoder(&lvds->connector,
153 &lvds->encoder); 148 &lvds->encoder);
154 149
155 ret = drm_panel_attach(tcon->panel, &lvds->connector); 150 ret = drm_panel_attach(lvds->panel, &lvds->connector);
156 if (ret) { 151 if (ret) {
157 dev_err(drm->dev, "Couldn't attach our panel\n"); 152 dev_err(drm->dev, "Couldn't attach our panel\n");
158 goto err_cleanup_connector; 153 goto err_cleanup_connector;
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index cae19e7bbeaa..d9e2502b49fa 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -27,6 +27,8 @@ struct sun4i_rgb {
27 struct drm_encoder encoder; 27 struct drm_encoder encoder;
28 28
29 struct sun4i_tcon *tcon; 29 struct sun4i_tcon *tcon;
30 struct drm_panel *panel;
31 struct drm_bridge *bridge;
30}; 32};
31 33
32static inline struct sun4i_rgb * 34static inline struct sun4i_rgb *
@@ -47,11 +49,18 @@ static int sun4i_rgb_get_modes(struct drm_connector *connector)
47{ 49{
48 struct sun4i_rgb *rgb = 50 struct sun4i_rgb *rgb =
49 drm_connector_to_sun4i_rgb(connector); 51 drm_connector_to_sun4i_rgb(connector);
50 struct sun4i_tcon *tcon = rgb->tcon;
51 52
52 return drm_panel_get_modes(tcon->panel); 53 return drm_panel_get_modes(rgb->panel);
53} 54}
54 55
56/*
57 * VESA DMT defines a tolerance of 0.5% on the pixel clock, while the
58 * CVT spec reuses that tolerance in its examples, so it looks to be a
 59 * good default tolerance for the EDID-based modes. Define it as 5 per
60 * mille to avoid floating point operations.
61 */
62#define SUN4I_RGB_DOTCLOCK_TOLERANCE_PER_MILLE 5
63
55static enum drm_mode_status sun4i_rgb_mode_valid(struct drm_encoder *crtc, 64static enum drm_mode_status sun4i_rgb_mode_valid(struct drm_encoder *crtc,
56 const struct drm_display_mode *mode) 65 const struct drm_display_mode *mode)
57{ 66{
@@ -59,8 +68,9 @@ static enum drm_mode_status sun4i_rgb_mode_valid(struct drm_encoder *crtc,
59 struct sun4i_tcon *tcon = rgb->tcon; 68 struct sun4i_tcon *tcon = rgb->tcon;
60 u32 hsync = mode->hsync_end - mode->hsync_start; 69 u32 hsync = mode->hsync_end - mode->hsync_start;
61 u32 vsync = mode->vsync_end - mode->vsync_start; 70 u32 vsync = mode->vsync_end - mode->vsync_start;
62 unsigned long rate = mode->clock * 1000; 71 unsigned long long rate = mode->clock * 1000;
63 long rounded_rate; 72 unsigned long long lowest, highest;
73 unsigned long long rounded_rate;
64 74
65 DRM_DEBUG_DRIVER("Validating modes...\n"); 75 DRM_DEBUG_DRIVER("Validating modes...\n");
66 76
@@ -92,15 +102,39 @@ static enum drm_mode_status sun4i_rgb_mode_valid(struct drm_encoder *crtc,
92 102
93 DRM_DEBUG_DRIVER("Vertical parameters OK\n"); 103 DRM_DEBUG_DRIVER("Vertical parameters OK\n");
94 104
105 /*
 106 * TODO: We should use the struct display_timing if available,
 107 * and/or try to stretch the timings within that tolerance to
 108 * take care of panels that we wouldn't be able to find an
 109 * exact match for.
110 */
111 if (rgb->panel) {
112 DRM_DEBUG_DRIVER("RGB panel used, skipping clock rate checks");
113 goto out;
114 }
115
116 /*
 117 * That shouldn't ever happen unless something is really wrong, but it
 118 * doesn't hurt to check.
119 */
120 if (!rgb->bridge)
121 goto out;
122
95 tcon->dclk_min_div = 6; 123 tcon->dclk_min_div = 6;
96 tcon->dclk_max_div = 127; 124 tcon->dclk_max_div = 127;
97 rounded_rate = clk_round_rate(tcon->dclk, rate); 125 rounded_rate = clk_round_rate(tcon->dclk, rate);
98 if (rounded_rate < rate) 126
127 lowest = rate * (1000 - SUN4I_RGB_DOTCLOCK_TOLERANCE_PER_MILLE);
128 do_div(lowest, 1000);
129 if (rounded_rate < lowest)
99 return MODE_CLOCK_LOW; 130 return MODE_CLOCK_LOW;
100 131
101 if (rounded_rate > rate) 132 highest = rate * (1000 + SUN4I_RGB_DOTCLOCK_TOLERANCE_PER_MILLE);
133 do_div(highest, 1000);
134 if (rounded_rate > highest)
102 return MODE_CLOCK_HIGH; 135 return MODE_CLOCK_HIGH;
103 136
137out:
104 DRM_DEBUG_DRIVER("Clock rate OK\n"); 138 DRM_DEBUG_DRIVER("Clock rate OK\n");
105 139
106 return MODE_OK; 140 return MODE_OK;
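To make the new tolerance window concrete: for a 51.206 MHz dotclock (the Ronbo panel above), the rounded rate is accepted if it falls within plus or minus 5 per mille. The same integer math as the do_div() sequence, as a standalone sketch:

#include <stdio.h>

#define TOLERANCE_PER_MILLE	5	/* SUN4I_RGB_DOTCLOCK_TOLERANCE_PER_MILLE */

int main(void)
{
	unsigned long long rate = 51206ULL * 1000;	/* mode->clock * 1000 */
	unsigned long long lowest  = rate * (1000 - TOLERANCE_PER_MILLE) / 1000;
	unsigned long long highest = rate * (1000 + TOLERANCE_PER_MILLE) / 1000;

	/* prints: accept 50949970..51462030 Hz */
	printf("accept %llu..%llu Hz\n", lowest, highest);
	return 0;
}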
@@ -114,9 +148,8 @@ static void
114sun4i_rgb_connector_destroy(struct drm_connector *connector) 148sun4i_rgb_connector_destroy(struct drm_connector *connector)
115{ 149{
116 struct sun4i_rgb *rgb = drm_connector_to_sun4i_rgb(connector); 150 struct sun4i_rgb *rgb = drm_connector_to_sun4i_rgb(connector);
117 struct sun4i_tcon *tcon = rgb->tcon;
118 151
119 drm_panel_detach(tcon->panel); 152 drm_panel_detach(rgb->panel);
120 drm_connector_cleanup(connector); 153 drm_connector_cleanup(connector);
121} 154}
122 155
@@ -131,26 +164,24 @@ static const struct drm_connector_funcs sun4i_rgb_con_funcs = {
131static void sun4i_rgb_encoder_enable(struct drm_encoder *encoder) 164static void sun4i_rgb_encoder_enable(struct drm_encoder *encoder)
132{ 165{
133 struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(encoder); 166 struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(encoder);
134 struct sun4i_tcon *tcon = rgb->tcon;
135 167
136 DRM_DEBUG_DRIVER("Enabling RGB output\n"); 168 DRM_DEBUG_DRIVER("Enabling RGB output\n");
137 169
138 if (tcon->panel) { 170 if (rgb->panel) {
139 drm_panel_prepare(tcon->panel); 171 drm_panel_prepare(rgb->panel);
140 drm_panel_enable(tcon->panel); 172 drm_panel_enable(rgb->panel);
141 } 173 }
142} 174}
143 175
144static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder) 176static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder)
145{ 177{
146 struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(encoder); 178 struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(encoder);
147 struct sun4i_tcon *tcon = rgb->tcon;
148 179
149 DRM_DEBUG_DRIVER("Disabling RGB output\n"); 180 DRM_DEBUG_DRIVER("Disabling RGB output\n");
150 181
151 if (tcon->panel) { 182 if (rgb->panel) {
152 drm_panel_disable(tcon->panel); 183 drm_panel_disable(rgb->panel);
153 drm_panel_unprepare(tcon->panel); 184 drm_panel_unprepare(rgb->panel);
154 } 185 }
155} 186}
156 187
@@ -172,7 +203,6 @@ static struct drm_encoder_funcs sun4i_rgb_enc_funcs = {
172int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon) 203int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon)
173{ 204{
174 struct drm_encoder *encoder; 205 struct drm_encoder *encoder;
175 struct drm_bridge *bridge;
176 struct sun4i_rgb *rgb; 206 struct sun4i_rgb *rgb;
177 int ret; 207 int ret;
178 208
@@ -183,7 +213,7 @@ int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon)
183 encoder = &rgb->encoder; 213 encoder = &rgb->encoder;
184 214
185 ret = drm_of_find_panel_or_bridge(tcon->dev->of_node, 1, 0, 215 ret = drm_of_find_panel_or_bridge(tcon->dev->of_node, 1, 0,
186 &tcon->panel, &bridge); 216 &rgb->panel, &rgb->bridge);
187 if (ret) { 217 if (ret) {
188 dev_info(drm->dev, "No panel or bridge found... RGB output disabled\n"); 218 dev_info(drm->dev, "No panel or bridge found... RGB output disabled\n");
189 return 0; 219 return 0;
@@ -204,7 +234,7 @@ int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon)
204 /* The RGB encoder can only work with the TCON channel 0 */ 234 /* The RGB encoder can only work with the TCON channel 0 */
205 rgb->encoder.possible_crtcs = drm_crtc_mask(&tcon->crtc->crtc); 235 rgb->encoder.possible_crtcs = drm_crtc_mask(&tcon->crtc->crtc);
206 236
207 if (tcon->panel) { 237 if (rgb->panel) {
208 drm_connector_helper_add(&rgb->connector, 238 drm_connector_helper_add(&rgb->connector,
209 &sun4i_rgb_con_helper_funcs); 239 &sun4i_rgb_con_helper_funcs);
210 ret = drm_connector_init(drm, &rgb->connector, 240 ret = drm_connector_init(drm, &rgb->connector,
@@ -218,15 +248,15 @@ int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon)
218 drm_connector_attach_encoder(&rgb->connector, 248 drm_connector_attach_encoder(&rgb->connector,
219 &rgb->encoder); 249 &rgb->encoder);
220 250
221 ret = drm_panel_attach(tcon->panel, &rgb->connector); 251 ret = drm_panel_attach(rgb->panel, &rgb->connector);
222 if (ret) { 252 if (ret) {
223 dev_err(drm->dev, "Couldn't attach our panel\n"); 253 dev_err(drm->dev, "Couldn't attach our panel\n");
224 goto err_cleanup_connector; 254 goto err_cleanup_connector;
225 } 255 }
226 } 256 }
227 257
228 if (bridge) { 258 if (rgb->bridge) {
229 ret = drm_bridge_attach(encoder, bridge, NULL); 259 ret = drm_bridge_attach(encoder, rgb->bridge, NULL);
230 if (ret) { 260 if (ret) {
231 dev_err(drm->dev, "Couldn't attach our bridge\n"); 261 dev_err(drm->dev, "Couldn't attach our bridge\n");
232 goto err_cleanup_connector; 262 goto err_cleanup_connector;
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index ca713d200280..fa92e992a282 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -341,8 +341,8 @@ static void sun4i_tcon0_mode_set_cpu(struct sun4i_tcon *tcon,
341 u32 block_space, start_delay; 341 u32 block_space, start_delay;
342 u32 tcon_div; 342 u32 tcon_div;
343 343
344 tcon->dclk_min_div = 4; 344 tcon->dclk_min_div = SUN6I_DSI_TCON_DIV;
345 tcon->dclk_max_div = 127; 345 tcon->dclk_max_div = SUN6I_DSI_TCON_DIV;
346 346
347 sun4i_tcon0_mode_set_common(tcon, mode); 347 sun4i_tcon0_mode_set_common(tcon, mode);
348 348
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index b5214d71610f..84cfb1952ff7 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -257,8 +257,6 @@ struct sun4i_tcon {
257 struct reset_control *lcd_rst; 257 struct reset_control *lcd_rst;
258 struct reset_control *lvds_rst; 258 struct reset_control *lvds_rst;
259 259
260 struct drm_panel *panel;
261
262 /* Platform adjustments */ 260 /* Platform adjustments */
263 const struct sun4i_tcon_quirks *quirks; 261 const struct sun4i_tcon_quirks *quirks;
264 262
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index 318994cd1b85..6ff585055a07 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -24,7 +24,9 @@
24#include <drm/drm_panel.h> 24#include <drm/drm_panel.h>
25#include <drm/drm_probe_helper.h> 25#include <drm/drm_probe_helper.h>
26 26
27#include "sun4i_crtc.h"
27#include "sun4i_drv.h" 28#include "sun4i_drv.h"
29#include "sun4i_tcon.h"
28#include "sun6i_mipi_dsi.h" 30#include "sun6i_mipi_dsi.h"
29 31
30#include <video/mipi_display.h> 32#include <video/mipi_display.h>
@@ -33,6 +35,8 @@
33#define SUN6I_DSI_CTL_EN BIT(0) 35#define SUN6I_DSI_CTL_EN BIT(0)
34 36
35#define SUN6I_DSI_BASIC_CTL_REG 0x00c 37#define SUN6I_DSI_BASIC_CTL_REG 0x00c
38#define SUN6I_DSI_BASIC_CTL_TRAIL_INV(n) (((n) & 0xf) << 4)
39#define SUN6I_DSI_BASIC_CTL_TRAIL_FILL BIT(3)
36#define SUN6I_DSI_BASIC_CTL_HBP_DIS BIT(2) 40#define SUN6I_DSI_BASIC_CTL_HBP_DIS BIT(2)
37#define SUN6I_DSI_BASIC_CTL_HSA_HSE_DIS BIT(1) 41#define SUN6I_DSI_BASIC_CTL_HSA_HSE_DIS BIT(1)
38#define SUN6I_DSI_BASIC_CTL_VIDEO_BURST BIT(0) 42#define SUN6I_DSI_BASIC_CTL_VIDEO_BURST BIT(0)
@@ -153,6 +157,8 @@
153 157
154#define SUN6I_DSI_CMD_TX_REG(n) (0x300 + (n) * 0x04) 158#define SUN6I_DSI_CMD_TX_REG(n) (0x300 + (n) * 0x04)
155 159
160#define SUN6I_DSI_SYNC_POINT 40
161
156enum sun6i_dsi_start_inst { 162enum sun6i_dsi_start_inst {
157 DSI_START_LPRX, 163 DSI_START_LPRX,
158 DSI_START_LPTX, 164 DSI_START_LPTX,
@@ -358,7 +364,54 @@ static void sun6i_dsi_inst_init(struct sun6i_dsi *dsi,
358static u16 sun6i_dsi_get_video_start_delay(struct sun6i_dsi *dsi, 364static u16 sun6i_dsi_get_video_start_delay(struct sun6i_dsi *dsi,
359 struct drm_display_mode *mode) 365 struct drm_display_mode *mode)
360{ 366{
361 return mode->vtotal - (mode->vsync_end - mode->vdisplay) + 1; 367 u16 start = clamp(mode->vtotal - mode->vdisplay - 10, 8, 100);
368 u16 delay = mode->vtotal - (mode->vsync_end - mode->vdisplay) + start;
369
370 if (delay > mode->vtotal)
371 delay = delay % mode->vtotal;
372
373 return max_t(u16, delay, 1);
374}
375
376static u16 sun6i_dsi_get_line_num(struct sun6i_dsi *dsi,
377 struct drm_display_mode *mode)
378{
379 struct mipi_dsi_device *device = dsi->device;
380 unsigned int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8;
381
382 return mode->htotal * Bpp / device->lanes;
383}
384
385static u16 sun6i_dsi_get_drq_edge0(struct sun6i_dsi *dsi,
386 struct drm_display_mode *mode,
387 u16 line_num, u16 edge1)
388{
389 u16 edge0 = edge1;
390
391 edge0 += (mode->hdisplay + 40) * SUN6I_DSI_TCON_DIV / 8;
392
393 if (edge0 > line_num)
394 return edge0 - line_num;
395
396 return 1;
397}
398
399static u16 sun6i_dsi_get_drq_edge1(struct sun6i_dsi *dsi,
400 struct drm_display_mode *mode,
401 u16 line_num)
402{
403 struct mipi_dsi_device *device = dsi->device;
404 unsigned int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8;
405 unsigned int hbp = mode->htotal - mode->hsync_end;
406 u16 edge1;
407
408 edge1 = SUN6I_DSI_SYNC_POINT;
409 edge1 += (mode->hdisplay + hbp + 20) * Bpp / device->lanes;
410
411 if (edge1 > line_num)
412 return line_num;
413
414 return edge1;
362} 415}
363 416
364static void sun6i_dsi_setup_burst(struct sun6i_dsi *dsi, 417static void sun6i_dsi_setup_burst(struct sun6i_dsi *dsi,
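Plugging the rb070d30 mode from earlier in the series into the new helpers gives a feel for the numbers (a standalone sketch with clamp()/max_t() open-coded; RGB888 means Bpp = 3, on 4 lanes):

#include <stdio.h>

static unsigned int clamp_uint(unsigned int v, unsigned int lo, unsigned int hi)
{
	return v < lo ? lo : v > hi ? hi : v;
}

int main(void)
{
	/* rb070d30 timings */
	unsigned int vdisplay = 600, vsync_end = 622, vtotal = 635;
	unsigned int hdisplay = 1024, hsync_end = 1264, htotal = 1344;
	unsigned int Bpp = 3, lanes = 4;
	unsigned int start, delay, line_num, edge0, edge1;

	/* video start delay */
	start = clamp_uint(vtotal - vdisplay - 10, 8, 100);	/* 25 */
	delay = vtotal - (vsync_end - vdisplay) + start;	/* 638 */
	if (delay > vtotal)
		delay %= vtotal;				/* 3 */

	/* DRQ edges, with SUN6I_DSI_SYNC_POINT = 40, SUN6I_DSI_TCON_DIV = 4 */
	line_num = htotal * Bpp / lanes;			/* 1008 */
	edge1 = 40 + (hdisplay + (htotal - hsync_end) + 20) * Bpp / lanes;
	if (edge1 > line_num)
		edge1 = line_num;				/* 883 */
	edge0 = edge1 + (hdisplay + 40) * 4 / 8;		/* 1415 */
	edge0 = edge0 > line_num ? edge0 - line_num : 1;	/* 407 */

	printf("delay %u, line_num %u, edge0 %u, edge1 %u\n",
	       delay ? delay : 1, line_num, edge0, edge1);
	return 0;
}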
@@ -367,7 +420,23 @@ static void sun6i_dsi_setup_burst(struct sun6i_dsi *dsi,
367 struct mipi_dsi_device *device = dsi->device; 420 struct mipi_dsi_device *device = dsi->device;
368 u32 val = 0; 421 u32 val = 0;
369 422
370 if ((mode->hsync_end - mode->hdisplay) > 20) { 423 if (device->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
424 u16 line_num = sun6i_dsi_get_line_num(dsi, mode);
425 u16 edge0, edge1;
426
427 edge1 = sun6i_dsi_get_drq_edge1(dsi, mode, line_num);
428 edge0 = sun6i_dsi_get_drq_edge0(dsi, mode, line_num, edge1);
429
430 regmap_write(dsi->regs, SUN6I_DSI_BURST_DRQ_REG,
431 SUN6I_DSI_BURST_DRQ_EDGE0(edge0) |
432 SUN6I_DSI_BURST_DRQ_EDGE1(edge1));
433
434 regmap_write(dsi->regs, SUN6I_DSI_BURST_LINE_REG,
435 SUN6I_DSI_BURST_LINE_NUM(line_num) |
436 SUN6I_DSI_BURST_LINE_SYNC_POINT(SUN6I_DSI_SYNC_POINT));
437
438 val = SUN6I_DSI_TCON_DRQ_ENABLE_MODE;
439 } else if ((mode->hsync_end - mode->hdisplay) > 20) {
371 /* Maaaaaagic */ 440 /* Maaaaaagic */
372 u16 drq = (mode->hsync_end - mode->hdisplay) - 20; 441 u16 drq = (mode->hsync_end - mode->hdisplay) - 20;
373 442
@@ -384,8 +453,19 @@ static void sun6i_dsi_setup_burst(struct sun6i_dsi *dsi,
384static void sun6i_dsi_setup_inst_loop(struct sun6i_dsi *dsi, 453static void sun6i_dsi_setup_inst_loop(struct sun6i_dsi *dsi,
385 struct drm_display_mode *mode) 454 struct drm_display_mode *mode)
386{ 455{
456 struct mipi_dsi_device *device = dsi->device;
387 u16 delay = 50 - 1; 457 u16 delay = 50 - 1;
388 458
459 if (device->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
460 delay = (mode->htotal - mode->hdisplay) * 150;
461 delay /= (mode->clock / 1000) * 8;
462 delay -= 50;
463 }
464
465 regmap_write(dsi->regs, SUN6I_DSI_INST_LOOP_SEL_REG,
466 2 << (4 * DSI_INST_ID_LP11) |
467 3 << (4 * DSI_INST_ID_DLY));
468
389 regmap_write(dsi->regs, SUN6I_DSI_INST_LOOP_NUM_REG(0), 469 regmap_write(dsi->regs, SUN6I_DSI_INST_LOOP_NUM_REG(0),
390 SUN6I_DSI_INST_LOOP_NUM_N0(50 - 1) | 470 SUN6I_DSI_INST_LOOP_NUM_N0(50 - 1) |
391 SUN6I_DSI_INST_LOOP_NUM_N1(delay)); 471 SUN6I_DSI_INST_LOOP_NUM_N1(delay));
@@ -451,49 +531,68 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
451{ 531{
452 struct mipi_dsi_device *device = dsi->device; 532 struct mipi_dsi_device *device = dsi->device;
453 unsigned int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8; 533 unsigned int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8;
454 u16 hbp, hfp, hsa, hblk, vblk; 534 u16 hbp = 0, hfp = 0, hsa = 0, hblk = 0, vblk = 0;
535 u32 basic_ctl = 0;
455 size_t bytes; 536 size_t bytes;
456 u8 *buffer; 537 u8 *buffer;
457 538
458 /* Do all timing calculations up front to allocate buffer space */ 539 /* Do all timing calculations up front to allocate buffer space */
459 540
460 /* 541 if (device->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
461 * A sync period is composed of a blanking packet (4 bytes + 542 hblk = mode->hdisplay * Bpp;
462 * payload + 2 bytes) and a sync event packet (4 bytes). Its 543 basic_ctl = SUN6I_DSI_BASIC_CTL_VIDEO_BURST |
463 * minimal size is therefore 10 bytes 544 SUN6I_DSI_BASIC_CTL_HSA_HSE_DIS |
464 */ 545 SUN6I_DSI_BASIC_CTL_HBP_DIS;
465#define HSA_PACKET_OVERHEAD 10
466 hsa = max((unsigned int)HSA_PACKET_OVERHEAD,
467 (mode->hsync_end - mode->hsync_start) * Bpp - HSA_PACKET_OVERHEAD);
468 546
469 /* 547 if (device->lanes == 4)
470 * The backporch is set using a blanking packet (4 bytes + 548 basic_ctl |= SUN6I_DSI_BASIC_CTL_TRAIL_FILL |
471 * payload + 2 bytes). Its minimal size is therefore 6 bytes 549 SUN6I_DSI_BASIC_CTL_TRAIL_INV(0xc);
472 */ 550 } else {
551 /*
552 * A sync period is composed of a blanking packet (4
553 * bytes + payload + 2 bytes) and a sync event packet
554 * (4 bytes). Its minimal size is therefore 10 bytes
555 */
556#define HSA_PACKET_OVERHEAD 10
557 hsa = max((unsigned int)HSA_PACKET_OVERHEAD,
558 (mode->hsync_end - mode->hsync_start) * Bpp - HSA_PACKET_OVERHEAD);
559
560 /*
561 * The backporch is set using a blanking packet (4
562 * bytes + payload + 2 bytes). Its minimal size is
563 * therefore 6 bytes
564 */
473#define HBP_PACKET_OVERHEAD 6 565#define HBP_PACKET_OVERHEAD 6
474 hbp = max((unsigned int)HBP_PACKET_OVERHEAD, 566 hbp = max((unsigned int)HBP_PACKET_OVERHEAD,
475 (mode->hsync_start - mode->hdisplay) * Bpp - HBP_PACKET_OVERHEAD); 567 (mode->htotal - mode->hsync_end) * Bpp - HBP_PACKET_OVERHEAD);
476 568
477 /* 569 /*
478 * The frontporch is set using a blanking packet (4 bytes + 570 * The frontporch is set using a blanking packet (4
479 * payload + 2 bytes). Its minimal size is therefore 6 bytes 571 * bytes + payload + 2 bytes). Its minimal size is
480 */ 572 * therefore 6 bytes
573 */
481#define HFP_PACKET_OVERHEAD 6 574#define HFP_PACKET_OVERHEAD 6
482 hfp = max((unsigned int)HFP_PACKET_OVERHEAD, 575 hfp = max((unsigned int)HFP_PACKET_OVERHEAD,
483 (mode->htotal - mode->hsync_end) * Bpp - HFP_PACKET_OVERHEAD); 576 (mode->hsync_start - mode->hdisplay) * Bpp - HFP_PACKET_OVERHEAD);
484 577
485 /* 578 /*
486 * hblk seems to be the line + porches length. 579 * The blanking is set using a sync event (4 bytes)
487 */ 580 * and a blanking packet (4 bytes + payload + 2
488 hblk = mode->htotal * Bpp - hsa; 581 * bytes). Its minimal size is therefore 10 bytes.
489 582 */
490 /* 583#define HBLK_PACKET_OVERHEAD 10
491 * And I'm not entirely sure what vblk is about. The driver in 584 hblk = max((unsigned int)HBLK_PACKET_OVERHEAD,
492 * Allwinner BSP is using a rather convoluted calculation 585 (mode->htotal - (mode->hsync_end - mode->hsync_start)) * Bpp -
493 * there only for 4 lanes. However, using 0 (the !4 lanes 586 HBLK_PACKET_OVERHEAD);
494 * case) even with a 4 lanes screen seems to work... 587
495 */ 588 /*
496 vblk = 0; 589 * And I'm not entirely sure what vblk is about. The driver in
590 * Allwinner BSP is using a rather convoluted calculation
591 * there only for 4 lanes. However, using 0 (the !4 lanes
592 * case) even with a 4 lanes screen seems to work...
593 */
594 vblk = 0;
595 }
497 596
498 /* How many bytes do we need to send all payloads? */ 597 /* How many bytes do we need to send all payloads? */
499 bytes = max_t(size_t, max(max(hfp, hblk), max(hsa, hbp)), vblk); 598 bytes = max_t(size_t, max(max(hfp, hblk), max(hsa, hbp)), vblk);
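For the same panel, the per-line byte budgets work out as follows (illustrative only: the rb070d30 actually sets MIPI_DSI_MODE_VIDEO_BURST, so only the hblk = hdisplay * Bpp branch applies to it; the non-burst numbers use the corrected porch mapping, hbp from htotal - hsync_end and hfp from hsync_start - hdisplay):

#include <stdio.h>

static unsigned int max_uint(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

int main(void)
{
	/* rb070d30 horizontal timings, RGB888 => Bpp = 3 */
	unsigned int hdisplay = 1024, hsync_start = 1184, hsync_end = 1264,
		     htotal = 1344, Bpp = 3;

	/* burst mode: one blanking packet carrying the whole line */
	printf("hblk (burst) = %u\n", hdisplay * Bpp);	/* 3072 */

	/* non-burst branch */
	printf("hsa = %u\n",
	       max_uint(10, (hsync_end - hsync_start) * Bpp - 10));	/* 230 */
	printf("hbp = %u\n",
	       max_uint(6, (htotal - hsync_end) * Bpp - 6));		/* 234 */
	printf("hfp = %u\n",
	       max_uint(6, (hsync_start - hdisplay) * Bpp - 6));	/* 474 */
	return 0;
}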
@@ -501,7 +600,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
501 if (WARN_ON(!buffer)) 600 if (WARN_ON(!buffer))
502 return; 601 return;
503 602
504 regmap_write(dsi->regs, SUN6I_DSI_BASIC_CTL_REG, 0); 603 regmap_write(dsi->regs, SUN6I_DSI_BASIC_CTL_REG, basic_ctl);
505 604
506 regmap_write(dsi->regs, SUN6I_DSI_SYNC_HSS_REG, 605 regmap_write(dsi->regs, SUN6I_DSI_SYNC_HSS_REG,
507 sun6i_dsi_build_sync_pkt(MIPI_DSI_H_SYNC_START, 606 sun6i_dsi_build_sync_pkt(MIPI_DSI_H_SYNC_START,
@@ -526,8 +625,8 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
526 regmap_write(dsi->regs, SUN6I_DSI_BASIC_SIZE0_REG, 625 regmap_write(dsi->regs, SUN6I_DSI_BASIC_SIZE0_REG,
527 SUN6I_DSI_BASIC_SIZE0_VSA(mode->vsync_end - 626 SUN6I_DSI_BASIC_SIZE0_VSA(mode->vsync_end -
528 mode->vsync_start) | 627 mode->vsync_start) |
529 SUN6I_DSI_BASIC_SIZE0_VBP(mode->vsync_start - 628 SUN6I_DSI_BASIC_SIZE0_VBP(mode->vtotal -
530 mode->vdisplay)); 629 mode->vsync_end));
531 630
532 regmap_write(dsi->regs, SUN6I_DSI_BASIC_SIZE1_REG, 631 regmap_write(dsi->regs, SUN6I_DSI_BASIC_SIZE1_REG,
533 SUN6I_DSI_BASIC_SIZE1_VACT(mode->vdisplay) | 632 SUN6I_DSI_BASIC_SIZE1_VACT(mode->vdisplay) |
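The SIZE0 change above is a porch fix: vsync_start - vdisplay is the vertical front porch, not the back porch. A quick check with the rb070d30 timings:

#include <stdio.h>

int main(void)
{
	/* rb070d30 vertical timings */
	unsigned int vdisplay = 600, vsync_start = 612, vsync_end = 622,
		     vtotal = 635;

	printf("VSA = %u\n", vsync_end - vsync_start);		/* 10 */
	printf("old VBP (really the front porch) = %u\n",
	       vsync_start - vdisplay);				/* 12 */
	printf("new VBP = %u\n", vtotal - vsync_end);		/* 13 */
	return 0;
}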
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
index a07090579f84..5c3ad5be0690 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
@@ -13,6 +13,8 @@
13#include <drm/drm_encoder.h> 13#include <drm/drm_encoder.h>
14#include <drm/drm_mipi_dsi.h> 14#include <drm/drm_mipi_dsi.h>
15 15
16#define SUN6I_DSI_TCON_DIV 4
17
16struct sun6i_dsi { 18struct sun6i_dsi {
17 struct drm_connector connector; 19 struct drm_connector connector;
18 struct drm_encoder encoder; 20 struct drm_encoder encoder;
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
index 30a2eff55687..fd20a928cf4d 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
@@ -325,38 +325,22 @@ static struct regmap_config sun8i_mixer_regmap_config = {
325 325
326static int sun8i_mixer_of_get_id(struct device_node *node) 326static int sun8i_mixer_of_get_id(struct device_node *node)
327{ 327{
328 struct device_node *port, *ep; 328 struct device_node *ep, *remote;
329 int ret = -EINVAL; 329 struct of_endpoint of_ep;
330 330
331 /* output is port 1 */ 331 /* Output port is 1, and we want the first endpoint. */
332 port = of_graph_get_port_by_id(node, 1); 332 ep = of_graph_get_endpoint_by_regs(node, 1, -1);
333 if (!port) 333 if (!ep)
334 return -EINVAL; 334 return -EINVAL;
335 335
336 /* try to find downstream endpoint */ 336 remote = of_graph_get_remote_endpoint(ep);
337 for_each_available_child_of_node(port, ep) { 337 of_node_put(ep);
338 struct device_node *remote; 338 if (!remote)
339 u32 reg; 339 return -EINVAL;
340
341 remote = of_graph_get_remote_endpoint(ep);
342 if (!remote)
343 continue;
344
345 ret = of_property_read_u32(remote, "reg", &reg);
346 if (!ret) {
347 of_node_put(remote);
348 of_node_put(ep);
349 of_node_put(port);
350
351 return reg;
352 }
353
354 of_node_put(remote);
355 }
356
357 of_node_put(port);
358 340
359 return ret; 341 of_graph_parse_endpoint(remote, &of_ep);
342 of_node_put(remote);
343 return of_ep.id;
360} 344}
361 345
362static int sun8i_mixer_bind(struct device *dev, struct device *master, 346static int sun8i_mixer_bind(struct device *dev, struct device *master,
@@ -554,6 +538,7 @@ static int sun8i_mixer_remove(struct platform_device *pdev)
554static const struct sun8i_mixer_cfg sun8i_a83t_mixer0_cfg = { 538static const struct sun8i_mixer_cfg sun8i_a83t_mixer0_cfg = {
555 .ccsc = 0, 539 .ccsc = 0,
556 .scaler_mask = 0xf, 540 .scaler_mask = 0xf,
541 .scanline_yuv = 2048,
557 .ui_num = 3, 542 .ui_num = 3,
558 .vi_num = 1, 543 .vi_num = 1,
559}; 544};
@@ -561,6 +546,7 @@ static const struct sun8i_mixer_cfg sun8i_a83t_mixer0_cfg = {
561static const struct sun8i_mixer_cfg sun8i_a83t_mixer1_cfg = { 546static const struct sun8i_mixer_cfg sun8i_a83t_mixer1_cfg = {
562 .ccsc = 1, 547 .ccsc = 1,
563 .scaler_mask = 0x3, 548 .scaler_mask = 0x3,
549 .scanline_yuv = 2048,
564 .ui_num = 1, 550 .ui_num = 1,
565 .vi_num = 1, 551 .vi_num = 1,
566}; 552};
@@ -569,6 +555,7 @@ static const struct sun8i_mixer_cfg sun8i_h3_mixer0_cfg = {
569 .ccsc = 0, 555 .ccsc = 0,
570 .mod_rate = 432000000, 556 .mod_rate = 432000000,
571 .scaler_mask = 0xf, 557 .scaler_mask = 0xf,
558 .scanline_yuv = 2048,
572 .ui_num = 3, 559 .ui_num = 3,
573 .vi_num = 1, 560 .vi_num = 1,
574}; 561};
@@ -577,6 +564,7 @@ static const struct sun8i_mixer_cfg sun8i_r40_mixer0_cfg = {
577 .ccsc = 0, 564 .ccsc = 0,
578 .mod_rate = 297000000, 565 .mod_rate = 297000000,
579 .scaler_mask = 0xf, 566 .scaler_mask = 0xf,
567 .scanline_yuv = 2048,
580 .ui_num = 3, 568 .ui_num = 3,
581 .vi_num = 1, 569 .vi_num = 1,
582}; 570};
@@ -585,6 +573,7 @@ static const struct sun8i_mixer_cfg sun8i_r40_mixer1_cfg = {
585 .ccsc = 1, 573 .ccsc = 1,
586 .mod_rate = 297000000, 574 .mod_rate = 297000000,
587 .scaler_mask = 0x3, 575 .scaler_mask = 0x3,
576 .scanline_yuv = 2048,
588 .ui_num = 1, 577 .ui_num = 1,
589 .vi_num = 1, 578 .vi_num = 1,
590}; 579};
@@ -593,6 +582,7 @@ static const struct sun8i_mixer_cfg sun8i_v3s_mixer_cfg = {
593 .vi_num = 2, 582 .vi_num = 2,
594 .ui_num = 1, 583 .ui_num = 1,
595 .scaler_mask = 0x3, 584 .scaler_mask = 0x3,
585 .scanline_yuv = 2048,
596 .ccsc = 0, 586 .ccsc = 0,
597 .mod_rate = 150000000, 587 .mod_rate = 150000000,
598}; 588};
@@ -601,6 +591,7 @@ static const struct sun8i_mixer_cfg sun50i_a64_mixer0_cfg = {
601 .ccsc = 0, 591 .ccsc = 0,
602 .mod_rate = 297000000, 592 .mod_rate = 297000000,
603 .scaler_mask = 0xf, 593 .scaler_mask = 0xf,
594 .scanline_yuv = 4096,
604 .ui_num = 3, 595 .ui_num = 3,
605 .vi_num = 1, 596 .vi_num = 1,
606}; 597};
@@ -609,6 +600,7 @@ static const struct sun8i_mixer_cfg sun50i_a64_mixer1_cfg = {
609 .ccsc = 1, 600 .ccsc = 1,
610 .mod_rate = 297000000, 601 .mod_rate = 297000000,
611 .scaler_mask = 0x3, 602 .scaler_mask = 0x3,
603 .scanline_yuv = 2048,
612 .ui_num = 1, 604 .ui_num = 1,
613 .vi_num = 1, 605 .vi_num = 1,
614}; 606};
@@ -618,6 +610,7 @@ static const struct sun8i_mixer_cfg sun50i_h6_mixer0_cfg = {
618 .is_de3 = true, 610 .is_de3 = true,
619 .mod_rate = 600000000, 611 .mod_rate = 600000000,
620 .scaler_mask = 0xf, 612 .scaler_mask = 0xf,
613 .scanline_yuv = 4096,
621 .ui_num = 3, 614 .ui_num = 3,
622 .vi_num = 1, 615 .vi_num = 1,
623}; 616};
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h
index 913d14ce68b0..80e084caa084 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.h
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h
@@ -159,6 +159,7 @@ struct de2_fmt_info {
159 * @mod_rate: module clock rate that needs to be set in order to have 159 * @mod_rate: module clock rate that needs to be set in order to have
160 * a functional block. 160 * a functional block.
161 * @is_de3: true, if this is next gen display engine 3.0, false otherwise. 161 * @is_de3: true, if this is next gen display engine 3.0, false otherwise.
 162 * @scanline_yuv: size of a scanline for the VI scaler for YUV formats.
162 */ 163 */
163struct sun8i_mixer_cfg { 164struct sun8i_mixer_cfg {
164 int vi_num; 165 int vi_num;
@@ -167,6 +168,7 @@ struct sun8i_mixer_cfg {
167 int ccsc; 168 int ccsc;
168 unsigned long mod_rate; 169 unsigned long mod_rate;
169 unsigned int is_de3 : 1; 170 unsigned int is_de3 : 1;
171 unsigned int scanline_yuv;
170}; 172};
171 173
172struct sun8i_mixer { 174struct sun8i_mixer {
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
index 8a0616238467..bb8e026d6405 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
@@ -80,6 +80,8 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
80 u32 bld_base, ch_base; 80 u32 bld_base, ch_base;
81 u32 outsize, insize; 81 u32 outsize, insize;
82 u32 hphase, vphase; 82 u32 hphase, vphase;
83 u32 hn = 0, hm = 0;
84 u32 vn = 0, vm = 0;
83 bool subsampled; 85 bool subsampled;
84 86
85 DRM_DEBUG_DRIVER("Updating VI channel %d overlay %d\n", 87 DRM_DEBUG_DRIVER("Updating VI channel %d overlay %d\n",
@@ -137,12 +139,41 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
137 subsampled = format->hsub > 1 || format->vsub > 1; 139 subsampled = format->hsub > 1 || format->vsub > 1;
138 140
139 if (insize != outsize || subsampled || hphase || vphase) { 141 if (insize != outsize || subsampled || hphase || vphase) {
140 u32 hscale, vscale; 142 unsigned int scanline, required;
143 struct drm_display_mode *mode;
144 u32 hscale, vscale, fps;
145 u64 ability;
141 146
142 DRM_DEBUG_DRIVER("HW scaling is enabled\n"); 147 DRM_DEBUG_DRIVER("HW scaling is enabled\n");
143 148
144 hscale = state->src_w / state->crtc_w; 149 mode = &plane->state->crtc->state->mode;
145 vscale = state->src_h / state->crtc_h; 150 fps = (mode->clock * 1000) / (mode->vtotal * mode->htotal);
151 ability = clk_get_rate(mixer->mod_clk);
152 /* BSP algorithm assumes 80% efficiency of VI scaler unit */
153 ability *= 80;
154 do_div(ability, mode->vdisplay * fps * max(src_w, dst_w));
155
156 required = src_h * 100 / dst_h;
157
158 if (ability < required) {
159 DRM_DEBUG_DRIVER("Using vertical coarse scaling\n");
160 vm = src_h;
161 vn = (u32)ability * dst_h / 100;
162 src_h = vn;
163 }
164
 165 /* it seems that every RGB scaler has a buffer for 2048 pixels */
166 scanline = subsampled ? mixer->cfg->scanline_yuv : 2048;
167
168 if (src_w > scanline) {
169 DRM_DEBUG_DRIVER("Using horizontal coarse scaling\n");
170 hm = src_w;
171 hn = scanline;
172 src_w = hn;
173 }
174
175 hscale = (src_w << 16) / dst_w;
176 vscale = (src_h << 16) / dst_h;
146 177
147 sun8i_vi_scaler_setup(mixer, channel, src_w, src_h, dst_w, 178 sun8i_vi_scaler_setup(mixer, channel, src_w, src_h, dst_w,
148 dst_h, hscale, vscale, hphase, vphase, 179 dst_h, hscale, vscale, hphase, vphase,
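The coarse-scaling decision above is driven by a throughput estimate. A standalone sketch with hypothetical numbers (a 3840x2160 plane downscaled to 1920x1080 on a 1080p60 CRTC, using the 297 MHz mod_rate several mixer configs above set) shows vertical coarse scaling kicking in:

#include <stdio.h>

int main(void)
{
	/* hypothetical: 4K plane scaled down to full HD on a 1080p60 CRTC */
	unsigned int src_w = 3840, src_h = 2160, dst_w = 1920, dst_h = 1080;
	unsigned int vdisplay = 1080, fps = 60;
	unsigned long long ability = 297000000ULL;	/* mixer mod_rate */
	unsigned int required, vn, vm;

	/* BSP algorithm assumes 80% efficiency of the VI scaler unit */
	ability *= 80;
	ability /= (unsigned long long)vdisplay * fps *
		   (src_w > dst_w ? src_w : dst_w);	/* 95 */

	required = src_h * 100 / dst_h;			/* 200 */

	if (ability < required) {
		vm = src_h;				/* 2160 */
		vn = (unsigned int)ability * dst_h / 100;	/* 1026 */
		printf("coarse vertical scale: %u -> %u lines\n", vm, vn);
	}
	return 0;
}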
@@ -153,6 +184,23 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
153 sun8i_vi_scaler_enable(mixer, channel, false); 184 sun8i_vi_scaler_enable(mixer, channel, false);
154 } 185 }
155 186
187 regmap_write(mixer->engine.regs,
188 SUN8I_MIXER_CHAN_VI_HDS_Y(ch_base),
189 SUN8I_MIXER_CHAN_VI_DS_N(hn) |
190 SUN8I_MIXER_CHAN_VI_DS_M(hm));
191 regmap_write(mixer->engine.regs,
192 SUN8I_MIXER_CHAN_VI_HDS_UV(ch_base),
193 SUN8I_MIXER_CHAN_VI_DS_N(hn) |
194 SUN8I_MIXER_CHAN_VI_DS_M(hm));
195 regmap_write(mixer->engine.regs,
196 SUN8I_MIXER_CHAN_VI_VDS_Y(ch_base),
197 SUN8I_MIXER_CHAN_VI_DS_N(vn) |
198 SUN8I_MIXER_CHAN_VI_DS_M(vm));
199 regmap_write(mixer->engine.regs,
200 SUN8I_MIXER_CHAN_VI_VDS_UV(ch_base),
201 SUN8I_MIXER_CHAN_VI_DS_N(vn) |
202 SUN8I_MIXER_CHAN_VI_DS_M(vm));
203
156 /* Set base coordinates */ 204 /* Set base coordinates */
157 DRM_DEBUG_DRIVER("Layer destination coordinates X: %d Y: %d\n", 205 DRM_DEBUG_DRIVER("Layer destination coordinates X: %d Y: %d\n",
158 state->dst.x1, state->dst.y1); 206 state->dst.x1, state->dst.y1);
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.h b/drivers/gpu/drm/sun4i/sun8i_vi_layer.h
index 8a5e6d01c85d..a223a4839f45 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.h
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.h
@@ -24,6 +24,14 @@
24 ((base) + 0x30 * (layer) + 0x18 + 4 * (plane)) 24 ((base) + 0x30 * (layer) + 0x18 + 4 * (plane))
25#define SUN8I_MIXER_CHAN_VI_OVL_SIZE(base) \ 25#define SUN8I_MIXER_CHAN_VI_OVL_SIZE(base) \
26 ((base) + 0xe8) 26 ((base) + 0xe8)
27#define SUN8I_MIXER_CHAN_VI_HDS_Y(base) \
28 ((base) + 0xf0)
29#define SUN8I_MIXER_CHAN_VI_HDS_UV(base) \
30 ((base) + 0xf4)
31#define SUN8I_MIXER_CHAN_VI_VDS_Y(base) \
32 ((base) + 0xf8)
33#define SUN8I_MIXER_CHAN_VI_VDS_UV(base) \
34 ((base) + 0xfc)
27 35
28#define SUN8I_MIXER_CHAN_VI_LAYER_ATTR_EN BIT(0) 36#define SUN8I_MIXER_CHAN_VI_LAYER_ATTR_EN BIT(0)
29/* RGB mode should be set for RGB formats and cleared for YCbCr */ 37/* RGB mode should be set for RGB formats and cleared for YCbCr */
@@ -33,6 +41,9 @@
33#define SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MASK GENMASK(31, 24) 41#define SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MASK GENMASK(31, 24)
34#define SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA(x) ((x) << 24) 42#define SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA(x) ((x) << 24)
35 43
44#define SUN8I_MIXER_CHAN_VI_DS_N(x) ((x) << 16)
45#define SUN8I_MIXER_CHAN_VI_DS_M(x) ((x) << 0)
46
36struct sun8i_mixer; 47struct sun8i_mixer;
37 48
38struct sun8i_vi_layer { 49struct sun8i_vi_layer {
diff --git a/drivers/gpu/drm/tinydrm/core/Makefile b/drivers/gpu/drm/tinydrm/core/Makefile
index fb221e6f8885..6f8f764560e0 100644
--- a/drivers/gpu/drm/tinydrm/core/Makefile
+++ b/drivers/gpu/drm/tinydrm/core/Makefile
@@ -1,3 +1,3 @@
1tinydrm-y := tinydrm-core.o tinydrm-pipe.o tinydrm-helpers.o 1tinydrm-y := tinydrm-pipe.o tinydrm-helpers.o
2 2
3obj-$(CONFIG_DRM_TINYDRM) += tinydrm.o 3obj-$(CONFIG_DRM_TINYDRM) += tinydrm.o
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c b/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
deleted file mode 100644
index 554abd5d3b53..000000000000
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
+++ /dev/null
@@ -1,183 +0,0 @@
1/*
2 * Copyright (C) 2016 Noralf Trønnes
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#include <drm/drm_atomic.h>
11#include <drm/drm_atomic_helper.h>
12#include <drm/drm_drv.h>
13#include <drm/drm_fb_helper.h>
14#include <drm/drm_gem_framebuffer_helper.h>
15#include <drm/drm_probe_helper.h>
16#include <drm/drm_print.h>
17#include <drm/tinydrm/tinydrm.h>
18#include <linux/device.h>
19#include <linux/dma-buf.h>
20#include <linux/module.h>
21
22/**
23 * DOC: overview
24 *
25 * This library provides driver helpers for very simple display hardware.
26 *
27 * It is based on &drm_simple_display_pipe coupled with a &drm_connector which
28 * has only one fixed &drm_display_mode. The framebuffers are backed by the
29 * cma helper and have support for framebuffer flushing (dirty).
30 * fbdev support is also included.
31 *
32 */
33
34/**
35 * DOC: core
36 *
37 * The driver allocates &tinydrm_device, initializes it using
38 * devm_tinydrm_init(), sets up the pipeline using tinydrm_display_pipe_init()
39 * and registers the DRM device using devm_tinydrm_register().
40 */
41
42static const struct drm_mode_config_funcs tinydrm_mode_config_funcs = {
43 .fb_create = drm_gem_fb_create_with_dirty,
44 .atomic_check = drm_atomic_helper_check,
45 .atomic_commit = drm_atomic_helper_commit,
46};
47
48static int tinydrm_init(struct device *parent, struct tinydrm_device *tdev,
49 struct drm_driver *driver)
50{
51 struct drm_device *drm;
52
53 /*
 54 * We don't embed drm_device, because that prevents us from using
55 * devm_kzalloc() to allocate tinydrm_device in the driver since
56 * drm_dev_put() frees the structure. The devm_ functions provide
57 * for easy error handling.
58 */
59 drm = drm_dev_alloc(driver, parent);
60 if (IS_ERR(drm))
61 return PTR_ERR(drm);
62
63 tdev->drm = drm;
64 drm->dev_private = tdev;
65 drm_mode_config_init(drm);
66 drm->mode_config.funcs = &tinydrm_mode_config_funcs;
67 drm->mode_config.allow_fb_modifiers = true;
68
69 return 0;
70}
71
72static void tinydrm_fini(struct tinydrm_device *tdev)
73{
74 drm_mode_config_cleanup(tdev->drm);
75 tdev->drm->dev_private = NULL;
76 drm_dev_put(tdev->drm);
77}
78
79static void devm_tinydrm_release(void *data)
80{
81 tinydrm_fini(data);
82}
83
84/**
85 * devm_tinydrm_init - Initialize tinydrm device
86 * @parent: Parent device object
87 * @tdev: tinydrm device
88 * @driver: DRM driver
89 *
 90 * This function initializes @tdev, the underlying DRM device and its
91 * mode_config. Resources will be automatically freed on driver detach (devres)
92 * using drm_mode_config_cleanup() and drm_dev_put().
93 *
94 * Returns:
95 * Zero on success, negative error code on failure.
96 */
97int devm_tinydrm_init(struct device *parent, struct tinydrm_device *tdev,
98 struct drm_driver *driver)
99{
100 int ret;
101
102 ret = tinydrm_init(parent, tdev, driver);
103 if (ret)
104 return ret;
105
106 ret = devm_add_action(parent, devm_tinydrm_release, tdev);
107 if (ret)
108 tinydrm_fini(tdev);
109
110 return ret;
111}
112EXPORT_SYMBOL(devm_tinydrm_init);
113
114static int tinydrm_register(struct tinydrm_device *tdev)
115{
116 struct drm_device *drm = tdev->drm;
117 int ret;
118
119 ret = drm_dev_register(tdev->drm, 0);
120 if (ret)
121 return ret;
122
123 ret = drm_fbdev_generic_setup(drm, 0);
124 if (ret)
125 DRM_ERROR("Failed to initialize fbdev: %d\n", ret);
126
127 return 0;
128}
129
130static void tinydrm_unregister(struct tinydrm_device *tdev)
131{
132 drm_atomic_helper_shutdown(tdev->drm);
133 drm_dev_unregister(tdev->drm);
134}
135
136static void devm_tinydrm_register_release(void *data)
137{
138 tinydrm_unregister(data);
139}
140
141/**
142 * devm_tinydrm_register - Register tinydrm device
143 * @tdev: tinydrm device
144 *
145 * This function registers the underlying DRM device and fbdev.
146 * These resources will be automatically unregistered on driver detach (devres)
147 * and the display pipeline will be disabled.
148 *
149 * Returns:
150 * Zero on success, negative error code on failure.
151 */
152int devm_tinydrm_register(struct tinydrm_device *tdev)
153{
154 struct device *dev = tdev->drm->dev;
155 int ret;
156
157 ret = tinydrm_register(tdev);
158 if (ret)
159 return ret;
160
161 ret = devm_add_action(dev, devm_tinydrm_register_release, tdev);
162 if (ret)
163 tinydrm_unregister(tdev);
164
165 return ret;
166}
167EXPORT_SYMBOL(devm_tinydrm_register);
168
169/**
170 * tinydrm_shutdown - Shutdown tinydrm
171 * @tdev: tinydrm device
172 *
173 * This function makes sure that the display pipeline is disabled.
174 * Used by drivers in their shutdown callback to turn off the display
175 * on machine shutdown and reboot.
176 */
177void tinydrm_shutdown(struct tinydrm_device *tdev)
178{
179 drm_atomic_helper_shutdown(tdev->drm);
180}
181EXPORT_SYMBOL(tinydrm_shutdown);
182
183MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c b/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
index 2737b6fdadc8..d7b38dfb6438 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
@@ -365,3 +365,5 @@ int tinydrm_spi_transfer(struct spi_device *spi, u32 speed_hz,
365EXPORT_SYMBOL(tinydrm_spi_transfer); 365EXPORT_SYMBOL(tinydrm_spi_transfer);
366 366
367#endif /* CONFIG_SPI */ 367#endif /* CONFIG_SPI */
368
369MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
index bb5b1c1e21ba..bb8a7ed8ddf6 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
@@ -13,7 +13,7 @@
13#include <drm/drm_modes.h> 13#include <drm/drm_modes.h>
14#include <drm/drm_probe_helper.h> 14#include <drm/drm_probe_helper.h>
15#include <drm/drm_print.h> 15#include <drm/drm_print.h>
16#include <drm/tinydrm/tinydrm.h> 16#include <drm/drm_simple_kms_helper.h>
17 17
18struct tinydrm_connector { 18struct tinydrm_connector {
19 struct drm_connector base; 19 struct drm_connector base;
@@ -129,7 +129,8 @@ static int tinydrm_rotate_mode(struct drm_display_mode *mode,
129 129
130/** 130/**
131 * tinydrm_display_pipe_init - Initialize display pipe 131 * tinydrm_display_pipe_init - Initialize display pipe
132 * @tdev: tinydrm device 132 * @drm: DRM device
133 * @pipe: Display pipe
133 * @funcs: Display pipe functions 134 * @funcs: Display pipe functions
134 * @connector_type: Connector type 135 * @connector_type: Connector type
135 * @formats: Array of supported formats (DRM_FORMAT\_\*) 136 * @formats: Array of supported formats (DRM_FORMAT\_\*)
@@ -143,16 +144,15 @@ static int tinydrm_rotate_mode(struct drm_display_mode *mode,
143 * Returns: 144 * Returns:
144 * Zero on success, negative error code on failure. 145 * Zero on success, negative error code on failure.
145 */ 146 */
146int 147int tinydrm_display_pipe_init(struct drm_device *drm,
147tinydrm_display_pipe_init(struct tinydrm_device *tdev, 148 struct drm_simple_display_pipe *pipe,
148 const struct drm_simple_display_pipe_funcs *funcs, 149 const struct drm_simple_display_pipe_funcs *funcs,
149 int connector_type, 150 int connector_type,
150 const uint32_t *formats, 151 const uint32_t *formats,
151 unsigned int format_count, 152 unsigned int format_count,
152 const struct drm_display_mode *mode, 153 const struct drm_display_mode *mode,
153 unsigned int rotation) 154 unsigned int rotation)
154{ 155{
155 struct drm_device *drm = tdev->drm;
156 struct drm_display_mode mode_copy; 156 struct drm_display_mode mode_copy;
157 struct drm_connector *connector; 157 struct drm_connector *connector;
158 int ret; 158 int ret;
@@ -177,7 +177,7 @@ tinydrm_display_pipe_init(struct tinydrm_device *tdev,
177 if (IS_ERR(connector)) 177 if (IS_ERR(connector))
178 return PTR_ERR(connector); 178 return PTR_ERR(connector);
179 179
180 return drm_simple_display_pipe_init(drm, &tdev->pipe, funcs, formats, 180 return drm_simple_display_pipe_init(drm, pipe, funcs, formats,
181 format_count, modifiers, connector); 181 format_count, modifiers, connector);
182} 182}
183EXPORT_SYMBOL(tinydrm_display_pipe_init); 183EXPORT_SYMBOL(tinydrm_display_pipe_init);
diff --git a/drivers/gpu/drm/tinydrm/hx8357d.c b/drivers/gpu/drm/tinydrm/hx8357d.c
index 8bbd0beafc6a..fab961dded87 100644
--- a/drivers/gpu/drm/tinydrm/hx8357d.c
+++ b/drivers/gpu/drm/tinydrm/hx8357d.c
@@ -16,7 +16,9 @@
 #include <linux/property.h>
 #include <linux/spi/spi.h>
 
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_modeset_helper.h>
@@ -46,16 +48,18 @@ static void yx240qv29_enable(struct drm_simple_display_pipe *pipe,
 			     struct drm_crtc_state *crtc_state,
 			     struct drm_plane_state *plane_state)
 {
-	struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
-	struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+	struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
 	u8 addr_mode;
-	int ret;
+	int ret, idx;
+
+	if (!drm_dev_enter(pipe->crtc.dev, &idx))
+		return;
 
 	DRM_DEBUG_KMS("\n");
 
 	ret = mipi_dbi_poweron_conditional_reset(mipi);
 	if (ret < 0)
-		return;
+		goto out_exit;
 	if (ret == 1)
 		goto out_enable;
 
@@ -171,6 +175,8 @@ out_enable:
 	}
 	mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
 	mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
+out_exit:
+	drm_dev_exit(idx);
 }
 
 static const struct drm_simple_display_pipe_funcs hx8357d_pipe_funcs = {
@@ -181,7 +187,7 @@ static const struct drm_simple_display_pipe_funcs hx8357d_pipe_funcs = {
 };
 
 static const struct drm_display_mode yx350hv15_mode = {
-	TINYDRM_MODE(320, 480, 60, 75),
+	DRM_SIMPLE_MODE(320, 480, 60, 75),
 };
 
 DEFINE_DRM_GEM_CMA_FOPS(hx8357d_fops);
@@ -189,6 +195,7 @@ DEFINE_DRM_GEM_CMA_FOPS(hx8357d_fops);
 static struct drm_driver hx8357d_driver = {
 	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
 	.fops = &hx8357d_fops,
+	.release = mipi_dbi_release,
 	DRM_GEM_CMA_VMAP_DRIVER_OPS,
 	.debugfs_init = mipi_dbi_debugfs_init,
 	.name = "hx8357d",
@@ -213,15 +220,25 @@ MODULE_DEVICE_TABLE(spi, hx8357d_id);
 static int hx8357d_probe(struct spi_device *spi)
 {
 	struct device *dev = &spi->dev;
+	struct drm_device *drm;
 	struct mipi_dbi *mipi;
 	struct gpio_desc *dc;
 	u32 rotation = 0;
 	int ret;
 
-	mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL);
+	mipi = kzalloc(sizeof(*mipi), GFP_KERNEL);
 	if (!mipi)
 		return -ENOMEM;
 
+	drm = &mipi->drm;
+	ret = devm_drm_dev_init(dev, drm, &hx8357d_driver);
+	if (ret) {
+		kfree(mipi);
+		return ret;
+	}
+
+	drm_mode_config_init(drm);
+
 	dc = devm_gpiod_get(dev, "dc", GPIOD_OUT_LOW);
 	if (IS_ERR(dc)) {
 		DRM_DEV_ERROR(dev, "Failed to get gpio 'dc'\n");
@@ -238,21 +255,36 @@ static int hx8357d_probe(struct spi_device *spi)
 	if (ret)
 		return ret;
 
-	ret = mipi_dbi_init(&spi->dev, mipi, &hx8357d_pipe_funcs,
-			    &hx8357d_driver, &yx350hv15_mode, rotation);
+	ret = mipi_dbi_init(mipi, &hx8357d_pipe_funcs, &yx350hv15_mode, rotation);
 	if (ret)
 		return ret;
 
-	spi_set_drvdata(spi, mipi);
+	drm_mode_config_reset(drm);
 
-	return devm_tinydrm_register(&mipi->tinydrm);
+	ret = drm_dev_register(drm, 0);
+	if (ret)
+		return ret;
+
+	spi_set_drvdata(spi, drm);
+
+	drm_fbdev_generic_setup(drm, 32);
+
+	return 0;
 }
 
-static void hx8357d_shutdown(struct spi_device *spi)
+static int hx8357d_remove(struct spi_device *spi)
 {
-	struct mipi_dbi *mipi = spi_get_drvdata(spi);
+	struct drm_device *drm = spi_get_drvdata(spi);
+
+	drm_dev_unplug(drm);
+	drm_atomic_helper_shutdown(drm);
 
-	tinydrm_shutdown(&mipi->tinydrm);
+	return 0;
+}
+
+static void hx8357d_shutdown(struct spi_device *spi)
+{
+	drm_atomic_helper_shutdown(spi_get_drvdata(spi));
 }
 
 static struct spi_driver hx8357d_spi_driver = {
@@ -262,6 +294,7 @@ static struct spi_driver hx8357d_spi_driver = {
 	},
 	.id_table = hx8357d_id,
 	.probe = hx8357d_probe,
+	.remove = hx8357d_remove,
 	.shutdown = hx8357d_shutdown,
 };
 module_spi_driver(hx8357d_spi_driver);
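The same conversion shape repeats in every driver below, so the ownership rules are worth spelling out once. The following is an illustrative sketch only, not code from this series; the foo_* names are placeholders and error handling is trimmed to the essentials:

#include <linux/slab.h>
#include <linux/spi/spi.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>

struct foo_priv {
	struct drm_device drm;	/* embedded, upcast via container_of() */
};

static void foo_release(struct drm_device *drm)
{
	struct foo_priv *priv = container_of(drm, struct foo_priv, drm);

	/* runs after the last drm_dev_put(), possibly long after unbind */
	drm_mode_config_cleanup(drm);
	drm_dev_fini(drm);
	kfree(priv);
}

static struct drm_driver foo_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	.release = foo_release,
};

static int foo_probe(struct spi_device *spi)
{
	struct foo_priv *priv;
	int ret;

	/*
	 * kzalloc(), not devm_kzalloc(): userspace can keep the device
	 * open across unbind, so the memory must live until ->release.
	 */
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	ret = devm_drm_dev_init(&spi->dev, &priv->drm, &foo_driver);
	if (ret) {
		kfree(priv);	/* ->release is not called on init failure */
		return ret;
	}

	drm_mode_config_init(&priv->drm);
	spi_set_drvdata(spi, &priv->drm);

	return drm_dev_register(&priv->drm, 0);
}

static int foo_remove(struct spi_device *spi)
{
	struct drm_device *drm = spi_get_drvdata(spi);

	drm_dev_unplug(drm);	/* device is gone; open fds stay valid */
	drm_atomic_helper_shutdown(drm);

	return 0;
}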
diff --git a/drivers/gpu/drm/tinydrm/ili9225.c b/drivers/gpu/drm/tinydrm/ili9225.c
index 43a3b68d90a2..e9116ef4b5bc 100644
--- a/drivers/gpu/drm/tinydrm/ili9225.c
+++ b/drivers/gpu/drm/tinydrm/ili9225.c
@@ -20,9 +20,11 @@
 #include <linux/spi/spi.h>
 #include <video/mipi_display.h>
 
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_damage_helper.h>
 #include <drm/drm_drv.h>
 #include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
 #include <drm/drm_fourcc.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
@@ -81,20 +83,22 @@ static inline int ili9225_command(struct mipi_dbi *mipi, u8 cmd, u16 data)
 static void ili9225_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
 {
 	struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
-	struct tinydrm_device *tdev = fb->dev->dev_private;
-	struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+	struct mipi_dbi *mipi = drm_to_mipi_dbi(fb->dev);
 	unsigned int height = rect->y2 - rect->y1;
 	unsigned int width = rect->x2 - rect->x1;
 	bool swap = mipi->swap_bytes;
 	u16 x_start, y_start;
 	u16 x1, x2, y1, y2;
-	int ret = 0;
+	int idx, ret = 0;
 	bool full;
 	void *tr;
 
 	if (!mipi->enabled)
 		return;
 
+	if (!drm_dev_enter(fb->dev, &idx))
+		return;
+
 	full = width == fb->width && height == fb->height;
 
 	DRM_DEBUG_KMS("Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));
@@ -157,6 +161,8 @@ static void ili9225_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
 err_msg:
 	if (ret)
 		dev_err_once(fb->dev->dev, "Failed to update display %d\n", ret);
+
+	drm_dev_exit(idx);
 }
 
 static void ili9225_pipe_update(struct drm_simple_display_pipe *pipe,
@@ -181,19 +187,21 @@ static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe,
 				struct drm_crtc_state *crtc_state,
 				struct drm_plane_state *plane_state)
 {
-	struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
-	struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+	struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
 	struct drm_framebuffer *fb = plane_state->fb;
-	struct device *dev = tdev->drm->dev;
+	struct device *dev = pipe->crtc.dev->dev;
 	struct drm_rect rect = {
 		.x1 = 0,
 		.x2 = fb->width,
 		.y1 = 0,
 		.y2 = fb->height,
 	};
-	int ret;
+	int ret, idx;
 	u8 am_id;
 
+	if (!drm_dev_enter(pipe->crtc.dev, &idx))
+		return;
+
 	DRM_DEBUG_KMS("\n");
 
 	mipi_dbi_hw_reset(mipi);
@@ -207,7 +215,7 @@ static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe,
 	ret = ili9225_command(mipi, ILI9225_POWER_CONTROL_1, 0x0000);
 	if (ret) {
 		DRM_DEV_ERROR(dev, "Error sending command %d\n", ret);
-		return;
+		goto out_exit;
 	}
 	ili9225_command(mipi, ILI9225_POWER_CONTROL_2, 0x0000);
 	ili9225_command(mipi, ILI9225_POWER_CONTROL_3, 0x0000);
@@ -280,15 +288,23 @@
 
 	mipi->enabled = true;
 	ili9225_fb_dirty(fb, &rect);
+out_exit:
+	drm_dev_exit(idx);
 }
 
 static void ili9225_pipe_disable(struct drm_simple_display_pipe *pipe)
 {
-	struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
-	struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+	struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
 
 	DRM_DEBUG_KMS("\n");
 
+	/*
+	 * This callback is not protected by drm_dev_enter/exit since we want to
+	 * turn off the display on regular driver unload. It's highly unlikely
+	 * that the underlying SPI controller is gone should this be called after
+	 * unplug.
+	 */
+
 	if (!mipi->enabled)
 		return;
 
@@ -301,7 +317,7 @@ static void ili9225_pipe_disable(struct drm_simple_display_pipe *pipe)
 	mipi->enabled = false;
 }
 
-static int ili9225_dbi_command(struct mipi_dbi *mipi, u8 cmd, u8 *par,
+static int ili9225_dbi_command(struct mipi_dbi *mipi, u8 *cmd, u8 *par,
 			       size_t num)
 {
 	struct spi_device *spi = mipi->spi;
@@ -311,11 +327,11 @@ static int ili9225_dbi_command(struct mipi_dbi *mipi, u8 cmd, u8 *par,
 
 	gpiod_set_value_cansleep(mipi->dc, 0);
 	speed_hz = mipi_dbi_spi_cmd_max_speed(spi, 1);
-	ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, &cmd, 1);
+	ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, cmd, 1);
 	if (ret || !num)
 		return ret;
 
-	if (cmd == ILI9225_WRITE_DATA_TO_GRAM && !mipi->swap_bytes)
+	if (*cmd == ILI9225_WRITE_DATA_TO_GRAM && !mipi->swap_bytes)
 		bpw = 16;
 
 	gpiod_set_value_cansleep(mipi->dc, 1);
@@ -332,7 +348,7 @@ static const struct drm_simple_display_pipe_funcs ili9225_pipe_funcs = {
 };
 
 static const struct drm_display_mode ili9225_mode = {
-	TINYDRM_MODE(176, 220, 35, 44),
+	DRM_SIMPLE_MODE(176, 220, 35, 44),
 };
 
 DEFINE_DRM_GEM_CMA_FOPS(ili9225_fops);
@@ -341,6 +357,7 @@ static struct drm_driver ili9225_driver = {
 	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
 			   DRIVER_ATOMIC,
 	.fops = &ili9225_fops,
+	.release = mipi_dbi_release,
 	DRM_GEM_CMA_VMAP_DRIVER_OPS,
 	.name = "ili9225",
 	.desc = "Ilitek ILI9225",
@@ -364,15 +381,25 @@ MODULE_DEVICE_TABLE(spi, ili9225_id);
 static int ili9225_probe(struct spi_device *spi)
 {
 	struct device *dev = &spi->dev;
+	struct drm_device *drm;
 	struct mipi_dbi *mipi;
 	struct gpio_desc *rs;
 	u32 rotation = 0;
 	int ret;
 
-	mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL);
+	mipi = kzalloc(sizeof(*mipi), GFP_KERNEL);
 	if (!mipi)
 		return -ENOMEM;
 
+	drm = &mipi->drm;
+	ret = devm_drm_dev_init(dev, drm, &ili9225_driver);
+	if (ret) {
+		kfree(mipi);
+		return ret;
+	}
+
+	drm_mode_config_init(drm);
+
 	mipi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
 	if (IS_ERR(mipi->reset)) {
 		DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
@@ -394,21 +421,36 @@ static int ili9225_probe(struct spi_device *spi)
 	/* override the command function set in mipi_dbi_spi_init() */
 	mipi->command = ili9225_dbi_command;
 
-	ret = mipi_dbi_init(&spi->dev, mipi, &ili9225_pipe_funcs,
-			    &ili9225_driver, &ili9225_mode, rotation);
+	ret = mipi_dbi_init(mipi, &ili9225_pipe_funcs, &ili9225_mode, rotation);
 	if (ret)
 		return ret;
 
-	spi_set_drvdata(spi, mipi);
+	drm_mode_config_reset(drm);
+
+	ret = drm_dev_register(drm, 0);
+	if (ret)
+		return ret;
+
+	spi_set_drvdata(spi, drm);
+
+	drm_fbdev_generic_setup(drm, 32);
 
-	return devm_tinydrm_register(&mipi->tinydrm);
+	return 0;
 }
 
-static void ili9225_shutdown(struct spi_device *spi)
+static int ili9225_remove(struct spi_device *spi)
 {
-	struct mipi_dbi *mipi = spi_get_drvdata(spi);
+	struct drm_device *drm = spi_get_drvdata(spi);
+
+	drm_dev_unplug(drm);
+	drm_atomic_helper_shutdown(drm);
+
+	return 0;
+}
 
-	tinydrm_shutdown(&mipi->tinydrm);
+static void ili9225_shutdown(struct spi_device *spi)
+{
+	drm_atomic_helper_shutdown(spi_get_drvdata(spi));
 }
 
 static struct spi_driver ili9225_spi_driver = {
@@ -419,6 +461,7 @@ static struct spi_driver ili9225_spi_driver = {
 	},
 	.id_table = ili9225_id,
 	.probe = ili9225_probe,
+	.remove = ili9225_remove,
 	.shutdown = ili9225_shutdown,
 };
 module_spi_driver(ili9225_spi_driver);
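ili9225 overrides ->command, which is why its callback signature changes here as well: the command byte now arrives as a pointer to a heap buffer so it can be handed straight to the SPI core. As a hedged illustration of what the mipi-dbi core (further down in this series) does before invoking the callback — condensed from mipi_dbi_command_buf(), send_command_dma_safe is a made-up name:

static int send_command_dma_safe(struct mipi_dbi *mipi, u8 cmd,
				 u8 *data, size_t len)
{
	u8 *cmdbuf;
	int ret;

	/*
	 * &cmd would point into this stack frame, which is not DMA-safe.
	 * kmemdup() hands the ->command implementation (and thus the SPI
	 * core) a buffer it may legally map for DMA.
	 */
	cmdbuf = kmemdup(&cmd, 1, GFP_KERNEL);
	if (!cmdbuf)
		return -ENOMEM;

	ret = mipi->command(mipi, cmdbuf, data, len);
	kfree(cmdbuf);

	return ret;
}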
diff --git a/drivers/gpu/drm/tinydrm/ili9341.c b/drivers/gpu/drm/tinydrm/ili9341.c
index 713bb2dd7e04..d15f85e837ae 100644
--- a/drivers/gpu/drm/tinydrm/ili9341.c
+++ b/drivers/gpu/drm/tinydrm/ili9341.c
@@ -15,7 +15,9 @@
 #include <linux/property.h>
 #include <linux/spi/spi.h>
 
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_modeset_helper.h>
@@ -52,16 +54,18 @@ static void yx240qv29_enable(struct drm_simple_display_pipe *pipe,
 			     struct drm_crtc_state *crtc_state,
 			     struct drm_plane_state *plane_state)
 {
-	struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
-	struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+	struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
 	u8 addr_mode;
-	int ret;
+	int ret, idx;
+
+	if (!drm_dev_enter(pipe->crtc.dev, &idx))
+		return;
 
 	DRM_DEBUG_KMS("\n");
 
 	ret = mipi_dbi_poweron_conditional_reset(mipi);
 	if (ret < 0)
-		return;
+		goto out_exit;
 	if (ret == 1)
 		goto out_enable;
 
@@ -127,6 +131,8 @@ out_enable:
 	addr_mode |= ILI9341_MADCTL_BGR;
 	mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
 	mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
+out_exit:
+	drm_dev_exit(idx);
 }
 
 static const struct drm_simple_display_pipe_funcs ili9341_pipe_funcs = {
@@ -137,7 +143,7 @@ static const struct drm_simple_display_pipe_funcs ili9341_pipe_funcs = {
 };
 
 static const struct drm_display_mode yx240qv29_mode = {
-	TINYDRM_MODE(240, 320, 37, 49),
+	DRM_SIMPLE_MODE(240, 320, 37, 49),
 };
 
 DEFINE_DRM_GEM_CMA_FOPS(ili9341_fops);
@@ -145,6 +151,7 @@ DEFINE_DRM_GEM_CMA_FOPS(ili9341_fops);
 static struct drm_driver ili9341_driver = {
 	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
 	.fops = &ili9341_fops,
+	.release = mipi_dbi_release,
 	DRM_GEM_CMA_VMAP_DRIVER_OPS,
 	.debugfs_init = mipi_dbi_debugfs_init,
 	.name = "ili9341",
@@ -169,15 +176,25 @@ MODULE_DEVICE_TABLE(spi, ili9341_id);
 static int ili9341_probe(struct spi_device *spi)
 {
 	struct device *dev = &spi->dev;
+	struct drm_device *drm;
 	struct mipi_dbi *mipi;
 	struct gpio_desc *dc;
 	u32 rotation = 0;
 	int ret;
 
-	mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL);
+	mipi = kzalloc(sizeof(*mipi), GFP_KERNEL);
 	if (!mipi)
 		return -ENOMEM;
 
+	drm = &mipi->drm;
+	ret = devm_drm_dev_init(dev, drm, &ili9341_driver);
+	if (ret) {
+		kfree(mipi);
+		return ret;
+	}
+
+	drm_mode_config_init(drm);
+
 	mipi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
 	if (IS_ERR(mipi->reset)) {
 		DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
@@ -200,21 +217,36 @@ static int ili9341_probe(struct spi_device *spi)
 	if (ret)
 		return ret;
 
-	ret = mipi_dbi_init(&spi->dev, mipi, &ili9341_pipe_funcs,
-			    &ili9341_driver, &yx240qv29_mode, rotation);
+	ret = mipi_dbi_init(mipi, &ili9341_pipe_funcs, &yx240qv29_mode, rotation);
 	if (ret)
 		return ret;
 
-	spi_set_drvdata(spi, mipi);
+	drm_mode_config_reset(drm);
 
-	return devm_tinydrm_register(&mipi->tinydrm);
+	ret = drm_dev_register(drm, 0);
+	if (ret)
+		return ret;
+
+	spi_set_drvdata(spi, drm);
+
+	drm_fbdev_generic_setup(drm, 32);
+
+	return 0;
 }
 
-static void ili9341_shutdown(struct spi_device *spi)
+static int ili9341_remove(struct spi_device *spi)
 {
-	struct mipi_dbi *mipi = spi_get_drvdata(spi);
+	struct drm_device *drm = spi_get_drvdata(spi);
+
+	drm_dev_unplug(drm);
+	drm_atomic_helper_shutdown(drm);
 
-	tinydrm_shutdown(&mipi->tinydrm);
+	return 0;
+}
+
+static void ili9341_shutdown(struct spi_device *spi)
+{
+	drm_atomic_helper_shutdown(spi_get_drvdata(spi));
 }
 
 static struct spi_driver ili9341_spi_driver = {
@@ -224,6 +256,7 @@ static struct spi_driver ili9341_spi_driver = {
 	},
 	.id_table = ili9341_id,
 	.probe = ili9341_probe,
+	.remove = ili9341_remove,
 	.shutdown = ili9341_shutdown,
 };
 module_spi_driver(ili9341_spi_driver);
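All of the enable and dirty paths in these conversions gain the same guard against racing with driver unbind. A minimal sketch of the idiom, with bar_hw_touch as a placeholder name:

static void bar_hw_touch(struct drm_device *drm)
{
	int idx;

	/*
	 * drm_dev_enter() fails once drm_dev_unplug() has run, so a path
	 * guarded this way never touches hardware that is already gone.
	 */
	if (!drm_dev_enter(drm, &idx))
		return;

	/* ... register/SPI access goes here ... */

	drm_dev_exit(idx);	/* ends the SRCU read-side section */
}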
diff --git a/drivers/gpu/drm/tinydrm/mi0283qt.c b/drivers/gpu/drm/tinydrm/mi0283qt.c
index 82a92ec9ae3c..c6dc31084a4e 100644
--- a/drivers/gpu/drm/tinydrm/mi0283qt.c
+++ b/drivers/gpu/drm/tinydrm/mi0283qt.c
@@ -17,7 +17,9 @@
 #include <linux/regulator/consumer.h>
 #include <linux/spi/spi.h>
 
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_modeset_helper.h>
@@ -54,16 +56,18 @@ static void mi0283qt_enable(struct drm_simple_display_pipe *pipe,
 			    struct drm_crtc_state *crtc_state,
 			    struct drm_plane_state *plane_state)
 {
-	struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
-	struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+	struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
 	u8 addr_mode;
-	int ret;
+	int ret, idx;
+
+	if (!drm_dev_enter(pipe->crtc.dev, &idx))
+		return;
 
 	DRM_DEBUG_KMS("\n");
 
 	ret = mipi_dbi_poweron_conditional_reset(mipi);
 	if (ret < 0)
-		return;
+		goto out_exit;
 	if (ret == 1)
 		goto out_enable;
 
@@ -135,6 +139,8 @@ out_enable:
 	addr_mode |= ILI9341_MADCTL_BGR;
 	mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
 	mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
+out_exit:
+	drm_dev_exit(idx);
 }
 
 static const struct drm_simple_display_pipe_funcs mi0283qt_pipe_funcs = {
@@ -145,7 +151,7 @@ static const struct drm_simple_display_pipe_funcs mi0283qt_pipe_funcs = {
 };
 
 static const struct drm_display_mode mi0283qt_mode = {
-	TINYDRM_MODE(320, 240, 58, 43),
+	DRM_SIMPLE_MODE(320, 240, 58, 43),
 };
 
 DEFINE_DRM_GEM_CMA_FOPS(mi0283qt_fops);
@@ -154,6 +160,7 @@ static struct drm_driver mi0283qt_driver = {
 	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
 			   DRIVER_ATOMIC,
 	.fops = &mi0283qt_fops,
+	.release = mipi_dbi_release,
 	DRM_GEM_CMA_VMAP_DRIVER_OPS,
 	.debugfs_init = mipi_dbi_debugfs_init,
 	.name = "mi0283qt",
@@ -178,15 +185,25 @@ MODULE_DEVICE_TABLE(spi, mi0283qt_id);
 static int mi0283qt_probe(struct spi_device *spi)
 {
 	struct device *dev = &spi->dev;
+	struct drm_device *drm;
 	struct mipi_dbi *mipi;
 	struct gpio_desc *dc;
 	u32 rotation = 0;
 	int ret;
 
-	mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL);
+	mipi = kzalloc(sizeof(*mipi), GFP_KERNEL);
 	if (!mipi)
 		return -ENOMEM;
 
+	drm = &mipi->drm;
+	ret = devm_drm_dev_init(dev, drm, &mi0283qt_driver);
+	if (ret) {
+		kfree(mipi);
+		return ret;
+	}
+
+	drm_mode_config_init(drm);
+
 	mipi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
 	if (IS_ERR(mipi->reset)) {
 		DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
@@ -213,35 +230,46 @@ static int mi0283qt_probe(struct spi_device *spi)
 	if (ret)
 		return ret;
 
-	ret = mipi_dbi_init(&spi->dev, mipi, &mi0283qt_pipe_funcs,
-			    &mi0283qt_driver, &mi0283qt_mode, rotation);
+	ret = mipi_dbi_init(mipi, &mi0283qt_pipe_funcs, &mi0283qt_mode, rotation);
+	if (ret)
+		return ret;
+
+	drm_mode_config_reset(drm);
+
+	ret = drm_dev_register(drm, 0);
 	if (ret)
 		return ret;
 
-	spi_set_drvdata(spi, mipi);
+	spi_set_drvdata(spi, drm);
 
-	return devm_tinydrm_register(&mipi->tinydrm);
+	drm_fbdev_generic_setup(drm, 32);
+
+	return 0;
 }
 
-static void mi0283qt_shutdown(struct spi_device *spi)
+static int mi0283qt_remove(struct spi_device *spi)
 {
-	struct mipi_dbi *mipi = spi_get_drvdata(spi);
+	struct drm_device *drm = spi_get_drvdata(spi);
 
-	tinydrm_shutdown(&mipi->tinydrm);
+	drm_dev_unplug(drm);
+	drm_atomic_helper_shutdown(drm);
+
+	return 0;
 }
 
-static int __maybe_unused mi0283qt_pm_suspend(struct device *dev)
+static void mi0283qt_shutdown(struct spi_device *spi)
 {
-	struct mipi_dbi *mipi = dev_get_drvdata(dev);
+	drm_atomic_helper_shutdown(spi_get_drvdata(spi));
+}
 
-	return drm_mode_config_helper_suspend(mipi->tinydrm.drm);
+static int __maybe_unused mi0283qt_pm_suspend(struct device *dev)
+{
+	return drm_mode_config_helper_suspend(dev_get_drvdata(dev));
 }
 
 static int __maybe_unused mi0283qt_pm_resume(struct device *dev)
 {
-	struct mipi_dbi *mipi = dev_get_drvdata(dev);
-
-	drm_mode_config_helper_resume(mipi->tinydrm.drm);
+	drm_mode_config_helper_resume(dev_get_drvdata(dev));
 
 	return 0;
 }
@@ -259,6 +287,7 @@ static struct spi_driver mi0283qt_spi_driver = {
 	},
 	.id_table = mi0283qt_id,
 	.probe = mi0283qt_probe,
+	.remove = mi0283qt_remove,
 	.shutdown = mi0283qt_shutdown,
 };
 module_spi_driver(mi0283qt_spi_driver);
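Because drvdata now stores the drm_device itself, the PM callbacks collapse to direct helper calls. A sketch of the resulting boilerplate under that assumption (baz_* names are hypothetical):

static int __maybe_unused baz_pm_suspend(struct device *dev)
{
	/* dev_get_drvdata() returns the struct drm_device set at probe */
	return drm_mode_config_helper_suspend(dev_get_drvdata(dev));
}

static int __maybe_unused baz_pm_resume(struct device *dev)
{
	drm_mode_config_helper_resume(dev_get_drvdata(dev));

	return 0;
}

static SIMPLE_DEV_PM_OPS(baz_pm_ops, baz_pm_suspend, baz_pm_resume);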
diff --git a/drivers/gpu/drm/tinydrm/mipi-dbi.c b/drivers/gpu/drm/tinydrm/mipi-dbi.c
index 918f77c7de34..869c8f56da3b 100644
--- a/drivers/gpu/drm/tinydrm/mipi-dbi.c
+++ b/drivers/gpu/drm/tinydrm/mipi-dbi.c
@@ -153,16 +153,42 @@ EXPORT_SYMBOL(mipi_dbi_command_read);
  */
 int mipi_dbi_command_buf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len)
 {
+	u8 *cmdbuf;
 	int ret;
 
+	/* SPI requires dma-safe buffers */
+	cmdbuf = kmemdup(&cmd, 1, GFP_KERNEL);
+	if (!cmdbuf)
+		return -ENOMEM;
+
 	mutex_lock(&mipi->cmdlock);
-	ret = mipi->command(mipi, cmd, data, len);
+	ret = mipi->command(mipi, cmdbuf, data, len);
 	mutex_unlock(&mipi->cmdlock);
 
+	kfree(cmdbuf);
+
 	return ret;
 }
 EXPORT_SYMBOL(mipi_dbi_command_buf);
 
+/* This should only be used by mipi_dbi_command() */
+int mipi_dbi_command_stackbuf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len)
+{
+	u8 *buf;
+	int ret;
+
+	buf = kmemdup(data, len, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	ret = mipi_dbi_command_buf(mipi, cmd, buf, len);
+
+	kfree(buf);
+
+	return ret;
+}
+EXPORT_SYMBOL(mipi_dbi_command_stackbuf);
+
 /**
  * mipi_dbi_buf_copy - Copy a framebuffer, transforming it if necessary
  * @dst: The destination buffer
@@ -216,18 +242,20 @@ EXPORT_SYMBOL(mipi_dbi_buf_copy);
 static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
 {
 	struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
-	struct tinydrm_device *tdev = fb->dev->dev_private;
-	struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+	struct mipi_dbi *mipi = drm_to_mipi_dbi(fb->dev);
 	unsigned int height = rect->y2 - rect->y1;
 	unsigned int width = rect->x2 - rect->x1;
 	bool swap = mipi->swap_bytes;
-	int ret = 0;
+	int idx, ret = 0;
 	bool full;
 	void *tr;
 
 	if (!mipi->enabled)
 		return;
 
+	if (!drm_dev_enter(fb->dev, &idx))
+		return;
+
 	full = width == fb->width && height == fb->height;
 
 	DRM_DEBUG_KMS("Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));
@@ -254,6 +282,8 @@ static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
 err_msg:
 	if (ret)
 		dev_err_once(fb->dev->dev, "Failed to update display %d\n", ret);
+
+	drm_dev_exit(idx);
 }
 
 /**
@@ -308,19 +338,29 @@ void mipi_dbi_enable_flush(struct mipi_dbi *mipi,
 		.y1 = 0,
 		.y2 = fb->height,
 	};
+	int idx;
+
+	if (!drm_dev_enter(&mipi->drm, &idx))
+		return;
 
 	mipi->enabled = true;
 	mipi_dbi_fb_dirty(fb, &rect);
 	backlight_enable(mipi->backlight);
+
+	drm_dev_exit(idx);
 }
 EXPORT_SYMBOL(mipi_dbi_enable_flush);
 
 static void mipi_dbi_blank(struct mipi_dbi *mipi)
 {
-	struct drm_device *drm = mipi->tinydrm.drm;
+	struct drm_device *drm = &mipi->drm;
 	u16 height = drm->mode_config.min_height;
 	u16 width = drm->mode_config.min_width;
 	size_t len = width * height * 2;
+	int idx;
+
+	if (!drm_dev_enter(drm, &idx))
+		return;
 
 	memset(mipi->tx_buf, 0, len);
 
@@ -330,6 +370,8 @@ static void mipi_dbi_blank(struct mipi_dbi *mipi)
 		     (height >> 8) & 0xFF, (height - 1) & 0xFF);
 	mipi_dbi_command_buf(mipi, MIPI_DCS_WRITE_MEMORY_START,
 			     (u8 *)mipi->tx_buf, len);
+
+	drm_dev_exit(idx);
 }
 
 /**
@@ -342,8 +384,10 @@ static void mipi_dbi_blank(struct mipi_dbi *mipi)
  */
 void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe)
 {
-	struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
-	struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+	struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
+
+	if (!mipi->enabled)
+		return;
 
 	DRM_DEBUG_KMS("\n");
 
@@ -359,6 +403,12 @@ void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe)
 }
 EXPORT_SYMBOL(mipi_dbi_pipe_disable);
 
+static const struct drm_mode_config_funcs mipi_dbi_mode_config_funcs = {
+	.fb_create = drm_gem_fb_create_with_dirty,
+	.atomic_check = drm_atomic_helper_check,
+	.atomic_commit = drm_atomic_helper_commit,
+};
+
 static const uint32_t mipi_dbi_formats[] = {
 	DRM_FORMAT_RGB565,
 	DRM_FORMAT_XRGB8888,
@@ -366,31 +416,27 @@
 
 /**
  * mipi_dbi_init - MIPI DBI initialization
- * @dev: Parent device
  * @mipi: &mipi_dbi structure to initialize
- * @pipe_funcs: Display pipe functions
- * @driver: DRM driver
+ * @funcs: Display pipe functions
 * @mode: Display mode
 * @rotation: Initial rotation in degrees Counter Clock Wise
 *
- * This function initializes a &mipi_dbi structure and it's underlying
- * @tinydrm_device. It also sets up the display pipeline.
+ * This function sets up a &drm_simple_display_pipe with a &drm_connector that
+ * has one fixed &drm_display_mode which is rotated according to @rotation.
+ * This mode is used to set the mode config min/max width/height properties.
+ * Additionally &mipi_dbi.tx_buf is allocated.
 *
 * Supported formats: Native RGB565 and emulated XRGB8888.
 *
- * Objects created by this function will be automatically freed on driver
- * detach (devres).
- *
 * Returns:
 * Zero on success, negative error code on failure.
 */
-int mipi_dbi_init(struct device *dev, struct mipi_dbi *mipi,
-		  const struct drm_simple_display_pipe_funcs *pipe_funcs,
-		  struct drm_driver *driver,
+int mipi_dbi_init(struct mipi_dbi *mipi,
+		  const struct drm_simple_display_pipe_funcs *funcs,
 		  const struct drm_display_mode *mode, unsigned int rotation)
 {
 	size_t bufsize = mode->vdisplay * mode->hdisplay * sizeof(u16);
-	struct tinydrm_device *tdev = &mipi->tinydrm;
+	struct drm_device *drm = &mipi->drm;
 	int ret;
 
 	if (!mipi->command)
@@ -398,16 +444,12 @@ int mipi_dbi_init(struct device *dev, struct mipi_dbi *mipi,
 
 	mutex_init(&mipi->cmdlock);
 
-	mipi->tx_buf = devm_kmalloc(dev, bufsize, GFP_KERNEL);
+	mipi->tx_buf = devm_kmalloc(drm->dev, bufsize, GFP_KERNEL);
 	if (!mipi->tx_buf)
 		return -ENOMEM;
 
-	ret = devm_tinydrm_init(dev, tdev, driver);
-	if (ret)
-		return ret;
-
 	/* TODO: Maybe add DRM_MODE_CONNECTOR_SPI */
-	ret = tinydrm_display_pipe_init(tdev, pipe_funcs,
+	ret = tinydrm_display_pipe_init(drm, &mipi->pipe, funcs,
 					DRM_MODE_CONNECTOR_VIRTUAL,
 					mipi_dbi_formats,
 					ARRAY_SIZE(mipi_dbi_formats), mode,
@@ -415,21 +457,40 @@
 	if (ret)
 		return ret;
 
-	drm_plane_enable_fb_damage_clips(&tdev->pipe.plane);
+	drm_plane_enable_fb_damage_clips(&mipi->pipe.plane);
 
-	tdev->drm->mode_config.preferred_depth = 16;
+	drm->mode_config.funcs = &mipi_dbi_mode_config_funcs;
+	drm->mode_config.preferred_depth = 16;
 	mipi->rotation = rotation;
 
-	drm_mode_config_reset(tdev->drm);
-
 	DRM_DEBUG_KMS("preferred_depth=%u, rotation = %u\n",
-		      tdev->drm->mode_config.preferred_depth, rotation);
+		      drm->mode_config.preferred_depth, rotation);
 
 	return 0;
 }
 EXPORT_SYMBOL(mipi_dbi_init);
 
 /**
+ * mipi_dbi_release - DRM driver release helper
+ * @drm: DRM device
+ *
+ * This function finalizes and frees &mipi_dbi.
+ *
+ * Drivers can use this as their &drm_driver->release callback.
+ */
+void mipi_dbi_release(struct drm_device *drm)
+{
+	struct mipi_dbi *dbi = drm_to_mipi_dbi(drm);
+
+	DRM_DEBUG_DRIVER("\n");
+
+	drm_mode_config_cleanup(drm);
+	drm_dev_fini(drm);
+	kfree(dbi);
+}
+EXPORT_SYMBOL(mipi_dbi_release);
+
+/**
 * mipi_dbi_hw_reset - Hardware reset of controller
 * @mipi: MIPI DBI structure
 *
@@ -481,7 +542,7 @@ EXPORT_SYMBOL(mipi_dbi_display_is_on);
 
 static int mipi_dbi_poweron_reset_conditional(struct mipi_dbi *mipi, bool cond)
 {
-	struct device *dev = mipi->tinydrm.drm->dev;
+	struct device *dev = mipi->drm.dev;
 	int ret;
 
 	if (mipi->regulator) {
@@ -774,18 +835,18 @@ static int mipi_dbi_spi1_transfer(struct mipi_dbi *mipi, int dc,
 	return 0;
 }
 
-static int mipi_dbi_typec1_command(struct mipi_dbi *mipi, u8 cmd,
+static int mipi_dbi_typec1_command(struct mipi_dbi *mipi, u8 *cmd,
 				   u8 *parameters, size_t num)
 {
-	unsigned int bpw = (cmd == MIPI_DCS_WRITE_MEMORY_START) ? 16 : 8;
+	unsigned int bpw = (*cmd == MIPI_DCS_WRITE_MEMORY_START) ? 16 : 8;
 	int ret;
 
-	if (mipi_dbi_command_is_read(mipi, cmd))
+	if (mipi_dbi_command_is_read(mipi, *cmd))
 		return -ENOTSUPP;
 
-	MIPI_DBI_DEBUG_COMMAND(cmd, parameters, num);
+	MIPI_DBI_DEBUG_COMMAND(*cmd, parameters, num);
 
-	ret = mipi_dbi_spi1_transfer(mipi, 0, &cmd, 1, 8);
+	ret = mipi_dbi_spi1_transfer(mipi, 0, cmd, 1, 8);
 	if (ret || !num)
 		return ret;
 
@@ -794,7 +855,7 @@
 
 /* MIPI DBI Type C Option 3 */
 
-static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd,
+static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 *cmd,
 					u8 *data, size_t len)
 {
 	struct spi_device *spi = mipi->spi;
@@ -803,7 +864,7 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd,
 	struct spi_transfer tr[2] = {
 		{
 			.speed_hz = speed_hz,
-			.tx_buf = &cmd,
+			.tx_buf = cmd,
 			.len = 1,
 		}, {
 			.speed_hz = speed_hz,
@@ -821,8 +882,8 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd,
 	 * Support non-standard 24-bit and 32-bit Nokia read commands which
 	 * start with a dummy clock, so we need to read an extra byte.
 	 */
-	if (cmd == MIPI_DCS_GET_DISPLAY_ID ||
-	    cmd == MIPI_DCS_GET_DISPLAY_STATUS) {
+	if (*cmd == MIPI_DCS_GET_DISPLAY_ID ||
+	    *cmd == MIPI_DCS_GET_DISPLAY_STATUS) {
 		if (!(len == 3 || len == 4))
 			return -EINVAL;
 
@@ -852,7 +913,7 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd,
 		data[i] = (buf[i] << 1) | !!(buf[i + 1] & BIT(7));
 	}
 
-	MIPI_DBI_DEBUG_COMMAND(cmd, data, len);
+	MIPI_DBI_DEBUG_COMMAND(*cmd, data, len);
 
 err_free:
 	kfree(buf);
@@ -860,7 +921,7 @@ err_free:
 	return ret;
 }
 
-static int mipi_dbi_typec3_command(struct mipi_dbi *mipi, u8 cmd,
+static int mipi_dbi_typec3_command(struct mipi_dbi *mipi, u8 *cmd,
 				   u8 *par, size_t num)
 {
 	struct spi_device *spi = mipi->spi;
@@ -868,18 +929,18 @@ static int mipi_dbi_typec3_command(struct mipi_dbi *mipi, u8 cmd,
 	u32 speed_hz;
 	int ret;
 
-	if (mipi_dbi_command_is_read(mipi, cmd))
+	if (mipi_dbi_command_is_read(mipi, *cmd))
 		return mipi_dbi_typec3_command_read(mipi, cmd, par, num);
 
-	MIPI_DBI_DEBUG_COMMAND(cmd, par, num);
+	MIPI_DBI_DEBUG_COMMAND(*cmd, par, num);
 
 	gpiod_set_value_cansleep(mipi->dc, 0);
 	speed_hz = mipi_dbi_spi_cmd_max_speed(spi, 1);
-	ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, &cmd, 1);
+	ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, cmd, 1);
 	if (ret || !num)
 		return ret;
 
-	if (cmd == MIPI_DCS_WRITE_MEMORY_START && !mipi->swap_bytes)
+	if (*cmd == MIPI_DCS_WRITE_MEMORY_START && !mipi->swap_bytes)
 		bpw = 16;
 
 	gpiod_set_value_cansleep(mipi->dc, 1);
@@ -926,7 +987,7 @@ int mipi_dbi_spi_init(struct spi_device *spi, struct mipi_dbi *mipi,
 	 * Even though it's not the SPI device that does DMA (the master does),
 	 * the dma mask is necessary for the dma_alloc_wc() in
 	 * drm_gem_cma_create(). The dma_addr returned will be a physical
-	 * adddress which might be different from the bus address, but this is
+	 * address which might be different from the bus address, but this is
 	 * not a problem since the address will not be used.
 	 * The virtual address is used in the transfer and the SPI core
 	 * re-maps it on the SPI master device using the DMA streaming API
@@ -976,11 +1037,16 @@ static ssize_t mipi_dbi_debugfs_command_write(struct file *file,
 	u8 val, cmd = 0, parameters[64];
 	char *buf, *pos, *token;
 	unsigned int i;
-	int ret;
+	int ret, idx;
+
+	if (!drm_dev_enter(&mipi->drm, &idx))
+		return -ENODEV;
 
 	buf = memdup_user_nul(ubuf, count);
-	if (IS_ERR(buf))
-		return PTR_ERR(buf);
+	if (IS_ERR(buf)) {
+		ret = PTR_ERR(buf);
+		goto err_exit;
+	}
 
 	/* strip trailing whitespace */
 	for (i = count - 1; i > 0; i--)
@@ -1016,6 +1082,8 @@ static ssize_t mipi_dbi_debugfs_command_write(struct file *file,
 
 err_free:
 	kfree(buf);
+err_exit:
+	drm_dev_exit(idx);
 
 	return ret < 0 ? ret : count;
 }
@@ -1024,8 +1092,11 @@ static int mipi_dbi_debugfs_command_show(struct seq_file *m, void *unused)
 {
 	struct mipi_dbi *mipi = m->private;
 	u8 cmd, val[4];
+	int ret, idx;
 	size_t len;
-	int ret;
+
+	if (!drm_dev_enter(&mipi->drm, &idx))
+		return -ENODEV;
 
 	for (cmd = 0; cmd < 255; cmd++) {
 		if (!mipi_dbi_command_is_read(mipi, cmd))
@@ -1056,6 +1127,8 @@ static int mipi_dbi_debugfs_command_show(struct seq_file *m, void *unused)
 		seq_printf(m, "%*phN\n", (int)len, val);
 	}
 
+	drm_dev_exit(idx);
+
 	return 0;
 }
 
@@ -1088,8 +1161,7 @@ static const struct file_operations mipi_dbi_debugfs_command_fops = {
 */
int mipi_dbi_debugfs_init(struct drm_minor *minor)
{
-	struct tinydrm_device *tdev = minor->dev->dev_private;
-	struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+	struct mipi_dbi *mipi = drm_to_mipi_dbi(minor->dev);
 	umode_t mode = S_IFREG | S_IWUSR;
 
 	if (mipi->read_commands)
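mipi_dbi_command() is a macro that gathers its arguments into a stack array, which is why the new mipi_dbi_command_stackbuf() above duplicates the parameter buffer before anything reaches SPI. A usage sketch under that assumption (example_set_column is a made-up caller, not part of this patch):

static void example_set_column(struct mipi_dbi *mipi, u16 start, u16 end)
{
	/* 'par' lives on this stack frame and is not DMA-safe ... */
	u8 par[4] = { start >> 8, start & 0xff, end >> 8, end & 0xff };

	/* ... so the helper kmemdup()s it before mipi_dbi_command_buf() */
	mipi_dbi_command_stackbuf(mipi, MIPI_DCS_SET_COLUMN_ADDRESS,
				  par, sizeof(par));
}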
diff --git a/drivers/gpu/drm/tinydrm/repaper.c b/drivers/gpu/drm/tinydrm/repaper.c
index b037c6540cf3..3f3632457079 100644
--- a/drivers/gpu/drm/tinydrm/repaper.c
+++ b/drivers/gpu/drm/tinydrm/repaper.c
@@ -26,14 +26,16 @@
26#include <linux/spi/spi.h> 26#include <linux/spi/spi.h>
27#include <linux/thermal.h> 27#include <linux/thermal.h>
28 28
29#include <drm/drm_atomic_helper.h>
29#include <drm/drm_damage_helper.h> 30#include <drm/drm_damage_helper.h>
30#include <drm/drm_drv.h> 31#include <drm/drm_drv.h>
31#include <drm/drm_fb_cma_helper.h> 32#include <drm/drm_fb_cma_helper.h>
33#include <drm/drm_fb_helper.h>
32#include <drm/drm_gem_cma_helper.h> 34#include <drm/drm_gem_cma_helper.h>
33#include <drm/drm_gem_framebuffer_helper.h> 35#include <drm/drm_gem_framebuffer_helper.h>
34#include <drm/drm_rect.h> 36#include <drm/drm_rect.h>
35#include <drm/drm_vblank.h> 37#include <drm/drm_vblank.h>
36#include <drm/tinydrm/tinydrm.h> 38#include <drm/drm_simple_kms_helper.h>
37#include <drm/tinydrm/tinydrm-helpers.h> 39#include <drm/tinydrm/tinydrm-helpers.h>
38 40
39#define REPAPER_RID_G2_COG_ID 0x12 41#define REPAPER_RID_G2_COG_ID 0x12
@@ -59,7 +61,8 @@ enum repaper_epd_border_byte {
59}; 61};
60 62
61struct repaper_epd { 63struct repaper_epd {
62 struct tinydrm_device tinydrm; 64 struct drm_device drm;
65 struct drm_simple_display_pipe pipe;
63 struct spi_device *spi; 66 struct spi_device *spi;
64 67
65 struct gpio_desc *panel_on; 68 struct gpio_desc *panel_on;
@@ -88,10 +91,9 @@ struct repaper_epd {
88 bool partial; 91 bool partial;
89}; 92};
90 93
91static inline struct repaper_epd * 94static inline struct repaper_epd *drm_to_epd(struct drm_device *drm)
92epd_from_tinydrm(struct tinydrm_device *tdev)
93{ 95{
94 return container_of(tdev, struct repaper_epd, tinydrm); 96 return container_of(drm, struct repaper_epd, drm);
95} 97}
96 98
97static int repaper_spi_transfer(struct spi_device *spi, u8 header, 99static int repaper_spi_transfer(struct spi_device *spi, u8 header,
@@ -529,11 +531,16 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb)
529{ 531{
530 struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0); 532 struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
531 struct dma_buf_attachment *import_attach = cma_obj->base.import_attach; 533 struct dma_buf_attachment *import_attach = cma_obj->base.import_attach;
532 struct tinydrm_device *tdev = fb->dev->dev_private; 534 struct repaper_epd *epd = drm_to_epd(fb->dev);
533 struct repaper_epd *epd = epd_from_tinydrm(tdev);
534 struct drm_rect clip; 535 struct drm_rect clip;
536 int idx, ret = 0;
535 u8 *buf = NULL; 537 u8 *buf = NULL;
536 int ret = 0; 538
539 if (!epd->enabled)
540 return 0;
541
542 if (!drm_dev_enter(fb->dev, &idx))
543 return -ENODEV;
537 544
538 /* repaper can't do partial updates */ 545 /* repaper can't do partial updates */
539 clip.x1 = 0; 546 clip.x1 = 0;
@@ -541,17 +548,16 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb)
541 clip.y1 = 0; 548 clip.y1 = 0;
542 clip.y2 = fb->height; 549 clip.y2 = fb->height;
543 550
544 if (!epd->enabled)
545 return 0;
546
547 repaper_get_temperature(epd); 551 repaper_get_temperature(epd);
548 552
549 DRM_DEBUG("Flushing [FB:%d] st=%ums\n", fb->base.id, 553 DRM_DEBUG("Flushing [FB:%d] st=%ums\n", fb->base.id,
550 epd->factored_stage_time); 554 epd->factored_stage_time);
551 555
552 buf = kmalloc_array(fb->width, fb->height, GFP_KERNEL); 556 buf = kmalloc_array(fb->width, fb->height, GFP_KERNEL);
553 if (!buf) 557 if (!buf) {
554 return -ENOMEM; 558 ret = -ENOMEM;
559 goto out_exit;
560 }
555 561
556 if (import_attach) { 562 if (import_attach) {
557 ret = dma_buf_begin_cpu_access(import_attach->dmabuf, 563 ret = dma_buf_begin_cpu_access(import_attach->dmabuf,
@@ -620,6 +626,8 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb)
620 626
621out_free: 627out_free:
622 kfree(buf); 628 kfree(buf);
629out_exit:
630 drm_dev_exit(idx);
623 631
624 return ret; 632 return ret;
625} 633}
@@ -645,12 +653,14 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
645 struct drm_crtc_state *crtc_state, 653 struct drm_crtc_state *crtc_state,
646 struct drm_plane_state *plane_state) 654 struct drm_plane_state *plane_state)
647{ 655{
648 struct tinydrm_device *tdev = pipe_to_tinydrm(pipe); 656 struct repaper_epd *epd = drm_to_epd(pipe->crtc.dev);
649 struct repaper_epd *epd = epd_from_tinydrm(tdev);
650 struct spi_device *spi = epd->spi; 657 struct spi_device *spi = epd->spi;
651 struct device *dev = &spi->dev; 658 struct device *dev = &spi->dev;
652 bool dc_ok = false; 659 bool dc_ok = false;
653 int i, ret; 660 int i, ret, idx;
661
662 if (!drm_dev_enter(pipe->crtc.dev, &idx))
663 return;
654 664
655 DRM_DEBUG_DRIVER("\n"); 665 DRM_DEBUG_DRIVER("\n");
656 666
@@ -689,7 +699,7 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
689 if (!i) { 699 if (!i) {
690 DRM_DEV_ERROR(dev, "timeout waiting for panel to become ready.\n"); 700 DRM_DEV_ERROR(dev, "timeout waiting for panel to become ready.\n");
691 power_off(epd); 701 power_off(epd);
692 return; 702 goto out_exit;
693 } 703 }
694 704
695 repaper_read_id(spi); 705 repaper_read_id(spi);
@@ -700,7 +710,7 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
700 else 710 else
701 dev_err(dev, "wrong COG ID 0x%02x\n", ret); 711 dev_err(dev, "wrong COG ID 0x%02x\n", ret);
702 power_off(epd); 712 power_off(epd);
703 return; 713 goto out_exit;
704 } 714 }
705 715
706 /* Disable OE */ 716 /* Disable OE */
@@ -713,7 +723,7 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
713 else 723 else
714 DRM_DEV_ERROR(dev, "panel is reported broken\n"); 724 DRM_DEV_ERROR(dev, "panel is reported broken\n");
715 power_off(epd); 725 power_off(epd);
716 return; 726 goto out_exit;
717 } 727 }
718 728
719 /* Power saving mode */ 729 /* Power saving mode */
@@ -753,7 +763,7 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
753 if (ret < 0) { 763 if (ret < 0) {
754 DRM_DEV_ERROR(dev, "failed to read chip (%d)\n", ret); 764 DRM_DEV_ERROR(dev, "failed to read chip (%d)\n", ret);
755 power_off(epd); 765 power_off(epd);
756 return; 766 goto out_exit;
757 } 767 }
758 768
759 if (ret & 0x40) { 769 if (ret & 0x40) {
@@ -765,7 +775,7 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
765 if (!dc_ok) { 775 if (!dc_ok) {
766 DRM_DEV_ERROR(dev, "dc/dc failed\n"); 776 DRM_DEV_ERROR(dev, "dc/dc failed\n");
767 power_off(epd); 777 power_off(epd);
768 return; 778 goto out_exit;
769 } 779 }
770 780
771 /* 781 /*
@@ -776,15 +786,26 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
776 786
777 epd->enabled = true; 787 epd->enabled = true;
778 epd->partial = false; 788 epd->partial = false;
789out_exit:
790 drm_dev_exit(idx);
779} 791}
780 792
781static void repaper_pipe_disable(struct drm_simple_display_pipe *pipe) 793static void repaper_pipe_disable(struct drm_simple_display_pipe *pipe)
782{ 794{
783 struct tinydrm_device *tdev = pipe_to_tinydrm(pipe); 795 struct repaper_epd *epd = drm_to_epd(pipe->crtc.dev);
784 struct repaper_epd *epd = epd_from_tinydrm(tdev);
785 struct spi_device *spi = epd->spi; 796 struct spi_device *spi = epd->spi;
786 unsigned int line; 797 unsigned int line;
787 798
799 /*
800 * This callback is not protected by drm_dev_enter/exit since we want to
801 * turn off the display on regular driver unload. It's highly unlikely
802 * that the underlying SPI controller is gone should this be called after
803 * unplug.
804 */
805
806 if (!epd->enabled)
807 return;
808
788 DRM_DEBUG_DRIVER("\n"); 809 DRM_DEBUG_DRIVER("\n");
789 810
790 epd->enabled = false; 811 epd->enabled = false;
@@ -855,33 +876,50 @@ static const struct drm_simple_display_pipe_funcs repaper_pipe_funcs = {
855 .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb, 876 .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
856}; 877};
857 878
879static const struct drm_mode_config_funcs repaper_mode_config_funcs = {
880 .fb_create = drm_gem_fb_create_with_dirty,
881 .atomic_check = drm_atomic_helper_check,
882 .atomic_commit = drm_atomic_helper_commit,
883};
884
885static void repaper_release(struct drm_device *drm)
886{
887 struct repaper_epd *epd = drm_to_epd(drm);
888
889 DRM_DEBUG_DRIVER("\n");
890
891 drm_mode_config_cleanup(drm);
892 drm_dev_fini(drm);
893 kfree(epd);
894}
895
858static const uint32_t repaper_formats[] = { 896static const uint32_t repaper_formats[] = {
859 DRM_FORMAT_XRGB8888, 897 DRM_FORMAT_XRGB8888,
860}; 898};
861 899
862static const struct drm_display_mode repaper_e1144cs021_mode = { 900static const struct drm_display_mode repaper_e1144cs021_mode = {
863 TINYDRM_MODE(128, 96, 29, 22), 901 DRM_SIMPLE_MODE(128, 96, 29, 22),
864}; 902};
865 903
866static const u8 repaper_e1144cs021_cs[] = { 0x00, 0x00, 0x00, 0x00, 904static const u8 repaper_e1144cs021_cs[] = { 0x00, 0x00, 0x00, 0x00,
867 0x00, 0x0f, 0xff, 0x00 }; 905 0x00, 0x0f, 0xff, 0x00 };
868 906
869static const struct drm_display_mode repaper_e1190cs021_mode = { 907static const struct drm_display_mode repaper_e1190cs021_mode = {
870 TINYDRM_MODE(144, 128, 36, 32), 908 DRM_SIMPLE_MODE(144, 128, 36, 32),
871}; 909};
872 910
 static const u8 repaper_e1190cs021_cs[] = { 0x00, 0x00, 0x00, 0x03,
					     0xfc, 0x00, 0x00, 0xff };
 
 static const struct drm_display_mode repaper_e2200cs021_mode = {
-	TINYDRM_MODE(200, 96, 46, 22),
+	DRM_SIMPLE_MODE(200, 96, 46, 22),
 };
 
 static const u8 repaper_e2200cs021_cs[] = { 0x00, 0x00, 0x00, 0x00,
					     0x01, 0xff, 0xe0, 0x00 };
 
 static const struct drm_display_mode repaper_e2271cs021_mode = {
-	TINYDRM_MODE(264, 176, 57, 38),
+	DRM_SIMPLE_MODE(264, 176, 57, 38),
 };
 
 static const u8 repaper_e2271cs021_cs[] = { 0x00, 0x00, 0x00, 0x7f,
@@ -893,6 +931,7 @@ static struct drm_driver repaper_driver = {
 	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
			   DRIVER_ATOMIC,
 	.fops = &repaper_fops,
+	.release = repaper_release,
 	DRM_GEM_CMA_VMAP_DRIVER_OPS,
 	.name = "repaper",
 	.desc = "Pervasive Displays RePaper e-ink panels",
@@ -925,11 +964,11 @@ static int repaper_probe(struct spi_device *spi)
 	const struct spi_device_id *spi_id;
 	const struct of_device_id *match;
 	struct device *dev = &spi->dev;
-	struct tinydrm_device *tdev;
 	enum repaper_model model;
 	const char *thermal_zone;
 	struct repaper_epd *epd;
 	size_t line_buffer_size;
+	struct drm_device *drm;
 	int ret;
 
 	match = of_match_device(repaper_of_match, dev);
@@ -949,10 +988,21 @@ static int repaper_probe(struct spi_device *spi)
 		}
 	}
 
-	epd = devm_kzalloc(dev, sizeof(*epd), GFP_KERNEL);
+	epd = kzalloc(sizeof(*epd), GFP_KERNEL);
 	if (!epd)
 		return -ENOMEM;
 
+	drm = &epd->drm;
+
+	ret = devm_drm_dev_init(dev, drm, &repaper_driver);
+	if (ret) {
+		kfree(epd);
+		return ret;
+	}
+
+	drm_mode_config_init(drm);
+	drm->mode_config.funcs = &repaper_mode_config_funcs;
+
 	epd->spi = spi;
 
 	epd->panel_on = devm_gpiod_get(dev, "panel-on", GPIOD_OUT_LOW);
@@ -1063,32 +1113,41 @@ static int repaper_probe(struct spi_device *spi)
 	if (!epd->current_frame)
 		return -ENOMEM;
 
-	tdev = &epd->tinydrm;
-
-	ret = devm_tinydrm_init(dev, tdev, &repaper_driver);
-	if (ret)
-		return ret;
-
-	ret = tinydrm_display_pipe_init(tdev, &repaper_pipe_funcs,
+	ret = tinydrm_display_pipe_init(drm, &epd->pipe, &repaper_pipe_funcs,
					DRM_MODE_CONNECTOR_VIRTUAL,
					repaper_formats,
					ARRAY_SIZE(repaper_formats), mode, 0);
 	if (ret)
 		return ret;
 
-	drm_mode_config_reset(tdev->drm);
-	spi_set_drvdata(spi, tdev);
+	drm_mode_config_reset(drm);
+
+	ret = drm_dev_register(drm, 0);
+	if (ret)
+		return ret;
+
+	spi_set_drvdata(spi, drm);
 
 	DRM_DEBUG_DRIVER("SPI speed: %uMHz\n", spi->max_speed_hz / 1000000);
 
-	return devm_tinydrm_register(tdev);
+	drm_fbdev_generic_setup(drm, 32);
+
+	return 0;
 }
 
-static void repaper_shutdown(struct spi_device *spi)
+static int repaper_remove(struct spi_device *spi)
 {
-	struct tinydrm_device *tdev = spi_get_drvdata(spi);
+	struct drm_device *drm = spi_get_drvdata(spi);
+
+	drm_dev_unplug(drm);
+	drm_atomic_helper_shutdown(drm);
+
+	return 0;
+}
 
-	tinydrm_shutdown(tdev);
+static void repaper_shutdown(struct spi_device *spi)
+{
+	drm_atomic_helper_shutdown(spi_get_drvdata(spi));
 }
 
 static struct spi_driver repaper_spi_driver = {
@@ -1099,6 +1158,7 @@ static struct spi_driver repaper_spi_driver = {
 	},
 	.id_table = repaper_id,
 	.probe = repaper_probe,
+	.remove = repaper_remove,
 	.shutdown = repaper_shutdown,
 };
 module_spi_driver(repaper_spi_driver);
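
The three SPI panel drivers converted in this series (repaper, st7586, st7735r) all land on the same probe/remove shape: a plain kzalloc() of the driver struct, devm_drm_dev_init() to tie the embedded drm_device's lifetime to the SPI device, drm_dev_register(), and a remove path built from drm_dev_unplug() plus drm_atomic_helper_shutdown(). A minimal sketch of that shape, using a hypothetical "foo" panel (all foo_* names are illustrative, not from this patch):

#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <linux/spi/spi.h>

struct foo_epd {
	struct drm_device drm;	/* embedded; freed by foo_driver.release */
	struct spi_device *spi;
};

static struct drm_driver foo_driver;	/* .release must free the foo_epd */

static int foo_probe(struct spi_device *spi)
{
	struct foo_epd *epd;
	struct drm_device *drm;
	int ret;

	epd = kzalloc(sizeof(*epd), GFP_KERNEL);	/* not devm: freed via release() */
	if (!epd)
		return -ENOMEM;

	drm = &epd->drm;
	ret = devm_drm_dev_init(&spi->dev, drm, &foo_driver);
	if (ret) {
		kfree(epd);	/* init failed, so release() will never run */
		return ret;
	}
	drm_mode_config_init(drm);

	/* ... gpio lookup and display pipe init go here ... */

	drm_mode_config_reset(drm);
	ret = drm_dev_register(drm, 0);
	if (ret)
		return ret;	/* devm unwinds the dev init on unbind */

	spi_set_drvdata(spi, drm);
	return 0;
}

static int foo_remove(struct spi_device *spi)
{
	struct drm_device *drm = spi_get_drvdata(spi);

	drm_dev_unplug(drm);			/* userspace may still hold open fds */
	drm_atomic_helper_shutdown(drm);	/* turn the panel off */
	return 0;
}

The kzalloc()/kfree() pairing replaces the old devm_kzalloc() precisely because open file descriptors can outlive the SPI device; the memory now lives until the drm_device's release callback runs.
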
diff --git a/drivers/gpu/drm/tinydrm/st7586.c b/drivers/gpu/drm/tinydrm/st7586.c
index 01a8077954b3..d99957bac532 100644
--- a/drivers/gpu/drm/tinydrm/st7586.c
+++ b/drivers/gpu/drm/tinydrm/st7586.c
@@ -17,9 +17,11 @@
 #include <linux/spi/spi.h>
 #include <video/mipi_display.h>
 
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_damage_helper.h>
 #include <drm/drm_drv.h>
 #include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_rect.h>
@@ -116,14 +118,15 @@ static int st7586_buf_copy(void *dst, struct drm_framebuffer *fb,
 
 static void st7586_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
 {
-	struct tinydrm_device *tdev = fb->dev->dev_private;
-	struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
-	int start, end;
-	int ret = 0;
+	struct mipi_dbi *mipi = drm_to_mipi_dbi(fb->dev);
+	int start, end, idx, ret = 0;
 
 	if (!mipi->enabled)
 		return;
 
+	if (!drm_dev_enter(fb->dev, &idx))
+		return;
+
 	/* 3 pixels per byte, so grow clip to nearest multiple of 3 */
 	rect->x1 = rounddown(rect->x1, 3);
 	rect->x2 = roundup(rect->x2, 3);
@@ -151,6 +154,8 @@ static void st7586_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
 err_msg:
 	if (ret)
 		dev_err_once(fb->dev->dev, "Failed to update display %d\n", ret);
+
+	drm_dev_exit(idx);
 }
 
 static void st7586_pipe_update(struct drm_simple_display_pipe *pipe,
@@ -175,8 +180,7 @@ static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
			       struct drm_crtc_state *crtc_state,
			       struct drm_plane_state *plane_state)
 {
-	struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
-	struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+	struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
 	struct drm_framebuffer *fb = plane_state->fb;
 	struct drm_rect rect = {
 		.x1 = 0,
@@ -184,14 +188,17 @@ static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
 		.y1 = 0,
 		.y2 = fb->height,
 	};
-	int ret;
+	int idx, ret;
 	u8 addr_mode;
 
+	if (!drm_dev_enter(pipe->crtc.dev, &idx))
+		return;
+
 	DRM_DEBUG_KMS("\n");
 
 	ret = mipi_dbi_poweron_reset(mipi);
 	if (ret)
-		return;
+		goto out_exit;
 
 	mipi_dbi_command(mipi, ST7586_AUTO_READ_CTRL, 0x9f);
 	mipi_dbi_command(mipi, ST7586_OTP_RW_CTRL, 0x00);
@@ -244,12 +251,20 @@ static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
 	st7586_fb_dirty(fb, &rect);
 
 	mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_ON);
+out_exit:
+	drm_dev_exit(idx);
 }
 
 static void st7586_pipe_disable(struct drm_simple_display_pipe *pipe)
 {
-	struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
-	struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+	struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
+
+	/*
+	 * This callback is not protected by drm_dev_enter/exit since we want to
+	 * turn off the display on regular driver unload. It's highly unlikely
+	 * that the underlying SPI controller is gone should this be called after
+	 * unplug.
+	 */
 
 	DRM_DEBUG_KMS("\n");
 
@@ -264,46 +279,6 @@ static const u32 st7586_formats[] = {
 	DRM_FORMAT_XRGB8888,
 };
 
-static int st7586_init(struct device *dev, struct mipi_dbi *mipi,
-		       const struct drm_simple_display_pipe_funcs *pipe_funcs,
-		       struct drm_driver *driver, const struct drm_display_mode *mode,
-		       unsigned int rotation)
-{
-	size_t bufsize = (mode->vdisplay + 2) / 3 * mode->hdisplay;
-	struct tinydrm_device *tdev = &mipi->tinydrm;
-	int ret;
-
-	mutex_init(&mipi->cmdlock);
-
-	mipi->tx_buf = devm_kmalloc(dev, bufsize, GFP_KERNEL);
-	if (!mipi->tx_buf)
-		return -ENOMEM;
-
-	ret = devm_tinydrm_init(dev, tdev, driver);
-	if (ret)
-		return ret;
-
-	ret = tinydrm_display_pipe_init(tdev, pipe_funcs,
-					DRM_MODE_CONNECTOR_VIRTUAL,
-					st7586_formats,
-					ARRAY_SIZE(st7586_formats),
-					mode, rotation);
-	if (ret)
-		return ret;
-
-	drm_plane_enable_fb_damage_clips(&tdev->pipe.plane);
-
-	tdev->drm->mode_config.preferred_depth = 32;
-	mipi->rotation = rotation;
-
-	drm_mode_config_reset(tdev->drm);
-
-	DRM_DEBUG_KMS("preferred_depth=%u, rotation = %u\n",
-		      tdev->drm->mode_config.preferred_depth, rotation);
-
-	return 0;
-}
-
 static const struct drm_simple_display_pipe_funcs st7586_pipe_funcs = {
 	.enable = st7586_pipe_enable,
 	.disable = st7586_pipe_disable,
@@ -311,8 +286,14 @@ static const struct drm_simple_display_pipe_funcs st7586_pipe_funcs = {
 	.prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
 };
 
+static const struct drm_mode_config_funcs st7586_mode_config_funcs = {
+	.fb_create = drm_gem_fb_create_with_dirty,
+	.atomic_check = drm_atomic_helper_check,
+	.atomic_commit = drm_atomic_helper_commit,
+};
+
 static const struct drm_display_mode st7586_mode = {
-	TINYDRM_MODE(178, 128, 37, 27),
+	DRM_SIMPLE_MODE(178, 128, 37, 27),
 };
 
 DEFINE_DRM_GEM_CMA_FOPS(st7586_fops);
@@ -321,6 +302,7 @@ static struct drm_driver st7586_driver = {
 	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
			   DRIVER_ATOMIC,
 	.fops = &st7586_fops,
+	.release = mipi_dbi_release,
 	DRM_GEM_CMA_VMAP_DRIVER_OPS,
 	.debugfs_init = mipi_dbi_debugfs_init,
 	.name = "st7586",
@@ -345,15 +327,35 @@ MODULE_DEVICE_TABLE(spi, st7586_id);
 static int st7586_probe(struct spi_device *spi)
 {
 	struct device *dev = &spi->dev;
+	struct drm_device *drm;
 	struct mipi_dbi *mipi;
 	struct gpio_desc *a0;
 	u32 rotation = 0;
+	size_t bufsize;
 	int ret;
 
-	mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL);
+	mipi = kzalloc(sizeof(*mipi), GFP_KERNEL);
 	if (!mipi)
 		return -ENOMEM;
 
+	drm = &mipi->drm;
+	ret = devm_drm_dev_init(dev, drm, &st7586_driver);
+	if (ret) {
+		kfree(mipi);
+		return ret;
+	}
+
+	drm_mode_config_init(drm);
+	drm->mode_config.preferred_depth = 32;
+	drm->mode_config.funcs = &st7586_mode_config_funcs;
+
+	mutex_init(&mipi->cmdlock);
+
+	bufsize = (st7586_mode.vdisplay + 2) / 3 * st7586_mode.hdisplay;
+	mipi->tx_buf = devm_kmalloc(dev, bufsize, GFP_KERNEL);
+	if (!mipi->tx_buf)
+		return -ENOMEM;
+
 	mipi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
 	if (IS_ERR(mipi->reset)) {
 		DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
@@ -367,6 +369,7 @@ static int st7586_probe(struct spi_device *spi)
 	}
 
 	device_property_read_u32(dev, "rotation", &rotation);
+	mipi->rotation = rotation;
 
 	ret = mipi_dbi_spi_init(spi, mipi, a0);
 	if (ret)
@@ -384,21 +387,44 @@ static int st7586_probe(struct spi_device *spi)
 	 */
 	mipi->swap_bytes = true;
 
-	ret = st7586_init(&spi->dev, mipi, &st7586_pipe_funcs, &st7586_driver,
-			  &st7586_mode, rotation);
+	ret = tinydrm_display_pipe_init(drm, &mipi->pipe, &st7586_pipe_funcs,
+					DRM_MODE_CONNECTOR_VIRTUAL,
+					st7586_formats, ARRAY_SIZE(st7586_formats),
+					&st7586_mode, rotation);
 	if (ret)
 		return ret;
 
-	spi_set_drvdata(spi, mipi);
+	drm_plane_enable_fb_damage_clips(&mipi->pipe.plane);
 
-	return devm_tinydrm_register(&mipi->tinydrm);
+	drm_mode_config_reset(drm);
+
+	ret = drm_dev_register(drm, 0);
+	if (ret)
+		return ret;
+
+	spi_set_drvdata(spi, drm);
+
+	DRM_DEBUG_KMS("preferred_depth=%u, rotation = %u\n",
+		      drm->mode_config.preferred_depth, rotation);
+
+	drm_fbdev_generic_setup(drm, 32);
+
+	return 0;
 }
 
-static void st7586_shutdown(struct spi_device *spi)
+static int st7586_remove(struct spi_device *spi)
 {
-	struct mipi_dbi *mipi = spi_get_drvdata(spi);
+	struct drm_device *drm = spi_get_drvdata(spi);
+
+	drm_dev_unplug(drm);
+	drm_atomic_helper_shutdown(drm);
 
-	tinydrm_shutdown(&mipi->tinydrm);
+	return 0;
+}
+
+static void st7586_shutdown(struct spi_device *spi)
+{
+	drm_atomic_helper_shutdown(spi_get_drvdata(spi));
 }
 
 static struct spi_driver st7586_spi_driver = {
@@ -409,6 +435,7 @@ static struct spi_driver st7586_spi_driver = {
 	},
 	.id_table = st7586_id,
 	.probe = st7586_probe,
+	.remove = st7586_remove,
 	.shutdown = st7586_shutdown,
 };
 module_spi_driver(st7586_spi_driver);
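
The drm_dev_enter()/drm_dev_exit() pairs threaded through st7586_fb_dirty() and st7586_pipe_enable() are what makes the drm_dev_unplug()-based remove path safe: once the device is unplugged, drm_dev_enter() fails and the hardware is never touched. The idiom, reduced to its core (a sketch, not this driver's exact code):

static void foo_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
{
	int idx;

	/* Fails once drm_dev_unplug() has been called on fb->dev. */
	if (!drm_dev_enter(fb->dev, &idx))
		return;

	/* ... hardware access is safe within the enter/exit section ... */

	drm_dev_exit(idx);
}

Note the deliberate exception documented in st7586_pipe_disable(): the disable path stays unguarded so a regular driver unload can still turn the display off.
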
diff --git a/drivers/gpu/drm/tinydrm/st7735r.c b/drivers/gpu/drm/tinydrm/st7735r.c
index 3bab9a9569a6..022e9849b95b 100644
--- a/drivers/gpu/drm/tinydrm/st7735r.c
+++ b/drivers/gpu/drm/tinydrm/st7735r.c
@@ -14,7 +14,9 @@
 #include <linux/spi/spi.h>
 #include <video/mipi_display.h>
 
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/tinydrm/mipi-dbi.h>
@@ -41,16 +43,18 @@ static void jd_t18003_t01_pipe_enable(struct drm_simple_display_pipe *pipe,
				      struct drm_crtc_state *crtc_state,
				      struct drm_plane_state *plane_state)
 {
-	struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
-	struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
-	int ret;
+	struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
+	int ret, idx;
 	u8 addr_mode;
 
+	if (!drm_dev_enter(pipe->crtc.dev, &idx))
+		return;
+
 	DRM_DEBUG_KMS("\n");
 
 	ret = mipi_dbi_poweron_reset(mipi);
 	if (ret)
-		return;
+		goto out_exit;
 
 	msleep(150);
 
@@ -101,6 +105,8 @@ static void jd_t18003_t01_pipe_enable(struct drm_simple_display_pipe *pipe,
 	msleep(20);
 
 	mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
+out_exit:
+	drm_dev_exit(idx);
 }
 
 static const struct drm_simple_display_pipe_funcs jd_t18003_t01_pipe_funcs = {
@@ -111,7 +117,7 @@ static const struct drm_simple_display_pipe_funcs jd_t18003_t01_pipe_funcs = {
 };
 
 static const struct drm_display_mode jd_t18003_t01_mode = {
-	TINYDRM_MODE(128, 160, 28, 35),
+	DRM_SIMPLE_MODE(128, 160, 28, 35),
 };
 
 DEFINE_DRM_GEM_CMA_FOPS(st7735r_fops);
@@ -120,6 +126,7 @@ static struct drm_driver st7735r_driver = {
 	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
			   DRIVER_ATOMIC,
 	.fops = &st7735r_fops,
+	.release = mipi_dbi_release,
 	DRM_GEM_CMA_VMAP_DRIVER_OPS,
 	.debugfs_init = mipi_dbi_debugfs_init,
 	.name = "st7735r",
@@ -144,15 +151,25 @@ MODULE_DEVICE_TABLE(spi, st7735r_id);
 static int st7735r_probe(struct spi_device *spi)
 {
 	struct device *dev = &spi->dev;
+	struct drm_device *drm;
 	struct mipi_dbi *mipi;
 	struct gpio_desc *dc;
 	u32 rotation = 0;
 	int ret;
 
-	mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL);
+	mipi = kzalloc(sizeof(*mipi), GFP_KERNEL);
 	if (!mipi)
 		return -ENOMEM;
 
+	drm = &mipi->drm;
+	ret = devm_drm_dev_init(dev, drm, &st7735r_driver);
+	if (ret) {
+		kfree(mipi);
+		return ret;
+	}
+
+	drm_mode_config_init(drm);
+
 	mipi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
 	if (IS_ERR(mipi->reset)) {
 		DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
@@ -178,21 +195,36 @@ static int st7735r_probe(struct spi_device *spi)
 	/* Cannot read from Adafruit 1.8" display via SPI */
 	mipi->read_commands = NULL;
 
-	ret = mipi_dbi_init(&spi->dev, mipi, &jd_t18003_t01_pipe_funcs,
-			    &st7735r_driver, &jd_t18003_t01_mode, rotation);
+	ret = mipi_dbi_init(mipi, &jd_t18003_t01_pipe_funcs, &jd_t18003_t01_mode, rotation);
 	if (ret)
 		return ret;
 
-	spi_set_drvdata(spi, mipi);
+	drm_mode_config_reset(drm);
 
-	return devm_tinydrm_register(&mipi->tinydrm);
+	ret = drm_dev_register(drm, 0);
+	if (ret)
+		return ret;
+
+	spi_set_drvdata(spi, drm);
+
+	drm_fbdev_generic_setup(drm, 32);
+
+	return 0;
 }
 
-static void st7735r_shutdown(struct spi_device *spi)
+static int st7735r_remove(struct spi_device *spi)
 {
-	struct mipi_dbi *mipi = spi_get_drvdata(spi);
+	struct drm_device *drm = spi_get_drvdata(spi);
+
+	drm_dev_unplug(drm);
+	drm_atomic_helper_shutdown(drm);
 
-	tinydrm_shutdown(&mipi->tinydrm);
+	return 0;
+}
+
+static void st7735r_shutdown(struct spi_device *spi)
+{
+	drm_atomic_helper_shutdown(spi_get_drvdata(spi));
 }
 
 static struct spi_driver st7735r_spi_driver = {
@@ -203,6 +235,7 @@ static struct spi_driver st7735r_spi_driver = {
 	},
 	.id_table = st7735r_id,
 	.probe = st7735r_probe,
+	.remove = st7735r_remove,
 	.shutdown = st7735r_shutdown,
 };
 module_spi_driver(st7735r_spi_driver);
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 22cd2d13e272..53b7b8c04bc6 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -107,6 +107,7 @@ static void udl_usb_disconnect(struct usb_interface *interface)
 	udl_fbdev_unplug(dev);
 	udl_drop_usb(dev);
 	drm_dev_unplug(dev);
+	drm_dev_put(dev);
 }
 
 /*
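
Behind this one-liner: drm_dev_unplug() in this kernel no longer drops the device reference itself, so a hotpluggable driver has to pair it with drm_dev_put() to release the reference it took at probe time. A sketch of the disconnect ordering (assuming the driver holds the initial reference from drm_dev_alloc()):

static void foo_usb_disconnect(struct usb_interface *interface)
{
	struct drm_device *dev = usb_get_intfdata(interface);

	drm_dev_unplug(dev);	/* mark gone; drm_dev_enter() now fails */
	drm_dev_put(dev);	/* drop probe's ref; freed once all fds close */
}
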
diff --git a/drivers/gpu/drm/v3d/Kconfig b/drivers/gpu/drm/v3d/Kconfig
index 1552bf552c94..75a74c45f109 100644
--- a/drivers/gpu/drm/v3d/Kconfig
+++ b/drivers/gpu/drm/v3d/Kconfig
@@ -5,6 +5,7 @@ config DRM_V3D
 	depends on COMMON_CLK
 	depends on MMU
 	select DRM_SCHED
+	select DRM_GEM_SHMEM_HELPER
 	help
 	  Choose this option if you have a system that has a Broadcom
 	  V3D 3.x or newer GPU, such as BCM7268.
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
index a08766d39eab..c0219ebb4284 100644
--- a/drivers/gpu/drm/v3d/v3d_bo.c
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
@@ -25,162 +25,6 @@
 #include "v3d_drv.h"
 #include "uapi/drm/v3d_drm.h"
 
-/* Pins the shmem pages, fills in the .pages and .sgt fields of the BO, and maps
- * it for DMA.
- */
-static int
-v3d_bo_get_pages(struct v3d_bo *bo)
-{
-	struct drm_gem_object *obj = &bo->base;
-	struct drm_device *dev = obj->dev;
-	int npages = obj->size >> PAGE_SHIFT;
-	int ret = 0;
-
-	mutex_lock(&bo->lock);
-	if (bo->pages_refcount++ != 0)
-		goto unlock;
-
-	if (!obj->import_attach) {
-		bo->pages = drm_gem_get_pages(obj);
-		if (IS_ERR(bo->pages)) {
-			ret = PTR_ERR(bo->pages);
-			goto unlock;
-		}
-
-		bo->sgt = drm_prime_pages_to_sg(bo->pages, npages);
-		if (IS_ERR(bo->sgt)) {
-			ret = PTR_ERR(bo->sgt);
-			goto put_pages;
-		}
-
-		/* Map the pages for use by the GPU. */
-		dma_map_sg(dev->dev, bo->sgt->sgl,
-			   bo->sgt->nents, DMA_BIDIRECTIONAL);
-	} else {
-		bo->pages = kcalloc(npages, sizeof(*bo->pages), GFP_KERNEL);
-		if (!bo->pages)
-			goto put_pages;
-
-		drm_prime_sg_to_page_addr_arrays(bo->sgt, bo->pages,
-						 NULL, npages);
-
-		/* Note that dma-bufs come in mapped. */
-	}
-
-	mutex_unlock(&bo->lock);
-
-	return 0;
-
-put_pages:
-	drm_gem_put_pages(obj, bo->pages, true, true);
-	bo->pages = NULL;
-unlock:
-	bo->pages_refcount--;
-	mutex_unlock(&bo->lock);
-	return ret;
-}
-
-static void
-v3d_bo_put_pages(struct v3d_bo *bo)
-{
-	struct drm_gem_object *obj = &bo->base;
-
-	mutex_lock(&bo->lock);
-	if (--bo->pages_refcount == 0) {
-		if (!obj->import_attach) {
-			dma_unmap_sg(obj->dev->dev, bo->sgt->sgl,
-				     bo->sgt->nents, DMA_BIDIRECTIONAL);
-			sg_free_table(bo->sgt);
-			kfree(bo->sgt);
-			drm_gem_put_pages(obj, bo->pages, true, true);
-		} else {
-			kfree(bo->pages);
-		}
-	}
-	mutex_unlock(&bo->lock);
-}
-
-static struct v3d_bo *v3d_bo_create_struct(struct drm_device *dev,
-					   size_t unaligned_size)
-{
-	struct v3d_dev *v3d = to_v3d_dev(dev);
-	struct drm_gem_object *obj;
-	struct v3d_bo *bo;
-	size_t size = roundup(unaligned_size, PAGE_SIZE);
-	int ret;
-
-	if (size == 0)
-		return ERR_PTR(-EINVAL);
-
-	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
-	if (!bo)
-		return ERR_PTR(-ENOMEM);
-	obj = &bo->base;
-
-	INIT_LIST_HEAD(&bo->vmas);
-	INIT_LIST_HEAD(&bo->unref_head);
-	mutex_init(&bo->lock);
-
-	ret = drm_gem_object_init(dev, obj, size);
-	if (ret)
-		goto free_bo;
-
-	spin_lock(&v3d->mm_lock);
-	ret = drm_mm_insert_node_generic(&v3d->mm, &bo->node,
-					 obj->size >> PAGE_SHIFT,
-					 GMP_GRANULARITY >> PAGE_SHIFT, 0, 0);
-	spin_unlock(&v3d->mm_lock);
-	if (ret)
-		goto free_obj;
-
-	return bo;
-
-free_obj:
-	drm_gem_object_release(obj);
-free_bo:
-	kfree(bo);
-	return ERR_PTR(ret);
-}
-
-struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
-			     size_t unaligned_size)
-{
-	struct v3d_dev *v3d = to_v3d_dev(dev);
-	struct drm_gem_object *obj;
-	struct v3d_bo *bo;
-	int ret;
-
-	bo = v3d_bo_create_struct(dev, unaligned_size);
-	if (IS_ERR(bo))
-		return bo;
-	obj = &bo->base;
-
-	bo->resv = &bo->_resv;
-	reservation_object_init(bo->resv);
-
-	ret = v3d_bo_get_pages(bo);
-	if (ret)
-		goto free_mm;
-
-	v3d_mmu_insert_ptes(bo);
-
-	mutex_lock(&v3d->bo_lock);
-	v3d->bo_stats.num_allocated++;
-	v3d->bo_stats.pages_allocated += obj->size >> PAGE_SHIFT;
-	mutex_unlock(&v3d->bo_lock);
-
-	return bo;
-
-free_mm:
-	spin_lock(&v3d->mm_lock);
-	drm_mm_remove_node(&bo->node);
-	spin_unlock(&v3d->mm_lock);
-
-	drm_gem_object_release(obj);
-	kfree(bo);
-	return ERR_PTR(ret);
-}
-
 /* Called DRM core on the last userspace/kernel unreference of the
  * BO.
  */
@@ -189,92 +33,116 @@ void v3d_free_object(struct drm_gem_object *obj)
 	struct v3d_dev *v3d = to_v3d_dev(obj->dev);
 	struct v3d_bo *bo = to_v3d_bo(obj);
 
+	v3d_mmu_remove_ptes(bo);
+
 	mutex_lock(&v3d->bo_lock);
 	v3d->bo_stats.num_allocated--;
 	v3d->bo_stats.pages_allocated -= obj->size >> PAGE_SHIFT;
 	mutex_unlock(&v3d->bo_lock);
 
-	reservation_object_fini(&bo->_resv);
-
-	v3d_bo_put_pages(bo);
-
-	if (obj->import_attach)
-		drm_prime_gem_destroy(obj, bo->sgt);
-
-	v3d_mmu_remove_ptes(bo);
 	spin_lock(&v3d->mm_lock);
 	drm_mm_remove_node(&bo->node);
 	spin_unlock(&v3d->mm_lock);
 
-	mutex_destroy(&bo->lock);
+	/* GPU execution may have dirtied any pages in the BO. */
+	bo->base.pages_mark_dirty_on_put = true;
 
-	drm_gem_object_release(obj);
-	kfree(bo);
+	drm_gem_shmem_free_object(obj);
 }
 
-struct reservation_object *v3d_prime_res_obj(struct drm_gem_object *obj)
+static const struct drm_gem_object_funcs v3d_gem_funcs = {
+	.free = v3d_free_object,
+	.print_info = drm_gem_shmem_print_info,
+	.pin = drm_gem_shmem_pin,
+	.unpin = drm_gem_shmem_unpin,
+	.get_sg_table = drm_gem_shmem_get_sg_table,
+	.vmap = drm_gem_shmem_vmap,
+	.vunmap = drm_gem_shmem_vunmap,
+	.vm_ops = &drm_gem_shmem_vm_ops,
+};
+
+/* gem_create_object function for allocating a BO struct and doing
+ * early setup.
+ */
+struct drm_gem_object *v3d_create_object(struct drm_device *dev, size_t size)
 {
-	struct v3d_bo *bo = to_v3d_bo(obj);
+	struct v3d_bo *bo;
+	struct drm_gem_object *obj;
 
-	return bo->resv;
-}
+	if (size == 0)
+		return NULL;
 
-static void
-v3d_set_mmap_vma_flags(struct vm_area_struct *vma)
-{
-	vma->vm_flags &= ~VM_PFNMAP;
-	vma->vm_flags |= VM_MIXEDMAP;
-	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
-}
+	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+	if (!bo)
+		return NULL;
+	obj = &bo->base.base;
 
-vm_fault_t v3d_gem_fault(struct vm_fault *vmf)
-{
-	struct vm_area_struct *vma = vmf->vma;
-	struct drm_gem_object *obj = vma->vm_private_data;
-	struct v3d_bo *bo = to_v3d_bo(obj);
-	pfn_t pfn;
-	pgoff_t pgoff;
+	obj->funcs = &v3d_gem_funcs;
 
-	/* We don't use vmf->pgoff since that has the fake offset: */
-	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
-	pfn = __pfn_to_pfn_t(page_to_pfn(bo->pages[pgoff]), PFN_DEV);
+	INIT_LIST_HEAD(&bo->unref_head);
 
-	return vmf_insert_mixed(vma, vmf->address, pfn);
+	return &bo->base.base;
 }
 
-int v3d_mmap(struct file *filp, struct vm_area_struct *vma)
+static int
+v3d_bo_create_finish(struct drm_gem_object *obj)
 {
+	struct v3d_dev *v3d = to_v3d_dev(obj->dev);
+	struct v3d_bo *bo = to_v3d_bo(obj);
+	struct sg_table *sgt;
 	int ret;
 
-	ret = drm_gem_mmap(filp, vma);
+	/* So far we pin the BO in the MMU for its lifetime, so use
+	 * shmem's helper for getting a lifetime sgt.
+	 */
+	sgt = drm_gem_shmem_get_pages_sgt(&bo->base.base);
+	if (IS_ERR(sgt))
+		return PTR_ERR(sgt);
+
+	spin_lock(&v3d->mm_lock);
+	/* Allocate the object's space in the GPU's page tables.
+	 * Inserting PTEs will happen later, but the offset is for the
+	 * lifetime of the BO.
+	 */
+	ret = drm_mm_insert_node_generic(&v3d->mm, &bo->node,
+					 obj->size >> PAGE_SHIFT,
+					 GMP_GRANULARITY >> PAGE_SHIFT, 0, 0);
+	spin_unlock(&v3d->mm_lock);
 	if (ret)
 		return ret;
 
-	v3d_set_mmap_vma_flags(vma);
+	/* Track stats for /debug/dri/n/bo_stats. */
+	mutex_lock(&v3d->bo_lock);
+	v3d->bo_stats.num_allocated++;
+	v3d->bo_stats.pages_allocated += obj->size >> PAGE_SHIFT;
+	mutex_unlock(&v3d->bo_lock);
 
-	return ret;
+	v3d_mmu_insert_ptes(bo);
+
+	return 0;
 }
 
-int v3d_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
+			     size_t unaligned_size)
 {
+	struct drm_gem_shmem_object *shmem_obj;
+	struct v3d_bo *bo;
 	int ret;
 
-	ret = drm_gem_mmap_obj(obj, obj->size, vma);
-	if (ret < 0)
-		return ret;
-
-	v3d_set_mmap_vma_flags(vma);
+	shmem_obj = drm_gem_shmem_create(dev, unaligned_size);
+	if (!shmem_obj)
+		return NULL;
+	bo = to_v3d_bo(&shmem_obj->base);
 
-	return 0;
-}
+	ret = v3d_bo_create_finish(&shmem_obj->base);
+	if (ret)
+		goto free_obj;
 
-struct sg_table *
-v3d_prime_get_sg_table(struct drm_gem_object *obj)
-{
-	struct v3d_bo *bo = to_v3d_bo(obj);
-	int npages = obj->size >> PAGE_SHIFT;
+	return bo;
 
-	return drm_prime_pages_to_sg(bo->pages, npages);
+free_obj:
+	drm_gem_shmem_free_object(&shmem_obj->base);
+	return ERR_PTR(ret);
 }
 
 struct drm_gem_object *
@@ -283,20 +151,17 @@ v3d_prime_import_sg_table(struct drm_device *dev,
			  struct sg_table *sgt)
 {
 	struct drm_gem_object *obj;
-	struct v3d_bo *bo;
-
-	bo = v3d_bo_create_struct(dev, attach->dmabuf->size);
-	if (IS_ERR(bo))
-		return ERR_CAST(bo);
-	obj = &bo->base;
-
-	bo->resv = attach->dmabuf->resv;
+	int ret;
 
-	bo->sgt = sgt;
-	obj->import_attach = attach;
-	v3d_bo_get_pages(bo);
+	obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
+	if (IS_ERR(obj))
+		return obj;
 
-	v3d_mmu_insert_ptes(bo);
+	ret = v3d_bo_create_finish(obj);
+	if (ret) {
+		drm_gem_shmem_free_object(obj);
+		return ERR_PTR(ret);
+	}
 
 	return obj;
 }
@@ -319,8 +184,8 @@ int v3d_create_bo_ioctl(struct drm_device *dev, void *data,
 
 	args->offset = bo->node.start << PAGE_SHIFT;
 
-	ret = drm_gem_handle_create(file_priv, &bo->base, &args->handle);
-	drm_gem_object_put_unlocked(&bo->base);
+	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
+	drm_gem_object_put_unlocked(&bo->base.base);
 
 	return ret;
 }
@@ -330,7 +195,6 @@ int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
 {
 	struct drm_v3d_mmap_bo *args = data;
 	struct drm_gem_object *gem_obj;
-	int ret;
 
 	if (args->flags != 0) {
 		DRM_INFO("unknown mmap_bo flags: %d\n", args->flags);
@@ -343,12 +207,10 @@ int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 	}
 
-	ret = drm_gem_create_mmap_offset(gem_obj);
-	if (ret == 0)
-		args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
+	args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
 	drm_gem_object_put_unlocked(gem_obj);
 
-	return ret;
+	return 0;
 }
 
 int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
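
The whole v3d_bo.c rewrite boils down to this: a driver that wants per-BO state on top of the shmem helper embeds struct drm_gem_shmem_object, points obj->funcs at a table of drm_gem_shmem_* callbacks, and supplies a gem_create_object hook so drm_gem_shmem_create() allocates the larger struct. A condensed sketch of that pattern (hypothetical foo_* names, not this driver's code):

#include <drm/drm_gem_shmem_helper.h>

struct foo_bo {
	struct drm_gem_shmem_object base;	/* wraps the shmem object */
	struct drm_mm_node node;		/* driver-private GPU VA slot */
};

static void foo_free_object(struct drm_gem_object *obj);

static const struct drm_gem_object_funcs foo_gem_funcs = {
	.free = foo_free_object,		/* driver teardown + shmem free */
	.print_info = drm_gem_shmem_print_info,
	.pin = drm_gem_shmem_pin,
	.unpin = drm_gem_shmem_unpin,
	.get_sg_table = drm_gem_shmem_get_sg_table,
	.vmap = drm_gem_shmem_vmap,
	.vunmap = drm_gem_shmem_vunmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

/* gem_create_object hook: lets the shmem helper allocate our struct. */
static struct drm_gem_object *foo_create_object(struct drm_device *dev,
						size_t size)
{
	struct foo_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);

	if (!bo)
		return NULL;
	bo->base.base.funcs = &foo_gem_funcs;
	return &bo->base.base;
}

With this in place, mmap, faulting, and prime export all come from the helper; the driver only keeps the MMU bookkeeping (v3d_bo_create_finish() here) that is genuinely hardware-specific.
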
diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c
index eb2b2d2f8553..a24af2d2f574 100644
--- a/drivers/gpu/drm/v3d/v3d_debugfs.c
+++ b/drivers/gpu/drm/v3d/v3d_debugfs.c
@@ -187,6 +187,11 @@ static int v3d_measure_clock(struct seq_file *m, void *unused)
 	uint32_t cycles;
 	int core = 0;
 	int measure_ms = 1000;
+	int ret;
+
+	ret = pm_runtime_get_sync(v3d->dev);
+	if (ret < 0)
+		return ret;
 
 	if (v3d->ver >= 40) {
 		V3D_CORE_WRITE(core, V3D_V4_PCTR_0_SRC_0_3,
@@ -210,6 +215,9 @@ static int v3d_measure_clock(struct seq_file *m, void *unused)
		   cycles / (measure_ms * 1000),
		   (cycles / (measure_ms * 100)) % 10);
 
+	pm_runtime_mark_last_busy(v3d->dev);
+	pm_runtime_put_autosuspend(v3d->dev);
+
 	return 0;
 }
 
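
The clock-measurement file reads live registers, so it now brackets the access with a runtime-PM reference; without it the read could hit a powered-down GPU. The general guard looks like this (a sketch; foo_* names are illustrative):

#include <linux/pm_runtime.h>

static int foo_read_counters(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);	/* resume the device if suspended */
	if (ret < 0)
		return ret;

	/* ... register reads are safe while the reference is held ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);	/* allow delayed re-suspend */
	return 0;
}
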
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index f0afcec72c34..d600628bb5c1 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -7,9 +7,9 @@
  * This driver supports the Broadcom V3D 3.3 and 4.1 OpenGL ES GPUs.
  * For V3D 2.x support, see the VC4 driver.
  *
- * Currently only single-core rendering using the binner and renderer
- * is supported. The TFU (texture formatting unit) and V3D 4.x's CSD
- * (compute shader dispatch) are not yet supported.
+ * Currently only single-core rendering using the binner and renderer,
+ * along with TFU (texture formatting unit) rendering is supported.
+ * V3D 4.x's CSD (compute shader dispatch) is not yet supported.
  */
 
 #include <linux/clk.h>
@@ -19,6 +19,7 @@
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
+#include <linux/reset.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_fb_helper.h>
 
@@ -160,17 +161,7 @@ v3d_postclose(struct drm_device *dev, struct drm_file *file)
 	kfree(v3d_priv);
 }
 
-static const struct file_operations v3d_drm_fops = {
-	.owner = THIS_MODULE,
-	.open = drm_open,
-	.release = drm_release,
-	.unlocked_ioctl = drm_ioctl,
-	.mmap = v3d_mmap,
-	.poll = drm_poll,
-	.read = drm_read,
-	.compat_ioctl = drm_compat_ioctl,
-	.llseek = noop_llseek,
-};
+DEFINE_DRM_GEM_SHMEM_FOPS(v3d_drm_fops);
 
 /* DRM_AUTH is required on SUBMIT_CL for now, while we don't have GMP
  * protection between clients. Note that render nodes would be be
@@ -188,12 +179,6 @@ static const struct drm_ioctl_desc v3d_drm_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(V3D_SUBMIT_TFU, v3d_submit_tfu_ioctl, DRM_RENDER_ALLOW | DRM_AUTH),
 };
 
-static const struct vm_operations_struct v3d_vm_ops = {
-	.fault = v3d_gem_fault,
-	.open = drm_gem_vm_open,
-	.close = drm_gem_vm_close,
-};
-
 static struct drm_driver v3d_drm_driver = {
 	.driver_features = (DRIVER_GEM |
			    DRIVER_RENDER |
@@ -207,17 +192,11 @@ static struct drm_driver v3d_drm_driver = {
 	.debugfs_init = v3d_debugfs_init,
 #endif
 
-	.gem_free_object_unlocked = v3d_free_object,
-	.gem_vm_ops = &v3d_vm_ops,
-
+	.gem_create_object = v3d_create_object,
 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
-	.gem_prime_import = drm_gem_prime_import,
-	.gem_prime_export = drm_gem_prime_export,
-	.gem_prime_res_obj = v3d_prime_res_obj,
-	.gem_prime_get_sg_table = v3d_prime_get_sg_table,
 	.gem_prime_import_sg_table = v3d_prime_import_sg_table,
-	.gem_prime_mmap = v3d_prime_mmap,
+	.gem_prime_mmap = drm_gem_prime_mmap,
 
 	.ioctls = v3d_drm_ioctls,
 	.num_ioctls = ARRAY_SIZE(v3d_drm_ioctls),
@@ -265,10 +244,6 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
 	v3d->pdev = pdev;
 	drm = &v3d->drm;
 
-	ret = map_regs(v3d, &v3d->bridge_regs, "bridge");
-	if (ret)
-		goto dev_free;
-
 	ret = map_regs(v3d, &v3d->hub_regs, "hub");
 	if (ret)
 		goto dev_free;
@@ -283,6 +258,22 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
 	v3d->cores = V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_NCORES);
 	WARN_ON(v3d->cores > 1);	/* multicore not yet implemented */
 
+	v3d->reset = devm_reset_control_get_exclusive(dev, NULL);
+	if (IS_ERR(v3d->reset)) {
+		ret = PTR_ERR(v3d->reset);
+
+		if (ret == -EPROBE_DEFER)
+			goto dev_free;
+
+		v3d->reset = NULL;
+		ret = map_regs(v3d, &v3d->bridge_regs, "bridge");
+		if (ret) {
+			dev_err(dev,
+				"Failed to get reset control or bridge regs\n");
+			goto dev_free;
+		}
+	}
+
 	if (v3d->ver < 41) {
 		ret = map_regs(v3d, &v3d->gca_regs, "gca");
 		if (ret)
@@ -312,14 +303,18 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
 	if (ret)
 		goto dev_destroy;
 
-	v3d_irq_init(v3d);
+	ret = v3d_irq_init(v3d);
+	if (ret)
+		goto gem_destroy;
 
 	ret = drm_dev_register(drm, 0);
 	if (ret)
-		goto gem_destroy;
+		goto irq_disable;
 
 	return 0;
 
+irq_disable:
+	v3d_irq_disable(v3d);
 gem_destroy:
 	v3d_gem_destroy(drm);
 dev_destroy:
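
Two probe changes interlock here: the bridge registers stop being mandatory, and a shared reset controller becomes the preferred reset mechanism, with the bridge kept as a fallback for old device trees. The lookup-with-fallback idiom, in isolation (a sketch; foo_* names are placeholders):

#include <linux/err.h>
#include <linux/reset.h>

struct foo_dev {
	struct reset_control *reset;	/* NULL means: use the legacy bridge */
};

static int foo_map_bridge_regs(struct device *dev, struct foo_dev *priv);

static int foo_get_reset(struct device *dev, struct foo_dev *priv)
{
	priv->reset = devm_reset_control_get_exclusive(dev, NULL);
	if (!IS_ERR(priv->reset))
		return 0;

	if (PTR_ERR(priv->reset) == -EPROBE_DEFER)
		return -EPROBE_DEFER;	/* reset provider not bound yet */

	priv->reset = NULL;
	return foo_map_bridge_regs(dev, priv);	/* legacy fallback */
}

v3d_reset_v3d() then simply branches on whether the reset control was found, as the v3d_gem.c hunks below show.
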
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index fdda3037f7af..7b0fe6240f7d 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -1,11 +1,11 @@
 // SPDX-License-Identifier: GPL-2.0+
 /* Copyright (C) 2015-2018 Broadcom */
 
-#include <linux/reservation.h>
 #include <linux/mm_types.h>
 #include <drm/drmP.h>
 #include <drm/drm_encoder.h>
 #include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
 #include <drm/gpu_scheduler.h>
 #include "uapi/drm/v3d_drm.h"
 
@@ -34,6 +34,7 @@ struct v3d_dev {
	 * and revision.
	 */
 	int ver;
+	bool single_irq_line;
 
 	struct device *dev;
 	struct platform_device *pdev;
@@ -42,6 +43,7 @@ struct v3d_dev {
 	void __iomem *bridge_regs;
 	void __iomem *gca_regs;
 	struct clk *clk;
+	struct reset_control *reset;
 
 	/* Virtual and DMA addresses of the single shared page table. */
 	volatile u32 *pt;
@@ -109,34 +111,15 @@ struct v3d_file_priv {
 	struct drm_sched_entity sched_entity[V3D_MAX_QUEUES];
 };
 
-/* Tracks a mapping of a BO into a per-fd address space */
-struct v3d_vma {
-	struct v3d_page_table *pt;
-	struct list_head list; /* entry in v3d_bo.vmas */
-};
-
 struct v3d_bo {
-	struct drm_gem_object base;
-
-	struct mutex lock;
+	struct drm_gem_shmem_object base;
 
 	struct drm_mm_node node;
 
-	u32 pages_refcount;
-	struct page **pages;
-	struct sg_table *sgt;
-	void *vaddr;
-
-	struct list_head vmas;	/* list of v3d_vma */
-
 	/* List entry for the BO's position in
	 * v3d_exec_info->unref_list
	 */
 	struct list_head unref_head;
-
-	/* normally (resv == &_resv) except for imported bo's */
-	struct reservation_object *resv;
-	struct reservation_object _resv;
 };
 
 static inline struct v3d_bo *
@@ -270,6 +253,7 @@ static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
 }
 
 /* v3d_bo.c */
+struct drm_gem_object *v3d_create_object(struct drm_device *dev, size_t size);
 void v3d_free_object(struct drm_gem_object *gem_obj);
 struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
			     size_t size);
@@ -279,11 +263,6 @@ int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
 int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
-vm_fault_t v3d_gem_fault(struct vm_fault *vmf);
-int v3d_mmap(struct file *filp, struct vm_area_struct *vma);
-struct reservation_object *v3d_prime_res_obj(struct drm_gem_object *obj);
-int v3d_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
-struct sg_table *v3d_prime_get_sg_table(struct drm_gem_object *obj);
 struct drm_gem_object *v3d_prime_import_sg_table(struct drm_device *dev,
						 struct dma_buf_attachment *attach,
						 struct sg_table *sgt);
@@ -310,7 +289,7 @@ void v3d_reset(struct v3d_dev *v3d);
 void v3d_invalidate_caches(struct v3d_dev *v3d);
 
 /* v3d_irq.c */
-void v3d_irq_init(struct v3d_dev *v3d);
+int v3d_irq_init(struct v3d_dev *v3d);
 void v3d_irq_enable(struct v3d_dev *v3d);
 void v3d_irq_disable(struct v3d_dev *v3d);
 void v3d_irq_reset(struct v3d_dev *v3d);
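
struct v3d_bo now carries two levels of base object (v3d_bo wraps drm_gem_shmem_object, which wraps drm_gem_object), which is why call sites throughout the series grow an extra `.base`. The downcast helper is a nested container_of; one way to write it (a sketch, assuming the member names above):

static inline struct v3d_bo *to_v3d_bo(struct drm_gem_object *gem_obj)
{
	struct drm_gem_shmem_object *shmem =
		container_of(gem_obj, struct drm_gem_shmem_object, base);

	return container_of(shmem, struct v3d_bo, base);
}
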
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 803f31467ec1..b84d89c7b3fb 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -6,6 +6,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
+#include <linux/reset.h>
 #include <linux/device.h>
 #include <linux/io.h>
 #include <linux/sched/signal.h>
@@ -24,7 +25,8 @@ v3d_init_core(struct v3d_dev *v3d, int core)
	 * type. If you want the default behavior, you can still put
	 * "2" in the indirect texture state's output_type field.
	 */
-	V3D_CORE_WRITE(core, V3D_CTL_MISCCFG, V3D_MISCCFG_OVRTMUOUT);
+	if (v3d->ver < 40)
+		V3D_CORE_WRITE(core, V3D_CTL_MISCCFG, V3D_MISCCFG_OVRTMUOUT);
 
 	/* Whenever we flush the L2T cache, we always want to flush
	 * the whole thing.
@@ -69,7 +71,7 @@ v3d_idle_gca(struct v3d_dev *v3d)
 }
 
 static void
-v3d_reset_v3d(struct v3d_dev *v3d)
+v3d_reset_by_bridge(struct v3d_dev *v3d)
 {
 	int version = V3D_BRIDGE_READ(V3D_TOP_GR_BRIDGE_REVISION);
 
@@ -89,6 +91,15 @@ v3d_reset_v3d(struct v3d_dev *v3d)
			 V3D_TOP_GR_BRIDGE_SW_INIT_1_V3D_CLK_108_SW_INIT);
 		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1, 0);
 	}
+}
+
+static void
+v3d_reset_v3d(struct v3d_dev *v3d)
+{
+	if (v3d->reset)
+		reset_control_reset(v3d->reset);
+	else
+		v3d_reset_by_bridge(v3d);
 
 	v3d_init_hw_state(v3d);
 }
@@ -190,7 +201,8 @@ v3d_attach_object_fences(struct v3d_bo **bos, int bo_count,
 
 	for (i = 0; i < bo_count; i++) {
 		/* XXX: Use shared fences for read-only objects. */
-		reservation_object_add_excl_fence(bos[i]->resv, fence);
+		reservation_object_add_excl_fence(bos[i]->base.base.resv,
						  fence);
 	}
 }
 
@@ -199,12 +211,8 @@ v3d_unlock_bo_reservations(struct v3d_bo **bos,
			   int bo_count,
			   struct ww_acquire_ctx *acquire_ctx)
 {
-	int i;
-
-	for (i = 0; i < bo_count; i++)
-		ww_mutex_unlock(&bos[i]->resv->lock);
-
-	ww_acquire_fini(acquire_ctx);
+	drm_gem_unlock_reservations((struct drm_gem_object **)bos, bo_count,
				    acquire_ctx);
 }
 
 /* Takes the reservation lock on all the BOs being referenced, so that
@@ -219,58 +227,19 @@ v3d_lock_bo_reservations(struct v3d_bo **bos,
			 int bo_count,
			 struct ww_acquire_ctx *acquire_ctx)
 {
-	int contended_lock = -1;
 	int i, ret;
 
-	ww_acquire_init(acquire_ctx, &reservation_ww_class);
-
-retry:
-	if (contended_lock != -1) {
-		struct v3d_bo *bo = bos[contended_lock];
-
-		ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
-						       acquire_ctx);
-		if (ret) {
-			ww_acquire_done(acquire_ctx);
-			return ret;
-		}
-	}
-
-	for (i = 0; i < bo_count; i++) {
-		if (i == contended_lock)
-			continue;
-
-		ret = ww_mutex_lock_interruptible(&bos[i]->resv->lock,
-						  acquire_ctx);
-		if (ret) {
-			int j;
-
-			for (j = 0; j < i; j++)
-				ww_mutex_unlock(&bos[j]->resv->lock);
-
-			if (contended_lock != -1 && contended_lock >= i) {
-				struct v3d_bo *bo = bos[contended_lock];
-
-				ww_mutex_unlock(&bo->resv->lock);
-			}
-
-			if (ret == -EDEADLK) {
-				contended_lock = i;
-				goto retry;
-			}
-
-			ww_acquire_done(acquire_ctx);
-			return ret;
-		}
-	}
-
-	ww_acquire_done(acquire_ctx);
+	ret = drm_gem_lock_reservations((struct drm_gem_object **)bos,
					bo_count, acquire_ctx);
+	if (ret)
+		return ret;
 
 	/* Reserve space for our shared (read-only) fence references,
	 * before we commit the CL to the hardware.
	 */
 	for (i = 0; i < bo_count; i++) {
-		ret = reservation_object_reserve_shared(bos[i]->resv, 1);
+		ret = reservation_object_reserve_shared(bos[i]->base.base.resv,
							1);
 		if (ret) {
 			v3d_unlock_bo_reservations(bos, bo_count,
						   acquire_ctx);
@@ -378,11 +347,11 @@ v3d_exec_cleanup(struct kref *ref)
 	dma_fence_put(exec->render_done_fence);
 
 	for (i = 0; i < exec->bo_count; i++)
-		drm_gem_object_put_unlocked(&exec->bo[i]->base);
+		drm_gem_object_put_unlocked(&exec->bo[i]->base.base);
 	kvfree(exec->bo);
 
 	list_for_each_entry_safe(bo, save, &exec->unref_list, unref_head) {
-		drm_gem_object_put_unlocked(&bo->base);
+		drm_gem_object_put_unlocked(&bo->base.base);
 	}
 
 	pm_runtime_mark_last_busy(v3d->dev);
@@ -409,7 +378,7 @@ v3d_tfu_job_cleanup(struct kref *ref)
 
 	for (i = 0; i < ARRAY_SIZE(job->bo); i++) {
 		if (job->bo[i])
-			drm_gem_object_put_unlocked(&job->bo[i]->base);
+			drm_gem_object_put_unlocked(&job->bo[i]->base.base);
 	}
 
 	pm_runtime_mark_last_busy(v3d->dev);
@@ -429,8 +398,6 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
 {
 	int ret;
 	struct drm_v3d_wait_bo *args = data;
-	struct drm_gem_object *gem_obj;
-	struct v3d_bo *bo;
 	ktime_t start = ktime_get();
 	u64 delta_ns;
 	unsigned long timeout_jiffies =
@@ -439,21 +406,8 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
 	if (args->pad != 0)
 		return -EINVAL;
 
-	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
-	if (!gem_obj) {
-		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
-		return -EINVAL;
-	}
-	bo = to_v3d_bo(gem_obj);
-
-	ret = reservation_object_wait_timeout_rcu(bo->resv,
-						  true, true,
-						  timeout_jiffies);
-
-	if (ret == 0)
-		ret = -ETIME;
-	else if (ret > 0)
-		ret = 0;
+	ret = drm_gem_reservation_object_wait(file_priv, args->handle,
					      true, timeout_jiffies);
 
 	/* Decrement the user's timeout, in case we got interrupted
	 * such that the ioctl will be restarted.
@@ -468,8 +422,6 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
 	if (ret == -ETIME && args->timeout_ns)
 		ret = -EAGAIN;
 
-	drm_gem_object_put_unlocked(gem_obj);
-
 	return ret;
 }
 
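
The open-coded ww-mutex loop that v3d_lock_bo_reservations() used to carry (lock everything, back off on -EDEADLK, retry starting from the contended object) is exactly what drm_gem_lock_reservations() encapsulates. Typical usage (a sketch):

#include <drm/drm_gem.h>

static int foo_submit_locked(struct drm_gem_object **objs, int count)
{
	struct ww_acquire_ctx ctx;
	int ret;

	/* Deadlock backoff/retry is handled inside the helper. */
	ret = drm_gem_lock_reservations(objs, count, &ctx);
	if (ret)
		return ret;	/* nothing is held on failure */

	/* ... reserve fence slots and queue the job here ... */

	drm_gem_unlock_reservations(objs, count, &ctx);
	return 0;
}
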
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index 69338da70ddc..b4d6ae81186d 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -27,6 +27,9 @@
			     V3D_HUB_INT_MMU_CAP |	\
			     V3D_HUB_INT_TFUC))
 
+static irqreturn_t
+v3d_hub_irq(int irq, void *arg);
+
 static void
 v3d_overflow_mem_work(struct work_struct *work)
 {
@@ -34,12 +37,14 @@ v3d_overflow_mem_work(struct work_struct *work)
		container_of(work, struct v3d_dev, overflow_mem_work);
 	struct drm_device *dev = &v3d->drm;
 	struct v3d_bo *bo = v3d_bo_create(dev, NULL /* XXX: GMP */, 256 * 1024);
+	struct drm_gem_object *obj;
 	unsigned long irqflags;
 
 	if (IS_ERR(bo)) {
 		DRM_ERROR("Couldn't allocate binner overflow mem\n");
 		return;
 	}
+	obj = &bo->base.base;
 
 	/* We lost a race, and our work task came in after the bin job
	 * completed and exited. This can happen because the HW
@@ -56,15 +61,15 @@ v3d_overflow_mem_work(struct work_struct *work)
 		goto out;
 	}
 
-	drm_gem_object_get(&bo->base);
+	drm_gem_object_get(obj);
 	list_add_tail(&bo->unref_head, &v3d->bin_job->unref_list);
 	spin_unlock_irqrestore(&v3d->job_lock, irqflags);
 
 	V3D_CORE_WRITE(0, V3D_PTB_BPOA, bo->node.start << PAGE_SHIFT);
-	V3D_CORE_WRITE(0, V3D_PTB_BPOS, bo->base.size);
+	V3D_CORE_WRITE(0, V3D_PTB_BPOS, obj->size);
 
 out:
-	drm_gem_object_put_unlocked(&bo->base);
+	drm_gem_object_put_unlocked(obj);
 }
 
 static irqreturn_t
@@ -112,6 +117,12 @@ v3d_irq(int irq, void *arg)
 	if (intsts & V3D_INT_GMPV)
 		dev_err(v3d->dev, "GMP violation\n");
 
+	/* V3D 4.2 wires the hub and core IRQs together, so if we &
+	 * didn't see the common one then check hub for MMU IRQs.
+	 */
+	if (v3d->single_irq_line && status == IRQ_NONE)
+		return v3d_hub_irq(irq, arg);
+
 	return status;
 }
 
@@ -156,10 +167,10 @@ v3d_hub_irq(int irq, void *arg)
 	return status;
 }
 
-void
+int
 v3d_irq_init(struct v3d_dev *v3d)
 {
-	int ret, core;
+	int irq1, ret, core;
 
 	INIT_WORK(&v3d->overflow_mem_work, v3d_overflow_mem_work);
 
@@ -170,16 +181,37 @@ v3d_irq_init(struct v3d_dev *v3d)
 		V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
 	V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);
 
-	ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0),
-			       v3d_hub_irq, IRQF_SHARED,
-			       "v3d_hub", v3d);
-	ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 1),
-			       v3d_irq, IRQF_SHARED,
-			       "v3d_core0", v3d);
-	if (ret)
-		dev_err(v3d->dev, "IRQ setup failed: %d\n", ret);
+	irq1 = platform_get_irq(v3d->pdev, 1);
+	if (irq1 == -EPROBE_DEFER)
+		return irq1;
+	if (irq1 > 0) {
+		ret = devm_request_irq(v3d->dev, irq1,
+				       v3d_irq, IRQF_SHARED,
+				       "v3d_core0", v3d);
+		if (ret)
+			goto fail;
+		ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0),
+				       v3d_hub_irq, IRQF_SHARED,
+				       "v3d_hub", v3d);
+		if (ret)
+			goto fail;
+	} else {
+		v3d->single_irq_line = true;
+
+		ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0),
+				       v3d_irq, IRQF_SHARED,
+				       "v3d", v3d);
+		if (ret)
+			goto fail;
+	}
 
 	v3d_irq_enable(v3d);
+	return 0;
+
+fail:
+	if (ret != -EPROBE_DEFER)
+		dev_err(v3d->dev, "IRQ setup failed: %d\n", ret);
+	return ret;
 }
 
 void
diff --git a/drivers/gpu/drm/v3d/v3d_mmu.c b/drivers/gpu/drm/v3d/v3d_mmu.c
index b00f97c31b70..7a21f1787ab1 100644
--- a/drivers/gpu/drm/v3d/v3d_mmu.c
+++ b/drivers/gpu/drm/v3d/v3d_mmu.c
@@ -83,13 +83,14 @@ int v3d_mmu_set_page_table(struct v3d_dev *v3d)
 
 void v3d_mmu_insert_ptes(struct v3d_bo *bo)
 {
-	struct v3d_dev *v3d = to_v3d_dev(bo->base.dev);
+	struct drm_gem_shmem_object *shmem_obj = &bo->base;
+	struct v3d_dev *v3d = to_v3d_dev(shmem_obj->base.dev);
 	u32 page = bo->node.start;
 	u32 page_prot = V3D_PTE_WRITEABLE | V3D_PTE_VALID;
 	unsigned int count;
 	struct scatterlist *sgl;
 
-	for_each_sg(bo->sgt->sgl, sgl, bo->sgt->nents, count) {
+	for_each_sg(shmem_obj->sgt->sgl, sgl, shmem_obj->sgt->nents, count) {
 		u32 page_address = sg_dma_address(sgl) >> V3D_MMU_PAGE_SHIFT;
 		u32 pte = page_prot | page_address;
 		u32 i;
@@ -102,7 +103,7 @@ void v3d_mmu_insert_ptes(struct v3d_bo *bo)
 	}
 
 	WARN_ON_ONCE(page - bo->node.start !=
-		     bo->base.size >> V3D_MMU_PAGE_SHIFT);
+		     shmem_obj->base.size >> V3D_MMU_PAGE_SHIFT);
 
 	if (v3d_mmu_flush_all(v3d))
 		dev_err(v3d->dev, "MMU flush timeout\n");
@@ -110,8 +111,8 @@ void v3d_mmu_insert_ptes(struct v3d_bo *bo)
 
 void v3d_mmu_remove_ptes(struct v3d_bo *bo)
 {
-	struct v3d_dev *v3d = to_v3d_dev(bo->base.dev);
-	u32 npages = bo->base.size >> V3D_MMU_PAGE_SHIFT;
+	struct v3d_dev *v3d = to_v3d_dev(bo->base.base.dev);
+	u32 npages = bo->base.base.size >> V3D_MMU_PAGE_SHIFT;
 	u32 page;
 
 	for (page = bo->node.start; page < bo->node.start + npages; page++)
diff --git a/drivers/gpu/drm/v3d/v3d_regs.h b/drivers/gpu/drm/v3d/v3d_regs.h
index 6ccdee9d47bd..8e88af237610 100644
--- a/drivers/gpu/drm/v3d/v3d_regs.h
+++ b/drivers/gpu/drm/v3d/v3d_regs.h
@@ -216,6 +216,8 @@
 # define V3D_IDENT2_BCG_INT                            BIT(28)
 
 #define V3D_CTL_MISCCFG                                0x00018
+# define V3D_CTL_MISCCFG_QRMAXCNT_MASK                 V3D_MASK(3, 1)
+# define V3D_CTL_MISCCFG_QRMAXCNT_SHIFT                1
 # define V3D_MISCCFG_OVRTMUOUT                         BIT(0)
 
 #define V3D_CTL_L2CACTL                                0x00020
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index 4704b2df3688..d0c68b7c8b41 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -231,20 +231,17 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
 	mutex_lock(&v3d->reset_lock);
 
 	/* block scheduler */
-	for (q = 0; q < V3D_MAX_QUEUES; q++) {
-		struct drm_gpu_scheduler *sched = &v3d->queue[q].sched;
-
-		drm_sched_stop(sched);
+	for (q = 0; q < V3D_MAX_QUEUES; q++)
+		drm_sched_stop(&v3d->queue[q].sched);
 
-		if(sched_job)
-			drm_sched_increase_karma(sched_job);
-	}
+	if (sched_job)
+		drm_sched_increase_karma(sched_job);
 
 	/* get the GPU back into the init state */
 	v3d_reset(v3d);
 
 	for (q = 0; q < V3D_MAX_QUEUES; q++)
-		drm_sched_resubmit_jobs(sched_job->sched);
+		drm_sched_resubmit_jobs(&v3d->queue[q].sched);
 
 	/* Unblock schedulers and restart their jobs. */
 	for (q = 0; q < V3D_MAX_QUEUES; q++) {
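For orientation, the complete timeout-recovery sequence after this fix, gathered into one hypothetical helper (the drm_sched_start() loop sits just past the end of the hunk; locking is elided):

static void my_reset_all_queues(struct v3d_dev *v3d,
				struct drm_sched_job *bad)
{
	int q;

	/* Park every scheduler thread so no new jobs are pushed. */
	for (q = 0; q < V3D_MAX_QUEUES; q++)
		drm_sched_stop(&v3d->queue[q].sched);

	/* Blame the hanging job once, not once per queue. */
	if (bad)
		drm_sched_increase_karma(bad);

	v3d_reset(v3d);		/* GPU back to its init state */

	/* Re-queue each queue's own pending jobs, then restart. */
	for (q = 0; q < V3D_MAX_QUEUES; q++)
		drm_sched_resubmit_jobs(&v3d->queue[q].sched);
	for (q = 0; q < V3D_MAX_QUEUES; q++)
		drm_sched_start(&v3d->queue[q].sched, true);
}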
diff --git a/drivers/gpu/drm/vboxvideo/Kconfig b/drivers/gpu/drm/vboxvideo/Kconfig
new file mode 100644
index 000000000000..1f4182e2e980
--- /dev/null
+++ b/drivers/gpu/drm/vboxvideo/Kconfig
@@ -0,0 +1,15 @@
1config DRM_VBOXVIDEO
2 tristate "Virtual Box Graphics Card"
3 depends on DRM && X86 && PCI
4 select DRM_KMS_HELPER
5 select DRM_TTM
6 select GENERIC_ALLOCATOR
7 help
8 This is a KMS driver for the virtual Graphics Card used in
9 Virtual Box virtual machines.
10
11 Although it is possible to build this driver built-in to the
12 kernel, it is advised to build it as a module, so that it can
13 be updated independently of the kernel. Select M to build this
14 driver as a module and add support for these devices via drm/kms
15 interfaces.
diff --git a/drivers/gpu/drm/vboxvideo/Makefile b/drivers/gpu/drm/vboxvideo/Makefile
new file mode 100644
index 000000000000..1224f313af0c
--- /dev/null
+++ b/drivers/gpu/drm/vboxvideo/Makefile
@@ -0,0 +1,6 @@
1# SPDX-License-Identifier: GPL-2.0
2vboxvideo-y := hgsmi_base.o modesetting.o vbva_base.o \
3 vbox_drv.o vbox_fb.o vbox_hgsmi.o vbox_irq.o vbox_main.o \
4 vbox_mode.o vbox_prime.o vbox_ttm.o
5
6obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo.o
diff --git a/drivers/gpu/drm/vboxvideo/hgsmi_base.c b/drivers/gpu/drm/vboxvideo/hgsmi_base.c
new file mode 100644
index 000000000000..361d3193258e
--- /dev/null
+++ b/drivers/gpu/drm/vboxvideo/hgsmi_base.c
@@ -0,0 +1,207 @@
1// SPDX-License-Identifier: MIT
2/* Copyright (C) 2006-2017 Oracle Corporation */
3
4#include <linux/vbox_err.h>
5#include "vbox_drv.h"
6#include "vboxvideo_guest.h"
7#include "vboxvideo_vbe.h"
8#include "hgsmi_channels.h"
9#include "hgsmi_ch_setup.h"
10
11/**
12 * Inform the host of the location of the host flags in VRAM via an HGSMI cmd.
13 * Return: 0 or negative errno value.
14 * @ctx: The context of the guest heap to use.
15 * @location: The offset chosen for the flags within guest VRAM.
16 */
17int hgsmi_report_flags_location(struct gen_pool *ctx, u32 location)
18{
19 struct hgsmi_buffer_location *p;
20
21 p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_HGSMI,
22 HGSMI_CC_HOST_FLAGS_LOCATION);
23 if (!p)
24 return -ENOMEM;
25
26 p->buf_location = location;
27 p->buf_len = sizeof(struct hgsmi_host_flags);
28
29 hgsmi_buffer_submit(ctx, p);
30 hgsmi_buffer_free(ctx, p);
31
32 return 0;
33}
34
35/**
36 * Notify the host of HGSMI-related guest capabilities via an HGSMI command.
37 * Return: 0 or negative errno value.
38 * @ctx: The context of the guest heap to use.
39 * @caps: The capabilities to report, see vbva_caps.
40 */
41int hgsmi_send_caps_info(struct gen_pool *ctx, u32 caps)
42{
43 struct vbva_caps *p;
44
45 p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_INFO_CAPS);
46 if (!p)
47 return -ENOMEM;
48
49 p->rc = VERR_NOT_IMPLEMENTED;
50 p->caps = caps;
51
52 hgsmi_buffer_submit(ctx, p);
53
54 WARN_ON_ONCE(p->rc < 0);
55
56 hgsmi_buffer_free(ctx, p);
57
58 return 0;
59}
60
61int hgsmi_test_query_conf(struct gen_pool *ctx)
62{
63 u32 value = 0;
64 int ret;
65
66 ret = hgsmi_query_conf(ctx, U32_MAX, &value);
67 if (ret)
68 return ret;
69
70 return value == U32_MAX ? 0 : -EIO;
71}
72
73/**
74 * Query the host for an HGSMI configuration parameter via an HGSMI command.
75 * Return: 0 or negative errno value.
76 * @ctx: The context containing the heap used.
77 * @index: The index of the parameter to query.
78 * @value_ret: Where to store the value of the parameter on success.
79 */
80int hgsmi_query_conf(struct gen_pool *ctx, u32 index, u32 *value_ret)
81{
82 struct vbva_conf32 *p;
83
84 p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA,
85 VBVA_QUERY_CONF32);
86 if (!p)
87 return -ENOMEM;
88
89 p->index = index;
90 p->value = U32_MAX;
91
92 hgsmi_buffer_submit(ctx, p);
93
94 *value_ret = p->value;
95
96 hgsmi_buffer_free(ctx, p);
97
98 return 0;
99}
100
101/**
102 * Pass the host a new mouse pointer shape via an HGSMI command.
103 * Return: 0 or negative errno value.
104 * @ctx: The context containing the heap to be used.
105 * @flags: Cursor flags.
106 * @hot_x: Horizontal position of the hot spot.
107 * @hot_y: Vertical position of the hot spot.
108 * @width: Width in pixels of the cursor.
109 * @height: Height in pixels of the cursor.
110 * @pixels:	Pixel data; see VMMDevReqMousePointer for the format.
111 * @len: Size in bytes of the pixel data.
112 */
113int hgsmi_update_pointer_shape(struct gen_pool *ctx, u32 flags,
114 u32 hot_x, u32 hot_y, u32 width, u32 height,
115 u8 *pixels, u32 len)
116{
117 struct vbva_mouse_pointer_shape *p;
118 u32 pixel_len = 0;
119 int rc;
120
121 if (flags & VBOX_MOUSE_POINTER_SHAPE) {
122 /*
123 * Size of the pointer data:
124 * sizeof(AND mask) + sizeof(XOR mask)
125 */
126 pixel_len = ((((width + 7) / 8) * height + 3) & ~3) +
127 width * 4 * height;
128 if (pixel_len > len)
129 return -EINVAL;
130
131 /*
132 * If shape is supplied, then always create the pointer visible.
133 * See comments in 'vboxUpdatePointerShape'
134 */
135 flags |= VBOX_MOUSE_POINTER_VISIBLE;
136 }
137
138 p = hgsmi_buffer_alloc(ctx, sizeof(*p) + pixel_len, HGSMI_CH_VBVA,
139 VBVA_MOUSE_POINTER_SHAPE);
140 if (!p)
141 return -ENOMEM;
142
143 p->result = VINF_SUCCESS;
144 p->flags = flags;
145	p->hot_x = hot_x;
146 p->hot_y = hot_y;
147 p->width = width;
148 p->height = height;
149 if (pixel_len)
150 memcpy(p->data, pixels, pixel_len);
151
152 hgsmi_buffer_submit(ctx, p);
153
154 switch (p->result) {
155 case VINF_SUCCESS:
156 rc = 0;
157 break;
158 case VERR_NO_MEMORY:
159 rc = -ENOMEM;
160 break;
161 case VERR_NOT_SUPPORTED:
162 rc = -EBUSY;
163 break;
164 default:
165 rc = -EINVAL;
166 }
167
168 hgsmi_buffer_free(ctx, p);
169
170 return rc;
171}
172
173/**
174 * Report the guest cursor position. The host may wish to use this information
175 * to re-position its own cursor (though this is currently unlikely). The
176 * current host cursor position is returned.
177 * Return: 0 or negative errno value.
178 * @ctx: The context containing the heap used.
179 * @report_position: Are we reporting a position?
180 * @x: Guest cursor X position.
181 * @y: Guest cursor Y position.
182 * @x_host: Host cursor X position is stored here. Optional.
183 * @y_host: Host cursor Y position is stored here. Optional.
184 */
185int hgsmi_cursor_position(struct gen_pool *ctx, bool report_position,
186 u32 x, u32 y, u32 *x_host, u32 *y_host)
187{
188 struct vbva_cursor_position *p;
189
190 p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA,
191 VBVA_CURSOR_POSITION);
192 if (!p)
193 return -ENOMEM;
194
195 p->report_position = report_position;
196 p->x = x;
197 p->y = y;
198
199 hgsmi_buffer_submit(ctx, p);
200
201 *x_host = p->x;
202 *y_host = p->y;
203
204 hgsmi_buffer_free(ctx, p);
205
206 return 0;
207}
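A usage sketch for hgsmi_query_conf(): read the host's monitor count and clamp it, as the driver's init path does later in this series. my_monitor_count() is illustrative; the constants come from the vboxvideo headers.

static u32 my_monitor_count(struct vbox_private *vbox)
{
	u32 count = 1;

	/* Failure only means the host predates the query interface. */
	if (hgsmi_query_conf(vbox->guest_pool,
			     VBOX_VBVA_CONF32_MONITOR_COUNT, &count))
		return 1;

	return clamp_t(u32, count, 1, VBOX_MAX_SCREENS);
}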
diff --git a/drivers/gpu/drm/vboxvideo/hgsmi_ch_setup.h b/drivers/gpu/drm/vboxvideo/hgsmi_ch_setup.h
new file mode 100644
index 000000000000..4e93418d6a13
--- /dev/null
+++ b/drivers/gpu/drm/vboxvideo/hgsmi_ch_setup.h
@@ -0,0 +1,32 @@
1/* SPDX-License-Identifier: MIT */
2/* Copyright (C) 2006-2017 Oracle Corporation */
3
4#ifndef __HGSMI_CH_SETUP_H__
5#define __HGSMI_CH_SETUP_H__
6
7/*
8 * Tell the host the location of the hgsmi_host_flags structure, where the
9 * host can write information about pending buffers, etc., and which the
10 * guest can poll quickly without needing port I/O accesses.
11 */
12#define HGSMI_CC_HOST_FLAGS_LOCATION 0
13
14struct hgsmi_buffer_location {
15 u32 buf_location;
16 u32 buf_len;
17} __packed;
18
19/* HGSMI setup and configuration data structures. */
20
21#define HGSMIHOSTFLAGS_COMMANDS_PENDING 0x01u
22#define HGSMIHOSTFLAGS_IRQ 0x02u
23#define HGSMIHOSTFLAGS_VSYNC 0x10u
24#define HGSMIHOSTFLAGS_HOTPLUG 0x20u
25#define HGSMIHOSTFLAGS_CURSOR_CAPABILITIES 0x40u
26
27struct hgsmi_host_flags {
28 u32 host_flags;
29 u32 reserved[3];
30} __packed;
31
32#endif
diff --git a/drivers/gpu/drm/vboxvideo/hgsmi_channels.h b/drivers/gpu/drm/vboxvideo/hgsmi_channels.h
new file mode 100644
index 000000000000..9b83f4ff3faf
--- /dev/null
+++ b/drivers/gpu/drm/vboxvideo/hgsmi_channels.h
@@ -0,0 +1,34 @@
1/* SPDX-License-Identifier: MIT */
2/* Copyright (C) 2006-2017 Oracle Corporation */
3
4#ifndef __HGSMI_CHANNELS_H__
5#define __HGSMI_CHANNELS_H__
6
7/*
8 * Each channel has an 8 bit identifier. There are a number of predefined
9 * (hardcoded) channels.
10 *
11 * HGSMI_CH_HGSMI channel can be used to map a string channel identifier
12 * to a free 16-bit numerical value. Values are allocated in the range
13 * [HGSMI_CH_STRING_FIRST;HGSMI_CH_STRING_LAST].
14 */
15
16/* A reserved channel value */
17#define HGSMI_CH_RESERVED 0x00
18/* HGSMI: setup and configuration */
19#define HGSMI_CH_HGSMI 0x01
20/* Graphics: VBVA */
21#define HGSMI_CH_VBVA 0x02
22/* Graphics: Seamless with a single guest region */
23#define HGSMI_CH_SEAMLESS 0x03
24/* Graphics: Seamless with separate host windows */
25#define HGSMI_CH_SEAMLESS2 0x04
26/* Graphics: OpenGL HW acceleration */
27#define HGSMI_CH_OPENGL 0x05
28
29/* The first channel index to be used for string mappings (inclusive) */
30#define HGSMI_CH_STRING_FIRST 0x20
31/* The last channel index for string mappings (inclusive) */
32#define HGSMI_CH_STRING_LAST 0xff
33
34#endif
diff --git a/drivers/gpu/drm/vboxvideo/hgsmi_defs.h b/drivers/gpu/drm/vboxvideo/hgsmi_defs.h
new file mode 100644
index 000000000000..6c8df1cdb087
--- /dev/null
+++ b/drivers/gpu/drm/vboxvideo/hgsmi_defs.h
@@ -0,0 +1,73 @@
1/* SPDX-License-Identifier: MIT */
2/* Copyright (C) 2006-2017 Oracle Corporation */
3
4#ifndef __HGSMI_DEFS_H__
5#define __HGSMI_DEFS_H__
6
7/* Buffer sequence type mask. */
8#define HGSMI_BUFFER_HEADER_F_SEQ_MASK 0x03
9/* Single buffer, not a part of a sequence. */
10#define HGSMI_BUFFER_HEADER_F_SEQ_SINGLE 0x00
11/* The first buffer in a sequence. */
12#define HGSMI_BUFFER_HEADER_F_SEQ_START 0x01
13/* A middle buffer in a sequence. */
14#define HGSMI_BUFFER_HEADER_F_SEQ_CONTINUE 0x02
15/* The last buffer in a sequence. */
16#define HGSMI_BUFFER_HEADER_F_SEQ_END 0x03
17
18/* 16 bytes buffer header. */
19struct hgsmi_buffer_header {
20 u32 data_size; /* Size of data that follows the header. */
21 u8 flags; /* HGSMI_BUFFER_HEADER_F_* */
22 u8 channel; /* The channel the data must be routed to. */
23 u16 channel_info; /* Opaque to the HGSMI, used by the channel. */
24
25 union {
26 /* Opaque placeholder to make the union 8 bytes. */
27 u8 header_data[8];
28
29 /* HGSMI_BUFFER_HEADER_F_SEQ_SINGLE */
30 struct {
31 u32 reserved1; /* A reserved field, initialize to 0. */
32 u32 reserved2; /* A reserved field, initialize to 0. */
33 } buffer;
34
35 /* HGSMI_BUFFER_HEADER_F_SEQ_START */
36 struct {
37 /* Must be the same for all buffers in the sequence. */
38 u32 sequence_number;
39 /* The total size of the sequence. */
40 u32 sequence_size;
41 } sequence_start;
42
43 /*
44 * HGSMI_BUFFER_HEADER_F_SEQ_CONTINUE and
45 * HGSMI_BUFFER_HEADER_F_SEQ_END
46 */
47 struct {
48 /* Must be the same for all buffers in the sequence. */
49 u32 sequence_number;
50 /* Data offset in the entire sequence. */
51 u32 sequence_offset;
52 } sequence_continue;
53 } u;
54} __packed;
55
56/* 8 bytes buffer tail. */
57struct hgsmi_buffer_tail {
58 /* Reserved, must be initialized to 0. */
59 u32 reserved;
60 /*
61 * One-at-a-Time Hash: http://www.burtleburtle.net/bob/hash/doobs.html
62 * Over the header, offset and for first 4 bytes of the tail.
63 */
64 u32 checksum;
65} __packed;
66
67/*
68 * The size of the array of channels. Array indexes are u8.
69 * Note: the value must not be changed.
70 */
71#define HGSMI_NUMBER_OF_CHANNELS 0x100
72
73#endif
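Every HGSMI transfer is laid out back to back as header, payload, tail, so the allocation size follows directly from these structs. A hypothetical helper making the arithmetic explicit (hgsmi_buffer_alloc() in vbox_hgsmi.c below computes the same sum inline):

static size_t my_hgsmi_total_size(size_t payload)
{
	return sizeof(struct hgsmi_buffer_header) + payload +
	       sizeof(struct hgsmi_buffer_tail);
}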
diff --git a/drivers/gpu/drm/vboxvideo/modesetting.c b/drivers/gpu/drm/vboxvideo/modesetting.c
new file mode 100644
index 000000000000..7580b9002379
--- /dev/null
+++ b/drivers/gpu/drm/vboxvideo/modesetting.c
@@ -0,0 +1,123 @@
1// SPDX-License-Identifier: MIT
2/* Copyright (C) 2006-2017 Oracle Corporation */
3
4#include <linux/vbox_err.h>
5#include "vbox_drv.h"
6#include "vboxvideo_guest.h"
7#include "vboxvideo_vbe.h"
8#include "hgsmi_channels.h"
9
10/**
11 * Set a video mode via an HGSMI request. The views must have been
12 * initialised first using VBoxHGSMISendViewInfo() and, if the mode is being
13 * set on the first display, it must first be set using registers.
14 * @ctx: The context containing the heap to use.
15 * @display: The screen number.
16 * @origin_x:    The horizontal displacement relative to the first screen.
17 * @origin_y: The vertical displacement relative to the first screen.
18 * @start_offset: The offset of the visible area of the framebuffer
19 * relative to the framebuffer start.
20 * @pitch:       The offset in bytes between the starts of two adjacent
21 * scan lines in video RAM.
22 * @width: The mode width.
23 * @height: The mode height.
24 * @bpp: The colour depth of the mode.
25 * @flags: Flags.
26 */
27void hgsmi_process_display_info(struct gen_pool *ctx, u32 display,
28 s32 origin_x, s32 origin_y, u32 start_offset,
29 u32 pitch, u32 width, u32 height,
30 u16 bpp, u16 flags)
31{
32 struct vbva_infoscreen *p;
33
34 p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA,
35 VBVA_INFO_SCREEN);
36 if (!p)
37 return;
38
39 p->view_index = display;
40 p->origin_x = origin_x;
41 p->origin_y = origin_y;
42 p->start_offset = start_offset;
43 p->line_size = pitch;
44 p->width = width;
45 p->height = height;
46 p->bits_per_pixel = bpp;
47 p->flags = flags;
48
49 hgsmi_buffer_submit(ctx, p);
50 hgsmi_buffer_free(ctx, p);
51}
52
53/**
54 * Report the rectangle relative to which absolute pointer events should be
55 * expressed. This information remains valid until the next VBVA resize event
56 * for any screen, at which time it is reset to the bounding rectangle of all
57 * virtual screens.
58 * Return: 0 or negative errno value.
59 * @ctx: The context containing the heap to use.
60 * @origin_x: Upper left X co-ordinate relative to the first screen.
61 * @origin_y: Upper left Y co-ordinate relative to the first screen.
62 * @width: Rectangle width.
63 * @height: Rectangle height.
64 */
65int hgsmi_update_input_mapping(struct gen_pool *ctx, s32 origin_x, s32 origin_y,
66 u32 width, u32 height)
67{
68 struct vbva_report_input_mapping *p;
69
70 p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA,
71 VBVA_REPORT_INPUT_MAPPING);
72 if (!p)
73 return -ENOMEM;
74
75 p->x = origin_x;
76 p->y = origin_y;
77 p->cx = width;
78 p->cy = height;
79
80 hgsmi_buffer_submit(ctx, p);
81 hgsmi_buffer_free(ctx, p);
82
83 return 0;
84}
85
86/**
87 * Get most recent video mode hints.
88 * Return: 0 or negative errno value.
89 * @ctx: The context containing the heap to use.
90 * @screens: The number of screens to query hints for, starting at 0.
91 * @hints: Array of vbva_modehint structures for receiving the hints.
92 */
93int hgsmi_get_mode_hints(struct gen_pool *ctx, unsigned int screens,
94 struct vbva_modehint *hints)
95{
96 struct vbva_query_mode_hints *p;
97 size_t size;
98
99 if (WARN_ON(!hints))
100 return -EINVAL;
101
102 size = screens * sizeof(struct vbva_modehint);
103 p = hgsmi_buffer_alloc(ctx, sizeof(*p) + size, HGSMI_CH_VBVA,
104 VBVA_QUERY_MODE_HINTS);
105 if (!p)
106 return -ENOMEM;
107
108 p->hints_queried_count = screens;
109 p->hint_structure_guest_size = sizeof(struct vbva_modehint);
110 p->rc = VERR_NOT_SUPPORTED;
111
112 hgsmi_buffer_submit(ctx, p);
113
114 if (p->rc < 0) {
115 hgsmi_buffer_free(ctx, p);
116 return -EIO;
117 }
118
119 memcpy(hints, ((u8 *)p) + sizeof(struct vbva_query_mode_hints), size);
120 hgsmi_buffer_free(ctx, p);
121
122 return 0;
123}
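A hypothetical call of hgsmi_process_display_info() above, programming screen 0 as a 1024x768, 32 bpp frame-buffer at the start of VRAM (VBVA_SCREEN_F_ACTIVE comes from vboxvideo.h):

	hgsmi_process_display_info(vbox->guest_pool,
				   0,		/* display */
				   0, 0,	/* origin_x, origin_y */
				   0,		/* start_offset */
				   1024 * 4,	/* pitch: width * 4 at 32 bpp */
				   1024, 768,	/* width, height */
				   32,		/* bpp */
				   VBVA_SCREEN_F_ACTIVE);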
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c
new file mode 100644
index 000000000000..fb6a0f0b8167
--- /dev/null
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
@@ -0,0 +1,258 @@
1// SPDX-License-Identifier: MIT
2/*
3 * Copyright (C) 2013-2017 Oracle Corporation
4 * This file is based on ast_drv.c
5 * Copyright 2012 Red Hat Inc.
6 * Authors: Dave Airlie <airlied@redhat.com>
7 *          Michael Thayer <michael.thayer@oracle.com>,
8 * Hans de Goede <hdegoede@redhat.com>
9 */
10#include <linux/console.h>
11#include <linux/module.h>
12#include <linux/pci.h>
13#include <linux/vt_kern.h>
14
15#include <drm/drm_crtc_helper.h>
16#include <drm/drm_drv.h>
17#include <drm/drm_file.h>
18#include <drm/drm_ioctl.h>
19
20#include "vbox_drv.h"
21
22static int vbox_modeset = -1;
23
24MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
25module_param_named(modeset, vbox_modeset, int, 0400);
26
27static struct drm_driver driver;
28
29static const struct pci_device_id pciidlist[] = {
30 { PCI_DEVICE(0x80ee, 0xbeef) },
31 { }
32};
33MODULE_DEVICE_TABLE(pci, pciidlist);
34
35static struct drm_fb_helper_funcs vbox_fb_helper_funcs = {
36 .fb_probe = vboxfb_create,
37};
38
39static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
40{
41 struct vbox_private *vbox;
42 int ret = 0;
43
44 if (!vbox_check_supported(VBE_DISPI_ID_HGSMI))
45 return -ENODEV;
46
47 vbox = kzalloc(sizeof(*vbox), GFP_KERNEL);
48 if (!vbox)
49 return -ENOMEM;
50
51 ret = drm_dev_init(&vbox->ddev, &driver, &pdev->dev);
52 if (ret) {
53 kfree(vbox);
54 return ret;
55 }
56
57 vbox->ddev.pdev = pdev;
58 vbox->ddev.dev_private = vbox;
59 pci_set_drvdata(pdev, vbox);
60 mutex_init(&vbox->hw_mutex);
61
62 ret = pci_enable_device(pdev);
63 if (ret)
64 goto err_dev_put;
65
66 ret = vbox_hw_init(vbox);
67 if (ret)
68 goto err_pci_disable;
69
70 ret = vbox_mm_init(vbox);
71 if (ret)
72 goto err_hw_fini;
73
74 ret = vbox_mode_init(vbox);
75 if (ret)
76 goto err_mm_fini;
77
78 ret = vbox_irq_init(vbox);
79 if (ret)
80 goto err_mode_fini;
81
82 ret = drm_fb_helper_fbdev_setup(&vbox->ddev, &vbox->fb_helper,
83 &vbox_fb_helper_funcs, 32,
84 vbox->num_crtcs);
85 if (ret)
86 goto err_irq_fini;
87
88 ret = drm_dev_register(&vbox->ddev, 0);
89 if (ret)
90 goto err_fbdev_fini;
91
92 return 0;
93
94err_fbdev_fini:
95 vbox_fbdev_fini(vbox);
96err_irq_fini:
97 vbox_irq_fini(vbox);
98err_mode_fini:
99 vbox_mode_fini(vbox);
100err_mm_fini:
101 vbox_mm_fini(vbox);
102err_hw_fini:
103 vbox_hw_fini(vbox);
104err_pci_disable:
105 pci_disable_device(pdev);
106err_dev_put:
107 drm_dev_put(&vbox->ddev);
108 return ret;
109}
110
111static void vbox_pci_remove(struct pci_dev *pdev)
112{
113 struct vbox_private *vbox = pci_get_drvdata(pdev);
114
115 drm_dev_unregister(&vbox->ddev);
116 vbox_fbdev_fini(vbox);
117 vbox_irq_fini(vbox);
118 vbox_mode_fini(vbox);
119 vbox_mm_fini(vbox);
120 vbox_hw_fini(vbox);
121 drm_dev_put(&vbox->ddev);
122}
123
124#ifdef CONFIG_PM_SLEEP
125static int vbox_pm_suspend(struct device *dev)
126{
127 struct vbox_private *vbox = dev_get_drvdata(dev);
128 int error;
129
130 error = drm_mode_config_helper_suspend(&vbox->ddev);
131 if (error)
132 return error;
133
134 pci_save_state(vbox->ddev.pdev);
135 pci_disable_device(vbox->ddev.pdev);
136 pci_set_power_state(vbox->ddev.pdev, PCI_D3hot);
137
138 return 0;
139}
140
141static int vbox_pm_resume(struct device *dev)
142{
143 struct vbox_private *vbox = dev_get_drvdata(dev);
144
145 if (pci_enable_device(vbox->ddev.pdev))
146 return -EIO;
147
148 return drm_mode_config_helper_resume(&vbox->ddev);
149}
150
151static int vbox_pm_freeze(struct device *dev)
152{
153 struct vbox_private *vbox = dev_get_drvdata(dev);
154
155 return drm_mode_config_helper_suspend(&vbox->ddev);
156}
157
158static int vbox_pm_thaw(struct device *dev)
159{
160 struct vbox_private *vbox = dev_get_drvdata(dev);
161
162 return drm_mode_config_helper_resume(&vbox->ddev);
163}
164
165static int vbox_pm_poweroff(struct device *dev)
166{
167 struct vbox_private *vbox = dev_get_drvdata(dev);
168
169 return drm_mode_config_helper_suspend(&vbox->ddev);
170}
171
172static const struct dev_pm_ops vbox_pm_ops = {
173 .suspend = vbox_pm_suspend,
174 .resume = vbox_pm_resume,
175 .freeze = vbox_pm_freeze,
176 .thaw = vbox_pm_thaw,
177 .poweroff = vbox_pm_poweroff,
178 .restore = vbox_pm_resume,
179};
180#endif
181
182static struct pci_driver vbox_pci_driver = {
183 .name = DRIVER_NAME,
184 .id_table = pciidlist,
185 .probe = vbox_pci_probe,
186 .remove = vbox_pci_remove,
187#ifdef CONFIG_PM_SLEEP
188 .driver.pm = &vbox_pm_ops,
189#endif
190};
191
192static const struct file_operations vbox_fops = {
193 .owner = THIS_MODULE,
194 .open = drm_open,
195 .release = drm_release,
196 .unlocked_ioctl = drm_ioctl,
197 .compat_ioctl = drm_compat_ioctl,
198 .mmap = vbox_mmap,
199 .poll = drm_poll,
200 .read = drm_read,
201};
202
203static struct drm_driver driver = {
204 .driver_features =
205 DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC,
206
207 .lastclose = drm_fb_helper_lastclose,
208
209 .fops = &vbox_fops,
210 .irq_handler = vbox_irq_handler,
211 .name = DRIVER_NAME,
212 .desc = DRIVER_DESC,
213 .date = DRIVER_DATE,
214 .major = DRIVER_MAJOR,
215 .minor = DRIVER_MINOR,
216 .patchlevel = DRIVER_PATCHLEVEL,
217
218 .gem_free_object_unlocked = vbox_gem_free_object,
219 .dumb_create = vbox_dumb_create,
220 .dumb_map_offset = vbox_dumb_mmap_offset,
221 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
222 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
223 .gem_prime_export = drm_gem_prime_export,
224 .gem_prime_import = drm_gem_prime_import,
225 .gem_prime_pin = vbox_gem_prime_pin,
226 .gem_prime_unpin = vbox_gem_prime_unpin,
227 .gem_prime_get_sg_table = vbox_gem_prime_get_sg_table,
228 .gem_prime_import_sg_table = vbox_gem_prime_import_sg_table,
229 .gem_prime_vmap = vbox_gem_prime_vmap,
230 .gem_prime_vunmap = vbox_gem_prime_vunmap,
231 .gem_prime_mmap = vbox_gem_prime_mmap,
232};
233
234static int __init vbox_init(void)
235{
236#ifdef CONFIG_VGA_CONSOLE
237 if (vgacon_text_force() && vbox_modeset == -1)
238 return -EINVAL;
239#endif
240
241 if (vbox_modeset == 0)
242 return -EINVAL;
243
244 return pci_register_driver(&vbox_pci_driver);
245}
246
247static void __exit vbox_exit(void)
248{
249 pci_unregister_driver(&vbox_pci_driver);
250}
251
252module_init(vbox_init);
253module_exit(vbox_exit);
254
255MODULE_AUTHOR("Oracle Corporation");
256MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
257MODULE_DESCRIPTION(DRIVER_DESC);
258MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.h b/drivers/gpu/drm/vboxvideo/vbox_drv.h
new file mode 100644
index 000000000000..0ecd0a44176e
--- /dev/null
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.h
@@ -0,0 +1,273 @@
1/* SPDX-License-Identifier: MIT */
2/*
3 * Copyright (C) 2013-2017 Oracle Corporation
4 * This file is based on ast_drv.h
5 * Copyright 2012 Red Hat Inc.
6 * Authors: Dave Airlie <airlied@redhat.com>
7 *          Michael Thayer <michael.thayer@oracle.com>,
8 * Hans de Goede <hdegoede@redhat.com>
9 */
10#ifndef __VBOX_DRV_H__
11#define __VBOX_DRV_H__
12
13#include <linux/genalloc.h>
14#include <linux/io.h>
15#include <linux/irqreturn.h>
16#include <linux/string.h>
17
18#include <drm/drm_encoder.h>
19#include <drm/drm_fb_helper.h>
20#include <drm/drm_gem.h>
21
22#include <drm/ttm/ttm_bo_api.h>
23#include <drm/ttm/ttm_bo_driver.h>
24#include <drm/ttm/ttm_placement.h>
25#include <drm/ttm/ttm_memory.h>
26#include <drm/ttm/ttm_module.h>
27
28#include "vboxvideo_guest.h"
29#include "vboxvideo_vbe.h"
30#include "hgsmi_ch_setup.h"
31
32#define DRIVER_NAME "vboxvideo"
33#define DRIVER_DESC "Oracle VM VirtualBox Graphics Card"
34#define DRIVER_DATE "20130823"
35
36#define DRIVER_MAJOR 1
37#define DRIVER_MINOR 0
38#define DRIVER_PATCHLEVEL 0
39
40#define VBOX_MAX_CURSOR_WIDTH 64
41#define VBOX_MAX_CURSOR_HEIGHT 64
42#define CURSOR_PIXEL_COUNT (VBOX_MAX_CURSOR_WIDTH * VBOX_MAX_CURSOR_HEIGHT)
43#define CURSOR_DATA_SIZE (CURSOR_PIXEL_COUNT * 4 + CURSOR_PIXEL_COUNT / 8)
44
45#define VBOX_MAX_SCREENS 32
46
47#define GUEST_HEAP_OFFSET(vbox) ((vbox)->full_vram_size - \
48 VBVA_ADAPTER_INFORMATION_SIZE)
49#define GUEST_HEAP_SIZE VBVA_ADAPTER_INFORMATION_SIZE
50#define GUEST_HEAP_USABLE_SIZE (VBVA_ADAPTER_INFORMATION_SIZE - \
51 sizeof(struct hgsmi_host_flags))
52#define HOST_FLAGS_OFFSET GUEST_HEAP_USABLE_SIZE
53
54struct vbox_framebuffer {
55 struct drm_framebuffer base;
56 struct drm_gem_object *obj;
57};
58
59struct vbox_private {
60 /* Must be first; or we must define our own release callback */
61 struct drm_device ddev;
62 struct drm_fb_helper fb_helper;
63 struct vbox_framebuffer afb;
64
65 u8 __iomem *guest_heap;
66 u8 __iomem *vbva_buffers;
67 struct gen_pool *guest_pool;
68 struct vbva_buf_ctx *vbva_info;
69 bool any_pitch;
70 u32 num_crtcs;
71 /* Amount of available VRAM, including space used for buffers. */
72 u32 full_vram_size;
73 /* Amount of available VRAM, not including space used for buffers. */
74 u32 available_vram_size;
75 /* Array of structures for receiving mode hints. */
76 struct vbva_modehint *last_mode_hints;
77
78 int fb_mtrr;
79
80 struct {
81 struct ttm_bo_device bdev;
82 } ttm;
83
84 struct mutex hw_mutex; /* protects modeset and accel/vbva accesses */
85 struct work_struct hotplug_work;
86 u32 input_mapping_width;
87 u32 input_mapping_height;
88 /*
89 * Is user-space using an X.Org-style layout of one large frame-buffer
90	 * encompassing all screens, or is the fbdev console active?
91 */
92 bool single_framebuffer;
93 u8 cursor_data[CURSOR_DATA_SIZE];
94};
95
96#undef CURSOR_PIXEL_COUNT
97#undef CURSOR_DATA_SIZE
98
99struct vbox_gem_object;
100
101struct vbox_connector {
102 struct drm_connector base;
103 char name[32];
104 struct vbox_crtc *vbox_crtc;
105 struct {
106 u32 width;
107 u32 height;
108 bool disconnected;
109 } mode_hint;
110};
111
112struct vbox_crtc {
113 struct drm_crtc base;
114 bool disconnected;
115 unsigned int crtc_id;
116 u32 fb_offset;
117 bool cursor_enabled;
118 u32 x_hint;
119 u32 y_hint;
120 /*
121 * When setting a mode we not only pass the mode to the hypervisor,
122 * but also information on how to map / translate input coordinates
123 * for the emulated USB tablet. This input-mapping may change when
124 * the mode on *another* crtc changes.
125 *
126	 * This means that sometimes we must do a modeset on other crtc-s than
127 * the one being changed to update the input-mapping. Including crtc-s
128 * which may be disabled inside the guest (shown as a black window
129 * on the host unless closed by the user).
130 *
131	 * With atomic modesetting the mode-info of disabled crtcs gets zeroed,
132	 * yet we need it when updating the input-map to avoid resizing the
133	 * window as a side effect of a mode_set on another crtc. Therefore we
134 * cache the info of the last mode below.
135 */
136 u32 width;
137 u32 height;
138 u32 x;
139 u32 y;
140};
141
142struct vbox_encoder {
143 struct drm_encoder base;
144};
145
146#define to_vbox_crtc(x) container_of(x, struct vbox_crtc, base)
147#define to_vbox_connector(x) container_of(x, struct vbox_connector, base)
148#define to_vbox_encoder(x) container_of(x, struct vbox_encoder, base)
149#define to_vbox_framebuffer(x) container_of(x, struct vbox_framebuffer, base)
150
151bool vbox_check_supported(u16 id);
152int vbox_hw_init(struct vbox_private *vbox);
153void vbox_hw_fini(struct vbox_private *vbox);
154
155int vbox_mode_init(struct vbox_private *vbox);
156void vbox_mode_fini(struct vbox_private *vbox);
157
158void vbox_report_caps(struct vbox_private *vbox);
159
160void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
161 struct drm_clip_rect *rects,
162 unsigned int num_rects);
163
164int vbox_framebuffer_init(struct vbox_private *vbox,
165 struct vbox_framebuffer *vbox_fb,
166 const struct drm_mode_fb_cmd2 *mode_cmd,
167 struct drm_gem_object *obj);
168
169int vboxfb_create(struct drm_fb_helper *helper,
170 struct drm_fb_helper_surface_size *sizes);
171void vbox_fbdev_fini(struct vbox_private *vbox);
172
173struct vbox_bo {
174 struct ttm_buffer_object bo;
175 struct ttm_placement placement;
176 struct ttm_bo_kmap_obj kmap;
177 struct drm_gem_object gem;
178 struct ttm_place placements[3];
179 int pin_count;
180};
181
182#define gem_to_vbox_bo(gobj) container_of((gobj), struct vbox_bo, gem)
183
184static inline struct vbox_bo *vbox_bo(struct ttm_buffer_object *bo)
185{
186 return container_of(bo, struct vbox_bo, bo);
187}
188
189#define to_vbox_obj(x) container_of(x, struct vbox_gem_object, base)
190
191static inline u64 vbox_bo_gpu_offset(struct vbox_bo *bo)
192{
193 return bo->bo.offset;
194}
195
196int vbox_dumb_create(struct drm_file *file,
197 struct drm_device *dev,
198 struct drm_mode_create_dumb *args);
199
200void vbox_gem_free_object(struct drm_gem_object *obj);
201int vbox_dumb_mmap_offset(struct drm_file *file,
202 struct drm_device *dev,
203 u32 handle, u64 *offset);
204
205#define DRM_FILE_PAGE_OFFSET (0x10000000ULL >> PAGE_SHIFT)
206
207int vbox_mm_init(struct vbox_private *vbox);
208void vbox_mm_fini(struct vbox_private *vbox);
209
210int vbox_bo_create(struct vbox_private *vbox, int size, int align,
211 u32 flags, struct vbox_bo **pvboxbo);
212
213int vbox_gem_create(struct vbox_private *vbox,
214 u32 size, bool iskernel, struct drm_gem_object **obj);
215
216int vbox_bo_pin(struct vbox_bo *bo, u32 pl_flag);
217int vbox_bo_unpin(struct vbox_bo *bo);
218
219static inline int vbox_bo_reserve(struct vbox_bo *bo, bool no_wait)
220{
221 int ret;
222
223 ret = ttm_bo_reserve(&bo->bo, true, no_wait, NULL);
224 if (ret) {
225 if (ret != -ERESTARTSYS && ret != -EBUSY)
226 DRM_ERROR("reserve failed %p\n", bo);
227 return ret;
228 }
229 return 0;
230}
231
232static inline void vbox_bo_unreserve(struct vbox_bo *bo)
233{
234 ttm_bo_unreserve(&bo->bo);
235}
236
237void vbox_ttm_placement(struct vbox_bo *bo, int domain);
238int vbox_bo_push_sysram(struct vbox_bo *bo);
239int vbox_mmap(struct file *filp, struct vm_area_struct *vma);
240void *vbox_bo_kmap(struct vbox_bo *bo);
241void vbox_bo_kunmap(struct vbox_bo *bo);
242
243/* vbox_prime.c */
244int vbox_gem_prime_pin(struct drm_gem_object *obj);
245void vbox_gem_prime_unpin(struct drm_gem_object *obj);
246struct sg_table *vbox_gem_prime_get_sg_table(struct drm_gem_object *obj);
247struct drm_gem_object *vbox_gem_prime_import_sg_table(
248 struct drm_device *dev, struct dma_buf_attachment *attach,
249 struct sg_table *table);
250void *vbox_gem_prime_vmap(struct drm_gem_object *obj);
251void vbox_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
252int vbox_gem_prime_mmap(struct drm_gem_object *obj,
253 struct vm_area_struct *area);
254
255/* vbox_irq.c */
256int vbox_irq_init(struct vbox_private *vbox);
257void vbox_irq_fini(struct vbox_private *vbox);
258void vbox_report_hotplug(struct vbox_private *vbox);
259irqreturn_t vbox_irq_handler(int irq, void *arg);
260
261/* vbox_hgsmi.c */
262void *hgsmi_buffer_alloc(struct gen_pool *guest_pool, size_t size,
263 u8 channel, u16 channel_info);
264void hgsmi_buffer_free(struct gen_pool *guest_pool, void *buf);
265int hgsmi_buffer_submit(struct gen_pool *guest_pool, void *buf);
266
267static inline void vbox_write_ioport(u16 index, u16 data)
268{
269 outw(index, VBE_DISPI_IOPORT_INDEX);
270 outw(data, VBE_DISPI_IOPORT_DATA);
271}
272
273#endif
diff --git a/drivers/gpu/drm/vboxvideo/vbox_fb.c b/drivers/gpu/drm/vboxvideo/vbox_fb.c
new file mode 100644
index 000000000000..83a04afd1766
--- /dev/null
+++ b/drivers/gpu/drm/vboxvideo/vbox_fb.c
@@ -0,0 +1,155 @@
1// SPDX-License-Identifier: MIT
2/*
3 * Copyright (C) 2013-2017 Oracle Corporation
4 * This file is based on ast_fb.c
5 * Copyright 2012 Red Hat Inc.
6 * Authors: Dave Airlie <airlied@redhat.com>
7 *          Michael Thayer <michael.thayer@oracle.com>
8 */
9#include <linux/delay.h>
10#include <linux/errno.h>
11#include <linux/fb.h>
12#include <linux/init.h>
13#include <linux/kernel.h>
14#include <linux/mm.h>
15#include <linux/module.h>
16#include <linux/pci.h>
17#include <linux/string.h>
18#include <linux/sysrq.h>
19#include <linux/tty.h>
20
21#include <drm/drm_crtc.h>
22#include <drm/drm_crtc_helper.h>
23#include <drm/drm_fb_helper.h>
24#include <drm/drm_fourcc.h>
25
26#include "vbox_drv.h"
27#include "vboxvideo.h"
28
29#ifdef CONFIG_DRM_KMS_FB_HELPER
30static struct fb_deferred_io vbox_defio = {
31 .delay = HZ / 30,
32 .deferred_io = drm_fb_helper_deferred_io,
33};
34#endif
35
36static struct fb_ops vboxfb_ops = {
37 .owner = THIS_MODULE,
38 DRM_FB_HELPER_DEFAULT_OPS,
39 .fb_fillrect = drm_fb_helper_sys_fillrect,
40 .fb_copyarea = drm_fb_helper_sys_copyarea,
41 .fb_imageblit = drm_fb_helper_sys_imageblit,
42};
43
44int vboxfb_create(struct drm_fb_helper *helper,
45 struct drm_fb_helper_surface_size *sizes)
46{
47 struct vbox_private *vbox =
48 container_of(helper, struct vbox_private, fb_helper);
49 struct pci_dev *pdev = vbox->ddev.pdev;
50 struct drm_mode_fb_cmd2 mode_cmd;
51 struct drm_framebuffer *fb;
52 struct fb_info *info;
53 struct drm_gem_object *gobj;
54 struct vbox_bo *bo;
55 int size, ret;
56 u64 gpu_addr;
57 u32 pitch;
58
59 mode_cmd.width = sizes->surface_width;
60 mode_cmd.height = sizes->surface_height;
61 pitch = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
62 mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
63 sizes->surface_depth);
64 mode_cmd.pitches[0] = pitch;
65
66 size = pitch * mode_cmd.height;
67
68 ret = vbox_gem_create(vbox, size, true, &gobj);
69 if (ret) {
70 DRM_ERROR("failed to create fbcon backing object %d\n", ret);
71 return ret;
72 }
73
74 ret = vbox_framebuffer_init(vbox, &vbox->afb, &mode_cmd, gobj);
75 if (ret)
76 return ret;
77
78 bo = gem_to_vbox_bo(gobj);
79
80 ret = vbox_bo_pin(bo, TTM_PL_FLAG_VRAM);
81 if (ret)
82 return ret;
83
84 info = drm_fb_helper_alloc_fbi(helper);
85 if (IS_ERR(info))
86 return PTR_ERR(info);
87
88 info->screen_size = size;
89 info->screen_base = (char __iomem *)vbox_bo_kmap(bo);
90 if (IS_ERR(info->screen_base))
91 return PTR_ERR(info->screen_base);
92
93 info->par = helper;
94
95 fb = &vbox->afb.base;
96 helper->fb = fb;
97
98 strcpy(info->fix.id, "vboxdrmfb");
99
100 info->fbops = &vboxfb_ops;
101
102 /*
103	 * This seems to be done as a safety check, to ensure that the
104	 * framebuffer is not registered twice by different drivers.
105 */
106 info->apertures->ranges[0].base = pci_resource_start(pdev, 0);
107 info->apertures->ranges[0].size = pci_resource_len(pdev, 0);
108
109 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
110 drm_fb_helper_fill_var(info, helper, sizes->fb_width,
111 sizes->fb_height);
112
113 gpu_addr = vbox_bo_gpu_offset(bo);
114 info->fix.smem_start = info->apertures->ranges[0].base + gpu_addr;
115 info->fix.smem_len = vbox->available_vram_size - gpu_addr;
116
117#ifdef CONFIG_DRM_KMS_FB_HELPER
118 info->fbdefio = &vbox_defio;
119 fb_deferred_io_init(info);
120#endif
121
122 info->pixmap.flags = FB_PIXMAP_SYSTEM;
123
124 DRM_DEBUG_KMS("allocated %dx%d\n", fb->width, fb->height);
125
126 return 0;
127}
128
129void vbox_fbdev_fini(struct vbox_private *vbox)
130{
131 struct vbox_framebuffer *afb = &vbox->afb;
132
133#ifdef CONFIG_DRM_KMS_FB_HELPER
134 if (vbox->fb_helper.fbdev && vbox->fb_helper.fbdev->fbdefio)
135 fb_deferred_io_cleanup(vbox->fb_helper.fbdev);
136#endif
137
138 drm_fb_helper_unregister_fbi(&vbox->fb_helper);
139
140 if (afb->obj) {
141 struct vbox_bo *bo = gem_to_vbox_bo(afb->obj);
142
143 vbox_bo_kunmap(bo);
144
145 if (bo->pin_count)
146 vbox_bo_unpin(bo);
147
148 drm_gem_object_put_unlocked(afb->obj);
149 afb->obj = NULL;
150 }
151 drm_fb_helper_fini(&vbox->fb_helper);
152
153 drm_framebuffer_unregister_private(&afb->base);
154 drm_framebuffer_cleanup(&afb->base);
155}
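The sizing arithmetic used in vboxfb_create() above, factored into a hypothetical helper: bits per pixel are rounded up to whole bytes before multiplying out the pitch.

static u32 my_fb_size(u32 width, u32 height, u32 bpp)
{
	u32 pitch = width * ((bpp + 7) / 8);	/* e.g. 15/16 bpp -> 2 bytes */

	return pitch * height;	/* size of the backing object */
}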
diff --git a/drivers/gpu/drm/vboxvideo/vbox_hgsmi.c b/drivers/gpu/drm/vboxvideo/vbox_hgsmi.c
new file mode 100644
index 000000000000..94b60654a012
--- /dev/null
+++ b/drivers/gpu/drm/vboxvideo/vbox_hgsmi.c
@@ -0,0 +1,95 @@
1// SPDX-License-Identifier: MIT
2/*
3 * Copyright (C) 2017 Oracle Corporation
4 * Authors: Hans de Goede <hdegoede@redhat.com>
5 */
6
7#include "vbox_drv.h"
8#include "vboxvideo_vbe.h"
9#include "hgsmi_defs.h"
10
11/* One-at-a-Time Hash from http://www.burtleburtle.net/bob/hash/doobs.html */
12static u32 hgsmi_hash_process(u32 hash, const u8 *data, int size)
13{
14 while (size--) {
15 hash += *data++;
16 hash += (hash << 10);
17 hash ^= (hash >> 6);
18 }
19
20 return hash;
21}
22
23static u32 hgsmi_hash_end(u32 hash)
24{
25 hash += (hash << 3);
26 hash ^= (hash >> 11);
27 hash += (hash << 15);
28
29 return hash;
30}
31
32/* Not really a checksum but that is the naming used in all vbox code */
33static u32 hgsmi_checksum(u32 offset,
34 const struct hgsmi_buffer_header *header,
35 const struct hgsmi_buffer_tail *tail)
36{
37 u32 checksum;
38
39 checksum = hgsmi_hash_process(0, (u8 *)&offset, sizeof(offset));
40 checksum = hgsmi_hash_process(checksum, (u8 *)header, sizeof(*header));
41 /* 4 -> Do not checksum the checksum itself */
42 checksum = hgsmi_hash_process(checksum, (u8 *)tail, 4);
43
44 return hgsmi_hash_end(checksum);
45}
46
47void *hgsmi_buffer_alloc(struct gen_pool *guest_pool, size_t size,
48 u8 channel, u16 channel_info)
49{
50 struct hgsmi_buffer_header *h;
51 struct hgsmi_buffer_tail *t;
52 size_t total_size;
53 dma_addr_t offset;
54
55 total_size = size + sizeof(*h) + sizeof(*t);
56 h = gen_pool_dma_alloc(guest_pool, total_size, &offset);
57 if (!h)
58 return NULL;
59
60 t = (struct hgsmi_buffer_tail *)((u8 *)h + sizeof(*h) + size);
61
62 h->flags = HGSMI_BUFFER_HEADER_F_SEQ_SINGLE;
63 h->data_size = size;
64 h->channel = channel;
65 h->channel_info = channel_info;
66 memset(&h->u.header_data, 0, sizeof(h->u.header_data));
67
68 t->reserved = 0;
69 t->checksum = hgsmi_checksum(offset, h, t);
70
71 return (u8 *)h + sizeof(*h);
72}
73
74void hgsmi_buffer_free(struct gen_pool *guest_pool, void *buf)
75{
76 struct hgsmi_buffer_header *h =
77 (struct hgsmi_buffer_header *)((u8 *)buf - sizeof(*h));
78 size_t total_size = h->data_size + sizeof(*h) +
79 sizeof(struct hgsmi_buffer_tail);
80
81 gen_pool_free(guest_pool, (unsigned long)h, total_size);
82}
83
84int hgsmi_buffer_submit(struct gen_pool *guest_pool, void *buf)
85{
86 phys_addr_t offset;
87
88 offset = gen_pool_virt_to_phys(guest_pool, (unsigned long)buf -
89 sizeof(struct hgsmi_buffer_header));
90 outl(offset, VGA_PORT_HGSMI_GUEST);
91 /* Make the compiler aware that the host has changed memory. */
92 mb();
93
94 return 0;
95}
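The "checksum" above is Bob Jenkins' One-at-a-Time hash. A standalone user-space rendition for experimentation, using the same arithmetic as hgsmi_hash_process()/hgsmi_hash_end() (the sample input is arbitrary):

#include <stdint.h>
#include <stdio.h>

static uint32_t oat(uint32_t h, const uint8_t *d, int n)
{
	while (n--) {
		h += *d++;
		h += h << 10;
		h ^= h >> 6;
	}
	return h;
}

static uint32_t oat_end(uint32_t h)
{
	h += h << 3;
	h ^= h >> 11;
	h += h << 15;
	return h;
}

int main(void)
{
	const uint8_t msg[] = "hgsmi";

	printf("%08x\n", oat_end(oat(0, msg, sizeof(msg) - 1)));
	return 0;
}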
diff --git a/drivers/gpu/drm/vboxvideo/vbox_irq.c b/drivers/gpu/drm/vboxvideo/vbox_irq.c
new file mode 100644
index 000000000000..16a1e29f5292
--- /dev/null
+++ b/drivers/gpu/drm/vboxvideo/vbox_irq.c
@@ -0,0 +1,183 @@
1// SPDX-License-Identifier: MIT
2/*
3 * Copyright (C) 2016-2017 Oracle Corporation
4 * This file is based on qxl_irq.c
5 * Copyright 2013 Red Hat Inc.
6 * Authors: Dave Airlie
7 * Alon Levy
8 *          Michael Thayer <michael.thayer@oracle.com>,
9 * Hans de Goede <hdegoede@redhat.com>
10 */
11
12#include <linux/pci.h>
13#include <drm/drm_irq.h>
14#include <drm/drm_probe_helper.h>
15
16#include "vbox_drv.h"
17#include "vboxvideo.h"
18
19static void vbox_clear_irq(void)
20{
21 outl((u32)~0, VGA_PORT_HGSMI_HOST);
22}
23
24static u32 vbox_get_flags(struct vbox_private *vbox)
25{
26 return readl(vbox->guest_heap + HOST_FLAGS_OFFSET);
27}
28
29void vbox_report_hotplug(struct vbox_private *vbox)
30{
31 schedule_work(&vbox->hotplug_work);
32}
33
34irqreturn_t vbox_irq_handler(int irq, void *arg)
35{
36 struct drm_device *dev = (struct drm_device *)arg;
37 struct vbox_private *vbox = (struct vbox_private *)dev->dev_private;
38 u32 host_flags = vbox_get_flags(vbox);
39
40 if (!(host_flags & HGSMIHOSTFLAGS_IRQ))
41 return IRQ_NONE;
42
43 /*
44 * Due to a bug in the initial host implementation of hot-plug irqs,
45 * the hot-plug and cursor capability flags were never cleared.
46 * Fortunately we can tell when they would have been set by checking
47 * that the VSYNC flag is not set.
48 */
49 if (host_flags &
50 (HGSMIHOSTFLAGS_HOTPLUG | HGSMIHOSTFLAGS_CURSOR_CAPABILITIES) &&
51 !(host_flags & HGSMIHOSTFLAGS_VSYNC))
52 vbox_report_hotplug(vbox);
53
54 vbox_clear_irq();
55
56 return IRQ_HANDLED;
57}
58
59/*
60 * Check that the position hints provided by the host are suitable for GNOME
61 * shell (i.e. all screens disjoint and hints for all enabled screens) and if
62 * not replace them with default ones. Providing valid hints improves the
63 * chances that we will get a known screen layout for pointer mapping.
64 */
65static void validate_or_set_position_hints(struct vbox_private *vbox)
66{
67 struct vbva_modehint *hintsi, *hintsj;
68 bool valid = true;
69 u16 currentx = 0;
70 int i, j;
71
72 for (i = 0; i < vbox->num_crtcs; ++i) {
73 for (j = 0; j < i; ++j) {
74 hintsi = &vbox->last_mode_hints[i];
75 hintsj = &vbox->last_mode_hints[j];
76
77 if (hintsi->enabled && hintsj->enabled) {
78 if (hintsi->dx >= 0xffff ||
79 hintsi->dy >= 0xffff ||
80 hintsj->dx >= 0xffff ||
81 hintsj->dy >= 0xffff ||
82 (hintsi->dx <
83 hintsj->dx + (hintsj->cx & 0x8fff) &&
84 hintsi->dx + (hintsi->cx & 0x8fff) >
85 hintsj->dx) ||
86 (hintsi->dy <
87 hintsj->dy + (hintsj->cy & 0x8fff) &&
88 hintsi->dy + (hintsi->cy & 0x8fff) >
89 hintsj->dy))
90 valid = false;
91 }
92 }
93 }
94 if (!valid)
95 for (i = 0; i < vbox->num_crtcs; ++i) {
96 if (vbox->last_mode_hints[i].enabled) {
97 vbox->last_mode_hints[i].dx = currentx;
98 vbox->last_mode_hints[i].dy = 0;
99 currentx +=
100 vbox->last_mode_hints[i].cx & 0x8fff;
101 }
102 }
103}
104
105/* Query the host for the most recent video mode hints. */
106static void vbox_update_mode_hints(struct vbox_private *vbox)
107{
108 struct drm_connector_list_iter conn_iter;
109 struct drm_device *dev = &vbox->ddev;
110 struct drm_connector *connector;
111 struct vbox_connector *vbox_conn;
112 struct vbva_modehint *hints;
113 u16 flags;
114 bool disconnected;
115 unsigned int crtc_id;
116 int ret;
117
118 ret = hgsmi_get_mode_hints(vbox->guest_pool, vbox->num_crtcs,
119 vbox->last_mode_hints);
120 if (ret) {
121 DRM_ERROR("vboxvideo: hgsmi_get_mode_hints failed: %d\n", ret);
122 return;
123 }
124
125 validate_or_set_position_hints(vbox);
126
127 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
128 drm_connector_list_iter_begin(dev, &conn_iter);
129 drm_for_each_connector_iter(connector, &conn_iter) {
130 vbox_conn = to_vbox_connector(connector);
131
132 hints = &vbox->last_mode_hints[vbox_conn->vbox_crtc->crtc_id];
133 if (hints->magic != VBVAMODEHINT_MAGIC)
134 continue;
135
136 disconnected = !(hints->enabled);
137 crtc_id = vbox_conn->vbox_crtc->crtc_id;
138 vbox_conn->mode_hint.width = hints->cx;
139 vbox_conn->mode_hint.height = hints->cy;
140 vbox_conn->vbox_crtc->x_hint = hints->dx;
141 vbox_conn->vbox_crtc->y_hint = hints->dy;
142 vbox_conn->mode_hint.disconnected = disconnected;
143
144 if (vbox_conn->vbox_crtc->disconnected == disconnected)
145 continue;
146
147 if (disconnected)
148 flags = VBVA_SCREEN_F_ACTIVE | VBVA_SCREEN_F_DISABLED;
149 else
150 flags = VBVA_SCREEN_F_ACTIVE | VBVA_SCREEN_F_BLANK;
151
152 hgsmi_process_display_info(vbox->guest_pool, crtc_id, 0, 0, 0,
153 hints->cx * 4, hints->cx,
154 hints->cy, 0, flags);
155
156 vbox_conn->vbox_crtc->disconnected = disconnected;
157 }
158 drm_connector_list_iter_end(&conn_iter);
159 drm_modeset_unlock(&dev->mode_config.connection_mutex);
160}
161
162static void vbox_hotplug_worker(struct work_struct *work)
163{
164 struct vbox_private *vbox = container_of(work, struct vbox_private,
165 hotplug_work);
166
167 vbox_update_mode_hints(vbox);
168 drm_kms_helper_hotplug_event(&vbox->ddev);
169}
170
171int vbox_irq_init(struct vbox_private *vbox)
172{
173 INIT_WORK(&vbox->hotplug_work, vbox_hotplug_worker);
174 vbox_update_mode_hints(vbox);
175
176 return drm_irq_install(&vbox->ddev, vbox->ddev.pdev->irq);
177}
178
179void vbox_irq_fini(struct vbox_private *vbox)
180{
181 drm_irq_uninstall(&vbox->ddev);
182 flush_work(&vbox->hotplug_work);
183}
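The disjointness test in validate_or_set_position_hints() above is a standard 2-D interval-overlap check written out inline: two enabled screens collide iff their ranges intersect on both axes. A hypothetical helper stating the predicate directly (the driver additionally masks cx/cy with 0x8fff and rejects 0xffff placeholder coordinates):

static bool my_rects_overlap(int ax, int ay, int aw, int ah,
			     int bx, int by, int bw, int bh)
{
	return ax < bx + bw && ax + aw > bx &&	/* X ranges intersect */
	       ay < by + bh && ay + ah > by;	/* Y ranges intersect */
}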
diff --git a/drivers/gpu/drm/vboxvideo/vbox_main.c b/drivers/gpu/drm/vboxvideo/vbox_main.c
new file mode 100644
index 000000000000..f4d02de5518a
--- /dev/null
+++ b/drivers/gpu/drm/vboxvideo/vbox_main.c
@@ -0,0 +1,361 @@
1// SPDX-License-Identifier: MIT
2/*
3 * Copyright (C) 2013-2017 Oracle Corporation
4 * This file is based on ast_main.c
5 * Copyright 2012 Red Hat Inc.
6 * Authors: Dave Airlie <airlied@redhat.com>,
7 *          Michael Thayer <michael.thayer@oracle.com>,
8 * Hans de Goede <hdegoede@redhat.com>
9 */
10
11#include <linux/vbox_err.h>
12#include <drm/drm_fb_helper.h>
13#include <drm/drm_crtc_helper.h>
14
15#include "vbox_drv.h"
16#include "vboxvideo_guest.h"
17#include "vboxvideo_vbe.h"
18
19static void vbox_user_framebuffer_destroy(struct drm_framebuffer *fb)
20{
21 struct vbox_framebuffer *vbox_fb = to_vbox_framebuffer(fb);
22
23 if (vbox_fb->obj)
24 drm_gem_object_put_unlocked(vbox_fb->obj);
25
26 drm_framebuffer_cleanup(fb);
27 kfree(fb);
28}
29
30void vbox_report_caps(struct vbox_private *vbox)
31{
32 u32 caps = VBVACAPS_DISABLE_CURSOR_INTEGRATION |
33 VBVACAPS_IRQ | VBVACAPS_USE_VBVA_ONLY;
34
35 /* The host only accepts VIDEO_MODE_HINTS if it is send separately. */
36 hgsmi_send_caps_info(vbox->guest_pool, caps);
37 caps |= VBVACAPS_VIDEO_MODE_HINTS;
38 hgsmi_send_caps_info(vbox->guest_pool, caps);
39}
40
41/* Send information about dirty rectangles to VBVA. */
42void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
43 struct drm_clip_rect *rects,
44 unsigned int num_rects)
45{
46 struct vbox_private *vbox = fb->dev->dev_private;
47 struct drm_display_mode *mode;
48 struct drm_crtc *crtc;
49 int crtc_x, crtc_y;
50 unsigned int i;
51
52 mutex_lock(&vbox->hw_mutex);
53 list_for_each_entry(crtc, &fb->dev->mode_config.crtc_list, head) {
54 if (crtc->primary->state->fb != fb)
55 continue;
56
57 mode = &crtc->state->mode;
58 crtc_x = crtc->primary->state->src_x >> 16;
59 crtc_y = crtc->primary->state->src_y >> 16;
60
61 for (i = 0; i < num_rects; ++i) {
62 struct vbva_cmd_hdr cmd_hdr;
63 unsigned int crtc_id = to_vbox_crtc(crtc)->crtc_id;
64
65 if (rects[i].x1 > crtc_x + mode->hdisplay ||
66 rects[i].y1 > crtc_y + mode->vdisplay ||
67 rects[i].x2 < crtc_x ||
68 rects[i].y2 < crtc_y)
69 continue;
70
71 cmd_hdr.x = (s16)rects[i].x1;
72 cmd_hdr.y = (s16)rects[i].y1;
73 cmd_hdr.w = (u16)rects[i].x2 - rects[i].x1;
74 cmd_hdr.h = (u16)rects[i].y2 - rects[i].y1;
75
76 if (!vbva_buffer_begin_update(&vbox->vbva_info[crtc_id],
77 vbox->guest_pool))
78 continue;
79
80 vbva_write(&vbox->vbva_info[crtc_id], vbox->guest_pool,
81 &cmd_hdr, sizeof(cmd_hdr));
82 vbva_buffer_end_update(&vbox->vbva_info[crtc_id]);
83 }
84 }
85 mutex_unlock(&vbox->hw_mutex);
86}
87
88static int vbox_user_framebuffer_dirty(struct drm_framebuffer *fb,
89 struct drm_file *file_priv,
90 unsigned int flags, unsigned int color,
91 struct drm_clip_rect *rects,
92 unsigned int num_rects)
93{
94 vbox_framebuffer_dirty_rectangles(fb, rects, num_rects);
95
96 return 0;
97}
98
99static const struct drm_framebuffer_funcs vbox_fb_funcs = {
100 .destroy = vbox_user_framebuffer_destroy,
101 .dirty = vbox_user_framebuffer_dirty,
102};
103
104int vbox_framebuffer_init(struct vbox_private *vbox,
105 struct vbox_framebuffer *vbox_fb,
106 const struct drm_mode_fb_cmd2 *mode_cmd,
107 struct drm_gem_object *obj)
108{
109 int ret;
110
111 drm_helper_mode_fill_fb_struct(&vbox->ddev, &vbox_fb->base, mode_cmd);
112 vbox_fb->obj = obj;
113 ret = drm_framebuffer_init(&vbox->ddev, &vbox_fb->base, &vbox_fb_funcs);
114 if (ret) {
115 DRM_ERROR("framebuffer init failed %d\n", ret);
116 return ret;
117 }
118
119 return 0;
120}
121
122static int vbox_accel_init(struct vbox_private *vbox)
123{
124 struct vbva_buffer *vbva;
125 unsigned int i;
126
127 vbox->vbva_info = devm_kcalloc(vbox->ddev.dev, vbox->num_crtcs,
128 sizeof(*vbox->vbva_info), GFP_KERNEL);
129 if (!vbox->vbva_info)
130 return -ENOMEM;
131
132 /* Take a command buffer for each screen from the end of usable VRAM. */
133 vbox->available_vram_size -= vbox->num_crtcs * VBVA_MIN_BUFFER_SIZE;
134
135 vbox->vbva_buffers = pci_iomap_range(vbox->ddev.pdev, 0,
136 vbox->available_vram_size,
137 vbox->num_crtcs *
138 VBVA_MIN_BUFFER_SIZE);
139 if (!vbox->vbva_buffers)
140 return -ENOMEM;
141
142 for (i = 0; i < vbox->num_crtcs; ++i) {
143 vbva_setup_buffer_context(&vbox->vbva_info[i],
144 vbox->available_vram_size +
145 i * VBVA_MIN_BUFFER_SIZE,
146 VBVA_MIN_BUFFER_SIZE);
147 vbva = (void __force *)vbox->vbva_buffers +
148 i * VBVA_MIN_BUFFER_SIZE;
149 if (!vbva_enable(&vbox->vbva_info[i],
150 vbox->guest_pool, vbva, i)) {
151			/* Very old host or a driver error. */
152 DRM_ERROR("vboxvideo: vbva_enable failed\n");
153 }
154 }
155
156 return 0;
157}
158
159static void vbox_accel_fini(struct vbox_private *vbox)
160{
161 unsigned int i;
162
163 for (i = 0; i < vbox->num_crtcs; ++i)
164 vbva_disable(&vbox->vbva_info[i], vbox->guest_pool, i);
165
166 pci_iounmap(vbox->ddev.pdev, vbox->vbva_buffers);
167}
168
169/* Do we support the 4.3 plus mode hint reporting interface? */
170static bool have_hgsmi_mode_hints(struct vbox_private *vbox)
171{
172 u32 have_hints, have_cursor;
173 int ret;
174
175 ret = hgsmi_query_conf(vbox->guest_pool,
176 VBOX_VBVA_CONF32_MODE_HINT_REPORTING,
177 &have_hints);
178 if (ret)
179 return false;
180
181 ret = hgsmi_query_conf(vbox->guest_pool,
182 VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING,
183 &have_cursor);
184 if (ret)
185 return false;
186
187 return have_hints == VINF_SUCCESS && have_cursor == VINF_SUCCESS;
188}
189
190bool vbox_check_supported(u16 id)
191{
192 u16 dispi_id;
193
194 vbox_write_ioport(VBE_DISPI_INDEX_ID, id);
195 dispi_id = inw(VBE_DISPI_IOPORT_DATA);
196
197 return dispi_id == id;
198}
199
200int vbox_hw_init(struct vbox_private *vbox)
201{
202 int ret = -ENOMEM;
203
204 vbox->full_vram_size = inl(VBE_DISPI_IOPORT_DATA);
205 vbox->any_pitch = vbox_check_supported(VBE_DISPI_ID_ANYX);
206
207 DRM_INFO("VRAM %08x\n", vbox->full_vram_size);
208
209 /* Map guest-heap at end of vram */
210 vbox->guest_heap =
211 pci_iomap_range(vbox->ddev.pdev, 0, GUEST_HEAP_OFFSET(vbox),
212 GUEST_HEAP_SIZE);
213 if (!vbox->guest_heap)
214 return -ENOMEM;
215
216 /* Create guest-heap mem-pool use 2^4 = 16 byte chunks */
217 vbox->guest_pool = gen_pool_create(4, -1);
218 if (!vbox->guest_pool)
219 goto err_unmap_guest_heap;
220
221 ret = gen_pool_add_virt(vbox->guest_pool,
222 (unsigned long)vbox->guest_heap,
223 GUEST_HEAP_OFFSET(vbox),
224 GUEST_HEAP_USABLE_SIZE, -1);
225 if (ret)
226 goto err_destroy_guest_pool;
227
228 ret = hgsmi_test_query_conf(vbox->guest_pool);
229 if (ret) {
230 DRM_ERROR("vboxvideo: hgsmi_test_query_conf failed\n");
231 goto err_destroy_guest_pool;
232 }
233
234 /* Reduce available VRAM size to reflect the guest heap. */
235 vbox->available_vram_size = GUEST_HEAP_OFFSET(vbox);
236 /* Linux drm represents monitors as a 32-bit array. */
237 hgsmi_query_conf(vbox->guest_pool, VBOX_VBVA_CONF32_MONITOR_COUNT,
238 &vbox->num_crtcs);
239 vbox->num_crtcs = clamp_t(u32, vbox->num_crtcs, 1, VBOX_MAX_SCREENS);
240
241 if (!have_hgsmi_mode_hints(vbox)) {
242 ret = -ENOTSUPP;
243 goto err_destroy_guest_pool;
244 }
245
246 vbox->last_mode_hints = devm_kcalloc(vbox->ddev.dev, vbox->num_crtcs,
247 sizeof(struct vbva_modehint),
248 GFP_KERNEL);
249 if (!vbox->last_mode_hints) {
250 ret = -ENOMEM;
251 goto err_destroy_guest_pool;
252 }
253
254 ret = vbox_accel_init(vbox);
255 if (ret)
256 goto err_destroy_guest_pool;
257
258 return 0;
259
260err_destroy_guest_pool:
261 gen_pool_destroy(vbox->guest_pool);
262err_unmap_guest_heap:
263 pci_iounmap(vbox->ddev.pdev, vbox->guest_heap);
264 return ret;
265}
266
267void vbox_hw_fini(struct vbox_private *vbox)
268{
269 vbox_accel_fini(vbox);
270 gen_pool_destroy(vbox->guest_pool);
271 pci_iounmap(vbox->ddev.pdev, vbox->guest_heap);
272}
273
274int vbox_gem_create(struct vbox_private *vbox,
275 u32 size, bool iskernel, struct drm_gem_object **obj)
276{
277 struct vbox_bo *vboxbo;
278 int ret;
279
280 *obj = NULL;
281
282 size = roundup(size, PAGE_SIZE);
283 if (size == 0)
284 return -EINVAL;
285
286 ret = vbox_bo_create(vbox, size, 0, 0, &vboxbo);
287 if (ret) {
288 if (ret != -ERESTARTSYS)
289 DRM_ERROR("failed to allocate GEM object\n");
290 return ret;
291 }
292
293 *obj = &vboxbo->gem;
294
295 return 0;
296}
297
298int vbox_dumb_create(struct drm_file *file,
299 struct drm_device *dev, struct drm_mode_create_dumb *args)
300{
301 struct vbox_private *vbox =
302 container_of(dev, struct vbox_private, ddev);
303 struct drm_gem_object *gobj;
304 u32 handle;
305 int ret;
306
307 args->pitch = args->width * ((args->bpp + 7) / 8);
308 args->size = args->pitch * args->height;
309
310 ret = vbox_gem_create(vbox, args->size, false, &gobj);
311 if (ret)
312 return ret;
313
314 ret = drm_gem_handle_create(file, gobj, &handle);
315 drm_gem_object_put_unlocked(gobj);
316 if (ret)
317 return ret;
318
319 args->handle = handle;
320
321 return 0;
322}
323
324void vbox_gem_free_object(struct drm_gem_object *obj)
325{
326 struct vbox_bo *vbox_bo = gem_to_vbox_bo(obj);
327
328 ttm_bo_put(&vbox_bo->bo);
329}
330
331static inline u64 vbox_bo_mmap_offset(struct vbox_bo *bo)
332{
333 return drm_vma_node_offset_addr(&bo->bo.vma_node);
334}
335
336int
337vbox_dumb_mmap_offset(struct drm_file *file,
338 struct drm_device *dev,
339 u32 handle, u64 *offset)
340{
341 struct drm_gem_object *obj;
342 int ret;
343 struct vbox_bo *bo;
344
345 mutex_lock(&dev->struct_mutex);
346 obj = drm_gem_object_lookup(file, handle);
347 if (!obj) {
348 ret = -ENOENT;
349 goto out_unlock;
350 }
351
352 bo = gem_to_vbox_bo(obj);
353 *offset = vbox_bo_mmap_offset(bo);
354
355 drm_gem_object_put(obj);
356 ret = 0;
357
358out_unlock:
359 mutex_unlock(&dev->struct_mutex);
360 return ret;
361}
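The guest-heap setup in vbox_hw_init() above boils down to a genalloc pool with 16-byte granularity over a chunk of ioremapped VRAM. A condensed sketch with illustrative my_ names (the real code maps the heap from the end of VRAM with pci_iomap_range()):

static struct gen_pool *my_heap_pool(void *virt, phys_addr_t phys,
				     size_t size)
{
	struct gen_pool *pool = gen_pool_create(4, -1);	/* 2^4 byte chunks */

	if (!pool)
		return NULL;

	if (gen_pool_add_virt(pool, (unsigned long)virt, phys, size, -1)) {
		gen_pool_destroy(pool);
		return NULL;
	}
	return pool;
}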
diff --git a/drivers/gpu/drm/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c
new file mode 100644
index 000000000000..620a6e38f71f
--- /dev/null
+++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c
@@ -0,0 +1,940 @@
1// SPDX-License-Identifier: MIT
2/*
3 * Copyright (C) 2013-2017 Oracle Corporation
4 * This file is based on ast_mode.c
5 * Copyright 2012 Red Hat Inc.
6 * Parts based on xf86-video-ast
7 * Copyright (c) 2005 ASPEED Technology Inc.
8 * Authors: Dave Airlie <airlied@redhat.com>
9 *          Michael Thayer <michael.thayer@oracle.com>,
10 * Hans de Goede <hdegoede@redhat.com>
11 */
12#include <linux/export.h>
13
14#include <drm/drm_atomic.h>
15#include <drm/drm_atomic_helper.h>
16#include <drm/drm_fourcc.h>
17#include <drm/drm_plane_helper.h>
18#include <drm/drm_probe_helper.h>
19#include <drm/drm_vblank.h>
20
21#include "hgsmi_channels.h"
22#include "vbox_drv.h"
23#include "vboxvideo.h"
24
25/*
26 * Set a graphics mode. Poke any required values into registers, do an HGSMI
27 * mode set and tell the host we support advanced graphics functions.
28 */
29static void vbox_do_modeset(struct drm_crtc *crtc)
30{
31 struct drm_framebuffer *fb = crtc->primary->state->fb;
32 struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
33 struct vbox_private *vbox;
34 int width, height, bpp, pitch;
35 u16 flags;
36 s32 x_offset, y_offset;
37
38 vbox = crtc->dev->dev_private;
39 width = vbox_crtc->width ? vbox_crtc->width : 640;
40 height = vbox_crtc->height ? vbox_crtc->height : 480;
41 bpp = fb ? fb->format->cpp[0] * 8 : 32;
42 pitch = fb ? fb->pitches[0] : width * bpp / 8;
43 x_offset = vbox->single_framebuffer ? vbox_crtc->x : vbox_crtc->x_hint;
44 y_offset = vbox->single_framebuffer ? vbox_crtc->y : vbox_crtc->y_hint;
45
46 /*
47 * This is the old way of setting graphics modes. It assumed one screen
48 * and a frame-buffer at the start of video RAM. On older versions of
49 * VirtualBox, certain parts of the code still assume that the first
50 * screen is programmed this way, so try to fake it.
51 */
52 if (vbox_crtc->crtc_id == 0 && fb &&
53 vbox_crtc->fb_offset / pitch < 0xffff - crtc->y &&
54 vbox_crtc->fb_offset % (bpp / 8) == 0) {
55 vbox_write_ioport(VBE_DISPI_INDEX_XRES, width);
56 vbox_write_ioport(VBE_DISPI_INDEX_YRES, height);
57 vbox_write_ioport(VBE_DISPI_INDEX_VIRT_WIDTH, pitch * 8 / bpp);
58 vbox_write_ioport(VBE_DISPI_INDEX_BPP, bpp);
59 vbox_write_ioport(VBE_DISPI_INDEX_ENABLE, VBE_DISPI_ENABLED);
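		/*
		 * The X offset is the byte offset of the fb within its
		 * scanline (fb_offset % pitch) converted to pixels, plus
		 * the panning offset.
		 */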
60 vbox_write_ioport(
61 VBE_DISPI_INDEX_X_OFFSET,
62 vbox_crtc->fb_offset % pitch / bpp * 8 + vbox_crtc->x);
63 vbox_write_ioport(VBE_DISPI_INDEX_Y_OFFSET,
64 vbox_crtc->fb_offset / pitch + vbox_crtc->y);
65 }
66
67 flags = VBVA_SCREEN_F_ACTIVE;
68 flags |= (fb && crtc->state->enable) ? 0 : VBVA_SCREEN_F_BLANK;
69 flags |= vbox_crtc->disconnected ? VBVA_SCREEN_F_DISABLED : 0;
70 hgsmi_process_display_info(vbox->guest_pool, vbox_crtc->crtc_id,
71 x_offset, y_offset,
72 vbox_crtc->x * bpp / 8 +
73 vbox_crtc->y * pitch,
74 pitch, width, height, bpp, flags);
75}
76
77static int vbox_set_view(struct drm_crtc *crtc)
78{
79 struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
80 struct vbox_private *vbox = crtc->dev->dev_private;
81 struct vbva_infoview *p;
82
83 /*
84 * Tell the host about the view. This design originally targeted the
85 * Windows XP driver architecture and assumed that each screen would
86 * have a dedicated frame buffer with the command buffer following it,
87 * the whole being a "view". The host works out which screen a command
88 * buffer belongs to by checking whether it is in the first view, then
89 * whether it is in the second and so on. The first match wins. We
90 * cheat around this by making the first view be the managed memory
91 * plus the first command buffer, the second the same plus the second
92 * buffer and so on.
93 */
94 p = hgsmi_buffer_alloc(vbox->guest_pool, sizeof(*p),
95 HGSMI_CH_VBVA, VBVA_INFO_VIEW);
96 if (!p)
97 return -ENOMEM;
98
99 p->view_index = vbox_crtc->crtc_id;
100 p->view_offset = vbox_crtc->fb_offset;
101 p->view_size = vbox->available_vram_size - vbox_crtc->fb_offset +
102 vbox_crtc->crtc_id * VBVA_MIN_BUFFER_SIZE;
103 p->max_screen_size = vbox->available_vram_size - vbox_crtc->fb_offset;
104
105 hgsmi_buffer_submit(vbox->guest_pool, p);
106 hgsmi_buffer_free(vbox->guest_pool, p);
107
108 return 0;
109}
110
111/*
112 * Try to map the layout of virtual screens to the range of the input device.
113 * Return true if we need to re-set the crtc modes due to screen offset
114 * changes.
115 */
116static bool vbox_set_up_input_mapping(struct vbox_private *vbox)
117{
118 struct drm_crtc *crtci;
119 struct drm_connector *connectori;
120 struct drm_framebuffer *fb, *fb1 = NULL;
121 bool single_framebuffer = true;
122 bool old_single_framebuffer = vbox->single_framebuffer;
123 u16 width = 0, height = 0;
124
125 /*
126 * Are we using an X.Org-style single large frame-buffer for all crtcs?
127 * If so then screen layout can be deduced from the crtc offsets.
128	 * The same fall-back applies if this is the fbdev frame-buffer.
129 */
130 list_for_each_entry(crtci, &vbox->ddev.mode_config.crtc_list, head) {
131 fb = crtci->primary->state->fb;
132 if (!fb)
133 continue;
134
135 if (!fb1) {
136 fb1 = fb;
137 if (to_vbox_framebuffer(fb1) == &vbox->afb)
138 break;
139 } else if (fb != fb1) {
140 single_framebuffer = false;
141 }
142 }
143 if (!fb1)
144 return false;
145
146 if (single_framebuffer) {
147 vbox->single_framebuffer = true;
148 vbox->input_mapping_width = fb1->width;
149 vbox->input_mapping_height = fb1->height;
150 return old_single_framebuffer != vbox->single_framebuffer;
151 }
152 /* Otherwise calculate the total span of all screens. */
153 list_for_each_entry(connectori, &vbox->ddev.mode_config.connector_list,
154 head) {
155 struct vbox_connector *vbox_connector =
156 to_vbox_connector(connectori);
157 struct vbox_crtc *vbox_crtc = vbox_connector->vbox_crtc;
158
159 width = max_t(u16, width, vbox_crtc->x_hint +
160 vbox_connector->mode_hint.width);
161 height = max_t(u16, height, vbox_crtc->y_hint +
162 vbox_connector->mode_hint.height);
163 }
164
165 vbox->single_framebuffer = false;
166 vbox->input_mapping_width = width;
167 vbox->input_mapping_height = height;
168
169 return old_single_framebuffer != vbox->single_framebuffer;
170}
171
172static void vbox_crtc_set_base_and_mode(struct drm_crtc *crtc,
173 struct drm_framebuffer *fb,
174 int x, int y)
175{
176 struct vbox_bo *bo = gem_to_vbox_bo(to_vbox_framebuffer(fb)->obj);
177 struct vbox_private *vbox = crtc->dev->dev_private;
178 struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
179 bool needs_modeset = drm_atomic_crtc_needs_modeset(crtc->state);
180
181 mutex_lock(&vbox->hw_mutex);
182
183 if (crtc->state->enable) {
184 vbox_crtc->width = crtc->state->mode.hdisplay;
185 vbox_crtc->height = crtc->state->mode.vdisplay;
186 }
187
188 vbox_crtc->x = x;
189 vbox_crtc->y = y;
190 vbox_crtc->fb_offset = vbox_bo_gpu_offset(bo);
191
192 /* vbox_do_modeset() checks vbox->single_framebuffer so update it now */
193 if (needs_modeset && vbox_set_up_input_mapping(vbox)) {
194 struct drm_crtc *crtci;
195
196 list_for_each_entry(crtci, &vbox->ddev.mode_config.crtc_list,
197 head) {
198 if (crtci == crtc)
199 continue;
200 vbox_do_modeset(crtci);
201 }
202 }
203
204 vbox_set_view(crtc);
205 vbox_do_modeset(crtc);
206
207 if (needs_modeset)
208 hgsmi_update_input_mapping(vbox->guest_pool, 0, 0,
209 vbox->input_mapping_width,
210 vbox->input_mapping_height);
211
212 mutex_unlock(&vbox->hw_mutex);
213}
214
215static void vbox_crtc_atomic_enable(struct drm_crtc *crtc,
216 struct drm_crtc_state *old_crtc_state)
217{
218}
219
220static void vbox_crtc_atomic_disable(struct drm_crtc *crtc,
221 struct drm_crtc_state *old_crtc_state)
222{
223}
224
225static void vbox_crtc_atomic_flush(struct drm_crtc *crtc,
226 struct drm_crtc_state *old_crtc_state)
227{
228 struct drm_pending_vblank_event *event;
229 unsigned long flags;
230
231 if (crtc->state && crtc->state->event) {
232 event = crtc->state->event;
233 crtc->state->event = NULL;
234
235 spin_lock_irqsave(&crtc->dev->event_lock, flags);
236 drm_crtc_send_vblank_event(crtc, event);
237 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
238 }
239}
240
241static const struct drm_crtc_helper_funcs vbox_crtc_helper_funcs = {
242 .atomic_enable = vbox_crtc_atomic_enable,
243 .atomic_disable = vbox_crtc_atomic_disable,
244 .atomic_flush = vbox_crtc_atomic_flush,
245};
246
247static void vbox_crtc_destroy(struct drm_crtc *crtc)
248{
249 drm_crtc_cleanup(crtc);
250 kfree(crtc);
251}
252
253static const struct drm_crtc_funcs vbox_crtc_funcs = {
254 .set_config = drm_atomic_helper_set_config,
255 .page_flip = drm_atomic_helper_page_flip,
256 /* .gamma_set = vbox_crtc_gamma_set, */
257 .destroy = vbox_crtc_destroy,
258 .reset = drm_atomic_helper_crtc_reset,
259 .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
260 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
261};
262
263static int vbox_primary_atomic_check(struct drm_plane *plane,
264 struct drm_plane_state *new_state)
265{
266 struct drm_crtc_state *crtc_state = NULL;
267
268 if (new_state->crtc) {
269 crtc_state = drm_atomic_get_existing_crtc_state(
270 new_state->state, new_state->crtc);
271 if (WARN_ON(!crtc_state))
272 return -EINVAL;
273 }
274
275 return drm_atomic_helper_check_plane_state(new_state, crtc_state,
276 DRM_PLANE_HELPER_NO_SCALING,
277 DRM_PLANE_HELPER_NO_SCALING,
278 false, true);
279}
280
281static void vbox_primary_atomic_update(struct drm_plane *plane,
282 struct drm_plane_state *old_state)
283{
284 struct drm_crtc *crtc = plane->state->crtc;
285 struct drm_framebuffer *fb = plane->state->fb;
286
287 vbox_crtc_set_base_and_mode(crtc, fb,
288 plane->state->src_x >> 16,
289 plane->state->src_y >> 16);
290}
291
292static void vbox_primary_atomic_disable(struct drm_plane *plane,
293 struct drm_plane_state *old_state)
294{
295 struct drm_crtc *crtc = old_state->crtc;
296
297 /* vbox_do_modeset checks plane->state->fb and will disable if NULL */
298 vbox_crtc_set_base_and_mode(crtc, old_state->fb,
299 old_state->src_x >> 16,
300 old_state->src_y >> 16);
301}
302
303static int vbox_primary_prepare_fb(struct drm_plane *plane,
304 struct drm_plane_state *new_state)
305{
306 struct vbox_bo *bo;
307 int ret;
308
309 if (!new_state->fb)
310 return 0;
311
312 bo = gem_to_vbox_bo(to_vbox_framebuffer(new_state->fb)->obj);
313 ret = vbox_bo_pin(bo, TTM_PL_FLAG_VRAM);
314 if (ret)
315 DRM_WARN("Error %d pinning new fb, out of video mem?\n", ret);
316
317 return ret;
318}
319
320static void vbox_primary_cleanup_fb(struct drm_plane *plane,
321 struct drm_plane_state *old_state)
322{
323 struct vbox_bo *bo;
324
325 if (!old_state->fb)
326 return;
327
328 bo = gem_to_vbox_bo(to_vbox_framebuffer(old_state->fb)->obj);
329 vbox_bo_unpin(bo);
330}
331
332static int vbox_cursor_atomic_check(struct drm_plane *plane,
333 struct drm_plane_state *new_state)
334{
335 struct drm_crtc_state *crtc_state = NULL;
336 u32 width = new_state->crtc_w;
337 u32 height = new_state->crtc_h;
338 int ret;
339
340 if (new_state->crtc) {
341 crtc_state = drm_atomic_get_existing_crtc_state(
342 new_state->state, new_state->crtc);
343 if (WARN_ON(!crtc_state))
344 return -EINVAL;
345 }
346
347 ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
348 DRM_PLANE_HELPER_NO_SCALING,
349 DRM_PLANE_HELPER_NO_SCALING,
350 true, true);
351 if (ret)
352 return ret;
353
354 if (!new_state->fb)
355 return 0;
356
357 if (width > VBOX_MAX_CURSOR_WIDTH || height > VBOX_MAX_CURSOR_HEIGHT ||
358 width == 0 || height == 0)
359 return -EINVAL;
360
361 return 0;
362}
363
364/*
365 * Copy the ARGB image and generate the mask, which is needed in case the host
366 * does not support ARGB cursors. The mask is a 1BPP bitmap with the bit set
367 * if the corresponding alpha value in the ARGB image is greater than 0xF0.
368 */
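/*
 * Worked example (illustrative): for a 32x32 cursor, line_size is
 * (32 + 7) / 8 = 4 bytes, so the 1 bpp mask occupies 4 * 32 = 128 bytes
 * (already 32-bit aligned) and the 32x32 ARGB image copied in after it
 * occupies 32 * 32 * 4 = 4096 bytes.
 */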
369static void copy_cursor_image(u8 *src, u8 *dst, u32 width, u32 height,
370 size_t mask_size)
371{
372 size_t line_size = (width + 7) / 8;
373 u32 i, j;
374
375 memcpy(dst + mask_size, src, width * height * 4);
376 for (i = 0; i < height; ++i)
377 for (j = 0; j < width; ++j)
378 if (((u32 *)src)[i * width + j] > 0xf0000000)
379 dst[i * line_size + j / 8] |= (0x80 >> (j % 8));
380}
381
382static void vbox_cursor_atomic_update(struct drm_plane *plane,
383 struct drm_plane_state *old_state)
384{
385 struct vbox_private *vbox =
386 container_of(plane->dev, struct vbox_private, ddev);
387 struct vbox_crtc *vbox_crtc = to_vbox_crtc(plane->state->crtc);
388 struct drm_framebuffer *fb = plane->state->fb;
389 struct vbox_bo *bo = gem_to_vbox_bo(to_vbox_framebuffer(fb)->obj);
390 u32 width = plane->state->crtc_w;
391 u32 height = plane->state->crtc_h;
392 size_t data_size, mask_size;
393 u32 flags;
394 u8 *src;
395
396 /*
397 * VirtualBox uses the host windowing system to draw the cursor so
398	 * moves are a no-op; we only need to upload new cursor sprites.
399 */
400 if (fb == old_state->fb)
401 return;
402
403 mutex_lock(&vbox->hw_mutex);
404
405 vbox_crtc->cursor_enabled = true;
406
407 /* pinning is done in prepare/cleanup framebuffer */
408 src = vbox_bo_kmap(bo);
409 if (IS_ERR(src)) {
410 mutex_unlock(&vbox->hw_mutex);
411 DRM_WARN("Could not kmap cursor bo, skipping update\n");
412 return;
413 }
414
415 /*
416 * The mask must be calculated based on the alpha
417 * channel, one bit per ARGB word, and must be 32-bit
418 * padded.
419 */
420 mask_size = ((width + 7) / 8 * height + 3) & ~3;
421 data_size = width * height * 4 + mask_size;
422
423 copy_cursor_image(src, vbox->cursor_data, width, height, mask_size);
424 vbox_bo_kunmap(bo);
425
426 flags = VBOX_MOUSE_POINTER_VISIBLE | VBOX_MOUSE_POINTER_SHAPE |
427 VBOX_MOUSE_POINTER_ALPHA;
428 hgsmi_update_pointer_shape(vbox->guest_pool, flags,
429 min_t(u32, max(fb->hot_x, 0), width),
430 min_t(u32, max(fb->hot_y, 0), height),
431 width, height, vbox->cursor_data, data_size);
432
433 mutex_unlock(&vbox->hw_mutex);
434}
435
436static void vbox_cursor_atomic_disable(struct drm_plane *plane,
437 struct drm_plane_state *old_state)
438{
439 struct vbox_private *vbox =
440 container_of(plane->dev, struct vbox_private, ddev);
441 struct vbox_crtc *vbox_crtc = to_vbox_crtc(old_state->crtc);
442 bool cursor_enabled = false;
443 struct drm_crtc *crtci;
444
445 mutex_lock(&vbox->hw_mutex);
446
447 vbox_crtc->cursor_enabled = false;
448
449 list_for_each_entry(crtci, &vbox->ddev.mode_config.crtc_list, head) {
450 if (to_vbox_crtc(crtci)->cursor_enabled)
451 cursor_enabled = true;
452 }
453
454 if (!cursor_enabled)
455 hgsmi_update_pointer_shape(vbox->guest_pool, 0, 0, 0,
456 0, 0, NULL, 0);
457
458 mutex_unlock(&vbox->hw_mutex);
459}
460
461static int vbox_cursor_prepare_fb(struct drm_plane *plane,
462 struct drm_plane_state *new_state)
463{
464 struct vbox_bo *bo;
465
466 if (!new_state->fb)
467 return 0;
468
469 bo = gem_to_vbox_bo(to_vbox_framebuffer(new_state->fb)->obj);
470 return vbox_bo_pin(bo, TTM_PL_FLAG_SYSTEM);
471}
472
473static void vbox_cursor_cleanup_fb(struct drm_plane *plane,
474 struct drm_plane_state *old_state)
475{
476 struct vbox_bo *bo;
477
478	if (!old_state->fb)
479		return;
480
481	bo = gem_to_vbox_bo(to_vbox_framebuffer(old_state->fb)->obj);
482 vbox_bo_unpin(bo);
483}
484
485static const u32 vbox_cursor_plane_formats[] = {
486 DRM_FORMAT_ARGB8888,
487};
488
489static const struct drm_plane_helper_funcs vbox_cursor_helper_funcs = {
490 .atomic_check = vbox_cursor_atomic_check,
491 .atomic_update = vbox_cursor_atomic_update,
492 .atomic_disable = vbox_cursor_atomic_disable,
493 .prepare_fb = vbox_cursor_prepare_fb,
494 .cleanup_fb = vbox_cursor_cleanup_fb,
495};
496
497static const struct drm_plane_funcs vbox_cursor_plane_funcs = {
498 .update_plane = drm_atomic_helper_update_plane,
499 .disable_plane = drm_atomic_helper_disable_plane,
500 .destroy = drm_primary_helper_destroy,
501 .reset = drm_atomic_helper_plane_reset,
502 .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
503 .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
504};
505
506static const u32 vbox_primary_plane_formats[] = {
507 DRM_FORMAT_XRGB8888,
508 DRM_FORMAT_ARGB8888,
509};
510
511static const struct drm_plane_helper_funcs vbox_primary_helper_funcs = {
512 .atomic_check = vbox_primary_atomic_check,
513 .atomic_update = vbox_primary_atomic_update,
514 .atomic_disable = vbox_primary_atomic_disable,
515 .prepare_fb = vbox_primary_prepare_fb,
516 .cleanup_fb = vbox_primary_cleanup_fb,
517};
518
519static const struct drm_plane_funcs vbox_primary_plane_funcs = {
520 .update_plane = drm_atomic_helper_update_plane,
521 .disable_plane = drm_atomic_helper_disable_plane,
522 .destroy = drm_primary_helper_destroy,
523 .reset = drm_atomic_helper_plane_reset,
524 .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
525 .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
526};
527
528static struct drm_plane *vbox_create_plane(struct vbox_private *vbox,
529 unsigned int possible_crtcs,
530 enum drm_plane_type type)
531{
532 const struct drm_plane_helper_funcs *helper_funcs = NULL;
533 const struct drm_plane_funcs *funcs;
534 struct drm_plane *plane;
535 const u32 *formats;
536 int num_formats;
537 int err;
538
539 if (type == DRM_PLANE_TYPE_PRIMARY) {
540 funcs = &vbox_primary_plane_funcs;
541 formats = vbox_primary_plane_formats;
542 helper_funcs = &vbox_primary_helper_funcs;
543 num_formats = ARRAY_SIZE(vbox_primary_plane_formats);
544 } else if (type == DRM_PLANE_TYPE_CURSOR) {
545 funcs = &vbox_cursor_plane_funcs;
546 formats = vbox_cursor_plane_formats;
547 helper_funcs = &vbox_cursor_helper_funcs;
548 num_formats = ARRAY_SIZE(vbox_cursor_plane_formats);
549 } else {
550 return ERR_PTR(-EINVAL);
551 }
552
553 plane = kzalloc(sizeof(*plane), GFP_KERNEL);
554 if (!plane)
555 return ERR_PTR(-ENOMEM);
556
557 err = drm_universal_plane_init(&vbox->ddev, plane, possible_crtcs,
558 funcs, formats, num_formats,
559 NULL, type, NULL);
560 if (err)
561 goto free_plane;
562
563 drm_plane_helper_add(plane, helper_funcs);
564
565 return plane;
566
567free_plane:
568 kfree(plane);
569	return ERR_PTR(err);
570}
571
572static struct vbox_crtc *vbox_crtc_init(struct drm_device *dev, unsigned int i)
573{
574 struct vbox_private *vbox =
575 container_of(dev, struct vbox_private, ddev);
576 struct drm_plane *cursor = NULL;
577 struct vbox_crtc *vbox_crtc;
578 struct drm_plane *primary;
579 u32 caps = 0;
580 int ret;
581
582 ret = hgsmi_query_conf(vbox->guest_pool,
583 VBOX_VBVA_CONF32_CURSOR_CAPABILITIES, &caps);
584 if (ret)
585 return ERR_PTR(ret);
586
587 vbox_crtc = kzalloc(sizeof(*vbox_crtc), GFP_KERNEL);
588 if (!vbox_crtc)
589 return ERR_PTR(-ENOMEM);
590
591 primary = vbox_create_plane(vbox, 1 << i, DRM_PLANE_TYPE_PRIMARY);
592 if (IS_ERR(primary)) {
593 ret = PTR_ERR(primary);
594 goto free_mem;
595 }
596
597 if ((caps & VBOX_VBVA_CURSOR_CAPABILITY_HARDWARE)) {
598 cursor = vbox_create_plane(vbox, 1 << i, DRM_PLANE_TYPE_CURSOR);
599 if (IS_ERR(cursor)) {
600 ret = PTR_ERR(cursor);
601 goto clean_primary;
602 }
603 } else {
604 DRM_WARN("VirtualBox host is too old, no cursor support\n");
605 }
606
607 vbox_crtc->crtc_id = i;
608
609 ret = drm_crtc_init_with_planes(dev, &vbox_crtc->base, primary, cursor,
610 &vbox_crtc_funcs, NULL);
611 if (ret)
612 goto clean_cursor;
613
614 drm_mode_crtc_set_gamma_size(&vbox_crtc->base, 256);
615 drm_crtc_helper_add(&vbox_crtc->base, &vbox_crtc_helper_funcs);
616
617 return vbox_crtc;
618
619clean_cursor:
620 if (cursor) {
621 drm_plane_cleanup(cursor);
622 kfree(cursor);
623 }
624clean_primary:
625 drm_plane_cleanup(primary);
626 kfree(primary);
627free_mem:
628 kfree(vbox_crtc);
629 return ERR_PTR(ret);
630}
631
632static void vbox_encoder_destroy(struct drm_encoder *encoder)
633{
634 drm_encoder_cleanup(encoder);
635 kfree(encoder);
636}
637
638static const struct drm_encoder_funcs vbox_enc_funcs = {
639 .destroy = vbox_encoder_destroy,
640};
641
642static struct drm_encoder *vbox_encoder_init(struct drm_device *dev,
643 unsigned int i)
644{
645 struct vbox_encoder *vbox_encoder;
646
647 vbox_encoder = kzalloc(sizeof(*vbox_encoder), GFP_KERNEL);
648 if (!vbox_encoder)
649 return NULL;
650
651 drm_encoder_init(dev, &vbox_encoder->base, &vbox_enc_funcs,
652 DRM_MODE_ENCODER_DAC, NULL);
653
654 vbox_encoder->base.possible_crtcs = 1 << i;
655 return &vbox_encoder->base;
656}
657
658/*
659 * Generate EDID data with a mode-unique serial number for the virtual
660 * monitor to try to persuade Unity that different modes correspond to
661 * different monitors and it should not try to force the same resolution on
662 * them.
663 */
664static void vbox_set_edid(struct drm_connector *connector, int width,
665 int height)
666{
667 enum { EDID_SIZE = 128 };
668 unsigned char edid[EDID_SIZE] = {
669 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, /* header */
670 0x58, 0x58, /* manufacturer (VBX) */
671 0x00, 0x00, /* product code */
672 0x00, 0x00, 0x00, 0x00, /* serial number goes here */
673 0x01, /* week of manufacture */
674 0x00, /* year of manufacture */
675 0x01, 0x03, /* EDID version */
676 0x80, /* capabilities - digital */
677 0x00, /* horiz. res in cm, zero for projectors */
678 0x00, /* vert. res in cm */
679 0x78, /* display gamma (120 == 2.2). */
680 0xEE, /* features (standby, suspend, off, RGB, std */
681 /* colour space, preferred timing mode) */
682 0xEE, 0x91, 0xA3, 0x54, 0x4C, 0x99, 0x26, 0x0F, 0x50, 0x54,
683 /* chromaticity for standard colour space. */
684 0x00, 0x00, 0x00, /* no default timings */
685 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
686 0x01, 0x01,
687 0x01, 0x01, 0x01, 0x01, /* no standard timings */
688 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x06, 0x00, 0x02, 0x02,
689 0x02, 0x02,
690 /* descriptor block 1 goes below */
691 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
692 /* descriptor block 2, monitor ranges */
693 0x00, 0x00, 0x00, 0xFD, 0x00,
694 0x00, 0xC8, 0x00, 0xC8, 0x64, 0x00, 0x0A, 0x20, 0x20, 0x20,
695 0x20, 0x20,
696		/* 0-200 Hz vertical, 0-200 kHz horizontal, 1000 MHz pixel clock */
697 0x20,
698 /* descriptor block 3, monitor name */
699 0x00, 0x00, 0x00, 0xFC, 0x00,
700 'V', 'B', 'O', 'X', ' ', 'm', 'o', 'n', 'i', 't', 'o', 'r',
701 '\n',
702 /* descriptor block 4: dummy data */
703 0x00, 0x00, 0x00, 0x10, 0x00,
704 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
705 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
706 0x20,
707 0x00, /* number of extensions */
708 0x00 /* checksum goes here */
709 };
710 int clock = (width + 6) * (height + 6) * 60 / 10000;
711 unsigned int i, sum = 0;
712
713 edid[12] = width & 0xff;
714 edid[13] = width >> 8;
715 edid[14] = height & 0xff;
716 edid[15] = height >> 8;
717 edid[54] = clock & 0xff;
718 edid[55] = clock >> 8;
719 edid[56] = width & 0xff;
720 edid[58] = (width >> 4) & 0xf0;
721 edid[59] = height & 0xff;
722 edid[61] = (height >> 4) & 0xf0;
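	/* The EDID checksum byte must make all 128 bytes sum to 0 (mod 256). */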
723 for (i = 0; i < EDID_SIZE - 1; ++i)
724 sum += edid[i];
725 edid[EDID_SIZE - 1] = (0x100 - (sum & 0xFF)) & 0xFF;
726 drm_connector_update_edid_property(connector, (struct edid *)edid);
727}
728
729static int vbox_get_modes(struct drm_connector *connector)
730{
731 struct vbox_connector *vbox_connector = NULL;
732 struct drm_display_mode *mode = NULL;
733 struct vbox_private *vbox = NULL;
734 unsigned int num_modes = 0;
735 int preferred_width, preferred_height;
736
737 vbox_connector = to_vbox_connector(connector);
738 vbox = connector->dev->dev_private;
739
740 hgsmi_report_flags_location(vbox->guest_pool, GUEST_HEAP_OFFSET(vbox) +
741 HOST_FLAGS_OFFSET);
742 if (vbox_connector->vbox_crtc->crtc_id == 0)
743 vbox_report_caps(vbox);
744
745 num_modes = drm_add_modes_noedid(connector, 2560, 1600);
746 preferred_width = vbox_connector->mode_hint.width ?
747 vbox_connector->mode_hint.width : 1024;
748 preferred_height = vbox_connector->mode_hint.height ?
749 vbox_connector->mode_hint.height : 768;
750 mode = drm_cvt_mode(connector->dev, preferred_width, preferred_height,
751 60, false, false, false);
752 if (mode) {
753 mode->type |= DRM_MODE_TYPE_PREFERRED;
754 drm_mode_probed_add(connector, mode);
755 ++num_modes;
756 }
757 vbox_set_edid(connector, preferred_width, preferred_height);
758
759 if (vbox_connector->vbox_crtc->x_hint != -1)
760 drm_object_property_set_value(&connector->base,
761 vbox->ddev.mode_config.suggested_x_property,
762 vbox_connector->vbox_crtc->x_hint);
763 else
764 drm_object_property_set_value(&connector->base,
765 vbox->ddev.mode_config.suggested_x_property, 0);
766
767 if (vbox_connector->vbox_crtc->y_hint != -1)
768 drm_object_property_set_value(&connector->base,
769 vbox->ddev.mode_config.suggested_y_property,
770 vbox_connector->vbox_crtc->y_hint);
771 else
772 drm_object_property_set_value(&connector->base,
773 vbox->ddev.mode_config.suggested_y_property, 0);
774
775 return num_modes;
776}
777
778static void vbox_connector_destroy(struct drm_connector *connector)
779{
780 drm_connector_unregister(connector);
781 drm_connector_cleanup(connector);
782 kfree(connector);
783}
784
785static enum drm_connector_status
786vbox_connector_detect(struct drm_connector *connector, bool force)
787{
788 struct vbox_connector *vbox_connector;
789
790 vbox_connector = to_vbox_connector(connector);
791
792 return vbox_connector->mode_hint.disconnected ?
793 connector_status_disconnected : connector_status_connected;
794}
795
796static int vbox_fill_modes(struct drm_connector *connector, u32 max_x,
797 u32 max_y)
798{
799 struct vbox_connector *vbox_connector;
800 struct drm_device *dev;
801 struct drm_display_mode *mode, *iterator;
802
803 vbox_connector = to_vbox_connector(connector);
804 dev = vbox_connector->base.dev;
805 list_for_each_entry_safe(mode, iterator, &connector->modes, head) {
806 list_del(&mode->head);
807 drm_mode_destroy(dev, mode);
808 }
809
810 return drm_helper_probe_single_connector_modes(connector, max_x, max_y);
811}
812
813static const struct drm_connector_helper_funcs vbox_connector_helper_funcs = {
814 .get_modes = vbox_get_modes,
815};
816
817static const struct drm_connector_funcs vbox_connector_funcs = {
818 .detect = vbox_connector_detect,
819 .fill_modes = vbox_fill_modes,
820 .destroy = vbox_connector_destroy,
821 .reset = drm_atomic_helper_connector_reset,
822 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
823 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
824};
825
826static int vbox_connector_init(struct drm_device *dev,
827 struct vbox_crtc *vbox_crtc,
828 struct drm_encoder *encoder)
829{
830 struct vbox_connector *vbox_connector;
831 struct drm_connector *connector;
832
833 vbox_connector = kzalloc(sizeof(*vbox_connector), GFP_KERNEL);
834 if (!vbox_connector)
835 return -ENOMEM;
836
837 connector = &vbox_connector->base;
838 vbox_connector->vbox_crtc = vbox_crtc;
839
840 drm_connector_init(dev, connector, &vbox_connector_funcs,
841 DRM_MODE_CONNECTOR_VGA);
842 drm_connector_helper_add(connector, &vbox_connector_helper_funcs);
843
844 connector->interlace_allowed = 0;
845 connector->doublescan_allowed = 0;
846
847 drm_mode_create_suggested_offset_properties(dev);
848 drm_object_attach_property(&connector->base,
849 dev->mode_config.suggested_x_property, 0);
850 drm_object_attach_property(&connector->base,
851 dev->mode_config.suggested_y_property, 0);
852
853 drm_connector_attach_encoder(connector, encoder);
854
855 return 0;
856}
857
858static struct drm_framebuffer *vbox_user_framebuffer_create(
859 struct drm_device *dev,
860 struct drm_file *filp,
861 const struct drm_mode_fb_cmd2 *mode_cmd)
862{
863 struct vbox_private *vbox =
864 container_of(dev, struct vbox_private, ddev);
865 struct drm_gem_object *obj;
866 struct vbox_framebuffer *vbox_fb;
867 int ret = -ENOMEM;
868
869 obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
870 if (!obj)
871 return ERR_PTR(-ENOENT);
872
873 vbox_fb = kzalloc(sizeof(*vbox_fb), GFP_KERNEL);
874 if (!vbox_fb)
875 goto err_unref_obj;
876
877 ret = vbox_framebuffer_init(vbox, vbox_fb, mode_cmd, obj);
878 if (ret)
879 goto err_free_vbox_fb;
880
881 return &vbox_fb->base;
882
883err_free_vbox_fb:
884 kfree(vbox_fb);
885err_unref_obj:
886 drm_gem_object_put_unlocked(obj);
887 return ERR_PTR(ret);
888}
889
890static const struct drm_mode_config_funcs vbox_mode_funcs = {
891 .fb_create = vbox_user_framebuffer_create,
892 .atomic_check = drm_atomic_helper_check,
893 .atomic_commit = drm_atomic_helper_commit,
894};
895
896int vbox_mode_init(struct vbox_private *vbox)
897{
898 struct drm_device *dev = &vbox->ddev;
899 struct drm_encoder *encoder;
900 struct vbox_crtc *vbox_crtc;
901 unsigned int i;
902 int ret;
903
904 drm_mode_config_init(dev);
905
906	dev->mode_config.funcs = &vbox_mode_funcs;
907 dev->mode_config.min_width = 0;
908 dev->mode_config.min_height = 0;
909 dev->mode_config.preferred_depth = 24;
910 dev->mode_config.max_width = VBE_DISPI_MAX_XRES;
911 dev->mode_config.max_height = VBE_DISPI_MAX_YRES;
912
913 for (i = 0; i < vbox->num_crtcs; ++i) {
914 vbox_crtc = vbox_crtc_init(dev, i);
915 if (IS_ERR(vbox_crtc)) {
916 ret = PTR_ERR(vbox_crtc);
917 goto err_drm_mode_cleanup;
918 }
919 encoder = vbox_encoder_init(dev, i);
920 if (!encoder) {
921 ret = -ENOMEM;
922 goto err_drm_mode_cleanup;
923 }
924 ret = vbox_connector_init(dev, vbox_crtc, encoder);
925 if (ret)
926 goto err_drm_mode_cleanup;
927 }
928
929 drm_mode_config_reset(dev);
930 return 0;
931
932err_drm_mode_cleanup:
933 drm_mode_config_cleanup(dev);
934 return ret;
935}
936
937void vbox_mode_fini(struct vbox_private *vbox)
938{
939 drm_mode_config_cleanup(&vbox->ddev);
940}
diff --git a/drivers/gpu/drm/vboxvideo/vbox_prime.c b/drivers/gpu/drm/vboxvideo/vbox_prime.c
new file mode 100644
index 000000000000..d61985b0c6eb
--- /dev/null
+++ b/drivers/gpu/drm/vboxvideo/vbox_prime.c
@@ -0,0 +1,56 @@
1// SPDX-License-Identifier: MIT
2/*
3 * Copyright (C) 2017 Oracle Corporation
4 * Copyright 2017 Canonical
5 * Authors: Andreas Pokorny
6 */
7
8#include "vbox_drv.h"
9
10/*
11 * Based on qxl_prime.c:
12 * Empty implementations, as there should not be any other driver for a virtual
13 * device that might share buffers with vboxvideo
14 */
15
16int vbox_gem_prime_pin(struct drm_gem_object *obj)
17{
18 WARN_ONCE(1, "not implemented");
19 return -ENOSYS;
20}
21
22void vbox_gem_prime_unpin(struct drm_gem_object *obj)
23{
24 WARN_ONCE(1, "not implemented");
25}
26
27struct sg_table *vbox_gem_prime_get_sg_table(struct drm_gem_object *obj)
28{
29 WARN_ONCE(1, "not implemented");
30 return ERR_PTR(-ENOSYS);
31}
32
33struct drm_gem_object *vbox_gem_prime_import_sg_table(
34 struct drm_device *dev, struct dma_buf_attachment *attach,
35 struct sg_table *table)
36{
37 WARN_ONCE(1, "not implemented");
38 return ERR_PTR(-ENOSYS);
39}
40
41void *vbox_gem_prime_vmap(struct drm_gem_object *obj)
42{
43 WARN_ONCE(1, "not implemented");
44 return ERR_PTR(-ENOSYS);
45}
46
47void vbox_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
48{
49 WARN_ONCE(1, "not implemented");
50}
51
52int vbox_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *area)
53{
54 WARN_ONCE(1, "not implemented");
55 return -ENOSYS;
56}
diff --git a/drivers/gpu/drm/vboxvideo/vbox_ttm.c b/drivers/gpu/drm/vboxvideo/vbox_ttm.c
new file mode 100644
index 000000000000..30f270027acf
--- /dev/null
+++ b/drivers/gpu/drm/vboxvideo/vbox_ttm.c
@@ -0,0 +1,394 @@
1// SPDX-License-Identifier: MIT
2/*
3 * Copyright (C) 2013-2017 Oracle Corporation
4 * This file is based on ast_ttm.c
5 * Copyright 2012 Red Hat Inc.
6 * Authors: Dave Airlie <airlied@redhat.com>
7 * Michael Thayer <michael.thayer@oracle.com>
8 */
9#include <linux/pci.h>
10#include <drm/drm_file.h>
11#include <drm/ttm/ttm_page_alloc.h>
12#include "vbox_drv.h"
13
14static inline struct vbox_private *vbox_bdev(struct ttm_bo_device *bd)
15{
16 return container_of(bd, struct vbox_private, ttm.bdev);
17}
18
19static void vbox_bo_ttm_destroy(struct ttm_buffer_object *tbo)
20{
21 struct vbox_bo *bo;
22
23 bo = container_of(tbo, struct vbox_bo, bo);
24
25 drm_gem_object_release(&bo->gem);
26 kfree(bo);
27}
28
29static bool vbox_ttm_bo_is_vbox_bo(struct ttm_buffer_object *bo)
30{
31 if (bo->destroy == &vbox_bo_ttm_destroy)
32 return true;
33
34 return false;
35}
36
37static int
38vbox_bo_init_mem_type(struct ttm_bo_device *bdev, u32 type,
39 struct ttm_mem_type_manager *man)
40{
41 switch (type) {
42 case TTM_PL_SYSTEM:
43 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
44 man->available_caching = TTM_PL_MASK_CACHING;
45 man->default_caching = TTM_PL_FLAG_CACHED;
46 break;
47 case TTM_PL_VRAM:
48 man->func = &ttm_bo_manager_func;
49 man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
50 man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
51 man->default_caching = TTM_PL_FLAG_WC;
52 break;
53 default:
54 DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type);
55 return -EINVAL;
56 }
57
58 return 0;
59}
60
61static void
62vbox_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
63{
64 struct vbox_bo *vboxbo = vbox_bo(bo);
65
66 if (!vbox_ttm_bo_is_vbox_bo(bo))
67 return;
68
69 vbox_ttm_placement(vboxbo, TTM_PL_FLAG_SYSTEM);
70 *pl = vboxbo->placement;
71}
72
73static int vbox_bo_verify_access(struct ttm_buffer_object *bo,
74 struct file *filp)
75{
76 return 0;
77}
78
79static int vbox_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
80 struct ttm_mem_reg *mem)
81{
82 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
83 struct vbox_private *vbox = vbox_bdev(bdev);
84
85 mem->bus.addr = NULL;
86 mem->bus.offset = 0;
87 mem->bus.size = mem->num_pages << PAGE_SHIFT;
88 mem->bus.base = 0;
89 mem->bus.is_iomem = false;
90 if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
91 return -EINVAL;
92 switch (mem->mem_type) {
93 case TTM_PL_SYSTEM:
94 /* system memory */
95 return 0;
96 case TTM_PL_VRAM:
97 mem->bus.offset = mem->start << PAGE_SHIFT;
98 mem->bus.base = pci_resource_start(vbox->ddev.pdev, 0);
99 mem->bus.is_iomem = true;
100 break;
101 default:
102 return -EINVAL;
103 }
104 return 0;
105}
106
107static void vbox_ttm_io_mem_free(struct ttm_bo_device *bdev,
108 struct ttm_mem_reg *mem)
109{
110}
111
112static void vbox_ttm_backend_destroy(struct ttm_tt *tt)
113{
114 ttm_tt_fini(tt);
115 kfree(tt);
116}
117
118static struct ttm_backend_func vbox_tt_backend_func = {
119 .destroy = &vbox_ttm_backend_destroy,
120};
121
122static struct ttm_tt *vbox_ttm_tt_create(struct ttm_buffer_object *bo,
123 u32 page_flags)
124{
125 struct ttm_tt *tt;
126
127 tt = kzalloc(sizeof(*tt), GFP_KERNEL);
128 if (!tt)
129 return NULL;
130
131 tt->func = &vbox_tt_backend_func;
132 if (ttm_tt_init(tt, bo, page_flags)) {
133 kfree(tt);
134 return NULL;
135 }
136
137 return tt;
138}
139
140static struct ttm_bo_driver vbox_bo_driver = {
141 .ttm_tt_create = vbox_ttm_tt_create,
142 .init_mem_type = vbox_bo_init_mem_type,
143 .eviction_valuable = ttm_bo_eviction_valuable,
144 .evict_flags = vbox_bo_evict_flags,
145 .verify_access = vbox_bo_verify_access,
146 .io_mem_reserve = &vbox_ttm_io_mem_reserve,
147 .io_mem_free = &vbox_ttm_io_mem_free,
148};
149
150int vbox_mm_init(struct vbox_private *vbox)
151{
152 int ret;
153 struct drm_device *dev = &vbox->ddev;
154 struct ttm_bo_device *bdev = &vbox->ttm.bdev;
155
156 ret = ttm_bo_device_init(&vbox->ttm.bdev,
157 &vbox_bo_driver,
158 dev->anon_inode->i_mapping,
159 DRM_FILE_PAGE_OFFSET, true);
160 if (ret) {
161		DRM_ERROR("Error initialising bo driver: %d\n", ret);
162 return ret;
163 }
164
165 ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
166 vbox->available_vram_size >> PAGE_SHIFT);
167 if (ret) {
168 DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
169 goto err_device_release;
170 }
171
172#ifdef DRM_MTRR_WC
173 vbox->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
174 pci_resource_len(dev->pdev, 0),
175 DRM_MTRR_WC);
176#else
177 vbox->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
178 pci_resource_len(dev->pdev, 0));
179#endif
180 return 0;
181
182err_device_release:
183 ttm_bo_device_release(&vbox->ttm.bdev);
184 return ret;
185}
186
187void vbox_mm_fini(struct vbox_private *vbox)
188{
189#ifdef DRM_MTRR_WC
190 drm_mtrr_del(vbox->fb_mtrr,
191 pci_resource_start(vbox->ddev.pdev, 0),
192 pci_resource_len(vbox->ddev.pdev, 0), DRM_MTRR_WC);
193#else
194 arch_phys_wc_del(vbox->fb_mtrr);
195#endif
196 ttm_bo_device_release(&vbox->ttm.bdev);
197}
198
199void vbox_ttm_placement(struct vbox_bo *bo, int domain)
200{
201 unsigned int i;
202 u32 c = 0;
203
204 bo->placement.placement = bo->placements;
205 bo->placement.busy_placement = bo->placements;
206
207 if (domain & TTM_PL_FLAG_VRAM)
208 bo->placements[c++].flags =
209 TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
210 if (domain & TTM_PL_FLAG_SYSTEM)
211 bo->placements[c++].flags =
212 TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
213 if (!c)
214 bo->placements[c++].flags =
215 TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
216
217 bo->placement.num_placement = c;
218 bo->placement.num_busy_placement = c;
219
220 for (i = 0; i < c; ++i) {
221 bo->placements[i].fpfn = 0;
222 bo->placements[i].lpfn = 0;
223 }
224}
225
226int vbox_bo_create(struct vbox_private *vbox, int size, int align,
227 u32 flags, struct vbox_bo **pvboxbo)
228{
229 struct vbox_bo *vboxbo;
230 size_t acc_size;
231 int ret;
232
233 vboxbo = kzalloc(sizeof(*vboxbo), GFP_KERNEL);
234 if (!vboxbo)
235 return -ENOMEM;
236
237 ret = drm_gem_object_init(&vbox->ddev, &vboxbo->gem, size);
238 if (ret)
239 goto err_free_vboxbo;
240
241 vboxbo->bo.bdev = &vbox->ttm.bdev;
242
243 vbox_ttm_placement(vboxbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
244
245 acc_size = ttm_bo_dma_acc_size(&vbox->ttm.bdev, size,
246 sizeof(struct vbox_bo));
247
248 ret = ttm_bo_init(&vbox->ttm.bdev, &vboxbo->bo, size,
249 ttm_bo_type_device, &vboxbo->placement,
250 align >> PAGE_SHIFT, false, acc_size,
251 NULL, NULL, vbox_bo_ttm_destroy);
252 if (ret)
253 goto err_free_vboxbo;
254
255 *pvboxbo = vboxbo;
256
257 return 0;
258
259err_free_vboxbo:
260 kfree(vboxbo);
261 return ret;
262}
263
264int vbox_bo_pin(struct vbox_bo *bo, u32 pl_flag)
265{
266 struct ttm_operation_ctx ctx = { false, false };
267 int i, ret;
268
269 if (bo->pin_count) {
270 bo->pin_count++;
271 return 0;
272 }
273
274 ret = vbox_bo_reserve(bo, false);
275 if (ret)
276 return ret;
277
278 vbox_ttm_placement(bo, pl_flag);
279
280 for (i = 0; i < bo->placement.num_placement; i++)
281 bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
282
283 ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
284 if (ret == 0)
285 bo->pin_count = 1;
286
287 vbox_bo_unreserve(bo);
288
289 return ret;
290}
291
292int vbox_bo_unpin(struct vbox_bo *bo)
293{
294 struct ttm_operation_ctx ctx = { false, false };
295 int i, ret;
296
297 if (!bo->pin_count) {
298		DRM_ERROR("unpin called on unpinned bo %p\n", bo);
299 return 0;
300 }
301 bo->pin_count--;
302 if (bo->pin_count)
303 return 0;
304
305 ret = vbox_bo_reserve(bo, false);
306 if (ret) {
307 DRM_ERROR("Error %d reserving bo, leaving it pinned\n", ret);
308 return ret;
309 }
310
311 for (i = 0; i < bo->placement.num_placement; i++)
312 bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
313
314 ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
315
316 vbox_bo_unreserve(bo);
317
318 return ret;
319}
320
321/*
322 * Move a vbox-owned buffer object to system memory if no one else has it
323 * pinned. The caller must have pinned it previously, and this call will
324 * release the caller's pin.
325 */
326int vbox_bo_push_sysram(struct vbox_bo *bo)
327{
328 struct ttm_operation_ctx ctx = { false, false };
329 int i, ret;
330
331 if (!bo->pin_count) {
332		DRM_ERROR("unpin called on unpinned bo %p\n", bo);
333 return 0;
334 }
335 bo->pin_count--;
336 if (bo->pin_count)
337 return 0;
338
339 if (bo->kmap.virtual) {
340 ttm_bo_kunmap(&bo->kmap);
341 bo->kmap.virtual = NULL;
342 }
343
344 vbox_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
345
346 for (i = 0; i < bo->placement.num_placement; i++)
347 bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
348
349 ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
350 if (ret) {
351		DRM_ERROR("pushing to system memory failed\n");
352 return ret;
353 }
354
355 return 0;
356}
357
358int vbox_mmap(struct file *filp, struct vm_area_struct *vma)
359{
360 struct drm_file *file_priv;
361 struct vbox_private *vbox;
362
363 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
364 return -EINVAL;
365
366 file_priv = filp->private_data;
367 vbox = file_priv->minor->dev->dev_private;
368
369 return ttm_bo_mmap(filp, vma, &vbox->ttm.bdev);
370}
371
372void *vbox_bo_kmap(struct vbox_bo *bo)
373{
374 int ret;
375
376 if (bo->kmap.virtual)
377 return bo->kmap.virtual;
378
379 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
380 if (ret) {
381 DRM_ERROR("Error kmapping bo: %d\n", ret);
382 return NULL;
383 }
384
385 return bo->kmap.virtual;
386}
387
388void vbox_bo_kunmap(struct vbox_bo *bo)
389{
390 if (bo->kmap.virtual) {
391 ttm_bo_kunmap(&bo->kmap);
392 bo->kmap.virtual = NULL;
393 }
394}
diff --git a/drivers/gpu/drm/vboxvideo/vboxvideo.h b/drivers/gpu/drm/vboxvideo/vboxvideo.h
new file mode 100644
index 000000000000..0592004f71aa
--- /dev/null
+++ b/drivers/gpu/drm/vboxvideo/vboxvideo.h
@@ -0,0 +1,442 @@
1/* SPDX-License-Identifier: MIT */
2/* Copyright (C) 2006-2016 Oracle Corporation */
3
4#ifndef __VBOXVIDEO_H__
5#define __VBOXVIDEO_H__
6
7#define VBOX_VIDEO_MAX_SCREENS 64
8
9/*
10 * The last 4096 bytes of the guest VRAM contain the generic info for all
11 * DualView chunks: sizes and offsets of chunks. This is filled by the miniport.
12 *
13 * Last 4096 bytes of each chunk contain chunk specific data: framebuffer info,
14 * etc. This is used exclusively by the corresponding instance of a display
15 * driver.
16 *
17 * The VRAM layout:
18 * Last 4096 bytes - Adapter information area.
19 * 4096 bytes aligned miniport heap (value specified in the config rounded up).
20 * Slack - what is left after dividing up the VRAM.
21 * 4096 bytes aligned framebuffers:
22 * last 4096 bytes of each framebuffer is the display information area.
23 *
24 * The Virtual Graphics Adapter information in the guest VRAM is stored by the
25 * guest video driver using structures prepended by VBOXVIDEOINFOHDR.
26 *
27 * When the guest driver writes dword 0 to the VBE_DISPI_INDEX_VBOX_VIDEO
28 * the host starts to process the info. The first element at the start of
29 * the 4096 bytes region should normally be a LINK that points to the
30 * actual information chain. That way the guest driver can have a fixed
31 * layout of the information memory block and just rewrite the link to
32 * point to the relevant memory chain.
33 *
34 * The processing stops at the END element.
35 *
36 * The host can access the memory only when the port IO is processed.
37 * All data that will be needed later must be copied from these 4096 bytes.
38 * But other VRAM can be used by the host until the mode is disabled.
39 *
40 * The guest driver writes dword 0xffffffff to the VBE_DISPI_INDEX_VBOX_VIDEO
41 * to disable the mode.
42 *
43 * VBE_DISPI_INDEX_VBOX_VIDEO is used to read the configuration information
44 * from the host and issue commands to the host.
45 *
46 * The guest writes the VBE_DISPI_INDEX_VBOX_VIDEO index register, then the
47 * following operations with the VBE data register can be performed:
48 *
49 * Operation Result
50 * write 16 bit value NOP
51 * read 16 bit value count of monitors
52 * write 32 bit value set the vbox cmd value and the cmd processed by the host
53 * read 32 bit value result of the last vbox command is returned
54 */
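/*
 * Illustrative sketch, not part of the driver: per the table above, the
 * monitor count can be read by selecting the VBE_DISPI_INDEX_VBOX_VIDEO
 * index and doing a 16-bit read of the VBE data register. The port and
 * index constants are from vboxvideo_vbe.h; outw()/inw() assume
 * <linux/io.h>.
 */
#if 0
static inline u16 vbox_example_monitor_count(void)
{
	outw(VBE_DISPI_INDEX_VBOX_VIDEO, VBE_DISPI_IOPORT_INDEX);
	return inw(VBE_DISPI_IOPORT_DATA);
}
#endif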
55
56struct vbva_cmd_hdr {
57 s16 x;
58 s16 y;
59 u16 w;
60 u16 h;
61} __packed;
62
63/*
64 * The VBVA ring buffer is suitable for transferring large (< 2GB) amounts of
65 * data, for example big bitmaps which do not fit in the buffer.
66 *
67 * Guest starts writing to the buffer by initializing a record entry in the
68 * records queue. VBVA_F_RECORD_PARTIAL indicates that the record is being
69 * written. As data is written to the ring buffer, the guest increases
70 * free_offset.
71 *
72 * The host reads the records on flushes and processes all completed records.
73 * When the host encounters a situation where only a partial record is present
74 * and len_and_flags & ~VBVA_F_RECORD_PARTIAL >= VBVA_RING_BUFFER_SIZE -
75 * VBVA_RING_BUFFER_THRESHOLD, the host fetches all record data written so far
76 * and updates data_offset. After that, on each flush the host continues
77 * fetching the data until the record is completed.
78 */
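/*
 * Usage sketch, illustrative only: a record is submitted by opening an
 * update, writing the payload and closing the update, after which the
 * host can fetch the completed record on the next flush. The helpers
 * are declared in vboxvideo_guest.h; vbva_ctx, guest_pool, cmd and
 * cmd_len are placeholders.
 */
#if 0
if (vbva_buffer_begin_update(vbva_ctx, guest_pool)) {
	vbva_write(vbva_ctx, guest_pool, cmd, cmd_len);
	vbva_buffer_end_update(vbva_ctx);
}
#endif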
79
80#define VBVA_RING_BUFFER_SIZE (4194304 - 1024)
81#define VBVA_RING_BUFFER_THRESHOLD (4096)
82
83#define VBVA_MAX_RECORDS (64)
84
85#define VBVA_F_MODE_ENABLED 0x00000001u
86#define VBVA_F_MODE_VRDP 0x00000002u
87#define VBVA_F_MODE_VRDP_RESET 0x00000004u
88#define VBVA_F_MODE_VRDP_ORDER_MASK 0x00000008u
89
90#define VBVA_F_STATE_PROCESSING 0x00010000u
91
92#define VBVA_F_RECORD_PARTIAL 0x80000000u
93
94struct vbva_record {
95 u32 len_and_flags;
96} __packed;
97
98/*
99 * The minimum HGSMI heap size is PAGE_SIZE (4096 bytes) and is a restriction of
100 * the runtime heapsimple API. Use a minimum of 2 pages here, because the info
101 * area may also contain other data (for example the hgsmi_host_flags structure).
102 */
103#define VBVA_ADAPTER_INFORMATION_SIZE 65536
104#define VBVA_MIN_BUFFER_SIZE 65536
105
106/* The value for port IO to make the adapter stop interpreting the adapter memory. */
107#define VBOX_VIDEO_DISABLE_ADAPTER_MEMORY 0xFFFFFFFF
108
109/* The value for port IO to let the adapter interpret the adapter memory. */
110#define VBOX_VIDEO_INTERPRET_ADAPTER_MEMORY 0x00000000
111
112/*
113 * The value for port IO to let the adapter interpret the display memory.
114 * The display number is encoded in low 16 bits.
115 */
116#define VBOX_VIDEO_INTERPRET_DISPLAY_MEMORY_BASE 0x00010000
117
118struct vbva_host_flags {
119 u32 host_events;
120 u32 supported_orders;
121} __packed;
122
123struct vbva_buffer {
124 struct vbva_host_flags host_flags;
125
126 /* The offset where the data start in the buffer. */
127 u32 data_offset;
128 /* The offset where next data must be placed in the buffer. */
129 u32 free_offset;
130
131 /* The queue of record descriptions. */
132 struct vbva_record records[VBVA_MAX_RECORDS];
133 u32 record_first_index;
134 u32 record_free_index;
135
136 /* Space to leave free when large partial records are transferred. */
137 u32 partial_write_tresh;
138
139 u32 data_len;
140 /* variable size for the rest of the vbva_buffer area in VRAM. */
141	u8 data[];
142} __packed;
143
144#define VBVA_MAX_RECORD_SIZE (128 * 1024 * 1024)
145
146/* guest->host commands */
147#define VBVA_QUERY_CONF32 1
148#define VBVA_SET_CONF32 2
149#define VBVA_INFO_VIEW 3
150#define VBVA_INFO_HEAP 4
151#define VBVA_FLUSH 5
152#define VBVA_INFO_SCREEN 6
153#define VBVA_ENABLE 7
154#define VBVA_MOUSE_POINTER_SHAPE 8
155/* informs host about HGSMI caps. see vbva_caps below */
156#define VBVA_INFO_CAPS 12
157/* configures scanline, see VBVASCANLINECFG below */
158#define VBVA_SCANLINE_CFG 13
159/* requests scanline info, see VBVASCANLINEINFO below */
160#define VBVA_SCANLINE_INFO 14
161/* inform host about VBVA Command submission */
162#define VBVA_CMDVBVA_SUBMIT 16
163/* inform host about VBVA Command submission */
164#define VBVA_CMDVBVA_FLUSH 17
165/* G->H DMA command */
166#define VBVA_CMDVBVA_CTL 18
167/* Query most recent mode hints sent */
168#define VBVA_QUERY_MODE_HINTS 19
169/*
170 * Report the guest virtual desktop position and size for mapping host and
171 * guest pointer positions.
172 */
173#define VBVA_REPORT_INPUT_MAPPING 20
174/* Report the guest cursor position and query the host position. */
175#define VBVA_CURSOR_POSITION 21
176
177/* host->guest commands */
178#define VBVAHG_EVENT 1
179#define VBVAHG_DISPLAY_CUSTOM 2
180
181/* vbva_conf32::index */
182#define VBOX_VBVA_CONF32_MONITOR_COUNT 0
183#define VBOX_VBVA_CONF32_HOST_HEAP_SIZE 1
184/*
185 * Returns VINF_SUCCESS if the host can report mode hints via VBVA.
186 * Set value to VERR_NOT_SUPPORTED before calling.
187 */
188#define VBOX_VBVA_CONF32_MODE_HINT_REPORTING 2
189/*
190 * Returns VINF_SUCCESS if the host can report guest cursor enabled status via
191 * VBVA. Set value to VERR_NOT_SUPPORTED before calling.
192 */
193#define VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING 3
194/*
195 * Returns the currently available host cursor capabilities. Available if
196 * VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING returns success.
197 */
198#define VBOX_VBVA_CONF32_CURSOR_CAPABILITIES 4
199/* Returns the supported flags in vbva_infoscreen.flags. */
200#define VBOX_VBVA_CONF32_SCREEN_FLAGS 5
201/* Returns the max size of VBVA record. */
202#define VBOX_VBVA_CONF32_MAX_RECORD_SIZE 6
203
204struct vbva_conf32 {
205 u32 index;
206 u32 value;
207} __packed;
208
209/* Reserved for historical reasons. */
210#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED0 BIT(0)
211/*
212 * Guest cursor capability: can the host show a hardware cursor at the host
213 * pointer location?
214 */
215#define VBOX_VBVA_CURSOR_CAPABILITY_HARDWARE BIT(1)
216/* Reserved for historical reasons. */
217#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED2 BIT(2)
218/* Reserved for historical reasons. Must always be unset. */
219#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED3 BIT(3)
220/* Reserved for historical reasons. */
221#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED4 BIT(4)
222/* Reserved for historical reasons. */
223#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED5 BIT(5)
224
225struct vbva_infoview {
226 /* Index of the screen, assigned by the guest. */
227 u32 view_index;
228
229 /* The screen offset in VRAM, the framebuffer starts here. */
230 u32 view_offset;
231
232 /* The size of the VRAM memory that can be used for the view. */
233 u32 view_size;
234
235 /* The recommended maximum size of the VRAM memory for the screen. */
236 u32 max_screen_size;
237} __packed;
238
239struct vbva_flush {
240 u32 reserved;
241} __packed;
242
243/* vbva_infoscreen.flags */
244#define VBVA_SCREEN_F_NONE 0x0000
245#define VBVA_SCREEN_F_ACTIVE 0x0001
246/*
247 * The virtual monitor has been disabled by the guest and should be removed
248 * by the host and ignored for purposes of pointer position calculation.
249 */
250#define VBVA_SCREEN_F_DISABLED 0x0002
251/*
252 * The virtual monitor has been blanked by the guest and should be blacked
253 * out by the host using width, height, etc values from the vbva_infoscreen
254 * request.
255 */
256#define VBVA_SCREEN_F_BLANK 0x0004
257/*
258 * The virtual monitor has been blanked by the guest and should be blacked
259 * out by the host using the previous mode values for width, height, etc.
260 */
261#define VBVA_SCREEN_F_BLANK2 0x0008
262
263struct vbva_infoscreen {
264 /* Which view contains the screen. */
265 u32 view_index;
266
267 /* Physical X origin relative to the primary screen. */
268 s32 origin_x;
269
270 /* Physical Y origin relative to the primary screen. */
271 s32 origin_y;
272
273 /* Offset of visible framebuffer relative to the framebuffer start. */
274 u32 start_offset;
275
276 /* The scan line size in bytes. */
277 u32 line_size;
278
279 /* Width of the screen. */
280 u32 width;
281
282 /* Height of the screen. */
283 u32 height;
284
285 /* Color depth. */
286 u16 bits_per_pixel;
287
288 /* VBVA_SCREEN_F_* */
289 u16 flags;
290} __packed;
291
292/* vbva_enable.flags */
293#define VBVA_F_NONE 0x00000000
294#define VBVA_F_ENABLE 0x00000001
295#define VBVA_F_DISABLE 0x00000002
296/* extended VBVA to be used with WDDM */
297#define VBVA_F_EXTENDED 0x00000004
298/* vbva offset is absolute VRAM offset */
299#define VBVA_F_ABSOFFSET 0x00000008
300
301struct vbva_enable {
302 u32 flags;
303 u32 offset;
304 s32 result;
305} __packed;
306
307struct vbva_enable_ex {
308 struct vbva_enable base;
309 u32 screen_id;
310} __packed;
311
312struct vbva_mouse_pointer_shape {
313 /* The host result. */
314 s32 result;
315
316 /* VBOX_MOUSE_POINTER_* bit flags. */
317 u32 flags;
318
319 /* X coordinate of the hot spot. */
320	u32 hot_x;
321
322 /* Y coordinate of the hot spot. */
323 u32 hot_y;
324
325 /* Width of the pointer in pixels. */
326 u32 width;
327
328 /* Height of the pointer in scanlines. */
329 u32 height;
330
331 /* Pointer data.
332 *
333 * The data consists of 1 bpp AND mask followed by 32 bpp XOR (color)
334 * mask.
335 *
336 * For pointers without alpha channel the XOR mask pixels are 32 bit
337 * values: (lsb)BGR0(msb). For pointers with alpha channel the XOR mask
338 * consists of (lsb)BGRA(msb) 32 bit values.
339 *
340	 * The guest driver must create the AND mask for pointers with an alpha channel,
341 * so if host does not support alpha, the pointer could be displayed as
342 * a normal color pointer. The AND mask can be constructed from alpha
343 * values. For example alpha value >= 0xf0 means bit 0 in the AND mask.
344 *
345 * The AND mask is 1 bpp bitmap with byte aligned scanlines. Size of AND
346 * mask, therefore, is and_len = (width + 7) / 8 * height. The padding
347 * bits at the end of any scanline are undefined.
348 *
349 * The XOR mask follows the AND mask on the next 4 bytes aligned offset:
350	 * u8 *xor = and + ((and_len + 3) & ~3)
351 * Bytes in the gap between the AND and the XOR mask are undefined.
352 * XOR mask scanlines have no gap between them and size of XOR mask is:
353 * xor_len = width * 4 * height.
354 *
355 * Preallocate 4 bytes for accessing actual data as p->data.
356 */
357 u8 data[4];
358} __packed;
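/*
 * Worked example (illustrative): for a 48x32 pointer the AND mask takes
 * and_len = (48 + 7) / 8 * 32 = 192 bytes, the XOR mask starts at the
 * 4-byte aligned offset 192 and takes xor_len = 48 * 4 * 32 = 6144
 * bytes, so the shape data totals 6336 bytes.
 */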
359
360/* pointer is visible */
361#define VBOX_MOUSE_POINTER_VISIBLE 0x0001
362/* pointer has alpha channel */
363#define VBOX_MOUSE_POINTER_ALPHA 0x0002
364/* pointerData contains new pointer shape */
365#define VBOX_MOUSE_POINTER_SHAPE 0x0004
366
367/*
368 * The guest driver can handle asynchronous guest command completion by
369 * reading the command offset from an I/O port.
370 */
371#define VBVACAPS_COMPLETEGCMD_BY_IOREAD 0x00000001
372/* the guest driver can handle video adapter IRQs */
373#define VBVACAPS_IRQ 0x00000002
374/* The guest can read video mode hints sent via VBVA. */
375#define VBVACAPS_VIDEO_MODE_HINTS 0x00000004
376/* The guest can switch to a software cursor on demand. */
377#define VBVACAPS_DISABLE_CURSOR_INTEGRATION 0x00000008
378/* The guest does not depend on host handling the VBE registers. */
379#define VBVACAPS_USE_VBVA_ONLY 0x00000010
380
381struct vbva_caps {
382 s32 rc;
383 u32 caps;
384} __packed;
385
386/* Query the most recent mode hints received from the host. */
387struct vbva_query_mode_hints {
388 /* The maximum number of screens to return hints for. */
389 u16 hints_queried_count;
390 /* The size of the mode hint structures directly following this one. */
391 u16 hint_structure_guest_size;
392 /* Return code for the operation. Initialise to VERR_NOT_SUPPORTED. */
393 s32 rc;
394} __packed;
395
396/*
397 * Structure in which a mode hint is returned. The guest allocates an array
398 * of these immediately after the vbva_query_mode_hints structure.
399 * To accommodate future extensions, the vbva_query_mode_hints structure
400 * specifies the size of the vbva_modehint structures allocated by the guest,
401 * and the host only fills out structure elements which fit into that size. The
402 * host should fill any unused members (e.g. dx, dy) or structure space at the
403 * end with ~0. The whole structure can legally be set to ~0 to skip a screen.
404 */
405struct vbva_modehint {
406 u32 magic;
407 u32 cx;
408 u32 cy;
409 u32 bpp; /* Which has never been used... */
410 u32 display;
411 u32 dx; /* X offset into the virtual frame-buffer. */
412 u32 dy; /* Y offset into the virtual frame-buffer. */
413 u32 enabled; /* Not flags. Add new members for new flags. */
414} __packed;
415
416#define VBVAMODEHINT_MAGIC 0x0801add9u
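/*
 * Usage sketch, illustrative only: hgsmi_get_mode_hints() (declared in
 * vboxvideo_guest.h) wraps this query; the caller supplies one
 * vbva_modehint per screen and can check the magic field to see whether
 * an entry was filled in. guest_pool, use_mode_hint() and the use of
 * two screens are placeholders.
 */
#if 0
struct vbva_modehint hints[2];

if (hgsmi_get_mode_hints(guest_pool, 2, hints) == 0 &&
    hints[0].magic == VBVAMODEHINT_MAGIC)
	use_mode_hint(hints[0].cx, hints[0].cy);
#endif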
417
418/*
419 * Report the rectangle relative to which absolute pointer events should be
420 * expressed. This information remains valid until the next VBVA resize event
421 * for any screen, at which time it is reset to the bounding rectangle of all
422 * virtual screens and must be re-set.
423 */
424struct vbva_report_input_mapping {
425 s32 x; /* Upper left X co-ordinate relative to the first screen. */
426 s32 y; /* Upper left Y co-ordinate relative to the first screen. */
427 u32 cx; /* Rectangle width. */
428 u32 cy; /* Rectangle height. */
429} __packed;
430
431/*
432 * Report the guest cursor position and query the host one. The host may wish
433 * to use the guest information to re-position its own cursor (though this is
434 * currently unlikely).
435 */
436struct vbva_cursor_position {
437 u32 report_position; /* Are we reporting a position? */
438 u32 x; /* Guest cursor X position */
439 u32 y; /* Guest cursor Y position */
440} __packed;
441
442#endif
diff --git a/drivers/gpu/drm/vboxvideo/vboxvideo_guest.h b/drivers/gpu/drm/vboxvideo/vboxvideo_guest.h
new file mode 100644
index 000000000000..55fcee3a6470
--- /dev/null
+++ b/drivers/gpu/drm/vboxvideo/vboxvideo_guest.h
@@ -0,0 +1,61 @@
1/* SPDX-License-Identifier: MIT */
2/* Copyright (C) 2006-2016 Oracle Corporation */
3
4#ifndef __VBOXVIDEO_GUEST_H__
5#define __VBOXVIDEO_GUEST_H__
6
7#include <linux/genalloc.h>
8#include "vboxvideo.h"
9
10/*
11 * Structure grouping the context needed for sending graphics acceleration
12 * information to the host via VBVA. Each screen has its own VBVA buffer.
13 */
14struct vbva_buf_ctx {
15 /* Offset of the buffer in the VRAM section for the screen */
16 u32 buffer_offset;
17 /* Length of the buffer in bytes */
18 u32 buffer_length;
19 /* Set if we wrote to the buffer faster than the host could read it */
20 bool buffer_overflow;
21 /* VBVA record that we are currently preparing for the host, or NULL */
22 struct vbva_record *record;
23 /*
24 * Pointer to the VBVA buffer mapped into the current address space.
25 * Will be NULL if VBVA is not enabled.
26 */
27 struct vbva_buffer *vbva;
28};
29
30int hgsmi_report_flags_location(struct gen_pool *ctx, u32 location);
31int hgsmi_send_caps_info(struct gen_pool *ctx, u32 caps);
32int hgsmi_test_query_conf(struct gen_pool *ctx);
33int hgsmi_query_conf(struct gen_pool *ctx, u32 index, u32 *value_ret);
34int hgsmi_update_pointer_shape(struct gen_pool *ctx, u32 flags,
35 u32 hot_x, u32 hot_y, u32 width, u32 height,
36 u8 *pixels, u32 len);
37int hgsmi_cursor_position(struct gen_pool *ctx, bool report_position,
38 u32 x, u32 y, u32 *x_host, u32 *y_host);
39
40bool vbva_enable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
41 struct vbva_buffer *vbva, s32 screen);
42void vbva_disable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
43 s32 screen);
44bool vbva_buffer_begin_update(struct vbva_buf_ctx *vbva_ctx,
45 struct gen_pool *ctx);
46void vbva_buffer_end_update(struct vbva_buf_ctx *vbva_ctx);
47bool vbva_write(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
48 const void *p, u32 len);
49void vbva_setup_buffer_context(struct vbva_buf_ctx *vbva_ctx,
50 u32 buffer_offset, u32 buffer_length);
51
52void hgsmi_process_display_info(struct gen_pool *ctx, u32 display,
53 s32 origin_x, s32 origin_y, u32 start_offset,
54 u32 pitch, u32 width, u32 height,
55 u16 bpp, u16 flags);
56int hgsmi_update_input_mapping(struct gen_pool *ctx, s32 origin_x, s32 origin_y,
57 u32 width, u32 height);
58int hgsmi_get_mode_hints(struct gen_pool *ctx, unsigned int screens,
59 struct vbva_modehint *hints);
60
61#endif
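
For orientation, the intended calling sequence of the entry points above is roughly the following sketch (not part of the patch). In the real driver, enable/disable happen at modeset time rather than per command; vram_offset, vram_len and cmd are placeholders and vbva_send_one() is a hypothetical helper.

static void vbva_send_one(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
			  struct vbva_buffer *vbva, u32 vram_offset,
			  u32 vram_len, const void *cmd, u32 cmd_len)
{
	/* Point the context at this screen's slice of VRAM (normally once). */
	vbva_setup_buffer_context(vbva_ctx, vram_offset, vram_len);

	/* Tell the host where the ring buffer lives; screen 0 here. */
	if (!vbva_enable(vbva_ctx, ctx, vbva, 0))
		return;

	/*
	 * Bracket each command with begin/end so the host only ever sees
	 * complete records, never partially written ones.
	 */
	if (vbva_buffer_begin_update(vbva_ctx, ctx)) {
		vbva_write(vbva_ctx, ctx, cmd, cmd_len);
		vbva_buffer_end_update(vbva_ctx);
	}

	vbva_disable(vbva_ctx, ctx, 0);
}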
diff --git a/drivers/gpu/drm/vboxvideo/vboxvideo_vbe.h b/drivers/gpu/drm/vboxvideo/vboxvideo_vbe.h
new file mode 100644
index 000000000000..427235869297
--- /dev/null
+++ b/drivers/gpu/drm/vboxvideo/vboxvideo_vbe.h
@@ -0,0 +1,54 @@
1/* SPDX-License-Identifier: MIT */
2/* Copyright (C) 2006-2016 Oracle Corporation */
3
4#ifndef __VBOXVIDEO_VBE_H__
5#define __VBOXVIDEO_VBE_H__
6
7/* GUEST <-> HOST Communication API */
8
9#define VBE_DISPI_BANK_ADDRESS 0xA0000
10#define VBE_DISPI_BANK_SIZE_KB 64
11
12#define VBE_DISPI_MAX_XRES 16384
13#define VBE_DISPI_MAX_YRES 16384
14#define VBE_DISPI_MAX_BPP 32
15
16#define VBE_DISPI_IOPORT_INDEX 0x01CE
17#define VBE_DISPI_IOPORT_DATA 0x01CF
18
19#define VBE_DISPI_IOPORT_DAC_WRITE_INDEX 0x03C8
20#define VBE_DISPI_IOPORT_DAC_DATA 0x03C9
21
22#define VBE_DISPI_INDEX_ID 0x0
23#define VBE_DISPI_INDEX_XRES 0x1
24#define VBE_DISPI_INDEX_YRES 0x2
25#define VBE_DISPI_INDEX_BPP 0x3
26#define VBE_DISPI_INDEX_ENABLE 0x4
27#define VBE_DISPI_INDEX_BANK 0x5
28#define VBE_DISPI_INDEX_VIRT_WIDTH 0x6
29#define VBE_DISPI_INDEX_VIRT_HEIGHT 0x7
30#define VBE_DISPI_INDEX_X_OFFSET 0x8
31#define VBE_DISPI_INDEX_Y_OFFSET 0x9
32#define VBE_DISPI_INDEX_VBOX_VIDEO 0xa
33#define VBE_DISPI_INDEX_FB_BASE_HI 0xb
34
35#define VBE_DISPI_ID0 0xB0C0
36#define VBE_DISPI_ID1 0xB0C1
37#define VBE_DISPI_ID2 0xB0C2
38#define VBE_DISPI_ID3 0xB0C3
39#define VBE_DISPI_ID4 0xB0C4
40
41#define VBE_DISPI_ID_VBOX_VIDEO 0xBE00
42/* The VBOX interface id, indicating support for the VBVA shared memory interface. */
43#define VBE_DISPI_ID_HGSMI 0xBE01
44#define VBE_DISPI_ID_ANYX 0xBE02
45
46#define VBE_DISPI_DISABLED 0x00
47#define VBE_DISPI_ENABLED 0x01
48#define VBE_DISPI_GETCAPS 0x02
49#define VBE_DISPI_8BIT_DAC 0x20
50
51#define VGA_PORT_HGSMI_HOST 0x3b0
52#define VGA_PORT_HGSMI_GUEST 0x3d0
53
54#endif
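
These DISPI registers sit behind an index/data port pair: write the register index to VBE_DISPI_IOPORT_INDEX, then access the value through VBE_DISPI_IOPORT_DATA. A minimal mode-set sketch (assuming the conventional Bochs/VirtualBox disable, program, re-enable ordering; dispi_write() and dispi_set_mode() are hypothetical helpers):

#include <asm/io.h>	/* outw() */

static void dispi_write(u16 index, u16 value)
{
	outw(index, VBE_DISPI_IOPORT_INDEX);
	outw(value, VBE_DISPI_IOPORT_DATA);
}

static void dispi_set_mode(u16 width, u16 height, u16 bpp)
{
	dispi_write(VBE_DISPI_INDEX_ENABLE, VBE_DISPI_DISABLED);
	dispi_write(VBE_DISPI_INDEX_XRES, width);
	dispi_write(VBE_DISPI_INDEX_YRES, height);
	dispi_write(VBE_DISPI_INDEX_BPP, bpp);
	dispi_write(VBE_DISPI_INDEX_ENABLE, VBE_DISPI_ENABLED);
}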
diff --git a/drivers/gpu/drm/vboxvideo/vbva_base.c b/drivers/gpu/drm/vboxvideo/vbva_base.c
new file mode 100644
index 000000000000..36bc9824ec3f
--- /dev/null
+++ b/drivers/gpu/drm/vboxvideo/vbva_base.c
@@ -0,0 +1,214 @@
1// SPDX-License-Identifier: MIT
2/* Copyright (C) 2006-2017 Oracle Corporation */
3
4#include <linux/vbox_err.h>
5#include "vbox_drv.h"
6#include "vboxvideo_guest.h"
7#include "hgsmi_channels.h"
8
9/*
10 * There is a hardware ring buffer in the graphics device video RAM, formerly
11 * in the VBox VMMDev PCI memory space.
 12 * All graphics commands go there, serialized by vbva_buffer_begin_update()
 13 * and vbva_buffer_end_update().
 14 *
 15 * free_offset is the write position; data_offset is the read position.
 16 * free_offset == data_offset means the buffer is empty.
 17 * There must always be a gap between data_offset and free_offset when data
 18 * is in the buffer.
 19 * The guest only changes free_offset; the host only changes data_offset.
20 */
21
22static u32 vbva_buffer_available(const struct vbva_buffer *vbva)
23{
24 s32 diff = vbva->data_offset - vbva->free_offset;
25
26 return diff > 0 ? diff : vbva->data_len + diff;
27}
28
29static void vbva_buffer_place_data_at(struct vbva_buf_ctx *vbva_ctx,
30 const void *p, u32 len, u32 offset)
31{
32 struct vbva_buffer *vbva = vbva_ctx->vbva;
33 u32 bytes_till_boundary = vbva->data_len - offset;
34 u8 *dst = &vbva->data[offset];
35 s32 diff = len - bytes_till_boundary;
36
37 if (diff <= 0) {
38 /* Chunk will not cross buffer boundary. */
39 memcpy(dst, p, len);
40 } else {
41 /* Chunk crosses buffer boundary. */
42 memcpy(dst, p, bytes_till_boundary);
43 memcpy(&vbva->data[0], (u8 *)p + bytes_till_boundary, diff);
44 }
45}
46
47static void vbva_buffer_flush(struct gen_pool *ctx)
48{
49 struct vbva_flush *p;
50
51 p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_FLUSH);
52 if (!p)
53 return;
54
55 p->reserved = 0;
56
57 hgsmi_buffer_submit(ctx, p);
58 hgsmi_buffer_free(ctx, p);
59}
60
61bool vbva_write(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
62 const void *p, u32 len)
63{
64 struct vbva_record *record;
65 struct vbva_buffer *vbva;
66 u32 available;
67
68 vbva = vbva_ctx->vbva;
69 record = vbva_ctx->record;
70
71 if (!vbva || vbva_ctx->buffer_overflow ||
72 !record || !(record->len_and_flags & VBVA_F_RECORD_PARTIAL))
73 return false;
74
75 available = vbva_buffer_available(vbva);
76
77 while (len > 0) {
78 u32 chunk = len;
79
80 if (chunk >= available) {
81 vbva_buffer_flush(ctx);
82 available = vbva_buffer_available(vbva);
83 }
84
85 if (chunk >= available) {
86 if (WARN_ON(available <= vbva->partial_write_tresh)) {
87 vbva_ctx->buffer_overflow = true;
88 return false;
89 }
90 chunk = available - vbva->partial_write_tresh;
91 }
92
93 vbva_buffer_place_data_at(vbva_ctx, p, chunk,
94 vbva->free_offset);
95
96 vbva->free_offset = (vbva->free_offset + chunk) %
97 vbva->data_len;
98 record->len_and_flags += chunk;
99 available -= chunk;
100 len -= chunk;
101 p += chunk;
102 }
103
104 return true;
105}
106
107static bool vbva_inform_host(struct vbva_buf_ctx *vbva_ctx,
108 struct gen_pool *ctx, s32 screen, bool enable)
109{
110 struct vbva_enable_ex *p;
111 bool ret;
112
113 p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_ENABLE);
114 if (!p)
115 return false;
116
117 p->base.flags = enable ? VBVA_F_ENABLE : VBVA_F_DISABLE;
118 p->base.offset = vbva_ctx->buffer_offset;
119 p->base.result = VERR_NOT_SUPPORTED;
120 if (screen >= 0) {
121 p->base.flags |= VBVA_F_EXTENDED | VBVA_F_ABSOFFSET;
122 p->screen_id = screen;
123 }
124
125 hgsmi_buffer_submit(ctx, p);
126
127 if (enable)
128 ret = p->base.result >= 0;
129 else
130 ret = true;
131
132 hgsmi_buffer_free(ctx, p);
133
134 return ret;
135}
136
137bool vbva_enable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
138 struct vbva_buffer *vbva, s32 screen)
139{
140 bool ret = false;
141
142 memset(vbva, 0, sizeof(*vbva));
143 vbva->partial_write_tresh = 256;
144 vbva->data_len = vbva_ctx->buffer_length - sizeof(struct vbva_buffer);
145 vbva_ctx->vbva = vbva;
146
147 ret = vbva_inform_host(vbva_ctx, ctx, screen, true);
148 if (!ret)
149 vbva_disable(vbva_ctx, ctx, screen);
150
151 return ret;
152}
153
154void vbva_disable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
155 s32 screen)
156{
157 vbva_ctx->buffer_overflow = false;
158 vbva_ctx->record = NULL;
159 vbva_ctx->vbva = NULL;
160
161 vbva_inform_host(vbva_ctx, ctx, screen, false);
162}
163
164bool vbva_buffer_begin_update(struct vbva_buf_ctx *vbva_ctx,
165 struct gen_pool *ctx)
166{
167 struct vbva_record *record;
168 u32 next;
169
170 if (!vbva_ctx->vbva ||
171 !(vbva_ctx->vbva->host_flags.host_events & VBVA_F_MODE_ENABLED))
172 return false;
173
174 WARN_ON(vbva_ctx->buffer_overflow || vbva_ctx->record);
175
176 next = (vbva_ctx->vbva->record_free_index + 1) % VBVA_MAX_RECORDS;
177
178 /* Flush if all slots in the records queue are used */
179 if (next == vbva_ctx->vbva->record_first_index)
180 vbva_buffer_flush(ctx);
181
 182 /* If there is still no room after the flush, fail the request. */
183 if (next == vbva_ctx->vbva->record_first_index)
184 return false;
185
186 record = &vbva_ctx->vbva->records[vbva_ctx->vbva->record_free_index];
187 record->len_and_flags = VBVA_F_RECORD_PARTIAL;
188 vbva_ctx->vbva->record_free_index = next;
189 /* Remember which record we are using. */
190 vbva_ctx->record = record;
191
192 return true;
193}
194
195void vbva_buffer_end_update(struct vbva_buf_ctx *vbva_ctx)
196{
197 struct vbva_record *record = vbva_ctx->record;
198
199 WARN_ON(!vbva_ctx->vbva || !record ||
200 !(record->len_and_flags & VBVA_F_RECORD_PARTIAL));
201
202 /* Mark the record completed. */
203 record->len_and_flags &= ~VBVA_F_RECORD_PARTIAL;
204
205 vbva_ctx->buffer_overflow = false;
206 vbva_ctx->record = NULL;
207}
208
209void vbva_setup_buffer_context(struct vbva_buf_ctx *vbva_ctx,
210 u32 buffer_offset, u32 buffer_length)
211{
212 vbva_ctx->buffer_offset = buffer_offset;
213 vbva_ctx->buffer_length = buffer_length;
214}
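
The wraparound arithmetic in vbva_buffer_available() and vbva_buffer_place_data_at() is easy to check with a small userspace model (all values below are made up for the test; a 16-byte ring with the host having read up to offset 6):

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define DATA_LEN 16u

static uint32_t available(uint32_t data_off, uint32_t free_off)
{
	int32_t diff = data_off - free_off;	/* same trick as the kernel code */

	return diff > 0 ? (uint32_t)diff : DATA_LEN + diff;
}

int main(void)
{
	uint8_t ring[DATA_LEN];
	uint32_t free_off = 14, data_off = 6;
	const uint8_t msg[4] = { 1, 2, 3, 4 };
	uint32_t till_boundary = DATA_LEN - free_off;	/* 2 bytes to the end */

	assert(available(data_off, free_off) == 8);

	/* The chunk crosses the end of the buffer, so split the copy. */
	memcpy(&ring[free_off], msg, till_boundary);
	memcpy(&ring[0], msg + till_boundary, sizeof(msg) - till_boundary);
	free_off = (free_off + sizeof(msg)) % DATA_LEN;

	assert(free_off == 2 && ring[15] == 2 && ring[0] == 3);
	return 0;
}

Note that when free_offset == data_offset the kernel helper reports the whole data_len as available, matching the "equal offsets mean empty" convention; vbva_write() then keeps partial_write_tresh bytes in reserve so the two offsets never collide while data is pending.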
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 8dcce7182bb7..92e3f98d8478 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -201,8 +201,6 @@ static void vc4_bo_destroy(struct vc4_bo *bo)
201 bo->validated_shader = NULL; 201 bo->validated_shader = NULL;
202 } 202 }
203 203
204 reservation_object_fini(&bo->_resv);
205
206 drm_gem_cma_free_object(obj); 204 drm_gem_cma_free_object(obj);
207} 205}
208 206
@@ -427,8 +425,6 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
427 vc4->bo_labels[VC4_BO_TYPE_KERNEL].num_allocated++; 425 vc4->bo_labels[VC4_BO_TYPE_KERNEL].num_allocated++;
428 vc4->bo_labels[VC4_BO_TYPE_KERNEL].size_allocated += size; 426 vc4->bo_labels[VC4_BO_TYPE_KERNEL].size_allocated += size;
429 mutex_unlock(&vc4->bo_lock); 427 mutex_unlock(&vc4->bo_lock);
430 bo->resv = &bo->_resv;
431 reservation_object_init(bo->resv);
432 428
433 return &bo->base.base; 429 return &bo->base.base;
434} 430}
@@ -684,13 +680,6 @@ static void vc4_bo_cache_time_timer(struct timer_list *t)
684 schedule_work(&vc4->bo_cache.time_work); 680 schedule_work(&vc4->bo_cache.time_work);
685} 681}
686 682
687struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj)
688{
689 struct vc4_bo *bo = to_vc4_bo(obj);
690
691 return bo->resv;
692}
693
694struct dma_buf * 683struct dma_buf *
695vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags) 684vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
696{ 685{
@@ -822,14 +811,12 @@ vc4_prime_import_sg_table(struct drm_device *dev,
822 struct sg_table *sgt) 811 struct sg_table *sgt)
823{ 812{
824 struct drm_gem_object *obj; 813 struct drm_gem_object *obj;
825 struct vc4_bo *bo;
826 814
827 obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt); 815 obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
828 if (IS_ERR(obj)) 816 if (IS_ERR(obj))
829 return obj; 817 return obj;
830 818
831 bo = to_vc4_bo(obj); 819 obj->resv = attach->dmabuf->resv;
832 bo->resv = attach->dmabuf->resv;
833 820
834 return obj; 821 return obj;
835} 822}
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 730008d3da76..64c964b7c577 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -834,6 +834,14 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
834 drm_crtc_send_vblank_event(crtc, vc4_crtc->event); 834 drm_crtc_send_vblank_event(crtc, vc4_crtc->event);
835 vc4_crtc->event = NULL; 835 vc4_crtc->event = NULL;
836 drm_crtc_vblank_put(crtc); 836 drm_crtc_vblank_put(crtc);
837
 838 /* Wait for the page flip before unmasking the underrun, to
 839 * ensure the display list has been updated by the hardware.
 840 * Until then, the HVS is still using the previous display list
 841 * while the CRTC and encoder are already reconfigured, which
 842 * leads to underruns, typically when reconfiguring the CRTC.
843 */
844 vc4_hvs_unmask_underrun(dev, vc4_crtc->channel);
837 } 845 }
838 spin_unlock_irqrestore(&dev->event_lock, flags); 846 spin_unlock_irqrestore(&dev->event_lock, flags);
839} 847}
diff --git a/drivers/gpu/drm/vc4/vc4_debugfs.c b/drivers/gpu/drm/vc4/vc4_debugfs.c
index 7a0003de71ab..59cdad89f844 100644
--- a/drivers/gpu/drm/vc4/vc4_debugfs.c
+++ b/drivers/gpu/drm/vc4/vc4_debugfs.c
@@ -23,6 +23,7 @@ static const struct drm_info_list vc4_debugfs_list[] = {
23 {"vec_regs", vc4_vec_debugfs_regs, 0}, 23 {"vec_regs", vc4_vec_debugfs_regs, 0},
24 {"txp_regs", vc4_txp_debugfs_regs, 0}, 24 {"txp_regs", vc4_txp_debugfs_regs, 0},
25 {"hvs_regs", vc4_hvs_debugfs_regs, 0}, 25 {"hvs_regs", vc4_hvs_debugfs_regs, 0},
26 {"hvs_underrun", vc4_hvs_debugfs_underrun, 0},
26 {"crtc0_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)0}, 27 {"crtc0_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)0},
27 {"crtc1_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)1}, 28 {"crtc1_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)1},
28 {"crtc2_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)2}, 29 {"crtc2_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)2},
@@ -35,6 +36,15 @@ static const struct drm_info_list vc4_debugfs_list[] = {
35int 36int
36vc4_debugfs_init(struct drm_minor *minor) 37vc4_debugfs_init(struct drm_minor *minor)
37{ 38{
39 struct vc4_dev *vc4 = to_vc4_dev(minor->dev);
40 struct dentry *dentry;
41
42 dentry = debugfs_create_bool("hvs_load_tracker", S_IRUGO | S_IWUSR,
43 minor->debugfs_root,
44 &vc4->load_tracker_enabled);
45 if (!dentry)
46 return -ENOMEM;
47
38 return drm_debugfs_create_files(vc4_debugfs_list, VC4_DEBUGFS_ENTRIES, 48 return drm_debugfs_create_files(vc4_debugfs_list, VC4_DEBUGFS_ENTRIES,
39 minor->debugfs_root, minor); 49 minor->debugfs_root, minor);
40} 50}
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 5fcd2f0da7f7..4daf44fd4548 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -200,7 +200,6 @@ static struct drm_driver vc4_drm_driver = {
200 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 200 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
201 .gem_prime_import = drm_gem_prime_import, 201 .gem_prime_import = drm_gem_prime_import,
202 .gem_prime_export = vc4_prime_export, 202 .gem_prime_export = vc4_prime_export,
203 .gem_prime_res_obj = vc4_prime_res_obj,
204 .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table, 203 .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
205 .gem_prime_import_sg_table = vc4_prime_import_sg_table, 204 .gem_prime_import_sg_table = vc4_prime_import_sg_table,
206 .gem_prime_vmap = vc4_prime_vmap, 205 .gem_prime_vmap = vc4_prime_vmap,
@@ -287,7 +286,7 @@ static int vc4_drm_bind(struct device *dev)
287 286
288 vc4_kms_load(drm); 287 vc4_kms_load(drm);
289 288
290 drm_fbdev_generic_setup(drm, 32); 289 drm_fbdev_generic_setup(drm, 16);
291 290
292 return 0; 291 return 0;
293 292
@@ -312,6 +311,7 @@ static void vc4_drm_unbind(struct device *dev)
312 311
313 drm_mode_config_cleanup(drm); 312 drm_mode_config_cleanup(drm);
314 313
314 drm_atomic_private_obj_fini(&vc4->load_tracker);
315 drm_atomic_private_obj_fini(&vc4->ctm_manager); 315 drm_atomic_private_obj_fini(&vc4->ctm_manager);
316 316
317 drm_dev_put(drm); 317 drm_dev_put(drm);
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 2c635f001c71..7a3c093e7443 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -7,7 +7,6 @@
7 */ 7 */
8 8
9#include <linux/mm_types.h> 9#include <linux/mm_types.h>
10#include <linux/reservation.h>
11#include <drm/drmP.h> 10#include <drm/drmP.h>
12#include <drm/drm_util.h> 11#include <drm/drm_util.h>
13#include <drm/drm_encoder.h> 12#include <drm/drm_encoder.h>
@@ -185,10 +184,20 @@ struct vc4_dev {
185 /* Bitmask of the current bin_alloc used for overflow memory. */ 184 /* Bitmask of the current bin_alloc used for overflow memory. */
186 uint32_t bin_alloc_overflow; 185 uint32_t bin_alloc_overflow;
187 186
 187 /* Incremented when an underrun error happens after an atomic commit.
 188 * This is particularly useful to detect when a specific modeset is too
 189 * demanding in terms of memory or HVS bandwidth, which is hard to guess
 190 * at atomic check time.
 191 */
192 atomic_t underrun;
193
188 struct work_struct overflow_mem_work; 194 struct work_struct overflow_mem_work;
189 195
190 int power_refcount; 196 int power_refcount;
191 197
198 /* Set to true when the load tracker is active. */
199 bool load_tracker_enabled;
200
192 /* Mutex controlling the power refcount. */ 201 /* Mutex controlling the power refcount. */
193 struct mutex power_lock; 202 struct mutex power_lock;
194 203
@@ -201,6 +210,7 @@ struct vc4_dev {
201 210
202 struct drm_modeset_lock ctm_state_lock; 211 struct drm_modeset_lock ctm_state_lock;
203 struct drm_private_obj ctm_manager; 212 struct drm_private_obj ctm_manager;
213 struct drm_private_obj load_tracker;
204}; 214};
205 215
206static inline struct vc4_dev * 216static inline struct vc4_dev *
@@ -240,10 +250,6 @@ struct vc4_bo {
240 */ 250 */
241 struct vc4_validated_shader_info *validated_shader; 251 struct vc4_validated_shader_info *validated_shader;
242 252
243 /* normally (resv == &_resv) except for imported bo's */
244 struct reservation_object *resv;
245 struct reservation_object _resv;
246
247 /* One of enum vc4_kernel_bo_type, or VC4_BO_TYPE_COUNT + i 253 /* One of enum vc4_kernel_bo_type, or VC4_BO_TYPE_COUNT + i
248 * for user-allocated labels. 254 * for user-allocated labels.
249 */ 255 */
@@ -376,6 +382,16 @@ struct vc4_plane_state {
376 * when async update is not possible. 382 * when async update is not possible.
377 */ 383 */
378 bool dlist_initialized; 384 bool dlist_initialized;
385
386 /* Load of this plane on the HVS block. The load is expressed in HVS
387 * cycles/sec.
388 */
389 u64 hvs_load;
390
391 /* Memory bandwidth needed for this plane. This is expressed in
392 * bytes/sec.
393 */
394 u64 membus_load;
379}; 395};
380 396
381static inline struct vc4_plane_state * 397static inline struct vc4_plane_state *
@@ -685,7 +701,6 @@ int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
685 struct drm_file *file_priv); 701 struct drm_file *file_priv);
686vm_fault_t vc4_fault(struct vm_fault *vmf); 702vm_fault_t vc4_fault(struct vm_fault *vmf);
687int vc4_mmap(struct file *filp, struct vm_area_struct *vma); 703int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
688struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj);
689int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); 704int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
690struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev, 705struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev,
691 struct dma_buf_attachment *attach, 706 struct dma_buf_attachment *attach,
@@ -773,6 +788,9 @@ void vc4_irq_reset(struct drm_device *dev);
773extern struct platform_driver vc4_hvs_driver; 788extern struct platform_driver vc4_hvs_driver;
774void vc4_hvs_dump_state(struct drm_device *dev); 789void vc4_hvs_dump_state(struct drm_device *dev);
775int vc4_hvs_debugfs_regs(struct seq_file *m, void *unused); 790int vc4_hvs_debugfs_regs(struct seq_file *m, void *unused);
791int vc4_hvs_debugfs_underrun(struct seq_file *m, void *unused);
792void vc4_hvs_unmask_underrun(struct drm_device *dev, int channel);
793void vc4_hvs_mask_underrun(struct drm_device *dev, int channel);
776 794
777/* vc4_kms.c */ 795/* vc4_kms.c */
778int vc4_kms_load(struct drm_device *dev); 796int vc4_kms_load(struct drm_device *dev);
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index aea2b8dfec17..5ee5bf7fedf7 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -536,7 +536,7 @@ vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
536 bo = to_vc4_bo(&exec->bo[i]->base); 536 bo = to_vc4_bo(&exec->bo[i]->base);
537 bo->seqno = seqno; 537 bo->seqno = seqno;
538 538
539 reservation_object_add_shared_fence(bo->resv, exec->fence); 539 reservation_object_add_shared_fence(bo->base.base.resv, exec->fence);
540 } 540 }
541 541
542 list_for_each_entry(bo, &exec->unref_list, unref_head) { 542 list_for_each_entry(bo, &exec->unref_list, unref_head) {
@@ -547,7 +547,7 @@ vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
547 bo = to_vc4_bo(&exec->rcl_write_bo[i]->base); 547 bo = to_vc4_bo(&exec->rcl_write_bo[i]->base);
548 bo->write_seqno = seqno; 548 bo->write_seqno = seqno;
549 549
550 reservation_object_add_excl_fence(bo->resv, exec->fence); 550 reservation_object_add_excl_fence(bo->base.base.resv, exec->fence);
551 } 551 }
552} 552}
553 553
@@ -559,7 +559,7 @@ vc4_unlock_bo_reservations(struct drm_device *dev,
559 int i; 559 int i;
560 560
561 for (i = 0; i < exec->bo_count; i++) { 561 for (i = 0; i < exec->bo_count; i++) {
562 struct vc4_bo *bo = to_vc4_bo(&exec->bo[i]->base); 562 struct drm_gem_object *bo = &exec->bo[i]->base;
563 563
564 ww_mutex_unlock(&bo->resv->lock); 564 ww_mutex_unlock(&bo->resv->lock);
565 } 565 }
@@ -581,13 +581,13 @@ vc4_lock_bo_reservations(struct drm_device *dev,
581{ 581{
582 int contended_lock = -1; 582 int contended_lock = -1;
583 int i, ret; 583 int i, ret;
584 struct vc4_bo *bo; 584 struct drm_gem_object *bo;
585 585
586 ww_acquire_init(acquire_ctx, &reservation_ww_class); 586 ww_acquire_init(acquire_ctx, &reservation_ww_class);
587 587
588retry: 588retry:
589 if (contended_lock != -1) { 589 if (contended_lock != -1) {
590 bo = to_vc4_bo(&exec->bo[contended_lock]->base); 590 bo = &exec->bo[contended_lock]->base;
591 ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock, 591 ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
592 acquire_ctx); 592 acquire_ctx);
593 if (ret) { 593 if (ret) {
@@ -600,19 +600,19 @@ retry:
600 if (i == contended_lock) 600 if (i == contended_lock)
601 continue; 601 continue;
602 602
603 bo = to_vc4_bo(&exec->bo[i]->base); 603 bo = &exec->bo[i]->base;
604 604
605 ret = ww_mutex_lock_interruptible(&bo->resv->lock, acquire_ctx); 605 ret = ww_mutex_lock_interruptible(&bo->resv->lock, acquire_ctx);
606 if (ret) { 606 if (ret) {
607 int j; 607 int j;
608 608
609 for (j = 0; j < i; j++) { 609 for (j = 0; j < i; j++) {
610 bo = to_vc4_bo(&exec->bo[j]->base); 610 bo = &exec->bo[j]->base;
611 ww_mutex_unlock(&bo->resv->lock); 611 ww_mutex_unlock(&bo->resv->lock);
612 } 612 }
613 613
614 if (contended_lock != -1 && contended_lock >= i) { 614 if (contended_lock != -1 && contended_lock >= i) {
615 bo = to_vc4_bo(&exec->bo[contended_lock]->base); 615 bo = &exec->bo[contended_lock]->base;
616 616
617 ww_mutex_unlock(&bo->resv->lock); 617 ww_mutex_unlock(&bo->resv->lock);
618 } 618 }
@@ -633,7 +633,7 @@ retry:
633 * before we commit the CL to the hardware. 633 * before we commit the CL to the hardware.
634 */ 634 */
635 for (i = 0; i < exec->bo_count; i++) { 635 for (i = 0; i < exec->bo_count; i++) {
636 bo = to_vc4_bo(&exec->bo[i]->base); 636 bo = &exec->bo[i]->base;
637 637
638 ret = reservation_object_reserve_shared(bo->resv, 1); 638 ret = reservation_object_reserve_shared(bo->resv, 1);
639 if (ret) { 639 if (ret) {
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index 5d8c749c9749..918e71256ecc 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -22,6 +22,7 @@
22 * each CRTC. 22 * each CRTC.
23 */ 23 */
24 24
25#include <drm/drm_atomic_helper.h>
25#include <linux/component.h> 26#include <linux/component.h>
26#include "vc4_drv.h" 27#include "vc4_drv.h"
27#include "vc4_regs.h" 28#include "vc4_regs.h"
@@ -102,6 +103,18 @@ int vc4_hvs_debugfs_regs(struct seq_file *m, void *unused)
102 103
103 return 0; 104 return 0;
104} 105}
106
107int vc4_hvs_debugfs_underrun(struct seq_file *m, void *data)
108{
109 struct drm_info_node *node = m->private;
110 struct drm_device *dev = node->minor->dev;
111 struct vc4_dev *vc4 = to_vc4_dev(dev);
112 struct drm_printer p = drm_seq_file_printer(m);
113
114 drm_printf(&p, "%d\n", atomic_read(&vc4->underrun));
115
116 return 0;
117}
105#endif 118#endif
106 119
107/* The filter kernel is composed of dwords each containing 3 9-bit 120/* The filter kernel is composed of dwords each containing 3 9-bit
@@ -166,6 +179,67 @@ static int vc4_hvs_upload_linear_kernel(struct vc4_hvs *hvs,
166 return 0; 179 return 0;
167} 180}
168 181
182void vc4_hvs_mask_underrun(struct drm_device *dev, int channel)
183{
184 struct vc4_dev *vc4 = to_vc4_dev(dev);
185 u32 dispctrl = HVS_READ(SCALER_DISPCTRL);
186
187 dispctrl &= ~SCALER_DISPCTRL_DSPEISLUR(channel);
188
189 HVS_WRITE(SCALER_DISPCTRL, dispctrl);
190}
191
192void vc4_hvs_unmask_underrun(struct drm_device *dev, int channel)
193{
194 struct vc4_dev *vc4 = to_vc4_dev(dev);
195 u32 dispctrl = HVS_READ(SCALER_DISPCTRL);
196
197 dispctrl |= SCALER_DISPCTRL_DSPEISLUR(channel);
198
199 HVS_WRITE(SCALER_DISPSTAT,
200 SCALER_DISPSTAT_EUFLOW(channel));
201 HVS_WRITE(SCALER_DISPCTRL, dispctrl);
202}
203
204static void vc4_hvs_report_underrun(struct drm_device *dev)
205{
206 struct vc4_dev *vc4 = to_vc4_dev(dev);
207
208 atomic_inc(&vc4->underrun);
209 DRM_DEV_ERROR(dev->dev, "HVS underrun\n");
210}
211
212static irqreturn_t vc4_hvs_irq_handler(int irq, void *data)
213{
214 struct drm_device *dev = data;
215 struct vc4_dev *vc4 = to_vc4_dev(dev);
216 irqreturn_t irqret = IRQ_NONE;
217 int channel;
218 u32 control;
219 u32 status;
220
221 status = HVS_READ(SCALER_DISPSTAT);
222 control = HVS_READ(SCALER_DISPCTRL);
223
224 for (channel = 0; channel < SCALER_CHANNELS_COUNT; channel++) {
225 /* Interrupt masking is not always honored, so check it here. */
226 if (status & SCALER_DISPSTAT_EUFLOW(channel) &&
227 control & SCALER_DISPCTRL_DSPEISLUR(channel)) {
228 vc4_hvs_mask_underrun(dev, channel);
229 vc4_hvs_report_underrun(dev);
230
231 irqret = IRQ_HANDLED;
232 }
233 }
234
235 /* Clear every per-channel interrupt flag. */
236 HVS_WRITE(SCALER_DISPSTAT, SCALER_DISPSTAT_IRQMASK(0) |
237 SCALER_DISPSTAT_IRQMASK(1) |
238 SCALER_DISPSTAT_IRQMASK(2));
239
240 return irqret;
241}
242
169static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) 243static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
170{ 244{
171 struct platform_device *pdev = to_platform_device(dev); 245 struct platform_device *pdev = to_platform_device(dev);
@@ -219,15 +293,36 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
219 dispctrl = HVS_READ(SCALER_DISPCTRL); 293 dispctrl = HVS_READ(SCALER_DISPCTRL);
220 294
221 dispctrl |= SCALER_DISPCTRL_ENABLE; 295 dispctrl |= SCALER_DISPCTRL_ENABLE;
296 dispctrl |= SCALER_DISPCTRL_DISPEIRQ(0) |
297 SCALER_DISPCTRL_DISPEIRQ(1) |
298 SCALER_DISPCTRL_DISPEIRQ(2);
222 299
223 /* Set DSP3 (PV1) to use HVS channel 2, which would otherwise 300 /* Set DSP3 (PV1) to use HVS channel 2, which would otherwise
224 * be unused. 301 * be unused.
225 */ 302 */
226 dispctrl &= ~SCALER_DISPCTRL_DSP3_MUX_MASK; 303 dispctrl &= ~SCALER_DISPCTRL_DSP3_MUX_MASK;
304 dispctrl &= ~(SCALER_DISPCTRL_DMAEIRQ |
305 SCALER_DISPCTRL_SLVWREIRQ |
306 SCALER_DISPCTRL_SLVRDEIRQ |
307 SCALER_DISPCTRL_DSPEIEOF(0) |
308 SCALER_DISPCTRL_DSPEIEOF(1) |
309 SCALER_DISPCTRL_DSPEIEOF(2) |
310 SCALER_DISPCTRL_DSPEIEOLN(0) |
311 SCALER_DISPCTRL_DSPEIEOLN(1) |
312 SCALER_DISPCTRL_DSPEIEOLN(2) |
313 SCALER_DISPCTRL_DSPEISLUR(0) |
314 SCALER_DISPCTRL_DSPEISLUR(1) |
315 SCALER_DISPCTRL_DSPEISLUR(2) |
316 SCALER_DISPCTRL_SCLEIRQ);
227 dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX); 317 dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX);
228 318
229 HVS_WRITE(SCALER_DISPCTRL, dispctrl); 319 HVS_WRITE(SCALER_DISPCTRL, dispctrl);
230 320
321 ret = devm_request_irq(dev, platform_get_irq(pdev, 0),
322 vc4_hvs_irq_handler, 0, "vc4 hvs", drm);
323 if (ret)
324 return ret;
325
231 return 0; 326 return 0;
232} 327}
233 328
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 91b8c72ff361..5160cad25fce 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -34,6 +34,18 @@ static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv)
34 return container_of(priv, struct vc4_ctm_state, base); 34 return container_of(priv, struct vc4_ctm_state, base);
35} 35}
36 36
37struct vc4_load_tracker_state {
38 struct drm_private_state base;
39 u64 hvs_load;
40 u64 membus_load;
41};
42
43static struct vc4_load_tracker_state *
44to_vc4_load_tracker_state(struct drm_private_state *priv)
45{
46 return container_of(priv, struct vc4_load_tracker_state, base);
47}
48
37static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state, 49static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state,
38 struct drm_private_obj *manager) 50 struct drm_private_obj *manager)
39{ 51{
@@ -138,6 +150,16 @@ vc4_atomic_complete_commit(struct drm_atomic_state *state)
138{ 150{
139 struct drm_device *dev = state->dev; 151 struct drm_device *dev = state->dev;
140 struct vc4_dev *vc4 = to_vc4_dev(dev); 152 struct vc4_dev *vc4 = to_vc4_dev(dev);
153 struct vc4_crtc *vc4_crtc;
154 int i;
155
156 for (i = 0; i < dev->mode_config.num_crtc; i++) {
157 if (!state->crtcs[i].ptr || !state->crtcs[i].commit)
158 continue;
159
160 vc4_crtc = to_vc4_crtc(state->crtcs[i].ptr);
161 vc4_hvs_mask_underrun(dev, vc4_crtc->channel);
162 }
141 163
142 drm_atomic_helper_wait_for_fences(dev, state, false); 164 drm_atomic_helper_wait_for_fences(dev, state, false);
143 165
@@ -385,6 +407,85 @@ vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
385 return 0; 407 return 0;
386} 408}
387 409
410static int vc4_load_tracker_atomic_check(struct drm_atomic_state *state)
411{
412 struct drm_plane_state *old_plane_state, *new_plane_state;
413 struct vc4_dev *vc4 = to_vc4_dev(state->dev);
414 struct vc4_load_tracker_state *load_state;
415 struct drm_private_state *priv_state;
416 struct drm_plane *plane;
417 int i;
418
419 priv_state = drm_atomic_get_private_obj_state(state,
420 &vc4->load_tracker);
421 if (IS_ERR(priv_state))
422 return PTR_ERR(priv_state);
423
424 load_state = to_vc4_load_tracker_state(priv_state);
425 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
426 new_plane_state, i) {
427 struct vc4_plane_state *vc4_plane_state;
428
429 if (old_plane_state->fb && old_plane_state->crtc) {
430 vc4_plane_state = to_vc4_plane_state(old_plane_state);
431 load_state->membus_load -= vc4_plane_state->membus_load;
432 load_state->hvs_load -= vc4_plane_state->hvs_load;
433 }
434
435 if (new_plane_state->fb && new_plane_state->crtc) {
436 vc4_plane_state = to_vc4_plane_state(new_plane_state);
437 load_state->membus_load += vc4_plane_state->membus_load;
438 load_state->hvs_load += vc4_plane_state->hvs_load;
439 }
440 }
441
442 /* Don't check the load when the tracker is disabled. */
443 if (!vc4->load_tracker_enabled)
444 return 0;
445
 446 /* The absolute limit is 2 Gbyte/sec, but let's leave some margin so
 447 * the system keeps working while other blocks access the memory.
 448 */
449 if (load_state->membus_load > SZ_1G + SZ_512M)
450 return -ENOSPC;
451
 452 /* The HVS clock is supposed to run at 250 MHz; leave some margin
 453 * and consider the maximum available to be 240M cycles/sec.
 454 */
455 if (load_state->hvs_load > 240000000ULL)
456 return -ENOSPC;
457
458 return 0;
459}
460
461static struct drm_private_state *
462vc4_load_tracker_duplicate_state(struct drm_private_obj *obj)
463{
464 struct vc4_load_tracker_state *state;
465
466 state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
467 if (!state)
468 return NULL;
469
470 __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
471
472 return &state->base;
473}
474
475static void vc4_load_tracker_destroy_state(struct drm_private_obj *obj,
476 struct drm_private_state *state)
477{
478 struct vc4_load_tracker_state *load_state;
479
480 load_state = to_vc4_load_tracker_state(state);
481 kfree(load_state);
482}
483
484static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = {
485 .atomic_duplicate_state = vc4_load_tracker_duplicate_state,
486 .atomic_destroy_state = vc4_load_tracker_destroy_state,
487};
488
388static int 489static int
389vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) 490vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
390{ 491{
@@ -394,7 +495,11 @@ vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
394 if (ret < 0) 495 if (ret < 0)
395 return ret; 496 return ret;
396 497
397 return drm_atomic_helper_check(dev, state); 498 ret = drm_atomic_helper_check(dev, state);
499 if (ret)
500 return ret;
501
502 return vc4_load_tracker_atomic_check(state);
398} 503}
399 504
400static const struct drm_mode_config_funcs vc4_mode_funcs = { 505static const struct drm_mode_config_funcs vc4_mode_funcs = {
@@ -407,8 +512,14 @@ int vc4_kms_load(struct drm_device *dev)
407{ 512{
408 struct vc4_dev *vc4 = to_vc4_dev(dev); 513 struct vc4_dev *vc4 = to_vc4_dev(dev);
409 struct vc4_ctm_state *ctm_state; 514 struct vc4_ctm_state *ctm_state;
515 struct vc4_load_tracker_state *load_state;
410 int ret; 516 int ret;
411 517
 518 /* Start with the load tracker enabled. It can be disabled through
 519 * the hvs_load_tracker debugfs file.
 520 */
521 vc4->load_tracker_enabled = true;
522
412 sema_init(&vc4->async_modeset, 1); 523 sema_init(&vc4->async_modeset, 1);
413 524
414 /* Set support for vblank irq fast disable, before drm_vblank_init() */ 525 /* Set support for vblank irq fast disable, before drm_vblank_init() */
@@ -436,6 +547,15 @@ int vc4_kms_load(struct drm_device *dev)
436 drm_atomic_private_obj_init(dev, &vc4->ctm_manager, &ctm_state->base, 547 drm_atomic_private_obj_init(dev, &vc4->ctm_manager, &ctm_state->base,
437 &vc4_ctm_state_funcs); 548 &vc4_ctm_state_funcs);
438 549
550 load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
551 if (!load_state) {
552 drm_atomic_private_obj_fini(&vc4->ctm_manager);
553 return -ENOMEM;
554 }
555
556 drm_atomic_private_obj_init(dev, &vc4->load_tracker, &load_state->base,
557 &vc4_load_tracker_state_funcs);
558
439 drm_mode_config_reset(dev); 559 drm_mode_config_reset(dev);
440 560
441 drm_kms_helper_poll_init(dev); 561 drm_kms_helper_poll_init(dev);
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index d098337c10e9..4d918d3e4858 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -488,6 +488,61 @@ static void vc4_write_scaling_parameters(struct drm_plane_state *state,
488 } 488 }
489} 489}
490 490
491static void vc4_plane_calc_load(struct drm_plane_state *state)
492{
493 unsigned int hvs_load_shift, vrefresh, i;
494 struct drm_framebuffer *fb = state->fb;
495 struct vc4_plane_state *vc4_state;
496 struct drm_crtc_state *crtc_state;
497 unsigned int vscale_factor;
498
499 vc4_state = to_vc4_plane_state(state);
500 crtc_state = drm_atomic_get_existing_crtc_state(state->state,
501 state->crtc);
502 vrefresh = drm_mode_vrefresh(&crtc_state->adjusted_mode);
503
504 /* The HVS is able to process 2 pixels/cycle when scaling the source,
505 * 4 pixels/cycle otherwise.
 506 * The alpha blending step seems to be pipelined, always operating
 507 * at 4 pixels/cycle, so the limiting factor here appears to be the
 508 * scaler block.
509 * HVS load is expressed in clk-cycles/sec (AKA Hz).
510 */
511 if (vc4_state->x_scaling[0] != VC4_SCALING_NONE ||
512 vc4_state->x_scaling[1] != VC4_SCALING_NONE ||
513 vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
514 vc4_state->y_scaling[1] != VC4_SCALING_NONE)
515 hvs_load_shift = 1;
516 else
517 hvs_load_shift = 2;
518
519 vc4_state->membus_load = 0;
520 vc4_state->hvs_load = 0;
521 for (i = 0; i < fb->format->num_planes; i++) {
 522 /* Even though the bandwidth per plane required for a single frame is
 523 *
 524 * vc4_state->src_w[i] * vc4_state->src_h[i] * cpp * vrefresh
 525 *
 526 * when downscaling we have to read more pixels per line within
 527 * the time reserved for a single output line, so the bandwidth
 528 * demand can be momentarily higher. To account for that, we
 529 * calculate the downscaling factor and multiply the plane load
 530 * by this number. We're likely over-estimating the read demand,
 531 * but that's better than under-estimating it.
532 */
533 vscale_factor = DIV_ROUND_UP(vc4_state->src_h[i],
534 vc4_state->crtc_h);
535 vc4_state->membus_load += vc4_state->src_w[i] *
536 vc4_state->src_h[i] * vscale_factor *
537 fb->format->cpp[i];
538 vc4_state->hvs_load += vc4_state->crtc_h * vc4_state->crtc_w;
539 }
540
541 vc4_state->hvs_load *= vrefresh;
542 vc4_state->hvs_load >>= hvs_load_shift;
543 vc4_state->membus_load *= vrefresh;
544}
545
491static int vc4_plane_allocate_lbm(struct drm_plane_state *state) 546static int vc4_plane_allocate_lbm(struct drm_plane_state *state)
492{ 547{
493 struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev); 548 struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev);
@@ -875,6 +930,8 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
875 */ 930 */
876 vc4_state->dlist_initialized = 1; 931 vc4_state->dlist_initialized = 1;
877 932
933 vc4_plane_calc_load(state);
934
878 return 0; 935 return 0;
879} 936}
880 937
@@ -1082,7 +1139,7 @@ static int vc4_prepare_fb(struct drm_plane *plane,
1082 1139
1083 bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base); 1140 bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base);
1084 1141
1085 fence = reservation_object_get_excl_rcu(bo->resv); 1142 fence = reservation_object_get_excl_rcu(bo->base.base.resv);
1086 drm_atomic_set_fence_for_plane(state, fence); 1143 drm_atomic_set_fence_for_plane(state, fence);
1087 1144
1088 if (plane->state->fb == state->fb) 1145 if (plane->state->fb == state->fb)
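
Plugging illustrative numbers into vc4_plane_calc_load() above: an unscaled 1920x1080 ARGB8888 plane on a 60 Hz CRTC gives hvs_load_shift = 2 and vscale_factor = 1, hence membus_load = 1920 * 1080 * 4 * 60 = 497,664,000 bytes/sec and hvs_load = (1920 * 1080 * 60) >> 2 = 31,104,000 cycles/sec. A sketch of the same arithmetic against the caps used by the load tracker (the figures, not the function, come from this patch):

static void example_plane_load(void)
{
	/* ceil(src_h / crtc_h): no downscaling here, so 1 */
	unsigned int vscale = DIV_ROUND_UP(1080, 1080);
	u64 membus = 1920ULL * 1080 * vscale * 4 * 60;	/* ~498 MB/s */
	u64 hvs = (1920ULL * 1080 * 60) >> 2;		/* ~31.1 Mcycles/s */

	/*
	 * Against vc4_load_tracker_atomic_check(): about three such planes
	 * fit under the 1.5 GB/s membus cap, and about seven under the
	 * 240M cycles/sec HVS cap.
	 */
	WARN_ON(membus > SZ_1G + SZ_512M || hvs > 240000000ULL);
}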
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index 931088014272..c0c5fadaf7e3 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -212,11 +212,11 @@
212 212
213#define PV_HACT_ACT 0x30 213#define PV_HACT_ACT 0x30
214 214
215#define SCALER_CHANNELS_COUNT 3
216
215#define SCALER_DISPCTRL 0x00000000 217#define SCALER_DISPCTRL 0x00000000
216/* Global register for clock gating the HVS */ 218/* Global register for clock gating the HVS */
217# define SCALER_DISPCTRL_ENABLE BIT(31) 219# define SCALER_DISPCTRL_ENABLE BIT(31)
218# define SCALER_DISPCTRL_DSP2EISLUR BIT(15)
219# define SCALER_DISPCTRL_DSP1EISLUR BIT(14)
220# define SCALER_DISPCTRL_DSP3_MUX_MASK VC4_MASK(19, 18) 220# define SCALER_DISPCTRL_DSP3_MUX_MASK VC4_MASK(19, 18)
221# define SCALER_DISPCTRL_DSP3_MUX_SHIFT 18 221# define SCALER_DISPCTRL_DSP3_MUX_SHIFT 18
222 222
@@ -224,45 +224,25 @@
224 * SCALER_DISPSTAT_IRQDISP0. Note that short frame contributions are 224 * SCALER_DISPSTAT_IRQDISP0. Note that short frame contributions are
225 * always enabled. 225 * always enabled.
226 */ 226 */
227# define SCALER_DISPCTRL_DSP0EISLUR BIT(13) 227# define SCALER_DISPCTRL_DSPEISLUR(x) BIT(13 + (x))
228# define SCALER_DISPCTRL_DSP2EIEOLN BIT(12)
229# define SCALER_DISPCTRL_DSP2EIEOF BIT(11)
230# define SCALER_DISPCTRL_DSP1EIEOLN BIT(10)
231# define SCALER_DISPCTRL_DSP1EIEOF BIT(9)
232/* Enables Display 0 end-of-line-N contribution to 228/* Enables Display 0 end-of-line-N contribution to
233 * SCALER_DISPSTAT_IRQDISP0 229 * SCALER_DISPSTAT_IRQDISP0
234 */ 230 */
235# define SCALER_DISPCTRL_DSP0EIEOLN BIT(8) 231# define SCALER_DISPCTRL_DSPEIEOLN(x) BIT(8 + ((x) * 2))
236/* Enables Display 0 EOF contribution to SCALER_DISPSTAT_IRQDISP0 */ 232/* Enables Display 0 EOF contribution to SCALER_DISPSTAT_IRQDISP0 */
237# define SCALER_DISPCTRL_DSP0EIEOF BIT(7) 233# define SCALER_DISPCTRL_DSPEIEOF(x) BIT(7 + ((x) * 2))
238 234
239# define SCALER_DISPCTRL_SLVRDEIRQ BIT(6) 235# define SCALER_DISPCTRL_SLVRDEIRQ BIT(6)
240# define SCALER_DISPCTRL_SLVWREIRQ BIT(5) 236# define SCALER_DISPCTRL_SLVWREIRQ BIT(5)
241# define SCALER_DISPCTRL_DMAEIRQ BIT(4) 237# define SCALER_DISPCTRL_DMAEIRQ BIT(4)
242# define SCALER_DISPCTRL_DISP2EIRQ BIT(3)
243# define SCALER_DISPCTRL_DISP1EIRQ BIT(2)
244/* Enables interrupt generation on the enabled EOF/EOLN/EISLUR 238/* Enables interrupt generation on the enabled EOF/EOLN/EISLUR
245 * bits and short frames. 239 * bits and short frames.
246 */ 240 */
247# define SCALER_DISPCTRL_DISP0EIRQ BIT(1) 241# define SCALER_DISPCTRL_DISPEIRQ(x) BIT(1 + (x))
248/* Enables interrupt generation on scaler profiler interrupt. */ 242/* Enables interrupt generation on scaler profiler interrupt. */
249# define SCALER_DISPCTRL_SCLEIRQ BIT(0) 243# define SCALER_DISPCTRL_SCLEIRQ BIT(0)
250 244
251#define SCALER_DISPSTAT 0x00000004 245#define SCALER_DISPSTAT 0x00000004
252# define SCALER_DISPSTAT_COBLOW2 BIT(29)
253# define SCALER_DISPSTAT_EOLN2 BIT(28)
254# define SCALER_DISPSTAT_ESFRAME2 BIT(27)
255# define SCALER_DISPSTAT_ESLINE2 BIT(26)
256# define SCALER_DISPSTAT_EUFLOW2 BIT(25)
257# define SCALER_DISPSTAT_EOF2 BIT(24)
258
259# define SCALER_DISPSTAT_COBLOW1 BIT(21)
260# define SCALER_DISPSTAT_EOLN1 BIT(20)
261# define SCALER_DISPSTAT_ESFRAME1 BIT(19)
262# define SCALER_DISPSTAT_ESLINE1 BIT(18)
263# define SCALER_DISPSTAT_EUFLOW1 BIT(17)
264# define SCALER_DISPSTAT_EOF1 BIT(16)
265
266# define SCALER_DISPSTAT_RESP_MASK VC4_MASK(15, 14) 246# define SCALER_DISPSTAT_RESP_MASK VC4_MASK(15, 14)
267# define SCALER_DISPSTAT_RESP_SHIFT 14 247# define SCALER_DISPSTAT_RESP_SHIFT 14
268# define SCALER_DISPSTAT_RESP_OKAY 0 248# define SCALER_DISPSTAT_RESP_OKAY 0
@@ -270,23 +250,26 @@
270# define SCALER_DISPSTAT_RESP_SLVERR 2 250# define SCALER_DISPSTAT_RESP_SLVERR 2
271# define SCALER_DISPSTAT_RESP_DECERR 3 251# define SCALER_DISPSTAT_RESP_DECERR 3
272 252
273# define SCALER_DISPSTAT_COBLOW0 BIT(13) 253# define SCALER_DISPSTAT_COBLOW(x) BIT(13 + ((x) * 8))
274/* Set when the DISPEOLN line is done compositing. */ 254/* Set when the DISPEOLN line is done compositing. */
275# define SCALER_DISPSTAT_EOLN0 BIT(12) 255# define SCALER_DISPSTAT_EOLN(x) BIT(12 + ((x) * 8))
276/* Set when VSTART is seen but there are still pixels in the current 256/* Set when VSTART is seen but there are still pixels in the current
277 * output line. 257 * output line.
278 */ 258 */
279# define SCALER_DISPSTAT_ESFRAME0 BIT(11) 259# define SCALER_DISPSTAT_ESFRAME(x) BIT(11 + ((x) * 8))
280/* Set when HSTART is seen but there are still pixels in the current 260/* Set when HSTART is seen but there are still pixels in the current
281 * output line. 261 * output line.
282 */ 262 */
283# define SCALER_DISPSTAT_ESLINE0 BIT(10) 263# define SCALER_DISPSTAT_ESLINE(x) BIT(10 + ((x) * 8))
284/* Set when the downstream tries to read from the display FIFO 264/* Set when the downstream tries to read from the display FIFO
285 * while it's empty. 265 * while it's empty.
286 */ 266 */
287# define SCALER_DISPSTAT_EUFLOW0 BIT(9) 267# define SCALER_DISPSTAT_EUFLOW(x) BIT(9 + ((x) * 8))
288/* Set when the display mode changes from RUN to EOF */ 268/* Set when the display mode changes from RUN to EOF */
289# define SCALER_DISPSTAT_EOF0 BIT(8) 269# define SCALER_DISPSTAT_EOF(x) BIT(8 + ((x) * 8))
270
271# define SCALER_DISPSTAT_IRQMASK(x) VC4_MASK(13 + ((x) * 8), \
272 8 + ((x) * 8))
290 273
291/* Set on AXI invalid DMA ID error. */ 274/* Set on AXI invalid DMA ID error. */
292# define SCALER_DISPSTAT_DMA_ERROR BIT(7) 275# define SCALER_DISPSTAT_DMA_ERROR BIT(7)
@@ -298,12 +281,10 @@
298 * SCALER_DISPSTAT_RESP_ERROR is not SCALER_DISPSTAT_RESP_OKAY. 281 * SCALER_DISPSTAT_RESP_ERROR is not SCALER_DISPSTAT_RESP_OKAY.
299 */ 282 */
300# define SCALER_DISPSTAT_IRQDMA BIT(4) 283# define SCALER_DISPSTAT_IRQDMA BIT(4)
301# define SCALER_DISPSTAT_IRQDISP2 BIT(3)
302# define SCALER_DISPSTAT_IRQDISP1 BIT(2)
303/* Set when any of the EOF/EOLN/ESFRAME/ESLINE bits are set and their 284/* Set when any of the EOF/EOLN/ESFRAME/ESLINE bits are set and their
304 * corresponding interrupt bit is enabled in DISPCTRL. 285 * corresponding interrupt bit is enabled in DISPCTRL.
305 */ 286 */
306# define SCALER_DISPSTAT_IRQDISP0 BIT(1) 287# define SCALER_DISPSTAT_IRQDISP(x) BIT(1 + (x))
307/* On read, the profiler interrupt. On write, clear *all* interrupt bits. */ 288/* On read, the profiler interrupt. On write, clear *all* interrupt bits. */
308# define SCALER_DISPSTAT_IRQSCL BIT(0) 289# define SCALER_DISPSTAT_IRQSCL BIT(0)
309 290
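
The parameterized SCALER_DISPCTRL/SCALER_DISPSTAT macros above are meant to reproduce the old fixed-name bits; a few compile-time spot checks (not in the patch, assuming static_assert from <linux/build_bug.h>):

static_assert(SCALER_DISPCTRL_DSPEISLUR(1) == BIT(14));	/* was DSP1EISLUR */
static_assert(SCALER_DISPCTRL_DSPEIEOLN(2) == BIT(12));	/* was DSP2EIEOLN */
static_assert(SCALER_DISPCTRL_DISPEIRQ(2) == BIT(3));	/* was DISP2EIRQ */
static_assert(SCALER_DISPSTAT_EUFLOW(1) == BIT(17));	/* was EUFLOW1 */
static_assert(SCALER_DISPSTAT_COBLOW(2) == BIT(29));	/* was COBLOW2 */
static_assert(SCALER_DISPSTAT_IRQDISP(2) == BIT(3));	/* was IRQDISP2 */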
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
index 5dabd91f2d7e..cc2888dd7171 100644
--- a/drivers/gpu/drm/vc4/vc4_txp.c
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -249,7 +249,6 @@ static int vc4_txp_connector_atomic_check(struct drm_connector *conn,
249 struct drm_connector_state *conn_state) 249 struct drm_connector_state *conn_state)
250{ 250{
251 struct drm_crtc_state *crtc_state; 251 struct drm_crtc_state *crtc_state;
252 struct drm_gem_cma_object *gem;
253 struct drm_framebuffer *fb; 252 struct drm_framebuffer *fb;
254 int i; 253 int i;
255 254
@@ -275,8 +274,6 @@ static int vc4_txp_connector_atomic_check(struct drm_connector *conn,
275 if (i == ARRAY_SIZE(drm_fmts)) 274 if (i == ARRAY_SIZE(drm_fmts))
276 return -EINVAL; 275 return -EINVAL;
277 276
278 gem = drm_fb_cma_get_gem_obj(fb, 0);
279
280 /* Pitch must be aligned on 16 bytes. */ 277 /* Pitch must be aligned on 16 bytes. */
281 if (fb->pitches[0] & GENMASK(3, 0)) 278 if (fb->pitches[0] & GENMASK(3, 0))
282 return -EINVAL; 279 return -EINVAL;
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index b996ac1d4fcc..7c2893181ba4 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -205,10 +205,10 @@ static struct drm_driver driver = {
205#if defined(CONFIG_DEBUG_FS) 205#if defined(CONFIG_DEBUG_FS)
206 .debugfs_init = virtio_gpu_debugfs_init, 206 .debugfs_init = virtio_gpu_debugfs_init,
207#endif 207#endif
208 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
208 .gem_prime_export = drm_gem_prime_export, 209 .gem_prime_export = drm_gem_prime_export,
209 .gem_prime_import = drm_gem_prime_import, 210 .gem_prime_import = drm_gem_prime_import,
210 .gem_prime_pin = virtgpu_gem_prime_pin, 211 .gem_prime_get_sg_table = virtgpu_gem_prime_get_sg_table,
211 .gem_prime_unpin = virtgpu_gem_prime_unpin,
212 .gem_prime_vmap = virtgpu_gem_prime_vmap, 212 .gem_prime_vmap = virtgpu_gem_prime_vmap,
213 .gem_prime_vunmap = virtgpu_gem_prime_vunmap, 213 .gem_prime_vunmap = virtgpu_gem_prime_vunmap,
214 .gem_prime_mmap = virtgpu_gem_prime_mmap, 214 .gem_prime_mmap = virtgpu_gem_prime_mmap,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 3238fdf58eb4..86a264cee362 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -352,8 +352,7 @@ void virtio_gpu_object_free_sg_table(struct virtio_gpu_object *bo);
352int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait); 352int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait);
353 353
354/* virtgpu_prime.c */ 354/* virtgpu_prime.c */
355int virtgpu_gem_prime_pin(struct drm_gem_object *obj); 355struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
356void virtgpu_gem_prime_unpin(struct drm_gem_object *obj);
357void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj); 356void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj);
358void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); 357void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
359int virtgpu_gem_prime_mmap(struct drm_gem_object *obj, 358int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index c59ec34c80a5..22ef151410e0 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -28,15 +28,16 @@
28 * device that might share buffers with virtgpu 28 * device that might share buffers with virtgpu
29 */ 29 */
30 30
31int virtgpu_gem_prime_pin(struct drm_gem_object *obj) 31struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
32{ 32{
33 WARN_ONCE(1, "not implemented"); 33 struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
34 return -ENODEV;
35}
36 34
37void virtgpu_gem_prime_unpin(struct drm_gem_object *obj) 35 if (!bo->tbo.ttm->pages || !bo->tbo.ttm->num_pages)
38{ 36 /* should not happen */
39 WARN_ONCE(1, "not implemented"); 37 return ERR_PTR(-EINVAL);
38
39 return drm_prime_pages_to_sg(bo->tbo.ttm->pages,
40 bo->tbo.ttm->num_pages);
40} 41}
41 42
42void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj) 43void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj)
@@ -56,7 +57,10 @@ void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
56} 57}
57 58
58int virtgpu_gem_prime_mmap(struct drm_gem_object *obj, 59int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
59 struct vm_area_struct *area) 60 struct vm_area_struct *vma)
60{ 61{
61 return -ENODEV; 62 struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
63
64 bo->gem_base.vma_node.vm_node.start = bo->tbo.vma_node.vm_node.start;
65 return drm_gem_prime_mmap(obj, vma);
62} 66}
diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
index 3e78a832d7f9..84aa4d61dc42 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.c
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -582,6 +582,7 @@ static void xen_drm_drv_fini(struct xen_drm_front_info *front_info)
582 582
583 drm_kms_helper_poll_fini(dev); 583 drm_kms_helper_poll_fini(dev);
584 drm_dev_unplug(dev); 584 drm_dev_unplug(dev);
585 drm_dev_put(dev);
585 586
586 front_info->drm_info = NULL; 587 front_info->drm_info = NULL;
587 588