author		Dave Airlie <airlied@redhat.com>	2019-08-21 01:38:43 -0400
committer	Dave Airlie <airlied@redhat.com>	2019-08-21 02:44:41 -0400
commit		5f680625d9765a2f936707465659acac8e44f514 (patch)
tree		9be42003b1848c3b8e2e3de35c1875fb94b7ee24
parent		8120ed5ebd2aaad1fee1a777effa158e5284b816 (diff)
parent		d777478599f781fc5162d1ae95dbee6e5ae05a41 (diff)
Merge tag 'drm-misc-next-2019-08-19' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

drm-misc-next for 5.4:

UAPI Changes:

Cross-subsystem Changes:

Core Changes:
  - dma-buf: add reservation_object_fences helper, relax
    reservation_object_add_shared_fence, remove reservation_object seq
    number (and then restored)
  - dma-fence: Shrinkage of the dma_fence structure, Merge
    dma_fence_signal and dma_fence_signal_locked, Store the timestamp
    in struct dma_fence in a union with cb_list

Driver Changes:
  - More dt-bindings YAML conversions
  - More removal of drmP.h includes
  - dw-hdmi: Support get_eld and various i2s improvements
  - gm12u320: Few fixes
  - meson: Global cleanup
  - panfrost: Few refactors, Support for GPU heap allocations
  - sun4i: Support for DDC enable GPIO
  - New panels: TI nspire, NEC NL8048HL11, LG Philips LB035Q02,
    Sharp LS037V7DW01, Sony ACX565AKM, Toppoly TD028TTEC1,
    Toppoly TD043MTEA1

Signed-off-by: Dave Airlie <airlied@redhat.com>
[airlied: fixup dma_resv rename fallout]
From: Maxime Ripard <maxime.ripard@bootlin.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190819141923.7l2adietcr2pioct@flea
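The most invasive change in this pull is the rename of struct reservation_object to struct dma_resv (drivers/dma-buf/reservation.c moves to dma-resv.c and linux/reservation.h to linux/dma-resv.h, visible as renames in the diffstat below). A minimal sketch of how a driver-side buffer object would use the renamed API after this merge; struct my_bo and my_bo_add_fence() are hypothetical names, and bo->resv is assumed to have been initialised elsewhere with dma_resv_init():

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>

struct my_bo {
	struct dma_resv resv;	/* initialised with dma_resv_init() */
};

static int my_bo_add_fence(struct my_bo *bo, struct dma_fence *fence,
			   bool write)
{
	int ret;

	/* dma_resv_lock()/unlock() wrap the ww_mutex embedded in the object */
	ret = dma_resv_lock(&bo->resv, NULL);
	if (ret)
		return ret;

	if (write) {
		/* exclusive slot: later readers and writers wait on this */
		dma_resv_add_excl_fence(&bo->resv, fence);
	} else {
		/* shared slots must be reserved before a fence is added */
		ret = dma_resv_reserve_shared(&bo->resv, 1);
		if (!ret)
			dma_resv_add_shared_fence(&bo->resv, fence);
	}

	dma_resv_unlock(&bo->resv);
	return ret;
}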
-rw-r--r--Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.txt119
-rw-r--r--Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml150
-rw-r--r--Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt121
-rw-r--r--Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml137
-rw-r--r--Documentation/devicetree/bindings/display/connector/hdmi-connector.txt1
-rw-r--r--Documentation/devicetree/bindings/display/panel/nec,nl8048hl11.yaml62
-rw-r--r--Documentation/devicetree/bindings/display/panel/ti,nspire.yaml36
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.yaml5
-rw-r--r--MAINTAINERS4
-rw-r--r--drivers/dma-buf/Makefile2
-rw-r--r--drivers/dma-buf/dma-buf.c28
-rw-r--r--drivers/dma-buf/dma-fence-array.c32
-rw-r--r--drivers/dma-buf/dma-fence.c55
-rw-r--r--drivers/dma-buf/dma-resv.c (renamed from drivers/dma-buf/reservation.c)156
-rw-r--r--drivers/dma-buf/sw_sync.c16
-rw-r--r--drivers/dma-buf/sync_file.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c20
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c2
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_crtc.c2
-rw-r--r--drivers/gpu/drm/arm/hdlcd_crtc.c12
-rw-r--r--drivers/gpu/drm/arm/hdlcd_drv.c7
-rw-r--r--drivers/gpu/drm/arm/malidp_crtc.c11
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.c8
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.h7
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.c7
-rw-r--r--drivers/gpu/drm/arm/malidp_mw.c5
-rw-r--r--drivers/gpu/drm/arm/malidp_planes.c4
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c10
-rw-r--r--drivers/gpu/drm/armada/armada_debugfs.c8
-rw-r--r--drivers/gpu/drm/armada/armada_drm.h5
-rw-r--r--drivers/gpu/drm/armada/armada_drv.c8
-rw-r--r--drivers/gpu/drm/armada/armada_fb.c3
-rw-r--r--drivers/gpu/drm/armada/armada_fbdev.c3
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c7
-rw-r--r--drivers/gpu/drm/armada/armada_overlay.c8
-rw-r--r--drivers/gpu/drm/armada/armada_plane.c4
-rw-r--r--drivers/gpu/drm/armada/armada_trace.h5
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c2
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c3
-rw-r--r--drivers/gpu/drm/bridge/dumb-vga-dac.c7
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c20
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h1
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c60
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c37
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.h13
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c10
-rw-r--r--drivers/gpu/drm/drm_atomic_uapi.c2
-rw-r--r--drivers/gpu/drm/drm_connector.c10
-rw-r--r--drivers/gpu/drm/drm_gem.c29
-rw-r--r--drivers/gpu/drm/drm_gem_framebuffer_helper.c4
-rw-r--r--drivers/gpu/drm/drm_gem_shmem_helper.c61
-rw-r--r--drivers/gpu/drm/drm_panel.c102
-rw-r--r--drivers/gpu/drm/drm_syncobj.c98
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c8
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.h2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c14
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c10
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_busy.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_clflush.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_client_blt.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c6
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_fence.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.h10
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_wait.c18
-rw-r--r--drivers/gpu/drm/i915/gt/intel_breadcrumbs.c13
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h2
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_batch_pool.c10
-rw-r--r--drivers/gpu/drm/i915/i915_request.c4
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.c8
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.h4
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c8
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h8
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c11
-rw-r--r--drivers/gpu/drm/imx/parallel-display.c11
-rw-r--r--drivers/gpu/drm/lima/lima_gem.c8
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_fb.c2
-rw-r--r--drivers/gpu/drm/meson/meson_crtc.c17
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c26
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.c2
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.h12
-rw-r--r--drivers/gpu/drm/meson/meson_plane.c2
-rw-r--r--drivers/gpu/drm/meson/meson_registers.h136
-rw-r--r--drivers/gpu/drm/meson/meson_vclk.c7
-rw-r--r--drivers/gpu/drm/meson/meson_venc.c169
-rw-r--r--drivers/gpu/drm/meson/meson_venc_cvbs.c3
-rw-r--r--drivers/gpu/drm/meson/meson_viu.c82
-rw-r--r--drivers/gpu/drm/meson/meson_vpp.c25
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c18
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h2
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c2
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.c2
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_out.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c6
-rw-r--r--drivers/gpu/drm/omapdrm/displays/Kconfig38
-rw-r--r--drivers/gpu/drm/omapdrm/displays/Makefile6
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c251
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c271
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c262
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c755
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c390
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c513
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c7
-rw-r--r--drivers/gpu/drm/panel/Kconfig46
-rw-r--r--drivers/gpu/drm/panel/Makefile6
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9322.c34
-rw-r--r--drivers/gpu/drm/panel/panel-lg-lb035q02.c237
-rw-r--r--drivers/gpu/drm/panel/panel-nec-nl8048hl11.c248
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c226
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c64
-rw-r--r--drivers/gpu/drm/panel/panel-sony-acx565akm.c701
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-td028ttec1.c399
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-td043mtea1.c509
-rw-r--r--drivers/gpu/drm/panfrost/Makefile1
-rw-r--r--drivers/gpu/drm/panfrost/TODO11
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.c16
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.h5
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_drv.c105
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.c133
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.h20
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c107
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.c13
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.c216
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.h3
-rw-r--r--drivers/gpu/drm/pl111/pl111_display.c29
-rw-r--r--drivers/gpu/drm/qxl/qxl_debugfs.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c6
-rw-r--r--drivers/gpu/drm/radeon/cik.c2
-rw-r--r--drivers/gpu/drm/radeon/cik_sdma.c2
-rw-r--r--drivers/gpu/drm/radeon/evergreen_dma.c2
-rw-r--r--drivers/gpu/drm/radeon/r100.c2
-rw-r--r--drivers/gpu/drm/radeon/r200.c2
-rw-r--r--drivers/gpu/drm/radeon/r600.c2
-rw-r--r--drivers/gpu/drm/radeon/r600_dma.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon.h8
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h18
-rw-r--r--drivers/gpu/drm/radeon/radeon_benchmark.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_mn.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_prime.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_sync.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_vm.c2
-rw-r--r--drivers/gpu/drm/radeon/rv770_dma.c2
-rw-r--r--drivers/gpu/drm/radeon/si_dma.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c54
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h2
-rw-r--r--drivers/gpu/drm/tegra/dc.c13
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c5
-rw-r--r--drivers/gpu/drm/tegra/drm.c8
-rw-r--r--drivers/gpu/drm/tegra/drm.h3
-rw-r--r--drivers/gpu/drm/tegra/dsi.c8
-rw-r--r--drivers/gpu/drm/tegra/fb.c6
-rw-r--r--drivers/gpu/drm/tegra/gem.c3
-rw-r--r--drivers/gpu/drm/tegra/gem.h1
-rw-r--r--drivers/gpu/drm/tegra/gr2d.c1
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c5
-rw-r--r--drivers/gpu/drm/tegra/hub.c3
-rw-r--r--drivers/gpu/drm/tegra/hub.h1
-rw-r--r--drivers/gpu/drm/tegra/plane.c1
-rw-r--r--drivers/gpu/drm/tegra/sor.c3
-rw-r--r--drivers/gpu/drm/tegra/vic.c1
-rw-r--r--drivers/gpu/drm/tiny/gm12u320.c44
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c118
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c16
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c6
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c20
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c2
-rw-r--r--drivers/gpu/drm/v3d/v3d_gem.c4
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_drv.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c6
-rw-r--r--drivers/gpu/drm/vgem/vgem_fence.c16
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_blit.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c6
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_kms.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/Kconfig5
-rw-r--r--include/drm/bridge/dw_hdmi.h2
-rw-r--r--include/drm/drmP.h2
-rw-r--r--include/drm/drm_connector.h4
-rw-r--r--include/drm/drm_gem.h8
-rw-r--r--include/drm/drm_gem_shmem_helper.h15
-rw-r--r--include/drm/drm_panel.h183
-rw-r--r--include/drm/ttm/ttm_bo_api.h12
-rw-r--r--include/drm/ttm/ttm_bo_driver.h14
-rw-r--r--include/linux/amba/clcd-regs.h1
-rw-r--r--include/linux/dma-buf.h4
-rw-r--r--include/linux/dma-fence.h34
-rw-r--r--include/linux/dma-resv.h (renamed from include/linux/reservation.h)119
-rw-r--r--include/uapi/drm/panfrost_drm.h25
223 files changed, 5062 insertions, 3844 deletions
diff --git a/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.txt b/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.txt
deleted file mode 100644
index 3a50a7862cf3..000000000000
--- a/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.txt
+++ /dev/null
@@ -1,119 +0,0 @@
1Amlogic specific extensions to the Synopsys Designware HDMI Controller
2======================================================================
3
4The Amlogic Meson Synopsys Designware Integration is composed of :
5- A Synopsys DesignWare HDMI Controller IP
6- A TOP control block controlling the Clocks and PHY
7- A custom HDMI PHY in order to convert video to TMDS signal
8 ___________________________________
9| HDMI TOP |<= HPD
10|___________________________________|
11| | |
12| Synopsys HDMI | HDMI PHY |=> TMDS
13| Controller |________________|
14|___________________________________|<=> DDC
15
16The HDMI TOP block only supports HPD sensing.
17The Synopsys HDMI Controller interrupt is routed through the
18TOP Block interrupt.
19Communication to the TOP Block and the Synopsys HDMI Controller is done
20via a pair of dedicated addr+read/write registers.
21The HDMI PHY is configured by registers in the HHI register block.
22
23Pixel data arrives in 4:4:4 format from the VENC block and the VPU HDMI mux
24selects either the ENCI encoder for the 576i or 480i formats or the ENCP
25encoder for all the other formats including interlaced HD formats.
26
27The VENC uses a DVI encoder on top of the ENCI or ENCP encoders to generate
28DVI timings for the HDMI controller.
29
30Amlogic Meson GXBB, GXL and GXM SoCs families embeds the Synopsys DesignWare
31HDMI TX IP version 2.01a with HDCP and I2C & S/PDIF
32audio source interfaces.
33
34Required properties:
35- compatible: value should be different for each SoC family as :
36 - GXBB (S905) : "amlogic,meson-gxbb-dw-hdmi"
37 - GXL (S905X, S905D) : "amlogic,meson-gxl-dw-hdmi"
38 - GXM (S912) : "amlogic,meson-gxm-dw-hdmi"
39 followed by the common "amlogic,meson-gx-dw-hdmi"
40 - G12A (S905X2, S905Y2, S905D2) : "amlogic,meson-g12a-dw-hdmi"
41- reg: Physical base address and length of the controller's registers.
42- interrupts: The HDMI interrupt number
43- clocks, clock-names : must have the phandles to the HDMI iahb and isfr clocks,
44 and the Amlogic Meson venci clocks as described in
45 Documentation/devicetree/bindings/clock/clock-bindings.txt,
46 the clocks are soc specific, the clock-names should be "iahb", "isfr", "venci"
47- resets, resets-names: must have the phandles to the HDMI apb, glue and phy
48 resets as described in :
49 Documentation/devicetree/bindings/reset/reset.txt,
50 the reset-names should be "hdmitx_apb", "hdmitx", "hdmitx_phy"
51
52Optional properties:
53- hdmi-supply: Optional phandle to an external 5V regulator to power the HDMI
54 logic, as described in the file ../regulator/regulator.txt
55
56Required nodes:
57
58The connections to the HDMI ports are modeled using the OF graph
59bindings specified in Documentation/devicetree/bindings/graph.txt.
60
61The following table lists for each supported model the port number
62corresponding to each HDMI output and input.
63
64 Port 0 Port 1
65-----------------------------------------
66 S905 (GXBB) VENC Input TMDS Output
67 S905X (GXL) VENC Input TMDS Output
68 S905D (GXL) VENC Input TMDS Output
69 S912 (GXM) VENC Input TMDS Output
70 S905X2 (G12A) VENC Input TMDS Output
71 S905Y2 (G12A) VENC Input TMDS Output
72 S905D2 (G12A) VENC Input TMDS Output
73
74Example:
75
76hdmi-connector {
77 compatible = "hdmi-connector";
78 type = "a";
79
80 port {
81 hdmi_connector_in: endpoint {
82 remote-endpoint = <&hdmi_tx_tmds_out>;
83 };
84 };
85};
86
87hdmi_tx: hdmi-tx@c883a000 {
88 compatible = "amlogic,meson-gxbb-dw-hdmi", "amlogic,meson-gx-dw-hdmi";
89 reg = <0x0 0xc883a000 0x0 0x1c>;
90 interrupts = <GIC_SPI 57 IRQ_TYPE_EDGE_RISING>;
91 resets = <&reset RESET_HDMITX_CAPB3>,
92 <&reset RESET_HDMI_SYSTEM_RESET>,
93 <&reset RESET_HDMI_TX>;
94 reset-names = "hdmitx_apb", "hdmitx", "hdmitx_phy";
95 clocks = <&clkc CLKID_HDMI_PCLK>,
96 <&clkc CLKID_CLK81>,
97 <&clkc CLKID_GCLK_VENCI_INT0>;
98 clock-names = "isfr", "iahb", "venci";
99 #address-cells = <1>;
100 #size-cells = <0>;
101
102 /* VPU VENC Input */
103 hdmi_tx_venc_port: port@0 {
104 reg = <0>;
105
106 hdmi_tx_in: endpoint {
107 remote-endpoint = <&hdmi_tx_out>;
108 };
109 };
110
111 /* TMDS Output */
112 hdmi_tx_tmds_port: port@1 {
113 reg = <1>;
114
115 hdmi_tx_tmds_out: endpoint {
116 remote-endpoint = <&hdmi_connector_in>;
117 };
118 };
119};
diff --git a/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml b/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml
new file mode 100644
index 000000000000..fb747682006d
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml
@@ -0,0 +1,150 @@
1# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
2# Copyright 2019 BayLibre, SAS
3%YAML 1.2
4---
5$id: "http://devicetree.org/schemas/display/amlogic,meson-dw-hdmi.yaml#"
6$schema: "http://devicetree.org/meta-schemas/core.yaml#"
7
8title: Amlogic specific extensions to the Synopsys Designware HDMI Controller
9
10maintainers:
11 - Neil Armstrong <narmstrong@baylibre.com>
12
13description: |
14 The Amlogic Meson Synopsys Designware Integration is composed of
15 - A Synopsys DesignWare HDMI Controller IP
16 - A TOP control block controlling the Clocks and PHY
17 - A custom HDMI PHY in order to convert video to TMDS signal
18 ___________________________________
19 | HDMI TOP |<= HPD
20 |___________________________________|
21 | | |
22 | Synopsys HDMI | HDMI PHY |=> TMDS
23 | Controller |________________|
24 |___________________________________|<=> DDC
25
26 The HDMI TOP block only supports HPD sensing.
27 The Synopsys HDMI Controller interrupt is routed through the
28 TOP Block interrupt.
29 Communication to the TOP Block and the Synopsys HDMI Controller is done
30 via a pair of dedicated addr+read/write registers.
31 The HDMI PHY is configured by registers in the HHI register block.
32
33 Pixel data arrives in "4:4:4" format from the VENC block and the VPU HDMI mux
34 selects either the ENCI encoder for the 576i or 480i formats or the ENCP
35 encoder for all the other formats including interlaced HD formats.
36
37 The VENC uses a DVI encoder on top of the ENCI or ENCP encoders to generate
38 DVI timings for the HDMI controller.
39
40 Amlogic Meson GXBB, GXL and GXM SoCs families embeds the Synopsys DesignWare
41 HDMI TX IP version 2.01a with HDCP and I2C & S/PDIF
42 audio source interfaces.
43
44properties:
45 compatible:
46 oneOf:
47 - items:
48 - enum:
49 - amlogic,meson-gxbb-dw-hdmi # GXBB (S905)
50 - amlogic,meson-gxl-dw-hdmi # GXL (S905X, S905D)
51 - amlogic,meson-gxm-dw-hdmi # GXM (S912)
52 - const: amlogic,meson-gx-dw-hdmi
53 - enum:
54 - amlogic,meson-g12a-dw-hdmi # G12A (S905X2, S905Y2, S905D2)
55
56 reg:
57 maxItems: 1
58
59 interrupts:
60 maxItems: 1
61
62 clocks:
63 minItems: 3
64
65 clock-names:
66 items:
67 - const: isfr
68 - const: iahb
69 - const: venci
70
71 resets:
72 minItems: 3
73
74 reset-names:
75 items:
76 - const: hdmitx_apb
77 - const: hdmitx
78 - const: hdmitx_phy
79
80 hdmi-supply:
81 description: phandle to an external 5V regulator to power the HDMI logic
82 allOf:
83 - $ref: /schemas/types.yaml#/definitions/phandle
84
85 port@0:
86 type: object
87 description:
88 A port node pointing to the VENC Input port node.
89
90 port@1:
91 type: object
92 description:
93 A port node pointing to the TMDS Output port node.
94
95 "#address-cells":
96 const: 1
97
98 "#size-cells":
99 const: 0
100
101 "#sound-dai-cells":
102 const: 0
103
104required:
105 - compatible
106 - reg
107 - interrupts
108 - clocks
109 - clock-names
110 - resets
111 - reset-names
112 - port@0
113 - port@1
114 - "#address-cells"
115 - "#size-cells"
116
117additionalProperties: false
118
119examples:
120 - |
121 hdmi_tx: hdmi-tx@c883a000 {
122 compatible = "amlogic,meson-gxbb-dw-hdmi", "amlogic,meson-gx-dw-hdmi";
123 reg = <0xc883a000 0x1c>;
124 interrupts = <57>;
125 resets = <&reset_apb>, <&reset_hdmitx>, <&reset_hdmitx_phy>;
126 reset-names = "hdmitx_apb", "hdmitx", "hdmitx_phy";
127 clocks = <&clk_isfr>, <&clk_iahb>, <&clk_venci>;
128 clock-names = "isfr", "iahb", "venci";
129 #address-cells = <1>;
130 #size-cells = <0>;
131
132 /* VPU VENC Input */
133 hdmi_tx_venc_port: port@0 {
134 reg = <0>;
135
136 hdmi_tx_in: endpoint {
137 remote-endpoint = <&hdmi_tx_out>;
138 };
139 };
140
141 /* TMDS Output */
142 hdmi_tx_tmds_port: port@1 {
143 reg = <1>;
144
145 hdmi_tx_tmds_out: endpoint {
146 remote-endpoint = <&hdmi_connector_in>;
147 };
148 };
149 };
150
diff --git a/Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt b/Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt
deleted file mode 100644
index be40a780501c..000000000000
--- a/Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt
+++ /dev/null
@@ -1,121 +0,0 @@
1Amlogic Meson Display Controller
2================================
3
4The Amlogic Meson Display controller is composed of several components
5that are going to be documented below:
6
7DMC|---------------VPU (Video Processing Unit)----------------|------HHI------|
8 | vd1 _______ _____________ _________________ | |
9D |-------| |----| | | | | HDMI PLL |
10D | vd2 | VIU | | Video Post | | Video Encoders |<---|-----VCLK |
11R |-------| |----| Processing | | | | |
12 | osd2 | | | |---| Enci ----------|----|-----VDAC------|
13R |-------| CSC |----| Scalers | | Encp ----------|----|----HDMI-TX----|
14A | osd1 | | | Blenders | | Encl ----------|----|---------------|
15M |-------|______|----|____________| |________________| | |
16___|__________________________________________________________|_______________|
17
18
19VIU: Video Input Unit
20---------------------
21
22The Video Input Unit is in charge of the pixel scanout from the DDR memory.
23It fetches the frames addresses, stride and parameters from the "Canvas" memory.
24This part is also in charge of the CSC (Colorspace Conversion).
25It can handle 2 OSD Planes and 2 Video Planes.
26
27VPP: Video Post Processing
28--------------------------
29
30The Video Post Processing is in charge of the scaling and blending of the
31various planes into a single pixel stream.
32There is a special "pre-blending" used by the video planes with a dedicated
33scaler and a "post-blending" to merge with the OSD Planes.
34The OSD planes also have a dedicated scaler for one of the OSD.
35
36VENC: Video Encoders
37--------------------
38
39The VENC is composed of the multiple pixel encoders :
40 - ENCI : Interlace Video encoder for CVBS and Interlace HDMI
41 - ENCP : Progressive Video Encoder for HDMI
42 - ENCL : LCD LVDS Encoder
43The VENC Unit gets a Pixel Clocks (VCLK) from a dedicated HDMI PLL and clock
44tree and provides the scanout clock to the VPP and VIU.
45The ENCI is connected to a single VDAC for Composite Output.
46The ENCI and ENCP are connected to an on-chip HDMI Transceiver.
47
48Device Tree Bindings:
49---------------------
50
51VPU: Video Processing Unit
52--------------------------
53
54Required properties:
55- compatible: value should be different for each SoC family as :
56 - GXBB (S905) : "amlogic,meson-gxbb-vpu"
57 - GXL (S905X, S905D) : "amlogic,meson-gxl-vpu"
58 - GXM (S912) : "amlogic,meson-gxm-vpu"
59 followed by the common "amlogic,meson-gx-vpu"
60 - G12A (S905X2, S905Y2, S905D2) : "amlogic,meson-g12a-vpu"
61- reg: base address and size of he following memory-mapped regions :
62 - vpu
63 - hhi
64- reg-names: should contain the names of the previous memory regions
65- interrupts: should contain the VENC Vsync interrupt number
66- amlogic,canvas: phandle to canvas provider node as described in the file
67 ../soc/amlogic/amlogic,canvas.txt
68
69Optional properties:
70- power-domains: Optional phandle to associated power domain as described in
71 the file ../power/power_domain.txt
72
73Required nodes:
74
75The connections to the VPU output video ports are modeled using the OF graph
76bindings specified in Documentation/devicetree/bindings/graph.txt.
77
78The following table lists for each supported model the port number
79corresponding to each VPU output.
80
81 Port 0 Port 1
82-----------------------------------------
83 S905 (GXBB) CVBS VDAC HDMI-TX
84 S905X (GXL) CVBS VDAC HDMI-TX
85 S905D (GXL) CVBS VDAC HDMI-TX
86 S912 (GXM) CVBS VDAC HDMI-TX
87 S905X2 (G12A) CVBS VDAC HDMI-TX
88 S905Y2 (G12A) CVBS VDAC HDMI-TX
89 S905D2 (G12A) CVBS VDAC HDMI-TX
90
91Example:
92
93tv-connector {
94 compatible = "composite-video-connector";
95
96 port {
97 tv_connector_in: endpoint {
98 remote-endpoint = <&cvbs_vdac_out>;
99 };
100 };
101};
102
103vpu: vpu@d0100000 {
104 compatible = "amlogic,meson-gxbb-vpu";
105 reg = <0x0 0xd0100000 0x0 0x100000>,
106 <0x0 0xc883c000 0x0 0x1000>,
107 <0x0 0xc8838000 0x0 0x1000>;
108 reg-names = "vpu", "hhi", "dmc";
109 interrupts = <GIC_SPI 3 IRQ_TYPE_EDGE_RISING>;
110 #address-cells = <1>;
111 #size-cells = <0>;
112
113 /* CVBS VDAC output port */
114 port@0 {
115 reg = <0>;
116
117 cvbs_vdac_out: endpoint {
118 remote-endpoint = <&tv_connector_in>;
119 };
120 };
121};
diff --git a/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml b/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml
new file mode 100644
index 000000000000..d1205a6697a0
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml
@@ -0,0 +1,137 @@
1# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
2# Copyright 2019 BayLibre, SAS
3%YAML 1.2
4---
5$id: "http://devicetree.org/schemas/display/amlogic,meson-vpu.yaml#"
6$schema: "http://devicetree.org/meta-schemas/core.yaml#"
7
8title: Amlogic Meson Display Controller
9
10maintainers:
11 - Neil Armstrong <narmstrong@baylibre.com>
12
13description: |
14 The Amlogic Meson Display controller is composed of several components
15 that are going to be documented below
16
17 DMC|---------------VPU (Video Processing Unit)----------------|------HHI------|
18 | vd1 _______ _____________ _________________ | |
19 D |-------| |----| | | | | HDMI PLL |
20 D | vd2 | VIU | | Video Post | | Video Encoders |<---|-----VCLK |
21 R |-------| |----| Processing | | | | |
22 | osd2 | | | |---| Enci ----------|----|-----VDAC------|
23 R |-------| CSC |----| Scalers | | Encp ----------|----|----HDMI-TX----|
24 A | osd1 | | | Blenders | | Encl ----------|----|---------------|
25 M |-------|______|----|____________| |________________| | |
26 ___|__________________________________________________________|_______________|
27
28
29 VIU: Video Input Unit
30 ---------------------
31
32 The Video Input Unit is in charge of the pixel scanout from the DDR memory.
33 It fetches the frames addresses, stride and parameters from the "Canvas" memory.
34 This part is also in charge of the CSC (Colorspace Conversion).
35 It can handle 2 OSD Planes and 2 Video Planes.
36
37 VPP: Video Post Processing
38 --------------------------
39
40 The Video Post Processing is in charge of the scaling and blending of the
41 various planes into a single pixel stream.
42 There is a special "pre-blending" used by the video planes with a dedicated
43 scaler and a "post-blending" to merge with the OSD Planes.
44 The OSD planes also have a dedicated scaler for one of the OSD.
45
46 VENC: Video Encoders
47 --------------------
48
49 The VENC is composed of the multiple pixel encoders
50 - ENCI : Interlace Video encoder for CVBS and Interlace HDMI
51 - ENCP : Progressive Video Encoder for HDMI
52 - ENCL : LCD LVDS Encoder
53 The VENC Unit gets a Pixel Clocks (VCLK) from a dedicated HDMI PLL and clock
54 tree and provides the scanout clock to the VPP and VIU.
55 The ENCI is connected to a single VDAC for Composite Output.
56 The ENCI and ENCP are connected to an on-chip HDMI Transceiver.
57
58properties:
59 compatible:
60 oneOf:
61 - items:
62 - enum:
63 - amlogic,meson-gxbb-vpu # GXBB (S905)
64 - amlogic,meson-gxl-vpu # GXL (S905X, S905D)
65 - amlogic,meson-gxm-vpu # GXM (S912)
66 - const: amlogic,meson-gx-vpu
67 - enum:
68 - amlogic,meson-g12a-vpu # G12A (S905X2, S905Y2, S905D2)
69
70 reg:
71 maxItems: 2
72
73 reg-names:
74 items:
75 - const: vpu
76 - const: hhi
77
78 interrupts:
79 maxItems: 1
80
81 power-domains:
82 maxItems: 1
83 description: phandle to the associated power domain
84
85 port@0:
86 type: object
87 description:
88 A port node pointing to the CVBS VDAC port node.
89
90 port@1:
91 type: object
92 description:
93 A port node pointing to the HDMI-TX port node.
94
95 "#address-cells":
96 const: 1
97
98 "#size-cells":
99 const: 0
100
101required:
102 - compatible
103 - reg
104 - interrupts
105 - port@0
106 - port@1
107 - "#address-cells"
108 - "#size-cells"
109
110examples:
111 - |
112 vpu: vpu@d0100000 {
113 compatible = "amlogic,meson-gxbb-vpu", "amlogic,meson-gx-vpu";
114 reg = <0xd0100000 0x100000>, <0xc883c000 0x1000>;
115 reg-names = "vpu", "hhi";
116 interrupts = <3>;
117 #address-cells = <1>;
118 #size-cells = <0>;
119
120 /* CVBS VDAC output port */
121 port@0 {
122 reg = <0>;
123
124 cvbs_vdac_out: endpoint {
125 remote-endpoint = <&tv_connector_in>;
126 };
127 };
128
129 /* HDMI TX output port */
130 port@1 {
131 reg = <1>;
132
133 hdmi_tx_out: endpoint {
134 remote-endpoint = <&hdmi_tx_in>;
135 };
136 };
137 };
diff --git a/Documentation/devicetree/bindings/display/connector/hdmi-connector.txt b/Documentation/devicetree/bindings/display/connector/hdmi-connector.txt
index 508aee461e0d..aeb07c4bd703 100644
--- a/Documentation/devicetree/bindings/display/connector/hdmi-connector.txt
+++ b/Documentation/devicetree/bindings/display/connector/hdmi-connector.txt
@@ -9,6 +9,7 @@ Optional properties:
 - label: a symbolic name for the connector
 - hpd-gpios: HPD GPIO number
 - ddc-i2c-bus: phandle link to the I2C controller used for DDC EDID probing
+- ddc-en-gpios: signal to enable DDC bus
 
 Required nodes:
 - Video port for HDMI input
diff --git a/Documentation/devicetree/bindings/display/panel/nec,nl8048hl11.yaml b/Documentation/devicetree/bindings/display/panel/nec,nl8048hl11.yaml
new file mode 100644
index 000000000000..aa788eaa2f71
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/nec,nl8048hl11.yaml
@@ -0,0 +1,62 @@
1# SPDX-License-Identifier: GPL-2.0
2%YAML 1.2
3---
4$id: http://devicetree.org/schemas/display/panel/nec,nl8048hl11.yaml#
5$schema: http://devicetree.org/meta-schemas/core.yaml#
6
7title: NEC NL8048HL11 4.1" WVGA TFT LCD panel
8
9description:
10 The NEC NL8048HL11 is a 4.1" WVGA TFT LCD panel with a 24-bit RGB parallel
11 data interface and an SPI control interface.
12
13maintainers:
14 - Laurent Pinchart <laurent.pinchart@ideasonboard.com>
15
16allOf:
17 - $ref: panel-common.yaml#
18
19properties:
20 compatible:
21 const: nec,nl8048hl11
22
23 label: true
24 port: true
25 reg: true
26 reset-gpios: true
27
28 spi-max-frequency:
29 maximum: 10000000
30
31required:
32 - compatible
33 - reg
34 - reset-gpios
35 - port
36
37additionalProperties: false
38
39examples:
40 - |
41 #include <dt-bindings/gpio/gpio.h>
42
43 spi0 {
44 #address-cells = <1>;
45 #size-cells = <0>;
46
47 lcd_panel: panel@0 {
48 compatible = "nec,nl8048hl11";
49 reg = <0>;
50 spi-max-frequency = <10000000>;
51
52 reset-gpios = <&gpio7 7 GPIO_ACTIVE_LOW>;
53
54 port {
55 lcd_in: endpoint {
56 remote-endpoint = <&dpi_out>;
57 };
58 };
59 };
60 };
61
62...
diff --git a/Documentation/devicetree/bindings/display/panel/ti,nspire.yaml b/Documentation/devicetree/bindings/display/panel/ti,nspire.yaml
new file mode 100644
index 000000000000..5c5a3b519e31
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/ti,nspire.yaml
@@ -0,0 +1,36 @@
1# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2%YAML 1.2
3---
4$id: http://devicetree.org/schemas/display/panel/ti,nspire.yaml#
5$schema: http://devicetree.org/meta-schemas/core.yaml#
6
7title: Texas Instruments NSPIRE Display Panels
8
9maintainers:
10 - Linus Walleij <linus.walleij@linaro.org>
11
12allOf:
13 - $ref: panel-common.yaml#
14
15properties:
16 compatible:
17 enum:
18 - ti,nspire-cx-lcd-panel
19 - ti,nspire-classic-lcd-panel
20 port: true
21
22required:
23 - compatible
24
25additionalProperties: false
26
27examples:
28 - |
29 panel {
30 compatible = "ti,nspire-cx-lcd-panel";
31 port {
32 panel_in: endpoint {
33 remote-endpoint = <&pads>;
34 };
35 };
36 };
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index 6992bbbbffab..29dcc6f8a64a 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -511,6 +511,8 @@ patternProperties:
     description: Lenovo Group Ltd.
   "^lg,.*":
     description: LG Corporation
+  "^lgphilips,.*":
+    description: LG Display
   "^libretech,.*":
     description: Shenzhen Libre Technology Co., Ltd
   "^licheepi,.*":
@@ -933,6 +935,9 @@ patternProperties:
     description: Tecon Microprocessor Technologies, LLC.
   "^topeet,.*":
     description: Topeet
+  "^toppoly,.*":
+    description: TPO (deprecated, use tpo)
+    deprecated: true
   "^toradex,.*":
     description: Toradex AG
   "^toshiba,.*":
diff --git a/MAINTAINERS b/MAINTAINERS
index 1bd7b9c2d146..c2d975da561f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5334,8 +5334,8 @@ L: linux-amlogic@lists.infradead.org
 W:	http://linux-meson.com/
 S:	Supported
 F:	drivers/gpu/drm/meson/
-F:	Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt
-F:	Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.txt
+F:	Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml
+F:	Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml
 F:	Documentation/gpu/meson.rst
 T:	git git://anongit.freedesktop.org/drm/drm-misc
 
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index e8c7310cb800..dcfb01e7c6f4 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0-only
 obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \
-	 reservation.o seqno-fence.o
+	 dma-resv.o seqno-fence.o
 obj-$(CONFIG_SYNC_FILE) += sync_file.o
 obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o
 obj-$(CONFIG_UDMABUF) += udmabuf.o
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index f45bfb29ef96..433d91d710e4 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -21,7 +21,7 @@
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/seq_file.h> 22#include <linux/seq_file.h>
23#include <linux/poll.h> 23#include <linux/poll.h>
24#include <linux/reservation.h> 24#include <linux/dma-resv.h>
25#include <linux/mm.h> 25#include <linux/mm.h>
26#include <linux/mount.h> 26#include <linux/mount.h>
27#include <linux/pseudo_fs.h> 27#include <linux/pseudo_fs.h>
@@ -104,8 +104,8 @@ static int dma_buf_release(struct inode *inode, struct file *file)
104 list_del(&dmabuf->list_node); 104 list_del(&dmabuf->list_node);
105 mutex_unlock(&db_list.lock); 105 mutex_unlock(&db_list.lock);
106 106
107 if (dmabuf->resv == (struct reservation_object *)&dmabuf[1]) 107 if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
108 reservation_object_fini(dmabuf->resv); 108 dma_resv_fini(dmabuf->resv);
109 109
110 module_put(dmabuf->owner); 110 module_put(dmabuf->owner);
111 kfree(dmabuf); 111 kfree(dmabuf);
@@ -165,7 +165,7 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
165 * To support cross-device and cross-driver synchronization of buffer access 165 * To support cross-device and cross-driver synchronization of buffer access
166 * implicit fences (represented internally in the kernel with &struct fence) can 166 * implicit fences (represented internally in the kernel with &struct fence) can
167 * be attached to a &dma_buf. The glue for that and a few related things are 167 * be attached to a &dma_buf. The glue for that and a few related things are
168 * provided in the &reservation_object structure. 168 * provided in the &dma_resv structure.
169 * 169 *
170 * Userspace can query the state of these implicitly tracked fences using poll() 170 * Userspace can query the state of these implicitly tracked fences using poll()
171 * and related system calls: 171 * and related system calls:
@@ -195,8 +195,8 @@ static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
195static __poll_t dma_buf_poll(struct file *file, poll_table *poll) 195static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
196{ 196{
197 struct dma_buf *dmabuf; 197 struct dma_buf *dmabuf;
198 struct reservation_object *resv; 198 struct dma_resv *resv;
199 struct reservation_object_list *fobj; 199 struct dma_resv_list *fobj;
200 struct dma_fence *fence_excl; 200 struct dma_fence *fence_excl;
201 __poll_t events; 201 __poll_t events;
202 unsigned shared_count, seq; 202 unsigned shared_count, seq;
@@ -506,13 +506,13 @@ err_alloc_file:
506struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) 506struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
507{ 507{
508 struct dma_buf *dmabuf; 508 struct dma_buf *dmabuf;
509 struct reservation_object *resv = exp_info->resv; 509 struct dma_resv *resv = exp_info->resv;
510 struct file *file; 510 struct file *file;
511 size_t alloc_size = sizeof(struct dma_buf); 511 size_t alloc_size = sizeof(struct dma_buf);
512 int ret; 512 int ret;
513 513
514 if (!exp_info->resv) 514 if (!exp_info->resv)
515 alloc_size += sizeof(struct reservation_object); 515 alloc_size += sizeof(struct dma_resv);
516 else 516 else
517 /* prevent &dma_buf[1] == dma_buf->resv */ 517 /* prevent &dma_buf[1] == dma_buf->resv */
518 alloc_size += 1; 518 alloc_size += 1;
@@ -544,8 +544,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
544 dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0; 544 dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
545 545
546 if (!resv) { 546 if (!resv) {
547 resv = (struct reservation_object *)&dmabuf[1]; 547 resv = (struct dma_resv *)&dmabuf[1];
548 reservation_object_init(resv); 548 dma_resv_init(resv);
549 } 549 }
550 dmabuf->resv = resv; 550 dmabuf->resv = resv;
551 551
@@ -909,11 +909,11 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
909{ 909{
910 bool write = (direction == DMA_BIDIRECTIONAL || 910 bool write = (direction == DMA_BIDIRECTIONAL ||
911 direction == DMA_TO_DEVICE); 911 direction == DMA_TO_DEVICE);
912 struct reservation_object *resv = dmabuf->resv; 912 struct dma_resv *resv = dmabuf->resv;
913 long ret; 913 long ret;
914 914
915 /* Wait on any implicit rendering fences */ 915 /* Wait on any implicit rendering fences */
916 ret = reservation_object_wait_timeout_rcu(resv, write, true, 916 ret = dma_resv_wait_timeout_rcu(resv, write, true,
917 MAX_SCHEDULE_TIMEOUT); 917 MAX_SCHEDULE_TIMEOUT);
918 if (ret < 0) 918 if (ret < 0)
919 return ret; 919 return ret;
@@ -1154,8 +1154,8 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
1154 int ret; 1154 int ret;
1155 struct dma_buf *buf_obj; 1155 struct dma_buf *buf_obj;
1156 struct dma_buf_attachment *attach_obj; 1156 struct dma_buf_attachment *attach_obj;
1157 struct reservation_object *robj; 1157 struct dma_resv *robj;
1158 struct reservation_object_list *fobj; 1158 struct dma_resv_list *fobj;
1159 struct dma_fence *fence; 1159 struct dma_fence *fence;
1160 unsigned seq; 1160 unsigned seq;
1161 int count = 0, attach_count, shared_count, i; 1161 int count = 0, attach_count, shared_count, i;
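The dma-buf core keeps waiting on implicit fences through the reservation object, now under the dma_resv_* names; __dma_buf_begin_cpu_access() above is the canonical user. A minimal sketch of the same wait pattern in a hypothetical helper (my_wait_for_idle() is not part of this diff):

#include <linux/dma-resv.h>
#include <linux/sched.h>

/* Wait for implicit fences before touching the buffer from the CPU. */
static int my_wait_for_idle(struct dma_resv *resv, bool write)
{
	long ret;

	/* wait_all == write: a writer must also wait for all shared fences */
	ret = dma_resv_wait_timeout_rcu(resv, write, true,
					MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;

	return 0;
}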
diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c
index 12c6f64c0bc2..d3fbd950be94 100644
--- a/drivers/dma-buf/dma-fence-array.c
+++ b/drivers/dma-buf/dma-fence-array.c
@@ -13,6 +13,8 @@
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <linux/dma-fence-array.h> 14#include <linux/dma-fence-array.h>
15 15
16#define PENDING_ERROR 1
17
16static const char *dma_fence_array_get_driver_name(struct dma_fence *fence) 18static const char *dma_fence_array_get_driver_name(struct dma_fence *fence)
17{ 19{
18 return "dma_fence_array"; 20 return "dma_fence_array";
@@ -23,10 +25,29 @@ static const char *dma_fence_array_get_timeline_name(struct dma_fence *fence)
23 return "unbound"; 25 return "unbound";
24} 26}
25 27
28static void dma_fence_array_set_pending_error(struct dma_fence_array *array,
29 int error)
30{
31 /*
32 * Propagate the first error reported by any of our fences, but only
33 * before we ourselves are signaled.
34 */
35 if (error)
36 cmpxchg(&array->base.error, PENDING_ERROR, error);
37}
38
39static void dma_fence_array_clear_pending_error(struct dma_fence_array *array)
40{
41 /* Clear the error flag if not actually set. */
42 cmpxchg(&array->base.error, PENDING_ERROR, 0);
43}
44
26static void irq_dma_fence_array_work(struct irq_work *wrk) 45static void irq_dma_fence_array_work(struct irq_work *wrk)
27{ 46{
28 struct dma_fence_array *array = container_of(wrk, typeof(*array), work); 47 struct dma_fence_array *array = container_of(wrk, typeof(*array), work);
29 48
49 dma_fence_array_clear_pending_error(array);
50
30 dma_fence_signal(&array->base); 51 dma_fence_signal(&array->base);
31 dma_fence_put(&array->base); 52 dma_fence_put(&array->base);
32} 53}
@@ -38,6 +59,8 @@ static void dma_fence_array_cb_func(struct dma_fence *f,
38 container_of(cb, struct dma_fence_array_cb, cb); 59 container_of(cb, struct dma_fence_array_cb, cb);
39 struct dma_fence_array *array = array_cb->array; 60 struct dma_fence_array *array = array_cb->array;
40 61
62 dma_fence_array_set_pending_error(array, f->error);
63
41 if (atomic_dec_and_test(&array->num_pending)) 64 if (atomic_dec_and_test(&array->num_pending))
42 irq_work_queue(&array->work); 65 irq_work_queue(&array->work);
43 else 66 else
@@ -63,9 +86,14 @@ static bool dma_fence_array_enable_signaling(struct dma_fence *fence)
63 dma_fence_get(&array->base); 86 dma_fence_get(&array->base);
64 if (dma_fence_add_callback(array->fences[i], &cb[i].cb, 87 if (dma_fence_add_callback(array->fences[i], &cb[i].cb,
65 dma_fence_array_cb_func)) { 88 dma_fence_array_cb_func)) {
89 int error = array->fences[i]->error;
90
91 dma_fence_array_set_pending_error(array, error);
66 dma_fence_put(&array->base); 92 dma_fence_put(&array->base);
67 if (atomic_dec_and_test(&array->num_pending)) 93 if (atomic_dec_and_test(&array->num_pending)) {
94 dma_fence_array_clear_pending_error(array);
68 return false; 95 return false;
96 }
69 } 97 }
70 } 98 }
71 99
@@ -142,6 +170,8 @@ struct dma_fence_array *dma_fence_array_create(int num_fences,
142 atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences); 170 atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
143 array->fences = fences; 171 array->fences = fences;
144 172
173 array->base.error = PENDING_ERROR;
174
145 return array; 175 return array;
146} 176}
147EXPORT_SYMBOL(dma_fence_array_create); 177EXPORT_SYMBOL(dma_fence_array_create);
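With the change above, a dma_fence_array starts out with its error field set to the PENDING_ERROR placeholder, latches the first error reported by any member fence, and clears the placeholder again if every member signals cleanly. A sketch of building such an array from two fences; my_merge_fences() is a hypothetical helper and the context/seqno values are arbitrary:

#include <linux/dma-fence-array.h>
#include <linux/slab.h>

static struct dma_fence *my_merge_fences(struct dma_fence *a,
					 struct dma_fence *b)
{
	struct dma_fence **fences;
	struct dma_fence_array *array;

	fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
	if (!fences)
		return NULL;

	fences[0] = dma_fence_get(a);
	fences[1] = dma_fence_get(b);

	/* the array takes ownership of fences[] and the references on success */
	array = dma_fence_array_create(2, fences,
				       dma_fence_context_alloc(1), 1, false);
	if (!array) {
		dma_fence_put(a);
		dma_fence_put(b);
		kfree(fences);
		return NULL;
	}

	/* array->base.error now reports the first member error, if any */
	return &array->base;
}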
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 59ac96ec7ba8..2c136aee3e79 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -60,7 +60,7 @@ static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(1);
60 * 60 *
61 * - Then there's also implicit fencing, where the synchronization points are 61 * - Then there's also implicit fencing, where the synchronization points are
62 * implicitly passed around as part of shared &dma_buf instances. Such 62 * implicitly passed around as part of shared &dma_buf instances. Such
63 * implicit fences are stored in &struct reservation_object through the 63 * implicit fences are stored in &struct dma_resv through the
64 * &dma_buf.resv pointer. 64 * &dma_buf.resv pointer.
65 */ 65 */
66 66
@@ -129,31 +129,27 @@ EXPORT_SYMBOL(dma_fence_context_alloc);
129int dma_fence_signal_locked(struct dma_fence *fence) 129int dma_fence_signal_locked(struct dma_fence *fence)
130{ 130{
131 struct dma_fence_cb *cur, *tmp; 131 struct dma_fence_cb *cur, *tmp;
132 int ret = 0; 132 struct list_head cb_list;
133 133
134 lockdep_assert_held(fence->lock); 134 lockdep_assert_held(fence->lock);
135 135
136 if (WARN_ON(!fence)) 136 if (unlikely(test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
137 &fence->flags)))
137 return -EINVAL; 138 return -EINVAL;
138 139
139 if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { 140 /* Stash the cb_list before replacing it with the timestamp */
140 ret = -EINVAL; 141 list_replace(&fence->cb_list, &cb_list);
141 142
142 /* 143 fence->timestamp = ktime_get();
143 * we might have raced with the unlocked dma_fence_signal, 144 set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
144 * still run through all callbacks 145 trace_dma_fence_signaled(fence);
145 */
146 } else {
147 fence->timestamp = ktime_get();
148 set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
149 trace_dma_fence_signaled(fence);
150 }
151 146
152 list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) { 147 list_for_each_entry_safe(cur, tmp, &cb_list, node) {
153 list_del_init(&cur->node); 148 INIT_LIST_HEAD(&cur->node);
154 cur->func(fence, cur); 149 cur->func(fence, cur);
155 } 150 }
156 return ret; 151
152 return 0;
157} 153}
158EXPORT_SYMBOL(dma_fence_signal_locked); 154EXPORT_SYMBOL(dma_fence_signal_locked);
159 155
@@ -173,28 +169,16 @@ EXPORT_SYMBOL(dma_fence_signal_locked);
173int dma_fence_signal(struct dma_fence *fence) 169int dma_fence_signal(struct dma_fence *fence)
174{ 170{
175 unsigned long flags; 171 unsigned long flags;
172 int ret;
176 173
177 if (!fence) 174 if (!fence)
178 return -EINVAL; 175 return -EINVAL;
179 176
180 if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) 177 spin_lock_irqsave(fence->lock, flags);
181 return -EINVAL; 178 ret = dma_fence_signal_locked(fence);
182 179 spin_unlock_irqrestore(fence->lock, flags);
183 fence->timestamp = ktime_get();
184 set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
185 trace_dma_fence_signaled(fence);
186
187 if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
188 struct dma_fence_cb *cur, *tmp;
189 180
190 spin_lock_irqsave(fence->lock, flags); 181 return ret;
191 list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
192 list_del_init(&cur->node);
193 cur->func(fence, cur);
194 }
195 spin_unlock_irqrestore(fence->lock, flags);
196 }
197 return 0;
198} 182}
199EXPORT_SYMBOL(dma_fence_signal); 183EXPORT_SYMBOL(dma_fence_signal);
200 184
@@ -248,7 +232,8 @@ void dma_fence_release(struct kref *kref)
248 232
249 trace_dma_fence_destroy(fence); 233 trace_dma_fence_destroy(fence);
250 234
251 if (WARN(!list_empty(&fence->cb_list), 235 if (WARN(!list_empty(&fence->cb_list) &&
236 !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags),
252 "Fence %s:%s:%llx:%llx released with pending signals!\n", 237 "Fence %s:%s:%llx:%llx released with pending signals!\n",
253 fence->ops->get_driver_name(fence), 238 fence->ops->get_driver_name(fence),
254 fence->ops->get_timeline_name(fence), 239 fence->ops->get_timeline_name(fence),
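After this rework, dma_fence_signal() is just dma_fence_signal_locked() wrapped in fence->lock, and the callback list is stashed and replaced by the timestamp once the fence signals. A sketch of the two call paths as a driver would use them; my_signal_from_irq(), my_signal() and my_fence_lock are hypothetical, and the fence is assumed to have been initialised with dma_fence_init() against the same lock:

#include <linux/dma-fence.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_fence_lock);

/* Signal from a context that already holds the fence lock, e.g. an IRQ
 * handler whose spinlock was passed to dma_fence_init(). */
static void my_signal_from_irq(struct dma_fence *fence)
{
	unsigned long flags;

	spin_lock_irqsave(&my_fence_lock, flags);
	dma_fence_signal_locked(fence);
	spin_unlock_irqrestore(&my_fence_lock, flags);
}

/* Unlocked variant: after this patch it simply takes fence->lock and calls
 * dma_fence_signal_locked(), so both paths set the timestamp and run the
 * pending callbacks identically. */
static void my_signal(struct dma_fence *fence)
{
	dma_fence_signal(fence);
}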
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/dma-resv.c
index ad6775b32a73..42a8f3f11681 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -32,7 +32,7 @@
32 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> 32 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
33 */ 33 */
34 34
35#include <linux/reservation.h> 35#include <linux/dma-resv.h>
36#include <linux/export.h> 36#include <linux/export.h>
37 37
38/** 38/**
@@ -56,16 +56,15 @@ const char reservation_seqcount_string[] = "reservation_seqcount";
56EXPORT_SYMBOL(reservation_seqcount_string); 56EXPORT_SYMBOL(reservation_seqcount_string);
57 57
58/** 58/**
59 * reservation_object_list_alloc - allocate fence list 59 * dma_resv_list_alloc - allocate fence list
60 * @shared_max: number of fences we need space for 60 * @shared_max: number of fences we need space for
61 * 61 *
62 * Allocate a new reservation_object_list and make sure to correctly initialize 62 * Allocate a new dma_resv_list and make sure to correctly initialize
63 * shared_max. 63 * shared_max.
64 */ 64 */
65static struct reservation_object_list * 65static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max)
66reservation_object_list_alloc(unsigned int shared_max)
67{ 66{
68 struct reservation_object_list *list; 67 struct dma_resv_list *list;
69 68
70 list = kmalloc(offsetof(typeof(*list), shared[shared_max]), GFP_KERNEL); 69 list = kmalloc(offsetof(typeof(*list), shared[shared_max]), GFP_KERNEL);
71 if (!list) 70 if (!list)
@@ -78,12 +77,12 @@ reservation_object_list_alloc(unsigned int shared_max)
78} 77}
79 78
80/** 79/**
81 * reservation_object_list_free - free fence list 80 * dma_resv_list_free - free fence list
82 * @list: list to free 81 * @list: list to free
83 * 82 *
84 * Free a reservation_object_list and make sure to drop all references. 83 * Free a dma_resv_list and make sure to drop all references.
85 */ 84 */
86static void reservation_object_list_free(struct reservation_object_list *list) 85static void dma_resv_list_free(struct dma_resv_list *list)
87{ 86{
88 unsigned int i; 87 unsigned int i;
89 88
@@ -97,10 +96,10 @@ static void reservation_object_list_free(struct reservation_object_list *list)
97} 96}
98 97
99/** 98/**
100 * reservation_object_init - initialize a reservation object 99 * dma_resv_init - initialize a reservation object
101 * @obj: the reservation object 100 * @obj: the reservation object
102 */ 101 */
103void reservation_object_init(struct reservation_object *obj) 102void dma_resv_init(struct dma_resv *obj)
104{ 103{
105 ww_mutex_init(&obj->lock, &reservation_ww_class); 104 ww_mutex_init(&obj->lock, &reservation_ww_class);
106 105
@@ -109,15 +108,15 @@ void reservation_object_init(struct reservation_object *obj)
109 RCU_INIT_POINTER(obj->fence, NULL); 108 RCU_INIT_POINTER(obj->fence, NULL);
110 RCU_INIT_POINTER(obj->fence_excl, NULL); 109 RCU_INIT_POINTER(obj->fence_excl, NULL);
111} 110}
112EXPORT_SYMBOL(reservation_object_init); 111EXPORT_SYMBOL(dma_resv_init);
113 112
114/** 113/**
115 * reservation_object_fini - destroys a reservation object 114 * dma_resv_fini - destroys a reservation object
116 * @obj: the reservation object 115 * @obj: the reservation object
117 */ 116 */
118void reservation_object_fini(struct reservation_object *obj) 117void dma_resv_fini(struct dma_resv *obj)
119{ 118{
120 struct reservation_object_list *fobj; 119 struct dma_resv_list *fobj;
121 struct dma_fence *excl; 120 struct dma_fence *excl;
122 121
123 /* 122 /*
@@ -129,32 +128,31 @@ void reservation_object_fini(struct reservation_object *obj)
129 dma_fence_put(excl); 128 dma_fence_put(excl);
130 129
131 fobj = rcu_dereference_protected(obj->fence, 1); 130 fobj = rcu_dereference_protected(obj->fence, 1);
132 reservation_object_list_free(fobj); 131 dma_resv_list_free(fobj);
133 ww_mutex_destroy(&obj->lock); 132 ww_mutex_destroy(&obj->lock);
134} 133}
135EXPORT_SYMBOL(reservation_object_fini); 134EXPORT_SYMBOL(dma_resv_fini);
136 135
137/** 136/**
138 * reservation_object_reserve_shared - Reserve space to add shared fences to 137 * dma_resv_reserve_shared - Reserve space to add shared fences to
139 * a reservation_object. 138 * a dma_resv.
140 * @obj: reservation object 139 * @obj: reservation object
141 * @num_fences: number of fences we want to add 140 * @num_fences: number of fences we want to add
142 * 141 *
143 * Should be called before reservation_object_add_shared_fence(). Must 142 * Should be called before dma_resv_add_shared_fence(). Must
144 * be called with obj->lock held. 143 * be called with obj->lock held.
145 * 144 *
146 * RETURNS 145 * RETURNS
147 * Zero for success, or -errno 146 * Zero for success, or -errno
148 */ 147 */
149int reservation_object_reserve_shared(struct reservation_object *obj, 148int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
150 unsigned int num_fences)
151{ 149{
152 struct reservation_object_list *old, *new; 150 struct dma_resv_list *old, *new;
153 unsigned int i, j, k, max; 151 unsigned int i, j, k, max;
154 152
155 reservation_object_assert_held(obj); 153 dma_resv_assert_held(obj);
156 154
157 old = reservation_object_get_list(obj); 155 old = dma_resv_get_list(obj);
158 156
159 if (old && old->shared_max) { 157 if (old && old->shared_max) {
160 if ((old->shared_count + num_fences) <= old->shared_max) 158 if ((old->shared_count + num_fences) <= old->shared_max)
@@ -166,7 +164,7 @@ int reservation_object_reserve_shared(struct reservation_object *obj,
166 max = 4; 164 max = 4;
167 } 165 }
168 166
169 new = reservation_object_list_alloc(max); 167 new = dma_resv_list_alloc(max);
170 if (!new) 168 if (!new)
171 return -ENOMEM; 169 return -ENOMEM;
172 170
@@ -180,7 +178,7 @@ int reservation_object_reserve_shared(struct reservation_object *obj,
180 struct dma_fence *fence; 178 struct dma_fence *fence;
181 179
182 fence = rcu_dereference_protected(old->shared[i], 180 fence = rcu_dereference_protected(old->shared[i],
183 reservation_object_held(obj)); 181 dma_resv_held(obj));
184 if (dma_fence_is_signaled(fence)) 182 if (dma_fence_is_signaled(fence))
185 RCU_INIT_POINTER(new->shared[--k], fence); 183 RCU_INIT_POINTER(new->shared[--k], fence);
186 else 184 else
@@ -206,35 +204,34 @@ int reservation_object_reserve_shared(struct reservation_object *obj,
206 struct dma_fence *fence; 204 struct dma_fence *fence;
207 205
208 fence = rcu_dereference_protected(new->shared[i], 206 fence = rcu_dereference_protected(new->shared[i],
209 reservation_object_held(obj)); 207 dma_resv_held(obj));
210 dma_fence_put(fence); 208 dma_fence_put(fence);
211 } 209 }
212 kfree_rcu(old, rcu); 210 kfree_rcu(old, rcu);
213 211
214 return 0; 212 return 0;
215} 213}
216EXPORT_SYMBOL(reservation_object_reserve_shared); 214EXPORT_SYMBOL(dma_resv_reserve_shared);
217 215
218/** 216/**
219 * reservation_object_add_shared_fence - Add a fence to a shared slot 217 * dma_resv_add_shared_fence - Add a fence to a shared slot
220 * @obj: the reservation object 218 * @obj: the reservation object
221 * @fence: the shared fence to add 219 * @fence: the shared fence to add
222 * 220 *
223 * Add a fence to a shared slot, obj->lock must be held, and 221 * Add a fence to a shared slot, obj->lock must be held, and
224 * reservation_object_reserve_shared() has been called. 222 * dma_resv_reserve_shared() has been called.
225 */ 223 */
226void reservation_object_add_shared_fence(struct reservation_object *obj, 224void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
227 struct dma_fence *fence)
228{ 225{
229 struct reservation_object_list *fobj; 226 struct dma_resv_list *fobj;
230 struct dma_fence *old; 227 struct dma_fence *old;
231 unsigned int i, count; 228 unsigned int i, count;
232 229
233 dma_fence_get(fence); 230 dma_fence_get(fence);
234 231
235 reservation_object_assert_held(obj); 232 dma_resv_assert_held(obj);
236 233
237 fobj = reservation_object_get_list(obj); 234 fobj = dma_resv_get_list(obj);
238 count = fobj->shared_count; 235 count = fobj->shared_count;
239 236
240 preempt_disable(); 237 preempt_disable();
@@ -243,7 +240,7 @@ void reservation_object_add_shared_fence(struct reservation_object *obj,
243 for (i = 0; i < count; ++i) { 240 for (i = 0; i < count; ++i) {
244 241
245 old = rcu_dereference_protected(fobj->shared[i], 242 old = rcu_dereference_protected(fobj->shared[i],
246 reservation_object_held(obj)); 243 dma_resv_held(obj));
247 if (old->context == fence->context || 244 if (old->context == fence->context ||
248 dma_fence_is_signaled(old)) 245 dma_fence_is_signaled(old))
249 goto replace; 246 goto replace;
@@ -262,25 +259,24 @@ replace:
262 preempt_enable(); 259 preempt_enable();
263 dma_fence_put(old); 260 dma_fence_put(old);
264} 261}
265EXPORT_SYMBOL(reservation_object_add_shared_fence); 262EXPORT_SYMBOL(dma_resv_add_shared_fence);
266 263
267/** 264/**
268 * reservation_object_add_excl_fence - Add an exclusive fence. 265 * dma_resv_add_excl_fence - Add an exclusive fence.
269 * @obj: the reservation object 266 * @obj: the reservation object
270 * @fence: the shared fence to add 267 * @fence: the shared fence to add
271 * 268 *
272 * Add a fence to the exclusive slot. The obj->lock must be held. 269 * Add a fence to the exclusive slot. The obj->lock must be held.
273 */ 270 */
274void reservation_object_add_excl_fence(struct reservation_object *obj, 271void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
275 struct dma_fence *fence)
276{ 272{
277 struct dma_fence *old_fence = reservation_object_get_excl(obj); 273 struct dma_fence *old_fence = dma_resv_get_excl(obj);
278 struct reservation_object_list *old; 274 struct dma_resv_list *old;
279 u32 i = 0; 275 u32 i = 0;
280 276
281 reservation_object_assert_held(obj); 277 dma_resv_assert_held(obj);
282 278
283 old = reservation_object_get_list(obj); 279 old = dma_resv_get_list(obj);
284 if (old) 280 if (old)
285 i = old->shared_count; 281 i = old->shared_count;
286 282
@@ -299,27 +295,26 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
299 /* inplace update, no shared fences */ 295 /* inplace update, no shared fences */
300 while (i--) 296 while (i--)
301 dma_fence_put(rcu_dereference_protected(old->shared[i], 297 dma_fence_put(rcu_dereference_protected(old->shared[i],
302 reservation_object_held(obj))); 298 dma_resv_held(obj)));
303 299
304 dma_fence_put(old_fence); 300 dma_fence_put(old_fence);
305} 301}
306EXPORT_SYMBOL(reservation_object_add_excl_fence); 302EXPORT_SYMBOL(dma_resv_add_excl_fence);
307 303
308/** 304/**
309* reservation_object_copy_fences - Copy all fences from src to dst. 305* dma_resv_copy_fences - Copy all fences from src to dst.
310* @dst: the destination reservation object 306* @dst: the destination reservation object
311* @src: the source reservation object 307* @src: the source reservation object
312* 308*
313* Copy all fences from src to dst. dst->lock must be held. 309* Copy all fences from src to dst. dst->lock must be held.
314*/ 310*/
315int reservation_object_copy_fences(struct reservation_object *dst, 311int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
316 struct reservation_object *src)
317{ 312{
318 struct reservation_object_list *src_list, *dst_list; 313 struct dma_resv_list *src_list, *dst_list;
319 struct dma_fence *old, *new; 314 struct dma_fence *old, *new;
320 unsigned i; 315 unsigned i;
321 316
322 reservation_object_assert_held(dst); 317 dma_resv_assert_held(dst);
323 318
324 rcu_read_lock(); 319 rcu_read_lock();
325 src_list = rcu_dereference(src->fence); 320 src_list = rcu_dereference(src->fence);
@@ -330,7 +325,7 @@ retry:
330 325
331 rcu_read_unlock(); 326 rcu_read_unlock();
332 327
333 dst_list = reservation_object_list_alloc(shared_count); 328 dst_list = dma_resv_list_alloc(shared_count);
334 if (!dst_list) 329 if (!dst_list)
335 return -ENOMEM; 330 return -ENOMEM;
336 331
@@ -351,7 +346,7 @@ retry:
351 continue; 346 continue;
352 347
353 if (!dma_fence_get_rcu(fence)) { 348 if (!dma_fence_get_rcu(fence)) {
354 reservation_object_list_free(dst_list); 349 dma_resv_list_free(dst_list);
355 src_list = rcu_dereference(src->fence); 350 src_list = rcu_dereference(src->fence);
356 goto retry; 351 goto retry;
357 } 352 }
@@ -370,8 +365,8 @@ retry:
370 new = dma_fence_get_rcu_safe(&src->fence_excl); 365 new = dma_fence_get_rcu_safe(&src->fence_excl);
371 rcu_read_unlock(); 366 rcu_read_unlock();
372 367
373 src_list = reservation_object_get_list(dst); 368 src_list = dma_resv_get_list(dst);
374 old = reservation_object_get_excl(dst); 369 old = dma_resv_get_excl(dst);
375 370
376 preempt_disable(); 371 preempt_disable();
377 write_seqcount_begin(&dst->seq); 372 write_seqcount_begin(&dst->seq);
@@ -381,15 +376,15 @@ retry:
381 write_seqcount_end(&dst->seq); 376 write_seqcount_end(&dst->seq);
382 preempt_enable(); 377 preempt_enable();
383 378
384 reservation_object_list_free(src_list); 379 dma_resv_list_free(src_list);
385 dma_fence_put(old); 380 dma_fence_put(old);
386 381
387 return 0; 382 return 0;
388} 383}
389EXPORT_SYMBOL(reservation_object_copy_fences); 384EXPORT_SYMBOL(dma_resv_copy_fences);
390 385
391/** 386/**
392 * reservation_object_get_fences_rcu - Get an object's shared and exclusive 387 * dma_resv_get_fences_rcu - Get an object's shared and exclusive
393 * fences without update side lock held 388 * fences without update side lock held
394 * @obj: the reservation object 389 * @obj: the reservation object
395 * @pfence_excl: the returned exclusive fence (or NULL) 390 * @pfence_excl: the returned exclusive fence (or NULL)
@@ -401,10 +396,10 @@ EXPORT_SYMBOL(reservation_object_copy_fences);
401 * exclusive fence is not specified the fence is put into the array of the 396 * exclusive fence is not specified the fence is put into the array of the
402 * shared fences as well. Returns either zero or -ENOMEM. 397 * shared fences as well. Returns either zero or -ENOMEM.
403 */ 398 */
404int reservation_object_get_fences_rcu(struct reservation_object *obj, 399int dma_resv_get_fences_rcu(struct dma_resv *obj,
405 struct dma_fence **pfence_excl, 400 struct dma_fence **pfence_excl,
406 unsigned *pshared_count, 401 unsigned *pshared_count,
407 struct dma_fence ***pshared) 402 struct dma_fence ***pshared)
408{ 403{
409 struct dma_fence **shared = NULL; 404 struct dma_fence **shared = NULL;
410 struct dma_fence *fence_excl; 405 struct dma_fence *fence_excl;
@@ -412,7 +407,7 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
412 int ret = 1; 407 int ret = 1;
413 408
414 do { 409 do {
415 struct reservation_object_list *fobj; 410 struct dma_resv_list *fobj;
416 unsigned int i, seq; 411 unsigned int i, seq;
417 size_t sz = 0; 412 size_t sz = 0;
418 413
@@ -487,10 +482,10 @@ unlock:
487 *pshared = shared; 482 *pshared = shared;
488 return ret; 483 return ret;
489} 484}
490EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu); 485EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu);
491 486
492/** 487/**
493 * reservation_object_wait_timeout_rcu - Wait on reservation's objects 488 * dma_resv_wait_timeout_rcu - Wait on reservation's objects
494 * shared and/or exclusive fences. 489 * shared and/or exclusive fences.
495 * @obj: the reservation object 490 * @obj: the reservation object
496 * @wait_all: if true, wait on all fences, else wait on just exclusive fence 491 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
@@ -501,9 +496,9 @@ EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);
501 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or 496 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
502 * greater than zero on success. 497 * greater than zero on success.
503 */ 498 */
504long reservation_object_wait_timeout_rcu(struct reservation_object *obj, 499long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
505 bool wait_all, bool intr, 500 bool wait_all, bool intr,
506 unsigned long timeout) 501 unsigned long timeout)
507{ 502{
508 struct dma_fence *fence; 503 struct dma_fence *fence;
509 unsigned seq, shared_count; 504 unsigned seq, shared_count;
@@ -531,8 +526,7 @@ retry:
531 } 526 }
532 527
533 if (wait_all) { 528 if (wait_all) {
534 struct reservation_object_list *fobj = 529 struct dma_resv_list *fobj = rcu_dereference(obj->fence);
535 rcu_dereference(obj->fence);
536 530
537 if (fobj) 531 if (fobj)
538 shared_count = fobj->shared_count; 532 shared_count = fobj->shared_count;
@@ -575,11 +569,10 @@ unlock_retry:
575 rcu_read_unlock(); 569 rcu_read_unlock();
576 goto retry; 570 goto retry;
577} 571}
578EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu); 572EXPORT_SYMBOL_GPL(dma_resv_wait_timeout_rcu);
579 573
580 574
581static inline int 575static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
582reservation_object_test_signaled_single(struct dma_fence *passed_fence)
583{ 576{
584 struct dma_fence *fence, *lfence = passed_fence; 577 struct dma_fence *fence, *lfence = passed_fence;
585 int ret = 1; 578 int ret = 1;
@@ -596,7 +589,7 @@ reservation_object_test_signaled_single(struct dma_fence *passed_fence)
596} 589}
597 590
598/** 591/**
599 * reservation_object_test_signaled_rcu - Test if a reservation object's 592 * dma_resv_test_signaled_rcu - Test if a reservation object's
600 * fences have been signaled. 593 * fences have been signaled.
601 * @obj: the reservation object 594 * @obj: the reservation object
602 * @test_all: if true, test all fences, otherwise only test the exclusive 595 * @test_all: if true, test all fences, otherwise only test the exclusive
@@ -605,8 +598,7 @@ reservation_object_test_signaled_single(struct dma_fence *passed_fence)
605 * RETURNS 598 * RETURNS
606 * true if all fences signaled, else false 599 * true if all fences signaled, else false
607 */ 600 */
608bool reservation_object_test_signaled_rcu(struct reservation_object *obj, 601bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
609 bool test_all)
610{ 602{
611 unsigned seq, shared_count; 603 unsigned seq, shared_count;
612 int ret; 604 int ret;
@@ -620,8 +612,7 @@ retry:
620 if (test_all) { 612 if (test_all) {
621 unsigned i; 613 unsigned i;
622 614
623 struct reservation_object_list *fobj = 615 struct dma_resv_list *fobj = rcu_dereference(obj->fence);
624 rcu_dereference(obj->fence);
625 616
626 if (fobj) 617 if (fobj)
627 shared_count = fobj->shared_count; 618 shared_count = fobj->shared_count;
@@ -629,7 +620,7 @@ retry:
629 for (i = 0; i < shared_count; ++i) { 620 for (i = 0; i < shared_count; ++i) {
630 struct dma_fence *fence = rcu_dereference(fobj->shared[i]); 621 struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
631 622
632 ret = reservation_object_test_signaled_single(fence); 623 ret = dma_resv_test_signaled_single(fence);
633 if (ret < 0) 624 if (ret < 0)
634 goto retry; 625 goto retry;
635 else if (!ret) 626 else if (!ret)
@@ -644,8 +635,7 @@ retry:
644 struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl); 635 struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);
645 636
646 if (fence_excl) { 637 if (fence_excl) {
647 ret = reservation_object_test_signaled_single( 638 ret = dma_resv_test_signaled_single(fence_excl);
648 fence_excl);
649 if (ret < 0) 639 if (ret < 0)
650 goto retry; 640 goto retry;
651 641
@@ -657,4 +647,4 @@ retry:
657 rcu_read_unlock(); 647 rcu_read_unlock();
658 return ret; 648 return ret;
659} 649}
660EXPORT_SYMBOL_GPL(reservation_object_test_signaled_rcu); 650EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu);
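The bulk of the dma-resv.c hunks above is a mechanical rename of the reservation_object API to dma_resv. As a minimal caller-side sketch (not part of this patch; example_publish_fence and its arguments are hypothetical), a driver now publishes a fence through the renamed entry points roughly like this:

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>

/* Hedged sketch of a caller publishing a fence through the renamed API. */
static int example_publish_fence(struct dma_resv *resv,
				 struct dma_fence *fence, bool shared)
{
	int ret = dma_resv_lock(resv, NULL);	/* was reservation_object_lock() */

	if (ret)
		return ret;

	if (shared) {
		/* a shared slot must be reserved before adding the fence */
		ret = dma_resv_reserve_shared(resv, 1);
		if (!ret)
			dma_resv_add_shared_fence(resv, fence);
	} else {
		dma_resv_add_excl_fence(resv, fence);
	}

	dma_resv_unlock(resv);
	return ret;
}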
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 051f6c2873c7..6713cfb1995c 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -132,17 +132,14 @@ static void timeline_fence_release(struct dma_fence *fence)
132{ 132{
133 struct sync_pt *pt = dma_fence_to_sync_pt(fence); 133 struct sync_pt *pt = dma_fence_to_sync_pt(fence);
134 struct sync_timeline *parent = dma_fence_parent(fence); 134 struct sync_timeline *parent = dma_fence_parent(fence);
135 unsigned long flags;
135 136
137 spin_lock_irqsave(fence->lock, flags);
136 if (!list_empty(&pt->link)) { 138 if (!list_empty(&pt->link)) {
137 unsigned long flags; 139 list_del(&pt->link);
138 140 rb_erase(&pt->node, &parent->pt_tree);
139 spin_lock_irqsave(fence->lock, flags);
140 if (!list_empty(&pt->link)) {
141 list_del(&pt->link);
142 rb_erase(&pt->node, &parent->pt_tree);
143 }
144 spin_unlock_irqrestore(fence->lock, flags);
145 } 141 }
142 spin_unlock_irqrestore(fence->lock, flags);
146 143
147 sync_timeline_put(parent); 144 sync_timeline_put(parent);
148 dma_fence_free(fence); 145 dma_fence_free(fence);
@@ -265,7 +262,8 @@ static struct sync_pt *sync_pt_create(struct sync_timeline *obj,
265 p = &parent->rb_left; 262 p = &parent->rb_left;
266 } else { 263 } else {
267 if (dma_fence_get_rcu(&other->base)) { 264 if (dma_fence_get_rcu(&other->base)) {
268 dma_fence_put(&pt->base); 265 sync_timeline_put(obj);
266 kfree(pt);
269 pt = other; 267 pt = other;
270 goto unlock; 268 goto unlock;
271 } 269 }
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index ee4d1a96d779..25c5c071645b 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -419,7 +419,7 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
419 * info->num_fences. 419 * info->num_fences.
420 */ 420 */
421 if (!info.num_fences) { 421 if (!info.num_fences) {
422 info.status = dma_fence_is_signaled(sync_file->fence); 422 info.status = dma_fence_get_status(sync_file->fence);
423 goto no_fences; 423 goto no_fences;
424 } else { 424 } else {
425 info.status = 1; 425 info.status = 1;
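The sync_file change reports dma_fence_get_status() instead of dma_fence_is_signaled(), so an error recorded on the fence reaches userspace as a negative status rather than being folded into a boolean. A hedged sketch of the difference (example_report_status is hypothetical):

#include <linux/dma-fence.h>

/* Hedged sketch: get_status() distinguishes error, pending and signaled. */
static int example_report_status(struct dma_fence *fence)
{
	int status = dma_fence_get_status(fence);

	if (status < 0)
		return status;		/* fence signaled with an error */
	return status;			/* 0 = still pending, 1 = signaled OK */
}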
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index e0c47ae52fc1..42b936b6bbf1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -218,14 +218,14 @@ void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
218static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo, 218static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
219 struct amdgpu_amdkfd_fence *ef) 219 struct amdgpu_amdkfd_fence *ef)
220{ 220{
221 struct reservation_object *resv = bo->tbo.base.resv; 221 struct dma_resv *resv = bo->tbo.base.resv;
222 struct reservation_object_list *old, *new; 222 struct dma_resv_list *old, *new;
223 unsigned int i, j, k; 223 unsigned int i, j, k;
224 224
225 if (!ef) 225 if (!ef)
226 return -EINVAL; 226 return -EINVAL;
227 227
228 old = reservation_object_get_list(resv); 228 old = dma_resv_get_list(resv);
229 if (!old) 229 if (!old)
230 return 0; 230 return 0;
231 231
@@ -241,7 +241,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
241 struct dma_fence *f; 241 struct dma_fence *f;
242 242
243 f = rcu_dereference_protected(old->shared[i], 243 f = rcu_dereference_protected(old->shared[i],
244 reservation_object_held(resv)); 244 dma_resv_held(resv));
245 245
246 if (f->context == ef->base.context) 246 if (f->context == ef->base.context)
247 RCU_INIT_POINTER(new->shared[--j], f); 247 RCU_INIT_POINTER(new->shared[--j], f);
@@ -263,7 +263,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
263 struct dma_fence *f; 263 struct dma_fence *f;
264 264
265 f = rcu_dereference_protected(new->shared[i], 265 f = rcu_dereference_protected(new->shared[i],
266 reservation_object_held(resv)); 266 dma_resv_held(resv));
267 dma_fence_put(f); 267 dma_fence_put(f);
268 } 268 }
269 kfree_rcu(old, rcu); 269 kfree_rcu(old, rcu);
@@ -887,7 +887,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
887 AMDGPU_FENCE_OWNER_KFD, false); 887 AMDGPU_FENCE_OWNER_KFD, false);
888 if (ret) 888 if (ret)
889 goto wait_pd_fail; 889 goto wait_pd_fail;
890 ret = reservation_object_reserve_shared(vm->root.base.bo->tbo.base.resv, 1); 890 ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
891 if (ret) 891 if (ret)
892 goto reserve_shared_fail; 892 goto reserve_shared_fail;
893 amdgpu_bo_fence(vm->root.base.bo, 893 amdgpu_bo_fence(vm->root.base.bo,
@@ -2133,7 +2133,7 @@ int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem
2133 * Add process eviction fence to bo so they can 2133 * Add process eviction fence to bo so they can
2134 * evict each other. 2134 * evict each other.
2135 */ 2135 */
2136 ret = reservation_object_reserve_shared(gws_bo->tbo.base.resv, 1); 2136 ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
2137 if (ret) 2137 if (ret)
2138 goto reserve_shared_fail; 2138 goto reserve_shared_fail;
2139 amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true); 2139 amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
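Several of the amdgpu conversions in this series share one access pattern: take the shared list with dma_resv_get_list() and dereference each slot with dma_resv_held() as the lockdep condition while the lock is held. A minimal sketch of that pattern (example_walk_shared is not from the patch):

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>

/* Hedged sketch: iterate shared fences while the resv lock is held. */
static void example_walk_shared(struct dma_resv *resv)
{
	struct dma_resv_list *list;
	unsigned int i;

	dma_resv_assert_held(resv);

	list = dma_resv_get_list(resv);
	for (i = 0; list && i < list->shared_count; ++i) {
		struct dma_fence *f =
			rcu_dereference_protected(list->shared[i],
						  dma_resv_held(resv));

		pr_debug("fence %llu:%llu signaled=%d\n",
			 f->context, f->seqno, dma_fence_is_signaled(f));
	}
}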
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 9ccf32c5456a..8c50be56f458 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -730,7 +730,7 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
730 730
731 list_for_each_entry(e, &p->validated, tv.head) { 731 list_for_each_entry(e, &p->validated, tv.head) {
732 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); 732 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
733 struct reservation_object *resv = bo->tbo.base.resv; 733 struct dma_resv *resv = bo->tbo.base.resv;
734 734
735 r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp, 735 r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
736 amdgpu_bo_explicit_sync(bo)); 736 amdgpu_bo_explicit_sync(bo));
@@ -1727,7 +1727,7 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
1727 *map = mapping; 1727 *map = mapping;
1728 1728
1729 /* Double check that the BO is reserved by this CS */ 1729 /* Double check that the BO is reserved by this CS */
1730 if (reservation_object_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket) 1730 if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket)
1731 return -EINVAL; 1731 return -EINVAL;
1732 1732
1733 if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) { 1733 if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index f453e277ed24..1d4aaa9580f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -205,7 +205,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
205 goto unpin; 205 goto unpin;
206 } 206 }
207 207
208 r = reservation_object_get_fences_rcu(new_abo->tbo.base.resv, &work->excl, 208 r = dma_resv_get_fences_rcu(new_abo->tbo.base.resv, &work->excl,
209 &work->shared_count, 209 &work->shared_count,
210 &work->shared); 210 &work->shared);
211 if (unlikely(r != 0)) { 211 if (unlikely(r != 0)) {
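The page-flip path now snapshots fences with dma_resv_get_fences_rcu(). A hedged sketch of the call and the cleanup it obliges the caller to perform (names are hypothetical, not from the patch):

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>
#include <linux/slab.h>

/* Hedged sketch: snapshot exclusive + shared fences, then drop them. */
static int example_snapshot_fences(struct dma_resv *resv)
{
	struct dma_fence *excl, **shared;
	unsigned int count, i;
	int r;

	r = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
	if (r)
		return r;

	/* ... use excl and shared[0..count-1] here ... */

	dma_fence_put(excl);		/* may be NULL; dma_fence_put() accepts that */
	for (i = 0; i < count; ++i)
		dma_fence_put(shared[i]);
	kfree(shared);
	return 0;
}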
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index f0db7ddcb61b..61f108ec2b5c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -137,23 +137,23 @@ int amdgpu_gem_prime_mmap(struct drm_gem_object *obj,
137} 137}
138 138
139static int 139static int
140__reservation_object_make_exclusive(struct reservation_object *obj) 140__dma_resv_make_exclusive(struct dma_resv *obj)
141{ 141{
142 struct dma_fence **fences; 142 struct dma_fence **fences;
143 unsigned int count; 143 unsigned int count;
144 int r; 144 int r;
145 145
146 if (!reservation_object_get_list(obj)) /* no shared fences to convert */ 146 if (!dma_resv_get_list(obj)) /* no shared fences to convert */
147 return 0; 147 return 0;
148 148
149 r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences); 149 r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences);
150 if (r) 150 if (r)
151 return r; 151 return r;
152 152
153 if (count == 0) { 153 if (count == 0) {
154 /* Now that was unexpected. */ 154 /* Now that was unexpected. */
155 } else if (count == 1) { 155 } else if (count == 1) {
156 reservation_object_add_excl_fence(obj, fences[0]); 156 dma_resv_add_excl_fence(obj, fences[0]);
157 dma_fence_put(fences[0]); 157 dma_fence_put(fences[0]);
158 kfree(fences); 158 kfree(fences);
159 } else { 159 } else {
@@ -165,7 +165,7 @@ __reservation_object_make_exclusive(struct reservation_object *obj)
165 if (!array) 165 if (!array)
166 goto err_fences_put; 166 goto err_fences_put;
167 167
168 reservation_object_add_excl_fence(obj, &array->base); 168 dma_resv_add_excl_fence(obj, &array->base);
169 dma_fence_put(&array->base); 169 dma_fence_put(&array->base);
170 } 170 }
171 171
@@ -216,7 +216,7 @@ static int amdgpu_dma_buf_map_attach(struct dma_buf *dma_buf,
216 * fences on the reservation object into a single exclusive 216 * fences on the reservation object into a single exclusive
217 * fence. 217 * fence.
218 */ 218 */
219 r = __reservation_object_make_exclusive(bo->tbo.base.resv); 219 r = __dma_resv_make_exclusive(bo->tbo.base.resv);
220 if (r) 220 if (r)
221 goto error_unreserve; 221 goto error_unreserve;
222 } 222 }
@@ -367,7 +367,7 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
367 struct dma_buf_attachment *attach, 367 struct dma_buf_attachment *attach,
368 struct sg_table *sg) 368 struct sg_table *sg)
369{ 369{
370 struct reservation_object *resv = attach->dmabuf->resv; 370 struct dma_resv *resv = attach->dmabuf->resv;
371 struct amdgpu_device *adev = dev->dev_private; 371 struct amdgpu_device *adev = dev->dev_private;
372 struct amdgpu_bo *bo; 372 struct amdgpu_bo *bo;
373 struct amdgpu_bo_param bp; 373 struct amdgpu_bo_param bp;
@@ -380,7 +380,7 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
380 bp.flags = 0; 380 bp.flags = 0;
381 bp.type = ttm_bo_type_sg; 381 bp.type = ttm_bo_type_sg;
382 bp.resv = resv; 382 bp.resv = resv;
383 reservation_object_lock(resv, NULL); 383 dma_resv_lock(resv, NULL);
384 ret = amdgpu_bo_create(adev, &bp, &bo); 384 ret = amdgpu_bo_create(adev, &bp, &bo);
385 if (ret) 385 if (ret)
386 goto error; 386 goto error;
@@ -392,11 +392,11 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
392 if (attach->dmabuf->ops != &amdgpu_dmabuf_ops) 392 if (attach->dmabuf->ops != &amdgpu_dmabuf_ops)
393 bo->prime_shared_count = 1; 393 bo->prime_shared_count = 1;
394 394
395 reservation_object_unlock(resv); 395 dma_resv_unlock(resv);
396 return &bo->tbo.base; 396 return &bo->tbo.base;
397 397
398error: 398error:
399 reservation_object_unlock(resv); 399 dma_resv_unlock(resv);
400 return ERR_PTR(ret); 400 return ERR_PTR(ret);
401} 401}
402 402
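The branch elided from __dma_resv_make_exclusive() above wraps the shared fences in a dma_fence_array before installing it as the exclusive fence. A hedged sketch of that wrapping step, assuming fences/count come from dma_resv_get_fences_rcu() and that the array takes ownership of the fences on success:

#include <linux/dma-fence-array.h>
#include <linux/dma-resv.h>

/* Hedged sketch: collapse N shared fences into one exclusive fence. */
static int example_install_fence_array(struct dma_resv *obj,
				       struct dma_fence **fences,
				       unsigned int count)
{
	struct dma_fence_array *array;

	array = dma_fence_array_create(count, fences,
				       dma_fence_context_alloc(1),
				       0, false);
	if (!array)
		return -ENOMEM;		/* caller still owns @fences on failure */

	dma_resv_add_excl_fence(obj, &array->base);
	dma_fence_put(&array->base);	/* the resv now holds its own reference */
	return 0;
}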
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index d532e3d647ca..b174bd5eb38e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -50,7 +50,7 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj)
50int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, 50int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
51 int alignment, u32 initial_domain, 51 int alignment, u32 initial_domain,
52 u64 flags, enum ttm_bo_type type, 52 u64 flags, enum ttm_bo_type type,
53 struct reservation_object *resv, 53 struct dma_resv *resv,
54 struct drm_gem_object **obj) 54 struct drm_gem_object **obj)
55{ 55{
56 struct amdgpu_bo *bo; 56 struct amdgpu_bo *bo;
@@ -215,7 +215,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
215 union drm_amdgpu_gem_create *args = data; 215 union drm_amdgpu_gem_create *args = data;
216 uint64_t flags = args->in.domain_flags; 216 uint64_t flags = args->in.domain_flags;
217 uint64_t size = args->in.bo_size; 217 uint64_t size = args->in.bo_size;
218 struct reservation_object *resv = NULL; 218 struct dma_resv *resv = NULL;
219 struct drm_gem_object *gobj; 219 struct drm_gem_object *gobj;
220 uint32_t handle; 220 uint32_t handle;
221 int r; 221 int r;
@@ -433,7 +433,7 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
433 return -ENOENT; 433 return -ENOENT;
434 } 434 }
435 robj = gem_to_amdgpu_bo(gobj); 435 robj = gem_to_amdgpu_bo(gobj);
436 ret = reservation_object_wait_timeout_rcu(robj->tbo.base.resv, true, true, 436 ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true,
437 timeout); 437 timeout);
438 438
439 /* ret == 0 means not signaled, 439 /* ret == 0 means not signaled,
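The wait in amdgpu_gem_wait_idle_ioctl() keeps the return convention noted in the truncated comment: dma_resv_wait_timeout_rcu() returns 0 on timeout, a negative value on error, and a positive value (remaining timeout) when all fences signaled. A hedged sketch of mapping that to an ioctl-style result (example_wait_idle is hypothetical):

#include <linux/dma-resv.h>
#include <linux/jiffies.h>

/* Hedged sketch: translate the tri-state return into 0 / -EBUSY / -errno. */
static int example_wait_idle(struct dma_resv *resv, unsigned long timeout)
{
	long ret = dma_resv_wait_timeout_rcu(resv, true /* wait_all */,
					     true /* interruptible */, timeout);

	if (ret < 0)
		return ret;		/* -ERESTARTSYS or another error */
	if (ret == 0)
		return -EBUSY;		/* timed out, fences still pending */
	return 0;			/* all fences signaled in time */
}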
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
index 2f17150e26e1..0b66d2e6b5d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
@@ -47,7 +47,7 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev);
47int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, 47int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
48 int alignment, u32 initial_domain, 48 int alignment, u32 initial_domain,
49 u64 flags, enum ttm_bo_type type, 49 u64 flags, enum ttm_bo_type type,
50 struct reservation_object *resv, 50 struct dma_resv *resv,
51 struct drm_gem_object **obj); 51 struct drm_gem_object **obj);
52 52
53int amdgpu_mode_dumb_create(struct drm_file *file_priv, 53int amdgpu_mode_dumb_create(struct drm_file *file_priv,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 529065b83885..53734da1c2df 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -104,7 +104,7 @@ static void amdgpu_pasid_free_cb(struct dma_fence *fence,
104 * 104 *
105 * Free the pasid only after all the fences in resv are signaled. 105 * Free the pasid only after all the fences in resv are signaled.
106 */ 106 */
107void amdgpu_pasid_free_delayed(struct reservation_object *resv, 107void amdgpu_pasid_free_delayed(struct dma_resv *resv,
108 unsigned int pasid) 108 unsigned int pasid)
109{ 109{
110 struct dma_fence *fence, **fences; 110 struct dma_fence *fence, **fences;
@@ -112,7 +112,7 @@ void amdgpu_pasid_free_delayed(struct reservation_object *resv,
112 unsigned count; 112 unsigned count;
113 int r; 113 int r;
114 114
115 r = reservation_object_get_fences_rcu(resv, NULL, &count, &fences); 115 r = dma_resv_get_fences_rcu(resv, NULL, &count, &fences);
116 if (r) 116 if (r)
117 goto fallback; 117 goto fallback;
118 118
@@ -156,7 +156,7 @@ fallback:
156 /* Not enough memory for the delayed delete, as last resort 156 /* Not enough memory for the delayed delete, as last resort
157 * block for all the fences to complete. 157 * block for all the fences to complete.
158 */ 158 */
159 reservation_object_wait_timeout_rcu(resv, true, false, 159 dma_resv_wait_timeout_rcu(resv, true, false,
160 MAX_SCHEDULE_TIMEOUT); 160 MAX_SCHEDULE_TIMEOUT);
161 amdgpu_pasid_free(pasid); 161 amdgpu_pasid_free(pasid);
162} 162}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
index 7625419f0fc2..8e58325bbca2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
@@ -72,7 +72,7 @@ struct amdgpu_vmid_mgr {
72 72
73int amdgpu_pasid_alloc(unsigned int bits); 73int amdgpu_pasid_alloc(unsigned int bits);
74void amdgpu_pasid_free(unsigned int pasid); 74void amdgpu_pasid_free(unsigned int pasid);
75void amdgpu_pasid_free_delayed(struct reservation_object *resv, 75void amdgpu_pasid_free_delayed(struct dma_resv *resv,
76 unsigned int pasid); 76 unsigned int pasid);
77 77
78bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev, 78bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 50022acc8a81..f1f8cdd695d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -179,7 +179,7 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
179 if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end)) 179 if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
180 continue; 180 continue;
181 181
182 r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv, 182 r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
183 true, false, MAX_SCHEDULE_TIMEOUT); 183 true, false, MAX_SCHEDULE_TIMEOUT);
184 if (r <= 0) 184 if (r <= 0)
185 DRM_ERROR("(%ld) failed to wait for user bo\n", r); 185 DRM_ERROR("(%ld) failed to wait for user bo\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 2d07f16f1789..6ebe61e14f29 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -550,7 +550,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
550 550
551fail_unreserve: 551fail_unreserve:
552 if (!bp->resv) 552 if (!bp->resv)
553 reservation_object_unlock(bo->tbo.base.resv); 553 dma_resv_unlock(bo->tbo.base.resv);
554 amdgpu_bo_unref(&bo); 554 amdgpu_bo_unref(&bo);
555 return r; 555 return r;
556} 556}
@@ -612,13 +612,13 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
612 612
613 if ((flags & AMDGPU_GEM_CREATE_SHADOW) && !(adev->flags & AMD_IS_APU)) { 613 if ((flags & AMDGPU_GEM_CREATE_SHADOW) && !(adev->flags & AMD_IS_APU)) {
614 if (!bp->resv) 614 if (!bp->resv)
615 WARN_ON(reservation_object_lock((*bo_ptr)->tbo.base.resv, 615 WARN_ON(dma_resv_lock((*bo_ptr)->tbo.base.resv,
616 NULL)); 616 NULL));
617 617
618 r = amdgpu_bo_create_shadow(adev, bp->size, *bo_ptr); 618 r = amdgpu_bo_create_shadow(adev, bp->size, *bo_ptr);
619 619
620 if (!bp->resv) 620 if (!bp->resv)
621 reservation_object_unlock((*bo_ptr)->tbo.base.resv); 621 dma_resv_unlock((*bo_ptr)->tbo.base.resv);
622 622
623 if (r) 623 if (r)
624 amdgpu_bo_unref(bo_ptr); 624 amdgpu_bo_unref(bo_ptr);
@@ -715,7 +715,7 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
715 return 0; 715 return 0;
716 } 716 }
717 717
718 r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv, false, false, 718 r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false,
719 MAX_SCHEDULE_TIMEOUT); 719 MAX_SCHEDULE_TIMEOUT);
720 if (r < 0) 720 if (r < 0)
721 return r; 721 return r;
@@ -1093,7 +1093,7 @@ int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
1093 */ 1093 */
1094void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags) 1094void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
1095{ 1095{
1096 reservation_object_assert_held(bo->tbo.base.resv); 1096 dma_resv_assert_held(bo->tbo.base.resv);
1097 1097
1098 if (tiling_flags) 1098 if (tiling_flags)
1099 *tiling_flags = bo->tiling_flags; 1099 *tiling_flags = bo->tiling_flags;
@@ -1242,7 +1242,7 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
1242 !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) 1242 !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
1243 return; 1243 return;
1244 1244
1245 reservation_object_lock(bo->base.resv, NULL); 1245 dma_resv_lock(bo->base.resv, NULL);
1246 1246
1247 r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence); 1247 r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence);
1248 if (!WARN_ON(r)) { 1248 if (!WARN_ON(r)) {
@@ -1250,7 +1250,7 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
1250 dma_fence_put(fence); 1250 dma_fence_put(fence);
1251 } 1251 }
1252 1252
1253 reservation_object_unlock(bo->base.resv); 1253 dma_resv_unlock(bo->base.resv);
1254} 1254}
1255 1255
1256/** 1256/**
@@ -1325,12 +1325,12 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
1325void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence, 1325void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
1326 bool shared) 1326 bool shared)
1327{ 1327{
1328 struct reservation_object *resv = bo->tbo.base.resv; 1328 struct dma_resv *resv = bo->tbo.base.resv;
1329 1329
1330 if (shared) 1330 if (shared)
1331 reservation_object_add_shared_fence(resv, fence); 1331 dma_resv_add_shared_fence(resv, fence);
1332 else 1332 else
1333 reservation_object_add_excl_fence(resv, fence); 1333 dma_resv_add_excl_fence(resv, fence);
1334} 1334}
1335 1335
1336/** 1336/**
@@ -1370,7 +1370,7 @@ int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
1370u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo) 1370u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
1371{ 1371{
1372 WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM); 1372 WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
1373 WARN_ON_ONCE(!reservation_object_is_locked(bo->tbo.base.resv) && 1373 WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
1374 !bo->pin_count && bo->tbo.type != ttm_bo_type_kernel); 1374 !bo->pin_count && bo->tbo.type != ttm_bo_type_kernel);
1375 WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET); 1375 WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
1376 WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM && 1376 WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 05dde0dd04ff..658f4c9779b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -41,7 +41,7 @@ struct amdgpu_bo_param {
41 u32 preferred_domain; 41 u32 preferred_domain;
42 u64 flags; 42 u64 flags;
43 enum ttm_bo_type type; 43 enum ttm_bo_type type;
44 struct reservation_object *resv; 44 struct dma_resv *resv;
45}; 45};
46 46
47/* bo virtual addresses in a vm */ 47/* bo virtual addresses in a vm */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 9828f3c7c655..95e5e93edd18 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -190,10 +190,10 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
190 */ 190 */
191int amdgpu_sync_resv(struct amdgpu_device *adev, 191int amdgpu_sync_resv(struct amdgpu_device *adev,
192 struct amdgpu_sync *sync, 192 struct amdgpu_sync *sync,
193 struct reservation_object *resv, 193 struct dma_resv *resv,
194 void *owner, bool explicit_sync) 194 void *owner, bool explicit_sync)
195{ 195{
196 struct reservation_object_list *flist; 196 struct dma_resv_list *flist;
197 struct dma_fence *f; 197 struct dma_fence *f;
198 void *fence_owner; 198 void *fence_owner;
199 unsigned i; 199 unsigned i;
@@ -203,16 +203,16 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
203 return -EINVAL; 203 return -EINVAL;
204 204
205 /* always sync to the exclusive fence */ 205 /* always sync to the exclusive fence */
206 f = reservation_object_get_excl(resv); 206 f = dma_resv_get_excl(resv);
207 r = amdgpu_sync_fence(adev, sync, f, false); 207 r = amdgpu_sync_fence(adev, sync, f, false);
208 208
209 flist = reservation_object_get_list(resv); 209 flist = dma_resv_get_list(resv);
210 if (!flist || r) 210 if (!flist || r)
211 return r; 211 return r;
212 212
213 for (i = 0; i < flist->shared_count; ++i) { 213 for (i = 0; i < flist->shared_count; ++i) {
214 f = rcu_dereference_protected(flist->shared[i], 214 f = rcu_dereference_protected(flist->shared[i],
215 reservation_object_held(resv)); 215 dma_resv_held(resv));
216 /* We only want to trigger KFD eviction fences on 216 /* We only want to trigger KFD eviction fences on
217 * evict or move jobs. Skip KFD fences otherwise. 217 * evict or move jobs. Skip KFD fences otherwise.
218 */ 218 */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
index 10cf23a57f17..b5f1778a2319 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
@@ -27,7 +27,7 @@
27#include <linux/hashtable.h> 27#include <linux/hashtable.h>
28 28
29struct dma_fence; 29struct dma_fence;
30struct reservation_object; 30struct dma_resv;
31struct amdgpu_device; 31struct amdgpu_device;
32struct amdgpu_ring; 32struct amdgpu_ring;
33 33
@@ -44,7 +44,7 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
44 struct dma_fence *f, bool explicit); 44 struct dma_fence *f, bool explicit);
45int amdgpu_sync_resv(struct amdgpu_device *adev, 45int amdgpu_sync_resv(struct amdgpu_device *adev,
46 struct amdgpu_sync *sync, 46 struct amdgpu_sync *sync,
47 struct reservation_object *resv, 47 struct dma_resv *resv,
48 void *owner, 48 void *owner,
49 bool explicit_sync); 49 bool explicit_sync);
50struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, 50struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 8f8b7a350b8b..3e8f9072561e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -303,7 +303,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
303 struct amdgpu_copy_mem *src, 303 struct amdgpu_copy_mem *src,
304 struct amdgpu_copy_mem *dst, 304 struct amdgpu_copy_mem *dst,
305 uint64_t size, 305 uint64_t size,
306 struct reservation_object *resv, 306 struct dma_resv *resv,
307 struct dma_fence **f) 307 struct dma_fence **f)
308{ 308{
309 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; 309 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
@@ -1486,7 +1486,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
1486{ 1486{
1487 unsigned long num_pages = bo->mem.num_pages; 1487 unsigned long num_pages = bo->mem.num_pages;
1488 struct drm_mm_node *node = bo->mem.mm_node; 1488 struct drm_mm_node *node = bo->mem.mm_node;
1489 struct reservation_object_list *flist; 1489 struct dma_resv_list *flist;
1490 struct dma_fence *f; 1490 struct dma_fence *f;
1491 int i; 1491 int i;
1492 1492
@@ -1494,18 +1494,18 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
1494 * cleanly handle page faults. 1494 * cleanly handle page faults.
1495 */ 1495 */
1496 if (bo->type == ttm_bo_type_kernel && 1496 if (bo->type == ttm_bo_type_kernel &&
1497 !reservation_object_test_signaled_rcu(bo->base.resv, true)) 1497 !dma_resv_test_signaled_rcu(bo->base.resv, true))
1498 return false; 1498 return false;
1499 1499
1500 /* If bo is a KFD BO, check if the bo belongs to the current process. 1500 /* If bo is a KFD BO, check if the bo belongs to the current process.
1501 * If true, then return false as any KFD process needs all its BOs to 1501 * If true, then return false as any KFD process needs all its BOs to
1502 * be resident to run successfully 1502 * be resident to run successfully
1503 */ 1503 */
1504 flist = reservation_object_get_list(bo->base.resv); 1504 flist = dma_resv_get_list(bo->base.resv);
1505 if (flist) { 1505 if (flist) {
1506 for (i = 0; i < flist->shared_count; ++i) { 1506 for (i = 0; i < flist->shared_count; ++i) {
1507 f = rcu_dereference_protected(flist->shared[i], 1507 f = rcu_dereference_protected(flist->shared[i],
1508 reservation_object_held(bo->base.resv)); 1508 dma_resv_held(bo->base.resv));
1509 if (amdkfd_fence_check_mm(f, current->mm)) 1509 if (amdkfd_fence_check_mm(f, current->mm))
1510 return false; 1510 return false;
1511 } 1511 }
@@ -2009,7 +2009,7 @@ error_free:
2009 2009
2010int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, 2010int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
2011 uint64_t dst_offset, uint32_t byte_count, 2011 uint64_t dst_offset, uint32_t byte_count,
2012 struct reservation_object *resv, 2012 struct dma_resv *resv,
2013 struct dma_fence **fence, bool direct_submit, 2013 struct dma_fence **fence, bool direct_submit,
2014 bool vm_needs_flush) 2014 bool vm_needs_flush)
2015{ 2015{
@@ -2083,7 +2083,7 @@ error_free:
2083 2083
2084int amdgpu_fill_buffer(struct amdgpu_bo *bo, 2084int amdgpu_fill_buffer(struct amdgpu_bo *bo,
2085 uint32_t src_data, 2085 uint32_t src_data,
2086 struct reservation_object *resv, 2086 struct dma_resv *resv,
2087 struct dma_fence **fence) 2087 struct dma_fence **fence)
2088{ 2088{
2089 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); 2089 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index bccb8c49e597..0dddedc06ae3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -85,18 +85,18 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,
85 85
86int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, 86int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
87 uint64_t dst_offset, uint32_t byte_count, 87 uint64_t dst_offset, uint32_t byte_count,
88 struct reservation_object *resv, 88 struct dma_resv *resv,
89 struct dma_fence **fence, bool direct_submit, 89 struct dma_fence **fence, bool direct_submit,
90 bool vm_needs_flush); 90 bool vm_needs_flush);
91int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev, 91int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
92 struct amdgpu_copy_mem *src, 92 struct amdgpu_copy_mem *src,
93 struct amdgpu_copy_mem *dst, 93 struct amdgpu_copy_mem *dst,
94 uint64_t size, 94 uint64_t size,
95 struct reservation_object *resv, 95 struct dma_resv *resv,
96 struct dma_fence **f); 96 struct dma_fence **f);
97int amdgpu_fill_buffer(struct amdgpu_bo *bo, 97int amdgpu_fill_buffer(struct amdgpu_bo *bo,
98 uint32_t src_data, 98 uint32_t src_data,
99 struct reservation_object *resv, 99 struct dma_resv *resv,
100 struct dma_fence **fence); 100 struct dma_fence **fence);
101 101
102int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma); 102int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index f858607b17a5..b2c364b8695f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1073,7 +1073,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
1073 ib->length_dw = 16; 1073 ib->length_dw = 16;
1074 1074
1075 if (direct) { 1075 if (direct) {
1076 r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv, 1076 r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
1077 true, false, 1077 true, false,
1078 msecs_to_jiffies(10)); 1078 msecs_to_jiffies(10));
1079 if (r == 0) 1079 if (r == 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index c8244ce184e8..b7665b31a2ca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1702,7 +1702,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
1702 ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm); 1702 ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
1703 pages_addr = ttm->dma_address; 1703 pages_addr = ttm->dma_address;
1704 } 1704 }
1705 exclusive = reservation_object_get_excl(bo->tbo.base.resv); 1705 exclusive = dma_resv_get_excl(bo->tbo.base.resv);
1706 } 1706 }
1707 1707
1708 if (bo) { 1708 if (bo) {
@@ -1879,18 +1879,18 @@ static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
1879 */ 1879 */
1880static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) 1880static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1881{ 1881{
1882 struct reservation_object *resv = vm->root.base.bo->tbo.base.resv; 1882 struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;
1883 struct dma_fence *excl, **shared; 1883 struct dma_fence *excl, **shared;
1884 unsigned i, shared_count; 1884 unsigned i, shared_count;
1885 int r; 1885 int r;
1886 1886
1887 r = reservation_object_get_fences_rcu(resv, &excl, 1887 r = dma_resv_get_fences_rcu(resv, &excl,
1888 &shared_count, &shared); 1888 &shared_count, &shared);
1889 if (r) { 1889 if (r) {
1890 /* Not enough memory to grab the fence list, as last resort 1890 /* Not enough memory to grab the fence list, as last resort
1891 * block for all the fences to complete. 1891 * block for all the fences to complete.
1892 */ 1892 */
1893 reservation_object_wait_timeout_rcu(resv, true, false, 1893 dma_resv_wait_timeout_rcu(resv, true, false,
1894 MAX_SCHEDULE_TIMEOUT); 1894 MAX_SCHEDULE_TIMEOUT);
1895 return; 1895 return;
1896 } 1896 }
@@ -1978,7 +1978,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
1978 struct amdgpu_vm *vm) 1978 struct amdgpu_vm *vm)
1979{ 1979{
1980 struct amdgpu_bo_va *bo_va, *tmp; 1980 struct amdgpu_bo_va *bo_va, *tmp;
1981 struct reservation_object *resv; 1981 struct dma_resv *resv;
1982 bool clear; 1982 bool clear;
1983 int r; 1983 int r;
1984 1984
@@ -1997,7 +1997,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
1997 spin_unlock(&vm->invalidated_lock); 1997 spin_unlock(&vm->invalidated_lock);
1998 1998
1999 /* Try to reserve the BO to avoid clearing its ptes */ 1999 /* Try to reserve the BO to avoid clearing its ptes */
2000 if (!amdgpu_vm_debug && reservation_object_trylock(resv)) 2000 if (!amdgpu_vm_debug && dma_resv_trylock(resv))
2001 clear = false; 2001 clear = false;
2002 /* Somebody else is using the BO right now */ 2002 /* Somebody else is using the BO right now */
2003 else 2003 else
@@ -2008,7 +2008,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
2008 return r; 2008 return r;
2009 2009
2010 if (!clear) 2010 if (!clear)
2011 reservation_object_unlock(resv); 2011 dma_resv_unlock(resv);
2012 spin_lock(&vm->invalidated_lock); 2012 spin_lock(&vm->invalidated_lock);
2013 } 2013 }
2014 spin_unlock(&vm->invalidated_lock); 2014 spin_unlock(&vm->invalidated_lock);
@@ -2416,7 +2416,7 @@ void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
2416 struct amdgpu_bo *bo; 2416 struct amdgpu_bo *bo;
2417 2417
2418 bo = mapping->bo_va->base.bo; 2418 bo = mapping->bo_va->base.bo;
2419 if (reservation_object_locking_ctx(bo->tbo.base.resv) != 2419 if (dma_resv_locking_ctx(bo->tbo.base.resv) !=
2420 ticket) 2420 ticket)
2421 continue; 2421 continue;
2422 } 2422 }
@@ -2649,7 +2649,7 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
2649 */ 2649 */
2650long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) 2650long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
2651{ 2651{
2652 return reservation_object_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv, 2652 return dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
2653 true, true, timeout); 2653 true, true, timeout);
2654} 2654}
2655 2655
@@ -2724,7 +2724,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2724 if (r) 2724 if (r)
2725 goto error_free_root; 2725 goto error_free_root;
2726 2726
2727 r = reservation_object_reserve_shared(root->tbo.base.resv, 1); 2727 r = dma_resv_reserve_shared(root->tbo.base.resv, 1);
2728 if (r) 2728 if (r)
2729 goto error_unreserve; 2729 goto error_unreserve;
2730 2730
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 722c70d40d3b..67f8aee4cd1b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -5695,7 +5695,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
5695 * deadlock during GPU reset when this fence will not signal 5695 * deadlock during GPU reset when this fence will not signal
5696 * but we hold reservation lock for the BO. 5696 * but we hold reservation lock for the BO.
5697 */ 5697 */
5698 r = reservation_object_wait_timeout_rcu(abo->tbo.base.resv, true, 5698 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
5699 false, 5699 false,
5700 msecs_to_jiffies(5000)); 5700 msecs_to_jiffies(5000));
5701 if (unlikely(r <= 0)) 5701 if (unlikely(r <= 0))
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
index fa9a4593bb37..624d257da20f 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
@@ -27,7 +27,7 @@ static void komeda_crtc_update_clock_ratio(struct komeda_crtc_state *kcrtc_st)
27 return; 27 return;
28 } 28 }
29 29
30 pxlclk = kcrtc_st->base.adjusted_mode.crtc_clock * 1000; 30 pxlclk = kcrtc_st->base.adjusted_mode.crtc_clock * 1000ULL;
31 aclk = komeda_crtc_get_aclk(kcrtc_st); 31 aclk = komeda_crtc_get_aclk(kcrtc_st);
32 32
33 kcrtc_st->clock_ratio = div64_u64(aclk << 32, pxlclk); 33 kcrtc_st->clock_ratio = div64_u64(aclk << 32, pxlclk);
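The komeda change forces the kHz-to-Hz conversion to happen in 64 bits: with a plain 1000 the multiplication is evaluated as 32-bit int and only widened afterwards, while the 1000ULL operand promotes the whole multiply to u64 before it reaches pxlclk. A hedged illustration (khz_to_hz_example is hypothetical):

#include <linux/types.h>

/* Hedged illustration of the 32-bit vs 64-bit multiply difference. */
static u64 khz_to_hz_example(int clock_khz)
{
	/* clock_khz * 1000    -> evaluated in 32-bit int, may overflow   */
	/* clock_khz * 1000ULL -> one operand is u64, full-width multiply */
	return clock_khz * 1000ULL;
}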
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index a3efa28436ea..af67fefed38d 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -9,7 +9,12 @@
9 * Implementation of a CRTC class for the HDLCD driver. 9 * Implementation of a CRTC class for the HDLCD driver.
10 */ 10 */
11 11
12#include <drm/drmP.h> 12#include <linux/clk.h>
13#include <linux/of_graph.h>
14#include <linux/platform_data/simplefb.h>
15
16#include <video/videomode.h>
17
13#include <drm/drm_atomic.h> 18#include <drm/drm_atomic.h>
14#include <drm/drm_atomic_helper.h> 19#include <drm/drm_atomic_helper.h>
15#include <drm/drm_crtc.h> 20#include <drm/drm_crtc.h>
@@ -19,10 +24,7 @@
19#include <drm/drm_of.h> 24#include <drm/drm_of.h>
20#include <drm/drm_plane_helper.h> 25#include <drm/drm_plane_helper.h>
21#include <drm/drm_probe_helper.h> 26#include <drm/drm_probe_helper.h>
22#include <linux/clk.h> 27#include <drm/drm_vblank.h>
23#include <linux/of_graph.h>
24#include <linux/platform_data/simplefb.h>
25#include <video/videomode.h>
26 28
27#include "hdlcd_drv.h" 29#include "hdlcd_drv.h"
28#include "hdlcd_regs.h" 30#include "hdlcd_regs.h"
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index 27c46a2838c5..2e053815b54a 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -14,21 +14,26 @@
14#include <linux/clk.h> 14#include <linux/clk.h>
15#include <linux/component.h> 15#include <linux/component.h>
16#include <linux/console.h> 16#include <linux/console.h>
17#include <linux/dma-mapping.h>
17#include <linux/list.h> 18#include <linux/list.h>
18#include <linux/of_graph.h> 19#include <linux/of_graph.h>
19#include <linux/of_reserved_mem.h> 20#include <linux/of_reserved_mem.h>
21#include <linux/platform_device.h>
20#include <linux/pm_runtime.h> 22#include <linux/pm_runtime.h>
21 23
22#include <drm/drmP.h>
23#include <drm/drm_atomic_helper.h> 24#include <drm/drm_atomic_helper.h>
24#include <drm/drm_crtc.h> 25#include <drm/drm_crtc.h>
26#include <drm/drm_debugfs.h>
27#include <drm/drm_drv.h>
25#include <drm/drm_fb_cma_helper.h> 28#include <drm/drm_fb_cma_helper.h>
26#include <drm/drm_fb_helper.h> 29#include <drm/drm_fb_helper.h>
27#include <drm/drm_gem_cma_helper.h> 30#include <drm/drm_gem_cma_helper.h>
28#include <drm/drm_gem_framebuffer_helper.h> 31#include <drm/drm_gem_framebuffer_helper.h>
32#include <drm/drm_irq.h>
29#include <drm/drm_modeset_helper.h> 33#include <drm/drm_modeset_helper.h>
30#include <drm/drm_of.h> 34#include <drm/drm_of.h>
31#include <drm/drm_probe_helper.h> 35#include <drm/drm_probe_helper.h>
36#include <drm/drm_vblank.h>
32 37
33#include "hdlcd_drv.h" 38#include "hdlcd_drv.h"
34#include "hdlcd_regs.h" 39#include "hdlcd_regs.h"
diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c
index db4451260fff..587d94798f5c 100644
--- a/drivers/gpu/drm/arm/malidp_crtc.c
+++ b/drivers/gpu/drm/arm/malidp_crtc.c
@@ -6,14 +6,17 @@
6 * ARM Mali DP500/DP550/DP650 driver (crtc operations) 6 * ARM Mali DP500/DP550/DP650 driver (crtc operations)
7 */ 7 */
8 8
9#include <drm/drmP.h> 9#include <linux/clk.h>
10#include <linux/pm_runtime.h>
11
12#include <video/videomode.h>
13
10#include <drm/drm_atomic.h> 14#include <drm/drm_atomic.h>
11#include <drm/drm_atomic_helper.h> 15#include <drm/drm_atomic_helper.h>
12#include <drm/drm_crtc.h> 16#include <drm/drm_crtc.h>
17#include <drm/drm_print.h>
13#include <drm/drm_probe_helper.h> 18#include <drm/drm_probe_helper.h>
14#include <linux/clk.h> 19#include <drm/drm_vblank.h>
15#include <linux/pm_runtime.h>
16#include <video/videomode.h>
17 20
18#include "malidp_drv.h" 21#include "malidp_drv.h"
19#include "malidp_hw.h" 22#include "malidp_hw.h"
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index c27ff456eddc..333b88a5efb0 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -15,17 +15,19 @@
15#include <linux/pm_runtime.h> 15#include <linux/pm_runtime.h>
16#include <linux/debugfs.h> 16#include <linux/debugfs.h>
17 17
18#include <drm/drmP.h>
19#include <drm/drm_atomic.h> 18#include <drm/drm_atomic.h>
20#include <drm/drm_atomic_helper.h> 19#include <drm/drm_atomic_helper.h>
21#include <drm/drm_crtc.h> 20#include <drm/drm_crtc.h>
22#include <drm/drm_probe_helper.h> 21#include <drm/drm_drv.h>
23#include <drm/drm_fb_helper.h>
24#include <drm/drm_fb_cma_helper.h> 22#include <drm/drm_fb_cma_helper.h>
23#include <drm/drm_fb_helper.h>
24#include <drm/drm_fourcc.h>
25#include <drm/drm_gem_cma_helper.h> 25#include <drm/drm_gem_cma_helper.h>
26#include <drm/drm_gem_framebuffer_helper.h> 26#include <drm/drm_gem_framebuffer_helper.h>
27#include <drm/drm_modeset_helper.h> 27#include <drm/drm_modeset_helper.h>
28#include <drm/drm_of.h> 28#include <drm/drm_of.h>
29#include <drm/drm_probe_helper.h>
30#include <drm/drm_vblank.h>
29 31
30#include "malidp_drv.h" 32#include "malidp_drv.h"
31#include "malidp_mw.h" 33#include "malidp_mw.h"
diff --git a/drivers/gpu/drm/arm/malidp_drv.h b/drivers/gpu/drm/arm/malidp_drv.h
index 0a639af8337e..cdfddfabf2d1 100644
--- a/drivers/gpu/drm/arm/malidp_drv.h
+++ b/drivers/gpu/drm/arm/malidp_drv.h
@@ -9,12 +9,13 @@
9#ifndef __MALIDP_DRV_H__ 9#ifndef __MALIDP_DRV_H__
10#define __MALIDP_DRV_H__ 10#define __MALIDP_DRV_H__
11 11
12#include <drm/drm_writeback.h>
13#include <drm/drm_encoder.h>
14#include <linux/mutex.h> 12#include <linux/mutex.h>
15#include <linux/wait.h> 13#include <linux/wait.h>
16#include <linux/spinlock.h> 14#include <linux/spinlock.h>
17#include <drm/drmP.h> 15
16#include <drm/drm_writeback.h>
17#include <drm/drm_encoder.h>
18
18#include "malidp_hw.h" 19#include "malidp_hw.h"
19 20
20#define MALIDP_CONFIG_VALID_INIT 0 21#define MALIDP_CONFIG_VALID_INIT 0
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index 380be66d4c6e..bd8265f02e0b 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -9,12 +9,17 @@
9 */ 9 */
10 10
11#include <linux/clk.h> 11#include <linux/clk.h>
12#include <linux/delay.h>
12#include <linux/types.h> 13#include <linux/types.h>
13#include <linux/io.h> 14#include <linux/io.h>
14#include <drm/drmP.h> 15
15#include <video/videomode.h> 16#include <video/videomode.h>
16#include <video/display_timing.h> 17#include <video/display_timing.h>
17 18
19#include <drm/drm_fourcc.h>
20#include <drm/drm_vblank.h>
21#include <drm/drm_print.h>
22
18#include "malidp_drv.h" 23#include "malidp_drv.h"
19#include "malidp_hw.h" 24#include "malidp_hw.h"
20#include "malidp_mw.h" 25#include "malidp_mw.h"
diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c
index 2e812525025d..22c0847986df 100644
--- a/drivers/gpu/drm/arm/malidp_mw.c
+++ b/drivers/gpu/drm/arm/malidp_mw.c
@@ -5,13 +5,14 @@
5 * 5 *
6 * ARM Mali DP Writeback connector implementation 6 * ARM Mali DP Writeback connector implementation
7 */ 7 */
8
8#include <drm/drm_atomic.h> 9#include <drm/drm_atomic.h>
9#include <drm/drm_atomic_helper.h> 10#include <drm/drm_atomic_helper.h>
10#include <drm/drm_crtc.h> 11#include <drm/drm_crtc.h>
11#include <drm/drm_probe_helper.h>
12#include <drm/drm_fb_cma_helper.h> 12#include <drm/drm_fb_cma_helper.h>
13#include <drm/drm_fourcc.h>
13#include <drm/drm_gem_cma_helper.h> 14#include <drm/drm_gem_cma_helper.h>
14#include <drm/drmP.h> 15#include <drm/drm_probe_helper.h>
15#include <drm/drm_writeback.h> 16#include <drm/drm_writeback.h>
16 17
17#include "malidp_drv.h" 18#include "malidp_drv.h"
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 488375bd133d..3c70a53813bf 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -7,11 +7,13 @@
7 */ 7 */
8 8
9#include <linux/iommu.h> 9#include <linux/iommu.h>
10#include <linux/platform_device.h>
10 11
11#include <drm/drmP.h>
12#include <drm/drm_atomic.h> 12#include <drm/drm_atomic.h>
13#include <drm/drm_atomic_helper.h> 13#include <drm/drm_atomic_helper.h>
14#include <drm/drm_drv.h>
14#include <drm/drm_fb_cma_helper.h> 15#include <drm/drm_fb_cma_helper.h>
16#include <drm/drm_fourcc.h>
15#include <drm/drm_gem_cma_helper.h> 17#include <drm/drm_gem_cma_helper.h>
16#include <drm/drm_gem_framebuffer_helper.h> 18#include <drm/drm_gem_framebuffer_helper.h>
17#include <drm/drm_plane_helper.h> 19#include <drm/drm_plane_helper.h>
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index d44fca4e1655..c2b92acd1e9a 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -3,15 +3,19 @@
3 * Copyright (C) 2012 Russell King 3 * Copyright (C) 2012 Russell King
4 * Rewritten from the dovefb driver, and Armada510 manuals. 4 * Rewritten from the dovefb driver, and Armada510 manuals.
5 */ 5 */
6
6#include <linux/clk.h> 7#include <linux/clk.h>
7#include <linux/component.h> 8#include <linux/component.h>
9#include <linux/module.h>
8#include <linux/of_device.h> 10#include <linux/of_device.h>
9#include <linux/platform_device.h> 11#include <linux/platform_device.h>
10#include <drm/drmP.h> 12
11#include <drm/drm_atomic.h> 13#include <drm/drm_atomic.h>
12#include <drm/drm_probe_helper.h>
13#include <drm/drm_plane_helper.h>
14#include <drm/drm_atomic_helper.h> 14#include <drm/drm_atomic_helper.h>
15#include <drm/drm_plane_helper.h>
16#include <drm/drm_probe_helper.h>
17#include <drm/drm_vblank.h>
18
15#include "armada_crtc.h" 19#include "armada_crtc.h"
16#include "armada_drm.h" 20#include "armada_drm.h"
17#include "armada_fb.h" 21#include "armada_fb.h"
diff --git a/drivers/gpu/drm/armada/armada_debugfs.c b/drivers/gpu/drm/armada/armada_debugfs.c
index dc3716dbb2c0..c6fc2f1d58e9 100644
--- a/drivers/gpu/drm/armada/armada_debugfs.c
+++ b/drivers/gpu/drm/armada/armada_debugfs.c
@@ -3,11 +3,15 @@
3 * Copyright (C) 2012 Russell King 3 * Copyright (C) 2012 Russell King
4 * Rewritten from the dovefb driver, and Armada510 manuals. 4 * Rewritten from the dovefb driver, and Armada510 manuals.
5 */ 5 */
6
6#include <linux/ctype.h> 7#include <linux/ctype.h>
7#include <linux/debugfs.h>
8#include <linux/module.h> 8#include <linux/module.h>
9#include <linux/seq_file.h> 9#include <linux/seq_file.h>
10#include <drm/drmP.h> 10#include <linux/uaccess.h>
11
12#include <drm/drm_debugfs.h>
13#include <drm/drm_file.h>
14
11#include "armada_crtc.h" 15#include "armada_crtc.h"
12#include "armada_drm.h" 16#include "armada_drm.h"
13 17
diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h
index c7794c8bdd90..a11bdaccbb33 100644
--- a/drivers/gpu/drm/armada/armada_drm.h
+++ b/drivers/gpu/drm/armada/armada_drm.h
@@ -8,11 +8,14 @@
8#include <linux/kfifo.h> 8#include <linux/kfifo.h>
9#include <linux/io.h> 9#include <linux/io.h>
10#include <linux/workqueue.h> 10#include <linux/workqueue.h>
11#include <drm/drmP.h> 11
12#include <drm/drm_device.h>
13#include <drm/drm_mm.h>
12 14
13struct armada_crtc; 15struct armada_crtc;
14struct armada_gem_object; 16struct armada_gem_object;
15struct clk; 17struct clk;
18struct drm_display_mode;
16struct drm_fb_helper; 19struct drm_fb_helper;
17 20
18static inline void 21static inline void
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 055c92bc88bf..197dca3fc84c 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -2,14 +2,22 @@
2/* 2/*
3 * Copyright (C) 2012 Russell King 3 * Copyright (C) 2012 Russell King
4 */ 4 */
5
5#include <linux/clk.h> 6#include <linux/clk.h>
6#include <linux/component.h> 7#include <linux/component.h>
7#include <linux/module.h> 8#include <linux/module.h>
8#include <linux/of_graph.h> 9#include <linux/of_graph.h>
10#include <linux/platform_device.h>
11
9#include <drm/drm_atomic_helper.h> 12#include <drm/drm_atomic_helper.h>
13#include <drm/drm_drv.h>
14#include <drm/drm_ioctl.h>
15#include <drm/drm_prime.h>
10#include <drm/drm_probe_helper.h> 16#include <drm/drm_probe_helper.h>
11#include <drm/drm_fb_helper.h> 17#include <drm/drm_fb_helper.h>
12#include <drm/drm_of.h> 18#include <drm/drm_of.h>
19#include <drm/drm_vblank.h>
20
13#include "armada_crtc.h" 21#include "armada_crtc.h"
14#include "armada_drm.h" 22#include "armada_drm.h"
15#include "armada_gem.h" 23#include "armada_gem.h"
diff --git a/drivers/gpu/drm/armada/armada_fb.c b/drivers/gpu/drm/armada/armada_fb.c
index de030cb0aa90..426ca383d696 100644
--- a/drivers/gpu/drm/armada/armada_fb.c
+++ b/drivers/gpu/drm/armada/armada_fb.c
@@ -2,9 +2,12 @@
2/* 2/*
3 * Copyright (C) 2012 Russell King 3 * Copyright (C) 2012 Russell King
4 */ 4 */
5
5#include <drm/drm_modeset_helper.h> 6#include <drm/drm_modeset_helper.h>
6#include <drm/drm_fb_helper.h> 7#include <drm/drm_fb_helper.h>
8#include <drm/drm_fourcc.h>
7#include <drm/drm_gem_framebuffer_helper.h> 9#include <drm/drm_gem_framebuffer_helper.h>
10
8#include "armada_drm.h" 11#include "armada_drm.h"
9#include "armada_fb.h" 12#include "armada_fb.h"
10#include "armada_gem.h" 13#include "armada_gem.h"
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index 096aff530b01..090cc0d699ae 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -3,11 +3,14 @@
3 * Copyright (C) 2012 Russell King 3 * Copyright (C) 2012 Russell King
4 * Written from the i915 driver. 4 * Written from the i915 driver.
5 */ 5 */
6
6#include <linux/errno.h> 7#include <linux/errno.h>
7#include <linux/kernel.h> 8#include <linux/kernel.h>
8#include <linux/module.h> 9#include <linux/module.h>
9 10
10#include <drm/drm_fb_helper.h> 11#include <drm/drm_fb_helper.h>
12#include <drm/drm_fourcc.h>
13
11#include "armada_crtc.h" 14#include "armada_crtc.h"
12#include "armada_drm.h" 15#include "armada_drm.h"
13#include "armada_fb.h" 16#include "armada_fb.h"
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 60c509784fa3..93cf8b8bfcff 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -2,12 +2,17 @@
2/* 2/*
3 * Copyright (C) 2012 Russell King 3 * Copyright (C) 2012 Russell King
4 */ 4 */
5
5#include <linux/dma-buf.h> 6#include <linux/dma-buf.h>
6#include <linux/dma-mapping.h> 7#include <linux/dma-mapping.h>
8#include <linux/mman.h>
7#include <linux/shmem_fs.h> 9#include <linux/shmem_fs.h>
10
11#include <drm/armada_drm.h>
12#include <drm/drm_prime.h>
13
8#include "armada_drm.h" 14#include "armada_drm.h"
9#include "armada_gem.h" 15#include "armada_gem.h"
10#include <drm/armada_drm.h>
11#include "armada_ioctlP.h" 16#include "armada_ioctlP.h"
12 17
13static vm_fault_t armada_gem_vm_fault(struct vm_fault *vmf) 18static vm_fault_t armada_gem_vm_fault(struct vm_fault *vmf)
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index e8060216b389..07f0da4d9ba1 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -3,12 +3,14 @@
3 * Copyright (C) 2012 Russell King 3 * Copyright (C) 2012 Russell King
4 * Rewritten from the dovefb driver, and Armada510 manuals. 4 * Rewritten from the dovefb driver, and Armada510 manuals.
5 */ 5 */
6#include <drm/drmP.h> 6
7#include <drm/armada_drm.h>
7#include <drm/drm_atomic.h> 8#include <drm/drm_atomic.h>
8#include <drm/drm_atomic_uapi.h>
9#include <drm/drm_atomic_helper.h> 9#include <drm/drm_atomic_helper.h>
10#include <drm/drm_atomic_uapi.h>
11#include <drm/drm_fourcc.h>
10#include <drm/drm_plane_helper.h> 12#include <drm/drm_plane_helper.h>
11#include <drm/armada_drm.h> 13
12#include "armada_crtc.h" 14#include "armada_crtc.h"
13#include "armada_drm.h" 15#include "armada_drm.h"
14#include "armada_fb.h" 16#include "armada_fb.h"
diff --git a/drivers/gpu/drm/armada/armada_plane.c b/drivers/gpu/drm/armada/armada_plane.c
index f08b4f37816d..e7cc2b343bcb 100644
--- a/drivers/gpu/drm/armada/armada_plane.c
+++ b/drivers/gpu/drm/armada/armada_plane.c
@@ -3,10 +3,12 @@
3 * Copyright (C) 2012 Russell King 3 * Copyright (C) 2012 Russell King
4 * Rewritten from the dovefb driver, and Armada510 manuals. 4 * Rewritten from the dovefb driver, and Armada510 manuals.
5 */ 5 */
6#include <drm/drmP.h> 6
7#include <drm/drm_atomic.h> 7#include <drm/drm_atomic.h>
8#include <drm/drm_atomic_helper.h> 8#include <drm/drm_atomic_helper.h>
9#include <drm/drm_fourcc.h>
9#include <drm/drm_plane_helper.h> 10#include <drm/drm_plane_helper.h>
11
10#include "armada_crtc.h" 12#include "armada_crtc.h"
11#include "armada_drm.h" 13#include "armada_drm.h"
12#include "armada_fb.h" 14#include "armada_fb.h"
diff --git a/drivers/gpu/drm/armada/armada_trace.h b/drivers/gpu/drm/armada/armada_trace.h
index f03a56bda596..528f20fe3147 100644
--- a/drivers/gpu/drm/armada/armada_trace.h
+++ b/drivers/gpu/drm/armada/armada_trace.h
@@ -3,7 +3,10 @@
3#define ARMADA_TRACE_H 3#define ARMADA_TRACE_H
4 4
5#include <linux/tracepoint.h> 5#include <linux/tracepoint.h>
6#include <drm/drmP.h> 6
7struct drm_crtc;
8struct drm_framebuffer;
9struct drm_plane;
7 10
8#undef TRACE_SYSTEM 11#undef TRACE_SYSTEM
9#define TRACE_SYSTEM armada 12#define TRACE_SYSTEM armada
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c b/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c
index 15db9e426ec4..2184b8be6fd4 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c
@@ -215,7 +215,7 @@ static void aspeed_gfx_disable_vblank(struct drm_simple_display_pipe *pipe)
215 writel(reg | CRT_CTRL_VERTICAL_INTR_STS, priv->base + CRT_CTRL1); 215 writel(reg | CRT_CTRL_VERTICAL_INTR_STS, priv->base + CRT_CTRL1);
216} 216}
217 217
218static struct drm_simple_display_pipe_funcs aspeed_gfx_funcs = { 218static const struct drm_simple_display_pipe_funcs aspeed_gfx_funcs = {
219 .enable = aspeed_gfx_pipe_enable, 219 .enable = aspeed_gfx_pipe_enable,
220 .disable = aspeed_gfx_pipe_disable, 220 .disable = aspeed_gfx_pipe_disable,
221 .update = aspeed_gfx_pipe_update, 221 .update = aspeed_gfx_pipe_update,
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index f2f7f69d6cc3..22885dceaa17 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1780,8 +1780,7 @@ void analogix_dp_unbind(struct analogix_dp_device *dp)
1780 if (dp->plat_data->panel) { 1780 if (dp->plat_data->panel) {
1781 if (drm_panel_unprepare(dp->plat_data->panel)) 1781 if (drm_panel_unprepare(dp->plat_data->panel))
1782 DRM_ERROR("failed to turnoff the panel\n"); 1782 DRM_ERROR("failed to turnoff the panel\n");
1783 if (drm_panel_detach(dp->plat_data->panel)) 1783 drm_panel_detach(dp->plat_data->panel);
1784 DRM_ERROR("failed to detach the panel\n");
1785 } 1784 }
1786 1785
1787 drm_dp_aux_unregister(&dp->aux); 1786 drm_dp_aux_unregister(&dp->aux);
diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c
index 8ef6539ae78a..7aa789c35882 100644
--- a/drivers/gpu/drm/bridge/dumb-vga-dac.c
+++ b/drivers/gpu/drm/bridge/dumb-vga-dac.c
@@ -42,7 +42,7 @@ static int dumb_vga_get_modes(struct drm_connector *connector)
42 struct edid *edid; 42 struct edid *edid;
43 int ret; 43 int ret;
44 44
45 if (IS_ERR(vga->ddc)) 45 if (!vga->ddc)
46 goto fallback; 46 goto fallback;
47 47
48 edid = drm_get_edid(connector, vga->ddc); 48 edid = drm_get_edid(connector, vga->ddc);
@@ -84,7 +84,7 @@ dumb_vga_connector_detect(struct drm_connector *connector, bool force)
84 * wire the DDC pins, or the I2C bus might not be working at 84 * wire the DDC pins, or the I2C bus might not be working at
85 * all. 85 * all.
86 */ 86 */
87 if (!IS_ERR(vga->ddc) && drm_probe_ddc(vga->ddc)) 87 if (vga->ddc && drm_probe_ddc(vga->ddc))
88 return connector_status_connected; 88 return connector_status_connected;
89 89
90 return connector_status_unknown; 90 return connector_status_unknown;
@@ -197,6 +197,7 @@ static int dumb_vga_probe(struct platform_device *pdev)
197 if (PTR_ERR(vga->ddc) == -ENODEV) { 197 if (PTR_ERR(vga->ddc) == -ENODEV) {
198 dev_dbg(&pdev->dev, 198 dev_dbg(&pdev->dev,
199 "No i2c bus specified. Disabling EDID readout\n"); 199 "No i2c bus specified. Disabling EDID readout\n");
200 vga->ddc = NULL;
200 } else { 201 } else {
201 dev_err(&pdev->dev, "Couldn't retrieve i2c bus\n"); 202 dev_err(&pdev->dev, "Couldn't retrieve i2c bus\n");
202 return PTR_ERR(vga->ddc); 203 return PTR_ERR(vga->ddc);
@@ -218,7 +219,7 @@ static int dumb_vga_remove(struct platform_device *pdev)
218 219
219 drm_bridge_remove(&vga->bridge); 220 drm_bridge_remove(&vga->bridge);
220 221
221 if (!IS_ERR(vga->ddc)) 222 if (vga->ddc)
222 i2c_put_adapter(vga->ddc); 223 i2c_put_adapter(vga->ddc);
223 224
224 return 0; 225 return 0;
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
index a494186ae6ce..2b7539701b42 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
@@ -63,10 +63,6 @@ enum {
63 HDMI_REVISION_ID = 0x0001, 63 HDMI_REVISION_ID = 0x0001,
64 HDMI_IH_AHBDMAAUD_STAT0 = 0x0109, 64 HDMI_IH_AHBDMAAUD_STAT0 = 0x0109,
65 HDMI_IH_MUTE_AHBDMAAUD_STAT0 = 0x0189, 65 HDMI_IH_MUTE_AHBDMAAUD_STAT0 = 0x0189,
66 HDMI_FC_AUDICONF2 = 0x1027,
67 HDMI_FC_AUDSCONF = 0x1063,
68 HDMI_FC_AUDSCONF_LAYOUT1 = 1 << 0,
69 HDMI_FC_AUDSCONF_LAYOUT0 = 0 << 0,
70 HDMI_AHB_DMA_CONF0 = 0x3600, 66 HDMI_AHB_DMA_CONF0 = 0x3600,
71 HDMI_AHB_DMA_START = 0x3601, 67 HDMI_AHB_DMA_START = 0x3601,
72 HDMI_AHB_DMA_STOP = 0x3602, 68 HDMI_AHB_DMA_STOP = 0x3602,
@@ -403,7 +399,7 @@ static int dw_hdmi_prepare(struct snd_pcm_substream *substream)
403{ 399{
404 struct snd_pcm_runtime *runtime = substream->runtime; 400 struct snd_pcm_runtime *runtime = substream->runtime;
405 struct snd_dw_hdmi *dw = substream->private_data; 401 struct snd_dw_hdmi *dw = substream->private_data;
406 u8 threshold, conf0, conf1, layout, ca; 402 u8 threshold, conf0, conf1, ca;
407 403
408 /* Setup as per 3.0.5 FSL 4.1.0 BSP */ 404 /* Setup as per 3.0.5 FSL 4.1.0 BSP */
409 switch (dw->revision) { 405 switch (dw->revision) {
@@ -434,20 +430,12 @@ static int dw_hdmi_prepare(struct snd_pcm_substream *substream)
434 conf1 = default_hdmi_channel_config[runtime->channels - 2].conf1; 430 conf1 = default_hdmi_channel_config[runtime->channels - 2].conf1;
435 ca = default_hdmi_channel_config[runtime->channels - 2].ca; 431 ca = default_hdmi_channel_config[runtime->channels - 2].ca;
436 432
437 /*
438 * For >2 channel PCM audio, we need to select layout 1
439 * and set an appropriate channel map.
440 */
441 if (runtime->channels > 2)
442 layout = HDMI_FC_AUDSCONF_LAYOUT1;
443 else
444 layout = HDMI_FC_AUDSCONF_LAYOUT0;
445
446 writeb_relaxed(threshold, dw->data.base + HDMI_AHB_DMA_THRSLD); 433 writeb_relaxed(threshold, dw->data.base + HDMI_AHB_DMA_THRSLD);
447 writeb_relaxed(conf0, dw->data.base + HDMI_AHB_DMA_CONF0); 434 writeb_relaxed(conf0, dw->data.base + HDMI_AHB_DMA_CONF0);
448 writeb_relaxed(conf1, dw->data.base + HDMI_AHB_DMA_CONF1); 435 writeb_relaxed(conf1, dw->data.base + HDMI_AHB_DMA_CONF1);
449 writeb_relaxed(layout, dw->data.base + HDMI_FC_AUDSCONF); 436
450 writeb_relaxed(ca, dw->data.base + HDMI_FC_AUDICONF2); 437 dw_hdmi_set_channel_count(dw->data.hdmi, runtime->channels);
438 dw_hdmi_set_channel_allocation(dw->data.hdmi, ca);
451 439
452 switch (runtime->format) { 440 switch (runtime->format) {
453 case SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE: 441 case SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE:
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h
index 63b5756f463b..cb07dc0da5a7 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h
@@ -14,6 +14,7 @@ struct dw_hdmi_audio_data {
14 14
15struct dw_hdmi_i2s_audio_data { 15struct dw_hdmi_i2s_audio_data {
16 struct dw_hdmi *hdmi; 16 struct dw_hdmi *hdmi;
17 u8 *eld;
17 18
18 void (*write)(struct dw_hdmi *hdmi, u8 val, int offset); 19 void (*write)(struct dw_hdmi *hdmi, u8 val, int offset);
19 u8 (*read)(struct dw_hdmi *hdmi, int offset); 20 u8 (*read)(struct dw_hdmi *hdmi, int offset);
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
index 5cbb71a866d5..1d15cf9b6821 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
@@ -10,6 +10,7 @@
10#include <linux/module.h> 10#include <linux/module.h>
11 11
12#include <drm/bridge/dw_hdmi.h> 12#include <drm/bridge/dw_hdmi.h>
13#include <drm/drm_crtc.h>
13 14
14#include <sound/hdmi-codec.h> 15#include <sound/hdmi-codec.h>
15 16
@@ -44,14 +45,30 @@ static int dw_hdmi_i2s_hw_params(struct device *dev, void *data,
44 u8 inputclkfs = 0; 45 u8 inputclkfs = 0;
45 46
46 /* it cares I2S only */ 47 /* it cares I2S only */
47 if ((fmt->fmt != HDMI_I2S) || 48 if (fmt->bit_clk_master | fmt->frame_clk_master) {
48 (fmt->bit_clk_master | fmt->frame_clk_master)) { 49 dev_err(dev, "unsupported clock settings\n");
49 dev_err(dev, "unsupported format/settings\n");
50 return -EINVAL; 50 return -EINVAL;
51 } 51 }
52 52
53 /* Reset the FIFOs before applying new params */
54 hdmi_write(audio, HDMI_AUD_CONF0_SW_RESET, HDMI_AUD_CONF0);
55 hdmi_write(audio, (u8)~HDMI_MC_SWRSTZ_I2SSWRST_REQ, HDMI_MC_SWRSTZ);
56
53 inputclkfs = HDMI_AUD_INPUTCLKFS_64FS; 57 inputclkfs = HDMI_AUD_INPUTCLKFS_64FS;
54 conf0 = HDMI_AUD_CONF0_I2S_ALL_ENABLE; 58 conf0 = (HDMI_AUD_CONF0_I2S_SELECT | HDMI_AUD_CONF0_I2S_EN0);
59
60 /* Enable the required i2s lanes */
61 switch (hparms->channels) {
62 case 7 ... 8:
63 conf0 |= HDMI_AUD_CONF0_I2S_EN3;
64 /* Fall-thru */
65 case 5 ... 6:
66 conf0 |= HDMI_AUD_CONF0_I2S_EN2;
67 /* Fall-thru */
68 case 3 ... 4:
69 conf0 |= HDMI_AUD_CONF0_I2S_EN1;
70 /* Fall-thru */
71 }
55 72
56 switch (hparms->sample_width) { 73 switch (hparms->sample_width) {
57 case 16: 74 case 16:
@@ -63,7 +80,30 @@ static int dw_hdmi_i2s_hw_params(struct device *dev, void *data,
63 break; 80 break;
64 } 81 }
65 82
83 switch (fmt->fmt) {
84 case HDMI_I2S:
85 conf1 |= HDMI_AUD_CONF1_MODE_I2S;
86 break;
87 case HDMI_RIGHT_J:
88 conf1 |= HDMI_AUD_CONF1_MODE_RIGHT_J;
89 break;
90 case HDMI_LEFT_J:
91 conf1 |= HDMI_AUD_CONF1_MODE_LEFT_J;
92 break;
93 case HDMI_DSP_A:
94 conf1 |= HDMI_AUD_CONF1_MODE_BURST_1;
95 break;
96 case HDMI_DSP_B:
97 conf1 |= HDMI_AUD_CONF1_MODE_BURST_2;
98 break;
99 default:
100 dev_err(dev, "unsupported format\n");
101 return -EINVAL;
102 }
103
66 dw_hdmi_set_sample_rate(hdmi, hparms->sample_rate); 104 dw_hdmi_set_sample_rate(hdmi, hparms->sample_rate);
105 dw_hdmi_set_channel_count(hdmi, hparms->channels);
106 dw_hdmi_set_channel_allocation(hdmi, hparms->cea.channel_allocation);
67 107
68 hdmi_write(audio, inputclkfs, HDMI_AUD_INPUTCLKFS); 108 hdmi_write(audio, inputclkfs, HDMI_AUD_INPUTCLKFS);
69 hdmi_write(audio, conf0, HDMI_AUD_CONF0); 109 hdmi_write(audio, conf0, HDMI_AUD_CONF0);
@@ -80,8 +120,15 @@ static void dw_hdmi_i2s_audio_shutdown(struct device *dev, void *data)
80 struct dw_hdmi *hdmi = audio->hdmi; 120 struct dw_hdmi *hdmi = audio->hdmi;
81 121
82 dw_hdmi_audio_disable(hdmi); 122 dw_hdmi_audio_disable(hdmi);
123}
83 124
84 hdmi_write(audio, HDMI_AUD_CONF0_SW_RESET, HDMI_AUD_CONF0); 125static int dw_hdmi_i2s_get_eld(struct device *dev, void *data, uint8_t *buf,
126 size_t len)
127{
128 struct dw_hdmi_i2s_audio_data *audio = data;
129
130 memcpy(buf, audio->eld, min_t(size_t, MAX_ELD_BYTES, len));
131 return 0;
85} 132}
86 133
87static int dw_hdmi_i2s_get_dai_id(struct snd_soc_component *component, 134static int dw_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
@@ -107,6 +154,7 @@ static int dw_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
107static struct hdmi_codec_ops dw_hdmi_i2s_ops = { 154static struct hdmi_codec_ops dw_hdmi_i2s_ops = {
108 .hw_params = dw_hdmi_i2s_hw_params, 155 .hw_params = dw_hdmi_i2s_hw_params,
109 .audio_shutdown = dw_hdmi_i2s_audio_shutdown, 156 .audio_shutdown = dw_hdmi_i2s_audio_shutdown,
157 .get_eld = dw_hdmi_i2s_get_eld,
110 .get_dai_id = dw_hdmi_i2s_get_dai_id, 158 .get_dai_id = dw_hdmi_i2s_get_dai_id,
111}; 159};
112 160
@@ -119,7 +167,7 @@ static int snd_dw_hdmi_probe(struct platform_device *pdev)
119 167
120 pdata.ops = &dw_hdmi_i2s_ops; 168 pdata.ops = &dw_hdmi_i2s_ops;
121 pdata.i2s = 1; 169 pdata.i2s = 1;
122 pdata.max_i2s_channels = 6; 170 pdata.max_i2s_channels = 8;
123 pdata.data = audio; 171 pdata.data = audio;
124 172
125 memset(&pdevinfo, 0, sizeof(pdevinfo)); 173 memset(&pdevinfo, 0, sizeof(pdevinfo));
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 83b94b66e464..4044071090c4 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -645,6 +645,42 @@ void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate)
645} 645}
646EXPORT_SYMBOL_GPL(dw_hdmi_set_sample_rate); 646EXPORT_SYMBOL_GPL(dw_hdmi_set_sample_rate);
647 647
648void dw_hdmi_set_channel_count(struct dw_hdmi *hdmi, unsigned int cnt)
649{
650 u8 layout;
651
652 mutex_lock(&hdmi->audio_mutex);
653
654 /*
655 * For >2 channel PCM audio, we need to select layout 1
656 * and set an appropriate channel map.
657 */
658 if (cnt > 2)
659 layout = HDMI_FC_AUDSCONF_AUD_PACKET_LAYOUT_LAYOUT1;
660 else
661 layout = HDMI_FC_AUDSCONF_AUD_PACKET_LAYOUT_LAYOUT0;
662
663 hdmi_modb(hdmi, layout, HDMI_FC_AUDSCONF_AUD_PACKET_LAYOUT_MASK,
664 HDMI_FC_AUDSCONF);
665
666 /* Set the audio infoframes channel count */
667 hdmi_modb(hdmi, (cnt - 1) << HDMI_FC_AUDICONF0_CC_OFFSET,
668 HDMI_FC_AUDICONF0_CC_MASK, HDMI_FC_AUDICONF0);
669
670 mutex_unlock(&hdmi->audio_mutex);
671}
672EXPORT_SYMBOL_GPL(dw_hdmi_set_channel_count);
673
674void dw_hdmi_set_channel_allocation(struct dw_hdmi *hdmi, unsigned int ca)
675{
676 mutex_lock(&hdmi->audio_mutex);
677
678 hdmi_writeb(hdmi, ca, HDMI_FC_AUDICONF2);
679
680 mutex_unlock(&hdmi->audio_mutex);
681}
682EXPORT_SYMBOL_GPL(dw_hdmi_set_channel_allocation);
683
648static void hdmi_enable_audio_clk(struct dw_hdmi *hdmi, bool enable) 684static void hdmi_enable_audio_clk(struct dw_hdmi *hdmi, bool enable)
649{ 685{
650 if (enable) 686 if (enable)
@@ -2763,6 +2799,7 @@ __dw_hdmi_probe(struct platform_device *pdev,
2763 struct dw_hdmi_i2s_audio_data audio; 2799 struct dw_hdmi_i2s_audio_data audio;
2764 2800
2765 audio.hdmi = hdmi; 2801 audio.hdmi = hdmi;
2802 audio.eld = hdmi->connector.eld;
2766 audio.write = hdmi_writeb; 2803 audio.write = hdmi_writeb;
2767 audio.read = hdmi_readb; 2804 audio.read = hdmi_readb;
2768 hdmi->enable_audio = dw_hdmi_i2s_audio_enable; 2805 hdmi->enable_audio = dw_hdmi_i2s_audio_enable;
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h
index 4e3ec09d3ca4..6988f12d89d9 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h
@@ -865,12 +865,18 @@ enum {
865 865
866/* AUD_CONF0 field values */ 866/* AUD_CONF0 field values */
867 HDMI_AUD_CONF0_SW_RESET = 0x80, 867 HDMI_AUD_CONF0_SW_RESET = 0x80,
868 HDMI_AUD_CONF0_I2S_ALL_ENABLE = 0x2F, 868 HDMI_AUD_CONF0_I2S_SELECT = 0x20,
869 HDMI_AUD_CONF0_I2S_EN3 = 0x08,
870 HDMI_AUD_CONF0_I2S_EN2 = 0x04,
871 HDMI_AUD_CONF0_I2S_EN1 = 0x02,
872 HDMI_AUD_CONF0_I2S_EN0 = 0x01,
869 873
870/* AUD_CONF1 field values */ 874/* AUD_CONF1 field values */
871 HDMI_AUD_CONF1_MODE_I2S = 0x00, 875 HDMI_AUD_CONF1_MODE_I2S = 0x00,
872 HDMI_AUD_CONF1_MODE_RIGHT_J = 0x02, 876 HDMI_AUD_CONF1_MODE_RIGHT_J = 0x20,
873 HDMI_AUD_CONF1_MODE_LEFT_J = 0x04, 877 HDMI_AUD_CONF1_MODE_LEFT_J = 0x40,
878 HDMI_AUD_CONF1_MODE_BURST_1 = 0x60,
879 HDMI_AUD_CONF1_MODE_BURST_2 = 0x80,
874 HDMI_AUD_CONF1_WIDTH_16 = 0x10, 880 HDMI_AUD_CONF1_WIDTH_16 = 0x10,
875 HDMI_AUD_CONF1_WIDTH_24 = 0x18, 881 HDMI_AUD_CONF1_WIDTH_24 = 0x18,
876 882
@@ -938,6 +944,7 @@ enum {
938 HDMI_MC_CLKDIS_PIXELCLK_DISABLE = 0x1, 944 HDMI_MC_CLKDIS_PIXELCLK_DISABLE = 0x1,
939 945
940/* MC_SWRSTZ field values */ 946/* MC_SWRSTZ field values */
947 HDMI_MC_SWRSTZ_I2SSWRST_REQ = 0x08,
941 HDMI_MC_SWRSTZ_TMDSSWRST_REQ = 0x02, 948 HDMI_MC_SWRSTZ_TMDSSWRST_REQ = 0x02,
942 949
943/* MC_FLOWCTRL field values */ 950/* MC_FLOWCTRL field values */
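With the packet-layout and channel-allocation programming now behind the exported dw_hdmi_set_channel_count() and dw_hdmi_set_channel_allocation() helpers, an audio backend only has to forward its PCM parameters. A minimal sketch of the call sequence, assuming a made-up dw_example_audio_hw_params() wrapper and a CEA-861 channel-allocation byte supplied by the caller (the I2S glue above takes it from the hdmi-codec parameters):

#include <drm/bridge/dw_hdmi.h>

static void dw_example_audio_hw_params(struct dw_hdmi *hdmi, unsigned int rate,
                                       unsigned int channels, u8 ca)
{
        dw_hdmi_set_sample_rate(hdmi, rate);

        /* Layout 0 for stereo, layout 1 plus infoframe CC update for >2 ch. */
        dw_hdmi_set_channel_count(hdmi, channels);

        /* CEA-861 channel allocation byte, written to FC_AUDICONF2. */
        dw_hdmi_set_channel_allocation(hdmi, ca);
}

Both helpers take hdmi->audio_mutex internally, so the caller does not need any extra locking around this sequence.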
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 42f03a985ac0..cebc8e620820 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -1312,7 +1312,7 @@ static int tc_connector_get_modes(struct drm_connector *connector)
1312{ 1312{
1313 struct tc_data *tc = connector_to_tc(connector); 1313 struct tc_data *tc = connector_to_tc(connector);
1314 struct edid *edid; 1314 struct edid *edid;
1315 unsigned int count; 1315 int count;
1316 int ret; 1316 int ret;
1317 1317
1318 ret = tc_get_display_props(tc); 1318 ret = tc_get_display_props(tc);
@@ -1321,11 +1321,9 @@ static int tc_connector_get_modes(struct drm_connector *connector)
1321 return 0; 1321 return 0;
1322 } 1322 }
1323 1323
1324 if (tc->panel && tc->panel->funcs && tc->panel->funcs->get_modes) { 1324 count = drm_panel_get_modes(tc->panel);
1325 count = tc->panel->funcs->get_modes(tc->panel); 1325 if (count > 0)
1326 if (count > 0) 1326 return count;
1327 return count;
1328 }
1329 1327
1330 edid = drm_get_edid(connector, &tc->aux.ddc); 1328 edid = drm_get_edid(connector, &tc->aux.ddc);
1331 1329
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index 19ae119f1a5d..5a5b42db6f2a 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -1037,7 +1037,7 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
1037 * As a contrast, with implicit fencing the kernel keeps track of any 1037 * As a contrast, with implicit fencing the kernel keeps track of any
1038 * ongoing rendering, and automatically ensures that the atomic update waits 1038 * ongoing rendering, and automatically ensures that the atomic update waits
1039 * for any pending rendering to complete. For shared buffers represented with 1039 * for any pending rendering to complete. For shared buffers represented with
1040 * a &struct dma_buf this is tracked in &struct reservation_object. 1040 * a &struct dma_buf this is tracked in &struct dma_resv.
1041 * Implicit syncing is how Linux traditionally worked (e.g. DRI2/3 on X.org), 1041 * Implicit syncing is how Linux traditionally worked (e.g. DRI2/3 on X.org),
1042 * whereas explicit fencing is what Android wants. 1042 * whereas explicit fencing is what Android wants.
1043 * 1043 *
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 354798bad576..4c766624b20d 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -986,12 +986,14 @@ static const struct drm_prop_enum_list hdmi_colorspaces[] = {
986 * - Kernel sends uevent with the connector id and property id through 986 * - Kernel sends uevent with the connector id and property id through
987 * @drm_hdcp_update_content_protection, upon below kernel triggered 987 * @drm_hdcp_update_content_protection, upon below kernel triggered
988 * scenarios: 988 * scenarios:
989 * DESIRED -> ENABLED (authentication success) 989 *
990 * ENABLED -> DESIRED (termination of authentication) 990 * - DESIRED -> ENABLED (authentication success)
991 * - ENABLED -> DESIRED (termination of authentication)
991 * - Please note no uevents for userspace triggered property state changes, 992 * - Please note no uevents for userspace triggered property state changes,
992 * which can't fail such as 993 * which can't fail such as
993 * DESIRED/ENABLED -> UNDESIRED 994 *
994 * UNDESIRED -> DESIRED 995 * - DESIRED/ENABLED -> UNDESIRED
996 * - UNDESIRED -> DESIRED
995 * - Userspace is responsible for polling the property or listen to uevents 997 * - Userspace is responsible for polling the property or listen to uevents
996 * to determine when the value transitions from ENABLED to DESIRED. 998 * to determine when the value transitions from ENABLED to DESIRED.
997 * This signifies the link is no longer protected and userspace should 999 * This signifies the link is no longer protected and userspace should
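To act on the polling responsibility described above, userspace only needs to read the connector's "Content Protection" property; a rough libdrm sketch (the helper name is invented, and the property values 0/1/2 correspond to UNDESIRED/DESIRED/ENABLED):

#include <stdint.h>
#include <string.h>
#include <xf86drmMode.h>

static int example_read_content_protection(int fd, uint32_t connector_id,
                                           uint64_t *val)
{
        drmModeObjectProperties *props;
        int ret = -1;

        props = drmModeObjectGetProperties(fd, connector_id,
                                           DRM_MODE_OBJECT_CONNECTOR);
        if (!props)
                return -1;

        for (uint32_t i = 0; i < props->count_props; i++) {
                drmModePropertyRes *prop = drmModeGetProperty(fd, props->props[i]);

                if (!prop)
                        continue;
                if (!strcmp(prop->name, "Content Protection")) {
                        *val = props->prop_values[i];
                        ret = 0;
                }
                drmModeFreeProperty(prop);
        }

        drmModeFreeObjectProperties(props);
        return ret;
}

Detecting the ENABLED -> DESIRED transition is then a matter of re-reading the value after a hotplug uevent or on a timer.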
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index afc38cece3f5..6854f5867d51 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -159,7 +159,7 @@ void drm_gem_private_object_init(struct drm_device *dev,
159 kref_init(&obj->refcount); 159 kref_init(&obj->refcount);
160 obj->handle_count = 0; 160 obj->handle_count = 0;
161 obj->size = size; 161 obj->size = size;
162 reservation_object_init(&obj->_resv); 162 dma_resv_init(&obj->_resv);
163 if (!obj->resv) 163 if (!obj->resv)
164 obj->resv = &obj->_resv; 164 obj->resv = &obj->_resv;
165 165
@@ -633,6 +633,9 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
633 633
634 pagevec_init(&pvec); 634 pagevec_init(&pvec);
635 for (i = 0; i < npages; i++) { 635 for (i = 0; i < npages; i++) {
636 if (!pages[i])
637 continue;
638
636 if (dirty) 639 if (dirty)
637 set_page_dirty(pages[i]); 640 set_page_dirty(pages[i]);
638 641
@@ -752,7 +755,7 @@ drm_gem_object_lookup(struct drm_file *filp, u32 handle)
752EXPORT_SYMBOL(drm_gem_object_lookup); 755EXPORT_SYMBOL(drm_gem_object_lookup);
753 756
754/** 757/**
755 * drm_gem_reservation_object_wait - Wait on GEM object's reservation's objects 758 * drm_gem_dma_resv_wait - Wait on GEM object's reservation's objects
756 * shared and/or exclusive fences. 759 * shared and/or exclusive fences.
757 * @filep: DRM file private date 760 * @filep: DRM file private date
758 * @handle: userspace handle 761 * @handle: userspace handle
@@ -764,7 +767,7 @@ EXPORT_SYMBOL(drm_gem_object_lookup);
764 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or 767 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
765 * greater than 0 on success. 768 * greater than 0 on success.
766 */ 769 */
767long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle, 770long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
768 bool wait_all, unsigned long timeout) 771 bool wait_all, unsigned long timeout)
769{ 772{
770 long ret; 773 long ret;
@@ -776,7 +779,7 @@ long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle,
776 return -EINVAL; 779 return -EINVAL;
777 } 780 }
778 781
779 ret = reservation_object_wait_timeout_rcu(obj->resv, wait_all, 782 ret = dma_resv_wait_timeout_rcu(obj->resv, wait_all,
780 true, timeout); 783 true, timeout);
781 if (ret == 0) 784 if (ret == 0)
782 ret = -ETIME; 785 ret = -ETIME;
@@ -787,7 +790,7 @@ long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle,
787 790
788 return ret; 791 return ret;
789} 792}
790EXPORT_SYMBOL(drm_gem_reservation_object_wait); 793EXPORT_SYMBOL(drm_gem_dma_resv_wait);
791 794
792/** 795/**
793 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl 796 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
@@ -953,7 +956,7 @@ drm_gem_object_release(struct drm_gem_object *obj)
953 if (obj->filp) 956 if (obj->filp)
954 fput(obj->filp); 957 fput(obj->filp);
955 958
956 reservation_object_fini(&obj->_resv); 959 dma_resv_fini(&obj->_resv);
957 drm_gem_free_mmap_offset(obj); 960 drm_gem_free_mmap_offset(obj);
958} 961}
959EXPORT_SYMBOL(drm_gem_object_release); 962EXPORT_SYMBOL(drm_gem_object_release);
@@ -1288,7 +1291,7 @@ retry:
1288 if (contended != -1) { 1291 if (contended != -1) {
1289 struct drm_gem_object *obj = objs[contended]; 1292 struct drm_gem_object *obj = objs[contended];
1290 1293
1291 ret = reservation_object_lock_slow_interruptible(obj->resv, 1294 ret = dma_resv_lock_slow_interruptible(obj->resv,
1292 acquire_ctx); 1295 acquire_ctx);
1293 if (ret) { 1296 if (ret) {
1294 ww_acquire_done(acquire_ctx); 1297 ww_acquire_done(acquire_ctx);
@@ -1300,16 +1303,16 @@ retry:
1300 if (i == contended) 1303 if (i == contended)
1301 continue; 1304 continue;
1302 1305
1303 ret = reservation_object_lock_interruptible(objs[i]->resv, 1306 ret = dma_resv_lock_interruptible(objs[i]->resv,
1304 acquire_ctx); 1307 acquire_ctx);
1305 if (ret) { 1308 if (ret) {
1306 int j; 1309 int j;
1307 1310
1308 for (j = 0; j < i; j++) 1311 for (j = 0; j < i; j++)
1309 reservation_object_unlock(objs[j]->resv); 1312 dma_resv_unlock(objs[j]->resv);
1310 1313
1311 if (contended != -1 && contended >= i) 1314 if (contended != -1 && contended >= i)
1312 reservation_object_unlock(objs[contended]->resv); 1315 dma_resv_unlock(objs[contended]->resv);
1313 1316
1314 if (ret == -EDEADLK) { 1317 if (ret == -EDEADLK) {
1315 contended = i; 1318 contended = i;
@@ -1334,7 +1337,7 @@ drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
1334 int i; 1337 int i;
1335 1338
1336 for (i = 0; i < count; i++) 1339 for (i = 0; i < count; i++)
1337 reservation_object_unlock(objs[i]->resv); 1340 dma_resv_unlock(objs[i]->resv);
1338 1341
1339 ww_acquire_fini(acquire_ctx); 1342 ww_acquire_fini(acquire_ctx);
1340} 1343}
@@ -1410,12 +1413,12 @@ int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
1410 1413
1411 if (!write) { 1414 if (!write) {
1412 struct dma_fence *fence = 1415 struct dma_fence *fence =
1413 reservation_object_get_excl_rcu(obj->resv); 1416 dma_resv_get_excl_rcu(obj->resv);
1414 1417
1415 return drm_gem_fence_array_add(fence_array, fence); 1418 return drm_gem_fence_array_add(fence_array, fence);
1416 } 1419 }
1417 1420
1418 ret = reservation_object_get_fences_rcu(obj->resv, NULL, 1421 ret = dma_resv_get_fences_rcu(obj->resv, NULL,
1419 &fence_count, &fences); 1422 &fence_count, &fences);
1420 if (ret || !fence_count) 1423 if (ret || !fence_count)
1421 return ret; 1424 return ret;
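Beyond the rename, drm_gem_dma_resv_wait() keeps the old calling convention, so a driver's wait-bo ioctl stays a thin wrapper around it. A sketch under the assumption of a hypothetical drm_example_wait_bo uapi struct carrying a handle and a timeout in nanoseconds:

static int example_ioctl_wait_bo(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv)
{
        struct drm_example_wait_bo *args = data;        /* hypothetical uapi */
        unsigned long timeout = nsecs_to_jiffies(args->timeout_ns);
        long ret;

        /* true: wait on both shared and exclusive fences of the object. */
        ret = drm_gem_dma_resv_wait(file_priv, args->handle, true, timeout);

        /* Negative means failure, including -ETIME when the wait timed out. */
        return ret < 0 ? ret : 0;
}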
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index f61304054786..b9bcd310ca2d 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -7,7 +7,7 @@
7 7
8#include <linux/dma-buf.h> 8#include <linux/dma-buf.h>
9#include <linux/dma-fence.h> 9#include <linux/dma-fence.h>
10#include <linux/reservation.h> 10#include <linux/dma-resv.h>
11#include <linux/slab.h> 11#include <linux/slab.h>
12 12
13#include <drm/drm_atomic.h> 13#include <drm/drm_atomic.h>
@@ -294,7 +294,7 @@ int drm_gem_fb_prepare_fb(struct drm_plane *plane,
294 return 0; 294 return 0;
295 295
296 obj = drm_gem_fb_get_obj(state->fb, 0); 296 obj = drm_gem_fb_get_obj(state->fb, 0);
297 fence = reservation_object_get_excl_rcu(obj->resv); 297 fence = dma_resv_get_excl_rcu(obj->resv);
298 drm_atomic_set_fence_for_plane(state, fence); 298 drm_atomic_set_fence_for_plane(state, fence);
299 299
300 return 0; 300 return 0;
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 2f64667ac805..df8f2c8adb2b 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -75,6 +75,7 @@ struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t
75 shmem = to_drm_gem_shmem_obj(obj); 75 shmem = to_drm_gem_shmem_obj(obj);
76 mutex_init(&shmem->pages_lock); 76 mutex_init(&shmem->pages_lock);
77 mutex_init(&shmem->vmap_lock); 77 mutex_init(&shmem->vmap_lock);
78 INIT_LIST_HEAD(&shmem->madv_list);
78 79
79 /* 80 /*
80 * Our buffers are kept pinned, so allocating them 81 * Our buffers are kept pinned, so allocating them
@@ -118,11 +119,11 @@ void drm_gem_shmem_free_object(struct drm_gem_object *obj)
118 if (shmem->sgt) { 119 if (shmem->sgt) {
119 dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl, 120 dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
120 shmem->sgt->nents, DMA_BIDIRECTIONAL); 121 shmem->sgt->nents, DMA_BIDIRECTIONAL);
121
122 drm_gem_shmem_put_pages(shmem);
123 sg_free_table(shmem->sgt); 122 sg_free_table(shmem->sgt);
124 kfree(shmem->sgt); 123 kfree(shmem->sgt);
125 } 124 }
125 if (shmem->pages)
126 drm_gem_shmem_put_pages(shmem);
126 } 127 }
127 128
128 WARN_ON(shmem->pages_use_count); 129 WARN_ON(shmem->pages_use_count);
@@ -362,6 +363,62 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
362} 363}
363EXPORT_SYMBOL(drm_gem_shmem_create_with_handle); 364EXPORT_SYMBOL(drm_gem_shmem_create_with_handle);
364 365
366/* Update madvise status, returns true if not purged, else
367 * false or -errno.
368 */
369int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv)
370{
371 struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
372
373 mutex_lock(&shmem->pages_lock);
374
375 if (shmem->madv >= 0)
376 shmem->madv = madv;
377
378 madv = shmem->madv;
379
380 mutex_unlock(&shmem->pages_lock);
381
382 return (madv >= 0);
383}
384EXPORT_SYMBOL(drm_gem_shmem_madvise);
385
386void drm_gem_shmem_purge_locked(struct drm_gem_object *obj)
387{
388 struct drm_device *dev = obj->dev;
389 struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
390
391 WARN_ON(!drm_gem_shmem_is_purgeable(shmem));
392
393 drm_gem_shmem_put_pages_locked(shmem);
394
395 shmem->madv = -1;
396
397 drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
398 drm_gem_free_mmap_offset(obj);
399
400 /* Our goal here is to return as much of the memory as
401 * is possible back to the system as we are called from OOM.
402 * To do this we must instruct the shmfs to drop all of its
403 * backing pages, *now*.
404 */
405 shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
406
407 invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
408 0, (loff_t)-1);
409}
410EXPORT_SYMBOL(drm_gem_shmem_purge_locked);
411
412void drm_gem_shmem_purge(struct drm_gem_object *obj)
413{
414 struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
415
416 mutex_lock(&shmem->pages_lock);
417 drm_gem_shmem_purge_locked(obj);
418 mutex_unlock(&shmem->pages_lock);
419}
420EXPORT_SYMBOL(drm_gem_shmem_purge);
421
365/** 422/**
366 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object 423 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
367 * @file: DRM file structure to create the dumb buffer for 424 * @file: DRM file structure to create the dumb buffer for
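The new madvise/purge helpers are meant to be driven from a driver-specific madvise ioctl plus a shrinker, which is how panfrost uses them later in this series. A hedged sketch of that wiring, assuming a hypothetical example_device with its own shrinker list and uapi struct, the drm_gem_shmem_is_purgeable() check from the matching header change, and with all locking elided:

static int example_ioctl_madvise(struct drm_device *dev, void *data,
                                 struct drm_file *file)
{
        struct drm_example_madvise *args = data;        /* hypothetical uapi */
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(file, args->handle);
        if (!obj)
                return -ENOENT;

        /* Record whether userspace still needs the pages; report back
         * whether the object has already been purged in the meantime. */
        args->retained = drm_gem_shmem_madvise(obj, args->madv);

        drm_gem_object_put_unlocked(obj);
        return 0;
}

static unsigned long example_shrinker_scan(struct shrinker *shrinker,
                                           struct shrink_control *sc)
{
        struct example_device *edev =
                container_of(shrinker, struct example_device, shrinker);
        struct drm_gem_shmem_object *shmem;
        unsigned long freed = 0;

        /* List and object locking omitted for brevity. */
        list_for_each_entry(shmem, &edev->shrinker_list, madv_list) {
                if (freed >= sc->nr_to_scan)
                        break;
                if (drm_gem_shmem_is_purgeable(shmem)) {
                        drm_gem_shmem_purge(&shmem->base);
                        freed += shmem->base.size >> PAGE_SHIFT;
                }
        }

        return freed;
}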
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index dbd5b873e8f2..6b0bf42039cf 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -123,18 +123,110 @@ EXPORT_SYMBOL(drm_panel_attach);
123 * 123 *
124 * This function should not be called by the panel device itself. It 124 * This function should not be called by the panel device itself. It
125 * is only for the drm device that called drm_panel_attach(). 125 * is only for the drm device that called drm_panel_attach().
126 *
127 * Return: 0 on success or a negative error code on failure.
128 */ 126 */
129int drm_panel_detach(struct drm_panel *panel) 127void drm_panel_detach(struct drm_panel *panel)
130{ 128{
131 panel->connector = NULL; 129 panel->connector = NULL;
132 panel->drm = NULL; 130 panel->drm = NULL;
133
134 return 0;
135} 131}
136EXPORT_SYMBOL(drm_panel_detach); 132EXPORT_SYMBOL(drm_panel_detach);
137 133
134/**
135 * drm_panel_prepare - power on a panel
136 * @panel: DRM panel
137 *
138 * Calling this function will enable power and deassert any reset signals to
139 * the panel. After this has completed it is possible to communicate with any
140 * integrated circuitry via a command bus.
141 *
142 * Return: 0 on success or a negative error code on failure.
143 */
144int drm_panel_prepare(struct drm_panel *panel)
145{
146 if (panel && panel->funcs && panel->funcs->prepare)
147 return panel->funcs->prepare(panel);
148
149 return panel ? -ENOSYS : -EINVAL;
150}
151EXPORT_SYMBOL(drm_panel_prepare);
152
153/**
154 * drm_panel_unprepare - power off a panel
155 * @panel: DRM panel
156 *
157 * Calling this function will completely power off a panel (assert the panel's
158 * reset, turn off power supplies, ...). After this function has completed, it
159 * is usually no longer possible to communicate with the panel until another
160 * call to drm_panel_prepare().
161 *
162 * Return: 0 on success or a negative error code on failure.
163 */
164int drm_panel_unprepare(struct drm_panel *panel)
165{
166 if (panel && panel->funcs && panel->funcs->unprepare)
167 return panel->funcs->unprepare(panel);
168
169 return panel ? -ENOSYS : -EINVAL;
170}
171EXPORT_SYMBOL(drm_panel_unprepare);
172
173/**
174 * drm_panel_enable - enable a panel
175 * @panel: DRM panel
176 *
177 * Calling this function will cause the panel display drivers to be turned on
178 * and the backlight to be enabled. Content will be visible on screen after
179 * this call completes.
180 *
181 * Return: 0 on success or a negative error code on failure.
182 */
183int drm_panel_enable(struct drm_panel *panel)
184{
185 if (panel && panel->funcs && panel->funcs->enable)
186 return panel->funcs->enable(panel);
187
188 return panel ? -ENOSYS : -EINVAL;
189}
190EXPORT_SYMBOL(drm_panel_enable);
191
192/**
193 * drm_panel_disable - disable a panel
194 * @panel: DRM panel
195 *
196 * This will typically turn off the panel's backlight or disable the display
197 * drivers. For smart panels it should still be possible to communicate with
198 * the integrated circuitry via any command bus after this call.
199 *
200 * Return: 0 on success or a negative error code on failure.
201 */
202int drm_panel_disable(struct drm_panel *panel)
203{
204 if (panel && panel->funcs && panel->funcs->disable)
205 return panel->funcs->disable(panel);
206
207 return panel ? -ENOSYS : -EINVAL;
208}
209EXPORT_SYMBOL(drm_panel_disable);
210
211/**
212 * drm_panel_get_modes - probe the available display modes of a panel
213 * @panel: DRM panel
214 *
215 * The modes probed from the panel are automatically added to the connector
216 * that the panel is attached to.
217 *
218 * Return: The number of modes available from the panel on success or a
219 * negative error code on failure.
220 */
221int drm_panel_get_modes(struct drm_panel *panel)
222{
223 if (panel && panel->funcs && panel->funcs->get_modes)
224 return panel->funcs->get_modes(panel);
225
226 return panel ? -ENOSYS : -EINVAL;
227}
228EXPORT_SYMBOL(drm_panel_get_modes);
229
138#ifdef CONFIG_OF 230#ifdef CONFIG_OF
139/** 231/**
140 * of_drm_find_panel - look up a panel using a device tree node 232 * of_drm_find_panel - look up a panel using a device tree node
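Taken together, these wrappers let display drivers stop reaching into panel->funcs directly, as the tc358767 conversion above and the fsl-dcu conversion below do for get_modes. A minimal sketch of the intended call pattern, assuming hypothetical example_* driver structures and to_example_*() casts:

#include <drm/drm_panel.h>

static int example_connector_get_modes(struct drm_connector *connector)
{
        struct example_connector *ec = to_example_connector(connector);

        /* NULL panel or a missing hook is handled inside the helper. */
        return drm_panel_get_modes(ec->panel);
}

static void example_encoder_enable(struct drm_encoder *encoder)
{
        struct example_encoder *ee = to_example_encoder(encoder);

        drm_panel_prepare(ee->panel);   /* power rails on, reset deasserted */
        drm_panel_enable(ee->panel);    /* display and backlight on */
}

static void example_encoder_disable(struct drm_encoder *encoder)
{
        struct example_encoder *ee = to_example_encoder(encoder);

        drm_panel_disable(ee->panel);
        drm_panel_unprepare(ee->panel);
}

The helpers return -EINVAL for a NULL panel and -ENOSYS when the corresponding hook is not implemented, so callers can treat them as best-effort.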
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 1438dcb3ebb1..4b5c7b0ed714 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -29,21 +29,97 @@
29/** 29/**
30 * DOC: Overview 30 * DOC: Overview
31 * 31 *
32 * DRM synchronisation objects (syncobj, see struct &drm_syncobj) are 32 * DRM synchronisation objects (syncobj, see struct &drm_syncobj) provide a
33 * persistent objects that contain an optional fence. The fence can be updated 33 * container for a synchronization primitive which can be used by userspace
34 * with a new fence, or be NULL. 34 * to explicitly synchronize GPU commands, can be shared between userspace
35 * processes, and can be shared between different DRM drivers.
36 * Their primary use-case is to implement Vulkan fences and semaphores.
37 * The syncobj userspace API provides ioctls for several operations:
35 * 38 *
36 * syncobj's can be waited upon, where it will wait for the underlying 39 * - Creation and destruction of syncobjs
37 * fence. 40 * - Import and export of syncobjs to/from a syncobj file descriptor
41 * - Import and export a syncobj's underlying fence to/from a sync file
42 * - Reset a syncobj (set its fence to NULL)
43 * - Signal a syncobj (set a trivially signaled fence)
44 * - Wait for a syncobj's fence to appear and be signaled
38 * 45 *
39 * syncobj's can be export to fd's and back, these fd's are opaque and 46 * At its core, a syncobj is simply a wrapper around a pointer to a struct
40 * have no other use case, except passing the syncobj between processes. 47 * &dma_fence which may be NULL.
48 * When a syncobj is first created, its pointer is either NULL or a pointer
49 * to an already signaled fence depending on whether the
50 * &DRM_SYNCOBJ_CREATE_SIGNALED flag is passed to
51 * &DRM_IOCTL_SYNCOBJ_CREATE.
52 * When GPU work which signals a syncobj is enqueued in a DRM driver,
53 * the syncobj fence is replaced with a fence which will be signaled by the
54 * completion of that work.
55 * When GPU work which waits on a syncobj is enqueued in a DRM driver, the
56 * driver retrieves syncobj's current fence at the time the work is enqueued and
57 * waits on that fence before submitting the work to hardware.
58 * If the syncobj's fence is NULL, the enqueue operation is expected to fail.
59 * All manipulation of the syncobj's fence happens in terms of the current
60 * fence at the time the ioctl is called by userspace regardless of whether
61 * that operation is an immediate host-side operation (signal or reset) or
62 * an operation which is enqueued in some driver queue.
63 * &DRM_IOCTL_SYNCOBJ_RESET and &DRM_IOCTL_SYNCOBJ_SIGNAL can be used to
64 * manipulate a syncobj from the host by resetting its pointer to NULL or
65 * setting its pointer to a fence which is already signaled.
41 * 66 *
42 * Their primary use-case is to implement Vulkan fences and semaphores.
43 * 67 *
44 * syncobj have a kref reference count, but also have an optional file. 68 * Host-side wait on syncobjs
45 * The file is only created once the syncobj is exported. 69 * --------------------------
46 * The file takes a reference on the kref. 70 *
71 * &DRM_IOCTL_SYNCOBJ_WAIT takes an array of syncobj handles and does a
72 * host-side wait on all of the syncobj fences simultaneously.
73 * If &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL is set, the wait ioctl will wait on
74 * all of the syncobj fences to be signaled before it returns.
75 * Otherwise, it returns once at least one syncobj fence has been signaled
76 * and the index of a signaled fence is written back to the client.
77 *
78 * Unlike the enqueued GPU work dependencies which fail if they see a NULL
79 * fence in a syncobj, if &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT is set,
80 * the host-side wait will first wait for the syncobj to receive a non-NULL
81 * fence and then wait on that fence.
82 * If &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT is not set and any one of the
83 * syncobjs in the array has a NULL fence, -EINVAL will be returned.
84 * Assuming the syncobj starts off with a NULL fence, this allows a client
85 * to do a host wait in one thread (or process) which waits on GPU work
86 * submitted in another thread (or process) without having to manually
87 * synchronize between the two.
88 * This requirement is inherited from the Vulkan fence API.
89 *
90 *
91 * Import/export of syncobjs
92 * -------------------------
93 *
94 * &DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE and &DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD
95 * provide two mechanisms for import/export of syncobjs.
96 *
97 * The first lets the client import or export an entire syncobj to a file
98 * descriptor.
99 * These fd's are opaque and have no other use case, except passing the
100 * syncobj between processes.
101 * All exported file descriptors and any syncobj handles created as a
102 * result of importing those file descriptors own a reference to the
103 * same underlying struct &drm_syncobj and the syncobj can be used
104 * persistently across all the processes with which it is shared.
105 * The syncobj is freed only once the last reference is dropped.
106 * Unlike dma-buf, importing a syncobj creates a new handle (with its own
107 * reference) for every import instead of de-duplicating.
108 * The primary use-case of this persistent import/export is for shared
109 * Vulkan fences and semaphores.
110 *
111 * The second import/export mechanism, which is indicated by
112 * &DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE or
113 * &DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE lets the client
114 * import/export the syncobj's current fence from/to a &sync_file.
115 * When a syncobj is exported to a sync file, that sync file wraps the
116 * syncobj's fence at the time of export and any later signal or reset
117 * operations on the syncobj will not affect the exported sync file.
118 * When a sync file is imported into a syncobj, the syncobj's fence is set
119 * to the fence wrapped by that sync file.
120 * Because sync files are immutable, resetting or signaling the syncobj
121 * will not affect any sync files whose fences have been imported into the
122 * syncobj.
47 */ 123 */
48 124
49#include <linux/anon_inodes.h> 125#include <linux/anon_inodes.h>
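The host-side wait described above maps directly onto the libdrm wrappers. A minimal userspace sketch, assuming the drmSyncobjCreate()/drmSyncobjWait()/drmSyncobjDestroy() wrappers from xf86drm.h and a hypothetical render node path; some other thread or process would normally attach and signal the fence:

/* build: cc syncobj-wait.c $(pkg-config --cflags --libs libdrm) */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <xf86drm.h>

int main(void)
{
        int fd = open("/dev/dri/renderD128", O_RDWR);   /* hypothetical node */
        uint32_t handle, first;
        int ret;

        if (fd < 0)
                return 1;

        /* Freshly created syncobj: its fence pointer starts out NULL. */
        if (drmSyncobjCreate(fd, 0, &handle))
                return 1;

        /*
         * WAIT_FOR_SUBMIT: first wait for a fence to be attached to the
         * syncobj, then wait for that fence to signal (or time out).
         */
        ret = drmSyncobjWait(fd, &handle, 1, INT64_MAX,
                             DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT, &first);
        printf("wait returned %d, first signaled index %u\n", ret, first);

        drmSyncobjDestroy(fd, handle);
        close(fd);
        return 0;
}

Without DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT the same wait would fail with -EINVAL while the syncobj's fence is still NULL, matching the semantics spelled out above.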
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 17ca602db60a..7d83e04ec36e 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -397,13 +397,13 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
397 } 397 }
398 398
399 if (op & ETNA_PREP_NOSYNC) { 399 if (op & ETNA_PREP_NOSYNC) {
400 if (!reservation_object_test_signaled_rcu(obj->resv, 400 if (!dma_resv_test_signaled_rcu(obj->resv,
401 write)) 401 write))
402 return -EBUSY; 402 return -EBUSY;
403 } else { 403 } else {
404 unsigned long remain = etnaviv_timeout_to_jiffies(timeout); 404 unsigned long remain = etnaviv_timeout_to_jiffies(timeout);
405 405
406 ret = reservation_object_wait_timeout_rcu(obj->resv, 406 ret = dma_resv_wait_timeout_rcu(obj->resv,
407 write, true, remain); 407 write, true, remain);
408 if (ret <= 0) 408 if (ret <= 0)
409 return ret == 0 ? -ETIMEDOUT : ret; 409 return ret == 0 ? -ETIMEDOUT : ret;
@@ -459,8 +459,8 @@ static void etnaviv_gem_describe_fence(struct dma_fence *fence,
459static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m) 459static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
460{ 460{
461 struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); 461 struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
462 struct reservation_object *robj = obj->resv; 462 struct dma_resv *robj = obj->resv;
463 struct reservation_object_list *fobj; 463 struct dma_resv_list *fobj;
464 struct dma_fence *fence; 464 struct dma_fence *fence;
465 unsigned long off = drm_vma_node_start(&obj->vma_node); 465 unsigned long off = drm_vma_node_start(&obj->vma_node);
466 466
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index fcd5d71b502f..28379e4df253 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -6,7 +6,7 @@
6#ifndef __ETNAVIV_GEM_H__ 6#ifndef __ETNAVIV_GEM_H__
7#define __ETNAVIV_GEM_H__ 7#define __ETNAVIV_GEM_H__
8 8
9#include <linux/reservation.h> 9#include <linux/dma-resv.h>
10#include "etnaviv_cmdbuf.h" 10#include "etnaviv_cmdbuf.h"
11#include "etnaviv_drv.h" 11#include "etnaviv_drv.h"
12 12
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 1a636469eeda..998c96b40d8a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -4,7 +4,7 @@
4 */ 4 */
5 5
6#include <linux/dma-fence-array.h> 6#include <linux/dma-fence-array.h>
7#include <linux/reservation.h> 7#include <linux/dma-resv.h>
8#include <linux/sync_file.h> 8#include <linux/sync_file.h>
9#include "etnaviv_cmdbuf.h" 9#include "etnaviv_cmdbuf.h"
10#include "etnaviv_drv.h" 10#include "etnaviv_drv.h"
@@ -165,10 +165,10 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
165 165
166 for (i = 0; i < submit->nr_bos; i++) { 166 for (i = 0; i < submit->nr_bos; i++) {
167 struct etnaviv_gem_submit_bo *bo = &submit->bos[i]; 167 struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
168 struct reservation_object *robj = bo->obj->base.resv; 168 struct dma_resv *robj = bo->obj->base.resv;
169 169
170 if (!(bo->flags & ETNA_SUBMIT_BO_WRITE)) { 170 if (!(bo->flags & ETNA_SUBMIT_BO_WRITE)) {
171 ret = reservation_object_reserve_shared(robj, 1); 171 ret = dma_resv_reserve_shared(robj, 1);
172 if (ret) 172 if (ret)
173 return ret; 173 return ret;
174 } 174 }
@@ -177,13 +177,13 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
177 continue; 177 continue;
178 178
179 if (bo->flags & ETNA_SUBMIT_BO_WRITE) { 179 if (bo->flags & ETNA_SUBMIT_BO_WRITE) {
180 ret = reservation_object_get_fences_rcu(robj, &bo->excl, 180 ret = dma_resv_get_fences_rcu(robj, &bo->excl,
181 &bo->nr_shared, 181 &bo->nr_shared,
182 &bo->shared); 182 &bo->shared);
183 if (ret) 183 if (ret)
184 return ret; 184 return ret;
185 } else { 185 } else {
186 bo->excl = reservation_object_get_excl_rcu(robj); 186 bo->excl = dma_resv_get_excl_rcu(robj);
187 } 187 }
188 188
189 } 189 }
@@ -199,10 +199,10 @@ static void submit_attach_object_fences(struct etnaviv_gem_submit *submit)
199 struct drm_gem_object *obj = &submit->bos[i].obj->base; 199 struct drm_gem_object *obj = &submit->bos[i].obj->base;
200 200
201 if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE) 201 if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
202 reservation_object_add_excl_fence(obj->resv, 202 dma_resv_add_excl_fence(obj->resv,
203 submit->out_fence); 203 submit->out_fence);
204 else 204 else
205 reservation_object_add_shared_fence(obj->resv, 205 dma_resv_add_shared_fence(obj->resv,
206 submit->out_fence); 206 submit->out_fence);
207 207
208 submit_unlock_object(submit, i); 208 submit_unlock_object(submit, i);
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index 279d83eaffc0..a92fd6c70b09 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -65,17 +65,9 @@ static const struct drm_connector_funcs fsl_dcu_drm_connector_funcs = {
65static int fsl_dcu_drm_connector_get_modes(struct drm_connector *connector) 65static int fsl_dcu_drm_connector_get_modes(struct drm_connector *connector)
66{ 66{
67 struct fsl_dcu_drm_connector *fsl_connector; 67 struct fsl_dcu_drm_connector *fsl_connector;
68 int (*get_modes)(struct drm_panel *panel);
69 int num_modes = 0;
70 68
71 fsl_connector = to_fsl_dcu_connector(connector); 69 fsl_connector = to_fsl_dcu_connector(connector);
72 if (fsl_connector->panel && fsl_connector->panel->funcs && 70 return drm_panel_get_modes(fsl_connector->panel);
73 fsl_connector->panel->funcs->get_modes) {
74 get_modes = fsl_connector->panel->funcs->get_modes;
75 num_modes = get_modes(fsl_connector->panel);
76 }
77
78 return num_modes;
79} 71}
80 72
81static int fsl_dcu_drm_connector_mode_valid(struct drm_connector *connector, 73static int fsl_dcu_drm_connector_mode_valid(struct drm_connector *connector,
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 61e042918a7f..84c6d4c91c65 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -13,10 +13,10 @@
13#include <sound/asoundef.h> 13#include <sound/asoundef.h>
14#include <sound/hdmi-codec.h> 14#include <sound/hdmi-codec.h>
15 15
16#include <drm/drmP.h>
17#include <drm/drm_atomic_helper.h> 16#include <drm/drm_atomic_helper.h>
18#include <drm/drm_edid.h> 17#include <drm/drm_edid.h>
19#include <drm/drm_of.h> 18#include <drm/drm_of.h>
19#include <drm/drm_print.h>
20#include <drm/drm_probe_helper.h> 20#include <drm/drm_probe_helper.h>
21#include <drm/i2c/tda998x.h> 21#include <drm/i2c/tda998x.h>
22 22
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 9e4ee29fd0fc..29a9ecf66efc 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -29,7 +29,7 @@
29#include <linux/intel-iommu.h> 29#include <linux/intel-iommu.h>
30#include <linux/kernel.h> 30#include <linux/kernel.h>
31#include <linux/module.h> 31#include <linux/module.h>
32#include <linux/reservation.h> 32#include <linux/dma-resv.h>
33#include <linux/slab.h> 33#include <linux/slab.h>
34#include <linux/vgaarb.h> 34#include <linux/vgaarb.h>
35 35
@@ -14431,7 +14431,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
14431 if (ret < 0) 14431 if (ret < 0)
14432 return ret; 14432 return ret;
14433 14433
14434 fence = reservation_object_get_excl_rcu(obj->base.resv); 14434 fence = dma_resv_get_excl_rcu(obj->base.resv);
14435 if (fence) { 14435 if (fence) {
14436 add_rps_boost_after_vblank(new_state->crtc, fence); 14436 add_rps_boost_after_vblank(new_state->crtc, fence);
14437 dma_fence_put(fence); 14437 dma_fence_put(fence);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
index 6ad93a09968c..3d4f5775a4ba 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
@@ -82,7 +82,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
82{ 82{
83 struct drm_i915_gem_busy *args = data; 83 struct drm_i915_gem_busy *args = data;
84 struct drm_i915_gem_object *obj; 84 struct drm_i915_gem_object *obj;
85 struct reservation_object_list *list; 85 struct dma_resv_list *list;
86 unsigned int seq; 86 unsigned int seq;
87 int err; 87 int err;
88 88
@@ -105,7 +105,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
105 * Alternatively, we can trade that extra information on read/write 105 * Alternatively, we can trade that extra information on read/write
106 * activity with 106 * activity with
107 * args->busy = 107 * args->busy =
108 * !reservation_object_test_signaled_rcu(obj->resv, true); 108 * !dma_resv_test_signaled_rcu(obj->resv, true);
109 * to report the overall busyness. This is what the wait-ioctl does. 109 * to report the overall busyness. This is what the wait-ioctl does.
110 * 110 *
111 */ 111 */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
index 5295285d5843..88ee8ca7967f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
@@ -147,7 +147,7 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
147 true, I915_FENCE_TIMEOUT, 147 true, I915_FENCE_TIMEOUT,
148 I915_FENCE_GFP); 148 I915_FENCE_GFP);
149 149
150 reservation_object_add_excl_fence(obj->base.resv, 150 dma_resv_add_excl_fence(obj->base.resv,
151 &clflush->dma); 151 &clflush->dma);
152 152
153 i915_sw_fence_commit(&clflush->wait); 153 i915_sw_fence_commit(&clflush->wait);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
index 2312a0c6af89..4714047f77e1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
@@ -287,7 +287,7 @@ int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
287 if (err < 0) { 287 if (err < 0) {
288 dma_fence_set_error(&work->dma, err); 288 dma_fence_set_error(&work->dma, err);
289 } else { 289 } else {
290 reservation_object_add_excl_fence(obj->base.resv, &work->dma); 290 dma_resv_add_excl_fence(obj->base.resv, &work->dma);
291 err = 0; 291 err = 0;
292 } 292 }
293 i915_gem_object_unlock(obj); 293 i915_gem_object_unlock(obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 570b20ad9e58..96ce95c8ac5a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -6,7 +6,7 @@
6 6
7#include <linux/dma-buf.h> 7#include <linux/dma-buf.h>
8#include <linux/highmem.h> 8#include <linux/highmem.h>
9#include <linux/reservation.h> 9#include <linux/dma-resv.h>
10 10
11#include "i915_drv.h" 11#include "i915_drv.h"
12#include "i915_gem_object.h" 12#include "i915_gem_object.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index cbd7c6e3a1f8..924e4a26f2b7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -5,7 +5,7 @@
5 */ 5 */
6 6
7#include <linux/intel-iommu.h> 7#include <linux/intel-iommu.h>
8#include <linux/reservation.h> 8#include <linux/dma-resv.h>
9#include <linux/sync_file.h> 9#include <linux/sync_file.h>
10#include <linux/uaccess.h> 10#include <linux/uaccess.h>
11 11
@@ -1242,7 +1242,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
1242 goto skip_request; 1242 goto skip_request;
1243 1243
1244 i915_vma_lock(batch); 1244 i915_vma_lock(batch);
1245 GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true)); 1245 GEM_BUG_ON(!dma_resv_test_signaled_rcu(batch->resv, true));
1246 err = i915_vma_move_to_active(batch, rq, 0); 1246 err = i915_vma_move_to_active(batch, rq, 0);
1247 i915_vma_unlock(batch); 1247 i915_vma_unlock(batch);
1248 if (err) 1248 if (err)
@@ -1313,7 +1313,7 @@ relocate_entry(struct i915_vma *vma,
1313 1313
1314 if (!eb->reloc_cache.vaddr && 1314 if (!eb->reloc_cache.vaddr &&
1315 (DBG_FORCE_RELOC == FORCE_GPU_RELOC || 1315 (DBG_FORCE_RELOC == FORCE_GPU_RELOC ||
1316 !reservation_object_test_signaled_rcu(vma->resv, true))) { 1316 !dma_resv_test_signaled_rcu(vma->resv, true))) {
1317 const unsigned int gen = eb->reloc_cache.gen; 1317 const unsigned int gen = eb->reloc_cache.gen;
1318 unsigned int len; 1318 unsigned int len;
1319 u32 *batch; 1319 u32 *batch;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_fence.c b/drivers/gpu/drm/i915/gem/i915_gem_fence.c
index cf0439e6be83..5496f33a9064 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_fence.c
@@ -78,7 +78,7 @@ i915_gem_object_lock_fence(struct drm_i915_gem_object *obj)
78 I915_FENCE_GFP) < 0) 78 I915_FENCE_GFP) < 0)
79 goto err; 79 goto err;
80 80
81 reservation_object_add_excl_fence(obj->base.resv, &stub->dma); 81 dma_resv_add_excl_fence(obj->base.resv, &stub->dma);
82 82
83 return &stub->dma; 83 return &stub->dma;
84 84
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index d5197a2a106f..afd75b85da1d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -152,7 +152,7 @@ static void __i915_gem_free_object_rcu(struct rcu_head *head)
152 container_of(head, typeof(*obj), rcu); 152 container_of(head, typeof(*obj), rcu);
153 struct drm_i915_private *i915 = to_i915(obj->base.dev); 153 struct drm_i915_private *i915 = to_i915(obj->base.dev);
154 154
155 reservation_object_fini(&obj->base._resv); 155 dma_resv_fini(&obj->base._resv);
156 i915_gem_object_free(obj); 156 i915_gem_object_free(obj);
157 157
158 GEM_BUG_ON(!atomic_read(&i915->mm.free_count)); 158 GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 67aea07ea019..69ad38949141 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -99,22 +99,22 @@ i915_gem_object_put(struct drm_i915_gem_object *obj)
99 __drm_gem_object_put(&obj->base); 99 __drm_gem_object_put(&obj->base);
100} 100}
101 101
102#define assert_object_held(obj) reservation_object_assert_held((obj)->base.resv) 102#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)
103 103
104static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj) 104static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
105{ 105{
106 reservation_object_lock(obj->base.resv, NULL); 106 dma_resv_lock(obj->base.resv, NULL);
107} 107}
108 108
109static inline int 109static inline int
110i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj) 110i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj)
111{ 111{
112 return reservation_object_lock_interruptible(obj->base.resv, NULL); 112 return dma_resv_lock_interruptible(obj->base.resv, NULL);
113} 113}
114 114
115static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj) 115static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
116{ 116{
117 reservation_object_unlock(obj->base.resv); 117 dma_resv_unlock(obj->base.resv);
118} 118}
119 119
120struct dma_fence * 120struct dma_fence *
@@ -367,7 +367,7 @@ i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
367 struct dma_fence *fence; 367 struct dma_fence *fence;
368 368
369 rcu_read_lock(); 369 rcu_read_lock();
370 fence = reservation_object_get_excl_rcu(obj->base.resv); 370 fence = dma_resv_get_excl_rcu(obj->base.resv);
371 rcu_read_unlock(); 371 rcu_read_unlock();
372 372
373 if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence)) 373 if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
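The i915 object-lock wrappers now forward straight to the dma_resv ww_mutex. A small sketch of the lock/update/unlock pairing those wrappers map onto; the update in the middle is illustrative, and passing a NULL ww_acquire_ctx means no multi-object deadlock avoidance is in play, matching the wrappers above:

#include <linux/dma-resv.h>
#include <linux/dma-fence.h>

/* Sketch of what i915_gem_object_lock()/unlock() now expand to. */
static void example_set_excl_fence(struct dma_resv *resv,
				   struct dma_fence *fence)
{
	dma_resv_lock(resv, NULL);
	dma_resv_add_excl_fence(resv, fence);	/* requires the resv lock */
	dma_resv_unlock(resv);
}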
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index fa46a54bcbe7..8af55cd3e690 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -31,7 +31,7 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
31} 31}
32 32
33static long 33static long
34i915_gem_object_wait_reservation(struct reservation_object *resv, 34i915_gem_object_wait_reservation(struct dma_resv *resv,
35 unsigned int flags, 35 unsigned int flags,
36 long timeout) 36 long timeout)
37{ 37{
@@ -43,7 +43,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
43 unsigned int count, i; 43 unsigned int count, i;
44 int ret; 44 int ret;
45 45
46 ret = reservation_object_get_fences_rcu(resv, 46 ret = dma_resv_get_fences_rcu(resv,
47 &excl, &count, &shared); 47 &excl, &count, &shared);
48 if (ret) 48 if (ret)
49 return ret; 49 return ret;
@@ -72,7 +72,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
72 */ 72 */
73 prune_fences = count && timeout >= 0; 73 prune_fences = count && timeout >= 0;
74 } else { 74 } else {
75 excl = reservation_object_get_excl_rcu(resv); 75 excl = dma_resv_get_excl_rcu(resv);
76 } 76 }
77 77
78 if (excl && timeout >= 0) 78 if (excl && timeout >= 0)
@@ -84,10 +84,10 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
84 * Opportunistically prune the fences iff we know they have *all* been 84 * Opportunistically prune the fences iff we know they have *all* been
85 * signaled. 85 * signaled.
86 */ 86 */
87 if (prune_fences && reservation_object_trylock(resv)) { 87 if (prune_fences && dma_resv_trylock(resv)) {
88 if (reservation_object_test_signaled_rcu(resv, true)) 88 if (dma_resv_test_signaled_rcu(resv, true))
89 reservation_object_add_excl_fence(resv, NULL); 89 dma_resv_add_excl_fence(resv, NULL);
90 reservation_object_unlock(resv); 90 dma_resv_unlock(resv);
91 } 91 }
92 92
93 return timeout; 93 return timeout;
@@ -140,7 +140,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
140 unsigned int count, i; 140 unsigned int count, i;
141 int ret; 141 int ret;
142 142
143 ret = reservation_object_get_fences_rcu(obj->base.resv, 143 ret = dma_resv_get_fences_rcu(obj->base.resv,
144 &excl, &count, &shared); 144 &excl, &count, &shared);
145 if (ret) 145 if (ret)
146 return ret; 146 return ret;
@@ -152,7 +152,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
152 152
153 kfree(shared); 153 kfree(shared);
154 } else { 154 } else {
155 excl = reservation_object_get_excl_rcu(obj->base.resv); 155 excl = dma_resv_get_excl_rcu(obj->base.resv);
156 } 156 }
157 157
158 if (excl) { 158 if (excl) {
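dma_resv_get_fences_rcu() (formerly reservation_object_get_fences_rcu()) snapshots the exclusive fence and the shared-fence array under RCU so they can be processed without holding the reservation lock. A reduced sketch of the same pattern, with an illustrative wait loop around the real calls; the i915 version above also prunes signaled fences and handles timeouts, which is dropped here:

#include <linux/dma-resv.h>
#include <linux/dma-fence.h>
#include <linux/slab.h>

static int example_wait_all_fences(struct dma_resv *resv)
{
	struct dma_fence *excl;
	struct dma_fence **shared;
	unsigned int count, i;
	int ret;

	/* Copies fence references; no lock is held while we wait below. */
	ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
	if (ret)
		return ret;

	for (i = 0; i < count; i++) {
		dma_fence_wait(shared[i], false);
		dma_fence_put(shared[i]);
	}
	kfree(shared);

	if (excl) {
		dma_fence_wait(excl, false);
		dma_fence_put(excl);
	}

	return 0;
}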
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index c092bdf5f0bf..ea56b2cc6095 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -112,18 +112,18 @@ __dma_fence_signal__timestamp(struct dma_fence *fence, ktime_t timestamp)
 }
 
 static void
-__dma_fence_signal__notify(struct dma_fence *fence)
+__dma_fence_signal__notify(struct dma_fence *fence,
+			   const struct list_head *list)
 {
 	struct dma_fence_cb *cur, *tmp;
 
 	lockdep_assert_held(fence->lock);
 	lockdep_assert_irqs_disabled();
 
-	list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
+	list_for_each_entry_safe(cur, tmp, list, node) {
 		INIT_LIST_HEAD(&cur->node);
 		cur->func(fence, cur);
 	}
-	INIT_LIST_HEAD(&fence->cb_list);
 }
 
 void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
@@ -185,11 +185,12 @@ void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
 	list_for_each_safe(pos, next, &signal) {
 		struct i915_request *rq =
 			list_entry(pos, typeof(*rq), signal_link);
-
-		__dma_fence_signal__timestamp(&rq->fence, timestamp);
+		struct list_head cb_list;
 
 		spin_lock(&rq->lock);
-		__dma_fence_signal__notify(&rq->fence);
+		list_replace(&rq->fence.cb_list, &cb_list);
+		__dma_fence_signal__timestamp(&rq->fence, timestamp);
+		__dma_fence_signal__notify(&rq->fence, &cb_list);
 		spin_unlock(&rq->lock);
 
 		i915_request_put(rq);
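The notify helper now walks a caller-supplied list: the request's cb_list is detached with list_replace() under the fence lock, which frees that storage for the timestamp (dma_fence keeps cb_list and timestamp in a union after this series) and lets the callbacks be walked from the private copy. A hedged sketch of that snapshot-then-notify shape, not the i915 code itself, which splits it across helpers and manages its own signal list:

#include <linux/dma-fence.h>
#include <linux/list.h>
#include <linux/spinlock.h>

static void example_fence_signal(struct dma_fence *fence, ktime_t timestamp)
{
	struct dma_fence_cb *cur, *tmp;
	struct list_head cb_list;
	unsigned long flags;

	spin_lock_irqsave(fence->lock, flags);
	set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags);
	/* Detach the callbacks; the old list head may now be reused, since
	 * it shares storage with the timestamp.
	 */
	list_replace(&fence->cb_list, &cb_list);
	fence->timestamp = timestamp;
	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
	spin_unlock_irqrestore(fence->lock, flags);

	/* Run the callbacks from the private copy. */
	list_for_each_entry_safe(cur, tmp, &cb_list, node) {
		INIT_LIST_HEAD(&cur->node);
		cur->func(fence, cur);
	}
}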
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2e13ecc9cbb6..681ab0aea70e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -43,7 +43,7 @@
43#include <linux/mm_types.h> 43#include <linux/mm_types.h>
44#include <linux/perf_event.h> 44#include <linux/perf_event.h>
45#include <linux/pm_qos.h> 45#include <linux/pm_qos.h>
46#include <linux/reservation.h> 46#include <linux/dma-resv.h>
47#include <linux/shmem_fs.h> 47#include <linux/shmem_fs.h>
48#include <linux/stackdepot.h> 48#include <linux/stackdepot.h>
49 49
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 65863e955f40..f6e66a7f4bf0 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -29,7 +29,7 @@
29#include <drm/i915_drm.h> 29#include <drm/i915_drm.h>
30#include <linux/dma-fence-array.h> 30#include <linux/dma-fence-array.h>
31#include <linux/kthread.h> 31#include <linux/kthread.h>
32#include <linux/reservation.h> 32#include <linux/dma-resv.h>
33#include <linux/shmem_fs.h> 33#include <linux/shmem_fs.h>
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <linux/stop_machine.h> 35#include <linux/stop_machine.h>
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
index b17f23991253..8675a608a6fe 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
@@ -94,10 +94,10 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
94 list = &pool->cache_list[n]; 94 list = &pool->cache_list[n];
95 95
96 list_for_each_entry(obj, list, batch_pool_link) { 96 list_for_each_entry(obj, list, batch_pool_link) {
97 struct reservation_object *resv = obj->base.resv; 97 struct dma_resv *resv = obj->base.resv;
98 98
99 /* The batches are strictly LRU ordered */ 99 /* The batches are strictly LRU ordered */
100 if (!reservation_object_test_signaled_rcu(resv, true)) 100 if (!dma_resv_test_signaled_rcu(resv, true))
101 break; 101 break;
102 102
103 /* 103 /*
@@ -109,9 +109,9 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
109 * than replace the existing fence. 109 * than replace the existing fence.
110 */ 110 */
111 if (rcu_access_pointer(resv->fence)) { 111 if (rcu_access_pointer(resv->fence)) {
112 reservation_object_lock(resv, NULL); 112 dma_resv_lock(resv, NULL);
113 reservation_object_add_excl_fence(resv, NULL); 113 dma_resv_add_excl_fence(resv, NULL);
114 reservation_object_unlock(resv); 114 dma_resv_unlock(resv);
115 } 115 }
116 116
117 if (obj->base.size >= size) 117 if (obj->base.size >= size)
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 8ac7d14ec8c9..6d5d7eb25663 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1038,7 +1038,7 @@ i915_request_await_object(struct i915_request *to,
1038 struct dma_fence **shared; 1038 struct dma_fence **shared;
1039 unsigned int count, i; 1039 unsigned int count, i;
1040 1040
1041 ret = reservation_object_get_fences_rcu(obj->base.resv, 1041 ret = dma_resv_get_fences_rcu(obj->base.resv,
1042 &excl, &count, &shared); 1042 &excl, &count, &shared);
1043 if (ret) 1043 if (ret)
1044 return ret; 1044 return ret;
@@ -1055,7 +1055,7 @@ i915_request_await_object(struct i915_request *to,
1055 dma_fence_put(shared[i]); 1055 dma_fence_put(shared[i]);
1056 kfree(shared); 1056 kfree(shared);
1057 } else { 1057 } else {
1058 excl = reservation_object_get_excl_rcu(obj->base.resv); 1058 excl = dma_resv_get_excl_rcu(obj->base.resv);
1059 } 1059 }
1060 1060
1061 if (excl) { 1061 if (excl) {
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 5387aafd3424..362e4e00b4c6 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -7,7 +7,7 @@
7#include <linux/slab.h> 7#include <linux/slab.h>
8#include <linux/dma-fence.h> 8#include <linux/dma-fence.h>
9#include <linux/irq_work.h> 9#include <linux/irq_work.h>
10#include <linux/reservation.h> 10#include <linux/dma-resv.h>
11 11
12#include "i915_sw_fence.h" 12#include "i915_sw_fence.h"
13#include "i915_selftest.h" 13#include "i915_selftest.h"
@@ -510,7 +510,7 @@ int __i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
510} 510}
511 511
512int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, 512int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
513 struct reservation_object *resv, 513 struct dma_resv *resv,
514 const struct dma_fence_ops *exclude, 514 const struct dma_fence_ops *exclude,
515 bool write, 515 bool write,
516 unsigned long timeout, 516 unsigned long timeout,
@@ -526,7 +526,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
526 struct dma_fence **shared; 526 struct dma_fence **shared;
527 unsigned int count, i; 527 unsigned int count, i;
528 528
529 ret = reservation_object_get_fences_rcu(resv, 529 ret = dma_resv_get_fences_rcu(resv,
530 &excl, &count, &shared); 530 &excl, &count, &shared);
531 if (ret) 531 if (ret)
532 return ret; 532 return ret;
@@ -551,7 +551,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
551 dma_fence_put(shared[i]); 551 dma_fence_put(shared[i]);
552 kfree(shared); 552 kfree(shared);
553 } else { 553 } else {
554 excl = reservation_object_get_excl_rcu(resv); 554 excl = dma_resv_get_excl_rcu(resv);
555 } 555 }
556 556
557 if (ret >= 0 && excl && excl->ops != exclude) { 557 if (ret >= 0 && excl && excl->ops != exclude) {
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index 9cb5c3b307a6..8cf353e8c3e0 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -16,7 +16,7 @@
16#include <linux/wait.h> 16#include <linux/wait.h>
17 17
18struct completion; 18struct completion;
19struct reservation_object; 19struct dma_resv;
20 20
21struct i915_sw_fence { 21struct i915_sw_fence {
22 wait_queue_head_t wait; 22 wait_queue_head_t wait;
@@ -82,7 +82,7 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
82 gfp_t gfp); 82 gfp_t gfp);
83 83
84int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, 84int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
85 struct reservation_object *resv, 85 struct dma_resv *resv,
86 const struct dma_fence_ops *exclude, 86 const struct dma_fence_ops *exclude,
87 bool write, 87 bool write,
88 unsigned long timeout, 88 unsigned long timeout,
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index eb16a1a93bbc..2645f4e850c2 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -890,7 +890,7 @@ static void export_fence(struct i915_vma *vma,
890 struct i915_request *rq, 890 struct i915_request *rq,
891 unsigned int flags) 891 unsigned int flags)
892{ 892{
893 struct reservation_object *resv = vma->resv; 893 struct dma_resv *resv = vma->resv;
894 894
895 /* 895 /*
896 * Ignore errors from failing to allocate the new fence, we can't 896 * Ignore errors from failing to allocate the new fence, we can't
@@ -898,9 +898,9 @@ static void export_fence(struct i915_vma *vma,
898 * synchronisation leading to rendering corruption. 898 * synchronisation leading to rendering corruption.
899 */ 899 */
900 if (flags & EXEC_OBJECT_WRITE) 900 if (flags & EXEC_OBJECT_WRITE)
901 reservation_object_add_excl_fence(resv, &rq->fence); 901 dma_resv_add_excl_fence(resv, &rq->fence);
902 else if (reservation_object_reserve_shared(resv, 1) == 0) 902 else if (dma_resv_reserve_shared(resv, 1) == 0)
903 reservation_object_add_shared_fence(resv, &rq->fence); 903 dma_resv_add_shared_fence(resv, &rq->fence);
904} 904}
905 905
906int i915_vma_move_to_active(struct i915_vma *vma, 906int i915_vma_move_to_active(struct i915_vma *vma,
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 4b769db649bf..59a497561fc4 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -55,7 +55,7 @@ struct i915_vma {
55 struct i915_address_space *vm; 55 struct i915_address_space *vm;
56 const struct i915_vma_ops *ops; 56 const struct i915_vma_ops *ops;
57 struct i915_fence_reg *fence; 57 struct i915_fence_reg *fence;
58 struct reservation_object *resv; /** Alias of obj->resv */ 58 struct dma_resv *resv; /** Alias of obj->resv */
59 struct sg_table *pages; 59 struct sg_table *pages;
60 void __iomem *iomap; 60 void __iomem *iomap;
61 void *private; /* owned by creator */ 61 void *private; /* owned by creator */
@@ -299,16 +299,16 @@ void i915_vma_close(struct i915_vma *vma);
299void i915_vma_reopen(struct i915_vma *vma); 299void i915_vma_reopen(struct i915_vma *vma);
300void i915_vma_destroy(struct i915_vma *vma); 300void i915_vma_destroy(struct i915_vma *vma);
301 301
302#define assert_vma_held(vma) reservation_object_assert_held((vma)->resv) 302#define assert_vma_held(vma) dma_resv_assert_held((vma)->resv)
303 303
304static inline void i915_vma_lock(struct i915_vma *vma) 304static inline void i915_vma_lock(struct i915_vma *vma)
305{ 305{
306 reservation_object_lock(vma->resv, NULL); 306 dma_resv_lock(vma->resv, NULL);
307} 307}
308 308
309static inline void i915_vma_unlock(struct i915_vma *vma) 309static inline void i915_vma_unlock(struct i915_vma *vma)
310{ 310{
311 reservation_object_unlock(vma->resv); 311 dma_resv_unlock(vma->resv);
312} 312}
313 313
314int __i915_vma_do_pin(struct i915_vma *vma, 314int __i915_vma_do_pin(struct i915_vma *vma,
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index db461b6a257f..695f307f36b2 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -124,14 +124,11 @@ static void imx_ldb_ch_set_bus_format(struct imx_ldb_channel *imx_ldb_ch,
 static int imx_ldb_connector_get_modes(struct drm_connector *connector)
 {
 	struct imx_ldb_channel *imx_ldb_ch = con_to_imx_ldb_ch(connector);
-	int num_modes = 0;
+	int num_modes;
 
-	if (imx_ldb_ch->panel && imx_ldb_ch->panel->funcs &&
-	    imx_ldb_ch->panel->funcs->get_modes) {
-		num_modes = imx_ldb_ch->panel->funcs->get_modes(imx_ldb_ch->panel);
-		if (num_modes > 0)
-			return num_modes;
-	}
+	num_modes = drm_panel_get_modes(imx_ldb_ch->panel);
+	if (num_modes > 0)
+		return num_modes;
 
 	if (!imx_ldb_ch->edid && imx_ldb_ch->ddc)
 		imx_ldb_ch->edid = drm_get_edid(connector, imx_ldb_ch->ddc);
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 2e51b2fade75..e7ce17503ae1 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -47,14 +47,11 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
 {
 	struct imx_parallel_display *imxpd = con_to_imxpd(connector);
 	struct device_node *np = imxpd->dev->of_node;
-	int num_modes = 0;
+	int num_modes;
 
-	if (imxpd->panel && imxpd->panel->funcs &&
-	    imxpd->panel->funcs->get_modes) {
-		num_modes = imxpd->panel->funcs->get_modes(imxpd->panel);
-		if (num_modes > 0)
-			return num_modes;
-	}
+	num_modes = drm_panel_get_modes(imxpd->panel);
+	if (num_modes > 0)
+		return num_modes;
 
 	if (imxpd->edid) {
 		drm_connector_update_edid_property(connector, imxpd->edid);
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index fd1a024703d2..ff3d9acc24fc 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -136,7 +136,7 @@ static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
136 int err = 0; 136 int err = 0;
137 137
138 if (!write) { 138 if (!write) {
139 err = reservation_object_reserve_shared(bo->gem.resv, 1); 139 err = dma_resv_reserve_shared(bo->gem.resv, 1);
140 if (err) 140 if (err)
141 return err; 141 return err;
142 } 142 }
@@ -296,9 +296,9 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
296 296
297 for (i = 0; i < submit->nr_bos; i++) { 297 for (i = 0; i < submit->nr_bos; i++) {
298 if (submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE) 298 if (submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE)
299 reservation_object_add_excl_fence(bos[i]->gem.resv, fence); 299 dma_resv_add_excl_fence(bos[i]->gem.resv, fence);
300 else 300 else
301 reservation_object_add_shared_fence(bos[i]->gem.resv, fence); 301 dma_resv_add_shared_fence(bos[i]->gem.resv, fence);
302 } 302 }
303 303
304 lima_gem_unlock_bos(bos, submit->nr_bos, &ctx); 304 lima_gem_unlock_bos(bos, submit->nr_bos, &ctx);
@@ -341,7 +341,7 @@ int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, s64 timeout_ns)
341 341
342 timeout = drm_timeout_abs_to_jiffies(timeout_ns); 342 timeout = drm_timeout_abs_to_jiffies(timeout_ns);
343 343
344 ret = drm_gem_reservation_object_wait(file, handle, write, timeout); 344 ret = drm_gem_dma_resv_wait(file, handle, write, timeout);
345 if (ret == 0) 345 if (ret == 0)
346 ret = timeout ? -ETIMEDOUT : -EBUSY; 346 ret = timeout ? -ETIMEDOUT : -EBUSY;
347 347
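lima follows the usual dma_resv rule: a write job installs the exclusive fence, while a read job first reserves a shared slot (done early because the reservation can fail) and later adds a shared fence. A compact sketch of that rule, assuming the caller already holds the dma_resv lock as lima does when attaching the job fence:

#include <linux/dma-resv.h>
#include <linux/dma-fence.h>

static int example_attach_job_fence(struct dma_resv *resv,
				    struct dma_fence *fence, bool write)
{
	int err;

	if (write) {
		/* A writer replaces the shared fences with one exclusive fence. */
		dma_resv_add_excl_fence(resv, fence);
		return 0;
	}

	/* Readers need a slot reserved first; this may allocate and fail. */
	err = dma_resv_reserve_shared(resv, 1);
	if (err)
		return err;

	dma_resv_add_shared_fence(resv, fence);
	return 0;
}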
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_fb.c b/drivers/gpu/drm/mediatek/mtk_drm_fb.c
index ae40b080ae47..3f230a28a2dc 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_fb.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_fb.c
@@ -4,7 +4,7 @@
4 */ 4 */
5 5
6#include <linux/dma-buf.h> 6#include <linux/dma-buf.h>
7#include <linux/reservation.h> 7#include <linux/dma-resv.h>
8 8
9#include <drm/drm_modeset_helper.h> 9#include <drm/drm_modeset_helper.h>
10#include <drm/drm_fb_helper.h> 10#include <drm/drm_fb_helper.h>
diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c
index 3320a74e67fa..bba25325aa9c 100644
--- a/drivers/gpu/drm/meson/meson_crtc.c
+++ b/drivers/gpu/drm/meson/meson_crtc.c
@@ -265,11 +265,11 @@ static void meson_crtc_enable_vd1(struct meson_drm *priv)
 
 static void meson_g12a_crtc_enable_vd1(struct meson_drm *priv)
 {
-	writel_relaxed(((1 << 16) | /* post bld premult*/
-			(1 << 8) | /* post src */
-			(1 << 4) | /* pre bld premult*/
-			(1 << 0)),
+	writel_relaxed(VD_BLEND_PREBLD_SRC_VD1 |
+		       VD_BLEND_PREBLD_PREMULT_EN |
+		       VD_BLEND_POSTBLD_SRC_VD1 |
+		       VD_BLEND_POSTBLD_PREMULT_EN,
 		       priv->io_base + _REG(VD1_BLEND_SRC_CTRL));
 }
 
 void meson_crtc_irq(struct meson_drm *priv)
@@ -487,7 +487,12 @@ void meson_crtc_irq(struct meson_drm *priv)
 		writel_relaxed(priv->viu.vd1_range_map_cr,
 			       priv->io_base + meson_crtc->viu_offset +
 			       _REG(VD1_IF0_RANGE_MAP_CR));
-		writel_relaxed(0x78404,
+		writel_relaxed(VPP_VSC_BANK_LENGTH(4) |
+			       VPP_HSC_BANK_LENGTH(4) |
+			       VPP_SC_VD_EN_ENABLE |
+			       VPP_SC_TOP_EN_ENABLE |
+			       VPP_SC_HSC_EN_ENABLE |
+			       VPP_SC_VSC_EN_ENABLE,
 			       priv->io_base + _REG(VPP_SC_MISC));
 		writel_relaxed(priv->viu.vpp_pic_in_height,
 			       priv->io_base + _REG(VPP_PIC_IN_HEIGHT));
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 42af49afdd75..ae0166181606 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -140,10 +140,28 @@ static struct regmap_config meson_regmap_config = {
 
 static void meson_vpu_init(struct meson_drm *priv)
 {
-	writel_relaxed(0x210000, priv->io_base + _REG(VPU_RDARB_MODE_L1C1));
-	writel_relaxed(0x10000, priv->io_base + _REG(VPU_RDARB_MODE_L1C2));
-	writel_relaxed(0x900000, priv->io_base + _REG(VPU_RDARB_MODE_L2C1));
-	writel_relaxed(0x20000, priv->io_base + _REG(VPU_WRARB_MODE_L2C1));
+	u32 value;
+
+	/*
+	 * Slave dc0 and dc5 connected to master port 1.
+	 * By default other slaves are connected to master port 0.
+	 */
+	value = VPU_RDARB_SLAVE_TO_MASTER_PORT(0, 1) |
+		VPU_RDARB_SLAVE_TO_MASTER_PORT(5, 1);
+	writel_relaxed(value, priv->io_base + _REG(VPU_RDARB_MODE_L1C1));
+
+	/* Slave dc0 connected to master port 1 */
+	value = VPU_RDARB_SLAVE_TO_MASTER_PORT(0, 1);
+	writel_relaxed(value, priv->io_base + _REG(VPU_RDARB_MODE_L1C2));
+
+	/* Slave dc4 and dc7 connected to master port 1 */
+	value = VPU_RDARB_SLAVE_TO_MASTER_PORT(4, 1) |
+		VPU_RDARB_SLAVE_TO_MASTER_PORT(7, 1);
+	writel_relaxed(value, priv->io_base + _REG(VPU_RDARB_MODE_L2C1));
+
+	/* Slave dc1 connected to master port 1 */
+	value = VPU_RDARB_SLAVE_TO_MASTER_PORT(1, 1);
+	writel_relaxed(value, priv->io_base + _REG(VPU_WRARB_MODE_L2C1));
 }
 
 static void meson_remove_framebuffers(void)
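VPU_RDARB_SLAVE_TO_MASTER_PORT(dc, port) expands to (port << (16 + dc)), so the named form reproduces the old magic arbitration values exactly. A small user-space model (illustrative, not kernel code) checking that equivalence:

#include <stdint.h>
#include <assert.h>

/* User-space model of the helper; the kernel macro is the same apart
 * from the explicit cast used here.
 */
#define VPU_RDARB_SLAVE_TO_MASTER_PORT(dc, port) ((uint32_t)(port) << (16 + (dc)))

int main(void)
{
	/* dc0 + dc5 on port 1 == old 0x210000 written to VPU_RDARB_MODE_L1C1 */
	assert((VPU_RDARB_SLAVE_TO_MASTER_PORT(0, 1) |
		VPU_RDARB_SLAVE_TO_MASTER_PORT(5, 1)) == 0x210000);
	/* dc0 on port 1 == old 0x10000 written to VPU_RDARB_MODE_L1C2 */
	assert(VPU_RDARB_SLAVE_TO_MASTER_PORT(0, 1) == 0x10000);
	/* dc4 + dc7 on port 1 == old 0x900000 written to VPU_RDARB_MODE_L2C1 */
	assert((VPU_RDARB_SLAVE_TO_MASTER_PORT(4, 1) |
		VPU_RDARB_SLAVE_TO_MASTER_PORT(7, 1)) == 0x900000);
	/* dc1 on port 1 == old 0x20000 written to VPU_WRARB_MODE_L2C1 */
	assert(VPU_RDARB_SLAVE_TO_MASTER_PORT(1, 1) == 0x20000);
	return 0;
}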
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index 9f0b08eaf003..f893ebd0b799 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -429,6 +429,8 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data,
 	/* Enable internal pixclk, tmds_clk, spdif_clk, i2s_clk, cecclk */
 	dw_hdmi_top_write_bits(dw_hdmi, HDMITX_TOP_CLK_CNTL,
 			       0x3, 0x3);
+
+	/* Enable cec_clk and hdcp22_tmdsclk_en */
 	dw_hdmi_top_write_bits(dw_hdmi, HDMITX_TOP_CLK_CNTL,
 			       0x3 << 4, 0x3 << 4);
 
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.h b/drivers/gpu/drm/meson/meson_dw_hdmi.h
index 1b2ef043eb5c..08e1c14e4ea0 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.h
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.h
@@ -100,7 +100,8 @@
100#define HDMITX_TOP_INTR_RXSENSE_RISE BIT(6) 100#define HDMITX_TOP_INTR_RXSENSE_RISE BIT(6)
101#define HDMITX_TOP_INTR_RXSENSE_FALL BIT(7) 101#define HDMITX_TOP_INTR_RXSENSE_FALL BIT(7)
102 102
103/* Bit 14:12 RW tmds_sel: 3'b000=Output zero; 3'b001=Output normal TMDS data; 103/*
104 * Bit 14:12 RW tmds_sel: 3'b000=Output zero; 3'b001=Output normal TMDS data;
104 * 3'b010=Output PRBS data; 3'b100=Output shift pattern. Default 0. 105 * 3'b010=Output PRBS data; 3'b100=Output shift pattern. Default 0.
105 * Bit 11: 9 RW shift_pttn_repeat: 0=New pattern every clk cycle; 1=New pattern 106 * Bit 11: 9 RW shift_pttn_repeat: 0=New pattern every clk cycle; 1=New pattern
106 * every 2 clk cycles; ...; 7=New pattern every 8 clk cycles. Default 0. 107 * every 2 clk cycles; ...; 7=New pattern every 8 clk cycles. Default 0.
@@ -135,7 +136,8 @@
135/* Bit 9: 0 RW tmds_clk_pttn[29:20]. Default 0. */ 136/* Bit 9: 0 RW tmds_clk_pttn[29:20]. Default 0. */
136#define HDMITX_TOP_TMDS_CLK_PTTN_23 (0x00B) 137#define HDMITX_TOP_TMDS_CLK_PTTN_23 (0x00B)
137 138
138/* Bit 1 RW shift_tmds_clk_pttn:1=Enable shifting clk pattern, 139/*
140 * Bit 1 RW shift_tmds_clk_pttn:1=Enable shifting clk pattern,
139 * used when TMDS CLK rate = TMDS character rate /4. Default 0. 141 * used when TMDS CLK rate = TMDS character rate /4. Default 0.
140 * Bit 0 R Reserved. Default 0. 142 * Bit 0 R Reserved. Default 0.
141 * [ 1] shift_tmds_clk_pttn 143 * [ 1] shift_tmds_clk_pttn
@@ -143,12 +145,14 @@
143 */ 145 */
144#define HDMITX_TOP_TMDS_CLK_PTTN_CNTL (0x00C) 146#define HDMITX_TOP_TMDS_CLK_PTTN_CNTL (0x00C)
145 147
146/* Bit 0 RW revocmem_wr_fail: Read back 1 to indicate Host write REVOC MEM 148/*
149 * Bit 0 RW revocmem_wr_fail: Read back 1 to indicate Host write REVOC MEM
147 * failure, write 1 to clear the failure flag. Default 0. 150 * failure, write 1 to clear the failure flag. Default 0.
148 */ 151 */
149#define HDMITX_TOP_REVOCMEM_STAT (0x00D) 152#define HDMITX_TOP_REVOCMEM_STAT (0x00D)
150 153
151/* Bit 1 R filtered RxSense status 154/*
155 * Bit 1 R filtered RxSense status
152 * Bit 0 R filtered HPD status. 156 * Bit 0 R filtered HPD status.
153 */ 157 */
154#define HDMITX_TOP_STAT0 (0x00E) 158#define HDMITX_TOP_STAT0 (0x00E)
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
index 80b8d70c4d75..b9e1e117fb85 100644
--- a/drivers/gpu/drm/meson/meson_plane.c
+++ b/drivers/gpu/drm/meson/meson_plane.c
@@ -328,7 +328,7 @@ static void meson_plane_atomic_disable(struct drm_plane *plane,
 
 	/* Disable OSD1 */
 	if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
-		writel_bits_relaxed(3 << 8, 0,
+		writel_bits_relaxed(VIU_OSD1_POSTBLD_SRC_OSD1, 0,
 				    priv->io_base + _REG(OSD1_BLEND_SRC_CTRL));
 	else
 		writel_bits_relaxed(VPP_OSD1_POSTBLEND, 0,
diff --git a/drivers/gpu/drm/meson/meson_registers.h b/drivers/gpu/drm/meson/meson_registers.h
index 057453ce027c..05fce48ceee0 100644
--- a/drivers/gpu/drm/meson/meson_registers.h
+++ b/drivers/gpu/drm/meson/meson_registers.h
@@ -12,7 +12,7 @@
 #define _REG(reg)	((reg) << 2)
 
 #define writel_bits_relaxed(mask, val, addr) \
-	writel_relaxed((readl_relaxed(addr) & ~(mask)) | (val), addr)
+	writel_relaxed((readl_relaxed(addr) & ~(mask)) | ((val) & (mask)), addr)
 
 /* vpp2 */
 #define VPP2_DUMMY_DATA	0x1900
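The writel_bits_relaxed() fix ANDs the value with the mask, so value bits outside the field can no longer leak into neighbouring bits of the register. A stand-alone user-space model of the fixed read-modify-write:

#include <stdint.h>
#include <assert.h>

/* User-space model of the macro's read-modify-write. */
static uint32_t masked_update(uint32_t reg, uint32_t mask, uint32_t val)
{
	return (reg & ~mask) | (val & mask);	/* the '& mask' is the fix */
}

int main(void)
{
	/* Field is bits 3..0; a sloppy value with bit 8 set must not escape. */
	assert(masked_update(0xffff0000, 0xf, 0x105) == 0xffff0005);
	return 0;
}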
@@ -138,11 +138,19 @@
138#define VIU_ADDR_START 0x1a00 138#define VIU_ADDR_START 0x1a00
139#define VIU_ADDR_END 0x1aff 139#define VIU_ADDR_END 0x1aff
140#define VIU_SW_RESET 0x1a01 140#define VIU_SW_RESET 0x1a01
141#define VIU_SW_RESET_OSD1 BIT(0)
141#define VIU_MISC_CTRL0 0x1a06 142#define VIU_MISC_CTRL0 0x1a06
143#define VIU_CTRL0_VD1_AFBC_MASK 0x170000
142#define VIU_MISC_CTRL1 0x1a07 144#define VIU_MISC_CTRL1 0x1a07
143#define D2D3_INTF_LENGTH 0x1a08 145#define D2D3_INTF_LENGTH 0x1a08
144#define D2D3_INTF_CTRL0 0x1a09 146#define D2D3_INTF_CTRL0 0x1a09
145#define VIU_OSD1_CTRL_STAT 0x1a10 147#define VIU_OSD1_CTRL_STAT 0x1a10
148#define VIU_OSD1_OSD_BLK_ENABLE BIT(0)
149#define VIU_OSD1_POSTBLD_SRC_VD1 (1 << 8)
150#define VIU_OSD1_POSTBLD_SRC_VD2 (2 << 8)
151#define VIU_OSD1_POSTBLD_SRC_OSD1 (3 << 8)
152#define VIU_OSD1_POSTBLD_SRC_OSD2 (4 << 8)
153#define VIU_OSD1_OSD_ENABLE BIT(21)
146#define VIU_OSD1_CTRL_STAT2 0x1a2d 154#define VIU_OSD1_CTRL_STAT2 0x1a2d
147#define VIU_OSD1_COLOR_ADDR 0x1a11 155#define VIU_OSD1_COLOR_ADDR 0x1a11
148#define VIU_OSD1_COLOR 0x1a12 156#define VIU_OSD1_COLOR 0x1a12
@@ -232,6 +240,12 @@
232#define VIU_OSD3_MALI_UNPACK_CTRL 0x3d9f 240#define VIU_OSD3_MALI_UNPACK_CTRL 0x3d9f
233#define VIU_OSD3_DIMM_CTRL 0x3da0 241#define VIU_OSD3_DIMM_CTRL 0x3da0
234 242
243#define VIU_OSD_DDR_PRIORITY_URGENT BIT(0)
244#define VIU_OSD_HOLD_FIFO_LINES(lines) ((lines & 0x1f) << 5)
245#define VIU_OSD_FIFO_DEPTH_VAL(val) ((val & 0x7f) << 12)
246#define VIU_OSD_WORDS_PER_BURST(words) (((words & 0x4) >> 1) << 22)
247#define VIU_OSD_FIFO_LIMITS(size) ((size & 0xf) << 24)
248
235#define VD1_IF0_GEN_REG 0x1a50 249#define VD1_IF0_GEN_REG 0x1a50
236#define VD1_IF0_CANVAS0 0x1a51 250#define VD1_IF0_CANVAS0 0x1a51
237#define VD1_IF0_CANVAS1 0x1a52 251#define VD1_IF0_CANVAS1 0x1a52
@@ -341,6 +355,7 @@
341#define VPP_LINE_IN_LENGTH 0x1d01 355#define VPP_LINE_IN_LENGTH 0x1d01
342#define VPP_PIC_IN_HEIGHT 0x1d02 356#define VPP_PIC_IN_HEIGHT 0x1d02
343#define VPP_SCALE_COEF_IDX 0x1d03 357#define VPP_SCALE_COEF_IDX 0x1d03
358#define VPP_SCALE_HORIZONTAL_COEF BIT(8)
344#define VPP_SCALE_COEF 0x1d04 359#define VPP_SCALE_COEF 0x1d04
345#define VPP_VSC_REGION12_STARTP 0x1d05 360#define VPP_VSC_REGION12_STARTP 0x1d05
346#define VPP_VSC_REGION34_STARTP 0x1d06 361#define VPP_VSC_REGION34_STARTP 0x1d06
@@ -362,6 +377,12 @@
362#define VPP_HSC_REGION4_PHASE_SLOPE 0x1d17 377#define VPP_HSC_REGION4_PHASE_SLOPE 0x1d17
363#define VPP_HSC_PHASE_CTRL 0x1d18 378#define VPP_HSC_PHASE_CTRL 0x1d18
364#define VPP_SC_MISC 0x1d19 379#define VPP_SC_MISC 0x1d19
380#define VPP_SC_VD_EN_ENABLE BIT(15)
381#define VPP_SC_TOP_EN_ENABLE BIT(16)
382#define VPP_SC_HSC_EN_ENABLE BIT(17)
383#define VPP_SC_VSC_EN_ENABLE BIT(18)
384#define VPP_VSC_BANK_LENGTH(length) (length & 0x7)
385#define VPP_HSC_BANK_LENGTH(length) ((length & 0x7) << 8)
365#define VPP_PREBLEND_VD1_H_START_END 0x1d1a 386#define VPP_PREBLEND_VD1_H_START_END 0x1d1a
366#define VPP_PREBLEND_VD1_V_START_END 0x1d1b 387#define VPP_PREBLEND_VD1_V_START_END 0x1d1b
367#define VPP_POSTBLEND_VD1_H_START_END 0x1d1c 388#define VPP_POSTBLEND_VD1_H_START_END 0x1d1c
@@ -371,24 +392,28 @@
371#define VPP_PREBLEND_H_SIZE 0x1d20 392#define VPP_PREBLEND_H_SIZE 0x1d20
372#define VPP_POSTBLEND_H_SIZE 0x1d21 393#define VPP_POSTBLEND_H_SIZE 0x1d21
373#define VPP_HOLD_LINES 0x1d22 394#define VPP_HOLD_LINES 0x1d22
395#define VPP_POSTBLEND_HOLD_LINES(lines) (lines & 0xf)
396#define VPP_PREBLEND_HOLD_LINES(lines) ((lines & 0xf) << 8)
374#define VPP_BLEND_ONECOLOR_CTRL 0x1d23 397#define VPP_BLEND_ONECOLOR_CTRL 0x1d23
375#define VPP_PREBLEND_CURRENT_XY 0x1d24 398#define VPP_PREBLEND_CURRENT_XY 0x1d24
376#define VPP_POSTBLEND_CURRENT_XY 0x1d25 399#define VPP_POSTBLEND_CURRENT_XY 0x1d25
377#define VPP_MISC 0x1d26 400#define VPP_MISC 0x1d26
378#define VPP_PREBLEND_ENABLE BIT(6) 401#define VPP_PREBLEND_ENABLE BIT(6)
379#define VPP_POSTBLEND_ENABLE BIT(7) 402#define VPP_POSTBLEND_ENABLE BIT(7)
380#define VPP_OSD2_ALPHA_PREMULT BIT(8) 403#define VPP_OSD2_ALPHA_PREMULT BIT(8)
381#define VPP_OSD1_ALPHA_PREMULT BIT(9) 404#define VPP_OSD1_ALPHA_PREMULT BIT(9)
382#define VPP_VD1_POSTBLEND BIT(10) 405#define VPP_VD1_POSTBLEND BIT(10)
383#define VPP_VD2_POSTBLEND BIT(11) 406#define VPP_VD2_POSTBLEND BIT(11)
384#define VPP_OSD1_POSTBLEND BIT(12) 407#define VPP_OSD1_POSTBLEND BIT(12)
385#define VPP_OSD2_POSTBLEND BIT(13) 408#define VPP_OSD2_POSTBLEND BIT(13)
386#define VPP_VD1_PREBLEND BIT(14) 409#define VPP_VD1_PREBLEND BIT(14)
387#define VPP_VD2_PREBLEND BIT(15) 410#define VPP_VD2_PREBLEND BIT(15)
388#define VPP_OSD1_PREBLEND BIT(16) 411#define VPP_OSD1_PREBLEND BIT(16)
389#define VPP_OSD2_PREBLEND BIT(17) 412#define VPP_OSD2_PREBLEND BIT(17)
390#define VPP_COLOR_MNG_ENABLE BIT(28) 413#define VPP_COLOR_MNG_ENABLE BIT(28)
391#define VPP_OFIFO_SIZE 0x1d27 414#define VPP_OFIFO_SIZE 0x1d27
415#define VPP_OFIFO_SIZE_MASK GENMASK(13, 0)
416#define VPP_OFIFO_SIZE_DEFAULT (0xfff << 20 | 0x1000)
392#define VPP_FIFO_STATUS 0x1d28 417#define VPP_FIFO_STATUS 0x1d28
393#define VPP_SMOKE_CTRL 0x1d29 418#define VPP_SMOKE_CTRL 0x1d29
394#define VPP_SMOKE1_VAL 0x1d2a 419#define VPP_SMOKE1_VAL 0x1d2a
@@ -404,6 +429,8 @@
404#define VPP_HSC_PHASE_CTRL1 0x1d34 429#define VPP_HSC_PHASE_CTRL1 0x1d34
405#define VPP_HSC_INI_PAT_CTRL 0x1d35 430#define VPP_HSC_INI_PAT_CTRL 0x1d35
406#define VPP_VADJ_CTRL 0x1d40 431#define VPP_VADJ_CTRL 0x1d40
432#define VPP_MINUS_BLACK_LVL_VADJ1_ENABLE BIT(1)
433
407#define VPP_VADJ1_Y 0x1d41 434#define VPP_VADJ1_Y 0x1d41
408#define VPP_VADJ1_MA_MB 0x1d42 435#define VPP_VADJ1_MA_MB 0x1d42
409#define VPP_VADJ1_MC_MD 0x1d43 436#define VPP_VADJ1_MC_MD 0x1d43
@@ -463,6 +490,7 @@
463#define VPP_PEAKING_VGAIN 0x1d92 490#define VPP_PEAKING_VGAIN 0x1d92
464#define VPP_PEAKING_NLP_1 0x1d93 491#define VPP_PEAKING_NLP_1 0x1d93
465#define VPP_DOLBY_CTRL 0x1d93 492#define VPP_DOLBY_CTRL 0x1d93
493#define VPP_PPS_DUMMY_DATA_MODE (1 << 17)
466#define VPP_PEAKING_NLP_2 0x1d94 494#define VPP_PEAKING_NLP_2 0x1d94
467#define VPP_PEAKING_NLP_3 0x1d95 495#define VPP_PEAKING_NLP_3 0x1d95
468#define VPP_PEAKING_NLP_4 0x1d96 496#define VPP_PEAKING_NLP_4 0x1d96
@@ -593,6 +621,7 @@
593#define OSD34_SCI_WH_M1 0x3d29 621#define OSD34_SCI_WH_M1 0x3d29
594#define OSD34_SCO_H_START_END 0x3d2a 622#define OSD34_SCO_H_START_END 0x3d2a
595#define OSD34_SCO_V_START_END 0x3d2b 623#define OSD34_SCO_V_START_END 0x3d2b
624
596/* viu2 */ 625/* viu2 */
597#define VIU2_ADDR_START 0x1e00 626#define VIU2_ADDR_START 0x1e00
598#define VIU2_ADDR_END 0x1eff 627#define VIU2_ADDR_END 0x1eff
@@ -706,6 +735,25 @@
706#define VENC_UPSAMPLE_CTRL0 0x1b64 735#define VENC_UPSAMPLE_CTRL0 0x1b64
707#define VENC_UPSAMPLE_CTRL1 0x1b65 736#define VENC_UPSAMPLE_CTRL1 0x1b65
708#define VENC_UPSAMPLE_CTRL2 0x1b66 737#define VENC_UPSAMPLE_CTRL2 0x1b66
738#define VENC_UPSAMPLE_CTRL_F0_2_CLK_RATIO BIT(0)
739#define VENC_UPSAMPLE_CTRL_F1_EN BIT(5)
740#define VENC_UPSAMPLE_CTRL_F1_UPSAMPLE_EN BIT(6)
741#define VENC_UPSAMPLE_CTRL_INTERLACE_HIGH_LUMA (0x0 << 12)
742#define VENC_UPSAMPLE_CTRL_CVBS (0x1 << 12)
743#define VENC_UPSAMPLE_CTRL_S_VIDEO_LUMA (0x2 << 12)
744#define VENC_UPSAMPLE_CTRL_S_VIDEO_CHROMA (0x3 << 12)
745#define VENC_UPSAMPLE_CTRL_INTERLACE_PB (0x4 << 12)
746#define VENC_UPSAMPLE_CTRL_INTERLACE_PR (0x5 << 12)
747#define VENC_UPSAMPLE_CTRL_INTERLACE_R (0x6 << 12)
748#define VENC_UPSAMPLE_CTRL_INTERLACE_G (0x7 << 12)
749#define VENC_UPSAMPLE_CTRL_INTERLACE_B (0x8 << 12)
750#define VENC_UPSAMPLE_CTRL_PROGRESSIVE_Y (0x9 << 12)
751#define VENC_UPSAMPLE_CTRL_PROGRESSIVE_PB (0xa << 12)
752#define VENC_UPSAMPLE_CTRL_PROGRESSIVE_PR (0xb << 12)
753#define VENC_UPSAMPLE_CTRL_PROGRESSIVE_R (0xc << 12)
754#define VENC_UPSAMPLE_CTRL_PROGRESSIVE_G (0xd << 12)
755#define VENC_UPSAMPLE_CTRL_PROGRESSIVE_B (0xe << 12)
756#define VENC_UPSAMPLE_CTRL_VDAC_TEST_VALUE (0xf << 12)
709#define TCON_INVERT_CTL 0x1b67 757#define TCON_INVERT_CTL 0x1b67
710#define VENC_VIDEO_PROG_MODE 0x1b68 758#define VENC_VIDEO_PROG_MODE 0x1b68
711#define VENC_ENCI_LINE 0x1b69 759#define VENC_ENCI_LINE 0x1b69
@@ -714,6 +762,7 @@
714#define VENC_ENCP_PIXEL 0x1b6c 762#define VENC_ENCP_PIXEL 0x1b6c
715#define VENC_STATA 0x1b6d 763#define VENC_STATA 0x1b6d
716#define VENC_INTCTRL 0x1b6e 764#define VENC_INTCTRL 0x1b6e
765#define VENC_INTCTRL_ENCI_LNRST_INT_EN BIT(1)
717#define VENC_INTFLAG 0x1b6f 766#define VENC_INTFLAG 0x1b6f
718#define VENC_VIDEO_TST_EN 0x1b70 767#define VENC_VIDEO_TST_EN 0x1b70
719#define VENC_VIDEO_TST_MDSEL 0x1b71 768#define VENC_VIDEO_TST_MDSEL 0x1b71
@@ -724,6 +773,7 @@
724#define VENC_VIDEO_TST_CLRBAR_WIDTH 0x1b76 773#define VENC_VIDEO_TST_CLRBAR_WIDTH 0x1b76
725#define VENC_VIDEO_TST_VDCNT_STSET 0x1b77 774#define VENC_VIDEO_TST_VDCNT_STSET 0x1b77
726#define VENC_VDAC_DACSEL0 0x1b78 775#define VENC_VDAC_DACSEL0 0x1b78
776#define VENC_VDAC_SEL_ATV_DMD BIT(5)
727#define VENC_VDAC_DACSEL1 0x1b79 777#define VENC_VDAC_DACSEL1 0x1b79
728#define VENC_VDAC_DACSEL2 0x1b7a 778#define VENC_VDAC_DACSEL2 0x1b7a
729#define VENC_VDAC_DACSEL3 0x1b7b 779#define VENC_VDAC_DACSEL3 0x1b7b
@@ -744,6 +794,7 @@
744#define VENC_VDAC_DAC5_GAINCTRL 0x1bfa 794#define VENC_VDAC_DAC5_GAINCTRL 0x1bfa
745#define VENC_VDAC_DAC5_OFFSET 0x1bfb 795#define VENC_VDAC_DAC5_OFFSET 0x1bfb
746#define VENC_VDAC_FIFO_CTRL 0x1bfc 796#define VENC_VDAC_FIFO_CTRL 0x1bfc
797#define VENC_VDAC_FIFO_EN_ENCI_ENABLE BIT(13)
747#define ENCL_TCON_INVERT_CTL 0x1bfd 798#define ENCL_TCON_INVERT_CTL 0x1bfd
748#define ENCP_VIDEO_EN 0x1b80 799#define ENCP_VIDEO_EN 0x1b80
749#define ENCP_VIDEO_SYNC_MODE 0x1b81 800#define ENCP_VIDEO_SYNC_MODE 0x1b81
@@ -759,6 +810,7 @@
759#define ENCP_VIDEO_SYNC_OFFST 0x1b8b 810#define ENCP_VIDEO_SYNC_OFFST 0x1b8b
760#define ENCP_VIDEO_MACV_OFFST 0x1b8c 811#define ENCP_VIDEO_MACV_OFFST 0x1b8c
761#define ENCP_VIDEO_MODE 0x1b8d 812#define ENCP_VIDEO_MODE 0x1b8d
813#define ENCP_VIDEO_MODE_DE_V_HIGH BIT(14)
762#define ENCP_VIDEO_MODE_ADV 0x1b8e 814#define ENCP_VIDEO_MODE_ADV 0x1b8e
763#define ENCP_DBG_PX_RST 0x1b90 815#define ENCP_DBG_PX_RST 0x1b90
764#define ENCP_DBG_LN_RST 0x1b91 816#define ENCP_DBG_LN_RST 0x1b91
@@ -837,6 +889,11 @@
837#define C656_FS_LNED 0x1be7 889#define C656_FS_LNED 0x1be7
838#define ENCI_VIDEO_MODE 0x1b00 890#define ENCI_VIDEO_MODE 0x1b00
839#define ENCI_VIDEO_MODE_ADV 0x1b01 891#define ENCI_VIDEO_MODE_ADV 0x1b01
892#define ENCI_VIDEO_MODE_ADV_DMXMD(val) (val & 0x3)
893#define ENCI_VIDEO_MODE_ADV_VBICTL_LINE_17_22 BIT(2)
894#define ENCI_VIDEO_MODE_ADV_YBW_MEDIUM (0 << 4)
895#define ENCI_VIDEO_MODE_ADV_YBW_LOW (0x1 << 4)
896#define ENCI_VIDEO_MODE_ADV_YBW_HIGH (0x2 << 4)
840#define ENCI_VIDEO_FSC_ADJ 0x1b02 897#define ENCI_VIDEO_FSC_ADJ 0x1b02
841#define ENCI_VIDEO_BRIGHT 0x1b03 898#define ENCI_VIDEO_BRIGHT 0x1b03
842#define ENCI_VIDEO_CONT 0x1b04 899#define ENCI_VIDEO_CONT 0x1b04
@@ -907,13 +964,17 @@
907#define ENCI_DBG_MAXPX 0x1b4c 964#define ENCI_DBG_MAXPX 0x1b4c
908#define ENCI_DBG_MAXLN 0x1b4d 965#define ENCI_DBG_MAXLN 0x1b4d
909#define ENCI_MACV_MAX_AMP 0x1b50 966#define ENCI_MACV_MAX_AMP 0x1b50
967#define ENCI_MACV_MAX_AMP_ENABLE_CHANGE BIT(15)
968#define ENCI_MACV_MAX_AMP_VAL(val) (val & 0x83ff)
910#define ENCI_MACV_PULSE_LO 0x1b51 969#define ENCI_MACV_PULSE_LO 0x1b51
911#define ENCI_MACV_PULSE_HI 0x1b52 970#define ENCI_MACV_PULSE_HI 0x1b52
912#define ENCI_MACV_BKP_MAX 0x1b53 971#define ENCI_MACV_BKP_MAX 0x1b53
913#define ENCI_CFILT_CTRL 0x1b54 972#define ENCI_CFILT_CTRL 0x1b54
973#define ENCI_CFILT_CMPT_SEL_HIGH BIT(1)
914#define ENCI_CFILT7 0x1b55 974#define ENCI_CFILT7 0x1b55
915#define ENCI_YC_DELAY 0x1b56 975#define ENCI_YC_DELAY 0x1b56
916#define ENCI_VIDEO_EN 0x1b57 976#define ENCI_VIDEO_EN 0x1b57
977#define ENCI_VIDEO_EN_ENABLE BIT(0)
917#define ENCI_DVI_HSO_BEGIN 0x1c00 978#define ENCI_DVI_HSO_BEGIN 0x1c00
918#define ENCI_DVI_HSO_END 0x1c01 979#define ENCI_DVI_HSO_END 0x1c01
919#define ENCI_DVI_VSO_BLINE_EVN 0x1c02 980#define ENCI_DVI_VSO_BLINE_EVN 0x1c02
@@ -925,6 +986,10 @@
925#define ENCI_DVI_VSO_END_EVN 0x1c08 986#define ENCI_DVI_VSO_END_EVN 0x1c08
926#define ENCI_DVI_VSO_END_ODD 0x1c09 987#define ENCI_DVI_VSO_END_ODD 0x1c09
927#define ENCI_CFILT_CTRL2 0x1c0a 988#define ENCI_CFILT_CTRL2 0x1c0a
989#define ENCI_CFILT_CMPT_CR_DLY(delay) (delay & 0xf)
990#define ENCI_CFILT_CMPT_CB_DLY(delay) ((delay & 0xf) << 4)
991#define ENCI_CFILT_CVBS_CR_DLY(delay) ((delay & 0xf) << 8)
992#define ENCI_CFILT_CVBS_CB_DLY(delay) ((delay & 0xf) << 12)
928#define ENCI_DACSEL_0 0x1c0b 993#define ENCI_DACSEL_0 0x1c0b
929#define ENCI_DACSEL_1 0x1c0c 994#define ENCI_DACSEL_1 0x1c0c
930#define ENCP_DACSEL_0 0x1c0d 995#define ENCP_DACSEL_0 0x1c0d
@@ -939,6 +1004,8 @@
939#define ENCI_TST_CLRBAR_WIDTH 0x1c16 1004#define ENCI_TST_CLRBAR_WIDTH 0x1c16
940#define ENCI_TST_VDCNT_STSET 0x1c17 1005#define ENCI_TST_VDCNT_STSET 0x1c17
941#define ENCI_VFIFO2VD_CTL 0x1c18 1006#define ENCI_VFIFO2VD_CTL 0x1c18
1007#define ENCI_VFIFO2VD_CTL_ENABLE BIT(0)
1008#define ENCI_VFIFO2VD_CTL_VD_SEL(val) ((val & 0xff) << 8)
942#define ENCI_VFIFO2VD_PIXEL_START 0x1c19 1009#define ENCI_VFIFO2VD_PIXEL_START 0x1c19
943#define ENCI_VFIFO2VD_PIXEL_END 0x1c1a 1010#define ENCI_VFIFO2VD_PIXEL_END 0x1c1a
944#define ENCI_VFIFO2VD_LINE_TOP_START 0x1c1b 1011#define ENCI_VFIFO2VD_LINE_TOP_START 0x1c1b
@@ -1001,6 +1068,7 @@
1001#define VENC_VDAC_DAC5_FILT_CTRL0 0x1c56 1068#define VENC_VDAC_DAC5_FILT_CTRL0 0x1c56
1002#define VENC_VDAC_DAC5_FILT_CTRL1 0x1c57 1069#define VENC_VDAC_DAC5_FILT_CTRL1 0x1c57
1003#define VENC_VDAC_DAC0_FILT_CTRL0 0x1c58 1070#define VENC_VDAC_DAC0_FILT_CTRL0 0x1c58
1071#define VENC_VDAC_DAC0_FILT_CTRL0_EN BIT(0)
1004#define VENC_VDAC_DAC0_FILT_CTRL1 0x1c59 1072#define VENC_VDAC_DAC0_FILT_CTRL1 0x1c59
1005#define VENC_VDAC_DAC1_FILT_CTRL0 0x1c5a 1073#define VENC_VDAC_DAC1_FILT_CTRL0 0x1c5a
1006#define VENC_VDAC_DAC1_FILT_CTRL1 0x1c5b 1074#define VENC_VDAC_DAC1_FILT_CTRL1 0x1c5b
@@ -1406,6 +1474,18 @@
1406#define VIU2_SEL_VENC_ENCP (2 << 2) 1474#define VIU2_SEL_VENC_ENCP (2 << 2)
1407#define VIU2_SEL_VENC_ENCT (3 << 2) 1475#define VIU2_SEL_VENC_ENCT (3 << 2)
1408#define VPU_HDMI_SETTING 0x271b 1476#define VPU_HDMI_SETTING 0x271b
1477#define VPU_HDMI_ENCI_DATA_TO_HDMI BIT(0)
1478#define VPU_HDMI_ENCP_DATA_TO_HDMI BIT(1)
1479#define VPU_HDMI_INV_HSYNC BIT(2)
1480#define VPU_HDMI_INV_VSYNC BIT(3)
1481#define VPU_HDMI_OUTPUT_CRYCB (0 << 5)
1482#define VPU_HDMI_OUTPUT_YCBCR (1 << 5)
1483#define VPU_HDMI_OUTPUT_YCRCB (2 << 5)
1484#define VPU_HDMI_OUTPUT_CBCRY (3 << 5)
1485#define VPU_HDMI_OUTPUT_CBYCR (4 << 5)
1486#define VPU_HDMI_OUTPUT_CRCBY (5 << 5)
1487#define VPU_HDMI_WR_RATE(rate) (((rate & 0x1f) - 1) << 8)
1488#define VPU_HDMI_RD_RATE(rate) (((rate & 0x1f) - 1) << 12)
1409#define ENCI_INFO_READ 0x271c 1489#define ENCI_INFO_READ 0x271c
1410#define ENCP_INFO_READ 0x271d 1490#define ENCP_INFO_READ 0x271d
1411#define ENCT_INFO_READ 0x271e 1491#define ENCT_INFO_READ 0x271e
@@ -1482,6 +1562,7 @@
1482#define VPU_RDARB_MODE_L1C2 0x2799 1562#define VPU_RDARB_MODE_L1C2 0x2799
1483#define VPU_RDARB_MODE_L2C1 0x279d 1563#define VPU_RDARB_MODE_L2C1 0x279d
1484#define VPU_WRARB_MODE_L2C1 0x27a2 1564#define VPU_WRARB_MODE_L2C1 0x27a2
1565#define VPU_RDARB_SLAVE_TO_MASTER_PORT(dc, port) (port << (16 + dc))
1485 1566
1486/* osd super scale */ 1567/* osd super scale */
1487#define OSDSR_HV_SIZEIN 0x3130 1568#define OSDSR_HV_SIZEIN 0x3130
@@ -1523,7 +1604,6 @@
1523#define OSD1_AFBCD_STATUS 0x31a8 1604#define OSD1_AFBCD_STATUS 0x31a8
1524#define OSD1_AFBCD_PIXEL_HSCOPE 0x31a9 1605#define OSD1_AFBCD_PIXEL_HSCOPE 0x31a9
1525#define OSD1_AFBCD_PIXEL_VSCOPE 0x31aa 1606#define OSD1_AFBCD_PIXEL_VSCOPE 0x31aa
1526#define VIU_MISC_CTRL1 0x1a07
1527 1607
1528/* add for gxm and 962e dv core2 */ 1608/* add for gxm and 962e dv core2 */
1529#define DOLBY_CORE2A_SWAP_CTRL1 0x3434 1609#define DOLBY_CORE2A_SWAP_CTRL1 0x3434
@@ -1538,8 +1618,6 @@
1538#define VPU_MAFBC_COMMAND 0x3a05 1618#define VPU_MAFBC_COMMAND 0x3a05
1539#define VPU_MAFBC_STATUS 0x3a06 1619#define VPU_MAFBC_STATUS 0x3a06
1540#define VPU_MAFBC_SURFACE_CFG 0x3a07 1620#define VPU_MAFBC_SURFACE_CFG 0x3a07
1541
1542/* osd afbc on g12a */
1543#define VPU_MAFBC_HEADER_BUF_ADDR_LOW_S0 0x3a10 1621#define VPU_MAFBC_HEADER_BUF_ADDR_LOW_S0 0x3a10
1544#define VPU_MAFBC_HEADER_BUF_ADDR_HIGH_S0 0x3a11 1622#define VPU_MAFBC_HEADER_BUF_ADDR_HIGH_S0 0x3a11
1545#define VPU_MAFBC_FORMAT_SPECIFIER_S0 0x3a12 1623#define VPU_MAFBC_FORMAT_SPECIFIER_S0 0x3a12
@@ -1597,10 +1675,18 @@
1597#define VPU_MAFBC_PREFETCH_CFG_S3 0x3a7c 1675#define VPU_MAFBC_PREFETCH_CFG_S3 0x3a7c
1598 1676
1599#define DOLBY_PATH_CTRL 0x1a0c 1677#define DOLBY_PATH_CTRL 0x1a0c
1678#define DOLBY_BYPASS_EN(val) (val & 0xf)
1600#define OSD_PATH_MISC_CTRL 0x1a0e 1679#define OSD_PATH_MISC_CTRL 0x1a0e
1601#define MALI_AFBCD_TOP_CTRL 0x1a0f 1680#define MALI_AFBCD_TOP_CTRL 0x1a0f
1602 1681
1603#define VIU_OSD_BLEND_CTRL 0x39b0 1682#define VIU_OSD_BLEND_CTRL 0x39b0
1683#define VIU_OSD_BLEND_REORDER(dest, src) ((src) << (dest * 4))
1684#define VIU_OSD_BLEND_DIN_EN(bits) ((bits & 0xf) << 20)
1685#define VIU_OSD_BLEND1_DIN3_BYPASS_TO_DOUT1 BIT(24)
1686#define VIU_OSD_BLEND1_DOUT_BYPASS_TO_BLEND2 BIT(25)
1687#define VIU_OSD_BLEND_DIN0_BYPASS_TO_DOUT0 BIT(26)
1688#define VIU_OSD_BLEND_BLEN2_PREMULT_EN(input) ((input & 0x3) << 27)
1689#define VIU_OSD_BLEND_HOLD_LINES(lines) ((lines & 0x7) << 29)
1604#define VIU_OSD_BLEND_CTRL1 0x39c0 1690#define VIU_OSD_BLEND_CTRL1 0x39c0
1605#define VIU_OSD_BLEND_DIN0_SCOPE_H 0x39b1 1691#define VIU_OSD_BLEND_DIN0_SCOPE_H 0x39b1
1606#define VIU_OSD_BLEND_DIN0_SCOPE_V 0x39b2 1692#define VIU_OSD_BLEND_DIN0_SCOPE_V 0x39b2
@@ -1630,13 +1716,27 @@
1630#define VPP_SLEEP_CTRL 0x1dfa 1716#define VPP_SLEEP_CTRL 0x1dfa
1631#define VD1_BLEND_SRC_CTRL 0x1dfb 1717#define VD1_BLEND_SRC_CTRL 0x1dfb
1632#define VD2_BLEND_SRC_CTRL 0x1dfc 1718#define VD2_BLEND_SRC_CTRL 0x1dfc
1719#define VD_BLEND_PREBLD_SRC_VD1 (1 << 0)
1720#define VD_BLEND_PREBLD_SRC_VD2 (2 << 0)
1721#define VD_BLEND_PREBLD_SRC_OSD1 (3 << 0)
1722#define VD_BLEND_PREBLD_SRC_OSD2 (4 << 0)
1723#define VD_BLEND_PREBLD_PREMULT_EN BIT(4)
1724#define VD_BLEND_POSTBLD_SRC_VD1 (1 << 8)
1725#define VD_BLEND_POSTBLD_SRC_VD2 (2 << 8)
1726#define VD_BLEND_POSTBLD_SRC_OSD1 (3 << 8)
1727#define VD_BLEND_POSTBLD_SRC_OSD2 (4 << 8)
1728#define VD_BLEND_POSTBLD_PREMULT_EN BIT(16)
1633#define OSD1_BLEND_SRC_CTRL 0x1dfd 1729#define OSD1_BLEND_SRC_CTRL 0x1dfd
1634#define OSD2_BLEND_SRC_CTRL 0x1dfe 1730#define OSD2_BLEND_SRC_CTRL 0x1dfe
1731#define OSD_BLEND_POSTBLD_SRC_VD1 (1 << 8)
1732#define OSD_BLEND_POSTBLD_SRC_VD2 (2 << 8)
1733#define OSD_BLEND_POSTBLD_SRC_OSD1 (3 << 8)
1734#define OSD_BLEND_POSTBLD_SRC_OSD2 (4 << 8)
1735#define OSD_BLEND_PATH_SEL_ENABLE BIT(20)
1635 1736
1636#define VPP_POST_BLEND_BLEND_DUMMY_DATA 0x3968 1737#define VPP_POST_BLEND_BLEND_DUMMY_DATA 0x3968
1637#define VPP_POST_BLEND_DUMMY_ALPHA 0x3969 1738#define VPP_POST_BLEND_DUMMY_ALPHA 0x3969
1638#define VPP_RDARB_MODE 0x3978 1739#define VPP_RDARB_MODE 0x3978
1639#define VPP_RDARB_REQEN_SLV 0x3979 1740#define VPP_RDARB_REQEN_SLV 0x3979
1640#define VPU_RDARB_MODE_L2C1 0x279d
1641 1741
1642#endif /* __MESON_REGISTERS_H */ 1742#endif /* __MESON_REGISTERS_H */
diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
index 8abff51f937d..869231c93617 100644
--- a/drivers/gpu/drm/meson/meson_vclk.c
+++ b/drivers/gpu/drm/meson/meson_vclk.c
@@ -97,6 +97,7 @@
 #define HHI_VDAC_CNTL1		0x2F8 /* 0xbe offset in data sheet */
 
 #define HHI_HDMI_PLL_CNTL	0x320 /* 0xc8 offset in data sheet */
+#define HHI_HDMI_PLL_CNTL_EN	BIT(30)
 #define HHI_HDMI_PLL_CNTL2	0x324 /* 0xc9 offset in data sheet */
 #define HHI_HDMI_PLL_CNTL3	0x328 /* 0xca offset in data sheet */
 #define HHI_HDMI_PLL_CNTL4	0x32C /* 0xcb offset in data sheet */
@@ -469,7 +470,7 @@ void meson_hdmi_pll_set_params(struct meson_drm *priv, unsigned int m,
 
 	/* Enable and unreset */
 	regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
-			   0x7 << 28, 0x4 << 28);
+			   0x7 << 28, HHI_HDMI_PLL_CNTL_EN);
 
 	/* Poll for lock bit */
 	regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
@@ -496,6 +497,7 @@ void meson_hdmi_pll_set_params(struct meson_drm *priv, unsigned int m,
 		regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x0b3a0400 | m);
 
 		/* Enable and reset */
+		/* TODO: add specific macro for g12a here */
 		regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
 				   0x3 << 28, 0x3 << 28);
501 503
@@ -970,7 +972,8 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
 		meson_venci_cvbs_clock_config(priv);
 		return;
 	} else if (target == MESON_VCLK_TARGET_DMT) {
-		/* The DMT clock path is fixed after the PLL:
+		/*
+		 * The DMT clock path is fixed after the PLL:
 		 * - automatic PLL freq + OD management
 		 * - vid_pll_div = VID_PLL_DIV_5
 		 * - vclk_div = 2
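HHI_HDMI_PLL_CNTL_EN names bit 30 of HHI_HDMI_PLL_CNTL, the same value the old 0x4 << 28 encoded; the mask 0x7 << 28 still spans bits 30..28, so the masked update sets the enable bit while clearing the two bits below it. A minimal sketch of the call, assuming the hhi syscon regmap and register defines from meson_vclk.c:

#include <linux/regmap.h>

/* Sketch: enable the HDMI PLL and clear the rest of the 3-bit field in
 * one masked update; error handling is omitted for brevity.
 */
static void example_hdmi_pll_enable(struct regmap *hhi)
{
	regmap_update_bits(hhi, HHI_HDMI_PLL_CNTL,
			   0x7 << 28, HHI_HDMI_PLL_CNTL_EN);
}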
diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c
index 3d4791798ae0..679d2274531c 100644
--- a/drivers/gpu/drm/meson/meson_venc.c
+++ b/drivers/gpu/drm/meson/meson_venc.c
@@ -61,9 +61,9 @@
 /* HHI Registers */
 #define HHI_GCLK_MPEG2		0x148 /* 0x52 offset in data sheet */
 #define HHI_VDAC_CNTL0		0x2F4 /* 0xbd offset in data sheet */
-#define HHI_VDAC_CNTL0_G12A	0x2EC /* 0xbd offset in data sheet */
+#define HHI_VDAC_CNTL0_G12A	0x2EC /* 0xbb offset in data sheet */
 #define HHI_VDAC_CNTL1		0x2F8 /* 0xbe offset in data sheet */
-#define HHI_VDAC_CNTL1_G12A	0x2F0 /* 0xbe offset in data sheet */
+#define HHI_VDAC_CNTL1_G12A	0x2F0 /* 0xbc offset in data sheet */
 #define HHI_HDMI_PHY_CNTL0	0x3a0 /* 0xe8 offset in data sheet */
 
 struct meson_cvbs_enci_mode meson_cvbs_enci_pal = {
@@ -192,7 +192,7 @@ union meson_hdmi_venc_mode meson_hdmi_enci_mode_480i = {
192 .hso_end = 129, 192 .hso_end = 129,
193 .vso_even = 3, 193 .vso_even = 3,
194 .vso_odd = 260, 194 .vso_odd = 260,
195 .macv_max_amp = 0x810b, 195 .macv_max_amp = 0xb,
196 .video_prog_mode = 0xf0, 196 .video_prog_mode = 0xf0,
197 .video_mode = 0x8, 197 .video_mode = 0x8,
198 .sch_adjust = 0x20, 198 .sch_adjust = 0x20,
@@ -212,7 +212,7 @@ union meson_hdmi_venc_mode meson_hdmi_enci_mode_576i = {
212 .hso_end = 129, 212 .hso_end = 129,
213 .vso_even = 3, 213 .vso_even = 3,
214 .vso_odd = 260, 214 .vso_odd = 260,
215 .macv_max_amp = 8107, 215 .macv_max_amp = 0x7,
216 .video_prog_mode = 0xff, 216 .video_prog_mode = 0xff,
217 .video_mode = 0x13, 217 .video_mode = 0x13,
218 .sch_adjust = 0x28, 218 .sch_adjust = 0x28,
@@ -976,6 +976,7 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
976 unsigned int eof_lines; 976 unsigned int eof_lines;
977 unsigned int sof_lines; 977 unsigned int sof_lines;
978 unsigned int vsync_lines; 978 unsigned int vsync_lines;
979 u32 reg;
979 980
980 /* Use VENCI for 480i and 576i and double HDMI pixels */ 981 /* Use VENCI for 480i and 576i and double HDMI pixels */
981 if (mode->flags & DRM_MODE_FLAG_DBLCLK) { 982 if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
@@ -1048,8 +1049,11 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
1048 unsigned int lines_f1; 1049 unsigned int lines_f1;
1049 1050
1050 /* CVBS Filter settings */ 1051 /* CVBS Filter settings */
1051 writel_relaxed(0x12, priv->io_base + _REG(ENCI_CFILT_CTRL)); 1052 writel_relaxed(ENCI_CFILT_CMPT_SEL_HIGH | 0x10,
1052 writel_relaxed(0x12, priv->io_base + _REG(ENCI_CFILT_CTRL2)); 1053 priv->io_base + _REG(ENCI_CFILT_CTRL));
1054 writel_relaxed(ENCI_CFILT_CMPT_CR_DLY(2) |
1055 ENCI_CFILT_CMPT_CB_DLY(1),
1056 priv->io_base + _REG(ENCI_CFILT_CTRL2));
1053 1057
1054 /* Digital Video Select : Interlace, clk27 clk, external */ 1058 /* Digital Video Select : Interlace, clk27 clk, external */
1055 writel_relaxed(0, priv->io_base + _REG(VENC_DVI_SETTING)); 1059 writel_relaxed(0, priv->io_base + _REG(VENC_DVI_SETTING));
@@ -1071,8 +1075,9 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
1071 priv->io_base + _REG(ENCI_SYNC_VSO_ODDLN)); 1075 priv->io_base + _REG(ENCI_SYNC_VSO_ODDLN));
1072 1076
1073 /* Macrovision max amplitude change */ 1077 /* Macrovision max amplitude change */
1074 writel_relaxed(vmode->enci.macv_max_amp, 1078 writel_relaxed(ENCI_MACV_MAX_AMP_ENABLE_CHANGE |
1075 priv->io_base + _REG(ENCI_MACV_MAX_AMP)); 1079 ENCI_MACV_MAX_AMP_VAL(vmode->enci.macv_max_amp),
1080 priv->io_base + _REG(ENCI_MACV_MAX_AMP));
1076 1081
1077 /* Video mode */ 1082 /* Video mode */
1078 writel_relaxed(vmode->enci.video_prog_mode, 1083 writel_relaxed(vmode->enci.video_prog_mode,
@@ -1080,7 +1085,8 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
1080 writel_relaxed(vmode->enci.video_mode, 1085 writel_relaxed(vmode->enci.video_mode,
1081 priv->io_base + _REG(ENCI_VIDEO_MODE)); 1086 priv->io_base + _REG(ENCI_VIDEO_MODE));
1082 1087
1083 /* Advanced Video Mode : 1088 /*
1089 * Advanced Video Mode :
1084 * Demux shifting 0x2 1090 * Demux shifting 0x2
1085 * Blank line end at line17/22 1091 * Blank line end at line17/22
1086 * High bandwidth Luma Filter 1092 * High bandwidth Luma Filter
@@ -1088,7 +1094,10 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
1088 * Bypass luma low pass filter 1094 * Bypass luma low pass filter
1089 * No macrovision on CSYNC 1095 * No macrovision on CSYNC
1090 */ 1096 */
1091 writel_relaxed(0x26, priv->io_base + _REG(ENCI_VIDEO_MODE_ADV)); 1097 writel_relaxed(ENCI_VIDEO_MODE_ADV_DMXMD(2) |
1098 ENCI_VIDEO_MODE_ADV_VBICTL_LINE_17_22 |
1099 ENCI_VIDEO_MODE_ADV_YBW_HIGH,
1100 priv->io_base + _REG(ENCI_VIDEO_MODE_ADV));
1092 1101
1093 writel(vmode->enci.sch_adjust, 1102 writel(vmode->enci.sch_adjust,
1094 priv->io_base + _REG(ENCI_VIDEO_SCH)); 1103 priv->io_base + _REG(ENCI_VIDEO_SCH));
@@ -1104,8 +1113,17 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
1104 /* UNreset Interlaced TV Encoder */ 1113 /* UNreset Interlaced TV Encoder */
1105 writel_relaxed(0, priv->io_base + _REG(ENCI_DBG_PX_RST)); 1114 writel_relaxed(0, priv->io_base + _REG(ENCI_DBG_PX_RST));
1106 1115
1107 /* Enable Vfifo2vd, Y_Cb_Y_Cr select */ 1116 /*
1108 writel_relaxed(0x4e01, priv->io_base + _REG(ENCI_VFIFO2VD_CTL)); 1117 * Enable Vfifo2vd and set Y_Cb_Y_Cr:
1118 * Corresponding value:
1119 * Y => 00 or 10
1120 * Cb => 01
1121 * Cr => 11
1122 * Ex: 0x4e => 01001110 would mean Cb/Y/Cr/Y
1123 */
1124 writel_relaxed(ENCI_VFIFO2VD_CTL_ENABLE |
1125 ENCI_VFIFO2VD_CTL_VD_SEL(0x4e),
1126 priv->io_base + _REG(ENCI_VFIFO2VD_CTL));
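The comment added above documents the 2-bit component selector behind the 0x4e value; a stand-alone decode of that value (sketch only, not driver code):

/*
 * Worked example of the Y_Cb_Y_Cr selector described above: each 2-bit
 * field picks a component (00/10 = Y, 01 = Cb, 11 = Cr), so
 * 0x4e = 01 00 11 10 decodes to Cb/Y/Cr/Y.
 */
#include <stdio.h>
#include <stdint.h>

static const char *component(unsigned int sel)
{
	switch (sel & 0x3) {
	case 0x1: return "Cb";
	case 0x3: return "Cr";
	default:  return "Y";	/* 00 or 10 */
	}
}

int main(void)
{
	uint8_t vd_sel = 0x4e;
	int slot;

	for (slot = 3; slot >= 0; slot--)
		printf("%s%s", component(vd_sel >> (slot * 2)),
		       slot ? "/" : "\n");
	return 0;
}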
1109 1127
1110 /* Timings */ 1128 /* Timings */
1111 writel_relaxed(vmode->enci.pixel_start, 1129 writel_relaxed(vmode->enci.pixel_start,
@@ -1127,7 +1145,8 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
1127 meson_vpp_setup_mux(priv, MESON_VIU_VPP_MUX_ENCI); 1145 meson_vpp_setup_mux(priv, MESON_VIU_VPP_MUX_ENCI);
1128 1146
1129 /* Interlace video enable */ 1147 /* Interlace video enable */
1130 writel_relaxed(1, priv->io_base + _REG(ENCI_VIDEO_EN)); 1148 writel_relaxed(ENCI_VIDEO_EN_ENABLE,
1149 priv->io_base + _REG(ENCI_VIDEO_EN));
1131 1150
1132 lines_f0 = mode->vtotal >> 1; 1151 lines_f0 = mode->vtotal >> 1;
1133 lines_f1 = lines_f0 + 1; 1152 lines_f1 = lines_f0 + 1;
@@ -1374,7 +1393,8 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
1374 writel_relaxed(1, priv->io_base + _REG(ENCP_VIDEO_EN)); 1393 writel_relaxed(1, priv->io_base + _REG(ENCP_VIDEO_EN));
1375 1394
 1376 /* Set DE signal polarity to active high */ 1395 /* Set DE signal polarity to active high */
1377 writel_bits_relaxed(BIT(14), BIT(14), 1396 writel_bits_relaxed(ENCP_VIDEO_MODE_DE_V_HIGH,
1397 ENCP_VIDEO_MODE_DE_V_HIGH,
1378 priv->io_base + _REG(ENCP_VIDEO_MODE)); 1398 priv->io_base + _REG(ENCP_VIDEO_MODE));
1379 1399
1380 /* Program DE timing */ 1400 /* Program DE timing */
@@ -1493,13 +1513,39 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
1493 meson_vpp_setup_mux(priv, MESON_VIU_VPP_MUX_ENCP); 1513 meson_vpp_setup_mux(priv, MESON_VIU_VPP_MUX_ENCP);
1494 } 1514 }
1495 1515
1496 writel_relaxed((use_enci ? 1 : 2) | 1516 /* Set VPU HDMI setting */
1497 (mode->flags & DRM_MODE_FLAG_PHSYNC ? 1 << 2 : 0) | 1517 /* Select ENCP or ENCI data to HDMI */
1498 (mode->flags & DRM_MODE_FLAG_PVSYNC ? 1 << 3 : 0) | 1518 if (use_enci)
1499 4 << 5 | 1519 reg = VPU_HDMI_ENCI_DATA_TO_HDMI;
1500 (venc_repeat ? 1 << 8 : 0) | 1520 else
1501 (hdmi_repeat ? 1 << 12 : 0), 1521 reg = VPU_HDMI_ENCP_DATA_TO_HDMI;
1502 priv->io_base + _REG(VPU_HDMI_SETTING)); 1522
1523 /* Invert polarity of HSYNC from VENC */
1524 if (mode->flags & DRM_MODE_FLAG_PHSYNC)
1525 reg |= VPU_HDMI_INV_HSYNC;
1526
1527 /* Invert polarity of VSYNC from VENC */
1528 if (mode->flags & DRM_MODE_FLAG_PVSYNC)
1529 reg |= VPU_HDMI_INV_VSYNC;
1530
1531 /* Output data format: CbYCr */
1532 reg |= VPU_HDMI_OUTPUT_CBYCR;
1533
1534 /*
1535 * Write rate to the async FIFO between VENC and HDMI.
1536 * One write every 2 wr_clk.
1537 */
1538 if (venc_repeat)
1539 reg |= VPU_HDMI_WR_RATE(2);
1540
1541 /*
1542 * Read rate to the async FIFO between VENC and HDMI.
1543 * One read every 2 wr_clk.
1544 */
1545 if (hdmi_repeat)
1546 reg |= VPU_HDMI_RD_RATE(2);
1547
1548 writel_relaxed(reg, priv->io_base + _REG(VPU_HDMI_SETTING));
1503 1549
1504 priv->venc.hdmi_repeat = hdmi_repeat; 1550 priv->venc.hdmi_repeat = hdmi_repeat;
1505 priv->venc.venc_repeat = venc_repeat; 1551 priv->venc.venc_repeat = venc_repeat;
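The block above replaces one write of or'ed magic shifts with a value built from named flags. The sketch below checks that the two formulations agree for a representative case; the macro values are assumptions inferred from the removed expression, since the real defines sit in the driver's register header outside this hunk:

/*
 * Sketch: the named-flag form of VPU_HDMI_SETTING should encode the
 * same value the old magic-number write produced.  All macro values
 * below are assumptions derived from the removed expression
 * ((use_enci ? 1 : 2) | hsync << 2 | vsync << 3 | 4 << 5 |
 *  venc_rpt << 8 | hdmi_rpt << 12), not the driver's actual header.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define BIT(n)				(1U << (n))
#define VPU_HDMI_ENCI_DATA_TO_HDMI	BIT(0)		/* assumed */
#define VPU_HDMI_ENCP_DATA_TO_HDMI	BIT(1)		/* assumed */
#define VPU_HDMI_INV_HSYNC		BIT(2)		/* assumed */
#define VPU_HDMI_INV_VSYNC		BIT(3)		/* assumed */
#define VPU_HDMI_OUTPUT_CBYCR		(4 << 5)	/* assumed */
#define VPU_HDMI_WR_RATE(x)		(((x) - 1) << 8)  /* assumed */
#define VPU_HDMI_RD_RATE(x)		(((x) - 1) << 12) /* assumed */

static uint32_t vpu_hdmi_setting(bool use_enci, bool phsync, bool pvsync,
				 bool venc_repeat, bool hdmi_repeat)
{
	uint32_t reg = use_enci ? VPU_HDMI_ENCI_DATA_TO_HDMI
				: VPU_HDMI_ENCP_DATA_TO_HDMI;

	if (phsync)
		reg |= VPU_HDMI_INV_HSYNC;
	if (pvsync)
		reg |= VPU_HDMI_INV_VSYNC;
	reg |= VPU_HDMI_OUTPUT_CBYCR;
	if (venc_repeat)
		reg |= VPU_HDMI_WR_RATE(2);
	if (hdmi_repeat)
		reg |= VPU_HDMI_RD_RATE(2);
	return reg;
}

int main(void)
{
	/* Old-style expression for an ENCI mode with both repeats set. */
	uint32_t old = 1 | (4 << 5) | (1 << 8) | (1 << 12);

	assert(vpu_hdmi_setting(true, false, false, true, true) == old);
	return 0;
}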
@@ -1512,12 +1558,17 @@ EXPORT_SYMBOL_GPL(meson_venc_hdmi_mode_set);
1512void meson_venci_cvbs_mode_set(struct meson_drm *priv, 1558void meson_venci_cvbs_mode_set(struct meson_drm *priv,
1513 struct meson_cvbs_enci_mode *mode) 1559 struct meson_cvbs_enci_mode *mode)
1514{ 1560{
1561 u32 reg;
1562
1515 if (mode->mode_tag == priv->venc.current_mode) 1563 if (mode->mode_tag == priv->venc.current_mode)
1516 return; 1564 return;
1517 1565
1518 /* CVBS Filter settings */ 1566 /* CVBS Filter settings */
1519 writel_relaxed(0x12, priv->io_base + _REG(ENCI_CFILT_CTRL)); 1567 writel_relaxed(ENCI_CFILT_CMPT_SEL_HIGH | 0x10,
1520 writel_relaxed(0x12, priv->io_base + _REG(ENCI_CFILT_CTRL2)); 1568 priv->io_base + _REG(ENCI_CFILT_CTRL));
1569 writel_relaxed(ENCI_CFILT_CMPT_CR_DLY(2) |
1570 ENCI_CFILT_CMPT_CB_DLY(1),
1571 priv->io_base + _REG(ENCI_CFILT_CTRL2));
1521 1572
1522 /* Digital Video Select : Interlace, clk27 clk, external */ 1573 /* Digital Video Select : Interlace, clk27 clk, external */
1523 writel_relaxed(0, priv->io_base + _REG(VENC_DVI_SETTING)); 1574 writel_relaxed(0, priv->io_base + _REG(VENC_DVI_SETTING));
@@ -1539,8 +1590,9 @@ void meson_venci_cvbs_mode_set(struct meson_drm *priv,
1539 priv->io_base + _REG(ENCI_SYNC_VSO_ODDLN)); 1590 priv->io_base + _REG(ENCI_SYNC_VSO_ODDLN));
1540 1591
1541 /* Macrovision max amplitude change */ 1592 /* Macrovision max amplitude change */
1542 writel_relaxed(0x8100 + mode->macv_max_amp, 1593 writel_relaxed(ENCI_MACV_MAX_AMP_ENABLE_CHANGE |
1543 priv->io_base + _REG(ENCI_MACV_MAX_AMP)); 1594 ENCI_MACV_MAX_AMP_VAL(mode->macv_max_amp),
1595 priv->io_base + _REG(ENCI_MACV_MAX_AMP));
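Here the old 0x8100 + mode->macv_max_amp form makes the split explicit: 0x8100 is the enable-change part and the low byte is the amplitude, which is why the mode tables earlier in this patch drop the prefix (0x810b becomes 0xb, 8107 becomes 0x7). A small consistency check under assumed macro values (the real defines live in the register header, not shown here):

/*
 * Sketch: old form = 0x8100 + macv_max_amp, new form =
 * ENCI_MACV_MAX_AMP_ENABLE_CHANGE | ENCI_MACV_MAX_AMP_VAL(x).
 * The two macro values below are assumptions consistent with the
 * removed code, not the driver's actual definitions.
 */
#include <assert.h>
#include <stdint.h>

#define ENCI_MACV_MAX_AMP_ENABLE_CHANGE	0x8100		/* assumed */
#define ENCI_MACV_MAX_AMP_VAL(x)	((x) & 0xff)	/* assumed */

int main(void)
{
	uint32_t amps[] = { 0xb /* 480i */, 0x7 /* 576i */ };

	for (unsigned int i = 0; i < 2; i++) {
		uint32_t oldval = 0x8100 + amps[i];
		uint32_t newval = ENCI_MACV_MAX_AMP_ENABLE_CHANGE |
				  ENCI_MACV_MAX_AMP_VAL(amps[i]);

		assert(oldval == newval);  /* same register value either way */
	}
	return 0;
}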
1544 1596
1545 /* Video mode */ 1597 /* Video mode */
1546 writel_relaxed(mode->video_prog_mode, 1598 writel_relaxed(mode->video_prog_mode,
@@ -1548,7 +1600,8 @@ void meson_venci_cvbs_mode_set(struct meson_drm *priv,
1548 writel_relaxed(mode->video_mode, 1600 writel_relaxed(mode->video_mode,
1549 priv->io_base + _REG(ENCI_VIDEO_MODE)); 1601 priv->io_base + _REG(ENCI_VIDEO_MODE));
1550 1602
1551 /* Advanced Video Mode : 1603 /*
1604 * Advanced Video Mode :
1552 * Demux shifting 0x2 1605 * Demux shifting 0x2
1553 * Blank line end at line17/22 1606 * Blank line end at line17/22

1554 * High bandwidth Luma Filter 1607 * High bandwidth Luma Filter
@@ -1556,7 +1609,10 @@ void meson_venci_cvbs_mode_set(struct meson_drm *priv,
1556 * Bypass luma low pass filter 1609 * Bypass luma low pass filter
1557 * No macrovision on CSYNC 1610 * No macrovision on CSYNC
1558 */ 1611 */
1559 writel_relaxed(0x26, priv->io_base + _REG(ENCI_VIDEO_MODE_ADV)); 1612 writel_relaxed(ENCI_VIDEO_MODE_ADV_DMXMD(2) |
1613 ENCI_VIDEO_MODE_ADV_VBICTL_LINE_17_22 |
1614 ENCI_VIDEO_MODE_ADV_YBW_HIGH,
1615 priv->io_base + _REG(ENCI_VIDEO_MODE_ADV));
1560 1616
1561 writel(mode->sch_adjust, priv->io_base + _REG(ENCI_VIDEO_SCH)); 1617 writel(mode->sch_adjust, priv->io_base + _REG(ENCI_VIDEO_SCH));
1562 1618
@@ -1588,16 +1644,50 @@ void meson_venci_cvbs_mode_set(struct meson_drm *priv,
1588 /* UNreset Interlaced TV Encoder */ 1644 /* UNreset Interlaced TV Encoder */
1589 writel_relaxed(0, priv->io_base + _REG(ENCI_DBG_PX_RST)); 1645 writel_relaxed(0, priv->io_base + _REG(ENCI_DBG_PX_RST));
1590 1646
1591 /* Enable Vfifo2vd, Y_Cb_Y_Cr select */ 1647 /*
1592 writel_relaxed(0x4e01, priv->io_base + _REG(ENCI_VFIFO2VD_CTL)); 1648 * Enable Vfifo2vd and set Y_Cb_Y_Cr:
1649 * Corresponding value:
1650 * Y => 00 or 10
1651 * Cb => 01
1652 * Cr => 11
1653 * Ex: 0x4e => 01001110 would mean Cb/Y/Cr/Y
1654 */
1655 writel_relaxed(ENCI_VFIFO2VD_CTL_ENABLE |
1656 ENCI_VFIFO2VD_CTL_VD_SEL(0x4e),
1657 priv->io_base + _REG(ENCI_VFIFO2VD_CTL));
1593 1658
1594 /* Power UP Dacs */ 1659 /* Power UP Dacs */
1595 writel_relaxed(0, priv->io_base + _REG(VENC_VDAC_SETTING)); 1660 writel_relaxed(0, priv->io_base + _REG(VENC_VDAC_SETTING));
1596 1661
1597 /* Video Upsampling */ 1662 /* Video Upsampling */
1598 writel_relaxed(0x0061, priv->io_base + _REG(VENC_UPSAMPLE_CTRL0)); 1663 /*
1599 writel_relaxed(0x4061, priv->io_base + _REG(VENC_UPSAMPLE_CTRL1)); 1664 * CTRL0, CTRL1 and CTRL2:
1600 writel_relaxed(0x5061, priv->io_base + _REG(VENC_UPSAMPLE_CTRL2)); 1665 * Filter0: input data sampled every 2 clocks
1666 * Filter1: filtering and upsample enable
1667 */
1668 reg = VENC_UPSAMPLE_CTRL_F0_2_CLK_RATIO | VENC_UPSAMPLE_CTRL_F1_EN |
1669 VENC_UPSAMPLE_CTRL_F1_UPSAMPLE_EN;
1670
1671 /*
1672 * Upsample CTRL0:
1673 * Interlace High Bandwidth Luma
1674 */
1675 writel_relaxed(VENC_UPSAMPLE_CTRL_INTERLACE_HIGH_LUMA | reg,
1676 priv->io_base + _REG(VENC_UPSAMPLE_CTRL0));
1677
1678 /*
1679 * Upsample CTRL1:
1680 * Interlace Pb
1681 */
1682 writel_relaxed(VENC_UPSAMPLE_CTRL_INTERLACE_PB | reg,
1683 priv->io_base + _REG(VENC_UPSAMPLE_CTRL1));
1684
1685 /*
1686 * Upsample CTRL2:
1687 * Interlace R
1688 */
1689 writel_relaxed(VENC_UPSAMPLE_CTRL_INTERLACE_PR | reg,
1690 priv->io_base + _REG(VENC_UPSAMPLE_CTRL2));
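The three magic values being removed above differ only in their top nibble; factoring them shows what the shared reg base and the per-register interlace selects correspond to (stand-alone sketch, not driver code):

/*
 * Factor the three constants replaced above: the common low bits are
 * what the new shared `reg` captures, the per-register remainder is
 * the interlace source select.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint32_t old[3] = { 0x0061, 0x4061, 0x5061 };
	uint32_t common = old[0] & old[1] & old[2];
	int i;

	printf("common filter bits: 0x%04x\n", common);	/* 0x0061 */
	for (i = 0; i < 3; i++)
		printf("UPSAMPLE_CTRL%d select: 0x%04x\n",
		       i, old[i] & ~common);	/* 0x0000 / 0x4000 / 0x5000 */
	return 0;
}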
1601 1691
1602 /* Select Interlace Y DACs */ 1692 /* Select Interlace Y DACs */
1603 writel_relaxed(0, priv->io_base + _REG(VENC_VDAC_DACSEL0)); 1693 writel_relaxed(0, priv->io_base + _REG(VENC_VDAC_DACSEL0));
@@ -1611,14 +1701,16 @@ void meson_venci_cvbs_mode_set(struct meson_drm *priv,
1611 meson_vpp_setup_mux(priv, MESON_VIU_VPP_MUX_ENCI); 1701 meson_vpp_setup_mux(priv, MESON_VIU_VPP_MUX_ENCI);
1612 1702
1613 /* Enable ENCI FIFO */ 1703 /* Enable ENCI FIFO */
1614 writel_relaxed(0x2000, priv->io_base + _REG(VENC_VDAC_FIFO_CTRL)); 1704 writel_relaxed(VENC_VDAC_FIFO_EN_ENCI_ENABLE,
1705 priv->io_base + _REG(VENC_VDAC_FIFO_CTRL));
1615 1706
1616 /* Select ENCI DACs 0, 1, 4, and 5 */ 1707 /* Select ENCI DACs 0, 1, 4, and 5 */
1617 writel_relaxed(0x11, priv->io_base + _REG(ENCI_DACSEL_0)); 1708 writel_relaxed(0x11, priv->io_base + _REG(ENCI_DACSEL_0));
1618 writel_relaxed(0x11, priv->io_base + _REG(ENCI_DACSEL_1)); 1709 writel_relaxed(0x11, priv->io_base + _REG(ENCI_DACSEL_1));
1619 1710
1620 /* Interlace video enable */ 1711 /* Interlace video enable */
1621 writel_relaxed(1, priv->io_base + _REG(ENCI_VIDEO_EN)); 1712 writel_relaxed(ENCI_VIDEO_EN_ENABLE,
1713 priv->io_base + _REG(ENCI_VIDEO_EN));
1622 1714
1623 /* Configure Video Saturation / Contrast / Brightness / Hue */ 1715 /* Configure Video Saturation / Contrast / Brightness / Hue */
1624 writel_relaxed(mode->video_saturation, 1716 writel_relaxed(mode->video_saturation,
@@ -1631,7 +1723,8 @@ void meson_venci_cvbs_mode_set(struct meson_drm *priv,
1631 priv->io_base + _REG(ENCI_VIDEO_HUE)); 1723 priv->io_base + _REG(ENCI_VIDEO_HUE));
1632 1724
1633 /* Enable DAC0 Filter */ 1725 /* Enable DAC0 Filter */
1634 writel_relaxed(0x1, priv->io_base + _REG(VENC_VDAC_DAC0_FILT_CTRL0)); 1726 writel_relaxed(VENC_VDAC_DAC0_FILT_CTRL0_EN,
1727 priv->io_base + _REG(VENC_VDAC_DAC0_FILT_CTRL0));
1635 writel_relaxed(0xfc48, priv->io_base + _REG(VENC_VDAC_DAC0_FILT_CTRL1)); 1728 writel_relaxed(0xfc48, priv->io_base + _REG(VENC_VDAC_DAC0_FILT_CTRL1));
1636 1729
1637 /* 0 in Macrovision register 0 */ 1730 /* 0 in Macrovision register 0 */
@@ -1652,7 +1745,8 @@ unsigned int meson_venci_get_field(struct meson_drm *priv)
1652 1745
1653void meson_venc_enable_vsync(struct meson_drm *priv) 1746void meson_venc_enable_vsync(struct meson_drm *priv)
1654{ 1747{
1655 writel_relaxed(2, priv->io_base + _REG(VENC_INTCTRL)); 1748 writel_relaxed(VENC_INTCTRL_ENCI_LNRST_INT_EN,
1749 priv->io_base + _REG(VENC_INTCTRL));
1656 regmap_update_bits(priv->hhi, HHI_GCLK_MPEG2, BIT(25), BIT(25)); 1750 regmap_update_bits(priv->hhi, HHI_GCLK_MPEG2, BIT(25), BIT(25));
1657} 1751}
1658 1752
@@ -1680,7 +1774,8 @@ void meson_venc_init(struct meson_drm *priv)
1680 regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0); 1774 regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0);
1681 1775
1682 /* Disable HDMI */ 1776 /* Disable HDMI */
1683 writel_bits_relaxed(0x3, 0, 1777 writel_bits_relaxed(VPU_HDMI_ENCI_DATA_TO_HDMI |
1778 VPU_HDMI_ENCP_DATA_TO_HDMI, 0,
1684 priv->io_base + _REG(VPU_HDMI_SETTING)); 1779 priv->io_base + _REG(VPU_HDMI_SETTING));
1685 1780
1686 /* Disable all encoders */ 1781 /* Disable all encoders */
diff --git a/drivers/gpu/drm/meson/meson_venc_cvbs.c b/drivers/gpu/drm/meson/meson_venc_cvbs.c
index 45a467f10b9b..6dc130a24070 100644
--- a/drivers/gpu/drm/meson/meson_venc_cvbs.c
+++ b/drivers/gpu/drm/meson/meson_venc_cvbs.c
@@ -171,7 +171,8 @@ static void meson_venc_cvbs_encoder_enable(struct drm_encoder *encoder)
171 struct meson_drm *priv = meson_venc_cvbs->priv; 171 struct meson_drm *priv = meson_venc_cvbs->priv;
172 172
173 /* VDAC0 source is not from ATV */ 173 /* VDAC0 source is not from ATV */
174 writel_bits_relaxed(BIT(5), 0, priv->io_base + _REG(VENC_VDAC_DACSEL0)); 174 writel_bits_relaxed(VENC_VDAC_SEL_ATV_DMD, 0,
175 priv->io_base + _REG(VENC_VDAC_DACSEL0));
175 176
176 if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) { 177 if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) {
177 regmap_write(priv->hhi, HHI_VDAC_CNTL0, 1); 178 regmap_write(priv->hhi, HHI_VDAC_CNTL0, 1);
diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c
index 9f8a450d50d5..e70cd55d56c9 100644
--- a/drivers/gpu/drm/meson/meson_viu.c
+++ b/drivers/gpu/drm/meson/meson_viu.c
@@ -320,9 +320,9 @@ void meson_viu_osd1_reset(struct meson_drm *priv)
320 priv->io_base + _REG(VIU_OSD1_CTRL_STAT2)); 320 priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
321 321
322 /* Reset OSD1 */ 322 /* Reset OSD1 */
323 writel_bits_relaxed(BIT(0), BIT(0), 323 writel_bits_relaxed(VIU_SW_RESET_OSD1, VIU_SW_RESET_OSD1,
324 priv->io_base + _REG(VIU_SW_RESET)); 324 priv->io_base + _REG(VIU_SW_RESET));
325 writel_bits_relaxed(BIT(0), 0, 325 writel_bits_relaxed(VIU_SW_RESET_OSD1, 0,
326 priv->io_base + _REG(VIU_SW_RESET)); 326 priv->io_base + _REG(VIU_SW_RESET));
327 327
328 /* Rewrite these registers state lost in the reset */ 328 /* Rewrite these registers state lost in the reset */
@@ -335,15 +335,22 @@ void meson_viu_osd1_reset(struct meson_drm *priv)
335 meson_viu_load_matrix(priv); 335 meson_viu_load_matrix(priv);
336} 336}
337 337
338static inline uint32_t meson_viu_osd_burst_length_reg(uint32_t length)
339{
340 uint32_t val = (((length & 0x80) % 24) / 12);
341
342 return (((val & 0x3) << 10) | (((val & 0x4) >> 2) << 31));
343}
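The helper above centralises the burst-length field that the old code wrote as 1 << 10 (burst 32, G12A) or 3 << 10 (burst 64). Evaluating it stand-alone for those two lengths is a useful cross-check, since length & 0x80 is zero for both 32 and 64 as written:

/*
 * Stand-alone copy of the helper added above, evaluated for the two
 * lengths used by meson_viu_init().  As written it returns 0 for both
 * 32 and 64, which is worth double-checking against the old
 * 1 << 10 / 3 << 10 settings and the datasheet field encoding.
 */
#include <stdio.h>
#include <stdint.h>

static inline uint32_t meson_viu_osd_burst_length_reg(uint32_t length)
{
	uint32_t val = (((length & 0x80) % 24) / 12);

	return (((val & 0x3) << 10) | (((val & 0x4) >> 2) << 31));
}

int main(void)
{
	printf("burst 32 -> 0x%08x\n", meson_viu_osd_burst_length_reg(32));
	printf("burst 64 -> 0x%08x\n", meson_viu_osd_burst_length_reg(64));
	return 0;
}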
344
338void meson_viu_init(struct meson_drm *priv) 345void meson_viu_init(struct meson_drm *priv)
339{ 346{
340 uint32_t reg; 347 uint32_t reg;
341 348
342 /* Disable OSDs */ 349 /* Disable OSDs */
343 writel_bits_relaxed(BIT(0) | BIT(21), 0, 350 writel_bits_relaxed(VIU_OSD1_OSD_BLK_ENABLE | VIU_OSD1_OSD_ENABLE, 0,
344 priv->io_base + _REG(VIU_OSD1_CTRL_STAT)); 351 priv->io_base + _REG(VIU_OSD1_CTRL_STAT));
345 writel_bits_relaxed(BIT(0) | BIT(21), 0, 352 writel_bits_relaxed(VIU_OSD1_OSD_BLK_ENABLE | VIU_OSD1_OSD_ENABLE, 0,
346 priv->io_base + _REG(VIU_OSD2_CTRL_STAT)); 353 priv->io_base + _REG(VIU_OSD2_CTRL_STAT));
347 354
348 /* On GXL/GXM, Use the 10bit HDR conversion matrix */ 355 /* On GXL/GXM, Use the 10bit HDR conversion matrix */
349 if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") || 356 if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
@@ -354,19 +361,17 @@ void meson_viu_init(struct meson_drm *priv)
354 true); 361 true);
355 362
356 /* Initialize OSD1 fifo control register */ 363 /* Initialize OSD1 fifo control register */
357 reg = BIT(0) | /* Urgent DDR request priority */ 364 reg = VIU_OSD_DDR_PRIORITY_URGENT |
358 (4 << 5); /* hold_fifo_lines */ 365 VIU_OSD_HOLD_FIFO_LINES(4) |
366 VIU_OSD_FIFO_DEPTH_VAL(32) | /* fifo_depth_val: 32*8=256 */
367 VIU_OSD_WORDS_PER_BURST(4) | /* 4 words in 1 burst */
368 VIU_OSD_FIFO_LIMITS(2); /* fifo_lim: 2*16=32 */
369
359 if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) 370 if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
360 reg |= (1 << 10) | /* burst length 32 */ 371 reg |= meson_viu_osd_burst_length_reg(32);
361 (32 << 12) | /* fifo_depth_val: 32*8=256 */
362 (2 << 22) | /* 4 words in 1 burst */
363 (2 << 24) |
364 (1 << 31);
365 else 372 else
366 reg |= (3 << 10) | /* burst length 64 */ 373 reg |= meson_viu_osd_burst_length_reg(64);
367 (32 << 12) | /* fifo_depth_val: 32*8=256 */ 374
368 (2 << 22) | /* 4 words in 1 burst */
369 (2 << 24);
370 writel_relaxed(reg, priv->io_base + _REG(VIU_OSD1_FIFO_CTRL_STAT)); 375 writel_relaxed(reg, priv->io_base + _REG(VIU_OSD1_FIFO_CTRL_STAT));
371 writel_relaxed(reg, priv->io_base + _REG(VIU_OSD2_FIFO_CTRL_STAT)); 376 writel_relaxed(reg, priv->io_base + _REG(VIU_OSD2_FIFO_CTRL_STAT));
372 377
@@ -379,12 +384,9 @@ void meson_viu_init(struct meson_drm *priv)
379 priv->io_base + _REG(VIU_OSD2_CTRL_STAT2)); 384 priv->io_base + _REG(VIU_OSD2_CTRL_STAT2));
380 385
381 /* Disable VD1 AFBC */ 386 /* Disable VD1 AFBC */
 382 /* di_mif0_en=0 mif0_to_vpp_en=0 di_mad_en=0 */ 387 /* di_mif0_en=0 mif0_to_vpp_en=0 di_mad_en=0 and afbc vd1 set=0 */
383 writel_bits_relaxed(0x7 << 16, 0, 388 writel_bits_relaxed(VIU_CTRL0_VD1_AFBC_MASK, 0,
384 priv->io_base + _REG(VIU_MISC_CTRL0)); 389 priv->io_base + _REG(VIU_MISC_CTRL0));
385 /* afbc vd1 set=0 */
386 writel_bits_relaxed(BIT(20), 0,
387 priv->io_base + _REG(VIU_MISC_CTRL0));
388 writel_relaxed(0, priv->io_base + _REG(AFBC_ENABLE)); 390 writel_relaxed(0, priv->io_base + _REG(AFBC_ENABLE));
389 391
390 writel_relaxed(0x00FF00C0, 392 writel_relaxed(0x00FF00C0,
@@ -393,27 +395,31 @@ void meson_viu_init(struct meson_drm *priv)
393 priv->io_base + _REG(VD2_IF0_LUMA_FIFO_SIZE)); 395 priv->io_base + _REG(VD2_IF0_LUMA_FIFO_SIZE));
394 396
395 if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) { 397 if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
396 writel_relaxed(4 << 29 | 398 writel_relaxed(VIU_OSD_BLEND_REORDER(0, 1) |
397 1 << 27 | 399 VIU_OSD_BLEND_REORDER(1, 0) |
398 1 << 26 | /* blend_din0 input to blend0 */ 400 VIU_OSD_BLEND_REORDER(2, 0) |
399 1 << 25 | /* blend1_dout to blend2 */ 401 VIU_OSD_BLEND_REORDER(3, 0) |
400 1 << 24 | /* blend1_din3 input to blend1 */ 402 VIU_OSD_BLEND_DIN_EN(1) |
401 1 << 20 | 403 VIU_OSD_BLEND1_DIN3_BYPASS_TO_DOUT1 |
402 0 << 16 | 404 VIU_OSD_BLEND1_DOUT_BYPASS_TO_BLEND2 |
403 1, 405 VIU_OSD_BLEND_DIN0_BYPASS_TO_DOUT0 |
404 priv->io_base + _REG(VIU_OSD_BLEND_CTRL)); 406 VIU_OSD_BLEND_BLEN2_PREMULT_EN(1) |
405 writel_relaxed(1 << 20, 407 VIU_OSD_BLEND_HOLD_LINES(4),
406 priv->io_base + _REG(OSD1_BLEND_SRC_CTRL)); 408 priv->io_base + _REG(VIU_OSD_BLEND_CTRL));
407 writel_relaxed(1 << 20, 409
408 priv->io_base + _REG(OSD2_BLEND_SRC_CTRL)); 410 writel_relaxed(OSD_BLEND_PATH_SEL_ENABLE,
411 priv->io_base + _REG(OSD1_BLEND_SRC_CTRL));
412 writel_relaxed(OSD_BLEND_PATH_SEL_ENABLE,
413 priv->io_base + _REG(OSD2_BLEND_SRC_CTRL));
409 writel_relaxed(0, priv->io_base + _REG(VD1_BLEND_SRC_CTRL)); 414 writel_relaxed(0, priv->io_base + _REG(VD1_BLEND_SRC_CTRL));
410 writel_relaxed(0, priv->io_base + _REG(VD2_BLEND_SRC_CTRL)); 415 writel_relaxed(0, priv->io_base + _REG(VD2_BLEND_SRC_CTRL));
411 writel_relaxed(0, 416 writel_relaxed(0,
412 priv->io_base + _REG(VIU_OSD_BLEND_DUMMY_DATA0)); 417 priv->io_base + _REG(VIU_OSD_BLEND_DUMMY_DATA0));
413 writel_relaxed(0, 418 writel_relaxed(0,
414 priv->io_base + _REG(VIU_OSD_BLEND_DUMMY_ALPHA)); 419 priv->io_base + _REG(VIU_OSD_BLEND_DUMMY_ALPHA));
415 writel_bits_relaxed(0x3 << 2, 0x3 << 2, 420
416 priv->io_base + _REG(DOLBY_PATH_CTRL)); 421 writel_bits_relaxed(DOLBY_BYPASS_EN(0xc), DOLBY_BYPASS_EN(0xc),
422 priv->io_base + _REG(DOLBY_PATH_CTRL));
417 } 423 }
418 424
419 priv->viu.osd1_enabled = false; 425 priv->viu.osd1_enabled = false;
diff --git a/drivers/gpu/drm/meson/meson_vpp.c b/drivers/gpu/drm/meson/meson_vpp.c
index cbe6cf46e541..1429f3be6028 100644
--- a/drivers/gpu/drm/meson/meson_vpp.c
+++ b/drivers/gpu/drm/meson/meson_vpp.c
@@ -56,7 +56,7 @@ static void meson_vpp_write_scaling_filter_coefs(struct meson_drm *priv,
56{ 56{
57 int i; 57 int i;
58 58
59 writel_relaxed(is_horizontal ? BIT(8) : 0, 59 writel_relaxed(is_horizontal ? VPP_SCALE_HORIZONTAL_COEF : 0,
60 priv->io_base + _REG(VPP_OSD_SCALE_COEF_IDX)); 60 priv->io_base + _REG(VPP_OSD_SCALE_COEF_IDX));
61 for (i = 0; i < 33; i++) 61 for (i = 0; i < 33; i++)
62 writel_relaxed(coefs[i], 62 writel_relaxed(coefs[i],
@@ -81,7 +81,7 @@ static void meson_vpp_write_vd_scaling_filter_coefs(struct meson_drm *priv,
81{ 81{
82 int i; 82 int i;
83 83
84 writel_relaxed(is_horizontal ? BIT(8) : 0, 84 writel_relaxed(is_horizontal ? VPP_SCALE_HORIZONTAL_COEF : 0,
85 priv->io_base + _REG(VPP_SCALE_COEF_IDX)); 85 priv->io_base + _REG(VPP_SCALE_COEF_IDX));
86 for (i = 0; i < 33; i++) 86 for (i = 0; i < 33; i++)
87 writel_relaxed(coefs[i], 87 writel_relaxed(coefs[i],
@@ -96,7 +96,8 @@ void meson_vpp_init(struct meson_drm *priv)
96 else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu")) { 96 else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu")) {
97 writel_bits_relaxed(0xff << 16, 0xff << 16, 97 writel_bits_relaxed(0xff << 16, 0xff << 16,
98 priv->io_base + _REG(VIU_MISC_CTRL1)); 98 priv->io_base + _REG(VIU_MISC_CTRL1));
99 writel_relaxed(0x20000, priv->io_base + _REG(VPP_DOLBY_CTRL)); 99 writel_relaxed(VPP_PPS_DUMMY_DATA_MODE,
100 priv->io_base + _REG(VPP_DOLBY_CTRL));
100 writel_relaxed(0x1020080, 101 writel_relaxed(0x1020080,
101 priv->io_base + _REG(VPP_DUMMY_DATA1)); 102 priv->io_base + _REG(VPP_DUMMY_DATA1));
102 } else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) 103 } else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
@@ -104,12 +105,13 @@ void meson_vpp_init(struct meson_drm *priv)
104 105
105 /* Initialize vpu fifo control registers */ 106 /* Initialize vpu fifo control registers */
106 if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) 107 if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
107 writel_relaxed(0xfff << 20 | 0x1000, 108 writel_relaxed(VPP_OFIFO_SIZE_DEFAULT,
108 priv->io_base + _REG(VPP_OFIFO_SIZE)); 109 priv->io_base + _REG(VPP_OFIFO_SIZE));
109 else 110 else
110 writel_relaxed(readl_relaxed(priv->io_base + _REG(VPP_OFIFO_SIZE)) | 111 writel_bits_relaxed(VPP_OFIFO_SIZE_MASK, 0x77f,
111 0x77f, priv->io_base + _REG(VPP_OFIFO_SIZE)); 112 priv->io_base + _REG(VPP_OFIFO_SIZE));
112 writel_relaxed(0x08080808, priv->io_base + _REG(VPP_HOLD_LINES)); 113 writel_relaxed(VPP_POSTBLEND_HOLD_LINES(4) | VPP_PREBLEND_HOLD_LINES(4),
114 priv->io_base + _REG(VPP_HOLD_LINES));
113 115
114 if (!meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) { 116 if (!meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
115 /* Turn off preblend */ 117 /* Turn off preblend */
@@ -137,10 +139,15 @@ void meson_vpp_init(struct meson_drm *priv)
137 writel_relaxed(0, priv->io_base + _REG(VPP_OSD_SC_CTRL0)); 139 writel_relaxed(0, priv->io_base + _REG(VPP_OSD_SC_CTRL0));
138 writel_relaxed(0, priv->io_base + _REG(VPP_OSD_VSC_CTRL0)); 140 writel_relaxed(0, priv->io_base + _REG(VPP_OSD_VSC_CTRL0));
139 writel_relaxed(0, priv->io_base + _REG(VPP_OSD_HSC_CTRL0)); 141 writel_relaxed(0, priv->io_base + _REG(VPP_OSD_HSC_CTRL0));
140 writel_relaxed(4 | (4 << 8) | BIT(15), 142
143 /* Set horizontal/vertical bank length and enable video scale out */
144 writel_relaxed(VPP_VSC_BANK_LENGTH(4) | VPP_HSC_BANK_LENGTH(4) |
145 VPP_SC_VD_EN_ENABLE,
141 priv->io_base + _REG(VPP_SC_MISC)); 146 priv->io_base + _REG(VPP_SC_MISC));
142 147
143 writel_relaxed(1, priv->io_base + _REG(VPP_VADJ_CTRL)); 148 /* Enable minus black level for vadj1 */
149 writel_relaxed(VPP_MINUS_BLACK_LVL_VADJ1_ENABLE,
150 priv->io_base + _REG(VPP_VADJ_CTRL));
144 151
145 /* Write in the proper filter coefficients. */ 152 /* Write in the proper filter coefficients. */
146 meson_vpp_write_scaling_filter_coefs(priv, 153 meson_vpp_write_scaling_filter_coefs(priv,
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 8cc70026c358..0c2a1252c8be 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -700,13 +700,13 @@ void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
700int msm_gem_sync_object(struct drm_gem_object *obj, 700int msm_gem_sync_object(struct drm_gem_object *obj,
701 struct msm_fence_context *fctx, bool exclusive) 701 struct msm_fence_context *fctx, bool exclusive)
702{ 702{
703 struct reservation_object_list *fobj; 703 struct dma_resv_list *fobj;
704 struct dma_fence *fence; 704 struct dma_fence *fence;
705 int i, ret; 705 int i, ret;
706 706
707 fobj = reservation_object_get_list(obj->resv); 707 fobj = dma_resv_get_list(obj->resv);
708 if (!fobj || (fobj->shared_count == 0)) { 708 if (!fobj || (fobj->shared_count == 0)) {
709 fence = reservation_object_get_excl(obj->resv); 709 fence = dma_resv_get_excl(obj->resv);
710 /* don't need to wait on our own fences, since ring is fifo */ 710 /* don't need to wait on our own fences, since ring is fifo */
711 if (fence && (fence->context != fctx->context)) { 711 if (fence && (fence->context != fctx->context)) {
712 ret = dma_fence_wait(fence, true); 712 ret = dma_fence_wait(fence, true);
@@ -720,7 +720,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
720 720
721 for (i = 0; i < fobj->shared_count; i++) { 721 for (i = 0; i < fobj->shared_count; i++) {
722 fence = rcu_dereference_protected(fobj->shared[i], 722 fence = rcu_dereference_protected(fobj->shared[i],
723 reservation_object_held(obj->resv)); 723 dma_resv_held(obj->resv));
724 if (fence->context != fctx->context) { 724 if (fence->context != fctx->context) {
725 ret = dma_fence_wait(fence, true); 725 ret = dma_fence_wait(fence, true);
726 if (ret) 726 if (ret)
@@ -738,9 +738,9 @@ void msm_gem_move_to_active(struct drm_gem_object *obj,
738 WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED); 738 WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
739 msm_obj->gpu = gpu; 739 msm_obj->gpu = gpu;
740 if (exclusive) 740 if (exclusive)
741 reservation_object_add_excl_fence(obj->resv, fence); 741 dma_resv_add_excl_fence(obj->resv, fence);
742 else 742 else
743 reservation_object_add_shared_fence(obj->resv, fence); 743 dma_resv_add_shared_fence(obj->resv, fence);
744 list_del_init(&msm_obj->mm_list); 744 list_del_init(&msm_obj->mm_list);
745 list_add_tail(&msm_obj->mm_list, &gpu->active_list); 745 list_add_tail(&msm_obj->mm_list, &gpu->active_list);
746} 746}
@@ -765,7 +765,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
765 op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout); 765 op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
766 long ret; 766 long ret;
767 767
768 ret = reservation_object_wait_timeout_rcu(obj->resv, write, 768 ret = dma_resv_wait_timeout_rcu(obj->resv, write,
769 true, remain); 769 true, remain);
770 if (ret == 0) 770 if (ret == 0)
771 return remain == 0 ? -EBUSY : -ETIMEDOUT; 771 return remain == 0 ? -EBUSY : -ETIMEDOUT;
@@ -797,8 +797,8 @@ static void describe_fence(struct dma_fence *fence, const char *type,
797void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m) 797void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
798{ 798{
799 struct msm_gem_object *msm_obj = to_msm_bo(obj); 799 struct msm_gem_object *msm_obj = to_msm_bo(obj);
800 struct reservation_object *robj = obj->resv; 800 struct dma_resv *robj = obj->resv;
801 struct reservation_object_list *fobj; 801 struct dma_resv_list *fobj;
802 struct dma_fence *fence; 802 struct dma_fence *fence;
803 struct msm_gem_vma *vma; 803 struct msm_gem_vma *vma;
804 uint64_t off = drm_vma_node_start(&obj->vma_node); 804 uint64_t off = drm_vma_node_start(&obj->vma_node);
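The msm changes in this and the following hunks are mechanical fallout of the reservation_object to dma_resv rename called out in the merge description; the mapping, as it appears in these diffs, is:

    struct reservation_object              -> struct dma_resv
    struct reservation_object_list         -> struct dma_resv_list
    reservation_object_get_list()          -> dma_resv_get_list()
    reservation_object_get_excl()          -> dma_resv_get_excl()
    reservation_object_held()              -> dma_resv_held()
    reservation_object_add_excl_fence()    -> dma_resv_add_excl_fence()
    reservation_object_add_shared_fence()  -> dma_resv_add_shared_fence()
    reservation_object_reserve_shared()    -> dma_resv_reserve_shared()
    reservation_object_wait_timeout_rcu()  -> dma_resv_wait_timeout_rcu()
    <linux/reservation.h>                  -> <linux/dma-resv.h>

The same renaming covers dma_resv_lock()/dma_resv_unlock() and dma_resv_get_excl_rcu() in the nouveau hunks further down.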
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 8cfcf8f09e3e..9e0953c2b7ce 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -8,7 +8,7 @@
8#define __MSM_GEM_H__ 8#define __MSM_GEM_H__
9 9
10#include <linux/kref.h> 10#include <linux/kref.h>
11#include <linux/reservation.h> 11#include <linux/dma-resv.h>
12#include "msm_drv.h" 12#include "msm_drv.h"
13 13
14/* Additional internal-use only BO flags: */ 14/* Additional internal-use only BO flags: */
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 348f8c2be806..2e1556b7af26 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -225,7 +225,7 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
225 * strange place to call it. OTOH this is a 225 * strange place to call it. OTOH this is a
226 * convenient can-fail point to hook it in. 226 * convenient can-fail point to hook it in.
227 */ 227 */
228 ret = reservation_object_reserve_shared(msm_obj->base.resv, 228 ret = dma_resv_reserve_shared(msm_obj->base.resv,
229 1); 229 1);
230 if (ret) 230 if (ret)
231 return ret; 231 return ret;
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index 878ef6822812..e8506335cd15 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -17,7 +17,7 @@
17#include <linux/of_graph.h> 17#include <linux/of_graph.h>
18#include <linux/of_reserved_mem.h> 18#include <linux/of_reserved_mem.h>
19#include <linux/pm_runtime.h> 19#include <linux/pm_runtime.h>
20#include <linux/reservation.h> 20#include <linux/dma-resv.h>
21#include <linux/spinlock.h> 21#include <linux/spinlock.h>
22 22
23#include <drm/drm_atomic.h> 23#include <drm/drm_atomic.h>
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_out.c b/drivers/gpu/drm/mxsfb/mxsfb_out.c
index 231d016c6f47..be36f4d6cc96 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_out.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_out.c
@@ -30,7 +30,7 @@ static int mxsfb_panel_get_modes(struct drm_connector *connector)
30 drm_connector_to_mxsfb_drm_private(connector); 30 drm_connector_to_mxsfb_drm_private(connector);
31 31
32 if (mxsfb->panel) 32 if (mxsfb->panel)
33 return mxsfb->panel->funcs->get_modes(mxsfb->panel); 33 return drm_panel_get_modes(mxsfb->panel);
34 34
35 return 0; 35 return 0;
36} 36}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 89f8e76a2d7d..027a01b97d1c 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -457,7 +457,7 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
457 asyw->image.handle[0] = ctxdma->object.handle; 457 asyw->image.handle[0] = ctxdma->object.handle;
458 } 458 }
459 459
460 asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.base.resv); 460 asyw->state.fence = dma_resv_get_excl_rcu(fb->nvbo->bo.base.resv);
461 asyw->image.offset[0] = fb->nvbo->bo.offset; 461 asyw->image.offset[0] = fb->nvbo->bo.offset;
462 462
463 if (wndw->func->prepare) { 463 if (wndw->func->prepare) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 99e391be9370..e0b1bbee936f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -188,7 +188,7 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
188int 188int
189nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align, 189nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
190 uint32_t flags, uint32_t tile_mode, uint32_t tile_flags, 190 uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
191 struct sg_table *sg, struct reservation_object *robj, 191 struct sg_table *sg, struct dma_resv *robj,
192 struct nouveau_bo **pnvbo) 192 struct nouveau_bo **pnvbo)
193{ 193{
194 struct nouveau_drm *drm = cli->drm; 194 struct nouveau_drm *drm = cli->drm;
@@ -1324,7 +1324,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
1324{ 1324{
1325 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); 1325 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1326 struct drm_device *dev = drm->dev; 1326 struct drm_device *dev = drm->dev;
1327 struct dma_fence *fence = reservation_object_get_excl(bo->base.resv); 1327 struct dma_fence *fence = dma_resv_get_excl(bo->base.resv);
1328 1328
1329 nv10_bo_put_tile_region(dev, *old_tile, fence); 1329 nv10_bo_put_tile_region(dev, *old_tile, fence);
1330 *old_tile = new_tile; 1330 *old_tile = new_tile;
@@ -1655,12 +1655,12 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
1655void 1655void
1656nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive) 1656nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
1657{ 1657{
1658 struct reservation_object *resv = nvbo->bo.base.resv; 1658 struct dma_resv *resv = nvbo->bo.base.resv;
1659 1659
1660 if (exclusive) 1660 if (exclusive)
1661 reservation_object_add_excl_fence(resv, &fence->base); 1661 dma_resv_add_excl_fence(resv, &fence->base);
1662 else if (fence) 1662 else if (fence)
1663 reservation_object_add_shared_fence(resv, &fence->base); 1663 dma_resv_add_shared_fence(resv, &fence->base);
1664} 1664}
1665 1665
1666struct ttm_bo_driver nouveau_bo_driver = { 1666struct ttm_bo_driver nouveau_bo_driver = {
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index d675efe8e7f9..3ae84834bd5c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -73,7 +73,7 @@ extern struct ttm_bo_driver nouveau_bo_driver;
73void nouveau_bo_move_init(struct nouveau_drm *); 73void nouveau_bo_move_init(struct nouveau_drm *);
74int nouveau_bo_new(struct nouveau_cli *, u64 size, int align, u32 flags, 74int nouveau_bo_new(struct nouveau_cli *, u64 size, int align, u32 flags,
75 u32 tile_mode, u32 tile_flags, struct sg_table *sg, 75 u32 tile_mode, u32 tile_flags, struct sg_table *sg,
76 struct reservation_object *robj, 76 struct dma_resv *robj,
77 struct nouveau_bo **); 77 struct nouveau_bo **);
78int nouveau_bo_pin(struct nouveau_bo *, u32 flags, bool contig); 78int nouveau_bo_pin(struct nouveau_bo *, u32 flags, bool contig);
79int nouveau_bo_unpin(struct nouveau_bo *); 79int nouveau_bo_unpin(struct nouveau_bo *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index e5f249ab216a..8df390078c85 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -335,20 +335,20 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
335{ 335{
336 struct nouveau_fence_chan *fctx = chan->fence; 336 struct nouveau_fence_chan *fctx = chan->fence;
337 struct dma_fence *fence; 337 struct dma_fence *fence;
338 struct reservation_object *resv = nvbo->bo.base.resv; 338 struct dma_resv *resv = nvbo->bo.base.resv;
339 struct reservation_object_list *fobj; 339 struct dma_resv_list *fobj;
340 struct nouveau_fence *f; 340 struct nouveau_fence *f;
341 int ret = 0, i; 341 int ret = 0, i;
342 342
343 if (!exclusive) { 343 if (!exclusive) {
344 ret = reservation_object_reserve_shared(resv, 1); 344 ret = dma_resv_reserve_shared(resv, 1);
345 345
346 if (ret) 346 if (ret)
347 return ret; 347 return ret;
348 } 348 }
349 349
350 fobj = reservation_object_get_list(resv); 350 fobj = dma_resv_get_list(resv);
351 fence = reservation_object_get_excl(resv); 351 fence = dma_resv_get_excl(resv);
352 352
353 if (fence && (!exclusive || !fobj || !fobj->shared_count)) { 353 if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
354 struct nouveau_channel *prev = NULL; 354 struct nouveau_channel *prev = NULL;
@@ -377,7 +377,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
377 bool must_wait = true; 377 bool must_wait = true;
378 378
379 fence = rcu_dereference_protected(fobj->shared[i], 379 fence = rcu_dereference_protected(fobj->shared[i],
380 reservation_object_held(resv)); 380 dma_resv_held(resv));
381 381
382 f = nouveau_local_fence(fence, chan->drm); 382 f = nouveau_local_fence(fence, chan->drm);
383 if (f) { 383 if (f) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index c7368aa0bdec..c77302f969e8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -887,7 +887,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
887 return -ENOENT; 887 return -ENOENT;
888 nvbo = nouveau_gem_object(gem); 888 nvbo = nouveau_gem_object(gem);
889 889
890 lret = reservation_object_wait_timeout_rcu(nvbo->bo.base.resv, write, true, 890 lret = dma_resv_wait_timeout_rcu(nvbo->bo.base.resv, write, true,
891 no_wait ? 0 : 30 * HZ); 891 no_wait ? 0 : 30 * HZ);
892 if (!lret) 892 if (!lret)
893 ret = -EBUSY; 893 ret = -EBUSY;
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index e86ad7ae622b..7262ced9688a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -62,16 +62,16 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
62{ 62{
63 struct nouveau_drm *drm = nouveau_drm(dev); 63 struct nouveau_drm *drm = nouveau_drm(dev);
64 struct nouveau_bo *nvbo; 64 struct nouveau_bo *nvbo;
65 struct reservation_object *robj = attach->dmabuf->resv; 65 struct dma_resv *robj = attach->dmabuf->resv;
66 u32 flags = 0; 66 u32 flags = 0;
67 int ret; 67 int ret;
68 68
69 flags = TTM_PL_FLAG_TT; 69 flags = TTM_PL_FLAG_TT;
70 70
71 reservation_object_lock(robj, NULL); 71 dma_resv_lock(robj, NULL);
72 ret = nouveau_bo_new(&drm->client, attach->dmabuf->size, 0, flags, 0, 0, 72 ret = nouveau_bo_new(&drm->client, attach->dmabuf->size, 0, flags, 0, 0,
73 sg, robj, &nvbo); 73 sg, robj, &nvbo);
74 reservation_object_unlock(robj); 74 dma_resv_unlock(robj);
75 if (ret) 75 if (ret)
76 return ERR_PTR(ret); 76 return ERR_PTR(ret);
77 77
diff --git a/drivers/gpu/drm/omapdrm/displays/Kconfig b/drivers/gpu/drm/omapdrm/displays/Kconfig
index c2566da32ac4..240dda102845 100644
--- a/drivers/gpu/drm/omapdrm/displays/Kconfig
+++ b/drivers/gpu/drm/omapdrm/displays/Kconfig
@@ -29,42 +29,4 @@ config DRM_OMAP_PANEL_DSI_CM
29 help 29 help
30 Driver for generic DSI command mode panels. 30 Driver for generic DSI command mode panels.
31 31
32config DRM_OMAP_PANEL_SONY_ACX565AKM
33 tristate "ACX565AKM Panel"
34 depends on SPI && BACKLIGHT_CLASS_DEVICE
35 help
36 This is the LCD panel used on Nokia N900
37
38config DRM_OMAP_PANEL_LGPHILIPS_LB035Q02
39 tristate "LG.Philips LB035Q02 LCD Panel"
40 depends on SPI
41 help
42 LCD Panel used on the Gumstix Overo Palo35
43
44config DRM_OMAP_PANEL_SHARP_LS037V7DW01
45 tristate "Sharp LS037V7DW01 LCD Panel"
46 depends on BACKLIGHT_CLASS_DEVICE
47 help
48 LCD Panel used in TI's SDP3430 and EVM boards
49
50config DRM_OMAP_PANEL_TPO_TD028TTEC1
51 tristate "TPO TD028TTEC1 LCD Panel"
52 depends on SPI
53 help
54 LCD panel used in Openmoko.
55
56config DRM_OMAP_PANEL_TPO_TD043MTEA1
57 tristate "TPO TD043MTEA1 LCD Panel"
58 depends on SPI
59 help
60 LCD Panel used in OMAP3 Pandora
61
62config DRM_OMAP_PANEL_NEC_NL8048HL11
63 tristate "NEC NL8048HL11 Panel"
64 depends on SPI
65 depends on BACKLIGHT_CLASS_DEVICE
66 help
67 This NEC NL8048HL11 panel is TFT LCD used in the
68 Zoom2/3/3630 sdp boards.
69
70endmenu 32endmenu
diff --git a/drivers/gpu/drm/omapdrm/displays/Makefile b/drivers/gpu/drm/omapdrm/displays/Makefile
index 1db34d4fed64..cb76859dc574 100644
--- a/drivers/gpu/drm/omapdrm/displays/Makefile
+++ b/drivers/gpu/drm/omapdrm/displays/Makefile
@@ -4,9 +4,3 @@ obj-$(CONFIG_DRM_OMAP_ENCODER_TPD12S015) += encoder-tpd12s015.o
4obj-$(CONFIG_DRM_OMAP_CONNECTOR_HDMI) += connector-hdmi.o 4obj-$(CONFIG_DRM_OMAP_CONNECTOR_HDMI) += connector-hdmi.o
5obj-$(CONFIG_DRM_OMAP_CONNECTOR_ANALOG_TV) += connector-analog-tv.o 5obj-$(CONFIG_DRM_OMAP_CONNECTOR_ANALOG_TV) += connector-analog-tv.o
6obj-$(CONFIG_DRM_OMAP_PANEL_DSI_CM) += panel-dsi-cm.o 6obj-$(CONFIG_DRM_OMAP_PANEL_DSI_CM) += panel-dsi-cm.o
7obj-$(CONFIG_DRM_OMAP_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o
8obj-$(CONFIG_DRM_OMAP_PANEL_LGPHILIPS_LB035Q02) += panel-lgphilips-lb035q02.o
9obj-$(CONFIG_DRM_OMAP_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o
10obj-$(CONFIG_DRM_OMAP_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o
11obj-$(CONFIG_DRM_OMAP_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
12obj-$(CONFIG_DRM_OMAP_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
deleted file mode 100644
index 1fd0d84e6e38..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
+++ /dev/null
@@ -1,251 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * LG.Philips LB035Q02 LCD Panel driver
4 *
5 * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
6 * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
7 * Based on a driver by: Steve Sakoman <steve@sakoman.com>
8 */
9
10#include <linux/module.h>
11#include <linux/delay.h>
12#include <linux/spi/spi.h>
13#include <linux/mutex.h>
14#include <linux/gpio.h>
15#include <linux/gpio/consumer.h>
16
17#include "../dss/omapdss.h"
18
19static const struct videomode lb035q02_vm = {
20 .hactive = 320,
21 .vactive = 240,
22
23 .pixelclock = 6500000,
24
25 .hsync_len = 2,
26 .hfront_porch = 20,
27 .hback_porch = 68,
28
29 .vsync_len = 2,
30 .vfront_porch = 4,
31 .vback_porch = 18,
32
33 .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
34};
35
36struct panel_drv_data {
37 struct omap_dss_device dssdev;
38
39 struct spi_device *spi;
40
41 struct videomode vm;
42
43 struct gpio_desc *enable_gpio;
44};
45
46#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
47
48static int lb035q02_write_reg(struct spi_device *spi, u8 reg, u16 val)
49{
50 struct spi_message msg;
51 struct spi_transfer index_xfer = {
52 .len = 3,
53 .cs_change = 1,
54 };
55 struct spi_transfer value_xfer = {
56 .len = 3,
57 };
58 u8 buffer[16];
59
60 spi_message_init(&msg);
61
62 /* register index */
63 buffer[0] = 0x70;
64 buffer[1] = 0x00;
65 buffer[2] = reg & 0x7f;
66 index_xfer.tx_buf = buffer;
67 spi_message_add_tail(&index_xfer, &msg);
68
69 /* register value */
70 buffer[4] = 0x72;
71 buffer[5] = val >> 8;
72 buffer[6] = val;
73 value_xfer.tx_buf = buffer + 4;
74 spi_message_add_tail(&value_xfer, &msg);
75
76 return spi_sync(spi, &msg);
77}
78
79static void init_lb035q02_panel(struct spi_device *spi)
80{
81 /* Init sequence from page 28 of the lb035q02 spec */
82 lb035q02_write_reg(spi, 0x01, 0x6300);
83 lb035q02_write_reg(spi, 0x02, 0x0200);
84 lb035q02_write_reg(spi, 0x03, 0x0177);
85 lb035q02_write_reg(spi, 0x04, 0x04c7);
86 lb035q02_write_reg(spi, 0x05, 0xffc0);
87 lb035q02_write_reg(spi, 0x06, 0xe806);
88 lb035q02_write_reg(spi, 0x0a, 0x4008);
89 lb035q02_write_reg(spi, 0x0b, 0x0000);
90 lb035q02_write_reg(spi, 0x0d, 0x0030);
91 lb035q02_write_reg(spi, 0x0e, 0x2800);
92 lb035q02_write_reg(spi, 0x0f, 0x0000);
93 lb035q02_write_reg(spi, 0x16, 0x9f80);
94 lb035q02_write_reg(spi, 0x17, 0x0a0f);
95 lb035q02_write_reg(spi, 0x1e, 0x00c1);
96 lb035q02_write_reg(spi, 0x30, 0x0300);
97 lb035q02_write_reg(spi, 0x31, 0x0007);
98 lb035q02_write_reg(spi, 0x32, 0x0000);
99 lb035q02_write_reg(spi, 0x33, 0x0000);
100 lb035q02_write_reg(spi, 0x34, 0x0707);
101 lb035q02_write_reg(spi, 0x35, 0x0004);
102 lb035q02_write_reg(spi, 0x36, 0x0302);
103 lb035q02_write_reg(spi, 0x37, 0x0202);
104 lb035q02_write_reg(spi, 0x3a, 0x0a0d);
105 lb035q02_write_reg(spi, 0x3b, 0x0806);
106}
107
108static int lb035q02_connect(struct omap_dss_device *src,
109 struct omap_dss_device *dst)
110{
111 struct panel_drv_data *ddata = to_panel_data(dst);
112
113 init_lb035q02_panel(ddata->spi);
114
115 return 0;
116}
117
118static void lb035q02_disconnect(struct omap_dss_device *src,
119 struct omap_dss_device *dst)
120{
121}
122
123static void lb035q02_enable(struct omap_dss_device *dssdev)
124{
125 struct panel_drv_data *ddata = to_panel_data(dssdev);
126
127 if (ddata->enable_gpio)
128 gpiod_set_value_cansleep(ddata->enable_gpio, 1);
129}
130
131static void lb035q02_disable(struct omap_dss_device *dssdev)
132{
133 struct panel_drv_data *ddata = to_panel_data(dssdev);
134
135 if (ddata->enable_gpio)
136 gpiod_set_value_cansleep(ddata->enable_gpio, 0);
137}
138
139static int lb035q02_get_modes(struct omap_dss_device *dssdev,
140 struct drm_connector *connector)
141{
142 struct panel_drv_data *ddata = to_panel_data(dssdev);
143
144 return omapdss_display_get_modes(connector, &ddata->vm);
145}
146
147static const struct omap_dss_device_ops lb035q02_ops = {
148 .connect = lb035q02_connect,
149 .disconnect = lb035q02_disconnect,
150
151 .enable = lb035q02_enable,
152 .disable = lb035q02_disable,
153
154 .get_modes = lb035q02_get_modes,
155};
156
157static int lb035q02_probe_of(struct spi_device *spi)
158{
159 struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
160 struct gpio_desc *gpio;
161
162 gpio = devm_gpiod_get(&spi->dev, "enable", GPIOD_OUT_LOW);
163 if (IS_ERR(gpio)) {
164 dev_err(&spi->dev, "failed to parse enable gpio\n");
165 return PTR_ERR(gpio);
166 }
167
168 ddata->enable_gpio = gpio;
169
170 return 0;
171}
172
173static int lb035q02_panel_spi_probe(struct spi_device *spi)
174{
175 struct panel_drv_data *ddata;
176 struct omap_dss_device *dssdev;
177 int r;
178
179 ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL);
180 if (ddata == NULL)
181 return -ENOMEM;
182
183 dev_set_drvdata(&spi->dev, ddata);
184
185 ddata->spi = spi;
186
187 r = lb035q02_probe_of(spi);
188 if (r)
189 return r;
190
191 ddata->vm = lb035q02_vm;
192
193 dssdev = &ddata->dssdev;
194 dssdev->dev = &spi->dev;
195 dssdev->ops = &lb035q02_ops;
196 dssdev->type = OMAP_DISPLAY_TYPE_DPI;
197 dssdev->display = true;
198 dssdev->owner = THIS_MODULE;
199 dssdev->of_ports = BIT(0);
200 dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
201
202 /*
203 * Note: According to the panel documentation:
204 * DE is active LOW
205 * DATA needs to be driven on the FALLING edge
206 */
207 dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH
208 | DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE
209 | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
210
211 omapdss_display_init(dssdev);
212 omapdss_device_register(dssdev);
213
214 return 0;
215}
216
217static int lb035q02_panel_spi_remove(struct spi_device *spi)
218{
219 struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
220 struct omap_dss_device *dssdev = &ddata->dssdev;
221
222 omapdss_device_unregister(dssdev);
223
224 lb035q02_disable(dssdev);
225
226 return 0;
227}
228
229static const struct of_device_id lb035q02_of_match[] = {
230 { .compatible = "omapdss,lgphilips,lb035q02", },
231 {},
232};
233
234MODULE_DEVICE_TABLE(of, lb035q02_of_match);
235
236static struct spi_driver lb035q02_spi_driver = {
237 .probe = lb035q02_panel_spi_probe,
238 .remove = lb035q02_panel_spi_remove,
239 .driver = {
240 .name = "panel_lgphilips_lb035q02",
241 .of_match_table = lb035q02_of_match,
242 .suppress_bind_attrs = true,
243 },
244};
245
246module_spi_driver(lb035q02_spi_driver);
247
248MODULE_ALIAS("spi:lgphilips,lb035q02");
249MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
250MODULE_DESCRIPTION("LG.Philips LB035Q02 LCD Panel driver");
251MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
deleted file mode 100644
index eba5bd1d702f..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
+++ /dev/null
@@ -1,271 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * NEC NL8048HL11 Panel driver
4 *
5 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
6 * Author: Erik Gilling <konkers@android.com>
7 * Converted to new DSS device model: Tomi Valkeinen <tomi.valkeinen@ti.com>
8 */
9
10#include <linux/delay.h>
11#include <linux/gpio/consumer.h>
12#include <linux/module.h>
13#include <linux/spi/spi.h>
14
15#include "../dss/omapdss.h"
16
17struct panel_drv_data {
18 struct omap_dss_device dssdev;
19
20 struct videomode vm;
21
22 struct gpio_desc *res_gpio;
23
24 struct spi_device *spi;
25};
26
27#define LCD_XRES 800
28#define LCD_YRES 480
29/*
30 * NEC PIX Clock Ratings
31 * MIN:21.8MHz TYP:23.8MHz MAX:25.7MHz
32 */
33#define LCD_PIXEL_CLOCK 23800000
34
35static const struct {
36 unsigned char addr;
37 unsigned char dat;
38} nec_8048_init_seq[] = {
39 { 3, 0x01 }, { 0, 0x00 }, { 1, 0x01 }, { 4, 0x00 }, { 5, 0x14 },
40 { 6, 0x24 }, { 16, 0xD7 }, { 17, 0x00 }, { 18, 0x00 }, { 19, 0x55 },
41 { 20, 0x01 }, { 21, 0x70 }, { 22, 0x1E }, { 23, 0x25 }, { 24, 0x25 },
42 { 25, 0x02 }, { 26, 0x02 }, { 27, 0xA0 }, { 32, 0x2F }, { 33, 0x0F },
43 { 34, 0x0F }, { 35, 0x0F }, { 36, 0x0F }, { 37, 0x0F }, { 38, 0x0F },
44 { 39, 0x00 }, { 40, 0x02 }, { 41, 0x02 }, { 42, 0x02 }, { 43, 0x0F },
45 { 44, 0x0F }, { 45, 0x0F }, { 46, 0x0F }, { 47, 0x0F }, { 48, 0x0F },
46 { 49, 0x0F }, { 50, 0x00 }, { 51, 0x02 }, { 52, 0x02 }, { 53, 0x02 },
47 { 80, 0x0C }, { 83, 0x42 }, { 84, 0x42 }, { 85, 0x41 }, { 86, 0x14 },
48 { 89, 0x88 }, { 90, 0x01 }, { 91, 0x00 }, { 92, 0x02 }, { 93, 0x0C },
49 { 94, 0x1C }, { 95, 0x27 }, { 98, 0x49 }, { 99, 0x27 }, { 102, 0x76 },
50 { 103, 0x27 }, { 112, 0x01 }, { 113, 0x0E }, { 114, 0x02 },
51 { 115, 0x0C }, { 118, 0x0C }, { 121, 0x30 }, { 130, 0x00 },
52 { 131, 0x00 }, { 132, 0xFC }, { 134, 0x00 }, { 136, 0x00 },
53 { 138, 0x00 }, { 139, 0x00 }, { 140, 0x00 }, { 141, 0xFC },
54 { 143, 0x00 }, { 145, 0x00 }, { 147, 0x00 }, { 148, 0x00 },
55 { 149, 0x00 }, { 150, 0xFC }, { 152, 0x00 }, { 154, 0x00 },
56 { 156, 0x00 }, { 157, 0x00 }, { 2, 0x00 },
57};
58
59static const struct videomode nec_8048_panel_vm = {
60 .hactive = LCD_XRES,
61 .vactive = LCD_YRES,
62 .pixelclock = LCD_PIXEL_CLOCK,
63 .hfront_porch = 6,
64 .hsync_len = 1,
65 .hback_porch = 4,
66 .vfront_porch = 3,
67 .vsync_len = 1,
68 .vback_porch = 4,
69
70 .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
71};
72
73#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
74
75static int nec_8048_spi_send(struct spi_device *spi, unsigned char reg_addr,
76 unsigned char reg_data)
77{
78 int ret = 0;
79 unsigned int cmd = 0, data = 0;
80
81 cmd = 0x0000 | reg_addr; /* register address write */
82 data = 0x0100 | reg_data; /* register data write */
83 data = (cmd << 16) | data;
84
85 ret = spi_write(spi, (unsigned char *)&data, 4);
86 if (ret)
87 pr_err("error in spi_write %x\n", data);
88
89 return ret;
90}
91
92static int init_nec_8048_wvga_lcd(struct spi_device *spi)
93{
94 unsigned int i;
95 /* Initialization Sequence */
96 /* nec_8048_spi_send(spi, REG, VAL) */
97 for (i = 0; i < (ARRAY_SIZE(nec_8048_init_seq) - 1); i++)
98 nec_8048_spi_send(spi, nec_8048_init_seq[i].addr,
99 nec_8048_init_seq[i].dat);
100 udelay(20);
101 nec_8048_spi_send(spi, nec_8048_init_seq[i].addr,
102 nec_8048_init_seq[i].dat);
103 return 0;
104}
105
106static int nec_8048_connect(struct omap_dss_device *src,
107 struct omap_dss_device *dst)
108{
109 return 0;
110}
111
112static void nec_8048_disconnect(struct omap_dss_device *src,
113 struct omap_dss_device *dst)
114{
115}
116
117static void nec_8048_enable(struct omap_dss_device *dssdev)
118{
119 struct panel_drv_data *ddata = to_panel_data(dssdev);
120
121 gpiod_set_value_cansleep(ddata->res_gpio, 1);
122}
123
124static void nec_8048_disable(struct omap_dss_device *dssdev)
125{
126 struct panel_drv_data *ddata = to_panel_data(dssdev);
127
128 gpiod_set_value_cansleep(ddata->res_gpio, 0);
129}
130
131static int nec_8048_get_modes(struct omap_dss_device *dssdev,
132 struct drm_connector *connector)
133{
134 struct panel_drv_data *ddata = to_panel_data(dssdev);
135
136 return omapdss_display_get_modes(connector, &ddata->vm);
137}
138
139static const struct omap_dss_device_ops nec_8048_ops = {
140 .connect = nec_8048_connect,
141 .disconnect = nec_8048_disconnect,
142
143 .enable = nec_8048_enable,
144 .disable = nec_8048_disable,
145
146 .get_modes = nec_8048_get_modes,
147};
148
149static int nec_8048_probe(struct spi_device *spi)
150{
151 struct panel_drv_data *ddata;
152 struct omap_dss_device *dssdev;
153 struct gpio_desc *gpio;
154 int r;
155
156 dev_dbg(&spi->dev, "%s\n", __func__);
157
158 spi->mode = SPI_MODE_0;
159 spi->bits_per_word = 32;
160
161 r = spi_setup(spi);
162 if (r < 0) {
163 dev_err(&spi->dev, "spi_setup failed: %d\n", r);
164 return r;
165 }
166
167 init_nec_8048_wvga_lcd(spi);
168
169 ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL);
170 if (ddata == NULL)
171 return -ENOMEM;
172
173 dev_set_drvdata(&spi->dev, ddata);
174
175 ddata->spi = spi;
176
177 gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_LOW);
178 if (IS_ERR(gpio)) {
179 dev_err(&spi->dev, "failed to get reset gpio\n");
180 return PTR_ERR(gpio);
181 }
182
183 ddata->res_gpio = gpio;
184
185 ddata->vm = nec_8048_panel_vm;
186
187 dssdev = &ddata->dssdev;
188 dssdev->dev = &spi->dev;
189 dssdev->ops = &nec_8048_ops;
190 dssdev->type = OMAP_DISPLAY_TYPE_DPI;
191 dssdev->display = true;
192 dssdev->owner = THIS_MODULE;
193 dssdev->of_ports = BIT(0);
194 dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
195 dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH
196 | DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE
197 | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
198
199 omapdss_display_init(dssdev);
200 omapdss_device_register(dssdev);
201
202 return 0;
203}
204
205static int nec_8048_remove(struct spi_device *spi)
206{
207 struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
208 struct omap_dss_device *dssdev = &ddata->dssdev;
209
210 dev_dbg(&ddata->spi->dev, "%s\n", __func__);
211
212 omapdss_device_unregister(dssdev);
213
214 nec_8048_disable(dssdev);
215
216 return 0;
217}
218
219#ifdef CONFIG_PM_SLEEP
220static int nec_8048_suspend(struct device *dev)
221{
222 struct spi_device *spi = to_spi_device(dev);
223
224 nec_8048_spi_send(spi, 2, 0x01);
225 mdelay(40);
226
227 return 0;
228}
229
230static int nec_8048_resume(struct device *dev)
231{
232 struct spi_device *spi = to_spi_device(dev);
233
234 /* reinitialize the panel */
235 spi_setup(spi);
236 nec_8048_spi_send(spi, 2, 0x00);
237 init_nec_8048_wvga_lcd(spi);
238
239 return 0;
240}
241static SIMPLE_DEV_PM_OPS(nec_8048_pm_ops, nec_8048_suspend,
242 nec_8048_resume);
243#define NEC_8048_PM_OPS (&nec_8048_pm_ops)
244#else
245#define NEC_8048_PM_OPS NULL
246#endif
247
248static const struct of_device_id nec_8048_of_match[] = {
249 { .compatible = "omapdss,nec,nl8048hl11", },
250 {},
251};
252
253MODULE_DEVICE_TABLE(of, nec_8048_of_match);
254
255static struct spi_driver nec_8048_driver = {
256 .driver = {
257 .name = "panel-nec-nl8048hl11",
258 .pm = NEC_8048_PM_OPS,
259 .of_match_table = nec_8048_of_match,
260 .suppress_bind_attrs = true,
261 },
262 .probe = nec_8048_probe,
263 .remove = nec_8048_remove,
264};
265
266module_spi_driver(nec_8048_driver);
267
268MODULE_ALIAS("spi:nec,nl8048hl11");
269MODULE_AUTHOR("Erik Gilling <konkers@android.com>");
270MODULE_DESCRIPTION("NEC-NL8048HL11 Driver");
271MODULE_LICENSE("GPL");
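
For reference, nec_8048_spi_send() above packs one register write into a single 32-bit SPI word (the probe sets bits_per_word = 32): the upper half-word 0x00RR carries the register address as a command, the lower half-word 0x01VV carries the data byte. The standalone sketch below only reproduces that packing and is not part of the driver; the register/value pair is the one the suspend handler sends.

    /* Minimal user-space sketch of the word layout built by
     * nec_8048_spi_send(): (cmd << 16) | data, with cmd = 0x00RR and
     * data = 0x01VV. Illustrative only; not kernel code.
     */
    #include <stdio.h>
    #include <stdint.h>

    static uint32_t nec_8048_pack(uint8_t reg_addr, uint8_t reg_data)
    {
            uint32_t cmd  = 0x0000 | reg_addr;      /* register address write */
            uint32_t data = 0x0100 | reg_data;      /* register data write */

            return (cmd << 16) | data;
    }

    int main(void)
    {
            /* register 2, value 0x01: the write issued by nec_8048_suspend() */
            printf("SPI word: 0x%08x\n", nec_8048_pack(0x02, 0x01));
            return 0;
    }

Built with any C compiler this prints 0x00020101, the same word the driver hands to spi_write() for that register.
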
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
deleted file mode 100644
index 3ab50fd1f3f2..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
+++ /dev/null
@@ -1,262 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * LCD panel driver for Sharp LS037V7DW01
4 *
5 * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
6 * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
7 */
8
9#include <linux/delay.h>
10#include <linux/gpio/consumer.h>
11#include <linux/module.h>
12#include <linux/of.h>
13#include <linux/platform_device.h>
14#include <linux/slab.h>
15#include <linux/regulator/consumer.h>
16
17#include "../dss/omapdss.h"
18
19struct panel_drv_data {
20 struct omap_dss_device dssdev;
21 struct regulator *vcc;
22
23 struct videomode vm;
24
25 struct gpio_desc *resb_gpio; /* low = reset active min 20 us */
26 struct gpio_desc *ini_gpio; /* high = power on */
27 struct gpio_desc *mo_gpio; /* low = 480x640, high = 240x320 */
28 struct gpio_desc *lr_gpio; /* high = conventional horizontal scanning */
29 struct gpio_desc *ud_gpio; /* high = conventional vertical scanning */
30};
31
32static const struct videomode sharp_ls_vm = {
33 .hactive = 480,
34 .vactive = 640,
35
36 .pixelclock = 19200000,
37
38 .hsync_len = 2,
39 .hfront_porch = 1,
40 .hback_porch = 28,
41
42 .vsync_len = 1,
43 .vfront_porch = 1,
44 .vback_porch = 1,
45
46 .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
47};
48
49#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
50
51static int sharp_ls_connect(struct omap_dss_device *src,
52 struct omap_dss_device *dst)
53{
54 return 0;
55}
56
57static void sharp_ls_disconnect(struct omap_dss_device *src,
58 struct omap_dss_device *dst)
59{
60}
61
62static void sharp_ls_pre_enable(struct omap_dss_device *dssdev)
63{
64 struct panel_drv_data *ddata = to_panel_data(dssdev);
65 int r;
66
67 if (ddata->vcc) {
68 r = regulator_enable(ddata->vcc);
69 if (r)
70 dev_err(dssdev->dev, "%s: failed to enable regulator\n",
71 __func__);
72 }
73}
74
75static void sharp_ls_enable(struct omap_dss_device *dssdev)
76{
77 struct panel_drv_data *ddata = to_panel_data(dssdev);
78
79 /* wait couple of vsyncs until enabling the LCD */
80 msleep(50);
81
82 if (ddata->resb_gpio)
83 gpiod_set_value_cansleep(ddata->resb_gpio, 1);
84
85 if (ddata->ini_gpio)
86 gpiod_set_value_cansleep(ddata->ini_gpio, 1);
87}
88
89static void sharp_ls_disable(struct omap_dss_device *dssdev)
90{
91 struct panel_drv_data *ddata = to_panel_data(dssdev);
92
93 if (ddata->ini_gpio)
94 gpiod_set_value_cansleep(ddata->ini_gpio, 0);
95
96 if (ddata->resb_gpio)
97 gpiod_set_value_cansleep(ddata->resb_gpio, 0);
98
99 /* wait at least 5 vsyncs after disabling the LCD */
100 msleep(100);
101}
102
103static void sharp_ls_post_disable(struct omap_dss_device *dssdev)
104{
105 struct panel_drv_data *ddata = to_panel_data(dssdev);
106
107 if (ddata->vcc)
108 regulator_disable(ddata->vcc);
109}
110
111static int sharp_ls_get_modes(struct omap_dss_device *dssdev,
112 struct drm_connector *connector)
113{
114 struct panel_drv_data *ddata = to_panel_data(dssdev);
115
116 return omapdss_display_get_modes(connector, &ddata->vm);
117}
118
119static const struct omap_dss_device_ops sharp_ls_ops = {
120 .connect = sharp_ls_connect,
121 .disconnect = sharp_ls_disconnect,
122
123 .pre_enable = sharp_ls_pre_enable,
124 .enable = sharp_ls_enable,
125 .disable = sharp_ls_disable,
126 .post_disable = sharp_ls_post_disable,
127
128 .get_modes = sharp_ls_get_modes,
129};
130
131static int sharp_ls_get_gpio_of(struct device *dev, int index, int val,
132 const char *desc, struct gpio_desc **gpiod)
133{
134 struct gpio_desc *gd;
135
136 *gpiod = NULL;
137
138 gd = devm_gpiod_get_index(dev, desc, index, GPIOD_OUT_LOW);
139 if (IS_ERR(gd))
140 return PTR_ERR(gd);
141
142 *gpiod = gd;
143 return 0;
144}
145
146static int sharp_ls_probe_of(struct platform_device *pdev)
147{
148 struct panel_drv_data *ddata = platform_get_drvdata(pdev);
149 int r;
150
151 ddata->vcc = devm_regulator_get(&pdev->dev, "envdd");
152 if (IS_ERR(ddata->vcc)) {
153 dev_err(&pdev->dev, "failed to get regulator\n");
154 return PTR_ERR(ddata->vcc);
155 }
156
157 /* lcd INI */
158 r = sharp_ls_get_gpio_of(&pdev->dev, 0, 0, "enable", &ddata->ini_gpio);
159 if (r)
160 return r;
161
162 /* lcd RESB */
163 r = sharp_ls_get_gpio_of(&pdev->dev, 0, 0, "reset", &ddata->resb_gpio);
164 if (r)
165 return r;
166
167 /* lcd MO */
168 r = sharp_ls_get_gpio_of(&pdev->dev, 0, 0, "mode", &ddata->mo_gpio);
169 if (r)
170 return r;
171
172 /* lcd LR */
173 r = sharp_ls_get_gpio_of(&pdev->dev, 1, 1, "mode", &ddata->lr_gpio);
174 if (r)
175 return r;
176
177 /* lcd UD */
178 r = sharp_ls_get_gpio_of(&pdev->dev, 2, 1, "mode", &ddata->ud_gpio);
179 if (r)
180 return r;
181
182 return 0;
183}
184
185static int sharp_ls_probe(struct platform_device *pdev)
186{
187 struct panel_drv_data *ddata;
188 struct omap_dss_device *dssdev;
189 int r;
190
191 ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
192 if (ddata == NULL)
193 return -ENOMEM;
194
195 platform_set_drvdata(pdev, ddata);
196
197 r = sharp_ls_probe_of(pdev);
198 if (r)
199 return r;
200
201 ddata->vm = sharp_ls_vm;
202
203 dssdev = &ddata->dssdev;
204 dssdev->dev = &pdev->dev;
205 dssdev->ops = &sharp_ls_ops;
206 dssdev->type = OMAP_DISPLAY_TYPE_DPI;
207 dssdev->display = true;
208 dssdev->owner = THIS_MODULE;
209 dssdev->of_ports = BIT(0);
210 dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
211
212 /*
213 * Note: According to the panel documentation:
214 * DATA needs to be driven on the FALLING edge
215 */
216 dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH
217 | DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE
218 | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
219
220 omapdss_display_init(dssdev);
221 omapdss_device_register(dssdev);
222
223 return 0;
224}
225
226static int __exit sharp_ls_remove(struct platform_device *pdev)
227{
228 struct panel_drv_data *ddata = platform_get_drvdata(pdev);
229 struct omap_dss_device *dssdev = &ddata->dssdev;
230
231 omapdss_device_unregister(dssdev);
232
233 if (omapdss_device_is_enabled(dssdev)) {
234 sharp_ls_disable(dssdev);
235 sharp_ls_post_disable(dssdev);
236 }
237
238 return 0;
239}
240
241static const struct of_device_id sharp_ls_of_match[] = {
242 { .compatible = "omapdss,sharp,ls037v7dw01", },
243 {},
244};
245
246MODULE_DEVICE_TABLE(of, sharp_ls_of_match);
247
248static struct platform_driver sharp_ls_driver = {
249 .probe = sharp_ls_probe,
250 .remove = __exit_p(sharp_ls_remove),
251 .driver = {
252 .name = "panel-sharp-ls037v7dw01",
253 .of_match_table = sharp_ls_of_match,
254 .suppress_bind_attrs = true,
255 },
256};
257
258module_platform_driver(sharp_ls_driver);
259
260MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
261MODULE_DESCRIPTION("Sharp LS037V7DW01 Panel Driver");
262MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
deleted file mode 100644
index 588a1a6bbcc3..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
+++ /dev/null
@@ -1,755 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Sony ACX565AKM LCD Panel driver
4 *
5 * Copyright (C) 2010 Nokia Corporation
6 *
7 * Original Driver Author: Imre Deak <imre.deak@nokia.com>
8 * Based on panel-generic.c by Tomi Valkeinen <tomi.valkeinen@ti.com>
9 * Adapted to new DSS2 framework: Roger Quadros <roger.quadros@nokia.com>
10 */
11
12#include <linux/backlight.h>
13#include <linux/delay.h>
14#include <linux/gpio/consumer.h>
15#include <linux/jiffies.h>
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/platform_device.h>
19#include <linux/sched.h>
20#include <linux/spi/spi.h>
21
22#include "../dss/omapdss.h"
23
24#define MIPID_CMD_READ_DISP_ID 0x04
25#define MIPID_CMD_READ_RED 0x06
26#define MIPID_CMD_READ_GREEN 0x07
27#define MIPID_CMD_READ_BLUE 0x08
28#define MIPID_CMD_READ_DISP_STATUS 0x09
29#define MIPID_CMD_RDDSDR 0x0F
30#define MIPID_CMD_SLEEP_IN 0x10
31#define MIPID_CMD_SLEEP_OUT 0x11
32#define MIPID_CMD_DISP_OFF 0x28
33#define MIPID_CMD_DISP_ON 0x29
34#define MIPID_CMD_WRITE_DISP_BRIGHTNESS 0x51
35#define MIPID_CMD_READ_DISP_BRIGHTNESS 0x52
36#define MIPID_CMD_WRITE_CTRL_DISP 0x53
37
38#define CTRL_DISP_BRIGHTNESS_CTRL_ON (1 << 5)
39#define CTRL_DISP_AMBIENT_LIGHT_CTRL_ON (1 << 4)
40#define CTRL_DISP_BACKLIGHT_ON (1 << 2)
41#define CTRL_DISP_AUTO_BRIGHTNESS_ON (1 << 1)
42
43#define MIPID_CMD_READ_CTRL_DISP 0x54
44#define MIPID_CMD_WRITE_CABC 0x55
45#define MIPID_CMD_READ_CABC 0x56
46
47#define MIPID_VER_LPH8923 3
48#define MIPID_VER_LS041Y3 4
49#define MIPID_VER_L4F00311 8
50#define MIPID_VER_ACX565AKM 9
51
52struct panel_drv_data {
53 struct omap_dss_device dssdev;
54
55 struct gpio_desc *reset_gpio;
56
57 struct videomode vm;
58
59 char *name;
60 int enabled;
61 int model;
62 int revision;
63 u8 display_id[3];
64 unsigned has_bc:1;
65 unsigned has_cabc:1;
66 unsigned cabc_mode;
67 unsigned long hw_guard_end; /* next value of jiffies
68 when we can issue the
69 next sleep in/out command */
70 unsigned long hw_guard_wait; /* max guard time in jiffies */
71
72 struct spi_device *spi;
73 struct mutex mutex;
74
75 struct backlight_device *bl_dev;
76};
77
78static const struct videomode acx565akm_panel_vm = {
79 .hactive = 800,
80 .vactive = 480,
81 .pixelclock = 24000000,
82 .hfront_porch = 28,
83 .hsync_len = 4,
84 .hback_porch = 24,
85 .vfront_porch = 3,
86 .vsync_len = 3,
87 .vback_porch = 4,
88
89 .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
90};
91
92#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
93
94static void acx565akm_transfer(struct panel_drv_data *ddata, int cmd,
95 const u8 *wbuf, int wlen, u8 *rbuf, int rlen)
96{
97 struct spi_message m;
98 struct spi_transfer *x, xfer[5];
99 int r;
100
101 BUG_ON(ddata->spi == NULL);
102
103 spi_message_init(&m);
104
105 memset(xfer, 0, sizeof(xfer));
106 x = &xfer[0];
107
108 cmd &= 0xff;
109 x->tx_buf = &cmd;
110 x->bits_per_word = 9;
111 x->len = 2;
112
113 if (rlen > 1 && wlen == 0) {
114 /*
115 * Between the command and the response data there is a
116 * dummy clock cycle. Add an extra bit after the command
117 * word to account for this.
118 */
119 x->bits_per_word = 10;
120 cmd <<= 1;
121 }
122 spi_message_add_tail(x, &m);
123
124 if (wlen) {
125 x++;
126 x->tx_buf = wbuf;
127 x->len = wlen;
128 x->bits_per_word = 9;
129 spi_message_add_tail(x, &m);
130 }
131
132 if (rlen) {
133 x++;
134 x->rx_buf = rbuf;
135 x->len = rlen;
136 spi_message_add_tail(x, &m);
137 }
138
139 r = spi_sync(ddata->spi, &m);
140 if (r < 0)
141 dev_dbg(&ddata->spi->dev, "spi_sync %d\n", r);
142}
143
144static inline void acx565akm_cmd(struct panel_drv_data *ddata, int cmd)
145{
146 acx565akm_transfer(ddata, cmd, NULL, 0, NULL, 0);
147}
148
149static inline void acx565akm_write(struct panel_drv_data *ddata,
150 int reg, const u8 *buf, int len)
151{
152 acx565akm_transfer(ddata, reg, buf, len, NULL, 0);
153}
154
155static inline void acx565akm_read(struct panel_drv_data *ddata,
156 int reg, u8 *buf, int len)
157{
158 acx565akm_transfer(ddata, reg, NULL, 0, buf, len);
159}
160
161static void hw_guard_start(struct panel_drv_data *ddata, int guard_msec)
162{
163 ddata->hw_guard_wait = msecs_to_jiffies(guard_msec);
164 ddata->hw_guard_end = jiffies + ddata->hw_guard_wait;
165}
166
167static void hw_guard_wait(struct panel_drv_data *ddata)
168{
169 unsigned long wait = ddata->hw_guard_end - jiffies;
170
171 if ((long)wait > 0 && wait <= ddata->hw_guard_wait) {
172 set_current_state(TASK_UNINTERRUPTIBLE);
173 schedule_timeout(wait);
174 }
175}
176
177static void set_sleep_mode(struct panel_drv_data *ddata, int on)
178{
179 int cmd;
180
181 if (on)
182 cmd = MIPID_CMD_SLEEP_IN;
183 else
184 cmd = MIPID_CMD_SLEEP_OUT;
185 /*
186 * We have to keep 120msec between sleep in/out commands.
187 * (8.2.15, 8.2.16).
188 */
189 hw_guard_wait(ddata);
190 acx565akm_cmd(ddata, cmd);
191 hw_guard_start(ddata, 120);
192}
193
194static void set_display_state(struct panel_drv_data *ddata, int enabled)
195{
196 int cmd = enabled ? MIPID_CMD_DISP_ON : MIPID_CMD_DISP_OFF;
197
198 acx565akm_cmd(ddata, cmd);
199}
200
201static int panel_enabled(struct panel_drv_data *ddata)
202{
203 __be32 v;
204 u32 disp_status;
205 int enabled;
206
207 acx565akm_read(ddata, MIPID_CMD_READ_DISP_STATUS, (u8 *)&v, 4);
208 disp_status = __be32_to_cpu(v);
209 enabled = (disp_status & (1 << 17)) && (disp_status & (1 << 10));
210 dev_dbg(&ddata->spi->dev,
211 "LCD panel %senabled by bootloader (status 0x%04x)\n",
212 enabled ? "" : "not ", disp_status);
213 return enabled;
214}
215
216static int panel_detect(struct panel_drv_data *ddata)
217{
218 acx565akm_read(ddata, MIPID_CMD_READ_DISP_ID, ddata->display_id, 3);
219 dev_dbg(&ddata->spi->dev, "MIPI display ID: %02x%02x%02x\n",
220 ddata->display_id[0],
221 ddata->display_id[1],
222 ddata->display_id[2]);
223
224 switch (ddata->display_id[0]) {
225 case 0x10:
226 ddata->model = MIPID_VER_ACX565AKM;
227 ddata->name = "acx565akm";
228 ddata->has_bc = 1;
229 ddata->has_cabc = 1;
230 break;
231 case 0x29:
232 ddata->model = MIPID_VER_L4F00311;
233 ddata->name = "l4f00311";
234 break;
235 case 0x45:
236 ddata->model = MIPID_VER_LPH8923;
237 ddata->name = "lph8923";
238 break;
239 case 0x83:
240 ddata->model = MIPID_VER_LS041Y3;
241 ddata->name = "ls041y3";
242 break;
243 default:
244 ddata->name = "unknown";
245 dev_err(&ddata->spi->dev, "invalid display ID\n");
246 return -ENODEV;
247 }
248
249 ddata->revision = ddata->display_id[1];
250
251 dev_info(&ddata->spi->dev, "omapfb: %s rev %02x LCD detected\n",
252 ddata->name, ddata->revision);
253
254 return 0;
255}
256
257/*----------------------Backlight Control-------------------------*/
258
259static void enable_backlight_ctrl(struct panel_drv_data *ddata, int enable)
260{
261 u16 ctrl;
262
263 acx565akm_read(ddata, MIPID_CMD_READ_CTRL_DISP, (u8 *)&ctrl, 1);
264 if (enable) {
265 ctrl |= CTRL_DISP_BRIGHTNESS_CTRL_ON |
266 CTRL_DISP_BACKLIGHT_ON;
267 } else {
268 ctrl &= ~(CTRL_DISP_BRIGHTNESS_CTRL_ON |
269 CTRL_DISP_BACKLIGHT_ON);
270 }
271
272 ctrl |= 1 << 8;
273 acx565akm_write(ddata, MIPID_CMD_WRITE_CTRL_DISP, (u8 *)&ctrl, 2);
274}
275
276static void set_cabc_mode(struct panel_drv_data *ddata, unsigned int mode)
277{
278 u16 cabc_ctrl;
279
280 ddata->cabc_mode = mode;
281 if (!ddata->enabled)
282 return;
283 cabc_ctrl = 0;
284 acx565akm_read(ddata, MIPID_CMD_READ_CABC, (u8 *)&cabc_ctrl, 1);
285 cabc_ctrl &= ~3;
286 cabc_ctrl |= (1 << 8) | (mode & 3);
287 acx565akm_write(ddata, MIPID_CMD_WRITE_CABC, (u8 *)&cabc_ctrl, 2);
288}
289
290static unsigned int get_cabc_mode(struct panel_drv_data *ddata)
291{
292 return ddata->cabc_mode;
293}
294
295static unsigned int get_hw_cabc_mode(struct panel_drv_data *ddata)
296{
297 u8 cabc_ctrl;
298
299 acx565akm_read(ddata, MIPID_CMD_READ_CABC, &cabc_ctrl, 1);
300 return cabc_ctrl & 3;
301}
302
303static void acx565akm_set_brightness(struct panel_drv_data *ddata, int level)
304{
305 int bv;
306
307 bv = level | (1 << 8);
308 acx565akm_write(ddata, MIPID_CMD_WRITE_DISP_BRIGHTNESS, (u8 *)&bv, 2);
309
310 if (level)
311 enable_backlight_ctrl(ddata, 1);
312 else
313 enable_backlight_ctrl(ddata, 0);
314}
315
316static int acx565akm_get_actual_brightness(struct panel_drv_data *ddata)
317{
318 u8 bv;
319
320 acx565akm_read(ddata, MIPID_CMD_READ_DISP_BRIGHTNESS, &bv, 1);
321
322 return bv;
323}
324
325
326static int acx565akm_bl_update_status(struct backlight_device *dev)
327{
328 struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev);
329 int level;
330
331 dev_dbg(&ddata->spi->dev, "%s\n", __func__);
332
333 if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
334 dev->props.power == FB_BLANK_UNBLANK)
335 level = dev->props.brightness;
336 else
337 level = 0;
338
339 if (ddata->has_bc)
340 acx565akm_set_brightness(ddata, level);
341 else
342 return -ENODEV;
343
344 return 0;
345}
346
347static int acx565akm_bl_get_intensity(struct backlight_device *dev)
348{
349 struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev);
350
351 dev_dbg(&dev->dev, "%s\n", __func__);
352
353 if (!ddata->has_bc)
354 return -ENODEV;
355
356 if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
357 dev->props.power == FB_BLANK_UNBLANK) {
358 if (ddata->has_bc)
359 return acx565akm_get_actual_brightness(ddata);
360 else
361 return dev->props.brightness;
362 }
363
364 return 0;
365}
366
367static int acx565akm_bl_update_status_locked(struct backlight_device *dev)
368{
369 struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev);
370 int r;
371
372 mutex_lock(&ddata->mutex);
373 r = acx565akm_bl_update_status(dev);
374 mutex_unlock(&ddata->mutex);
375
376 return r;
377}
378
379static int acx565akm_bl_get_intensity_locked(struct backlight_device *dev)
380{
381 struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev);
382 int r;
383
384 mutex_lock(&ddata->mutex);
385 r = acx565akm_bl_get_intensity(dev);
386 mutex_unlock(&ddata->mutex);
387
388 return r;
389}
390
391static const struct backlight_ops acx565akm_bl_ops = {
392 .get_brightness = acx565akm_bl_get_intensity_locked,
393 .update_status = acx565akm_bl_update_status_locked,
394};
395
396/*--------------------Auto Brightness control via Sysfs---------------------*/
397
398static const char * const cabc_modes[] = {
399 "off", /* always used when CABC is not supported */
400 "ui",
401 "still-image",
402 "moving-image",
403};
404
405static ssize_t show_cabc_mode(struct device *dev,
406 struct device_attribute *attr,
407 char *buf)
408{
409 struct panel_drv_data *ddata = dev_get_drvdata(dev);
410 const char *mode_str;
411 int mode;
412 int len;
413
414 if (!ddata->has_cabc)
415 mode = 0;
416 else
417 mode = get_cabc_mode(ddata);
418 mode_str = "unknown";
419 if (mode >= 0 && mode < ARRAY_SIZE(cabc_modes))
420 mode_str = cabc_modes[mode];
421 len = snprintf(buf, PAGE_SIZE, "%s\n", mode_str);
422
423 return len < PAGE_SIZE - 1 ? len : PAGE_SIZE - 1;
424}
425
426static ssize_t store_cabc_mode(struct device *dev,
427 struct device_attribute *attr,
428 const char *buf, size_t count)
429{
430 struct panel_drv_data *ddata = dev_get_drvdata(dev);
431 int i;
432
433 for (i = 0; i < ARRAY_SIZE(cabc_modes); i++) {
434 const char *mode_str = cabc_modes[i];
435 int cmp_len = strlen(mode_str);
436
437 if (count > 0 && buf[count - 1] == '\n')
438 count--;
439 if (count != cmp_len)
440 continue;
441
442 if (strncmp(buf, mode_str, cmp_len) == 0)
443 break;
444 }
445
446 if (i == ARRAY_SIZE(cabc_modes))
447 return -EINVAL;
448
449 if (!ddata->has_cabc && i != 0)
450 return -EINVAL;
451
452 mutex_lock(&ddata->mutex);
453 set_cabc_mode(ddata, i);
454 mutex_unlock(&ddata->mutex);
455
456 return count;
457}
458
459static ssize_t show_cabc_available_modes(struct device *dev,
460 struct device_attribute *attr,
461 char *buf)
462{
463 struct panel_drv_data *ddata = dev_get_drvdata(dev);
464 int len;
465 int i;
466
467 if (!ddata->has_cabc)
468 return snprintf(buf, PAGE_SIZE, "%s\n", cabc_modes[0]);
469
470 for (i = 0, len = 0;
471 len < PAGE_SIZE && i < ARRAY_SIZE(cabc_modes); i++)
472 len += snprintf(&buf[len], PAGE_SIZE - len, "%s%s%s",
473 i ? " " : "", cabc_modes[i],
474 i == ARRAY_SIZE(cabc_modes) - 1 ? "\n" : "");
475
476 return len < PAGE_SIZE ? len : PAGE_SIZE - 1;
477}
478
479static DEVICE_ATTR(cabc_mode, S_IRUGO | S_IWUSR,
480 show_cabc_mode, store_cabc_mode);
481static DEVICE_ATTR(cabc_available_modes, S_IRUGO,
482 show_cabc_available_modes, NULL);
483
484static struct attribute *bldev_attrs[] = {
485 &dev_attr_cabc_mode.attr,
486 &dev_attr_cabc_available_modes.attr,
487 NULL,
488};
489
490static const struct attribute_group bldev_attr_group = {
491 .attrs = bldev_attrs,
492};
493
494static int acx565akm_connect(struct omap_dss_device *src,
495 struct omap_dss_device *dst)
496{
497 return 0;
498}
499
500static void acx565akm_disconnect(struct omap_dss_device *src,
501 struct omap_dss_device *dst)
502{
503}
504
505static int acx565akm_panel_power_on(struct omap_dss_device *dssdev)
506{
507 struct panel_drv_data *ddata = to_panel_data(dssdev);
508
509 dev_dbg(&ddata->spi->dev, "%s\n", __func__);
510
511 /*FIXME tweak me */
512 msleep(50);
513
514 if (ddata->reset_gpio)
515 gpiod_set_value(ddata->reset_gpio, 1);
516
517 if (ddata->enabled) {
518 dev_dbg(&ddata->spi->dev, "panel already enabled\n");
519 return 0;
520 }
521
522 /*
523 * We have to meet all the following delay requirements:
524 * 1. tRW: reset pulse width 10usec (7.12.1)
525 * 2. tRT: reset cancel time 5msec (7.12.1)
526 * 3. Providing PCLK,HS,VS signals for 2 frames = ~50msec worst
527 * case (7.6.2)
528 * 4. 120msec before the sleep out command (7.12.1)
529 */
530 msleep(120);
531
532 set_sleep_mode(ddata, 0);
533 ddata->enabled = 1;
534
535 /* 5msec between sleep out and the next command. (8.2.16) */
536 usleep_range(5000, 10000);
537 set_display_state(ddata, 1);
538 set_cabc_mode(ddata, ddata->cabc_mode);
539
540 return acx565akm_bl_update_status(ddata->bl_dev);
541}
542
543static void acx565akm_panel_power_off(struct omap_dss_device *dssdev)
544{
545 struct panel_drv_data *ddata = to_panel_data(dssdev);
546
547 dev_dbg(dssdev->dev, "%s\n", __func__);
548
549 if (!ddata->enabled)
550 return;
551
552 set_display_state(ddata, 0);
553 set_sleep_mode(ddata, 1);
554 ddata->enabled = 0;
555 /*
556 * We have to provide PCLK,HS,VS signals for 2 frames (worst case
557 * ~50msec) after sending the sleep in command and asserting the
558 * reset signal. We probably could assert the reset w/o the delay
559 * but we still delay to avoid possible artifacts. (7.6.1)
560 */
561 msleep(50);
562
563 if (ddata->reset_gpio)
564 gpiod_set_value(ddata->reset_gpio, 0);
565
566 /* FIXME need to tweak this delay */
567 msleep(100);
568}
569
570static void acx565akm_enable(struct omap_dss_device *dssdev)
571{
572 struct panel_drv_data *ddata = to_panel_data(dssdev);
573
574 mutex_lock(&ddata->mutex);
575 acx565akm_panel_power_on(dssdev);
576 mutex_unlock(&ddata->mutex);
577}
578
579static void acx565akm_disable(struct omap_dss_device *dssdev)
580{
581 struct panel_drv_data *ddata = to_panel_data(dssdev);
582
583 mutex_lock(&ddata->mutex);
584 acx565akm_panel_power_off(dssdev);
585 mutex_unlock(&ddata->mutex);
586}
587
588static int acx565akm_get_modes(struct omap_dss_device *dssdev,
589 struct drm_connector *connector)
590{
591 struct panel_drv_data *ddata = to_panel_data(dssdev);
592
593 return omapdss_display_get_modes(connector, &ddata->vm);
594}
595
596static const struct omap_dss_device_ops acx565akm_ops = {
597 .connect = acx565akm_connect,
598 .disconnect = acx565akm_disconnect,
599
600 .enable = acx565akm_enable,
601 .disable = acx565akm_disable,
602
603 .get_modes = acx565akm_get_modes,
604};
605
606static int acx565akm_probe(struct spi_device *spi)
607{
608 struct panel_drv_data *ddata;
609 struct omap_dss_device *dssdev;
610 struct backlight_device *bldev;
611 int max_brightness, brightness;
612 struct backlight_properties props;
613 struct gpio_desc *gpio;
614 int r;
615
616 dev_dbg(&spi->dev, "%s\n", __func__);
617
618 spi->mode = SPI_MODE_3;
619
620 ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL);
621 if (ddata == NULL)
622 return -ENOMEM;
623
624 dev_set_drvdata(&spi->dev, ddata);
625
626 ddata->spi = spi;
627
628 mutex_init(&ddata->mutex);
629
630 gpio = devm_gpiod_get_optional(&spi->dev, "reset", GPIOD_OUT_LOW);
631 if (IS_ERR(gpio)) {
632 dev_err(&spi->dev, "failed to parse reset gpio\n");
633 return PTR_ERR(gpio);
634 }
635
636 ddata->reset_gpio = gpio;
637
638 if (ddata->reset_gpio)
639 gpiod_set_value(ddata->reset_gpio, 1);
640
641 /*
642 * After reset we have to wait 5 msec before the first
643 * command can be sent.
644 */
645 usleep_range(5000, 10000);
646
647 ddata->enabled = panel_enabled(ddata);
648
649 r = panel_detect(ddata);
650
651 if (!ddata->enabled && ddata->reset_gpio)
652 gpiod_set_value(ddata->reset_gpio, 0);
653
654 if (r) {
655 dev_err(&spi->dev, "%s panel detect error\n", __func__);
656 return r;
657 }
658
659 memset(&props, 0, sizeof(props));
660 props.fb_blank = FB_BLANK_UNBLANK;
661 props.power = FB_BLANK_UNBLANK;
662 props.type = BACKLIGHT_RAW;
663
664 bldev = backlight_device_register("acx565akm", &ddata->spi->dev,
665 ddata, &acx565akm_bl_ops, &props);
666 if (IS_ERR(bldev))
667 return PTR_ERR(bldev);
668 ddata->bl_dev = bldev;
669 if (ddata->has_cabc) {
670 r = sysfs_create_group(&bldev->dev.kobj, &bldev_attr_group);
671 if (r) {
672 dev_err(&bldev->dev,
673 "%s failed to create sysfs files\n", __func__);
674 goto err_backlight_unregister;
675 }
676 ddata->cabc_mode = get_hw_cabc_mode(ddata);
677 }
678
679 max_brightness = 255;
680
681 if (ddata->has_bc)
682 brightness = acx565akm_get_actual_brightness(ddata);
683 else
684 brightness = 0;
685
686 bldev->props.max_brightness = max_brightness;
687 bldev->props.brightness = brightness;
688
689 acx565akm_bl_update_status(bldev);
690
691
692 ddata->vm = acx565akm_panel_vm;
693
694 dssdev = &ddata->dssdev;
695 dssdev->dev = &spi->dev;
696 dssdev->ops = &acx565akm_ops;
697 dssdev->type = OMAP_DISPLAY_TYPE_SDI;
698 dssdev->display = true;
699 dssdev->owner = THIS_MODULE;
700 dssdev->of_ports = BIT(0);
701 dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
702 dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH
703 | DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE
704 | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
705
706 omapdss_display_init(dssdev);
707 omapdss_device_register(dssdev);
708
709 return 0;
710
711err_backlight_unregister:
712 backlight_device_unregister(bldev);
713 return r;
714}
715
716static int acx565akm_remove(struct spi_device *spi)
717{
718 struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
719 struct omap_dss_device *dssdev = &ddata->dssdev;
720
721 dev_dbg(&ddata->spi->dev, "%s\n", __func__);
722
723 sysfs_remove_group(&ddata->bl_dev->dev.kobj, &bldev_attr_group);
724 backlight_device_unregister(ddata->bl_dev);
725
726 omapdss_device_unregister(dssdev);
727
728 if (omapdss_device_is_enabled(dssdev))
729 acx565akm_disable(dssdev);
730
731 return 0;
732}
733
734static const struct of_device_id acx565akm_of_match[] = {
735 { .compatible = "omapdss,sony,acx565akm", },
736 {},
737};
738MODULE_DEVICE_TABLE(of, acx565akm_of_match);
739
740static struct spi_driver acx565akm_driver = {
741 .driver = {
742 .name = "acx565akm",
743 .of_match_table = acx565akm_of_match,
744 .suppress_bind_attrs = true,
745 },
746 .probe = acx565akm_probe,
747 .remove = acx565akm_remove,
748};
749
750module_spi_driver(acx565akm_driver);
751
752MODULE_ALIAS("spi:sony,acx565akm");
753MODULE_AUTHOR("Nokia Corporation");
754MODULE_DESCRIPTION("acx565akm LCD Driver");
755MODULE_LICENSE("GPL");
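
For reference, acx565akm_transfer() above frames every MIPID command as a 9-bit SPI word and, when a multi-byte read follows with no write payload, widens the word to 10 bits and shifts the command left by one so the dummy clock cycle between command and response is absorbed. The sketch below reproduces only that framing decision outside the kernel; the command value is the display-status read used by panel_enabled().

    /* Minimal user-space sketch of the command framing performed by
     * acx565akm_transfer(). Illustrative only; not kernel code.
     */
    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    struct mipid_frame {
            uint16_t word;          /* value clocked out for the command */
            unsigned int bits;      /* bits_per_word for this transfer */
    };

    static struct mipid_frame mipid_frame_cmd(uint8_t cmd, bool long_read)
    {
            struct mipid_frame f = { .word = cmd, .bits = 9 };

            if (long_read) {
                    /* one dummy clock sits between command and response */
                    f.bits = 10;
                    f.word <<= 1;
            }
            return f;
    }

    int main(void)
    {
            /* MIPID_CMD_READ_DISP_STATUS (0x09) is read back as 4 bytes */
            struct mipid_frame f = mipid_frame_cmd(0x09, true);

            printf("word 0x%03x, %u bits per word\n", f.word, f.bits);
            return 0;
    }

It prints word 0x012 at 10 bits per word; a plain command such as MIPID_CMD_SLEEP_OUT stays at 9 bits.
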
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
deleted file mode 100644
index c885018ac6ce..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
+++ /dev/null
@@ -1,390 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Toppoly TD028TTEC1 panel support
4 *
5 * Copyright (C) 2008 Nokia Corporation
6 * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
7 *
8 * Neo 1973 code (jbt6k74.c):
9 * Copyright (C) 2006-2007 by OpenMoko, Inc.
10 * Author: Harald Welte <laforge@openmoko.org>
11 *
12 * Ported and adapted from Neo 1973 U-Boot by:
13 * H. Nikolaus Schaller <hns@goldelico.com>
14 */
15
16#include <linux/module.h>
17#include <linux/delay.h>
18#include <linux/spi/spi.h>
19
20#include "../dss/omapdss.h"
21
22struct panel_drv_data {
23 struct omap_dss_device dssdev;
24
25 struct videomode vm;
26
27 struct backlight_device *backlight;
28
29 struct spi_device *spi_dev;
30};
31
32static const struct videomode td028ttec1_panel_vm = {
33 .hactive = 480,
34 .vactive = 640,
35 .pixelclock = 22153000,
36 .hfront_porch = 24,
37 .hsync_len = 8,
38 .hback_porch = 8,
39 .vfront_porch = 4,
40 .vsync_len = 2,
41 .vback_porch = 2,
42
43 .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
44};
45
46#define JBT_COMMAND 0x000
47#define JBT_DATA 0x100
48
49static int jbt_ret_write_0(struct panel_drv_data *ddata, u8 reg)
50{
51 int rc;
52 u16 tx_buf = JBT_COMMAND | reg;
53
54 rc = spi_write(ddata->spi_dev, (u8 *)&tx_buf,
55 1*sizeof(u16));
56 if (rc != 0)
57 dev_err(&ddata->spi_dev->dev,
58 "jbt_ret_write_0 spi_write ret %d\n", rc);
59
60 return rc;
61}
62
63static int jbt_reg_write_1(struct panel_drv_data *ddata, u8 reg, u8 data)
64{
65 int rc;
66 u16 tx_buf[2];
67
68 tx_buf[0] = JBT_COMMAND | reg;
69 tx_buf[1] = JBT_DATA | data;
70 rc = spi_write(ddata->spi_dev, (u8 *)tx_buf,
71 2*sizeof(u16));
72 if (rc != 0)
73 dev_err(&ddata->spi_dev->dev,
74 "jbt_reg_write_1 spi_write ret %d\n", rc);
75
76 return rc;
77}
78
79static int jbt_reg_write_2(struct panel_drv_data *ddata, u8 reg, u16 data)
80{
81 int rc;
82 u16 tx_buf[3];
83
84 tx_buf[0] = JBT_COMMAND | reg;
85 tx_buf[1] = JBT_DATA | (data >> 8);
86 tx_buf[2] = JBT_DATA | (data & 0xff);
87
88 rc = spi_write(ddata->spi_dev, (u8 *)tx_buf,
89 3*sizeof(u16));
90
91 if (rc != 0)
92 dev_err(&ddata->spi_dev->dev,
93 "jbt_reg_write_2 spi_write ret %d\n", rc);
94
95 return rc;
96}
97
98enum jbt_register {
99 JBT_REG_SLEEP_IN = 0x10,
100 JBT_REG_SLEEP_OUT = 0x11,
101
102 JBT_REG_DISPLAY_OFF = 0x28,
103 JBT_REG_DISPLAY_ON = 0x29,
104
105 JBT_REG_RGB_FORMAT = 0x3a,
106 JBT_REG_QUAD_RATE = 0x3b,
107
108 JBT_REG_POWER_ON_OFF = 0xb0,
109 JBT_REG_BOOSTER_OP = 0xb1,
110 JBT_REG_BOOSTER_MODE = 0xb2,
111 JBT_REG_BOOSTER_FREQ = 0xb3,
112 JBT_REG_OPAMP_SYSCLK = 0xb4,
113 JBT_REG_VSC_VOLTAGE = 0xb5,
114 JBT_REG_VCOM_VOLTAGE = 0xb6,
115 JBT_REG_EXT_DISPL = 0xb7,
116 JBT_REG_OUTPUT_CONTROL = 0xb8,
117 JBT_REG_DCCLK_DCEV = 0xb9,
118 JBT_REG_DISPLAY_MODE1 = 0xba,
119 JBT_REG_DISPLAY_MODE2 = 0xbb,
120 JBT_REG_DISPLAY_MODE = 0xbc,
121 JBT_REG_ASW_SLEW = 0xbd,
122 JBT_REG_DUMMY_DISPLAY = 0xbe,
123 JBT_REG_DRIVE_SYSTEM = 0xbf,
124
125 JBT_REG_SLEEP_OUT_FR_A = 0xc0,
126 JBT_REG_SLEEP_OUT_FR_B = 0xc1,
127 JBT_REG_SLEEP_OUT_FR_C = 0xc2,
128 JBT_REG_SLEEP_IN_LCCNT_D = 0xc3,
129 JBT_REG_SLEEP_IN_LCCNT_E = 0xc4,
130 JBT_REG_SLEEP_IN_LCCNT_F = 0xc5,
131 JBT_REG_SLEEP_IN_LCCNT_G = 0xc6,
132
133 JBT_REG_GAMMA1_FINE_1 = 0xc7,
134 JBT_REG_GAMMA1_FINE_2 = 0xc8,
135 JBT_REG_GAMMA1_INCLINATION = 0xc9,
136 JBT_REG_GAMMA1_BLUE_OFFSET = 0xca,
137
138 JBT_REG_BLANK_CONTROL = 0xcf,
139 JBT_REG_BLANK_TH_TV = 0xd0,
140 JBT_REG_CKV_ON_OFF = 0xd1,
141 JBT_REG_CKV_1_2 = 0xd2,
142 JBT_REG_OEV_TIMING = 0xd3,
143 JBT_REG_ASW_TIMING_1 = 0xd4,
144 JBT_REG_ASW_TIMING_2 = 0xd5,
145
146 JBT_REG_HCLOCK_VGA = 0xec,
147 JBT_REG_HCLOCK_QVGA = 0xed,
148};
149
150#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
151
152static int td028ttec1_panel_connect(struct omap_dss_device *src,
153 struct omap_dss_device *dst)
154{
155 return 0;
156}
157
158static void td028ttec1_panel_disconnect(struct omap_dss_device *src,
159 struct omap_dss_device *dst)
160{
161}
162
163static void td028ttec1_panel_enable(struct omap_dss_device *dssdev)
164{
165 struct panel_drv_data *ddata = to_panel_data(dssdev);
166 int r = 0;
167
168 dev_dbg(dssdev->dev, "%s: state %d\n", __func__, dssdev->state);
169
170 /* three times command zero */
171 r |= jbt_ret_write_0(ddata, 0x00);
172 usleep_range(1000, 2000);
173 r |= jbt_ret_write_0(ddata, 0x00);
174 usleep_range(1000, 2000);
175 r |= jbt_ret_write_0(ddata, 0x00);
176 usleep_range(1000, 2000);
177
178 if (r) {
179 dev_warn(dssdev->dev, "%s: transfer error\n", __func__);
180 return;
181 }
182
183 /* deep standby out */
184 r |= jbt_reg_write_1(ddata, JBT_REG_POWER_ON_OFF, 0x17);
185
186 /* RGB I/F on, RAM write off, QVGA through, SIGCON enable */
187 r |= jbt_reg_write_1(ddata, JBT_REG_DISPLAY_MODE, 0x80);
188
189 /* Quad mode off */
190 r |= jbt_reg_write_1(ddata, JBT_REG_QUAD_RATE, 0x00);
191
192 /* AVDD on, XVDD on */
193 r |= jbt_reg_write_1(ddata, JBT_REG_POWER_ON_OFF, 0x16);
194
195 /* Output control */
196 r |= jbt_reg_write_2(ddata, JBT_REG_OUTPUT_CONTROL, 0xfff9);
197
198 /* Sleep mode off */
199 r |= jbt_ret_write_0(ddata, JBT_REG_SLEEP_OUT);
200
201 /* at this point we have like 50% grey */
202
203 /* initialize register set */
204 r |= jbt_reg_write_1(ddata, JBT_REG_DISPLAY_MODE1, 0x01);
205 r |= jbt_reg_write_1(ddata, JBT_REG_DISPLAY_MODE2, 0x00);
206 r |= jbt_reg_write_1(ddata, JBT_REG_RGB_FORMAT, 0x60);
207 r |= jbt_reg_write_1(ddata, JBT_REG_DRIVE_SYSTEM, 0x10);
208 r |= jbt_reg_write_1(ddata, JBT_REG_BOOSTER_OP, 0x56);
209 r |= jbt_reg_write_1(ddata, JBT_REG_BOOSTER_MODE, 0x33);
210 r |= jbt_reg_write_1(ddata, JBT_REG_BOOSTER_FREQ, 0x11);
211 r |= jbt_reg_write_1(ddata, JBT_REG_BOOSTER_FREQ, 0x11);
212 r |= jbt_reg_write_1(ddata, JBT_REG_OPAMP_SYSCLK, 0x02);
213 r |= jbt_reg_write_1(ddata, JBT_REG_VSC_VOLTAGE, 0x2b);
214 r |= jbt_reg_write_1(ddata, JBT_REG_VCOM_VOLTAGE, 0x40);
215 r |= jbt_reg_write_1(ddata, JBT_REG_EXT_DISPL, 0x03);
216 r |= jbt_reg_write_1(ddata, JBT_REG_DCCLK_DCEV, 0x04);
217 /*
218 * default of 0x02 in JBT_REG_ASW_SLEW responsible for 72Hz requirement
219 * to avoid red / blue flicker
220 */
221 r |= jbt_reg_write_1(ddata, JBT_REG_ASW_SLEW, 0x04);
222 r |= jbt_reg_write_1(ddata, JBT_REG_DUMMY_DISPLAY, 0x00);
223
224 r |= jbt_reg_write_1(ddata, JBT_REG_SLEEP_OUT_FR_A, 0x11);
225 r |= jbt_reg_write_1(ddata, JBT_REG_SLEEP_OUT_FR_B, 0x11);
226 r |= jbt_reg_write_1(ddata, JBT_REG_SLEEP_OUT_FR_C, 0x11);
227 r |= jbt_reg_write_2(ddata, JBT_REG_SLEEP_IN_LCCNT_D, 0x2040);
228 r |= jbt_reg_write_2(ddata, JBT_REG_SLEEP_IN_LCCNT_E, 0x60c0);
229 r |= jbt_reg_write_2(ddata, JBT_REG_SLEEP_IN_LCCNT_F, 0x1020);
230 r |= jbt_reg_write_2(ddata, JBT_REG_SLEEP_IN_LCCNT_G, 0x60c0);
231
232 r |= jbt_reg_write_2(ddata, JBT_REG_GAMMA1_FINE_1, 0x5533);
233 r |= jbt_reg_write_1(ddata, JBT_REG_GAMMA1_FINE_2, 0x00);
234 r |= jbt_reg_write_1(ddata, JBT_REG_GAMMA1_INCLINATION, 0x00);
235 r |= jbt_reg_write_1(ddata, JBT_REG_GAMMA1_BLUE_OFFSET, 0x00);
236
237 r |= jbt_reg_write_2(ddata, JBT_REG_HCLOCK_VGA, 0x1f0);
238 r |= jbt_reg_write_1(ddata, JBT_REG_BLANK_CONTROL, 0x02);
239 r |= jbt_reg_write_2(ddata, JBT_REG_BLANK_TH_TV, 0x0804);
240
241 r |= jbt_reg_write_1(ddata, JBT_REG_CKV_ON_OFF, 0x01);
242 r |= jbt_reg_write_2(ddata, JBT_REG_CKV_1_2, 0x0000);
243
244 r |= jbt_reg_write_2(ddata, JBT_REG_OEV_TIMING, 0x0d0e);
245 r |= jbt_reg_write_2(ddata, JBT_REG_ASW_TIMING_1, 0x11a4);
246 r |= jbt_reg_write_1(ddata, JBT_REG_ASW_TIMING_2, 0x0e);
247
248 r |= jbt_ret_write_0(ddata, JBT_REG_DISPLAY_ON);
249
250 if (r)
251 dev_err(dssdev->dev, "%s: write error\n", __func__);
252
253 backlight_enable(ddata->backlight);
254}
255
256static void td028ttec1_panel_disable(struct omap_dss_device *dssdev)
257{
258 struct panel_drv_data *ddata = to_panel_data(dssdev);
259
260 backlight_disable(ddata->backlight);
261
262 dev_dbg(dssdev->dev, "td028ttec1_panel_disable()\n");
263
264 jbt_ret_write_0(ddata, JBT_REG_DISPLAY_OFF);
265 jbt_reg_write_2(ddata, JBT_REG_OUTPUT_CONTROL, 0x8002);
266 jbt_ret_write_0(ddata, JBT_REG_SLEEP_IN);
267 jbt_reg_write_1(ddata, JBT_REG_POWER_ON_OFF, 0x00);
268}
269
270static int td028ttec1_panel_get_modes(struct omap_dss_device *dssdev,
271 struct drm_connector *connector)
272{
273 struct panel_drv_data *ddata = to_panel_data(dssdev);
274
275 return omapdss_display_get_modes(connector, &ddata->vm);
276}
277
278static const struct omap_dss_device_ops td028ttec1_ops = {
279 .connect = td028ttec1_panel_connect,
280 .disconnect = td028ttec1_panel_disconnect,
281
282 .enable = td028ttec1_panel_enable,
283 .disable = td028ttec1_panel_disable,
284
285 .get_modes = td028ttec1_panel_get_modes,
286};
287
288static int td028ttec1_panel_probe(struct spi_device *spi)
289{
290 struct panel_drv_data *ddata;
291 struct omap_dss_device *dssdev;
292 int r;
293
294 dev_dbg(&spi->dev, "%s\n", __func__);
295
296 spi->bits_per_word = 9;
297 spi->mode = SPI_MODE_3;
298
299 r = spi_setup(spi);
300 if (r < 0) {
301 dev_err(&spi->dev, "spi_setup failed: %d\n", r);
302 return r;
303 }
304
305 ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL);
306 if (ddata == NULL)
307 return -ENOMEM;
308
309 ddata->backlight = devm_of_find_backlight(&spi->dev);
310 if (IS_ERR(ddata->backlight))
311 return PTR_ERR(ddata->backlight);
312
313 dev_set_drvdata(&spi->dev, ddata);
314
315 ddata->spi_dev = spi;
316
317 ddata->vm = td028ttec1_panel_vm;
318
319 dssdev = &ddata->dssdev;
320 dssdev->dev = &spi->dev;
321 dssdev->ops = &td028ttec1_ops;
322 dssdev->type = OMAP_DISPLAY_TYPE_DPI;
323 dssdev->display = true;
324 dssdev->owner = THIS_MODULE;
325 dssdev->of_ports = BIT(0);
326 dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
327
328 /*
329 * Note: According to the panel documentation:
330 * SYNC needs to be driven on the FALLING edge
331 */
332 dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH
333 | DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE
334 | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;
335
336 omapdss_display_init(dssdev);
337 omapdss_device_register(dssdev);
338
339 return 0;
340}
341
342static int td028ttec1_panel_remove(struct spi_device *spi)
343{
344 struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
345 struct omap_dss_device *dssdev = &ddata->dssdev;
346
347 dev_dbg(&ddata->spi_dev->dev, "%s\n", __func__);
348
349 omapdss_device_unregister(dssdev);
350
351 td028ttec1_panel_disable(dssdev);
352
353 return 0;
354}
355
356static const struct of_device_id td028ttec1_of_match[] = {
357 { .compatible = "omapdss,tpo,td028ttec1", },
358 /* keep to not break older DTB */
359 { .compatible = "omapdss,toppoly,td028ttec1", },
360 {},
361};
362
363MODULE_DEVICE_TABLE(of, td028ttec1_of_match);
364
365static const struct spi_device_id td028ttec1_ids[] = {
366 { "toppoly,td028ttec1", 0 },
367 { "tpo,td028ttec1", 0},
368 { /* sentinel */ }
369};
370
371MODULE_DEVICE_TABLE(spi, td028ttec1_ids);
372
373
374static struct spi_driver td028ttec1_spi_driver = {
375 .probe = td028ttec1_panel_probe,
376 .remove = td028ttec1_panel_remove,
377 .id_table = td028ttec1_ids,
378
379 .driver = {
380 .name = "panel-tpo-td028ttec1",
381 .of_match_table = td028ttec1_of_match,
382 .suppress_bind_attrs = true,
383 },
384};
385
386module_spi_driver(td028ttec1_spi_driver);
387
388MODULE_AUTHOR("H. Nikolaus Schaller <hns@goldelico.com>");
389MODULE_DESCRIPTION("Toppoly TD028TTEC1 panel driver");
390MODULE_LICENSE("GPL");
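
For reference, jbt_reg_write_1() and jbt_reg_write_2() above send each JBT register access as 9-bit payloads carried in 16-bit buffer entries, with bit 8 acting as the command/data flag (JBT_COMMAND = 0x000, JBT_DATA = 0x100). The standalone sketch below reproduces that framing for the 16-bit output-control write issued in the enable path; it is illustrative only and not part of the driver.

    /* Minimal user-space sketch of the word framing used by
     * jbt_reg_write_2(). Illustrative only; not kernel code.
     */
    #include <stdio.h>
    #include <stdint.h>

    #define JBT_COMMAND     0x000
    #define JBT_DATA        0x100

    /* Fill buf[0..2] with the words for a 16-bit register write and
     * return the number of words, mirroring jbt_reg_write_2(). */
    static int jbt_frame_write_2(uint16_t *buf, uint8_t reg, uint16_t data)
    {
            buf[0] = JBT_COMMAND | reg;
            buf[1] = JBT_DATA | (data >> 8);
            buf[2] = JBT_DATA | (data & 0xff);
            return 3;
    }

    int main(void)
    {
            uint16_t buf[3];
            /* JBT_REG_OUTPUT_CONTROL (0xb8) <- 0xfff9, as in panel enable */
            int i, n = jbt_frame_write_2(buf, 0xb8, 0xfff9);

            for (i = 0; i < n; i++)
                    printf("word %d: 0x%03x\n", i, buf[i]);
            return 0;
    }

It prints 0x0b8, 0x1ff and 0x1f9, the three 9-bit words the driver clocks out for that write.
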
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
deleted file mode 100644
index ce09217da597..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
+++ /dev/null
@@ -1,513 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * TPO TD043MTEA1 Panel driver
4 *
5 * Author: Gražvydas Ignotas <notasas@gmail.com>
6 * Converted to new DSS device model: Tomi Valkeinen <tomi.valkeinen@ti.com>
7 */
8
9#include <linux/delay.h>
10#include <linux/err.h>
11#include <linux/gpio/consumer.h>
12#include <linux/module.h>
13#include <linux/regulator/consumer.h>
14#include <linux/slab.h>
15#include <linux/spi/spi.h>
16
17#include "../dss/omapdss.h"
18
19#define TPO_R02_MODE(x) ((x) & 7)
20#define TPO_R02_MODE_800x480 7
21#define TPO_R02_NCLK_RISING BIT(3)
22#define TPO_R02_HSYNC_HIGH BIT(4)
23#define TPO_R02_VSYNC_HIGH BIT(5)
24
25#define TPO_R03_NSTANDBY BIT(0)
26#define TPO_R03_EN_CP_CLK BIT(1)
27#define TPO_R03_EN_VGL_PUMP BIT(2)
28#define TPO_R03_EN_PWM BIT(3)
29#define TPO_R03_DRIVING_CAP_100 BIT(4)
30#define TPO_R03_EN_PRE_CHARGE BIT(6)
31#define TPO_R03_SOFTWARE_CTL BIT(7)
32
33#define TPO_R04_NFLIP_H BIT(0)
34#define TPO_R04_NFLIP_V BIT(1)
35#define TPO_R04_CP_CLK_FREQ_1H BIT(2)
36#define TPO_R04_VGL_FREQ_1H BIT(4)
37
38#define TPO_R03_VAL_NORMAL (TPO_R03_NSTANDBY | TPO_R03_EN_CP_CLK | \
39 TPO_R03_EN_VGL_PUMP | TPO_R03_EN_PWM | \
40 TPO_R03_DRIVING_CAP_100 | TPO_R03_EN_PRE_CHARGE | \
41 TPO_R03_SOFTWARE_CTL)
42
43#define TPO_R03_VAL_STANDBY (TPO_R03_DRIVING_CAP_100 | \
44 TPO_R03_EN_PRE_CHARGE | TPO_R03_SOFTWARE_CTL)
45
46static const u16 tpo_td043_def_gamma[12] = {
47 105, 315, 381, 431, 490, 537, 579, 686, 780, 837, 880, 1023
48};
49
50struct panel_drv_data {
51 struct omap_dss_device dssdev;
52
53 struct videomode vm;
54
55 struct spi_device *spi;
56 struct regulator *vcc_reg;
57 struct gpio_desc *reset_gpio;
58 u16 gamma[12];
59 u32 mode;
60 u32 vmirror:1;
61 u32 powered_on:1;
62 u32 spi_suspended:1;
63 u32 power_on_resume:1;
64};
65
66static const struct videomode tpo_td043_vm = {
67 .hactive = 800,
68 .vactive = 480,
69
70 .pixelclock = 36000000,
71
72 .hsync_len = 1,
73 .hfront_porch = 68,
74 .hback_porch = 214,
75
76 .vsync_len = 1,
77 .vfront_porch = 39,
78 .vback_porch = 34,
79
80 .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
81};
82
83#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
84
85static int tpo_td043_write(struct spi_device *spi, u8 addr, u8 data)
86{
87 struct spi_message m;
88 struct spi_transfer xfer;
89 u16 w;
90 int r;
91
92 spi_message_init(&m);
93
94 memset(&xfer, 0, sizeof(xfer));
95
96 w = ((u16)addr << 10) | (1 << 8) | data;
97 xfer.tx_buf = &w;
98 xfer.bits_per_word = 16;
99 xfer.len = 2;
100 spi_message_add_tail(&xfer, &m);
101
102 r = spi_sync(spi, &m);
103 if (r < 0)
104 dev_warn(&spi->dev, "failed to write to LCD reg (%d)\n", r);
105 return r;
106}
107
108static void tpo_td043_write_gamma(struct spi_device *spi, u16 gamma[12])
109{
110 u8 i, val;
111
112 /* gamma bits [9:8] */
113 for (val = i = 0; i < 4; i++)
114 val |= (gamma[i] & 0x300) >> ((i + 1) * 2);
115 tpo_td043_write(spi, 0x11, val);
116
117 for (val = i = 0; i < 4; i++)
118 val |= (gamma[i+4] & 0x300) >> ((i + 1) * 2);
119 tpo_td043_write(spi, 0x12, val);
120
121 for (val = i = 0; i < 4; i++)
122 val |= (gamma[i+8] & 0x300) >> ((i + 1) * 2);
123 tpo_td043_write(spi, 0x13, val);
124
125 /* gamma bits [7:0] */
126 for (val = i = 0; i < 12; i++)
127 tpo_td043_write(spi, 0x14 + i, gamma[i] & 0xff);
128}
129
130static int tpo_td043_write_mirror(struct spi_device *spi, bool h, bool v)
131{
132 u8 reg4 = TPO_R04_NFLIP_H | TPO_R04_NFLIP_V |
133 TPO_R04_CP_CLK_FREQ_1H | TPO_R04_VGL_FREQ_1H;
134 if (h)
135 reg4 &= ~TPO_R04_NFLIP_H;
136 if (v)
137 reg4 &= ~TPO_R04_NFLIP_V;
138
139 return tpo_td043_write(spi, 4, reg4);
140}
141
142static ssize_t tpo_td043_vmirror_show(struct device *dev,
143 struct device_attribute *attr, char *buf)
144{
145 struct panel_drv_data *ddata = dev_get_drvdata(dev);
146
147 return snprintf(buf, PAGE_SIZE, "%d\n", ddata->vmirror);
148}
149
150static ssize_t tpo_td043_vmirror_store(struct device *dev,
151 struct device_attribute *attr, const char *buf, size_t count)
152{
153 struct panel_drv_data *ddata = dev_get_drvdata(dev);
154 int val;
155 int ret;
156
157 ret = kstrtoint(buf, 0, &val);
158 if (ret < 0)
159 return ret;
160
161 val = !!val;
162
163 ret = tpo_td043_write_mirror(ddata->spi, false, val);
164 if (ret < 0)
165 return ret;
166
167 ddata->vmirror = val;
168
169 return count;
170}
171
172static ssize_t tpo_td043_mode_show(struct device *dev,
173 struct device_attribute *attr, char *buf)
174{
175 struct panel_drv_data *ddata = dev_get_drvdata(dev);
176
177 return snprintf(buf, PAGE_SIZE, "%d\n", ddata->mode);
178}
179
180static ssize_t tpo_td043_mode_store(struct device *dev,
181 struct device_attribute *attr, const char *buf, size_t count)
182{
183 struct panel_drv_data *ddata = dev_get_drvdata(dev);
184 long val;
185 int ret;
186
187 ret = kstrtol(buf, 0, &val);
188 if (ret != 0 || val & ~7)
189 return -EINVAL;
190
191 ddata->mode = val;
192
193 val |= TPO_R02_NCLK_RISING;
194 tpo_td043_write(ddata->spi, 2, val);
195
196 return count;
197}
198
199static ssize_t tpo_td043_gamma_show(struct device *dev,
200 struct device_attribute *attr, char *buf)
201{
202 struct panel_drv_data *ddata = dev_get_drvdata(dev);
203 ssize_t len = 0;
204 int ret;
205 int i;
206
207 for (i = 0; i < ARRAY_SIZE(ddata->gamma); i++) {
208 ret = snprintf(buf + len, PAGE_SIZE - len, "%u ",
209 ddata->gamma[i]);
210 if (ret < 0)
211 return ret;
212 len += ret;
213 }
214 buf[len - 1] = '\n';
215
216 return len;
217}
218
219static ssize_t tpo_td043_gamma_store(struct device *dev,
220 struct device_attribute *attr, const char *buf, size_t count)
221{
222 struct panel_drv_data *ddata = dev_get_drvdata(dev);
223 unsigned int g[12];
224 int ret;
225 int i;
226
227 ret = sscanf(buf, "%u %u %u %u %u %u %u %u %u %u %u %u",
228 &g[0], &g[1], &g[2], &g[3], &g[4], &g[5],
229 &g[6], &g[7], &g[8], &g[9], &g[10], &g[11]);
230
231 if (ret != 12)
232 return -EINVAL;
233
234 for (i = 0; i < 12; i++)
235 ddata->gamma[i] = g[i];
236
237 tpo_td043_write_gamma(ddata->spi, ddata->gamma);
238
239 return count;
240}
241
242static DEVICE_ATTR(vmirror, S_IRUGO | S_IWUSR,
243 tpo_td043_vmirror_show, tpo_td043_vmirror_store);
244static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
245 tpo_td043_mode_show, tpo_td043_mode_store);
246static DEVICE_ATTR(gamma, S_IRUGO | S_IWUSR,
247 tpo_td043_gamma_show, tpo_td043_gamma_store);
248
249static struct attribute *tpo_td043_attrs[] = {
250 &dev_attr_vmirror.attr,
251 &dev_attr_mode.attr,
252 &dev_attr_gamma.attr,
253 NULL,
254};
255
256static const struct attribute_group tpo_td043_attr_group = {
257 .attrs = tpo_td043_attrs,
258};
259
260static int tpo_td043_power_on(struct panel_drv_data *ddata)
261{
262 int r;
263
264 if (ddata->powered_on)
265 return 0;
266
267 r = regulator_enable(ddata->vcc_reg);
268 if (r != 0)
269 return r;
270
271 /* wait for panel to stabilize */
272 msleep(160);
273
274 gpiod_set_value(ddata->reset_gpio, 0);
275
276 tpo_td043_write(ddata->spi, 2,
277 TPO_R02_MODE(ddata->mode) | TPO_R02_NCLK_RISING);
278 tpo_td043_write(ddata->spi, 3, TPO_R03_VAL_NORMAL);
279 tpo_td043_write(ddata->spi, 0x20, 0xf0);
280 tpo_td043_write(ddata->spi, 0x21, 0xf0);
281 tpo_td043_write_mirror(ddata->spi, false, ddata->vmirror);
282 tpo_td043_write_gamma(ddata->spi, ddata->gamma);
283
284 ddata->powered_on = 1;
285 return 0;
286}
287
288static void tpo_td043_power_off(struct panel_drv_data *ddata)
289{
290 if (!ddata->powered_on)
291 return;
292
293 tpo_td043_write(ddata->spi, 3,
294 TPO_R03_VAL_STANDBY | TPO_R03_EN_PWM);
295
296 gpiod_set_value(ddata->reset_gpio, 1);
297
298 /* wait for at least 2 vsyncs before cutting off power */
299 msleep(50);
300
301 tpo_td043_write(ddata->spi, 3, TPO_R03_VAL_STANDBY);
302
303 regulator_disable(ddata->vcc_reg);
304
305 ddata->powered_on = 0;
306}
307
308static int tpo_td043_connect(struct omap_dss_device *src,
309 struct omap_dss_device *dst)
310{
311 return 0;
312}
313
314static void tpo_td043_disconnect(struct omap_dss_device *src,
315 struct omap_dss_device *dst)
316{
317}
318
319static void tpo_td043_enable(struct omap_dss_device *dssdev)
320{
321 struct panel_drv_data *ddata = to_panel_data(dssdev);
322 int r;
323
324 /*
325 * If we are resuming from system suspend, SPI clocks might not be
326 * enabled yet, so we'll program the LCD from SPI PM resume callback.
327 */
328 if (!ddata->spi_suspended) {
329 r = tpo_td043_power_on(ddata);
330 if (r) {
331 dev_err(&ddata->spi->dev, "%s: power on failed (%d)\n",
332 __func__, r);
333 return;
334 }
335 }
336}
337
338static void tpo_td043_disable(struct omap_dss_device *dssdev)
339{
340 struct panel_drv_data *ddata = to_panel_data(dssdev);
341
342 if (!ddata->spi_suspended)
343 tpo_td043_power_off(ddata);
344}
345
346static int tpo_td043_get_modes(struct omap_dss_device *dssdev,
347 struct drm_connector *connector)
348{
349 struct panel_drv_data *ddata = to_panel_data(dssdev);
350
351 return omapdss_display_get_modes(connector, &ddata->vm);
352}
353
354static const struct omap_dss_device_ops tpo_td043_ops = {
355 .connect = tpo_td043_connect,
356 .disconnect = tpo_td043_disconnect,
357
358 .enable = tpo_td043_enable,
359 .disable = tpo_td043_disable,
360
361 .get_modes = tpo_td043_get_modes,
362};
363
364static int tpo_td043_probe(struct spi_device *spi)
365{
366 struct panel_drv_data *ddata;
367 struct omap_dss_device *dssdev;
368 struct gpio_desc *gpio;
369 int r;
370
371 dev_dbg(&spi->dev, "%s\n", __func__);
372
373 spi->bits_per_word = 16;
374 spi->mode = SPI_MODE_0;
375
376 r = spi_setup(spi);
377 if (r < 0) {
378 dev_err(&spi->dev, "spi_setup failed: %d\n", r);
379 return r;
380 }
381
382 ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL);
383 if (ddata == NULL)
384 return -ENOMEM;
385
386 dev_set_drvdata(&spi->dev, ddata);
387
388 ddata->spi = spi;
389
390 ddata->mode = TPO_R02_MODE_800x480;
391 memcpy(ddata->gamma, tpo_td043_def_gamma, sizeof(ddata->gamma));
392
393 ddata->vcc_reg = devm_regulator_get(&spi->dev, "vcc");
394 if (IS_ERR(ddata->vcc_reg)) {
395 dev_err(&spi->dev, "failed to get LCD VCC regulator\n");
396 return PTR_ERR(ddata->vcc_reg);
397 }
398
399 gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_HIGH);
400 if (IS_ERR(gpio)) {
401 dev_err(&spi->dev, "failed to get reset gpio\n");
402 return PTR_ERR(gpio);
403 }
404
405 ddata->reset_gpio = gpio;
406
407 r = sysfs_create_group(&spi->dev.kobj, &tpo_td043_attr_group);
408 if (r) {
409 dev_err(&spi->dev, "failed to create sysfs files\n");
410 return r;
411 }
412
413 ddata->vm = tpo_td043_vm;
414
415 dssdev = &ddata->dssdev;
416 dssdev->dev = &spi->dev;
417 dssdev->ops = &tpo_td043_ops;
418 dssdev->type = OMAP_DISPLAY_TYPE_DPI;
419 dssdev->display = true;
420 dssdev->owner = THIS_MODULE;
421 dssdev->of_ports = BIT(0);
422 dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
423
424 /*
425 * Note: According to the panel documentation:
426 * SYNC needs to be driven on the FALLING edge
427 */
428 dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH
429 | DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE
430 | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;
431
432 omapdss_display_init(dssdev);
433 omapdss_device_register(dssdev);
434
435 return 0;
436}
437
438static int tpo_td043_remove(struct spi_device *spi)
439{
440 struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
441 struct omap_dss_device *dssdev = &ddata->dssdev;
442
443 dev_dbg(&ddata->spi->dev, "%s\n", __func__);
444
445 omapdss_device_unregister(dssdev);
446
447 if (omapdss_device_is_enabled(dssdev))
448 tpo_td043_disable(dssdev);
449
450 sysfs_remove_group(&spi->dev.kobj, &tpo_td043_attr_group);
451
452 return 0;
453}
454
455#ifdef CONFIG_PM_SLEEP
456static int tpo_td043_spi_suspend(struct device *dev)
457{
458 struct panel_drv_data *ddata = dev_get_drvdata(dev);
459
460 dev_dbg(dev, "tpo_td043_spi_suspend, tpo %p\n", ddata);
461
462 ddata->power_on_resume = ddata->powered_on;
463 tpo_td043_power_off(ddata);
464 ddata->spi_suspended = 1;
465
466 return 0;
467}
468
469static int tpo_td043_spi_resume(struct device *dev)
470{
471 struct panel_drv_data *ddata = dev_get_drvdata(dev);
472 int ret;
473
474 dev_dbg(dev, "tpo_td043_spi_resume\n");
475
476 if (ddata->power_on_resume) {
477 ret = tpo_td043_power_on(ddata);
478 if (ret)
479 return ret;
480 }
481 ddata->spi_suspended = 0;
482
483 return 0;
484}
485#endif
486
487static SIMPLE_DEV_PM_OPS(tpo_td043_spi_pm,
488 tpo_td043_spi_suspend, tpo_td043_spi_resume);
489
490static const struct of_device_id tpo_td043_of_match[] = {
491 { .compatible = "omapdss,tpo,td043mtea1", },
492 {},
493};
494
495MODULE_DEVICE_TABLE(of, tpo_td043_of_match);
496
497static struct spi_driver tpo_td043_spi_driver = {
498 .driver = {
499 .name = "panel-tpo-td043mtea1",
500 .pm = &tpo_td043_spi_pm,
501 .of_match_table = tpo_td043_of_match,
502 .suppress_bind_attrs = true,
503 },
504 .probe = tpo_td043_probe,
505 .remove = tpo_td043_remove,
506};
507
508module_spi_driver(tpo_td043_spi_driver);
509
510MODULE_ALIAS("spi:tpo,td043mtea1");
511MODULE_AUTHOR("Gražvydas Ignotas <notasas@gmail.com>");
512MODULE_DESCRIPTION("TPO TD043MTEA1 LCD Driver");
513MODULE_LICENSE("GPL");
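
For reference, tpo_td043_write() above builds each 16-bit register-write word as (addr << 10) | (1 << 8) | data, and tpo_td043_write_gamma() splits the twelve 10-bit gamma entries into their top two bits (packed four per register into 0x11..0x13) and their low bytes (one per register from 0x14). The sketch below replays that packing on the driver's default gamma table; it is illustrative only and not part of the driver.

    /* Minimal user-space sketch of the register word and gamma packing
     * used by tpo_td043_write() and tpo_td043_write_gamma().
     * Illustrative only; not kernel code.
     */
    #include <stdio.h>
    #include <stdint.h>

    static uint16_t tpo_word(uint8_t addr, uint8_t data)
    {
            return ((uint16_t)addr << 10) | (1 << 8) | data;
    }

    int main(void)
    {
            static const uint16_t gamma[12] = {
                    105, 315, 381, 431, 490, 537, 579, 686, 780, 837, 880, 1023
            };
            int i, j;

            /* gamma bits [9:8], four entries per register (0x11, 0x12, 0x13) */
            for (j = 0; j < 3; j++) {
                    uint8_t val = 0;

                    for (i = 0; i < 4; i++)
                            val |= (gamma[j * 4 + i] & 0x300) >> ((i + 1) * 2);
                    printf("reg 0x%02x <- 0x%04x\n", 0x11 + j,
                           tpo_word(0x11 + j, val));
            }

            /* gamma bits [7:0], one entry per register starting at 0x14 */
            for (i = 0; i < 12; i++)
                    printf("reg 0x%02x <- 0x%04x\n", 0x14 + i,
                           tpo_word(0x14 + i, gamma[i] & 0xff));

            return 0;
    }

Every write in the driver, including the mode and mirror registers, goes through the same 16-bit framing.
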
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
index a140de79c50e..31502857f013 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
@@ -176,17 +176,10 @@ static const struct of_device_id omapdss_of_match[] __initconst = {
 static const struct of_device_id omapdss_of_fixups_whitelist[] __initconst = {
        { .compatible = "composite-video-connector" },
        { .compatible = "hdmi-connector" },
-       { .compatible = "lgphilips,lb035q02" },
-       { .compatible = "nec,nl8048hl11" },
        { .compatible = "panel-dsi-cm" },
-       { .compatible = "sharp,ls037v7dw01" },
-       { .compatible = "sony,acx565akm" },
        { .compatible = "svideo-connector" },
        { .compatible = "ti,opa362" },
        { .compatible = "ti,tpd12s015" },
-       { .compatible = "toppoly,td028ttec1" },
-       { .compatible = "tpo,td028ttec1" },
-       { .compatible = "tpo,td043mtea1" },
        {},
 };
 
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index eaecd40cc32e..f152bc4eeb53 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -103,6 +103,14 @@ config DRM_PANEL_SAMSUNG_LD9040
103 depends on OF && SPI 103 depends on OF && SPI
104 select VIDEOMODE_HELPERS 104 select VIDEOMODE_HELPERS
105 105
106config DRM_PANEL_LG_LB035Q02
107 tristate "LG LB035Q024573 RGB panel"
108 depends on GPIOLIB && OF && SPI
109 help
110 Say Y here if you want to enable support for the LB035Q02 RGB panel
111 (found on the Gumstix Overo Palo35 board). To compile this driver as
112 a module, choose M here.
113
106config DRM_PANEL_LG_LG4573 114config DRM_PANEL_LG_LG4573
107 tristate "LG4573 RGB/SPI panel" 115 tristate "LG4573 RGB/SPI panel"
108 depends on OF && SPI 116 depends on OF && SPI
@@ -111,6 +119,14 @@ config DRM_PANEL_LG_LG4573
111 Say Y here if you want to enable support for LG4573 RGB panel. 119 Say Y here if you want to enable support for LG4573 RGB panel.
112 To compile this driver as a module, choose M here. 120 To compile this driver as a module, choose M here.
113 121
122config DRM_PANEL_NEC_NL8048HL11
123 tristate "NEC NL8048HL11 RGB panel"
124 depends on GPIOLIB && OF && SPI
125 help
126 Say Y here if you want to enable support for the NEC NL8048HL11 RGB
127 panel (found on the Zoom2/3/3630 SDP boards). To compile this driver
128 as a module, choose M here.
129
114config DRM_PANEL_NOVATEK_NT39016 130config DRM_PANEL_NOVATEK_NT39016
115 tristate "Novatek NT39016 RGB/SPI panel" 131 tristate "Novatek NT39016 RGB/SPI panel"
116 depends on OF && SPI 132 depends on OF && SPI
@@ -266,6 +282,13 @@ config DRM_PANEL_SHARP_LQ101R1SX01
266 To compile this driver as a module, choose M here: the module 282 To compile this driver as a module, choose M here: the module
267 will be called panel-sharp-lq101r1sx01. 283 will be called panel-sharp-lq101r1sx01.
268 284
285config DRM_PANEL_SHARP_LS037V7DW01
286 tristate "Sharp LS037V7DW01 VGA LCD panel"
287 depends on GPIOLIB && OF && REGULATOR
288 help
289 Say Y here if you want to enable support for Sharp LS037V7DW01 VGA
290 (480x640) LCD panel (found on the TI SDP3430 board).
291
269config DRM_PANEL_SHARP_LS043T1LE01 292config DRM_PANEL_SHARP_LS043T1LE01
270 tristate "Sharp LS043T1LE01 qHD video mode panel" 293 tristate "Sharp LS043T1LE01 qHD video mode panel"
271 depends on OF 294 depends on OF
@@ -293,6 +316,29 @@ config DRM_PANEL_SITRONIX_ST7789V
 	  Say Y here if you want to enable support for the Sitronix
 	  ST7789V controller for 240x320 LCD panels
 
+config DRM_PANEL_SONY_ACX565AKM
+	tristate "Sony ACX565AKM panel"
+	depends on GPIOLIB && OF && SPI
+	depends on BACKLIGHT_CLASS_DEVICE
+	help
+	  Say Y here if you want to enable support for the Sony ACX565AKM
+	  800x600 3.5" panel (found on the Nokia N900).
+
+config DRM_PANEL_TPO_TD028TTEC1
+	tristate "Toppoly (TPO) TD028TTEC1 panel driver"
+	depends on OF && SPI
+	depends on BACKLIGHT_CLASS_DEVICE
+	help
+	  Say Y here if you want to enable support for TPO TD028TTEC1 480x640
+	  2.8" panel (found on the OpenMoko Neo FreeRunner and Neo 1973).
+
+config DRM_PANEL_TPO_TD043MTEA1
+	tristate "Toppoly (TPO) TD043MTEA1 panel driver"
+	depends on GPIOLIB && OF && REGULATOR && SPI
+	help
+	  Say Y here if you want to enable support for TPO TD043MTEA1 800x480
+	  4.3" panel (found on the OMAP3 Pandora board).
+
 config DRM_PANEL_TPO_TPG110
 	tristate "TPO TPG 800x400 panel"
 	depends on OF && SPI && GPIOLIB
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 62dae45f8f74..b6cd39fe0f20 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -8,7 +8,9 @@ obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9881C) += panel-ilitek-ili9881c.o
 obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o
 obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o
 obj-$(CONFIG_DRM_PANEL_KINGDISPLAY_KD097D04) += panel-kingdisplay-kd097d04.o
+obj-$(CONFIG_DRM_PANEL_LG_LB035Q02) += panel-lg-lb035q02.o
 obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
+obj-$(CONFIG_DRM_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o
 obj-$(CONFIG_DRM_PANEL_NOVATEK_NT39016) += panel-novatek-nt39016.o
 obj-$(CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO) += panel-olimex-lcd-olinuxino.o
 obj-$(CONFIG_DRM_PANEL_ORISETECH_OTM8009A) += panel-orisetech-otm8009a.o
@@ -27,8 +29,12 @@ obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0) += panel-samsung-s6e63m0.o
 obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0) += panel-samsung-s6e8aa0.o
 obj-$(CONFIG_DRM_PANEL_SEIKO_43WVF1G) += panel-seiko-43wvf1g.o
 obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o
+obj-$(CONFIG_DRM_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o
 obj-$(CONFIG_DRM_PANEL_SHARP_LS043T1LE01) += panel-sharp-ls043t1le01.o
 obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7701) += panel-sitronix-st7701.o
 obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7789V) += panel-sitronix-st7789v.o
+obj-$(CONFIG_DRM_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o
+obj-$(CONFIG_DRM_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o
+obj-$(CONFIG_DRM_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
 obj-$(CONFIG_DRM_PANEL_TPO_TPG110) += panel-tpo-tpg110.o
 obj-$(CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA) += panel-truly-nt35597.o
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
index 53dd1e128795..3c58f63adbf7 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
@@ -349,7 +349,6 @@ static const struct regmap_config ili9322_regmap_config = {
 
 static int ili9322_init(struct drm_panel *panel, struct ili9322 *ili)
 {
-	struct drm_connector *connector = panel->connector;
 	u8 reg;
 	int ret;
 	int i;
@@ -407,23 +406,11 @@ static int ili9322_init(struct drm_panel *panel, struct ili9322 *ili)
 	 * Polarity and inverted color order for RGB input.
 	 * None of this applies in the BT.656 mode.
 	 */
-	if (ili->conf->dclk_active_high) {
+	reg = 0;
+	if (ili->conf->dclk_active_high)
 		reg = ILI9322_POL_DCLK;
-		connector->display_info.bus_flags |=
-			DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
-	} else {
-		reg = 0;
-		connector->display_info.bus_flags |=
-			DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;
-	}
-	if (ili->conf->de_active_high) {
+	if (ili->conf->de_active_high)
 		reg |= ILI9322_POL_DE;
-		connector->display_info.bus_flags |=
-			DRM_BUS_FLAG_DE_HIGH;
-	} else {
-		connector->display_info.bus_flags |=
-			DRM_BUS_FLAG_DE_LOW;
-	}
 	if (ili->conf->hsync_active_high)
 		reg |= ILI9322_POL_HSYNC;
 	if (ili->conf->vsync_active_high)
@@ -659,9 +646,20 @@ static int ili9322_get_modes(struct drm_panel *panel)
 	struct drm_connector *connector = panel->connector;
 	struct ili9322 *ili = panel_to_ili9322(panel);
 	struct drm_display_mode *mode;
+	struct drm_display_info *info;
+
+	info = &connector->display_info;
+	info->width_mm = ili->conf->width_mm;
+	info->height_mm = ili->conf->height_mm;
+	if (ili->conf->dclk_active_high)
+		info->bus_flags |= DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
+	else
+		info->bus_flags |= DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;
 
-	connector->display_info.width_mm = ili->conf->width_mm;
-	connector->display_info.height_mm = ili->conf->height_mm;
+	if (ili->conf->de_active_high)
+		info->bus_flags |= DRM_BUS_FLAG_DE_HIGH;
+	else
+		info->bus_flags |= DRM_BUS_FLAG_DE_LOW;
 
 	switch (ili->input) {
 	case ILI9322_INPUT_SRGB_DUMMY_320X240:
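
Editor's note: the ili9322 hunks above move the physical size and bus-flag setup out of ili9322_init() and into ili9322_get_modes(), because panel->connector is only valid once the panel has been attached to a connector; filling display_info at init time touched a connector that did not exist yet. A condensed sketch of the resulting pattern (mode duplication elided, dimensions purely illustrative, not taken from any real panel):

static int example_get_modes(struct drm_panel *panel)
{
	struct drm_connector *connector = panel->connector;
	struct drm_display_info *info = &connector->display_info;

	/* Physical size and bus flags describe the attached connector. */
	info->width_mm = 60;			/* illustrative values only */
	info->height_mm = 45;
	info->bus_flags |= DRM_BUS_FLAG_DE_HIGH
			 | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;

	/* ...drm_mode_duplicate()/drm_mode_probed_add() the fixed mode here... */

	return 1;	/* number of modes added */
}
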
diff --git a/drivers/gpu/drm/panel/panel-lg-lb035q02.c b/drivers/gpu/drm/panel/panel-lg-lb035q02.c
new file mode 100644
index 000000000000..fc82a525b071
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-lg-lb035q02.c
@@ -0,0 +1,237 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * LG.Philips LB035Q02 LCD Panel Driver
4 *
5 * Copyright (C) 2019 Texas Instruments Incorporated
6 *
7 * Based on the omapdrm-specific panel-lgphilips-lb035q02 driver
8 *
9 * Copyright (C) 2013 Texas Instruments Incorporated
10 * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
11 *
12 * Based on a driver by: Steve Sakoman <steve@sakoman.com>
13 */
14
15#include <linux/gpio/consumer.h>
16#include <linux/module.h>
17#include <linux/spi/spi.h>
18
19#include <drm/drm_connector.h>
20#include <drm/drm_modes.h>
21#include <drm/drm_panel.h>
22
23struct lb035q02_device {
24 struct drm_panel panel;
25
26 struct spi_device *spi;
27 struct gpio_desc *enable_gpio;
28};
29
30#define to_lb035q02_device(p) container_of(p, struct lb035q02_device, panel)
31
32static int lb035q02_write(struct lb035q02_device *lcd, u16 reg, u16 val)
33{
34 struct spi_message msg;
35 struct spi_transfer index_xfer = {
36 .len = 3,
37 .cs_change = 1,
38 };
39 struct spi_transfer value_xfer = {
40 .len = 3,
41 };
42 u8 buffer[16];
43
44 spi_message_init(&msg);
45
46 /* register index */
47 buffer[0] = 0x70;
48 buffer[1] = 0x00;
49 buffer[2] = reg & 0x7f;
50 index_xfer.tx_buf = buffer;
51 spi_message_add_tail(&index_xfer, &msg);
52
53 /* register value */
54 buffer[4] = 0x72;
55 buffer[5] = val >> 8;
56 buffer[6] = val;
57 value_xfer.tx_buf = buffer + 4;
58 spi_message_add_tail(&value_xfer, &msg);
59
60 return spi_sync(lcd->spi, &msg);
61}
62
63static int lb035q02_init(struct lb035q02_device *lcd)
64{
65 /* Init sequence from page 28 of the lb035q02 spec. */
66 static const struct {
67 u16 index;
68 u16 value;
69 } init_data[] = {
70 { 0x01, 0x6300 },
71 { 0x02, 0x0200 },
72 { 0x03, 0x0177 },
73 { 0x04, 0x04c7 },
74 { 0x05, 0xffc0 },
75 { 0x06, 0xe806 },
76 { 0x0a, 0x4008 },
77 { 0x0b, 0x0000 },
78 { 0x0d, 0x0030 },
79 { 0x0e, 0x2800 },
80 { 0x0f, 0x0000 },
81 { 0x16, 0x9f80 },
82 { 0x17, 0x0a0f },
83 { 0x1e, 0x00c1 },
84 { 0x30, 0x0300 },
85 { 0x31, 0x0007 },
86 { 0x32, 0x0000 },
87 { 0x33, 0x0000 },
88 { 0x34, 0x0707 },
89 { 0x35, 0x0004 },
90 { 0x36, 0x0302 },
91 { 0x37, 0x0202 },
92 { 0x3a, 0x0a0d },
93 { 0x3b, 0x0806 },
94 };
95
96 unsigned int i;
97 int ret;
98
99 for (i = 0; i < ARRAY_SIZE(init_data); ++i) {
100 ret = lb035q02_write(lcd, init_data[i].index,
101 init_data[i].value);
102 if (ret < 0)
103 return ret;
104 }
105
106 return 0;
107}
108
109static int lb035q02_disable(struct drm_panel *panel)
110{
111 struct lb035q02_device *lcd = to_lb035q02_device(panel);
112
113 gpiod_set_value_cansleep(lcd->enable_gpio, 0);
114
115 return 0;
116}
117
118static int lb035q02_enable(struct drm_panel *panel)
119{
120 struct lb035q02_device *lcd = to_lb035q02_device(panel);
121
122 gpiod_set_value_cansleep(lcd->enable_gpio, 1);
123
124 return 0;
125}
126
127static const struct drm_display_mode lb035q02_mode = {
128 .clock = 6500,
129 .hdisplay = 320,
130 .hsync_start = 320 + 20,
131 .hsync_end = 320 + 20 + 2,
132 .htotal = 320 + 20 + 2 + 68,
133 .vdisplay = 240,
134 .vsync_start = 240 + 4,
135 .vsync_end = 240 + 4 + 2,
136 .vtotal = 240 + 4 + 2 + 18,
137 .vrefresh = 60,
138 .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
139 .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
140 .width_mm = 70,
141 .height_mm = 53,
142};
143
144static int lb035q02_get_modes(struct drm_panel *panel)
145{
146 struct drm_connector *connector = panel->connector;
147 struct drm_display_mode *mode;
148
149 mode = drm_mode_duplicate(panel->drm, &lb035q02_mode);
150 if (!mode)
151 return -ENOMEM;
152
153 drm_mode_set_name(mode);
154 drm_mode_probed_add(connector, mode);
155
156 connector->display_info.width_mm = lb035q02_mode.width_mm;
157 connector->display_info.height_mm = lb035q02_mode.height_mm;
158 /*
159 * FIXME: According to the datasheet pixel data is sampled on the
160 * rising edge of the clock, but the code running on the Gumstix Overo
161 * Palo35 indicates sampling on the negative edge. This should be
162 * tested on a real device.
163 */
164 connector->display_info.bus_flags = DRM_BUS_FLAG_DE_HIGH
165 | DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE
166 | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE;
167
168 return 1;
169}
170
171static const struct drm_panel_funcs lb035q02_funcs = {
172 .disable = lb035q02_disable,
173 .enable = lb035q02_enable,
174 .get_modes = lb035q02_get_modes,
175};
176
177static int lb035q02_probe(struct spi_device *spi)
178{
179 struct lb035q02_device *lcd;
180 int ret;
181
182 lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL);
183 if (!lcd)
184 return -ENOMEM;
185
186 spi_set_drvdata(spi, lcd);
187 lcd->spi = spi;
188
189 lcd->enable_gpio = devm_gpiod_get(&spi->dev, "enable", GPIOD_OUT_LOW);
190 if (IS_ERR(lcd->enable_gpio)) {
191 dev_err(&spi->dev, "failed to parse enable gpio\n");
192 return PTR_ERR(lcd->enable_gpio);
193 }
194
195 ret = lb035q02_init(lcd);
196 if (ret < 0)
197 return ret;
198
199 drm_panel_init(&lcd->panel);
200 lcd->panel.dev = &lcd->spi->dev;
201 lcd->panel.funcs = &lb035q02_funcs;
202
203 return drm_panel_add(&lcd->panel);
204}
205
206static int lb035q02_remove(struct spi_device *spi)
207{
208 struct lb035q02_device *lcd = spi_get_drvdata(spi);
209
210 drm_panel_remove(&lcd->panel);
211 drm_panel_disable(&lcd->panel);
212
213 return 0;
214}
215
216static const struct of_device_id lb035q02_of_match[] = {
217 { .compatible = "lgphilips,lb035q02", },
218 { /* sentinel */ },
219};
220
221MODULE_DEVICE_TABLE(of, lb035q02_of_match);
222
223static struct spi_driver lb035q02_driver = {
224 .probe = lb035q02_probe,
225 .remove = lb035q02_remove,
226 .driver = {
227 .name = "panel-lg-lb035q02",
228 .of_match_table = lb035q02_of_match,
229 },
230};
231
232module_spi_driver(lb035q02_driver);
233
234MODULE_ALIAS("spi:lgphilips,lb035q02");
235MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
236MODULE_DESCRIPTION("LG.Philips LB035Q02 LCD Panel driver");
237MODULE_LICENSE("GPL");
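
Editor's note: a rough sketch of how a display controller driver would consume the new LB035Q02 panel (or any of the other drm_panel drivers added in this series). The "panel" phandle property name and example_bind_panel() are illustrative assumptions; of_drm_find_panel() and drm_panel_attach() are the real helpers at this kernel version:

#include <linux/err.h>
#include <linux/of.h>
#include <drm/drm_panel.h>

static int example_bind_panel(struct device *dev,
			      struct drm_connector *connector,
			      struct drm_panel **out)
{
	struct device_node *np;
	struct drm_panel *panel;

	/* "panel" is an illustrative property name, not a defined binding. */
	np = of_parse_phandle(dev->of_node, "panel", 0);
	if (!np)
		return -ENODEV;

	panel = of_drm_find_panel(np);
	of_node_put(np);
	if (IS_ERR(panel))
		return PTR_ERR(panel);	/* typically -EPROBE_DEFER until the panel probes */

	*out = panel;
	return drm_panel_attach(panel, connector);
}
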
diff --git a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
new file mode 100644
index 000000000000..299b217c83e1
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
@@ -0,0 +1,248 @@
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * NEC NL8048HL11 Panel Driver
4 *
5 * Copyright (C) 2019 Texas Instruments Incorporated
6 *
7 * Based on the omapdrm-specific panel-nec-nl8048hl11 driver
8 *
9 * Copyright (C) 2010 Texas Instruments Incorporated
10 * Author: Erik Gilling <konkers@android.com>
11 */
12
13#include <linux/delay.h>
14#include <linux/gpio/consumer.h>
15#include <linux/module.h>
16#include <linux/pm.h>
17#include <linux/spi/spi.h>
18
19#include <drm/drm_connector.h>
20#include <drm/drm_modes.h>
21#include <drm/drm_panel.h>
22
23struct nl8048_panel {
24 struct drm_panel panel;
25
26 struct spi_device *spi;
27 struct gpio_desc *reset_gpio;
28};
29
30#define to_nl8048_device(p) container_of(p, struct nl8048_panel, panel)
31
32static int nl8048_write(struct nl8048_panel *lcd, unsigned char addr,
33 unsigned char value)
34{
35 u8 data[4] = { value, 0x01, addr, 0x00 };
36 int ret;
37
38 ret = spi_write(lcd->spi, data, sizeof(data));
39 if (ret)
40 dev_err(&lcd->spi->dev, "SPI write to %u failed: %d\n",
41 addr, ret);
42
43 return ret;
44}
45
46static int nl8048_init(struct nl8048_panel *lcd)
47{
48 static const struct {
49 unsigned char addr;
50 unsigned char data;
51 } nl8048_init_seq[] = {
52 { 3, 0x01 }, { 0, 0x00 }, { 1, 0x01 }, { 4, 0x00 },
53 { 5, 0x14 }, { 6, 0x24 }, { 16, 0xd7 }, { 17, 0x00 },
54 { 18, 0x00 }, { 19, 0x55 }, { 20, 0x01 }, { 21, 0x70 },
55 { 22, 0x1e }, { 23, 0x25 }, { 24, 0x25 }, { 25, 0x02 },
56 { 26, 0x02 }, { 27, 0xa0 }, { 32, 0x2f }, { 33, 0x0f },
57 { 34, 0x0f }, { 35, 0x0f }, { 36, 0x0f }, { 37, 0x0f },
58 { 38, 0x0f }, { 39, 0x00 }, { 40, 0x02 }, { 41, 0x02 },
59 { 42, 0x02 }, { 43, 0x0f }, { 44, 0x0f }, { 45, 0x0f },
60 { 46, 0x0f }, { 47, 0x0f }, { 48, 0x0f }, { 49, 0x0f },
61 { 50, 0x00 }, { 51, 0x02 }, { 52, 0x02 }, { 53, 0x02 },
62 { 80, 0x0c }, { 83, 0x42 }, { 84, 0x42 }, { 85, 0x41 },
63 { 86, 0x14 }, { 89, 0x88 }, { 90, 0x01 }, { 91, 0x00 },
64 { 92, 0x02 }, { 93, 0x0c }, { 94, 0x1c }, { 95, 0x27 },
65 { 98, 0x49 }, { 99, 0x27 }, { 102, 0x76 }, { 103, 0x27 },
66 { 112, 0x01 }, { 113, 0x0e }, { 114, 0x02 }, { 115, 0x0c },
67 { 118, 0x0c }, { 121, 0x30 }, { 130, 0x00 }, { 131, 0x00 },
68 { 132, 0xfc }, { 134, 0x00 }, { 136, 0x00 }, { 138, 0x00 },
69 { 139, 0x00 }, { 140, 0x00 }, { 141, 0xfc }, { 143, 0x00 },
70 { 145, 0x00 }, { 147, 0x00 }, { 148, 0x00 }, { 149, 0x00 },
71 { 150, 0xfc }, { 152, 0x00 }, { 154, 0x00 }, { 156, 0x00 },
72 { 157, 0x00 },
73 };
74
75 unsigned int i;
76 int ret;
77
78 for (i = 0; i < ARRAY_SIZE(nl8048_init_seq); ++i) {
79 ret = nl8048_write(lcd, nl8048_init_seq[i].addr,
80 nl8048_init_seq[i].data);
81 if (ret < 0)
82 return ret;
83 }
84
85 udelay(20);
86
87 return nl8048_write(lcd, 2, 0x00);
88}
89
90static int nl8048_disable(struct drm_panel *panel)
91{
92 struct nl8048_panel *lcd = to_nl8048_device(panel);
93
94 gpiod_set_value_cansleep(lcd->reset_gpio, 0);
95
96 return 0;
97}
98
99static int nl8048_enable(struct drm_panel *panel)
100{
101 struct nl8048_panel *lcd = to_nl8048_device(panel);
102
103 gpiod_set_value_cansleep(lcd->reset_gpio, 1);
104
105 return 0;
106}
107
108static const struct drm_display_mode nl8048_mode = {
109 /* NEC PIX Clock Ratings MIN:21.8MHz TYP:23.8MHz MAX:25.7MHz */
110 .clock = 23800,
111 .hdisplay = 800,
112 .hsync_start = 800 + 6,
113 .hsync_end = 800 + 6 + 1,
114 .htotal = 800 + 6 + 1 + 4,
115 .vdisplay = 480,
116 .vsync_start = 480 + 3,
117 .vsync_end = 480 + 3 + 1,
118 .vtotal = 480 + 3 + 1 + 4,
119 .vrefresh = 60,
120 .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
121 .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
122 .width_mm = 89,
123 .height_mm = 53,
124};
125
126static int nl8048_get_modes(struct drm_panel *panel)
127{
128 struct drm_connector *connector = panel->connector;
129 struct drm_display_mode *mode;
130
131 mode = drm_mode_duplicate(panel->drm, &nl8048_mode);
132 if (!mode)
133 return -ENOMEM;
134
135 drm_mode_set_name(mode);
136 drm_mode_probed_add(connector, mode);
137
138 connector->display_info.width_mm = nl8048_mode.width_mm;
139 connector->display_info.height_mm = nl8048_mode.height_mm;
140 connector->display_info.bus_flags = DRM_BUS_FLAG_DE_HIGH
141 | DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE
142 | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE;
143
144 return 1;
145}
146
147static const struct drm_panel_funcs nl8048_funcs = {
148 .disable = nl8048_disable,
149 .enable = nl8048_enable,
150 .get_modes = nl8048_get_modes,
151};
152
153static int __maybe_unused nl8048_suspend(struct device *dev)
154{
155 struct nl8048_panel *lcd = dev_get_drvdata(dev);
156
157 nl8048_write(lcd, 2, 0x01);
158 msleep(40);
159
160 return 0;
161}
162
163static int __maybe_unused nl8048_resume(struct device *dev)
164{
165 struct nl8048_panel *lcd = dev_get_drvdata(dev);
166
167 /* Reinitialize the panel. */
168 spi_setup(lcd->spi);
169 nl8048_write(lcd, 2, 0x00);
170 nl8048_init(lcd);
171
172 return 0;
173}
174
175static SIMPLE_DEV_PM_OPS(nl8048_pm_ops, nl8048_suspend, nl8048_resume);
176
177static int nl8048_probe(struct spi_device *spi)
178{
179 struct nl8048_panel *lcd;
180 int ret;
181
182 lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL);
183 if (!lcd)
184 return -ENOMEM;
185
186 spi_set_drvdata(spi, lcd);
187 lcd->spi = spi;
188
189 lcd->reset_gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_LOW);
190 if (IS_ERR(lcd->reset_gpio)) {
191 dev_err(&spi->dev, "failed to parse reset gpio\n");
192 return PTR_ERR(lcd->reset_gpio);
193 }
194
195 spi->mode = SPI_MODE_0;
196 spi->bits_per_word = 32;
197
198 ret = spi_setup(spi);
199 if (ret < 0) {
200 dev_err(&spi->dev, "failed to setup SPI: %d\n", ret);
201 return ret;
202 }
203
204 ret = nl8048_init(lcd);
205 if (ret < 0)
206 return ret;
207
208 drm_panel_init(&lcd->panel);
209 lcd->panel.dev = &lcd->spi->dev;
210 lcd->panel.funcs = &nl8048_funcs;
211
212 return drm_panel_add(&lcd->panel);
213}
214
215static int nl8048_remove(struct spi_device *spi)
216{
217 struct nl8048_panel *lcd = spi_get_drvdata(spi);
218
219 drm_panel_remove(&lcd->panel);
220 drm_panel_disable(&lcd->panel);
221 drm_panel_unprepare(&lcd->panel);
222
223 return 0;
224}
225
226static const struct of_device_id nl8048_of_match[] = {
227 { .compatible = "nec,nl8048hl11", },
228 { /* sentinel */ },
229};
230
231MODULE_DEVICE_TABLE(of, nl8048_of_match);
232
233static struct spi_driver nl8048_driver = {
234 .probe = nl8048_probe,
235 .remove = nl8048_remove,
236 .driver = {
237 .name = "panel-nec-nl8048hl11",
238 .pm = &nl8048_pm_ops,
239 .of_match_table = nl8048_of_match,
240 },
241};
242
243module_spi_driver(nl8048_driver);
244
245MODULE_ALIAS("spi:nec,nl8048hl11");
246MODULE_AUTHOR("Erik Gilling <konkers@android.com>");
247MODULE_DESCRIPTION("NEC-NL8048HL11 Driver");
248MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
new file mode 100644
index 000000000000..46cd9a250129
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
@@ -0,0 +1,226 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Sharp LS037V7DW01 LCD Panel Driver
4 *
5 * Copyright (C) 2019 Texas Instruments Incorporated
6 *
7 * Based on the omapdrm-specific panel-sharp-ls037v7dw01 driver
8 *
9 * Copyright (C) 2013 Texas Instruments Incorporated
10 * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
11 */
12
13#include <linux/delay.h>
14#include <linux/gpio/consumer.h>
15#include <linux/module.h>
16#include <linux/of.h>
17#include <linux/platform_device.h>
18#include <linux/regulator/consumer.h>
19
20#include <drm/drm_connector.h>
21#include <drm/drm_modes.h>
22#include <drm/drm_panel.h>
23
24struct ls037v7dw01_panel {
25 struct drm_panel panel;
26 struct platform_device *pdev;
27
28 struct regulator *vdd;
29 struct gpio_desc *resb_gpio; /* low = reset active min 20 us */
30 struct gpio_desc *ini_gpio; /* high = power on */
31 struct gpio_desc *mo_gpio; /* low = 480x640, high = 240x320 */
32 struct gpio_desc *lr_gpio; /* high = conventional horizontal scanning */
33 struct gpio_desc *ud_gpio; /* high = conventional vertical scanning */
34};
35
36#define to_ls037v7dw01_device(p) \
37 container_of(p, struct ls037v7dw01_panel, panel)
38
39static int ls037v7dw01_disable(struct drm_panel *panel)
40{
41 struct ls037v7dw01_panel *lcd = to_ls037v7dw01_device(panel);
42
43 gpiod_set_value_cansleep(lcd->ini_gpio, 0);
44 gpiod_set_value_cansleep(lcd->resb_gpio, 0);
45
46 /* Wait at least 5 vsyncs after disabling the LCD. */
47 msleep(100);
48
49 return 0;
50}
51
52static int ls037v7dw01_unprepare(struct drm_panel *panel)
53{
54 struct ls037v7dw01_panel *lcd = to_ls037v7dw01_device(panel);
55
56 regulator_disable(lcd->vdd);
57 return 0;
58}
59
60static int ls037v7dw01_prepare(struct drm_panel *panel)
61{
62 struct ls037v7dw01_panel *lcd = to_ls037v7dw01_device(panel);
63 int ret;
64
65 ret = regulator_enable(lcd->vdd);
66 if (ret < 0)
67 dev_err(&lcd->pdev->dev, "%s: failed to enable regulator\n",
68 __func__);
69
70 return ret;
71}
72
73static int ls037v7dw01_enable(struct drm_panel *panel)
74{
75 struct ls037v7dw01_panel *lcd = to_ls037v7dw01_device(panel);
76
77 /* Wait couple of vsyncs before enabling the LCD. */
78 msleep(50);
79
80 gpiod_set_value_cansleep(lcd->resb_gpio, 1);
81 gpiod_set_value_cansleep(lcd->ini_gpio, 1);
82
83 return 0;
84}
85
86static const struct drm_display_mode ls037v7dw01_mode = {
87 .clock = 19200,
88 .hdisplay = 480,
89 .hsync_start = 480 + 1,
90 .hsync_end = 480 + 1 + 2,
91 .htotal = 480 + 1 + 2 + 28,
92 .vdisplay = 640,
93 .vsync_start = 640 + 1,
94 .vsync_end = 640 + 1 + 1,
95 .vtotal = 640 + 1 + 1 + 1,
96 .vrefresh = 58,
97 .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
98 .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
99 .width_mm = 56,
100 .height_mm = 75,
101};
102
103static int ls037v7dw01_get_modes(struct drm_panel *panel)
104{
105 struct drm_connector *connector = panel->connector;
106 struct drm_display_mode *mode;
107
108 mode = drm_mode_duplicate(panel->drm, &ls037v7dw01_mode);
109 if (!mode)
110 return -ENOMEM;
111
112 drm_mode_set_name(mode);
113 drm_mode_probed_add(connector, mode);
114
115 connector->display_info.width_mm = ls037v7dw01_mode.width_mm;
116 connector->display_info.height_mm = ls037v7dw01_mode.height_mm;
117 /*
118 * FIXME: According to the datasheet pixel data is sampled on the
119 * rising edge of the clock, but the code running on the SDP3430
120 * indicates sampling on the negative edge. This should be tested on a
121 * real device.
122 */
123 connector->display_info.bus_flags = DRM_BUS_FLAG_DE_HIGH
124 | DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE
125 | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE;
126
127 return 1;
128}
129
130static const struct drm_panel_funcs ls037v7dw01_funcs = {
131 .disable = ls037v7dw01_disable,
132 .unprepare = ls037v7dw01_unprepare,
133 .prepare = ls037v7dw01_prepare,
134 .enable = ls037v7dw01_enable,
135 .get_modes = ls037v7dw01_get_modes,
136};
137
138static int ls037v7dw01_probe(struct platform_device *pdev)
139{
140 struct ls037v7dw01_panel *lcd;
141
142 lcd = devm_kzalloc(&pdev->dev, sizeof(*lcd), GFP_KERNEL);
143 if (!lcd)
144 return -ENOMEM;
145
146 platform_set_drvdata(pdev, lcd);
147 lcd->pdev = pdev;
148
149 lcd->vdd = devm_regulator_get(&pdev->dev, "envdd");
150 if (IS_ERR(lcd->vdd)) {
151 dev_err(&pdev->dev, "failed to get regulator\n");
152 return PTR_ERR(lcd->vdd);
153 }
154
155 lcd->ini_gpio = devm_gpiod_get(&pdev->dev, "enable", GPIOD_OUT_LOW);
156 if (IS_ERR(lcd->ini_gpio)) {
157 dev_err(&pdev->dev, "failed to get enable gpio\n");
158 return PTR_ERR(lcd->ini_gpio);
159 }
160
161 lcd->resb_gpio = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
162 if (IS_ERR(lcd->resb_gpio)) {
163 dev_err(&pdev->dev, "failed to get reset gpio\n");
164 return PTR_ERR(lcd->resb_gpio);
165 }
166
167 lcd->mo_gpio = devm_gpiod_get_index(&pdev->dev, "mode", 0,
168 GPIOD_OUT_LOW);
169 if (IS_ERR(lcd->mo_gpio)) {
170 dev_err(&pdev->dev, "failed to get mode[0] gpio\n");
171 return PTR_ERR(lcd->mo_gpio);
172 }
173
174 lcd->lr_gpio = devm_gpiod_get_index(&pdev->dev, "mode", 1,
175 GPIOD_OUT_LOW);
176 if (IS_ERR(lcd->lr_gpio)) {
177 dev_err(&pdev->dev, "failed to get mode[1] gpio\n");
178 return PTR_ERR(lcd->lr_gpio);
179 }
180
181 lcd->ud_gpio = devm_gpiod_get_index(&pdev->dev, "mode", 2,
182 GPIOD_OUT_LOW);
183 if (IS_ERR(lcd->ud_gpio)) {
184 dev_err(&pdev->dev, "failed to get mode[2] gpio\n");
185 return PTR_ERR(lcd->ud_gpio);
186 }
187
188 drm_panel_init(&lcd->panel);
189 lcd->panel.dev = &pdev->dev;
190 lcd->panel.funcs = &ls037v7dw01_funcs;
191
192 return drm_panel_add(&lcd->panel);
193}
194
195static int ls037v7dw01_remove(struct platform_device *pdev)
196{
197 struct ls037v7dw01_panel *lcd = platform_get_drvdata(pdev);
198
199 drm_panel_remove(&lcd->panel);
200 drm_panel_disable(&lcd->panel);
201 drm_panel_unprepare(&lcd->panel);
202
203 return 0;
204}
205
206static const struct of_device_id ls037v7dw01_of_match[] = {
207 { .compatible = "sharp,ls037v7dw01", },
208 { /* sentinel */ },
209};
210
211MODULE_DEVICE_TABLE(of, ls037v7dw01_of_match);
212
213static struct platform_driver ls037v7dw01_driver = {
214 .probe = ls037v7dw01_probe,
215 .remove = ls037v7dw01_remove,
216 .driver = {
217 .name = "panel-sharp-ls037v7dw01",
218 .of_match_table = ls037v7dw01_of_match,
219 },
220};
221
222module_platform_driver(ls037v7dw01_driver);
223
224MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
225MODULE_DESCRIPTION("Sharp LS037V7DW01 Panel Driver");
226MODULE_LICENSE("GPL");
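
Editor's note: the LS037V7DW01 driver above splits power handling between prepare/unprepare (the "envdd" regulator) and enable/disable (the INI and RESB GPIOs). A hedged sketch of the call order a CRTC or encoder driver follows around these panels; the example_* wrappers are illustrative, the drm_panel_* helpers are the real API:

static void example_display_on(struct drm_panel *panel)
{
	drm_panel_prepare(panel);	/* LS037V7DW01: enables the "envdd" regulator */

	/* ...start scanning out valid video here... */

	drm_panel_enable(panel);	/* LS037V7DW01: raises RESB/INI after a few vsyncs */
}

static void example_display_off(struct drm_panel *panel)
{
	drm_panel_disable(panel);	/* drops INI/RESB and waits ~5 vsyncs */

	/* ...stop the video stream here... */

	drm_panel_unprepare(panel);	/* disables the regulator */
}
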
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index bff7578f84dd..28fa6ba7b767 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -2833,6 +2833,64 @@ static const struct panel_desc tianma_tm070rvhg71 = {
 	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
 };
 
+static const struct drm_display_mode ti_nspire_cx_lcd_mode[] = {
+	{
+		.clock = 10000,
+		.hdisplay = 320,
+		.hsync_start = 320 + 50,
+		.hsync_end = 320 + 50 + 6,
+		.htotal = 320 + 50 + 6 + 38,
+		.vdisplay = 240,
+		.vsync_start = 240 + 3,
+		.vsync_end = 240 + 3 + 1,
+		.vtotal = 240 + 3 + 1 + 17,
+		.vrefresh = 60,
+		.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+	},
+};
+
+static const struct panel_desc ti_nspire_cx_lcd_panel = {
+	.modes = ti_nspire_cx_lcd_mode,
+	.num_modes = 1,
+	.bpc = 8,
+	.size = {
+		.width = 65,
+		.height = 49,
+	},
+	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+	.bus_flags = DRM_BUS_FLAG_PIXDATA_NEGEDGE,
+};
+
+static const struct drm_display_mode ti_nspire_classic_lcd_mode[] = {
+	{
+		.clock = 10000,
+		.hdisplay = 320,
+		.hsync_start = 320 + 6,
+		.hsync_end = 320 + 6 + 6,
+		.htotal = 320 + 6 + 6 + 6,
+		.vdisplay = 240,
+		.vsync_start = 240 + 0,
+		.vsync_end = 240 + 0 + 1,
+		.vtotal = 240 + 0 + 1 + 0,
+		.vrefresh = 60,
+		.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+	},
+};
+
+static const struct panel_desc ti_nspire_classic_lcd_panel = {
+	.modes = ti_nspire_classic_lcd_mode,
+	.num_modes = 1,
+	/* The grayscale panel has 8 bit for the color .. Y (black) */
+	.bpc = 8,
+	.size = {
+		.width = 71,
+		.height = 53,
+	},
+	/* This is the grayscale bus format */
+	.bus_format = MEDIA_BUS_FMT_Y8_1X8,
+	.bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+};
+
 static const struct drm_display_mode toshiba_lt089ac29000_mode = {
 	.clock = 79500,
 	.hdisplay = 1280,
@@ -3303,6 +3361,12 @@ static const struct of_device_id platform_of_match[] = {
 		.compatible = "tianma,tm070rvhg71",
 		.data = &tianma_tm070rvhg71,
 	}, {
+		.compatible = "ti,nspire-cx-lcd-panel",
+		.data = &ti_nspire_cx_lcd_panel,
+	}, {
+		.compatible = "ti,nspire-classic-lcd-panel",
+		.data = &ti_nspire_classic_lcd_panel,
+	}, {
 		.compatible = "toshiba,lt089ac29000",
 		.data = &toshiba_lt089ac29000,
 	}, {
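
Editor's note: the two TI nspire panels need no dedicated driver; a drm_display_mode, a panel_desc and an of_device_id entry in panel-simple are enough. A simplified sketch of what panel-simple does with such a descriptor when the connector is probed (the real code also propagates bpc and bus_format and handles display_timing-based descriptions):

static int example_simple_get_modes(struct drm_panel *panel,
				    const struct panel_desc *desc)
{
	struct drm_connector *connector = panel->connector;
	struct drm_display_mode *mode;
	unsigned int i;

	/* Duplicate every fixed mode from the descriptor onto the connector. */
	for (i = 0; i < desc->num_modes; i++) {
		mode = drm_mode_duplicate(panel->drm, &desc->modes[i]);
		if (!mode)
			return -ENOMEM;
		drm_mode_set_name(mode);
		drm_mode_probed_add(connector, mode);
	}

	connector->display_info.width_mm = desc->size.width;
	connector->display_info.height_mm = desc->size.height;
	connector->display_info.bus_flags = desc->bus_flags;

	return desc->num_modes;
}
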
diff --git a/drivers/gpu/drm/panel/panel-sony-acx565akm.c b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
new file mode 100644
index 000000000000..305259b58767
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
@@ -0,0 +1,701 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Sony ACX565AKM LCD Panel driver
4 *
5 * Copyright (C) 2019 Texas Instruments Incorporated
6 *
7 * Based on the omapdrm-specific panel-sony-acx565akm driver
8 *
9 * Copyright (C) 2010 Nokia Corporation
10 * Author: Imre Deak <imre.deak@nokia.com>
11 */
12
13/*
14 * TODO (to be addressed with hardware access to test the changes):
15 *
16 * - Update backlight support to use backlight_update_status() etc.
17 * - Use prepare/unprepare for the basic power on/off of the backligt
18 */
19
20#include <linux/backlight.h>
21#include <linux/delay.h>
22#include <linux/gpio/consumer.h>
23#include <linux/jiffies.h>
24#include <linux/module.h>
25#include <linux/mutex.h>
26#include <linux/sched.h>
27#include <linux/spi/spi.h>
28#include <video/mipi_display.h>
29
30#include <drm/drm_connector.h>
31#include <drm/drm_modes.h>
32#include <drm/drm_panel.h>
33
34#define CTRL_DISP_BRIGHTNESS_CTRL_ON BIT(5)
35#define CTRL_DISP_AMBIENT_LIGHT_CTRL_ON BIT(4)
36#define CTRL_DISP_BACKLIGHT_ON BIT(2)
37#define CTRL_DISP_AUTO_BRIGHTNESS_ON BIT(1)
38
39#define MIPID_CMD_WRITE_CABC 0x55
40#define MIPID_CMD_READ_CABC 0x56
41
42#define MIPID_VER_LPH8923 3
43#define MIPID_VER_LS041Y3 4
44#define MIPID_VER_L4F00311 8
45#define MIPID_VER_ACX565AKM 9
46
47struct acx565akm_panel {
48 struct drm_panel panel;
49
50 struct spi_device *spi;
51 struct gpio_desc *reset_gpio;
52 struct backlight_device *backlight;
53
54 struct mutex mutex;
55
56 const char *name;
57 u8 display_id[3];
58 int model;
59 int revision;
60 bool has_bc;
61 bool has_cabc;
62
63 bool enabled;
64 unsigned int cabc_mode;
65 /*
66 * Next value of jiffies when we can issue the next sleep in/out
67 * command.
68 */
69 unsigned long hw_guard_end;
70 unsigned long hw_guard_wait; /* max guard time in jiffies */
71};
72
73#define to_acx565akm_device(p) container_of(p, struct acx565akm_panel, panel)
74
75static void acx565akm_transfer(struct acx565akm_panel *lcd, int cmd,
76 const u8 *wbuf, int wlen, u8 *rbuf, int rlen)
77{
78 struct spi_message m;
79 struct spi_transfer *x, xfer[5];
80 int ret;
81
82 spi_message_init(&m);
83
84 memset(xfer, 0, sizeof(xfer));
85 x = &xfer[0];
86
87 cmd &= 0xff;
88 x->tx_buf = &cmd;
89 x->bits_per_word = 9;
90 x->len = 2;
91
92 if (rlen > 1 && wlen == 0) {
93 /*
94 * Between the command and the response data there is a
95 * dummy clock cycle. Add an extra bit after the command
96 * word to account for this.
97 */
98 x->bits_per_word = 10;
99 cmd <<= 1;
100 }
101 spi_message_add_tail(x, &m);
102
103 if (wlen) {
104 x++;
105 x->tx_buf = wbuf;
106 x->len = wlen;
107 x->bits_per_word = 9;
108 spi_message_add_tail(x, &m);
109 }
110
111 if (rlen) {
112 x++;
113 x->rx_buf = rbuf;
114 x->len = rlen;
115 spi_message_add_tail(x, &m);
116 }
117
118 ret = spi_sync(lcd->spi, &m);
119 if (ret < 0)
120 dev_dbg(&lcd->spi->dev, "spi_sync %d\n", ret);
121}
122
123static inline void acx565akm_cmd(struct acx565akm_panel *lcd, int cmd)
124{
125 acx565akm_transfer(lcd, cmd, NULL, 0, NULL, 0);
126}
127
128static inline void acx565akm_write(struct acx565akm_panel *lcd,
129 int reg, const u8 *buf, int len)
130{
131 acx565akm_transfer(lcd, reg, buf, len, NULL, 0);
132}
133
134static inline void acx565akm_read(struct acx565akm_panel *lcd,
135 int reg, u8 *buf, int len)
136{
137 acx565akm_transfer(lcd, reg, NULL, 0, buf, len);
138}
139
140/* -----------------------------------------------------------------------------
141 * Auto Brightness Control Via sysfs
142 */
143
144static unsigned int acx565akm_get_cabc_mode(struct acx565akm_panel *lcd)
145{
146 return lcd->cabc_mode;
147}
148
149static void acx565akm_set_cabc_mode(struct acx565akm_panel *lcd,
150 unsigned int mode)
151{
152 u16 cabc_ctrl;
153
154 lcd->cabc_mode = mode;
155 if (!lcd->enabled)
156 return;
157 cabc_ctrl = 0;
158 acx565akm_read(lcd, MIPID_CMD_READ_CABC, (u8 *)&cabc_ctrl, 1);
159 cabc_ctrl &= ~3;
160 cabc_ctrl |= (1 << 8) | (mode & 3);
161 acx565akm_write(lcd, MIPID_CMD_WRITE_CABC, (u8 *)&cabc_ctrl, 2);
162}
163
164static unsigned int acx565akm_get_hw_cabc_mode(struct acx565akm_panel *lcd)
165{
166 u8 cabc_ctrl;
167
168 acx565akm_read(lcd, MIPID_CMD_READ_CABC, &cabc_ctrl, 1);
169 return cabc_ctrl & 3;
170}
171
172static const char * const acx565akm_cabc_modes[] = {
173 "off", /* always used when CABC is not supported */
174 "ui",
175 "still-image",
176 "moving-image",
177};
178
179static ssize_t cabc_mode_show(struct device *dev,
180 struct device_attribute *attr,
181 char *buf)
182{
183 struct acx565akm_panel *lcd = dev_get_drvdata(dev);
184 const char *mode_str;
185 int mode;
186
187 if (!lcd->has_cabc)
188 mode = 0;
189 else
190 mode = acx565akm_get_cabc_mode(lcd);
191
192 mode_str = "unknown";
193 if (mode >= 0 && mode < ARRAY_SIZE(acx565akm_cabc_modes))
194 mode_str = acx565akm_cabc_modes[mode];
195
196 return sprintf(buf, "%s\n", mode_str);
197}
198
199static ssize_t cabc_mode_store(struct device *dev,
200 struct device_attribute *attr,
201 const char *buf, size_t count)
202{
203 struct acx565akm_panel *lcd = dev_get_drvdata(dev);
204 unsigned int i;
205
206 for (i = 0; i < ARRAY_SIZE(acx565akm_cabc_modes); i++) {
207 const char *mode_str = acx565akm_cabc_modes[i];
208 int cmp_len = strlen(mode_str);
209
210 if (count > 0 && buf[count - 1] == '\n')
211 count--;
212 if (count != cmp_len)
213 continue;
214
215 if (strncmp(buf, mode_str, cmp_len) == 0)
216 break;
217 }
218
219 if (i == ARRAY_SIZE(acx565akm_cabc_modes))
220 return -EINVAL;
221
222 if (!lcd->has_cabc && i != 0)
223 return -EINVAL;
224
225 mutex_lock(&lcd->mutex);
226 acx565akm_set_cabc_mode(lcd, i);
227 mutex_unlock(&lcd->mutex);
228
229 return count;
230}
231
232static ssize_t cabc_available_modes_show(struct device *dev,
233 struct device_attribute *attr,
234 char *buf)
235{
236 struct acx565akm_panel *lcd = dev_get_drvdata(dev);
237 unsigned int i;
238 size_t len = 0;
239
240 if (!lcd->has_cabc)
241 return sprintf(buf, "%s\n", acx565akm_cabc_modes[0]);
242
243 for (i = 0; i < ARRAY_SIZE(acx565akm_cabc_modes); i++)
244 len += sprintf(&buf[len], "%s%s", i ? " " : "",
245 acx565akm_cabc_modes[i]);
246
247 buf[len++] = '\n';
248
249 return len;
250}
251
252static DEVICE_ATTR_RW(cabc_mode);
253static DEVICE_ATTR_RO(cabc_available_modes);
254
255static struct attribute *acx565akm_cabc_attrs[] = {
256 &dev_attr_cabc_mode.attr,
257 &dev_attr_cabc_available_modes.attr,
258 NULL,
259};
260
261static const struct attribute_group acx565akm_cabc_attr_group = {
262 .attrs = acx565akm_cabc_attrs,
263};
264
265/* -----------------------------------------------------------------------------
266 * Backlight Device
267 */
268
269static int acx565akm_get_actual_brightness(struct acx565akm_panel *lcd)
270{
271 u8 bv;
272
273 acx565akm_read(lcd, MIPI_DCS_GET_DISPLAY_BRIGHTNESS, &bv, 1);
274
275 return bv;
276}
277
278static void acx565akm_set_brightness(struct acx565akm_panel *lcd, int level)
279{
280 u16 ctrl;
281 int bv;
282
283 bv = level | (1 << 8);
284 acx565akm_write(lcd, MIPI_DCS_SET_DISPLAY_BRIGHTNESS, (u8 *)&bv, 2);
285
286 acx565akm_read(lcd, MIPI_DCS_GET_CONTROL_DISPLAY, (u8 *)&ctrl, 1);
287 if (level)
288 ctrl |= CTRL_DISP_BRIGHTNESS_CTRL_ON |
289 CTRL_DISP_BACKLIGHT_ON;
290 else
291 ctrl &= ~(CTRL_DISP_BRIGHTNESS_CTRL_ON |
292 CTRL_DISP_BACKLIGHT_ON);
293
294 ctrl |= 1 << 8;
295 acx565akm_write(lcd, MIPI_DCS_WRITE_CONTROL_DISPLAY, (u8 *)&ctrl, 2);
296}
297
298static int acx565akm_bl_update_status_locked(struct backlight_device *dev)
299{
300 struct acx565akm_panel *lcd = dev_get_drvdata(&dev->dev);
301 int level;
302
303 if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
304 dev->props.power == FB_BLANK_UNBLANK)
305 level = dev->props.brightness;
306 else
307 level = 0;
308
309 acx565akm_set_brightness(lcd, level);
310
311 return 0;
312}
313
314static int acx565akm_bl_update_status(struct backlight_device *dev)
315{
316 struct acx565akm_panel *lcd = dev_get_drvdata(&dev->dev);
317 int ret;
318
319 mutex_lock(&lcd->mutex);
320 ret = acx565akm_bl_update_status_locked(dev);
321 mutex_unlock(&lcd->mutex);
322
323 return ret;
324}
325
326static int acx565akm_bl_get_intensity(struct backlight_device *dev)
327{
328 struct acx565akm_panel *lcd = dev_get_drvdata(&dev->dev);
329 unsigned int intensity;
330
331 mutex_lock(&lcd->mutex);
332
333 if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
334 dev->props.power == FB_BLANK_UNBLANK)
335 intensity = acx565akm_get_actual_brightness(lcd);
336 else
337 intensity = 0;
338
339 mutex_unlock(&lcd->mutex);
340
341 return intensity;
342}
343
344static const struct backlight_ops acx565akm_bl_ops = {
345 .get_brightness = acx565akm_bl_get_intensity,
346 .update_status = acx565akm_bl_update_status,
347};
348
349static int acx565akm_backlight_init(struct acx565akm_panel *lcd)
350{
351 struct backlight_properties props = {
352 .fb_blank = FB_BLANK_UNBLANK,
353 .power = FB_BLANK_UNBLANK,
354 .type = BACKLIGHT_RAW,
355 };
356 int ret;
357
358 lcd->backlight = backlight_device_register(lcd->name, &lcd->spi->dev,
359 lcd, &acx565akm_bl_ops,
360 &props);
361 if (IS_ERR(lcd->backlight)) {
362 ret = PTR_ERR(lcd->backlight);
363 lcd->backlight = NULL;
364 return ret;
365 }
366
367 if (lcd->has_cabc) {
368 ret = sysfs_create_group(&lcd->backlight->dev.kobj,
369 &acx565akm_cabc_attr_group);
370 if (ret < 0) {
371 dev_err(&lcd->spi->dev,
372 "%s failed to create sysfs files\n", __func__);
373 backlight_device_unregister(lcd->backlight);
374 return ret;
375 }
376
377 lcd->cabc_mode = acx565akm_get_hw_cabc_mode(lcd);
378 }
379
380 lcd->backlight->props.max_brightness = 255;
381 lcd->backlight->props.brightness = acx565akm_get_actual_brightness(lcd);
382
383 acx565akm_bl_update_status_locked(lcd->backlight);
384
385 return 0;
386}
387
388static void acx565akm_backlight_cleanup(struct acx565akm_panel *lcd)
389{
390 if (lcd->has_cabc)
391 sysfs_remove_group(&lcd->backlight->dev.kobj,
392 &acx565akm_cabc_attr_group);
393
394 backlight_device_unregister(lcd->backlight);
395}
396
397/* -----------------------------------------------------------------------------
398 * DRM Bridge Operations
399 */
400
401static void acx565akm_set_sleep_mode(struct acx565akm_panel *lcd, int on)
402{
403 int cmd = on ? MIPI_DCS_ENTER_SLEEP_MODE : MIPI_DCS_EXIT_SLEEP_MODE;
404 unsigned long wait;
405
406 /*
407 * We have to keep 120msec between sleep in/out commands.
408 * (8.2.15, 8.2.16).
409 */
410 wait = lcd->hw_guard_end - jiffies;
411 if ((long)wait > 0 && wait <= lcd->hw_guard_wait) {
412 set_current_state(TASK_UNINTERRUPTIBLE);
413 schedule_timeout(wait);
414 }
415
416 acx565akm_cmd(lcd, cmd);
417
418 lcd->hw_guard_wait = msecs_to_jiffies(120);
419 lcd->hw_guard_end = jiffies + lcd->hw_guard_wait;
420}
421
422static void acx565akm_set_display_state(struct acx565akm_panel *lcd,
423 int enabled)
424{
425 int cmd = enabled ? MIPI_DCS_SET_DISPLAY_ON : MIPI_DCS_SET_DISPLAY_OFF;
426
427 acx565akm_cmd(lcd, cmd);
428}
429
430static int acx565akm_power_on(struct acx565akm_panel *lcd)
431{
432 /*FIXME tweak me */
433 msleep(50);
434
435 gpiod_set_value(lcd->reset_gpio, 1);
436
437 if (lcd->enabled) {
438 dev_dbg(&lcd->spi->dev, "panel already enabled\n");
439 return 0;
440 }
441
442 /*
443 * We have to meet all the following delay requirements:
444 * 1. tRW: reset pulse width 10usec (7.12.1)
445 * 2. tRT: reset cancel time 5msec (7.12.1)
446 * 3. Providing PCLK,HS,VS signals for 2 frames = ~50msec worst
447 * case (7.6.2)
448 * 4. 120msec before the sleep out command (7.12.1)
449 */
450 msleep(120);
451
452 acx565akm_set_sleep_mode(lcd, 0);
453 lcd->enabled = true;
454
455 /* 5msec between sleep out and the next command. (8.2.16) */
456 usleep_range(5000, 10000);
457 acx565akm_set_display_state(lcd, 1);
458 acx565akm_set_cabc_mode(lcd, lcd->cabc_mode);
459
460 return acx565akm_bl_update_status_locked(lcd->backlight);
461}
462
463static void acx565akm_power_off(struct acx565akm_panel *lcd)
464{
465 if (!lcd->enabled)
466 return;
467
468 acx565akm_set_display_state(lcd, 0);
469 acx565akm_set_sleep_mode(lcd, 1);
470 lcd->enabled = false;
471 /*
472 * We have to provide PCLK,HS,VS signals for 2 frames (worst case
473 * ~50msec) after sending the sleep in command and asserting the
474 * reset signal. We probably could assert the reset w/o the delay
475 * but we still delay to avoid possible artifacts. (7.6.1)
476 */
477 msleep(50);
478
479 gpiod_set_value(lcd->reset_gpio, 0);
480
481 /* FIXME need to tweak this delay */
482 msleep(100);
483}
484
485static int acx565akm_disable(struct drm_panel *panel)
486{
487 struct acx565akm_panel *lcd = to_acx565akm_device(panel);
488
489 mutex_lock(&lcd->mutex);
490 acx565akm_power_off(lcd);
491 mutex_unlock(&lcd->mutex);
492
493 return 0;
494}
495
496static int acx565akm_enable(struct drm_panel *panel)
497{
498 struct acx565akm_panel *lcd = to_acx565akm_device(panel);
499
500 mutex_lock(&lcd->mutex);
501 acx565akm_power_on(lcd);
502 mutex_unlock(&lcd->mutex);
503
504 return 0;
505}
506
507static const struct drm_display_mode acx565akm_mode = {
508 .clock = 24000,
509 .hdisplay = 800,
510 .hsync_start = 800 + 28,
511 .hsync_end = 800 + 28 + 4,
512 .htotal = 800 + 28 + 4 + 24,
513 .vdisplay = 480,
514 .vsync_start = 480 + 3,
515 .vsync_end = 480 + 3 + 3,
516 .vtotal = 480 + 3 + 3 + 4,
517 .vrefresh = 57,
518 .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
519 .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
520 .width_mm = 77,
521 .height_mm = 46,
522};
523
524static int acx565akm_get_modes(struct drm_panel *panel)
525{
526 struct drm_connector *connector = panel->connector;
527 struct drm_display_mode *mode;
528
529 mode = drm_mode_duplicate(panel->drm, &acx565akm_mode);
530 if (!mode)
531 return -ENOMEM;
532
533 drm_mode_set_name(mode);
534 drm_mode_probed_add(connector, mode);
535
536 connector->display_info.width_mm = acx565akm_mode.width_mm;
537 connector->display_info.height_mm = acx565akm_mode.height_mm;
538 connector->display_info.bus_flags = DRM_BUS_FLAG_DE_HIGH
539 | DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE
540 | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE;
541
542 return 1;
543}
544
545static const struct drm_panel_funcs acx565akm_funcs = {
546 .disable = acx565akm_disable,
547 .enable = acx565akm_enable,
548 .get_modes = acx565akm_get_modes,
549};
550
551/* -----------------------------------------------------------------------------
552 * Probe, Detect and Remove
553 */
554
555static int acx565akm_detect(struct acx565akm_panel *lcd)
556{
557 __be32 value;
558 u32 status;
559 int ret = 0;
560
561 /*
562 * After being taken out of reset the panel needs 5ms before the first
563 * command can be sent.
564 */
565 gpiod_set_value(lcd->reset_gpio, 1);
566 usleep_range(5000, 10000);
567
568 acx565akm_read(lcd, MIPI_DCS_GET_DISPLAY_STATUS, (u8 *)&value, 4);
569 status = __be32_to_cpu(value);
570 lcd->enabled = (status & (1 << 17)) && (status & (1 << 10));
571
572 dev_dbg(&lcd->spi->dev,
573 "LCD panel %s by bootloader (status 0x%04x)\n",
574 lcd->enabled ? "enabled" : "disabled ", status);
575
576 acx565akm_read(lcd, MIPI_DCS_GET_DISPLAY_ID, lcd->display_id, 3);
577 dev_dbg(&lcd->spi->dev, "MIPI display ID: %02x%02x%02x\n",
578 lcd->display_id[0], lcd->display_id[1], lcd->display_id[2]);
579
580 switch (lcd->display_id[0]) {
581 case 0x10:
582 lcd->model = MIPID_VER_ACX565AKM;
583 lcd->name = "acx565akm";
584 lcd->has_bc = 1;
585 lcd->has_cabc = 1;
586 break;
587 case 0x29:
588 lcd->model = MIPID_VER_L4F00311;
589 lcd->name = "l4f00311";
590 break;
591 case 0x45:
592 lcd->model = MIPID_VER_LPH8923;
593 lcd->name = "lph8923";
594 break;
595 case 0x83:
596 lcd->model = MIPID_VER_LS041Y3;
597 lcd->name = "ls041y3";
598 break;
599 default:
600 lcd->name = "unknown";
601 dev_err(&lcd->spi->dev, "unknown display ID\n");
602 ret = -ENODEV;
603 goto done;
604 }
605
606 lcd->revision = lcd->display_id[1];
607
608 dev_info(&lcd->spi->dev, "%s rev %02x panel detected\n",
609 lcd->name, lcd->revision);
610
611done:
612 if (!lcd->enabled)
613 gpiod_set_value(lcd->reset_gpio, 0);
614
615 return ret;
616}
617
618static int acx565akm_probe(struct spi_device *spi)
619{
620 struct acx565akm_panel *lcd;
621 int ret;
622
623 lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL);
624 if (!lcd)
625 return -ENOMEM;
626
627 spi_set_drvdata(spi, lcd);
628 spi->mode = SPI_MODE_3;
629
630 lcd->spi = spi;
631 mutex_init(&lcd->mutex);
632
633 lcd->reset_gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_LOW);
634 if (IS_ERR(lcd->reset_gpio)) {
635 dev_err(&spi->dev, "failed to get reset GPIO\n");
636 return PTR_ERR(lcd->reset_gpio);
637 }
638
639 ret = acx565akm_detect(lcd);
640 if (ret < 0) {
641 dev_err(&spi->dev, "panel detection failed\n");
642 return ret;
643 }
644
645 if (lcd->has_bc) {
646 ret = acx565akm_backlight_init(lcd);
647 if (ret < 0)
648 return ret;
649 }
650
651 drm_panel_init(&lcd->panel);
652 lcd->panel.dev = &lcd->spi->dev;
653 lcd->panel.funcs = &acx565akm_funcs;
654
655 ret = drm_panel_add(&lcd->panel);
656 if (ret < 0) {
657 if (lcd->has_bc)
658 acx565akm_backlight_cleanup(lcd);
659 return ret;
660 }
661
662 return 0;
663}
664
665static int acx565akm_remove(struct spi_device *spi)
666{
667 struct acx565akm_panel *lcd = spi_get_drvdata(spi);
668
669 drm_panel_remove(&lcd->panel);
670
671 if (lcd->has_bc)
672 acx565akm_backlight_cleanup(lcd);
673
674 drm_panel_disable(&lcd->panel);
675 drm_panel_unprepare(&lcd->panel);
676
677 return 0;
678}
679
680static const struct of_device_id acx565akm_of_match[] = {
681 { .compatible = "sony,acx565akm", },
682 { /* sentinel */ },
683};
684
685MODULE_DEVICE_TABLE(of, acx565akm_of_match);
686
687static struct spi_driver acx565akm_driver = {
688 .probe = acx565akm_probe,
689 .remove = acx565akm_remove,
690 .driver = {
691 .name = "panel-sony-acx565akm",
692 .of_match_table = acx565akm_of_match,
693 },
694};
695
696module_spi_driver(acx565akm_driver);
697
698MODULE_ALIAS("spi:sony,acx565akm");
699MODULE_AUTHOR("Nokia Corporation");
700MODULE_DESCRIPTION("Sony ACX565AKM LCD Panel Driver");
701MODULE_LICENSE("GPL");
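
Editor's note: the ACX565AKM driver above enforces the panel's 120 ms guard time between sleep-in/out commands with a jiffies deadline (hw_guard_end/hw_guard_wait). A generic sketch of the same idiom, with illustrative struct and function names:

#include <linux/jiffies.h>
#include <linux/sched.h>

struct guarded_dev {
	unsigned long guard_end;	/* jiffies value at which the guard expires */
	unsigned long guard_wait;	/* guard length, in jiffies */
};

static void guarded_send(struct guarded_dev *dev, unsigned int guard_ms)
{
	long remaining = dev->guard_end - jiffies;

	/* Sleep only if a previous guard window is still running. */
	if (remaining > 0 && (unsigned long)remaining <= dev->guard_wait)
		schedule_timeout_uninterruptible(remaining);

	/* ...issue the guarded command here... */

	dev->guard_wait = msecs_to_jiffies(guard_ms);
	dev->guard_end = jiffies + dev->guard_wait;
}
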
diff --git a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
new file mode 100644
index 000000000000..d7b2e34626ef
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
@@ -0,0 +1,399 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Toppoly TD028TTEC1 Panel Driver
4 *
5 * Copyright (C) 2019 Texas Instruments Incorporated
6 *
7 * Based on the omapdrm-specific panel-tpo-td028ttec1 driver
8 *
9 * Copyright (C) 2008 Nokia Corporation
10 * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
11 *
12 * Neo 1973 code (jbt6k74.c):
13 * Copyright (C) 2006-2007 OpenMoko, Inc.
14 * Author: Harald Welte <laforge@openmoko.org>
15 *
16 * Ported and adapted from Neo 1973 U-Boot by:
17 * H. Nikolaus Schaller <hns@goldelico.com>
18 */
19
20#include <linux/backlight.h>
21#include <linux/delay.h>
22#include <linux/module.h>
23#include <linux/spi/spi.h>
24
25#include <drm/drm_connector.h>
26#include <drm/drm_modes.h>
27#include <drm/drm_panel.h>
28
29#define JBT_COMMAND 0x000
30#define JBT_DATA 0x100
31
32#define JBT_REG_SLEEP_IN 0x10
33#define JBT_REG_SLEEP_OUT 0x11
34
35#define JBT_REG_DISPLAY_OFF 0x28
36#define JBT_REG_DISPLAY_ON 0x29
37
38#define JBT_REG_RGB_FORMAT 0x3a
39#define JBT_REG_QUAD_RATE 0x3b
40
41#define JBT_REG_POWER_ON_OFF 0xb0
42#define JBT_REG_BOOSTER_OP 0xb1
43#define JBT_REG_BOOSTER_MODE 0xb2
44#define JBT_REG_BOOSTER_FREQ 0xb3
45#define JBT_REG_OPAMP_SYSCLK 0xb4
46#define JBT_REG_VSC_VOLTAGE 0xb5
47#define JBT_REG_VCOM_VOLTAGE 0xb6
48#define JBT_REG_EXT_DISPL 0xb7
49#define JBT_REG_OUTPUT_CONTROL 0xb8
50#define JBT_REG_DCCLK_DCEV 0xb9
51#define JBT_REG_DISPLAY_MODE1 0xba
52#define JBT_REG_DISPLAY_MODE2 0xbb
53#define JBT_REG_DISPLAY_MODE 0xbc
54#define JBT_REG_ASW_SLEW 0xbd
55#define JBT_REG_DUMMY_DISPLAY 0xbe
56#define JBT_REG_DRIVE_SYSTEM 0xbf
57
58#define JBT_REG_SLEEP_OUT_FR_A 0xc0
59#define JBT_REG_SLEEP_OUT_FR_B 0xc1
60#define JBT_REG_SLEEP_OUT_FR_C 0xc2
61#define JBT_REG_SLEEP_IN_LCCNT_D 0xc3
62#define JBT_REG_SLEEP_IN_LCCNT_E 0xc4
63#define JBT_REG_SLEEP_IN_LCCNT_F 0xc5
64#define JBT_REG_SLEEP_IN_LCCNT_G 0xc6
65
66#define JBT_REG_GAMMA1_FINE_1 0xc7
67#define JBT_REG_GAMMA1_FINE_2 0xc8
68#define JBT_REG_GAMMA1_INCLINATION 0xc9
69#define JBT_REG_GAMMA1_BLUE_OFFSET 0xca
70
71#define JBT_REG_BLANK_CONTROL 0xcf
72#define JBT_REG_BLANK_TH_TV 0xd0
73#define JBT_REG_CKV_ON_OFF 0xd1
74#define JBT_REG_CKV_1_2 0xd2
75#define JBT_REG_OEV_TIMING 0xd3
76#define JBT_REG_ASW_TIMING_1 0xd4
77#define JBT_REG_ASW_TIMING_2 0xd5
78
79#define JBT_REG_HCLOCK_VGA 0xec
80#define JBT_REG_HCLOCK_QVGA 0xed
81
82struct td028ttec1_panel {
83 struct drm_panel panel;
84
85 struct spi_device *spi;
86 struct backlight_device *backlight;
87};
88
89#define to_td028ttec1_device(p) container_of(p, struct td028ttec1_panel, panel)
90
91static int jbt_ret_write_0(struct td028ttec1_panel *lcd, u8 reg, int *err)
92{
93 struct spi_device *spi = lcd->spi;
94 u16 tx_buf = JBT_COMMAND | reg;
95 int ret;
96
97 if (err && *err)
98 return *err;
99
100 ret = spi_write(spi, (u8 *)&tx_buf, sizeof(tx_buf));
101 if (ret < 0) {
102 dev_err(&spi->dev, "%s: SPI write failed: %d\n", __func__, ret);
103 if (err)
104 *err = ret;
105 }
106
107 return ret;
108}
109
110static int jbt_reg_write_1(struct td028ttec1_panel *lcd,
111 u8 reg, u8 data, int *err)
112{
113 struct spi_device *spi = lcd->spi;
114 u16 tx_buf[2];
115 int ret;
116
117 if (err && *err)
118 return *err;
119
120 tx_buf[0] = JBT_COMMAND | reg;
121 tx_buf[1] = JBT_DATA | data;
122
123 ret = spi_write(spi, (u8 *)tx_buf, sizeof(tx_buf));
124 if (ret < 0) {
125 dev_err(&spi->dev, "%s: SPI write failed: %d\n", __func__, ret);
126 if (err)
127 *err = ret;
128 }
129
130 return ret;
131}
132
133static int jbt_reg_write_2(struct td028ttec1_panel *lcd,
134 u8 reg, u16 data, int *err)
135{
136 struct spi_device *spi = lcd->spi;
137 u16 tx_buf[3];
138 int ret;
139
140 if (err && *err)
141 return *err;
142
143 tx_buf[0] = JBT_COMMAND | reg;
144 tx_buf[1] = JBT_DATA | (data >> 8);
145 tx_buf[2] = JBT_DATA | (data & 0xff);
146
147 ret = spi_write(spi, (u8 *)tx_buf, sizeof(tx_buf));
148 if (ret < 0) {
149 dev_err(&spi->dev, "%s: SPI write failed: %d\n", __func__, ret);
150 if (err)
151 *err = ret;
152 }
153
154 return ret;
155}
156
157static int td028ttec1_prepare(struct drm_panel *panel)
158{
159 struct td028ttec1_panel *lcd = to_td028ttec1_device(panel);
160 unsigned int i;
161 int ret = 0;
162
163 /* Three times command zero */
164 for (i = 0; i < 3; ++i) {
165 jbt_ret_write_0(lcd, 0x00, &ret);
166 usleep_range(1000, 2000);
167 }
168
169 /* deep standby out */
170 jbt_reg_write_1(lcd, JBT_REG_POWER_ON_OFF, 0x17, &ret);
171
172 /* RGB I/F on, RAM write off, QVGA through, SIGCON enable */
173 jbt_reg_write_1(lcd, JBT_REG_DISPLAY_MODE, 0x80, &ret);
174
175 /* Quad mode off */
176 jbt_reg_write_1(lcd, JBT_REG_QUAD_RATE, 0x00, &ret);
177
178 /* AVDD on, XVDD on */
179 jbt_reg_write_1(lcd, JBT_REG_POWER_ON_OFF, 0x16, &ret);
180
181 /* Output control */
182 jbt_reg_write_2(lcd, JBT_REG_OUTPUT_CONTROL, 0xfff9, &ret);
183
184 /* Sleep mode off */
185 jbt_ret_write_0(lcd, JBT_REG_SLEEP_OUT, &ret);
186
187 /* at this point we have like 50% grey */
188
189 /* initialize register set */
190 jbt_reg_write_1(lcd, JBT_REG_DISPLAY_MODE1, 0x01, &ret);
191 jbt_reg_write_1(lcd, JBT_REG_DISPLAY_MODE2, 0x00, &ret);
192 jbt_reg_write_1(lcd, JBT_REG_RGB_FORMAT, 0x60, &ret);
193 jbt_reg_write_1(lcd, JBT_REG_DRIVE_SYSTEM, 0x10, &ret);
194 jbt_reg_write_1(lcd, JBT_REG_BOOSTER_OP, 0x56, &ret);
195 jbt_reg_write_1(lcd, JBT_REG_BOOSTER_MODE, 0x33, &ret);
196 jbt_reg_write_1(lcd, JBT_REG_BOOSTER_FREQ, 0x11, &ret);
197 jbt_reg_write_1(lcd, JBT_REG_BOOSTER_FREQ, 0x11, &ret);
198 jbt_reg_write_1(lcd, JBT_REG_OPAMP_SYSCLK, 0x02, &ret);
199 jbt_reg_write_1(lcd, JBT_REG_VSC_VOLTAGE, 0x2b, &ret);
200 jbt_reg_write_1(lcd, JBT_REG_VCOM_VOLTAGE, 0x40, &ret);
201 jbt_reg_write_1(lcd, JBT_REG_EXT_DISPL, 0x03, &ret);
202 jbt_reg_write_1(lcd, JBT_REG_DCCLK_DCEV, 0x04, &ret);
203 /*
204 * default of 0x02 in JBT_REG_ASW_SLEW responsible for 72Hz requirement
205 * to avoid red / blue flicker
206 */
207 jbt_reg_write_1(lcd, JBT_REG_ASW_SLEW, 0x04, &ret);
208 jbt_reg_write_1(lcd, JBT_REG_DUMMY_DISPLAY, 0x00, &ret);
209
210 jbt_reg_write_1(lcd, JBT_REG_SLEEP_OUT_FR_A, 0x11, &ret);
211 jbt_reg_write_1(lcd, JBT_REG_SLEEP_OUT_FR_B, 0x11, &ret);
212 jbt_reg_write_1(lcd, JBT_REG_SLEEP_OUT_FR_C, 0x11, &ret);
213 jbt_reg_write_2(lcd, JBT_REG_SLEEP_IN_LCCNT_D, 0x2040, &ret);
214 jbt_reg_write_2(lcd, JBT_REG_SLEEP_IN_LCCNT_E, 0x60c0, &ret);
215 jbt_reg_write_2(lcd, JBT_REG_SLEEP_IN_LCCNT_F, 0x1020, &ret);
216 jbt_reg_write_2(lcd, JBT_REG_SLEEP_IN_LCCNT_G, 0x60c0, &ret);
217
218 jbt_reg_write_2(lcd, JBT_REG_GAMMA1_FINE_1, 0x5533, &ret);
219 jbt_reg_write_1(lcd, JBT_REG_GAMMA1_FINE_2, 0x00, &ret);
220 jbt_reg_write_1(lcd, JBT_REG_GAMMA1_INCLINATION, 0x00, &ret);
221 jbt_reg_write_1(lcd, JBT_REG_GAMMA1_BLUE_OFFSET, 0x00, &ret);
222
223 jbt_reg_write_2(lcd, JBT_REG_HCLOCK_VGA, 0x1f0, &ret);
224 jbt_reg_write_1(lcd, JBT_REG_BLANK_CONTROL, 0x02, &ret);
225 jbt_reg_write_2(lcd, JBT_REG_BLANK_TH_TV, 0x0804, &ret);
226
227 jbt_reg_write_1(lcd, JBT_REG_CKV_ON_OFF, 0x01, &ret);
228 jbt_reg_write_2(lcd, JBT_REG_CKV_1_2, 0x0000, &ret);
229
230 jbt_reg_write_2(lcd, JBT_REG_OEV_TIMING, 0x0d0e, &ret);
231 jbt_reg_write_2(lcd, JBT_REG_ASW_TIMING_1, 0x11a4, &ret);
232 jbt_reg_write_1(lcd, JBT_REG_ASW_TIMING_2, 0x0e, &ret);
233
234 return ret;
235}
236
237static int td028ttec1_enable(struct drm_panel *panel)
238{
239 struct td028ttec1_panel *lcd = to_td028ttec1_device(panel);
240 int ret;
241
242 ret = jbt_ret_write_0(lcd, JBT_REG_DISPLAY_ON, NULL);
243 if (ret)
244 return ret;
245
246 backlight_enable(lcd->backlight);
247
248 return 0;
249}
250
251static int td028ttec1_disable(struct drm_panel *panel)
252{
253 struct td028ttec1_panel *lcd = to_td028ttec1_device(panel);
254
255 backlight_disable(lcd->backlight);
256
257 jbt_ret_write_0(lcd, JBT_REG_DISPLAY_OFF, NULL);
258
259 return 0;
260}
261
262static int td028ttec1_unprepare(struct drm_panel *panel)
263{
264 struct td028ttec1_panel *lcd = to_td028ttec1_device(panel);
265
266 jbt_reg_write_2(lcd, JBT_REG_OUTPUT_CONTROL, 0x8002, NULL);
267 jbt_ret_write_0(lcd, JBT_REG_SLEEP_IN, NULL);
268 jbt_reg_write_1(lcd, JBT_REG_POWER_ON_OFF, 0x00, NULL);
269
270 return 0;
271}
272
273static const struct drm_display_mode td028ttec1_mode = {
274 .clock = 22153,
275 .hdisplay = 480,
276 .hsync_start = 480 + 24,
277 .hsync_end = 480 + 24 + 8,
278 .htotal = 480 + 24 + 8 + 8,
279 .vdisplay = 640,
280 .vsync_start = 640 + 4,
281 .vsync_end = 640 + 4 + 2,
282 .vtotal = 640 + 4 + 2 + 2,
283 .vrefresh = 66,
284 .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
285 .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
286 .width_mm = 43,
287 .height_mm = 58,
288};
289
290static int td028ttec1_get_modes(struct drm_panel *panel)
291{
292 struct drm_connector *connector = panel->connector;
293 struct drm_display_mode *mode;
294
295 mode = drm_mode_duplicate(panel->drm, &td028ttec1_mode);
296 if (!mode)
297 return -ENOMEM;
298
299 drm_mode_set_name(mode);
300 drm_mode_probed_add(connector, mode);
301
302 connector->display_info.width_mm = td028ttec1_mode.width_mm;
303 connector->display_info.height_mm = td028ttec1_mode.height_mm;
304 /*
305 * FIXME: According to the datasheet sync signals are sampled on the
306 * rising edge of the clock, but the code running on the OpenMoko Neo
307 * FreeRunner and Neo 1973 indicates sampling on the falling edge. This
308 * should be tested on a real device.
309 */
310 connector->display_info.bus_flags = DRM_BUS_FLAG_DE_HIGH
311 | DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE
312 | DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE;
313
314 return 1;
315}
316
317static const struct drm_panel_funcs td028ttec1_funcs = {
318 .prepare = td028ttec1_prepare,
319 .enable = td028ttec1_enable,
320 .disable = td028ttec1_disable,
321 .unprepare = td028ttec1_unprepare,
322 .get_modes = td028ttec1_get_modes,
323};
324
325static int td028ttec1_probe(struct spi_device *spi)
326{
327 struct td028ttec1_panel *lcd;
328 int ret;
329
330 lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL);
331 if (!lcd)
332 return -ENOMEM;
333
334 spi_set_drvdata(spi, lcd);
335 lcd->spi = spi;
336
337 lcd->backlight = devm_of_find_backlight(&spi->dev);
338 if (IS_ERR(lcd->backlight))
339 return PTR_ERR(lcd->backlight);
340
341 spi->mode = SPI_MODE_3;
342 spi->bits_per_word = 9;
343
344 ret = spi_setup(spi);
345 if (ret < 0) {
346 dev_err(&spi->dev, "failed to setup SPI: %d\n", ret);
347 return ret;
348 }
349
350 drm_panel_init(&lcd->panel);
351 lcd->panel.dev = &lcd->spi->dev;
352 lcd->panel.funcs = &td028ttec1_funcs;
353
354 return drm_panel_add(&lcd->panel);
355}
356
357static int td028ttec1_remove(struct spi_device *spi)
358{
359 struct td028ttec1_panel *lcd = spi_get_drvdata(spi);
360
361 drm_panel_remove(&lcd->panel);
362 drm_panel_disable(&lcd->panel);
363 drm_panel_unprepare(&lcd->panel);
364
365 return 0;
366}
367
368static const struct of_device_id td028ttec1_of_match[] = {
369 { .compatible = "tpo,td028ttec1", },
370 /* DT backward compatibility. */
371 { .compatible = "toppoly,td028ttec1", },
372 { /* sentinel */ },
373};
374
375MODULE_DEVICE_TABLE(of, td028ttec1_of_match);
376
377static const struct spi_device_id td028ttec1_ids[] = {
378 { "tpo,td028ttec1", 0},
379 { "toppoly,td028ttec1", 0 },
380 { /* sentinel */ }
381};
382
383MODULE_DEVICE_TABLE(spi, td028ttec1_ids);
384
385static struct spi_driver td028ttec1_driver = {
386 .probe = td028ttec1_probe,
387 .remove = td028ttec1_remove,
388 .id_table = td028ttec1_ids,
389 .driver = {
390 .name = "panel-tpo-td028ttec1",
391 .of_match_table = td028ttec1_of_match,
392 },
393};
394
395module_spi_driver(td028ttec1_driver);
396
397MODULE_AUTHOR("H. Nikolaus Schaller <hns@goldelico.com>");
398MODULE_DESCRIPTION("Toppoly TD028TTEC1 panel driver");
399MODULE_LICENSE("GPL");
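Editor's note: the probe above configures the bus for 9-bit SPI words (spi->bits_per_word = 9, SPI_MODE_3), which is how the jbt_reg_write_* helpers distinguish command bytes from data bytes on this controller. The helpers themselves are not shown in this hunk, so the following is only a minimal sketch, assuming the JBT6K74-style convention that bit 8 is clear for a command word and set for a data word; the function name and exact framing are illustrative, not taken from the patch.

#include <linux/spi/spi.h>

/* Hypothetical sketch: one 9-bit command word followed by one 9-bit data
 * word (bit 8 set marks data). The SPI core carries 9-bit words in 16-bit
 * containers, so len is given in bytes. */
static int jbt_write_one_example(struct spi_device *spi, u8 reg, u8 data)
{
	u16 tx[2] = { reg, (1 << 8) | data };
	struct spi_transfer xfer = {
		.tx_buf = tx,
		.len = sizeof(tx),
		.bits_per_word = 9,
	};
	struct spi_message msg;

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);
	return spi_sync(spi, &msg);
}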
diff --git a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
new file mode 100644
index 000000000000..84370562910f
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
@@ -0,0 +1,509 @@
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Toppoly TD043MTEA1 Panel Driver
4 *
5 * Copyright (C) 2019 Texas Instruments Incorporated
6 *
7 * Based on the omapdrm-specific panel-tpo-td043mtea1 driver
8 *
9 * Author: Gražvydas Ignotas <notasas@gmail.com>
10 */
11
12#include <linux/delay.h>
13#include <linux/module.h>
14#include <linux/regulator/consumer.h>
15#include <linux/spi/spi.h>
16
17#include <drm/drm_connector.h>
18#include <drm/drm_modes.h>
19#include <drm/drm_panel.h>
20
21#define TPO_R02_MODE(x) ((x) & 7)
22#define TPO_R02_MODE_800x480 7
23#define TPO_R02_NCLK_RISING BIT(3)
24#define TPO_R02_HSYNC_HIGH BIT(4)
25#define TPO_R02_VSYNC_HIGH BIT(5)
26
27#define TPO_R03_NSTANDBY BIT(0)
28#define TPO_R03_EN_CP_CLK BIT(1)
29#define TPO_R03_EN_VGL_PUMP BIT(2)
30#define TPO_R03_EN_PWM BIT(3)
31#define TPO_R03_DRIVING_CAP_100 BIT(4)
32#define TPO_R03_EN_PRE_CHARGE BIT(6)
33#define TPO_R03_SOFTWARE_CTL BIT(7)
34
35#define TPO_R04_NFLIP_H BIT(0)
36#define TPO_R04_NFLIP_V BIT(1)
37#define TPO_R04_CP_CLK_FREQ_1H BIT(2)
38#define TPO_R04_VGL_FREQ_1H BIT(4)
39
40#define TPO_R03_VAL_NORMAL \
41 (TPO_R03_NSTANDBY | TPO_R03_EN_CP_CLK | TPO_R03_EN_VGL_PUMP | \
42 TPO_R03_EN_PWM | TPO_R03_DRIVING_CAP_100 | TPO_R03_EN_PRE_CHARGE | \
43 TPO_R03_SOFTWARE_CTL)
44
45#define TPO_R03_VAL_STANDBY \
46 (TPO_R03_DRIVING_CAP_100 | TPO_R03_EN_PRE_CHARGE | \
47 TPO_R03_SOFTWARE_CTL)
48
49static const u16 td043mtea1_def_gamma[12] = {
50 105, 315, 381, 431, 490, 537, 579, 686, 780, 837, 880, 1023
51};
52
53struct td043mtea1_panel {
54 struct drm_panel panel;
55
56 struct spi_device *spi;
57 struct regulator *vcc_reg;
58 struct gpio_desc *reset_gpio;
59
60 unsigned int mode;
61 u16 gamma[12];
62 bool vmirror;
63 bool powered_on;
64 bool spi_suspended;
65 bool power_on_resume;
66};
67
68#define to_td043mtea1_device(p) container_of(p, struct td043mtea1_panel, panel)
69
70/* -----------------------------------------------------------------------------
71 * Hardware Access
72 */
73
74static int td043mtea1_write(struct td043mtea1_panel *lcd, u8 addr, u8 value)
75{
76 struct spi_message msg;
77 struct spi_transfer xfer;
78 u16 data;
79 int ret;
80
81 spi_message_init(&msg);
82
83 memset(&xfer, 0, sizeof(xfer));
84
85 data = ((u16)addr << 10) | (1 << 8) | value;
86 xfer.tx_buf = &data;
87 xfer.bits_per_word = 16;
88 xfer.len = 2;
89 spi_message_add_tail(&xfer, &msg);
90
91 ret = spi_sync(lcd->spi, &msg);
92 if (ret < 0)
93 dev_warn(&lcd->spi->dev, "failed to write to LCD reg (%d)\n",
94 ret);
95
96 return ret;
97}
98
99static void td043mtea1_write_gamma(struct td043mtea1_panel *lcd)
100{
101 const u16 *gamma = lcd->gamma;
102 unsigned int i;
103 u8 val;
104
105 /* gamma bits [9:8] */
106 for (val = i = 0; i < 4; i++)
107 val |= (gamma[i] & 0x300) >> ((i + 1) * 2);
108 td043mtea1_write(lcd, 0x11, val);
109
110 for (val = i = 0; i < 4; i++)
111 val |= (gamma[i + 4] & 0x300) >> ((i + 1) * 2);
112 td043mtea1_write(lcd, 0x12, val);
113
114 for (val = i = 0; i < 4; i++)
115 val |= (gamma[i + 8] & 0x300) >> ((i + 1) * 2);
116 td043mtea1_write(lcd, 0x13, val);
117
118 /* gamma bits [7:0] */
119 for (i = 0; i < 12; i++)
120 td043mtea1_write(lcd, 0x14 + i, gamma[i] & 0xff);
121}
122
123static int td043mtea1_write_mirror(struct td043mtea1_panel *lcd)
124{
125 u8 reg4 = TPO_R04_NFLIP_H | TPO_R04_NFLIP_V |
126 TPO_R04_CP_CLK_FREQ_1H | TPO_R04_VGL_FREQ_1H;
127 if (lcd->vmirror)
128 reg4 &= ~TPO_R04_NFLIP_V;
129
130 return td043mtea1_write(lcd, 4, reg4);
131}
132
133static int td043mtea1_power_on(struct td043mtea1_panel *lcd)
134{
135 int ret;
136
137 if (lcd->powered_on)
138 return 0;
139
140 ret = regulator_enable(lcd->vcc_reg);
141 if (ret < 0)
142 return ret;
143
144 /* Wait for the panel to stabilize. */
145 msleep(160);
146
147 gpiod_set_value(lcd->reset_gpio, 0);
148
149 td043mtea1_write(lcd, 2, TPO_R02_MODE(lcd->mode) | TPO_R02_NCLK_RISING);
150 td043mtea1_write(lcd, 3, TPO_R03_VAL_NORMAL);
151 td043mtea1_write(lcd, 0x20, 0xf0);
152 td043mtea1_write(lcd, 0x21, 0xf0);
153 td043mtea1_write_mirror(lcd);
154 td043mtea1_write_gamma(lcd);
155
156 lcd->powered_on = true;
157
158 return 0;
159}
160
161static void td043mtea1_power_off(struct td043mtea1_panel *lcd)
162{
163 if (!lcd->powered_on)
164 return;
165
166 td043mtea1_write(lcd, 3, TPO_R03_VAL_STANDBY | TPO_R03_EN_PWM);
167
168 gpiod_set_value(lcd->reset_gpio, 1);
169
170 /* wait for at least 2 vsyncs before cutting off power */
171 msleep(50);
172
173 td043mtea1_write(lcd, 3, TPO_R03_VAL_STANDBY);
174
175 regulator_disable(lcd->vcc_reg);
176
177 lcd->powered_on = false;
178}
179
180/* -----------------------------------------------------------------------------
181 * sysfs
182 */
183
184static ssize_t vmirror_show(struct device *dev, struct device_attribute *attr,
185 char *buf)
186{
187 struct td043mtea1_panel *lcd = dev_get_drvdata(dev);
188
189 return snprintf(buf, PAGE_SIZE, "%d\n", lcd->vmirror);
190}
191
192static ssize_t vmirror_store(struct device *dev, struct device_attribute *attr,
193 const char *buf, size_t count)
194{
195 struct td043mtea1_panel *lcd = dev_get_drvdata(dev);
196 int val;
197 int ret;
198
199 ret = kstrtoint(buf, 0, &val);
200 if (ret < 0)
201 return ret;
202
203 lcd->vmirror = !!val;
204
205 ret = td043mtea1_write_mirror(lcd);
206 if (ret < 0)
207 return ret;
208
209 return count;
210}
211
212static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
213 char *buf)
214{
215 struct td043mtea1_panel *lcd = dev_get_drvdata(dev);
216
217 return snprintf(buf, PAGE_SIZE, "%d\n", lcd->mode);
218}
219
220static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
221 const char *buf, size_t count)
222{
223 struct td043mtea1_panel *lcd = dev_get_drvdata(dev);
224 long val;
225 int ret;
226
227 ret = kstrtol(buf, 0, &val);
228 if (ret != 0 || val & ~7)
229 return -EINVAL;
230
231 lcd->mode = val;
232
233 val |= TPO_R02_NCLK_RISING;
234 td043mtea1_write(lcd, 2, val);
235
236 return count;
237}
238
239static ssize_t gamma_show(struct device *dev, struct device_attribute *attr,
240 char *buf)
241{
242 struct td043mtea1_panel *lcd = dev_get_drvdata(dev);
243 ssize_t len = 0;
244 unsigned int i;
245 int ret;
246
247 for (i = 0; i < ARRAY_SIZE(lcd->gamma); i++) {
248 ret = snprintf(buf + len, PAGE_SIZE - len, "%u ",
249 lcd->gamma[i]);
250 if (ret < 0)
251 return ret;
252 len += ret;
253 }
254 buf[len - 1] = '\n';
255
256 return len;
257}
258
259static ssize_t gamma_store(struct device *dev, struct device_attribute *attr,
260 const char *buf, size_t count)
261{
262 struct td043mtea1_panel *lcd = dev_get_drvdata(dev);
263 unsigned int g[12];
264 unsigned int i;
265 int ret;
266
267 ret = sscanf(buf, "%u %u %u %u %u %u %u %u %u %u %u %u",
268 &g[0], &g[1], &g[2], &g[3], &g[4], &g[5],
269 &g[6], &g[7], &g[8], &g[9], &g[10], &g[11]);
270 if (ret != 12)
271 return -EINVAL;
272
273 for (i = 0; i < 12; i++)
274 lcd->gamma[i] = g[i];
275
276 td043mtea1_write_gamma(lcd);
277
278 return count;
279}
280
281static DEVICE_ATTR_RW(vmirror);
282static DEVICE_ATTR_RW(mode);
283static DEVICE_ATTR_RW(gamma);
284
285static struct attribute *td043mtea1_attrs[] = {
286 &dev_attr_vmirror.attr,
287 &dev_attr_mode.attr,
288 &dev_attr_gamma.attr,
289 NULL,
290};
291
292static const struct attribute_group td043mtea1_attr_group = {
293 .attrs = td043mtea1_attrs,
294};
295
296/* -----------------------------------------------------------------------------
297 * Panel Operations
298 */
299
300static int td043mtea1_unprepare(struct drm_panel *panel)
301{
302 struct td043mtea1_panel *lcd = to_td043mtea1_device(panel);
303
304 if (!lcd->spi_suspended)
305 td043mtea1_power_off(lcd);
306
307 return 0;
308}
309
310static int td043mtea1_prepare(struct drm_panel *panel)
311{
312 struct td043mtea1_panel *lcd = to_td043mtea1_device(panel);
313 int ret;
314
315 /*
316 * If we are resuming from system suspend, SPI might not be enabled
317	 * yet, so we'll program the LCD from the SPI PM resume callback.
318 */
319 if (lcd->spi_suspended)
320 return 0;
321
322 ret = td043mtea1_power_on(lcd);
323 if (ret) {
324 dev_err(&lcd->spi->dev, "%s: power on failed (%d)\n",
325 __func__, ret);
326 return ret;
327 }
328
329 return 0;
330}
331
332static const struct drm_display_mode td043mtea1_mode = {
333 .clock = 36000,
334 .hdisplay = 800,
335 .hsync_start = 800 + 68,
336 .hsync_end = 800 + 68 + 1,
337 .htotal = 800 + 68 + 1 + 214,
338 .vdisplay = 480,
339 .vsync_start = 480 + 39,
340 .vsync_end = 480 + 39 + 1,
341 .vtotal = 480 + 39 + 1 + 34,
342 .vrefresh = 60,
343 .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
344 .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
345 .width_mm = 94,
346 .height_mm = 56,
347};
348
349static int td043mtea1_get_modes(struct drm_panel *panel)
350{
351 struct drm_connector *connector = panel->connector;
352 struct drm_display_mode *mode;
353
354 mode = drm_mode_duplicate(panel->drm, &td043mtea1_mode);
355 if (!mode)
356 return -ENOMEM;
357
358 drm_mode_set_name(mode);
359 drm_mode_probed_add(connector, mode);
360
361 connector->display_info.width_mm = td043mtea1_mode.width_mm;
362 connector->display_info.height_mm = td043mtea1_mode.height_mm;
363 /*
364 * FIXME: According to the datasheet sync signals are sampled on the
365 * rising edge of the clock, but the code running on the OMAP3 Pandora
366 * indicates sampling on the falling edge. This should be tested on a
367 * real device.
368 */
369 connector->display_info.bus_flags = DRM_BUS_FLAG_DE_HIGH
370 | DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE
371 | DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE;
372
373 return 1;
374}
375
376static const struct drm_panel_funcs td043mtea1_funcs = {
377 .unprepare = td043mtea1_unprepare,
378 .prepare = td043mtea1_prepare,
379 .get_modes = td043mtea1_get_modes,
380};
381
382/* -----------------------------------------------------------------------------
383 * Power Management, Probe and Remove
384 */
385
386static int __maybe_unused td043mtea1_suspend(struct device *dev)
387{
388 struct td043mtea1_panel *lcd = dev_get_drvdata(dev);
389
390 if (lcd->powered_on) {
391 td043mtea1_power_off(lcd);
392 lcd->powered_on = true;
393 }
394
395 lcd->spi_suspended = true;
396
397 return 0;
398}
399
400static int __maybe_unused td043mtea1_resume(struct device *dev)
401{
402 struct td043mtea1_panel *lcd = dev_get_drvdata(dev);
403 int ret;
404
405 lcd->spi_suspended = false;
406
407 if (lcd->powered_on) {
408 lcd->powered_on = false;
409 ret = td043mtea1_power_on(lcd);
410 if (ret)
411 return ret;
412 }
413
414 return 0;
415}
416
417static SIMPLE_DEV_PM_OPS(td043mtea1_pm_ops, td043mtea1_suspend,
418 td043mtea1_resume);
419
420static int td043mtea1_probe(struct spi_device *spi)
421{
422 struct td043mtea1_panel *lcd;
423 int ret;
424
425 lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL);
426 if (lcd == NULL)
427 return -ENOMEM;
428
429 spi_set_drvdata(spi, lcd);
430 lcd->spi = spi;
431 lcd->mode = TPO_R02_MODE_800x480;
432 memcpy(lcd->gamma, td043mtea1_def_gamma, sizeof(lcd->gamma));
433
434 lcd->vcc_reg = devm_regulator_get(&spi->dev, "vcc");
435 if (IS_ERR(lcd->vcc_reg)) {
436 dev_err(&spi->dev, "failed to get VCC regulator\n");
437 return PTR_ERR(lcd->vcc_reg);
438 }
439
440 lcd->reset_gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_HIGH);
441 if (IS_ERR(lcd->reset_gpio)) {
442 dev_err(&spi->dev, "failed to get reset GPIO\n");
443 return PTR_ERR(lcd->reset_gpio);
444 }
445
446 spi->bits_per_word = 16;
447 spi->mode = SPI_MODE_0;
448
449 ret = spi_setup(spi);
450 if (ret < 0) {
451 dev_err(&spi->dev, "failed to setup SPI: %d\n", ret);
452 return ret;
453 }
454
455 ret = sysfs_create_group(&spi->dev.kobj, &td043mtea1_attr_group);
456 if (ret < 0) {
457 dev_err(&spi->dev, "failed to create sysfs files\n");
458 return ret;
459 }
460
461 drm_panel_init(&lcd->panel);
462 lcd->panel.dev = &lcd->spi->dev;
463 lcd->panel.funcs = &td043mtea1_funcs;
464
465 ret = drm_panel_add(&lcd->panel);
466 if (ret < 0) {
467 sysfs_remove_group(&spi->dev.kobj, &td043mtea1_attr_group);
468 return ret;
469 }
470
471 return 0;
472}
473
474static int td043mtea1_remove(struct spi_device *spi)
475{
476 struct td043mtea1_panel *lcd = spi_get_drvdata(spi);
477
478 drm_panel_remove(&lcd->panel);
479 drm_panel_disable(&lcd->panel);
480 drm_panel_unprepare(&lcd->panel);
481
482 sysfs_remove_group(&spi->dev.kobj, &td043mtea1_attr_group);
483
484 return 0;
485}
486
487static const struct of_device_id td043mtea1_of_match[] = {
488 { .compatible = "tpo,td043mtea1", },
489 { /* sentinel */ },
490};
491
492MODULE_DEVICE_TABLE(of, td043mtea1_of_match);
493
494static struct spi_driver td043mtea1_driver = {
495 .probe = td043mtea1_probe,
496 .remove = td043mtea1_remove,
497 .driver = {
498 .name = "panel-tpo-td043mtea1",
499 .pm = &td043mtea1_pm_ops,
500 .of_match_table = td043mtea1_of_match,
501 },
502};
503
504module_spi_driver(td043mtea1_driver);
505
506MODULE_ALIAS("spi:tpo,td043mtea1");
507MODULE_AUTHOR("Gražvydas Ignotas <notasas@gmail.com>");
508MODULE_DESCRIPTION("TPO TD043MTEA1 Panel Driver");
509MODULE_LICENSE("GPL");
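Editor's note: td043mtea1_write() above packs the register address and value into a single 16-bit SPI word: bits [15:10] carry the address, bit 8 is a fixed marker, and bits [7:0] carry the value. A worked example for the td043mtea1_write(lcd, 0x20, 0xf0) call from the power-on sequence:

/* Same packing as td043mtea1_write(): address in bits [15:10], fixed
 * marker in bit 8, value in bits [7:0]. */
u16 word = ((u16)0x20 << 10) | (1 << 8) | 0xf0;	/* 0x8000 | 0x0100 | 0x00f0 = 0x81f0 */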
diff --git a/drivers/gpu/drm/panfrost/Makefile b/drivers/gpu/drm/panfrost/Makefile
index ecf0864cb515..b71935862417 100644
--- a/drivers/gpu/drm/panfrost/Makefile
+++ b/drivers/gpu/drm/panfrost/Makefile
@@ -5,6 +5,7 @@ panfrost-y := \
5 panfrost_device.o \ 5 panfrost_device.o \
6 panfrost_devfreq.o \ 6 panfrost_devfreq.o \
7 panfrost_gem.o \ 7 panfrost_gem.o \
8 panfrost_gem_shrinker.o \
8 panfrost_gpu.o \ 9 panfrost_gpu.o \
9 panfrost_job.o \ 10 panfrost_job.o \
10 panfrost_mmu.o \ 11 panfrost_mmu.o \
diff --git a/drivers/gpu/drm/panfrost/TODO b/drivers/gpu/drm/panfrost/TODO
index c2e44add37d8..e7727b292355 100644
--- a/drivers/gpu/drm/panfrost/TODO
+++ b/drivers/gpu/drm/panfrost/TODO
@@ -6,22 +6,11 @@
6 - Bifrost specific feature and issue handling 6 - Bifrost specific feature and issue handling
7 - Coherent DMA support 7 - Coherent DMA support
8 8
9- Support for 2MB pages. The io-pgtable code already supports this. Finishing
10 support involves either copying or adapting the iommu API to handle passing
11 aligned addresses and sizes to the io-pgtable code.
12
13- Per FD address space support. The h/w supports multiple addresses spaces. 9- Per FD address space support. The h/w supports multiple addresses spaces.
14 The hard part is handling when more address spaces are needed than what 10 The hard part is handling when more address spaces are needed than what
15 the h/w provides. 11 the h/w provides.
16 12
17- Support pinning pages on demand (GPU page faults).
18
19- Support userspace controlled GPU virtual addresses. Needed for Vulkan. (Tomeu) 13- Support userspace controlled GPU virtual addresses. Needed for Vulkan. (Tomeu)
20 14
21- Support for madvise and a shrinker.
22
23- Compute job support. So called 'compute only' jobs need to be plumbed up to 15- Compute job support. So called 'compute only' jobs need to be plumbed up to
24 userspace. 16 userspace.
25
26- Performance counter support. (Boris)
27
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
index 8a111d7c0200..9814f4ccbd26 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -254,18 +254,22 @@ const char *panfrost_exception_name(struct panfrost_device *pfdev, u32 exception
254 return "UNKNOWN"; 254 return "UNKNOWN";
255} 255}
256 256
257void panfrost_device_reset(struct panfrost_device *pfdev)
258{
259 panfrost_gpu_soft_reset(pfdev);
260
261 panfrost_gpu_power_on(pfdev);
262 panfrost_mmu_reset(pfdev);
263 panfrost_job_enable_interrupts(pfdev);
264}
265
257#ifdef CONFIG_PM 266#ifdef CONFIG_PM
258int panfrost_device_resume(struct device *dev) 267int panfrost_device_resume(struct device *dev)
259{ 268{
260 struct platform_device *pdev = to_platform_device(dev); 269 struct platform_device *pdev = to_platform_device(dev);
261 struct panfrost_device *pfdev = platform_get_drvdata(pdev); 270 struct panfrost_device *pfdev = platform_get_drvdata(pdev);
262 271
263 panfrost_gpu_soft_reset(pfdev); 272 panfrost_device_reset(pfdev);
264
265 /* TODO: Re-enable all other address spaces */
266 panfrost_gpu_power_on(pfdev);
267 panfrost_mmu_enable(pfdev, 0);
268 panfrost_job_enable_interrupts(pfdev);
269 panfrost_devfreq_resume(pfdev); 273 panfrost_devfreq_resume(pfdev);
270 274
271 return 0; 275 return 0;
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index ea5948ff3647..4e5641db9c7e 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -85,6 +85,10 @@ struct panfrost_device {
85 struct mutex sched_lock; 85 struct mutex sched_lock;
86 struct mutex reset_lock; 86 struct mutex reset_lock;
87 87
88 struct mutex shrinker_lock;
89 struct list_head shrinker_list;
90 struct shrinker shrinker;
91
88 struct { 92 struct {
89 struct devfreq *devfreq; 93 struct devfreq *devfreq;
90 struct thermal_cooling_device *cooling; 94 struct thermal_cooling_device *cooling;
@@ -128,6 +132,7 @@ int panfrost_unstable_ioctl_check(void);
128 132
129int panfrost_device_init(struct panfrost_device *pfdev); 133int panfrost_device_init(struct panfrost_device *pfdev);
130void panfrost_device_fini(struct panfrost_device *pfdev); 134void panfrost_device_fini(struct panfrost_device *pfdev);
135void panfrost_device_reset(struct panfrost_device *pfdev);
131 136
132int panfrost_device_resume(struct device *dev); 137int panfrost_device_resume(struct device *dev);
133int panfrost_device_suspend(struct device *dev); 138int panfrost_device_suspend(struct device *dev);
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index b187daa4da85..b41754658681 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -78,29 +78,26 @@ static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct
78static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data, 78static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
79 struct drm_file *file) 79 struct drm_file *file)
80{ 80{
81 int ret; 81 struct panfrost_gem_object *bo;
82 struct drm_gem_shmem_object *shmem;
83 struct drm_panfrost_create_bo *args = data; 82 struct drm_panfrost_create_bo *args = data;
84 83
85 if (!args->size || args->flags || args->pad) 84 if (!args->size || args->pad ||
85 (args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP)))
86 return -EINVAL; 86 return -EINVAL;
87 87
88 shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, 88 /* Heaps should never be executable */
89 &args->handle); 89 if ((args->flags & PANFROST_BO_HEAP) &&
90 if (IS_ERR(shmem)) 90 !(args->flags & PANFROST_BO_NOEXEC))
91 return PTR_ERR(shmem); 91 return -EINVAL;
92 92
93 ret = panfrost_mmu_map(to_panfrost_bo(&shmem->base)); 93 bo = panfrost_gem_create_with_handle(file, dev, args->size, args->flags,
94 if (ret) 94 &args->handle);
95 goto err_free; 95 if (IS_ERR(bo))
96 return PTR_ERR(bo);
96 97
97 args->offset = to_panfrost_bo(&shmem->base)->node.start << PAGE_SHIFT; 98 args->offset = bo->node.start << PAGE_SHIFT;
98 99
99 return 0; 100 return 0;
100
101err_free:
102 drm_gem_handle_delete(file, args->handle);
103 return ret;
104} 101}
105 102
106/** 103/**
@@ -277,7 +274,7 @@ panfrost_ioctl_wait_bo(struct drm_device *dev, void *data,
277 if (!gem_obj) 274 if (!gem_obj)
278 return -ENOENT; 275 return -ENOENT;
279 276
280 ret = reservation_object_wait_timeout_rcu(gem_obj->resv, true, 277 ret = dma_resv_wait_timeout_rcu(gem_obj->resv, true,
281 true, timeout); 278 true, timeout);
282 if (!ret) 279 if (!ret)
283 ret = timeout ? -ETIMEDOUT : -EBUSY; 280 ret = timeout ? -ETIMEDOUT : -EBUSY;
@@ -305,6 +302,10 @@ static int panfrost_ioctl_mmap_bo(struct drm_device *dev, void *data,
305 return -ENOENT; 302 return -ENOENT;
306 } 303 }
307 304
305 /* Don't allow mmapping of heap objects as pages are not pinned. */
306 if (to_panfrost_bo(gem_obj)->is_heap)
307 return -EINVAL;
308
308 ret = drm_gem_create_mmap_offset(gem_obj); 309 ret = drm_gem_create_mmap_offset(gem_obj);
309 if (ret == 0) 310 if (ret == 0)
310 args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node); 311 args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
@@ -333,6 +334,38 @@ static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
333 return 0; 334 return 0;
334} 335}
335 336
337static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
338 struct drm_file *file_priv)
339{
340 struct drm_panfrost_madvise *args = data;
341 struct panfrost_device *pfdev = dev->dev_private;
342 struct drm_gem_object *gem_obj;
343
344 gem_obj = drm_gem_object_lookup(file_priv, args->handle);
345 if (!gem_obj) {
346 DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
347 return -ENOENT;
348 }
349
350 args->retained = drm_gem_shmem_madvise(gem_obj, args->madv);
351
352 if (args->retained) {
353 struct panfrost_gem_object *bo = to_panfrost_bo(gem_obj);
354
355 mutex_lock(&pfdev->shrinker_lock);
356
357 if (args->madv == PANFROST_MADV_DONTNEED)
358 list_add_tail(&bo->base.madv_list, &pfdev->shrinker_list);
359 else if (args->madv == PANFROST_MADV_WILLNEED)
360 list_del_init(&bo->base.madv_list);
361
362 mutex_unlock(&pfdev->shrinker_lock);
363 }
364
365 drm_gem_object_put_unlocked(gem_obj);
366 return 0;
367}
368
336int panfrost_unstable_ioctl_check(void) 369int panfrost_unstable_ioctl_check(void)
337{ 370{
338 if (!unstable_ioctls) 371 if (!unstable_ioctls)
@@ -341,6 +374,32 @@ int panfrost_unstable_ioctl_check(void)
341 return 0; 374 return 0;
342} 375}
343 376
377#define PFN_4G (SZ_4G >> PAGE_SHIFT)
378#define PFN_4G_MASK (PFN_4G - 1)
379#define PFN_16M (SZ_16M >> PAGE_SHIFT)
380
381static void panfrost_drm_mm_color_adjust(const struct drm_mm_node *node,
382 unsigned long color,
383 u64 *start, u64 *end)
384{
385 /* Executable buffers can't start or end on a 4GB boundary */
386 if (!(color & PANFROST_BO_NOEXEC)) {
387 u64 next_seg;
388
389 if ((*start & PFN_4G_MASK) == 0)
390 (*start)++;
391
392 if ((*end & PFN_4G_MASK) == 0)
393 (*end)--;
394
395 next_seg = ALIGN(*start, PFN_4G);
396 if (next_seg - *start <= PFN_16M)
397 *start = next_seg + 1;
398
399 *end = min(*end, ALIGN(*start, PFN_4G) - 1);
400 }
401}
402
344static int 403static int
345panfrost_open(struct drm_device *dev, struct drm_file *file) 404panfrost_open(struct drm_device *dev, struct drm_file *file)
346{ 405{
@@ -384,10 +443,16 @@ static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = {
384 PANFROST_IOCTL(GET_BO_OFFSET, get_bo_offset, DRM_RENDER_ALLOW), 443 PANFROST_IOCTL(GET_BO_OFFSET, get_bo_offset, DRM_RENDER_ALLOW),
385 PANFROST_IOCTL(PERFCNT_ENABLE, perfcnt_enable, DRM_RENDER_ALLOW), 444 PANFROST_IOCTL(PERFCNT_ENABLE, perfcnt_enable, DRM_RENDER_ALLOW),
386 PANFROST_IOCTL(PERFCNT_DUMP, perfcnt_dump, DRM_RENDER_ALLOW), 445 PANFROST_IOCTL(PERFCNT_DUMP, perfcnt_dump, DRM_RENDER_ALLOW),
446 PANFROST_IOCTL(MADVISE, madvise, DRM_RENDER_ALLOW),
387}; 447};
388 448
389DEFINE_DRM_GEM_SHMEM_FOPS(panfrost_drm_driver_fops); 449DEFINE_DRM_GEM_SHMEM_FOPS(panfrost_drm_driver_fops);
390 450
451/*
452 * Panfrost driver version:
453 * - 1.0 - initial interface
454 * - 1.1 - adds HEAP and NOEXEC flags for CREATE_BO
455 */
391static struct drm_driver panfrost_drm_driver = { 456static struct drm_driver panfrost_drm_driver = {
392 .driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ, 457 .driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
393 .open = panfrost_open, 458 .open = panfrost_open,
@@ -399,7 +464,7 @@ static struct drm_driver panfrost_drm_driver = {
399 .desc = "panfrost DRM", 464 .desc = "panfrost DRM",
400 .date = "20180908", 465 .date = "20180908",
401 .major = 1, 466 .major = 1,
402 .minor = 0, 467 .minor = 1,
403 468
404 .gem_create_object = panfrost_gem_create_object, 469 .gem_create_object = panfrost_gem_create_object,
405 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 470 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
@@ -432,9 +497,12 @@ static int panfrost_probe(struct platform_device *pdev)
432 pfdev->ddev = ddev; 497 pfdev->ddev = ddev;
433 498
434 spin_lock_init(&pfdev->mm_lock); 499 spin_lock_init(&pfdev->mm_lock);
500 mutex_init(&pfdev->shrinker_lock);
501 INIT_LIST_HEAD(&pfdev->shrinker_list);
435 502
436 /* 4G enough for now. can be 48-bit */ 503 /* 4G enough for now. can be 48-bit */
437 drm_mm_init(&pfdev->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT); 504 drm_mm_init(&pfdev->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT);
505 pfdev->mm.color_adjust = panfrost_drm_mm_color_adjust;
438 506
439 pm_runtime_use_autosuspend(pfdev->dev); 507 pm_runtime_use_autosuspend(pfdev->dev);
440 pm_runtime_set_autosuspend_delay(pfdev->dev, 50); /* ~3 frames */ 508 pm_runtime_set_autosuspend_delay(pfdev->dev, 50); /* ~3 frames */
@@ -462,6 +530,8 @@ static int panfrost_probe(struct platform_device *pdev)
462 if (err < 0) 530 if (err < 0)
463 goto err_out1; 531 goto err_out1;
464 532
533 panfrost_gem_shrinker_init(ddev);
534
465 return 0; 535 return 0;
466 536
467err_out1: 537err_out1:
@@ -478,6 +548,7 @@ static int panfrost_remove(struct platform_device *pdev)
478 struct drm_device *ddev = pfdev->ddev; 548 struct drm_device *ddev = pfdev->ddev;
479 549
480 drm_dev_unregister(ddev); 550 drm_dev_unregister(ddev);
551 panfrost_gem_shrinker_cleanup(ddev);
481 pm_runtime_get_sync(pfdev->dev); 552 pm_runtime_get_sync(pfdev->dev);
482 pm_runtime_put_sync_autosuspend(pfdev->dev); 553 pm_runtime_put_sync_autosuspend(pfdev->dev);
483 pm_runtime_disable(pfdev->dev); 554 pm_runtime_disable(pfdev->dev);
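Editor's note: with the driver minor bumped to 1.1, userspace can pass PANFROST_BO_HEAP | PANFROST_BO_NOEXEC to CREATE_BO and later toggle purgeability through the new MADVISE ioctl. The sketch below is a hypothetical userspace usage example, not part of the patch; it assumes the DRM_IOCTL_PANFROST_CREATE_BO and DRM_IOCTL_PANFROST_MADVISE request macros and the structure layouts from the updated include/uapi/drm/panfrost_drm.h.

#include <string.h>
#include <sys/ioctl.h>
#include <drm/panfrost_drm.h>

/* Allocate a growable heap BO (heaps must also be NOEXEC, as enforced by
 * panfrost_ioctl_create_bo()) and mark it purgeable while idle. */
static int create_purgeable_heap(int fd, __u32 size, __u32 *handle)
{
	struct drm_panfrost_create_bo create;
	struct drm_panfrost_madvise madv;
	int ret;

	memset(&create, 0, sizeof(create));
	create.size = size;
	create.flags = PANFROST_BO_HEAP | PANFROST_BO_NOEXEC;
	ret = ioctl(fd, DRM_IOCTL_PANFROST_CREATE_BO, &create);
	if (ret)
		return ret;

	memset(&madv, 0, sizeof(madv));
	madv.handle = create.handle;
	madv.madv = PANFROST_MADV_DONTNEED;	/* shrinker may now purge the pages */
	ret = ioctl(fd, DRM_IOCTL_PANFROST_MADVISE, &madv);
	if (ret)
		return ret;

	*handle = create.handle;
	return 0;
}

Before reusing the BO, userspace would flip it back with PANFROST_MADV_WILLNEED and check the returned 'retained' field to learn whether the contents survived.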
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index 543ab1b81bd5..e71f27c4041e 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -19,20 +19,92 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
19 struct panfrost_gem_object *bo = to_panfrost_bo(obj); 19 struct panfrost_gem_object *bo = to_panfrost_bo(obj);
20 struct panfrost_device *pfdev = obj->dev->dev_private; 20 struct panfrost_device *pfdev = obj->dev->dev_private;
21 21
22 if (bo->sgts) {
23 int i;
24 int n_sgt = bo->base.base.size / SZ_2M;
25
26 for (i = 0; i < n_sgt; i++) {
27 if (bo->sgts[i].sgl) {
28 dma_unmap_sg(pfdev->dev, bo->sgts[i].sgl,
29 bo->sgts[i].nents, DMA_BIDIRECTIONAL);
30 sg_free_table(&bo->sgts[i]);
31 }
32 }
33 kfree(bo->sgts);
34 }
35
36 mutex_lock(&pfdev->shrinker_lock);
37 if (!list_empty(&bo->base.madv_list))
38 list_del(&bo->base.madv_list);
39 mutex_unlock(&pfdev->shrinker_lock);
40
41 drm_gem_shmem_free_object(obj);
42}
43
44static int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
45{
46 int ret;
47 size_t size = obj->size;
48 u64 align;
49 struct panfrost_gem_object *bo = to_panfrost_bo(obj);
50 struct panfrost_device *pfdev = obj->dev->dev_private;
51 unsigned long color = bo->noexec ? PANFROST_BO_NOEXEC : 0;
52
53 /*
54 * Executable buffers cannot cross a 16MB boundary as the program
55 * counter is 24-bits. We assume executable buffers will be less than
56 * 16MB and aligning executable buffers to their size will avoid
57 * crossing a 16MB boundary.
58 */
59 if (!bo->noexec)
60 align = size >> PAGE_SHIFT;
61 else
62 align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;
63
64 spin_lock(&pfdev->mm_lock);
65 ret = drm_mm_insert_node_generic(&pfdev->mm, &bo->node,
66 size >> PAGE_SHIFT, align, color, 0);
67 if (ret)
68 goto out;
69
70 if (!bo->is_heap) {
71 ret = panfrost_mmu_map(bo);
72 if (ret)
73 drm_mm_remove_node(&bo->node);
74 }
75out:
76 spin_unlock(&pfdev->mm_lock);
77 return ret;
78}
79
80static void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
81{
82 struct panfrost_gem_object *bo = to_panfrost_bo(obj);
83 struct panfrost_device *pfdev = obj->dev->dev_private;
84
22 if (bo->is_mapped) 85 if (bo->is_mapped)
23 panfrost_mmu_unmap(bo); 86 panfrost_mmu_unmap(bo);
24 87
25 spin_lock(&pfdev->mm_lock); 88 spin_lock(&pfdev->mm_lock);
26 drm_mm_remove_node(&bo->node); 89 if (drm_mm_node_allocated(&bo->node))
90 drm_mm_remove_node(&bo->node);
27 spin_unlock(&pfdev->mm_lock); 91 spin_unlock(&pfdev->mm_lock);
92}
28 93
29 drm_gem_shmem_free_object(obj); 94static int panfrost_gem_pin(struct drm_gem_object *obj)
95{
96 if (to_panfrost_bo(obj)->is_heap)
97 return -EINVAL;
98
99 return drm_gem_shmem_pin(obj);
30} 100}
31 101
32static const struct drm_gem_object_funcs panfrost_gem_funcs = { 102static const struct drm_gem_object_funcs panfrost_gem_funcs = {
33 .free = panfrost_gem_free_object, 103 .free = panfrost_gem_free_object,
104 .open = panfrost_gem_open,
105 .close = panfrost_gem_close,
34 .print_info = drm_gem_shmem_print_info, 106 .print_info = drm_gem_shmem_print_info,
35 .pin = drm_gem_shmem_pin, 107 .pin = panfrost_gem_pin,
36 .unpin = drm_gem_shmem_unpin, 108 .unpin = drm_gem_shmem_unpin,
37 .get_sg_table = drm_gem_shmem_get_sg_table, 109 .get_sg_table = drm_gem_shmem_get_sg_table,
38 .vmap = drm_gem_shmem_vmap, 110 .vmap = drm_gem_shmem_vmap,
@@ -50,10 +122,7 @@ static const struct drm_gem_object_funcs panfrost_gem_funcs = {
50 */ 122 */
51struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size) 123struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size)
52{ 124{
53 int ret;
54 struct panfrost_device *pfdev = dev->dev_private;
55 struct panfrost_gem_object *obj; 125 struct panfrost_gem_object *obj;
56 u64 align;
57 126
58 obj = kzalloc(sizeof(*obj), GFP_KERNEL); 127 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
59 if (!obj) 128 if (!obj)
@@ -61,21 +130,42 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t
61 130
62 obj->base.base.funcs = &panfrost_gem_funcs; 131 obj->base.base.funcs = &panfrost_gem_funcs;
63 132
64 size = roundup(size, PAGE_SIZE); 133 return &obj->base.base;
65 align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0; 134}
66 135
67 spin_lock(&pfdev->mm_lock); 136struct panfrost_gem_object *
68 ret = drm_mm_insert_node_generic(&pfdev->mm, &obj->node, 137panfrost_gem_create_with_handle(struct drm_file *file_priv,
69 size >> PAGE_SHIFT, align, 0, 0); 138 struct drm_device *dev, size_t size,
70 spin_unlock(&pfdev->mm_lock); 139 u32 flags,
140 uint32_t *handle)
141{
142 int ret;
143 struct drm_gem_shmem_object *shmem;
144 struct panfrost_gem_object *bo;
145
146 /* Round up heap allocations to 2MB to keep fault handling simple */
147 if (flags & PANFROST_BO_HEAP)
148 size = roundup(size, SZ_2M);
149
150 shmem = drm_gem_shmem_create(dev, size);
151 if (IS_ERR(shmem))
152 return ERR_CAST(shmem);
153
154 bo = to_panfrost_bo(&shmem->base);
155 bo->noexec = !!(flags & PANFROST_BO_NOEXEC);
156 bo->is_heap = !!(flags & PANFROST_BO_HEAP);
157
158 /*
159	 * Allocate an ID in the IDR table where the object is registered,
160	 * and return that ID in the handle visible to userspace.
161 */
162 ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
163 /* drop reference from allocate - handle holds it now. */
164 drm_gem_object_put_unlocked(&shmem->base);
71 if (ret) 165 if (ret)
72 goto free_obj; 166 return ERR_PTR(ret);
73
74 return &obj->base.base;
75 167
76free_obj: 168 return bo;
77 kfree(obj);
78 return ERR_PTR(ret);
79} 169}
80 170
81struct drm_gem_object * 171struct drm_gem_object *
@@ -84,15 +174,14 @@ panfrost_gem_prime_import_sg_table(struct drm_device *dev,
84 struct sg_table *sgt) 174 struct sg_table *sgt)
85{ 175{
86 struct drm_gem_object *obj; 176 struct drm_gem_object *obj;
87 struct panfrost_gem_object *pobj; 177 struct panfrost_gem_object *bo;
88 178
89 obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt); 179 obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
90 if (IS_ERR(obj)) 180 if (IS_ERR(obj))
91 return ERR_CAST(obj); 181 return ERR_CAST(obj);
92 182
93 pobj = to_panfrost_bo(obj); 183 bo = to_panfrost_bo(obj);
94 184 bo->noexec = true;
95 panfrost_mmu_map(pobj);
96 185
97 return obj; 186 return obj;
98} 187}
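Editor's note: panfrost_gem_open() above picks the drm_mm alignment from the BO type. Executable buffers are aligned to their own size so they never straddle a 16 MiB window of the 24-bit program counter (and panfrost_drm_mm_color_adjust() additionally keeps them off 4 GiB boundaries), while other buffers fall back to 2 MiB alignment once they are at least 2 MiB. A small worked sketch, assuming 4 KiB pages; the function is illustrative only.

#include <linux/sizes.h>

/* Worked example of the alignment rule in panfrost_gem_open(), assuming
 * PAGE_SHIFT == 12 and a 1 MiB buffer. */
static u64 example_alignment(size_t size, bool executable)
{
	if (executable)
		return size >> PAGE_SHIFT;	/* 1 MiB -> 256 pages: size-aligned,
						 * so the BO stays inside one 16 MiB
						 * window of the 24-bit PC */

	return size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;	/* 0 for a 1 MiB BO */
}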
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
index 6dbcaba020fc..e10f58316915 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.h
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
@@ -9,9 +9,12 @@
9 9
10struct panfrost_gem_object { 10struct panfrost_gem_object {
11 struct drm_gem_shmem_object base; 11 struct drm_gem_shmem_object base;
12 struct sg_table *sgts;
12 13
13 struct drm_mm_node node; 14 struct drm_mm_node node;
14 bool is_mapped; 15 bool is_mapped :1;
16 bool noexec :1;
17 bool is_heap :1;
15}; 18};
16 19
17static inline 20static inline
@@ -20,6 +23,12 @@ struct panfrost_gem_object *to_panfrost_bo(struct drm_gem_object *obj)
20 return container_of(to_drm_gem_shmem_obj(obj), struct panfrost_gem_object, base); 23 return container_of(to_drm_gem_shmem_obj(obj), struct panfrost_gem_object, base);
21} 24}
22 25
26static inline
27struct panfrost_gem_object *drm_mm_node_to_panfrost_bo(struct drm_mm_node *node)
28{
29 return container_of(node, struct panfrost_gem_object, node);
30}
31
23struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size); 32struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size);
24 33
25struct drm_gem_object * 34struct drm_gem_object *
@@ -27,4 +36,13 @@ panfrost_gem_prime_import_sg_table(struct drm_device *dev,
27 struct dma_buf_attachment *attach, 36 struct dma_buf_attachment *attach,
28 struct sg_table *sgt); 37 struct sg_table *sgt);
29 38
39struct panfrost_gem_object *
40panfrost_gem_create_with_handle(struct drm_file *file_priv,
41 struct drm_device *dev, size_t size,
42 u32 flags,
43 uint32_t *handle);
44
45void panfrost_gem_shrinker_init(struct drm_device *dev);
46void panfrost_gem_shrinker_cleanup(struct drm_device *dev);
47
30#endif /* __PANFROST_GEM_H__ */ 48#endif /* __PANFROST_GEM_H__ */
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
new file mode 100644
index 000000000000..d191632b6197
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
@@ -0,0 +1,107 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (C) 2019 Arm Ltd.
3 *
4 * Based on msm_gem_freedreno.c:
5 * Copyright (C) 2016 Red Hat
6 * Author: Rob Clark <robdclark@gmail.com>
7 */
8
9#include <linux/list.h>
10
11#include <drm/drm_device.h>
12#include <drm/drm_gem_shmem_helper.h>
13
14#include "panfrost_device.h"
15#include "panfrost_gem.h"
16#include "panfrost_mmu.h"
17
18static unsigned long
19panfrost_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
20{
21 struct panfrost_device *pfdev =
22 container_of(shrinker, struct panfrost_device, shrinker);
23 struct drm_gem_shmem_object *shmem;
24 unsigned long count = 0;
25
26 if (!mutex_trylock(&pfdev->shrinker_lock))
27 return 0;
28
29 list_for_each_entry(shmem, &pfdev->shrinker_list, madv_list) {
30 if (drm_gem_shmem_is_purgeable(shmem))
31 count += shmem->base.size >> PAGE_SHIFT;
32 }
33
34 mutex_unlock(&pfdev->shrinker_lock);
35
36 return count;
37}
38
39static void panfrost_gem_purge(struct drm_gem_object *obj)
40{
41 struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
42 mutex_lock(&shmem->pages_lock);
43
44 panfrost_mmu_unmap(to_panfrost_bo(obj));
45 drm_gem_shmem_purge_locked(obj);
46
47 mutex_unlock(&shmem->pages_lock);
48}
49
50static unsigned long
51panfrost_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
52{
53 struct panfrost_device *pfdev =
54 container_of(shrinker, struct panfrost_device, shrinker);
55 struct drm_gem_shmem_object *shmem, *tmp;
56 unsigned long freed = 0;
57
58 if (!mutex_trylock(&pfdev->shrinker_lock))
59 return SHRINK_STOP;
60
61 list_for_each_entry_safe(shmem, tmp, &pfdev->shrinker_list, madv_list) {
62 if (freed >= sc->nr_to_scan)
63 break;
64 if (drm_gem_shmem_is_purgeable(shmem)) {
65 panfrost_gem_purge(&shmem->base);
66 freed += shmem->base.size >> PAGE_SHIFT;
67 list_del_init(&shmem->madv_list);
68 }
69 }
70
71 mutex_unlock(&pfdev->shrinker_lock);
72
73 if (freed > 0)
74 pr_info_ratelimited("Purging %lu bytes\n", freed << PAGE_SHIFT);
75
76 return freed;
77}
78
79/**
80 * panfrost_gem_shrinker_init - Initialize panfrost shrinker
81 * @dev: DRM device
82 *
83 * This function registers and sets up the panfrost shrinker.
84 */
85void panfrost_gem_shrinker_init(struct drm_device *dev)
86{
87 struct panfrost_device *pfdev = dev->dev_private;
88 pfdev->shrinker.count_objects = panfrost_gem_shrinker_count;
89 pfdev->shrinker.scan_objects = panfrost_gem_shrinker_scan;
90 pfdev->shrinker.seeks = DEFAULT_SEEKS;
91 WARN_ON(register_shrinker(&pfdev->shrinker));
92}
93
94/**
95 * panfrost_gem_shrinker_cleanup - Clean up panfrost shrinker
96 * @dev: DRM device
97 *
98 * This function unregisters the panfrost shrinker.
99 */
100void panfrost_gem_shrinker_cleanup(struct drm_device *dev)
101{
102 struct panfrost_device *pfdev = dev->dev_private;
103
104 if (pfdev->shrinker.nr_deferred) {
105 unregister_shrinker(&pfdev->shrinker);
106 }
107}
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 9bb9260d9181..0fc4539fd08d 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -6,7 +6,7 @@
6#include <linux/io.h> 6#include <linux/io.h>
7#include <linux/platform_device.h> 7#include <linux/platform_device.h>
8#include <linux/pm_runtime.h> 8#include <linux/pm_runtime.h>
9#include <linux/reservation.h> 9#include <linux/dma-resv.h>
10#include <drm/gpu_scheduler.h> 10#include <drm/gpu_scheduler.h>
11#include <drm/panfrost_drm.h> 11#include <drm/panfrost_drm.h>
12 12
@@ -199,7 +199,7 @@ static void panfrost_acquire_object_fences(struct drm_gem_object **bos,
199 int i; 199 int i;
200 200
201 for (i = 0; i < bo_count; i++) 201 for (i = 0; i < bo_count; i++)
202 implicit_fences[i] = reservation_object_get_excl_rcu(bos[i]->resv); 202 implicit_fences[i] = dma_resv_get_excl_rcu(bos[i]->resv);
203} 203}
204 204
205static void panfrost_attach_object_fences(struct drm_gem_object **bos, 205static void panfrost_attach_object_fences(struct drm_gem_object **bos,
@@ -209,7 +209,7 @@ static void panfrost_attach_object_fences(struct drm_gem_object **bos,
209 int i; 209 int i;
210 210
211 for (i = 0; i < bo_count; i++) 211 for (i = 0; i < bo_count; i++)
212 reservation_object_add_excl_fence(bos[i]->resv, fence); 212 dma_resv_add_excl_fence(bos[i]->resv, fence);
213} 213}
214 214
215int panfrost_job_push(struct panfrost_job *job) 215int panfrost_job_push(struct panfrost_job *job)
@@ -395,12 +395,7 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job)
395 /* panfrost_core_dump(pfdev); */ 395 /* panfrost_core_dump(pfdev); */
396 396
397 panfrost_devfreq_record_transition(pfdev, js); 397 panfrost_devfreq_record_transition(pfdev, js);
398 panfrost_gpu_soft_reset(pfdev); 398 panfrost_device_reset(pfdev);
399
400 /* TODO: Re-enable all other address spaces */
401 panfrost_mmu_enable(pfdev, 0);
402 panfrost_gpu_power_on(pfdev);
403 panfrost_job_enable_interrupts(pfdev);
404 399
405 for (i = 0; i < NUM_JOB_SLOTS; i++) 400 for (i = 0; i < NUM_JOB_SLOTS; i++)
406 drm_sched_resubmit_jobs(&pfdev->js->queue[i].sched); 401 drm_sched_resubmit_jobs(&pfdev->js->queue[i].sched);
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 92ac995dd9c6..2ed411f09d80 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -2,6 +2,7 @@
2/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */ 2/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
3#include <linux/bitfield.h> 3#include <linux/bitfield.h>
4#include <linux/delay.h> 4#include <linux/delay.h>
5#include <linux/dma-mapping.h>
5#include <linux/interrupt.h> 6#include <linux/interrupt.h>
6#include <linux/io.h> 7#include <linux/io.h>
7#include <linux/iopoll.h> 8#include <linux/iopoll.h>
@@ -9,6 +10,7 @@
9#include <linux/iommu.h> 10#include <linux/iommu.h>
10#include <linux/platform_device.h> 11#include <linux/platform_device.h>
11#include <linux/pm_runtime.h> 12#include <linux/pm_runtime.h>
13#include <linux/shmem_fs.h>
12#include <linux/sizes.h> 14#include <linux/sizes.h>
13 15
14#include "panfrost_device.h" 16#include "panfrost_device.h"
@@ -105,15 +107,12 @@ static int mmu_hw_do_operation(struct panfrost_device *pfdev, u32 as_nr,
105 return ret; 107 return ret;
106} 108}
107 109
108void panfrost_mmu_enable(struct panfrost_device *pfdev, u32 as_nr) 110static void panfrost_mmu_enable(struct panfrost_device *pfdev, u32 as_nr)
109{ 111{
110 struct io_pgtable_cfg *cfg = &pfdev->mmu->pgtbl_cfg; 112 struct io_pgtable_cfg *cfg = &pfdev->mmu->pgtbl_cfg;
111 u64 transtab = cfg->arm_mali_lpae_cfg.transtab; 113 u64 transtab = cfg->arm_mali_lpae_cfg.transtab;
112 u64 memattr = cfg->arm_mali_lpae_cfg.memattr; 114 u64 memattr = cfg->arm_mali_lpae_cfg.memattr;
113 115
114 mmu_write(pfdev, MMU_INT_CLEAR, ~0);
115 mmu_write(pfdev, MMU_INT_MASK, ~0);
116
117 mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xffffffffUL); 116 mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xffffffffUL);
118 mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32); 117 mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32);
119 118
@@ -137,6 +136,14 @@ static void mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
137 write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE); 136 write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
138} 137}
139 138
139void panfrost_mmu_reset(struct panfrost_device *pfdev)
140{
141 panfrost_mmu_enable(pfdev, 0);
142
143 mmu_write(pfdev, MMU_INT_CLEAR, ~0);
144 mmu_write(pfdev, MMU_INT_MASK, ~0);
145}
146
140static size_t get_pgsize(u64 addr, size_t size) 147static size_t get_pgsize(u64 addr, size_t size)
141{ 148{
142 if (addr & (SZ_2M - 1) || size < SZ_2M) 149 if (addr & (SZ_2M - 1) || size < SZ_2M)
@@ -145,27 +152,13 @@ static size_t get_pgsize(u64 addr, size_t size)
145 return SZ_2M; 152 return SZ_2M;
146} 153}
147 154
148int panfrost_mmu_map(struct panfrost_gem_object *bo) 155static int mmu_map_sg(struct panfrost_device *pfdev, u64 iova,
156 int prot, struct sg_table *sgt)
149{ 157{
150 struct drm_gem_object *obj = &bo->base.base;
151 struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
152 struct io_pgtable_ops *ops = pfdev->mmu->pgtbl_ops;
153 u64 iova = bo->node.start << PAGE_SHIFT;
154 unsigned int count; 158 unsigned int count;
155 struct scatterlist *sgl; 159 struct scatterlist *sgl;
156 struct sg_table *sgt; 160 struct io_pgtable_ops *ops = pfdev->mmu->pgtbl_ops;
157 int ret; 161 u64 start_iova = iova;
158
159 if (WARN_ON(bo->is_mapped))
160 return 0;
161
162 sgt = drm_gem_shmem_get_pages_sgt(obj);
163 if (WARN_ON(IS_ERR(sgt)))
164 return PTR_ERR(sgt);
165
166 ret = pm_runtime_get_sync(pfdev->dev);
167 if (ret < 0)
168 return ret;
169 162
170 mutex_lock(&pfdev->mmu->lock); 163 mutex_lock(&pfdev->mmu->lock);
171 164
@@ -178,18 +171,45 @@ int panfrost_mmu_map(struct panfrost_gem_object *bo)
178 while (len) { 171 while (len) {
179 size_t pgsize = get_pgsize(iova | paddr, len); 172 size_t pgsize = get_pgsize(iova | paddr, len);
180 173
181 ops->map(ops, iova, paddr, pgsize, IOMMU_WRITE | IOMMU_READ); 174 ops->map(ops, iova, paddr, pgsize, prot);
182 iova += pgsize; 175 iova += pgsize;
183 paddr += pgsize; 176 paddr += pgsize;
184 len -= pgsize; 177 len -= pgsize;
185 } 178 }
186 } 179 }
187 180
188 mmu_hw_do_operation(pfdev, 0, bo->node.start << PAGE_SHIFT, 181 mmu_hw_do_operation(pfdev, 0, start_iova, iova - start_iova,
189 bo->node.size << PAGE_SHIFT, AS_COMMAND_FLUSH_PT); 182 AS_COMMAND_FLUSH_PT);
190 183
191 mutex_unlock(&pfdev->mmu->lock); 184 mutex_unlock(&pfdev->mmu->lock);
192 185
186 return 0;
187}
188
189int panfrost_mmu_map(struct panfrost_gem_object *bo)
190{
191 struct drm_gem_object *obj = &bo->base.base;
192 struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
193 struct sg_table *sgt;
194 int ret;
195 int prot = IOMMU_READ | IOMMU_WRITE;
196
197 if (WARN_ON(bo->is_mapped))
198 return 0;
199
200 if (bo->noexec)
201 prot |= IOMMU_NOEXEC;
202
203 sgt = drm_gem_shmem_get_pages_sgt(obj);
204 if (WARN_ON(IS_ERR(sgt)))
205 return PTR_ERR(sgt);
206
207 ret = pm_runtime_get_sync(pfdev->dev);
208 if (ret < 0)
209 return ret;
210
211 mmu_map_sg(pfdev, bo->node.start << PAGE_SHIFT, prot, sgt);
212
193 pm_runtime_mark_last_busy(pfdev->dev); 213 pm_runtime_mark_last_busy(pfdev->dev);
194 pm_runtime_put_autosuspend(pfdev->dev); 214 pm_runtime_put_autosuspend(pfdev->dev);
195 bo->is_mapped = true; 215 bo->is_mapped = true;
@@ -222,12 +242,12 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
222 size_t unmapped_page; 242 size_t unmapped_page;
223 size_t pgsize = get_pgsize(iova, len - unmapped_len); 243 size_t pgsize = get_pgsize(iova, len - unmapped_len);
224 244
225 unmapped_page = ops->unmap(ops, iova, pgsize); 245 if (ops->iova_to_phys(ops, iova)) {
226 if (!unmapped_page) 246 unmapped_page = ops->unmap(ops, iova, pgsize);
227 break; 247 WARN_ON(unmapped_page != pgsize);
228 248 }
229 iova += unmapped_page; 249 iova += pgsize;
230 unmapped_len += unmapped_page; 250 unmapped_len += pgsize;
231 } 251 }
232 252
233 mmu_hw_do_operation(pfdev, 0, bo->node.start << PAGE_SHIFT, 253 mmu_hw_do_operation(pfdev, 0, bo->node.start << PAGE_SHIFT,
@@ -263,6 +283,105 @@ static const struct iommu_gather_ops mmu_tlb_ops = {
263 .tlb_sync = mmu_tlb_sync_context, 283 .tlb_sync = mmu_tlb_sync_context,
264}; 284};
265 285
286static struct drm_mm_node *addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr)
287{
288 struct drm_mm_node *node;
289 u64 offset = addr >> PAGE_SHIFT;
290
291 drm_mm_for_each_node(node, &pfdev->mm) {
292 if (offset >= node->start && offset < (node->start + node->size))
293 return node;
294 }
295 return NULL;
296}
297
298#define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)
299
300int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, u64 addr)
301{
302 int ret, i;
303 struct drm_mm_node *node;
304 struct panfrost_gem_object *bo;
305 struct address_space *mapping;
306 pgoff_t page_offset;
307 struct sg_table *sgt;
308 struct page **pages;
309
310 node = addr_to_drm_mm_node(pfdev, as, addr);
311 if (!node)
312 return -ENOENT;
313
314 bo = drm_mm_node_to_panfrost_bo(node);
315 if (!bo->is_heap) {
316 dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
317 node->start << PAGE_SHIFT);
318 return -EINVAL;
319 }
320 /* Assume 2MB alignment and size multiple */
321 addr &= ~((u64)SZ_2M - 1);
322 page_offset = addr >> PAGE_SHIFT;
323 page_offset -= node->start;
324
325 mutex_lock(&bo->base.pages_lock);
326
327 if (!bo->base.pages) {
328 bo->sgts = kvmalloc_array(bo->base.base.size / SZ_2M,
329 sizeof(struct sg_table), GFP_KERNEL | __GFP_ZERO);
330 if (!bo->sgts)
331 return -ENOMEM;
332
333 pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
334 sizeof(struct page *), GFP_KERNEL | __GFP_ZERO);
335 if (!pages) {
336 kfree(bo->sgts);
337 bo->sgts = NULL;
338 return -ENOMEM;
339 }
340 bo->base.pages = pages;
341 bo->base.pages_use_count = 1;
342 } else
343 pages = bo->base.pages;
344
345 mapping = bo->base.base.filp->f_mapping;
346 mapping_set_unevictable(mapping);
347
348 for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
349 pages[i] = shmem_read_mapping_page(mapping, i);
350 if (IS_ERR(pages[i])) {
351 mutex_unlock(&bo->base.pages_lock);
352 ret = PTR_ERR(pages[i]);
353 goto err_pages;
354 }
355 }
356
357 mutex_unlock(&bo->base.pages_lock);
358
359 sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)];
360 ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
361 NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL);
362 if (ret)
363 goto err_pages;
364
365 if (!dma_map_sg(pfdev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL)) {
366 ret = -EINVAL;
367 goto err_map;
368 }
369
370 mmu_map_sg(pfdev, addr, IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
371
372 bo->is_mapped = true;
373
374 dev_dbg(pfdev->dev, "mapped page fault @ %llx", addr);
375
376 return 0;
377
378err_map:
379 sg_free_table(sgt);
380err_pages:
381 drm_gem_shmem_put_pages(&bo->base);
382 return ret;
383}
384
266static const char *access_type_name(struct panfrost_device *pfdev, 385static const char *access_type_name(struct panfrost_device *pfdev,
267 u32 fault_status) 386 u32 fault_status)
268{ 387{
@@ -287,13 +406,19 @@ static const char *access_type_name(struct panfrost_device *pfdev,
287static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data) 406static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
288{ 407{
289 struct panfrost_device *pfdev = data; 408 struct panfrost_device *pfdev = data;
290 u32 status = mmu_read(pfdev, MMU_INT_STAT);
291 int i;
292 409
293 if (!status) 410 if (!mmu_read(pfdev, MMU_INT_STAT))
294 return IRQ_NONE; 411 return IRQ_NONE;
295 412
296 dev_err(pfdev->dev, "mmu irq status=%x\n", status); 413 mmu_write(pfdev, MMU_INT_MASK, 0);
414 return IRQ_WAKE_THREAD;
415}
416
417static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
418{
419 struct panfrost_device *pfdev = data;
420 u32 status = mmu_read(pfdev, MMU_INT_RAWSTAT);
421 int i, ret;
297 422
298 for (i = 0; status; i++) { 423 for (i = 0; status; i++) {
299 u32 mask = BIT(i) | BIT(i + 16); 424 u32 mask = BIT(i) | BIT(i + 16);
@@ -315,6 +440,18 @@ static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
315 access_type = (fault_status >> 8) & 0x3; 440 access_type = (fault_status >> 8) & 0x3;
316 source_id = (fault_status >> 16); 441 source_id = (fault_status >> 16);
317 442
443 /* Page fault only */
444 if ((status & mask) == BIT(i)) {
445 WARN_ON(exception_type < 0xC1 || exception_type > 0xC4);
446
447 ret = panfrost_mmu_map_fault_addr(pfdev, i, addr);
448 if (!ret) {
449 mmu_write(pfdev, MMU_INT_CLEAR, BIT(i));
450 status &= ~mask;
451 continue;
452 }
453 }
454
318 /* terminal fault, print info about the fault */ 455 /* terminal fault, print info about the fault */
319 dev_err(pfdev->dev, 456 dev_err(pfdev->dev,
320 "Unhandled Page fault in AS%d at VA 0x%016llX\n" 457 "Unhandled Page fault in AS%d at VA 0x%016llX\n"
@@ -337,6 +474,7 @@ static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
337 status &= ~mask; 474 status &= ~mask;
338 } 475 }
339 476
477 mmu_write(pfdev, MMU_INT_MASK, ~0);
340 return IRQ_HANDLED; 478 return IRQ_HANDLED;
341}; 479};
342 480
@@ -355,16 +493,14 @@ int panfrost_mmu_init(struct panfrost_device *pfdev)
355 if (irq <= 0) 493 if (irq <= 0)
356 return -ENODEV; 494 return -ENODEV;
357 495
358 err = devm_request_irq(pfdev->dev, irq, panfrost_mmu_irq_handler, 496 err = devm_request_threaded_irq(pfdev->dev, irq, panfrost_mmu_irq_handler,
359 IRQF_SHARED, "mmu", pfdev); 497 panfrost_mmu_irq_handler_thread,
498 IRQF_SHARED, "mmu", pfdev);
360 499
361 if (err) { 500 if (err) {
362 dev_err(pfdev->dev, "failed to request mmu irq"); 501 dev_err(pfdev->dev, "failed to request mmu irq");
363 return err; 502 return err;
364 } 503 }
365 mmu_write(pfdev, MMU_INT_CLEAR, ~0);
366 mmu_write(pfdev, MMU_INT_MASK, ~0);
367
368 pfdev->mmu->pgtbl_cfg = (struct io_pgtable_cfg) { 504 pfdev->mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
369 .pgsize_bitmap = SZ_4K | SZ_2M, 505 .pgsize_bitmap = SZ_4K | SZ_2M,
370 .ias = FIELD_GET(0xff, pfdev->features.mmu_features), 506 .ias = FIELD_GET(0xff, pfdev->features.mmu_features),
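Editor's note: panfrost_mmu_map_fault_addr() above grows heap BOs in 2 MiB chunks. The faulting GPU VA is rounded down to a 2 MiB boundary, NUM_FAULT_PAGES backing pages (512 with 4 KiB pages) are populated from shmem, and the matching entry of bo->sgts is used for the mapping. A worked example of the arithmetic with an assumed fault address and node start:

#include <linux/sizes.h>

/* Assumes 4 KiB pages and a heap BO whose drm_mm node starts at GPU VA
 * 0x08000000, i.e. node->start == 0x8000 in pages. The fault address is
 * hypothetical. */
static void example_fault_math(void)
{
	u64 addr = 0x08203000;					/* faulting GPU VA */
	u64 node_start = 0x8000;				/* node->start, in pages */
	pgoff_t page_offset;
	unsigned int sgt_idx;

	addr &= ~((u64)SZ_2M - 1);				/* -> 0x08200000 */
	page_offset = (addr >> PAGE_SHIFT) - node_start;	/* -> 512 */
	sgt_idx = page_offset / (SZ_2M / PAGE_SIZE);		/* -> bo->sgts[1] */
}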
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.h b/drivers/gpu/drm/panfrost/panfrost_mmu.h
index f5878d86a5ce..d5f9b24537db 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.h
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.h
@@ -11,7 +11,6 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo);
11 11
12int panfrost_mmu_init(struct panfrost_device *pfdev); 12int panfrost_mmu_init(struct panfrost_device *pfdev);
13void panfrost_mmu_fini(struct panfrost_device *pfdev); 13void panfrost_mmu_fini(struct panfrost_device *pfdev);
14 14void panfrost_mmu_reset(struct panfrost_device *pfdev);
15void panfrost_mmu_enable(struct panfrost_device *pfdev, u32 as_nr);
16 15
17#endif 16#endif
diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c
index 9a153125e5d2..024771a4083e 100644
--- a/drivers/gpu/drm/pl111/pl111_display.c
+++ b/drivers/gpu/drm/pl111/pl111_display.c
@@ -128,6 +128,7 @@ static void pl111_display_enable(struct drm_simple_display_pipe *pipe,
128 struct drm_framebuffer *fb = plane->state->fb; 128 struct drm_framebuffer *fb = plane->state->fb;
129 struct drm_connector *connector = priv->connector; 129 struct drm_connector *connector = priv->connector;
130 struct drm_bridge *bridge = priv->bridge; 130 struct drm_bridge *bridge = priv->bridge;
131 bool grayscale = false;
131 u32 cntl; 132 u32 cntl;
132 u32 ppl, hsw, hfp, hbp; 133 u32 ppl, hsw, hfp, hbp;
133 u32 lpp, vsw, vfp, vbp; 134 u32 lpp, vsw, vfp, vbp;
@@ -187,6 +188,20 @@ static void pl111_display_enable(struct drm_simple_display_pipe *pipe,
187 if (connector->display_info.bus_flags & 188 if (connector->display_info.bus_flags &
188 DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE) 189 DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
189 tim2 |= TIM2_IPC; 190 tim2 |= TIM2_IPC;
191
192 if (connector->display_info.num_bus_formats == 1 &&
193 connector->display_info.bus_formats[0] ==
194 MEDIA_BUS_FMT_Y8_1X8)
195 grayscale = true;
196
197 /*
198 * The AC pin bias frequency is set to max count when using
199	 * grayscale, so at least once in a while we will reverse
200	 * polarity and get rid of any DC build-up that could
201 * damage the display.
202 */
203 if (grayscale)
204 tim2 |= TIM2_ACB_MASK;
190 } 205 }
191 206
192 if (bridge) { 207 if (bridge) {
@@ -218,8 +233,18 @@ static void pl111_display_enable(struct drm_simple_display_pipe *pipe,
218 233
219 writel(0, priv->regs + CLCD_TIM3); 234 writel(0, priv->regs + CLCD_TIM3);
220 235
221 /* Hard-code TFT panel */ 236 /*
222 cntl = CNTL_LCDEN | CNTL_LCDTFT | CNTL_LCDVCOMP(1); 237 * Detect grayscale bus format. We do not support a grayscale mode
238 * toward userspace, instead we expose an RGB24 buffer and then the
239 * hardware will activate its grayscaler to convert to the grayscale
240 * format.
241 */
242 if (grayscale)
243 cntl = CNTL_LCDEN | CNTL_LCDMONO8;
244 else
245 /* Else we assume TFT display */
246 cntl = CNTL_LCDEN | CNTL_LCDTFT | CNTL_LCDVCOMP(1);
247
223 /* On the ST Micro variant, assume all 24 bits are connected */ 248 /* On the ST Micro variant, assume all 24 bits are connected */
224 if (priv->variant->st_bitmux_control) 249 if (priv->variant->st_bitmux_control)
225 cntl |= CNTL_ST_CDWID_24; 250 cntl |= CNTL_ST_CDWID_24;
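
The pl111_display.c change keys the controller mode off the connector's reported bus format: exactly one format, and that format being MEDIA_BUS_FMT_Y8_1X8, selects the 8-bit mono path (CNTL_LCDMONO8) and maxes out the AC bias counter, while anything else keeps the existing TFT programming. Condensed into a hypothetical helper (not part of the driver), the detection amounts to:

static bool pl111_wants_grayscale(struct drm_connector *connector)
{
	const struct drm_display_info *info = &connector->display_info;

	/* A single reported format, and it is 8-bit greyscale. */
	return info->num_bus_formats == 1 &&
	       info->bus_formats[0] == MEDIA_BUS_FMT_Y8_1X8;
}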
diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
index 94439212a5c5..a4f4175bbdbe 100644
--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
@@ -57,7 +57,7 @@ qxl_debugfs_buffers_info(struct seq_file *m, void *data)
57 struct qxl_bo *bo; 57 struct qxl_bo *bo;
58 58
59 list_for_each_entry(bo, &qdev->gem.objects, list) { 59 list_for_each_entry(bo, &qdev->gem.objects, list) {
60 struct reservation_object_list *fobj; 60 struct dma_resv_list *fobj;
61 int rel; 61 int rel;
62 62
63 rcu_read_lock(); 63 rcu_read_lock();
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index df55b83e0a55..312216caeea2 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -238,7 +238,7 @@ static int qxl_release_validate_bo(struct qxl_bo *bo)
238 return ret; 238 return ret;
239 } 239 }
240 240
241 ret = reservation_object_reserve_shared(bo->tbo.base.resv, 1); 241 ret = dma_resv_reserve_shared(bo->tbo.base.resv, 1);
242 if (ret) 242 if (ret)
243 return ret; 243 return ret;
244 244
@@ -458,9 +458,9 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
458 list_for_each_entry(entry, &release->bos, head) { 458 list_for_each_entry(entry, &release->bos, head) {
459 bo = entry->bo; 459 bo = entry->bo;
460 460
461 reservation_object_add_shared_fence(bo->base.resv, &release->base); 461 dma_resv_add_shared_fence(bo->base.resv, &release->base);
462 ttm_bo_add_to_lru(bo); 462 ttm_bo_add_to_lru(bo);
463 reservation_object_unlock(bo->base.resv); 463 dma_resv_unlock(bo->base.resv);
464 } 464 }
465 spin_unlock(&glob->lru_lock); 465 spin_unlock(&glob->lru_lock);
466 ww_acquire_fini(&release->ticket); 466 ww_acquire_fini(&release->ticket);
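
From here on, most of the qxl, radeon and ttm hunks are the mechanical reservation_object -> dma_resv rename: struct reservation_object becomes struct dma_resv, struct reservation_object_list becomes struct dma_resv_list, and each reservation_object_*() helper becomes the matching dma_resv_*() call with unchanged arguments. A minimal sketch of the renamed API as used in the qxl hunks above, with error handling trimmed:

static void example_publish_shared_fence(struct dma_resv *resv,
					 struct dma_fence *fence)
{
	if (dma_resv_lock(resv, NULL))
		return;

	/* Reserve a shared slot, then publish the fence into it. */
	if (!dma_resv_reserve_shared(resv, 1))
		dma_resv_add_shared_fence(resv, fence);

	dma_resv_unlock(resv);
}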
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 40f4d29edfe2..62eab82a64f9 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3659,7 +3659,7 @@ bool cik_semaphore_ring_emit(struct radeon_device *rdev,
3659struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev, 3659struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev,
3660 uint64_t src_offset, uint64_t dst_offset, 3660 uint64_t src_offset, uint64_t dst_offset,
3661 unsigned num_gpu_pages, 3661 unsigned num_gpu_pages,
3662 struct reservation_object *resv) 3662 struct dma_resv *resv)
3663{ 3663{
3664 struct radeon_fence *fence; 3664 struct radeon_fence *fence;
3665 struct radeon_sync sync; 3665 struct radeon_sync sync;
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index 589217a7e435..35b9dc6ce46a 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -579,7 +579,7 @@ void cik_sdma_fini(struct radeon_device *rdev)
579struct radeon_fence *cik_copy_dma(struct radeon_device *rdev, 579struct radeon_fence *cik_copy_dma(struct radeon_device *rdev,
580 uint64_t src_offset, uint64_t dst_offset, 580 uint64_t src_offset, uint64_t dst_offset,
581 unsigned num_gpu_pages, 581 unsigned num_gpu_pages,
582 struct reservation_object *resv) 582 struct dma_resv *resv)
583{ 583{
584 struct radeon_fence *fence; 584 struct radeon_fence *fence;
585 struct radeon_sync sync; 585 struct radeon_sync sync;
diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c
index 5505a04ca402..a46ee6c2099d 100644
--- a/drivers/gpu/drm/radeon/evergreen_dma.c
+++ b/drivers/gpu/drm/radeon/evergreen_dma.c
@@ -108,7 +108,7 @@ struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
108 uint64_t src_offset, 108 uint64_t src_offset,
109 uint64_t dst_offset, 109 uint64_t dst_offset,
110 unsigned num_gpu_pages, 110 unsigned num_gpu_pages,
111 struct reservation_object *resv) 111 struct dma_resv *resv)
112{ 112{
113 struct radeon_fence *fence; 113 struct radeon_fence *fence;
114 struct radeon_sync sync; 114 struct radeon_sync sync;
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 5c05193da520..7089dfc8c2a9 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -891,7 +891,7 @@ struct radeon_fence *r100_copy_blit(struct radeon_device *rdev,
891 uint64_t src_offset, 891 uint64_t src_offset,
892 uint64_t dst_offset, 892 uint64_t dst_offset,
893 unsigned num_gpu_pages, 893 unsigned num_gpu_pages,
894 struct reservation_object *resv) 894 struct dma_resv *resv)
895{ 895{
896 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 896 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
897 struct radeon_fence *fence; 897 struct radeon_fence *fence;
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index 9ce6dd83d284..840401413c58 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -84,7 +84,7 @@ struct radeon_fence *r200_copy_dma(struct radeon_device *rdev,
84 uint64_t src_offset, 84 uint64_t src_offset,
85 uint64_t dst_offset, 85 uint64_t dst_offset,
86 unsigned num_gpu_pages, 86 unsigned num_gpu_pages,
87 struct reservation_object *resv) 87 struct dma_resv *resv)
88{ 88{
89 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 89 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
90 struct radeon_fence *fence; 90 struct radeon_fence *fence;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 7d175a9e8330..e937cc01910d 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2963,7 +2963,7 @@ bool r600_semaphore_ring_emit(struct radeon_device *rdev,
2963struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev, 2963struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
2964 uint64_t src_offset, uint64_t dst_offset, 2964 uint64_t src_offset, uint64_t dst_offset,
2965 unsigned num_gpu_pages, 2965 unsigned num_gpu_pages,
2966 struct reservation_object *resv) 2966 struct dma_resv *resv)
2967{ 2967{
2968 struct radeon_fence *fence; 2968 struct radeon_fence *fence;
2969 struct radeon_sync sync; 2969 struct radeon_sync sync;
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
index 35d92ef8a0d4..af6c0da45f28 100644
--- a/drivers/gpu/drm/radeon/r600_dma.c
+++ b/drivers/gpu/drm/radeon/r600_dma.c
@@ -444,7 +444,7 @@ void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
444struct radeon_fence *r600_copy_dma(struct radeon_device *rdev, 444struct radeon_fence *r600_copy_dma(struct radeon_device *rdev,
445 uint64_t src_offset, uint64_t dst_offset, 445 uint64_t src_offset, uint64_t dst_offset,
446 unsigned num_gpu_pages, 446 unsigned num_gpu_pages,
447 struct reservation_object *resv) 447 struct dma_resv *resv)
448{ 448{
449 struct radeon_fence *fence; 449 struct radeon_fence *fence;
450 struct radeon_sync sync; 450 struct radeon_sync sync;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 3f7701321d21..de1d090df034 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -619,7 +619,7 @@ void radeon_sync_fence(struct radeon_sync *sync,
619 struct radeon_fence *fence); 619 struct radeon_fence *fence);
620int radeon_sync_resv(struct radeon_device *rdev, 620int radeon_sync_resv(struct radeon_device *rdev,
621 struct radeon_sync *sync, 621 struct radeon_sync *sync,
622 struct reservation_object *resv, 622 struct dma_resv *resv,
623 bool shared); 623 bool shared);
624int radeon_sync_rings(struct radeon_device *rdev, 624int radeon_sync_rings(struct radeon_device *rdev,
625 struct radeon_sync *sync, 625 struct radeon_sync *sync,
@@ -1912,20 +1912,20 @@ struct radeon_asic {
1912 uint64_t src_offset, 1912 uint64_t src_offset,
1913 uint64_t dst_offset, 1913 uint64_t dst_offset,
1914 unsigned num_gpu_pages, 1914 unsigned num_gpu_pages,
1915 struct reservation_object *resv); 1915 struct dma_resv *resv);
1916 u32 blit_ring_index; 1916 u32 blit_ring_index;
1917 struct radeon_fence *(*dma)(struct radeon_device *rdev, 1917 struct radeon_fence *(*dma)(struct radeon_device *rdev,
1918 uint64_t src_offset, 1918 uint64_t src_offset,
1919 uint64_t dst_offset, 1919 uint64_t dst_offset,
1920 unsigned num_gpu_pages, 1920 unsigned num_gpu_pages,
1921 struct reservation_object *resv); 1921 struct dma_resv *resv);
1922 u32 dma_ring_index; 1922 u32 dma_ring_index;
1923 /* method used for bo copy */ 1923 /* method used for bo copy */
1924 struct radeon_fence *(*copy)(struct radeon_device *rdev, 1924 struct radeon_fence *(*copy)(struct radeon_device *rdev,
1925 uint64_t src_offset, 1925 uint64_t src_offset,
1926 uint64_t dst_offset, 1926 uint64_t dst_offset,
1927 unsigned num_gpu_pages, 1927 unsigned num_gpu_pages,
1928 struct reservation_object *resv); 1928 struct dma_resv *resv);
1929 /* ring used for bo copies */ 1929 /* ring used for bo copies */
1930 u32 copy_ring_index; 1930 u32 copy_ring_index;
1931 } copy; 1931 } copy;
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index e3f036c20d64..a74fa18cd27b 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -86,7 +86,7 @@ struct radeon_fence *r100_copy_blit(struct radeon_device *rdev,
86 uint64_t src_offset, 86 uint64_t src_offset,
87 uint64_t dst_offset, 87 uint64_t dst_offset,
88 unsigned num_gpu_pages, 88 unsigned num_gpu_pages,
89 struct reservation_object *resv); 89 struct dma_resv *resv);
90int r100_set_surface_reg(struct radeon_device *rdev, int reg, 90int r100_set_surface_reg(struct radeon_device *rdev, int reg,
91 uint32_t tiling_flags, uint32_t pitch, 91 uint32_t tiling_flags, uint32_t pitch,
92 uint32_t offset, uint32_t obj_size); 92 uint32_t offset, uint32_t obj_size);
@@ -157,7 +157,7 @@ struct radeon_fence *r200_copy_dma(struct radeon_device *rdev,
157 uint64_t src_offset, 157 uint64_t src_offset,
158 uint64_t dst_offset, 158 uint64_t dst_offset,
159 unsigned num_gpu_pages, 159 unsigned num_gpu_pages,
160 struct reservation_object *resv); 160 struct dma_resv *resv);
161void r200_set_safe_registers(struct radeon_device *rdev); 161void r200_set_safe_registers(struct radeon_device *rdev);
162 162
163/* 163/*
@@ -347,11 +347,11 @@ int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
347struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev, 347struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
348 uint64_t src_offset, uint64_t dst_offset, 348 uint64_t src_offset, uint64_t dst_offset,
349 unsigned num_gpu_pages, 349 unsigned num_gpu_pages,
350 struct reservation_object *resv); 350 struct dma_resv *resv);
351struct radeon_fence *r600_copy_dma(struct radeon_device *rdev, 351struct radeon_fence *r600_copy_dma(struct radeon_device *rdev,
352 uint64_t src_offset, uint64_t dst_offset, 352 uint64_t src_offset, uint64_t dst_offset,
353 unsigned num_gpu_pages, 353 unsigned num_gpu_pages,
354 struct reservation_object *resv); 354 struct dma_resv *resv);
355void r600_hpd_init(struct radeon_device *rdev); 355void r600_hpd_init(struct radeon_device *rdev);
356void r600_hpd_fini(struct radeon_device *rdev); 356void r600_hpd_fini(struct radeon_device *rdev);
357bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); 357bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -473,7 +473,7 @@ void r700_cp_fini(struct radeon_device *rdev);
473struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev, 473struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev,
474 uint64_t src_offset, uint64_t dst_offset, 474 uint64_t src_offset, uint64_t dst_offset,
475 unsigned num_gpu_pages, 475 unsigned num_gpu_pages,
476 struct reservation_object *resv); 476 struct dma_resv *resv);
477u32 rv770_get_xclk(struct radeon_device *rdev); 477u32 rv770_get_xclk(struct radeon_device *rdev);
478int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); 478int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
479int rv770_get_temp(struct radeon_device *rdev); 479int rv770_get_temp(struct radeon_device *rdev);
@@ -547,7 +547,7 @@ void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
547struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev, 547struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
548 uint64_t src_offset, uint64_t dst_offset, 548 uint64_t src_offset, uint64_t dst_offset,
549 unsigned num_gpu_pages, 549 unsigned num_gpu_pages,
550 struct reservation_object *resv); 550 struct dma_resv *resv);
551int evergreen_get_temp(struct radeon_device *rdev); 551int evergreen_get_temp(struct radeon_device *rdev);
552int evergreen_get_allowed_info_register(struct radeon_device *rdev, 552int evergreen_get_allowed_info_register(struct radeon_device *rdev,
553 u32 reg, u32 *val); 553 u32 reg, u32 *val);
@@ -725,7 +725,7 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
725struct radeon_fence *si_copy_dma(struct radeon_device *rdev, 725struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
726 uint64_t src_offset, uint64_t dst_offset, 726 uint64_t src_offset, uint64_t dst_offset,
727 unsigned num_gpu_pages, 727 unsigned num_gpu_pages,
728 struct reservation_object *resv); 728 struct dma_resv *resv);
729 729
730void si_dma_vm_copy_pages(struct radeon_device *rdev, 730void si_dma_vm_copy_pages(struct radeon_device *rdev,
731 struct radeon_ib *ib, 731 struct radeon_ib *ib,
@@ -796,11 +796,11 @@ void cik_sdma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
796struct radeon_fence *cik_copy_dma(struct radeon_device *rdev, 796struct radeon_fence *cik_copy_dma(struct radeon_device *rdev,
797 uint64_t src_offset, uint64_t dst_offset, 797 uint64_t src_offset, uint64_t dst_offset,
798 unsigned num_gpu_pages, 798 unsigned num_gpu_pages,
799 struct reservation_object *resv); 799 struct dma_resv *resv);
800struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev, 800struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev,
801 uint64_t src_offset, uint64_t dst_offset, 801 uint64_t src_offset, uint64_t dst_offset,
802 unsigned num_gpu_pages, 802 unsigned num_gpu_pages,
803 struct reservation_object *resv); 803 struct dma_resv *resv);
804int cik_sdma_ring_test(struct radeon_device *rdev, struct radeon_ring *ring); 804int cik_sdma_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
805int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); 805int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
806bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring); 806bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 1ea50ce16312..ac9a5ec481c3 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -35,7 +35,7 @@
35static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size, 35static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
36 uint64_t saddr, uint64_t daddr, 36 uint64_t saddr, uint64_t daddr,
37 int flag, int n, 37 int flag, int n,
38 struct reservation_object *resv) 38 struct dma_resv *resv)
39{ 39{
40 unsigned long start_jiffies; 40 unsigned long start_jiffies;
41 unsigned long end_jiffies; 41 unsigned long end_jiffies;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 7e5254a34e84..7b5460678382 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -255,7 +255,7 @@ static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
255 int r; 255 int r;
256 256
257 list_for_each_entry(reloc, &p->validated, tv.head) { 257 list_for_each_entry(reloc, &p->validated, tv.head) {
258 struct reservation_object *resv; 258 struct dma_resv *resv;
259 259
260 resv = reloc->robj->tbo.base.resv; 260 resv = reloc->robj->tbo.base.resv;
261 r = radeon_sync_resv(p->rdev, &p->ib.sync, resv, 261 r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 7bf73230ac0b..e81b01f8db90 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -533,7 +533,7 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
533 DRM_ERROR("failed to pin new rbo buffer before flip\n"); 533 DRM_ERROR("failed to pin new rbo buffer before flip\n");
534 goto cleanup; 534 goto cleanup;
535 } 535 }
536 work->fence = dma_fence_get(reservation_object_get_excl(new_rbo->tbo.base.resv)); 536 work->fence = dma_fence_get(dma_resv_get_excl(new_rbo->tbo.base.resv));
537 radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL); 537 radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
538 radeon_bo_unreserve(new_rbo); 538 radeon_bo_unreserve(new_rbo);
539 539
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 03873f21a734..4cf58dbbe439 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -114,7 +114,7 @@ static int radeon_gem_set_domain(struct drm_gem_object *gobj,
114 } 114 }
115 if (domain == RADEON_GEM_DOMAIN_CPU) { 115 if (domain == RADEON_GEM_DOMAIN_CPU) {
116 /* Asking for cpu access wait for object idle */ 116 /* Asking for cpu access wait for object idle */
117 r = reservation_object_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ); 117 r = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
118 if (!r) 118 if (!r)
119 r = -EBUSY; 119 r = -EBUSY;
120 120
@@ -449,7 +449,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
449 } 449 }
450 robj = gem_to_radeon_bo(gobj); 450 robj = gem_to_radeon_bo(gobj);
451 451
452 r = reservation_object_test_signaled_rcu(robj->tbo.base.resv, true); 452 r = dma_resv_test_signaled_rcu(robj->tbo.base.resv, true);
453 if (r == 0) 453 if (r == 0)
454 r = -EBUSY; 454 r = -EBUSY;
455 else 455 else
@@ -478,7 +478,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
478 } 478 }
479 robj = gem_to_radeon_bo(gobj); 479 robj = gem_to_radeon_bo(gobj);
480 480
481 ret = reservation_object_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ); 481 ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
482 if (ret == 0) 482 if (ret == 0)
483 r = -EBUSY; 483 r = -EBUSY;
484 else if (ret < 0) 484 else if (ret < 0)
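
The radeon_gem.c conversions also show the return convention these wait helpers share with dma_fence_wait_timeout(): a positive result is the remaining timeout, 0 means the wait timed out, and a negative value is an error, which is why a 0 result gets translated to -EBUSY. Sketched as a stand-alone helper (the name is illustrative):

static int example_wait_idle(struct dma_resv *resv)
{
	long ret;

	/* wait_all = true, interruptible = true, 30 second timeout */
	ret = dma_resv_wait_timeout_rcu(resv, true, true, 30 * HZ);
	if (ret == 0)
		return -EBUSY;		/* still busy when the timeout expired */
	if (ret < 0)
		return ret;		/* e.g. -ERESTARTSYS */
	return 0;
}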
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index 0d64ace0e6c1..6902f998ede9 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -163,7 +163,7 @@ static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
163 continue; 163 continue;
164 } 164 }
165 165
166 r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv, 166 r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
167 true, false, MAX_SCHEDULE_TIMEOUT); 167 true, false, MAX_SCHEDULE_TIMEOUT);
168 if (r <= 0) 168 if (r <= 0)
169 DRM_ERROR("(%ld) failed to wait for user bo\n", r); 169 DRM_ERROR("(%ld) failed to wait for user bo\n", r);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 9db8ba29ef68..2abe1eab471f 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -183,7 +183,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
183int radeon_bo_create(struct radeon_device *rdev, 183int radeon_bo_create(struct radeon_device *rdev,
184 unsigned long size, int byte_align, bool kernel, 184 unsigned long size, int byte_align, bool kernel,
185 u32 domain, u32 flags, struct sg_table *sg, 185 u32 domain, u32 flags, struct sg_table *sg,
186 struct reservation_object *resv, 186 struct dma_resv *resv,
187 struct radeon_bo **bo_ptr) 187 struct radeon_bo **bo_ptr)
188{ 188{
189 struct radeon_bo *bo; 189 struct radeon_bo *bo;
@@ -610,7 +610,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
610 int steal; 610 int steal;
611 int i; 611 int i;
612 612
613 reservation_object_assert_held(bo->tbo.base.resv); 613 dma_resv_assert_held(bo->tbo.base.resv);
614 614
615 if (!bo->tiling_flags) 615 if (!bo->tiling_flags)
616 return 0; 616 return 0;
@@ -736,7 +736,7 @@ void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
736 uint32_t *tiling_flags, 736 uint32_t *tiling_flags,
737 uint32_t *pitch) 737 uint32_t *pitch)
738{ 738{
739 reservation_object_assert_held(bo->tbo.base.resv); 739 dma_resv_assert_held(bo->tbo.base.resv);
740 740
741 if (tiling_flags) 741 if (tiling_flags)
742 *tiling_flags = bo->tiling_flags; 742 *tiling_flags = bo->tiling_flags;
@@ -748,7 +748,7 @@ int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
748 bool force_drop) 748 bool force_drop)
749{ 749{
750 if (!force_drop) 750 if (!force_drop)
751 reservation_object_assert_held(bo->tbo.base.resv); 751 dma_resv_assert_held(bo->tbo.base.resv);
752 752
753 if (!(bo->tiling_flags & RADEON_TILING_SURFACE)) 753 if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
754 return 0; 754 return 0;
@@ -870,10 +870,10 @@ int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
870void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence, 870void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
871 bool shared) 871 bool shared)
872{ 872{
873 struct reservation_object *resv = bo->tbo.base.resv; 873 struct dma_resv *resv = bo->tbo.base.resv;
874 874
875 if (shared) 875 if (shared)
876 reservation_object_add_shared_fence(resv, &fence->base); 876 dma_resv_add_shared_fence(resv, &fence->base);
877 else 877 else
878 reservation_object_add_excl_fence(resv, &fence->base); 878 dma_resv_add_excl_fence(resv, &fence->base);
879} 879}
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index e5554bf9140e..d23f2ed4126e 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -126,7 +126,7 @@ extern int radeon_bo_create(struct radeon_device *rdev,
126 unsigned long size, int byte_align, 126 unsigned long size, int byte_align,
127 bool kernel, u32 domain, u32 flags, 127 bool kernel, u32 domain, u32 flags,
128 struct sg_table *sg, 128 struct sg_table *sg,
129 struct reservation_object *resv, 129 struct dma_resv *resv,
130 struct radeon_bo **bo_ptr); 130 struct radeon_bo **bo_ptr);
131extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr); 131extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
132extern void radeon_bo_kunmap(struct radeon_bo *bo); 132extern void radeon_bo_kunmap(struct radeon_bo *bo);
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index 52b0d0cd8cbe..b906e8fbd5f3 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -63,15 +63,15 @@ struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
63 struct dma_buf_attachment *attach, 63 struct dma_buf_attachment *attach,
64 struct sg_table *sg) 64 struct sg_table *sg)
65{ 65{
66 struct reservation_object *resv = attach->dmabuf->resv; 66 struct dma_resv *resv = attach->dmabuf->resv;
67 struct radeon_device *rdev = dev->dev_private; 67 struct radeon_device *rdev = dev->dev_private;
68 struct radeon_bo *bo; 68 struct radeon_bo *bo;
69 int ret; 69 int ret;
70 70
71 reservation_object_lock(resv, NULL); 71 dma_resv_lock(resv, NULL);
72 ret = radeon_bo_create(rdev, attach->dmabuf->size, PAGE_SIZE, false, 72 ret = radeon_bo_create(rdev, attach->dmabuf->size, PAGE_SIZE, false,
73 RADEON_GEM_DOMAIN_GTT, 0, sg, resv, &bo); 73 RADEON_GEM_DOMAIN_GTT, 0, sg, resv, &bo);
74 reservation_object_unlock(resv); 74 dma_resv_unlock(resv);
75 if (ret) 75 if (ret)
76 return ERR_PTR(ret); 76 return ERR_PTR(ret);
77 77
diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c
index 8c9780b5a884..55cc77a73c7b 100644
--- a/drivers/gpu/drm/radeon/radeon_sync.c
+++ b/drivers/gpu/drm/radeon/radeon_sync.c
@@ -87,30 +87,30 @@ void radeon_sync_fence(struct radeon_sync *sync,
87 */ 87 */
88int radeon_sync_resv(struct radeon_device *rdev, 88int radeon_sync_resv(struct radeon_device *rdev,
89 struct radeon_sync *sync, 89 struct radeon_sync *sync,
90 struct reservation_object *resv, 90 struct dma_resv *resv,
91 bool shared) 91 bool shared)
92{ 92{
93 struct reservation_object_list *flist; 93 struct dma_resv_list *flist;
94 struct dma_fence *f; 94 struct dma_fence *f;
95 struct radeon_fence *fence; 95 struct radeon_fence *fence;
96 unsigned i; 96 unsigned i;
97 int r = 0; 97 int r = 0;
98 98
99 /* always sync to the exclusive fence */ 99 /* always sync to the exclusive fence */
100 f = reservation_object_get_excl(resv); 100 f = dma_resv_get_excl(resv);
101 fence = f ? to_radeon_fence(f) : NULL; 101 fence = f ? to_radeon_fence(f) : NULL;
102 if (fence && fence->rdev == rdev) 102 if (fence && fence->rdev == rdev)
103 radeon_sync_fence(sync, fence); 103 radeon_sync_fence(sync, fence);
104 else if (f) 104 else if (f)
105 r = dma_fence_wait(f, true); 105 r = dma_fence_wait(f, true);
106 106
107 flist = reservation_object_get_list(resv); 107 flist = dma_resv_get_list(resv);
108 if (shared || !flist || r) 108 if (shared || !flist || r)
109 return r; 109 return r;
110 110
111 for (i = 0; i < flist->shared_count; ++i) { 111 for (i = 0; i < flist->shared_count; ++i) {
112 f = rcu_dereference_protected(flist->shared[i], 112 f = rcu_dereference_protected(flist->shared[i],
113 reservation_object_held(resv)); 113 dma_resv_held(resv));
114 fence = to_radeon_fence(f); 114 fence = to_radeon_fence(f);
115 if (fence && fence->rdev == rdev) 115 if (fence && fence->rdev == rdev)
116 radeon_sync_fence(sync, fence); 116 radeon_sync_fence(sync, fence);
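
radeon_sync_resv() above is the usual pattern for walking a reservation object while its lock is held: the exclusive fence comes from dma_resv_get_excl(), the shared fences from dma_resv_get_list(), and each shared slot is dereferenced with rcu_dereference_protected() justified by dma_resv_held(). The same accessors can, for instance, wait on everything attached to a buffer; a sketch, not driver code:

static long example_wait_all_fences(struct dma_resv *resv)
{
	struct dma_resv_list *list = dma_resv_get_list(resv);
	struct dma_fence *excl = dma_resv_get_excl(resv);
	unsigned int i;
	long ret = 0;

	/* The caller is assumed to hold the reservation lock. */
	if (excl)
		ret = dma_fence_wait(excl, true);

	for (i = 0; !ret && list && i < list->shared_count; ++i) {
		struct dma_fence *f =
			rcu_dereference_protected(list->shared[i],
						  dma_resv_held(resv));

		ret = dma_fence_wait(f, true);
	}

	return ret;
}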
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 311e69c2ed7f..1ad5c3b86b64 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -477,7 +477,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
477 return -EINVAL; 477 return -EINVAL;
478 } 478 }
479 479
480 f = reservation_object_get_excl(bo->tbo.base.resv); 480 f = dma_resv_get_excl(bo->tbo.base.resv);
481 if (f) { 481 if (f) {
482 r = radeon_fence_wait((struct radeon_fence *)f, false); 482 r = radeon_fence_wait((struct radeon_fence *)f, false);
483 if (r) { 483 if (r) {
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index e48a05533126..e0ad547786e8 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -831,7 +831,7 @@ static int radeon_vm_update_ptes(struct radeon_device *rdev,
831 int r; 831 int r;
832 832
833 radeon_sync_resv(rdev, &ib->sync, pt->tbo.base.resv, true); 833 radeon_sync_resv(rdev, &ib->sync, pt->tbo.base.resv, true);
834 r = reservation_object_reserve_shared(pt->tbo.base.resv, 1); 834 r = dma_resv_reserve_shared(pt->tbo.base.resv, 1);
835 if (r) 835 if (r)
836 return r; 836 return r;
837 837
diff --git a/drivers/gpu/drm/radeon/rv770_dma.c b/drivers/gpu/drm/radeon/rv770_dma.c
index 0866b38ef264..4c91614b5e70 100644
--- a/drivers/gpu/drm/radeon/rv770_dma.c
+++ b/drivers/gpu/drm/radeon/rv770_dma.c
@@ -42,7 +42,7 @@
42struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev, 42struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev,
43 uint64_t src_offset, uint64_t dst_offset, 43 uint64_t src_offset, uint64_t dst_offset,
44 unsigned num_gpu_pages, 44 unsigned num_gpu_pages,
45 struct reservation_object *resv) 45 struct dma_resv *resv)
46{ 46{
47 struct radeon_fence *fence; 47 struct radeon_fence *fence;
48 struct radeon_sync sync; 48 struct radeon_sync sync;
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index 4773bb7d947e..d2fa302a5be9 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -231,7 +231,7 @@ void si_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
231struct radeon_fence *si_copy_dma(struct radeon_device *rdev, 231struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
232 uint64_t src_offset, uint64_t dst_offset, 232 uint64_t src_offset, uint64_t dst_offset,
233 unsigned num_gpu_pages, 233 unsigned num_gpu_pages,
234 struct reservation_object *resv) 234 struct dma_resv *resv)
235{ 235{
236 struct radeon_fence *fence; 236 struct radeon_fence *fence;
237 struct radeon_sync sync; 237 struct radeon_sync sync;
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
index 8ca5af0c912f..a44dca4b0219 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
@@ -97,10 +97,34 @@ crtcs_exit:
97 return crtcs; 97 return crtcs;
98} 98}
99 99
100static int sun8i_dw_hdmi_find_connector_pdev(struct device *dev,
101 struct platform_device **pdev_out)
102{
103 struct platform_device *pdev;
104 struct device_node *remote;
105
106 remote = of_graph_get_remote_node(dev->of_node, 1, -1);
107 if (!remote)
108 return -ENODEV;
109
110 if (!of_device_is_compatible(remote, "hdmi-connector")) {
111 of_node_put(remote);
112 return -ENODEV;
113 }
114
115 pdev = of_find_device_by_node(remote);
116 of_node_put(remote);
117 if (!pdev)
118 return -ENODEV;
119
120 *pdev_out = pdev;
121 return 0;
122}
123
100static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master, 124static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
101 void *data) 125 void *data)
102{ 126{
103 struct platform_device *pdev = to_platform_device(dev); 127 struct platform_device *pdev = to_platform_device(dev), *connector_pdev;
104 struct dw_hdmi_plat_data *plat_data; 128 struct dw_hdmi_plat_data *plat_data;
105 struct drm_device *drm = data; 129 struct drm_device *drm = data;
106 struct device_node *phy_node; 130 struct device_node *phy_node;
@@ -150,16 +174,30 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
150 return PTR_ERR(hdmi->regulator); 174 return PTR_ERR(hdmi->regulator);
151 } 175 }
152 176
177 ret = sun8i_dw_hdmi_find_connector_pdev(dev, &connector_pdev);
178 if (!ret) {
179 hdmi->ddc_en = gpiod_get_optional(&connector_pdev->dev,
180 "ddc-en", GPIOD_OUT_HIGH);
181 platform_device_put(connector_pdev);
182
183 if (IS_ERR(hdmi->ddc_en)) {
184 dev_err(dev, "Couldn't get ddc-en gpio\n");
185 return PTR_ERR(hdmi->ddc_en);
186 }
187 }
188
153 ret = regulator_enable(hdmi->regulator); 189 ret = regulator_enable(hdmi->regulator);
154 if (ret) { 190 if (ret) {
155 dev_err(dev, "Failed to enable regulator\n"); 191 dev_err(dev, "Failed to enable regulator\n");
156 return ret; 192 goto err_unref_ddc_en;
157 } 193 }
158 194
195 gpiod_set_value(hdmi->ddc_en, 1);
196
159 ret = reset_control_deassert(hdmi->rst_ctrl); 197 ret = reset_control_deassert(hdmi->rst_ctrl);
160 if (ret) { 198 if (ret) {
161 dev_err(dev, "Could not deassert ctrl reset control\n"); 199 dev_err(dev, "Could not deassert ctrl reset control\n");
162 goto err_disable_regulator; 200 goto err_disable_ddc_en;
163 } 201 }
164 202
165 ret = clk_prepare_enable(hdmi->clk_tmds); 203 ret = clk_prepare_enable(hdmi->clk_tmds);
@@ -212,8 +250,12 @@ err_disable_clk_tmds:
212 clk_disable_unprepare(hdmi->clk_tmds); 250 clk_disable_unprepare(hdmi->clk_tmds);
213err_assert_ctrl_reset: 251err_assert_ctrl_reset:
214 reset_control_assert(hdmi->rst_ctrl); 252 reset_control_assert(hdmi->rst_ctrl);
215err_disable_regulator: 253err_disable_ddc_en:
254 gpiod_set_value(hdmi->ddc_en, 0);
216 regulator_disable(hdmi->regulator); 255 regulator_disable(hdmi->regulator);
256err_unref_ddc_en:
257 if (hdmi->ddc_en)
258 gpiod_put(hdmi->ddc_en);
217 259
218 return ret; 260 return ret;
219} 261}
@@ -227,7 +269,11 @@ static void sun8i_dw_hdmi_unbind(struct device *dev, struct device *master,
227 sun8i_hdmi_phy_remove(hdmi); 269 sun8i_hdmi_phy_remove(hdmi);
228 clk_disable_unprepare(hdmi->clk_tmds); 270 clk_disable_unprepare(hdmi->clk_tmds);
229 reset_control_assert(hdmi->rst_ctrl); 271 reset_control_assert(hdmi->rst_ctrl);
272 gpiod_set_value(hdmi->ddc_en, 0);
230 regulator_disable(hdmi->regulator); 273 regulator_disable(hdmi->regulator);
274
275 if (hdmi->ddc_en)
276 gpiod_put(hdmi->ddc_en);
231} 277}
232 278
233static const struct component_ops sun8i_dw_hdmi_ops = { 279static const struct component_ops sun8i_dw_hdmi_ops = {
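
In the sun8i hunks the DDC enable GPIO is requested from the HDMI connector's own platform device rather than from the controller: the OF graph gives the remote connector node, of_find_device_by_node() turns it into a platform device, and gpiod_get_optional() returns NULL (not an error) when no "ddc-en" GPIO is described there. gpiod_set_value() tolerates a NULL descriptor, so only the gpiod_put() calls need the explicit NULL checks seen above. Roughly, with example_get_ddc_en() as a hypothetical wrapper:

static struct gpio_desc *example_get_ddc_en(struct device *dev)
{
	struct platform_device *connector_pdev;
	struct gpio_desc *ddc_en;

	if (sun8i_dw_hdmi_find_connector_pdev(dev, &connector_pdev))
		return NULL;		/* no connector node, so no GPIO either */

	/* NULL when the connector carries no "ddc-en" GPIO at all. */
	ddc_en = gpiod_get_optional(&connector_pdev->dev, "ddc-en",
				    GPIOD_OUT_HIGH);
	platform_device_put(connector_pdev);

	return ddc_en;			/* may also be an ERR_PTR on failure */
}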
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
index 720c5aa8adc1..d707c9171824 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
@@ -9,6 +9,7 @@
9#include <drm/bridge/dw_hdmi.h> 9#include <drm/bridge/dw_hdmi.h>
10#include <drm/drm_encoder.h> 10#include <drm/drm_encoder.h>
11#include <linux/clk.h> 11#include <linux/clk.h>
12#include <linux/gpio/consumer.h>
12#include <linux/regmap.h> 13#include <linux/regmap.h>
13#include <linux/regulator/consumer.h> 14#include <linux/regulator/consumer.h>
14#include <linux/reset.h> 15#include <linux/reset.h>
@@ -190,6 +191,7 @@ struct sun8i_dw_hdmi {
190 struct regulator *regulator; 191 struct regulator *regulator;
191 const struct sun8i_dw_hdmi_quirks *quirks; 192 const struct sun8i_dw_hdmi_quirks *quirks;
192 struct reset_control *rst_ctrl; 193 struct reset_control *rst_ctrl;
194 struct gpio_desc *ddc_en;
193}; 195};
194 196
195static inline struct sun8i_dw_hdmi * 197static inline struct sun8i_dw_hdmi *
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 4a75d149e368..fbf57bc3cdab 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -6,23 +6,28 @@
6 6
7#include <linux/clk.h> 7#include <linux/clk.h>
8#include <linux/debugfs.h> 8#include <linux/debugfs.h>
9#include <linux/delay.h>
9#include <linux/iommu.h> 10#include <linux/iommu.h>
11#include <linux/module.h>
10#include <linux/of_device.h> 12#include <linux/of_device.h>
11#include <linux/pm_runtime.h> 13#include <linux/pm_runtime.h>
12#include <linux/reset.h> 14#include <linux/reset.h>
13 15
14#include <soc/tegra/pmc.h> 16#include <soc/tegra/pmc.h>
15 17
18#include <drm/drm_atomic.h>
19#include <drm/drm_atomic_helper.h>
20#include <drm/drm_debugfs.h>
21#include <drm/drm_fourcc.h>
22#include <drm/drm_plane_helper.h>
23#include <drm/drm_vblank.h>
24
16#include "dc.h" 25#include "dc.h"
17#include "drm.h" 26#include "drm.h"
18#include "gem.h" 27#include "gem.h"
19#include "hub.h" 28#include "hub.h"
20#include "plane.h" 29#include "plane.h"
21 30
22#include <drm/drm_atomic.h>
23#include <drm/drm_atomic_helper.h>
24#include <drm/drm_plane_helper.h>
25
26static void tegra_crtc_atomic_destroy_state(struct drm_crtc *crtc, 31static void tegra_crtc_atomic_destroy_state(struct drm_crtc *crtc,
27 struct drm_crtc_state *state); 32 struct drm_crtc_state *state);
28 33
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index 2d94da225e51..a0f6f9b0d258 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -8,14 +8,15 @@
8#include <linux/gpio.h> 8#include <linux/gpio.h>
9#include <linux/interrupt.h> 9#include <linux/interrupt.h>
10#include <linux/io.h> 10#include <linux/io.h>
11#include <linux/module.h>
11#include <linux/of_gpio.h> 12#include <linux/of_gpio.h>
12#include <linux/pinctrl/pinconf-generic.h> 13#include <linux/pinctrl/pinconf-generic.h>
13#include <linux/pinctrl/pinctrl.h> 14#include <linux/pinctrl/pinctrl.h>
14#include <linux/pinctrl/pinmux.h> 15#include <linux/pinctrl/pinmux.h>
15#include <linux/pm_runtime.h>
16#include <linux/platform_device.h> 16#include <linux/platform_device.h>
17#include <linux/reset.h> 17#include <linux/pm_runtime.h>
18#include <linux/regulator/consumer.h> 18#include <linux/regulator/consumer.h>
19#include <linux/reset.h>
19#include <linux/workqueue.h> 20#include <linux/workqueue.h>
20 21
21#include <drm/drm_dp_helper.h> 22#include <drm/drm_dp_helper.h>
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 870904bfad78..6fb7d74ff553 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -8,9 +8,17 @@
8#include <linux/host1x.h> 8#include <linux/host1x.h>
9#include <linux/idr.h> 9#include <linux/idr.h>
10#include <linux/iommu.h> 10#include <linux/iommu.h>
11#include <linux/module.h>
12#include <linux/platform_device.h>
11 13
12#include <drm/drm_atomic.h> 14#include <drm/drm_atomic.h>
13#include <drm/drm_atomic_helper.h> 15#include <drm/drm_atomic_helper.h>
16#include <drm/drm_debugfs.h>
17#include <drm/drm_drv.h>
18#include <drm/drm_fourcc.h>
19#include <drm/drm_ioctl.h>
20#include <drm/drm_prime.h>
21#include <drm/drm_vblank.h>
14 22
15#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU) 23#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
16#include <asm/dma-iommu.h> 24#include <asm/dma-iommu.h>
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 86daa19fcf24..29911eff9ceb 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -7,18 +7,17 @@
7#ifndef HOST1X_DRM_H 7#ifndef HOST1X_DRM_H
8#define HOST1X_DRM_H 1 8#define HOST1X_DRM_H 1
9 9
10#include <uapi/drm/tegra_drm.h>
11#include <linux/host1x.h> 10#include <linux/host1x.h>
12#include <linux/iova.h> 11#include <linux/iova.h>
13#include <linux/of_gpio.h> 12#include <linux/of_gpio.h>
14 13
15#include <drm/drmP.h>
16#include <drm/drm_atomic.h> 14#include <drm/drm_atomic.h>
17#include <drm/drm_edid.h> 15#include <drm/drm_edid.h>
18#include <drm/drm_encoder.h> 16#include <drm/drm_encoder.h>
19#include <drm/drm_fb_helper.h> 17#include <drm/drm_fb_helper.h>
20#include <drm/drm_fixed.h> 18#include <drm/drm_fixed.h>
21#include <drm/drm_probe_helper.h> 19#include <drm/drm_probe_helper.h>
20#include <uapi/drm/tegra_drm.h>
22 21
23#include "gem.h" 22#include "gem.h"
24#include "hub.h" 23#include "hub.h"
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index 2fbfefe9cb42..a5d47e301c5f 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -5,22 +5,24 @@
5 5
6#include <linux/clk.h> 6#include <linux/clk.h>
7#include <linux/debugfs.h> 7#include <linux/debugfs.h>
8#include <linux/delay.h>
8#include <linux/host1x.h> 9#include <linux/host1x.h>
9#include <linux/module.h> 10#include <linux/module.h>
10#include <linux/of.h> 11#include <linux/of.h>
11#include <linux/of_platform.h> 12#include <linux/of_platform.h>
12#include <linux/platform_device.h> 13#include <linux/platform_device.h>
13#include <linux/pm_runtime.h> 14#include <linux/pm_runtime.h>
15#include <linux/regulator/consumer.h>
14#include <linux/reset.h> 16#include <linux/reset.h>
15 17
16#include <linux/regulator/consumer.h> 18#include <video/mipi_display.h>
17 19
18#include <drm/drm_atomic_helper.h> 20#include <drm/drm_atomic_helper.h>
21#include <drm/drm_debugfs.h>
22#include <drm/drm_file.h>
19#include <drm/drm_mipi_dsi.h> 23#include <drm/drm_mipi_dsi.h>
20#include <drm/drm_panel.h> 24#include <drm/drm_panel.h>
21 25
22#include <video/mipi_display.h>
23
24#include "dc.h" 26#include "dc.h"
25#include "drm.h" 27#include "drm.h"
26#include "dsi.h" 28#include "dsi.h"
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 888ed0d74ccd..e34325c83d28 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -9,11 +9,13 @@
9 9
10#include <linux/console.h> 10#include <linux/console.h>
11 11
12#include "drm.h" 12#include <drm/drm_fourcc.h>
13#include "gem.h"
14#include <drm/drm_gem_framebuffer_helper.h> 13#include <drm/drm_gem_framebuffer_helper.h>
15#include <drm/drm_modeset_helper.h> 14#include <drm/drm_modeset_helper.h>
16 15
16#include "drm.h"
17#include "gem.h"
18
17#ifdef CONFIG_DRM_FBDEV_EMULATION 19#ifdef CONFIG_DRM_FBDEV_EMULATION
18static inline struct tegra_fbdev *to_tegra_fbdev(struct drm_fb_helper *helper) 20static inline struct tegra_fbdev *to_tegra_fbdev(struct drm_fb_helper *helper)
19{ 21{
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 0a3d925d5284..fb7667c8dd4c 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -12,6 +12,9 @@
12 12
13#include <linux/dma-buf.h> 13#include <linux/dma-buf.h>
14#include <linux/iommu.h> 14#include <linux/iommu.h>
15
16#include <drm/drm_drv.h>
17#include <drm/drm_prime.h>
15#include <drm/tegra_drm.h> 18#include <drm/tegra_drm.h>
16 19
17#include "drm.h" 20#include "drm.h"
diff --git a/drivers/gpu/drm/tegra/gem.h b/drivers/gpu/drm/tegra/gem.h
index f1f758b25886..83ffb1e14ca3 100644
--- a/drivers/gpu/drm/tegra/gem.h
+++ b/drivers/gpu/drm/tegra/gem.h
@@ -11,7 +11,6 @@
11#include <linux/host1x.h> 11#include <linux/host1x.h>
12 12
13#include <drm/drm.h> 13#include <drm/drm.h>
14#include <drm/drmP.h>
15#include <drm/drm_gem.h> 14#include <drm/drm_gem.h>
16 15
17#define TEGRA_BO_BOTTOM_UP (1 << 0) 16#define TEGRA_BO_BOTTOM_UP (1 << 0)
diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c
index 8dbfb30344e7..641299cc85b8 100644
--- a/drivers/gpu/drm/tegra/gr2d.c
+++ b/drivers/gpu/drm/tegra/gr2d.c
@@ -5,6 +5,7 @@
5 5
6#include <linux/clk.h> 6#include <linux/clk.h>
7#include <linux/iommu.h> 7#include <linux/iommu.h>
8#include <linux/module.h>
8#include <linux/of_device.h> 9#include <linux/of_device.h>
9 10
10#include "drm.h" 11#include "drm.h"
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 334c4d7d238b..50269ffbcb6b 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -6,9 +6,11 @@
6 6
7#include <linux/clk.h> 7#include <linux/clk.h>
8#include <linux/debugfs.h> 8#include <linux/debugfs.h>
9#include <linux/delay.h>
9#include <linux/gpio.h> 10#include <linux/gpio.h>
10#include <linux/hdmi.h> 11#include <linux/hdmi.h>
11#include <linux/math64.h> 12#include <linux/math64.h>
13#include <linux/module.h>
12#include <linux/of_device.h> 14#include <linux/of_device.h>
13#include <linux/pm_runtime.h> 15#include <linux/pm_runtime.h>
14#include <linux/regulator/consumer.h> 16#include <linux/regulator/consumer.h>
@@ -16,6 +18,9 @@
16 18
17#include <drm/drm_atomic_helper.h> 19#include <drm/drm_atomic_helper.h>
18#include <drm/drm_crtc.h> 20#include <drm/drm_crtc.h>
21#include <drm/drm_debugfs.h>
22#include <drm/drm_file.h>
23#include <drm/drm_fourcc.h>
19#include <drm/drm_probe_helper.h> 24#include <drm/drm_probe_helper.h>
20 25
21#include "hda.h" 26#include "hda.h"
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index 92f202ec0577..839b49c40e51 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -4,6 +4,7 @@
4 */ 4 */
5 5
6#include <linux/clk.h> 6#include <linux/clk.h>
7#include <linux/delay.h>
7#include <linux/host1x.h> 8#include <linux/host1x.h>
8#include <linux/module.h> 9#include <linux/module.h>
9#include <linux/of.h> 10#include <linux/of.h>
@@ -13,9 +14,9 @@
13#include <linux/pm_runtime.h> 14#include <linux/pm_runtime.h>
14#include <linux/reset.h> 15#include <linux/reset.h>
15 16
16#include <drm/drmP.h>
17#include <drm/drm_atomic.h> 17#include <drm/drm_atomic.h>
18#include <drm/drm_atomic_helper.h> 18#include <drm/drm_atomic_helper.h>
19#include <drm/drm_fourcc.h>
19#include <drm/drm_probe_helper.h> 20#include <drm/drm_probe_helper.h>
20 21
21#include "drm.h" 22#include "drm.h"
diff --git a/drivers/gpu/drm/tegra/hub.h b/drivers/gpu/drm/tegra/hub.h
index 41541e261c91..767a60d9313c 100644
--- a/drivers/gpu/drm/tegra/hub.h
+++ b/drivers/gpu/drm/tegra/hub.h
@@ -6,7 +6,6 @@
6#ifndef TEGRA_HUB_H 6#ifndef TEGRA_HUB_H
7#define TEGRA_HUB_H 1 7#define TEGRA_HUB_H 1
8 8
9#include <drm/drmP.h>
10#include <drm/drm_plane.h> 9#include <drm/drm_plane.h>
11 10
12#include "plane.h" 11#include "plane.h"
diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c
index df80ca07e46e..6bab71d6e81d 100644
--- a/drivers/gpu/drm/tegra/plane.c
+++ b/drivers/gpu/drm/tegra/plane.c
@@ -5,6 +5,7 @@
5 5
6#include <drm/drm_atomic.h> 6#include <drm/drm_atomic.h>
7#include <drm/drm_atomic_helper.h> 7#include <drm/drm_atomic_helper.h>
8#include <drm/drm_fourcc.h>
8#include <drm/drm_plane_helper.h> 9#include <drm/drm_plane_helper.h>
9 10
10#include "dc.h" 11#include "dc.h"
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 4ffe3794e6d3..e1669ada0a40 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -8,6 +8,7 @@
8#include <linux/debugfs.h> 8#include <linux/debugfs.h>
9#include <linux/gpio.h> 9#include <linux/gpio.h>
10#include <linux/io.h> 10#include <linux/io.h>
11#include <linux/module.h>
11#include <linux/of_device.h> 12#include <linux/of_device.h>
12#include <linux/platform_device.h> 13#include <linux/platform_device.h>
13#include <linux/pm_runtime.h> 14#include <linux/pm_runtime.h>
@@ -17,7 +18,9 @@
17#include <soc/tegra/pmc.h> 18#include <soc/tegra/pmc.h>
18 19
19#include <drm/drm_atomic_helper.h> 20#include <drm/drm_atomic_helper.h>
21#include <drm/drm_debugfs.h>
20#include <drm/drm_dp_helper.h> 22#include <drm/drm_dp_helper.h>
23#include <drm/drm_file.h>
21#include <drm/drm_panel.h> 24#include <drm/drm_panel.h>
22#include <drm/drm_scdc_helper.h> 25#include <drm/drm_scdc_helper.h>
23 26
diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
index 958548ef69e7..cd0399fd8c63 100644
--- a/drivers/gpu/drm/tegra/vic.c
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -4,6 +4,7 @@
4 */ 4 */
5 5
6#include <linux/clk.h> 6#include <linux/clk.h>
7#include <linux/delay.h>
7#include <linux/host1x.h> 8#include <linux/host1x.h>
8#include <linux/iommu.h> 9#include <linux/iommu.h>
9#include <linux/module.h> 10#include <linux/module.h>
diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c
index b6f47b8cf240..03d0e2df6774 100644
--- a/drivers/gpu/drm/tiny/gm12u320.c
+++ b/drivers/gpu/drm/tiny/gm12u320.c
@@ -33,7 +33,6 @@ MODULE_PARM_DESC(eco_mode, "Turn on Eco mode (less bright, more silent)");
33#define DRIVER_DATE "2019" 33#define DRIVER_DATE "2019"
34#define DRIVER_MAJOR 1 34#define DRIVER_MAJOR 1
35#define DRIVER_MINOR 0 35#define DRIVER_MINOR 0
36#define DRIVER_PATCHLEVEL 1
37 36
38/* 37/*
39 * The DLP has an actual width of 854 pixels, but that is not a multiple 38 * The DLP has an actual width of 854 pixels, but that is not a multiple
@@ -45,6 +44,9 @@ MODULE_PARM_DESC(eco_mode, "Turn on Eco mode (less bright, more silent)");
45 44
46#define GM12U320_BLOCK_COUNT 20 45#define GM12U320_BLOCK_COUNT 20
47 46
47#define GM12U320_ERR(fmt, ...) \
48 DRM_DEV_ERROR(&gm12u320->udev->dev, fmt, ##__VA_ARGS__)
49
48#define MISC_RCV_EPT 1 50#define MISC_RCV_EPT 1
49#define DATA_RCV_EPT 2 51#define DATA_RCV_EPT 2
50#define DATA_SND_EPT 3 52#define DATA_SND_EPT 3
@@ -220,7 +222,7 @@ static int gm12u320_misc_request(struct gm12u320_device *gm12u320,
220 usb_sndbulkpipe(gm12u320->udev, MISC_SND_EPT), 222 usb_sndbulkpipe(gm12u320->udev, MISC_SND_EPT),
221 gm12u320->cmd_buf, CMD_SIZE, &len, CMD_TIMEOUT); 223 gm12u320->cmd_buf, CMD_SIZE, &len, CMD_TIMEOUT);
222 if (ret || len != CMD_SIZE) { 224 if (ret || len != CMD_SIZE) {
223 dev_err(&gm12u320->udev->dev, "Misc. req. error %d\n", ret); 225 GM12U320_ERR("Misc. req. error %d\n", ret);
224 return -EIO; 226 return -EIO;
225 } 227 }
226 228
@@ -230,7 +232,7 @@ static int gm12u320_misc_request(struct gm12u320_device *gm12u320,
230 gm12u320->cmd_buf, MISC_VALUE_SIZE, &len, 232 gm12u320->cmd_buf, MISC_VALUE_SIZE, &len,
231 DATA_TIMEOUT); 233 DATA_TIMEOUT);
232 if (ret || len != MISC_VALUE_SIZE) { 234 if (ret || len != MISC_VALUE_SIZE) {
233 dev_err(&gm12u320->udev->dev, "Misc. value error %d\n", ret); 235 GM12U320_ERR("Misc. value error %d\n", ret);
234 return -EIO; 236 return -EIO;
235 } 237 }
236 /* cmd_buf[0] now contains the read value, which we don't use */ 238 /* cmd_buf[0] now contains the read value, which we don't use */
@@ -241,7 +243,7 @@ static int gm12u320_misc_request(struct gm12u320_device *gm12u320,
241 gm12u320->cmd_buf, READ_STATUS_SIZE, &len, 243 gm12u320->cmd_buf, READ_STATUS_SIZE, &len,
242 CMD_TIMEOUT); 244 CMD_TIMEOUT);
243 if (ret || len != READ_STATUS_SIZE) { 245 if (ret || len != READ_STATUS_SIZE) {
244 dev_err(&gm12u320->udev->dev, "Misc. status error %d\n", ret); 246 GM12U320_ERR("Misc. status error %d\n", ret);
245 return -EIO; 247 return -EIO;
246 } 248 }
247 249
@@ -278,7 +280,7 @@ static void gm12u320_copy_fb_to_blocks(struct gm12u320_device *gm12u320)
278 280
279 vaddr = drm_gem_shmem_vmap(fb->obj[0]); 281 vaddr = drm_gem_shmem_vmap(fb->obj[0]);
280 if (IS_ERR(vaddr)) { 282 if (IS_ERR(vaddr)) {
281 DRM_ERROR("failed to vmap fb: %ld\n", PTR_ERR(vaddr)); 283 GM12U320_ERR("failed to vmap fb: %ld\n", PTR_ERR(vaddr));
282 goto put_fb; 284 goto put_fb;
283 } 285 }
284 286
@@ -286,7 +288,7 @@ static void gm12u320_copy_fb_to_blocks(struct gm12u320_device *gm12u320)
286 ret = dma_buf_begin_cpu_access( 288 ret = dma_buf_begin_cpu_access(
287 fb->obj[0]->import_attach->dmabuf, DMA_FROM_DEVICE); 289 fb->obj[0]->import_attach->dmabuf, DMA_FROM_DEVICE);
288 if (ret) { 290 if (ret) {
289 DRM_ERROR("dma_buf_begin_cpu_access err: %d\n", ret); 291 GM12U320_ERR("dma_buf_begin_cpu_access err: %d\n", ret);
290 goto vunmap; 292 goto vunmap;
291 } 293 }
292 } 294 }
@@ -329,7 +331,7 @@ static void gm12u320_copy_fb_to_blocks(struct gm12u320_device *gm12u320)
329 ret = dma_buf_end_cpu_access(fb->obj[0]->import_attach->dmabuf, 331 ret = dma_buf_end_cpu_access(fb->obj[0]->import_attach->dmabuf,
330 DMA_FROM_DEVICE); 332 DMA_FROM_DEVICE);
331 if (ret) 333 if (ret)
332 DRM_ERROR("dma_buf_end_cpu_access err: %d\n", ret); 334 GM12U320_ERR("dma_buf_end_cpu_access err: %d\n", ret);
333 } 335 }
334vunmap: 336vunmap:
335 drm_gem_shmem_vunmap(fb->obj[0], vaddr); 337 drm_gem_shmem_vunmap(fb->obj[0], vaddr);
@@ -340,17 +342,6 @@ unlock:
340 mutex_unlock(&gm12u320->fb_update.lock); 342 mutex_unlock(&gm12u320->fb_update.lock);
341} 343}
342 344
343static int gm12u320_fb_update_ready(struct gm12u320_device *gm12u320)
344{
345 int ret;
346
347 mutex_lock(&gm12u320->fb_update.lock);
348 ret = !gm12u320->fb_update.run || gm12u320->fb_update.fb != NULL;
349 mutex_unlock(&gm12u320->fb_update.lock);
350
351 return ret;
352}
353
354static void gm12u320_fb_update_work(struct work_struct *work) 345static void gm12u320_fb_update_work(struct work_struct *work)
355{ 346{
356 struct gm12u320_device *gm12u320 = 347 struct gm12u320_device *gm12u320 =
@@ -424,14 +415,15 @@ static void gm12u320_fb_update_work(struct work_struct *work)
424 * switches back to showing its logo. 415 * switches back to showing its logo.
425 */ 416 */
426 wait_event_timeout(gm12u320->fb_update.waitq, 417 wait_event_timeout(gm12u320->fb_update.waitq,
427 gm12u320_fb_update_ready(gm12u320), 418 !gm12u320->fb_update.run ||
419 gm12u320->fb_update.fb != NULL,
428 IDLE_TIMEOUT); 420 IDLE_TIMEOUT);
429 } 421 }
430 return; 422 return;
431err: 423err:
432 /* Do not log errors caused by module unload or device unplug */ 424 /* Do not log errors caused by module unload or device unplug */
433 if (ret != -ECONNRESET && ret != -ESHUTDOWN) 425 if (ret != -ENODEV && ret != -ECONNRESET && ret != -ESHUTDOWN)
434 dev_err(&gm12u320->udev->dev, "Frame update error: %d\n", ret); 426 GM12U320_ERR("Frame update error: %d\n", ret);
435} 427}
436 428
437static void gm12u320_fb_mark_dirty(struct drm_framebuffer *fb, 429static void gm12u320_fb_mark_dirty(struct drm_framebuffer *fb,
@@ -746,7 +738,7 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
746 if (ret) 738 if (ret)
747 goto err_put; 739 goto err_put;
748 740
749 drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth); 741 drm_fbdev_generic_setup(dev, 0);
750 742
751 return 0; 743 return 0;
752 744
@@ -765,9 +757,8 @@ static void gm12u320_usb_disconnect(struct usb_interface *interface)
765 drm_dev_put(dev); 757 drm_dev_put(dev);
766} 758}
767 759
768#ifdef CONFIG_PM 760static __maybe_unused int gm12u320_suspend(struct usb_interface *interface,
769static int gm12u320_suspend(struct usb_interface *interface, 761 pm_message_t message)
770 pm_message_t message)
771{ 762{
772 struct drm_device *dev = usb_get_intfdata(interface); 763 struct drm_device *dev = usb_get_intfdata(interface);
773 struct gm12u320_device *gm12u320 = dev->dev_private; 764 struct gm12u320_device *gm12u320 = dev->dev_private;
@@ -778,7 +769,7 @@ static int gm12u320_suspend(struct usb_interface *interface,
778 return 0; 769 return 0;
779} 770}
780 771
781static int gm12u320_resume(struct usb_interface *interface) 772static __maybe_unused int gm12u320_resume(struct usb_interface *interface)
782{ 773{
783 struct drm_device *dev = usb_get_intfdata(interface); 774 struct drm_device *dev = usb_get_intfdata(interface);
784 struct gm12u320_device *gm12u320 = dev->dev_private; 775 struct gm12u320_device *gm12u320 = dev->dev_private;
@@ -789,7 +780,6 @@ static int gm12u320_resume(struct usb_interface *interface)
789 780
790 return 0; 781 return 0;
791} 782}
792#endif
793 783
794static const struct usb_device_id id_table[] = { 784static const struct usb_device_id id_table[] = {
795 { USB_DEVICE(0x1de1, 0xc102) }, 785 { USB_DEVICE(0x1de1, 0xc102) },
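
The gm12u320 diff also swaps the #ifdef CONFIG_PM guard around the suspend/resume callbacks for __maybe_unused: the functions are always compiled, so they cannot silently bitrot, and the compiler drops them whenever nothing ends up referencing them. The general shape of the idiom, on a hypothetical driver:

static __maybe_unused int example_suspend(struct usb_interface *interface,
					  pm_message_t message)
{
	/* quiesce the device; safe to compile even without CONFIG_PM */
	return 0;
}

static __maybe_unused int example_resume(struct usb_interface *interface)
{
	/* bring the device back up */
	return 0;
}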
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 9157dcc897a2..20ff56f27aa4 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -41,7 +41,7 @@
41#include <linux/file.h> 41#include <linux/file.h>
42#include <linux/module.h> 42#include <linux/module.h>
43#include <linux/atomic.h> 43#include <linux/atomic.h>
44#include <linux/reservation.h> 44#include <linux/dma-resv.h>
45 45
46static void ttm_bo_global_kobj_release(struct kobject *kobj); 46static void ttm_bo_global_kobj_release(struct kobject *kobj);
47 47
@@ -161,7 +161,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
161 atomic_dec(&bo->bdev->glob->bo_count); 161 atomic_dec(&bo->bdev->glob->bo_count);
162 dma_fence_put(bo->moving); 162 dma_fence_put(bo->moving);
163 if (!ttm_bo_uses_embedded_gem_object(bo)) 163 if (!ttm_bo_uses_embedded_gem_object(bo))
164 reservation_object_fini(&bo->base._resv); 164 dma_resv_fini(&bo->base._resv);
165 mutex_destroy(&bo->wu_mutex); 165 mutex_destroy(&bo->wu_mutex);
166 bo->destroy(bo); 166 bo->destroy(bo);
167 ttm_mem_global_free(bdev->glob->mem_glob, acc_size); 167 ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
@@ -173,7 +173,7 @@ static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
173 struct ttm_bo_device *bdev = bo->bdev; 173 struct ttm_bo_device *bdev = bo->bdev;
174 struct ttm_mem_type_manager *man; 174 struct ttm_mem_type_manager *man;
175 175
176 reservation_object_assert_held(bo->base.resv); 176 dma_resv_assert_held(bo->base.resv);
177 177
178 if (!list_empty(&bo->lru)) 178 if (!list_empty(&bo->lru))
179 return; 179 return;
@@ -244,7 +244,7 @@ static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
244void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo, 244void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
245 struct ttm_lru_bulk_move *bulk) 245 struct ttm_lru_bulk_move *bulk)
246{ 246{
247 reservation_object_assert_held(bo->base.resv); 247 dma_resv_assert_held(bo->base.resv);
248 248
249 ttm_bo_del_from_lru(bo); 249 ttm_bo_del_from_lru(bo);
250 ttm_bo_add_to_lru(bo); 250 ttm_bo_add_to_lru(bo);
@@ -277,8 +277,8 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
277 if (!pos->first) 277 if (!pos->first)
278 continue; 278 continue;
279 279
280 reservation_object_assert_held(pos->first->base.resv); 280 dma_resv_assert_held(pos->first->base.resv);
281 reservation_object_assert_held(pos->last->base.resv); 281 dma_resv_assert_held(pos->last->base.resv);
282 282
283 man = &pos->first->bdev->man[TTM_PL_TT]; 283 man = &pos->first->bdev->man[TTM_PL_TT];
284 list_bulk_move_tail(&man->lru[i], &pos->first->lru, 284 list_bulk_move_tail(&man->lru[i], &pos->first->lru,
@@ -292,8 +292,8 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
292 if (!pos->first) 292 if (!pos->first)
293 continue; 293 continue;
294 294
295 reservation_object_assert_held(pos->first->base.resv); 295 dma_resv_assert_held(pos->first->base.resv);
296 reservation_object_assert_held(pos->last->base.resv); 296 dma_resv_assert_held(pos->last->base.resv);
297 297
298 man = &pos->first->bdev->man[TTM_PL_VRAM]; 298 man = &pos->first->bdev->man[TTM_PL_VRAM];
299 list_bulk_move_tail(&man->lru[i], &pos->first->lru, 299 list_bulk_move_tail(&man->lru[i], &pos->first->lru,
@@ -307,8 +307,8 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
307 if (!pos->first) 307 if (!pos->first)
308 continue; 308 continue;
309 309
310 reservation_object_assert_held(pos->first->base.resv); 310 dma_resv_assert_held(pos->first->base.resv);
311 reservation_object_assert_held(pos->last->base.resv); 311 dma_resv_assert_held(pos->last->base.resv);
312 312
313 lru = &pos->first->bdev->glob->swap_lru[i]; 313 lru = &pos->first->bdev->glob->swap_lru[i];
314 list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap); 314 list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap);
@@ -442,29 +442,29 @@ static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
442 if (bo->base.resv == &bo->base._resv) 442 if (bo->base.resv == &bo->base._resv)
443 return 0; 443 return 0;
444 444
445 BUG_ON(!reservation_object_trylock(&bo->base._resv)); 445 BUG_ON(!dma_resv_trylock(&bo->base._resv));
446 446
447 r = reservation_object_copy_fences(&bo->base._resv, bo->base.resv); 447 r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
448 if (r) 448 if (r)
449 reservation_object_unlock(&bo->base._resv); 449 dma_resv_unlock(&bo->base._resv);
450 450
451 return r; 451 return r;
452} 452}
453 453
454static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo) 454static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
455{ 455{
456 struct reservation_object_list *fobj; 456 struct dma_resv_list *fobj;
457 struct dma_fence *fence; 457 struct dma_fence *fence;
458 int i; 458 int i;
459 459
460 fobj = reservation_object_get_list(&bo->base._resv); 460 fobj = dma_resv_get_list(&bo->base._resv);
461 fence = reservation_object_get_excl(&bo->base._resv); 461 fence = dma_resv_get_excl(&bo->base._resv);
462 if (fence && !fence->ops->signaled) 462 if (fence && !fence->ops->signaled)
463 dma_fence_enable_sw_signaling(fence); 463 dma_fence_enable_sw_signaling(fence);
464 464
465 for (i = 0; fobj && i < fobj->shared_count; ++i) { 465 for (i = 0; fobj && i < fobj->shared_count; ++i) {
466 fence = rcu_dereference_protected(fobj->shared[i], 466 fence = rcu_dereference_protected(fobj->shared[i],
467 reservation_object_held(bo->base.resv)); 467 dma_resv_held(bo->base.resv));
468 468
469 if (!fence->ops->signaled) 469 if (!fence->ops->signaled)
470 dma_fence_enable_sw_signaling(fence); 470 dma_fence_enable_sw_signaling(fence);
@@ -482,23 +482,23 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
482 /* Last resort, if we fail to allocate memory for the 482 /* Last resort, if we fail to allocate memory for the
483 * fences block for the BO to become idle 483 * fences block for the BO to become idle
484 */ 484 */
485 reservation_object_wait_timeout_rcu(bo->base.resv, true, false, 485 dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
486 30 * HZ); 486 30 * HZ);
487 spin_lock(&glob->lru_lock); 487 spin_lock(&glob->lru_lock);
488 goto error; 488 goto error;
489 } 489 }
490 490
491 spin_lock(&glob->lru_lock); 491 spin_lock(&glob->lru_lock);
492 ret = reservation_object_trylock(bo->base.resv) ? 0 : -EBUSY; 492 ret = dma_resv_trylock(bo->base.resv) ? 0 : -EBUSY;
493 if (!ret) { 493 if (!ret) {
494 if (reservation_object_test_signaled_rcu(&bo->base._resv, true)) { 494 if (dma_resv_test_signaled_rcu(&bo->base._resv, true)) {
495 ttm_bo_del_from_lru(bo); 495 ttm_bo_del_from_lru(bo);
496 spin_unlock(&glob->lru_lock); 496 spin_unlock(&glob->lru_lock);
497 if (bo->base.resv != &bo->base._resv) 497 if (bo->base.resv != &bo->base._resv)
498 reservation_object_unlock(&bo->base._resv); 498 dma_resv_unlock(&bo->base._resv);
499 499
500 ttm_bo_cleanup_memtype_use(bo); 500 ttm_bo_cleanup_memtype_use(bo);
501 reservation_object_unlock(bo->base.resv); 501 dma_resv_unlock(bo->base.resv);
502 return; 502 return;
503 } 503 }
504 504
@@ -514,10 +514,10 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
514 ttm_bo_add_to_lru(bo); 514 ttm_bo_add_to_lru(bo);
515 } 515 }
516 516
517 reservation_object_unlock(bo->base.resv); 517 dma_resv_unlock(bo->base.resv);
518 } 518 }
519 if (bo->base.resv != &bo->base._resv) 519 if (bo->base.resv != &bo->base._resv)
520 reservation_object_unlock(&bo->base._resv); 520 dma_resv_unlock(&bo->base._resv);
521 521
522error: 522error:
523 kref_get(&bo->list_kref); 523 kref_get(&bo->list_kref);
@@ -546,7 +546,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
546 bool unlock_resv) 546 bool unlock_resv)
547{ 547{
548 struct ttm_bo_global *glob = bo->bdev->glob; 548 struct ttm_bo_global *glob = bo->bdev->glob;
549 struct reservation_object *resv; 549 struct dma_resv *resv;
550 int ret; 550 int ret;
551 551
552 if (unlikely(list_empty(&bo->ddestroy))) 552 if (unlikely(list_empty(&bo->ddestroy)))
@@ -554,7 +554,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
554 else 554 else
555 resv = &bo->base._resv; 555 resv = &bo->base._resv;
556 556
557 if (reservation_object_test_signaled_rcu(resv, true)) 557 if (dma_resv_test_signaled_rcu(resv, true))
558 ret = 0; 558 ret = 0;
559 else 559 else
560 ret = -EBUSY; 560 ret = -EBUSY;
@@ -563,10 +563,10 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
563 long lret; 563 long lret;
564 564
565 if (unlock_resv) 565 if (unlock_resv)
566 reservation_object_unlock(bo->base.resv); 566 dma_resv_unlock(bo->base.resv);
567 spin_unlock(&glob->lru_lock); 567 spin_unlock(&glob->lru_lock);
568 568
569 lret = reservation_object_wait_timeout_rcu(resv, true, 569 lret = dma_resv_wait_timeout_rcu(resv, true,
570 interruptible, 570 interruptible,
571 30 * HZ); 571 30 * HZ);
572 572
@@ -576,7 +576,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
576 return -EBUSY; 576 return -EBUSY;
577 577
578 spin_lock(&glob->lru_lock); 578 spin_lock(&glob->lru_lock);
579 if (unlock_resv && !reservation_object_trylock(bo->base.resv)) { 579 if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
580 /* 580 /*
581 * We raced, and lost, someone else holds the reservation now, 581 * We raced, and lost, someone else holds the reservation now,
582 * and is probably busy in ttm_bo_cleanup_memtype_use. 582 * and is probably busy in ttm_bo_cleanup_memtype_use.
@@ -593,7 +593,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
593 593
594 if (ret || unlikely(list_empty(&bo->ddestroy))) { 594 if (ret || unlikely(list_empty(&bo->ddestroy))) {
595 if (unlock_resv) 595 if (unlock_resv)
596 reservation_object_unlock(bo->base.resv); 596 dma_resv_unlock(bo->base.resv);
597 spin_unlock(&glob->lru_lock); 597 spin_unlock(&glob->lru_lock);
598 return ret; 598 return ret;
599 } 599 }
@@ -606,7 +606,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
606 ttm_bo_cleanup_memtype_use(bo); 606 ttm_bo_cleanup_memtype_use(bo);
607 607
608 if (unlock_resv) 608 if (unlock_resv)
609 reservation_object_unlock(bo->base.resv); 609 dma_resv_unlock(bo->base.resv);
610 610
611 return 0; 611 return 0;
612} 612}
@@ -634,12 +634,12 @@ static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
634 634
635 if (remove_all || bo->base.resv != &bo->base._resv) { 635 if (remove_all || bo->base.resv != &bo->base._resv) {
636 spin_unlock(&glob->lru_lock); 636 spin_unlock(&glob->lru_lock);
637 reservation_object_lock(bo->base.resv, NULL); 637 dma_resv_lock(bo->base.resv, NULL);
638 638
639 spin_lock(&glob->lru_lock); 639 spin_lock(&glob->lru_lock);
640 ttm_bo_cleanup_refs(bo, false, !remove_all, true); 640 ttm_bo_cleanup_refs(bo, false, !remove_all, true);
641 641
642 } else if (reservation_object_trylock(bo->base.resv)) { 642 } else if (dma_resv_trylock(bo->base.resv)) {
643 ttm_bo_cleanup_refs(bo, false, !remove_all, true); 643 ttm_bo_cleanup_refs(bo, false, !remove_all, true);
644 } else { 644 } else {
645 spin_unlock(&glob->lru_lock); 645 spin_unlock(&glob->lru_lock);
@@ -711,7 +711,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
711 struct ttm_placement placement; 711 struct ttm_placement placement;
712 int ret = 0; 712 int ret = 0;
713 713
714 reservation_object_assert_held(bo->base.resv); 714 dma_resv_assert_held(bo->base.resv);
715 715
716 placement.num_placement = 0; 716 placement.num_placement = 0;
717 placement.num_busy_placement = 0; 717 placement.num_busy_placement = 0;
@@ -782,7 +782,7 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
782 bool ret = false; 782 bool ret = false;
783 783
784 if (bo->base.resv == ctx->resv) { 784 if (bo->base.resv == ctx->resv) {
785 reservation_object_assert_held(bo->base.resv); 785 dma_resv_assert_held(bo->base.resv);
786 if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT 786 if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT
787 || !list_empty(&bo->ddestroy)) 787 || !list_empty(&bo->ddestroy))
788 ret = true; 788 ret = true;
@@ -790,7 +790,7 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
790 if (busy) 790 if (busy)
791 *busy = false; 791 *busy = false;
792 } else { 792 } else {
793 ret = reservation_object_trylock(bo->base.resv); 793 ret = dma_resv_trylock(bo->base.resv);
794 *locked = ret; 794 *locked = ret;
795 if (busy) 795 if (busy)
796 *busy = !ret; 796 *busy = !ret;
@@ -818,10 +818,10 @@ static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
818 return -EBUSY; 818 return -EBUSY;
819 819
820 if (ctx->interruptible) 820 if (ctx->interruptible)
821 r = reservation_object_lock_interruptible(busy_bo->base.resv, 821 r = dma_resv_lock_interruptible(busy_bo->base.resv,
822 ticket); 822 ticket);
823 else 823 else
824 r = reservation_object_lock(busy_bo->base.resv, ticket); 824 r = dma_resv_lock(busy_bo->base.resv, ticket);
825 825
826 /* 826 /*
827 * TODO: It would be better to keep the BO locked until allocation is at 827 * TODO: It would be better to keep the BO locked until allocation is at
@@ -829,7 +829,7 @@ static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
829 * of TTM. 829 * of TTM.
830 */ 830 */
831 if (!r) 831 if (!r)
832 reservation_object_unlock(busy_bo->base.resv); 832 dma_resv_unlock(busy_bo->base.resv);
833 833
834 return r == -EDEADLK ? -EBUSY : r; 834 return r == -EDEADLK ? -EBUSY : r;
835} 835}
@@ -855,7 +855,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
855 if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked, 855 if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
856 &busy)) { 856 &busy)) {
857 if (busy && !busy_bo && ticket != 857 if (busy && !busy_bo && ticket !=
858 reservation_object_locking_ctx(bo->base.resv)) 858 dma_resv_locking_ctx(bo->base.resv))
859 busy_bo = bo; 859 busy_bo = bo;
860 continue; 860 continue;
861 } 861 }
@@ -863,7 +863,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
863 if (place && !bdev->driver->eviction_valuable(bo, 863 if (place && !bdev->driver->eviction_valuable(bo,
864 place)) { 864 place)) {
865 if (locked) 865 if (locked)
866 reservation_object_unlock(bo->base.resv); 866 dma_resv_unlock(bo->base.resv);
867 continue; 867 continue;
868 } 868 }
869 break; 869 break;
@@ -935,9 +935,9 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
935 spin_unlock(&man->move_lock); 935 spin_unlock(&man->move_lock);
936 936
937 if (fence) { 937 if (fence) {
938 reservation_object_add_shared_fence(bo->base.resv, fence); 938 dma_resv_add_shared_fence(bo->base.resv, fence);
939 939
940 ret = reservation_object_reserve_shared(bo->base.resv, 1); 940 ret = dma_resv_reserve_shared(bo->base.resv, 1);
941 if (unlikely(ret)) { 941 if (unlikely(ret)) {
942 dma_fence_put(fence); 942 dma_fence_put(fence);
943 return ret; 943 return ret;
@@ -964,7 +964,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
964 struct ww_acquire_ctx *ticket; 964 struct ww_acquire_ctx *ticket;
965 int ret; 965 int ret;
966 966
967 ticket = reservation_object_locking_ctx(bo->base.resv); 967 ticket = dma_resv_locking_ctx(bo->base.resv);
968 do { 968 do {
969 ret = (*man->func->get_node)(man, bo, place, mem); 969 ret = (*man->func->get_node)(man, bo, place, mem);
970 if (unlikely(ret != 0)) 970 if (unlikely(ret != 0))
@@ -1094,7 +1094,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
1094 bool type_found = false; 1094 bool type_found = false;
1095 int i, ret; 1095 int i, ret;
1096 1096
1097 ret = reservation_object_reserve_shared(bo->base.resv, 1); 1097 ret = dma_resv_reserve_shared(bo->base.resv, 1);
1098 if (unlikely(ret)) 1098 if (unlikely(ret))
1099 return ret; 1099 return ret;
1100 1100
@@ -1175,7 +1175,7 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
1175 int ret = 0; 1175 int ret = 0;
1176 struct ttm_mem_reg mem; 1176 struct ttm_mem_reg mem;
1177 1177
1178 reservation_object_assert_held(bo->base.resv); 1178 dma_resv_assert_held(bo->base.resv);
1179 1179
1180 mem.num_pages = bo->num_pages; 1180 mem.num_pages = bo->num_pages;
1181 mem.size = mem.num_pages << PAGE_SHIFT; 1181 mem.size = mem.num_pages << PAGE_SHIFT;
@@ -1245,7 +1245,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
1245 int ret; 1245 int ret;
1246 uint32_t new_flags; 1246 uint32_t new_flags;
1247 1247
1248 reservation_object_assert_held(bo->base.resv); 1248 dma_resv_assert_held(bo->base.resv);
1249 /* 1249 /*
1250 * Check whether we need to move buffer. 1250 * Check whether we need to move buffer.
1251 */ 1251 */
@@ -1282,7 +1282,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
1282 struct ttm_operation_ctx *ctx, 1282 struct ttm_operation_ctx *ctx,
1283 size_t acc_size, 1283 size_t acc_size,
1284 struct sg_table *sg, 1284 struct sg_table *sg,
1285 struct reservation_object *resv, 1285 struct dma_resv *resv,
1286 void (*destroy) (struct ttm_buffer_object *)) 1286 void (*destroy) (struct ttm_buffer_object *))
1287{ 1287{
1288 int ret = 0; 1288 int ret = 0;
@@ -1336,7 +1336,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
1336 bo->sg = sg; 1336 bo->sg = sg;
1337 if (resv) { 1337 if (resv) {
1338 bo->base.resv = resv; 1338 bo->base.resv = resv;
1339 reservation_object_assert_held(bo->base.resv); 1339 dma_resv_assert_held(bo->base.resv);
1340 } else { 1340 } else {
1341 bo->base.resv = &bo->base._resv; 1341 bo->base.resv = &bo->base._resv;
1342 } 1342 }
@@ -1345,7 +1345,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
1345 * bo.gem is not initialized, so we have to setup the 1345 * bo.gem is not initialized, so we have to setup the
1346 * struct elements we want use regardless. 1346 * struct elements we want use regardless.
1347 */ 1347 */
1348 reservation_object_init(&bo->base._resv); 1348 dma_resv_init(&bo->base._resv);
1349 drm_vma_node_reset(&bo->base.vma_node); 1349 drm_vma_node_reset(&bo->base.vma_node);
1350 } 1350 }
1351 atomic_inc(&bo->bdev->glob->bo_count); 1351 atomic_inc(&bo->bdev->glob->bo_count);
@@ -1363,7 +1363,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
1363 * since otherwise lockdep will be angered in radeon. 1363 * since otherwise lockdep will be angered in radeon.
1364 */ 1364 */
1365 if (!resv) { 1365 if (!resv) {
1366 locked = reservation_object_trylock(bo->base.resv); 1366 locked = dma_resv_trylock(bo->base.resv);
1367 WARN_ON(!locked); 1367 WARN_ON(!locked);
1368 } 1368 }
1369 1369
@@ -1397,7 +1397,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
1397 bool interruptible, 1397 bool interruptible,
1398 size_t acc_size, 1398 size_t acc_size,
1399 struct sg_table *sg, 1399 struct sg_table *sg,
1400 struct reservation_object *resv, 1400 struct dma_resv *resv,
1401 void (*destroy) (struct ttm_buffer_object *)) 1401 void (*destroy) (struct ttm_buffer_object *))
1402{ 1402{
1403 struct ttm_operation_ctx ctx = { interruptible, false }; 1403 struct ttm_operation_ctx ctx = { interruptible, false };
@@ -1807,13 +1807,13 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
1807 long timeout = 15 * HZ; 1807 long timeout = 15 * HZ;
1808 1808
1809 if (no_wait) { 1809 if (no_wait) {
1810 if (reservation_object_test_signaled_rcu(bo->base.resv, true)) 1810 if (dma_resv_test_signaled_rcu(bo->base.resv, true))
1811 return 0; 1811 return 0;
1812 else 1812 else
1813 return -EBUSY; 1813 return -EBUSY;
1814 } 1814 }
1815 1815
1816 timeout = reservation_object_wait_timeout_rcu(bo->base.resv, true, 1816 timeout = dma_resv_wait_timeout_rcu(bo->base.resv, true,
1817 interruptible, timeout); 1817 interruptible, timeout);
1818 if (timeout < 0) 1818 if (timeout < 0)
1819 return timeout; 1819 return timeout;
@@ -1821,7 +1821,7 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
1821 if (timeout == 0) 1821 if (timeout == 0)
1822 return -EBUSY; 1822 return -EBUSY;
1823 1823
1824 reservation_object_add_excl_fence(bo->base.resv, NULL); 1824 dma_resv_add_excl_fence(bo->base.resv, NULL);
1825 return 0; 1825 return 0;
1826} 1826}
1827EXPORT_SYMBOL(ttm_bo_wait); 1827EXPORT_SYMBOL(ttm_bo_wait);
@@ -1937,7 +1937,7 @@ out:
1937 * already swapped buffer. 1937 * already swapped buffer.
1938 */ 1938 */
1939 if (locked) 1939 if (locked)
1940 reservation_object_unlock(bo->base.resv); 1940 dma_resv_unlock(bo->base.resv);
1941 kref_put(&bo->list_kref, ttm_bo_release_list); 1941 kref_put(&bo->list_kref, ttm_bo_release_list);
1942 return ret; 1942 return ret;
1943} 1943}
@@ -1975,14 +1975,14 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
1975 ret = mutex_lock_interruptible(&bo->wu_mutex); 1975 ret = mutex_lock_interruptible(&bo->wu_mutex);
1976 if (unlikely(ret != 0)) 1976 if (unlikely(ret != 0))
1977 return -ERESTARTSYS; 1977 return -ERESTARTSYS;
1978 if (!reservation_object_is_locked(bo->base.resv)) 1978 if (!dma_resv_is_locked(bo->base.resv))
1979 goto out_unlock; 1979 goto out_unlock;
1980 ret = reservation_object_lock_interruptible(bo->base.resv, NULL); 1980 ret = dma_resv_lock_interruptible(bo->base.resv, NULL);
1981 if (ret == -EINTR) 1981 if (ret == -EINTR)
1982 ret = -ERESTARTSYS; 1982 ret = -ERESTARTSYS;
1983 if (unlikely(ret != 0)) 1983 if (unlikely(ret != 0))
1984 goto out_unlock; 1984 goto out_unlock;
1985 reservation_object_unlock(bo->base.resv); 1985 dma_resv_unlock(bo->base.resv);
1986 1986
1987out_unlock: 1987out_unlock:
1988 mutex_unlock(&bo->wu_mutex); 1988 mutex_unlock(&bo->wu_mutex);
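
The ttm_bo.c hunks above are a mechanical rename of the reservation_object API to dma_resv. A minimal sketch of the renamed calls, assuming a caller that wants to attach a shared fence to a buffer's reservation object; the helper name is made up for illustration, but each call maps one-to-one onto an old reservation_object_* function shown in the diff:

#include <linux/dma-resv.h>
#include <linux/dma-fence.h>

/* Illustrative helper: lock a dma_resv, reserve one shared-fence slot,
 * attach the fence and unlock.  Mirrors the reserve-then-add pattern
 * used throughout the conversion above.
 */
static int example_attach_shared_fence(struct dma_resv *resv,
                                       struct dma_fence *fence)
{
        int ret;

        ret = dma_resv_lock(resv, NULL);        /* was reservation_object_lock() */
        if (ret)
                return ret;

        ret = dma_resv_reserve_shared(resv, 1); /* was reservation_object_reserve_shared() */
        if (!ret)
                dma_resv_add_shared_fence(resv, fence);

        dma_resv_unlock(resv);
        return ret;
}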
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 425a6d627b30..fe81c565e7ef 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -38,7 +38,7 @@
38#include <linux/slab.h> 38#include <linux/slab.h>
39#include <linux/vmalloc.h> 39#include <linux/vmalloc.h>
40#include <linux/module.h> 40#include <linux/module.h>
41#include <linux/reservation.h> 41#include <linux/dma-resv.h>
42 42
43struct ttm_transfer_obj { 43struct ttm_transfer_obj {
44 struct ttm_buffer_object base; 44 struct ttm_buffer_object base;
@@ -518,8 +518,8 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
518 fbo->base.destroy = &ttm_transfered_destroy; 518 fbo->base.destroy = &ttm_transfered_destroy;
519 fbo->base.acc_size = 0; 519 fbo->base.acc_size = 0;
520 fbo->base.base.resv = &fbo->base.base._resv; 520 fbo->base.base.resv = &fbo->base.base._resv;
521 reservation_object_init(fbo->base.base.resv); 521 dma_resv_init(fbo->base.base.resv);
522 ret = reservation_object_trylock(fbo->base.base.resv); 522 ret = dma_resv_trylock(fbo->base.base.resv);
523 WARN_ON(!ret); 523 WARN_ON(!ret);
524 524
525 *new_obj = &fbo->base; 525 *new_obj = &fbo->base;
@@ -689,7 +689,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
689 int ret; 689 int ret;
690 struct ttm_buffer_object *ghost_obj; 690 struct ttm_buffer_object *ghost_obj;
691 691
692 reservation_object_add_excl_fence(bo->base.resv, fence); 692 dma_resv_add_excl_fence(bo->base.resv, fence);
693 if (evict) { 693 if (evict) {
694 ret = ttm_bo_wait(bo, false, false); 694 ret = ttm_bo_wait(bo, false, false);
695 if (ret) 695 if (ret)
@@ -716,7 +716,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
716 if (ret) 716 if (ret)
717 return ret; 717 return ret;
718 718
719 reservation_object_add_excl_fence(ghost_obj->base.resv, fence); 719 dma_resv_add_excl_fence(ghost_obj->base.resv, fence);
720 720
721 /** 721 /**
722 * If we're not moving to fixed memory, the TTM object 722 * If we're not moving to fixed memory, the TTM object
@@ -752,7 +752,7 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
752 752
753 int ret; 753 int ret;
754 754
755 reservation_object_add_excl_fence(bo->base.resv, fence); 755 dma_resv_add_excl_fence(bo->base.resv, fence);
756 756
757 if (!evict) { 757 if (!evict) {
758 struct ttm_buffer_object *ghost_obj; 758 struct ttm_buffer_object *ghost_obj;
@@ -772,7 +772,7 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
772 if (ret) 772 if (ret)
773 return ret; 773 return ret;
774 774
775 reservation_object_add_excl_fence(ghost_obj->base.resv, fence); 775 dma_resv_add_excl_fence(ghost_obj->base.resv, fence);
776 776
777 /** 777 /**
778 * If we're not moving to fixed memory, the TTM object 778 * If we're not moving to fixed memory, the TTM object
@@ -841,7 +841,7 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
841 if (ret) 841 if (ret)
842 return ret; 842 return ret;
843 843
844 ret = reservation_object_copy_fences(ghost->base.resv, bo->base.resv); 844 ret = dma_resv_copy_fences(ghost->base.resv, bo->base.resv);
845 /* Last resort, wait for the BO to be idle when we are OOM */ 845 /* Last resort, wait for the BO to be idle when we are OOM */
846 if (ret) 846 if (ret)
847 ttm_bo_wait(bo, false, false); 847 ttm_bo_wait(bo, false, false);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 85f5bcbe0c76..76eedb963693 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -71,7 +71,7 @@ static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
71 ttm_bo_get(bo); 71 ttm_bo_get(bo);
72 up_read(&vmf->vma->vm_mm->mmap_sem); 72 up_read(&vmf->vma->vm_mm->mmap_sem);
73 (void) dma_fence_wait(bo->moving, true); 73 (void) dma_fence_wait(bo->moving, true);
74 reservation_object_unlock(bo->base.resv); 74 dma_resv_unlock(bo->base.resv);
75 ttm_bo_put(bo); 75 ttm_bo_put(bo);
76 goto out_unlock; 76 goto out_unlock;
77 } 77 }
@@ -131,7 +131,7 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
131 * for reserve, and if it fails, retry the fault after waiting 131 * for reserve, and if it fails, retry the fault after waiting
132 * for the buffer to become unreserved. 132 * for the buffer to become unreserved.
133 */ 133 */
134 if (unlikely(!reservation_object_trylock(bo->base.resv))) { 134 if (unlikely(!dma_resv_trylock(bo->base.resv))) {
135 if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) { 135 if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
136 if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) { 136 if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
137 ttm_bo_get(bo); 137 ttm_bo_get(bo);
@@ -296,7 +296,7 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
296out_io_unlock: 296out_io_unlock:
297 ttm_mem_io_unlock(man); 297 ttm_mem_io_unlock(man);
298out_unlock: 298out_unlock:
299 reservation_object_unlock(bo->base.resv); 299 dma_resv_unlock(bo->base.resv);
300 return ret; 300 return ret;
301} 301}
302 302
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 3aefe72fb5cb..131dae8f4170 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -39,7 +39,7 @@ static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
39 list_for_each_entry_continue_reverse(entry, list, head) { 39 list_for_each_entry_continue_reverse(entry, list, head) {
40 struct ttm_buffer_object *bo = entry->bo; 40 struct ttm_buffer_object *bo = entry->bo;
41 41
42 reservation_object_unlock(bo->base.resv); 42 dma_resv_unlock(bo->base.resv);
43 } 43 }
44} 44}
45 45
@@ -71,7 +71,7 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
71 71
72 if (list_empty(&bo->lru)) 72 if (list_empty(&bo->lru))
73 ttm_bo_add_to_lru(bo); 73 ttm_bo_add_to_lru(bo);
74 reservation_object_unlock(bo->base.resv); 74 dma_resv_unlock(bo->base.resv);
75 } 75 }
76 spin_unlock(&glob->lru_lock); 76 spin_unlock(&glob->lru_lock);
77 77
@@ -114,7 +114,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
114 114
115 ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket); 115 ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
116 if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) { 116 if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
117 reservation_object_unlock(bo->base.resv); 117 dma_resv_unlock(bo->base.resv);
118 118
119 ret = -EBUSY; 119 ret = -EBUSY;
120 120
@@ -130,7 +130,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
130 if (!entry->num_shared) 130 if (!entry->num_shared)
131 continue; 131 continue;
132 132
133 ret = reservation_object_reserve_shared(bo->base.resv, 133 ret = dma_resv_reserve_shared(bo->base.resv,
134 entry->num_shared); 134 entry->num_shared);
135 if (!ret) 135 if (!ret)
136 continue; 136 continue;
@@ -144,16 +144,16 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
144 144
145 if (ret == -EDEADLK) { 145 if (ret == -EDEADLK) {
146 if (intr) { 146 if (intr) {
147 ret = reservation_object_lock_slow_interruptible(bo->base.resv, 147 ret = dma_resv_lock_slow_interruptible(bo->base.resv,
148 ticket); 148 ticket);
149 } else { 149 } else {
150 reservation_object_lock_slow(bo->base.resv, ticket); 150 dma_resv_lock_slow(bo->base.resv, ticket);
151 ret = 0; 151 ret = 0;
152 } 152 }
153 } 153 }
154 154
155 if (!ret && entry->num_shared) 155 if (!ret && entry->num_shared)
156 ret = reservation_object_reserve_shared(bo->base.resv, 156 ret = dma_resv_reserve_shared(bo->base.resv,
157 entry->num_shared); 157 entry->num_shared);
158 158
159 if (unlikely(ret != 0)) { 159 if (unlikely(ret != 0)) {
@@ -201,14 +201,14 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
201 list_for_each_entry(entry, list, head) { 201 list_for_each_entry(entry, list, head) {
202 bo = entry->bo; 202 bo = entry->bo;
203 if (entry->num_shared) 203 if (entry->num_shared)
204 reservation_object_add_shared_fence(bo->base.resv, fence); 204 dma_resv_add_shared_fence(bo->base.resv, fence);
205 else 205 else
206 reservation_object_add_excl_fence(bo->base.resv, fence); 206 dma_resv_add_excl_fence(bo->base.resv, fence);
207 if (list_empty(&bo->lru)) 207 if (list_empty(&bo->lru))
208 ttm_bo_add_to_lru(bo); 208 ttm_bo_add_to_lru(bo);
209 else 209 else
210 ttm_bo_move_to_lru_tail(bo, NULL); 210 ttm_bo_move_to_lru_tail(bo, NULL);
211 reservation_object_unlock(bo->base.resv); 211 dma_resv_unlock(bo->base.resv);
212 } 212 }
213 spin_unlock(&glob->lru_lock); 213 spin_unlock(&glob->lru_lock);
214 if (ticket) 214 if (ticket)
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 00b4a3337840..e0e9b4f69db6 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -48,7 +48,7 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
48 struct ttm_bo_device *bdev = bo->bdev; 48 struct ttm_bo_device *bdev = bo->bdev;
49 uint32_t page_flags = 0; 49 uint32_t page_flags = 0;
50 50
51 reservation_object_assert_held(bo->base.resv); 51 dma_resv_assert_held(bo->base.resv);
52 52
53 if (bdev->need_dma32) 53 if (bdev->need_dma32)
54 page_flags |= TTM_PAGE_FLAG_DMA32; 54 page_flags |= TTM_PAGE_FLAG_DMA32;
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 79744137d89f..5d80507b539b 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -409,7 +409,7 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
409 if (args->pad != 0) 409 if (args->pad != 0)
410 return -EINVAL; 410 return -EINVAL;
411 411
412 ret = drm_gem_reservation_object_wait(file_priv, args->handle, 412 ret = drm_gem_dma_resv_wait(file_priv, args->handle,
413 true, timeout_jiffies); 413 true, timeout_jiffies);
414 414
415 /* Decrement the user's timeout, in case we got interrupted 415 /* Decrement the user's timeout, in case we got interrupted
@@ -495,7 +495,7 @@ v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv,
495 495
496 for (i = 0; i < job->bo_count; i++) { 496 for (i = 0; i < job->bo_count; i++) {
497 /* XXX: Use shared fences for read-only objects. */ 497 /* XXX: Use shared fences for read-only objects. */
498 reservation_object_add_excl_fence(job->bo[i]->resv, 498 dma_resv_add_excl_fence(job->bo[i]->resv,
499 job->done_fence); 499 job->done_fence);
500 } 500 }
501 501
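
v3d only needs the renamed GEM-level wait wrapper. A hedged sketch of a wait-BO path after this change, modelled on v3d_wait_bo_ioctl() above; the wrapper name and the simplified error handling are illustrative, the drm_gem_dma_resv_wait() signature is the one declared in drm_gem.h later in this diff:

#include <drm/drm_gem.h>

/* Hypothetical wait helper: block until all fences on the handle's
 * dma_resv signal.  The core helper returns 0 on success and a
 * negative error code (e.g. -ETIME) otherwise.
 */
static int example_wait_bo(struct drm_file *file_priv, u32 handle,
                           unsigned long timeout_jiffies)
{
        long ret = drm_gem_dma_resv_wait(file_priv, handle, true,
                                         timeout_jiffies);

        return ret < 0 ? ret : 0;
}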
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c
index 6189ea89bb71..862db495d111 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
@@ -32,7 +32,7 @@ static const struct pci_device_id pciidlist[] = {
32}; 32};
33MODULE_DEVICE_TABLE(pci, pciidlist); 33MODULE_DEVICE_TABLE(pci, pciidlist);
34 34
35static struct drm_fb_helper_funcs vbox_fb_helper_funcs = { 35static const struct drm_fb_helper_funcs vbox_fb_helper_funcs = {
36 .fb_probe = vboxfb_create, 36 .fb_probe = vboxfb_create,
37}; 37};
38 38
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index b72b760e3018..7a06cb6e31c5 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -543,7 +543,7 @@ vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
543 bo = to_vc4_bo(&exec->bo[i]->base); 543 bo = to_vc4_bo(&exec->bo[i]->base);
544 bo->seqno = seqno; 544 bo->seqno = seqno;
545 545
546 reservation_object_add_shared_fence(bo->base.base.resv, exec->fence); 546 dma_resv_add_shared_fence(bo->base.base.resv, exec->fence);
547 } 547 }
548 548
549 list_for_each_entry(bo, &exec->unref_list, unref_head) { 549 list_for_each_entry(bo, &exec->unref_list, unref_head) {
@@ -554,7 +554,7 @@ vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
554 bo = to_vc4_bo(&exec->rcl_write_bo[i]->base); 554 bo = to_vc4_bo(&exec->rcl_write_bo[i]->base);
555 bo->write_seqno = seqno; 555 bo->write_seqno = seqno;
556 556
557 reservation_object_add_excl_fence(bo->base.base.resv, exec->fence); 557 dma_resv_add_excl_fence(bo->base.base.resv, exec->fence);
558 } 558 }
559} 559}
560 560
@@ -642,7 +642,7 @@ retry:
642 for (i = 0; i < exec->bo_count; i++) { 642 for (i = 0; i < exec->bo_count; i++) {
643 bo = &exec->bo[i]->base; 643 bo = &exec->bo[i]->base;
644 644
645 ret = reservation_object_reserve_shared(bo->resv, 1); 645 ret = dma_resv_reserve_shared(bo->resv, 1);
646 if (ret) { 646 if (ret) {
647 vc4_unlock_bo_reservations(dev, exec, acquire_ctx); 647 vc4_unlock_bo_reservations(dev, exec, acquire_ctx);
648 return ret; 648 return ret;
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index d8630467549c..9268f6fc3f66 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -21,7 +21,7 @@
21 */ 21 */
22 22
23#include <linux/dma-buf.h> 23#include <linux/dma-buf.h>
24#include <linux/reservation.h> 24#include <linux/dma-resv.h>
25 25
26#include <drm/drm_file.h> 26#include <drm/drm_file.h>
27 27
@@ -128,7 +128,7 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
128{ 128{
129 struct drm_vgem_fence_attach *arg = data; 129 struct drm_vgem_fence_attach *arg = data;
130 struct vgem_file *vfile = file->driver_priv; 130 struct vgem_file *vfile = file->driver_priv;
131 struct reservation_object *resv; 131 struct dma_resv *resv;
132 struct drm_gem_object *obj; 132 struct drm_gem_object *obj;
133 struct dma_fence *fence; 133 struct dma_fence *fence;
134 int ret; 134 int ret;
@@ -151,7 +151,7 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
151 151
152 /* Check for a conflicting fence */ 152 /* Check for a conflicting fence */
153 resv = obj->resv; 153 resv = obj->resv;
154 if (!reservation_object_test_signaled_rcu(resv, 154 if (!dma_resv_test_signaled_rcu(resv,
155 arg->flags & VGEM_FENCE_WRITE)) { 155 arg->flags & VGEM_FENCE_WRITE)) {
156 ret = -EBUSY; 156 ret = -EBUSY;
157 goto err_fence; 157 goto err_fence;
@@ -159,12 +159,12 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
159 159
160 /* Expose the fence via the dma-buf */ 160 /* Expose the fence via the dma-buf */
161 ret = 0; 161 ret = 0;
162 reservation_object_lock(resv, NULL); 162 dma_resv_lock(resv, NULL);
163 if (arg->flags & VGEM_FENCE_WRITE) 163 if (arg->flags & VGEM_FENCE_WRITE)
164 reservation_object_add_excl_fence(resv, fence); 164 dma_resv_add_excl_fence(resv, fence);
165 else if ((ret = reservation_object_reserve_shared(resv, 1)) == 0) 165 else if ((ret = dma_resv_reserve_shared(resv, 1)) == 0)
166 reservation_object_add_shared_fence(resv, fence); 166 dma_resv_add_shared_fence(resv, fence);
167 reservation_object_unlock(resv); 167 dma_resv_unlock(resv);
168 168
169 /* Record the fence in our idr for later signaling */ 169 /* Record the fence in our idr for later signaling */
170 if (ret == 0) { 170 if (ret == 0) {
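
The vgem and vmwgfx hunks also rename the RCU-side helpers. A small sketch, assuming a driver that wants either a non-blocking idle check or an unbounded wait; the function is hypothetical, the two calls match the converted vgem_fence_attach_ioctl() and vmw_user_bo_synccpu_grab() code in this diff:

#include <linux/dma-resv.h>
#include <linux/sched.h>        /* MAX_SCHEDULE_TIMEOUT */

static long example_wait_idle(struct dma_resv *resv, bool wait_all,
                              bool nonblock)
{
        if (nonblock)
                return dma_resv_test_signaled_rcu(resv, wait_all) ? 0 : -EBUSY;

        /* returns remaining jiffies, 0 on timeout, or a negative error */
        return dma_resv_wait_timeout_rcu(resv, wait_all, true,
                                         MAX_SCHEDULE_TIMEOUT);
}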
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 3c430dd65f67..0a88ef11b9d3 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -396,7 +396,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
396 (vgdev, qobj->hw_res_handle, 396 (vgdev, qobj->hw_res_handle,
397 vfpriv->ctx_id, offset, args->level, 397 vfpriv->ctx_id, offset, args->level,
398 &box, fence); 398 &box, fence);
399 reservation_object_add_excl_fence(qobj->tbo.base.resv, 399 dma_resv_add_excl_fence(qobj->tbo.base.resv,
400 &fence->f); 400 &fence->f);
401 401
402 dma_fence_put(&fence->f); 402 dma_fence_put(&fence->f);
@@ -450,7 +450,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
450 (vgdev, qobj, 450 (vgdev, qobj,
451 vfpriv ? vfpriv->ctx_id : 0, offset, 451 vfpriv ? vfpriv->ctx_id : 0, offset,
452 args->level, &box, fence); 452 args->level, &box, fence);
453 reservation_object_add_excl_fence(qobj->tbo.base.resv, 453 dma_resv_add_excl_fence(qobj->tbo.base.resv,
454 &fence->f); 454 &fence->f);
455 dma_fence_put(&fence->f); 455 dma_fence_put(&fence->f);
456 } 456 }
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 3dc08f991a8d..a492ac3f4a7e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -212,7 +212,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
212 0, 0, vgfb->fence); 212 0, 0, vgfb->fence);
213 ret = virtio_gpu_object_reserve(bo, false); 213 ret = virtio_gpu_object_reserve(bo, false);
214 if (!ret) { 214 if (!ret) {
215 reservation_object_add_excl_fence(bo->tbo.base.resv, 215 dma_resv_add_excl_fence(bo->tbo.base.resv,
216 &vgfb->fence->f); 216 &vgfb->fence->f);
217 dma_fence_put(&vgfb->fence->f); 217 dma_fence_put(&vgfb->fence->f);
218 vgfb->fence = NULL; 218 vgfb->fence = NULL;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index 6c01ad2785dd..bb46ca0c458f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -459,9 +459,9 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
459 459
460 /* Buffer objects need to be either pinned or reserved: */ 460 /* Buffer objects need to be either pinned or reserved: */
461 if (!(dst->mem.placement & TTM_PL_FLAG_NO_EVICT)) 461 if (!(dst->mem.placement & TTM_PL_FLAG_NO_EVICT))
462 reservation_object_assert_held(dst->base.resv); 462 dma_resv_assert_held(dst->base.resv);
463 if (!(src->mem.placement & TTM_PL_FLAG_NO_EVICT)) 463 if (!(src->mem.placement & TTM_PL_FLAG_NO_EVICT))
464 reservation_object_assert_held(src->base.resv); 464 dma_resv_assert_held(src->base.resv);
465 465
466 if (dst->ttm->state == tt_unpopulated) { 466 if (dst->ttm->state == tt_unpopulated) {
467 ret = dst->ttm->bdev->driver->ttm_tt_populate(dst->ttm, &ctx); 467 ret = dst->ttm->bdev->driver->ttm_tt_populate(dst->ttm, &ctx);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 0d9a1d454cd4..aad8d8140259 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -341,7 +341,7 @@ void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
341 uint32_t old_mem_type = bo->mem.mem_type; 341 uint32_t old_mem_type = bo->mem.mem_type;
342 int ret; 342 int ret;
343 343
344 reservation_object_assert_held(bo->base.resv); 344 dma_resv_assert_held(bo->base.resv);
345 345
346 if (pin) { 346 if (pin) {
347 if (vbo->pin_count++ > 0) 347 if (vbo->pin_count++ > 0)
@@ -690,7 +690,7 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
690 bool nonblock = !!(flags & drm_vmw_synccpu_dontblock); 690 bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
691 long lret; 691 long lret;
692 692
693 lret = reservation_object_wait_timeout_rcu 693 lret = dma_resv_wait_timeout_rcu
694 (bo->base.resv, true, true, 694 (bo->base.resv, true, true,
695 nonblock ? 0 : MAX_SCHEDULE_TIMEOUT); 695 nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
696 if (!lret) 696 if (!lret)
@@ -1008,10 +1008,10 @@ void vmw_bo_fence_single(struct ttm_buffer_object *bo,
1008 1008
1009 if (fence == NULL) { 1009 if (fence == NULL) {
1010 vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); 1010 vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
1011 reservation_object_add_excl_fence(bo->base.resv, &fence->base); 1011 dma_resv_add_excl_fence(bo->base.resv, &fence->base);
1012 dma_fence_put(&fence->base); 1012 dma_fence_put(&fence->base);
1013 } else 1013 } else
1014 reservation_object_add_excl_fence(bo->base.resv, &fence->base); 1014 dma_resv_add_excl_fence(bo->base.resv, &fence->base);
1015} 1015}
1016 1016
1017 1017
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index 4e8df76681c4..3ca5cf375b01 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -171,7 +171,7 @@ static int vmw_cotable_unscrub(struct vmw_resource *res)
171 } *cmd; 171 } *cmd;
172 172
173 WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB); 173 WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
174 reservation_object_assert_held(bo->base.resv); 174 dma_resv_assert_held(bo->base.resv);
175 175
176 cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd)); 176 cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
177 if (!cmd) 177 if (!cmd)
@@ -313,7 +313,7 @@ static int vmw_cotable_unbind(struct vmw_resource *res,
313 return 0; 313 return 0;
314 314
315 WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB); 315 WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
316 reservation_object_assert_held(bo->base.resv); 316 dma_resv_assert_held(bo->base.resv);
317 317
318 mutex_lock(&dev_priv->binding_mutex); 318 mutex_lock(&dev_priv->binding_mutex);
319 if (!vcotbl->scrubbed) 319 if (!vcotbl->scrubbed)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 434dfadb0e52..178a6cd1a06f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -185,6 +185,9 @@ static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
185 185
186 spin_lock(f->lock); 186 spin_lock(f->lock);
187 187
188 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
189 goto out;
190
188 if (intr && signal_pending(current)) { 191 if (intr && signal_pending(current)) {
189 ret = -ERESTARTSYS; 192 ret = -ERESTARTSYS;
190 goto out; 193 goto out;
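
The vmw_fence_wait() change above adds an early exit when the fence has already signalled. A sketch of the underlying convention, with the vmwgfx locking and wait-list plumbing omitted: dma_fence wait callbacks report an already-signalled fence by returning the remaining timeout rather than queueing a waiter.

#include <linux/dma-fence.h>

static long example_fence_wait(struct dma_fence *f, bool intr,
                               signed long timeout)
{
        if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
                return timeout; /* nothing left to wait for */

        /* ...driver's real wait path would follow here... */
        return timeout;
}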
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 2eb3532e3291..5581a7826b4c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -41,7 +41,7 @@ void vmw_resource_mob_attach(struct vmw_resource *res)
41{ 41{
42 struct vmw_buffer_object *backup = res->backup; 42 struct vmw_buffer_object *backup = res->backup;
43 43
44 reservation_object_assert_held(backup->base.base.resv); 44 dma_resv_assert_held(res->backup->base.base.resv);
45 res->used_prio = (res->res_dirty) ? res->func->dirty_prio : 45 res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
46 res->func->prio; 46 res->func->prio;
47 list_add_tail(&res->mob_head, &backup->res_list); 47 list_add_tail(&res->mob_head, &backup->res_list);
@@ -56,7 +56,7 @@ void vmw_resource_mob_detach(struct vmw_resource *res)
56{ 56{
57 struct vmw_buffer_object *backup = res->backup; 57 struct vmw_buffer_object *backup = res->backup;
58 58
59 reservation_object_assert_held(backup->base.base.resv); 59 dma_resv_assert_held(backup->base.base.resv);
60 if (vmw_resource_mob_attached(res)) { 60 if (vmw_resource_mob_attached(res)) {
61 list_del_init(&res->mob_head); 61 list_del_init(&res->mob_head);
62 vmw_bo_prio_del(backup, res->used_prio); 62 vmw_bo_prio_del(backup, res->used_prio);
@@ -719,7 +719,7 @@ void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
719 .num_shared = 0 719 .num_shared = 0
720 }; 720 };
721 721
722 reservation_object_assert_held(vbo->base.base.resv); 722 dma_resv_assert_held(vbo->base.base.resv);
723 list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) { 723 list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) {
724 if (!res->func->unbind) 724 if (!res->func->unbind)
725 continue; 725 continue;
diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.c b/drivers/gpu/drm/xen/xen_drm_front_kms.c
index de990036199d..21ad1c359b61 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_kms.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_kms.c
@@ -46,7 +46,7 @@ static void fb_destroy(struct drm_framebuffer *fb)
46 drm_gem_fb_destroy(fb); 46 drm_gem_fb_destroy(fb);
47} 47}
48 48
49static struct drm_framebuffer_funcs fb_funcs = { 49static const struct drm_framebuffer_funcs fb_funcs = {
50 .destroy = fb_destroy, 50 .destroy = fb_destroy,
51}; 51};
52 52
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/Kconfig b/drivers/video/fbdev/omap2/omapfb/displays/Kconfig
index 8c1c5a4cfe18..744416dc530e 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/Kconfig
+++ b/drivers/video/fbdev/omap2/omapfb/displays/Kconfig
@@ -49,6 +49,7 @@ config FB_OMAP2_PANEL_DSI_CM
49config FB_OMAP2_PANEL_SONY_ACX565AKM 49config FB_OMAP2_PANEL_SONY_ACX565AKM
50 tristate "ACX565AKM Panel" 50 tristate "ACX565AKM Panel"
51 depends on SPI && BACKLIGHT_CLASS_DEVICE 51 depends on SPI && BACKLIGHT_CLASS_DEVICE
52 depends on DRM_PANEL_SONY_ACX565AKM = n
52 help 53 help
53 This is the LCD panel used on Nokia N900 54 This is the LCD panel used on Nokia N900
54 55
@@ -61,18 +62,21 @@ config FB_OMAP2_PANEL_LGPHILIPS_LB035Q02
61config FB_OMAP2_PANEL_SHARP_LS037V7DW01 62config FB_OMAP2_PANEL_SHARP_LS037V7DW01
62 tristate "Sharp LS037V7DW01 LCD Panel" 63 tristate "Sharp LS037V7DW01 LCD Panel"
63 depends on BACKLIGHT_CLASS_DEVICE 64 depends on BACKLIGHT_CLASS_DEVICE
65 depends on DRM_PANEL_SHARP_LS037V7DW01 = n
64 help 66 help
65 LCD Panel used in TI's SDP3430 and EVM boards 67 LCD Panel used in TI's SDP3430 and EVM boards
66 68
67config FB_OMAP2_PANEL_TPO_TD028TTEC1 69config FB_OMAP2_PANEL_TPO_TD028TTEC1
68 tristate "TPO TD028TTEC1 LCD Panel" 70 tristate "TPO TD028TTEC1 LCD Panel"
69 depends on SPI 71 depends on SPI
72 depends on DRM_PANEL_TPO_TD028TTEC1 = n
70 help 73 help
71 LCD panel used in Openmoko. 74 LCD panel used in Openmoko.
72 75
73config FB_OMAP2_PANEL_TPO_TD043MTEA1 76config FB_OMAP2_PANEL_TPO_TD043MTEA1
74 tristate "TPO TD043MTEA1 LCD Panel" 77 tristate "TPO TD043MTEA1 LCD Panel"
75 depends on SPI 78 depends on SPI
79 depends on DRM_PANEL_TPO_TD043MTEA1 = n
76 help 80 help
77 LCD Panel used in OMAP3 Pandora 81 LCD Panel used in OMAP3 Pandora
78 82
@@ -80,6 +84,7 @@ config FB_OMAP2_PANEL_NEC_NL8048HL11
80 tristate "NEC NL8048HL11 Panel" 84 tristate "NEC NL8048HL11 Panel"
81 depends on SPI 85 depends on SPI
82 depends on BACKLIGHT_CLASS_DEVICE 86 depends on BACKLIGHT_CLASS_DEVICE
87 depends on DRM_PANEL_NEC_NL8048HL11 = n
83 help 88 help
84 This NEC NL8048HL11 panel is TFT LCD used in the 89 This NEC NL8048HL11 panel is TFT LCD used in the
85 Zoom2/3/3630 sdp boards. 90 Zoom2/3/3630 sdp boards.
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
index c402364aec0d..cf528c289857 100644
--- a/include/drm/bridge/dw_hdmi.h
+++ b/include/drm/bridge/dw_hdmi.h
@@ -155,6 +155,8 @@ void dw_hdmi_resume(struct dw_hdmi *hdmi);
155void dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense); 155void dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense);
156 156
157void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate); 157void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate);
158void dw_hdmi_set_channel_count(struct dw_hdmi *hdmi, unsigned int cnt);
159void dw_hdmi_set_channel_allocation(struct dw_hdmi *hdmi, unsigned int ca);
158void dw_hdmi_audio_enable(struct dw_hdmi *hdmi); 160void dw_hdmi_audio_enable(struct dw_hdmi *hdmi);
159void dw_hdmi_audio_disable(struct dw_hdmi *hdmi); 161void dw_hdmi_audio_disable(struct dw_hdmi *hdmi);
160void dw_hdmi_set_high_tmds_clock_ratio(struct dw_hdmi *hdmi); 162void dw_hdmi_set_high_tmds_clock_ratio(struct dw_hdmi *hdmi);
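
A short sketch of how an I2S glue driver might use the two new helpers declared above. The function name, the fixed two-channel setup and the CEA-861 allocation value (0x00 = FL/FR) are illustrative assumptions, not part of this series; only the four dw_hdmi_* calls come from the header.

#include <drm/bridge/dw_hdmi.h>

static void example_dw_hdmi_audio_setup(struct dw_hdmi *hdmi,
                                        unsigned int sample_rate)
{
        dw_hdmi_set_sample_rate(hdmi, sample_rate);
        dw_hdmi_set_channel_count(hdmi, 2);        /* stereo, assumed */
        dw_hdmi_set_channel_allocation(hdmi, 0x00); /* CEA-861 FL/FR, assumed */
        dw_hdmi_audio_enable(hdmi);
}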
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 94aae87b1138..037b1f7a87a5 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -87,7 +87,7 @@ struct module;
87 87
88struct device_node; 88struct device_node;
89struct videomode; 89struct videomode;
90struct reservation_object; 90struct dma_resv;
91struct dma_buf_attachment; 91struct dma_buf_attachment;
92 92
93struct pci_dev; 93struct pci_dev;
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 0b9997e27689..681cb590f952 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -543,8 +543,8 @@ struct drm_connector_state {
543 * 543 *
544 * This is also used in the atomic helpers to map encoders to their 544 * This is also used in the atomic helpers to map encoders to their
545 * current and previous connectors, see 545 * current and previous connectors, see
546 * &drm_atomic_get_old_connector_for_encoder() and 546 * drm_atomic_get_old_connector_for_encoder() and
547 * &drm_atomic_get_new_connector_for_encoder(). 547 * drm_atomic_get_new_connector_for_encoder().
548 * 548 *
549 * NOTE: Atomic drivers must fill this out (either themselves or through 549 * NOTE: Atomic drivers must fill this out (either themselves or through
550 * helpers), for otherwise the GETCONNECTOR and GETENCODER IOCTLs will 550 * helpers), for otherwise the GETCONNECTOR and GETENCODER IOCTLs will
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index ae693c0666cd..6aaba14f5972 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -35,7 +35,7 @@
35 */ 35 */
36 36
37#include <linux/kref.h> 37#include <linux/kref.h>
38#include <linux/reservation.h> 38#include <linux/dma-resv.h>
39 39
40#include <drm/drm_vma_manager.h> 40#include <drm/drm_vma_manager.h>
41 41
@@ -276,7 +276,7 @@ struct drm_gem_object {
276 * 276 *
277 * Normally (@resv == &@_resv) except for imported GEM objects. 277 * Normally (@resv == &@_resv) except for imported GEM objects.
278 */ 278 */
279 struct reservation_object *resv; 279 struct dma_resv *resv;
280 280
281 /** 281 /**
282 * @_resv: 282 * @_resv:
@@ -285,7 +285,7 @@ struct drm_gem_object {
285 * 285 *
286 * This is unused for imported GEM objects. 286 * This is unused for imported GEM objects.
287 */ 287 */
288 struct reservation_object _resv; 288 struct dma_resv _resv;
289 289
290 /** 290 /**
291 * @funcs: 291 * @funcs:
@@ -390,7 +390,7 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
390int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles, 390int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
391 int count, struct drm_gem_object ***objs_out); 391 int count, struct drm_gem_object ***objs_out);
392struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp, u32 handle); 392struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp, u32 handle);
393long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle, 393long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
394 bool wait_all, unsigned long timeout); 394 bool wait_all, unsigned long timeout);
395int drm_gem_lock_reservations(struct drm_gem_object **objs, int count, 395int drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
396 struct ww_acquire_ctx *acquire_ctx); 396 struct ww_acquire_ctx *acquire_ctx);
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
index 038b6d313447..ce1600fdfc3e 100644
--- a/include/drm/drm_gem_shmem_helper.h
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -44,6 +44,9 @@ struct drm_gem_shmem_object {
44 */ 44 */
45 unsigned int pages_use_count; 45 unsigned int pages_use_count;
46 46
47 int madv;
48 struct list_head madv_list;
49
47 /** 50 /**
48 * @pages_mark_dirty_on_put: 51 * @pages_mark_dirty_on_put:
49 * 52 *
@@ -121,6 +124,18 @@ void drm_gem_shmem_unpin(struct drm_gem_object *obj);
121void *drm_gem_shmem_vmap(struct drm_gem_object *obj); 124void *drm_gem_shmem_vmap(struct drm_gem_object *obj);
122void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr); 125void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr);
123 126
127int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv);
128
129static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem)
130{
131 return (shmem->madv > 0) &&
132 !shmem->vmap_use_count && shmem->sgt &&
133 !shmem->base.dma_buf && !shmem->base.import_attach;
134}
135
136void drm_gem_shmem_purge_locked(struct drm_gem_object *obj);
137void drm_gem_shmem_purge(struct drm_gem_object *obj);
138
124struct drm_gem_shmem_object * 139struct drm_gem_shmem_object *
125drm_gem_shmem_create_with_handle(struct drm_file *file_priv, 140drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
126 struct drm_device *dev, size_t size, 141 struct drm_device *dev, size_t size,
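
The new shmem madvise/purge hooks support the panfrost GPU heap work mentioned in this series. A hedged sketch of the intended flow, loosely modelled on a shrinker-style caller; driver structure, list handling and locking are omitted, and per drm_gem_shmem_is_purgeable() above, any madv value greater than zero marks the object purgeable:

#include <drm/drm_gem_shmem_helper.h>

static void example_try_purge(struct drm_gem_object *obj)
{
        struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

        /* mark the BO as "don't need" (madv > 0 => purgeable) */
        drm_gem_shmem_madvise(obj, 1);

        /* only unmapped, non-imported, non-exported BOs can be dropped */
        if (drm_gem_shmem_is_purgeable(shmem))
                drm_gem_shmem_purge(obj);
}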
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index 26377836141c..624bd15ecfab 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -36,14 +36,6 @@ struct display_timing;
36 36
37/** 37/**
38 * struct drm_panel_funcs - perform operations on a given panel 38 * struct drm_panel_funcs - perform operations on a given panel
39 * @disable: disable panel (turn off back light, etc.)
40 * @unprepare: turn off panel
41 * @prepare: turn on panel and perform set up
42 * @enable: enable panel (turn on back light, etc.)
43 * @get_modes: add modes to the connector that the panel is attached to and
44 * return the number of modes added
45 * @get_timings: copy display timings into the provided array and return
46 * the number of display timings available
47 * 39 *
48 * The .prepare() function is typically called before the display controller 40 * The .prepare() function is typically called before the display controller
49 * starts to transmit video data. Panel drivers can use this to turn the panel 41 * starts to transmit video data. Panel drivers can use this to turn the panel
@@ -69,132 +61,107 @@ struct display_timing;
69 * the panel. This is the job of the .unprepare() function. 61 * the panel. This is the job of the .unprepare() function.
70 */ 62 */
71struct drm_panel_funcs { 63struct drm_panel_funcs {
72 int (*disable)(struct drm_panel *panel); 64 /**
73 int (*unprepare)(struct drm_panel *panel); 65 * @prepare:
66 *
67 * Turn on panel and perform set up.
68 */
74 int (*prepare)(struct drm_panel *panel); 69 int (*prepare)(struct drm_panel *panel);
70
71 /**
72 * @enable:
73 *
74 * Enable panel (turn on back light, etc.).
75 */
75 int (*enable)(struct drm_panel *panel); 76 int (*enable)(struct drm_panel *panel);
77
78 /**
79 * @disable:
80 *
81 * Disable panel (turn off back light, etc.).
82 */
83 int (*disable)(struct drm_panel *panel);
84
85 /**
86 * @unprepare:
87 *
88 * Turn off panel.
89 */
90 int (*unprepare)(struct drm_panel *panel);
91
92 /**
93 * @get_modes:
94 *
95 * Add modes to the connector that the panel is attached to and
96 * return the number of modes added.
97 */
76 int (*get_modes)(struct drm_panel *panel); 98 int (*get_modes)(struct drm_panel *panel);
99
100 /**
101 * @get_timings:
102 *
103 * Copy display timings into the provided array and return
104 * the number of display timings available.
105 */
77 int (*get_timings)(struct drm_panel *panel, unsigned int num_timings, 106 int (*get_timings)(struct drm_panel *panel, unsigned int num_timings,
78 struct display_timing *timings); 107 struct display_timing *timings);
79}; 108};
80 109
81/** 110/**
82 * struct drm_panel - DRM panel object 111 * struct drm_panel - DRM panel object
83 * @drm: DRM device owning the panel
84 * @connector: DRM connector that the panel is attached to
85 * @dev: parent device of the panel
86 * @link: link from panel device (supplier) to DRM device (consumer)
87 * @funcs: operations that can be performed on the panel
88 * @list: panel entry in registry
89 */ 112 */
90struct drm_panel { 113struct drm_panel {
114 /**
115 * @drm:
116 *
117 * DRM device owning the panel.
118 */
91 struct drm_device *drm; 119 struct drm_device *drm;
120
121 /**
122 * @connector:
123 *
124 * DRM connector that the panel is attached to.
125 */
92 struct drm_connector *connector; 126 struct drm_connector *connector;
127
128 /**
129 * @dev:
130 *
131 * Parent device of the panel.
132 */
93 struct device *dev; 133 struct device *dev;
94 134
135 /**
136 * @funcs:
137 *
138 * Operations that can be performed on the panel.
139 */
95 const struct drm_panel_funcs *funcs; 140 const struct drm_panel_funcs *funcs;
96 141
142 /**
143 * @list:
144 *
145 * Panel entry in registry.
146 */
97 struct list_head list; 147 struct list_head list;
98}; 148};
99 149
100/**
101 * drm_disable_unprepare - power off a panel
102 * @panel: DRM panel
103 *
104 * Calling this function will completely power off a panel (assert the panel's
105 * reset, turn off power supplies, ...). After this function has completed, it
106 * is usually no longer possible to communicate with the panel until another
107 * call to drm_panel_prepare().
108 *
109 * Return: 0 on success or a negative error code on failure.
110 */
111static inline int drm_panel_unprepare(struct drm_panel *panel)
112{
113 if (panel && panel->funcs && panel->funcs->unprepare)
114 return panel->funcs->unprepare(panel);
115
116 return panel ? -ENOSYS : -EINVAL;
117}
118
119/**
120 * drm_panel_disable - disable a panel
121 * @panel: DRM panel
122 *
123 * This will typically turn off the panel's backlight or disable the display
124 * drivers. For smart panels it should still be possible to communicate with
125 * the integrated circuitry via any command bus after this call.
126 *
127 * Return: 0 on success or a negative error code on failure.
128 */
129static inline int drm_panel_disable(struct drm_panel *panel)
130{
131 if (panel && panel->funcs && panel->funcs->disable)
132 return panel->funcs->disable(panel);
133
134 return panel ? -ENOSYS : -EINVAL;
135}
136
137/**
138 * drm_panel_prepare - power on a panel
139 * @panel: DRM panel
140 *
141 * Calling this function will enable power and deassert any reset signals to
142 * the panel. After this has completed it is possible to communicate with any
143 * integrated circuitry via a command bus.
144 *
145 * Return: 0 on success or a negative error code on failure.
146 */
147static inline int drm_panel_prepare(struct drm_panel *panel)
148{
149 if (panel && panel->funcs && panel->funcs->prepare)
150 return panel->funcs->prepare(panel);
151
152 return panel ? -ENOSYS : -EINVAL;
153}
154
155/**
156 * drm_panel_enable - enable a panel
157 * @panel: DRM panel
158 *
159 * Calling this function will cause the panel display drivers to be turned on
160 * and the backlight to be enabled. Content will be visible on screen after
161 * this call completes.
162 *
163 * Return: 0 on success or a negative error code on failure.
164 */
165static inline int drm_panel_enable(struct drm_panel *panel)
166{
167 if (panel && panel->funcs && panel->funcs->enable)
168 return panel->funcs->enable(panel);
169
170 return panel ? -ENOSYS : -EINVAL;
171}
172
173/**
174 * drm_panel_get_modes - probe the available display modes of a panel
175 * @panel: DRM panel
176 *
177 * The modes probed from the panel are automatically added to the connector
178 * that the panel is attached to.
179 *
180 * Return: The number of modes available from the panel on success or a
181 * negative error code on failure.
182 */
183static inline int drm_panel_get_modes(struct drm_panel *panel)
184{
185 if (panel && panel->funcs && panel->funcs->get_modes)
186 return panel->funcs->get_modes(panel);
187
188 return panel ? -ENOSYS : -EINVAL;
189}
190
191void drm_panel_init(struct drm_panel *panel); 150void drm_panel_init(struct drm_panel *panel);
192 151
193int drm_panel_add(struct drm_panel *panel); 152int drm_panel_add(struct drm_panel *panel);
194void drm_panel_remove(struct drm_panel *panel); 153void drm_panel_remove(struct drm_panel *panel);
195 154
196int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector); 155int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector);
197int drm_panel_detach(struct drm_panel *panel); 156void drm_panel_detach(struct drm_panel *panel);
157
158int drm_panel_prepare(struct drm_panel *panel);
159int drm_panel_unprepare(struct drm_panel *panel);
160
161int drm_panel_enable(struct drm_panel *panel);
162int drm_panel_disable(struct drm_panel *panel);
163
164int drm_panel_get_modes(struct drm_panel *panel);
198 165
199#if defined(CONFIG_OF) && defined(CONFIG_DRM_PANEL) 166#if defined(CONFIG_OF) && defined(CONFIG_DRM_PANEL)
200struct drm_panel *of_drm_find_panel(const struct device_node *np); 167struct drm_panel *of_drm_find_panel(const struct device_node *np);
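The drm_panel helpers above keep their semantics while moving from static inlines to exported functions: prepare powers the panel and deasserts reset, enable turns on the source drivers and backlight, and disable/unprepare undo those steps in reverse order. A minimal sketch of the resulting call order from an encoder or bridge driver follows; the my_encoder type and callbacks are hypothetical, return values are ignored for brevity, and this only illustrates sequencing, not a real driver.

#include <drm/drm_panel.h>

struct my_encoder {
	struct drm_panel *panel;	/* panel attached via drm_panel_attach() */
};

static void my_encoder_enable(struct my_encoder *enc)
{
	drm_panel_prepare(enc->panel);		/* power rails on, reset deasserted */
	/* ... start scanout / the video stream here ... */
	drm_panel_enable(enc->panel);		/* source drivers and backlight on */
}

static void my_encoder_disable(struct my_encoder *enc)
{
	drm_panel_disable(enc->panel);		/* backlight off, content no longer visible */
	/* ... stop the video stream here ... */
	drm_panel_unprepare(enc->panel);	/* reset asserted, power rails off */
}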
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 65ef5376de59..43c4929a2171 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -40,7 +40,7 @@
40#include <linux/mutex.h> 40#include <linux/mutex.h>
41#include <linux/mm.h> 41#include <linux/mm.h>
42#include <linux/bitmap.h> 42#include <linux/bitmap.h>
43#include <linux/reservation.h> 43#include <linux/dma-resv.h>
44 44
45struct ttm_bo_global; 45struct ttm_bo_global;
46 46
@@ -273,7 +273,7 @@ struct ttm_bo_kmap_obj {
273struct ttm_operation_ctx { 273struct ttm_operation_ctx {
274 bool interruptible; 274 bool interruptible;
275 bool no_wait_gpu; 275 bool no_wait_gpu;
276 struct reservation_object *resv; 276 struct dma_resv *resv;
277 uint64_t bytes_moved; 277 uint64_t bytes_moved;
278 uint32_t flags; 278 uint32_t flags;
279}; 279};
@@ -493,7 +493,7 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
493 * @page_alignment: Data alignment in pages. 493 * @page_alignment: Data alignment in pages.
494 * @ctx: TTM operation context for memory allocation. 494 * @ctx: TTM operation context for memory allocation.
495 * @acc_size: Accounted size for this object. 495 * @acc_size: Accounted size for this object.
496 * @resv: Pointer to a reservation_object, or NULL to let ttm allocate one. 496 * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
497 * @destroy: Destroy function. Use NULL for kfree(). 497 * @destroy: Destroy function. Use NULL for kfree().
498 * 498 *
499 * This function initializes a pre-allocated struct ttm_buffer_object. 499 * This function initializes a pre-allocated struct ttm_buffer_object.
@@ -526,7 +526,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
526 struct ttm_operation_ctx *ctx, 526 struct ttm_operation_ctx *ctx,
527 size_t acc_size, 527 size_t acc_size,
528 struct sg_table *sg, 528 struct sg_table *sg,
529 struct reservation_object *resv, 529 struct dma_resv *resv,
530 void (*destroy) (struct ttm_buffer_object *)); 530 void (*destroy) (struct ttm_buffer_object *));
531 531
532/** 532/**
@@ -545,7 +545,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
545 * point to the shmem object backing a GEM object if TTM is used to back a 545 * point to the shmem object backing a GEM object if TTM is used to back a
546 * GEM user interface. 546 * GEM user interface.
547 * @acc_size: Accounted size for this object. 547 * @acc_size: Accounted size for this object.
548 * @resv: Pointer to a reservation_object, or NULL to let ttm allocate one. 548 * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
549 * @destroy: Destroy function. Use NULL for kfree(). 549 * @destroy: Destroy function. Use NULL for kfree().
550 * 550 *
551 * This function initializes a pre-allocated struct ttm_buffer_object. 551 * This function initializes a pre-allocated struct ttm_buffer_object.
@@ -570,7 +570,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo,
570 unsigned long size, enum ttm_bo_type type, 570 unsigned long size, enum ttm_bo_type type,
571 struct ttm_placement *placement, 571 struct ttm_placement *placement,
572 uint32_t page_alignment, bool interruptible, size_t acc_size, 572 uint32_t page_alignment, bool interruptible, size_t acc_size,
573 struct sg_table *sg, struct reservation_object *resv, 573 struct sg_table *sg, struct dma_resv *resv,
574 void (*destroy) (struct ttm_buffer_object *)); 574 void (*destroy) (struct ttm_buffer_object *));
575 575
576/** 576/**
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 076d7ca0f8b6..6f536caea368 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -35,7 +35,7 @@
35#include <linux/workqueue.h> 35#include <linux/workqueue.h>
36#include <linux/fs.h> 36#include <linux/fs.h>
37#include <linux/spinlock.h> 37#include <linux/spinlock.h>
38#include <linux/reservation.h> 38#include <linux/dma-resv.h>
39 39
40#include "ttm_bo_api.h" 40#include "ttm_bo_api.h"
41#include "ttm_memory.h" 41#include "ttm_memory.h"
@@ -664,14 +664,14 @@ static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
664 if (WARN_ON(ticket)) 664 if (WARN_ON(ticket))
665 return -EBUSY; 665 return -EBUSY;
666 666
667 success = reservation_object_trylock(bo->base.resv); 667 success = dma_resv_trylock(bo->base.resv);
668 return success ? 0 : -EBUSY; 668 return success ? 0 : -EBUSY;
669 } 669 }
670 670
671 if (interruptible) 671 if (interruptible)
672 ret = reservation_object_lock_interruptible(bo->base.resv, ticket); 672 ret = dma_resv_lock_interruptible(bo->base.resv, ticket);
673 else 673 else
674 ret = reservation_object_lock(bo->base.resv, ticket); 674 ret = dma_resv_lock(bo->base.resv, ticket);
675 if (ret == -EINTR) 675 if (ret == -EINTR)
676 return -ERESTARTSYS; 676 return -ERESTARTSYS;
677 return ret; 677 return ret;
@@ -755,10 +755,10 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
755 WARN_ON(!kref_read(&bo->kref)); 755 WARN_ON(!kref_read(&bo->kref));
756 756
757 if (interruptible) 757 if (interruptible)
758 ret = reservation_object_lock_slow_interruptible(bo->base.resv, 758 ret = dma_resv_lock_slow_interruptible(bo->base.resv,
759 ticket); 759 ticket);
760 else 760 else
761 reservation_object_lock_slow(bo->base.resv, ticket); 761 dma_resv_lock_slow(bo->base.resv, ticket);
762 762
763 if (likely(ret == 0)) 763 if (likely(ret == 0))
764 ttm_bo_del_sub_from_lru(bo); 764 ttm_bo_del_sub_from_lru(bo);
@@ -783,7 +783,7 @@ static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
783 else 783 else
784 ttm_bo_move_to_lru_tail(bo, NULL); 784 ttm_bo_move_to_lru_tail(bo, NULL);
785 spin_unlock(&bo->bdev->glob->lru_lock); 785 spin_unlock(&bo->bdev->glob->lru_lock);
786 reservation_object_unlock(bo->base.resv); 786 dma_resv_unlock(bo->base.resv);
787} 787}
788 788
789/* 789/*
diff --git a/include/linux/amba/clcd-regs.h b/include/linux/amba/clcd-regs.h
index 516a6fda83c5..421b0fa90d6a 100644
--- a/include/linux/amba/clcd-regs.h
+++ b/include/linux/amba/clcd-regs.h
@@ -42,6 +42,7 @@
42#define TIM2_PCD_LO_MASK GENMASK(4, 0) 42#define TIM2_PCD_LO_MASK GENMASK(4, 0)
43#define TIM2_PCD_LO_BITS 5 43#define TIM2_PCD_LO_BITS 5
44#define TIM2_CLKSEL (1 << 5) 44#define TIM2_CLKSEL (1 << 5)
45#define TIM2_ACB_MASK GENMASK(10, 6)
45#define TIM2_IVS (1 << 11) 46#define TIM2_IVS (1 << 11)
46#define TIM2_IHS (1 << 12) 47#define TIM2_IHS (1 << 12)
47#define TIM2_IPC (1 << 13) 48#define TIM2_IPC (1 << 13)
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index bae060fae862..ec212cb27fdc 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -306,7 +306,7 @@ struct dma_buf {
306 struct module *owner; 306 struct module *owner;
307 struct list_head list_node; 307 struct list_head list_node;
308 void *priv; 308 void *priv;
309 struct reservation_object *resv; 309 struct dma_resv *resv;
310 310
311 /* poll support */ 311 /* poll support */
312 wait_queue_head_t poll; 312 wait_queue_head_t poll;
@@ -365,7 +365,7 @@ struct dma_buf_export_info {
365 const struct dma_buf_ops *ops; 365 const struct dma_buf_ops *ops;
366 size_t size; 366 size_t size;
367 int flags; 367 int flags;
368 struct reservation_object *resv; 368 struct dma_resv *resv;
369 void *priv; 369 void *priv;
370}; 370};
371 371
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index 05d29dbc7e62..3347c54f3a87 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -63,15 +63,35 @@ struct dma_fence_cb;
63 * been completed, or never called at all. 63 * been completed, or never called at all.
64 */ 64 */
65struct dma_fence { 65struct dma_fence {
66 struct kref refcount;
67 const struct dma_fence_ops *ops;
68 struct rcu_head rcu;
69 struct list_head cb_list;
70 spinlock_t *lock; 66 spinlock_t *lock;
67 const struct dma_fence_ops *ops;
68 /*
69 * We clear the callback list on kref_put so that by the time we
70 * release the fence it is unused. No one should be adding to the
71 * cb_list that they don't themselves hold a reference for.
72 *
73 * The lifetime of the timestamp is similarly tied to both the
74 * rcu freelist and the cb_list. The timestamp is only set upon
75 * signaling while simultaneously notifying the cb_list. Ergo, we
 76 * only use either the cb_list or the timestamp. Upon destruction,
77 * neither are accessible, and so we can use the rcu. This means
78 * that the cb_list is *only* valid until the signal bit is set,
79 * and to read either you *must* hold a reference to the fence,
80 * and not just the rcu_read_lock.
81 *
82 * Listed in chronological order.
83 */
84 union {
85 struct list_head cb_list;
86 /* @cb_list replaced by @timestamp on dma_fence_signal() */
87 ktime_t timestamp;
88 /* @timestamp replaced by @rcu on dma_fence_release() */
89 struct rcu_head rcu;
90 };
71 u64 context; 91 u64 context;
72 u64 seqno; 92 u64 seqno;
73 unsigned long flags; 93 unsigned long flags;
74 ktime_t timestamp; 94 struct kref refcount;
75 int error; 95 int error;
76}; 96};
77 97
@@ -273,7 +293,7 @@ static inline struct dma_fence *dma_fence_get(struct dma_fence *fence)
273} 293}
274 294
275/** 295/**
276 * dma_fence_get_rcu - get a fence from a reservation_object_list with 296 * dma_fence_get_rcu - get a fence from a dma_resv_list with
277 * rcu read lock 297 * rcu read lock
278 * @fence: fence to increase refcount of 298 * @fence: fence to increase refcount of
279 * 299 *
@@ -297,7 +317,7 @@ static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence)
297 * so long as the caller is using RCU on the pointer to the fence. 317 * so long as the caller is using RCU on the pointer to the fence.
298 * 318 *
299 * An alternative mechanism is to employ a seqlock to protect a bunch of 319 * An alternative mechanism is to employ a seqlock to protect a bunch of
300 * fences, such as used by struct reservation_object. When using a seqlock, 320 * fences, such as used by struct dma_resv. When using a seqlock,
301 * the seqlock must be taken before and checked after a reference to the 321 * the seqlock must be taken before and checked after a reference to the
302 * fence is acquired (as shown here). 322 * fence is acquired (as shown here).
303 * 323 *
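With the union above, cb_list, timestamp and rcu share storage: the callback list is only meaningful until the fence is signalled, the timestamp only afterwards, and the rcu head only during release. The practical rule spelled out in the comment is that callbacks may only be registered while holding a full reference to the fence. A hedged sketch of that pattern, using only dma_fence_get()/dma_fence_put() and dma_fence_add_callback(); the my_waiter/my_fence_cb names are illustrative:

#include <linux/kernel.h>
#include <linux/dma-fence.h>

struct my_waiter {
	struct dma_fence_cb cb;		/* must stay allocated until the callback runs */
	/* driver-private completion state ... */
};

static void my_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct my_waiter *w = container_of(cb, struct my_waiter, cb);

	/* Called from dma_fence_signal(): the cb_list has just been consumed
	 * and the union now carries the timestamp. */
	(void)w;
}

static int my_wait_async(struct dma_fence *fence, struct my_waiter *w)
{
	int ret;

	dma_fence_get(fence);		/* rcu_read_lock() alone is not enough */
	ret = dma_fence_add_callback(fence, &w->cb, my_fence_cb);
	if (ret)			/* -ENOENT: already signalled, callback will not run */
		dma_fence_put(fence);
	/* on success the reference is dropped later by the owner (not shown) */
	return ret;
}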
diff --git a/include/linux/reservation.h b/include/linux/dma-resv.h
index 56b782fec49b..ee50d10f052b 100644
--- a/include/linux/reservation.h
+++ b/include/linux/dma-resv.h
@@ -50,54 +50,52 @@ extern struct lock_class_key reservation_seqcount_class;
50extern const char reservation_seqcount_string[]; 50extern const char reservation_seqcount_string[];
51 51
52/** 52/**
53 * struct reservation_object_list - a list of shared fences 53 * struct dma_resv_list - a list of shared fences
54 * @rcu: for internal use 54 * @rcu: for internal use
55 * @shared_count: table of shared fences 55 * @shared_count: table of shared fences
56 * @shared_max: for growing shared fence table 56 * @shared_max: for growing shared fence table
57 * @shared: shared fence table 57 * @shared: shared fence table
58 */ 58 */
59struct reservation_object_list { 59struct dma_resv_list {
60 struct rcu_head rcu; 60 struct rcu_head rcu;
61 u32 shared_count, shared_max; 61 u32 shared_count, shared_max;
62 struct dma_fence __rcu *shared[]; 62 struct dma_fence __rcu *shared[];
63}; 63};
64 64
65/** 65/**
66 * struct reservation_object - a reservation object manages fences for a buffer 66 * struct dma_resv - a reservation object manages fences for a buffer
67 * @lock: update side lock 67 * @lock: update side lock
68 * @seq: sequence count for managing RCU read-side synchronization 68 * @seq: sequence count for managing RCU read-side synchronization
69 * @fence_excl: the exclusive fence, if there is one currently 69 * @fence_excl: the exclusive fence, if there is one currently
70 * @fence: list of current shared fences 70 * @fence: list of current shared fences
71 */ 71 */
72struct reservation_object { 72struct dma_resv {
73 struct ww_mutex lock; 73 struct ww_mutex lock;
74 seqcount_t seq; 74 seqcount_t seq;
75 75
76 struct dma_fence __rcu *fence_excl; 76 struct dma_fence __rcu *fence_excl;
77 struct reservation_object_list __rcu *fence; 77 struct dma_resv_list __rcu *fence;
78}; 78};
79 79
80#define reservation_object_held(obj) lockdep_is_held(&(obj)->lock.base) 80#define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
81#define reservation_object_assert_held(obj) \ 81#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
82 lockdep_assert_held(&(obj)->lock.base)
83 82
84/** 83/**
85 * reservation_object_get_list - get the reservation object's 84 * dma_resv_get_list - get the reservation object's
86 * shared fence list, with update-side lock held 85 * shared fence list, with update-side lock held
87 * @obj: the reservation object 86 * @obj: the reservation object
88 * 87 *
89 * Returns the shared fence list. Does NOT take references to 88 * Returns the shared fence list. Does NOT take references to
90 * the fence. The obj->lock must be held. 89 * the fence. The obj->lock must be held.
91 */ 90 */
92static inline struct reservation_object_list * 91static inline struct dma_resv_list *dma_resv_get_list(struct dma_resv *obj)
93reservation_object_get_list(struct reservation_object *obj)
94{ 92{
95 return rcu_dereference_protected(obj->fence, 93 return rcu_dereference_protected(obj->fence,
96 reservation_object_held(obj)); 94 dma_resv_held(obj));
97} 95}
98 96
99/** 97/**
100 * reservation_object_lock - lock the reservation object 98 * dma_resv_lock - lock the reservation object
101 * @obj: the reservation object 99 * @obj: the reservation object
102 * @ctx: the locking context 100 * @ctx: the locking context
103 * 101 *
@@ -111,15 +109,14 @@ reservation_object_get_list(struct reservation_object *obj)
111 * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation 109 * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
112 * object may be locked by itself by passing NULL as @ctx. 110 * object may be locked by itself by passing NULL as @ctx.
113 */ 111 */
114static inline int 112static inline int dma_resv_lock(struct dma_resv *obj,
115reservation_object_lock(struct reservation_object *obj, 113 struct ww_acquire_ctx *ctx)
116 struct ww_acquire_ctx *ctx)
117{ 114{
118 return ww_mutex_lock(&obj->lock, ctx); 115 return ww_mutex_lock(&obj->lock, ctx);
119} 116}
120 117
121/** 118/**
122 * reservation_object_lock_interruptible - lock the reservation object 119 * dma_resv_lock_interruptible - lock the reservation object
123 * @obj: the reservation object 120 * @obj: the reservation object
124 * @ctx: the locking context 121 * @ctx: the locking context
125 * 122 *
@@ -133,48 +130,45 @@ reservation_object_lock(struct reservation_object *obj,
133 * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation 130 * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
134 * object may be locked by itself by passing NULL as @ctx. 131 * object may be locked by itself by passing NULL as @ctx.
135 */ 132 */
136static inline int 133static inline int dma_resv_lock_interruptible(struct dma_resv *obj,
137reservation_object_lock_interruptible(struct reservation_object *obj, 134 struct ww_acquire_ctx *ctx)
138 struct ww_acquire_ctx *ctx)
139{ 135{
140 return ww_mutex_lock_interruptible(&obj->lock, ctx); 136 return ww_mutex_lock_interruptible(&obj->lock, ctx);
141} 137}
142 138
143/** 139/**
144 * reservation_object_lock_slow - slowpath lock the reservation object 140 * dma_resv_lock_slow - slowpath lock the reservation object
145 * @obj: the reservation object 141 * @obj: the reservation object
146 * @ctx: the locking context 142 * @ctx: the locking context
147 * 143 *
148 * Acquires the reservation object after a die case. This function 144 * Acquires the reservation object after a die case. This function
149 * will sleep until the lock becomes available. See reservation_object_lock() as 145 * will sleep until the lock becomes available. See dma_resv_lock() as
150 * well. 146 * well.
151 */ 147 */
152static inline void 148static inline void dma_resv_lock_slow(struct dma_resv *obj,
153reservation_object_lock_slow(struct reservation_object *obj, 149 struct ww_acquire_ctx *ctx)
154 struct ww_acquire_ctx *ctx)
155{ 150{
156 ww_mutex_lock_slow(&obj->lock, ctx); 151 ww_mutex_lock_slow(&obj->lock, ctx);
157} 152}
158 153
159/** 154/**
160 * reservation_object_lock_slow_interruptible - slowpath lock the reservation 155 * dma_resv_lock_slow_interruptible - slowpath lock the reservation
161 * object, interruptible 156 * object, interruptible
162 * @obj: the reservation object 157 * @obj: the reservation object
163 * @ctx: the locking context 158 * @ctx: the locking context
164 * 159 *
165 * Acquires the reservation object interruptible after a die case. This function 160 * Acquires the reservation object interruptible after a die case. This function
166 * will sleep until the lock becomes available. See 161 * will sleep until the lock becomes available. See
167 * reservation_object_lock_interruptible() as well. 162 * dma_resv_lock_interruptible() as well.
168 */ 163 */
169static inline int 164static inline int dma_resv_lock_slow_interruptible(struct dma_resv *obj,
170reservation_object_lock_slow_interruptible(struct reservation_object *obj, 165 struct ww_acquire_ctx *ctx)
171 struct ww_acquire_ctx *ctx)
172{ 166{
173 return ww_mutex_lock_slow_interruptible(&obj->lock, ctx); 167 return ww_mutex_lock_slow_interruptible(&obj->lock, ctx);
174} 168}
175 169
176/** 170/**
177 * reservation_object_trylock - trylock the reservation object 171 * dma_resv_trylock - trylock the reservation object
178 * @obj: the reservation object 172 * @obj: the reservation object
179 * 173 *
180 * Tries to lock the reservation object for exclusive access and modification. 174 * Tries to lock the reservation object for exclusive access and modification.
@@ -187,51 +181,46 @@ reservation_object_lock_slow_interruptible(struct reservation_object *obj,
187 * 181 *
188 * Returns true if the lock was acquired, false otherwise. 182 * Returns true if the lock was acquired, false otherwise.
189 */ 183 */
190static inline bool __must_check 184static inline bool __must_check dma_resv_trylock(struct dma_resv *obj)
191reservation_object_trylock(struct reservation_object *obj)
192{ 185{
193 return ww_mutex_trylock(&obj->lock); 186 return ww_mutex_trylock(&obj->lock);
194} 187}
195 188
196/** 189/**
197 * reservation_object_is_locked - is the reservation object locked 190 * dma_resv_is_locked - is the reservation object locked
198 * @obj: the reservation object 191 * @obj: the reservation object
199 * 192 *
200 * Returns true if the mutex is locked, false if unlocked. 193 * Returns true if the mutex is locked, false if unlocked.
201 */ 194 */
202static inline bool 195static inline bool dma_resv_is_locked(struct dma_resv *obj)
203reservation_object_is_locked(struct reservation_object *obj)
204{ 196{
205 return ww_mutex_is_locked(&obj->lock); 197 return ww_mutex_is_locked(&obj->lock);
206} 198}
207 199
208/** 200/**
209 * reservation_object_locking_ctx - returns the context used to lock the object 201 * dma_resv_locking_ctx - returns the context used to lock the object
210 * @obj: the reservation object 202 * @obj: the reservation object
211 * 203 *
212 * Returns the context used to lock a reservation object or NULL if no context 204 * Returns the context used to lock a reservation object or NULL if no context
213 * was used or the object is not locked at all. 205 * was used or the object is not locked at all.
214 */ 206 */
215static inline struct ww_acquire_ctx * 207static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj)
216reservation_object_locking_ctx(struct reservation_object *obj)
217{ 208{
218 return READ_ONCE(obj->lock.ctx); 209 return READ_ONCE(obj->lock.ctx);
219} 210}
220 211
221/** 212/**
222 * reservation_object_unlock - unlock the reservation object 213 * dma_resv_unlock - unlock the reservation object
223 * @obj: the reservation object 214 * @obj: the reservation object
224 * 215 *
225 * Unlocks the reservation object following exclusive access. 216 * Unlocks the reservation object following exclusive access.
226 */ 217 */
227static inline void 218static inline void dma_resv_unlock(struct dma_resv *obj)
228reservation_object_unlock(struct reservation_object *obj)
229{ 219{
230#ifdef CONFIG_DEBUG_MUTEXES 220#ifdef CONFIG_DEBUG_MUTEXES
231 /* Test shared fence slot reservation */ 221 /* Test shared fence slot reservation */
232 if (rcu_access_pointer(obj->fence)) { 222 if (rcu_access_pointer(obj->fence)) {
233 struct reservation_object_list *fence = 223 struct dma_resv_list *fence = dma_resv_get_list(obj);
234 reservation_object_get_list(obj);
235 224
236 fence->shared_max = fence->shared_count; 225 fence->shared_max = fence->shared_count;
237 } 226 }
@@ -240,7 +229,7 @@ reservation_object_unlock(struct reservation_object *obj)
240} 229}
241 230
242/** 231/**
243 * reservation_object_get_excl - get the reservation object's 232 * dma_resv_get_excl - get the reservation object's
244 * exclusive fence, with update-side lock held 233 * exclusive fence, with update-side lock held
245 * @obj: the reservation object 234 * @obj: the reservation object
246 * 235 *
@@ -252,14 +241,14 @@ reservation_object_unlock(struct reservation_object *obj)
252 * The exclusive fence or NULL 241 * The exclusive fence or NULL
253 */ 242 */
254static inline struct dma_fence * 243static inline struct dma_fence *
255reservation_object_get_excl(struct reservation_object *obj) 244dma_resv_get_excl(struct dma_resv *obj)
256{ 245{
257 return rcu_dereference_protected(obj->fence_excl, 246 return rcu_dereference_protected(obj->fence_excl,
258 reservation_object_held(obj)); 247 dma_resv_held(obj));
259} 248}
260 249
261/** 250/**
262 * reservation_object_get_excl_rcu - get the reservation object's 251 * dma_resv_get_excl_rcu - get the reservation object's
263 * exclusive fence, without lock held. 252 * exclusive fence, without lock held.
264 * @obj: the reservation object 253 * @obj: the reservation object
265 * 254 *
@@ -270,7 +259,7 @@ reservation_object_get_excl(struct reservation_object *obj)
270 * The exclusive fence or NULL if none 259 * The exclusive fence or NULL if none
271 */ 260 */
272static inline struct dma_fence * 261static inline struct dma_fence *
273reservation_object_get_excl_rcu(struct reservation_object *obj) 262dma_resv_get_excl_rcu(struct dma_resv *obj)
274{ 263{
275 struct dma_fence *fence; 264 struct dma_fence *fence;
276 265
@@ -284,29 +273,23 @@ reservation_object_get_excl_rcu(struct reservation_object *obj)
284 return fence; 273 return fence;
285} 274}
286 275
287void reservation_object_init(struct reservation_object *obj); 276void dma_resv_init(struct dma_resv *obj);
288void reservation_object_fini(struct reservation_object *obj); 277void dma_resv_fini(struct dma_resv *obj);
289int reservation_object_reserve_shared(struct reservation_object *obj, 278int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
290 unsigned int num_fences); 279void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
291void reservation_object_add_shared_fence(struct reservation_object *obj,
292 struct dma_fence *fence);
293 280
294void reservation_object_add_excl_fence(struct reservation_object *obj, 281void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
295 struct dma_fence *fence);
296 282
297int reservation_object_get_fences_rcu(struct reservation_object *obj, 283int dma_resv_get_fences_rcu(struct dma_resv *obj,
298 struct dma_fence **pfence_excl, 284 struct dma_fence **pfence_excl,
299 unsigned *pshared_count, 285 unsigned *pshared_count,
300 struct dma_fence ***pshared); 286 struct dma_fence ***pshared);
301 287
302int reservation_object_copy_fences(struct reservation_object *dst, 288int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
303 struct reservation_object *src);
304 289
305long reservation_object_wait_timeout_rcu(struct reservation_object *obj, 290long dma_resv_wait_timeout_rcu(struct dma_resv *obj, bool wait_all, bool intr,
306 bool wait_all, bool intr, 291 unsigned long timeout);
307 unsigned long timeout);
308 292
309bool reservation_object_test_signaled_rcu(struct reservation_object *obj, 293bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all);
310 bool test_all);
311 294
312#endif /* _LINUX_RESERVATION_H */ 295#endif /* _LINUX_RESERVATION_H */
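Besides the renames, the declarations above form the whole update-side API: reserve a slot, then add the fence, all while holding dma_resv_lock(). A hedged sketch of that sequence for a single object (so a NULL acquire context is fine, per the dma_resv_lock() kernel-doc); the function name is illustrative and error handling is minimal:

#include <linux/dma-resv.h>
#include <linux/dma-fence.h>

static int publish_shared_fence(struct dma_resv *resv, struct dma_fence *fence)
{
	int ret;

	ret = dma_resv_lock_interruptible(resv, NULL);	/* single object, no ww ctx needed */
	if (ret)
		return ret;

	ret = dma_resv_reserve_shared(resv, 1);		/* grow the shared slot table first */
	if (!ret)
		dma_resv_add_shared_fence(resv, fence);	/* takes its own fence reference */

	dma_resv_unlock(resv);
	return ret;
}

Readers either take dma_resv_lock() themselves or go through the RCU variants declared above (dma_resv_get_fences_rcu(), dma_resv_wait_timeout_rcu(), dma_resv_test_signaled_rcu()).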
diff --git a/include/uapi/drm/panfrost_drm.h b/include/uapi/drm/panfrost_drm.h
index cb577fb96b38..ec19db1eead8 100644
--- a/include/uapi/drm/panfrost_drm.h
+++ b/include/uapi/drm/panfrost_drm.h
@@ -20,6 +20,7 @@ extern "C" {
20#define DRM_PANFROST_GET_BO_OFFSET 0x05 20#define DRM_PANFROST_GET_BO_OFFSET 0x05
21#define DRM_PANFROST_PERFCNT_ENABLE 0x06 21#define DRM_PANFROST_PERFCNT_ENABLE 0x06
22#define DRM_PANFROST_PERFCNT_DUMP 0x07 22#define DRM_PANFROST_PERFCNT_DUMP 0x07
23#define DRM_PANFROST_MADVISE 0x08
23 24
24#define DRM_IOCTL_PANFROST_SUBMIT DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_SUBMIT, struct drm_panfrost_submit) 25#define DRM_IOCTL_PANFROST_SUBMIT DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_SUBMIT, struct drm_panfrost_submit)
25#define DRM_IOCTL_PANFROST_WAIT_BO DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_WAIT_BO, struct drm_panfrost_wait_bo) 26#define DRM_IOCTL_PANFROST_WAIT_BO DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_WAIT_BO, struct drm_panfrost_wait_bo)
@@ -27,6 +28,7 @@ extern "C" {
27#define DRM_IOCTL_PANFROST_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_MMAP_BO, struct drm_panfrost_mmap_bo) 28#define DRM_IOCTL_PANFROST_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_MMAP_BO, struct drm_panfrost_mmap_bo)
28#define DRM_IOCTL_PANFROST_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_GET_PARAM, struct drm_panfrost_get_param) 29#define DRM_IOCTL_PANFROST_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_GET_PARAM, struct drm_panfrost_get_param)
29#define DRM_IOCTL_PANFROST_GET_BO_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_GET_BO_OFFSET, struct drm_panfrost_get_bo_offset) 30#define DRM_IOCTL_PANFROST_GET_BO_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_GET_BO_OFFSET, struct drm_panfrost_get_bo_offset)
31#define DRM_IOCTL_PANFROST_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_MADVISE, struct drm_panfrost_madvise)
30 32
31/* 33/*
32 * Unstable ioctl(s): only exposed when the unsafe unstable_ioctls module 34 * Unstable ioctl(s): only exposed when the unsafe unstable_ioctls module
@@ -82,6 +84,9 @@ struct drm_panfrost_wait_bo {
82 __s64 timeout_ns; /* absolute */ 84 __s64 timeout_ns; /* absolute */
83}; 85};
84 86
87#define PANFROST_BO_NOEXEC 1
88#define PANFROST_BO_HEAP 2
89
85/** 90/**
86 * struct drm_panfrost_create_bo - ioctl argument for creating Panfrost BOs. 91 * struct drm_panfrost_create_bo - ioctl argument for creating Panfrost BOs.
87 * 92 *
@@ -198,6 +203,26 @@ struct drm_panfrost_perfcnt_dump {
198 __u64 buf_ptr; 203 __u64 buf_ptr;
199}; 204};
200 205
206/* madvise provides a way to tell the kernel whether a buffer's contents
 207 * can be discarded under memory pressure, which is useful for a userspace
 208 * BO cache where we want to optimistically hold on to the buffer allocation
 209 * and potential mmap, but allow the pages to be discarded under memory
 210 * pressure.
 211 *
 212 * Typical usage would involve madvise(DONTNEED) when a buffer enters the BO
 213 * cache, and madvise(WILLNEED) when trying to recycle a buffer from the BO cache.
214 * In the WILLNEED case, 'retained' indicates to userspace whether the
215 * backing pages still exist.
216 */
217#define PANFROST_MADV_WILLNEED 0 /* backing pages are needed, status returned in 'retained' */
218#define PANFROST_MADV_DONTNEED 1 /* backing pages not needed */
219
220struct drm_panfrost_madvise {
221 __u32 handle; /* in, GEM handle */
222 __u32 madv; /* in, PANFROST_MADV_x */
223 __u32 retained; /* out, whether backing store still exists */
224};
225
201#if defined(__cplusplus) 226#if defined(__cplusplus)
202} 227}
203#endif 228#endif
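To make the DONTNEED/WILLNEED flow described above concrete, here is a hedged userspace sketch of a BO-cache put/get pair. It assumes libdrm's drmIoctl(), an already-open Panfrost render node and an existing GEM handle; the function names and the include path for the uapi header are placeholders, not taken from Mesa:

#include <stdint.h>
#include <xf86drm.h>
#include <drm/panfrost_drm.h>	/* the uapi header patched above */

/* Buffer goes back into the userspace cache: let the kernel purge it if needed. */
static int bo_cache_put(int fd, uint32_t handle)
{
	struct drm_panfrost_madvise req = {
		.handle = handle,
		.madv = PANFROST_MADV_DONTNEED,
	};

	return drmIoctl(fd, DRM_IOCTL_PANFROST_MADVISE, &req);
}

/* Try to recycle a cached buffer: returns 1 if the backing pages survived,
 * 0 if the kernel purged them (allocate a fresh BO instead), or -1 with
 * errno set if the ioctl failed. */
static int bo_cache_get(int fd, uint32_t handle)
{
	struct drm_panfrost_madvise req = {
		.handle = handle,
		.madv = PANFROST_MADV_WILLNEED,
	};
	int ret = drmIoctl(fd, DRM_IOCTL_PANFROST_MADVISE, &req);

	return ret ? ret : (int)req.retained;
}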