aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt18
-rw-r--r--Documentation/devicetree/bindings/display/bridge/cdns,dsi.txt133
-rw-r--r--Documentation/devicetree/bindings/display/bridge/thine,thc63lvd1024.txt60
-rw-r--r--Documentation/devicetree/bindings/display/sunxi/sun6i-dsi.txt93
-rw-r--r--Documentation/gpu/drivers.rst1
-rw-r--r--Documentation/gpu/kms-properties.csv1
-rw-r--r--Documentation/gpu/todo.rst18
-rw-r--r--Documentation/gpu/xen-front.rst31
-rw-r--r--MAINTAINERS9
-rw-r--r--drivers/gpu/drm/Kconfig2
-rw-r--r--drivers/gpu/drm/Makefile1
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c2
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h13
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c89
-rw-r--r--drivers/gpu/drm/bochs/bochs_kms.c2
-rw-r--r--drivers/gpu/drm/bridge/Kconfig16
-rw-r--r--drivers/gpu/drm/bridge/Makefile2
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511.h6
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c44
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c331
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.h5
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c236
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h7
-rw-r--r--drivers/gpu/drm/bridge/cdns-dsi.c1623
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c1
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c18
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c2
-rw-r--r--drivers/gpu/drm/bridge/thc63lvd1024.c206
-rw-r--r--drivers/gpu/drm/drm_atomic.c12
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c107
-rw-r--r--drivers/gpu/drm/drm_blend.c39
-rw-r--r--drivers/gpu/drm/drm_crtc.c45
-rw-r--r--drivers/gpu/drm/drm_crtc_internal.h2
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c9
-rw-r--r--drivers/gpu/drm/drm_drv.c54
-rw-r--r--drivers/gpu/drm/drm_edid.c2
-rw-r--r--drivers/gpu/drm/drm_framebuffer.c34
-rw-r--r--drivers/gpu/drm/drm_gem.c9
-rw-r--r--drivers/gpu/drm/drm_gem_framebuffer_helper.c19
-rw-r--r--drivers/gpu/drm/drm_lease.c2
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c22
-rw-r--r--drivers/gpu/drm/drm_plane.c54
-rw-r--r--drivers/gpu/drm/drm_prime.c13
-rw-r--r--drivers/gpu/drm/drm_scdc_helper.c10
-rw-r--r--drivers/gpu/drm/drm_simple_kms_helper.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c20
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c4
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_crt.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_hdmi.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_lvds.c2
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_output.c2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_drv.h2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_lvds.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c2
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c2
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c2
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c8
-rw-r--r--drivers/gpu/drm/i915/intel_display.c4
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c2
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.c2
-rw-r--r--drivers/gpu/drm/pl111/pl111_display.c11
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.h1
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c16
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_plane.c15
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_plane.h2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vsp.c42
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vsp.h3
-rw-r--r--drivers/gpu/drm/rockchip/analogix_dp-rockchip.c37
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.h1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.c61
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c4
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_psr.c158
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_psr.h7
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c15
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.h1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.c1
-rw-r--r--drivers/gpu/drm/sti/Kconfig3
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c24
-rw-r--r--drivers/gpu/drm/sti/sti_plane.c9
-rw-r--r--drivers/gpu/drm/stm/drv.c2
-rw-r--r--drivers/gpu/drm/stm/ltdc.c35
-rw-r--r--drivers/gpu/drm/stm/ltdc.h10
-rw-r--r--drivers/gpu/drm/sun4i/Kconfig10
-rw-r--r--drivers/gpu/drm/sun4i/Makefile4
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.c16
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.h3
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_layer.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c86
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.h46
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dphy.c292
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c1107
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h63
-rw-r--r--drivers/gpu/drm/tegra/drm.c21
-rw-r--r--drivers/gpu/drm/tinydrm/core/tinydrm-core.c2
-rw-r--r--drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c30
-rw-r--r--drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c22
-rw-r--r--drivers/gpu/drm/tinydrm/ili9225.c28
-rw-r--r--drivers/gpu/drm/tinydrm/mi0283qt.c8
-rw-r--r--drivers/gpu/drm/tinydrm/mipi-dbi.c30
-rw-r--r--drivers/gpu/drm/tinydrm/repaper.c33
-rw-r--r--drivers/gpu/drm/tinydrm/st7586.c28
-rw-r--r--drivers/gpu/drm/tinydrm/st7735r.c7
-rw-r--r--drivers/gpu/drm/tve200/tve200_display.c11
-rw-r--r--drivers/gpu/drm/udl/udl_connector.c2
-rw-r--r--drivers/gpu/drm/udl/udl_dmabuf.c5
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c2
-rw-r--r--drivers/gpu/drm/udl/udl_drv.h2
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c5
-rw-r--r--drivers/gpu/drm/udl/udl_main.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c75
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c6
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h37
-rw-r--r--drivers/gpu/drm/vc4/vc4_hvs.c4
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c224
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c152
-rw-r--r--drivers/gpu/drm/vc4/vc4_regs.h97
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c35
-rw-r--r--drivers/gpu/drm/xen/Kconfig17
-rw-r--r--drivers/gpu/drm/xen/Makefile11
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front.c840
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front.h158
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_cfg.c77
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_cfg.h37
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_conn.c115
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_conn.h27
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_evtchnl.c387
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_evtchnl.h81
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_gem.c308
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_gem.h40
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_kms.c366
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_kms.h26
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_shbuf.c414
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_shbuf.h64
-rw-r--r--drivers/gpu/drm/zte/zx_plane.c2
-rw-r--r--drivers/gpu/drm/zte/zx_vou.c5
-rw-r--r--drivers/gpu/drm/zte/zx_vou.h3
-rw-r--r--drivers/staging/vboxvideo/vbox_drv.c2
-rw-r--r--include/drm/bridge/analogix_dp.h3
-rw-r--r--include/drm/drmP.h28
-rw-r--r--include/drm/drm_blend.h3
-rw-r--r--include/drm/drm_device.h9
-rw-r--r--include/drm/drm_drv.h15
-rw-r--r--include/drm/drm_edid.h2
-rw-r--r--include/drm/drm_gem_framebuffer_helper.h3
-rw-r--r--include/drm/drm_legacy.h4
-rw-r--r--include/drm/drm_mode_config.h8
-rw-r--r--include/drm/drm_modeset_helper_vtables.h5
-rw-r--r--include/drm/drm_plane.h21
-rw-r--r--include/drm/drm_property.h26
-rw-r--r--include/drm/drm_simple_kms_helper.h6
-rw-r--r--include/drm/tinydrm/mipi-dbi.h4
-rw-r--r--include/drm/tinydrm/tinydrm-helpers.h5
-rw-r--r--include/drm/tinydrm/tinydrm.h8
-rw-r--r--scripts/coccinelle/api/drm-get-put.cocci10
165 files changed, 8499 insertions, 1096 deletions
diff --git a/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt b/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt
index 0047b1394c70..2c887536258c 100644
--- a/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt
+++ b/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt
@@ -14,7 +14,13 @@ Required properties:
14 "adi,adv7513" 14 "adi,adv7513"
15 "adi,adv7533" 15 "adi,adv7533"
16 16
17- reg: I2C slave address 17- reg: I2C slave addresses
18 The ADV7511 internal registers are split into four pages exposed through
19 different I2C addresses, creating four register maps. Each map has it own
20 I2C address and acts as a standard slave device on the I2C bus. The main
21 address is mandatory, others are optional and revert to defaults if not
22 specified.
23
18 24
19The ADV7511 supports a large number of input data formats that differ by their 25The ADV7511 supports a large number of input data formats that differ by their
20color depth, color format, clock mode, bit justification and random 26color depth, color format, clock mode, bit justification and random
@@ -70,6 +76,9 @@ Optional properties:
70 rather than generate its own timings for HDMI output. 76 rather than generate its own timings for HDMI output.
71- clocks: from common clock binding: reference to the CEC clock. 77- clocks: from common clock binding: reference to the CEC clock.
72- clock-names: from common clock binding: must be "cec". 78- clock-names: from common clock binding: must be "cec".
79- reg-names : Names of maps with programmable addresses.
80 It can contain any map needing a non-default address.
81 Possible maps names are : "main", "edid", "cec", "packet"
73 82
74Required nodes: 83Required nodes:
75 84
@@ -88,7 +97,12 @@ Example
88 97
89 adv7511w: hdmi@39 { 98 adv7511w: hdmi@39 {
90 compatible = "adi,adv7511w"; 99 compatible = "adi,adv7511w";
91 reg = <39>; 100 /*
101 * The EDID page will be accessible on address 0x66 on the I2C
102 * bus. All other maps continue to use their default addresses.
103 */
104 reg = <0x39>, <0x66>;
105 reg-names = "main", "edid";
92 interrupt-parent = <&gpio3>; 106 interrupt-parent = <&gpio3>;
93 interrupts = <29 IRQ_TYPE_EDGE_FALLING>; 107 interrupts = <29 IRQ_TYPE_EDGE_FALLING>;
94 clocks = <&cec_clock>; 108 clocks = <&cec_clock>;
diff --git a/Documentation/devicetree/bindings/display/bridge/cdns,dsi.txt b/Documentation/devicetree/bindings/display/bridge/cdns,dsi.txt
new file mode 100644
index 000000000000..f5725bb6c61c
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/bridge/cdns,dsi.txt
@@ -0,0 +1,133 @@
1Cadence DSI bridge
2==================
3
4The Cadence DSI bridge is a DPI to DSI bridge supporting up to 4 DSI lanes.
5
6Required properties:
7- compatible: should be set to "cdns,dsi".
8- reg: physical base address and length of the controller's registers.
9- interrupts: interrupt line connected to the DSI bridge.
10- clocks: DSI bridge clocks.
11- clock-names: must contain "dsi_p_clk" and "dsi_sys_clk".
12- phys: phandle link to the MIPI D-PHY controller.
13- phy-names: must contain "dphy".
14- #address-cells: must be set to 1.
15- #size-cells: must be set to 0.
16
17Optional properties:
18- resets: DSI reset lines.
19- reset-names: can contain "dsi_p_rst".
20
21Required subnodes:
22- ports: Ports as described in Documentation/devicetree/bindings/graph.txt.
23 2 ports are available:
24 * port 0: this port is only needed if some of your DSI devices are
25 controlled through an external bus like I2C or SPI. Can have at
26 most 4 endpoints. The endpoint number is directly encoding the
27 DSI virtual channel used by this device.
28 * port 1: represents the DPI input.
29 Other ports will be added later to support the new kind of inputs.
30
31- one subnode per DSI device connected on the DSI bus. Each DSI device should
32 contain a reg property encoding its virtual channel.
33
34Cadence DPHY
35============
36
37Cadence DPHY block.
38
39Required properties:
40- compatible: should be set to "cdns,dphy".
41- reg: physical base address and length of the DPHY registers.
42- clocks: DPHY reference clocks.
43- clock-names: must contain "psm" and "pll_ref".
44- #phy-cells: must be set to 0.
45
46
47Example:
48 dphy0: dphy@fd0e0000{
49 compatible = "cdns,dphy";
50 reg = <0x0 0xfd0e0000 0x0 0x1000>;
51 clocks = <&psm_clk>, <&pll_ref_clk>;
52 clock-names = "psm", "pll_ref";
53 #phy-cells = <0>;
54 };
55
56 dsi0: dsi@fd0c0000 {
57 compatible = "cdns,dsi";
58 reg = <0x0 0xfd0c0000 0x0 0x1000>;
59 clocks = <&pclk>, <&sysclk>;
60 clock-names = "dsi_p_clk", "dsi_sys_clk";
61 interrupts = <1>;
62 phys = <&dphy0>;
63 phy-names = "dphy";
64 #address-cells = <1>;
65 #size-cells = <0>;
66
67 ports {
68 #address-cells = <1>;
69 #size-cells = <0>;
70
71 port@1 {
72 reg = <1>;
73 dsi0_dpi_input: endpoint {
74 remote-endpoint = <&xxx_dpi_output>;
75 };
76 };
77 };
78
79 panel: dsi-dev@0 {
80 compatible = "<vendor,panel>";
81 reg = <0>;
82 };
83 };
84
85or
86
87 dsi0: dsi@fd0c0000 {
88 compatible = "cdns,dsi";
89 reg = <0x0 0xfd0c0000 0x0 0x1000>;
90 clocks = <&pclk>, <&sysclk>;
91 clock-names = "dsi_p_clk", "dsi_sys_clk";
92 interrupts = <1>;
93 phys = <&dphy1>;
94 phy-names = "dphy";
95 #address-cells = <1>;
96 #size-cells = <0>;
97
98 ports {
99 #address-cells = <1>;
100 #size-cells = <0>;
101
102 port@0 {
103 reg = <0>;
104 #address-cells = <1>;
105 #size-cells = <0>;
106
107 dsi0_output: endpoint@0 {
108 reg = <0>;
109 remote-endpoint = <&dsi_panel_input>;
110 };
111 };
112
113 port@1 {
114 reg = <1>;
115 dsi0_dpi_input: endpoint {
116 remote-endpoint = <&xxx_dpi_output>;
117 };
118 };
119 };
120 };
121
122 i2c@xxx {
123 panel: panel@59 {
124 compatible = "<vendor,panel>";
125 reg = <0x59>;
126
127 port {
128 dsi_panel_input: endpoint {
129 remote-endpoint = <&dsi0_output>;
130 };
131 };
132 };
133 };
diff --git a/Documentation/devicetree/bindings/display/bridge/thine,thc63lvd1024.txt b/Documentation/devicetree/bindings/display/bridge/thine,thc63lvd1024.txt
new file mode 100644
index 000000000000..37f0c04d5a28
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/bridge/thine,thc63lvd1024.txt
@@ -0,0 +1,60 @@
1Thine Electronics THC63LVD1024 LVDS decoder
2-------------------------------------------
3
4The THC63LVD1024 is a dual link LVDS receiver designed to convert LVDS streams
5to parallel data outputs. The chip supports single/dual input/output modes,
6handling up to two LVDS input streams and up to two digital CMOS/TTL outputs.
7
8Single or dual operation mode, output data mapping and DDR output modes are
9configured through input signals and the chip does not expose any control bus.
10
11Required properties:
12- compatible: Shall be "thine,thc63lvd1024"
13- vcc-supply: Power supply for TTL output, TTL CLOCKOUT signal, LVDS input,
14 PPL and digital circuitry
15
16Optional properties:
17- powerdown-gpios: Power down GPIO signal, pin name "/PDWN". Active low
18- oe-gpios: Output enable GPIO signal, pin name "OE". Active high
19
20The THC63LVD1024 video port connections are modeled according
21to OF graph bindings specified by Documentation/devicetree/bindings/graph.txt
22
23Required video port nodes:
24- port@0: First LVDS input port
25- port@2: First digital CMOS/TTL parallel output
26
27Optional video port nodes:
28- port@1: Second LVDS input port
29- port@3: Second digital CMOS/TTL parallel output
30
31Example:
32--------
33
34 thc63lvd1024: lvds-decoder {
35 compatible = "thine,thc63lvd1024";
36
37 vcc-supply = <&reg_lvds_vcc>;
38 powerdown-gpios = <&gpio4 15 GPIO_ACTIVE_LOW>;
39
40 ports {
41 #address-cells = <1>;
42 #size-cells = <0>;
43
44 port@0 {
45 reg = <0>;
46
47 lvds_dec_in_0: endpoint {
48 remote-endpoint = <&lvds_out>;
49 };
50 };
51
52 port@2{
53 reg = <2>;
54
55 lvds_dec_out_2: endpoint {
56 remote-endpoint = <&adv7511_in>;
57 };
58 };
59 };
60 };
diff --git a/Documentation/devicetree/bindings/display/sunxi/sun6i-dsi.txt b/Documentation/devicetree/bindings/display/sunxi/sun6i-dsi.txt
new file mode 100644
index 000000000000..6a6cf5de08b0
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/sunxi/sun6i-dsi.txt
@@ -0,0 +1,93 @@
1Allwinner A31 DSI Encoder
2=========================
3
4The DSI pipeline consists of two separate blocks: the DSI controller
5itself, and its associated D-PHY.
6
7DSI Encoder
8-----------
9
10The DSI Encoder generates the DSI signal from the TCON's.
11
12Required properties:
13 - compatible: value must be one of:
14 * allwinner,sun6i-a31-mipi-dsi
15 - reg: base address and size of memory-mapped region
16 - interrupts: interrupt associated to this IP
17 - clocks: phandles to the clocks feeding the DSI encoder
18 * bus: the DSI interface clock
19 * mod: the DSI module clock
20 - clock-names: the clock names mentioned above
21 - phys: phandle to the D-PHY
22 - phy-names: must be "dphy"
23 - resets: phandle to the reset controller driving the encoder
24
25 - ports: A ports node with endpoint definitions as defined in
26 Documentation/devicetree/bindings/media/video-interfaces.txt. The
27 first port should be the input endpoint, usually coming from the
28 associated TCON.
29
30Any MIPI-DSI device attached to this should be described according to
31the bindings defined in ../mipi-dsi-bus.txt
32
33D-PHY
34-----
35
36Required properties:
37 - compatible: value must be one of:
38 * allwinner,sun6i-a31-mipi-dphy
39 - reg: base address and size of memory-mapped region
40 - clocks: phandles to the clocks feeding the DSI encoder
41 * bus: the DSI interface clock
42 * mod: the DSI module clock
43 - clock-names: the clock names mentioned above
44 - resets: phandle to the reset controller driving the encoder
45
46Example:
47
48dsi0: dsi@1ca0000 {
49 compatible = "allwinner,sun6i-a31-mipi-dsi";
50 reg = <0x01ca0000 0x1000>;
51 interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
52 clocks = <&ccu CLK_BUS_MIPI_DSI>,
53 <&ccu CLK_DSI_SCLK>;
54 clock-names = "bus", "mod";
55 resets = <&ccu RST_BUS_MIPI_DSI>;
56 phys = <&dphy0>;
57 phy-names = "dphy";
58 #address-cells = <1>;
59 #size-cells = <0>;
60
61 panel@0 {
62 compatible = "bananapi,lhr050h41", "ilitek,ili9881c";
63 reg = <0>;
64 power-gpios = <&pio 1 7 GPIO_ACTIVE_HIGH>; /* PB07 */
65 reset-gpios = <&r_pio 0 5 GPIO_ACTIVE_LOW>; /* PL05 */
66 backlight = <&pwm_bl>;
67 };
68
69 ports {
70 #address-cells = <1>;
71 #size-cells = <0>;
72
73 port@0 {
74 #address-cells = <1>;
75 #size-cells = <0>;
76 reg = <0>;
77
78 dsi0_in_tcon0: endpoint {
79 remote-endpoint = <&tcon0_out_dsi0>;
80 };
81 };
82 };
83};
84
85dphy0: d-phy@1ca1000 {
86 compatible = "allwinner,sun6i-a31-mipi-dphy";
87 reg = <0x01ca1000 0x1000>;
88 clocks = <&ccu CLK_BUS_MIPI_DSI>,
89 <&ccu CLK_DSI_DPHY>;
90 clock-names = "bus", "mod";
91 resets = <&ccu RST_BUS_MIPI_DSI>;
92 #phy-cells = <0>;
93};
diff --git a/Documentation/gpu/drivers.rst b/Documentation/gpu/drivers.rst
index e8c84419a2a1..d3ab6abae838 100644
--- a/Documentation/gpu/drivers.rst
+++ b/Documentation/gpu/drivers.rst
@@ -12,6 +12,7 @@ GPU Driver Documentation
12 tve200 12 tve200
13 vc4 13 vc4
14 bridge/dw-hdmi 14 bridge/dw-hdmi
15 xen-front
15 16
16.. only:: subproject and html 17.. only:: subproject and html
17 18
diff --git a/Documentation/gpu/kms-properties.csv b/Documentation/gpu/kms-properties.csv
index 6b28b014cb7d..07ed22ea3bd6 100644
--- a/Documentation/gpu/kms-properties.csv
+++ b/Documentation/gpu/kms-properties.csv
@@ -98,5 +98,4 @@ radeon,DVI-I,“coherent”,RANGE,"Min=0, Max=1",Connector,TBD
98,,"""underscan vborder""",RANGE,"Min=0, Max=128",Connector,TBD 98,,"""underscan vborder""",RANGE,"Min=0, Max=128",Connector,TBD
99,Audio,“audio”,ENUM,"{ ""off"", ""on"", ""auto"" }",Connector,TBD 99,Audio,“audio”,ENUM,"{ ""off"", ""on"", ""auto"" }",Connector,TBD
100,FMT Dithering,“dither”,ENUM,"{ ""off"", ""on"" }",Connector,TBD 100,FMT Dithering,“dither”,ENUM,"{ ""off"", ""on"" }",Connector,TBD
101rcar-du,Generic,"""alpha""",RANGE,"Min=0, Max=255",Plane,TBD
102,,"""colorkey""",RANGE,"Min=0, Max=0x01ffffff",Plane,TBD 101,,"""colorkey""",RANGE,"Min=0, Max=0x01ffffff",Plane,TBD
diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
index f4d0b3476d9c..a7c150d6b63f 100644
--- a/Documentation/gpu/todo.rst
+++ b/Documentation/gpu/todo.rst
@@ -212,6 +212,24 @@ probably use drm_fb_helper_fbdev_teardown().
212 212
213Contact: Maintainer of the driver you plan to convert 213Contact: Maintainer of the driver you plan to convert
214 214
215Clean up mmap forwarding
216------------------------
217
218A lot of drivers forward gem mmap calls to dma-buf mmap for imported buffers.
219And also a lot of them forward dma-buf mmap to the gem mmap implementations.
220Would be great to refactor this all into a set of small common helpers.
221
222Contact: Daniel Vetter
223
224Put a reservation_object into drm_gem_object
225--------------------------------------------
226
227This would remove the need for the ->gem_prime_res_obj callback. It would also
228allow us to implement generic helpers for waiting for a bo, allowing for quite a
229bit of refactoring in the various wait ioctl implementations.
230
231Contact: Daniel Vetter
232
215idr_init_base() 233idr_init_base()
216--------------- 234---------------
217 235
diff --git a/Documentation/gpu/xen-front.rst b/Documentation/gpu/xen-front.rst
new file mode 100644
index 000000000000..d988da7d1983
--- /dev/null
+++ b/Documentation/gpu/xen-front.rst
@@ -0,0 +1,31 @@
1====================================================
2 drm/xen-front Xen para-virtualized frontend driver
3====================================================
4
5This frontend driver implements Xen para-virtualized display
6according to the display protocol described at
7include/xen/interface/io/displif.h
8
9Driver modes of operation in terms of display buffers used
10==========================================================
11
12.. kernel-doc:: drivers/gpu/drm/xen/xen_drm_front.h
13 :doc: Driver modes of operation in terms of display buffers used
14
15Buffers allocated by the frontend driver
16----------------------------------------
17
18.. kernel-doc:: drivers/gpu/drm/xen/xen_drm_front.h
19 :doc: Buffers allocated by the frontend driver
20
21Buffers allocated by the backend
22--------------------------------
23
24.. kernel-doc:: drivers/gpu/drm/xen/xen_drm_front.h
25 :doc: Buffers allocated by the backend
26
27Driver limitations
28==================
29
30.. kernel-doc:: drivers/gpu/drm/xen/xen_drm_front.h
31 :doc: Driver limitations
diff --git a/MAINTAINERS b/MAINTAINERS
index 79bb02ff812f..8daa96a99eac 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4830,6 +4830,15 @@ S: Maintained
4830F: drivers/gpu/drm/tinydrm/ 4830F: drivers/gpu/drm/tinydrm/
4831F: include/drm/tinydrm/ 4831F: include/drm/tinydrm/
4832 4832
4833DRM DRIVERS FOR XEN
4834M: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
4835T: git git://anongit.freedesktop.org/drm/drm-misc
4836L: dri-devel@lists.freedesktop.org
4837L: xen-devel@lists.xen.org
4838S: Supported
4839F: drivers/gpu/drm/xen/
4840F: Documentation/gpu/xen-front.rst
4841
4833DRM TTM SUBSYSTEM 4842DRM TTM SUBSYSTEM
4834M: Christian Koenig <christian.koenig@amd.com> 4843M: Christian Koenig <christian.koenig@amd.com>
4835M: Roger He <Hongbo.He@amd.com> 4844M: Roger He <Hongbo.He@amd.com>
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index deeefa7a1773..757825ac60df 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -289,6 +289,8 @@ source "drivers/gpu/drm/pl111/Kconfig"
289 289
290source "drivers/gpu/drm/tve200/Kconfig" 290source "drivers/gpu/drm/tve200/Kconfig"
291 291
292source "drivers/gpu/drm/xen/Kconfig"
293
292# Keep legacy drivers last 294# Keep legacy drivers last
293 295
294menuconfig DRM_LEGACY 296menuconfig DRM_LEGACY
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 50093ff4479b..9d66657ea117 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -103,3 +103,4 @@ obj-$(CONFIG_DRM_MXSFB) += mxsfb/
103obj-$(CONFIG_DRM_TINYDRM) += tinydrm/ 103obj-$(CONFIG_DRM_TINYDRM) += tinydrm/
104obj-$(CONFIG_DRM_PL111) += pl111/ 104obj-$(CONFIG_DRM_PL111) += pl111/
105obj-$(CONFIG_DRM_TVE200) += tve200/ 105obj-$(CONFIG_DRM_TVE200) += tve200/
106obj-$(CONFIG_DRM_XEN) += xen/
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 831b73392d82..036dff8a1f33 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -799,7 +799,7 @@ static int ast_get_modes(struct drm_connector *connector)
799 return 0; 799 return 0;
800} 800}
801 801
802static int ast_mode_valid(struct drm_connector *connector, 802static enum drm_mode_status ast_mode_valid(struct drm_connector *connector,
803 struct drm_display_mode *mode) 803 struct drm_display_mode *mode)
804{ 804{
805 struct ast_private *ast = connector->dev->dev_private; 805 struct ast_private *ast = connector->dev->dev_private;
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
index ab32d5b268d2..60c937f42114 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
@@ -299,7 +299,6 @@ struct atmel_hlcdc_layer {
299struct atmel_hlcdc_plane { 299struct atmel_hlcdc_plane {
300 struct drm_plane base; 300 struct drm_plane base;
301 struct atmel_hlcdc_layer layer; 301 struct atmel_hlcdc_layer layer;
302 struct atmel_hlcdc_plane_properties *properties;
303}; 302};
304 303
305static inline struct atmel_hlcdc_plane * 304static inline struct atmel_hlcdc_plane *
@@ -346,18 +345,6 @@ struct atmel_hlcdc_dc_desc {
346}; 345};
347 346
348/** 347/**
349 * Atmel HLCDC Plane properties.
350 *
351 * This structure stores plane property definitions.
352 *
353 * @alpha: alpha blending (or transparency) property
354 * @rotation: rotation property
355 */
356struct atmel_hlcdc_plane_properties {
357 struct drm_property *alpha;
358};
359
360/**
361 * Atmel HLCDC Display Controller. 348 * Atmel HLCDC Display Controller.
362 * 349 *
363 * @desc: HLCDC Display Controller description 350 * @desc: HLCDC Display Controller description
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index e18800ed7cd1..73c875db45f4 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -31,7 +31,6 @@
31 * @src_y: y buffer position 31 * @src_y: y buffer position
32 * @src_w: buffer width 32 * @src_w: buffer width
33 * @src_h: buffer height 33 * @src_h: buffer height
34 * @alpha: alpha blending of the plane
35 * @disc_x: x discard position 34 * @disc_x: x discard position
36 * @disc_y: y discard position 35 * @disc_y: y discard position
37 * @disc_w: discard width 36 * @disc_w: discard width
@@ -54,8 +53,6 @@ struct atmel_hlcdc_plane_state {
54 uint32_t src_w; 53 uint32_t src_w;
55 uint32_t src_h; 54 uint32_t src_h;
56 55
57 u8 alpha;
58
59 int disc_x; 56 int disc_x;
60 int disc_y; 57 int disc_y;
61 int disc_w; 58 int disc_w;
@@ -385,7 +382,7 @@ atmel_hlcdc_plane_update_general_settings(struct atmel_hlcdc_plane *plane,
385 cfg |= ATMEL_HLCDC_LAYER_LAEN; 382 cfg |= ATMEL_HLCDC_LAYER_LAEN;
386 else 383 else
387 cfg |= ATMEL_HLCDC_LAYER_GAEN | 384 cfg |= ATMEL_HLCDC_LAYER_GAEN |
388 ATMEL_HLCDC_LAYER_GA(state->alpha); 385 ATMEL_HLCDC_LAYER_GA(state->base.alpha >> 8);
389 } 386 }
390 387
391 if (state->disc_h && state->disc_w) 388 if (state->disc_h && state->disc_w)
@@ -553,7 +550,7 @@ atmel_hlcdc_plane_prepare_disc_area(struct drm_crtc_state *c_state)
553 550
554 if (!ovl_s->fb || 551 if (!ovl_s->fb ||
555 ovl_s->fb->format->has_alpha || 552 ovl_s->fb->format->has_alpha ||
556 ovl_state->alpha != 255) 553 ovl_s->alpha != DRM_BLEND_ALPHA_OPAQUE)
557 continue; 554 continue;
558 555
559 /* TODO: implement a smarter hidden area detection */ 556 /* TODO: implement a smarter hidden area detection */
@@ -829,51 +826,18 @@ static void atmel_hlcdc_plane_destroy(struct drm_plane *p)
829 drm_plane_cleanup(p); 826 drm_plane_cleanup(p);
830} 827}
831 828
832static int atmel_hlcdc_plane_atomic_set_property(struct drm_plane *p, 829static int atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane)
833 struct drm_plane_state *s,
834 struct drm_property *property,
835 uint64_t val)
836{
837 struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
838 struct atmel_hlcdc_plane_properties *props = plane->properties;
839 struct atmel_hlcdc_plane_state *state =
840 drm_plane_state_to_atmel_hlcdc_plane_state(s);
841
842 if (property == props->alpha)
843 state->alpha = val;
844 else
845 return -EINVAL;
846
847 return 0;
848}
849
850static int atmel_hlcdc_plane_atomic_get_property(struct drm_plane *p,
851 const struct drm_plane_state *s,
852 struct drm_property *property,
853 uint64_t *val)
854{
855 struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
856 struct atmel_hlcdc_plane_properties *props = plane->properties;
857 const struct atmel_hlcdc_plane_state *state =
858 container_of(s, const struct atmel_hlcdc_plane_state, base);
859
860 if (property == props->alpha)
861 *val = state->alpha;
862 else
863 return -EINVAL;
864
865 return 0;
866}
867
868static int atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane,
869 struct atmel_hlcdc_plane_properties *props)
870{ 830{
871 const struct atmel_hlcdc_layer_desc *desc = plane->layer.desc; 831 const struct atmel_hlcdc_layer_desc *desc = plane->layer.desc;
872 832
873 if (desc->type == ATMEL_HLCDC_OVERLAY_LAYER || 833 if (desc->type == ATMEL_HLCDC_OVERLAY_LAYER ||
874 desc->type == ATMEL_HLCDC_CURSOR_LAYER) 834 desc->type == ATMEL_HLCDC_CURSOR_LAYER) {
875 drm_object_attach_property(&plane->base.base, 835 int ret;
876 props->alpha, 255); 836
837 ret = drm_plane_create_alpha_property(&plane->base);
838 if (ret)
839 return ret;
840 }
877 841
878 if (desc->layout.xstride && desc->layout.pstride) { 842 if (desc->layout.xstride && desc->layout.pstride) {
879 int ret; 843 int ret;
@@ -988,8 +952,8 @@ static void atmel_hlcdc_plane_reset(struct drm_plane *p)
988 return; 952 return;
989 } 953 }
990 954
991 state->alpha = 255;
992 p->state = &state->base; 955 p->state = &state->base;
956 p->state->alpha = DRM_BLEND_ALPHA_OPAQUE;
993 p->state->plane = p; 957 p->state->plane = p;
994 } 958 }
995} 959}
@@ -1042,13 +1006,10 @@ static const struct drm_plane_funcs layer_plane_funcs = {
1042 .reset = atmel_hlcdc_plane_reset, 1006 .reset = atmel_hlcdc_plane_reset,
1043 .atomic_duplicate_state = atmel_hlcdc_plane_atomic_duplicate_state, 1007 .atomic_duplicate_state = atmel_hlcdc_plane_atomic_duplicate_state,
1044 .atomic_destroy_state = atmel_hlcdc_plane_atomic_destroy_state, 1008 .atomic_destroy_state = atmel_hlcdc_plane_atomic_destroy_state,
1045 .atomic_set_property = atmel_hlcdc_plane_atomic_set_property,
1046 .atomic_get_property = atmel_hlcdc_plane_atomic_get_property,
1047}; 1009};
1048 1010
1049static int atmel_hlcdc_plane_create(struct drm_device *dev, 1011static int atmel_hlcdc_plane_create(struct drm_device *dev,
1050 const struct atmel_hlcdc_layer_desc *desc, 1012 const struct atmel_hlcdc_layer_desc *desc)
1051 struct atmel_hlcdc_plane_properties *props)
1052{ 1013{
1053 struct atmel_hlcdc_dc *dc = dev->dev_private; 1014 struct atmel_hlcdc_dc *dc = dev->dev_private;
1054 struct atmel_hlcdc_plane *plane; 1015 struct atmel_hlcdc_plane *plane;
@@ -1060,7 +1021,6 @@ static int atmel_hlcdc_plane_create(struct drm_device *dev,
1060 return -ENOMEM; 1021 return -ENOMEM;
1061 1022
1062 atmel_hlcdc_layer_init(&plane->layer, desc, dc->hlcdc->regmap); 1023 atmel_hlcdc_layer_init(&plane->layer, desc, dc->hlcdc->regmap);
1063 plane->properties = props;
1064 1024
1065 if (desc->type == ATMEL_HLCDC_BASE_LAYER) 1025 if (desc->type == ATMEL_HLCDC_BASE_LAYER)
1066 type = DRM_PLANE_TYPE_PRIMARY; 1026 type = DRM_PLANE_TYPE_PRIMARY;
@@ -1081,7 +1041,7 @@ static int atmel_hlcdc_plane_create(struct drm_device *dev,
1081 &atmel_hlcdc_layer_plane_helper_funcs); 1041 &atmel_hlcdc_layer_plane_helper_funcs);
1082 1042
1083 /* Set default property values*/ 1043 /* Set default property values*/
1084 ret = atmel_hlcdc_plane_init_properties(plane, props); 1044 ret = atmel_hlcdc_plane_init_properties(plane);
1085 if (ret) 1045 if (ret)
1086 return ret; 1046 return ret;
1087 1047
@@ -1090,34 +1050,13 @@ static int atmel_hlcdc_plane_create(struct drm_device *dev,
1090 return 0; 1050 return 0;
1091} 1051}
1092 1052
1093static struct atmel_hlcdc_plane_properties *
1094atmel_hlcdc_plane_create_properties(struct drm_device *dev)
1095{
1096 struct atmel_hlcdc_plane_properties *props;
1097
1098 props = devm_kzalloc(dev->dev, sizeof(*props), GFP_KERNEL);
1099 if (!props)
1100 return ERR_PTR(-ENOMEM);
1101
1102 props->alpha = drm_property_create_range(dev, 0, "alpha", 0, 255);
1103 if (!props->alpha)
1104 return ERR_PTR(-ENOMEM);
1105
1106 return props;
1107}
1108
1109int atmel_hlcdc_create_planes(struct drm_device *dev) 1053int atmel_hlcdc_create_planes(struct drm_device *dev)
1110{ 1054{
1111 struct atmel_hlcdc_dc *dc = dev->dev_private; 1055 struct atmel_hlcdc_dc *dc = dev->dev_private;
1112 struct atmel_hlcdc_plane_properties *props;
1113 const struct atmel_hlcdc_layer_desc *descs = dc->desc->layers; 1056 const struct atmel_hlcdc_layer_desc *descs = dc->desc->layers;
1114 int nlayers = dc->desc->nlayers; 1057 int nlayers = dc->desc->nlayers;
1115 int i, ret; 1058 int i, ret;
1116 1059
1117 props = atmel_hlcdc_plane_create_properties(dev);
1118 if (IS_ERR(props))
1119 return PTR_ERR(props);
1120
1121 dc->dscrpool = dmam_pool_create("atmel-hlcdc-dscr", dev->dev, 1060 dc->dscrpool = dmam_pool_create("atmel-hlcdc-dscr", dev->dev,
1122 sizeof(struct atmel_hlcdc_dma_channel_dscr), 1061 sizeof(struct atmel_hlcdc_dma_channel_dscr),
1123 sizeof(u64), 0); 1062 sizeof(u64), 0);
@@ -1130,7 +1069,7 @@ int atmel_hlcdc_create_planes(struct drm_device *dev)
1130 descs[i].type != ATMEL_HLCDC_CURSOR_LAYER) 1069 descs[i].type != ATMEL_HLCDC_CURSOR_LAYER)
1131 continue; 1070 continue;
1132 1071
1133 ret = atmel_hlcdc_plane_create(dev, &descs[i], props); 1072 ret = atmel_hlcdc_plane_create(dev, &descs[i]);
1134 if (ret) 1073 if (ret)
1135 return ret; 1074 return ret;
1136 } 1075 }
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index a24a18fbd65a..233980a78591 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -188,7 +188,7 @@ static int bochs_connector_get_modes(struct drm_connector *connector)
188 return count; 188 return count;
189} 189}
190 190
191static int bochs_connector_mode_valid(struct drm_connector *connector, 191static enum drm_mode_status bochs_connector_mode_valid(struct drm_connector *connector,
192 struct drm_display_mode *mode) 192 struct drm_display_mode *mode)
193{ 193{
194 struct bochs_device *bochs = 194 struct bochs_device *bochs =
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 3aa65bdecb0e..1d75d3a1f951 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -25,6 +25,16 @@ config DRM_ANALOGIX_ANX78XX
25 the HDMI output of an application processor to MyDP 25 the HDMI output of an application processor to MyDP
26 or DisplayPort. 26 or DisplayPort.
27 27
28config DRM_CDNS_DSI
29 tristate "Cadence DPI/DSI bridge"
30 select DRM_KMS_HELPER
31 select DRM_MIPI_DSI
32 select DRM_PANEL_BRIDGE
33 depends on OF
34 help
35 Support Cadence DPI to DSI bridge. This is an internal
36 bridge and is meant to be directly embedded in a SoC.
37
28config DRM_DUMB_VGA_DAC 38config DRM_DUMB_VGA_DAC
29 tristate "Dumb VGA DAC Bridge support" 39 tristate "Dumb VGA DAC Bridge support"
30 depends on OF 40 depends on OF
@@ -93,6 +103,12 @@ config DRM_SII9234
93 It is an I2C driver, that detects connection of MHL bridge 103 It is an I2C driver, that detects connection of MHL bridge
94 and starts encapsulation of HDMI signal. 104 and starts encapsulation of HDMI signal.
95 105
106config DRM_THINE_THC63LVD1024
107 tristate "Thine THC63LVD1024 LVDS decoder bridge"
108 depends on OF
109 ---help---
110 Thine THC63LVD1024 LVDS/parallel converter driver.
111
96config DRM_TOSHIBA_TC358767 112config DRM_TOSHIBA_TC358767
97 tristate "Toshiba TC358767 eDP bridge" 113 tristate "Toshiba TC358767 eDP bridge"
98 depends on OF 114 depends on OF
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 373eb28f31ed..35f88d48ec20 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -1,5 +1,6 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2obj-$(CONFIG_DRM_ANALOGIX_ANX78XX) += analogix-anx78xx.o 2obj-$(CONFIG_DRM_ANALOGIX_ANX78XX) += analogix-anx78xx.o
3obj-$(CONFIG_DRM_CDNS_DSI) += cdns-dsi.o
3obj-$(CONFIG_DRM_DUMB_VGA_DAC) += dumb-vga-dac.o 4obj-$(CONFIG_DRM_DUMB_VGA_DAC) += dumb-vga-dac.o
4obj-$(CONFIG_DRM_LVDS_ENCODER) += lvds-encoder.o 5obj-$(CONFIG_DRM_LVDS_ENCODER) += lvds-encoder.o
5obj-$(CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW) += megachips-stdpxxxx-ge-b850v3-fw.o 6obj-$(CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW) += megachips-stdpxxxx-ge-b850v3-fw.o
@@ -8,6 +9,7 @@ obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o
8obj-$(CONFIG_DRM_SIL_SII8620) += sil-sii8620.o 9obj-$(CONFIG_DRM_SIL_SII8620) += sil-sii8620.o
9obj-$(CONFIG_DRM_SII902X) += sii902x.o 10obj-$(CONFIG_DRM_SII902X) += sii902x.o
10obj-$(CONFIG_DRM_SII9234) += sii9234.o 11obj-$(CONFIG_DRM_SII9234) += sii9234.o
12obj-$(CONFIG_DRM_THINE_THC63LVD1024) += thc63lvd1024.o
11obj-$(CONFIG_DRM_TOSHIBA_TC358767) += tc358767.o 13obj-$(CONFIG_DRM_TOSHIBA_TC358767) += tc358767.o
12obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix/ 14obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix/
13obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/ 15obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
index d034b2cb5eee..73d8ccb97742 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511.h
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h
@@ -93,6 +93,11 @@
93#define ADV7511_REG_CHIP_ID_HIGH 0xf5 93#define ADV7511_REG_CHIP_ID_HIGH 0xf5
94#define ADV7511_REG_CHIP_ID_LOW 0xf6 94#define ADV7511_REG_CHIP_ID_LOW 0xf6
95 95
96/* Hardware defined default addresses for I2C register maps */
97#define ADV7511_CEC_I2C_ADDR_DEFAULT 0x3c
98#define ADV7511_EDID_I2C_ADDR_DEFAULT 0x3f
99#define ADV7511_PACKET_I2C_ADDR_DEFAULT 0x38
100
96#define ADV7511_CSC_ENABLE BIT(7) 101#define ADV7511_CSC_ENABLE BIT(7)
97#define ADV7511_CSC_UPDATE_MODE BIT(5) 102#define ADV7511_CSC_UPDATE_MODE BIT(5)
98 103
@@ -321,6 +326,7 @@ enum adv7511_type {
321struct adv7511 { 326struct adv7511 {
322 struct i2c_client *i2c_main; 327 struct i2c_client *i2c_main;
323 struct i2c_client *i2c_edid; 328 struct i2c_client *i2c_edid;
329 struct i2c_client *i2c_packet;
324 struct i2c_client *i2c_cec; 330 struct i2c_client *i2c_cec;
325 331
326 struct regmap *regmap; 332 struct regmap *regmap;
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index efa29db5fc2b..2614cea538e2 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -586,7 +586,7 @@ static int adv7511_get_modes(struct adv7511 *adv7511,
586 /* Reading the EDID only works if the device is powered */ 586 /* Reading the EDID only works if the device is powered */
587 if (!adv7511->powered) { 587 if (!adv7511->powered) {
588 unsigned int edid_i2c_addr = 588 unsigned int edid_i2c_addr =
589 (adv7511->i2c_main->addr << 1) + 4; 589 (adv7511->i2c_edid->addr << 1);
590 590
591 __adv7511_power_on(adv7511); 591 __adv7511_power_on(adv7511);
592 592
@@ -654,7 +654,7 @@ adv7511_detect(struct adv7511 *adv7511, struct drm_connector *connector)
654 return status; 654 return status;
655} 655}
656 656
657static int adv7511_mode_valid(struct adv7511 *adv7511, 657static enum drm_mode_status adv7511_mode_valid(struct adv7511 *adv7511,
658 struct drm_display_mode *mode) 658 struct drm_display_mode *mode)
659{ 659{
660 if (mode->clock > 165000) 660 if (mode->clock > 165000)
@@ -969,10 +969,10 @@ static int adv7511_init_cec_regmap(struct adv7511 *adv)
969{ 969{
970 int ret; 970 int ret;
971 971
972 adv->i2c_cec = i2c_new_dummy(adv->i2c_main->adapter, 972 adv->i2c_cec = i2c_new_secondary_device(adv->i2c_main, "cec",
973 adv->i2c_main->addr - 1); 973 ADV7511_CEC_I2C_ADDR_DEFAULT);
974 if (!adv->i2c_cec) 974 if (!adv->i2c_cec)
975 return -ENOMEM; 975 return -EINVAL;
976 i2c_set_clientdata(adv->i2c_cec, adv); 976 i2c_set_clientdata(adv->i2c_cec, adv);
977 977
978 adv->regmap_cec = devm_regmap_init_i2c(adv->i2c_cec, 978 adv->regmap_cec = devm_regmap_init_i2c(adv->i2c_cec,
@@ -1082,8 +1082,6 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
1082 struct adv7511_link_config link_config; 1082 struct adv7511_link_config link_config;
1083 struct adv7511 *adv7511; 1083 struct adv7511 *adv7511;
1084 struct device *dev = &i2c->dev; 1084 struct device *dev = &i2c->dev;
1085 unsigned int main_i2c_addr = i2c->addr << 1;
1086 unsigned int edid_i2c_addr = main_i2c_addr + 4;
1087 unsigned int val; 1085 unsigned int val;
1088 int ret; 1086 int ret;
1089 1087
@@ -1153,23 +1151,34 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
1153 if (ret) 1151 if (ret)
1154 goto uninit_regulators; 1152 goto uninit_regulators;
1155 1153
1156 regmap_write(adv7511->regmap, ADV7511_REG_EDID_I2C_ADDR, edid_i2c_addr);
1157 regmap_write(adv7511->regmap, ADV7511_REG_PACKET_I2C_ADDR,
1158 main_i2c_addr - 0xa);
1159 regmap_write(adv7511->regmap, ADV7511_REG_CEC_I2C_ADDR,
1160 main_i2c_addr - 2);
1161
1162 adv7511_packet_disable(adv7511, 0xffff); 1154 adv7511_packet_disable(adv7511, 0xffff);
1163 1155
1164 adv7511->i2c_edid = i2c_new_dummy(i2c->adapter, edid_i2c_addr >> 1); 1156 adv7511->i2c_edid = i2c_new_secondary_device(i2c, "edid",
1157 ADV7511_EDID_I2C_ADDR_DEFAULT);
1165 if (!adv7511->i2c_edid) { 1158 if (!adv7511->i2c_edid) {
1166 ret = -ENOMEM; 1159 ret = -EINVAL;
1167 goto uninit_regulators; 1160 goto uninit_regulators;
1168 } 1161 }
1169 1162
1163 regmap_write(adv7511->regmap, ADV7511_REG_EDID_I2C_ADDR,
1164 adv7511->i2c_edid->addr << 1);
1165
1166 adv7511->i2c_packet = i2c_new_secondary_device(i2c, "packet",
1167 ADV7511_PACKET_I2C_ADDR_DEFAULT);
1168 if (!adv7511->i2c_packet) {
1169 ret = -EINVAL;
1170 goto err_i2c_unregister_edid;
1171 }
1172
1173 regmap_write(adv7511->regmap, ADV7511_REG_PACKET_I2C_ADDR,
1174 adv7511->i2c_packet->addr << 1);
1175
1170 ret = adv7511_init_cec_regmap(adv7511); 1176 ret = adv7511_init_cec_regmap(adv7511);
1171 if (ret) 1177 if (ret)
1172 goto err_i2c_unregister_edid; 1178 goto err_i2c_unregister_packet;
1179
1180 regmap_write(adv7511->regmap, ADV7511_REG_CEC_I2C_ADDR,
1181 adv7511->i2c_cec->addr << 1);
1173 1182
1174 INIT_WORK(&adv7511->hpd_work, adv7511_hpd_work); 1183 INIT_WORK(&adv7511->hpd_work, adv7511_hpd_work);
1175 1184
@@ -1207,6 +1216,8 @@ err_unregister_cec:
1207 i2c_unregister_device(adv7511->i2c_cec); 1216 i2c_unregister_device(adv7511->i2c_cec);
1208 if (adv7511->cec_clk) 1217 if (adv7511->cec_clk)
1209 clk_disable_unprepare(adv7511->cec_clk); 1218 clk_disable_unprepare(adv7511->cec_clk);
1219err_i2c_unregister_packet:
1220 i2c_unregister_device(adv7511->i2c_packet);
1210err_i2c_unregister_edid: 1221err_i2c_unregister_edid:
1211 i2c_unregister_device(adv7511->i2c_edid); 1222 i2c_unregister_device(adv7511->i2c_edid);
1212uninit_regulators: 1223uninit_regulators:
@@ -1233,6 +1244,7 @@ static int adv7511_remove(struct i2c_client *i2c)
1233 1244
1234 cec_unregister_adapter(adv7511->cec_adap); 1245 cec_unregister_adapter(adv7511->cec_adap);
1235 1246
1247 i2c_unregister_device(adv7511->i2c_packet);
1236 i2c_unregister_device(adv7511->i2c_edid); 1248 i2c_unregister_device(adv7511->i2c_edid);
1237 1249
1238 return 0; 1250 return 0;
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 5c52307146c7..2bcbfadb6ac5 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -43,8 +43,10 @@ struct bridge_init {
43 struct device_node *node; 43 struct device_node *node;
44}; 44};
45 45
46static void analogix_dp_init_dp(struct analogix_dp_device *dp) 46static int analogix_dp_init_dp(struct analogix_dp_device *dp)
47{ 47{
48 int ret;
49
48 analogix_dp_reset(dp); 50 analogix_dp_reset(dp);
49 51
50 analogix_dp_swreset(dp); 52 analogix_dp_swreset(dp);
@@ -56,10 +58,13 @@ static void analogix_dp_init_dp(struct analogix_dp_device *dp)
56 analogix_dp_enable_sw_function(dp); 58 analogix_dp_enable_sw_function(dp);
57 59
58 analogix_dp_config_interrupt(dp); 60 analogix_dp_config_interrupt(dp);
59 analogix_dp_init_analog_func(dp); 61 ret = analogix_dp_init_analog_func(dp);
62 if (ret)
63 return ret;
60 64
61 analogix_dp_init_hpd(dp); 65 analogix_dp_init_hpd(dp);
62 analogix_dp_init_aux(dp); 66 analogix_dp_init_aux(dp);
67 return 0;
63} 68}
64 69
65static int analogix_dp_detect_hpd(struct analogix_dp_device *dp) 70static int analogix_dp_detect_hpd(struct analogix_dp_device *dp)
@@ -71,7 +76,7 @@ static int analogix_dp_detect_hpd(struct analogix_dp_device *dp)
71 return 0; 76 return 0;
72 77
73 timeout_loop++; 78 timeout_loop++;
74 usleep_range(10, 11); 79 usleep_range(1000, 1100);
75 } 80 }
76 81
77 /* 82 /*
@@ -148,87 +153,146 @@ int analogix_dp_disable_psr(struct analogix_dp_device *dp)
148 psr_vsc.DB1 = 0; 153 psr_vsc.DB1 = 0;
149 154
150 ret = drm_dp_dpcd_writeb(&dp->aux, DP_SET_POWER, DP_SET_POWER_D0); 155 ret = drm_dp_dpcd_writeb(&dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
151 if (ret != 1) 156 if (ret != 1) {
152 dev_err(dp->dev, "Failed to set DP Power0 %d\n", ret); 157 dev_err(dp->dev, "Failed to set DP Power0 %d\n", ret);
158 return ret;
159 }
153 160
154 return analogix_dp_send_psr_spd(dp, &psr_vsc, false); 161 return analogix_dp_send_psr_spd(dp, &psr_vsc, false);
155} 162}
156EXPORT_SYMBOL_GPL(analogix_dp_disable_psr); 163EXPORT_SYMBOL_GPL(analogix_dp_disable_psr);
157 164
158static bool analogix_dp_detect_sink_psr(struct analogix_dp_device *dp) 165static int analogix_dp_detect_sink_psr(struct analogix_dp_device *dp)
159{ 166{
160 unsigned char psr_version; 167 unsigned char psr_version;
168 int ret;
169
170 ret = drm_dp_dpcd_readb(&dp->aux, DP_PSR_SUPPORT, &psr_version);
171 if (ret != 1) {
172 dev_err(dp->dev, "failed to get PSR version, disable it\n");
173 return ret;
174 }
161 175
162 drm_dp_dpcd_readb(&dp->aux, DP_PSR_SUPPORT, &psr_version);
163 dev_dbg(dp->dev, "Panel PSR version : %x\n", psr_version); 176 dev_dbg(dp->dev, "Panel PSR version : %x\n", psr_version);
164 177
165 return (psr_version & DP_PSR_IS_SUPPORTED) ? true : false; 178 dp->psr_enable = (psr_version & DP_PSR_IS_SUPPORTED) ? true : false;
179
180 return 0;
166} 181}
167 182
168static void analogix_dp_enable_sink_psr(struct analogix_dp_device *dp) 183static int analogix_dp_enable_sink_psr(struct analogix_dp_device *dp)
169{ 184{
170 unsigned char psr_en; 185 unsigned char psr_en;
186 int ret;
171 187
172 /* Disable psr function */ 188 /* Disable psr function */
173 drm_dp_dpcd_readb(&dp->aux, DP_PSR_EN_CFG, &psr_en); 189 ret = drm_dp_dpcd_readb(&dp->aux, DP_PSR_EN_CFG, &psr_en);
190 if (ret != 1) {
191 dev_err(dp->dev, "failed to get psr config\n");
192 goto end;
193 }
194
174 psr_en &= ~DP_PSR_ENABLE; 195 psr_en &= ~DP_PSR_ENABLE;
175 drm_dp_dpcd_writeb(&dp->aux, DP_PSR_EN_CFG, psr_en); 196 ret = drm_dp_dpcd_writeb(&dp->aux, DP_PSR_EN_CFG, psr_en);
197 if (ret != 1) {
198 dev_err(dp->dev, "failed to disable panel psr\n");
199 goto end;
200 }
176 201
177 /* Main-Link transmitter remains active during PSR active states */ 202 /* Main-Link transmitter remains active during PSR active states */
178 psr_en = DP_PSR_MAIN_LINK_ACTIVE | DP_PSR_CRC_VERIFICATION; 203 psr_en = DP_PSR_MAIN_LINK_ACTIVE | DP_PSR_CRC_VERIFICATION;
179 drm_dp_dpcd_writeb(&dp->aux, DP_PSR_EN_CFG, psr_en); 204 ret = drm_dp_dpcd_writeb(&dp->aux, DP_PSR_EN_CFG, psr_en);
205 if (ret != 1) {
206 dev_err(dp->dev, "failed to set panel psr\n");
207 goto end;
208 }
180 209
181 /* Enable psr function */ 210 /* Enable psr function */
182 psr_en = DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE | 211 psr_en = DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE |
183 DP_PSR_CRC_VERIFICATION; 212 DP_PSR_CRC_VERIFICATION;
184 drm_dp_dpcd_writeb(&dp->aux, DP_PSR_EN_CFG, psr_en); 213 ret = drm_dp_dpcd_writeb(&dp->aux, DP_PSR_EN_CFG, psr_en);
214 if (ret != 1) {
215 dev_err(dp->dev, "failed to set panel psr\n");
216 goto end;
217 }
185 218
186 analogix_dp_enable_psr_crc(dp); 219 analogix_dp_enable_psr_crc(dp);
220
221 return 0;
222end:
223 dev_err(dp->dev, "enable psr fail, force to disable psr\n");
224 dp->psr_enable = false;
225
226 return ret;
187} 227}
188 228
189static void 229static int
190analogix_dp_enable_rx_to_enhanced_mode(struct analogix_dp_device *dp, 230analogix_dp_enable_rx_to_enhanced_mode(struct analogix_dp_device *dp,
191 bool enable) 231 bool enable)
192{ 232{
193 u8 data; 233 u8 data;
234 int ret;
194 235
195 drm_dp_dpcd_readb(&dp->aux, DP_LANE_COUNT_SET, &data); 236 ret = drm_dp_dpcd_readb(&dp->aux, DP_LANE_COUNT_SET, &data);
237 if (ret != 1)
238 return ret;
196 239
197 if (enable) 240 if (enable)
198 drm_dp_dpcd_writeb(&dp->aux, DP_LANE_COUNT_SET, 241 ret = drm_dp_dpcd_writeb(&dp->aux, DP_LANE_COUNT_SET,
199 DP_LANE_COUNT_ENHANCED_FRAME_EN | 242 DP_LANE_COUNT_ENHANCED_FRAME_EN |
200 DPCD_LANE_COUNT_SET(data)); 243 DPCD_LANE_COUNT_SET(data));
201 else 244 else
202 drm_dp_dpcd_writeb(&dp->aux, DP_LANE_COUNT_SET, 245 ret = drm_dp_dpcd_writeb(&dp->aux, DP_LANE_COUNT_SET,
203 DPCD_LANE_COUNT_SET(data)); 246 DPCD_LANE_COUNT_SET(data));
247
248 return ret < 0 ? ret : 0;
204} 249}
205 250
206static int analogix_dp_is_enhanced_mode_available(struct analogix_dp_device *dp) 251static int analogix_dp_is_enhanced_mode_available(struct analogix_dp_device *dp,
252 u8 *enhanced_mode_support)
207{ 253{
208 u8 data; 254 u8 data;
209 int retval; 255 int ret;
210 256
211 drm_dp_dpcd_readb(&dp->aux, DP_MAX_LANE_COUNT, &data); 257 ret = drm_dp_dpcd_readb(&dp->aux, DP_MAX_LANE_COUNT, &data);
212 retval = DPCD_ENHANCED_FRAME_CAP(data); 258 if (ret != 1) {
259 *enhanced_mode_support = 0;
260 return ret;
261 }
213 262
214 return retval; 263 *enhanced_mode_support = DPCD_ENHANCED_FRAME_CAP(data);
264
265 return 0;
215} 266}
216 267
217static void analogix_dp_set_enhanced_mode(struct analogix_dp_device *dp) 268static int analogix_dp_set_enhanced_mode(struct analogix_dp_device *dp)
218{ 269{
219 u8 data; 270 u8 data;
271 int ret;
272
273 ret = analogix_dp_is_enhanced_mode_available(dp, &data);
274 if (ret < 0)
275 return ret;
276
277 ret = analogix_dp_enable_rx_to_enhanced_mode(dp, data);
278 if (ret < 0)
279 return ret;
220 280
221 data = analogix_dp_is_enhanced_mode_available(dp);
222 analogix_dp_enable_rx_to_enhanced_mode(dp, data);
223 analogix_dp_enable_enhanced_mode(dp, data); 281 analogix_dp_enable_enhanced_mode(dp, data);
282
283 return 0;
224} 284}
225 285
226static void analogix_dp_training_pattern_dis(struct analogix_dp_device *dp) 286static int analogix_dp_training_pattern_dis(struct analogix_dp_device *dp)
227{ 287{
288 int ret;
289
228 analogix_dp_set_training_pattern(dp, DP_NONE); 290 analogix_dp_set_training_pattern(dp, DP_NONE);
229 291
230 drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET, 292 ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
231 DP_TRAINING_PATTERN_DISABLE); 293 DP_TRAINING_PATTERN_DISABLE);
294
295 return ret < 0 ? ret : 0;
232} 296}
233 297
234static void 298static void
@@ -276,6 +340,12 @@ static int analogix_dp_link_start(struct analogix_dp_device *dp)
276 retval = drm_dp_dpcd_write(&dp->aux, DP_LINK_BW_SET, buf, 2); 340 retval = drm_dp_dpcd_write(&dp->aux, DP_LINK_BW_SET, buf, 2);
277 if (retval < 0) 341 if (retval < 0)
278 return retval; 342 return retval;
343 /* set enhanced mode if available */
344 retval = analogix_dp_set_enhanced_mode(dp);
345 if (retval < 0) {
346 dev_err(dp->dev, "failed to set enhance mode\n");
347 return retval;
348 }
279 349
280 /* Set TX pre-emphasis to minimum */ 350 /* Set TX pre-emphasis to minimum */
281 for (lane = 0; lane < lane_count; lane++) 351 for (lane = 0; lane < lane_count; lane++)
@@ -531,7 +601,7 @@ static int analogix_dp_process_equalizer_training(struct analogix_dp_device *dp)
531{ 601{
532 int lane, lane_count, retval; 602 int lane, lane_count, retval;
533 u32 reg; 603 u32 reg;
534 u8 link_align, link_status[2], adjust_request[2], spread; 604 u8 link_align, link_status[2], adjust_request[2];
535 605
536 usleep_range(400, 401); 606 usleep_range(400, 401);
537 607
@@ -560,10 +630,11 @@ static int analogix_dp_process_equalizer_training(struct analogix_dp_device *dp)
560 630
561 if (!analogix_dp_channel_eq_ok(link_status, link_align, lane_count)) { 631 if (!analogix_dp_channel_eq_ok(link_status, link_align, lane_count)) {
562 /* traing pattern Set to Normal */ 632 /* traing pattern Set to Normal */
563 analogix_dp_training_pattern_dis(dp); 633 retval = analogix_dp_training_pattern_dis(dp);
634 if (retval < 0)
635 return retval;
564 636
565 dev_info(dp->dev, "Link Training success!\n"); 637 dev_info(dp->dev, "Link Training success!\n");
566
567 analogix_dp_get_link_bandwidth(dp, &reg); 638 analogix_dp_get_link_bandwidth(dp, &reg);
568 dp->link_train.link_rate = reg; 639 dp->link_train.link_rate = reg;
569 dev_dbg(dp->dev, "final bandwidth = %.2x\n", 640 dev_dbg(dp->dev, "final bandwidth = %.2x\n",
@@ -574,22 +645,6 @@ static int analogix_dp_process_equalizer_training(struct analogix_dp_device *dp)
574 dev_dbg(dp->dev, "final lane count = %.2x\n", 645 dev_dbg(dp->dev, "final lane count = %.2x\n",
575 dp->link_train.lane_count); 646 dp->link_train.lane_count);
576 647
577 retval = drm_dp_dpcd_readb(&dp->aux, DP_MAX_DOWNSPREAD,
578 &spread);
579 if (retval != 1) {
580 dev_err(dp->dev, "failed to read downspread %d\n",
581 retval);
582 dp->fast_train_support = false;
583 } else {
584 dp->fast_train_support =
585 (spread & DP_NO_AUX_HANDSHAKE_LINK_TRAINING) ?
586 true : false;
587 }
588 dev_dbg(dp->dev, "fast link training %s\n",
589 dp->fast_train_support ? "supported" : "unsupported");
590
591 /* set enhanced mode if available */
592 analogix_dp_set_enhanced_mode(dp);
593 dp->link_train.lt_state = FINISHED; 648 dp->link_train.lt_state = FINISHED;
594 649
595 return 0; 650 return 0;
@@ -793,7 +848,7 @@ static int analogix_dp_fast_link_train(struct analogix_dp_device *dp)
793 848
794static int analogix_dp_train_link(struct analogix_dp_device *dp) 849static int analogix_dp_train_link(struct analogix_dp_device *dp)
795{ 850{
796 if (dp->fast_train_support) 851 if (dp->fast_train_enable)
797 return analogix_dp_fast_link_train(dp); 852 return analogix_dp_fast_link_train(dp);
798 853
799 return analogix_dp_full_link_train(dp, dp->video_info.max_lane_count, 854 return analogix_dp_full_link_train(dp, dp->video_info.max_lane_count,
@@ -819,11 +874,10 @@ static int analogix_dp_config_video(struct analogix_dp_device *dp)
819 if (analogix_dp_is_slave_video_stream_clock_on(dp) == 0) 874 if (analogix_dp_is_slave_video_stream_clock_on(dp) == 0)
820 break; 875 break;
821 if (timeout_loop > DP_TIMEOUT_LOOP_COUNT) { 876 if (timeout_loop > DP_TIMEOUT_LOOP_COUNT) {
822 dev_err(dp->dev, "Timeout of video streamclk ok\n"); 877 dev_err(dp->dev, "Timeout of slave video streamclk ok\n");
823 return -ETIMEDOUT; 878 return -ETIMEDOUT;
824 } 879 }
825 880 usleep_range(1000, 1001);
826 usleep_range(1, 2);
827 } 881 }
828 882
829 /* Set to use the register calculated M/N video */ 883 /* Set to use the register calculated M/N video */
@@ -838,6 +892,9 @@ static int analogix_dp_config_video(struct analogix_dp_device *dp)
838 /* Configure video slave mode */ 892 /* Configure video slave mode */
839 analogix_dp_enable_video_master(dp, 0); 893 analogix_dp_enable_video_master(dp, 0);
840 894
895 /* Enable video */
896 analogix_dp_start_video(dp);
897
841 timeout_loop = 0; 898 timeout_loop = 0;
842 899
843 for (;;) { 900 for (;;) {
@@ -850,8 +907,9 @@ static int analogix_dp_config_video(struct analogix_dp_device *dp)
850 done_count = 0; 907 done_count = 0;
851 } 908 }
852 if (timeout_loop > DP_TIMEOUT_LOOP_COUNT) { 909 if (timeout_loop > DP_TIMEOUT_LOOP_COUNT) {
853 dev_err(dp->dev, "Timeout of video streamclk ok\n"); 910 dev_warn(dp->dev,
854 return -ETIMEDOUT; 911 "Ignoring timeout of video streamclk ok\n");
912 break;
855 } 913 }
856 914
857 usleep_range(1000, 1001); 915 usleep_range(1000, 1001);
@@ -860,24 +918,32 @@ static int analogix_dp_config_video(struct analogix_dp_device *dp)
860 return 0; 918 return 0;
861} 919}
862 920
863static void analogix_dp_enable_scramble(struct analogix_dp_device *dp, 921static int analogix_dp_enable_scramble(struct analogix_dp_device *dp,
864 bool enable) 922 bool enable)
865{ 923{
866 u8 data; 924 u8 data;
925 int ret;
867 926
868 if (enable) { 927 if (enable) {
869 analogix_dp_enable_scrambling(dp); 928 analogix_dp_enable_scrambling(dp);
870 929
871 drm_dp_dpcd_readb(&dp->aux, DP_TRAINING_PATTERN_SET, &data); 930 ret = drm_dp_dpcd_readb(&dp->aux, DP_TRAINING_PATTERN_SET,
872 drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET, 931 &data);
932 if (ret != 1)
933 return ret;
934 ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
873 (u8)(data & ~DP_LINK_SCRAMBLING_DISABLE)); 935 (u8)(data & ~DP_LINK_SCRAMBLING_DISABLE));
874 } else { 936 } else {
875 analogix_dp_disable_scrambling(dp); 937 analogix_dp_disable_scrambling(dp);
876 938
877 drm_dp_dpcd_readb(&dp->aux, DP_TRAINING_PATTERN_SET, &data); 939 ret = drm_dp_dpcd_readb(&dp->aux, DP_TRAINING_PATTERN_SET,
878 drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET, 940 &data);
941 if (ret != 1)
942 return ret;
943 ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
879 (u8)(data | DP_LINK_SCRAMBLING_DISABLE)); 944 (u8)(data | DP_LINK_SCRAMBLING_DISABLE));
880 } 945 }
946 return ret < 0 ? ret : 0;
881} 947}
882 948
883static irqreturn_t analogix_dp_hardirq(int irq, void *arg) 949static irqreturn_t analogix_dp_hardirq(int irq, void *arg)
@@ -916,7 +982,23 @@ static irqreturn_t analogix_dp_irq_thread(int irq, void *arg)
916 return IRQ_HANDLED; 982 return IRQ_HANDLED;
917} 983}
918 984
919static void analogix_dp_commit(struct analogix_dp_device *dp) 985static int analogix_dp_fast_link_train_detection(struct analogix_dp_device *dp)
986{
987 int ret;
988 u8 spread;
989
990 ret = drm_dp_dpcd_readb(&dp->aux, DP_MAX_DOWNSPREAD, &spread);
991 if (ret != 1) {
992 dev_err(dp->dev, "failed to read downspread %d\n", ret);
993 return ret;
994 }
995 dp->fast_train_enable = !!(spread & DP_NO_AUX_HANDSHAKE_LINK_TRAINING);
996 dev_dbg(dp->dev, "fast link training %s\n",
997 dp->fast_train_enable ? "supported" : "unsupported");
998 return 0;
999}
1000
1001static int analogix_dp_commit(struct analogix_dp_device *dp)
920{ 1002{
921 int ret; 1003 int ret;
922 1004
@@ -926,34 +1008,50 @@ static void analogix_dp_commit(struct analogix_dp_device *dp)
926 DRM_ERROR("failed to disable the panel\n"); 1008 DRM_ERROR("failed to disable the panel\n");
927 } 1009 }
928 1010
929 ret = readx_poll_timeout(analogix_dp_train_link, dp, ret, !ret, 100, 1011 ret = analogix_dp_train_link(dp);
930 DP_TIMEOUT_TRAINING_US * 5);
931 if (ret) { 1012 if (ret) {
932 dev_err(dp->dev, "unable to do link train, ret=%d\n", ret); 1013 dev_err(dp->dev, "unable to do link train, ret=%d\n", ret);
933 return; 1014 return ret;
934 } 1015 }
935 1016
936 analogix_dp_enable_scramble(dp, 1); 1017 ret = analogix_dp_enable_scramble(dp, 1);
937 analogix_dp_enable_rx_to_enhanced_mode(dp, 1); 1018 if (ret < 0) {
938 analogix_dp_enable_enhanced_mode(dp, 1); 1019 dev_err(dp->dev, "can not enable scramble\n");
1020 return ret;
1021 }
939 1022
940 analogix_dp_init_video(dp); 1023 analogix_dp_init_video(dp);
941 ret = analogix_dp_config_video(dp); 1024 ret = analogix_dp_config_video(dp);
942 if (ret) 1025 if (ret) {
943 dev_err(dp->dev, "unable to config video\n"); 1026 dev_err(dp->dev, "unable to config video\n");
1027 return ret;
1028 }
944 1029
945 /* Safe to enable the panel now */ 1030 /* Safe to enable the panel now */
946 if (dp->plat_data->panel) { 1031 if (dp->plat_data->panel) {
947 if (drm_panel_enable(dp->plat_data->panel)) 1032 ret = drm_panel_enable(dp->plat_data->panel);
1033 if (ret) {
948 DRM_ERROR("failed to enable the panel\n"); 1034 DRM_ERROR("failed to enable the panel\n");
1035 return ret;
1036 }
949 } 1037 }
950 1038
951 /* Enable video */ 1039 ret = analogix_dp_detect_sink_psr(dp);
952 analogix_dp_start_video(dp); 1040 if (ret)
1041 return ret;
953 1042
954 dp->psr_enable = analogix_dp_detect_sink_psr(dp); 1043 if (dp->psr_enable) {
955 if (dp->psr_enable) 1044 ret = analogix_dp_enable_sink_psr(dp);
956 analogix_dp_enable_sink_psr(dp); 1045 if (ret)
1046 return ret;
1047 }
1048
1049 /* Check whether panel supports fast training */
1050 ret = analogix_dp_fast_link_train_detection(dp);
1051 if (ret)
1052 dp->psr_enable = false;
1053
1054 return ret;
957} 1055}
958 1056
959/* 1057/*
@@ -1150,24 +1248,80 @@ static void analogix_dp_bridge_pre_enable(struct drm_bridge *bridge)
1150 DRM_ERROR("failed to setup the panel ret = %d\n", ret); 1248 DRM_ERROR("failed to setup the panel ret = %d\n", ret);
1151} 1249}
1152 1250
1153static void analogix_dp_bridge_enable(struct drm_bridge *bridge) 1251static int analogix_dp_set_bridge(struct analogix_dp_device *dp)
1154{ 1252{
1155 struct analogix_dp_device *dp = bridge->driver_private; 1253 int ret;
1156
1157 if (dp->dpms_mode == DRM_MODE_DPMS_ON)
1158 return;
1159 1254
1160 pm_runtime_get_sync(dp->dev); 1255 pm_runtime_get_sync(dp->dev);
1161 1256
1162 if (dp->plat_data->power_on) 1257 ret = clk_prepare_enable(dp->clock);
1163 dp->plat_data->power_on(dp->plat_data); 1258 if (ret < 0) {
1259 DRM_ERROR("Failed to prepare_enable the clock clk [%d]\n", ret);
1260 goto out_dp_clk_pre;
1261 }
1262
1263 if (dp->plat_data->power_on_start)
1264 dp->plat_data->power_on_start(dp->plat_data);
1164 1265
1165 phy_power_on(dp->phy); 1266 phy_power_on(dp->phy);
1166 analogix_dp_init_dp(dp); 1267
1268 ret = analogix_dp_init_dp(dp);
1269 if (ret)
1270 goto out_dp_init;
1271
1272 /*
1273 * According to DP spec v1.3 chap 3.5.1.2 Link Training,
1274 * We should first make sure the HPD signal is asserted high by device
1275 * when we want to establish a link with it.
1276 */
1277 ret = analogix_dp_detect_hpd(dp);
1278 if (ret) {
1279 DRM_ERROR("failed to get hpd single ret = %d\n", ret);
1280 goto out_dp_init;
1281 }
1282
1283 ret = analogix_dp_commit(dp);
1284 if (ret) {
1285 DRM_ERROR("dp commit error, ret = %d\n", ret);
1286 goto out_dp_init;
1287 }
1288
1289 if (dp->plat_data->power_on_end)
1290 dp->plat_data->power_on_end(dp->plat_data);
1291
1167 enable_irq(dp->irq); 1292 enable_irq(dp->irq);
1168 analogix_dp_commit(dp); 1293 return 0;
1169 1294
1170 dp->dpms_mode = DRM_MODE_DPMS_ON; 1295out_dp_init:
1296 phy_power_off(dp->phy);
1297 if (dp->plat_data->power_off)
1298 dp->plat_data->power_off(dp->plat_data);
1299 clk_disable_unprepare(dp->clock);
1300out_dp_clk_pre:
1301 pm_runtime_put_sync(dp->dev);
1302
1303 return ret;
1304}
1305
1306static void analogix_dp_bridge_enable(struct drm_bridge *bridge)
1307{
1308 struct analogix_dp_device *dp = bridge->driver_private;
1309 int timeout_loop = 0;
1310
1311 if (dp->dpms_mode == DRM_MODE_DPMS_ON)
1312 return;
1313
1314 while (timeout_loop < MAX_PLL_LOCK_LOOP) {
1315 if (analogix_dp_set_bridge(dp) == 0) {
1316 dp->dpms_mode = DRM_MODE_DPMS_ON;
1317 return;
1318 }
1319 dev_err(dp->dev, "failed to set bridge, retry: %d\n",
1320 timeout_loop);
1321 timeout_loop++;
1322 usleep_range(10, 11);
1323 }
1324 dev_err(dp->dev, "too many times retry set bridge, give it up\n");
1171} 1325}
1172 1326
1173static void analogix_dp_bridge_disable(struct drm_bridge *bridge) 1327static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
@@ -1186,11 +1340,15 @@ static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
1186 } 1340 }
1187 1341
1188 disable_irq(dp->irq); 1342 disable_irq(dp->irq);
1189 phy_power_off(dp->phy);
1190 1343
1191 if (dp->plat_data->power_off) 1344 if (dp->plat_data->power_off)
1192 dp->plat_data->power_off(dp->plat_data); 1345 dp->plat_data->power_off(dp->plat_data);
1193 1346
1347 analogix_dp_set_analog_power_down(dp, POWER_ALL, 1);
1348 phy_power_off(dp->phy);
1349
1350 clk_disable_unprepare(dp->clock);
1351
1194 pm_runtime_put_sync(dp->dev); 1352 pm_runtime_put_sync(dp->dev);
1195 1353
1196 ret = analogix_dp_prepare_panel(dp, false, true); 1354 ret = analogix_dp_prepare_panel(dp, false, true);
@@ -1198,6 +1356,7 @@ static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
1198 DRM_ERROR("failed to setup the panel ret = %d\n", ret); 1356 DRM_ERROR("failed to setup the panel ret = %d\n", ret);
1199 1357
1200 dp->psr_enable = false; 1358 dp->psr_enable = false;
1359 dp->fast_train_enable = false;
1201 dp->dpms_mode = DRM_MODE_DPMS_OFF; 1360 dp->dpms_mode = DRM_MODE_DPMS_OFF;
1202} 1361}
1203 1362
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
index 6a96ef7e6934..769255dc6e99 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
@@ -19,6 +19,7 @@
19#define DP_TIMEOUT_LOOP_COUNT 100 19#define DP_TIMEOUT_LOOP_COUNT 100
20#define MAX_CR_LOOP 5 20#define MAX_CR_LOOP 5
21#define MAX_EQ_LOOP 5 21#define MAX_EQ_LOOP 5
22#define MAX_PLL_LOCK_LOOP 5
22 23
23/* Training takes 22ms if AUX channel comm fails. Use this as retry interval */ 24/* Training takes 22ms if AUX channel comm fails. Use this as retry interval */
24#define DP_TIMEOUT_TRAINING_US 22000 25#define DP_TIMEOUT_TRAINING_US 22000
@@ -173,7 +174,7 @@ struct analogix_dp_device {
173 int hpd_gpio; 174 int hpd_gpio;
174 bool force_hpd; 175 bool force_hpd;
175 bool psr_enable; 176 bool psr_enable;
176 bool fast_train_support; 177 bool fast_train_enable;
177 178
178 struct mutex panel_lock; 179 struct mutex panel_lock;
179 bool panel_is_modeset; 180 bool panel_is_modeset;
@@ -197,7 +198,7 @@ void analogix_dp_set_pll_power_down(struct analogix_dp_device *dp, bool enable);
197void analogix_dp_set_analog_power_down(struct analogix_dp_device *dp, 198void analogix_dp_set_analog_power_down(struct analogix_dp_device *dp,
198 enum analog_power_block block, 199 enum analog_power_block block,
199 bool enable); 200 bool enable);
200void analogix_dp_init_analog_func(struct analogix_dp_device *dp); 201int analogix_dp_init_analog_func(struct analogix_dp_device *dp);
201void analogix_dp_init_hpd(struct analogix_dp_device *dp); 202void analogix_dp_init_hpd(struct analogix_dp_device *dp);
202void analogix_dp_force_hpd(struct analogix_dp_device *dp); 203void analogix_dp_force_hpd(struct analogix_dp_device *dp);
203enum dp_irq_type analogix_dp_get_irq_type(struct analogix_dp_device *dp); 204enum dp_irq_type analogix_dp_get_irq_type(struct analogix_dp_device *dp);
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
index 9df2f3ef000c..a5f2763d72e4 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
@@ -126,9 +126,14 @@ void analogix_dp_reset(struct analogix_dp_device *dp)
126 analogix_dp_stop_video(dp); 126 analogix_dp_stop_video(dp);
127 analogix_dp_enable_video_mute(dp, 0); 127 analogix_dp_enable_video_mute(dp, 0);
128 128
129 reg = MASTER_VID_FUNC_EN_N | SLAVE_VID_FUNC_EN_N | 129 if (dp->plat_data && is_rockchip(dp->plat_data->dev_type))
130 AUD_FIFO_FUNC_EN_N | AUD_FUNC_EN_N | 130 reg = RK_VID_CAP_FUNC_EN_N | RK_VID_FIFO_FUNC_EN_N |
131 HDCP_FUNC_EN_N | SW_FUNC_EN_N; 131 SW_FUNC_EN_N;
132 else
133 reg = MASTER_VID_FUNC_EN_N | SLAVE_VID_FUNC_EN_N |
134 AUD_FIFO_FUNC_EN_N | AUD_FUNC_EN_N |
135 HDCP_FUNC_EN_N | SW_FUNC_EN_N;
136
132 writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_1); 137 writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_1);
133 138
134 reg = SSC_FUNC_EN_N | AUX_FUNC_EN_N | 139 reg = SSC_FUNC_EN_N | AUX_FUNC_EN_N |
@@ -230,16 +235,20 @@ enum pll_status analogix_dp_get_pll_lock_status(struct analogix_dp_device *dp)
230void analogix_dp_set_pll_power_down(struct analogix_dp_device *dp, bool enable) 235void analogix_dp_set_pll_power_down(struct analogix_dp_device *dp, bool enable)
231{ 236{
232 u32 reg; 237 u32 reg;
238 u32 mask = DP_PLL_PD;
239 u32 pd_addr = ANALOGIX_DP_PLL_CTL;
233 240
234 if (enable) { 241 if (dp->plat_data && is_rockchip(dp->plat_data->dev_type)) {
235 reg = readl(dp->reg_base + ANALOGIX_DP_PLL_CTL); 242 pd_addr = ANALOGIX_DP_PD;
236 reg |= DP_PLL_PD; 243 mask = RK_PLL_PD;
237 writel(reg, dp->reg_base + ANALOGIX_DP_PLL_CTL);
238 } else {
239 reg = readl(dp->reg_base + ANALOGIX_DP_PLL_CTL);
240 reg &= ~DP_PLL_PD;
241 writel(reg, dp->reg_base + ANALOGIX_DP_PLL_CTL);
242 } 244 }
245
246 reg = readl(dp->reg_base + pd_addr);
247 if (enable)
248 reg |= mask;
249 else
250 reg &= ~mask;
251 writel(reg, dp->reg_base + pd_addr);
243} 252}
244 253
245void analogix_dp_set_analog_power_down(struct analogix_dp_device *dp, 254void analogix_dp_set_analog_power_down(struct analogix_dp_device *dp,
@@ -248,83 +257,98 @@ void analogix_dp_set_analog_power_down(struct analogix_dp_device *dp,
248{ 257{
249 u32 reg; 258 u32 reg;
250 u32 phy_pd_addr = ANALOGIX_DP_PHY_PD; 259 u32 phy_pd_addr = ANALOGIX_DP_PHY_PD;
260 u32 mask;
251 261
252 if (dp->plat_data && is_rockchip(dp->plat_data->dev_type)) 262 if (dp->plat_data && is_rockchip(dp->plat_data->dev_type))
253 phy_pd_addr = ANALOGIX_DP_PD; 263 phy_pd_addr = ANALOGIX_DP_PD;
254 264
255 switch (block) { 265 switch (block) {
256 case AUX_BLOCK: 266 case AUX_BLOCK:
257 if (enable) { 267 if (dp->plat_data && is_rockchip(dp->plat_data->dev_type))
258 reg = readl(dp->reg_base + phy_pd_addr); 268 mask = RK_AUX_PD;
259 reg |= AUX_PD; 269 else
260 writel(reg, dp->reg_base + phy_pd_addr); 270 mask = AUX_PD;
261 } else { 271
262 reg = readl(dp->reg_base + phy_pd_addr); 272 reg = readl(dp->reg_base + phy_pd_addr);
263 reg &= ~AUX_PD; 273 if (enable)
264 writel(reg, dp->reg_base + phy_pd_addr); 274 reg |= mask;
265 } 275 else
276 reg &= ~mask;
277 writel(reg, dp->reg_base + phy_pd_addr);
266 break; 278 break;
267 case CH0_BLOCK: 279 case CH0_BLOCK:
268 if (enable) { 280 mask = CH0_PD;
269 reg = readl(dp->reg_base + phy_pd_addr); 281 reg = readl(dp->reg_base + phy_pd_addr);
270 reg |= CH0_PD; 282
271 writel(reg, dp->reg_base + phy_pd_addr); 283 if (enable)
272 } else { 284 reg |= mask;
273 reg = readl(dp->reg_base + phy_pd_addr); 285 else
274 reg &= ~CH0_PD; 286 reg &= ~mask;
275 writel(reg, dp->reg_base + phy_pd_addr); 287 writel(reg, dp->reg_base + phy_pd_addr);
276 }
277 break; 288 break;
278 case CH1_BLOCK: 289 case CH1_BLOCK:
279 if (enable) { 290 mask = CH1_PD;
280 reg = readl(dp->reg_base + phy_pd_addr); 291 reg = readl(dp->reg_base + phy_pd_addr);
281 reg |= CH1_PD; 292
282 writel(reg, dp->reg_base + phy_pd_addr); 293 if (enable)
283 } else { 294 reg |= mask;
284 reg = readl(dp->reg_base + phy_pd_addr); 295 else
285 reg &= ~CH1_PD; 296 reg &= ~mask;
286 writel(reg, dp->reg_base + phy_pd_addr); 297 writel(reg, dp->reg_base + phy_pd_addr);
287 }
288 break; 298 break;
289 case CH2_BLOCK: 299 case CH2_BLOCK:
290 if (enable) { 300 mask = CH2_PD;
291 reg = readl(dp->reg_base + phy_pd_addr); 301 reg = readl(dp->reg_base + phy_pd_addr);
292 reg |= CH2_PD; 302
293 writel(reg, dp->reg_base + phy_pd_addr); 303 if (enable)
294 } else { 304 reg |= mask;
295 reg = readl(dp->reg_base + phy_pd_addr); 305 else
296 reg &= ~CH2_PD; 306 reg &= ~mask;
297 writel(reg, dp->reg_base + phy_pd_addr); 307 writel(reg, dp->reg_base + phy_pd_addr);
298 }
299 break; 308 break;
300 case CH3_BLOCK: 309 case CH3_BLOCK:
301 if (enable) { 310 mask = CH3_PD;
302 reg = readl(dp->reg_base + phy_pd_addr); 311 reg = readl(dp->reg_base + phy_pd_addr);
303 reg |= CH3_PD; 312
304 writel(reg, dp->reg_base + phy_pd_addr); 313 if (enable)
305 } else { 314 reg |= mask;
306 reg = readl(dp->reg_base + phy_pd_addr); 315 else
307 reg &= ~CH3_PD; 316 reg &= ~mask;
308 writel(reg, dp->reg_base + phy_pd_addr); 317 writel(reg, dp->reg_base + phy_pd_addr);
309 }
310 break; 318 break;
311 case ANALOG_TOTAL: 319 case ANALOG_TOTAL:
312 if (enable) { 320 /*
313 reg = readl(dp->reg_base + phy_pd_addr); 321 * There is no bit named DP_PHY_PD, so We used DP_INC_BG
314 reg |= DP_PHY_PD; 322 * to power off everything instead of DP_PHY_PD in
315 writel(reg, dp->reg_base + phy_pd_addr); 323 * Rockchip
316 } else { 324 */
317 reg = readl(dp->reg_base + phy_pd_addr); 325 if (dp->plat_data && is_rockchip(dp->plat_data->dev_type))
318 reg &= ~DP_PHY_PD; 326 mask = DP_INC_BG;
319 writel(reg, dp->reg_base + phy_pd_addr); 327 else
320 } 328 mask = DP_PHY_PD;
329
330 reg = readl(dp->reg_base + phy_pd_addr);
331 if (enable)
332 reg |= mask;
333 else
334 reg &= ~mask;
335
336 writel(reg, dp->reg_base + phy_pd_addr);
337 if (dp->plat_data && is_rockchip(dp->plat_data->dev_type))
338 usleep_range(10, 15);
321 break; 339 break;
322 case POWER_ALL: 340 case POWER_ALL:
323 if (enable) { 341 if (enable) {
324 reg = DP_PHY_PD | AUX_PD | CH3_PD | CH2_PD | 342 reg = DP_ALL_PD;
325 CH1_PD | CH0_PD;
326 writel(reg, dp->reg_base + phy_pd_addr); 343 writel(reg, dp->reg_base + phy_pd_addr);
327 } else { 344 } else {
345 reg = DP_ALL_PD;
346 writel(reg, dp->reg_base + phy_pd_addr);
347 usleep_range(10, 15);
348 reg &= ~DP_INC_BG;
349 writel(reg, dp->reg_base + phy_pd_addr);
350 usleep_range(10, 15);
351
328 writel(0x00, dp->reg_base + phy_pd_addr); 352 writel(0x00, dp->reg_base + phy_pd_addr);
329 } 353 }
330 break; 354 break;
@@ -333,7 +357,7 @@ void analogix_dp_set_analog_power_down(struct analogix_dp_device *dp,
333 } 357 }
334} 358}
335 359
336void analogix_dp_init_analog_func(struct analogix_dp_device *dp) 360int analogix_dp_init_analog_func(struct analogix_dp_device *dp)
337{ 361{
338 u32 reg; 362 u32 reg;
339 int timeout_loop = 0; 363 int timeout_loop = 0;
@@ -355,7 +379,7 @@ void analogix_dp_init_analog_func(struct analogix_dp_device *dp)
355 timeout_loop++; 379 timeout_loop++;
356 if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) { 380 if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) {
357 dev_err(dp->dev, "failed to get pll lock status\n"); 381 dev_err(dp->dev, "failed to get pll lock status\n");
358 return; 382 return -ETIMEDOUT;
359 } 383 }
360 usleep_range(10, 20); 384 usleep_range(10, 20);
361 } 385 }
@@ -366,6 +390,7 @@ void analogix_dp_init_analog_func(struct analogix_dp_device *dp)
366 reg &= ~(SERDES_FIFO_FUNC_EN_N | LS_CLK_DOMAIN_FUNC_EN_N 390 reg &= ~(SERDES_FIFO_FUNC_EN_N | LS_CLK_DOMAIN_FUNC_EN_N
367 | AUX_FUNC_EN_N); 391 | AUX_FUNC_EN_N);
368 writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_2); 392 writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_2);
393 return 0;
369} 394}
370 395
371void analogix_dp_clear_hotplug_interrupts(struct analogix_dp_device *dp) 396void analogix_dp_clear_hotplug_interrupts(struct analogix_dp_device *dp)
@@ -450,17 +475,22 @@ void analogix_dp_init_aux(struct analogix_dp_device *dp)
450 reg = RPLY_RECEIV | AUX_ERR; 475 reg = RPLY_RECEIV | AUX_ERR;
451 writel(reg, dp->reg_base + ANALOGIX_DP_INT_STA); 476 writel(reg, dp->reg_base + ANALOGIX_DP_INT_STA);
452 477
478 analogix_dp_set_analog_power_down(dp, AUX_BLOCK, true);
479 usleep_range(10, 11);
480 analogix_dp_set_analog_power_down(dp, AUX_BLOCK, false);
481
453 analogix_dp_reset_aux(dp); 482 analogix_dp_reset_aux(dp);
454 483
455 /* Disable AUX transaction H/W retry */ 484 /* AUX_BIT_PERIOD_EXPECTED_DELAY doesn't apply to Rockchip IP */
456 if (dp->plat_data && is_rockchip(dp->plat_data->dev_type)) 485 if (dp->plat_data && is_rockchip(dp->plat_data->dev_type))
457 reg = AUX_BIT_PERIOD_EXPECTED_DELAY(0) | 486 reg = 0;
458 AUX_HW_RETRY_COUNT_SEL(3) |
459 AUX_HW_RETRY_INTERVAL_600_MICROSECONDS;
460 else 487 else
461 reg = AUX_BIT_PERIOD_EXPECTED_DELAY(3) | 488 reg = AUX_BIT_PERIOD_EXPECTED_DELAY(3);
462 AUX_HW_RETRY_COUNT_SEL(0) | 489
463 AUX_HW_RETRY_INTERVAL_600_MICROSECONDS; 490 /* Disable AUX transaction H/W retry */
491 reg |= AUX_HW_RETRY_COUNT_SEL(0) |
492 AUX_HW_RETRY_INTERVAL_600_MICROSECONDS;
493
464 writel(reg, dp->reg_base + ANALOGIX_DP_AUX_HW_RETRY_CTL); 494 writel(reg, dp->reg_base + ANALOGIX_DP_AUX_HW_RETRY_CTL);
465 495
466 /* Receive AUX Channel DEFER commands equal to DEFFER_COUNT*64 */ 496 /* Receive AUX Channel DEFER commands equal to DEFFER_COUNT*64 */
@@ -947,8 +977,12 @@ void analogix_dp_config_video_slave_mode(struct analogix_dp_device *dp)
947 u32 reg; 977 u32 reg;
948 978
949 reg = readl(dp->reg_base + ANALOGIX_DP_FUNC_EN_1); 979 reg = readl(dp->reg_base + ANALOGIX_DP_FUNC_EN_1);
950 reg &= ~(MASTER_VID_FUNC_EN_N | SLAVE_VID_FUNC_EN_N); 980 if (dp->plat_data && is_rockchip(dp->plat_data->dev_type)) {
951 reg |= MASTER_VID_FUNC_EN_N; 981 reg &= ~(RK_VID_CAP_FUNC_EN_N | RK_VID_FIFO_FUNC_EN_N);
982 } else {
983 reg &= ~(MASTER_VID_FUNC_EN_N | SLAVE_VID_FUNC_EN_N);
984 reg |= MASTER_VID_FUNC_EN_N;
985 }
952 writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_1); 986 writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_1);
953 987
954 reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10); 988 reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10);
@@ -1072,10 +1106,11 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
1072 struct drm_dp_aux_msg *msg) 1106 struct drm_dp_aux_msg *msg)
1073{ 1107{
1074 u32 reg; 1108 u32 reg;
1109 u32 status_reg;
1075 u8 *buffer = msg->buffer; 1110 u8 *buffer = msg->buffer;
1076 int timeout_loop = 0;
1077 unsigned int i; 1111 unsigned int i;
1078 int num_transferred = 0; 1112 int num_transferred = 0;
1113 int ret;
1079 1114
1080 /* Buffer size of AUX CH is 16 bytes */ 1115 /* Buffer size of AUX CH is 16 bytes */
1081 if (WARN_ON(msg->size > 16)) 1116 if (WARN_ON(msg->size > 16))
@@ -1139,17 +1174,20 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
1139 1174
1140 writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2); 1175 writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2);
1141 1176
1142 /* Is AUX CH command reply received? */ 1177 ret = readx_poll_timeout(readl, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2,
1178 reg, !(reg & AUX_EN), 25, 500 * 1000);
1179 if (ret) {
1180 dev_err(dp->dev, "AUX CH enable timeout!\n");
1181 goto aux_error;
1182 }
1183
1143 /* TODO: Wait for an interrupt instead of looping? */ 1184 /* TODO: Wait for an interrupt instead of looping? */
1144 reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA); 1185 /* Is AUX CH command reply received? */
1145 while (!(reg & RPLY_RECEIV)) { 1186 ret = readx_poll_timeout(readl, dp->reg_base + ANALOGIX_DP_INT_STA,
1146 timeout_loop++; 1187 reg, reg & RPLY_RECEIV, 10, 20 * 1000);
1147 if (timeout_loop > DP_TIMEOUT_LOOP_COUNT) { 1188 if (ret) {
1148 dev_err(dp->dev, "AUX CH command reply failed!\n"); 1189 dev_err(dp->dev, "AUX CH cmd reply timeout!\n");
1149 return -ETIMEDOUT; 1190 goto aux_error;
1150 }
1151 reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA);
1152 usleep_range(10, 11);
1153 } 1191 }
1154 1192
1155 /* Clear interrupt source for AUX CH command reply */ 1193 /* Clear interrupt source for AUX CH command reply */
@@ -1157,17 +1195,13 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
1157 1195
1158 /* Clear interrupt source for AUX CH access error */ 1196 /* Clear interrupt source for AUX CH access error */
1159 reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA); 1197 reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA);
1160 if (reg & AUX_ERR) { 1198 status_reg = readl(dp->reg_base + ANALOGIX_DP_AUX_CH_STA);
1199 if ((reg & AUX_ERR) || (status_reg & AUX_STATUS_MASK)) {
1161 writel(AUX_ERR, dp->reg_base + ANALOGIX_DP_INT_STA); 1200 writel(AUX_ERR, dp->reg_base + ANALOGIX_DP_INT_STA);
1162 return -EREMOTEIO;
1163 }
1164 1201
1165 /* Check AUX CH error access status */ 1202 dev_warn(dp->dev, "AUX CH error happened: %#x (%d)\n",
1166 reg = readl(dp->reg_base + ANALOGIX_DP_AUX_CH_STA); 1203 status_reg & AUX_STATUS_MASK, !!(reg & AUX_ERR));
1167 if ((reg & AUX_STATUS_MASK)) { 1204 goto aux_error;
1168 dev_err(dp->dev, "AUX CH error happened: %d\n\n",
1169 reg & AUX_STATUS_MASK);
1170 return -EREMOTEIO;
1171 } 1205 }
1172 1206
1173 if (msg->request & DP_AUX_I2C_READ) { 1207 if (msg->request & DP_AUX_I2C_READ) {
@@ -1193,4 +1227,10 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
1193 msg->reply = DP_AUX_NATIVE_REPLY_ACK; 1227 msg->reply = DP_AUX_NATIVE_REPLY_ACK;
1194 1228
1195 return num_transferred > 0 ? num_transferred : -EBUSY; 1229 return num_transferred > 0 ? num_transferred : -EBUSY;
1230
1231aux_error:
1232 /* if aux err happen, reset aux */
1233 analogix_dp_init_aux(dp);
1234
1235 return -EREMOTEIO;
1196} 1236}
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h
index 40200c652533..0cf27c731727 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h
@@ -127,7 +127,9 @@
127 127
128/* ANALOGIX_DP_FUNC_EN_1 */ 128/* ANALOGIX_DP_FUNC_EN_1 */
129#define MASTER_VID_FUNC_EN_N (0x1 << 7) 129#define MASTER_VID_FUNC_EN_N (0x1 << 7)
130#define RK_VID_CAP_FUNC_EN_N (0x1 << 6)
130#define SLAVE_VID_FUNC_EN_N (0x1 << 5) 131#define SLAVE_VID_FUNC_EN_N (0x1 << 5)
132#define RK_VID_FIFO_FUNC_EN_N (0x1 << 5)
131#define AUD_FIFO_FUNC_EN_N (0x1 << 4) 133#define AUD_FIFO_FUNC_EN_N (0x1 << 4)
132#define AUD_FUNC_EN_N (0x1 << 3) 134#define AUD_FUNC_EN_N (0x1 << 3)
133#define HDCP_FUNC_EN_N (0x1 << 2) 135#define HDCP_FUNC_EN_N (0x1 << 2)
@@ -342,12 +344,17 @@
342#define DP_PLL_REF_BIT_1_2500V (0x7 << 0) 344#define DP_PLL_REF_BIT_1_2500V (0x7 << 0)
343 345
344/* ANALOGIX_DP_PHY_PD */ 346/* ANALOGIX_DP_PHY_PD */
347#define DP_INC_BG (0x1 << 7)
348#define DP_EXP_BG (0x1 << 6)
345#define DP_PHY_PD (0x1 << 5) 349#define DP_PHY_PD (0x1 << 5)
350#define RK_AUX_PD (0x1 << 5)
346#define AUX_PD (0x1 << 4) 351#define AUX_PD (0x1 << 4)
352#define RK_PLL_PD (0x1 << 4)
347#define CH3_PD (0x1 << 3) 353#define CH3_PD (0x1 << 3)
348#define CH2_PD (0x1 << 2) 354#define CH2_PD (0x1 << 2)
349#define CH1_PD (0x1 << 1) 355#define CH1_PD (0x1 << 1)
350#define CH0_PD (0x1 << 0) 356#define CH0_PD (0x1 << 0)
357#define DP_ALL_PD (0xff)
351 358
352/* ANALOGIX_DP_PHY_TEST */ 359/* ANALOGIX_DP_PHY_TEST */
353#define MACRO_RST (0x1 << 5) 360#define MACRO_RST (0x1 << 5)
diff --git a/drivers/gpu/drm/bridge/cdns-dsi.c b/drivers/gpu/drm/bridge/cdns-dsi.c
new file mode 100644
index 000000000000..c255fc3e1be5
--- /dev/null
+++ b/drivers/gpu/drm/bridge/cdns-dsi.c
@@ -0,0 +1,1623 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright: 2017 Cadence Design Systems, Inc.
4 *
5 * Author: Boris Brezillon <boris.brezillon@bootlin.com>
6 */
7
8#include <drm/drm_atomic_helper.h>
9#include <drm/drm_bridge.h>
10#include <drm/drm_crtc_helper.h>
11#include <drm/drm_mipi_dsi.h>
12#include <drm/drm_panel.h>
13#include <video/mipi_display.h>
14
15#include <linux/clk.h>
16#include <linux/iopoll.h>
17#include <linux/module.h>
18#include <linux/of_address.h>
19#include <linux/of_graph.h>
20#include <linux/platform_device.h>
21#include <linux/pm_runtime.h>
22#include <linux/reset.h>
23
24#define IP_CONF 0x0
25#define SP_HS_FIFO_DEPTH(x) (((x) & GENMASK(30, 26)) >> 26)
26#define SP_LP_FIFO_DEPTH(x) (((x) & GENMASK(25, 21)) >> 21)
27#define VRS_FIFO_DEPTH(x) (((x) & GENMASK(20, 16)) >> 16)
28#define DIRCMD_FIFO_DEPTH(x) (((x) & GENMASK(15, 13)) >> 13)
29#define SDI_IFACE_32 BIT(12)
30#define INTERNAL_DATAPATH_32 (0 << 10)
31#define INTERNAL_DATAPATH_16 (1 << 10)
32#define INTERNAL_DATAPATH_8 (3 << 10)
33#define INTERNAL_DATAPATH_SIZE ((x) & GENMASK(11, 10))
34#define NUM_IFACE(x) ((((x) & GENMASK(9, 8)) >> 8) + 1)
35#define MAX_LANE_NB(x) (((x) & GENMASK(7, 6)) >> 6)
36#define RX_FIFO_DEPTH(x) ((x) & GENMASK(5, 0))
37
38#define MCTL_MAIN_DATA_CTL 0x4
39#define TE_MIPI_POLLING_EN BIT(25)
40#define TE_HW_POLLING_EN BIT(24)
41#define DISP_EOT_GEN BIT(18)
42#define HOST_EOT_GEN BIT(17)
43#define DISP_GEN_CHECKSUM BIT(16)
44#define DISP_GEN_ECC BIT(15)
45#define BTA_EN BIT(14)
46#define READ_EN BIT(13)
47#define REG_TE_EN BIT(12)
48#define IF_TE_EN(x) BIT(8 + (x))
49#define TVG_SEL BIT(6)
50#define VID_EN BIT(5)
51#define IF_VID_SELECT(x) ((x) << 2)
52#define IF_VID_SELECT_MASK GENMASK(3, 2)
53#define IF_VID_MODE BIT(1)
54#define LINK_EN BIT(0)
55
56#define MCTL_MAIN_PHY_CTL 0x8
57#define HS_INVERT_DAT(x) BIT(19 + ((x) * 2))
58#define SWAP_PINS_DAT(x) BIT(18 + ((x) * 2))
59#define HS_INVERT_CLK BIT(17)
60#define SWAP_PINS_CLK BIT(16)
61#define HS_SKEWCAL_EN BIT(15)
62#define WAIT_BURST_TIME(x) ((x) << 10)
63#define DATA_ULPM_EN(x) BIT(6 + (x))
64#define CLK_ULPM_EN BIT(5)
65#define CLK_CONTINUOUS BIT(4)
66#define DATA_LANE_EN(x) BIT((x) - 1)
67
68#define MCTL_MAIN_EN 0xc
69#define DATA_FORCE_STOP BIT(17)
70#define CLK_FORCE_STOP BIT(16)
71#define IF_EN(x) BIT(13 + (x))
72#define DATA_LANE_ULPM_REQ(l) BIT(9 + (l))
73#define CLK_LANE_ULPM_REQ BIT(8)
74#define DATA_LANE_START(x) BIT(4 + (x))
75#define CLK_LANE_EN BIT(3)
76#define PLL_START BIT(0)
77
78#define MCTL_DPHY_CFG0 0x10
79#define DPHY_C_RSTB BIT(20)
80#define DPHY_D_RSTB(x) GENMASK(15 + (x), 16)
81#define DPHY_PLL_PDN BIT(10)
82#define DPHY_CMN_PDN BIT(9)
83#define DPHY_C_PDN BIT(8)
84#define DPHY_D_PDN(x) GENMASK(3 + (x), 4)
85#define DPHY_ALL_D_PDN GENMASK(7, 4)
86#define DPHY_PLL_PSO BIT(1)
87#define DPHY_CMN_PSO BIT(0)
88
89#define MCTL_DPHY_TIMEOUT1 0x14
90#define HSTX_TIMEOUT(x) ((x) << 4)
91#define HSTX_TIMEOUT_MAX GENMASK(17, 0)
92#define CLK_DIV(x) (x)
93#define CLK_DIV_MAX GENMASK(3, 0)
94
95#define MCTL_DPHY_TIMEOUT2 0x18
96#define LPRX_TIMEOUT(x) (x)
97
98#define MCTL_ULPOUT_TIME 0x1c
99#define DATA_LANE_ULPOUT_TIME(x) ((x) << 9)
100#define CLK_LANE_ULPOUT_TIME(x) (x)
101
102#define MCTL_3DVIDEO_CTL 0x20
103#define VID_VSYNC_3D_EN BIT(7)
104#define VID_VSYNC_3D_LR BIT(5)
105#define VID_VSYNC_3D_SECOND_EN BIT(4)
106#define VID_VSYNC_3DFORMAT_LINE (0 << 2)
107#define VID_VSYNC_3DFORMAT_FRAME (1 << 2)
108#define VID_VSYNC_3DFORMAT_PIXEL (2 << 2)
109#define VID_VSYNC_3DMODE_OFF 0
110#define VID_VSYNC_3DMODE_PORTRAIT 1
111#define VID_VSYNC_3DMODE_LANDSCAPE 2
112
113#define MCTL_MAIN_STS 0x24
114#define MCTL_MAIN_STS_CTL 0x130
115#define MCTL_MAIN_STS_CLR 0x150
116#define MCTL_MAIN_STS_FLAG 0x170
117#define HS_SKEWCAL_DONE BIT(11)
118#define IF_UNTERM_PKT_ERR(x) BIT(8 + (x))
119#define LPRX_TIMEOUT_ERR BIT(7)
120#define HSTX_TIMEOUT_ERR BIT(6)
121#define DATA_LANE_RDY(l) BIT(2 + (l))
122#define CLK_LANE_RDY BIT(1)
123#define PLL_LOCKED BIT(0)
124
125#define MCTL_DPHY_ERR 0x28
126#define MCTL_DPHY_ERR_CTL1 0x148
127#define MCTL_DPHY_ERR_CLR 0x168
128#define MCTL_DPHY_ERR_FLAG 0x188
129#define ERR_CONT_LP(x, l) BIT(18 + ((x) * 4) + (l))
130#define ERR_CONTROL(l) BIT(14 + (l))
131#define ERR_SYNESC(l) BIT(10 + (l))
132#define ERR_ESC(l) BIT(6 + (l))
133
134#define MCTL_DPHY_ERR_CTL2 0x14c
135#define ERR_CONT_LP_EDGE(x, l) BIT(12 + ((x) * 4) + (l))
136#define ERR_CONTROL_EDGE(l) BIT(8 + (l))
137#define ERR_SYN_ESC_EDGE(l) BIT(4 + (l))
138#define ERR_ESC_EDGE(l) BIT(0 + (l))
139
140#define MCTL_LANE_STS 0x2c
141#define PPI_C_TX_READY_HS BIT(18)
142#define DPHY_PLL_LOCK BIT(17)
143#define PPI_D_RX_ULPS_ESC(x) (((x) & GENMASK(15, 12)) >> 12)
144#define LANE_STATE_START 0
145#define LANE_STATE_IDLE 1
146#define LANE_STATE_WRITE 2
147#define LANE_STATE_ULPM 3
148#define LANE_STATE_READ 4
149#define DATA_LANE_STATE(l, val) \
150 (((val) >> (2 + 2 * (l) + ((l) ? 1 : 0))) & GENMASK((l) ? 1 : 2, 0))
151#define CLK_LANE_STATE_HS 2
152#define CLK_LANE_STATE(val) ((val) & GENMASK(1, 0))
153
154#define DSC_MODE_CTL 0x30
155#define DSC_MODE_EN BIT(0)
156
157#define DSC_CMD_SEND 0x34
158#define DSC_SEND_PPS BIT(0)
159#define DSC_EXECUTE_QUEUE BIT(1)
160
161#define DSC_PPS_WRDAT 0x38
162
163#define DSC_MODE_STS 0x3c
164#define DSC_PPS_DONE BIT(1)
165#define DSC_EXEC_DONE BIT(2)
166
167#define CMD_MODE_CTL 0x70
168#define IF_LP_EN(x) BIT(9 + (x))
169#define IF_VCHAN_ID(x, c) ((c) << ((x) * 2))
170
171#define CMD_MODE_CTL2 0x74
172#define TE_TIMEOUT(x) ((x) << 11)
173#define FILL_VALUE(x) ((x) << 3)
174#define ARB_IF_WITH_HIGHEST_PRIORITY(x) ((x) << 1)
175#define ARB_ROUND_ROBIN_MODE BIT(0)
176
177#define CMD_MODE_STS 0x78
178#define CMD_MODE_STS_CTL 0x134
179#define CMD_MODE_STS_CLR 0x154
180#define CMD_MODE_STS_FLAG 0x174
181#define ERR_IF_UNDERRUN(x) BIT(4 + (x))
182#define ERR_UNWANTED_READ BIT(3)
183#define ERR_TE_MISS BIT(2)
184#define ERR_NO_TE BIT(1)
185#define CSM_RUNNING BIT(0)
186
187#define DIRECT_CMD_SEND 0x80
188
189#define DIRECT_CMD_MAIN_SETTINGS 0x84
190#define TRIGGER_VAL(x) ((x) << 25)
191#define CMD_LP_EN BIT(24)
192#define CMD_SIZE(x) ((x) << 16)
193#define CMD_VCHAN_ID(x) ((x) << 14)
194#define CMD_DATATYPE(x) ((x) << 8)
195#define CMD_LONG BIT(3)
196#define WRITE_CMD 0
197#define READ_CMD 1
198#define TE_REQ 4
199#define TRIGGER_REQ 5
200#define BTA_REQ 6
201
202#define DIRECT_CMD_STS 0x88
203#define DIRECT_CMD_STS_CTL 0x138
204#define DIRECT_CMD_STS_CLR 0x158
205#define DIRECT_CMD_STS_FLAG 0x178
206#define RCVD_ACK_VAL(val) ((val) >> 16)
207#define RCVD_TRIGGER_VAL(val) (((val) & GENMASK(14, 11)) >> 11)
208#define READ_COMPLETED_WITH_ERR BIT(10)
209#define BTA_FINISHED BIT(9)
210#define BTA_COMPLETED BIT(8)
211#define TE_RCVD BIT(7)
212#define TRIGGER_RCVD BIT(6)
213#define ACK_WITH_ERR_RCVD BIT(5)
214#define ACK_RCVD BIT(4)
215#define READ_COMPLETED BIT(3)
216#define TRIGGER_COMPLETED BIT(2)
217#define WRITE_COMPLETED BIT(1)
218#define SENDING_CMD BIT(0)
219
220#define DIRECT_CMD_STOP_READ 0x8c
221
222#define DIRECT_CMD_WRDATA 0x90
223
224#define DIRECT_CMD_FIFO_RST 0x94
225
226#define DIRECT_CMD_RDDATA 0xa0
227
228#define DIRECT_CMD_RD_PROPS 0xa4
229#define RD_DCS BIT(18)
230#define RD_VCHAN_ID(val) (((val) >> 16) & GENMASK(1, 0))
231#define RD_SIZE(val) ((val) & GENMASK(15, 0))
232
233#define DIRECT_CMD_RD_STS 0xa8
234#define DIRECT_CMD_RD_STS_CTL 0x13c
235#define DIRECT_CMD_RD_STS_CLR 0x15c
236#define DIRECT_CMD_RD_STS_FLAG 0x17c
237#define ERR_EOT_WITH_ERR BIT(8)
238#define ERR_MISSING_EOT BIT(7)
239#define ERR_WRONG_LENGTH BIT(6)
240#define ERR_OVERSIZE BIT(5)
241#define ERR_RECEIVE BIT(4)
242#define ERR_UNDECODABLE BIT(3)
243#define ERR_CHECKSUM BIT(2)
244#define ERR_UNCORRECTABLE BIT(1)
245#define ERR_FIXED BIT(0)
246
247#define VID_MAIN_CTL 0xb0
248#define VID_IGNORE_MISS_VSYNC BIT(31)
249#define VID_FIELD_SW BIT(28)
250#define VID_INTERLACED_EN BIT(27)
251#define RECOVERY_MODE(x) ((x) << 25)
252#define RECOVERY_MODE_NEXT_HSYNC 0
253#define RECOVERY_MODE_NEXT_STOP_POINT 2
254#define RECOVERY_MODE_NEXT_VSYNC 3
255#define REG_BLKEOL_MODE(x) ((x) << 23)
256#define REG_BLKLINE_MODE(x) ((x) << 21)
257#define REG_BLK_MODE_NULL_PKT 0
258#define REG_BLK_MODE_BLANKING_PKT 1
259#define REG_BLK_MODE_LP 2
260#define SYNC_PULSE_HORIZONTAL BIT(20)
261#define SYNC_PULSE_ACTIVE BIT(19)
262#define BURST_MODE BIT(18)
263#define VID_PIXEL_MODE_MASK GENMASK(17, 14)
264#define VID_PIXEL_MODE_RGB565 (0 << 14)
265#define VID_PIXEL_MODE_RGB666_PACKED (1 << 14)
266#define VID_PIXEL_MODE_RGB666 (2 << 14)
267#define VID_PIXEL_MODE_RGB888 (3 << 14)
268#define VID_PIXEL_MODE_RGB101010 (4 << 14)
269#define VID_PIXEL_MODE_RGB121212 (5 << 14)
270#define VID_PIXEL_MODE_YUV420 (8 << 14)
271#define VID_PIXEL_MODE_YUV422_PACKED (9 << 14)
272#define VID_PIXEL_MODE_YUV422 (10 << 14)
273#define VID_PIXEL_MODE_YUV422_24B (11 << 14)
274#define VID_PIXEL_MODE_DSC_COMP (12 << 14)
275#define VID_DATATYPE(x) ((x) << 8)
276#define VID_VIRTCHAN_ID(iface, x) ((x) << (4 + (iface) * 2))
277#define STOP_MODE(x) ((x) << 2)
278#define START_MODE(x) (x)
279
280#define VID_VSIZE1 0xb4
281#define VFP_LEN(x) ((x) << 12)
282#define VBP_LEN(x) ((x) << 6)
283#define VSA_LEN(x) (x)
284
285#define VID_VSIZE2 0xb8
286#define VACT_LEN(x) (x)
287
288#define VID_HSIZE1 0xc0
289#define HBP_LEN(x) ((x) << 16)
290#define HSA_LEN(x) (x)
291
292#define VID_HSIZE2 0xc4
293#define HFP_LEN(x) ((x) << 16)
294#define HACT_LEN(x) (x)
295
296#define VID_BLKSIZE1 0xcc
297#define BLK_EOL_PKT_LEN(x) ((x) << 15)
298#define BLK_LINE_EVENT_PKT_LEN(x) (x)
299
300#define VID_BLKSIZE2 0xd0
301#define BLK_LINE_PULSE_PKT_LEN(x) (x)
302
303#define VID_PKT_TIME 0xd8
304#define BLK_EOL_DURATION(x) (x)
305
306#define VID_DPHY_TIME 0xdc
307#define REG_WAKEUP_TIME(x) ((x) << 17)
308#define REG_LINE_DURATION(x) (x)
309
310#define VID_ERR_COLOR1 0xe0
311#define COL_GREEN(x) ((x) << 12)
312#define COL_RED(x) (x)
313
314#define VID_ERR_COLOR2 0xe4
315#define PAD_VAL(x) ((x) << 12)
316#define COL_BLUE(x) (x)
317
318#define VID_VPOS 0xe8
319#define LINE_VAL(val) (((val) & GENMASK(14, 2)) >> 2)
320#define LINE_POS(val) ((val) & GENMASK(1, 0))
321
322#define VID_HPOS 0xec
323#define HORIZ_VAL(val) (((val) & GENMASK(17, 3)) >> 3)
324#define HORIZ_POS(val) ((val) & GENMASK(2, 0))
325
326#define VID_MODE_STS 0xf0
327#define VID_MODE_STS_CTL 0x140
328#define VID_MODE_STS_CLR 0x160
329#define VID_MODE_STS_FLAG 0x180
330#define VSG_RECOVERY BIT(10)
331#define ERR_VRS_WRONG_LEN BIT(9)
332#define ERR_LONG_READ BIT(8)
333#define ERR_LINE_WRITE BIT(7)
334#define ERR_BURST_WRITE BIT(6)
335#define ERR_SMALL_HEIGHT BIT(5)
336#define ERR_SMALL_LEN BIT(4)
337#define ERR_MISSING_VSYNC BIT(3)
338#define ERR_MISSING_HSYNC BIT(2)
339#define ERR_MISSING_DATA BIT(1)
340#define VSG_RUNNING BIT(0)
341
342#define VID_VCA_SETTING1 0xf4
343#define BURST_LP BIT(16)
344#define MAX_BURST_LIMIT(x) (x)
345
346#define VID_VCA_SETTING2 0xf8
347#define MAX_LINE_LIMIT(x) ((x) << 16)
348#define EXACT_BURST_LIMIT(x) (x)
349
350#define TVG_CTL 0xfc
351#define TVG_STRIPE_SIZE(x) ((x) << 5)
352#define TVG_MODE_MASK GENMASK(4, 3)
353#define TVG_MODE_SINGLE_COLOR (0 << 3)
354#define TVG_MODE_VSTRIPES (2 << 3)
355#define TVG_MODE_HSTRIPES (3 << 3)
356#define TVG_STOPMODE_MASK GENMASK(2, 1)
357#define TVG_STOPMODE_EOF (0 << 1)
358#define TVG_STOPMODE_EOL (1 << 1)
359#define TVG_STOPMODE_NOW (2 << 1)
#define TVG_RUN BIT(0)

/* Test video generator (TVG) frame geometry. */
#define TVG_IMG_SIZE 0x100
#define TVG_NBLINES(x) ((x) << 16)
#define TVG_LINE_SIZE(x) (x)

/* TVG test-pattern colors: red/green in one reg, blue in the _BIS reg. */
#define TVG_COLOR1 0x104
#define TVG_COL1_GREEN(x) ((x) << 12)
#define TVG_COL1_RED(x) (x)

#define TVG_COLOR1_BIS 0x108
#define TVG_COL1_BLUE(x) (x)

#define TVG_COLOR2 0x10c
#define TVG_COL2_GREEN(x) ((x) << 12)
#define TVG_COL2_RED(x) (x)

#define TVG_COLOR2_BIS 0x110
#define TVG_COL2_BLUE(x) (x)

/* TVG status and interrupt control/clear/flag registers. */
#define TVG_STS 0x114
#define TVG_STS_CTL 0x144
#define TVG_STS_CLR 0x164
#define TVG_STS_FLAG 0x184
#define TVG_STS_RUNNING BIT(0)

#define STS_CTL_EDGE(e) ((e) << 16)

/* DPHY data lane remapping: one 8-bit lane id per byte slot. */
#define DPHY_LANES_MAP 0x198
#define DAT_REMAP_CFG(b, l) ((l) << ((b) * 8))

/* DPI input interrupt registers. */
#define DPI_IRQ_EN 0x1a0
#define DPI_IRQ_CLR 0x1a4
#define DPI_IRQ_STS 0x1a8
#define PIXEL_BUF_OVERFLOW BIT(0)

#define DPI_CFG 0x1ac
#define DPI_CFG_FIFO_DEPTH(x) ((x) >> 16)
#define DPI_CFG_FIFO_LEVEL(x) ((x) & GENMASK(15, 0))

#define TEST_GENERIC 0x1f0
#define TEST_STATUS(x) ((x) >> 16)
#define TEST_CTRL(x) (x)

/* IP identification register; probe checks REV_VENDOR_ID() == 0xcad. */
#define ID_REG 0x1fc
#define REV_VENDOR_ID(x) (((x) & GENMASK(31, 20)) >> 20)
#define REV_PRODUCT_ID(x) (((x) & GENMASK(19, 12)) >> 12)
#define REV_HW(x) (((x) & GENMASK(11, 8)) >> 8)
#define REV_MAJOR(x) (((x) & GENMASK(7, 4)) >> 4)
#define REV_MINOR(x) ((x) & GENMASK(3, 0))

/* OF-graph port indices: port 0 is the DSI output, 1+ are the inputs. */
#define DSI_OUTPUT_PORT 0
#define DSI_INPUT_PORT(inputid) (1 + (inputid))

/* Per-line DSI protocol overheads, in bytes. */
#define DSI_HBP_FRAME_OVERHEAD 12
#define DSI_HSA_FRAME_OVERHEAD 14
#define DSI_HFP_FRAME_OVERHEAD 6
#define DSI_HSS_VSS_VSE_FRAME_OVERHEAD 4
#define DSI_BLANKING_FRAME_OVERHEAD 6
#define DSI_NULL_FRAME_OVERHEAD 6
#define DSI_EOT_PKT_SIZE 4

/* Default DPHY wakeup time (see cdns_dphy_ref_get_wakeup_time_ns()). */
#define REG_WAKEUP_TIME_NS 800
#define DPHY_PLL_RATE_HZ 108000000

/* DPHY registers */
#define DPHY_PMA_CMN(reg) (reg)
#define DPHY_PMA_LCLK(reg) (0x100 + (reg))
#define DPHY_PMA_LDATA(lane, reg) (0x200 + ((lane) * 0x100) + (reg))
#define DPHY_PMA_RCLK(reg) (0x600 + (reg))
#define DPHY_PMA_RDATA(lane, reg) (0x700 + ((lane) * 0x100) + (reg))
#define DPHY_PCS(reg) (0xb00 + (reg))

#define DPHY_CMN_SSM DPHY_PMA_CMN(0x20)
#define DPHY_CMN_SSM_EN BIT(0)
#define DPHY_CMN_TX_MODE_EN BIT(9)

#define DPHY_CMN_PWM DPHY_PMA_CMN(0x40)
#define DPHY_CMN_PWM_DIV(x) ((x) << 20)
#define DPHY_CMN_PWM_LOW(x) ((x) << 10)
#define DPHY_CMN_PWM_HIGH(x) (x)

#define DPHY_CMN_FBDIV DPHY_PMA_CMN(0x4c)
#define DPHY_CMN_FBDIV_VAL(low, high) (((high) << 11) | ((low) << 22))
#define DPHY_CMN_FBDIV_FROM_REG (BIT(10) | BIT(21))

#define DPHY_CMN_OPIPDIV DPHY_PMA_CMN(0x50)
#define DPHY_CMN_IPDIV_FROM_REG BIT(0)
#define DPHY_CMN_IPDIV(x) ((x) << 1)
#define DPHY_CMN_OPDIV_FROM_REG BIT(6)
#define DPHY_CMN_OPDIV(x) ((x) << 7)

#define DPHY_PSM_CFG DPHY_PCS(0x4)
#define DPHY_PSM_CFG_FROM_REG BIT(0)
#define DPHY_PSM_CLK_DIV(x) ((x) << 1)
455
/*
 * Description of the peripheral attached to the DSI output port, filled in
 * cdns_dsi_attach().
 */
struct cdns_dsi_output {
	struct mipi_dsi_device *dev;	/* attached DSI peripheral */
	struct drm_panel *panel;	/* non-NULL when the peripheral is a panel */
	struct drm_bridge *bridge;	/* next bridge (wraps the panel if any) */
};
461
/* Input interfaces of the DSI controller; only DPI is used by this driver. */
enum cdns_dsi_input_id {
	CDNS_SDI_INPUT,
	CDNS_DPI_INPUT,
	CDNS_DSC_INPUT,
};
467
/* DPHY PLL configuration computed by cdns_dsi_get_dphy_pll_cfg(). */
struct cdns_dphy_cfg {
	u8 pll_ipdiv;		/* PLL input divider */
	u8 pll_opdiv;		/* PLL output divider */
	u16 pll_fbdiv;		/* PLL feedback divider */
	unsigned long lane_bps;	/* resulting per-lane rate, bits/sec */
	unsigned int nlanes;	/* number of data lanes */
};
475
/* Horizontal timings of a DSI line, expressed in bytes. */
struct cdns_dsi_cfg {
	unsigned int hfp;	/* horizontal front porch */
	unsigned int hsa;	/* horizontal sync active */
	unsigned int hbp;	/* horizontal back porch */
	unsigned int hact;	/* active pixels */
	unsigned int htotal;	/* total line length, including overheads */
};
483
struct cdns_dphy;

/*
 * How the two DPHY clock lanes ('left' and 'right') are attached to the two
 * groups of data lanes (see cdns_dsi_hs_init()).
 */
enum cdns_dphy_clk_lane_cfg {
	DPHY_CLK_CFG_LEFT_DRIVES_ALL = 0,
	DPHY_CLK_CFG_LEFT_DRIVES_RIGHT = 1,
	DPHY_CLK_CFG_LEFT_DRIVES_LEFT = 2,
	DPHY_CLK_CFG_RIGHT_DRIVES_ALL = 3,
};
492
/*
 * SoC-specific DPHY integration hooks. All hooks except get_wakeup_time_ns
 * are optional: callers check for NULL before invoking them.
 */
struct cdns_dphy_ops {
	int (*probe)(struct cdns_dphy *dphy);
	void (*remove)(struct cdns_dphy *dphy);
	void (*set_psm_div)(struct cdns_dphy *dphy, u8 div);
	void (*set_clk_lane_cfg)(struct cdns_dphy *dphy,
				 enum cdns_dphy_clk_lane_cfg cfg);
	void (*set_pll_cfg)(struct cdns_dphy *dphy,
			    const struct cdns_dphy_cfg *cfg);
	unsigned long (*get_wakeup_time_ns)(struct cdns_dphy *dphy);
};
503
/* DPHY instance state, allocated by cdns_dphy_probe(). */
struct cdns_dphy {
	struct cdns_dphy_cfg cfg;	/* PLL configuration */
	void __iomem *regs;		/* DPHY register space */
	struct clk *psm_clk;		/* PSM (state machine) clock */
	struct clk *pll_ref_clk;	/* PLL reference clock */
	const struct cdns_dphy_ops *ops;	/* SoC integration hooks */
};
511
/*
 * One input interface of the DSI controller. The embedded drm_bridge lets
 * the input take place in a DRM bridge chain (see input_to_dsi()).
 */
struct cdns_dsi_input {
	enum cdns_dsi_input_id id;
	struct drm_bridge bridge;
};
516
/* Main driver state for the Cadence DSI host controller. */
struct cdns_dsi {
	struct mipi_dsi_host base;	/* DSI host exposed to peripherals */
	void __iomem *regs;		/* controller register space */
	struct cdns_dsi_input input;	/* input side (forced to DPI in probe) */
	struct cdns_dsi_output output;	/* attached panel/bridge */
	unsigned int direct_cmd_fifo_depth;	/* TX command FIFO depth, bytes */
	unsigned int rx_fifo_depth;	/* RX FIFO depth, bytes */
	struct completion direct_cmd_comp;	/* signalled by cdns_dsi_interrupt() */
	struct clk *dsi_p_clk;		/* peripheral (register) clock */
	struct reset_control *dsi_p_rst;	/* peripheral reset line */
	struct clk *dsi_sys_clk;	/* DSI system clock */
	bool link_initialized;		/* set once cdns_dsi_init_link() ran */
	struct cdns_dphy *dphy;		/* attached DPHY */
};
531
/* Get the cdns_dsi embedding the given input interface. */
static inline struct cdns_dsi *input_to_dsi(struct cdns_dsi_input *input)
{
	return container_of(input, struct cdns_dsi, input);
}
536
/* Get the cdns_dsi embedding the given mipi_dsi_host. */
static inline struct cdns_dsi *to_cdns_dsi(struct mipi_dsi_host *host)
{
	return container_of(host, struct cdns_dsi, base);
}
541
/* Get the cdns_dsi_input embedding the given drm_bridge. */
static inline struct cdns_dsi_input *
bridge_to_cdns_dsi_input(struct drm_bridge *bridge)
{
	return container_of(bridge, struct cdns_dsi_input, bridge);
}
547
/*
 * Compute the DPHY PLL configuration (@cfg) needed to produce a per-lane
 * bit rate matching the DPI pixel rate for a DSI line of @dsi_htotal bytes
 * transmitted on @dsi_nlanes lanes. When the requested DSI line length
 * cannot be matched exactly, @dsi_hfp_ext returns the number of bytes that
 * must be added to HFP to obtain an exact match. Returns 0 on success,
 * -EINVAL when the PLL reference, per-lane rate or feedback divider cannot
 * satisfy the request.
 *
 * NOTE(review): @dpi_bpp is currently unused.
 */
static int cdns_dsi_get_dphy_pll_cfg(struct cdns_dphy *dphy,
				     struct cdns_dphy_cfg *cfg,
				     unsigned int dpi_htotal,
				     unsigned int dpi_bpp,
				     unsigned int dpi_hz,
				     unsigned int dsi_htotal,
				     unsigned int dsi_nlanes,
				     unsigned int *dsi_hfp_ext)
{
	u64 dlane_bps, dlane_bps_max, fbdiv, fbdiv_max, adj_dsi_htotal;
	unsigned long pll_ref_hz = clk_get_rate(dphy->pll_ref_clk);

	memset(cfg, 0, sizeof(*cfg));

	cfg->nlanes = dsi_nlanes;

	/* Pick the input divider from the PLL reference clock range. */
	if (pll_ref_hz < 9600000 || pll_ref_hz >= 150000000)
		return -EINVAL;
	else if (pll_ref_hz < 19200000)
		cfg->pll_ipdiv = 1;
	else if (pll_ref_hz < 38400000)
		cfg->pll_ipdiv = 2;
	else if (pll_ref_hz < 76800000)
		cfg->pll_ipdiv = 4;
	else
		cfg->pll_ipdiv = 8;

	/*
	 * Make sure DSI htotal is aligned on a lane boundary when calculating
	 * the expected data rate. This is done by extending HFP in case of
	 * misalignment.
	 */
	adj_dsi_htotal = dsi_htotal;
	if (dsi_htotal % dsi_nlanes)
		adj_dsi_htotal += dsi_nlanes - (dsi_htotal % dsi_nlanes);

	dlane_bps = (u64)dpi_hz * adj_dsi_htotal;

	/* data rate in bytes/sec is not an integer, refuse the mode. */
	if (do_div(dlane_bps, dsi_nlanes * dpi_htotal))
		return -EINVAL;

	/* data rate was in bytes/sec, convert to bits/sec. */
	dlane_bps *= 8;

	/* Pick the output divider from the supported per-lane rate range. */
	if (dlane_bps > 2500000000UL || dlane_bps < 160000000UL)
		return -EINVAL;
	else if (dlane_bps >= 1250000000)
		cfg->pll_opdiv = 1;
	else if (dlane_bps >= 630000000)
		cfg->pll_opdiv = 2;
	else if (dlane_bps >= 320000000)
		cfg->pll_opdiv = 4;
	else if (dlane_bps >= 160000000)
		cfg->pll_opdiv = 8;

	/*
	 * Allow a deviation of 0.2% on the per-lane data rate to try to
	 * recover a potential mismatch between DPI and PPI clks.
	 */
	dlane_bps_max = dlane_bps + DIV_ROUND_DOWN_ULL(dlane_bps, 500);
	fbdiv_max = DIV_ROUND_DOWN_ULL(dlane_bps_max * 2 *
				       cfg->pll_opdiv * cfg->pll_ipdiv,
				       pll_ref_hz);
	fbdiv = DIV_ROUND_UP_ULL(dlane_bps * 2 * cfg->pll_opdiv *
				 cfg->pll_ipdiv,
				 pll_ref_hz);

	/*
	 * Iterate over all acceptable fbdiv and try to find an adjusted DSI
	 * htotal length providing an exact match.
	 *
	 * Note that we could do something even trickier by relying on the fact
	 * that a new line is not necessarily aligned on a lane boundary, so,
	 * by making adj_dsi_htotal non aligned on a dsi_lanes we can improve a
	 * bit the precision. With this, the step would be
	 *
	 * pll_ref_hz / (2 * opdiv * ipdiv * nlanes)
	 *
	 * instead of
	 *
	 * pll_ref_hz / (2 * opdiv * ipdiv)
	 *
	 * The drawback of this approach is that we would need to make sure the
	 * number or lines is a multiple of the realignment periodicity which is
	 * a function of the number of lanes and the original misalignment. For
	 * example, for NLANES = 4 and HTOTAL % NLANES = 3, it takes 4 lines
	 * to realign on a lane:
	 * LINE 0: expected number of bytes, starts emitting first byte of
	 *	   LINE 1 on LANE 3
	 * LINE 1: expected number of bytes, starts emitting first 2 bytes of
	 *	   LINE 2 on LANES 2 and 3
	 * LINE 2: expected number of bytes, starts emitting first 3 bytes of
	 *	   of LINE 3 on LANES 1, 2 and 3
	 * LINE 3: one byte less, now things are realigned on LANE 0 for LINE 4
	 *
	 * I figured this extra complexity was not worth the benefit, but if
	 * someone really has unfixable mismatch, that would be something to
	 * investigate.
	 */
	for (; fbdiv <= fbdiv_max; fbdiv++) {
		u32 rem;

		adj_dsi_htotal = (u64)fbdiv * pll_ref_hz * dsi_nlanes *
				 dpi_htotal;

		/*
		 * Do the division in 2 steps to avoid an overflow on the
		 * divider.
		 */
		rem = do_div(adj_dsi_htotal, dpi_hz);
		if (rem)
			continue;

		rem = do_div(adj_dsi_htotal,
			     cfg->pll_opdiv * cfg->pll_ipdiv * 2 * 8);
		if (rem)
			continue;

		cfg->pll_fbdiv = fbdiv;
		*dsi_hfp_ext = adj_dsi_htotal - dsi_htotal;
		break;
	}

	/* No match, let's just reject the display mode. */
	if (!cfg->pll_fbdiv)
		return -EINVAL;

	/* Recompute the exact per-lane rate for the adjusted line length. */
	dlane_bps = DIV_ROUND_DOWN_ULL((u64)dpi_hz * adj_dsi_htotal * 8,
				       dsi_nlanes * dpi_htotal);
	cfg->lane_bps = dlane_bps;

	return 0;
}
682
683static int cdns_dphy_setup_psm(struct cdns_dphy *dphy)
684{
685 unsigned long psm_clk_hz = clk_get_rate(dphy->psm_clk);
686 unsigned long psm_div;
687
688 if (!psm_clk_hz || psm_clk_hz > 100000000)
689 return -EINVAL;
690
691 psm_div = DIV_ROUND_CLOSEST(psm_clk_hz, 1000000);
692 if (dphy->ops->set_psm_div)
693 dphy->ops->set_psm_div(dphy, psm_div);
694
695 return 0;
696}
697
/* Optional hook: set the clock-lane to data-lane attachment. */
static void cdns_dphy_set_clk_lane_cfg(struct cdns_dphy *dphy,
				       enum cdns_dphy_clk_lane_cfg cfg)
{
	if (dphy->ops->set_clk_lane_cfg)
		dphy->ops->set_clk_lane_cfg(dphy, cfg);
}
704
/* Optional hook: program the DPHY PLL dividers. */
static void cdns_dphy_set_pll_cfg(struct cdns_dphy *dphy,
				  const struct cdns_dphy_cfg *cfg)
{
	if (dphy->ops->set_pll_cfg)
		dphy->ops->set_pll_cfg(dphy, cfg);
}
711
/*
 * Mandatory hook: unlike the other DPHY hooks this one is called without a
 * NULL check, so every cdns_dphy_ops must provide it.
 */
static unsigned long cdns_dphy_get_wakeup_time_ns(struct cdns_dphy *dphy)
{
	return dphy->ops->get_wakeup_time_ns(dphy);
}
716
/*
 * Convert a DPI timing expressed in pixels into the equivalent DSI timing
 * in bytes, rounding up to a whole byte and then reserving room for the
 * DSI packet overhead. Timings too short to fit the overhead collapse to 0.
 */
static unsigned int dpi_to_dsi_timing(unsigned int dpi_timing,
				      unsigned int dpi_bpp,
				      unsigned int dsi_pkt_overhead)
{
	unsigned int nbytes = (dpi_timing * dpi_bpp + 7) / 8;

	return (nbytes > dsi_pkt_overhead) ? nbytes - dsi_pkt_overhead : 0;
}
730
/*
 * Convert a DPI display mode into DSI horizontal timings (@dsi_cfg, in
 * bytes) and the matching DPHY PLL settings (@dphy_cfg). When
 * @mode_valid_check is true the raw mode fields are used (mode_valid path),
 * otherwise the crtc_* fields (enable path). Returns 0 on success, or a
 * negative error code when the PLL cannot match the mode or the DPI front
 * porch is too short to let the FIFO drain.
 */
static int cdns_dsi_mode2cfg(struct cdns_dsi *dsi,
			     const struct drm_display_mode *mode,
			     struct cdns_dsi_cfg *dsi_cfg,
			     struct cdns_dphy_cfg *dphy_cfg,
			     bool mode_valid_check)
{
	unsigned long dsi_htotal = 0, dsi_hss_hsa_hse_hbp = 0;
	struct cdns_dsi_output *output = &dsi->output;
	unsigned int dsi_hfp_ext = 0, dpi_hfp, tmp;
	bool sync_pulse = false;
	int bpp, nlanes, ret;

	memset(dsi_cfg, 0, sizeof(*dsi_cfg));

	if (output->dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
		sync_pulse = true;

	bpp = mipi_dsi_pixel_format_to_bpp(output->dev->format);
	nlanes = output->dev->lanes;

	/* HBP: sync end (or sync start, in sync-event mode) to htotal. */
	if (mode_valid_check)
		tmp = mode->htotal -
		      (sync_pulse ? mode->hsync_end : mode->hsync_start);
	else
		tmp = mode->crtc_htotal -
		      (sync_pulse ?
		       mode->crtc_hsync_end : mode->crtc_hsync_start);

	dsi_cfg->hbp = dpi_to_dsi_timing(tmp, bpp, DSI_HBP_FRAME_OVERHEAD);
	dsi_htotal += dsi_cfg->hbp + DSI_HBP_FRAME_OVERHEAD;
	dsi_hss_hsa_hse_hbp += dsi_cfg->hbp + DSI_HBP_FRAME_OVERHEAD;

	/* HSA is only transmitted in sync-pulse mode. */
	if (sync_pulse) {
		if (mode_valid_check)
			tmp = mode->hsync_end - mode->hsync_start;
		else
			tmp = mode->crtc_hsync_end - mode->crtc_hsync_start;

		dsi_cfg->hsa = dpi_to_dsi_timing(tmp, bpp,
						 DSI_HSA_FRAME_OVERHEAD);
		dsi_htotal += dsi_cfg->hsa + DSI_HSA_FRAME_OVERHEAD;
		dsi_hss_hsa_hse_hbp += dsi_cfg->hsa + DSI_HSA_FRAME_OVERHEAD;
	}

	dsi_cfg->hact = dpi_to_dsi_timing(mode_valid_check ?
					  mode->hdisplay : mode->crtc_hdisplay,
					  bpp, 0);
	dsi_htotal += dsi_cfg->hact;

	if (mode_valid_check)
		dpi_hfp = mode->hsync_start - mode->hdisplay;
	else
		dpi_hfp = mode->crtc_hsync_start - mode->crtc_hdisplay;

	dsi_cfg->hfp = dpi_to_dsi_timing(dpi_hfp, bpp, DSI_HFP_FRAME_OVERHEAD);
	dsi_htotal += dsi_cfg->hfp + DSI_HFP_FRAME_OVERHEAD;

	/* Find PLL settings; HFP may be extended for an exact rate match. */
	if (mode_valid_check)
		ret = cdns_dsi_get_dphy_pll_cfg(dsi->dphy, dphy_cfg,
						mode->htotal, bpp,
						mode->clock * 1000,
						dsi_htotal, nlanes,
						&dsi_hfp_ext);
	else
		ret = cdns_dsi_get_dphy_pll_cfg(dsi->dphy, dphy_cfg,
						mode->crtc_htotal, bpp,
						mode->crtc_clock * 1000,
						dsi_htotal, nlanes,
						&dsi_hfp_ext);

	if (ret)
		return ret;

	dsi_cfg->hfp += dsi_hfp_ext;
	dsi_htotal += dsi_hfp_ext;
	dsi_cfg->htotal = dsi_htotal;

	/*
	 * Make sure DPI(HFP) > DSI(HSS+HSA+HSE+HBP) to guarantee that the FIFO
	 * is empty before we start a receiving a new line on the DPI
	 * interface.
	 */
	if ((u64)dphy_cfg->lane_bps * dpi_hfp * nlanes <
	    (u64)dsi_hss_hsa_hse_hbp *
	    (mode_valid_check ? mode->clock : mode->crtc_clock) * 1000)
		return -EINVAL;

	return 0;
}
820
/*
 * DRM bridge .attach hook: chain the output bridge (panel or next bridge)
 * after this one. Refuses non-atomic DRM devices because the enable path
 * reads the crtc's atomic state.
 */
static int cdns_dsi_bridge_attach(struct drm_bridge *bridge)
{
	struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
	struct cdns_dsi *dsi = input_to_dsi(input);
	struct cdns_dsi_output *output = &dsi->output;

	if (!drm_core_check_feature(bridge->dev, DRIVER_ATOMIC)) {
		dev_err(dsi->base.dev,
			"cdns-dsi driver is only compatible with DRM devices supporting atomic updates");
		return -ENOTSUPP;
	}

	return drm_bridge_attach(bridge->encoder, output->bridge, bridge);
}
835
836static enum drm_mode_status
837cdns_dsi_bridge_mode_valid(struct drm_bridge *bridge,
838 const struct drm_display_mode *mode)
839{
840 struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
841 struct cdns_dsi *dsi = input_to_dsi(input);
842 struct cdns_dsi_output *output = &dsi->output;
843 struct cdns_dphy_cfg dphy_cfg;
844 struct cdns_dsi_cfg dsi_cfg;
845 int bpp, nlanes, ret;
846
847 /*
848 * VFP_DSI should be less than VFP_DPI and VFP_DSI should be at
849 * least 1.
850 */
851 if (mode->vtotal - mode->vsync_end < 2)
852 return MODE_V_ILLEGAL;
853
854 /* VSA_DSI = VSA_DPI and must be at least 2. */
855 if (mode->vsync_end - mode->vsync_start < 2)
856 return MODE_V_ILLEGAL;
857
858 /* HACT must be 32-bits aligned. */
859 bpp = mipi_dsi_pixel_format_to_bpp(output->dev->format);
860 if ((mode->hdisplay * bpp) % 32)
861 return MODE_H_ILLEGAL;
862
863 nlanes = output->dev->lanes;
864
865 ret = cdns_dsi_mode2cfg(dsi, mode, &dsi_cfg, &dphy_cfg, true);
866 if (ret)
867 return MODE_CLOCK_RANGE;
868
869 return MODE_OK;
870}
871
/*
 * DRM bridge .disable hook: stop the video interface for this input and
 * drop the runtime PM reference taken in cdns_dsi_bridge_enable().
 */
static void cdns_dsi_bridge_disable(struct drm_bridge *bridge)
{
	struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
	struct cdns_dsi *dsi = input_to_dsi(input);
	u32 val;

	/* Stop video mode and EoT generation. */
	val = readl(dsi->regs + MCTL_MAIN_DATA_CTL);
	val &= ~(IF_VID_SELECT_MASK | IF_VID_MODE | VID_EN | HOST_EOT_GEN |
		 DISP_EOT_GEN);
	writel(val, dsi->regs + MCTL_MAIN_DATA_CTL);

	/* Disable this input interface. */
	val = readl(dsi->regs + MCTL_MAIN_EN) & ~IF_EN(input->id);
	writel(val, dsi->regs + MCTL_MAIN_EN);
	pm_runtime_put(dsi->base.dev);
}
887
/*
 * Bring the DPHY into high-speed operation: power-cycle it, program the PSM
 * divider, clock-lane mapping and PLL, then start the TX state machine and
 * release the lane resets once the PLL has locked.
 */
static void cdns_dsi_hs_init(struct cdns_dsi *dsi,
			     const struct cdns_dphy_cfg *dphy_cfg)
{
	u32 status;

	/*
	 * Power all internal DPHY blocks down and maintain their reset line
	 * asserted before changing the DPHY config.
	 */
	writel(DPHY_CMN_PSO | DPHY_PLL_PSO | DPHY_ALL_D_PDN | DPHY_C_PDN |
	       DPHY_CMN_PDN | DPHY_PLL_PDN,
	       dsi->regs + MCTL_DPHY_CFG0);

	/*
	 * Configure the internal PSM clk divider so that the DPHY has a
	 * 1MHz clk (or something close).
	 */
	WARN_ON_ONCE(cdns_dphy_setup_psm(dsi->dphy));

	/*
	 * Configure attach clk lanes to data lanes: the DPHY has 2 clk lanes
	 * and 8 data lanes, each clk lane can be attache different set of
	 * data lanes. The 2 groups are named 'left' and 'right', so here we
	 * just say that we want the 'left' clk lane to drive the 'left' data
	 * lanes.
	 */
	cdns_dphy_set_clk_lane_cfg(dsi->dphy, DPHY_CLK_CFG_LEFT_DRIVES_LEFT);

	/*
	 * Configure the DPHY PLL that will be used to generate the TX byte
	 * clk.
	 */
	cdns_dphy_set_pll_cfg(dsi->dphy, dphy_cfg);

	/* Start TX state machine. */
	writel(DPHY_CMN_SSM_EN | DPHY_CMN_TX_MODE_EN,
	       dsi->dphy->regs + DPHY_CMN_SSM);

	/* Activate the PLL and wait until it's locked. */
	writel(PLL_LOCKED, dsi->regs + MCTL_MAIN_STS_CLR);
	writel(DPHY_CMN_PSO | DPHY_ALL_D_PDN | DPHY_C_PDN | DPHY_CMN_PDN,
	       dsi->regs + MCTL_DPHY_CFG0);
	WARN_ON_ONCE(readl_poll_timeout(dsi->regs + MCTL_MAIN_STS, status,
					status & PLL_LOCKED, 100, 100));
	/* De-assert data and clock reset lines. */
	writel(DPHY_CMN_PSO | DPHY_ALL_D_PDN | DPHY_C_PDN | DPHY_CMN_PDN |
	       DPHY_D_RSTB(dphy_cfg->nlanes) | DPHY_C_RSTB,
	       dsi->regs + MCTL_DPHY_CFG0);
}
937
/*
 * One-time link bring-up: enable the data and clock lanes, program the ULP
 * wakeup time and start the PLL and lanes. A no-op once done; the flag is
 * cleared again by cdns_dsi_suspend().
 */
static void cdns_dsi_init_link(struct cdns_dsi *dsi)
{
	struct cdns_dsi_output *output = &dsi->output;
	unsigned long sysclk_period, ulpout;
	u32 val;
	int i;

	if (dsi->link_initialized)
		return;

	val = 0;
	/* NOTE(review): loop starts at 1 — lane 0 appears to be implicitly
	 * enabled by the hardware; confirm against the datasheet.
	 */
	for (i = 1; i < output->dev->lanes; i++)
		val |= DATA_LANE_EN(i);

	if (!(output->dev->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
		val |= CLK_CONTINUOUS;

	writel(val, dsi->regs + MCTL_MAIN_PHY_CTL);

	/* ULPOUT should be set to 1ms and is expressed in sysclk cycles. */
	sysclk_period = NSEC_PER_SEC / clk_get_rate(dsi->dsi_sys_clk);
	ulpout = DIV_ROUND_UP(NSEC_PER_MSEC, sysclk_period);
	writel(CLK_LANE_ULPOUT_TIME(ulpout) | DATA_LANE_ULPOUT_TIME(ulpout),
	       dsi->regs + MCTL_ULPOUT_TIME);

	writel(LINK_EN, dsi->regs + MCTL_MAIN_DATA_CTL);

	/* Start the PLL, the clock lane and all the data lanes. */
	val = CLK_LANE_EN | PLL_START;
	for (i = 0; i < output->dev->lanes; i++)
		val |= DATA_LANE_START(i);

	writel(val, dsi->regs + MCTL_MAIN_EN);

	dsi->link_initialized = true;
}
973
974static void cdns_dsi_bridge_enable(struct drm_bridge *bridge)
975{
976 struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
977 struct cdns_dsi *dsi = input_to_dsi(input);
978 struct cdns_dsi_output *output = &dsi->output;
979 struct drm_display_mode *mode;
980 struct cdns_dphy_cfg dphy_cfg;
981 unsigned long tx_byte_period;
982 struct cdns_dsi_cfg dsi_cfg;
983 u32 tmp, reg_wakeup, div;
984 int bpp, nlanes;
985
986 if (WARN_ON(pm_runtime_get_sync(dsi->base.dev) < 0))
987 return;
988
989 mode = &bridge->encoder->crtc->state->adjusted_mode;
990 bpp = mipi_dsi_pixel_format_to_bpp(output->dev->format);
991 nlanes = output->dev->lanes;
992
993 WARN_ON_ONCE(cdns_dsi_mode2cfg(dsi, mode, &dsi_cfg, &dphy_cfg, false));
994
995 cdns_dsi_hs_init(dsi, &dphy_cfg);
996 cdns_dsi_init_link(dsi);
997
998 writel(HBP_LEN(dsi_cfg.hbp) | HSA_LEN(dsi_cfg.hsa),
999 dsi->regs + VID_HSIZE1);
1000 writel(HFP_LEN(dsi_cfg.hfp) | HACT_LEN(dsi_cfg.hact),
1001 dsi->regs + VID_HSIZE2);
1002
1003 writel(VBP_LEN(mode->crtc_vtotal - mode->crtc_vsync_end - 1) |
1004 VFP_LEN(mode->crtc_vsync_start - mode->crtc_vdisplay) |
1005 VSA_LEN(mode->crtc_vsync_end - mode->crtc_vsync_start + 1),
1006 dsi->regs + VID_VSIZE1);
1007 writel(mode->crtc_vdisplay, dsi->regs + VID_VSIZE2);
1008
1009 tmp = dsi_cfg.htotal -
1010 (dsi_cfg.hsa + DSI_BLANKING_FRAME_OVERHEAD +
1011 DSI_HSA_FRAME_OVERHEAD);
1012 writel(BLK_LINE_PULSE_PKT_LEN(tmp), dsi->regs + VID_BLKSIZE2);
1013 if (output->dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
1014 writel(MAX_LINE_LIMIT(tmp - DSI_NULL_FRAME_OVERHEAD),
1015 dsi->regs + VID_VCA_SETTING2);
1016
1017 tmp = dsi_cfg.htotal -
1018 (DSI_HSS_VSS_VSE_FRAME_OVERHEAD + DSI_BLANKING_FRAME_OVERHEAD);
1019 writel(BLK_LINE_EVENT_PKT_LEN(tmp), dsi->regs + VID_BLKSIZE1);
1020 if (!(output->dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE))
1021 writel(MAX_LINE_LIMIT(tmp - DSI_NULL_FRAME_OVERHEAD),
1022 dsi->regs + VID_VCA_SETTING2);
1023
1024 tmp = DIV_ROUND_UP(dsi_cfg.htotal, nlanes) -
1025 DIV_ROUND_UP(dsi_cfg.hsa, nlanes);
1026
1027 if (!(output->dev->mode_flags & MIPI_DSI_MODE_EOT_PACKET))
1028 tmp -= DIV_ROUND_UP(DSI_EOT_PKT_SIZE, nlanes);
1029
1030 tx_byte_period = DIV_ROUND_DOWN_ULL((u64)NSEC_PER_SEC * 8,
1031 dphy_cfg.lane_bps);
1032 reg_wakeup = cdns_dphy_get_wakeup_time_ns(dsi->dphy) /
1033 tx_byte_period;
1034 writel(REG_WAKEUP_TIME(reg_wakeup) | REG_LINE_DURATION(tmp),
1035 dsi->regs + VID_DPHY_TIME);
1036
1037 /*
1038 * HSTX and LPRX timeouts are both expressed in TX byte clk cycles and
1039 * both should be set to at least the time it takes to transmit a
1040 * frame.
1041 */
1042 tmp = NSEC_PER_SEC / drm_mode_vrefresh(mode);
1043 tmp /= tx_byte_period;
1044
1045 for (div = 0; div <= CLK_DIV_MAX; div++) {
1046 if (tmp <= HSTX_TIMEOUT_MAX)
1047 break;
1048
1049 tmp >>= 1;
1050 }
1051
1052 if (tmp > HSTX_TIMEOUT_MAX)
1053 tmp = HSTX_TIMEOUT_MAX;
1054
1055 writel(CLK_DIV(div) | HSTX_TIMEOUT(tmp),
1056 dsi->regs + MCTL_DPHY_TIMEOUT1);
1057
1058 writel(LPRX_TIMEOUT(tmp), dsi->regs + MCTL_DPHY_TIMEOUT2);
1059
1060 if (output->dev->mode_flags & MIPI_DSI_MODE_VIDEO) {
1061 switch (output->dev->format) {
1062 case MIPI_DSI_FMT_RGB888:
1063 tmp = VID_PIXEL_MODE_RGB888 |
1064 VID_DATATYPE(MIPI_DSI_PACKED_PIXEL_STREAM_24);
1065 break;
1066
1067 case MIPI_DSI_FMT_RGB666:
1068 tmp = VID_PIXEL_MODE_RGB666 |
1069 VID_DATATYPE(MIPI_DSI_PIXEL_STREAM_3BYTE_18);
1070 break;
1071
1072 case MIPI_DSI_FMT_RGB666_PACKED:
1073 tmp = VID_PIXEL_MODE_RGB666_PACKED |
1074 VID_DATATYPE(MIPI_DSI_PACKED_PIXEL_STREAM_18);
1075 break;
1076
1077 case MIPI_DSI_FMT_RGB565:
1078 tmp = VID_PIXEL_MODE_RGB565 |
1079 VID_DATATYPE(MIPI_DSI_PACKED_PIXEL_STREAM_16);
1080 break;
1081
1082 default:
1083 dev_err(dsi->base.dev, "Unsupported DSI format\n");
1084 return;
1085 }
1086
1087 if (output->dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
1088 tmp |= SYNC_PULSE_ACTIVE | SYNC_PULSE_HORIZONTAL;
1089
1090 tmp |= REG_BLKLINE_MODE(REG_BLK_MODE_BLANKING_PKT) |
1091 REG_BLKEOL_MODE(REG_BLK_MODE_BLANKING_PKT) |
1092 RECOVERY_MODE(RECOVERY_MODE_NEXT_HSYNC) |
1093 VID_IGNORE_MISS_VSYNC;
1094
1095 writel(tmp, dsi->regs + VID_MAIN_CTL);
1096 }
1097
1098 tmp = readl(dsi->regs + MCTL_MAIN_DATA_CTL);
1099 tmp &= ~(IF_VID_SELECT_MASK | HOST_EOT_GEN | IF_VID_MODE);
1100
1101 if (!(output->dev->mode_flags & MIPI_DSI_MODE_EOT_PACKET))
1102 tmp |= HOST_EOT_GEN;
1103
1104 if (output->dev->mode_flags & MIPI_DSI_MODE_VIDEO)
1105 tmp |= IF_VID_MODE | IF_VID_SELECT(input->id) | VID_EN;
1106
1107 writel(tmp, dsi->regs + MCTL_MAIN_DATA_CTL);
1108
1109 tmp = readl(dsi->regs + MCTL_MAIN_EN) | IF_EN(input->id);
1110 writel(tmp, dsi->regs + MCTL_MAIN_EN);
1111}
1112
/* DRM bridge callbacks for the DSI input bridge. */
static const struct drm_bridge_funcs cdns_dsi_bridge_funcs = {
	.attach = cdns_dsi_bridge_attach,
	.mode_valid = cdns_dsi_bridge_mode_valid,
	.disable = cdns_dsi_bridge_disable,
	.enable = cdns_dsi_bridge_enable,
};
1119
1120static int cdns_dsi_attach(struct mipi_dsi_host *host,
1121 struct mipi_dsi_device *dev)
1122{
1123 struct cdns_dsi *dsi = to_cdns_dsi(host);
1124 struct cdns_dsi_output *output = &dsi->output;
1125 struct cdns_dsi_input *input = &dsi->input;
1126 struct drm_bridge *bridge;
1127 struct drm_panel *panel;
1128 struct device_node *np;
1129 int ret;
1130
1131 /*
1132 * We currently do not support connecting several DSI devices to the
1133 * same host. In order to support that we'd need the DRM bridge
1134 * framework to allow dynamic reconfiguration of the bridge chain.
1135 */
1136 if (output->dev)
1137 return -EBUSY;
1138
1139 /* We do not support burst mode yet. */
1140 if (dev->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
1141 return -ENOTSUPP;
1142
1143 /*
1144 * The host <-> device link might be described using an OF-graph
1145 * representation, in this case we extract the device of_node from
1146 * this representation, otherwise we use dsidev->dev.of_node which
1147 * should have been filled by the core.
1148 */
1149 np = of_graph_get_remote_node(dsi->base.dev->of_node, DSI_OUTPUT_PORT,
1150 dev->channel);
1151 if (!np)
1152 np = of_node_get(dev->dev.of_node);
1153
1154 panel = of_drm_find_panel(np);
1155 if (panel) {
1156 bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_DSI);
1157 } else {
1158 bridge = of_drm_find_bridge(dev->dev.of_node);
1159 if (!bridge)
1160 bridge = ERR_PTR(-EINVAL);
1161 }
1162
1163 of_node_put(np);
1164
1165 if (IS_ERR(bridge)) {
1166 ret = PTR_ERR(bridge);
1167 dev_err(host->dev, "failed to add DSI device %s (err = %d)",
1168 dev->name, ret);
1169 return ret;
1170 }
1171
1172 output->dev = dev;
1173 output->bridge = bridge;
1174 output->panel = panel;
1175
1176 /*
1177 * The DSI output has been properly configured, we can now safely
1178 * register the input to the bridge framework so that it can take place
1179 * in a display pipeline.
1180 */
1181 drm_bridge_add(&input->bridge);
1182
1183 return 0;
1184}
1185
/*
 * mipi_dsi_host .detach hook: unregister the input bridge and, when the
 * peripheral was a panel, remove the panel-bridge wrapper created in
 * cdns_dsi_attach().
 */
static int cdns_dsi_detach(struct mipi_dsi_host *host,
			   struct mipi_dsi_device *dev)
{
	struct cdns_dsi *dsi = to_cdns_dsi(host);
	struct cdns_dsi_output *output = &dsi->output;
	struct cdns_dsi_input *input = &dsi->input;

	drm_bridge_remove(&input->bridge);
	if (output->panel)
		drm_panel_bridge_remove(output->bridge);

	return 0;
}
1199
1200static irqreturn_t cdns_dsi_interrupt(int irq, void *data)
1201{
1202 struct cdns_dsi *dsi = data;
1203 irqreturn_t ret = IRQ_NONE;
1204 u32 flag, ctl;
1205
1206 flag = readl(dsi->regs + DIRECT_CMD_STS_FLAG);
1207 if (flag) {
1208 ctl = readl(dsi->regs + DIRECT_CMD_STS_CTL);
1209 ctl &= ~flag;
1210 writel(ctl, dsi->regs + DIRECT_CMD_STS_CTL);
1211 complete(&dsi->direct_cmd_comp);
1212 ret = IRQ_HANDLED;
1213 }
1214
1215 return ret;
1216}
1217
1218static ssize_t cdns_dsi_transfer(struct mipi_dsi_host *host,
1219 const struct mipi_dsi_msg *msg)
1220{
1221 struct cdns_dsi *dsi = to_cdns_dsi(host);
1222 u32 cmd, sts, val, wait = WRITE_COMPLETED, ctl = 0;
1223 struct mipi_dsi_packet packet;
1224 int ret, i, tx_len, rx_len;
1225
1226 ret = pm_runtime_get_sync(host->dev);
1227 if (ret < 0)
1228 return ret;
1229
1230 cdns_dsi_init_link(dsi);
1231
1232 ret = mipi_dsi_create_packet(&packet, msg);
1233 if (ret)
1234 goto out;
1235
1236 tx_len = msg->tx_buf ? msg->tx_len : 0;
1237 rx_len = msg->rx_buf ? msg->rx_len : 0;
1238
1239 /* For read operations, the maximum TX len is 2. */
1240 if (rx_len && tx_len > 2) {
1241 ret = -ENOTSUPP;
1242 goto out;
1243 }
1244
1245 /* TX len is limited by the CMD FIFO depth. */
1246 if (tx_len > dsi->direct_cmd_fifo_depth) {
1247 ret = -ENOTSUPP;
1248 goto out;
1249 }
1250
1251 /* RX len is limited by the RX FIFO depth. */
1252 if (rx_len > dsi->rx_fifo_depth) {
1253 ret = -ENOTSUPP;
1254 goto out;
1255 }
1256
1257 cmd = CMD_SIZE(tx_len) | CMD_VCHAN_ID(msg->channel) |
1258 CMD_DATATYPE(msg->type);
1259
1260 if (msg->flags & MIPI_DSI_MSG_USE_LPM)
1261 cmd |= CMD_LP_EN;
1262
1263 if (mipi_dsi_packet_format_is_long(msg->type))
1264 cmd |= CMD_LONG;
1265
1266 if (rx_len) {
1267 cmd |= READ_CMD;
1268 wait = READ_COMPLETED_WITH_ERR | READ_COMPLETED;
1269 ctl = READ_EN | BTA_EN;
1270 } else if (msg->flags & MIPI_DSI_MSG_REQ_ACK) {
1271 cmd |= BTA_REQ;
1272 wait = ACK_WITH_ERR_RCVD | ACK_RCVD;
1273 ctl = BTA_EN;
1274 }
1275
1276 writel(readl(dsi->regs + MCTL_MAIN_DATA_CTL) | ctl,
1277 dsi->regs + MCTL_MAIN_DATA_CTL);
1278
1279 writel(cmd, dsi->regs + DIRECT_CMD_MAIN_SETTINGS);
1280
1281 for (i = 0; i < tx_len; i += 4) {
1282 const u8 *buf = msg->tx_buf;
1283 int j;
1284
1285 val = 0;
1286 for (j = 0; j < 4 && j + i < tx_len; j++)
1287 val |= (u32)buf[i + j] << (8 * j);
1288
1289 writel(val, dsi->regs + DIRECT_CMD_WRDATA);
1290 }
1291
1292 /* Clear status flags before sending the command. */
1293 writel(wait, dsi->regs + DIRECT_CMD_STS_CLR);
1294 writel(wait, dsi->regs + DIRECT_CMD_STS_CTL);
1295 reinit_completion(&dsi->direct_cmd_comp);
1296 writel(0, dsi->regs + DIRECT_CMD_SEND);
1297
1298 wait_for_completion_timeout(&dsi->direct_cmd_comp,
1299 msecs_to_jiffies(1000));
1300
1301 sts = readl(dsi->regs + DIRECT_CMD_STS);
1302 writel(wait, dsi->regs + DIRECT_CMD_STS_CLR);
1303 writel(0, dsi->regs + DIRECT_CMD_STS_CTL);
1304
1305 writel(readl(dsi->regs + MCTL_MAIN_DATA_CTL) & ~ctl,
1306 dsi->regs + MCTL_MAIN_DATA_CTL);
1307
1308 /* We did not receive the events we were waiting for. */
1309 if (!(sts & wait)) {
1310 ret = -ETIMEDOUT;
1311 goto out;
1312 }
1313
1314 /* 'READ' or 'WRITE with ACK' failed. */
1315 if (sts & (READ_COMPLETED_WITH_ERR | ACK_WITH_ERR_RCVD)) {
1316 ret = -EIO;
1317 goto out;
1318 }
1319
1320 for (i = 0; i < rx_len; i += 4) {
1321 u8 *buf = msg->rx_buf;
1322 int j;
1323
1324 val = readl(dsi->regs + DIRECT_CMD_RDDATA);
1325 for (j = 0; j < 4 && j + i < rx_len; j++)
1326 buf[i + j] = val >> (8 * j);
1327 }
1328
1329out:
1330 pm_runtime_put(host->dev);
1331 return ret;
1332}
1333
/* mipi_dsi_host callbacks. */
static const struct mipi_dsi_host_ops cdns_dsi_ops = {
	.attach = cdns_dsi_attach,
	.detach = cdns_dsi_detach,
	.transfer = cdns_dsi_transfer,
};
1339
1340static int cdns_dsi_resume(struct device *dev)
1341{
1342 struct cdns_dsi *dsi = dev_get_drvdata(dev);
1343
1344 reset_control_deassert(dsi->dsi_p_rst);
1345 clk_prepare_enable(dsi->dsi_p_clk);
1346 clk_prepare_enable(dsi->dsi_sys_clk);
1347 clk_prepare_enable(dsi->dphy->psm_clk);
1348 clk_prepare_enable(dsi->dphy->pll_ref_clk);
1349
1350 return 0;
1351}
1352
/*
 * Runtime/system suspend: gate all clocks (in the reverse order of
 * cdns_dsi_resume()), assert the peripheral reset and force a link re-init
 * on the next use.
 */
static int cdns_dsi_suspend(struct device *dev)
{
	struct cdns_dsi *dsi = dev_get_drvdata(dev);

	clk_disable_unprepare(dsi->dphy->pll_ref_clk);
	clk_disable_unprepare(dsi->dphy->psm_clk);
	clk_disable_unprepare(dsi->dsi_sys_clk);
	clk_disable_unprepare(dsi->dsi_p_clk);
	reset_control_assert(dsi->dsi_p_rst);
	dsi->link_initialized = false;
	return 0;
}
1365
/* Use the same suspend/resume callbacks for runtime PM and system sleep. */
static UNIVERSAL_DEV_PM_OPS(cdns_dsi_pm_ops, cdns_dsi_suspend, cdns_dsi_resume,
			    NULL);
1368
1369static unsigned long cdns_dphy_ref_get_wakeup_time_ns(struct cdns_dphy *dphy)
1370{
1371 /* Default wakeup time is 800 ns (in a simulated environment). */
1372 return 800;
1373}
1374
/*
 * Program the reference DPHY PLL dividers. The feedback divider is split
 * into the 'low' and 'high' fields expected by DPHY_CMN_FBDIV_VAL().
 */
static void cdns_dphy_ref_set_pll_cfg(struct cdns_dphy *dphy,
				      const struct cdns_dphy_cfg *cfg)
{
	u32 fbdiv_low, fbdiv_high;

	fbdiv_low = (cfg->pll_fbdiv / 4) - 2;
	fbdiv_high = cfg->pll_fbdiv - fbdiv_low - 2;

	/* Take ipdiv/opdiv from the registers rather than hardwired values. */
	writel(DPHY_CMN_IPDIV_FROM_REG | DPHY_CMN_OPDIV_FROM_REG |
	       DPHY_CMN_IPDIV(cfg->pll_ipdiv) |
	       DPHY_CMN_OPDIV(cfg->pll_opdiv),
	       dphy->regs + DPHY_CMN_OPIPDIV);
	writel(DPHY_CMN_FBDIV_FROM_REG |
	       DPHY_CMN_FBDIV_VAL(fbdiv_low, fbdiv_high),
	       dphy->regs + DPHY_CMN_FBDIV);
	/* PWM settings: fixed values from the reference implementation. */
	writel(DPHY_CMN_PWM_HIGH(6) | DPHY_CMN_PWM_LOW(0x101) |
	       DPHY_CMN_PWM_DIV(0x8),
	       dphy->regs + DPHY_CMN_PWM);
}
1394
/* Program the PSM clock divider in the reference DPHY PCS block. */
static void cdns_dphy_ref_set_psm_div(struct cdns_dphy *dphy, u8 div)
{
	writel(DPHY_PSM_CFG_FROM_REG | DPHY_PSM_CLK_DIV(div),
	       dphy->regs + DPHY_PSM_CFG);
}
1400
/*
 * This is the reference implementation of DPHY hooks. Specific integration of
 * this IP may have to re-implement some of them depending on how they decided
 * to wire things in the SoC.
 *
 * The optional probe/remove and set_clk_lane_cfg hooks are intentionally
 * left unimplemented here.
 */
static const struct cdns_dphy_ops ref_dphy_ops = {
	.get_wakeup_time_ns = cdns_dphy_ref_get_wakeup_time_ns,
	.set_pll_cfg = cdns_dphy_ref_set_pll_cfg,
	.set_psm_div = cdns_dphy_ref_set_psm_div,
};
1411
/* DPHY compatible strings, matched against the "phys" phandle target. */
static const struct of_device_id cdns_dphy_of_match[] = {
	{ .compatible = "cdns,dphy", .data = &ref_dphy_ops },
	{ /* sentinel */ },
};
1416
1417static struct cdns_dphy *cdns_dphy_probe(struct platform_device *pdev)
1418{
1419 const struct of_device_id *match;
1420 struct cdns_dphy *dphy;
1421 struct of_phandle_args args;
1422 struct resource res;
1423 int ret;
1424
1425 ret = of_parse_phandle_with_args(pdev->dev.of_node, "phys",
1426 "#phy-cells", 0, &args);
1427 if (ret)
1428 return ERR_PTR(-ENOENT);
1429
1430 match = of_match_node(cdns_dphy_of_match, args.np);
1431 if (!match || !match->data)
1432 return ERR_PTR(-EINVAL);
1433
1434 dphy = devm_kzalloc(&pdev->dev, sizeof(*dphy), GFP_KERNEL);
1435 if (!dphy)
1436 return ERR_PTR(-ENOMEM);
1437
1438 dphy->ops = match->data;
1439
1440 ret = of_address_to_resource(args.np, 0, &res);
1441 if (ret)
1442 return ERR_PTR(ret);
1443
1444 dphy->regs = devm_ioremap_resource(&pdev->dev, &res);
1445 if (IS_ERR(dphy->regs))
1446 return ERR_CAST(dphy->regs);
1447
1448 dphy->psm_clk = of_clk_get_by_name(args.np, "psm");
1449 if (IS_ERR(dphy->psm_clk))
1450 return ERR_CAST(dphy->psm_clk);
1451
1452 dphy->pll_ref_clk = of_clk_get_by_name(args.np, "pll_ref");
1453 if (IS_ERR(dphy->pll_ref_clk)) {
1454 ret = PTR_ERR(dphy->pll_ref_clk);
1455 goto err_put_psm_clk;
1456 }
1457
1458 if (dphy->ops->probe) {
1459 ret = dphy->ops->probe(dphy);
1460 if (ret)
1461 goto err_put_pll_ref_clk;
1462 }
1463
1464 return dphy;
1465
1466err_put_pll_ref_clk:
1467 clk_put(dphy->pll_ref_clk);
1468
1469err_put_psm_clk:
1470 clk_put(dphy->psm_clk);
1471
1472 return ERR_PTR(ret);
1473}
1474
/*
 * Tear down the DPHY: run the optional integration remove hook and release
 * the clock references taken in cdns_dphy_probe().
 */
static void cdns_dphy_remove(struct cdns_dphy *dphy)
{
	if (dphy->ops->remove)
		dphy->ops->remove(dphy);

	clk_put(dphy->pll_ref_clk);
	clk_put(dphy->psm_clk);
}
1483
/*
 * Probe the Cadence DSI controller: map registers, acquire clocks and the
 * optional peripheral reset, probe the associated D-PHY, sanity-check the IP
 * revision, mask all interrupts, and register the MIPI DSI host.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int cdns_dsi_drm_probe(struct platform_device *pdev)
{
	struct cdns_dsi *dsi;
	struct cdns_dsi_input *input;
	struct resource *res;
	int ret, irq;
	u32 val;

	dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
	if (!dsi)
		return -ENOMEM;

	platform_set_drvdata(pdev, dsi);

	input = &dsi->input;

	/* devm_ioremap_resource() handles a NULL res, so no explicit check. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dsi->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dsi->regs))
		return PTR_ERR(dsi->regs);

	dsi->dsi_p_clk = devm_clk_get(&pdev->dev, "dsi_p_clk");
	if (IS_ERR(dsi->dsi_p_clk))
		return PTR_ERR(dsi->dsi_p_clk);

	/* The peripheral reset is optional: absence in DT is not an error. */
	dsi->dsi_p_rst = devm_reset_control_get_optional_exclusive(&pdev->dev,
								"dsi_p_rst");
	if (IS_ERR(dsi->dsi_p_rst))
		return PTR_ERR(dsi->dsi_p_rst);

	dsi->dsi_sys_clk = devm_clk_get(&pdev->dev, "dsi_sys_clk");
	if (IS_ERR(dsi->dsi_sys_clk))
		return PTR_ERR(dsi->dsi_sys_clk);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* Not devm-managed: error paths below must call cdns_dphy_remove(). */
	dsi->dphy = cdns_dphy_probe(pdev);
	if (IS_ERR(dsi->dphy))
		return PTR_ERR(dsi->dphy);

	/* The peripheral clock must be running to touch the registers. */
	ret = clk_prepare_enable(dsi->dsi_p_clk);
	if (ret)
		goto err_remove_dphy;

	/* Make sure we are actually talking to a Cadence ("cad") IP. */
	val = readl(dsi->regs + ID_REG);
	if (REV_VENDOR_ID(val) != 0xcad) {
		dev_err(&pdev->dev, "invalid vendor id\n");
		ret = -EINVAL;
		goto err_disable_pclk;
	}

	/* FIFO depths are encoded in the IP configuration register. */
	val = readl(dsi->regs + IP_CONF);
	dsi->direct_cmd_fifo_depth = 1 << (DIRCMD_FIFO_DEPTH(val) + 2);
	dsi->rx_fifo_depth = RX_FIFO_DEPTH(val);
	init_completion(&dsi->direct_cmd_comp);

	/* Start from a known-disabled controller state. */
	writel(0, dsi->regs + MCTL_MAIN_DATA_CTL);
	writel(0, dsi->regs + MCTL_MAIN_EN);
	writel(0, dsi->regs + MCTL_MAIN_PHY_CTL);

	/*
	 * We only support the DPI input, so force input->id to
	 * CDNS_DPI_INPUT.
	 */
	input->id = CDNS_DPI_INPUT;
	input->bridge.funcs = &cdns_dsi_bridge_funcs;
	input->bridge.of_node = pdev->dev.of_node;

	/* Mask all interrupts before registering the IRQ handler. */
	writel(0, dsi->regs + MCTL_MAIN_STS_CTL);
	writel(0, dsi->regs + MCTL_DPHY_ERR_CTL1);
	writel(0, dsi->regs + CMD_MODE_STS_CTL);
	writel(0, dsi->regs + DIRECT_CMD_STS_CTL);
	writel(0, dsi->regs + DIRECT_CMD_RD_STS_CTL);
	writel(0, dsi->regs + VID_MODE_STS_CTL);
	writel(0, dsi->regs + TVG_STS_CTL);
	writel(0, dsi->regs + DPI_IRQ_EN);
	ret = devm_request_irq(&pdev->dev, irq, cdns_dsi_interrupt, 0,
			       dev_name(&pdev->dev), dsi);
	if (ret)
		goto err_disable_pclk;

	pm_runtime_enable(&pdev->dev);
	dsi->base.dev = &pdev->dev;
	dsi->base.ops = &cdns_dsi_ops;

	ret = mipi_dsi_host_register(&dsi->base);
	if (ret)
		goto err_disable_runtime_pm;

	/* Register access done; pclk is re-enabled on demand later on. */
	clk_disable_unprepare(dsi->dsi_p_clk);

	return 0;

err_disable_runtime_pm:
	pm_runtime_disable(&pdev->dev);

err_disable_pclk:
	clk_disable_unprepare(dsi->dsi_p_clk);

err_remove_dphy:
	cdns_dphy_remove(dsi->dphy);

	return ret;
}
1591
1592static int cdns_dsi_drm_remove(struct platform_device *pdev)
1593{
1594 struct cdns_dsi *dsi = platform_get_drvdata(pdev);
1595
1596 mipi_dsi_host_unregister(&dsi->base);
1597 pm_runtime_disable(&pdev->dev);
1598 cdns_dphy_remove(dsi->dphy);
1599
1600 return 0;
1601}
1602
1603static const struct of_device_id cdns_dsi_of_match[] = {
1604 { .compatible = "cdns,dsi" },
1605 { },
1606};
1607
/*
 * Platform driver glue: binds against the "cdns,dsi" DT compatible (see
 * cdns_dsi_of_match) and wires in system PM via cdns_dsi_pm_ops.
 */
static struct platform_driver cdns_dsi_platform_driver = {
	.probe = cdns_dsi_drm_probe,
	.remove = cdns_dsi_drm_remove,
	.driver = {
		.name = "cdns-dsi",
		.of_match_table = cdns_dsi_of_match,
		.pm = &cdns_dsi_pm_ops,
	},
};
module_platform_driver(cdns_dsi_platform_driver);

MODULE_AUTHOR("Boris Brezillon <boris.brezillon@bootlin.com>");
MODULE_DESCRIPTION("Cadence DSI driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:cdns-dsi");
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
index 3b7e5c59a5e9..8f9c8a6b46de 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
@@ -152,7 +152,6 @@ static struct platform_driver snd_dw_hdmi_driver = {
152 .remove = snd_dw_hdmi_remove, 152 .remove = snd_dw_hdmi_remove,
153 .driver = { 153 .driver = {
154 .name = DRIVER_NAME, 154 .name = DRIVER_NAME,
155 .owner = THIS_MODULE,
156 }, 155 },
157}; 156};
158module_platform_driver(snd_dw_hdmi_driver); 157module_platform_driver(snd_dw_hdmi_driver);
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index 226171a3ece1..fd7999642cf8 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -1,12 +1,8 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * Copyright (c) 2016, Fuzhou Rockchip Electronics Co., Ltd 3 * Copyright (c) 2016, Fuzhou Rockchip Electronics Co., Ltd
3 * Copyright (C) STMicroelectronics SA 2017 4 * Copyright (C) STMicroelectronics SA 2017
4 * 5 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * Modified by Philippe Cornu <philippe.cornu@st.com> 6 * Modified by Philippe Cornu <philippe.cornu@st.com>
11 * This generic Synopsys DesignWare MIPI DSI host driver is based on the 7 * This generic Synopsys DesignWare MIPI DSI host driver is based on the
12 * Rockchip version from rockchip/dw-mipi-dsi.c with phy & bridge APIs. 8 * Rockchip version from rockchip/dw-mipi-dsi.c with phy & bridge APIs.
@@ -775,20 +771,20 @@ static void dw_mipi_dsi_bridge_mode_set(struct drm_bridge *bridge,
775 771
776 clk_prepare_enable(dsi->pclk); 772 clk_prepare_enable(dsi->pclk);
777 773
778 ret = phy_ops->get_lane_mbps(priv_data, mode, dsi->mode_flags, 774 ret = phy_ops->get_lane_mbps(priv_data, adjusted_mode, dsi->mode_flags,
779 dsi->lanes, dsi->format, &dsi->lane_mbps); 775 dsi->lanes, dsi->format, &dsi->lane_mbps);
780 if (ret) 776 if (ret)
781 DRM_DEBUG_DRIVER("Phy get_lane_mbps() failed\n"); 777 DRM_DEBUG_DRIVER("Phy get_lane_mbps() failed\n");
782 778
783 pm_runtime_get_sync(dsi->dev); 779 pm_runtime_get_sync(dsi->dev);
784 dw_mipi_dsi_init(dsi); 780 dw_mipi_dsi_init(dsi);
785 dw_mipi_dsi_dpi_config(dsi, mode); 781 dw_mipi_dsi_dpi_config(dsi, adjusted_mode);
786 dw_mipi_dsi_packet_handler_config(dsi); 782 dw_mipi_dsi_packet_handler_config(dsi);
787 dw_mipi_dsi_video_mode_config(dsi); 783 dw_mipi_dsi_video_mode_config(dsi);
788 dw_mipi_dsi_video_packet_config(dsi, mode); 784 dw_mipi_dsi_video_packet_config(dsi, adjusted_mode);
789 dw_mipi_dsi_command_mode_config(dsi); 785 dw_mipi_dsi_command_mode_config(dsi);
790 dw_mipi_dsi_line_timer_config(dsi, mode); 786 dw_mipi_dsi_line_timer_config(dsi, adjusted_mode);
791 dw_mipi_dsi_vertical_timing_config(dsi, mode); 787 dw_mipi_dsi_vertical_timing_config(dsi, adjusted_mode);
792 788
793 dw_mipi_dsi_dphy_init(dsi); 789 dw_mipi_dsi_dphy_init(dsi);
794 dw_mipi_dsi_dphy_timing_config(dsi); 790 dw_mipi_dsi_dphy_timing_config(dsi);
@@ -802,7 +798,7 @@ static void dw_mipi_dsi_bridge_mode_set(struct drm_bridge *bridge,
802 798
803 dw_mipi_dsi_dphy_enable(dsi); 799 dw_mipi_dsi_dphy_enable(dsi);
804 800
805 dw_mipi_dsi_wait_for_two_frames(mode); 801 dw_mipi_dsi_wait_for_two_frames(adjusted_mode);
806 802
807 /* Switch to cmd mode for panel-bridge pre_enable & panel prepare */ 803 /* Switch to cmd mode for panel-bridge pre_enable & panel prepare */
808 dw_mipi_dsi_set_mode(dsi, 0); 804 dw_mipi_dsi_set_mode(dsi, 0);
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 08ab7d6aea65..0fd9cf27542c 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -1102,7 +1102,7 @@ static bool tc_bridge_mode_fixup(struct drm_bridge *bridge,
1102 return true; 1102 return true;
1103} 1103}
1104 1104
1105static int tc_connector_mode_valid(struct drm_connector *connector, 1105static enum drm_mode_status tc_connector_mode_valid(struct drm_connector *connector,
1106 struct drm_display_mode *mode) 1106 struct drm_display_mode *mode)
1107{ 1107{
1108 /* DPI interface clock limitation: upto 154 MHz */ 1108 /* DPI interface clock limitation: upto 154 MHz */
diff --git a/drivers/gpu/drm/bridge/thc63lvd1024.c b/drivers/gpu/drm/bridge/thc63lvd1024.c
new file mode 100644
index 000000000000..c8b9edd5a7f4
--- /dev/null
+++ b/drivers/gpu/drm/bridge/thc63lvd1024.c
@@ -0,0 +1,206 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * THC63LVD1024 LVDS to parallel data DRM bridge driver.
4 *
5 * Copyright (C) 2018 Jacopo Mondi <jacopo+renesas@jmondi.org>
6 */
7
8#include <drm/drmP.h>
9#include <drm/drm_bridge.h>
10#include <drm/drm_panel.h>
11
12#include <linux/gpio/consumer.h>
13#include <linux/of_graph.h>
14#include <linux/regulator/consumer.h>
15#include <linux/slab.h>
16
17enum thc63_ports {
18 THC63_LVDS_IN0,
19 THC63_LVDS_IN1,
20 THC63_RGB_OUT0,
21 THC63_RGB_OUT1,
22};
23
24struct thc63_dev {
25 struct device *dev;
26
27 struct regulator *vcc;
28
29 struct gpio_desc *pdwn;
30 struct gpio_desc *oe;
31
32 struct drm_bridge bridge;
33 struct drm_bridge *next;
34};
35
36static inline struct thc63_dev *to_thc63(struct drm_bridge *bridge)
37{
38 return container_of(bridge, struct thc63_dev, bridge);
39}
40
41static int thc63_attach(struct drm_bridge *bridge)
42{
43 struct thc63_dev *thc63 = to_thc63(bridge);
44
45 return drm_bridge_attach(bridge->encoder, thc63->next, bridge);
46}
47
48static void thc63_enable(struct drm_bridge *bridge)
49{
50 struct thc63_dev *thc63 = to_thc63(bridge);
51 int ret;
52
53 ret = regulator_enable(thc63->vcc);
54 if (ret) {
55 dev_err(thc63->dev,
56 "Failed to enable regulator \"vcc\": %d\n", ret);
57 return;
58 }
59
60 gpiod_set_value(thc63->pdwn, 0);
61 gpiod_set_value(thc63->oe, 1);
62}
63
64static void thc63_disable(struct drm_bridge *bridge)
65{
66 struct thc63_dev *thc63 = to_thc63(bridge);
67 int ret;
68
69 gpiod_set_value(thc63->oe, 0);
70 gpiod_set_value(thc63->pdwn, 1);
71
72 ret = regulator_disable(thc63->vcc);
73 if (ret)
74 dev_err(thc63->dev,
75 "Failed to disable regulator \"vcc\": %d\n", ret);
76}
77
78static const struct drm_bridge_funcs thc63_bridge_func = {
79 .attach = thc63_attach,
80 .enable = thc63_enable,
81 .disable = thc63_disable,
82};
83
84static int thc63_parse_dt(struct thc63_dev *thc63)
85{
86 struct device_node *thc63_out;
87 struct device_node *remote;
88
89 thc63_out = of_graph_get_endpoint_by_regs(thc63->dev->of_node,
90 THC63_RGB_OUT0, -1);
91 if (!thc63_out) {
92 dev_err(thc63->dev, "Missing endpoint in port@%u\n",
93 THC63_RGB_OUT0);
94 return -ENODEV;
95 }
96
97 remote = of_graph_get_remote_port_parent(thc63_out);
98 of_node_put(thc63_out);
99 if (!remote) {
100 dev_err(thc63->dev, "Endpoint in port@%u unconnected\n",
101 THC63_RGB_OUT0);
102 return -ENODEV;
103 }
104
105 if (!of_device_is_available(remote)) {
106 dev_err(thc63->dev, "port@%u remote endpoint is disabled\n",
107 THC63_RGB_OUT0);
108 of_node_put(remote);
109 return -ENODEV;
110 }
111
112 thc63->next = of_drm_find_bridge(remote);
113 of_node_put(remote);
114 if (!thc63->next)
115 return -EPROBE_DEFER;
116
117 return 0;
118}
119
120static int thc63_gpio_init(struct thc63_dev *thc63)
121{
122 thc63->oe = devm_gpiod_get_optional(thc63->dev, "oe", GPIOD_OUT_LOW);
123 if (IS_ERR(thc63->oe)) {
124 dev_err(thc63->dev, "Unable to get \"oe-gpios\": %ld\n",
125 PTR_ERR(thc63->oe));
126 return PTR_ERR(thc63->oe);
127 }
128
129 thc63->pdwn = devm_gpiod_get_optional(thc63->dev, "powerdown",
130 GPIOD_OUT_HIGH);
131 if (IS_ERR(thc63->pdwn)) {
132 dev_err(thc63->dev, "Unable to get \"powerdown-gpios\": %ld\n",
133 PTR_ERR(thc63->pdwn));
134 return PTR_ERR(thc63->pdwn);
135 }
136
137 return 0;
138}
139
140static int thc63_probe(struct platform_device *pdev)
141{
142 struct thc63_dev *thc63;
143 int ret;
144
145 thc63 = devm_kzalloc(&pdev->dev, sizeof(*thc63), GFP_KERNEL);
146 if (!thc63)
147 return -ENOMEM;
148
149 thc63->dev = &pdev->dev;
150 platform_set_drvdata(pdev, thc63);
151
152 thc63->vcc = devm_regulator_get_optional(thc63->dev, "vcc");
153 if (IS_ERR(thc63->vcc)) {
154 if (PTR_ERR(thc63->vcc) == -EPROBE_DEFER)
155 return -EPROBE_DEFER;
156
157 dev_err(thc63->dev, "Unable to get \"vcc\" supply: %ld\n",
158 PTR_ERR(thc63->vcc));
159 return PTR_ERR(thc63->vcc);
160 }
161
162 ret = thc63_gpio_init(thc63);
163 if (ret)
164 return ret;
165
166 ret = thc63_parse_dt(thc63);
167 if (ret)
168 return ret;
169
170 thc63->bridge.driver_private = thc63;
171 thc63->bridge.of_node = pdev->dev.of_node;
172 thc63->bridge.funcs = &thc63_bridge_func;
173
174 drm_bridge_add(&thc63->bridge);
175
176 return 0;
177}
178
179static int thc63_remove(struct platform_device *pdev)
180{
181 struct thc63_dev *thc63 = platform_get_drvdata(pdev);
182
183 drm_bridge_remove(&thc63->bridge);
184
185 return 0;
186}
187
188static const struct of_device_id thc63_match[] = {
189 { .compatible = "thine,thc63lvd1024", },
190 { },
191};
192MODULE_DEVICE_TABLE(of, thc63_match);
193
194static struct platform_driver thc63_driver = {
195 .probe = thc63_probe,
196 .remove = thc63_remove,
197 .driver = {
198 .name = "thc63lvd1024",
199 .of_match_table = thc63_match,
200 },
201};
202module_platform_driver(thc63_driver);
203
204MODULE_AUTHOR("Jacopo Mondi <jacopo@jmondi.org>");
205MODULE_DESCRIPTION("Thine THC63LVD1024 LVDS decoder DRM bridge driver");
206MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 7d25c42f22db..9bdd67781917 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -783,6 +783,8 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
783 state->src_w = val; 783 state->src_w = val;
784 } else if (property == config->prop_src_h) { 784 } else if (property == config->prop_src_h) {
785 state->src_h = val; 785 state->src_h = val;
786 } else if (property == plane->alpha_property) {
787 state->alpha = val;
786 } else if (property == plane->rotation_property) { 788 } else if (property == plane->rotation_property) {
787 if (!is_power_of_2(val & DRM_MODE_ROTATE_MASK)) 789 if (!is_power_of_2(val & DRM_MODE_ROTATE_MASK))
788 return -EINVAL; 790 return -EINVAL;
@@ -848,6 +850,8 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
848 *val = state->src_w; 850 *val = state->src_w;
849 } else if (property == config->prop_src_h) { 851 } else if (property == config->prop_src_h) {
850 *val = state->src_h; 852 *val = state->src_h;
853 } else if (property == plane->alpha_property) {
854 *val = state->alpha;
851 } else if (property == plane->rotation_property) { 855 } else if (property == plane->rotation_property) {
852 *val = state->rotation; 856 *val = state->rotation;
853 } else if (property == plane->zpos_property) { 857 } else if (property == plane->zpos_property) {
@@ -1492,6 +1496,14 @@ EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);
1492 * Otherwise, if &drm_plane_state.fence is not set this function we just set it 1496 * Otherwise, if &drm_plane_state.fence is not set this function we just set it
1493 * with the received implicit fence. In both cases this function consumes a 1497 * with the received implicit fence. In both cases this function consumes a
1494 * reference for @fence. 1498 * reference for @fence.
1499 *
1500 * This way explicit fencing can be used to overrule implicit fencing, which is
1501 * important to make explicit fencing use-cases work: One example is using one
1502 * buffer for 2 screens with different refresh rates. Implicit fencing will
1503 * clamp rendering to the refresh rate of the slower screen, whereas explicit
1504 * fence allows 2 independent render and display loops on a single buffer. If a
1505 * driver obeys both implicit and explicit fences for plane updates, then
1506 * it will break all the benefits of explicit fencing.
1495 */ 1507 */
1496void 1508void
1497drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state, 1509drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index c35654591c12..9cb2209f6fc8 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -875,6 +875,11 @@ EXPORT_SYMBOL(drm_atomic_helper_check_planes);
875 * functions depend upon an updated adjusted_mode.clock to e.g. properly compute 875 * functions depend upon an updated adjusted_mode.clock to e.g. properly compute
876 * watermarks. 876 * watermarks.
877 * 877 *
878 * Note that zpos normalization will add all enabled planes to the state, which
879 * might not be desired for some drivers.
880 * For example, enabling/disabling a cursor plane which has a fixed zpos value
881 * would force all other enabled planes to be part of the state change.
882 *
878 * RETURNS: 883 * RETURNS:
879 * Zero for success or -errno 884 * Zero for success or -errno
880 */ 885 */
@@ -887,6 +892,12 @@ int drm_atomic_helper_check(struct drm_device *dev,
887 if (ret) 892 if (ret)
888 return ret; 893 return ret;
889 894
895 if (dev->mode_config.normalize_zpos) {
896 ret = drm_atomic_normalize_zpos(dev, state);
897 if (ret)
898 return ret;
899 }
900
890 ret = drm_atomic_helper_check_planes(dev, state); 901 ret = drm_atomic_helper_check_planes(dev, state);
891 if (ret) 902 if (ret)
892 return ret; 903 return ret;
@@ -1561,6 +1572,17 @@ void drm_atomic_helper_async_commit(struct drm_device *dev,
1561 for_each_new_plane_in_state(state, plane, plane_state, i) { 1572 for_each_new_plane_in_state(state, plane, plane_state, i) {
1562 funcs = plane->helper_private; 1573 funcs = plane->helper_private;
1563 funcs->atomic_async_update(plane, plane_state); 1574 funcs->atomic_async_update(plane, plane_state);
1575
1576 /*
1577 * ->atomic_async_update() is supposed to update the
1578 * plane->state in-place, make sure at least common
1579 * properties have been properly updated.
1580 */
1581 WARN_ON_ONCE(plane->state->fb != plane_state->fb);
1582 WARN_ON_ONCE(plane->state->crtc_x != plane_state->crtc_x);
1583 WARN_ON_ONCE(plane->state->crtc_y != plane_state->crtc_y);
1584 WARN_ON_ONCE(plane->state->src_x != plane_state->src_x);
1585 WARN_ON_ONCE(plane->state->src_y != plane_state->src_y);
1564 } 1586 }
1565} 1587}
1566EXPORT_SYMBOL(drm_atomic_helper_async_commit); 1588EXPORT_SYMBOL(drm_atomic_helper_async_commit);
@@ -2659,7 +2681,7 @@ int drm_atomic_helper_disable_plane(struct drm_plane *plane,
2659 goto fail; 2681 goto fail;
2660 } 2682 }
2661 2683
2662 if (plane_state->crtc && (plane == plane->crtc->cursor)) 2684 if (plane_state->crtc && plane_state->crtc->cursor == plane)
2663 plane_state->state->legacy_cursor_update = true; 2685 plane_state->state->legacy_cursor_update = true;
2664 2686
2665 ret = __drm_atomic_helper_disable_plane(plane, plane_state); 2687 ret = __drm_atomic_helper_disable_plane(plane, plane_state);
@@ -2881,31 +2903,9 @@ commit:
2881 return 0; 2903 return 0;
2882} 2904}
2883 2905
2884/** 2906static int __drm_atomic_helper_disable_all(struct drm_device *dev,
2885 * drm_atomic_helper_disable_all - disable all currently active outputs 2907 struct drm_modeset_acquire_ctx *ctx,
2886 * @dev: DRM device 2908 bool clean_old_fbs)
2887 * @ctx: lock acquisition context
2888 *
2889 * Loops through all connectors, finding those that aren't turned off and then
2890 * turns them off by setting their DPMS mode to OFF and deactivating the CRTC
2891 * that they are connected to.
2892 *
2893 * This is used for example in suspend/resume to disable all currently active
2894 * functions when suspending. If you just want to shut down everything at e.g.
2895 * driver unload, look at drm_atomic_helper_shutdown().
2896 *
2897 * Note that if callers haven't already acquired all modeset locks this might
2898 * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
2899 *
2900 * Returns:
2901 * 0 on success or a negative error code on failure.
2902 *
2903 * See also:
2904 * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and
2905 * drm_atomic_helper_shutdown().
2906 */
2907int drm_atomic_helper_disable_all(struct drm_device *dev,
2908 struct drm_modeset_acquire_ctx *ctx)
2909{ 2909{
2910 struct drm_atomic_state *state; 2910 struct drm_atomic_state *state;
2911 struct drm_connector_state *conn_state; 2911 struct drm_connector_state *conn_state;
@@ -2957,8 +2957,11 @@ int drm_atomic_helper_disable_all(struct drm_device *dev,
2957 goto free; 2957 goto free;
2958 2958
2959 drm_atomic_set_fb_for_plane(plane_state, NULL); 2959 drm_atomic_set_fb_for_plane(plane_state, NULL);
2960 plane_mask |= BIT(drm_plane_index(plane)); 2960
2961 plane->old_fb = plane->fb; 2961 if (clean_old_fbs) {
2962 plane->old_fb = plane->fb;
2963 plane_mask |= BIT(drm_plane_index(plane));
2964 }
2962 } 2965 }
2963 2966
2964 ret = drm_atomic_commit(state); 2967 ret = drm_atomic_commit(state);
@@ -2969,6 +2972,34 @@ free:
2969 return ret; 2972 return ret;
2970} 2973}
2971 2974
2975/**
2976 * drm_atomic_helper_disable_all - disable all currently active outputs
2977 * @dev: DRM device
2978 * @ctx: lock acquisition context
2979 *
2980 * Loops through all connectors, finding those that aren't turned off and then
2981 * turns them off by setting their DPMS mode to OFF and deactivating the CRTC
2982 * that they are connected to.
2983 *
2984 * This is used for example in suspend/resume to disable all currently active
2985 * functions when suspending. If you just want to shut down everything at e.g.
2986 * driver unload, look at drm_atomic_helper_shutdown().
2987 *
2988 * Note that if callers haven't already acquired all modeset locks this might
2989 * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
2990 *
2991 * Returns:
2992 * 0 on success or a negative error code on failure.
2993 *
2994 * See also:
2995 * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and
2996 * drm_atomic_helper_shutdown().
2997 */
2998int drm_atomic_helper_disable_all(struct drm_device *dev,
2999 struct drm_modeset_acquire_ctx *ctx)
3000{
3001 return __drm_atomic_helper_disable_all(dev, ctx, false);
3002}
2972EXPORT_SYMBOL(drm_atomic_helper_disable_all); 3003EXPORT_SYMBOL(drm_atomic_helper_disable_all);
2973 3004
2974/** 3005/**
@@ -2991,7 +3022,7 @@ void drm_atomic_helper_shutdown(struct drm_device *dev)
2991 while (1) { 3022 while (1) {
2992 ret = drm_modeset_lock_all_ctx(dev, &ctx); 3023 ret = drm_modeset_lock_all_ctx(dev, &ctx);
2993 if (!ret) 3024 if (!ret)
2994 ret = drm_atomic_helper_disable_all(dev, &ctx); 3025 ret = __drm_atomic_helper_disable_all(dev, &ctx, true);
2995 3026
2996 if (ret != -EDEADLK) 3027 if (ret != -EDEADLK)
2997 break; 3028 break;
@@ -3095,14 +3126,14 @@ int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
3095 struct drm_connector_state *new_conn_state; 3126 struct drm_connector_state *new_conn_state;
3096 struct drm_crtc *crtc; 3127 struct drm_crtc *crtc;
3097 struct drm_crtc_state *new_crtc_state; 3128 struct drm_crtc_state *new_crtc_state;
3098 unsigned plane_mask = 0;
3099 struct drm_device *dev = state->dev;
3100 int ret;
3101 3129
3102 state->acquire_ctx = ctx; 3130 state->acquire_ctx = ctx;
3103 3131
3104 for_each_new_plane_in_state(state, plane, new_plane_state, i) { 3132 for_each_new_plane_in_state(state, plane, new_plane_state, i) {
3105 plane_mask |= BIT(drm_plane_index(plane)); 3133 WARN_ON(plane->crtc != new_plane_state->crtc);
3134 WARN_ON(plane->fb != new_plane_state->fb);
3135 WARN_ON(plane->old_fb);
3136
3106 state->planes[i].old_state = plane->state; 3137 state->planes[i].old_state = plane->state;
3107 } 3138 }
3108 3139
@@ -3112,11 +3143,7 @@ int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
3112 for_each_new_connector_in_state(state, connector, new_conn_state, i) 3143 for_each_new_connector_in_state(state, connector, new_conn_state, i)
3113 state->connectors[i].old_state = connector->state; 3144 state->connectors[i].old_state = connector->state;
3114 3145
3115 ret = drm_atomic_commit(state); 3146 return drm_atomic_commit(state);
3116 if (plane_mask)
3117 drm_atomic_clean_old_fb(dev, plane_mask, ret);
3118
3119 return ret;
3120} 3147}
3121EXPORT_SYMBOL(drm_atomic_helper_commit_duplicated_state); 3148EXPORT_SYMBOL(drm_atomic_helper_commit_duplicated_state);
3122 3149
@@ -3484,6 +3511,10 @@ void drm_atomic_helper_plane_reset(struct drm_plane *plane)
3484 if (plane->state) { 3511 if (plane->state) {
3485 plane->state->plane = plane; 3512 plane->state->plane = plane;
3486 plane->state->rotation = DRM_MODE_ROTATE_0; 3513 plane->state->rotation = DRM_MODE_ROTATE_0;
3514
3515 /* Reset the alpha value to fully opaque if it matters */
3516 if (plane->alpha_property)
3517 plane->state->alpha = plane->alpha_property->values[1];
3487 } 3518 }
3488} 3519}
3489EXPORT_SYMBOL(drm_atomic_helper_plane_reset); 3520EXPORT_SYMBOL(drm_atomic_helper_plane_reset);
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
index 5a81e1b4c076..a16a74d7e15e 100644
--- a/drivers/gpu/drm/drm_blend.c
+++ b/drivers/gpu/drm/drm_blend.c
@@ -88,6 +88,13 @@
88 * On top of this basic transformation additional properties can be exposed by 88 * On top of this basic transformation additional properties can be exposed by
89 * the driver: 89 * the driver:
90 * 90 *
91 * alpha:
92 * Alpha is setup with drm_plane_create_alpha_property(). It controls the
93 * plane-wide opacity, from transparent (0) to opaque (0xffff). It can be
94 * combined with pixel alpha.
95 * The pixel values in the framebuffers are expected to not be
96 * pre-multiplied by the global alpha associated to the plane.
97 *
91 * rotation: 98 * rotation:
92 * Rotation is set up with drm_plane_create_rotation_property(). It adds a 99 * Rotation is set up with drm_plane_create_rotation_property(). It adds a
93 * rotation and reflection step between the source and destination rectangles. 100 * rotation and reflection step between the source and destination rectangles.
@@ -106,6 +113,38 @@
106 */ 113 */
107 114
108/** 115/**
116 * drm_plane_create_alpha_property - create a new alpha property
117 * @plane: drm plane
118 *
119 * This function creates a generic, mutable, alpha property and enables support
120 * for it in the DRM core. It is attached to @plane.
121 *
122 * The alpha property will be allowed to be within the bounds of 0
123 * (transparent) to 0xffff (opaque).
124 *
125 * Returns:
126 * 0 on success, negative error code on failure.
127 */
128int drm_plane_create_alpha_property(struct drm_plane *plane)
129{
130 struct drm_property *prop;
131
132 prop = drm_property_create_range(plane->dev, 0, "alpha",
133 0, DRM_BLEND_ALPHA_OPAQUE);
134 if (!prop)
135 return -ENOMEM;
136
137 drm_object_attach_property(&plane->base, prop, DRM_BLEND_ALPHA_OPAQUE);
138 plane->alpha_property = prop;
139
140 if (plane->state)
141 plane->state->alpha = DRM_BLEND_ALPHA_OPAQUE;
142
143 return 0;
144}
145EXPORT_SYMBOL(drm_plane_create_alpha_property);
146
147/**
109 * drm_plane_create_rotation_property - create a new rotation property 148 * drm_plane_create_rotation_property - create a new rotation property
110 * @plane: drm plane 149 * @plane: drm plane
111 * @rotation: initial value of the rotation property 150 * @rotation: initial value of the rotation property
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 03583887cfec..a231dd5dce16 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -402,6 +402,7 @@ int drm_mode_getcrtc(struct drm_device *dev,
402{ 402{
403 struct drm_mode_crtc *crtc_resp = data; 403 struct drm_mode_crtc *crtc_resp = data;
404 struct drm_crtc *crtc; 404 struct drm_crtc *crtc;
405 struct drm_plane *plane;
405 406
406 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 407 if (!drm_core_check_feature(dev, DRIVER_MODESET))
407 return -EINVAL; 408 return -EINVAL;
@@ -410,34 +411,36 @@ int drm_mode_getcrtc(struct drm_device *dev,
410 if (!crtc) 411 if (!crtc)
411 return -ENOENT; 412 return -ENOENT;
412 413
414 plane = crtc->primary;
415
413 crtc_resp->gamma_size = crtc->gamma_size; 416 crtc_resp->gamma_size = crtc->gamma_size;
414 417
415 drm_modeset_lock(&crtc->primary->mutex, NULL); 418 drm_modeset_lock(&plane->mutex, NULL);
416 if (crtc->primary->state && crtc->primary->state->fb) 419 if (plane->state && plane->state->fb)
417 crtc_resp->fb_id = crtc->primary->state->fb->base.id; 420 crtc_resp->fb_id = plane->state->fb->base.id;
418 else if (!crtc->primary->state && crtc->primary->fb) 421 else if (!plane->state && plane->fb)
419 crtc_resp->fb_id = crtc->primary->fb->base.id; 422 crtc_resp->fb_id = plane->fb->base.id;
420 else 423 else
421 crtc_resp->fb_id = 0; 424 crtc_resp->fb_id = 0;
422 425
423 if (crtc->primary->state) { 426 if (plane->state) {
424 crtc_resp->x = crtc->primary->state->src_x >> 16; 427 crtc_resp->x = plane->state->src_x >> 16;
425 crtc_resp->y = crtc->primary->state->src_y >> 16; 428 crtc_resp->y = plane->state->src_y >> 16;
426 } 429 }
427 drm_modeset_unlock(&crtc->primary->mutex); 430 drm_modeset_unlock(&plane->mutex);
428 431
429 drm_modeset_lock(&crtc->mutex, NULL); 432 drm_modeset_lock(&crtc->mutex, NULL);
430 if (crtc->state) { 433 if (crtc->state) {
431 if (crtc->state->enable) { 434 if (crtc->state->enable) {
432 drm_mode_convert_to_umode(&crtc_resp->mode, &crtc->state->mode); 435 drm_mode_convert_to_umode(&crtc_resp->mode, &crtc->state->mode);
433 crtc_resp->mode_valid = 1; 436 crtc_resp->mode_valid = 1;
434
435 } else { 437 } else {
436 crtc_resp->mode_valid = 0; 438 crtc_resp->mode_valid = 0;
437 } 439 }
438 } else { 440 } else {
439 crtc_resp->x = crtc->x; 441 crtc_resp->x = crtc->x;
440 crtc_resp->y = crtc->y; 442 crtc_resp->y = crtc->y;
443
441 if (crtc->enabled) { 444 if (crtc->enabled) {
442 drm_mode_convert_to_umode(&crtc_resp->mode, &crtc->mode); 445 drm_mode_convert_to_umode(&crtc_resp->mode, &crtc->mode);
443 crtc_resp->mode_valid = 1; 446 crtc_resp->mode_valid = 1;
@@ -471,7 +474,7 @@ static int __drm_mode_set_config_internal(struct drm_mode_set *set,
471 474
472 ret = crtc->funcs->set_config(set, ctx); 475 ret = crtc->funcs->set_config(set, ctx);
473 if (ret == 0) { 476 if (ret == 0) {
474 crtc->primary->crtc = crtc; 477 crtc->primary->crtc = fb ? crtc : NULL;
475 crtc->primary->fb = fb; 478 crtc->primary->fb = fb;
476 } 479 }
477 480
@@ -554,6 +557,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
554 struct drm_mode_config *config = &dev->mode_config; 557 struct drm_mode_config *config = &dev->mode_config;
555 struct drm_mode_crtc *crtc_req = data; 558 struct drm_mode_crtc *crtc_req = data;
556 struct drm_crtc *crtc; 559 struct drm_crtc *crtc;
560 struct drm_plane *plane;
557 struct drm_connector **connector_set = NULL, *connector; 561 struct drm_connector **connector_set = NULL, *connector;
558 struct drm_framebuffer *fb = NULL; 562 struct drm_framebuffer *fb = NULL;
559 struct drm_display_mode *mode = NULL; 563 struct drm_display_mode *mode = NULL;
@@ -580,22 +584,33 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
580 } 584 }
581 DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name); 585 DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
582 586
587 plane = crtc->primary;
588
583 mutex_lock(&crtc->dev->mode_config.mutex); 589 mutex_lock(&crtc->dev->mode_config.mutex);
584 drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); 590 drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
585retry: 591retry:
586 ret = drm_modeset_lock_all_ctx(crtc->dev, &ctx); 592 ret = drm_modeset_lock_all_ctx(crtc->dev, &ctx);
587 if (ret) 593 if (ret)
588 goto out; 594 goto out;
595
589 if (crtc_req->mode_valid) { 596 if (crtc_req->mode_valid) {
590 /* If we have a mode we need a framebuffer. */ 597 /* If we have a mode we need a framebuffer. */
591 /* If we pass -1, set the mode with the currently bound fb */ 598 /* If we pass -1, set the mode with the currently bound fb */
592 if (crtc_req->fb_id == -1) { 599 if (crtc_req->fb_id == -1) {
593 if (!crtc->primary->fb) { 600 struct drm_framebuffer *old_fb;
601
602 if (plane->state)
603 old_fb = plane->state->fb;
604 else
605 old_fb = plane->fb;
606
607 if (!old_fb) {
594 DRM_DEBUG_KMS("CRTC doesn't have current FB\n"); 608 DRM_DEBUG_KMS("CRTC doesn't have current FB\n");
595 ret = -EINVAL; 609 ret = -EINVAL;
596 goto out; 610 goto out;
597 } 611 }
598 fb = crtc->primary->fb; 612
613 fb = old_fb;
599 /* Make refcounting symmetric with the lookup path. */ 614 /* Make refcounting symmetric with the lookup path. */
600 drm_framebuffer_get(fb); 615 drm_framebuffer_get(fb);
601 } else { 616 } else {
@@ -627,8 +642,8 @@ retry:
627 * match real hardware capabilities. Skip the check in that 642 * match real hardware capabilities. Skip the check in that
628 * case. 643 * case.
629 */ 644 */
630 if (!crtc->primary->format_default) { 645 if (!plane->format_default) {
631 ret = drm_plane_check_pixel_format(crtc->primary, 646 ret = drm_plane_check_pixel_format(plane,
632 fb->format->format, 647 fb->format->format,
633 fb->modifier); 648 fb->modifier);
634 if (ret) { 649 if (ret) {
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index 3c2b82865ad2..5d307b23a4e6 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -220,3 +220,5 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
220 220
221/* drm_edid.c */ 221/* drm_edid.c */
222void drm_mode_fixup_1366x768(struct drm_display_mode *mode); 222void drm_mode_fixup_1366x768(struct drm_display_mode *mode);
223void drm_reset_display_info(struct drm_connector *connector);
224u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid);
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 6fac4129e6a2..658830620ca3 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -2941,12 +2941,14 @@ static void drm_dp_mst_dump_mstb(struct seq_file *m,
2941 } 2941 }
2942} 2942}
2943 2943
2944#define DP_PAYLOAD_TABLE_SIZE 64
2945
2944static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr, 2946static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
2945 char *buf) 2947 char *buf)
2946{ 2948{
2947 int i; 2949 int i;
2948 2950
2949 for (i = 0; i < 64; i += 16) { 2951 for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
2950 if (drm_dp_dpcd_read(mgr->aux, 2952 if (drm_dp_dpcd_read(mgr->aux,
2951 DP_PAYLOAD_TABLE_UPDATE_STATUS + i, 2953 DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
2952 &buf[i], 16) != 16) 2954 &buf[i], 16) != 16)
@@ -3015,7 +3017,7 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
3015 3017
3016 mutex_lock(&mgr->lock); 3018 mutex_lock(&mgr->lock);
3017 if (mgr->mst_primary) { 3019 if (mgr->mst_primary) {
3018 u8 buf[64]; 3020 u8 buf[DP_PAYLOAD_TABLE_SIZE];
3019 int ret; 3021 int ret;
3020 3022
3021 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE); 3023 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
@@ -3033,8 +3035,7 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
3033 seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n", 3035 seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
3034 buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]); 3036 buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
3035 if (dump_dp_payload_table(mgr, buf)) 3037 if (dump_dp_payload_table(mgr, buf))
3036 seq_printf(m, "payload table: %*ph\n", 63, buf); 3038 seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf);
3037
3038 } 3039 }
3039 3040
3040 mutex_unlock(&mgr->lock); 3041 mutex_unlock(&mgr->lock);
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index a1b9338736e3..32a83b41ab61 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -32,6 +32,7 @@
32#include <linux/moduleparam.h> 32#include <linux/moduleparam.h>
33#include <linux/mount.h> 33#include <linux/mount.h>
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <linux/srcu.h>
35 36
36#include <drm/drm_drv.h> 37#include <drm/drm_drv.h>
37#include <drm/drmP.h> 38#include <drm/drmP.h>
@@ -75,6 +76,8 @@ static bool drm_core_init_complete = false;
75 76
76static struct dentry *drm_debugfs_root; 77static struct dentry *drm_debugfs_root;
77 78
79DEFINE_STATIC_SRCU(drm_unplug_srcu);
80
78/* 81/*
79 * DRM Minors 82 * DRM Minors
80 * A DRM device can provide several char-dev interfaces on the DRM-Major. Each 83 * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
@@ -318,18 +321,51 @@ void drm_put_dev(struct drm_device *dev)
318} 321}
319EXPORT_SYMBOL(drm_put_dev); 322EXPORT_SYMBOL(drm_put_dev);
320 323
321static void drm_device_set_unplugged(struct drm_device *dev) 324/**
325 * drm_dev_enter - Enter device critical section
326 * @dev: DRM device
327 * @idx: Pointer to index that will be passed to the matching drm_dev_exit()
328 *
329 * This function marks and protects the beginning of a section that should not
330 * be entered after the device has been unplugged. The section end is marked
331 * with drm_dev_exit(). Calls to this function can be nested.
332 *
333 * Returns:
334 * True if it is OK to enter the section, false otherwise.
335 */
336bool drm_dev_enter(struct drm_device *dev, int *idx)
337{
338 *idx = srcu_read_lock(&drm_unplug_srcu);
339
340 if (dev->unplugged) {
341 srcu_read_unlock(&drm_unplug_srcu, *idx);
342 return false;
343 }
344
345 return true;
346}
347EXPORT_SYMBOL(drm_dev_enter);
348
349/**
350 * drm_dev_exit - Exit device critical section
351 * @idx: index returned from drm_dev_enter()
352 *
353 * This function marks the end of a section that should not be entered after
354 * the device has been unplugged.
355 */
356void drm_dev_exit(int idx)
322{ 357{
323 smp_wmb(); 358 srcu_read_unlock(&drm_unplug_srcu, idx);
324 atomic_set(&dev->unplugged, 1);
325} 359}
360EXPORT_SYMBOL(drm_dev_exit);
326 361
327/** 362/**
328 * drm_dev_unplug - unplug a DRM device 363 * drm_dev_unplug - unplug a DRM device
329 * @dev: DRM device 364 * @dev: DRM device
330 * 365 *
331 * This unplugs a hotpluggable DRM device, which makes it inaccessible to 366 * This unplugs a hotpluggable DRM device, which makes it inaccessible to
332 * userspace operations. Entry-points can use drm_dev_is_unplugged(). This 367 * userspace operations. Entry-points can use drm_dev_enter() and
368 * drm_dev_exit() to protect device resources in a race free manner. This
333 * essentially unregisters the device like drm_dev_unregister(), but can be 369 * essentially unregisters the device like drm_dev_unregister(), but can be
334 * called while there are still open users of @dev. 370 * called while there are still open users of @dev.
335 */ 371 */
@@ -338,10 +374,18 @@ void drm_dev_unplug(struct drm_device *dev)
338 drm_dev_unregister(dev); 374 drm_dev_unregister(dev);
339 375
340 mutex_lock(&drm_global_mutex); 376 mutex_lock(&drm_global_mutex);
341 drm_device_set_unplugged(dev);
342 if (dev->open_count == 0) 377 if (dev->open_count == 0)
343 drm_dev_put(dev); 378 drm_dev_put(dev);
344 mutex_unlock(&drm_global_mutex); 379 mutex_unlock(&drm_global_mutex);
380
381 /*
382 * After synchronizing any critical read section is guaranteed to see
383 * the new value of ->unplugged, and any critical section which might
384 * still have seen the old value of ->unplugged is guaranteed to have
385 * finished.
386 */
387 dev->unplugged = true;
388 synchronize_srcu(&drm_unplug_srcu);
345} 389}
346EXPORT_SYMBOL(drm_dev_unplug); 390EXPORT_SYMBOL(drm_dev_unplug);
347 391
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 39f1db4acda4..08d33b48b14a 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -4455,7 +4455,6 @@ drm_reset_display_info(struct drm_connector *connector)
4455 4455
4456 info->non_desktop = 0; 4456 info->non_desktop = 0;
4457} 4457}
4458EXPORT_SYMBOL_GPL(drm_reset_display_info);
4459 4458
4460u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid) 4459u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid)
4461{ 4460{
@@ -4533,7 +4532,6 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
4533 info->color_formats |= DRM_COLOR_FORMAT_YCRCB422; 4532 info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
4534 return quirks; 4533 return quirks;
4535} 4534}
4536EXPORT_SYMBOL_GPL(drm_add_display_info);
4537 4535
4538static int validate_displayid(u8 *displayid, int length, int idx) 4536static int validate_displayid(u8 *displayid, int length, int idx)
4539{ 4537{
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index ad67203de715..8c4d32adcc17 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -468,29 +468,31 @@ int drm_mode_getfb(struct drm_device *dev,
468 goto out; 468 goto out;
469 } 469 }
470 470
471 if (!fb->funcs->create_handle) {
472 ret = -ENODEV;
473 goto out;
474 }
475
471 r->height = fb->height; 476 r->height = fb->height;
472 r->width = fb->width; 477 r->width = fb->width;
473 r->depth = fb->format->depth; 478 r->depth = fb->format->depth;
474 r->bpp = fb->format->cpp[0] * 8; 479 r->bpp = fb->format->cpp[0] * 8;
475 r->pitch = fb->pitches[0]; 480 r->pitch = fb->pitches[0];
476 if (fb->funcs->create_handle) { 481
477 if (drm_is_current_master(file_priv) || capable(CAP_SYS_ADMIN) || 482 /* GET_FB() is an unprivileged ioctl so we must not return a
478 drm_is_control_client(file_priv)) { 483 * buffer-handle to non-master processes! For
479 ret = fb->funcs->create_handle(fb, file_priv, 484 * backwards-compatibility reasons, we cannot make GET_FB() privileged,
480 &r->handle); 485 * so just return an invalid handle for non-masters.
481 } else { 486 */
482 /* GET_FB() is an unprivileged ioctl so we must not 487 if (!drm_is_current_master(file_priv) && !capable(CAP_SYS_ADMIN) &&
483 * return a buffer-handle to non-master processes! For 488 !drm_is_control_client(file_priv)) {
484 * backwards-compatibility reasons, we cannot make 489 r->handle = 0;
485 * GET_FB() privileged, so just return an invalid handle 490 ret = 0;
486 * for non-masters. */ 491 goto out;
487 r->handle = 0;
488 ret = 0;
489 }
490 } else {
491 ret = -ENODEV;
492 } 492 }
493 493
494 ret = fb->funcs->create_handle(fb, file_priv, &r->handle);
495
494out: 496out:
495 drm_framebuffer_put(fb); 497 drm_framebuffer_put(fb);
496 498
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 4975ba9a7bc8..4a16d7b26c89 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -436,9 +436,12 @@ err_unref:
436 * @obj: object to register 436 * @obj: object to register
437 * @handlep: pionter to return the created handle to the caller 437 * @handlep: pionter to return the created handle to the caller
438 * 438 *
439 * Create a handle for this object. This adds a handle reference 439 * Create a handle for this object. This adds a handle reference to the object,
440 * to the object, which includes a regular reference count. Callers 440 * which includes a regular reference count. Callers will likely want to
441 * will likely want to dereference the object afterwards. 441 * dereference the object afterwards.
442 *
443 * Since this publishes @obj to userspace it must be fully set up by this point,
444 * drivers must call this last in their buffer object creation callbacks.
442 */ 445 */
443int drm_gem_handle_create(struct drm_file *file_priv, 446int drm_gem_handle_create(struct drm_file *file_priv,
444 struct drm_gem_object *obj, 447 struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index 4d682a6e8bcb..acfbc0641a06 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -22,6 +22,7 @@
22#include <drm/drm_gem.h> 22#include <drm/drm_gem.h>
23#include <drm/drm_gem_framebuffer_helper.h> 23#include <drm/drm_gem_framebuffer_helper.h>
24#include <drm/drm_modeset_helper.h> 24#include <drm/drm_modeset_helper.h>
25#include <drm/drm_simple_kms_helper.h>
25 26
26/** 27/**
27 * DOC: overview 28 * DOC: overview
@@ -266,6 +267,24 @@ int drm_gem_fb_prepare_fb(struct drm_plane *plane,
266EXPORT_SYMBOL_GPL(drm_gem_fb_prepare_fb); 267EXPORT_SYMBOL_GPL(drm_gem_fb_prepare_fb);
267 268
268/** 269/**
270 * drm_gem_fb_simple_display_pipe_prepare_fb - prepare_fb helper for
271 * &drm_simple_display_pipe
272 * @pipe: Simple display pipe
273 * @plane_state: Plane state
274 *
275 * This function uses drm_gem_fb_prepare_fb() to check if the plane FB has a
276 * &dma_buf attached, extracts the exclusive fence and attaches it to plane
277 * state for the atomic helper to wait on. Drivers can use this as their
278 * &drm_simple_display_pipe_funcs.prepare_fb callback.
279 */
280int drm_gem_fb_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
281 struct drm_plane_state *plane_state)
282{
283 return drm_gem_fb_prepare_fb(&pipe->plane, plane_state);
284}
285EXPORT_SYMBOL(drm_gem_fb_simple_display_pipe_prepare_fb);
286
287/**
269 * drm_gem_fbdev_fb_create - Create a GEM backed &drm_framebuffer for fbdev 288 * drm_gem_fbdev_fb_create - Create a GEM backed &drm_framebuffer for fbdev
270 * emulation 289 * emulation
271 * @dev: DRM device 290 * @dev: DRM device
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
index d345563fdff3..50c73c0a20b9 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -340,7 +340,7 @@ static void _drm_lease_revoke(struct drm_master *top)
340 break; 340 break;
341 341
342 /* Over */ 342 /* Over */
343 master = list_entry(master->lessee_list.next, struct drm_master, lessee_list); 343 master = list_next_entry(master, lessee_list);
344 } 344 }
345 } 345 }
346} 346}
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index 902cc1a71e45..caebddda8bce 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -60,7 +60,7 @@ static const struct drm_dmi_panel_orientation_data itworks_tw891 = {
60 .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, 60 .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
61}; 61};
62 62
63static const struct drm_dmi_panel_orientation_data vios_lth17 = { 63static const struct drm_dmi_panel_orientation_data lcd800x1280_rightside_up = {
64 .width = 800, 64 .width = 800,
65 .height = 1280, 65 .height = 1280,
66 .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, 66 .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
@@ -102,12 +102,30 @@ static const struct dmi_system_id orientation_data[] = {
102 DMI_EXACT_MATCH(DMI_BOARD_NAME, "TW891"), 102 DMI_EXACT_MATCH(DMI_BOARD_NAME, "TW891"),
103 }, 103 },
104 .driver_data = (void *)&itworks_tw891, 104 .driver_data = (void *)&itworks_tw891,
105 }, { /*
106 * Lenovo Ideapad Miix 310 laptop, only some production batches
107 * have a portrait screen, the resolution checks makes the quirk
108 * apply only to those batches.
109 */
110 .matches = {
111 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
112 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "80SG"),
113 DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "MIIX 310-10ICR"),
114 },
115 .driver_data = (void *)&lcd800x1280_rightside_up,
116 }, { /* Lenovo Ideapad Miix 320 */
117 .matches = {
118 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
119 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "80XF"),
120 DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"),
121 },
122 .driver_data = (void *)&lcd800x1280_rightside_up,
105 }, { /* VIOS LTH17 */ 123 }, { /* VIOS LTH17 */
106 .matches = { 124 .matches = {
107 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"), 125 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"),
108 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "LTH17"), 126 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "LTH17"),
109 }, 127 },
110 .driver_data = (void *)&vios_lth17, 128 .driver_data = (void *)&lcd800x1280_rightside_up,
111 }, 129 },
112 {} 130 {}
113}; 131};
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 6d2a6e428a3e..035054455301 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -756,6 +756,7 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
756 struct drm_modeset_acquire_ctx *ctx) 756 struct drm_modeset_acquire_ctx *ctx)
757{ 757{
758 struct drm_device *dev = crtc->dev; 758 struct drm_device *dev = crtc->dev;
759 struct drm_plane *plane = crtc->cursor;
759 struct drm_framebuffer *fb = NULL; 760 struct drm_framebuffer *fb = NULL;
760 struct drm_mode_fb_cmd2 fbreq = { 761 struct drm_mode_fb_cmd2 fbreq = {
761 .width = req->width, 762 .width = req->width,
@@ -769,8 +770,8 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
769 uint32_t src_w = 0, src_h = 0; 770 uint32_t src_w = 0, src_h = 0;
770 int ret = 0; 771 int ret = 0;
771 772
772 BUG_ON(!crtc->cursor); 773 BUG_ON(!plane);
773 WARN_ON(crtc->cursor->crtc != crtc && crtc->cursor->crtc != NULL); 774 WARN_ON(plane->crtc != crtc && plane->crtc != NULL);
774 775
775 /* 776 /*
776 * Obtain fb we'll be using (either new or existing) and take an extra 777 * Obtain fb we'll be using (either new or existing) and take an extra
@@ -784,13 +785,18 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
784 DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n"); 785 DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n");
785 return PTR_ERR(fb); 786 return PTR_ERR(fb);
786 } 787 }
788
787 fb->hot_x = req->hot_x; 789 fb->hot_x = req->hot_x;
788 fb->hot_y = req->hot_y; 790 fb->hot_y = req->hot_y;
789 } else { 791 } else {
790 fb = NULL; 792 fb = NULL;
791 } 793 }
792 } else { 794 } else {
793 fb = crtc->cursor->fb; 795 if (plane->state)
796 fb = plane->state->fb;
797 else
798 fb = plane->fb;
799
794 if (fb) 800 if (fb)
795 drm_framebuffer_get(fb); 801 drm_framebuffer_get(fb);
796 } 802 }
@@ -810,7 +816,7 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
810 src_h = fb->height << 16; 816 src_h = fb->height << 16;
811 } 817 }
812 818
813 ret = __setplane_internal(crtc->cursor, crtc, fb, 819 ret = __setplane_internal(plane, crtc, fb,
814 crtc_x, crtc_y, crtc_w, crtc_h, 820 crtc_x, crtc_y, crtc_w, crtc_h,
815 0, 0, src_w, src_h, ctx); 821 0, 0, src_w, src_h, ctx);
816 822
@@ -931,7 +937,8 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
931{ 937{
932 struct drm_mode_crtc_page_flip_target *page_flip = data; 938 struct drm_mode_crtc_page_flip_target *page_flip = data;
933 struct drm_crtc *crtc; 939 struct drm_crtc *crtc;
934 struct drm_framebuffer *fb = NULL; 940 struct drm_plane *plane;
941 struct drm_framebuffer *fb = NULL, *old_fb;
935 struct drm_pending_vblank_event *e = NULL; 942 struct drm_pending_vblank_event *e = NULL;
936 u32 target_vblank = page_flip->sequence; 943 u32 target_vblank = page_flip->sequence;
937 struct drm_modeset_acquire_ctx ctx; 944 struct drm_modeset_acquire_ctx ctx;
@@ -959,6 +966,8 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
959 if (!crtc) 966 if (!crtc)
960 return -ENOENT; 967 return -ENOENT;
961 968
969 plane = crtc->primary;
970
962 if (crtc->funcs->page_flip_target) { 971 if (crtc->funcs->page_flip_target) {
963 u32 current_vblank; 972 u32 current_vblank;
964 int r; 973 int r;
@@ -1003,11 +1012,16 @@ retry:
1003 ret = drm_modeset_lock(&crtc->mutex, &ctx); 1012 ret = drm_modeset_lock(&crtc->mutex, &ctx);
1004 if (ret) 1013 if (ret)
1005 goto out; 1014 goto out;
1006 ret = drm_modeset_lock(&crtc->primary->mutex, &ctx); 1015 ret = drm_modeset_lock(&plane->mutex, &ctx);
1007 if (ret) 1016 if (ret)
1008 goto out; 1017 goto out;
1009 1018
1010 if (crtc->primary->fb == NULL) { 1019 if (plane->state)
1020 old_fb = plane->state->fb;
1021 else
1022 old_fb = plane->fb;
1023
1024 if (old_fb == NULL) {
1011 /* The framebuffer is currently unbound, presumably 1025 /* The framebuffer is currently unbound, presumably
1012 * due to a hotplug event, that userspace has not 1026 * due to a hotplug event, that userspace has not
1013 * yet discovered. 1027 * yet discovered.
@@ -1022,8 +1036,8 @@ retry:
1022 goto out; 1036 goto out;
1023 } 1037 }
1024 1038
1025 if (crtc->state) { 1039 if (plane->state) {
1026 const struct drm_plane_state *state = crtc->primary->state; 1040 const struct drm_plane_state *state = plane->state;
1027 1041
1028 ret = drm_framebuffer_check_src_coords(state->src_x, 1042 ret = drm_framebuffer_check_src_coords(state->src_x,
1029 state->src_y, 1043 state->src_y,
@@ -1031,12 +1045,13 @@ retry:
1031 state->src_h, 1045 state->src_h,
1032 fb); 1046 fb);
1033 } else { 1047 } else {
1034 ret = drm_crtc_check_viewport(crtc, crtc->x, crtc->y, &crtc->mode, fb); 1048 ret = drm_crtc_check_viewport(crtc, crtc->x, crtc->y,
1049 &crtc->mode, fb);
1035 } 1050 }
1036 if (ret) 1051 if (ret)
1037 goto out; 1052 goto out;
1038 1053
1039 if (crtc->primary->fb->format != fb->format) { 1054 if (old_fb->format != fb->format) {
1040 DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n"); 1055 DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
1041 ret = -EINVAL; 1056 ret = -EINVAL;
1042 goto out; 1057 goto out;
@@ -1048,10 +1063,12 @@ retry:
1048 ret = -ENOMEM; 1063 ret = -ENOMEM;
1049 goto out; 1064 goto out;
1050 } 1065 }
1066
1051 e->event.base.type = DRM_EVENT_FLIP_COMPLETE; 1067 e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
1052 e->event.base.length = sizeof(e->event); 1068 e->event.base.length = sizeof(e->event);
1053 e->event.vbl.user_data = page_flip->user_data; 1069 e->event.vbl.user_data = page_flip->user_data;
1054 e->event.vbl.crtc_id = crtc->base.id; 1070 e->event.vbl.crtc_id = crtc->base.id;
1071
1055 ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base); 1072 ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
1056 if (ret) { 1073 if (ret) {
1057 kfree(e); 1074 kfree(e);
@@ -1060,7 +1077,7 @@ retry:
1060 } 1077 }
1061 } 1078 }
1062 1079
1063 crtc->primary->old_fb = crtc->primary->fb; 1080 plane->old_fb = plane->fb;
1064 if (crtc->funcs->page_flip_target) 1081 if (crtc->funcs->page_flip_target)
1065 ret = crtc->funcs->page_flip_target(crtc, fb, e, 1082 ret = crtc->funcs->page_flip_target(crtc, fb, e,
1066 page_flip->flags, 1083 page_flip->flags,
@@ -1073,19 +1090,18 @@ retry:
1073 if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) 1090 if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT)
1074 drm_event_cancel_free(dev, &e->base); 1091 drm_event_cancel_free(dev, &e->base);
1075 /* Keep the old fb, don't unref it. */ 1092 /* Keep the old fb, don't unref it. */
1076 crtc->primary->old_fb = NULL; 1093 plane->old_fb = NULL;
1077 } else { 1094 } else {
1078 crtc->primary->fb = fb; 1095 plane->fb = fb;
1079 /* Unref only the old framebuffer. */ 1096 drm_framebuffer_get(fb);
1080 fb = NULL;
1081 } 1097 }
1082 1098
1083out: 1099out:
1084 if (fb) 1100 if (fb)
1085 drm_framebuffer_put(fb); 1101 drm_framebuffer_put(fb);
1086 if (crtc->primary->old_fb) 1102 if (plane->old_fb)
1087 drm_framebuffer_put(crtc->primary->old_fb); 1103 drm_framebuffer_put(plane->old_fb);
1088 crtc->primary->old_fb = NULL; 1104 plane->old_fb = NULL;
1089 1105
1090 if (ret == -EDEADLK) { 1106 if (ret == -EDEADLK) {
1091 ret = drm_modeset_backoff(&ctx); 1107 ret = drm_modeset_backoff(&ctx);
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 7856a9b3f8a8..caf675e3e692 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -331,6 +331,9 @@ EXPORT_SYMBOL(drm_gem_map_dma_buf);
331 331
332/** 332/**
333 * drm_gem_unmap_dma_buf - unmap_dma_buf implementation for GEM 333 * drm_gem_unmap_dma_buf - unmap_dma_buf implementation for GEM
334 * @attach: attachment to unmap buffer from
335 * @sgt: scatterlist info of the buffer to unmap
336 * @dir: direction of DMA transfer
334 * 337 *
335 * Not implemented. The unmap is done at drm_gem_map_detach(). This can be 338 * Not implemented. The unmap is done at drm_gem_map_detach(). This can be
336 * used as the &dma_buf_ops.unmap_dma_buf callback. 339 * used as the &dma_buf_ops.unmap_dma_buf callback.
@@ -429,6 +432,8 @@ EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
429 432
430/** 433/**
431 * drm_gem_dmabuf_kmap_atomic - map_atomic implementation for GEM 434 * drm_gem_dmabuf_kmap_atomic - map_atomic implementation for GEM
435 * @dma_buf: buffer to be mapped
436 * @page_num: page number within the buffer
432 * 437 *
433 * Not implemented. This can be used as the &dma_buf_ops.map_atomic callback. 438 * Not implemented. This can be used as the &dma_buf_ops.map_atomic callback.
434 */ 439 */
@@ -441,6 +446,9 @@ EXPORT_SYMBOL(drm_gem_dmabuf_kmap_atomic);
441 446
442/** 447/**
443 * drm_gem_dmabuf_kunmap_atomic - unmap_atomic implementation for GEM 448 * drm_gem_dmabuf_kunmap_atomic - unmap_atomic implementation for GEM
449 * @dma_buf: buffer to be unmapped
450 * @page_num: page number within the buffer
451 * @addr: virtual address of the buffer
444 * 452 *
445 * Not implemented. This can be used as the &dma_buf_ops.unmap_atomic callback. 453 * Not implemented. This can be used as the &dma_buf_ops.unmap_atomic callback.
446 */ 454 */
@@ -453,6 +461,8 @@ EXPORT_SYMBOL(drm_gem_dmabuf_kunmap_atomic);
453 461
454/** 462/**
455 * drm_gem_dmabuf_kmap - map implementation for GEM 463 * drm_gem_dmabuf_kmap - map implementation for GEM
464 * @dma_buf: buffer to be mapped
465 * @page_num: page number within the buffer
456 * 466 *
457 * Not implemented. This can be used as the &dma_buf_ops.map callback. 467 * Not implemented. This can be used as the &dma_buf_ops.map callback.
458 */ 468 */
@@ -464,6 +474,9 @@ EXPORT_SYMBOL(drm_gem_dmabuf_kmap);
464 474
465/** 475/**
466 * drm_gem_dmabuf_kunmap - unmap implementation for GEM 476 * drm_gem_dmabuf_kunmap - unmap implementation for GEM
477 * @dma_buf: buffer to be unmapped
478 * @page_num: page number within the buffer
479 * @addr: virtual address of the buffer
467 * 480 *
468 * Not implemented. This can be used as the &dma_buf_ops.unmap callback. 481 * Not implemented. This can be used as the &dma_buf_ops.unmap callback.
469 */ 482 */
diff --git a/drivers/gpu/drm/drm_scdc_helper.c b/drivers/gpu/drm/drm_scdc_helper.c
index 657ea5ab6c3f..870e25f1f788 100644
--- a/drivers/gpu/drm/drm_scdc_helper.c
+++ b/drivers/gpu/drm/drm_scdc_helper.c
@@ -141,7 +141,7 @@ bool drm_scdc_get_scrambling_status(struct i2c_adapter *adapter)
141 141
142 ret = drm_scdc_readb(adapter, SCDC_SCRAMBLER_STATUS, &status); 142 ret = drm_scdc_readb(adapter, SCDC_SCRAMBLER_STATUS, &status);
143 if (ret < 0) { 143 if (ret < 0) {
144 DRM_ERROR("Failed to read scrambling status: %d\n", ret); 144 DRM_DEBUG_KMS("Failed to read scrambling status: %d\n", ret);
145 return false; 145 return false;
146 } 146 }
147 147
@@ -168,7 +168,7 @@ bool drm_scdc_set_scrambling(struct i2c_adapter *adapter, bool enable)
168 168
169 ret = drm_scdc_readb(adapter, SCDC_TMDS_CONFIG, &config); 169 ret = drm_scdc_readb(adapter, SCDC_TMDS_CONFIG, &config);
170 if (ret < 0) { 170 if (ret < 0) {
171 DRM_ERROR("Failed to read TMDS config: %d\n", ret); 171 DRM_DEBUG_KMS("Failed to read TMDS config: %d\n", ret);
172 return false; 172 return false;
173 } 173 }
174 174
@@ -179,7 +179,7 @@ bool drm_scdc_set_scrambling(struct i2c_adapter *adapter, bool enable)
179 179
180 ret = drm_scdc_writeb(adapter, SCDC_TMDS_CONFIG, config); 180 ret = drm_scdc_writeb(adapter, SCDC_TMDS_CONFIG, config);
181 if (ret < 0) { 181 if (ret < 0) {
182 DRM_ERROR("Failed to enable scrambling: %d\n", ret); 182 DRM_DEBUG_KMS("Failed to enable scrambling: %d\n", ret);
183 return false; 183 return false;
184 } 184 }
185 185
@@ -223,7 +223,7 @@ bool drm_scdc_set_high_tmds_clock_ratio(struct i2c_adapter *adapter, bool set)
223 223
224 ret = drm_scdc_readb(adapter, SCDC_TMDS_CONFIG, &config); 224 ret = drm_scdc_readb(adapter, SCDC_TMDS_CONFIG, &config);
225 if (ret < 0) { 225 if (ret < 0) {
226 DRM_ERROR("Failed to read TMDS config: %d\n", ret); 226 DRM_DEBUG_KMS("Failed to read TMDS config: %d\n", ret);
227 return false; 227 return false;
228 } 228 }
229 229
@@ -234,7 +234,7 @@ bool drm_scdc_set_high_tmds_clock_ratio(struct i2c_adapter *adapter, bool set)
234 234
235 ret = drm_scdc_writeb(adapter, SCDC_TMDS_CONFIG, config); 235 ret = drm_scdc_writeb(adapter, SCDC_TMDS_CONFIG, config);
236 if (ret < 0) { 236 if (ret < 0) {
237 DRM_ERROR("Failed to set TMDS clock ratio: %d\n", ret); 237 DRM_DEBUG_KMS("Failed to set TMDS clock ratio: %d\n", ret);
238 return false; 238 return false;
239 } 239 }
240 240
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index 987a353c7f72..7a00455ca568 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -64,13 +64,15 @@ static int drm_simple_kms_crtc_check(struct drm_crtc *crtc,
64static void drm_simple_kms_crtc_enable(struct drm_crtc *crtc, 64static void drm_simple_kms_crtc_enable(struct drm_crtc *crtc,
65 struct drm_crtc_state *old_state) 65 struct drm_crtc_state *old_state)
66{ 66{
67 struct drm_plane *plane;
67 struct drm_simple_display_pipe *pipe; 68 struct drm_simple_display_pipe *pipe;
68 69
69 pipe = container_of(crtc, struct drm_simple_display_pipe, crtc); 70 pipe = container_of(crtc, struct drm_simple_display_pipe, crtc);
70 if (!pipe->funcs || !pipe->funcs->enable) 71 if (!pipe->funcs || !pipe->funcs->enable)
71 return; 72 return;
72 73
73 pipe->funcs->enable(pipe, crtc->state); 74 plane = &pipe->plane;
75 pipe->funcs->enable(pipe, crtc->state, plane->state);
74} 76}
75 77
76static void drm_simple_kms_crtc_disable(struct drm_crtc *crtc, 78static void drm_simple_kms_crtc_disable(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index 964831dab102..86330f396784 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -162,7 +162,7 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
162 dp->drm_dev = drm_dev; 162 dp->drm_dev = drm_dev;
163 163
164 dp->plat_data.dev_type = EXYNOS_DP; 164 dp->plat_data.dev_type = EXYNOS_DP;
165 dp->plat_data.power_on = exynos_dp_poweron; 165 dp->plat_data.power_on_start = exynos_dp_poweron;
166 dp->plat_data.power_off = exynos_dp_poweroff; 166 dp->plat_data.power_off = exynos_dp_poweroff;
167 dp->plat_data.attach = exynos_dp_bridge_attach; 167 dp->plat_data.attach = exynos_dp_bridge_attach;
168 dp->plat_data.get_modes = exynos_dp_get_modes; 168 dp->plat_data.get_modes = exynos_dp_get_modes;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index a518e9c6d6cc..39284bb7c2c2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -37,26 +37,6 @@
37#define DRIVER_MAJOR 1 37#define DRIVER_MAJOR 1
38#define DRIVER_MINOR 0 38#define DRIVER_MINOR 0
39 39
40int exynos_atomic_check(struct drm_device *dev,
41 struct drm_atomic_state *state)
42{
43 int ret;
44
45 ret = drm_atomic_helper_check_modeset(dev, state);
46 if (ret)
47 return ret;
48
49 ret = drm_atomic_normalize_zpos(dev, state);
50 if (ret)
51 return ret;
52
53 ret = drm_atomic_helper_check_planes(dev, state);
54 if (ret)
55 return ret;
56
57 return ret;
58}
59
60static int exynos_drm_open(struct drm_device *dev, struct drm_file *file) 40static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
61{ 41{
62 struct drm_exynos_file_private *file_priv; 42 struct drm_exynos_file_private *file_priv;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index df2262f70d91..075957cb6ba1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -275,7 +275,6 @@ static inline int exynos_dpi_bind(struct drm_device *dev,
275 275
276int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state, 276int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
277 bool nonblock); 277 bool nonblock);
278int exynos_atomic_check(struct drm_device *dev, struct drm_atomic_state *state);
279 278
280 279
281extern struct platform_driver fimd_driver; 280extern struct platform_driver fimd_driver;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index f0e79178bde6..7fcc1a7ab1a0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -161,7 +161,7 @@ static struct drm_mode_config_helper_funcs exynos_drm_mode_config_helpers = {
161static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = { 161static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
162 .fb_create = exynos_user_fb_create, 162 .fb_create = exynos_user_fb_create,
163 .output_poll_changed = drm_fb_helper_output_poll_changed, 163 .output_poll_changed = drm_fb_helper_output_poll_changed,
164 .atomic_check = exynos_atomic_check, 164 .atomic_check = drm_atomic_helper_check,
165 .atomic_commit = drm_atomic_helper_commit, 165 .atomic_commit = drm_atomic_helper_commit,
166}; 166};
167 167
@@ -182,4 +182,6 @@ void exynos_drm_mode_config_init(struct drm_device *dev)
182 dev->mode_config.helper_private = &exynos_drm_mode_config_helpers; 182 dev->mode_config.helper_private = &exynos_drm_mode_config_helpers;
183 183
184 dev->mode_config.allow_fb_modifiers = true; 184 dev->mode_config.allow_fb_modifiers = true;
185
186 dev->mode_config.normalize_zpos = true;
185} 187}
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
index b837e7a92196..cb5a14b7ec7f 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_crt.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -64,7 +64,7 @@ static void cdv_intel_crt_dpms(struct drm_encoder *encoder, int mode)
64 REG_WRITE(reg, temp); 64 REG_WRITE(reg, temp);
65} 65}
66 66
67static int cdv_intel_crt_mode_valid(struct drm_connector *connector, 67static enum drm_mode_status cdv_intel_crt_mode_valid(struct drm_connector *connector,
68 struct drm_display_mode *mode) 68 struct drm_display_mode *mode)
69{ 69{
70 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 70 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index a4bb89b7878f..5ea785f07ba8 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -505,7 +505,7 @@ static void cdv_intel_edp_backlight_off (struct gma_encoder *intel_encoder)
505 msleep(intel_dp->backlight_off_delay); 505 msleep(intel_dp->backlight_off_delay);
506} 506}
507 507
508static int 508static enum drm_mode_status
509cdv_intel_dp_mode_valid(struct drm_connector *connector, 509cdv_intel_dp_mode_valid(struct drm_connector *connector,
510 struct drm_display_mode *mode) 510 struct drm_display_mode *mode)
511{ 511{
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index 563f193fcfac..f0878998526a 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -223,7 +223,7 @@ static int cdv_hdmi_get_modes(struct drm_connector *connector)
223 return ret; 223 return ret;
224} 224}
225 225
226static int cdv_hdmi_mode_valid(struct drm_connector *connector, 226static enum drm_mode_status cdv_hdmi_mode_valid(struct drm_connector *connector,
227 struct drm_display_mode *mode) 227 struct drm_display_mode *mode)
228{ 228{
229 if (mode->clock > 165000) 229 if (mode->clock > 165000)
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index e64960db3224..de9531caaca0 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -244,7 +244,7 @@ static void cdv_intel_lvds_restore(struct drm_connector *connector)
244{ 244{
245} 245}
246 246
247static int cdv_intel_lvds_mode_valid(struct drm_connector *connector, 247static enum drm_mode_status cdv_intel_lvds_mode_valid(struct drm_connector *connector,
248 struct drm_display_mode *mode) 248 struct drm_display_mode *mode)
249{ 249{
250 struct drm_device *dev = connector->dev; 250 struct drm_device *dev = connector->dev;
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index acb3848ef1c9..fe020926ea4f 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -346,7 +346,7 @@ static int mdfld_dsi_connector_get_modes(struct drm_connector *connector)
346 return 0; 346 return 0;
347} 347}
348 348
349static int mdfld_dsi_connector_mode_valid(struct drm_connector *connector, 349static enum drm_mode_status mdfld_dsi_connector_mode_valid(struct drm_connector *connector,
350 struct drm_display_mode *mode) 350 struct drm_display_mode *mode)
351{ 351{
352 struct mdfld_dsi_connector *dsi_connector = 352 struct mdfld_dsi_connector *dsi_connector =
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index 8b2eb32ee988..78566a80ad25 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -509,7 +509,7 @@ static void oaktrail_hdmi_dpms(struct drm_encoder *encoder, int mode)
509 HDMI_WRITE(HDMI_VIDEO_REG, temp); 509 HDMI_WRITE(HDMI_VIDEO_REG, temp);
510} 510}
511 511
512static int oaktrail_hdmi_mode_valid(struct drm_connector *connector, 512static enum drm_mode_status oaktrail_hdmi_mode_valid(struct drm_connector *connector,
513 struct drm_display_mode *mode) 513 struct drm_display_mode *mode)
514{ 514{
515 if (mode->clock > 165000) 515 if (mode->clock > 165000)
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index e8e4ea14b12b..e05e5399af2d 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -255,7 +255,7 @@ extern int intelfb_remove(struct drm_device *dev,
255extern bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder, 255extern bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
256 const struct drm_display_mode *mode, 256 const struct drm_display_mode *mode,
257 struct drm_display_mode *adjusted_mode); 257 struct drm_display_mode *adjusted_mode);
258extern int psb_intel_lvds_mode_valid(struct drm_connector *connector, 258extern enum drm_mode_status psb_intel_lvds_mode_valid(struct drm_connector *connector,
259 struct drm_display_mode *mode); 259 struct drm_display_mode *mode);
260extern int psb_intel_lvds_set_property(struct drm_connector *connector, 260extern int psb_intel_lvds_set_property(struct drm_connector *connector,
261 struct drm_property *property, 261 struct drm_property *property,
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index be3eefec5152..8baf6325c6e4 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -343,7 +343,7 @@ static void psb_intel_lvds_restore(struct drm_connector *connector)
343 } 343 }
344} 344}
345 345
346int psb_intel_lvds_mode_valid(struct drm_connector *connector, 346enum drm_mode_status psb_intel_lvds_mode_valid(struct drm_connector *connector,
347 struct drm_display_mode *mode) 347 struct drm_display_mode *mode)
348{ 348{
349 struct drm_psb_private *dev_priv = connector->dev->dev_private; 349 struct drm_psb_private *dev_priv = connector->dev->dev_private;
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 84507912be84..8dc2b19f913b 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -1157,7 +1157,7 @@ static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
1157 return; 1157 return;
1158} 1158}
1159 1159
1160static int psb_intel_sdvo_mode_valid(struct drm_connector *connector, 1160static enum drm_mode_status psb_intel_sdvo_mode_valid(struct drm_connector *connector,
1161 struct drm_display_mode *mode) 1161 struct drm_display_mode *mode)
1162{ 1162{
1163 struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector); 1163 struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
index f4eba87c96f3..d2f4749ebf8d 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
@@ -27,7 +27,7 @@ static int hibmc_connector_get_modes(struct drm_connector *connector)
27 return drm_add_modes_noedid(connector, 800, 600); 27 return drm_add_modes_noedid(connector, 800, 600);
28} 28}
29 29
30static int hibmc_connector_mode_valid(struct drm_connector *connector, 30static enum drm_mode_status hibmc_connector_mode_valid(struct drm_connector *connector,
31 struct drm_display_mode *mode) 31 struct drm_display_mode *mode)
32{ 32{
33 return MODE_OK; 33 return MODE_OK;
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 9e67a7b4e3a4..421c8a72369e 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -1106,7 +1106,7 @@ static int tda998x_connector_get_modes(struct drm_connector *connector)
1106 return n; 1106 return n;
1107} 1107}
1108 1108
1109static int tda998x_connector_mode_valid(struct drm_connector *connector, 1109static enum drm_mode_status tda998x_connector_mode_valid(struct drm_connector *connector,
1110 struct drm_display_mode *mode) 1110 struct drm_display_mode *mode)
1111{ 1111{
1112 /* TDA19988 dotclock can go up to 165MHz */ 1112 /* TDA19988 dotclock can go up to 165MHz */
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index c0a8805b277f..de0e22322c76 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -748,6 +748,11 @@ intel_crt_detect(struct drm_connector *connector,
748 connector->base.id, connector->name, 748 connector->base.id, connector->name,
749 force); 749 force);
750 750
751 if (i915_modparams.load_detect_test) {
752 intel_display_power_get(dev_priv, intel_encoder->power_domain);
753 goto load_detect;
754 }
755
751 /* Skip machines without VGA that falsely report hotplug events */ 756 /* Skip machines without VGA that falsely report hotplug events */
752 if (dmi_check_system(intel_spurious_crt_detect)) 757 if (dmi_check_system(intel_spurious_crt_detect))
753 return connector_status_disconnected; 758 return connector_status_disconnected;
@@ -776,11 +781,12 @@ intel_crt_detect(struct drm_connector *connector,
776 * broken monitor (without edid) to work behind a broken kvm (that fails 781 * broken monitor (without edid) to work behind a broken kvm (that fails
777 * to have the right resistors for HP detection) needs to fix this up. 782 * to have the right resistors for HP detection) needs to fix this up.
778 * For now just bail out. */ 783 * For now just bail out. */
779 if (I915_HAS_HOTPLUG(dev_priv) && !i915_modparams.load_detect_test) { 784 if (I915_HAS_HOTPLUG(dev_priv)) {
780 status = connector_status_disconnected; 785 status = connector_status_disconnected;
781 goto out; 786 goto out;
782 } 787 }
783 788
789load_detect:
784 if (!force) { 790 if (!force) {
785 status = connector->status; 791 status = connector->status;
786 goto out; 792 goto out;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3b48fd2561fe..182f9bf98484 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2824,7 +2824,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
2824 continue; 2824 continue;
2825 2825
2826 if (intel_plane_ggtt_offset(state) == plane_config->base) { 2826 if (intel_plane_ggtt_offset(state) == plane_config->base) {
2827 fb = c->primary->fb; 2827 fb = state->base.fb;
2828 drm_framebuffer_get(fb); 2828 drm_framebuffer_get(fb);
2829 goto valid_fb; 2829 goto valid_fb;
2830 } 2830 }
@@ -9974,6 +9974,8 @@ found:
9974 ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector)); 9974 ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
9975 if (!ret) 9975 if (!ret)
9976 ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc)); 9976 ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
9977 if (!ret)
9978 ret = drm_atomic_add_affected_planes(restore_state, crtc);
9977 if (ret) { 9979 if (ret) {
9978 DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret); 9980 DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
9979 goto fail; 9981 goto fail;
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 6467a5cc2ca3..6490ee18727c 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -640,7 +640,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
640 if (!crtc->state->active) 640 if (!crtc->state->active)
641 continue; 641 continue;
642 642
643 WARN(!crtc->primary->fb, 643 WARN(!crtc->primary->state->fb,
644 "re-used BIOS config but lost an fb on crtc %d\n", 644 "re-used BIOS config but lost an fb on crtc %d\n",
645 crtc->base.id); 645 crtc->base.id);
646 } 646 }
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index fb50a9ddaae8..8918539a19aa 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1586,7 +1586,7 @@ static uint32_t mga_vga_calculate_mode_bandwidth(struct drm_display_mode *mode,
1586 1586
1587#define MODE_BANDWIDTH MODE_BAD 1587#define MODE_BANDWIDTH MODE_BAD
1588 1588
1589static int mga_vga_mode_valid(struct drm_connector *connector, 1589static enum drm_mode_status mga_vga_mode_valid(struct drm_connector *connector,
1590 struct drm_display_mode *mode) 1590 struct drm_display_mode *mode)
1591{ 1591{
1592 struct drm_device *dev = connector->dev; 1592 struct drm_device *dev = connector->dev;
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index 5cae8db9dcd4..ffe5137ccaf8 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -99,7 +99,8 @@ static const struct drm_mode_config_funcs mxsfb_mode_config_funcs = {
99}; 99};
100 100
101static void mxsfb_pipe_enable(struct drm_simple_display_pipe *pipe, 101static void mxsfb_pipe_enable(struct drm_simple_display_pipe *pipe,
102 struct drm_crtc_state *crtc_state) 102 struct drm_crtc_state *crtc_state,
103 struct drm_plane_state *plane_state)
103{ 104{
104 struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe); 105 struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe);
105 106
@@ -125,12 +126,6 @@ static void mxsfb_pipe_update(struct drm_simple_display_pipe *pipe,
125 mxsfb_plane_atomic_update(mxsfb, plane_state); 126 mxsfb_plane_atomic_update(mxsfb, plane_state);
126} 127}
127 128
128static int mxsfb_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
129 struct drm_plane_state *plane_state)
130{
131 return drm_gem_fb_prepare_fb(&pipe->plane, plane_state);
132}
133
134static int mxsfb_pipe_enable_vblank(struct drm_simple_display_pipe *pipe) 129static int mxsfb_pipe_enable_vblank(struct drm_simple_display_pipe *pipe)
135{ 130{
136 struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe); 131 struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe);
@@ -159,7 +154,7 @@ static struct drm_simple_display_pipe_funcs mxsfb_funcs = {
159 .enable = mxsfb_pipe_enable, 154 .enable = mxsfb_pipe_enable,
160 .disable = mxsfb_pipe_disable, 155 .disable = mxsfb_pipe_disable,
161 .update = mxsfb_pipe_update, 156 .update = mxsfb_pipe_update,
162 .prepare_fb = mxsfb_pipe_prepare_fb, 157 .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
163 .enable_vblank = mxsfb_pipe_enable_vblank, 158 .enable_vblank = mxsfb_pipe_enable_vblank,
164 .disable_vblank = mxsfb_pipe_disable_vblank, 159 .disable_vblank = mxsfb_pipe_disable_vblank,
165}; 160};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
index e4c8d310d870..81c3567d4e67 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
@@ -134,7 +134,7 @@ nvkm_cstate_find_best(struct nvkm_clk *clk, struct nvkm_pstate *pstate,
134 nvkm_volt_map(volt, volt->max2_id, clk->temp)); 134 nvkm_volt_map(volt, volt->max2_id, clk->temp));
135 135
136 for (cstate = start; &cstate->head != &pstate->list; 136 for (cstate = start; &cstate->head != &pstate->list;
137 cstate = list_entry(cstate->head.prev, typeof(*cstate), head)) { 137 cstate = list_prev_entry(cstate, head)) {
138 if (nvkm_cstate_valid(clk, cstate, max_volt, clk->temp)) 138 if (nvkm_cstate_valid(clk, cstate, max_volt, clk->temp))
139 break; 139 break;
140 } 140 }
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 3632854c2b91..ef3b0e3571ec 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -319,6 +319,9 @@ static int omap_modeset_init(struct drm_device *dev)
319 dev->mode_config.max_width = 8192; 319 dev->mode_config.max_width = 8192;
320 dev->mode_config.max_height = 8192; 320 dev->mode_config.max_height = 8192;
321 321
322 /* We want the zpos to be normalized */
323 dev->mode_config.normalize_zpos = true;
324
322 dev->mode_config.funcs = &omap_mode_config_funcs; 325 dev->mode_config.funcs = &omap_mode_config_funcs;
323 dev->mode_config.helper_private = &omap_mode_config_helper_funcs; 326 dev->mode_config.helper_private = &omap_mode_config_helper_funcs;
324 327
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 2899435cad6e..161233cbc9a0 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -65,7 +65,7 @@ static void omap_plane_atomic_update(struct drm_plane *plane,
65 info.rotation_type = OMAP_DSS_ROT_NONE; 65 info.rotation_type = OMAP_DSS_ROT_NONE;
66 info.rotation = DRM_MODE_ROTATE_0; 66 info.rotation = DRM_MODE_ROTATE_0;
67 info.global_alpha = 0xff; 67 info.global_alpha = 0xff;
68 info.zorder = state->zpos; 68 info.zorder = state->normalized_zpos;
69 69
70 /* update scanout: */ 70 /* update scanout: */
71 omap_framebuffer_update_scanout(state->fb, state, &info); 71 omap_framebuffer_update_scanout(state->fb, state, &info);
diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c
index 310646427907..19b0d006a54a 100644
--- a/drivers/gpu/drm/pl111/pl111_display.c
+++ b/drivers/gpu/drm/pl111/pl111_display.c
@@ -120,7 +120,8 @@ static int pl111_display_check(struct drm_simple_display_pipe *pipe,
120} 120}
121 121
122static void pl111_display_enable(struct drm_simple_display_pipe *pipe, 122static void pl111_display_enable(struct drm_simple_display_pipe *pipe,
123 struct drm_crtc_state *cstate) 123 struct drm_crtc_state *cstate,
124 struct drm_plane_state *plane_state)
124{ 125{
125 struct drm_crtc *crtc = &pipe->crtc; 126 struct drm_crtc *crtc = &pipe->crtc;
126 struct drm_plane *plane = &pipe->plane; 127 struct drm_plane *plane = &pipe->plane;
@@ -376,19 +377,13 @@ static void pl111_display_disable_vblank(struct drm_simple_display_pipe *pipe)
376 writel(0, priv->regs + priv->ienb); 377 writel(0, priv->regs + priv->ienb);
377} 378}
378 379
379static int pl111_display_prepare_fb(struct drm_simple_display_pipe *pipe,
380 struct drm_plane_state *plane_state)
381{
382 return drm_gem_fb_prepare_fb(&pipe->plane, plane_state);
383}
384
385static struct drm_simple_display_pipe_funcs pl111_display_funcs = { 380static struct drm_simple_display_pipe_funcs pl111_display_funcs = {
386 .mode_valid = pl111_mode_valid, 381 .mode_valid = pl111_mode_valid,
387 .check = pl111_display_check, 382 .check = pl111_display_check,
388 .enable = pl111_display_enable, 383 .enable = pl111_display_enable,
389 .disable = pl111_display_disable, 384 .disable = pl111_display_disable,
390 .update = pl111_display_update, 385 .update = pl111_display_update,
391 .prepare_fb = pl111_display_prepare_fb, 386 .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
392}; 387};
393 388
394static int pl111_clk_div_choose_div(struct clk_hw *hw, unsigned long rate, 389static int pl111_clk_div_choose_div(struct clk_hw *hw, unsigned long rate,
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index ecb35ed0eac8..820cbca3bf6e 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -1037,7 +1037,7 @@ static int qxl_conn_get_modes(struct drm_connector *connector)
1037 return ret; 1037 return ret;
1038} 1038}
1039 1039
1040static int qxl_conn_mode_valid(struct drm_connector *connector, 1040static enum drm_mode_status qxl_conn_mode_valid(struct drm_connector *connector,
1041 struct drm_display_mode *mode) 1041 struct drm_display_mode *mode)
1042{ 1042{
1043 struct drm_device *ddev = connector->dev; 1043 struct drm_device *ddev = connector->dev;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index 5c7ec15818c7..131d8e88b06c 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -87,7 +87,6 @@ struct rcar_du_device {
87 struct rcar_du_vsp vsps[RCAR_DU_MAX_VSPS]; 87 struct rcar_du_vsp vsps[RCAR_DU_MAX_VSPS];
88 88
89 struct { 89 struct {
90 struct drm_property *alpha;
91 struct drm_property *colorkey; 90 struct drm_property *colorkey;
92 } props; 91 } props;
93 92
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 0329b354bfa0..f4ac0f884f00 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -233,15 +233,7 @@ static int rcar_du_atomic_check(struct drm_device *dev,
233 struct rcar_du_device *rcdu = dev->dev_private; 233 struct rcar_du_device *rcdu = dev->dev_private;
234 int ret; 234 int ret;
235 235
236 ret = drm_atomic_helper_check_modeset(dev, state); 236 ret = drm_atomic_helper_check(dev, state);
237 if (ret)
238 return ret;
239
240 ret = drm_atomic_normalize_zpos(dev, state);
241 if (ret)
242 return ret;
243
244 ret = drm_atomic_helper_check_planes(dev, state);
245 if (ret) 237 if (ret)
246 return ret; 238 return ret;
247 239
@@ -415,11 +407,6 @@ static int rcar_du_encoders_init(struct rcar_du_device *rcdu)
415 407
416static int rcar_du_properties_init(struct rcar_du_device *rcdu) 408static int rcar_du_properties_init(struct rcar_du_device *rcdu)
417{ 409{
418 rcdu->props.alpha =
419 drm_property_create_range(rcdu->ddev, 0, "alpha", 0, 255);
420 if (rcdu->props.alpha == NULL)
421 return -ENOMEM;
422
423 /* 410 /*
424 * The color key is expressed as an RGB888 triplet stored in a 32-bit 411 * The color key is expressed as an RGB888 triplet stored in a 32-bit
425 * integer in XRGB8888 format. Bit 24 is used as a flag to disable (0) 412 * integer in XRGB8888 format. Bit 24 is used as a flag to disable (0)
@@ -529,6 +516,7 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
529 dev->mode_config.min_height = 0; 516 dev->mode_config.min_height = 0;
530 dev->mode_config.max_width = 4095; 517 dev->mode_config.max_width = 4095;
531 dev->mode_config.max_height = 2047; 518 dev->mode_config.max_height = 2047;
519 dev->mode_config.normalize_zpos = true;
532 dev->mode_config.funcs = &rcar_du_mode_config_funcs; 520 dev->mode_config.funcs = &rcar_du_mode_config_funcs;
533 dev->mode_config.helper_private = &rcar_du_mode_config_helper; 521 dev->mode_config.helper_private = &rcar_du_mode_config_helper;
534 522
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index 68556bd9dad2..c20f7ed48c8d 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -423,7 +423,7 @@ static void rcar_du_plane_setup_mode(struct rcar_du_group *rgrp,
423 rcar_du_plane_write(rgrp, index, PnALPHAR, PnALPHAR_ABIT_0); 423 rcar_du_plane_write(rgrp, index, PnALPHAR, PnALPHAR_ABIT_0);
424 else 424 else
425 rcar_du_plane_write(rgrp, index, PnALPHAR, 425 rcar_du_plane_write(rgrp, index, PnALPHAR,
426 PnALPHAR_ABIT_X | state->alpha); 426 PnALPHAR_ABIT_X | state->state.alpha >> 8);
427 427
428 pnmr = PnMR_BM_MD | state->format->pnmr; 428 pnmr = PnMR_BM_MD | state->format->pnmr;
429 429
@@ -692,11 +692,11 @@ static void rcar_du_plane_reset(struct drm_plane *plane)
692 692
693 state->hwindex = -1; 693 state->hwindex = -1;
694 state->source = RCAR_DU_PLANE_MEMORY; 694 state->source = RCAR_DU_PLANE_MEMORY;
695 state->alpha = 255;
696 state->colorkey = RCAR_DU_COLORKEY_NONE; 695 state->colorkey = RCAR_DU_COLORKEY_NONE;
697 state->state.zpos = plane->type == DRM_PLANE_TYPE_PRIMARY ? 0 : 1; 696 state->state.zpos = plane->type == DRM_PLANE_TYPE_PRIMARY ? 0 : 1;
698 697
699 plane->state = &state->state; 698 plane->state = &state->state;
699 plane->state->alpha = DRM_BLEND_ALPHA_OPAQUE;
700 plane->state->plane = plane; 700 plane->state->plane = plane;
701} 701}
702 702
@@ -708,9 +708,7 @@ static int rcar_du_plane_atomic_set_property(struct drm_plane *plane,
708 struct rcar_du_plane_state *rstate = to_rcar_plane_state(state); 708 struct rcar_du_plane_state *rstate = to_rcar_plane_state(state);
709 struct rcar_du_device *rcdu = to_rcar_plane(plane)->group->dev; 709 struct rcar_du_device *rcdu = to_rcar_plane(plane)->group->dev;
710 710
711 if (property == rcdu->props.alpha) 711 if (property == rcdu->props.colorkey)
712 rstate->alpha = val;
713 else if (property == rcdu->props.colorkey)
714 rstate->colorkey = val; 712 rstate->colorkey = val;
715 else 713 else
716 return -EINVAL; 714 return -EINVAL;
@@ -726,9 +724,7 @@ static int rcar_du_plane_atomic_get_property(struct drm_plane *plane,
726 container_of(state, const struct rcar_du_plane_state, state); 724 container_of(state, const struct rcar_du_plane_state, state);
727 struct rcar_du_device *rcdu = to_rcar_plane(plane)->group->dev; 725 struct rcar_du_device *rcdu = to_rcar_plane(plane)->group->dev;
728 726
729 if (property == rcdu->props.alpha) 727 if (property == rcdu->props.colorkey)
730 *val = rstate->alpha;
731 else if (property == rcdu->props.colorkey)
732 *val = rstate->colorkey; 728 *val = rstate->colorkey;
733 else 729 else
734 return -EINVAL; 730 return -EINVAL;
@@ -797,10 +793,9 @@ int rcar_du_planes_init(struct rcar_du_group *rgrp)
797 continue; 793 continue;
798 794
799 drm_object_attach_property(&plane->plane.base, 795 drm_object_attach_property(&plane->plane.base,
800 rcdu->props.alpha, 255);
801 drm_object_attach_property(&plane->plane.base,
802 rcdu->props.colorkey, 796 rcdu->props.colorkey,
803 RCAR_DU_COLORKEY_NONE); 797 RCAR_DU_COLORKEY_NONE);
798 drm_plane_create_alpha_property(&plane->plane);
804 drm_plane_create_zpos_property(&plane->plane, 1, 1, 7); 799 drm_plane_create_zpos_property(&plane->plane, 1, 1, 7);
805 } 800 }
806 801
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.h b/drivers/gpu/drm/rcar-du/rcar_du_plane.h
index 890321b4665d..5c19c69e4691 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.h
@@ -50,7 +50,6 @@ static inline struct rcar_du_plane *to_rcar_plane(struct drm_plane *plane)
50 * @state: base DRM plane state 50 * @state: base DRM plane state
51 * @format: information about the pixel format used by the plane 51 * @format: information about the pixel format used by the plane
52 * @hwindex: 0-based hardware plane index, -1 means unused 52 * @hwindex: 0-based hardware plane index, -1 means unused
53 * @alpha: value of the plane alpha property
54 * @colorkey: value of the plane colorkey property 53 * @colorkey: value of the plane colorkey property
55 */ 54 */
56struct rcar_du_plane_state { 55struct rcar_du_plane_state {
@@ -60,7 +59,6 @@ struct rcar_du_plane_state {
60 int hwindex; 59 int hwindex;
61 enum rcar_du_plane_source source; 60 enum rcar_du_plane_source source;
62 61
63 unsigned int alpha;
64 unsigned int colorkey; 62 unsigned int colorkey;
65}; 63};
66 64
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
index 2c260c33840b..b3bec0125696 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
@@ -54,6 +54,7 @@ void rcar_du_vsp_enable(struct rcar_du_crtc *crtc)
54 }; 54 };
55 struct rcar_du_plane_state state = { 55 struct rcar_du_plane_state state = {
56 .state = { 56 .state = {
57 .alpha = DRM_BLEND_ALPHA_OPAQUE,
57 .crtc = &crtc->crtc, 58 .crtc = &crtc->crtc,
58 .dst.x1 = 0, 59 .dst.x1 = 0,
59 .dst.y1 = 0, 60 .dst.y1 = 0,
@@ -67,7 +68,6 @@ void rcar_du_vsp_enable(struct rcar_du_crtc *crtc)
67 }, 68 },
68 .format = rcar_du_format_info(DRM_FORMAT_ARGB8888), 69 .format = rcar_du_format_info(DRM_FORMAT_ARGB8888),
69 .source = RCAR_DU_PLANE_VSPD1, 70 .source = RCAR_DU_PLANE_VSPD1,
70 .alpha = 255,
71 .colorkey = 0, 71 .colorkey = 0,
72 }; 72 };
73 73
@@ -173,7 +173,7 @@ static void rcar_du_vsp_plane_setup(struct rcar_du_vsp_plane *plane)
173 struct vsp1_du_atomic_config cfg = { 173 struct vsp1_du_atomic_config cfg = {
174 .pixelformat = 0, 174 .pixelformat = 0,
175 .pitch = fb->pitches[0], 175 .pitch = fb->pitches[0],
176 .alpha = state->alpha, 176 .alpha = state->state.alpha >> 8,
177 .zpos = state->state.zpos, 177 .zpos = state->state.zpos,
178 }; 178 };
179 unsigned int i; 179 unsigned int i;
@@ -335,44 +335,13 @@ static void rcar_du_vsp_plane_reset(struct drm_plane *plane)
335 if (state == NULL) 335 if (state == NULL)
336 return; 336 return;
337 337
338 state->alpha = 255; 338 state->state.alpha = DRM_BLEND_ALPHA_OPAQUE;
339 state->state.zpos = plane->type == DRM_PLANE_TYPE_PRIMARY ? 0 : 1; 339 state->state.zpos = plane->type == DRM_PLANE_TYPE_PRIMARY ? 0 : 1;
340 340
341 plane->state = &state->state; 341 plane->state = &state->state;
342 plane->state->plane = plane; 342 plane->state->plane = plane;
343} 343}
344 344
345static int rcar_du_vsp_plane_atomic_set_property(struct drm_plane *plane,
346 struct drm_plane_state *state, struct drm_property *property,
347 uint64_t val)
348{
349 struct rcar_du_vsp_plane_state *rstate = to_rcar_vsp_plane_state(state);
350 struct rcar_du_device *rcdu = to_rcar_vsp_plane(plane)->vsp->dev;
351
352 if (property == rcdu->props.alpha)
353 rstate->alpha = val;
354 else
355 return -EINVAL;
356
357 return 0;
358}
359
360static int rcar_du_vsp_plane_atomic_get_property(struct drm_plane *plane,
361 const struct drm_plane_state *state, struct drm_property *property,
362 uint64_t *val)
363{
364 const struct rcar_du_vsp_plane_state *rstate =
365 container_of(state, const struct rcar_du_vsp_plane_state, state);
366 struct rcar_du_device *rcdu = to_rcar_vsp_plane(plane)->vsp->dev;
367
368 if (property == rcdu->props.alpha)
369 *val = rstate->alpha;
370 else
371 return -EINVAL;
372
373 return 0;
374}
375
376static const struct drm_plane_funcs rcar_du_vsp_plane_funcs = { 345static const struct drm_plane_funcs rcar_du_vsp_plane_funcs = {
377 .update_plane = drm_atomic_helper_update_plane, 346 .update_plane = drm_atomic_helper_update_plane,
378 .disable_plane = drm_atomic_helper_disable_plane, 347 .disable_plane = drm_atomic_helper_disable_plane,
@@ -380,8 +349,6 @@ static const struct drm_plane_funcs rcar_du_vsp_plane_funcs = {
380 .destroy = drm_plane_cleanup, 349 .destroy = drm_plane_cleanup,
381 .atomic_duplicate_state = rcar_du_vsp_plane_atomic_duplicate_state, 350 .atomic_duplicate_state = rcar_du_vsp_plane_atomic_duplicate_state,
382 .atomic_destroy_state = rcar_du_vsp_plane_atomic_destroy_state, 351 .atomic_destroy_state = rcar_du_vsp_plane_atomic_destroy_state,
383 .atomic_set_property = rcar_du_vsp_plane_atomic_set_property,
384 .atomic_get_property = rcar_du_vsp_plane_atomic_get_property,
385}; 352};
386 353
387int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np, 354int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
@@ -438,8 +405,7 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
438 if (type == DRM_PLANE_TYPE_PRIMARY) 405 if (type == DRM_PLANE_TYPE_PRIMARY)
439 continue; 406 continue;
440 407
441 drm_object_attach_property(&plane->plane.base, 408 drm_plane_create_alpha_property(&plane->plane);
442 rcdu->props.alpha, 255);
443 drm_plane_create_zpos_property(&plane->plane, 1, 1, 409 drm_plane_create_zpos_property(&plane->plane, 1, 1,
444 vsp->num_planes - 1); 410 vsp->num_planes - 1);
445 } 411 }
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.h b/drivers/gpu/drm/rcar-du/rcar_du_vsp.h
index 4c5d7bbce6aa..8a8a25c8c8e8 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.h
@@ -44,15 +44,12 @@ static inline struct rcar_du_vsp_plane *to_rcar_vsp_plane(struct drm_plane *p)
44 * @state: base DRM plane state 44 * @state: base DRM plane state
45 * @format: information about the pixel format used by the plane 45 * @format: information about the pixel format used by the plane
46 * @sg_tables: scatter-gather tables for the frame buffer memory 46 * @sg_tables: scatter-gather tables for the frame buffer memory
47 * @alpha: value of the plane alpha property
48 */ 47 */
49struct rcar_du_vsp_plane_state { 48struct rcar_du_vsp_plane_state {
50 struct drm_plane_state state; 49 struct drm_plane_state state;
51 50
52 const struct rcar_du_format_info *format; 51 const struct rcar_du_format_info *format;
53 struct sg_table sg_tables[3]; 52 struct sg_table sg_tables[3];
54
55 unsigned int alpha;
56}; 53};
57 54
58static inline struct rcar_du_vsp_plane_state * 55static inline struct rcar_du_vsp_plane_state *
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index 3e8bf79bea58..080f05352195 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -77,13 +77,13 @@ struct rockchip_dp_device {
77 struct analogix_dp_plat_data plat_data; 77 struct analogix_dp_plat_data plat_data;
78}; 78};
79 79
80static void analogix_dp_psr_set(struct drm_encoder *encoder, bool enabled) 80static int analogix_dp_psr_set(struct drm_encoder *encoder, bool enabled)
81{ 81{
82 struct rockchip_dp_device *dp = to_dp(encoder); 82 struct rockchip_dp_device *dp = to_dp(encoder);
83 int ret; 83 int ret;
84 84
85 if (!analogix_dp_psr_enabled(dp->adp)) 85 if (!analogix_dp_psr_enabled(dp->adp))
86 return; 86 return 0;
87 87
88 DRM_DEV_DEBUG(dp->dev, "%s PSR...\n", enabled ? "Entry" : "Exit"); 88 DRM_DEV_DEBUG(dp->dev, "%s PSR...\n", enabled ? "Entry" : "Exit");
89 89
@@ -91,13 +91,13 @@ static void analogix_dp_psr_set(struct drm_encoder *encoder, bool enabled)
91 PSR_WAIT_LINE_FLAG_TIMEOUT_MS); 91 PSR_WAIT_LINE_FLAG_TIMEOUT_MS);
92 if (ret) { 92 if (ret) {
93 DRM_DEV_ERROR(dp->dev, "line flag interrupt did not arrive\n"); 93 DRM_DEV_ERROR(dp->dev, "line flag interrupt did not arrive\n");
94 return; 94 return -ETIMEDOUT;
95 } 95 }
96 96
97 if (enabled) 97 if (enabled)
98 analogix_dp_enable_psr(dp->adp); 98 return analogix_dp_enable_psr(dp->adp);
99 else 99 else
100 analogix_dp_disable_psr(dp->adp); 100 return analogix_dp_disable_psr(dp->adp);
101} 101}
102 102
103static int rockchip_dp_pre_init(struct rockchip_dp_device *dp) 103static int rockchip_dp_pre_init(struct rockchip_dp_device *dp)
@@ -109,7 +109,7 @@ static int rockchip_dp_pre_init(struct rockchip_dp_device *dp)
109 return 0; 109 return 0;
110} 110}
111 111
112static int rockchip_dp_poweron(struct analogix_dp_plat_data *plat_data) 112static int rockchip_dp_poweron_start(struct analogix_dp_plat_data *plat_data)
113{ 113{
114 struct rockchip_dp_device *dp = to_dp(plat_data); 114 struct rockchip_dp_device *dp = to_dp(plat_data);
115 int ret; 115 int ret;
@@ -127,7 +127,14 @@ static int rockchip_dp_poweron(struct analogix_dp_plat_data *plat_data)
127 return ret; 127 return ret;
128 } 128 }
129 129
130 return rockchip_drm_psr_activate(&dp->encoder); 130 return ret;
131}
132
133static int rockchip_dp_poweron_end(struct analogix_dp_plat_data *plat_data)
134{
135 struct rockchip_dp_device *dp = to_dp(plat_data);
136
137 return rockchip_drm_psr_inhibit_put(&dp->encoder);
131} 138}
132 139
133static int rockchip_dp_powerdown(struct analogix_dp_plat_data *plat_data) 140static int rockchip_dp_powerdown(struct analogix_dp_plat_data *plat_data)
@@ -135,7 +142,7 @@ static int rockchip_dp_powerdown(struct analogix_dp_plat_data *plat_data)
135 struct rockchip_dp_device *dp = to_dp(plat_data); 142 struct rockchip_dp_device *dp = to_dp(plat_data);
136 int ret; 143 int ret;
137 144
138 ret = rockchip_drm_psr_deactivate(&dp->encoder); 145 ret = rockchip_drm_psr_inhibit_get(&dp->encoder);
139 if (ret != 0) 146 if (ret != 0)
140 return ret; 147 return ret;
141 148
@@ -218,6 +225,7 @@ rockchip_dp_drm_encoder_atomic_check(struct drm_encoder *encoder,
218 struct drm_connector_state *conn_state) 225 struct drm_connector_state *conn_state)
219{ 226{
220 struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state); 227 struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
228 struct drm_display_info *di = &conn_state->connector->display_info;
221 229
222 /* 230 /*
223 * The hardware IC designed that VOP must output the RGB10 video 231 * The hardware IC designed that VOP must output the RGB10 video
@@ -229,6 +237,7 @@ rockchip_dp_drm_encoder_atomic_check(struct drm_encoder *encoder,
229 237
230 s->output_mode = ROCKCHIP_OUT_MODE_AAAA; 238 s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
231 s->output_type = DRM_MODE_CONNECTOR_eDP; 239 s->output_type = DRM_MODE_CONNECTOR_eDP;
240 s->output_bpc = di->bpc;
232 241
233 return 0; 242 return 0;
234} 243}
@@ -328,7 +337,8 @@ static int rockchip_dp_bind(struct device *dev, struct device *master,
328 dp->plat_data.encoder = &dp->encoder; 337 dp->plat_data.encoder = &dp->encoder;
329 338
330 dp->plat_data.dev_type = dp->data->chip_type; 339 dp->plat_data.dev_type = dp->data->chip_type;
331 dp->plat_data.power_on = rockchip_dp_poweron; 340 dp->plat_data.power_on_start = rockchip_dp_poweron_start;
341 dp->plat_data.power_on_end = rockchip_dp_poweron_end;
332 dp->plat_data.power_off = rockchip_dp_powerdown; 342 dp->plat_data.power_off = rockchip_dp_powerdown;
333 dp->plat_data.get_modes = rockchip_dp_get_modes; 343 dp->plat_data.get_modes = rockchip_dp_get_modes;
334 344
@@ -358,6 +368,8 @@ static void rockchip_dp_unbind(struct device *dev, struct device *master,
358 analogix_dp_unbind(dp->adp); 368 analogix_dp_unbind(dp->adp);
359 rockchip_drm_psr_unregister(&dp->encoder); 369 rockchip_drm_psr_unregister(&dp->encoder);
360 dp->encoder.funcs->destroy(&dp->encoder); 370 dp->encoder.funcs->destroy(&dp->encoder);
371
372 dp->adp = ERR_PTR(-ENODEV);
361} 373}
362 374
363static const struct component_ops rockchip_dp_component_ops = { 375static const struct component_ops rockchip_dp_component_ops = {
@@ -381,6 +393,7 @@ static int rockchip_dp_probe(struct platform_device *pdev)
381 return -ENOMEM; 393 return -ENOMEM;
382 394
383 dp->dev = dev; 395 dp->dev = dev;
396 dp->adp = ERR_PTR(-ENODEV);
384 dp->plat_data.panel = panel; 397 dp->plat_data.panel = panel;
385 398
386 ret = rockchip_dp_of_probe(dp); 399 ret = rockchip_dp_of_probe(dp);
@@ -404,6 +417,9 @@ static int rockchip_dp_suspend(struct device *dev)
404{ 417{
405 struct rockchip_dp_device *dp = dev_get_drvdata(dev); 418 struct rockchip_dp_device *dp = dev_get_drvdata(dev);
406 419
420 if (IS_ERR(dp->adp))
421 return 0;
422
407 return analogix_dp_suspend(dp->adp); 423 return analogix_dp_suspend(dp->adp);
408} 424}
409 425
@@ -411,6 +427,9 @@ static int rockchip_dp_resume(struct device *dev)
411{ 427{
412 struct rockchip_dp_device *dp = dev_get_drvdata(dev); 428 struct rockchip_dp_device *dp = dev_get_drvdata(dev);
413 429
430 if (IS_ERR(dp->adp))
431 return 0;
432
414 return analogix_dp_resume(dp->adp); 433 return analogix_dp_resume(dp->adp);
415} 434}
416#endif 435#endif
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index 9c064a40458b..3a6ebfc26036 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -36,6 +36,7 @@ struct rockchip_crtc_state {
36 struct drm_crtc_state base; 36 struct drm_crtc_state base;
37 int output_type; 37 int output_type;
38 int output_mode; 38 int output_mode;
39 int output_bpc;
39}; 40};
40#define to_rockchip_crtc_state(s) \ 41#define to_rockchip_crtc_state(s) \
41 container_of(s, struct rockchip_crtc_state, base) 42 container_of(s, struct rockchip_crtc_state, base)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index e266539e04e5..d4f4118b482d 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -167,8 +167,67 @@ err_gem_object_unreference:
167 return ERR_PTR(ret); 167 return ERR_PTR(ret);
168} 168}
169 169
170static void
171rockchip_drm_psr_inhibit_get_state(struct drm_atomic_state *state)
172{
173 struct drm_crtc *crtc;
174 struct drm_crtc_state *crtc_state;
175 struct drm_encoder *encoder;
176 u32 encoder_mask = 0;
177 int i;
178
179 for_each_old_crtc_in_state(state, crtc, crtc_state, i) {
180 encoder_mask |= crtc_state->encoder_mask;
181 encoder_mask |= crtc->state->encoder_mask;
182 }
183
184 drm_for_each_encoder_mask(encoder, state->dev, encoder_mask)
185 rockchip_drm_psr_inhibit_get(encoder);
186}
187
188static void
189rockchip_drm_psr_inhibit_put_state(struct drm_atomic_state *state)
190{
191 struct drm_crtc *crtc;
192 struct drm_crtc_state *crtc_state;
193 struct drm_encoder *encoder;
194 u32 encoder_mask = 0;
195 int i;
196
197 for_each_old_crtc_in_state(state, crtc, crtc_state, i) {
198 encoder_mask |= crtc_state->encoder_mask;
199 encoder_mask |= crtc->state->encoder_mask;
200 }
201
202 drm_for_each_encoder_mask(encoder, state->dev, encoder_mask)
203 rockchip_drm_psr_inhibit_put(encoder);
204}
205
206static void
207rockchip_atomic_helper_commit_tail_rpm(struct drm_atomic_state *old_state)
208{
209 struct drm_device *dev = old_state->dev;
210
211 rockchip_drm_psr_inhibit_get_state(old_state);
212
213 drm_atomic_helper_commit_modeset_disables(dev, old_state);
214
215 drm_atomic_helper_commit_modeset_enables(dev, old_state);
216
217 drm_atomic_helper_commit_planes(dev, old_state,
218 DRM_PLANE_COMMIT_ACTIVE_ONLY);
219
220 rockchip_drm_psr_inhibit_put_state(old_state);
221
222 drm_atomic_helper_commit_hw_done(old_state);
223
224 drm_atomic_helper_wait_for_vblanks(dev, old_state);
225
226 drm_atomic_helper_cleanup_planes(dev, old_state);
227}
228
170static const struct drm_mode_config_helper_funcs rockchip_mode_config_helpers = { 229static const struct drm_mode_config_helper_funcs rockchip_mode_config_helpers = {
171 .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm, 230 .atomic_commit_tail = rockchip_atomic_helper_commit_tail_rpm,
172}; 231};
173 232
174static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = { 233static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = {
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index 074db7a92809..a8db758d523e 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -357,8 +357,8 @@ err_free_rk_obj:
357} 357}
358 358
359/* 359/*
360 * rockchip_gem_free_object - (struct drm_driver)->gem_free_object callback 360 * rockchip_gem_free_object - (struct drm_driver)->gem_free_object_unlocked
361 * function 361 * callback function
362 */ 362 */
363void rockchip_gem_free_object(struct drm_gem_object *obj) 363void rockchip_gem_free_object(struct drm_gem_object *obj)
364{ 364{
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
index b339ca943139..79d00d861a31 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
@@ -20,42 +20,19 @@
20 20
21#define PSR_FLUSH_TIMEOUT_MS 100 21#define PSR_FLUSH_TIMEOUT_MS 100
22 22
23enum psr_state {
24 PSR_FLUSH,
25 PSR_ENABLE,
26 PSR_DISABLE,
27};
28
29struct psr_drv { 23struct psr_drv {
30 struct list_head list; 24 struct list_head list;
31 struct drm_encoder *encoder; 25 struct drm_encoder *encoder;
32 26
33 struct mutex lock; 27 struct mutex lock;
34 bool active; 28 int inhibit_count;
35 enum psr_state state; 29 bool enabled;
36 30
37 struct delayed_work flush_work; 31 struct delayed_work flush_work;
38 32
39 void (*set)(struct drm_encoder *encoder, bool enable); 33 int (*set)(struct drm_encoder *encoder, bool enable);
40}; 34};
41 35
42static struct psr_drv *find_psr_by_crtc(struct drm_crtc *crtc)
43{
44 struct rockchip_drm_private *drm_drv = crtc->dev->dev_private;
45 struct psr_drv *psr;
46
47 mutex_lock(&drm_drv->psr_list_lock);
48 list_for_each_entry(psr, &drm_drv->psr_list, list) {
49 if (psr->encoder->crtc == crtc)
50 goto out;
51 }
52 psr = ERR_PTR(-ENODEV);
53
54out:
55 mutex_unlock(&drm_drv->psr_list_lock);
56 return psr;
57}
58
59static struct psr_drv *find_psr_by_encoder(struct drm_encoder *encoder) 36static struct psr_drv *find_psr_by_encoder(struct drm_encoder *encoder)
60{ 37{
61 struct rockchip_drm_private *drm_drv = encoder->dev->dev_private; 38 struct rockchip_drm_private *drm_drv = encoder->dev->dev_private;
@@ -73,46 +50,22 @@ out:
73 return psr; 50 return psr;
74} 51}
75 52
76static void psr_set_state_locked(struct psr_drv *psr, enum psr_state state) 53static int psr_set_state_locked(struct psr_drv *psr, bool enable)
77{ 54{
78 /* 55 int ret;
79 * Allowed finite state machine:
80 *
81 * PSR_ENABLE < = = = = = > PSR_FLUSH
82 * | ^ |
83 * | | |
84 * v | |
85 * PSR_DISABLE < - - - - - - - - -
86 */
87 if (state == psr->state || !psr->active)
88 return;
89
90 /* Already disabled in flush, change the state, but not the hardware */
91 if (state == PSR_DISABLE && psr->state == PSR_FLUSH) {
92 psr->state = state;
93 return;
94 }
95 56
96 psr->state = state; 57 if (psr->inhibit_count > 0)
58 return -EINVAL;
97 59
98 /* Actually commit the state change to hardware */ 60 if (enable == psr->enabled)
99 switch (psr->state) { 61 return 0;
100 case PSR_ENABLE:
101 psr->set(psr->encoder, true);
102 break;
103 62
104 case PSR_DISABLE: 63 ret = psr->set(psr->encoder, enable);
105 case PSR_FLUSH: 64 if (ret)
106 psr->set(psr->encoder, false); 65 return ret;
107 break;
108 }
109}
110 66
111static void psr_set_state(struct psr_drv *psr, enum psr_state state) 67 psr->enabled = enable;
112{ 68 return 0;
113 mutex_lock(&psr->lock);
114 psr_set_state_locked(psr, state);
115 mutex_unlock(&psr->lock);
116} 69}
117 70
118static void psr_flush_handler(struct work_struct *work) 71static void psr_flush_handler(struct work_struct *work)
@@ -120,21 +73,24 @@ static void psr_flush_handler(struct work_struct *work)
120 struct psr_drv *psr = container_of(to_delayed_work(work), 73 struct psr_drv *psr = container_of(to_delayed_work(work),
121 struct psr_drv, flush_work); 74 struct psr_drv, flush_work);
122 75
123 /* If the state has changed since we initiated the flush, do nothing */
124 mutex_lock(&psr->lock); 76 mutex_lock(&psr->lock);
125 if (psr->state == PSR_FLUSH) 77 psr_set_state_locked(psr, true);
126 psr_set_state_locked(psr, PSR_ENABLE);
127 mutex_unlock(&psr->lock); 78 mutex_unlock(&psr->lock);
128} 79}
129 80
130/** 81/**
131 * rockchip_drm_psr_activate - activate PSR on the given pipe 82 * rockchip_drm_psr_inhibit_put - release PSR inhibit on given encoder
132 * @encoder: encoder to obtain the PSR encoder 83 * @encoder: encoder to obtain the PSR encoder
133 * 84 *
85 * Decrements PSR inhibit count on given encoder. Should be called only
86 * for a PSR inhibit count increment done before. If PSR inhibit counter
87 * reaches zero, PSR flush work is scheduled to make the hardware enter
88 * PSR mode in PSR_FLUSH_TIMEOUT_MS.
89 *
134 * Returns: 90 * Returns:
135 * Zero on success, negative errno on failure. 91 * Zero on success, negative errno on failure.
136 */ 92 */
137int rockchip_drm_psr_activate(struct drm_encoder *encoder) 93int rockchip_drm_psr_inhibit_put(struct drm_encoder *encoder)
138{ 94{
139 struct psr_drv *psr = find_psr_by_encoder(encoder); 95 struct psr_drv *psr = find_psr_by_encoder(encoder);
140 96
@@ -142,21 +98,30 @@ int rockchip_drm_psr_activate(struct drm_encoder *encoder)
142 return PTR_ERR(psr); 98 return PTR_ERR(psr);
143 99
144 mutex_lock(&psr->lock); 100 mutex_lock(&psr->lock);
145 psr->active = true; 101 --psr->inhibit_count;
102 WARN_ON(psr->inhibit_count < 0);
103 if (!psr->inhibit_count)
104 mod_delayed_work(system_wq, &psr->flush_work,
105 PSR_FLUSH_TIMEOUT_MS);
146 mutex_unlock(&psr->lock); 106 mutex_unlock(&psr->lock);
147 107
148 return 0; 108 return 0;
149} 109}
150EXPORT_SYMBOL(rockchip_drm_psr_activate); 110EXPORT_SYMBOL(rockchip_drm_psr_inhibit_put);
151 111
152/** 112/**
153 * rockchip_drm_psr_deactivate - deactivate PSR on the given pipe 113 * rockchip_drm_psr_inhibit_get - acquire PSR inhibit on given encoder
154 * @encoder: encoder to obtain the PSR encoder 114 * @encoder: encoder to obtain the PSR encoder
155 * 115 *
116 * Increments PSR inhibit count on given encoder. This function guarantees
117 * that after it returns PSR is turned off on given encoder and no PSR-related
118 * hardware state change occurs at least until a matching call to
119 * rockchip_drm_psr_inhibit_put() is done.
120 *
156 * Returns: 121 * Returns:
157 * Zero on success, negative errno on failure. 122 * Zero on success, negative errno on failure.
158 */ 123 */
159int rockchip_drm_psr_deactivate(struct drm_encoder *encoder) 124int rockchip_drm_psr_inhibit_get(struct drm_encoder *encoder)
160{ 125{
161 struct psr_drv *psr = find_psr_by_encoder(encoder); 126 struct psr_drv *psr = find_psr_by_encoder(encoder);
162 127
@@ -164,37 +129,25 @@ int rockchip_drm_psr_deactivate(struct drm_encoder *encoder)
164 return PTR_ERR(psr); 129 return PTR_ERR(psr);
165 130
166 mutex_lock(&psr->lock); 131 mutex_lock(&psr->lock);
167 psr->active = false; 132 psr_set_state_locked(psr, false);
133 ++psr->inhibit_count;
168 mutex_unlock(&psr->lock); 134 mutex_unlock(&psr->lock);
169 cancel_delayed_work_sync(&psr->flush_work); 135 cancel_delayed_work_sync(&psr->flush_work);
170 136
171 return 0; 137 return 0;
172} 138}
173EXPORT_SYMBOL(rockchip_drm_psr_deactivate); 139EXPORT_SYMBOL(rockchip_drm_psr_inhibit_get);
174 140
175static void rockchip_drm_do_flush(struct psr_drv *psr) 141static void rockchip_drm_do_flush(struct psr_drv *psr)
176{ 142{
177 psr_set_state(psr, PSR_FLUSH); 143 cancel_delayed_work_sync(&psr->flush_work);
178 mod_delayed_work(system_wq, &psr->flush_work, PSR_FLUSH_TIMEOUT_MS);
179}
180
181/**
182 * rockchip_drm_psr_flush - flush a single pipe
183 * @crtc: CRTC of the pipe to flush
184 *
185 * Returns:
186 * 0 on success, -errno on fail
187 */
188int rockchip_drm_psr_flush(struct drm_crtc *crtc)
189{
190 struct psr_drv *psr = find_psr_by_crtc(crtc);
191 if (IS_ERR(psr))
192 return PTR_ERR(psr);
193 144
194 rockchip_drm_do_flush(psr); 145 mutex_lock(&psr->lock);
195 return 0; 146 if (!psr_set_state_locked(psr, false))
147 mod_delayed_work(system_wq, &psr->flush_work,
148 PSR_FLUSH_TIMEOUT_MS);
149 mutex_unlock(&psr->lock);
196} 150}
197EXPORT_SYMBOL(rockchip_drm_psr_flush);
198 151
199/** 152/**
200 * rockchip_drm_psr_flush_all - force to flush all registered PSR encoders 153 * rockchip_drm_psr_flush_all - force to flush all registered PSR encoders
@@ -225,11 +178,16 @@ EXPORT_SYMBOL(rockchip_drm_psr_flush_all);
225 * @encoder: encoder that obtain the PSR function 178 * @encoder: encoder that obtain the PSR function
226 * @psr_set: call back to set PSR state 179 * @psr_set: call back to set PSR state
227 * 180 *
181 * The function returns with PSR inhibit counter initialized with one
182 * and the caller (typically encoder driver) needs to call
183 * rockchip_drm_psr_inhibit_put() when it becomes ready to accept PSR
184 * enable request.
185 *
228 * Returns: 186 * Returns:
229 * Zero on success, negative errno on failure. 187 * Zero on success, negative errno on failure.
230 */ 188 */
231int rockchip_drm_psr_register(struct drm_encoder *encoder, 189int rockchip_drm_psr_register(struct drm_encoder *encoder,
232 void (*psr_set)(struct drm_encoder *, bool enable)) 190 int (*psr_set)(struct drm_encoder *, bool enable))
233{ 191{
234 struct rockchip_drm_private *drm_drv = encoder->dev->dev_private; 192 struct rockchip_drm_private *drm_drv = encoder->dev->dev_private;
235 struct psr_drv *psr; 193 struct psr_drv *psr;
@@ -244,8 +202,8 @@ int rockchip_drm_psr_register(struct drm_encoder *encoder,
244 INIT_DELAYED_WORK(&psr->flush_work, psr_flush_handler); 202 INIT_DELAYED_WORK(&psr->flush_work, psr_flush_handler);
245 mutex_init(&psr->lock); 203 mutex_init(&psr->lock);
246 204
247 psr->active = true; 205 psr->inhibit_count = 1;
248 psr->state = PSR_DISABLE; 206 psr->enabled = false;
249 psr->encoder = encoder; 207 psr->encoder = encoder;
250 psr->set = psr_set; 208 psr->set = psr_set;
251 209
@@ -262,6 +220,11 @@ EXPORT_SYMBOL(rockchip_drm_psr_register);
262 * @encoder: encoder that obtain the PSR function 220 * @encoder: encoder that obtain the PSR function
263 * @psr_set: call back to set PSR state 221 * @psr_set: call back to set PSR state
264 * 222 *
223 * It is expected that the PSR inhibit counter is 1 when this function is
224 * called, which corresponds to a state when related encoder has been
225 * disconnected from any CRTCs and its driver called
226 * rockchip_drm_psr_inhibit_get() to stop the PSR logic.
227 *
265 * Returns: 228 * Returns:
266 * Zero on success, negative errno on failure. 229 * Zero on success, negative errno on failure.
267 */ 230 */
@@ -273,7 +236,12 @@ void rockchip_drm_psr_unregister(struct drm_encoder *encoder)
273 mutex_lock(&drm_drv->psr_list_lock); 236 mutex_lock(&drm_drv->psr_list_lock);
274 list_for_each_entry_safe(psr, n, &drm_drv->psr_list, list) { 237 list_for_each_entry_safe(psr, n, &drm_drv->psr_list, list) {
275 if (psr->encoder == encoder) { 238 if (psr->encoder == encoder) {
276 cancel_delayed_work_sync(&psr->flush_work); 239 /*
240 * Any other value would mean that the encoder
241 * is still in use.
242 */
243 WARN_ON(psr->inhibit_count != 1);
244
277 list_del(&psr->list); 245 list_del(&psr->list);
278 kfree(psr); 246 kfree(psr);
279 } 247 }
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_psr.h b/drivers/gpu/drm/rockchip/rockchip_drm_psr.h
index b1ea0155e57c..860c62494496 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_psr.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_psr.h
@@ -16,13 +16,12 @@
16#define __ROCKCHIP_DRM_PSR___ 16#define __ROCKCHIP_DRM_PSR___
17 17
18void rockchip_drm_psr_flush_all(struct drm_device *dev); 18void rockchip_drm_psr_flush_all(struct drm_device *dev);
19int rockchip_drm_psr_flush(struct drm_crtc *crtc);
20 19
21int rockchip_drm_psr_activate(struct drm_encoder *encoder); 20int rockchip_drm_psr_inhibit_put(struct drm_encoder *encoder);
22int rockchip_drm_psr_deactivate(struct drm_encoder *encoder); 21int rockchip_drm_psr_inhibit_get(struct drm_encoder *encoder);
23 22
24int rockchip_drm_psr_register(struct drm_encoder *encoder, 23int rockchip_drm_psr_register(struct drm_encoder *encoder,
25 void (*psr_set)(struct drm_encoder *, bool enable)); 24 int (*psr_set)(struct drm_encoder *, bool enable));
26void rockchip_drm_psr_unregister(struct drm_encoder *encoder); 25void rockchip_drm_psr_unregister(struct drm_encoder *encoder);
27 26
28#endif /* __ROCKCHIP_DRM_PSR__ */ 27#endif /* __ROCKCHIP_DRM_PSR__ */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 53d4afe15278..fe3faa7c38d9 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -925,6 +925,12 @@ static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
925 if (s->output_mode == ROCKCHIP_OUT_MODE_AAAA && 925 if (s->output_mode == ROCKCHIP_OUT_MODE_AAAA &&
926 !(vop_data->feature & VOP_FEATURE_OUTPUT_RGB10)) 926 !(vop_data->feature & VOP_FEATURE_OUTPUT_RGB10))
927 s->output_mode = ROCKCHIP_OUT_MODE_P888; 927 s->output_mode = ROCKCHIP_OUT_MODE_P888;
928
929 if (s->output_mode == ROCKCHIP_OUT_MODE_AAAA && s->output_bpc == 8)
930 VOP_REG_SET(vop, common, pre_dither_down, 1);
931 else
932 VOP_REG_SET(vop, common, pre_dither_down, 0);
933
928 VOP_REG_SET(vop, common, out_mode, s->output_mode); 934 VOP_REG_SET(vop, common, out_mode, s->output_mode);
929 935
930 VOP_REG_SET(vop, modeset, htotal_pw, (htotal << 16) | hsync_len); 936 VOP_REG_SET(vop, modeset, htotal_pw, (htotal << 16) | hsync_len);
@@ -1017,22 +1023,15 @@ static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
1017 continue; 1023 continue;
1018 1024
1019 drm_framebuffer_get(old_plane_state->fb); 1025 drm_framebuffer_get(old_plane_state->fb);
1026 WARN_ON(drm_crtc_vblank_get(crtc) != 0);
1020 drm_flip_work_queue(&vop->fb_unref_work, old_plane_state->fb); 1027 drm_flip_work_queue(&vop->fb_unref_work, old_plane_state->fb);
1021 set_bit(VOP_PENDING_FB_UNREF, &vop->pending); 1028 set_bit(VOP_PENDING_FB_UNREF, &vop->pending);
1022 WARN_ON(drm_crtc_vblank_get(crtc) != 0);
1023 } 1029 }
1024} 1030}
1025 1031
1026static void vop_crtc_atomic_begin(struct drm_crtc *crtc,
1027 struct drm_crtc_state *old_crtc_state)
1028{
1029 rockchip_drm_psr_flush(crtc);
1030}
1031
1032static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = { 1032static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = {
1033 .mode_fixup = vop_crtc_mode_fixup, 1033 .mode_fixup = vop_crtc_mode_fixup,
1034 .atomic_flush = vop_crtc_atomic_flush, 1034 .atomic_flush = vop_crtc_atomic_flush,
1035 .atomic_begin = vop_crtc_atomic_begin,
1036 .atomic_enable = vop_crtc_atomic_enable, 1035 .atomic_enable = vop_crtc_atomic_enable,
1037 .atomic_disable = vop_crtc_atomic_disable, 1036 .atomic_disable = vop_crtc_atomic_disable,
1038}; 1037};
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 56bbd2e2a8ef..084acdd0019a 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -67,6 +67,7 @@ struct vop_common {
67 struct vop_reg cfg_done; 67 struct vop_reg cfg_done;
68 struct vop_reg dsp_blank; 68 struct vop_reg dsp_blank;
69 struct vop_reg data_blank; 69 struct vop_reg data_blank;
70 struct vop_reg pre_dither_down;
70 struct vop_reg dither_down; 71 struct vop_reg dither_down;
71 struct vop_reg dither_up; 72 struct vop_reg dither_up;
72 struct vop_reg gate_en; 73 struct vop_reg gate_en;
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index 2e4eea3459fe..08023d3ecb76 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -264,6 +264,7 @@ static const struct vop_common rk3288_common = {
264 .standby = VOP_REG_SYNC(RK3288_SYS_CTRL, 0x1, 22), 264 .standby = VOP_REG_SYNC(RK3288_SYS_CTRL, 0x1, 22),
265 .gate_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 23), 265 .gate_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 23),
266 .mmu_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 20), 266 .mmu_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 20),
267 .pre_dither_down = VOP_REG(RK3288_DSP_CTRL1, 0x1, 1),
267 .dither_down = VOP_REG(RK3288_DSP_CTRL1, 0xf, 1), 268 .dither_down = VOP_REG(RK3288_DSP_CTRL1, 0xf, 1),
268 .dither_up = VOP_REG(RK3288_DSP_CTRL1, 0x1, 6), 269 .dither_up = VOP_REG(RK3288_DSP_CTRL1, 0x1, 6),
269 .data_blank = VOP_REG(RK3288_DSP_CTRL0, 0x1, 19), 270 .data_blank = VOP_REG(RK3288_DSP_CTRL0, 0x1, 19),
diff --git a/drivers/gpu/drm/sti/Kconfig b/drivers/gpu/drm/sti/Kconfig
index cca4b3c9aeb5..1963cc1b1cc5 100644
--- a/drivers/gpu/drm/sti/Kconfig
+++ b/drivers/gpu/drm/sti/Kconfig
@@ -1,6 +1,6 @@
1config DRM_STI 1config DRM_STI
2 tristate "DRM Support for STMicroelectronics SoC stiH4xx Series" 2 tristate "DRM Support for STMicroelectronics SoC stiH4xx Series"
3 depends on DRM && (ARCH_STI || ARCH_MULTIPLATFORM) 3 depends on OF && DRM && (ARCH_STI || ARCH_MULTIPLATFORM)
4 select RESET_CONTROLLER 4 select RESET_CONTROLLER
5 select DRM_KMS_HELPER 5 select DRM_KMS_HELPER
6 select DRM_GEM_CMA_HELPER 6 select DRM_GEM_CMA_HELPER
@@ -8,6 +8,5 @@ config DRM_STI
8 select DRM_PANEL 8 select DRM_PANEL
9 select FW_LOADER 9 select FW_LOADER
10 select SND_SOC_HDMI_CODEC if SND_SOC 10 select SND_SOC_HDMI_CODEC if SND_SOC
11 select OF
12 help 11 help
13 Choose this option to enable DRM on STM stiH4xx chipset 12 Choose this option to enable DRM on STM stiH4xx chipset
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 55b6967d27e1..90c46b49c931 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -119,30 +119,10 @@ err:
119 return ret; 119 return ret;
120} 120}
121 121
122static int sti_atomic_check(struct drm_device *dev,
123 struct drm_atomic_state *state)
124{
125 int ret;
126
127 ret = drm_atomic_helper_check_modeset(dev, state);
128 if (ret)
129 return ret;
130
131 ret = drm_atomic_normalize_zpos(dev, state);
132 if (ret)
133 return ret;
134
135 ret = drm_atomic_helper_check_planes(dev, state);
136 if (ret)
137 return ret;
138
139 return ret;
140}
141
142static const struct drm_mode_config_funcs sti_mode_config_funcs = { 122static const struct drm_mode_config_funcs sti_mode_config_funcs = {
143 .fb_create = drm_gem_fb_create, 123 .fb_create = drm_gem_fb_create,
144 .output_poll_changed = drm_fb_helper_output_poll_changed, 124 .output_poll_changed = drm_fb_helper_output_poll_changed,
145 .atomic_check = sti_atomic_check, 125 .atomic_check = drm_atomic_helper_check,
146 .atomic_commit = drm_atomic_helper_commit, 126 .atomic_commit = drm_atomic_helper_commit,
147}; 127};
148 128
@@ -160,6 +140,8 @@ static void sti_mode_config_init(struct drm_device *dev)
160 dev->mode_config.max_height = STI_MAX_FB_HEIGHT; 140 dev->mode_config.max_height = STI_MAX_FB_HEIGHT;
161 141
162 dev->mode_config.funcs = &sti_mode_config_funcs; 142 dev->mode_config.funcs = &sti_mode_config_funcs;
143
144 dev->mode_config.normalize_zpos = true;
163} 145}
164 146
165DEFINE_DRM_GEM_CMA_FOPS(sti_driver_fops); 147DEFINE_DRM_GEM_CMA_FOPS(sti_driver_fops);
diff --git a/drivers/gpu/drm/sti/sti_plane.c b/drivers/gpu/drm/sti/sti_plane.c
index b074609c960a..b48cd86e0250 100644
--- a/drivers/gpu/drm/sti/sti_plane.c
+++ b/drivers/gpu/drm/sti/sti_plane.c
@@ -40,6 +40,7 @@ void sti_plane_update_fps(struct sti_plane *plane,
40 bool new_frame, 40 bool new_frame,
41 bool new_field) 41 bool new_field)
42{ 42{
43 struct drm_plane_state *state = plane->drm_plane.state;
43 ktime_t now; 44 ktime_t now;
44 struct sti_fps_info *fps; 45 struct sti_fps_info *fps;
45 int fpks, fipks, ms_since_last, num_frames, num_fields; 46 int fpks, fipks, ms_since_last, num_frames, num_fields;
@@ -66,14 +67,14 @@ void sti_plane_update_fps(struct sti_plane *plane,
66 fps->last_timestamp = now; 67 fps->last_timestamp = now;
67 fps->last_frame_counter = fps->curr_frame_counter; 68 fps->last_frame_counter = fps->curr_frame_counter;
68 69
69 if (plane->drm_plane.fb) { 70 if (state->fb) {
70 fpks = (num_frames * 1000000) / ms_since_last; 71 fpks = (num_frames * 1000000) / ms_since_last;
71 snprintf(plane->fps_info.fps_str, FPS_LENGTH, 72 snprintf(plane->fps_info.fps_str, FPS_LENGTH,
72 "%-8s %4dx%-4d %.4s @ %3d.%-3.3d fps (%s)", 73 "%-8s %4dx%-4d %.4s @ %3d.%-3.3d fps (%s)",
73 plane->drm_plane.name, 74 plane->drm_plane.name,
74 plane->drm_plane.fb->width, 75 state->fb->width,
75 plane->drm_plane.fb->height, 76 state->fb->height,
76 (char *)&plane->drm_plane.fb->format->format, 77 (char *)&state->fb->format->format,
77 fpks / 1000, fpks % 1000, 78 fpks / 1000, fpks % 1000,
78 sti_plane_to_str(plane)); 79 sti_plane_to_str(plane));
79 } 80 }
diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index 9ab00a87f7cc..8698e08313e1 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -72,8 +72,6 @@ static struct drm_driver drv_driver = {
72 .gem_prime_vmap = drm_gem_cma_prime_vmap, 72 .gem_prime_vmap = drm_gem_cma_prime_vmap,
73 .gem_prime_vunmap = drm_gem_cma_prime_vunmap, 73 .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
74 .gem_prime_mmap = drm_gem_cma_prime_mmap, 74 .gem_prime_mmap = drm_gem_cma_prime_mmap,
75 .enable_vblank = ltdc_crtc_enable_vblank,
76 .disable_vblank = ltdc_crtc_disable_vblank,
77}; 75};
78 76
79static int drv_load(struct drm_device *ddev) 77static int drv_load(struct drm_device *ddev)
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index 1a3277e483d5..e3121d9e4230 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -392,9 +392,6 @@ static void ltdc_crtc_update_clut(struct drm_crtc *crtc)
392 u32 val; 392 u32 val;
393 int i; 393 int i;
394 394
395 if (!crtc || !crtc->state)
396 return;
397
398 if (!crtc->state->color_mgmt_changed || !crtc->state->gamma_lut) 395 if (!crtc->state->color_mgmt_changed || !crtc->state->gamma_lut)
399 return; 396 return;
400 397
@@ -569,9 +566,9 @@ static const struct drm_crtc_helper_funcs ltdc_crtc_helper_funcs = {
569 .atomic_disable = ltdc_crtc_atomic_disable, 566 .atomic_disable = ltdc_crtc_atomic_disable,
570}; 567};
571 568
572int ltdc_crtc_enable_vblank(struct drm_device *ddev, unsigned int pipe) 569static int ltdc_crtc_enable_vblank(struct drm_crtc *crtc)
573{ 570{
574 struct ltdc_device *ldev = ddev->dev_private; 571 struct ltdc_device *ldev = crtc_to_ltdc(crtc);
575 572
576 DRM_DEBUG_DRIVER("\n"); 573 DRM_DEBUG_DRIVER("\n");
577 reg_set(ldev->regs, LTDC_IER, IER_LIE); 574 reg_set(ldev->regs, LTDC_IER, IER_LIE);
@@ -579,9 +576,9 @@ int ltdc_crtc_enable_vblank(struct drm_device *ddev, unsigned int pipe)
579 return 0; 576 return 0;
580} 577}
581 578
582void ltdc_crtc_disable_vblank(struct drm_device *ddev, unsigned int pipe) 579static void ltdc_crtc_disable_vblank(struct drm_crtc *crtc)
583{ 580{
584 struct ltdc_device *ldev = ddev->dev_private; 581 struct ltdc_device *ldev = crtc_to_ltdc(crtc);
585 582
586 DRM_DEBUG_DRIVER("\n"); 583 DRM_DEBUG_DRIVER("\n");
587 reg_clear(ldev->regs, LTDC_IER, IER_LIE); 584 reg_clear(ldev->regs, LTDC_IER, IER_LIE);
@@ -594,6 +591,8 @@ static const struct drm_crtc_funcs ltdc_crtc_funcs = {
594 .reset = drm_atomic_helper_crtc_reset, 591 .reset = drm_atomic_helper_crtc_reset,
595 .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, 592 .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
596 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, 593 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
594 .enable_vblank = ltdc_crtc_enable_vblank,
595 .disable_vblank = ltdc_crtc_disable_vblank,
597 .gamma_set = drm_atomic_helper_legacy_gamma_set, 596 .gamma_set = drm_atomic_helper_legacy_gamma_set,
598}; 597};
599 598
@@ -727,6 +726,8 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
727 reg_update_bits(ldev->regs, LTDC_L1CR + lofs, 726 reg_update_bits(ldev->regs, LTDC_L1CR + lofs,
728 LXCR_LEN | LXCR_CLUTEN, val); 727 LXCR_LEN | LXCR_CLUTEN, val);
729 728
729 ldev->plane_fpsi[plane->index].counter++;
730
730 mutex_lock(&ldev->err_lock); 731 mutex_lock(&ldev->err_lock);
731 if (ldev->error_status & ISR_FUIF) { 732 if (ldev->error_status & ISR_FUIF) {
732 DRM_DEBUG_DRIVER("Fifo underrun\n"); 733 DRM_DEBUG_DRIVER("Fifo underrun\n");
@@ -752,6 +753,25 @@ static void ltdc_plane_atomic_disable(struct drm_plane *plane,
752 oldstate->crtc->base.id, plane->base.id); 753 oldstate->crtc->base.id, plane->base.id);
753} 754}
754 755
756static void ltdc_plane_atomic_print_state(struct drm_printer *p,
757 const struct drm_plane_state *state)
758{
759 struct drm_plane *plane = state->plane;
760 struct ltdc_device *ldev = plane_to_ltdc(plane);
761 struct fps_info *fpsi = &ldev->plane_fpsi[plane->index];
762 int ms_since_last;
763 ktime_t now;
764
765 now = ktime_get();
766 ms_since_last = ktime_to_ms(ktime_sub(now, fpsi->last_timestamp));
767
768 drm_printf(p, "\tuser_updates=%dfps\n",
769 DIV_ROUND_CLOSEST(fpsi->counter * 1000, ms_since_last));
770
771 fpsi->last_timestamp = now;
772 fpsi->counter = 0;
773}
774
755static const struct drm_plane_funcs ltdc_plane_funcs = { 775static const struct drm_plane_funcs ltdc_plane_funcs = {
756 .update_plane = drm_atomic_helper_update_plane, 776 .update_plane = drm_atomic_helper_update_plane,
757 .disable_plane = drm_atomic_helper_disable_plane, 777 .disable_plane = drm_atomic_helper_disable_plane,
@@ -759,6 +779,7 @@ static const struct drm_plane_funcs ltdc_plane_funcs = {
759 .reset = drm_atomic_helper_plane_reset, 779 .reset = drm_atomic_helper_plane_reset,
760 .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, 780 .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
761 .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, 781 .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
782 .atomic_print_state = ltdc_plane_atomic_print_state,
762}; 783};
763 784
764static const struct drm_plane_helper_funcs ltdc_plane_helper_funcs = { 785static const struct drm_plane_helper_funcs ltdc_plane_helper_funcs = {
diff --git a/drivers/gpu/drm/stm/ltdc.h b/drivers/gpu/drm/stm/ltdc.h
index edb268129c54..1e16d6afb0d2 100644
--- a/drivers/gpu/drm/stm/ltdc.h
+++ b/drivers/gpu/drm/stm/ltdc.h
@@ -20,6 +20,13 @@ struct ltdc_caps {
20 bool non_alpha_only_l1; /* non-native no-alpha formats on layer 1 */ 20 bool non_alpha_only_l1; /* non-native no-alpha formats on layer 1 */
21}; 21};
22 22
23#define LTDC_MAX_LAYER 4
24
25struct fps_info {
26 unsigned int counter;
27 ktime_t last_timestamp;
28};
29
23struct ltdc_device { 30struct ltdc_device {
24 void __iomem *regs; 31 void __iomem *regs;
25 struct clk *pixel_clk; /* lcd pixel clock */ 32 struct clk *pixel_clk; /* lcd pixel clock */
@@ -27,10 +34,9 @@ struct ltdc_device {
27 struct ltdc_caps caps; 34 struct ltdc_caps caps;
28 u32 error_status; 35 u32 error_status;
29 u32 irq_status; 36 u32 irq_status;
37 struct fps_info plane_fpsi[LTDC_MAX_LAYER];
30}; 38};
31 39
32int ltdc_crtc_enable_vblank(struct drm_device *dev, unsigned int pipe);
33void ltdc_crtc_disable_vblank(struct drm_device *dev, unsigned int pipe);
34int ltdc_load(struct drm_device *ddev); 40int ltdc_load(struct drm_device *ddev);
35void ltdc_unload(struct drm_device *ddev); 41void ltdc_unload(struct drm_device *ddev);
36 42
diff --git a/drivers/gpu/drm/sun4i/Kconfig b/drivers/gpu/drm/sun4i/Kconfig
index eee6bc0eaf97..156a865c3e6d 100644
--- a/drivers/gpu/drm/sun4i/Kconfig
+++ b/drivers/gpu/drm/sun4i/Kconfig
@@ -40,6 +40,16 @@ config DRM_SUN4I_BACKEND
40 do some alpha blending and feed graphics to TCON. If M is 40 do some alpha blending and feed graphics to TCON. If M is
41 selected the module will be called sun4i-backend. 41 selected the module will be called sun4i-backend.
42 42
43config DRM_SUN6I_DSI
44 tristate "Allwinner A31 MIPI-DSI Controller Support"
45 default MACH_SUN8I
46 select CRC_CCITT
47 select DRM_MIPI_DSI
48 help
49 Choose this option if you want have an Allwinner SoC with
50 MIPI-DSI support. If M is selected the module will be called
51 sun6i-dsi
52
43config DRM_SUN8I_DW_HDMI 53config DRM_SUN8I_DW_HDMI
44 tristate "Support for Allwinner version of DesignWare HDMI" 54 tristate "Support for Allwinner version of DesignWare HDMI"
45 depends on DRM_SUN4I 55 depends on DRM_SUN4I
diff --git a/drivers/gpu/drm/sun4i/Makefile b/drivers/gpu/drm/sun4i/Makefile
index 330843ce4280..2589f4acd5ae 100644
--- a/drivers/gpu/drm/sun4i/Makefile
+++ b/drivers/gpu/drm/sun4i/Makefile
@@ -24,6 +24,9 @@ sun4i-tcon-y += sun4i_lvds.o
24sun4i-tcon-y += sun4i_tcon.o 24sun4i-tcon-y += sun4i_tcon.o
25sun4i-tcon-y += sun4i_rgb.o 25sun4i-tcon-y += sun4i_rgb.o
26 26
27sun6i-dsi-y += sun6i_mipi_dphy.o
28sun6i-dsi-y += sun6i_mipi_dsi.o
29
27obj-$(CONFIG_DRM_SUN4I) += sun4i-drm.o 30obj-$(CONFIG_DRM_SUN4I) += sun4i-drm.o
28obj-$(CONFIG_DRM_SUN4I) += sun4i-tcon.o 31obj-$(CONFIG_DRM_SUN4I) += sun4i-tcon.o
29obj-$(CONFIG_DRM_SUN4I) += sun4i_tv.o 32obj-$(CONFIG_DRM_SUN4I) += sun4i_tv.o
@@ -31,5 +34,6 @@ obj-$(CONFIG_DRM_SUN4I) += sun6i_drc.o
31 34
32obj-$(CONFIG_DRM_SUN4I_BACKEND) += sun4i-backend.o sun4i-frontend.o 35obj-$(CONFIG_DRM_SUN4I_BACKEND) += sun4i-backend.o sun4i-frontend.o
33obj-$(CONFIG_DRM_SUN4I_HDMI) += sun4i-drm-hdmi.o 36obj-$(CONFIG_DRM_SUN4I_HDMI) += sun4i-drm-hdmi.o
37obj-$(CONFIG_DRM_SUN6I_DSI) += sun6i-dsi.o
34obj-$(CONFIG_DRM_SUN8I_DW_HDMI) += sun8i-drm-hdmi.o 38obj-$(CONFIG_DRM_SUN8I_DW_HDMI) += sun8i-drm-hdmi.o
35obj-$(CONFIG_DRM_SUN8I_MIXER) += sun8i-mixer.o 39obj-$(CONFIG_DRM_SUN8I_MIXER) += sun8i-mixer.o
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index 9bad54f3de38..de0a76dfa1a2 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -295,6 +295,15 @@ int sun4i_backend_update_layer_formats(struct sun4i_backend *backend,
295 DRM_DEBUG_DRIVER("Switching display backend interlaced mode %s\n", 295 DRM_DEBUG_DRIVER("Switching display backend interlaced mode %s\n",
296 interlaced ? "on" : "off"); 296 interlaced ? "on" : "off");
297 297
298 val = SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA(state->alpha >> 8);
299 if (state->alpha != DRM_BLEND_ALPHA_OPAQUE)
300 val |= SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_EN;
301 regmap_update_bits(backend->engine.regs,
302 SUN4I_BACKEND_ATTCTL_REG0(layer),
303 SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_MASK |
304 SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_EN,
305 val);
306
298 if (sun4i_backend_format_is_yuv(fb->format->format)) 307 if (sun4i_backend_format_is_yuv(fb->format->format))
299 return sun4i_backend_update_yuv_format(backend, layer, plane); 308 return sun4i_backend_update_yuv_format(backend, layer, plane);
300 309
@@ -490,7 +499,7 @@ static int sun4i_backend_atomic_check(struct sunxi_engine *engine,
490 DRM_DEBUG_DRIVER("Plane FB format is %s\n", 499 DRM_DEBUG_DRIVER("Plane FB format is %s\n",
491 drm_get_format_name(fb->format->format, 500 drm_get_format_name(fb->format->format,
492 &format_name)); 501 &format_name));
493 if (fb->format->has_alpha) 502 if (fb->format->has_alpha || (plane_state->alpha != DRM_BLEND_ALPHA_OPAQUE))
494 num_alpha_planes++; 503 num_alpha_planes++;
495 504
496 if (sun4i_backend_format_is_yuv(fb->format->format)) { 505 if (sun4i_backend_format_is_yuv(fb->format->format)) {
@@ -548,7 +557,8 @@ static int sun4i_backend_atomic_check(struct sunxi_engine *engine,
548 } 557 }
549 558
550 /* We can't have an alpha plane at the lowest position */ 559 /* We can't have an alpha plane at the lowest position */
551 if (plane_states[0]->fb->format->has_alpha) 560 if (plane_states[0]->fb->format->has_alpha ||
561 (plane_states[0]->alpha != DRM_BLEND_ALPHA_OPAQUE))
552 return -EINVAL; 562 return -EINVAL;
553 563
554 for (i = 1; i < num_planes; i++) { 564 for (i = 1; i < num_planes; i++) {
@@ -560,7 +570,7 @@ static int sun4i_backend_atomic_check(struct sunxi_engine *engine,
560 * The only alpha position is the lowest plane of the 570 * The only alpha position is the lowest plane of the
561 * second pipe. 571 * second pipe.
562 */ 572 */
563 if (fb->format->has_alpha) 573 if (fb->format->has_alpha || (p_state->alpha != DRM_BLEND_ALPHA_OPAQUE))
564 current_pipe++; 574 current_pipe++;
565 575
566 s_state->pipe = current_pipe; 576 s_state->pipe = current_pipe;
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.h b/drivers/gpu/drm/sun4i/sun4i_backend.h
index 316f2179e9e1..4caee0392fa4 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.h
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.h
@@ -68,12 +68,15 @@
68#define SUN4I_BACKEND_CKMIN_REG 0x884 68#define SUN4I_BACKEND_CKMIN_REG 0x884
69#define SUN4I_BACKEND_CKCFG_REG 0x888 69#define SUN4I_BACKEND_CKCFG_REG 0x888
70#define SUN4I_BACKEND_ATTCTL_REG0(l) (0x890 + (0x4 * (l))) 70#define SUN4I_BACKEND_ATTCTL_REG0(l) (0x890 + (0x4 * (l)))
71#define SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_MASK GENMASK(31, 24)
72#define SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA(x) ((x) << 24)
71#define SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL_MASK BIT(15) 73#define SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL_MASK BIT(15)
72#define SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL(x) ((x) << 15) 74#define SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL(x) ((x) << 15)
73#define SUN4I_BACKEND_ATTCTL_REG0_LAY_PRISEL_MASK GENMASK(11, 10) 75#define SUN4I_BACKEND_ATTCTL_REG0_LAY_PRISEL_MASK GENMASK(11, 10)
74#define SUN4I_BACKEND_ATTCTL_REG0_LAY_PRISEL(x) ((x) << 10) 76#define SUN4I_BACKEND_ATTCTL_REG0_LAY_PRISEL(x) ((x) << 10)
75#define SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN BIT(2) 77#define SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN BIT(2)
76#define SUN4I_BACKEND_ATTCTL_REG0_LAY_VDOEN BIT(1) 78#define SUN4I_BACKEND_ATTCTL_REG0_LAY_VDOEN BIT(1)
79#define SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_EN BIT(0)
77 80
78#define SUN4I_BACKEND_ATTCTL_REG1(l) (0x8a0 + (0x4 * (l))) 81#define SUN4I_BACKEND_ATTCTL_REG1(l) (0x8a0 + (0x4 * (l)))
79#define SUN4I_BACKEND_ATTCTL_REG1_LAY_HSCAFCT GENMASK(15, 14) 82#define SUN4I_BACKEND_ATTCTL_REG1_LAY_HSCAFCT GENMASK(15, 14)
diff --git a/drivers/gpu/drm/sun4i/sun4i_layer.c b/drivers/gpu/drm/sun4i/sun4i_layer.c
index 2949a3c912c1..750ad24de1d7 100644
--- a/drivers/gpu/drm/sun4i/sun4i_layer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_layer.c
@@ -37,6 +37,7 @@ static void sun4i_backend_layer_reset(struct drm_plane *plane)
37 if (state) { 37 if (state) {
38 plane->state = &state->state; 38 plane->state = &state->state;
39 plane->state->plane = plane; 39 plane->state->plane = plane;
40 plane->state->alpha = DRM_BLEND_ALPHA_OPAQUE;
40 plane->state->zpos = layer->id; 41 plane->state->zpos = layer->id;
41 } 42 }
42} 43}
@@ -167,6 +168,7 @@ static struct sun4i_layer *sun4i_layer_init_one(struct drm_device *drm,
167 &sun4i_backend_layer_helper_funcs); 168 &sun4i_backend_layer_helper_funcs);
168 layer->backend = backend; 169 layer->backend = backend;
169 170
171 drm_plane_create_alpha_property(&layer->plane);
170 drm_plane_create_zpos_property(&layer->plane, 0, 0, 172 drm_plane_create_zpos_property(&layer->plane, 0, 0,
171 SUN4I_BACKEND_NUM_LAYERS - 1); 173 SUN4I_BACKEND_NUM_LAYERS - 1);
172 174
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index c3d92d537240..08747fc3ee71 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -35,6 +35,7 @@
35#include "sun4i_lvds.h" 35#include "sun4i_lvds.h"
36#include "sun4i_rgb.h" 36#include "sun4i_rgb.h"
37#include "sun4i_tcon.h" 37#include "sun4i_tcon.h"
38#include "sun6i_mipi_dsi.h"
38#include "sunxi_engine.h" 39#include "sunxi_engine.h"
39 40
40static struct drm_connector *sun4i_tcon_get_connector(const struct drm_encoder *encoder) 41static struct drm_connector *sun4i_tcon_get_connector(const struct drm_encoder *encoder)
@@ -169,6 +170,7 @@ void sun4i_tcon_set_status(struct sun4i_tcon *tcon,
169 case DRM_MODE_ENCODER_LVDS: 170 case DRM_MODE_ENCODER_LVDS:
170 is_lvds = true; 171 is_lvds = true;
171 /* Fallthrough */ 172 /* Fallthrough */
173 case DRM_MODE_ENCODER_DSI:
172 case DRM_MODE_ENCODER_NONE: 174 case DRM_MODE_ENCODER_NONE:
173 channel = 0; 175 channel = 0;
174 break; 176 break;
@@ -201,7 +203,8 @@ void sun4i_tcon_enable_vblank(struct sun4i_tcon *tcon, bool enable)
201 DRM_DEBUG_DRIVER("%sabling VBLANK interrupt\n", enable ? "En" : "Dis"); 203 DRM_DEBUG_DRIVER("%sabling VBLANK interrupt\n", enable ? "En" : "Dis");
202 204
203 mask = SUN4I_TCON_GINT0_VBLANK_ENABLE(0) | 205 mask = SUN4I_TCON_GINT0_VBLANK_ENABLE(0) |
204 SUN4I_TCON_GINT0_VBLANK_ENABLE(1); 206 SUN4I_TCON_GINT0_VBLANK_ENABLE(1) |
207 SUN4I_TCON_GINT0_TCON0_TRI_FINISH_ENABLE;
205 208
206 if (enable) 209 if (enable)
207 val = mask; 210 val = mask;
@@ -273,6 +276,71 @@ static void sun4i_tcon0_mode_set_common(struct sun4i_tcon *tcon,
273 SUN4I_TCON0_BASIC0_Y(mode->crtc_vdisplay)); 276 SUN4I_TCON0_BASIC0_Y(mode->crtc_vdisplay));
274} 277}
275 278
279static void sun4i_tcon0_mode_set_cpu(struct sun4i_tcon *tcon,
280 struct mipi_dsi_device *device,
281 const struct drm_display_mode *mode)
282{
283 u8 bpp = mipi_dsi_pixel_format_to_bpp(device->format);
284 u8 lanes = device->lanes;
285 u32 block_space, start_delay;
286 u32 tcon_div;
287
288 tcon->dclk_min_div = 4;
289 tcon->dclk_max_div = 127;
290
291 sun4i_tcon0_mode_set_common(tcon, mode);
292
293 regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG,
294 SUN4I_TCON0_CTL_IF_MASK,
295 SUN4I_TCON0_CTL_IF_8080);
296
297 regmap_write(tcon->regs, SUN4I_TCON_ECC_FIFO_REG,
298 SUN4I_TCON_ECC_FIFO_EN);
299
300 regmap_write(tcon->regs, SUN4I_TCON0_CPU_IF_REG,
301 SUN4I_TCON0_CPU_IF_MODE_DSI |
302 SUN4I_TCON0_CPU_IF_TRI_FIFO_FLUSH |
303 SUN4I_TCON0_CPU_IF_TRI_FIFO_EN |
304 SUN4I_TCON0_CPU_IF_TRI_EN);
305
306 /*
307 * This looks suspicious, but it works...
308 *
309 * The datasheet says that this should be set higher than 20 *
310 * pixel cycle, but it's not clear what a pixel cycle is.
311 */
312 regmap_read(tcon->regs, SUN4I_TCON0_DCLK_REG, &tcon_div);
313 tcon_div &= GENMASK(6, 0);
314 block_space = mode->htotal * bpp / (tcon_div * lanes);
315 block_space -= mode->hdisplay + 40;
316
317 regmap_write(tcon->regs, SUN4I_TCON0_CPU_TRI0_REG,
318 SUN4I_TCON0_CPU_TRI0_BLOCK_SPACE(block_space) |
319 SUN4I_TCON0_CPU_TRI0_BLOCK_SIZE(mode->hdisplay));
320
321 regmap_write(tcon->regs, SUN4I_TCON0_CPU_TRI1_REG,
322 SUN4I_TCON0_CPU_TRI1_BLOCK_NUM(mode->vdisplay));
323
324 start_delay = (mode->crtc_vtotal - mode->crtc_vdisplay - 10 - 1);
325 start_delay = start_delay * mode->crtc_htotal * 149;
326 start_delay = start_delay / (mode->crtc_clock / 1000) / 8;
327 regmap_write(tcon->regs, SUN4I_TCON0_CPU_TRI2_REG,
328 SUN4I_TCON0_CPU_TRI2_TRANS_START_SET(10) |
329 SUN4I_TCON0_CPU_TRI2_START_DELAY(start_delay));
330
331 /*
332 * The Allwinner BSP has a comment that the period should be
333 * the display clock * 15, but uses an hardcoded 3000...
334 */
335 regmap_write(tcon->regs, SUN4I_TCON_SAFE_PERIOD_REG,
336 SUN4I_TCON_SAFE_PERIOD_NUM(3000) |
337 SUN4I_TCON_SAFE_PERIOD_MODE(3));
338
339 /* Enable the output on the pins */
340 regmap_write(tcon->regs, SUN4I_TCON0_IO_TRI_REG,
341 0xe0000000);
342}
343
276static void sun4i_tcon0_mode_set_lvds(struct sun4i_tcon *tcon, 344static void sun4i_tcon0_mode_set_lvds(struct sun4i_tcon *tcon,
277 const struct drm_encoder *encoder, 345 const struct drm_encoder *encoder,
278 const struct drm_display_mode *mode) 346 const struct drm_display_mode *mode)
@@ -538,7 +606,17 @@ void sun4i_tcon_mode_set(struct sun4i_tcon *tcon,
538 const struct drm_encoder *encoder, 606 const struct drm_encoder *encoder,
539 const struct drm_display_mode *mode) 607 const struct drm_display_mode *mode)
540{ 608{
609 struct sun6i_dsi *dsi;
610
541 switch (encoder->encoder_type) { 611 switch (encoder->encoder_type) {
612 case DRM_MODE_ENCODER_DSI:
613 /*
614 * This is not really elegant, but it's the "cleaner"
615 * way I could think of...
616 */
617 dsi = encoder_to_sun6i_dsi(encoder);
618 sun4i_tcon0_mode_set_cpu(tcon, dsi->device, mode);
619 break;
542 case DRM_MODE_ENCODER_LVDS: 620 case DRM_MODE_ENCODER_LVDS:
543 sun4i_tcon0_mode_set_lvds(tcon, encoder, mode); 621 sun4i_tcon0_mode_set_lvds(tcon, encoder, mode);
544 break; 622 break;
@@ -582,7 +660,8 @@ static irqreturn_t sun4i_tcon_handler(int irq, void *private)
582 regmap_read(tcon->regs, SUN4I_TCON_GINT0_REG, &status); 660 regmap_read(tcon->regs, SUN4I_TCON_GINT0_REG, &status);
583 661
584 if (!(status & (SUN4I_TCON_GINT0_VBLANK_INT(0) | 662 if (!(status & (SUN4I_TCON_GINT0_VBLANK_INT(0) |
585 SUN4I_TCON_GINT0_VBLANK_INT(1)))) 663 SUN4I_TCON_GINT0_VBLANK_INT(1) |
664 SUN4I_TCON_GINT0_TCON0_TRI_FINISH_INT)))
586 return IRQ_NONE; 665 return IRQ_NONE;
587 666
588 drm_crtc_handle_vblank(&scrtc->crtc); 667 drm_crtc_handle_vblank(&scrtc->crtc);
@@ -591,7 +670,8 @@ static irqreturn_t sun4i_tcon_handler(int irq, void *private)
591 /* Acknowledge the interrupt */ 670 /* Acknowledge the interrupt */
592 regmap_update_bits(tcon->regs, SUN4I_TCON_GINT0_REG, 671 regmap_update_bits(tcon->regs, SUN4I_TCON_GINT0_REG,
593 SUN4I_TCON_GINT0_VBLANK_INT(0) | 672 SUN4I_TCON_GINT0_VBLANK_INT(0) |
594 SUN4I_TCON_GINT0_VBLANK_INT(1), 673 SUN4I_TCON_GINT0_VBLANK_INT(1) |
674 SUN4I_TCON_GINT0_TCON0_TRI_FINISH_INT,
595 0); 675 0);
596 676
597 if (engine->ops->vblank_quirk) 677 if (engine->ops->vblank_quirk)
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index 161e09427124..f6a071cd5a6f 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -28,13 +28,32 @@
28 28
29#define SUN4I_TCON_GINT0_REG 0x4 29#define SUN4I_TCON_GINT0_REG 0x4
30#define SUN4I_TCON_GINT0_VBLANK_ENABLE(pipe) BIT(31 - (pipe)) 30#define SUN4I_TCON_GINT0_VBLANK_ENABLE(pipe) BIT(31 - (pipe))
31#define SUN4I_TCON_GINT0_TCON0_TRI_FINISH_ENABLE BIT(27)
32#define SUN4I_TCON_GINT0_TCON0_TRI_COUNTER_ENABLE BIT(26)
31#define SUN4I_TCON_GINT0_VBLANK_INT(pipe) BIT(15 - (pipe)) 33#define SUN4I_TCON_GINT0_VBLANK_INT(pipe) BIT(15 - (pipe))
34#define SUN4I_TCON_GINT0_TCON0_TRI_FINISH_INT BIT(11)
35#define SUN4I_TCON_GINT0_TCON0_TRI_COUNTER_INT BIT(10)
32 36
33#define SUN4I_TCON_GINT1_REG 0x8 37#define SUN4I_TCON_GINT1_REG 0x8
38
34#define SUN4I_TCON_FRM_CTL_REG 0x10 39#define SUN4I_TCON_FRM_CTL_REG 0x10
40#define SUN4I_TCON_FRM_CTL_EN BIT(31)
41
42#define SUN4I_TCON_FRM_SEED_PR_REG 0x14
43#define SUN4I_TCON_FRM_SEED_PG_REG 0x18
44#define SUN4I_TCON_FRM_SEED_PB_REG 0x1c
45#define SUN4I_TCON_FRM_SEED_LR_REG 0x20
46#define SUN4I_TCON_FRM_SEED_LG_REG 0x24
47#define SUN4I_TCON_FRM_SEED_LB_REG 0x28
48#define SUN4I_TCON_FRM_TBL0_REG 0x2c
49#define SUN4I_TCON_FRM_TBL1_REG 0x30
50#define SUN4I_TCON_FRM_TBL2_REG 0x34
51#define SUN4I_TCON_FRM_TBL3_REG 0x38
35 52
36#define SUN4I_TCON0_CTL_REG 0x40 53#define SUN4I_TCON0_CTL_REG 0x40
37#define SUN4I_TCON0_CTL_TCON_ENABLE BIT(31) 54#define SUN4I_TCON0_CTL_TCON_ENABLE BIT(31)
55#define SUN4I_TCON0_CTL_IF_MASK GENMASK(25, 24)
56#define SUN4I_TCON0_CTL_IF_8080 (1 << 24)
38#define SUN4I_TCON0_CTL_CLK_DELAY_MASK GENMASK(8, 4) 57#define SUN4I_TCON0_CTL_CLK_DELAY_MASK GENMASK(8, 4)
39#define SUN4I_TCON0_CTL_CLK_DELAY(delay) ((delay << 4) & SUN4I_TCON0_CTL_CLK_DELAY_MASK) 58#define SUN4I_TCON0_CTL_CLK_DELAY(delay) ((delay << 4) & SUN4I_TCON0_CTL_CLK_DELAY_MASK)
40#define SUN4I_TCON0_CTL_SRC_SEL_MASK GENMASK(2, 0) 59#define SUN4I_TCON0_CTL_SRC_SEL_MASK GENMASK(2, 0)
@@ -61,7 +80,14 @@
61#define SUN4I_TCON0_BASIC3_V_SYNC(height) (((height) - 1) & 0x7ff) 80#define SUN4I_TCON0_BASIC3_V_SYNC(height) (((height) - 1) & 0x7ff)
62 81
63#define SUN4I_TCON0_HV_IF_REG 0x58 82#define SUN4I_TCON0_HV_IF_REG 0x58
83
64#define SUN4I_TCON0_CPU_IF_REG 0x60 84#define SUN4I_TCON0_CPU_IF_REG 0x60
85#define SUN4I_TCON0_CPU_IF_MODE_MASK GENMASK(31, 28)
86#define SUN4I_TCON0_CPU_IF_MODE_DSI (1 << 28)
87#define SUN4I_TCON0_CPU_IF_TRI_FIFO_FLUSH BIT(16)
88#define SUN4I_TCON0_CPU_IF_TRI_FIFO_EN BIT(2)
89#define SUN4I_TCON0_CPU_IF_TRI_EN BIT(0)
90
65#define SUN4I_TCON0_CPU_WR_REG 0x64 91#define SUN4I_TCON0_CPU_WR_REG 0x64
66#define SUN4I_TCON0_CPU_RD0_REG 0x68 92#define SUN4I_TCON0_CPU_RD0_REG 0x68
67#define SUN4I_TCON0_CPU_RDA_REG 0x6c 93#define SUN4I_TCON0_CPU_RDA_REG 0x6c
@@ -128,6 +154,10 @@
128 154
129#define SUN4I_TCON1_IO_POL_REG 0xf0 155#define SUN4I_TCON1_IO_POL_REG 0xf0
130#define SUN4I_TCON1_IO_TRI_REG 0xf4 156#define SUN4I_TCON1_IO_TRI_REG 0xf4
157
158#define SUN4I_TCON_ECC_FIFO_REG 0xf8
159#define SUN4I_TCON_ECC_FIFO_EN BIT(3)
160
131#define SUN4I_TCON_CEU_CTL_REG 0x100 161#define SUN4I_TCON_CEU_CTL_REG 0x100
132#define SUN4I_TCON_CEU_MUL_RR_REG 0x110 162#define SUN4I_TCON_CEU_MUL_RR_REG 0x110
133#define SUN4I_TCON_CEU_MUL_RG_REG 0x114 163#define SUN4I_TCON_CEU_MUL_RG_REG 0x114
@@ -144,6 +174,22 @@
144#define SUN4I_TCON_CEU_RANGE_R_REG 0x140 174#define SUN4I_TCON_CEU_RANGE_R_REG 0x140
145#define SUN4I_TCON_CEU_RANGE_G_REG 0x144 175#define SUN4I_TCON_CEU_RANGE_G_REG 0x144
146#define SUN4I_TCON_CEU_RANGE_B_REG 0x148 176#define SUN4I_TCON_CEU_RANGE_B_REG 0x148
177
178#define SUN4I_TCON0_CPU_TRI0_REG 0x160
179#define SUN4I_TCON0_CPU_TRI0_BLOCK_SPACE(space) ((((space) - 1) & 0xfff) << 16)
180#define SUN4I_TCON0_CPU_TRI0_BLOCK_SIZE(size) (((size) - 1) & 0xfff)
181
182#define SUN4I_TCON0_CPU_TRI1_REG 0x164
183#define SUN4I_TCON0_CPU_TRI1_BLOCK_NUM(num) (((num) - 1) & 0xffff)
184
185#define SUN4I_TCON0_CPU_TRI2_REG 0x168
186#define SUN4I_TCON0_CPU_TRI2_START_DELAY(delay) (((delay) & 0xffff) << 16)
187#define SUN4I_TCON0_CPU_TRI2_TRANS_START_SET(set) ((set) & 0xfff)
188
189#define SUN4I_TCON_SAFE_PERIOD_REG 0x1f0
190#define SUN4I_TCON_SAFE_PERIOD_NUM(num) (((num) & 0xfff) << 16)
191#define SUN4I_TCON_SAFE_PERIOD_MODE(mode) ((mode) & 0x3)
192
147#define SUN4I_TCON_MUX_CTRL_REG 0x200 193#define SUN4I_TCON_MUX_CTRL_REG 0x200
148 194
149#define SUN4I_TCON0_LVDS_ANA0_REG 0x220 195#define SUN4I_TCON0_LVDS_ANA0_REG 0x220
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dphy.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dphy.c
new file mode 100644
index 000000000000..e4d19431fa0e
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dphy.c
@@ -0,0 +1,292 @@
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Copyright (c) 2016 Allwinnertech Co., Ltd.
4 * Copyright (C) 2017-2018 Bootlin
5 *
6 * Maxime Ripard <maxime.ripard@free-electrons.com>
7 */
8
9#include <linux/bitops.h>
10#include <linux/clk.h>
11#include <linux/of_address.h>
12#include <linux/regmap.h>
13#include <linux/reset.h>
14
15#include "sun6i_mipi_dsi.h"
16
17#define SUN6I_DPHY_GCTL_REG 0x00
18#define SUN6I_DPHY_GCTL_LANE_NUM(n) ((((n) - 1) & 3) << 4)
19#define SUN6I_DPHY_GCTL_EN BIT(0)
20
21#define SUN6I_DPHY_TX_CTL_REG 0x04
22#define SUN6I_DPHY_TX_CTL_HS_TX_CLK_CONT BIT(28)
23
24#define SUN6I_DPHY_TX_TIME0_REG 0x10
25#define SUN6I_DPHY_TX_TIME0_HS_TRAIL(n) (((n) & 0xff) << 24)
26#define SUN6I_DPHY_TX_TIME0_HS_PREPARE(n) (((n) & 0xff) << 16)
27#define SUN6I_DPHY_TX_TIME0_LP_CLK_DIV(n) ((n) & 0xff)
28
29#define SUN6I_DPHY_TX_TIME1_REG 0x14
30#define SUN6I_DPHY_TX_TIME1_CLK_POST(n) (((n) & 0xff) << 24)
31#define SUN6I_DPHY_TX_TIME1_CLK_PRE(n) (((n) & 0xff) << 16)
32#define SUN6I_DPHY_TX_TIME1_CLK_ZERO(n) (((n) & 0xff) << 8)
33#define SUN6I_DPHY_TX_TIME1_CLK_PREPARE(n) ((n) & 0xff)
34
35#define SUN6I_DPHY_TX_TIME2_REG 0x18
36#define SUN6I_DPHY_TX_TIME2_CLK_TRAIL(n) ((n) & 0xff)
37
38#define SUN6I_DPHY_TX_TIME3_REG 0x1c
39
40#define SUN6I_DPHY_TX_TIME4_REG 0x20
41#define SUN6I_DPHY_TX_TIME4_HS_TX_ANA1(n) (((n) & 0xff) << 8)
42#define SUN6I_DPHY_TX_TIME4_HS_TX_ANA0(n) ((n) & 0xff)
43
44#define SUN6I_DPHY_ANA0_REG 0x4c
45#define SUN6I_DPHY_ANA0_REG_PWS BIT(31)
46#define SUN6I_DPHY_ANA0_REG_DMPC BIT(28)
47#define SUN6I_DPHY_ANA0_REG_DMPD(n) (((n) & 0xf) << 24)
48#define SUN6I_DPHY_ANA0_REG_SLV(n) (((n) & 7) << 12)
49#define SUN6I_DPHY_ANA0_REG_DEN(n) (((n) & 0xf) << 8)
50
51#define SUN6I_DPHY_ANA1_REG 0x50
52#define SUN6I_DPHY_ANA1_REG_VTTMODE BIT(31)
53#define SUN6I_DPHY_ANA1_REG_CSMPS(n) (((n) & 3) << 28)
54#define SUN6I_DPHY_ANA1_REG_SVTT(n) (((n) & 0xf) << 24)
55
56#define SUN6I_DPHY_ANA2_REG 0x54
57#define SUN6I_DPHY_ANA2_EN_P2S_CPU(n) (((n) & 0xf) << 24)
58#define SUN6I_DPHY_ANA2_EN_P2S_CPU_MASK GENMASK(27, 24)
59#define SUN6I_DPHY_ANA2_EN_CK_CPU BIT(4)
60#define SUN6I_DPHY_ANA2_REG_ENIB BIT(1)
61
62#define SUN6I_DPHY_ANA3_REG 0x58
63#define SUN6I_DPHY_ANA3_EN_VTTD(n) (((n) & 0xf) << 28)
64#define SUN6I_DPHY_ANA3_EN_VTTD_MASK GENMASK(31, 28)
65#define SUN6I_DPHY_ANA3_EN_VTTC BIT(27)
66#define SUN6I_DPHY_ANA3_EN_DIV BIT(26)
67#define SUN6I_DPHY_ANA3_EN_LDOC BIT(25)
68#define SUN6I_DPHY_ANA3_EN_LDOD BIT(24)
69#define SUN6I_DPHY_ANA3_EN_LDOR BIT(18)
70
71#define SUN6I_DPHY_ANA4_REG 0x5c
72#define SUN6I_DPHY_ANA4_REG_DMPLVC BIT(24)
73#define SUN6I_DPHY_ANA4_REG_DMPLVD(n) (((n) & 0xf) << 20)
74#define SUN6I_DPHY_ANA4_REG_CKDV(n) (((n) & 0x1f) << 12)
75#define SUN6I_DPHY_ANA4_REG_TMSC(n) (((n) & 3) << 10)
76#define SUN6I_DPHY_ANA4_REG_TMSD(n) (((n) & 3) << 8)
77#define SUN6I_DPHY_ANA4_REG_TXDNSC(n) (((n) & 3) << 6)
78#define SUN6I_DPHY_ANA4_REG_TXDNSD(n) (((n) & 3) << 4)
79#define SUN6I_DPHY_ANA4_REG_TXPUSC(n) (((n) & 3) << 2)
80#define SUN6I_DPHY_ANA4_REG_TXPUSD(n) ((n) & 3)
81
82#define SUN6I_DPHY_DBG5_REG 0xf4
83
84int sun6i_dphy_init(struct sun6i_dphy *dphy, unsigned int lanes)
85{
86 reset_control_deassert(dphy->reset);
87 clk_prepare_enable(dphy->mod_clk);
88 clk_set_rate_exclusive(dphy->mod_clk, 150000000);
89
90 regmap_write(dphy->regs, SUN6I_DPHY_TX_CTL_REG,
91 SUN6I_DPHY_TX_CTL_HS_TX_CLK_CONT);
92
93 regmap_write(dphy->regs, SUN6I_DPHY_TX_TIME0_REG,
94 SUN6I_DPHY_TX_TIME0_LP_CLK_DIV(14) |
95 SUN6I_DPHY_TX_TIME0_HS_PREPARE(6) |
96 SUN6I_DPHY_TX_TIME0_HS_TRAIL(10));
97
98 regmap_write(dphy->regs, SUN6I_DPHY_TX_TIME1_REG,
99 SUN6I_DPHY_TX_TIME1_CLK_PREPARE(7) |
100 SUN6I_DPHY_TX_TIME1_CLK_ZERO(50) |
101 SUN6I_DPHY_TX_TIME1_CLK_PRE(3) |
102 SUN6I_DPHY_TX_TIME1_CLK_POST(10));
103
104 regmap_write(dphy->regs, SUN6I_DPHY_TX_TIME2_REG,
105 SUN6I_DPHY_TX_TIME2_CLK_TRAIL(30));
106
107 regmap_write(dphy->regs, SUN6I_DPHY_TX_TIME3_REG, 0);
108
109 regmap_write(dphy->regs, SUN6I_DPHY_TX_TIME4_REG,
110 SUN6I_DPHY_TX_TIME4_HS_TX_ANA0(3) |
111 SUN6I_DPHY_TX_TIME4_HS_TX_ANA1(3));
112
113 regmap_write(dphy->regs, SUN6I_DPHY_GCTL_REG,
114 SUN6I_DPHY_GCTL_LANE_NUM(lanes) |
115 SUN6I_DPHY_GCTL_EN);
116
117 return 0;
118}
119
120int sun6i_dphy_power_on(struct sun6i_dphy *dphy, unsigned int lanes)
121{
122 u8 lanes_mask = GENMASK(lanes - 1, 0);
123
124 regmap_write(dphy->regs, SUN6I_DPHY_ANA0_REG,
125 SUN6I_DPHY_ANA0_REG_PWS |
126 SUN6I_DPHY_ANA0_REG_DMPC |
127 SUN6I_DPHY_ANA0_REG_SLV(7) |
128 SUN6I_DPHY_ANA0_REG_DMPD(lanes_mask) |
129 SUN6I_DPHY_ANA0_REG_DEN(lanes_mask));
130
131 regmap_write(dphy->regs, SUN6I_DPHY_ANA1_REG,
132 SUN6I_DPHY_ANA1_REG_CSMPS(1) |
133 SUN6I_DPHY_ANA1_REG_SVTT(7));
134
135 regmap_write(dphy->regs, SUN6I_DPHY_ANA4_REG,
136 SUN6I_DPHY_ANA4_REG_CKDV(1) |
137 SUN6I_DPHY_ANA4_REG_TMSC(1) |
138 SUN6I_DPHY_ANA4_REG_TMSD(1) |
139 SUN6I_DPHY_ANA4_REG_TXDNSC(1) |
140 SUN6I_DPHY_ANA4_REG_TXDNSD(1) |
141 SUN6I_DPHY_ANA4_REG_TXPUSC(1) |
142 SUN6I_DPHY_ANA4_REG_TXPUSD(1) |
143 SUN6I_DPHY_ANA4_REG_DMPLVC |
144 SUN6I_DPHY_ANA4_REG_DMPLVD(lanes_mask));
145
146 regmap_write(dphy->regs, SUN6I_DPHY_ANA2_REG,
147 SUN6I_DPHY_ANA2_REG_ENIB);
148 udelay(5);
149
150 regmap_write(dphy->regs, SUN6I_DPHY_ANA3_REG,
151 SUN6I_DPHY_ANA3_EN_LDOR |
152 SUN6I_DPHY_ANA3_EN_LDOC |
153 SUN6I_DPHY_ANA3_EN_LDOD);
154 udelay(1);
155
156 regmap_update_bits(dphy->regs, SUN6I_DPHY_ANA3_REG,
157 SUN6I_DPHY_ANA3_EN_VTTC |
158 SUN6I_DPHY_ANA3_EN_VTTD_MASK,
159 SUN6I_DPHY_ANA3_EN_VTTC |
160 SUN6I_DPHY_ANA3_EN_VTTD(lanes_mask));
161 udelay(1);
162
163 regmap_update_bits(dphy->regs, SUN6I_DPHY_ANA3_REG,
164 SUN6I_DPHY_ANA3_EN_DIV,
165 SUN6I_DPHY_ANA3_EN_DIV);
166 udelay(1);
167
168 regmap_update_bits(dphy->regs, SUN6I_DPHY_ANA2_REG,
169 SUN6I_DPHY_ANA2_EN_CK_CPU,
170 SUN6I_DPHY_ANA2_EN_CK_CPU);
171 udelay(1);
172
173 regmap_update_bits(dphy->regs, SUN6I_DPHY_ANA1_REG,
174 SUN6I_DPHY_ANA1_REG_VTTMODE,
175 SUN6I_DPHY_ANA1_REG_VTTMODE);
176
177 regmap_update_bits(dphy->regs, SUN6I_DPHY_ANA2_REG,
178 SUN6I_DPHY_ANA2_EN_P2S_CPU_MASK,
179 SUN6I_DPHY_ANA2_EN_P2S_CPU(lanes_mask));
180
181 return 0;
182}
183
184int sun6i_dphy_power_off(struct sun6i_dphy *dphy)
185{
186 regmap_update_bits(dphy->regs, SUN6I_DPHY_ANA1_REG,
187 SUN6I_DPHY_ANA1_REG_VTTMODE, 0);
188
189 return 0;
190}
191
192int sun6i_dphy_exit(struct sun6i_dphy *dphy)
193{
194 clk_rate_exclusive_put(dphy->mod_clk);
195 clk_disable_unprepare(dphy->mod_clk);
196 reset_control_assert(dphy->reset);
197
198 return 0;
199}
200
201static struct regmap_config sun6i_dphy_regmap_config = {
202 .reg_bits = 32,
203 .val_bits = 32,
204 .reg_stride = 4,
205 .max_register = SUN6I_DPHY_DBG5_REG,
206 .name = "mipi-dphy",
207};
208
209static const struct of_device_id sun6i_dphy_of_table[] = {
210 { .compatible = "allwinner,sun6i-a31-mipi-dphy" },
211 { }
212};
213
214int sun6i_dphy_probe(struct sun6i_dsi *dsi, struct device_node *node)
215{
216 struct sun6i_dphy *dphy;
217 struct resource res;
218 void __iomem *regs;
219 int ret;
220
221 if (!of_match_node(sun6i_dphy_of_table, node)) {
222 dev_err(dsi->dev, "Incompatible D-PHY\n");
223 return -EINVAL;
224 }
225
226 dphy = devm_kzalloc(dsi->dev, sizeof(*dphy), GFP_KERNEL);
227 if (!dphy)
228 return -ENOMEM;
229
230 ret = of_address_to_resource(node, 0, &res);
231 if (ret) {
232 dev_err(dsi->dev, "phy: Couldn't get our resources\n");
233 return ret;
234 }
235
236 regs = devm_ioremap_resource(dsi->dev, &res);
237 if (IS_ERR(regs)) {
238 dev_err(dsi->dev, "Couldn't map the DPHY encoder registers\n");
239 return PTR_ERR(regs);
240 }
241
242 dphy->regs = devm_regmap_init_mmio(dsi->dev, regs,
243 &sun6i_dphy_regmap_config);
244 if (IS_ERR(dphy->regs)) {
245 dev_err(dsi->dev, "Couldn't create the DPHY encoder regmap\n");
246 return PTR_ERR(dphy->regs);
247 }
248
249 dphy->reset = of_reset_control_get_shared(node, NULL);
250 if (IS_ERR(dphy->reset)) {
251 dev_err(dsi->dev, "Couldn't get our reset line\n");
252 return PTR_ERR(dphy->reset);
253 }
254
255 dphy->bus_clk = of_clk_get_by_name(node, "bus");
256 if (IS_ERR(dphy->bus_clk)) {
257 dev_err(dsi->dev, "Couldn't get the DPHY bus clock\n");
258 ret = PTR_ERR(dphy->bus_clk);
259 goto err_free_reset;
260 }
261 regmap_mmio_attach_clk(dphy->regs, dphy->bus_clk);
262
263 dphy->mod_clk = of_clk_get_by_name(node, "mod");
264 if (IS_ERR(dphy->mod_clk)) {
265 dev_err(dsi->dev, "Couldn't get the DPHY mod clock\n");
266 ret = PTR_ERR(dphy->mod_clk);
267 goto err_free_bus;
268 }
269
270 dsi->dphy = dphy;
271
272 return 0;
273
274err_free_bus:
275 regmap_mmio_detach_clk(dphy->regs);
276 clk_put(dphy->bus_clk);
277err_free_reset:
278 reset_control_put(dphy->reset);
279 return ret;
280}
281
282int sun6i_dphy_remove(struct sun6i_dsi *dsi)
283{
284 struct sun6i_dphy *dphy = dsi->dphy;
285
286 regmap_mmio_detach_clk(dphy->regs);
287 clk_put(dphy->mod_clk);
288 clk_put(dphy->bus_clk);
289 reset_control_put(dphy->reset);
290
291 return 0;
292}
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
new file mode 100644
index 000000000000..bfbf761f0c1d
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -0,0 +1,1107 @@
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Copyright (c) 2016 Allwinnertech Co., Ltd.
4 * Copyright (C) 2017-2018 Bootlin
5 *
6 * Maxime Ripard <maxime.ripard@bootlin.com>
7 */
8
9#include <linux/clk.h>
10#include <linux/component.h>
11#include <linux/crc-ccitt.h>
12#include <linux/of_address.h>
13#include <linux/pm_runtime.h>
14#include <linux/regmap.h>
15#include <linux/reset.h>
16
17#include <linux/phy/phy.h>
18
19#include <drm/drmP.h>
20#include <drm/drm_atomic_helper.h>
21#include <drm/drm_crtc_helper.h>
22#include <drm/drm_mipi_dsi.h>
23#include <drm/drm_panel.h>
24
25#include "sun4i_drv.h"
26#include "sun6i_mipi_dsi.h"
27
28#include <video/mipi_display.h>
29
30#define SUN6I_DSI_CTL_REG 0x000
31#define SUN6I_DSI_CTL_EN BIT(0)
32
33#define SUN6I_DSI_BASIC_CTL_REG 0x00c
34#define SUN6I_DSI_BASIC_CTL_HBP_DIS BIT(2)
35#define SUN6I_DSI_BASIC_CTL_HSA_HSE_DIS BIT(1)
36#define SUN6I_DSI_BASIC_CTL_VIDEO_BURST BIT(0)
37
38#define SUN6I_DSI_BASIC_CTL0_REG 0x010
39#define SUN6I_DSI_BASIC_CTL0_HS_EOTP_EN BIT(18)
40#define SUN6I_DSI_BASIC_CTL0_CRC_EN BIT(17)
41#define SUN6I_DSI_BASIC_CTL0_ECC_EN BIT(16)
42#define SUN6I_DSI_BASIC_CTL0_INST_ST BIT(0)
43
44#define SUN6I_DSI_BASIC_CTL1_REG 0x014
45#define SUN6I_DSI_BASIC_CTL1_VIDEO_ST_DELAY(n) (((n) & 0x1fff) << 4)
46#define SUN6I_DSI_BASIC_CTL1_VIDEO_FILL BIT(2)
47#define SUN6I_DSI_BASIC_CTL1_VIDEO_PRECISION BIT(1)
48#define SUN6I_DSI_BASIC_CTL1_VIDEO_MODE BIT(0)
49
50#define SUN6I_DSI_BASIC_SIZE0_REG 0x018
51#define SUN6I_DSI_BASIC_SIZE0_VBP(n) (((n) & 0xfff) << 16)
52#define SUN6I_DSI_BASIC_SIZE0_VSA(n) ((n) & 0xfff)
53
54#define SUN6I_DSI_BASIC_SIZE1_REG 0x01c
55#define SUN6I_DSI_BASIC_SIZE1_VT(n) (((n) & 0xfff) << 16)
56#define SUN6I_DSI_BASIC_SIZE1_VACT(n) ((n) & 0xfff)
57
58#define SUN6I_DSI_INST_FUNC_REG(n) (0x020 + (n) * 0x04)
59#define SUN6I_DSI_INST_FUNC_INST_MODE(n) (((n) & 0xf) << 28)
60#define SUN6I_DSI_INST_FUNC_ESCAPE_ENTRY(n) (((n) & 0xf) << 24)
61#define SUN6I_DSI_INST_FUNC_TRANS_PACKET(n) (((n) & 0xf) << 20)
62#define SUN6I_DSI_INST_FUNC_LANE_CEN BIT(4)
63#define SUN6I_DSI_INST_FUNC_LANE_DEN(n) ((n) & 0xf)
64
65#define SUN6I_DSI_INST_LOOP_SEL_REG 0x040
66
67#define SUN6I_DSI_INST_LOOP_NUM_REG(n) (0x044 + (n) * 0x10)
68#define SUN6I_DSI_INST_LOOP_NUM_N1(n) (((n) & 0xfff) << 16)
69#define SUN6I_DSI_INST_LOOP_NUM_N0(n) ((n) & 0xfff)
70
71#define SUN6I_DSI_INST_JUMP_SEL_REG 0x048
72
73#define SUN6I_DSI_INST_JUMP_CFG_REG(n) (0x04c + (n) * 0x04)
74#define SUN6I_DSI_INST_JUMP_CFG_TO(n) (((n) & 0xf) << 20)
75#define SUN6I_DSI_INST_JUMP_CFG_POINT(n) (((n) & 0xf) << 16)
76#define SUN6I_DSI_INST_JUMP_CFG_NUM(n) ((n) & 0xffff)
77
78#define SUN6I_DSI_TRANS_START_REG 0x060
79
80#define SUN6I_DSI_TRANS_ZERO_REG 0x078
81
82#define SUN6I_DSI_TCON_DRQ_REG 0x07c
83#define SUN6I_DSI_TCON_DRQ_ENABLE_MODE BIT(28)
84#define SUN6I_DSI_TCON_DRQ_SET(n) ((n) & 0x3ff)
85
86#define SUN6I_DSI_PIXEL_CTL0_REG 0x080
87#define SUN6I_DSI_PIXEL_CTL0_PD_PLUG_DISABLE BIT(16)
88#define SUN6I_DSI_PIXEL_CTL0_FORMAT(n) ((n) & 0xf)
89
90#define SUN6I_DSI_PIXEL_CTL1_REG 0x084
91
92#define SUN6I_DSI_PIXEL_PH_REG 0x090
93#define SUN6I_DSI_PIXEL_PH_ECC(n) (((n) & 0xff) << 24)
94#define SUN6I_DSI_PIXEL_PH_WC(n) (((n) & 0xffff) << 8)
95#define SUN6I_DSI_PIXEL_PH_VC(n) (((n) & 3) << 6)
96#define SUN6I_DSI_PIXEL_PH_DT(n) ((n) & 0x3f)
97
98#define SUN6I_DSI_PIXEL_PF0_REG 0x098
99#define SUN6I_DSI_PIXEL_PF0_CRC_FORCE(n) ((n) & 0xffff)
100
101#define SUN6I_DSI_PIXEL_PF1_REG 0x09c
102#define SUN6I_DSI_PIXEL_PF1_CRC_INIT_LINEN(n) (((n) & 0xffff) << 16)
103#define SUN6I_DSI_PIXEL_PF1_CRC_INIT_LINE0(n) ((n) & 0xffff)
104
105#define SUN6I_DSI_SYNC_HSS_REG 0x0b0
106
107#define SUN6I_DSI_SYNC_HSE_REG 0x0b4
108
109#define SUN6I_DSI_SYNC_VSS_REG 0x0b8
110
111#define SUN6I_DSI_SYNC_VSE_REG 0x0bc
112
113#define SUN6I_DSI_BLK_HSA0_REG 0x0c0
114
115#define SUN6I_DSI_BLK_HSA1_REG 0x0c4
116#define SUN6I_DSI_BLK_PF(n) (((n) & 0xffff) << 16)
117#define SUN6I_DSI_BLK_PD(n) ((n) & 0xff)
118
119#define SUN6I_DSI_BLK_HBP0_REG 0x0c8
120
121#define SUN6I_DSI_BLK_HBP1_REG 0x0cc
122
123#define SUN6I_DSI_BLK_HFP0_REG 0x0d0
124
125#define SUN6I_DSI_BLK_HFP1_REG 0x0d4
126
127#define SUN6I_DSI_BLK_HBLK0_REG 0x0e0
128
129#define SUN6I_DSI_BLK_HBLK1_REG 0x0e4
130
131#define SUN6I_DSI_BLK_VBLK0_REG 0x0e8
132
133#define SUN6I_DSI_BLK_VBLK1_REG 0x0ec
134
135#define SUN6I_DSI_BURST_LINE_REG 0x0f0
136#define SUN6I_DSI_BURST_LINE_SYNC_POINT(n) (((n) & 0xffff) << 16)
137#define SUN6I_DSI_BURST_LINE_NUM(n) ((n) & 0xffff)
138
139#define SUN6I_DSI_BURST_DRQ_REG 0x0f4
140#define SUN6I_DSI_BURST_DRQ_EDGE1(n) (((n) & 0xffff) << 16)
141#define SUN6I_DSI_BURST_DRQ_EDGE0(n) ((n) & 0xffff)
142
143#define SUN6I_DSI_CMD_CTL_REG 0x200
144#define SUN6I_DSI_CMD_CTL_RX_OVERFLOW BIT(26)
145#define SUN6I_DSI_CMD_CTL_RX_FLAG BIT(25)
146#define SUN6I_DSI_CMD_CTL_TX_FLAG BIT(9)
147
148#define SUN6I_DSI_CMD_RX_REG(n) (0x240 + (n) * 0x04)
149
150#define SUN6I_DSI_DEBUG_DATA_REG 0x2f8
151
152#define SUN6I_DSI_CMD_TX_REG(n) (0x300 + (n) * 0x04)
153
/* Sequences that sun6i_dsi_start() can program into the jump table. */
enum sun6i_dsi_start_inst {
	DSI_START_LPRX,
	DSI_START_LPTX,
	DSI_START_HSC,
	DSI_START_HSD,
};

/*
 * Hardware instruction slots (indices into SUN6I_DSI_INST_FUNC_REG).
 * Their roles match how sun6i_dsi_inst_init() configures them.
 */
enum sun6i_dsi_inst_id {
	DSI_INST_ID_LP11 = 0,	/* stop state on all lanes */
	DSI_INST_ID_TBA,	/* bus turn-around */
	DSI_INST_ID_HSC,	/* high-speed clock */
	DSI_INST_ID_HSD,	/* high-speed data */
	DSI_INST_ID_LPDT,	/* low-power data transmission (escape) */
	DSI_INST_ID_HSCEXIT,	/* high-speed clock exit */
	DSI_INST_ID_NOP,	/* stop state on data lanes only */
	DSI_INST_ID_DLY,	/* delay */
	DSI_INST_ID_END = 15,	/* terminates a jump sequence */
};

/* Instruction mode field of SUN6I_DSI_INST_FUNC_REG. */
enum sun6i_dsi_inst_mode {
	DSI_INST_MODE_STOP = 0,
	DSI_INST_MODE_TBA,
	DSI_INST_MODE_HS,
	DSI_INST_MODE_ESCAPE,
	DSI_INST_MODE_HSCEXIT,
	DSI_INST_MODE_NOP,
};

/* Escape-mode entry commands; only LPDT is used by this driver. */
enum sun6i_dsi_inst_escape {
	DSI_INST_ESCA_LPDT = 0,
	DSI_INST_ESCA_ULPS,
	DSI_INST_ESCA_UN1,
	DSI_INST_ESCA_UN2,
	DSI_INST_ESCA_RESET,
	DSI_INST_ESCA_UN3,
	DSI_INST_ESCA_UN4,
	DSI_INST_ESCA_UN5,
};

/* Packet type carried by a transmission instruction. */
enum sun6i_dsi_inst_packet {
	DSI_INST_PACK_PIXEL = 0,
	DSI_INST_PACK_COMMAND,
};
197
/*
 * Parity masks for the DSI packet-header ECC: bit i of the ECC byte is
 * the XOR of the 24 header bits selected by sun6i_dsi_ecc_array[i]
 * (the Hamming-code definition from the MIPI DSI specification).
 */
static const u32 sun6i_dsi_ecc_array[] = {
	[0] = (BIT(0) | BIT(1) | BIT(2) | BIT(4) | BIT(5) | BIT(7) | BIT(10) |
	       BIT(11) | BIT(13) | BIT(16) | BIT(20) | BIT(21) | BIT(22) |
	       BIT(23)),
	[1] = (BIT(0) | BIT(1) | BIT(3) | BIT(4) | BIT(6) | BIT(8) | BIT(10) |
	       BIT(12) | BIT(14) | BIT(17) | BIT(20) | BIT(21) | BIT(22) |
	       BIT(23)),
	[2] = (BIT(0) | BIT(2) | BIT(3) | BIT(5) | BIT(6) | BIT(9) | BIT(11) |
	       BIT(12) | BIT(15) | BIT(18) | BIT(20) | BIT(21) | BIT(22)),
	[3] = (BIT(1) | BIT(2) | BIT(3) | BIT(7) | BIT(8) | BIT(9) | BIT(13) |
	       BIT(14) | BIT(15) | BIT(19) | BIT(20) | BIT(21) | BIT(23)),
	[4] = (BIT(4) | BIT(5) | BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(16) |
	       BIT(17) | BIT(18) | BIT(19) | BIT(20) | BIT(22) | BIT(23)),
	[5] = (BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14) | BIT(15) |
	       BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(21) | BIT(22) |
	       BIT(23)),
};
215
216static u32 sun6i_dsi_ecc_compute(unsigned int data)
217{
218 int i;
219 u8 ecc = 0;
220
221 for (i = 0; i < ARRAY_SIZE(sun6i_dsi_ecc_array); i++) {
222 u32 field = sun6i_dsi_ecc_array[i];
223 bool init = false;
224 u8 val = 0;
225 int j;
226
227 for (j = 0; j < 24; j++) {
228 if (!(BIT(j) & field))
229 continue;
230
231 if (!init) {
232 val = (BIT(j) & data) ? 1 : 0;
233 init = true;
234 } else {
235 val ^= (BIT(j) & data) ? 1 : 0;
236 }
237 }
238
239 ecc |= val << i;
240 }
241
242 return ecc;
243}
244
/* CRC-16/CCITT over the payload, seeded with 0xffff per the DSI spec. */
static u16 sun6i_dsi_crc_compute(u8 const *buffer, size_t len)
{
	return crc_ccitt(0xffff, buffer, len);
}
249
250static u16 sun6i_dsi_crc_repeat_compute(u8 pd, size_t len)
251{
252 u8 buffer[len];
253
254 memset(buffer, pd, len);
255
256 return sun6i_dsi_crc_compute(buffer, len);
257}
258
259static u32 sun6i_dsi_build_sync_pkt(u8 dt, u8 vc, u8 d0, u8 d1)
260{
261 u32 val = dt & 0x3f;
262
263 val |= (vc & 3) << 6;
264 val |= (d0 & 0xff) << 8;
265 val |= (d1 & 0xff) << 16;
266 val |= sun6i_dsi_ecc_compute(val) << 24;
267
268 return val;
269}
270
271static u32 sun6i_dsi_build_blk0_pkt(u8 vc, u16 wc)
272{
273 return sun6i_dsi_build_sync_pkt(MIPI_DSI_BLANKING_PACKET, vc,
274 wc & 0xff, wc >> 8);
275}
276
277static u32 sun6i_dsi_build_blk1_pkt(u16 pd, size_t len)
278{
279 u32 val = SUN6I_DSI_BLK_PD(pd);
280
281 return val | SUN6I_DSI_BLK_PF(sun6i_dsi_crc_repeat_compute(pd, len));
282}
283
/* Clear INST_ST to abort any in-flight instruction sequence. */
static void sun6i_dsi_inst_abort(struct sun6i_dsi *dsi)
{
	regmap_update_bits(dsi->regs, SUN6I_DSI_BASIC_CTL0_REG,
			   SUN6I_DSI_BASIC_CTL0_INST_ST, 0);
}
289
/* Set INST_ST to kick off the currently programmed instruction sequence. */
static void sun6i_dsi_inst_commit(struct sun6i_dsi *dsi)
{
	regmap_update_bits(dsi->regs, SUN6I_DSI_BASIC_CTL0_REG,
			   SUN6I_DSI_BASIC_CTL0_INST_ST,
			   SUN6I_DSI_BASIC_CTL0_INST_ST);
}
296
/*
 * Poll until the hardware clears INST_ST, i.e. the committed
 * instruction sequence has finished.  Polls every 100us for up to
 * 5ms; returns 0 on completion or a negative error on timeout.
 */
static int sun6i_dsi_inst_wait_for_completion(struct sun6i_dsi *dsi)
{
	u32 val;

	return regmap_read_poll_timeout(dsi->regs, SUN6I_DSI_BASIC_CTL0_REG,
					val,
					!(val & SUN6I_DSI_BASIC_CTL0_INST_ST),
					100, 5000);
}
306
/*
 * Program one instruction slot: its mode, escape-entry command,
 * packet type, whether the clock lane participates, and the
 * data-lane enable mask.
 */
static void sun6i_dsi_inst_setup(struct sun6i_dsi *dsi,
				 enum sun6i_dsi_inst_id id,
				 enum sun6i_dsi_inst_mode mode,
				 bool clock, u8 data,
				 enum sun6i_dsi_inst_packet packet,
				 enum sun6i_dsi_inst_escape escape)
{
	regmap_write(dsi->regs, SUN6I_DSI_INST_FUNC_REG(id),
		     SUN6I_DSI_INST_FUNC_INST_MODE(mode) |
		     SUN6I_DSI_INST_FUNC_ESCAPE_ENTRY(escape) |
		     SUN6I_DSI_INST_FUNC_TRANS_PACKET(packet) |
		     (clock ? SUN6I_DSI_INST_FUNC_LANE_CEN : 0) |
		     SUN6I_DSI_INST_FUNC_LANE_DEN(data));
}
321
322static void sun6i_dsi_inst_init(struct sun6i_dsi *dsi,
323 struct mipi_dsi_device *device)
324{
325 u8 lanes_mask = GENMASK(device->lanes - 1, 0);
326
327 sun6i_dsi_inst_setup(dsi, DSI_INST_ID_LP11, DSI_INST_MODE_STOP,
328 true, lanes_mask, 0, 0);
329
330 sun6i_dsi_inst_setup(dsi, DSI_INST_ID_TBA, DSI_INST_MODE_TBA,
331 false, 1, 0, 0);
332
333 sun6i_dsi_inst_setup(dsi, DSI_INST_ID_HSC, DSI_INST_MODE_HS,
334 true, 0, DSI_INST_PACK_PIXEL, 0);
335
336 sun6i_dsi_inst_setup(dsi, DSI_INST_ID_HSD, DSI_INST_MODE_HS,
337 false, lanes_mask, DSI_INST_PACK_PIXEL, 0);
338
339 sun6i_dsi_inst_setup(dsi, DSI_INST_ID_LPDT, DSI_INST_MODE_ESCAPE,
340 false, 1, DSI_INST_PACK_COMMAND,
341 DSI_INST_ESCA_LPDT);
342
343 sun6i_dsi_inst_setup(dsi, DSI_INST_ID_HSCEXIT, DSI_INST_MODE_HSCEXIT,
344 true, 0, 0, 0);
345
346 sun6i_dsi_inst_setup(dsi, DSI_INST_ID_NOP, DSI_INST_MODE_STOP,
347 false, lanes_mask, 0, 0);
348
349 sun6i_dsi_inst_setup(dsi, DSI_INST_ID_DLY, DSI_INST_MODE_NOP,
350 true, lanes_mask, 0, 0);
351
352 regmap_write(dsi->regs, SUN6I_DSI_INST_JUMP_CFG_REG(0),
353 SUN6I_DSI_INST_JUMP_CFG_POINT(DSI_INST_ID_NOP) |
354 SUN6I_DSI_INST_JUMP_CFG_TO(DSI_INST_ID_HSCEXIT) |
355 SUN6I_DSI_INST_JUMP_CFG_NUM(1));
356};
357
358static u16 sun6i_dsi_get_video_start_delay(struct sun6i_dsi *dsi,
359 struct drm_display_mode *mode)
360{
361 return mode->vtotal - (mode->vsync_end - mode->vdisplay) + 1;
362}
363
/*
 * Configure the TCON data-request (DRQ) threshold.
 *
 * Only enabled when hsync_end - hdisplay exceeds 20 pixels; the
 * threshold is that excess converted into 32-bit words of pixel
 * data.  The constant 20 and the formula come from the vendor BSP.
 */
static void sun6i_dsi_setup_burst(struct sun6i_dsi *dsi,
				  struct drm_display_mode *mode)
{
	struct mipi_dsi_device *device = dsi->device;
	u32 val = 0;

	if ((mode->hsync_end - mode->hdisplay) > 20) {
		/* Maaaaaagic */
		u16 drq = (mode->hsync_end - mode->hdisplay) - 20;

		drq *= mipi_dsi_pixel_format_to_bpp(device->format);
		drq /= 32;

		val = (SUN6I_DSI_TCON_DRQ_ENABLE_MODE |
		       SUN6I_DSI_TCON_DRQ_SET(drq));
	}

	regmap_write(dsi->regs, SUN6I_DSI_TCON_DRQ_REG, val);
}
383
384static void sun6i_dsi_setup_inst_loop(struct sun6i_dsi *dsi,
385 struct drm_display_mode *mode)
386{
387 u16 delay = 50 - 1;
388
389 regmap_write(dsi->regs, SUN6I_DSI_INST_LOOP_NUM_REG(0),
390 SUN6I_DSI_INST_LOOP_NUM_N0(50 - 1) |
391 SUN6I_DSI_INST_LOOP_NUM_N1(delay));
392 regmap_write(dsi->regs, SUN6I_DSI_INST_LOOP_NUM_REG(1),
393 SUN6I_DSI_INST_LOOP_NUM_N0(50 - 1) |
394 SUN6I_DSI_INST_LOOP_NUM_N1(delay));
395}
396
/*
 * Program the pixel-packet header (data type, word count, virtual
 * channel, ECC), the pixel CRC seeds, and the controller pixel
 * format register.
 */
static void sun6i_dsi_setup_format(struct sun6i_dsi *dsi,
				   struct drm_display_mode *mode)
{
	struct mipi_dsi_device *device = dsi->device;
	u32 val = SUN6I_DSI_PIXEL_PH_VC(device->channel);
	u8 dt, fmt;
	u16 wc;

	/*
	 * TODO: The format defines are only valid in video mode and
	 * change in command mode.
	 *
	 * NOTE(review): the fmt values (8..11) are a vendor register
	 * encoding — confirm against the BSP.
	 */
	switch (device->format) {
	case MIPI_DSI_FMT_RGB888:
		dt = MIPI_DSI_PACKED_PIXEL_STREAM_24;
		fmt = 8;
		break;
	case MIPI_DSI_FMT_RGB666:
		dt = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
		fmt = 9;
		break;
	case MIPI_DSI_FMT_RGB666_PACKED:
		dt = MIPI_DSI_PACKED_PIXEL_STREAM_18;
		fmt = 10;
		break;
	case MIPI_DSI_FMT_RGB565:
		dt = MIPI_DSI_PACKED_PIXEL_STREAM_16;
		fmt = 11;
		break;
	default:
		/* Unsupported format: leave the registers untouched. */
		return;
	}
	val |= SUN6I_DSI_PIXEL_PH_DT(dt);

	/* Word count: one line of pixels, in bytes. */
	wc = mode->hdisplay * mipi_dsi_pixel_format_to_bpp(device->format) / 8;
	val |= SUN6I_DSI_PIXEL_PH_WC(wc);
	val |= SUN6I_DSI_PIXEL_PH_ECC(sun6i_dsi_ecc_compute(val));

	regmap_write(dsi->regs, SUN6I_DSI_PIXEL_PH_REG, val);

	regmap_write(dsi->regs, SUN6I_DSI_PIXEL_PF0_REG,
		     SUN6I_DSI_PIXEL_PF0_CRC_FORCE(0xffff));

	regmap_write(dsi->regs, SUN6I_DSI_PIXEL_PF1_REG,
		     SUN6I_DSI_PIXEL_PF1_CRC_INIT_LINE0(0xffff) |
		     SUN6I_DSI_PIXEL_PF1_CRC_INIT_LINEN(0xffff));

	regmap_write(dsi->regs, SUN6I_DSI_PIXEL_CTL0_REG,
		     SUN6I_DSI_PIXEL_CTL0_PD_PLUG_DISABLE |
		     SUN6I_DSI_PIXEL_CTL0_FORMAT(fmt));
}
448
/*
 * Program the sync packets, vertical geometry, and the horizontal
 * blanking packets (sizes in bytes of payload) from the DRM mode.
 */
static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
				    struct drm_display_mode *mode)
{
	struct mipi_dsi_device *device = dsi->device;
	unsigned int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8;
	u16 hbp, hfp, hsa, hblk, vblk;

	regmap_write(dsi->regs, SUN6I_DSI_BASIC_CTL_REG, 0);

	/* Short sync-event packets for each of the four sync edges. */
	regmap_write(dsi->regs, SUN6I_DSI_SYNC_HSS_REG,
		     sun6i_dsi_build_sync_pkt(MIPI_DSI_H_SYNC_START,
					      device->channel,
					      0, 0));

	regmap_write(dsi->regs, SUN6I_DSI_SYNC_HSE_REG,
		     sun6i_dsi_build_sync_pkt(MIPI_DSI_H_SYNC_END,
					      device->channel,
					      0, 0));

	regmap_write(dsi->regs, SUN6I_DSI_SYNC_VSS_REG,
		     sun6i_dsi_build_sync_pkt(MIPI_DSI_V_SYNC_START,
					      device->channel,
					      0, 0));

	regmap_write(dsi->regs, SUN6I_DSI_SYNC_VSE_REG,
		     sun6i_dsi_build_sync_pkt(MIPI_DSI_V_SYNC_END,
					      device->channel,
					      0, 0));

	/* Vertical geometry, in lines. */
	regmap_write(dsi->regs, SUN6I_DSI_BASIC_SIZE0_REG,
		     SUN6I_DSI_BASIC_SIZE0_VSA(mode->vsync_end -
					       mode->vsync_start) |
		     SUN6I_DSI_BASIC_SIZE0_VBP(mode->vsync_start -
					       mode->vdisplay));

	regmap_write(dsi->regs, SUN6I_DSI_BASIC_SIZE1_REG,
		     SUN6I_DSI_BASIC_SIZE1_VACT(mode->vdisplay) |
		     SUN6I_DSI_BASIC_SIZE1_VT(mode->vtotal));

	/*
	 * A sync period is composed of a blanking packet (4 bytes +
	 * payload + 2 bytes) and a sync event packet (4 bytes). Its
	 * minimal size is therefore 10 bytes
	 *
	 * NOTE(review): these subtractions are unsigned, so a
	 * sync/porch shorter than the overhead wraps around and max()
	 * then selects the wrapped value (truncated to u16).  The
	 * same applies to the hbp/hfp computations below — TODO
	 * confirm the supported modes can never be that small.
	 */
#define HSA_PACKET_OVERHEAD 10
	hsa = max((unsigned int)HSA_PACKET_OVERHEAD,
		  (mode->hsync_end - mode->hsync_start) * Bpp - HSA_PACKET_OVERHEAD);
	regmap_write(dsi->regs, SUN6I_DSI_BLK_HSA0_REG,
		     sun6i_dsi_build_blk0_pkt(device->channel, hsa));
	regmap_write(dsi->regs, SUN6I_DSI_BLK_HSA1_REG,
		     sun6i_dsi_build_blk1_pkt(0, hsa));

	/*
	 * The backporch is set using a blanking packet (4 bytes +
	 * payload + 2 bytes). Its minimal size is therefore 6 bytes
	 */
#define HBP_PACKET_OVERHEAD 6
	hbp = max((unsigned int)HBP_PACKET_OVERHEAD,
		  (mode->hsync_start - mode->hdisplay) * Bpp - HBP_PACKET_OVERHEAD);
	regmap_write(dsi->regs, SUN6I_DSI_BLK_HBP0_REG,
		     sun6i_dsi_build_blk0_pkt(device->channel, hbp));
	regmap_write(dsi->regs, SUN6I_DSI_BLK_HBP1_REG,
		     sun6i_dsi_build_blk1_pkt(0, hbp));

	/*
	 * The frontporch is set using a blanking packet (4 bytes +
	 * payload + 2 bytes). Its minimal size is therefore 6 bytes
	 */
#define HFP_PACKET_OVERHEAD 6
	hfp = max((unsigned int)HFP_PACKET_OVERHEAD,
		  (mode->htotal - mode->hsync_end) * Bpp - HFP_PACKET_OVERHEAD);
	regmap_write(dsi->regs, SUN6I_DSI_BLK_HFP0_REG,
		     sun6i_dsi_build_blk0_pkt(device->channel, hfp));
	regmap_write(dsi->regs, SUN6I_DSI_BLK_HFP1_REG,
		     sun6i_dsi_build_blk1_pkt(0, hfp));

	/*
	 * hblk seems to be the line + porches length.
	 */
	hblk = mode->htotal * Bpp - hsa;
	regmap_write(dsi->regs, SUN6I_DSI_BLK_HBLK0_REG,
		     sun6i_dsi_build_blk0_pkt(device->channel, hblk));
	regmap_write(dsi->regs, SUN6I_DSI_BLK_HBLK1_REG,
		     sun6i_dsi_build_blk1_pkt(0, hblk));

	/*
	 * And I'm not entirely sure what vblk is about. The driver in
	 * Allwinner BSP is using a rather convoluted calculation
	 * there only for 4 lanes. However, using 0 (the !4 lanes
	 * case) even with a 4 lanes screen seems to work...
	 */
	vblk = 0;
	regmap_write(dsi->regs, SUN6I_DSI_BLK_VBLK0_REG,
		     sun6i_dsi_build_blk0_pkt(device->channel, vblk));
	regmap_write(dsi->regs, SUN6I_DSI_BLK_VBLK1_REG,
		     sun6i_dsi_build_blk1_pkt(0, vblk));
}
546
/*
 * Program the instruction jump table for the requested sequence and
 * commit it.  Each 4-bit field of INST_JUMP_SEL names the next slot
 * to run after the slot at that field's index; END terminates.
 */
static int sun6i_dsi_start(struct sun6i_dsi *dsi,
			   enum sun6i_dsi_start_inst func)
{
	switch (func) {
	case DSI_START_LPTX:
		/* LP11 -> LPDT -> end */
		regmap_write(dsi->regs, SUN6I_DSI_INST_JUMP_SEL_REG,
			     DSI_INST_ID_LPDT << (4 * DSI_INST_ID_LP11) |
			     DSI_INST_ID_END  << (4 * DSI_INST_ID_LPDT));
		break;
	case DSI_START_LPRX:
		/* LP11 -> LPDT -> DLY -> TBA (bus turn-around) -> end */
		regmap_write(dsi->regs, SUN6I_DSI_INST_JUMP_SEL_REG,
			     DSI_INST_ID_LPDT << (4 * DSI_INST_ID_LP11) |
			     DSI_INST_ID_DLY  << (4 * DSI_INST_ID_LPDT) |
			     DSI_INST_ID_TBA  << (4 * DSI_INST_ID_DLY) |
			     DSI_INST_ID_END  << (4 * DSI_INST_ID_TBA));
		break;
	case DSI_START_HSC:
		/* LP11 -> HSC -> end */
		regmap_write(dsi->regs, SUN6I_DSI_INST_JUMP_SEL_REG,
			     DSI_INST_ID_HSC  << (4 * DSI_INST_ID_LP11) |
			     DSI_INST_ID_END  << (4 * DSI_INST_ID_HSC));
		break;
	case DSI_START_HSD:
		/*
		 * LP11 -> NOP -> HSD -> DLY -> NOP -> ... (loops until
		 * the jump-config programmed in sun6i_dsi_inst_init()
		 * redirects NOP to HSCEXIT).
		 *
		 * NOTE(review): the END entry is placed in the HSCEXIT
		 * slot, not the DLY slot — presumably intentional per
		 * the vendor BSP; confirm.
		 */
		regmap_write(dsi->regs, SUN6I_DSI_INST_JUMP_SEL_REG,
			     DSI_INST_ID_NOP  << (4 * DSI_INST_ID_LP11) |
			     DSI_INST_ID_HSD  << (4 * DSI_INST_ID_NOP) |
			     DSI_INST_ID_DLY  << (4 * DSI_INST_ID_HSD) |
			     DSI_INST_ID_NOP  << (4 * DSI_INST_ID_DLY) |
			     DSI_INST_ID_END  << (4 * DSI_INST_ID_HSCEXIT));
		break;
	default:
		regmap_write(dsi->regs, SUN6I_DSI_INST_JUMP_SEL_REG,
			     DSI_INST_ID_END  << (4 * DSI_INST_ID_LP11));
		break;
	}

	sun6i_dsi_inst_abort(dsi);
	sun6i_dsi_inst_commit(dsi);

	/* For the HS clock, drop the clock lane from the LP11 slot. */
	if (func == DSI_START_HSC)
		regmap_write_bits(dsi->regs,
				  SUN6I_DSI_INST_FUNC_REG(DSI_INST_ID_LP11),
				  SUN6I_DSI_INST_FUNC_LANE_CEN, 0);

	return 0;
}
592
/*
 * Bring up the DSI output: resume the block (runtime PM), program the
 * video timing and format registers, power up the D-PHY, prepare and
 * enable the panel, then start the HS clock and data streams.
 */
static void sun6i_dsi_encoder_enable(struct drm_encoder *encoder)
{
	struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
	struct sun6i_dsi *dsi = encoder_to_sun6i_dsi(encoder);
	struct mipi_dsi_device *device = dsi->device;
	u16 delay;

	DRM_DEBUG_DRIVER("Enabling DSI output\n");

	/* Triggers sun6i_dsi_runtime_resume(): reset/clock/base setup. */
	pm_runtime_get_sync(dsi->dev);

	delay = sun6i_dsi_get_video_start_delay(dsi, mode);
	regmap_write(dsi->regs, SUN6I_DSI_BASIC_CTL1_REG,
		     SUN6I_DSI_BASIC_CTL1_VIDEO_ST_DELAY(delay) |
		     SUN6I_DSI_BASIC_CTL1_VIDEO_FILL |
		     SUN6I_DSI_BASIC_CTL1_VIDEO_PRECISION |
		     SUN6I_DSI_BASIC_CTL1_VIDEO_MODE);

	sun6i_dsi_setup_burst(dsi, mode);
	sun6i_dsi_setup_inst_loop(dsi, mode);
	sun6i_dsi_setup_format(dsi, mode);
	sun6i_dsi_setup_timings(dsi, mode);

	sun6i_dphy_init(dsi->dphy, device->lanes);
	sun6i_dphy_power_on(dsi->dphy, device->lanes);

	if (!IS_ERR(dsi->panel))
		drm_panel_prepare(dsi->panel);

	/*
	 * FIXME: This should be moved after the switch to HS mode.
	 *
	 * Unfortunately, once in HS mode, it seems like we're not
	 * able to send DCS commands anymore, which would prevent any
	 * panel to send any DCS command as part as their enable
	 * method, which is quite common.
	 *
	 * I haven't seen any artifact due to that sub-optimal
	 * ordering on the panels I've tested it with, so I guess this
	 * will do for now, until that IP is better understood.
	 */
	if (!IS_ERR(dsi->panel))
		drm_panel_enable(dsi->panel);

	sun6i_dsi_start(dsi, DSI_START_HSC);

	/* NOTE(review): 1ms busy-wait between HS clock and HS data —
	 * presumably to let the clock lane settle; confirm. */
	udelay(1000);

	sun6i_dsi_start(dsi, DSI_START_HSD);
}
643
/*
 * Shut down the output in reverse order of enable: panel first, then
 * the D-PHY, then let runtime PM gate the block's clock and reset.
 */
static void sun6i_dsi_encoder_disable(struct drm_encoder *encoder)
{
	struct sun6i_dsi *dsi = encoder_to_sun6i_dsi(encoder);

	DRM_DEBUG_DRIVER("Disabling DSI output\n");

	if (!IS_ERR(dsi->panel)) {
		drm_panel_disable(dsi->panel);
		drm_panel_unprepare(dsi->panel);
	}

	sun6i_dphy_power_off(dsi->dphy);
	sun6i_dphy_exit(dsi->dphy);

	pm_runtime_put(dsi->dev);
}
660
/* Connector .get_modes: the mode list comes straight from the panel. */
static int sun6i_dsi_get_modes(struct drm_connector *connector)
{
	struct sun6i_dsi *dsi = connector_to_sun6i_dsi(connector);

	return drm_panel_get_modes(dsi->panel);
}
667
668static struct drm_connector_helper_funcs sun6i_dsi_connector_helper_funcs = {
669 .get_modes = sun6i_dsi_get_modes,
670};
671
/* A DSI panel is hard-wired: always report it as connected. */
static enum drm_connector_status
sun6i_dsi_connector_detect(struct drm_connector *connector, bool force)
{
	return connector_status_connected;
}
677
/* Connector ops: stock atomic helpers plus the always-connected detect. */
static const struct drm_connector_funcs sun6i_dsi_connector_funcs = {
	.detect			= sun6i_dsi_connector_detect,
	.fill_modes		= drm_helper_probe_single_connector_modes,
	.destroy		= drm_connector_cleanup,
	.reset			= drm_atomic_helper_connector_reset,
	.atomic_duplicate_state	= drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_connector_destroy_state,
};
686
/* Encoder enable/disable entry points. */
static const struct drm_encoder_helper_funcs sun6i_dsi_enc_helper_funcs = {
	.disable	= sun6i_dsi_encoder_disable,
	.enable		= sun6i_dsi_encoder_enable,
};
691
/* Encoder ops: nothing beyond standard cleanup. */
static const struct drm_encoder_funcs sun6i_dsi_enc_funcs = {
	.destroy	= drm_encoder_cleanup,
};
695
/*
 * Build the 32-bit packet header for a DCS command: the data type in
 * the low byte, then either the 16-bit word count (long write) or up
 * to two payload bytes (short write), topped with the ECC byte.
 *
 * NOTE(review): the word count is tx_len + 1 — presumably accounting
 * for an extra byte the hardware expects; confirm against the
 * vendor BSP.
 */
static u32 sun6i_dsi_dcs_build_pkt_hdr(struct sun6i_dsi *dsi,
				       const struct mipi_dsi_msg *msg)
{
	u32 pkt = msg->type;

	if (msg->type == MIPI_DSI_DCS_LONG_WRITE) {
		pkt |= ((msg->tx_len + 1) & 0xffff) << 8;
		pkt |= (((msg->tx_len + 1) >> 8) & 0xffff) << 16;
	} else {
		pkt |= (((u8 *)msg->tx_buf)[0] << 8);
		if (msg->tx_len > 1)
			pkt |= (((u8 *)msg->tx_buf)[1] << 16);
	}

	pkt |= sun6i_dsi_ecc_compute(pkt) << 24;

	return pkt;
}
714
/*
 * Send a short DCS write: the whole command fits in the 4-byte packet
 * header.  CMD_CTL's low byte holds the transfer size minus one.
 * Fire-and-forget: completion is checked by the next transfer.
 */
static int sun6i_dsi_dcs_write_short(struct sun6i_dsi *dsi,
				     const struct mipi_dsi_msg *msg)
{
	regmap_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(0),
		     sun6i_dsi_dcs_build_pkt_hdr(dsi, msg));
	regmap_write_bits(dsi->regs, SUN6I_DSI_CMD_CTL_REG,
			  0xff, (4 - 1));

	sun6i_dsi_start(dsi, DSI_START_LPTX);

	return msg->tx_len;
}
727
728static int sun6i_dsi_dcs_write_long(struct sun6i_dsi *dsi,
729 const struct mipi_dsi_msg *msg)
730{
731 int ret, len = 0;
732 u8 *bounce;
733 u16 crc;
734
735 regmap_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(0),
736 sun6i_dsi_dcs_build_pkt_hdr(dsi, msg));
737
738 bounce = kzalloc(msg->tx_len + sizeof(crc), GFP_KERNEL);
739 if (!bounce)
740 return -ENOMEM;
741
742 memcpy(bounce, msg->tx_buf, msg->tx_len);
743 len += msg->tx_len;
744
745 crc = sun6i_dsi_crc_compute(bounce, msg->tx_len);
746 memcpy((u8 *)bounce + msg->tx_len, &crc, sizeof(crc));
747 len += sizeof(crc);
748
749 regmap_bulk_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(1), bounce, len);
750 regmap_write(dsi->regs, SUN6I_DSI_CMD_CTL_REG, len + 4 - 1);
751 kfree(bounce);
752
753 sun6i_dsi_start(dsi, DSI_START_LPTX);
754
755 ret = sun6i_dsi_inst_wait_for_completion(dsi);
756 if (ret < 0) {
757 sun6i_dsi_inst_abort(dsi);
758 return ret;
759 }
760
761 /*
762 * TODO: There's some bits (reg 0x200, bits 8/9) that
763 * apparently can be used to check whether the data have been
764 * sent, but I couldn't get it to work reliably.
765 */
766 return msg->tx_len;
767}
768
/*
 * Issue a DCS read and fetch a single returned byte.  Only 1-byte
 * reads are supported (enforced by sun6i_dsi_transfer()).
 * Returns 1 (bytes read) on success, negative error otherwise.
 */
static int sun6i_dsi_dcs_read(struct sun6i_dsi *dsi,
			      const struct mipi_dsi_msg *msg)
{
	u32 val;
	int ret;
	u8 byte0;

	regmap_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(0),
		     sun6i_dsi_dcs_build_pkt_hdr(dsi, msg));
	regmap_write(dsi->regs, SUN6I_DSI_CMD_CTL_REG,
		     (4 - 1));

	/* LPRX ends with a bus turn-around so the peripheral can reply. */
	sun6i_dsi_start(dsi, DSI_START_LPRX);

	ret = sun6i_dsi_inst_wait_for_completion(dsi);
	if (ret < 0) {
		sun6i_dsi_inst_abort(dsi);
		return ret;
	}

	/*
	 * TODO: There's some bits (reg 0x200, bits 24/25) that
	 * apparently can be used to check whether the data have been
	 * received, but I couldn't get it to work reliably.
	 */
	regmap_read(dsi->regs, SUN6I_DSI_CMD_CTL_REG, &val);
	if (val & SUN6I_DSI_CMD_CTL_RX_OVERFLOW)
		return -EIO;

	/* First RX byte is the response data type; byte 1 is the payload. */
	regmap_read(dsi->regs, SUN6I_DSI_CMD_RX_REG(0), &val);
	byte0 = val & 0xff;
	if (byte0 == MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT)
		return -EIO;

	((u8 *)msg->rx_buf)[0] = (val >> 8);

	return 1;
}
807
/*
 * mipi_dsi_host .attach: remember the peripheral and look up its
 * panel.  Fails with -EINVAL if the panel hasn't been registered yet
 * (sun6i_dsi_bind() separately defers until dsi->panel is set).
 */
static int sun6i_dsi_attach(struct mipi_dsi_host *host,
			    struct mipi_dsi_device *device)
{
	struct sun6i_dsi *dsi = host_to_sun6i_dsi(host);

	dsi->device = device;
	dsi->panel = of_drm_find_panel(device->dev.of_node);
	if (!dsi->panel)
		return -EINVAL;

	dev_info(host->dev, "Attached device %s\n", device->name);

	return 0;
}
822
823static int sun6i_dsi_detach(struct mipi_dsi_host *host,
824 struct mipi_dsi_device *device)
825{
826 struct sun6i_dsi *dsi = host_to_sun6i_dsi(host);
827
828 dsi->panel = NULL;
829 dsi->device = NULL;
830
831 return 0;
832}
833
/*
 * mipi_dsi_host .transfer: dispatch a DCS message to the matching
 * short-write / long-write / read helper.  Any previous in-flight
 * sequence is waited on (or aborted) and the RX/TX status flags are
 * cleared first.
 */
static ssize_t sun6i_dsi_transfer(struct mipi_dsi_host *host,
				  const struct mipi_dsi_msg *msg)
{
	struct sun6i_dsi *dsi = host_to_sun6i_dsi(host);
	int ret;

	ret = sun6i_dsi_inst_wait_for_completion(dsi);
	if (ret < 0)
		sun6i_dsi_inst_abort(dsi);

	/* Write-one-to-clear the overflow and RX/TX done flags. */
	regmap_write(dsi->regs, SUN6I_DSI_CMD_CTL_REG,
		     SUN6I_DSI_CMD_CTL_RX_OVERFLOW |
		     SUN6I_DSI_CMD_CTL_RX_FLAG |
		     SUN6I_DSI_CMD_CTL_TX_FLAG);

	switch (msg->type) {
	case MIPI_DSI_DCS_SHORT_WRITE:
	case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
		ret = sun6i_dsi_dcs_write_short(dsi, msg);
		break;

	case MIPI_DSI_DCS_LONG_WRITE:
		ret = sun6i_dsi_dcs_write_long(dsi, msg);
		break;

	case MIPI_DSI_DCS_READ:
		if (msg->rx_len == 1) {
			ret = sun6i_dsi_dcs_read(dsi, msg);
			break;
		}
		/* fall through - only single-byte reads are supported */

	default:
		ret = -EINVAL;
	}

	return ret;
}
871
/* Host ops exposed to the MIPI-DSI core. */
static const struct mipi_dsi_host_ops sun6i_dsi_host_ops = {
	.attach		= sun6i_dsi_attach,
	.detach		= sun6i_dsi_detach,
	.transfer	= sun6i_dsi_transfer,
};
877
/* 32-bit MMIO register map, spanning up to the last TX FIFO word. */
static const struct regmap_config sun6i_dsi_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = SUN6I_DSI_CMD_TX_REG(255),
	.name = "mipi-dsi",
};
885
886static int sun6i_dsi_bind(struct device *dev, struct device *master,
887 void *data)
888{
889 struct drm_device *drm = data;
890 struct sun4i_drv *drv = drm->dev_private;
891 struct sun6i_dsi *dsi = dev_get_drvdata(dev);
892 int ret;
893
894 if (!dsi->panel)
895 return -EPROBE_DEFER;
896
897 dsi->drv = drv;
898
899 drm_encoder_helper_add(&dsi->encoder,
900 &sun6i_dsi_enc_helper_funcs);
901 ret = drm_encoder_init(drm,
902 &dsi->encoder,
903 &sun6i_dsi_enc_funcs,
904 DRM_MODE_ENCODER_DSI,
905 NULL);
906 if (ret) {
907 dev_err(dsi->dev, "Couldn't initialise the DSI encoder\n");
908 return ret;
909 }
910 dsi->encoder.possible_crtcs = BIT(0);
911
912 drm_connector_helper_add(&dsi->connector,
913 &sun6i_dsi_connector_helper_funcs);
914 ret = drm_connector_init(drm, &dsi->connector,
915 &sun6i_dsi_connector_funcs,
916 DRM_MODE_CONNECTOR_DSI);
917 if (ret) {
918 dev_err(dsi->dev,
919 "Couldn't initialise the DSI connector\n");
920 goto err_cleanup_connector;
921 }
922
923 drm_mode_connector_attach_encoder(&dsi->connector, &dsi->encoder);
924 drm_panel_attach(dsi->panel, &dsi->connector);
925
926 return 0;
927
928err_cleanup_connector:
929 drm_encoder_cleanup(&dsi->encoder);
930 return ret;
931}
932
/* Component unbind: detach the panel (DRM core frees the KMS objects). */
static void sun6i_dsi_unbind(struct device *dev, struct device *master,
			     void *data)
{
	struct sun6i_dsi *dsi = dev_get_drvdata(dev);

	drm_panel_detach(dsi->panel);
}
940
/* Component framework hooks for binding into the sun4i DRM device. */
static const struct component_ops sun6i_dsi_ops = {
	.bind	= sun6i_dsi_bind,
	.unbind	= sun6i_dsi_unbind,
};
945
/*
 * Platform probe: map the registers, grab reset/clock resources, pin
 * the module clock rate, probe the companion D-PHY, then register the
 * DSI host and the DRM component.
 */
static int sun6i_dsi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *dphy_node;
	struct sun6i_dsi *dsi;
	struct resource *res;
	void __iomem *base;
	int ret;

	dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
	if (!dsi)
		return -ENOMEM;
	dev_set_drvdata(dev, dsi);
	dsi->dev = dev;
	dsi->host.ops = &sun6i_dsi_host_ops;
	dsi->host.dev = dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base)) {
		dev_err(dev, "Couldn't map the DSI encoder registers\n");
		return PTR_ERR(base);
	}

	/* The "bus" clock is managed by the regmap itself. */
	dsi->regs = devm_regmap_init_mmio_clk(dev, "bus", base,
					      &sun6i_dsi_regmap_config);
	if (IS_ERR(dsi->regs)) {
		dev_err(dev, "Couldn't create the DSI encoder regmap\n");
		return PTR_ERR(dsi->regs);
	}

	dsi->reset = devm_reset_control_get_shared(dev, NULL);
	if (IS_ERR(dsi->reset)) {
		dev_err(dev, "Couldn't get our reset line\n");
		return PTR_ERR(dsi->reset);
	}

	dsi->mod_clk = devm_clk_get(dev, "mod");
	if (IS_ERR(dsi->mod_clk)) {
		dev_err(dev, "Couldn't get the DSI mod clock\n");
		return PTR_ERR(dsi->mod_clk);
	}

	/*
	 * In order to operate properly, that clock seems to be always
	 * set to 297MHz.
	 */
	clk_set_rate_exclusive(dsi->mod_clk, 297000000);

	dphy_node = of_parse_phandle(dev->of_node, "phys", 0);
	ret = sun6i_dphy_probe(dsi, dphy_node);
	of_node_put(dphy_node);
	if (ret) {
		dev_err(dev, "Couldn't get the MIPI D-PHY\n");
		goto err_unprotect_clk;
	}

	pm_runtime_enable(dev);

	ret = mipi_dsi_host_register(&dsi->host);
	if (ret) {
		dev_err(dev, "Couldn't register MIPI-DSI host\n");
		goto err_remove_phy;
	}

	ret = component_add(&pdev->dev, &sun6i_dsi_ops);
	if (ret) {
		dev_err(dev, "Couldn't register our component\n");
		goto err_remove_dsi_host;
	}

	return 0;

	/* Unwind in reverse order of acquisition. */
err_remove_dsi_host:
	mipi_dsi_host_unregister(&dsi->host);
err_remove_phy:
	pm_runtime_disable(dev);
	sun6i_dphy_remove(dsi);
err_unprotect_clk:
	clk_rate_exclusive_put(dsi->mod_clk);
	return ret;
}
1028
/* Platform remove: undo probe in reverse order. */
static int sun6i_dsi_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct sun6i_dsi *dsi = dev_get_drvdata(dev);

	component_del(&pdev->dev, &sun6i_dsi_ops);
	mipi_dsi_host_unregister(&dsi->host);
	pm_runtime_disable(dev);
	sun6i_dphy_remove(dsi);
	clk_rate_exclusive_put(dsi->mod_clk);

	return 0;
}
1042
/*
 * Runtime-PM resume: ungate the block and apply the base
 * configuration that doesn't depend on the display mode.
 */
static int sun6i_dsi_runtime_resume(struct device *dev)
{
	struct sun6i_dsi *dsi = dev_get_drvdata(dev);

	reset_control_deassert(dsi->reset);
	clk_prepare_enable(dsi->mod_clk);

	/*
	 * Enable the DSI block.
	 *
	 * Some part of it can only be done once we get a number of
	 * lanes, see sun6i_dsi_inst_init
	 */
	regmap_write(dsi->regs, SUN6I_DSI_CTL_REG, SUN6I_DSI_CTL_EN);

	regmap_write(dsi->regs, SUN6I_DSI_BASIC_CTL0_REG,
		     SUN6I_DSI_BASIC_CTL0_ECC_EN | SUN6I_DSI_BASIC_CTL0_CRC_EN);

	regmap_write(dsi->regs, SUN6I_DSI_TRANS_START_REG, 10);
	regmap_write(dsi->regs, SUN6I_DSI_TRANS_ZERO_REG, 0);

	/* Lane-dependent setup, only possible once a device attached. */
	if (dsi->device)
		sun6i_dsi_inst_init(dsi, dsi->device);

	regmap_write(dsi->regs, SUN6I_DSI_DEBUG_DATA_REG, 0xff);

	return 0;
}
1071
/* Runtime-PM suspend: gate the clock and put the block back in reset. */
static int sun6i_dsi_runtime_suspend(struct device *dev)
{
	struct sun6i_dsi *dsi = dev_get_drvdata(dev);

	clk_disable_unprepare(dsi->mod_clk);
	reset_control_assert(dsi->reset);

	return 0;
}
1081
/* Runtime PM only; no system sleep callbacks. */
static const struct dev_pm_ops sun6i_dsi_pm_ops = {
	SET_RUNTIME_PM_OPS(sun6i_dsi_runtime_suspend,
			   sun6i_dsi_runtime_resume,
			   NULL)
};
1087
/* Devicetree match table. */
static const struct of_device_id sun6i_dsi_of_table[] = {
	{ .compatible = "allwinner,sun6i-a31-mipi-dsi" },
	{ }
};
MODULE_DEVICE_TABLE(of, sun6i_dsi_of_table);
1093
/* Platform driver glue. */
static struct platform_driver sun6i_dsi_platform_driver = {
	.probe		= sun6i_dsi_probe,
	.remove		= sun6i_dsi_remove,
	.driver		= {
		.name		= "sun6i-mipi-dsi",
		.of_match_table	= sun6i_dsi_of_table,
		.pm		= &sun6i_dsi_pm_ops,
	},
};
module_platform_driver(sun6i_dsi_platform_driver);
1104
1105MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
1106MODULE_DESCRIPTION("Allwinner A31 DSI Driver");
1107MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
new file mode 100644
index 000000000000..dbbc5b3ecbda
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
@@ -0,0 +1,63 @@
/* SPDX-License-Identifier: GPL-2.0+ */
2/*
3 * Copyright (c) 2016 Allwinnertech Co., Ltd.
4 * Copyright (C) 2017-2018 Bootlin
5 *
6 * Maxime Ripard <maxime.ripard@bootlin.com>
7 */
8
9#ifndef _SUN6I_MIPI_DSI_H_
10#define _SUN6I_MIPI_DSI_H_
11
12#include <drm/drm_connector.h>
13#include <drm/drm_encoder.h>
14#include <drm/drm_mipi_dsi.h>
15
/*
 * Resources of the MIPI D-PHY that pairs with the DSI controller.
 * Member order is kept as-is; other translation units rely on this layout.
 */
struct sun6i_dphy {
	struct clk		*bus_clk;	/* register-bus clock */
	struct clk		*mod_clk;	/* PHY module clock */
	struct regmap		*regs;		/* PHY register map */
	struct reset_control	*reset;		/* PHY reset line */
};
22
23struct sun6i_dsi {
24 struct drm_connector connector;
25 struct drm_encoder encoder;
26 struct mipi_dsi_host host;
27
28 struct clk *bus_clk;
29 struct clk *mod_clk;
30 struct regmap *regs;
31 struct reset_control *reset;
32 struct sun6i_dphy *dphy;
33
34 struct device *dev;
35 struct sun4i_drv *drv;
36 struct mipi_dsi_device *device;
37 struct drm_panel *panel;
38};
39
40static inline struct sun6i_dsi *host_to_sun6i_dsi(struct mipi_dsi_host *host)
41{
42 return container_of(host, struct sun6i_dsi, host);
43};
44
45static inline struct sun6i_dsi *connector_to_sun6i_dsi(struct drm_connector *connector)
46{
47 return container_of(connector, struct sun6i_dsi, connector);
48};
49
50static inline struct sun6i_dsi *encoder_to_sun6i_dsi(const struct drm_encoder *encoder)
51{
52 return container_of(encoder, struct sun6i_dsi, encoder);
53};
54
/* D-PHY instantiation, driven from the DSI controller's probe/remove. */
int sun6i_dphy_probe(struct sun6i_dsi *dsi, struct device_node *node);
int sun6i_dphy_remove(struct sun6i_dsi *dsi);

/* D-PHY power sequencing, driven from the controller's enable/disable paths. */
int sun6i_dphy_init(struct sun6i_dphy *dphy, unsigned int lanes);
int sun6i_dphy_power_on(struct sun6i_dphy *dphy, unsigned int lanes);
int sun6i_dphy_power_off(struct sun6i_dphy *dphy);
int sun6i_dphy_exit(struct sun6i_dphy *dphy);
62
63#endif /* _SUN6I_MIPI_DSI_H_ */
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 7afe2f635f74..a0519612ae2c 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -38,26 +38,11 @@ static int tegra_atomic_check(struct drm_device *drm,
38{ 38{
39 int err; 39 int err;
40 40
41 err = drm_atomic_helper_check_modeset(drm, state); 41 err = drm_atomic_helper_check(drm, state);
42 if (err < 0) 42 if (err < 0)
43 return err; 43 return err;
44 44
45 err = tegra_display_hub_atomic_check(drm, state); 45 return tegra_display_hub_atomic_check(drm, state);
46 if (err < 0)
47 return err;
48
49 err = drm_atomic_normalize_zpos(drm, state);
50 if (err < 0)
51 return err;
52
53 err = drm_atomic_helper_check_planes(drm, state);
54 if (err < 0)
55 return err;
56
57 if (state->legacy_cursor_update)
58 state->async_update = !drm_atomic_helper_async_check(drm, state);
59
60 return 0;
61} 46}
62 47
63static const struct drm_mode_config_funcs tegra_drm_mode_config_funcs = { 48static const struct drm_mode_config_funcs tegra_drm_mode_config_funcs = {
@@ -151,6 +136,8 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
151 136
152 drm->mode_config.allow_fb_modifiers = true; 137 drm->mode_config.allow_fb_modifiers = true;
153 138
139 drm->mode_config.normalize_zpos = true;
140
154 drm->mode_config.funcs = &tegra_drm_mode_config_funcs; 141 drm->mode_config.funcs = &tegra_drm_mode_config_funcs;
155 drm->mode_config.helper_private = &tegra_drm_mode_config_helpers; 142 drm->mode_config.helper_private = &tegra_drm_mode_config_helpers;
156 143
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c b/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
index 4c6616278c48..24a33bf862fa 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
@@ -91,7 +91,7 @@ EXPORT_SYMBOL(tinydrm_gem_cma_prime_import_sg_table);
91 * GEM object state and frees the memory used to store the object itself using 91 * GEM object state and frees the memory used to store the object itself using
92 * drm_gem_cma_free_object(). It also handles PRIME buffers which has the kernel 92 * drm_gem_cma_free_object(). It also handles PRIME buffers which has the kernel
93 * virtual address set by tinydrm_gem_cma_prime_import_sg_table(). Drivers 93 * virtual address set by tinydrm_gem_cma_prime_import_sg_table(). Drivers
94 * can use this as their &drm_driver->gem_free_object callback. 94 * can use this as their &drm_driver->gem_free_object_unlocked callback.
95 */ 95 */
96void tinydrm_gem_cma_free_object(struct drm_gem_object *gem_obj) 96void tinydrm_gem_cma_free_object(struct drm_gem_object *gem_obj)
97{ 97{
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c b/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
index d1c3ce9ab294..dcd390163a4a 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
@@ -78,6 +78,36 @@ bool tinydrm_merge_clips(struct drm_clip_rect *dst,
78} 78}
79EXPORT_SYMBOL(tinydrm_merge_clips); 79EXPORT_SYMBOL(tinydrm_merge_clips);
80 80
81int tinydrm_fb_dirty(struct drm_framebuffer *fb,
82 struct drm_file *file_priv,
83 unsigned int flags, unsigned int color,
84 struct drm_clip_rect *clips,
85 unsigned int num_clips)
86{
87 struct tinydrm_device *tdev = fb->dev->dev_private;
88 struct drm_plane *plane = &tdev->pipe.plane;
89 int ret = 0;
90
91 drm_modeset_lock(&plane->mutex, NULL);
92
93 /* fbdev can flush even when we're not interested */
94 if (plane->state->fb == fb) {
95 mutex_lock(&tdev->dirty_lock);
96 ret = tdev->fb_dirty(fb, file_priv, flags,
97 color, clips, num_clips);
98 mutex_unlock(&tdev->dirty_lock);
99 }
100
101 drm_modeset_unlock(&plane->mutex);
102
103 if (ret)
104 dev_err_once(fb->dev->dev,
105 "Failed to update display %d\n", ret);
106
107 return ret;
108}
109EXPORT_SYMBOL(tinydrm_fb_dirty);
110
81/** 111/**
82 * tinydrm_memcpy - Copy clip buffer 112 * tinydrm_memcpy - Copy clip buffer
83 * @dst: Destination buffer 113 * @dst: Destination buffer
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
index 11ae950b0fc9..7e8e24d0b7a7 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
@@ -125,9 +125,8 @@ void tinydrm_display_pipe_update(struct drm_simple_display_pipe *pipe,
125 struct drm_crtc *crtc = &tdev->pipe.crtc; 125 struct drm_crtc *crtc = &tdev->pipe.crtc;
126 126
127 if (fb && (fb != old_state->fb)) { 127 if (fb && (fb != old_state->fb)) {
128 pipe->plane.fb = fb; 128 if (tdev->fb_dirty)
129 if (fb->funcs->dirty) 129 tdev->fb_dirty(fb, NULL, 0, 0, NULL, 0);
130 fb->funcs->dirty(fb, NULL, 0, 0, NULL, 0);
131 } 130 }
132 131
133 if (crtc->state->event) { 132 if (crtc->state->event) {
@@ -139,23 +138,6 @@ void tinydrm_display_pipe_update(struct drm_simple_display_pipe *pipe,
139} 138}
140EXPORT_SYMBOL(tinydrm_display_pipe_update); 139EXPORT_SYMBOL(tinydrm_display_pipe_update);
141 140
142/**
143 * tinydrm_display_pipe_prepare_fb - Display pipe prepare_fb helper
144 * @pipe: Simple display pipe
145 * @plane_state: Plane state
146 *
147 * This function uses drm_gem_fb_prepare_fb() to check if the plane FB has an
148 * dma-buf attached, extracts the exclusive fence and attaches it to plane
149 * state for the atomic helper to wait on. Drivers can use this as their
150 * &drm_simple_display_pipe_funcs->prepare_fb callback.
151 */
152int tinydrm_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
153 struct drm_plane_state *plane_state)
154{
155 return drm_gem_fb_prepare_fb(&pipe->plane, plane_state);
156}
157EXPORT_SYMBOL(tinydrm_display_pipe_prepare_fb);
158
159static int tinydrm_rotate_mode(struct drm_display_mode *mode, 141static int tinydrm_rotate_mode(struct drm_display_mode *mode,
160 unsigned int rotation) 142 unsigned int rotation)
161{ 143{
diff --git a/drivers/gpu/drm/tinydrm/ili9225.c b/drivers/gpu/drm/tinydrm/ili9225.c
index a0759502b81a..841c69aba059 100644
--- a/drivers/gpu/drm/tinydrm/ili9225.c
+++ b/drivers/gpu/drm/tinydrm/ili9225.c
@@ -88,14 +88,8 @@ static int ili9225_fb_dirty(struct drm_framebuffer *fb,
88 bool full; 88 bool full;
89 void *tr; 89 void *tr;
90 90
91 mutex_lock(&tdev->dirty_lock);
92
93 if (!mipi->enabled) 91 if (!mipi->enabled)
94 goto out_unlock; 92 return 0;
95
96 /* fbdev can flush even when we're not interested */
97 if (tdev->pipe.plane.fb != fb)
98 goto out_unlock;
99 93
100 full = tinydrm_merge_clips(&clip, clips, num_clips, flags, 94 full = tinydrm_merge_clips(&clip, clips, num_clips, flags,
101 fb->width, fb->height); 95 fb->width, fb->height);
@@ -108,7 +102,7 @@ static int ili9225_fb_dirty(struct drm_framebuffer *fb,
108 tr = mipi->tx_buf; 102 tr = mipi->tx_buf;
109 ret = mipi_dbi_buf_copy(mipi->tx_buf, fb, &clip, swap); 103 ret = mipi_dbi_buf_copy(mipi->tx_buf, fb, &clip, swap);
110 if (ret) 104 if (ret)
111 goto out_unlock; 105 return ret;
112 } else { 106 } else {
113 tr = cma_obj->vaddr; 107 tr = cma_obj->vaddr;
114 } 108 }
@@ -159,24 +153,18 @@ static int ili9225_fb_dirty(struct drm_framebuffer *fb,
159 ret = mipi_dbi_command_buf(mipi, ILI9225_WRITE_DATA_TO_GRAM, tr, 153 ret = mipi_dbi_command_buf(mipi, ILI9225_WRITE_DATA_TO_GRAM, tr,
160 (clip.x2 - clip.x1) * (clip.y2 - clip.y1) * 2); 154 (clip.x2 - clip.x1) * (clip.y2 - clip.y1) * 2);
161 155
162out_unlock:
163 mutex_unlock(&tdev->dirty_lock);
164
165 if (ret)
166 dev_err_once(fb->dev->dev, "Failed to update display %d\n",
167 ret);
168
169 return ret; 156 return ret;
170} 157}
171 158
172static const struct drm_framebuffer_funcs ili9225_fb_funcs = { 159static const struct drm_framebuffer_funcs ili9225_fb_funcs = {
173 .destroy = drm_gem_fb_destroy, 160 .destroy = drm_gem_fb_destroy,
174 .create_handle = drm_gem_fb_create_handle, 161 .create_handle = drm_gem_fb_create_handle,
175 .dirty = ili9225_fb_dirty, 162 .dirty = tinydrm_fb_dirty,
176}; 163};
177 164
178static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe, 165static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe,
179 struct drm_crtc_state *crtc_state) 166 struct drm_crtc_state *crtc_state,
167 struct drm_plane_state *plane_state)
180{ 168{
181 struct tinydrm_device *tdev = pipe_to_tinydrm(pipe); 169 struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
182 struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev); 170 struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
@@ -268,7 +256,7 @@ static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe,
268 256
269 ili9225_command(mipi, ILI9225_DISPLAY_CONTROL_1, 0x1017); 257 ili9225_command(mipi, ILI9225_DISPLAY_CONTROL_1, 0x1017);
270 258
271 mipi_dbi_enable_flush(mipi); 259 mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
272} 260}
273 261
274static void ili9225_pipe_disable(struct drm_simple_display_pipe *pipe) 262static void ili9225_pipe_disable(struct drm_simple_display_pipe *pipe)
@@ -341,6 +329,8 @@ static int ili9225_init(struct device *dev, struct mipi_dbi *mipi,
341 if (ret) 329 if (ret)
342 return ret; 330 return ret;
343 331
332 tdev->fb_dirty = ili9225_fb_dirty;
333
344 ret = tinydrm_display_pipe_init(tdev, pipe_funcs, 334 ret = tinydrm_display_pipe_init(tdev, pipe_funcs,
345 DRM_MODE_CONNECTOR_VIRTUAL, 335 DRM_MODE_CONNECTOR_VIRTUAL,
346 ili9225_formats, 336 ili9225_formats,
@@ -364,7 +354,7 @@ static const struct drm_simple_display_pipe_funcs ili9225_pipe_funcs = {
364 .enable = ili9225_pipe_enable, 354 .enable = ili9225_pipe_enable,
365 .disable = ili9225_pipe_disable, 355 .disable = ili9225_pipe_disable,
366 .update = tinydrm_display_pipe_update, 356 .update = tinydrm_display_pipe_update,
367 .prepare_fb = tinydrm_display_pipe_prepare_fb, 357 .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
368}; 358};
369 359
370static const struct drm_display_mode ili9225_mode = { 360static const struct drm_display_mode ili9225_mode = {
diff --git a/drivers/gpu/drm/tinydrm/mi0283qt.c b/drivers/gpu/drm/tinydrm/mi0283qt.c
index d8ed6e6f8e05..d5ef65179c16 100644
--- a/drivers/gpu/drm/tinydrm/mi0283qt.c
+++ b/drivers/gpu/drm/tinydrm/mi0283qt.c
@@ -19,6 +19,7 @@
19 19
20#include <drm/drm_fb_helper.h> 20#include <drm/drm_fb_helper.h>
21#include <drm/drm_modeset_helper.h> 21#include <drm/drm_modeset_helper.h>
22#include <drm/drm_gem_framebuffer_helper.h>
22#include <drm/tinydrm/mipi-dbi.h> 23#include <drm/tinydrm/mipi-dbi.h>
23#include <drm/tinydrm/tinydrm-helpers.h> 24#include <drm/tinydrm/tinydrm-helpers.h>
24#include <video/mipi_display.h> 25#include <video/mipi_display.h>
@@ -49,7 +50,8 @@
49#define ILI9341_MADCTL_MY BIT(7) 50#define ILI9341_MADCTL_MY BIT(7)
50 51
51static void mi0283qt_enable(struct drm_simple_display_pipe *pipe, 52static void mi0283qt_enable(struct drm_simple_display_pipe *pipe,
52 struct drm_crtc_state *crtc_state) 53 struct drm_crtc_state *crtc_state,
54 struct drm_plane_state *plane_state)
53{ 55{
54 struct tinydrm_device *tdev = pipe_to_tinydrm(pipe); 56 struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
55 struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev); 57 struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
@@ -126,14 +128,14 @@ static void mi0283qt_enable(struct drm_simple_display_pipe *pipe,
126 msleep(100); 128 msleep(100);
127 129
128out_enable: 130out_enable:
129 mipi_dbi_enable_flush(mipi); 131 mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
130} 132}
131 133
132static const struct drm_simple_display_pipe_funcs mi0283qt_pipe_funcs = { 134static const struct drm_simple_display_pipe_funcs mi0283qt_pipe_funcs = {
133 .enable = mi0283qt_enable, 135 .enable = mi0283qt_enable,
134 .disable = mipi_dbi_pipe_disable, 136 .disable = mipi_dbi_pipe_disable,
135 .update = tinydrm_display_pipe_update, 137 .update = tinydrm_display_pipe_update,
136 .prepare_fb = tinydrm_display_pipe_prepare_fb, 138 .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
137}; 139};
138 140
139static const struct drm_display_mode mi0283qt_mode = { 141static const struct drm_display_mode mi0283qt_mode = {
diff --git a/drivers/gpu/drm/tinydrm/mipi-dbi.c b/drivers/gpu/drm/tinydrm/mipi-dbi.c
index 9e903812b573..4d1fb31a781f 100644
--- a/drivers/gpu/drm/tinydrm/mipi-dbi.c
+++ b/drivers/gpu/drm/tinydrm/mipi-dbi.c
@@ -219,14 +219,8 @@ static int mipi_dbi_fb_dirty(struct drm_framebuffer *fb,
219 bool full; 219 bool full;
220 void *tr; 220 void *tr;
221 221
222 mutex_lock(&tdev->dirty_lock);
223
224 if (!mipi->enabled) 222 if (!mipi->enabled)
225 goto out_unlock; 223 return 0;
226
227 /* fbdev can flush even when we're not interested */
228 if (tdev->pipe.plane.fb != fb)
229 goto out_unlock;
230 224
231 full = tinydrm_merge_clips(&clip, clips, num_clips, flags, 225 full = tinydrm_merge_clips(&clip, clips, num_clips, flags,
232 fb->width, fb->height); 226 fb->width, fb->height);
@@ -239,7 +233,7 @@ static int mipi_dbi_fb_dirty(struct drm_framebuffer *fb,
239 tr = mipi->tx_buf; 233 tr = mipi->tx_buf;
240 ret = mipi_dbi_buf_copy(mipi->tx_buf, fb, &clip, swap); 234 ret = mipi_dbi_buf_copy(mipi->tx_buf, fb, &clip, swap);
241 if (ret) 235 if (ret)
242 goto out_unlock; 236 return ret;
243 } else { 237 } else {
244 tr = cma_obj->vaddr; 238 tr = cma_obj->vaddr;
245 } 239 }
@@ -254,20 +248,13 @@ static int mipi_dbi_fb_dirty(struct drm_framebuffer *fb,
254 ret = mipi_dbi_command_buf(mipi, MIPI_DCS_WRITE_MEMORY_START, tr, 248 ret = mipi_dbi_command_buf(mipi, MIPI_DCS_WRITE_MEMORY_START, tr,
255 (clip.x2 - clip.x1) * (clip.y2 - clip.y1) * 2); 249 (clip.x2 - clip.x1) * (clip.y2 - clip.y1) * 2);
256 250
257out_unlock:
258 mutex_unlock(&tdev->dirty_lock);
259
260 if (ret)
261 dev_err_once(fb->dev->dev, "Failed to update display %d\n",
262 ret);
263
264 return ret; 251 return ret;
265} 252}
266 253
267static const struct drm_framebuffer_funcs mipi_dbi_fb_funcs = { 254static const struct drm_framebuffer_funcs mipi_dbi_fb_funcs = {
268 .destroy = drm_gem_fb_destroy, 255 .destroy = drm_gem_fb_destroy,
269 .create_handle = drm_gem_fb_create_handle, 256 .create_handle = drm_gem_fb_create_handle,
270 .dirty = mipi_dbi_fb_dirty, 257 .dirty = tinydrm_fb_dirty,
271}; 258};
272 259
273/** 260/**
@@ -278,13 +265,16 @@ static const struct drm_framebuffer_funcs mipi_dbi_fb_funcs = {
278 * enables the backlight. Drivers can use this in their 265 * enables the backlight. Drivers can use this in their
279 * &drm_simple_display_pipe_funcs->enable callback. 266 * &drm_simple_display_pipe_funcs->enable callback.
280 */ 267 */
281void mipi_dbi_enable_flush(struct mipi_dbi *mipi) 268void mipi_dbi_enable_flush(struct mipi_dbi *mipi,
269 struct drm_crtc_state *crtc_state,
270 struct drm_plane_state *plane_state)
282{ 271{
283 struct drm_framebuffer *fb = mipi->tinydrm.pipe.plane.fb; 272 struct tinydrm_device *tdev = &mipi->tinydrm;
273 struct drm_framebuffer *fb = plane_state->fb;
284 274
285 mipi->enabled = true; 275 mipi->enabled = true;
286 if (fb) 276 if (fb)
287 fb->funcs->dirty(fb, NULL, 0, 0, NULL, 0); 277 tdev->fb_dirty(fb, NULL, 0, 0, NULL, 0);
288 278
289 backlight_enable(mipi->backlight); 279 backlight_enable(mipi->backlight);
290} 280}
@@ -381,6 +371,8 @@ int mipi_dbi_init(struct device *dev, struct mipi_dbi *mipi,
381 if (ret) 371 if (ret)
382 return ret; 372 return ret;
383 373
374 tdev->fb_dirty = mipi_dbi_fb_dirty;
375
384 /* TODO: Maybe add DRM_MODE_CONNECTOR_SPI */ 376 /* TODO: Maybe add DRM_MODE_CONNECTOR_SPI */
385 ret = tinydrm_display_pipe_init(tdev, pipe_funcs, 377 ret = tinydrm_display_pipe_init(tdev, pipe_funcs,
386 DRM_MODE_CONNECTOR_VIRTUAL, 378 DRM_MODE_CONNECTOR_VIRTUAL,
diff --git a/drivers/gpu/drm/tinydrm/repaper.c b/drivers/gpu/drm/tinydrm/repaper.c
index 75740630c410..1ee6855212a0 100644
--- a/drivers/gpu/drm/tinydrm/repaper.c
+++ b/drivers/gpu/drm/tinydrm/repaper.c
@@ -540,14 +540,8 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb,
540 clip.y1 = 0; 540 clip.y1 = 0;
541 clip.y2 = fb->height; 541 clip.y2 = fb->height;
542 542
543 mutex_lock(&tdev->dirty_lock);
544
545 if (!epd->enabled) 543 if (!epd->enabled)
546 goto out_unlock; 544 return 0;
547
548 /* fbdev can flush even when we're not interested */
549 if (tdev->pipe.plane.fb != fb)
550 goto out_unlock;
551 545
552 repaper_get_temperature(epd); 546 repaper_get_temperature(epd);
553 547
@@ -555,16 +549,14 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb,
555 epd->factored_stage_time); 549 epd->factored_stage_time);
556 550
557 buf = kmalloc(fb->width * fb->height, GFP_KERNEL); 551 buf = kmalloc(fb->width * fb->height, GFP_KERNEL);
558 if (!buf) { 552 if (!buf)
559 ret = -ENOMEM; 553 return -ENOMEM;
560 goto out_unlock;
561 }
562 554
563 if (import_attach) { 555 if (import_attach) {
564 ret = dma_buf_begin_cpu_access(import_attach->dmabuf, 556 ret = dma_buf_begin_cpu_access(import_attach->dmabuf,
565 DMA_FROM_DEVICE); 557 DMA_FROM_DEVICE);
566 if (ret) 558 if (ret)
567 goto out_unlock; 559 goto out_free;
568 } 560 }
569 561
570 tinydrm_xrgb8888_to_gray8(buf, cma_obj->vaddr, fb, &clip); 562 tinydrm_xrgb8888_to_gray8(buf, cma_obj->vaddr, fb, &clip);
@@ -573,7 +565,7 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb,
573 ret = dma_buf_end_cpu_access(import_attach->dmabuf, 565 ret = dma_buf_end_cpu_access(import_attach->dmabuf,
574 DMA_FROM_DEVICE); 566 DMA_FROM_DEVICE);
575 if (ret) 567 if (ret)
576 goto out_unlock; 568 goto out_free;
577 } 569 }
578 570
579 repaper_gray8_to_mono_reversed(buf, fb->width, fb->height); 571 repaper_gray8_to_mono_reversed(buf, fb->width, fb->height);
@@ -625,11 +617,7 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb,
625 } 617 }
626 } 618 }
627 619
628out_unlock: 620out_free:
629 mutex_unlock(&tdev->dirty_lock);
630
631 if (ret)
632 DRM_DEV_ERROR(fb->dev->dev, "Failed to update display (%d)\n", ret);
633 kfree(buf); 621 kfree(buf);
634 622
635 return ret; 623 return ret;
@@ -638,7 +626,7 @@ out_unlock:
638static const struct drm_framebuffer_funcs repaper_fb_funcs = { 626static const struct drm_framebuffer_funcs repaper_fb_funcs = {
639 .destroy = drm_gem_fb_destroy, 627 .destroy = drm_gem_fb_destroy,
640 .create_handle = drm_gem_fb_create_handle, 628 .create_handle = drm_gem_fb_create_handle,
641 .dirty = repaper_fb_dirty, 629 .dirty = tinydrm_fb_dirty,
642}; 630};
643 631
644static void power_off(struct repaper_epd *epd) 632static void power_off(struct repaper_epd *epd)
@@ -659,7 +647,8 @@ static void power_off(struct repaper_epd *epd)
659} 647}
660 648
661static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe, 649static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
662 struct drm_crtc_state *crtc_state) 650 struct drm_crtc_state *crtc_state,
651 struct drm_plane_state *plane_state)
663{ 652{
664 struct tinydrm_device *tdev = pipe_to_tinydrm(pipe); 653 struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
665 struct repaper_epd *epd = epd_from_tinydrm(tdev); 654 struct repaper_epd *epd = epd_from_tinydrm(tdev);
@@ -852,7 +841,7 @@ static const struct drm_simple_display_pipe_funcs repaper_pipe_funcs = {
852 .enable = repaper_pipe_enable, 841 .enable = repaper_pipe_enable,
853 .disable = repaper_pipe_disable, 842 .disable = repaper_pipe_disable,
854 .update = tinydrm_display_pipe_update, 843 .update = tinydrm_display_pipe_update,
855 .prepare_fb = tinydrm_display_pipe_prepare_fb, 844 .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
856}; 845};
857 846
858static const uint32_t repaper_formats[] = { 847static const uint32_t repaper_formats[] = {
@@ -1069,6 +1058,8 @@ static int repaper_probe(struct spi_device *spi)
1069 if (ret) 1058 if (ret)
1070 return ret; 1059 return ret;
1071 1060
1061 tdev->fb_dirty = repaper_fb_dirty;
1062
1072 ret = tinydrm_display_pipe_init(tdev, &repaper_pipe_funcs, 1063 ret = tinydrm_display_pipe_init(tdev, &repaper_pipe_funcs,
1073 DRM_MODE_CONNECTOR_VIRTUAL, 1064 DRM_MODE_CONNECTOR_VIRTUAL,
1074 repaper_formats, 1065 repaper_formats,
diff --git a/drivers/gpu/drm/tinydrm/st7586.c b/drivers/gpu/drm/tinydrm/st7586.c
index a6396ef9cc4a..5c29e3803ecb 100644
--- a/drivers/gpu/drm/tinydrm/st7586.c
+++ b/drivers/gpu/drm/tinydrm/st7586.c
@@ -120,14 +120,8 @@ static int st7586_fb_dirty(struct drm_framebuffer *fb,
120 int start, end; 120 int start, end;
121 int ret = 0; 121 int ret = 0;
122 122
123 mutex_lock(&tdev->dirty_lock);
124
125 if (!mipi->enabled) 123 if (!mipi->enabled)
126 goto out_unlock; 124 return 0;
127
128 /* fbdev can flush even when we're not interested */
129 if (tdev->pipe.plane.fb != fb)
130 goto out_unlock;
131 125
132 tinydrm_merge_clips(&clip, clips, num_clips, flags, fb->width, 126 tinydrm_merge_clips(&clip, clips, num_clips, flags, fb->width,
133 fb->height); 127 fb->height);
@@ -141,7 +135,7 @@ static int st7586_fb_dirty(struct drm_framebuffer *fb,
141 135
142 ret = st7586_buf_copy(mipi->tx_buf, fb, &clip); 136 ret = st7586_buf_copy(mipi->tx_buf, fb, &clip);
143 if (ret) 137 if (ret)
144 goto out_unlock; 138 return ret;
145 139
146 /* Pixels are packed 3 per byte */ 140 /* Pixels are packed 3 per byte */
147 start = clip.x1 / 3; 141 start = clip.x1 / 3;
@@ -158,24 +152,18 @@ static int st7586_fb_dirty(struct drm_framebuffer *fb,
158 (u8 *)mipi->tx_buf, 152 (u8 *)mipi->tx_buf,
159 (end - start) * (clip.y2 - clip.y1)); 153 (end - start) * (clip.y2 - clip.y1));
160 154
161out_unlock:
162 mutex_unlock(&tdev->dirty_lock);
163
164 if (ret)
165 dev_err_once(fb->dev->dev, "Failed to update display %d\n",
166 ret);
167
168 return ret; 155 return ret;
169} 156}
170 157
171static const struct drm_framebuffer_funcs st7586_fb_funcs = { 158static const struct drm_framebuffer_funcs st7586_fb_funcs = {
172 .destroy = drm_gem_fb_destroy, 159 .destroy = drm_gem_fb_destroy,
173 .create_handle = drm_gem_fb_create_handle, 160 .create_handle = drm_gem_fb_create_handle,
174 .dirty = st7586_fb_dirty, 161 .dirty = tinydrm_fb_dirty,
175}; 162};
176 163
177static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe, 164static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
178 struct drm_crtc_state *crtc_state) 165 struct drm_crtc_state *crtc_state,
166 struct drm_plane_state *plane_state)
179{ 167{
180 struct tinydrm_device *tdev = pipe_to_tinydrm(pipe); 168 struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
181 struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev); 169 struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
@@ -237,7 +225,7 @@ static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
237 225
238 mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_ON); 226 mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_ON);
239 227
240 mipi_dbi_enable_flush(mipi); 228 mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
241} 229}
242 230
243static void st7586_pipe_disable(struct drm_simple_display_pipe *pipe) 231static void st7586_pipe_disable(struct drm_simple_display_pipe *pipe)
@@ -277,6 +265,8 @@ static int st7586_init(struct device *dev, struct mipi_dbi *mipi,
277 if (ret) 265 if (ret)
278 return ret; 266 return ret;
279 267
268 tdev->fb_dirty = st7586_fb_dirty;
269
280 ret = tinydrm_display_pipe_init(tdev, pipe_funcs, 270 ret = tinydrm_display_pipe_init(tdev, pipe_funcs,
281 DRM_MODE_CONNECTOR_VIRTUAL, 271 DRM_MODE_CONNECTOR_VIRTUAL,
282 st7586_formats, 272 st7586_formats,
@@ -300,7 +290,7 @@ static const struct drm_simple_display_pipe_funcs st7586_pipe_funcs = {
300 .enable = st7586_pipe_enable, 290 .enable = st7586_pipe_enable,
301 .disable = st7586_pipe_disable, 291 .disable = st7586_pipe_disable,
302 .update = tinydrm_display_pipe_update, 292 .update = tinydrm_display_pipe_update,
303 .prepare_fb = tinydrm_display_pipe_prepare_fb, 293 .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
304}; 294};
305 295
306static const struct drm_display_mode st7586_mode = { 296static const struct drm_display_mode st7586_mode = {
diff --git a/drivers/gpu/drm/tinydrm/st7735r.c b/drivers/gpu/drm/tinydrm/st7735r.c
index 67d197ecfc4b..6c7b15c9da4f 100644
--- a/drivers/gpu/drm/tinydrm/st7735r.c
+++ b/drivers/gpu/drm/tinydrm/st7735r.c
@@ -37,7 +37,8 @@
37#define ST7735R_MV BIT(5) 37#define ST7735R_MV BIT(5)
38 38
39static void jd_t18003_t01_pipe_enable(struct drm_simple_display_pipe *pipe, 39static void jd_t18003_t01_pipe_enable(struct drm_simple_display_pipe *pipe,
40 struct drm_crtc_state *crtc_state) 40 struct drm_crtc_state *crtc_state,
41 struct drm_plane_state *plane_state)
41{ 42{
42 struct tinydrm_device *tdev = pipe_to_tinydrm(pipe); 43 struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
43 struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev); 44 struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
@@ -98,14 +99,14 @@ static void jd_t18003_t01_pipe_enable(struct drm_simple_display_pipe *pipe,
98 99
99 msleep(20); 100 msleep(20);
100 101
101 mipi_dbi_enable_flush(mipi); 102 mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
102} 103}
103 104
104static const struct drm_simple_display_pipe_funcs jd_t18003_t01_pipe_funcs = { 105static const struct drm_simple_display_pipe_funcs jd_t18003_t01_pipe_funcs = {
105 .enable = jd_t18003_t01_pipe_enable, 106 .enable = jd_t18003_t01_pipe_enable,
106 .disable = mipi_dbi_pipe_disable, 107 .disable = mipi_dbi_pipe_disable,
107 .update = tinydrm_display_pipe_update, 108 .update = tinydrm_display_pipe_update,
108 .prepare_fb = tinydrm_display_pipe_prepare_fb, 109 .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
109}; 110};
110 111
111static const struct drm_display_mode jd_t18003_t01_mode = { 112static const struct drm_display_mode jd_t18003_t01_mode = {
diff --git a/drivers/gpu/drm/tve200/tve200_display.c b/drivers/gpu/drm/tve200/tve200_display.c
index db397fcb345a..e8723a2412a6 100644
--- a/drivers/gpu/drm/tve200/tve200_display.c
+++ b/drivers/gpu/drm/tve200/tve200_display.c
@@ -120,7 +120,8 @@ static int tve200_display_check(struct drm_simple_display_pipe *pipe,
120} 120}
121 121
122static void tve200_display_enable(struct drm_simple_display_pipe *pipe, 122static void tve200_display_enable(struct drm_simple_display_pipe *pipe,
123 struct drm_crtc_state *cstate) 123 struct drm_crtc_state *cstate,
124 struct drm_plane_state *plane_state)
124{ 125{
125 struct drm_crtc *crtc = &pipe->crtc; 126 struct drm_crtc *crtc = &pipe->crtc;
126 struct drm_plane *plane = &pipe->plane; 127 struct drm_plane *plane = &pipe->plane;
@@ -292,18 +293,12 @@ static void tve200_display_disable_vblank(struct drm_simple_display_pipe *pipe)
292 writel(0, priv->regs + TVE200_INT_EN); 293 writel(0, priv->regs + TVE200_INT_EN);
293} 294}
294 295
295static int tve200_display_prepare_fb(struct drm_simple_display_pipe *pipe,
296 struct drm_plane_state *plane_state)
297{
298 return drm_gem_fb_prepare_fb(&pipe->plane, plane_state);
299}
300
301static const struct drm_simple_display_pipe_funcs tve200_display_funcs = { 296static const struct drm_simple_display_pipe_funcs tve200_display_funcs = {
302 .check = tve200_display_check, 297 .check = tve200_display_check,
303 .enable = tve200_display_enable, 298 .enable = tve200_display_enable,
304 .disable = tve200_display_disable, 299 .disable = tve200_display_disable,
305 .update = tve200_display_update, 300 .update = tve200_display_update,
306 .prepare_fb = tve200_display_prepare_fb, 301 .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
307 .enable_vblank = tve200_display_enable_vblank, 302 .enable_vblank = tve200_display_enable_vblank,
308 .disable_vblank = tve200_display_disable_vblank, 303 .disable_vblank = tve200_display_disable_vblank,
309}; 304};
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index c3dc1fd20cb4..09dc585aa46f 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -105,7 +105,7 @@ static int udl_get_modes(struct drm_connector *connector)
105 return 0; 105 return 0;
106} 106}
107 107
108static int udl_mode_valid(struct drm_connector *connector, 108static enum drm_mode_status udl_mode_valid(struct drm_connector *connector,
109 struct drm_display_mode *mode) 109 struct drm_display_mode *mode)
110{ 110{
111 struct udl_device *udl = connector->dev->dev_private; 111 struct udl_device *udl = connector->dev->dev_private;
diff --git a/drivers/gpu/drm/udl/udl_dmabuf.c b/drivers/gpu/drm/udl/udl_dmabuf.c
index 2867ed155ff6..0a20695eb120 100644
--- a/drivers/gpu/drm/udl/udl_dmabuf.c
+++ b/drivers/gpu/drm/udl/udl_dmabuf.c
@@ -76,6 +76,7 @@ static struct sg_table *udl_map_dma_buf(struct dma_buf_attachment *attach,
76 struct udl_drm_dmabuf_attachment *udl_attach = attach->priv; 76 struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
77 struct udl_gem_object *obj = to_udl_bo(attach->dmabuf->priv); 77 struct udl_gem_object *obj = to_udl_bo(attach->dmabuf->priv);
78 struct drm_device *dev = obj->base.dev; 78 struct drm_device *dev = obj->base.dev;
79 struct udl_device *udl = dev->dev_private;
79 struct scatterlist *rd, *wr; 80 struct scatterlist *rd, *wr;
80 struct sg_table *sgt = NULL; 81 struct sg_table *sgt = NULL;
81 unsigned int i; 82 unsigned int i;
@@ -112,7 +113,7 @@ static struct sg_table *udl_map_dma_buf(struct dma_buf_attachment *attach,
112 return ERR_PTR(-ENOMEM); 113 return ERR_PTR(-ENOMEM);
113 } 114 }
114 115
115 mutex_lock(&dev->struct_mutex); 116 mutex_lock(&udl->gem_lock);
116 117
117 rd = obj->sg->sgl; 118 rd = obj->sg->sgl;
118 wr = sgt->sgl; 119 wr = sgt->sgl;
@@ -137,7 +138,7 @@ static struct sg_table *udl_map_dma_buf(struct dma_buf_attachment *attach,
137 attach->priv = udl_attach; 138 attach->priv = udl_attach;
138 139
139err_unlock: 140err_unlock:
140 mutex_unlock(&dev->struct_mutex); 141 mutex_unlock(&udl->gem_lock);
141 return sgt; 142 return sgt;
142} 143}
143 144
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 3c45a3064726..9ef515df724b 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -53,7 +53,7 @@ static struct drm_driver driver = {
53 .unload = udl_driver_unload, 53 .unload = udl_driver_unload,
54 54
55 /* gem hooks */ 55 /* gem hooks */
56 .gem_free_object = udl_gem_free_object, 56 .gem_free_object_unlocked = udl_gem_free_object,
57 .gem_vm_ops = &udl_gem_vm_ops, 57 .gem_vm_ops = &udl_gem_vm_ops,
58 58
59 .dumb_create = udl_dumb_create, 59 .dumb_create = udl_dumb_create,
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 2a75ab80527a..55c0cc309198 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -54,6 +54,8 @@ struct udl_device {
54 struct usb_device *udev; 54 struct usb_device *udev;
55 struct drm_crtc *crtc; 55 struct drm_crtc *crtc;
56 56
57 struct mutex gem_lock;
58
57 int sku_pixel_limit; 59 int sku_pixel_limit;
58 60
59 struct urb_list urbs; 61 struct urb_list urbs;
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index dee6bd9a3dd1..9a15cce22cce 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -214,9 +214,10 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
214{ 214{
215 struct udl_gem_object *gobj; 215 struct udl_gem_object *gobj;
216 struct drm_gem_object *obj; 216 struct drm_gem_object *obj;
217 struct udl_device *udl = dev->dev_private;
217 int ret = 0; 218 int ret = 0;
218 219
219 mutex_lock(&dev->struct_mutex); 220 mutex_lock(&udl->gem_lock);
220 obj = drm_gem_object_lookup(file, handle); 221 obj = drm_gem_object_lookup(file, handle);
221 if (obj == NULL) { 222 if (obj == NULL) {
222 ret = -ENOENT; 223 ret = -ENOENT;
@@ -236,6 +237,6 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
236out: 237out:
237 drm_gem_object_put(&gobj->base); 238 drm_gem_object_put(&gobj->base);
238unlock: 239unlock:
239 mutex_unlock(&dev->struct_mutex); 240 mutex_unlock(&udl->gem_lock);
240 return ret; 241 return ret;
241} 242}
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index f1ec4528a73e..d518de8f496b 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -324,6 +324,8 @@ int udl_driver_load(struct drm_device *dev, unsigned long flags)
324 udl->ddev = dev; 324 udl->ddev = dev;
325 dev->dev_private = udl; 325 dev->dev_private = udl;
326 326
327 mutex_init(&udl->gem_lock);
328
327 if (!udl_parse_vendor_descriptor(dev, udl->udev)) { 329 if (!udl_parse_vendor_descriptor(dev, udl->udev)) {
328 ret = -ENODEV; 330 ret = -ENODEV;
329 DRM_ERROR("firmware not recognized. Assume incompatible device\n"); 331 DRM_ERROR("firmware not recognized. Assume incompatible device\n");
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index bf4667481935..83d3b7912fc2 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -42,51 +42,18 @@
42#include "vc4_drv.h" 42#include "vc4_drv.h"
43#include "vc4_regs.h" 43#include "vc4_regs.h"
44 44
45struct vc4_crtc {
46 struct drm_crtc base;
47 const struct vc4_crtc_data *data;
48 void __iomem *regs;
49
50 /* Timestamp at start of vblank irq - unaffected by lock delays. */
51 ktime_t t_vblank;
52
53 /* Which HVS channel we're using for our CRTC. */
54 int channel;
55
56 u8 lut_r[256];
57 u8 lut_g[256];
58 u8 lut_b[256];
59 /* Size in pixels of the COB memory allocated to this CRTC. */
60 u32 cob_size;
61
62 struct drm_pending_vblank_event *event;
63};
64
65struct vc4_crtc_state { 45struct vc4_crtc_state {
66 struct drm_crtc_state base; 46 struct drm_crtc_state base;
67 /* Dlist area for this CRTC configuration. */ 47 /* Dlist area for this CRTC configuration. */
68 struct drm_mm_node mm; 48 struct drm_mm_node mm;
69}; 49};
70 50
71static inline struct vc4_crtc *
72to_vc4_crtc(struct drm_crtc *crtc)
73{
74 return (struct vc4_crtc *)crtc;
75}
76
77static inline struct vc4_crtc_state * 51static inline struct vc4_crtc_state *
78to_vc4_crtc_state(struct drm_crtc_state *crtc_state) 52to_vc4_crtc_state(struct drm_crtc_state *crtc_state)
79{ 53{
80 return (struct vc4_crtc_state *)crtc_state; 54 return (struct vc4_crtc_state *)crtc_state;
81} 55}
82 56
83struct vc4_crtc_data {
84 /* Which channel of the HVS this pixelvalve sources from. */
85 int hvs_channel;
86
87 enum vc4_encoder_type encoder_types[4];
88};
89
90#define CRTC_WRITE(offset, val) writel(val, vc4_crtc->regs + (offset)) 57#define CRTC_WRITE(offset, val) writel(val, vc4_crtc->regs + (offset))
91#define CRTC_READ(offset) readl(vc4_crtc->regs + (offset)) 58#define CRTC_READ(offset) readl(vc4_crtc->regs + (offset))
92 59
@@ -298,23 +265,21 @@ vc4_crtc_lut_load(struct drm_crtc *crtc)
298 HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_b[i]); 265 HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_b[i]);
299} 266}
300 267
301static int 268static void
302vc4_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, 269vc4_crtc_update_gamma_lut(struct drm_crtc *crtc)
303 uint32_t size,
304 struct drm_modeset_acquire_ctx *ctx)
305{ 270{
306 struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); 271 struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
272 struct drm_color_lut *lut = crtc->state->gamma_lut->data;
273 u32 length = drm_color_lut_size(crtc->state->gamma_lut);
307 u32 i; 274 u32 i;
308 275
309 for (i = 0; i < size; i++) { 276 for (i = 0; i < length; i++) {
310 vc4_crtc->lut_r[i] = r[i] >> 8; 277 vc4_crtc->lut_r[i] = drm_color_lut_extract(lut[i].red, 8);
311 vc4_crtc->lut_g[i] = g[i] >> 8; 278 vc4_crtc->lut_g[i] = drm_color_lut_extract(lut[i].green, 8);
312 vc4_crtc->lut_b[i] = b[i] >> 8; 279 vc4_crtc->lut_b[i] = drm_color_lut_extract(lut[i].blue, 8);
313 } 280 }
314 281
315 vc4_crtc_lut_load(crtc); 282 vc4_crtc_lut_load(crtc);
316
317 return 0;
318} 283}
319 284
320static u32 vc4_get_fifo_full_level(u32 format) 285static u32 vc4_get_fifo_full_level(u32 format)
@@ -699,6 +664,22 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
699 if (crtc->state->active && old_state->active) 664 if (crtc->state->active && old_state->active)
700 vc4_crtc_update_dlist(crtc); 665 vc4_crtc_update_dlist(crtc);
701 666
667 if (crtc->state->color_mgmt_changed) {
668 u32 dispbkgndx = HVS_READ(SCALER_DISPBKGNDX(vc4_crtc->channel));
669
670 if (crtc->state->gamma_lut) {
671 vc4_crtc_update_gamma_lut(crtc);
672 dispbkgndx |= SCALER_DISPBKGND_GAMMA;
673 } else {
674 /* Unsetting DISPBKGND_GAMMA skips the gamma lut step
675 * in hardware, which is the same as a linear lut that
676 * DRM expects us to use in absence of a user lut.
677 */
678 dispbkgndx &= ~SCALER_DISPBKGND_GAMMA;
679 }
680 HVS_WRITE(SCALER_DISPBKGNDX(vc4_crtc->channel), dispbkgndx);
681 }
682
702 if (debug_dump_regs) { 683 if (debug_dump_regs) {
703 DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc)); 684 DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
704 vc4_hvs_dump_state(dev); 685 vc4_hvs_dump_state(dev);
@@ -909,7 +890,7 @@ static const struct drm_crtc_funcs vc4_crtc_funcs = {
909 .reset = vc4_crtc_reset, 890 .reset = vc4_crtc_reset,
910 .atomic_duplicate_state = vc4_crtc_duplicate_state, 891 .atomic_duplicate_state = vc4_crtc_duplicate_state,
911 .atomic_destroy_state = vc4_crtc_destroy_state, 892 .atomic_destroy_state = vc4_crtc_destroy_state,
912 .gamma_set = vc4_crtc_gamma_set, 893 .gamma_set = drm_atomic_helper_legacy_gamma_set,
913 .enable_vblank = vc4_enable_vblank, 894 .enable_vblank = vc4_enable_vblank,
914 .disable_vblank = vc4_disable_vblank, 895 .disable_vblank = vc4_disable_vblank,
915}; 896};
@@ -1035,6 +1016,12 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
1035 primary_plane->crtc = crtc; 1016 primary_plane->crtc = crtc;
1036 vc4_crtc->channel = vc4_crtc->data->hvs_channel; 1017 vc4_crtc->channel = vc4_crtc->data->hvs_channel;
1037 drm_mode_crtc_set_gamma_size(crtc, ARRAY_SIZE(vc4_crtc->lut_r)); 1018 drm_mode_crtc_set_gamma_size(crtc, ARRAY_SIZE(vc4_crtc->lut_r));
1019 drm_crtc_enable_color_mgmt(crtc, 0, false, crtc->gamma_size);
1020
1021 /* We support CTM, but only for one CRTC at a time. It's therefore
1022 * implemented as private driver state in vc4_kms, not here.
1023 */
1024 drm_crtc_enable_color_mgmt(crtc, 0, true, crtc->gamma_size);
1038 1025
1039 /* Set up some arbitrary number of planes. We're not limited 1026 /* Set up some arbitrary number of planes. We're not limited
1040 * by a set number of physical registers, just the space in 1027 * by a set number of physical registers, just the space in
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 94b99c90425a..40ddeaafd65f 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -318,8 +318,8 @@ dev_unref:
318 318
319static void vc4_drm_unbind(struct device *dev) 319static void vc4_drm_unbind(struct device *dev)
320{ 320{
321 struct platform_device *pdev = to_platform_device(dev); 321 struct drm_device *drm = dev_get_drvdata(dev);
322 struct drm_device *drm = platform_get_drvdata(pdev); 322 struct vc4_dev *vc4 = to_vc4_dev(drm);
323 323
324 drm_dev_unregister(drm); 324 drm_dev_unregister(drm);
325 325
@@ -327,6 +327,8 @@ static void vc4_drm_unbind(struct device *dev)
327 327
328 drm_mode_config_cleanup(drm); 328 drm_mode_config_cleanup(drm);
329 329
330 drm_atomic_private_obj_fini(&vc4->ctm_manager);
331
330 drm_dev_unref(drm); 332 drm_dev_unref(drm);
331} 333}
332 334
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 1b4cd1fabf56..22589d39083c 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -10,6 +10,7 @@
10#include <drm/drmP.h> 10#include <drm/drmP.h>
11#include <drm/drm_encoder.h> 11#include <drm/drm_encoder.h>
12#include <drm/drm_gem_cma_helper.h> 12#include <drm/drm_gem_cma_helper.h>
13#include <drm/drm_atomic.h>
13 14
14#include "uapi/drm/vc4_drm.h" 15#include "uapi/drm/vc4_drm.h"
15 16
@@ -193,6 +194,9 @@ struct vc4_dev {
193 } hangcheck; 194 } hangcheck;
194 195
195 struct semaphore async_modeset; 196 struct semaphore async_modeset;
197
198 struct drm_modeset_lock ctm_state_lock;
199 struct drm_private_obj ctm_manager;
196}; 200};
197 201
198static inline struct vc4_dev * 202static inline struct vc4_dev *
@@ -392,6 +396,39 @@ to_vc4_encoder(struct drm_encoder *encoder)
392 return container_of(encoder, struct vc4_encoder, base); 396 return container_of(encoder, struct vc4_encoder, base);
393} 397}
394 398
399struct vc4_crtc_data {
400 /* Which channel of the HVS this pixelvalve sources from. */
401 int hvs_channel;
402
403 enum vc4_encoder_type encoder_types[4];
404};
405
406struct vc4_crtc {
407 struct drm_crtc base;
408 const struct vc4_crtc_data *data;
409 void __iomem *regs;
410
411 /* Timestamp at start of vblank irq - unaffected by lock delays. */
412 ktime_t t_vblank;
413
414 /* Which HVS channel we're using for our CRTC. */
415 int channel;
416
417 u8 lut_r[256];
418 u8 lut_g[256];
419 u8 lut_b[256];
420 /* Size in pixels of the COB memory allocated to this CRTC. */
421 u32 cob_size;
422
423 struct drm_pending_vblank_event *event;
424};
425
426static inline struct vc4_crtc *
427to_vc4_crtc(struct drm_crtc *crtc)
428{
429 return (struct vc4_crtc *)crtc;
430}
431
395#define V3D_READ(offset) readl(vc4->v3d->regs + offset) 432#define V3D_READ(offset) readl(vc4->v3d->regs + offset)
396#define V3D_WRITE(offset, val) writel(val, vc4->v3d->regs + offset) 433#define V3D_WRITE(offset, val) writel(val, vc4->v3d->regs + offset)
397#define HVS_READ(offset) readl(vc4->hvs->regs + offset) 434#define HVS_READ(offset) readl(vc4->hvs->regs + offset)
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index 2b62fc5b8d85..5d8c749c9749 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -58,6 +58,10 @@ static const struct {
58 HVS_REG(SCALER_DISPSTAT2), 58 HVS_REG(SCALER_DISPSTAT2),
59 HVS_REG(SCALER_DISPBASE2), 59 HVS_REG(SCALER_DISPBASE2),
60 HVS_REG(SCALER_DISPALPHA2), 60 HVS_REG(SCALER_DISPALPHA2),
61 HVS_REG(SCALER_OLEDOFFS),
62 HVS_REG(SCALER_OLEDCOEF0),
63 HVS_REG(SCALER_OLEDCOEF1),
64 HVS_REG(SCALER_OLEDCOEF2),
61}; 65};
62 66
63void vc4_hvs_dump_state(struct drm_device *dev) 67void vc4_hvs_dump_state(struct drm_device *dev)
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index ba60153dddb5..8a411e5f8776 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -23,6 +23,117 @@
23#include <drm/drm_fb_cma_helper.h> 23#include <drm/drm_fb_cma_helper.h>
24#include <drm/drm_gem_framebuffer_helper.h> 24#include <drm/drm_gem_framebuffer_helper.h>
25#include "vc4_drv.h" 25#include "vc4_drv.h"
26#include "vc4_regs.h"
27
28struct vc4_ctm_state {
29 struct drm_private_state base;
30 struct drm_color_ctm *ctm;
31 int fifo;
32};
33
34static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv)
35{
36 return container_of(priv, struct vc4_ctm_state, base);
37}
38
39static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state,
40 struct drm_private_obj *manager)
41{
42 struct drm_device *dev = state->dev;
43 struct vc4_dev *vc4 = dev->dev_private;
44 struct drm_private_state *priv_state;
45 int ret;
46
47 ret = drm_modeset_lock(&vc4->ctm_state_lock, state->acquire_ctx);
48 if (ret)
49 return ERR_PTR(ret);
50
51 priv_state = drm_atomic_get_private_obj_state(state, manager);
52 if (IS_ERR(priv_state))
53 return ERR_CAST(priv_state);
54
55 return to_vc4_ctm_state(priv_state);
56}
57
58static struct drm_private_state *
59vc4_ctm_duplicate_state(struct drm_private_obj *obj)
60{
61 struct vc4_ctm_state *state;
62
63 state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
64 if (!state)
65 return NULL;
66
67 __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
68
69 return &state->base;
70}
71
72static void vc4_ctm_destroy_state(struct drm_private_obj *obj,
73 struct drm_private_state *state)
74{
75 struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(state);
76
77 kfree(ctm_state);
78}
79
80static const struct drm_private_state_funcs vc4_ctm_state_funcs = {
81 .atomic_duplicate_state = vc4_ctm_duplicate_state,
82 .atomic_destroy_state = vc4_ctm_destroy_state,
83};
84
85/* Converts a DRM S31.32 value to the HW S0.9 format. */
86static u16 vc4_ctm_s31_32_to_s0_9(u64 in)
87{
88 u16 r;
89
90 /* Sign bit. */
91 r = in & BIT_ULL(63) ? BIT(9) : 0;
92
93 if ((in & GENMASK_ULL(62, 32)) > 0) {
94 /* We have zero integer bits so we can only saturate here. */
95 r |= GENMASK(8, 0);
96 } else {
97 /* Otherwise take the 9 most important fractional bits. */
98 r |= (in >> 23) & GENMASK(8, 0);
99 }
100
101 return r;
102}
103
104static void
105vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
106{
107 struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(vc4->ctm_manager.state);
108 struct drm_color_ctm *ctm = ctm_state->ctm;
109
110 if (ctm_state->fifo) {
111 HVS_WRITE(SCALER_OLEDCOEF2,
112 VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[0]),
113 SCALER_OLEDCOEF2_R_TO_R) |
114 VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[3]),
115 SCALER_OLEDCOEF2_R_TO_G) |
116 VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[6]),
117 SCALER_OLEDCOEF2_R_TO_B));
118 HVS_WRITE(SCALER_OLEDCOEF1,
119 VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[1]),
120 SCALER_OLEDCOEF1_G_TO_R) |
121 VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[4]),
122 SCALER_OLEDCOEF1_G_TO_G) |
123 VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[7]),
124 SCALER_OLEDCOEF1_G_TO_B));
125 HVS_WRITE(SCALER_OLEDCOEF0,
126 VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[2]),
127 SCALER_OLEDCOEF0_B_TO_R) |
128 VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[5]),
129 SCALER_OLEDCOEF0_B_TO_G) |
130 VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[8]),
131 SCALER_OLEDCOEF0_B_TO_B));
132 }
133
134 HVS_WRITE(SCALER_OLEDOFFS,
135 VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
136}
26 137
27static void 138static void
28vc4_atomic_complete_commit(struct drm_atomic_state *state) 139vc4_atomic_complete_commit(struct drm_atomic_state *state)
@@ -36,6 +147,8 @@ vc4_atomic_complete_commit(struct drm_atomic_state *state)
36 147
37 drm_atomic_helper_commit_modeset_disables(dev, state); 148 drm_atomic_helper_commit_modeset_disables(dev, state);
38 149
150 vc4_ctm_commit(vc4, state);
151
39 drm_atomic_helper_commit_planes(dev, state, 0); 152 drm_atomic_helper_commit_planes(dev, state, 0);
40 153
41 drm_atomic_helper_commit_modeset_enables(dev, state); 154 drm_atomic_helper_commit_modeset_enables(dev, state);
@@ -90,6 +203,26 @@ static int vc4_atomic_commit(struct drm_device *dev,
90 struct vc4_dev *vc4 = to_vc4_dev(dev); 203 struct vc4_dev *vc4 = to_vc4_dev(dev);
91 int ret; 204 int ret;
92 205
206 if (state->async_update) {
207 ret = down_interruptible(&vc4->async_modeset);
208 if (ret)
209 return ret;
210
211 ret = drm_atomic_helper_prepare_planes(dev, state);
212 if (ret) {
213 up(&vc4->async_modeset);
214 return ret;
215 }
216
217 drm_atomic_helper_async_commit(dev, state);
218
219 drm_atomic_helper_cleanup_planes(dev, state);
220
221 up(&vc4->async_modeset);
222
223 return 0;
224 }
225
93 ret = drm_atomic_helper_setup_commit(state, nonblock); 226 ret = drm_atomic_helper_setup_commit(state, nonblock);
94 if (ret) 227 if (ret)
95 return ret; 228 return ret;
@@ -187,9 +320,89 @@ static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
187 return drm_gem_fb_create(dev, file_priv, mode_cmd); 320 return drm_gem_fb_create(dev, file_priv, mode_cmd);
188} 321}
189 322
323/* Our CTM has some peculiar limitations: we can only enable it for one CRTC
324 * at a time and the HW only supports S0.9 scalars. To account for the latter,
325 * we don't allow userland to set a CTM that we have no hope of approximating.
326 */
327static int
328vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
329{
330 struct vc4_dev *vc4 = to_vc4_dev(dev);
331 struct vc4_ctm_state *ctm_state = NULL;
332 struct drm_crtc *crtc;
333 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
334 struct drm_color_ctm *ctm;
335 int i;
336
337 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
338 /* CTM is being disabled. */
339 if (!new_crtc_state->ctm && old_crtc_state->ctm) {
340 ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
341 if (IS_ERR(ctm_state))
342 return PTR_ERR(ctm_state);
343 ctm_state->fifo = 0;
344 }
345 }
346
347 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
348 if (new_crtc_state->ctm == old_crtc_state->ctm)
349 continue;
350
351 if (!ctm_state) {
352 ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
353 if (IS_ERR(ctm_state))
354 return PTR_ERR(ctm_state);
355 }
356
357 /* CTM is being enabled or the matrix changed. */
358 if (new_crtc_state->ctm) {
359 /* fifo is 1-based since 0 disables CTM. */
360 int fifo = to_vc4_crtc(crtc)->channel + 1;
361
362 /* Check userland isn't trying to turn on CTM for more
363 * than one CRTC at a time.
364 */
365 if (ctm_state->fifo && ctm_state->fifo != fifo) {
366 DRM_DEBUG_DRIVER("Too many CTM configured\n");
367 return -EINVAL;
368 }
369
370 /* Check we can approximate the specified CTM.
371 * We disallow scalars |c| > 1.0 since the HW has
372 * no integer bits.
373 */
374 ctm = new_crtc_state->ctm->data;
375 for (i = 0; i < ARRAY_SIZE(ctm->matrix); i++) {
376 u64 val = ctm->matrix[i];
377
378 val &= ~BIT_ULL(63);
379 if (val > BIT_ULL(32))
380 return -EINVAL;
381 }
382
383 ctm_state->fifo = fifo;
384 ctm_state->ctm = ctm;
385 }
386 }
387
388 return 0;
389}
390
391static int
392vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
393{
394 int ret;
395
396 ret = vc4_ctm_atomic_check(dev, state);
397 if (ret < 0)
398 return ret;
399
400 return drm_atomic_helper_check(dev, state);
401}
402
190static const struct drm_mode_config_funcs vc4_mode_funcs = { 403static const struct drm_mode_config_funcs vc4_mode_funcs = {
191 .output_poll_changed = drm_fb_helper_output_poll_changed, 404 .output_poll_changed = drm_fb_helper_output_poll_changed,
192 .atomic_check = drm_atomic_helper_check, 405 .atomic_check = vc4_atomic_check,
193 .atomic_commit = vc4_atomic_commit, 406 .atomic_commit = vc4_atomic_commit,
194 .fb_create = vc4_fb_create, 407 .fb_create = vc4_fb_create,
195}; 408};
@@ -197,6 +410,7 @@ static const struct drm_mode_config_funcs vc4_mode_funcs = {
197int vc4_kms_load(struct drm_device *dev) 410int vc4_kms_load(struct drm_device *dev)
198{ 411{
199 struct vc4_dev *vc4 = to_vc4_dev(dev); 412 struct vc4_dev *vc4 = to_vc4_dev(dev);
413 struct vc4_ctm_state *ctm_state;
200 int ret; 414 int ret;
201 415
202 sema_init(&vc4->async_modeset, 1); 416 sema_init(&vc4->async_modeset, 1);
@@ -217,6 +431,14 @@ int vc4_kms_load(struct drm_device *dev)
217 dev->mode_config.async_page_flip = true; 431 dev->mode_config.async_page_flip = true;
218 dev->mode_config.allow_fb_modifiers = true; 432 dev->mode_config.allow_fb_modifiers = true;
219 433
434 drm_modeset_lock_init(&vc4->ctm_state_lock);
435
436 ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL);
437 if (!ctm_state)
438 return -ENOMEM;
439 drm_atomic_private_obj_init(&vc4->ctm_manager, &ctm_state->base,
440 &vc4_ctm_state_funcs);
441
220 drm_mode_config_reset(dev); 442 drm_mode_config_reset(dev);
221 443
222 if (dev->mode_config.num_connector) 444 if (dev->mode_config.num_connector)
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index ce39390be389..3483c05cc3d6 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -201,6 +201,7 @@ static void vc4_plane_reset(struct drm_plane *plane)
201 return; 201 return;
202 202
203 plane->state = &vc4_state->base; 203 plane->state = &vc4_state->base;
204 plane->state->alpha = DRM_BLEND_ALPHA_OPAQUE;
204 vc4_state->base.plane = plane; 205 vc4_state->base.plane = plane;
205} 206}
206 207
@@ -467,6 +468,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
467 u32 ctl0_offset = vc4_state->dlist_count; 468 u32 ctl0_offset = vc4_state->dlist_count;
468 const struct hvs_format *format = vc4_get_hvs_format(fb->format->format); 469 const struct hvs_format *format = vc4_get_hvs_format(fb->format->format);
469 int num_planes = drm_format_num_planes(format->drm); 470 int num_planes = drm_format_num_planes(format->drm);
471 bool mix_plane_alpha;
470 bool covers_screen; 472 bool covers_screen;
471 u32 scl0, scl1, pitch0; 473 u32 scl0, scl1, pitch0;
472 u32 lbm_size, tiling; 474 u32 lbm_size, tiling;
@@ -552,7 +554,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
552 /* Position Word 0: Image Positions and Alpha Value */ 554 /* Position Word 0: Image Positions and Alpha Value */
553 vc4_state->pos0_offset = vc4_state->dlist_count; 555 vc4_state->pos0_offset = vc4_state->dlist_count;
554 vc4_dlist_write(vc4_state, 556 vc4_dlist_write(vc4_state,
555 VC4_SET_FIELD(0xff, SCALER_POS0_FIXED_ALPHA) | 557 VC4_SET_FIELD(state->alpha >> 8, SCALER_POS0_FIXED_ALPHA) |
556 VC4_SET_FIELD(vc4_state->crtc_x, SCALER_POS0_START_X) | 558 VC4_SET_FIELD(vc4_state->crtc_x, SCALER_POS0_START_X) |
557 VC4_SET_FIELD(vc4_state->crtc_y, SCALER_POS0_START_Y)); 559 VC4_SET_FIELD(vc4_state->crtc_y, SCALER_POS0_START_Y));
558 560
@@ -565,6 +567,13 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
565 SCALER_POS1_SCL_HEIGHT)); 567 SCALER_POS1_SCL_HEIGHT));
566 } 568 }
567 569
570 /* Don't waste cycles mixing with plane alpha if the set alpha
571 * is opaque or there is no per-pixel alpha information.
572 * In any case we use the alpha property value as the fixed alpha.
573 */
574 mix_plane_alpha = state->alpha != DRM_BLEND_ALPHA_OPAQUE &&
575 fb->format->has_alpha;
576
568 /* Position Word 2: Source Image Size, Alpha */ 577 /* Position Word 2: Source Image Size, Alpha */
569 vc4_state->pos2_offset = vc4_state->dlist_count; 578 vc4_state->pos2_offset = vc4_state->dlist_count;
570 vc4_dlist_write(vc4_state, 579 vc4_dlist_write(vc4_state,
@@ -572,6 +581,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
572 SCALER_POS2_ALPHA_MODE_PIPELINE : 581 SCALER_POS2_ALPHA_MODE_PIPELINE :
573 SCALER_POS2_ALPHA_MODE_FIXED, 582 SCALER_POS2_ALPHA_MODE_FIXED,
574 SCALER_POS2_ALPHA_MODE) | 583 SCALER_POS2_ALPHA_MODE) |
584 (mix_plane_alpha ? SCALER_POS2_ALPHA_MIX : 0) |
575 (fb->format->has_alpha ? SCALER_POS2_ALPHA_PREMULT : 0) | 585 (fb->format->has_alpha ? SCALER_POS2_ALPHA_PREMULT : 0) |
576 VC4_SET_FIELD(vc4_state->src_w[0], SCALER_POS2_WIDTH) | 586 VC4_SET_FIELD(vc4_state->src_w[0], SCALER_POS2_WIDTH) |
577 VC4_SET_FIELD(vc4_state->src_h[0], SCALER_POS2_HEIGHT)); 587 VC4_SET_FIELD(vc4_state->src_h[0], SCALER_POS2_HEIGHT));
@@ -653,10 +663,11 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
653 vc4_state->crtc_w == state->crtc->mode.hdisplay && 663 vc4_state->crtc_w == state->crtc->mode.hdisplay &&
654 vc4_state->crtc_h == state->crtc->mode.vdisplay; 664 vc4_state->crtc_h == state->crtc->mode.vdisplay;
655 /* Background fill might be necessary when the plane has per-pixel 665 /* Background fill might be necessary when the plane has per-pixel
656 * alpha content and blends from the background or does not cover 666 * alpha content or a non-opaque plane alpha and could blend from the
657 * the entire screen. 667 * background or does not cover the entire screen.
658 */ 668 */
659 vc4_state->needs_bg_fill = fb->format->has_alpha || !covers_screen; 669 vc4_state->needs_bg_fill = fb->format->has_alpha || !covers_screen ||
670 state->alpha != DRM_BLEND_ALPHA_OPAQUE;
660 671
661 return 0; 672 return 0;
662} 673}
@@ -741,6 +752,57 @@ void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
741 vc4_state->dlist[vc4_state->ptr0_offset] = addr; 752 vc4_state->dlist[vc4_state->ptr0_offset] = addr;
742} 753}
743 754
755static void vc4_plane_atomic_async_update(struct drm_plane *plane,
756 struct drm_plane_state *state)
757{
758 struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
759
760 if (plane->state->fb != state->fb) {
761 vc4_plane_async_set_fb(plane, state->fb);
762 drm_atomic_set_fb_for_plane(plane->state, state->fb);
763 }
764
765 /* Set the cursor's position on the screen. This is the
766 * expected change from the drm_mode_cursor_universal()
767 * helper.
768 */
769 plane->state->crtc_x = state->crtc_x;
770 plane->state->crtc_y = state->crtc_y;
771
772 /* Allow changing the start position within the cursor BO, if
773 * that matters.
774 */
775 plane->state->src_x = state->src_x;
776 plane->state->src_y = state->src_y;
777
778 /* Update the display list based on the new crtc_x/y. */
779 vc4_plane_atomic_check(plane, plane->state);
780
781 /* Note that we can't just call vc4_plane_write_dlist()
782 * because that would smash the context data that the HVS is
783 * currently using.
784 */
785 writel(vc4_state->dlist[vc4_state->pos0_offset],
786 &vc4_state->hw_dlist[vc4_state->pos0_offset]);
787 writel(vc4_state->dlist[vc4_state->pos2_offset],
788 &vc4_state->hw_dlist[vc4_state->pos2_offset]);
789 writel(vc4_state->dlist[vc4_state->ptr0_offset],
790 &vc4_state->hw_dlist[vc4_state->ptr0_offset]);
791}
792
793static int vc4_plane_atomic_async_check(struct drm_plane *plane,
794 struct drm_plane_state *state)
795{
796 /* No configuring new scaling in the fast path. */
797 if (plane->state->crtc_w != state->crtc_w ||
798 plane->state->crtc_h != state->crtc_h ||
799 plane->state->src_w != state->src_w ||
800 plane->state->src_h != state->src_h)
801 return -EINVAL;
802
803 return 0;
804}
805
744static int vc4_prepare_fb(struct drm_plane *plane, 806static int vc4_prepare_fb(struct drm_plane *plane,
745 struct drm_plane_state *state) 807 struct drm_plane_state *state)
746{ 808{
@@ -780,6 +842,8 @@ static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
780 .atomic_update = vc4_plane_atomic_update, 842 .atomic_update = vc4_plane_atomic_update,
781 .prepare_fb = vc4_prepare_fb, 843 .prepare_fb = vc4_prepare_fb,
782 .cleanup_fb = vc4_cleanup_fb, 844 .cleanup_fb = vc4_cleanup_fb,
845 .atomic_async_check = vc4_plane_atomic_async_check,
846 .atomic_async_update = vc4_plane_atomic_async_update,
783}; 847};
784 848
785static void vc4_plane_destroy(struct drm_plane *plane) 849static void vc4_plane_destroy(struct drm_plane *plane)
@@ -788,82 +852,6 @@ static void vc4_plane_destroy(struct drm_plane *plane)
788 drm_plane_cleanup(plane); 852 drm_plane_cleanup(plane);
789} 853}
790 854
791/* Implements immediate (non-vblank-synced) updates of the cursor
792 * position, or falls back to the atomic helper otherwise.
793 */
794static int
795vc4_update_plane(struct drm_plane *plane,
796 struct drm_crtc *crtc,
797 struct drm_framebuffer *fb,
798 int crtc_x, int crtc_y,
799 unsigned int crtc_w, unsigned int crtc_h,
800 uint32_t src_x, uint32_t src_y,
801 uint32_t src_w, uint32_t src_h,
802 struct drm_modeset_acquire_ctx *ctx)
803{
804 struct drm_plane_state *plane_state;
805 struct vc4_plane_state *vc4_state;
806
807 if (plane != crtc->cursor)
808 goto out;
809
810 plane_state = plane->state;
811 vc4_state = to_vc4_plane_state(plane_state);
812
813 if (!plane_state)
814 goto out;
815
816 /* No configuring new scaling in the fast path. */
817 if (crtc_w != plane_state->crtc_w ||
818 crtc_h != plane_state->crtc_h ||
819 src_w != plane_state->src_w ||
820 src_h != plane_state->src_h) {
821 goto out;
822 }
823
824 if (fb != plane_state->fb) {
825 drm_atomic_set_fb_for_plane(plane->state, fb);
826 vc4_plane_async_set_fb(plane, fb);
827 }
828
829 /* Set the cursor's position on the screen. This is the
830 * expected change from the drm_mode_cursor_universal()
831 * helper.
832 */
833 plane_state->crtc_x = crtc_x;
834 plane_state->crtc_y = crtc_y;
835
836 /* Allow changing the start position within the cursor BO, if
837 * that matters.
838 */
839 plane_state->src_x = src_x;
840 plane_state->src_y = src_y;
841
842 /* Update the display list based on the new crtc_x/y. */
843 vc4_plane_atomic_check(plane, plane_state);
844
845 /* Note that we can't just call vc4_plane_write_dlist()
846 * because that would smash the context data that the HVS is
847 * currently using.
848 */
849 writel(vc4_state->dlist[vc4_state->pos0_offset],
850 &vc4_state->hw_dlist[vc4_state->pos0_offset]);
851 writel(vc4_state->dlist[vc4_state->pos2_offset],
852 &vc4_state->hw_dlist[vc4_state->pos2_offset]);
853 writel(vc4_state->dlist[vc4_state->ptr0_offset],
854 &vc4_state->hw_dlist[vc4_state->ptr0_offset]);
855
856 return 0;
857
858out:
859 return drm_atomic_helper_update_plane(plane, crtc, fb,
860 crtc_x, crtc_y,
861 crtc_w, crtc_h,
862 src_x, src_y,
863 src_w, src_h,
864 ctx);
865}
866
867static bool vc4_format_mod_supported(struct drm_plane *plane, 855static bool vc4_format_mod_supported(struct drm_plane *plane,
868 uint32_t format, 856 uint32_t format,
869 uint64_t modifier) 857 uint64_t modifier)
@@ -891,7 +879,7 @@ static bool vc4_format_mod_supported(struct drm_plane *plane,
891} 879}
892 880
893static const struct drm_plane_funcs vc4_plane_funcs = { 881static const struct drm_plane_funcs vc4_plane_funcs = {
894 .update_plane = vc4_update_plane, 882 .update_plane = drm_atomic_helper_update_plane,
895 .disable_plane = drm_atomic_helper_disable_plane, 883 .disable_plane = drm_atomic_helper_disable_plane,
896 .destroy = vc4_plane_destroy, 884 .destroy = vc4_plane_destroy,
897 .set_property = NULL, 885 .set_property = NULL,
@@ -939,5 +927,7 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
939 927
940 drm_plane_helper_add(plane, &vc4_plane_helper_funcs); 928 drm_plane_helper_add(plane, &vc4_plane_helper_funcs);
941 929
930 drm_plane_create_alpha_property(plane);
931
942 return plane; 932 return plane;
943} 933}
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index a141496104a6..d1fb6fec46eb 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -330,6 +330,21 @@
330#define SCALER_DISPCTRL0 0x00000040 330#define SCALER_DISPCTRL0 0x00000040
331# define SCALER_DISPCTRLX_ENABLE BIT(31) 331# define SCALER_DISPCTRLX_ENABLE BIT(31)
332# define SCALER_DISPCTRLX_RESET BIT(30) 332# define SCALER_DISPCTRLX_RESET BIT(30)
333/* Generates a single frame when VSTART is seen and stops at the last
334 * pixel read from the FIFO.
335 */
336# define SCALER_DISPCTRLX_ONESHOT BIT(29)
337/* Processes a single context in the dlist and then task switch,
338 * instead of an entire line.
339 */
340# define SCALER_DISPCTRLX_ONECTX BIT(28)
341/* Set to have DISPSLAVE return 2 16bpp pixels and no status data. */
342# define SCALER_DISPCTRLX_FIFO32 BIT(27)
343/* Turns on output to the DISPSLAVE register instead of the normal
344 * FIFO.
345 */
346# define SCALER_DISPCTRLX_FIFOREG BIT(26)
347
333# define SCALER_DISPCTRLX_WIDTH_MASK VC4_MASK(23, 12) 348# define SCALER_DISPCTRLX_WIDTH_MASK VC4_MASK(23, 12)
334# define SCALER_DISPCTRLX_WIDTH_SHIFT 12 349# define SCALER_DISPCTRLX_WIDTH_SHIFT 12
335# define SCALER_DISPCTRLX_HEIGHT_MASK VC4_MASK(11, 0) 350# define SCALER_DISPCTRLX_HEIGHT_MASK VC4_MASK(11, 0)
@@ -402,6 +417,68 @@
402 */ 417 */
403# define SCALER_GAMADDR_SRAMENB BIT(30) 418# define SCALER_GAMADDR_SRAMENB BIT(30)
404 419
420#define SCALER_OLEDOFFS 0x00000080
421/* Clamps R to [16,235] and G/B to [16,240]. */
422# define SCALER_OLEDOFFS_YUVCLAMP BIT(31)
423
424/* Chooses which display FIFO the matrix applies to. */
425# define SCALER_OLEDOFFS_DISPFIFO_MASK VC4_MASK(25, 24)
426# define SCALER_OLEDOFFS_DISPFIFO_SHIFT 24
427# define SCALER_OLEDOFFS_DISPFIFO_DISABLED 0
428# define SCALER_OLEDOFFS_DISPFIFO_0 1
429# define SCALER_OLEDOFFS_DISPFIFO_1 2
430# define SCALER_OLEDOFFS_DISPFIFO_2 3
431
432/* Offsets are 8-bit 2s-complement. */
433# define SCALER_OLEDOFFS_RED_MASK VC4_MASK(23, 16)
434# define SCALER_OLEDOFFS_RED_SHIFT 16
435# define SCALER_OLEDOFFS_GREEN_MASK VC4_MASK(15, 8)
436# define SCALER_OLEDOFFS_GREEN_SHIFT 8
437# define SCALER_OLEDOFFS_BLUE_MASK VC4_MASK(7, 0)
438# define SCALER_OLEDOFFS_BLUE_SHIFT 0
439
440/* The coefficients are S0.9 fractions. */
441#define SCALER_OLEDCOEF0 0x00000084
442# define SCALER_OLEDCOEF0_B_TO_R_MASK VC4_MASK(29, 20)
443# define SCALER_OLEDCOEF0_B_TO_R_SHIFT 20
444# define SCALER_OLEDCOEF0_B_TO_G_MASK VC4_MASK(19, 10)
445# define SCALER_OLEDCOEF0_B_TO_G_SHIFT 10
446# define SCALER_OLEDCOEF0_B_TO_B_MASK VC4_MASK(9, 0)
447# define SCALER_OLEDCOEF0_B_TO_B_SHIFT 0
448
449#define SCALER_OLEDCOEF1 0x00000088
450# define SCALER_OLEDCOEF1_G_TO_R_MASK VC4_MASK(29, 20)
451# define SCALER_OLEDCOEF1_G_TO_R_SHIFT 20
452# define SCALER_OLEDCOEF1_G_TO_G_MASK VC4_MASK(19, 10)
453# define SCALER_OLEDCOEF1_G_TO_G_SHIFT 10
454# define SCALER_OLEDCOEF1_G_TO_B_MASK VC4_MASK(9, 0)
455# define SCALER_OLEDCOEF1_G_TO_B_SHIFT 0
456
457#define SCALER_OLEDCOEF2 0x0000008c
458# define SCALER_OLEDCOEF2_R_TO_R_MASK VC4_MASK(29, 20)
459# define SCALER_OLEDCOEF2_R_TO_R_SHIFT 20
460# define SCALER_OLEDCOEF2_R_TO_G_MASK VC4_MASK(19, 10)
461# define SCALER_OLEDCOEF2_R_TO_G_SHIFT 10
462# define SCALER_OLEDCOEF2_R_TO_B_MASK VC4_MASK(9, 0)
463# define SCALER_OLEDCOEF2_R_TO_B_SHIFT 0
464
465/* Slave addresses for DMAing from HVS composition output to other
466 * devices. The top bits are valid only in !FIFO32 mode.
467 */
468#define SCALER_DISPSLAVE0 0x000000c0
469#define SCALER_DISPSLAVE1 0x000000c9
470#define SCALER_DISPSLAVE2 0x000000d0
471# define SCALER_DISPSLAVE_ISSUE_VSTART BIT(31)
472# define SCALER_DISPSLAVE_ISSUE_HSTART BIT(30)
473/* Set when the current line has been read and an HSTART is required. */
474# define SCALER_DISPSLAVE_EOL BIT(26)
475/* Set when the display FIFO is empty. */
476# define SCALER_DISPSLAVE_EMPTY BIT(25)
477/* Set when there is RGB data ready to read. */
478# define SCALER_DISPSLAVE_VALID BIT(24)
479# define SCALER_DISPSLAVE_RGB_MASK VC4_MASK(23, 0)
480# define SCALER_DISPSLAVE_RGB_SHIFT 0
481
405#define SCALER_GAMDATA 0x000000e0 482#define SCALER_GAMDATA 0x000000e0
406#define SCALER_DLIST_START 0x00002000 483#define SCALER_DLIST_START 0x00002000
407#define SCALER_DLIST_SIZE 0x00004000 484#define SCALER_DLIST_SIZE 0x00004000
@@ -767,6 +844,10 @@ enum hvs_pixel_format {
767 HVS_PIXEL_FORMAT_YCBCR_YUV420_2PLANE = 9, 844 HVS_PIXEL_FORMAT_YCBCR_YUV420_2PLANE = 9,
768 HVS_PIXEL_FORMAT_YCBCR_YUV422_3PLANE = 10, 845 HVS_PIXEL_FORMAT_YCBCR_YUV422_3PLANE = 10,
769 HVS_PIXEL_FORMAT_YCBCR_YUV422_2PLANE = 11, 846 HVS_PIXEL_FORMAT_YCBCR_YUV422_2PLANE = 11,
847 HVS_PIXEL_FORMAT_H264 = 12,
848 HVS_PIXEL_FORMAT_PALETTE = 13,
849 HVS_PIXEL_FORMAT_YUV444_RGB = 14,
850 HVS_PIXEL_FORMAT_AYUV444_RGB = 15,
770}; 851};
771 852
772/* Note: the LSB is the rightmost character shown. Only valid for 853/* Note: the LSB is the rightmost character shown. Only valid for
@@ -800,12 +881,27 @@ enum hvs_pixel_format {
800#define SCALER_CTL0_TILING_128B 2 881#define SCALER_CTL0_TILING_128B 2
801#define SCALER_CTL0_TILING_256B_OR_T 3 882#define SCALER_CTL0_TILING_256B_OR_T 3
802 883
884#define SCALER_CTL0_ALPHA_MASK BIT(19)
803#define SCALER_CTL0_HFLIP BIT(16) 885#define SCALER_CTL0_HFLIP BIT(16)
804#define SCALER_CTL0_VFLIP BIT(15) 886#define SCALER_CTL0_VFLIP BIT(15)
805 887
888#define SCALER_CTL0_KEY_MODE_MASK VC4_MASK(18, 17)
889#define SCALER_CTL0_KEY_MODE_SHIFT 17
890#define SCALER_CTL0_KEY_DISABLED 0
891#define SCALER_CTL0_KEY_LUMA_OR_COMMON_RGB 1
892#define SCALER_CTL0_KEY_MATCH 2 /* turn transparent */
893#define SCALER_CTL0_KEY_REPLACE 3 /* replace with value from key mask word 2 */
894
806#define SCALER_CTL0_ORDER_MASK VC4_MASK(14, 13) 895#define SCALER_CTL0_ORDER_MASK VC4_MASK(14, 13)
807#define SCALER_CTL0_ORDER_SHIFT 13 896#define SCALER_CTL0_ORDER_SHIFT 13
808 897
898#define SCALER_CTL0_RGBA_EXPAND_MASK VC4_MASK(12, 11)
899#define SCALER_CTL0_RGBA_EXPAND_SHIFT 11
900#define SCALER_CTL0_RGBA_EXPAND_ZERO 0
901#define SCALER_CTL0_RGBA_EXPAND_LSB 1
902#define SCALER_CTL0_RGBA_EXPAND_MSB 2
903#define SCALER_CTL0_RGBA_EXPAND_ROUND 3
904
809#define SCALER_CTL0_SCL1_MASK VC4_MASK(10, 8) 905#define SCALER_CTL0_SCL1_MASK VC4_MASK(10, 8)
810#define SCALER_CTL0_SCL1_SHIFT 8 906#define SCALER_CTL0_SCL1_SHIFT 8
811 907
@@ -849,6 +945,7 @@ enum hvs_pixel_format {
849#define SCALER_POS2_ALPHA_MODE_FIXED_NONZERO 2 945#define SCALER_POS2_ALPHA_MODE_FIXED_NONZERO 2
850#define SCALER_POS2_ALPHA_MODE_FIXED_OVER_0x07 3 946#define SCALER_POS2_ALPHA_MODE_FIXED_OVER_0x07 3
851#define SCALER_POS2_ALPHA_PREMULT BIT(29) 947#define SCALER_POS2_ALPHA_PREMULT BIT(29)
948#define SCALER_POS2_ALPHA_MIX BIT(28)
852 949
853#define SCALER_POS2_HEIGHT_MASK VC4_MASK(27, 16) 950#define SCALER_POS2_HEIGHT_MASK VC4_MASK(27, 16)
854#define SCALER_POS2_HEIGHT_SHIFT 16 951#define SCALER_POS2_HEIGHT_SHIFT 16
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 8cc8c34d67f5..a5edd86603d9 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -208,7 +208,7 @@ static int virtio_gpu_conn_get_modes(struct drm_connector *connector)
208 return count; 208 return count;
209} 209}
210 210
211static int virtio_gpu_conn_mode_valid(struct drm_connector *connector, 211static enum drm_mode_status virtio_gpu_conn_mode_valid(struct drm_connector *connector,
212 struct drm_display_mode *mode) 212 struct drm_display_mode *mode)
213{ 213{
214 struct virtio_gpu_output *output = 214 struct virtio_gpu_output *output =
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index f11601b6fd74..6728c6247b4b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -384,9 +384,9 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
384 hotspot_x = du->hotspot_x; 384 hotspot_x = du->hotspot_x;
385 hotspot_y = du->hotspot_y; 385 hotspot_y = du->hotspot_y;
386 386
387 if (plane->fb) { 387 if (plane->state->fb) {
388 hotspot_x += plane->fb->hot_x; 388 hotspot_x += plane->state->fb->hot_x;
389 hotspot_y += plane->fb->hot_y; 389 hotspot_y += plane->state->fb->hot_y;
390 } 390 }
391 391
392 du->cursor_surface = vps->surf; 392 du->cursor_surface = vps->surf;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 3824595fece1..4a5907e3f560 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -281,39 +281,6 @@ drm_connector_helper_funcs vmw_ldu_connector_helper_funcs = {
281 * Legacy Display Plane Functions 281 * Legacy Display Plane Functions
282 */ 282 */
283 283
284/**
285 * vmw_ldu_primary_plane_cleanup_fb - Noop
286 *
287 * @plane: display plane
288 * @old_state: Contains the FB to clean up
289 *
290 * Unpins the display surface
291 *
292 * Returns 0 on success
293 */
294static void
295vmw_ldu_primary_plane_cleanup_fb(struct drm_plane *plane,
296 struct drm_plane_state *old_state)
297{
298}
299
300
301/**
302 * vmw_ldu_primary_plane_prepare_fb - Noop
303 *
304 * @plane: display plane
305 * @new_state: info on the new plane state, including the FB
306 *
307 * Returns 0 on success
308 */
309static int
310vmw_ldu_primary_plane_prepare_fb(struct drm_plane *plane,
311 struct drm_plane_state *new_state)
312{
313 return 0;
314}
315
316
317static void 284static void
318vmw_ldu_primary_plane_atomic_update(struct drm_plane *plane, 285vmw_ldu_primary_plane_atomic_update(struct drm_plane *plane,
319 struct drm_plane_state *old_state) 286 struct drm_plane_state *old_state)
@@ -373,8 +340,6 @@ static const struct
373drm_plane_helper_funcs vmw_ldu_primary_plane_helper_funcs = { 340drm_plane_helper_funcs vmw_ldu_primary_plane_helper_funcs = {
374 .atomic_check = vmw_du_primary_plane_atomic_check, 341 .atomic_check = vmw_du_primary_plane_atomic_check,
375 .atomic_update = vmw_ldu_primary_plane_atomic_update, 342 .atomic_update = vmw_ldu_primary_plane_atomic_update,
376 .prepare_fb = vmw_ldu_primary_plane_prepare_fb,
377 .cleanup_fb = vmw_ldu_primary_plane_cleanup_fb,
378}; 343};
379 344
380static const struct drm_crtc_helper_funcs vmw_ldu_crtc_helper_funcs = { 345static const struct drm_crtc_helper_funcs vmw_ldu_crtc_helper_funcs = {
diff --git a/drivers/gpu/drm/xen/Kconfig b/drivers/gpu/drm/xen/Kconfig
new file mode 100644
index 000000000000..4cca160782ab
--- /dev/null
+++ b/drivers/gpu/drm/xen/Kconfig
@@ -0,0 +1,17 @@
1config DRM_XEN
2 bool "DRM Support for Xen guest OS"
3 depends on XEN
4 help
5 Choose this option if you want to enable DRM support
6 for Xen.
7
8config DRM_XEN_FRONTEND
9 tristate "Para-virtualized frontend driver for Xen guest OS"
10 depends on DRM_XEN
11 depends on DRM
12 select DRM_KMS_HELPER
13 select VIDEOMODE_HELPERS
14 select XEN_XENBUS_FRONTEND
15 help
16 Choose this option if you want to enable a para-virtualized
17 frontend DRM/KMS driver for Xen guest OSes.
diff --git a/drivers/gpu/drm/xen/Makefile b/drivers/gpu/drm/xen/Makefile
new file mode 100644
index 000000000000..712afff5ffc3
--- /dev/null
+++ b/drivers/gpu/drm/xen/Makefile
@@ -0,0 +1,11 @@
1# SPDX-License-Identifier: GPL-2.0 OR MIT
2
3drm_xen_front-objs := xen_drm_front.o \
4 xen_drm_front_kms.o \
5 xen_drm_front_conn.o \
6 xen_drm_front_evtchnl.o \
7 xen_drm_front_shbuf.o \
8 xen_drm_front_cfg.o \
9 xen_drm_front_gem.o
10
11obj-$(CONFIG_DRM_XEN_FRONTEND) += drm_xen_front.o
diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
new file mode 100644
index 000000000000..1b0ea9ac330e
--- /dev/null
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -0,0 +1,840 @@
1// SPDX-License-Identifier: GPL-2.0 OR MIT
2
3/*
4 * Xen para-virtual DRM device
5 *
6 * Copyright (C) 2016-2018 EPAM Systems Inc.
7 *
8 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
9 */
10
11#include <drm/drmP.h>
12#include <drm/drm_atomic_helper.h>
13#include <drm/drm_crtc_helper.h>
14#include <drm/drm_gem.h>
15
16#include <linux/of_device.h>
17
18#include <xen/platform_pci.h>
19#include <xen/xen.h>
20#include <xen/xenbus.h>
21
22#include <xen/interface/io/displif.h>
23
24#include "xen_drm_front.h"
25#include "xen_drm_front_cfg.h"
26#include "xen_drm_front_evtchnl.h"
27#include "xen_drm_front_gem.h"
28#include "xen_drm_front_kms.h"
29#include "xen_drm_front_shbuf.h"
30
31struct xen_drm_front_dbuf {
32 struct list_head list;
33 u64 dbuf_cookie;
34 u64 fb_cookie;
35 struct xen_drm_front_shbuf *shbuf;
36};
37
38static int dbuf_add_to_list(struct xen_drm_front_info *front_info,
39 struct xen_drm_front_shbuf *shbuf, u64 dbuf_cookie)
40{
41 struct xen_drm_front_dbuf *dbuf;
42
43 dbuf = kzalloc(sizeof(*dbuf), GFP_KERNEL);
44 if (!dbuf)
45 return -ENOMEM;
46
47 dbuf->dbuf_cookie = dbuf_cookie;
48 dbuf->shbuf = shbuf;
49 list_add(&dbuf->list, &front_info->dbuf_list);
50 return 0;
51}
52
53static struct xen_drm_front_dbuf *dbuf_get(struct list_head *dbuf_list,
54 u64 dbuf_cookie)
55{
56 struct xen_drm_front_dbuf *buf, *q;
57
58 list_for_each_entry_safe(buf, q, dbuf_list, list)
59 if (buf->dbuf_cookie == dbuf_cookie)
60 return buf;
61
62 return NULL;
63}
64
65static void dbuf_flush_fb(struct list_head *dbuf_list, u64 fb_cookie)
66{
67 struct xen_drm_front_dbuf *buf, *q;
68
69 list_for_each_entry_safe(buf, q, dbuf_list, list)
70 if (buf->fb_cookie == fb_cookie)
71 xen_drm_front_shbuf_flush(buf->shbuf);
72}
73
74static void dbuf_free(struct list_head *dbuf_list, u64 dbuf_cookie)
75{
76 struct xen_drm_front_dbuf *buf, *q;
77
78 list_for_each_entry_safe(buf, q, dbuf_list, list)
79 if (buf->dbuf_cookie == dbuf_cookie) {
80 list_del(&buf->list);
81 xen_drm_front_shbuf_unmap(buf->shbuf);
82 xen_drm_front_shbuf_free(buf->shbuf);
83 kfree(buf);
84 break;
85 }
86}
87
88static void dbuf_free_all(struct list_head *dbuf_list)
89{
90 struct xen_drm_front_dbuf *buf, *q;
91
92 list_for_each_entry_safe(buf, q, dbuf_list, list) {
93 list_del(&buf->list);
94 xen_drm_front_shbuf_unmap(buf->shbuf);
95 xen_drm_front_shbuf_free(buf->shbuf);
96 kfree(buf);
97 }
98}
99
100static struct xendispl_req *
101be_prepare_req(struct xen_drm_front_evtchnl *evtchnl, u8 operation)
102{
103 struct xendispl_req *req;
104
105 req = RING_GET_REQUEST(&evtchnl->u.req.ring,
106 evtchnl->u.req.ring.req_prod_pvt);
107 req->operation = operation;
108 req->id = evtchnl->evt_next_id++;
109 evtchnl->evt_id = req->id;
110 return req;
111}
112
113static int be_stream_do_io(struct xen_drm_front_evtchnl *evtchnl,
114 struct xendispl_req *req)
115{
116 reinit_completion(&evtchnl->u.req.completion);
117 if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
118 return -EIO;
119
120 xen_drm_front_evtchnl_flush(evtchnl);
121 return 0;
122}
123
124static int be_stream_wait_io(struct xen_drm_front_evtchnl *evtchnl)
125{
126 if (wait_for_completion_timeout(&evtchnl->u.req.completion,
127 msecs_to_jiffies(XEN_DRM_FRONT_WAIT_BACK_MS)) <= 0)
128 return -ETIMEDOUT;
129
130 return evtchnl->u.req.resp_status;
131}
132
133int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline,
134 u32 x, u32 y, u32 width, u32 height,
135 u32 bpp, u64 fb_cookie)
136{
137 struct xen_drm_front_evtchnl *evtchnl;
138 struct xen_drm_front_info *front_info;
139 struct xendispl_req *req;
140 unsigned long flags;
141 int ret;
142
143 front_info = pipeline->drm_info->front_info;
144 evtchnl = &front_info->evt_pairs[pipeline->index].req;
145 if (unlikely(!evtchnl))
146 return -EIO;
147
148 mutex_lock(&evtchnl->u.req.req_io_lock);
149
150 spin_lock_irqsave(&front_info->io_lock, flags);
151 req = be_prepare_req(evtchnl, XENDISPL_OP_SET_CONFIG);
152 req->op.set_config.x = x;
153 req->op.set_config.y = y;
154 req->op.set_config.width = width;
155 req->op.set_config.height = height;
156 req->op.set_config.bpp = bpp;
157 req->op.set_config.fb_cookie = fb_cookie;
158
159 ret = be_stream_do_io(evtchnl, req);
160 spin_unlock_irqrestore(&front_info->io_lock, flags);
161
162 if (ret == 0)
163 ret = be_stream_wait_io(evtchnl);
164
165 mutex_unlock(&evtchnl->u.req.req_io_lock);
166 return ret;
167}
168
169int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
170 u64 dbuf_cookie, u32 width, u32 height,
171 u32 bpp, u64 size, struct page **pages)
172{
173 struct xen_drm_front_evtchnl *evtchnl;
174 struct xen_drm_front_shbuf *shbuf;
175 struct xendispl_req *req;
176 struct xen_drm_front_shbuf_cfg buf_cfg;
177 unsigned long flags;
178 int ret;
179
180 evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
181 if (unlikely(!evtchnl))
182 return -EIO;
183
184 memset(&buf_cfg, 0, sizeof(buf_cfg));
185 buf_cfg.xb_dev = front_info->xb_dev;
186 buf_cfg.pages = pages;
187 buf_cfg.size = size;
188 buf_cfg.be_alloc = front_info->cfg.be_alloc;
189
190 shbuf = xen_drm_front_shbuf_alloc(&buf_cfg);
191 if (!shbuf)
192 return -ENOMEM;
193
194 ret = dbuf_add_to_list(front_info, shbuf, dbuf_cookie);
195 if (ret < 0) {
196 xen_drm_front_shbuf_free(shbuf);
197 return ret;
198 }
199
200 mutex_lock(&evtchnl->u.req.req_io_lock);
201
202 spin_lock_irqsave(&front_info->io_lock, flags);
203 req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_CREATE);
204 req->op.dbuf_create.gref_directory =
205 xen_drm_front_shbuf_get_dir_start(shbuf);
206 req->op.dbuf_create.buffer_sz = size;
207 req->op.dbuf_create.dbuf_cookie = dbuf_cookie;
208 req->op.dbuf_create.width = width;
209 req->op.dbuf_create.height = height;
210 req->op.dbuf_create.bpp = bpp;
211 if (buf_cfg.be_alloc)
212 req->op.dbuf_create.flags |= XENDISPL_DBUF_FLG_REQ_ALLOC;
213
214 ret = be_stream_do_io(evtchnl, req);
215 spin_unlock_irqrestore(&front_info->io_lock, flags);
216
217 if (ret < 0)
218 goto fail;
219
220 ret = be_stream_wait_io(evtchnl);
221 if (ret < 0)
222 goto fail;
223
224 ret = xen_drm_front_shbuf_map(shbuf);
225 if (ret < 0)
226 goto fail;
227
228 mutex_unlock(&evtchnl->u.req.req_io_lock);
229 return 0;
230
231fail:
232 mutex_unlock(&evtchnl->u.req.req_io_lock);
233 dbuf_free(&front_info->dbuf_list, dbuf_cookie);
234 return ret;
235}
236
237static int xen_drm_front_dbuf_destroy(struct xen_drm_front_info *front_info,
238 u64 dbuf_cookie)
239{
240 struct xen_drm_front_evtchnl *evtchnl;
241 struct xendispl_req *req;
242 unsigned long flags;
243 bool be_alloc;
244 int ret;
245
246 evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
247 if (unlikely(!evtchnl))
248 return -EIO;
249
250 be_alloc = front_info->cfg.be_alloc;
251
252 /*
253 * For the backend allocated buffer release references now, so backend
254 * can free the buffer.
255 */
256 if (be_alloc)
257 dbuf_free(&front_info->dbuf_list, dbuf_cookie);
258
259 mutex_lock(&evtchnl->u.req.req_io_lock);
260
261 spin_lock_irqsave(&front_info->io_lock, flags);
262 req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_DESTROY);
263 req->op.dbuf_destroy.dbuf_cookie = dbuf_cookie;
264
265 ret = be_stream_do_io(evtchnl, req);
266 spin_unlock_irqrestore(&front_info->io_lock, flags);
267
268 if (ret == 0)
269 ret = be_stream_wait_io(evtchnl);
270
271 /*
272 * Do this regardless of communication status with the backend:
273 * if we cannot remove remote resources remove what we can locally.
274 */
275 if (!be_alloc)
276 dbuf_free(&front_info->dbuf_list, dbuf_cookie);
277
278 mutex_unlock(&evtchnl->u.req.req_io_lock);
279 return ret;
280}
281
282int xen_drm_front_fb_attach(struct xen_drm_front_info *front_info,
283 u64 dbuf_cookie, u64 fb_cookie, u32 width,
284 u32 height, u32 pixel_format)
285{
286 struct xen_drm_front_evtchnl *evtchnl;
287 struct xen_drm_front_dbuf *buf;
288 struct xendispl_req *req;
289 unsigned long flags;
290 int ret;
291
292 evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
293 if (unlikely(!evtchnl))
294 return -EIO;
295
296 buf = dbuf_get(&front_info->dbuf_list, dbuf_cookie);
297 if (!buf)
298 return -EINVAL;
299
300 buf->fb_cookie = fb_cookie;
301
302 mutex_lock(&evtchnl->u.req.req_io_lock);
303
304 spin_lock_irqsave(&front_info->io_lock, flags);
305 req = be_prepare_req(evtchnl, XENDISPL_OP_FB_ATTACH);
306 req->op.fb_attach.dbuf_cookie = dbuf_cookie;
307 req->op.fb_attach.fb_cookie = fb_cookie;
308 req->op.fb_attach.width = width;
309 req->op.fb_attach.height = height;
310 req->op.fb_attach.pixel_format = pixel_format;
311
312 ret = be_stream_do_io(evtchnl, req);
313 spin_unlock_irqrestore(&front_info->io_lock, flags);
314
315 if (ret == 0)
316 ret = be_stream_wait_io(evtchnl);
317
318 mutex_unlock(&evtchnl->u.req.req_io_lock);
319 return ret;
320}
321
322int xen_drm_front_fb_detach(struct xen_drm_front_info *front_info,
323 u64 fb_cookie)
324{
325 struct xen_drm_front_evtchnl *evtchnl;
326 struct xendispl_req *req;
327 unsigned long flags;
328 int ret;
329
330 evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
331 if (unlikely(!evtchnl))
332 return -EIO;
333
334 mutex_lock(&evtchnl->u.req.req_io_lock);
335
336 spin_lock_irqsave(&front_info->io_lock, flags);
337 req = be_prepare_req(evtchnl, XENDISPL_OP_FB_DETACH);
338 req->op.fb_detach.fb_cookie = fb_cookie;
339
340 ret = be_stream_do_io(evtchnl, req);
341 spin_unlock_irqrestore(&front_info->io_lock, flags);
342
343 if (ret == 0)
344 ret = be_stream_wait_io(evtchnl);
345
346 mutex_unlock(&evtchnl->u.req.req_io_lock);
347 return ret;
348}
349
350int xen_drm_front_page_flip(struct xen_drm_front_info *front_info,
351 int conn_idx, u64 fb_cookie)
352{
353 struct xen_drm_front_evtchnl *evtchnl;
354 struct xendispl_req *req;
355 unsigned long flags;
356 int ret;
357
358 if (unlikely(conn_idx >= front_info->num_evt_pairs))
359 return -EINVAL;
360
361 dbuf_flush_fb(&front_info->dbuf_list, fb_cookie);
362 evtchnl = &front_info->evt_pairs[conn_idx].req;
363
364 mutex_lock(&evtchnl->u.req.req_io_lock);
365
366 spin_lock_irqsave(&front_info->io_lock, flags);
367 req = be_prepare_req(evtchnl, XENDISPL_OP_PG_FLIP);
368 req->op.pg_flip.fb_cookie = fb_cookie;
369
370 ret = be_stream_do_io(evtchnl, req);
371 spin_unlock_irqrestore(&front_info->io_lock, flags);
372
373 if (ret == 0)
374 ret = be_stream_wait_io(evtchnl);
375
376 mutex_unlock(&evtchnl->u.req.req_io_lock);
377 return ret;
378}
379
380void xen_drm_front_on_frame_done(struct xen_drm_front_info *front_info,
381 int conn_idx, u64 fb_cookie)
382{
383 struct xen_drm_front_drm_info *drm_info = front_info->drm_info;
384
385 if (unlikely(conn_idx >= front_info->cfg.num_connectors))
386 return;
387
388 xen_drm_front_kms_on_frame_done(&drm_info->pipeline[conn_idx],
389 fb_cookie);
390}
391
392static int xen_drm_drv_dumb_create(struct drm_file *filp,
393 struct drm_device *dev,
394 struct drm_mode_create_dumb *args)
395{
396 struct xen_drm_front_drm_info *drm_info = dev->dev_private;
397 struct drm_gem_object *obj;
398 int ret;
399
400 /*
401 * Dumb creation is a two stage process: first we create a fully
402 * constructed GEM object which is communicated to the backend, and
403 * only after that we can create GEM's handle. This is done so,
404 * because of the possible races: once you create a handle it becomes
405 * immediately visible to user-space, so the latter can try accessing
406 * object without pages etc.
407 * For details also see drm_gem_handle_create
408 */
409 args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
410 args->size = args->pitch * args->height;
411
412 obj = xen_drm_front_gem_create(dev, args->size);
413 if (IS_ERR_OR_NULL(obj)) {
414 ret = PTR_ERR(obj);
415 goto fail;
416 }
417
418 ret = xen_drm_front_dbuf_create(drm_info->front_info,
419 xen_drm_front_dbuf_to_cookie(obj),
420 args->width, args->height, args->bpp,
421 args->size,
422 xen_drm_front_gem_get_pages(obj));
423 if (ret)
424 goto fail_backend;
425
426 /* This is the tail of GEM object creation */
427 ret = drm_gem_handle_create(filp, obj, &args->handle);
428 if (ret)
429 goto fail_handle;
430
431 /* Drop reference from allocate - handle holds it now */
432 drm_gem_object_put_unlocked(obj);
433 return 0;
434
435fail_handle:
436 xen_drm_front_dbuf_destroy(drm_info->front_info,
437 xen_drm_front_dbuf_to_cookie(obj));
438fail_backend:
439 /* drop reference from allocate */
440 drm_gem_object_put_unlocked(obj);
441fail:
442 DRM_ERROR("Failed to create dumb buffer: %d\n", ret);
443 return ret;
444}
445
446static void xen_drm_drv_free_object_unlocked(struct drm_gem_object *obj)
447{
448 struct xen_drm_front_drm_info *drm_info = obj->dev->dev_private;
449 int idx;
450
451 if (drm_dev_enter(obj->dev, &idx)) {
452 xen_drm_front_dbuf_destroy(drm_info->front_info,
453 xen_drm_front_dbuf_to_cookie(obj));
454 drm_dev_exit(idx);
455 } else {
456 dbuf_free(&drm_info->front_info->dbuf_list,
457 xen_drm_front_dbuf_to_cookie(obj));
458 }
459
460 xen_drm_front_gem_free_object_unlocked(obj);
461}
462
463static void xen_drm_drv_release(struct drm_device *dev)
464{
465 struct xen_drm_front_drm_info *drm_info = dev->dev_private;
466 struct xen_drm_front_info *front_info = drm_info->front_info;
467
468 xen_drm_front_kms_fini(drm_info);
469
470 drm_atomic_helper_shutdown(dev);
471 drm_mode_config_cleanup(dev);
472
473 drm_dev_fini(dev);
474 kfree(dev);
475
476 if (front_info->cfg.be_alloc)
477 xenbus_switch_state(front_info->xb_dev,
478 XenbusStateInitialising);
479
480 kfree(drm_info);
481}
482
483static const struct file_operations xen_drm_dev_fops = {
484 .owner = THIS_MODULE,
485 .open = drm_open,
486 .release = drm_release,
487 .unlocked_ioctl = drm_ioctl,
488#ifdef CONFIG_COMPAT
489 .compat_ioctl = drm_compat_ioctl,
490#endif
491 .poll = drm_poll,
492 .read = drm_read,
493 .llseek = no_llseek,
494 .mmap = xen_drm_front_gem_mmap,
495};
496
497static const struct vm_operations_struct xen_drm_drv_vm_ops = {
498 .open = drm_gem_vm_open,
499 .close = drm_gem_vm_close,
500};
501
502static struct drm_driver xen_drm_driver = {
503 .driver_features = DRIVER_GEM | DRIVER_MODESET |
504 DRIVER_PRIME | DRIVER_ATOMIC,
505 .release = xen_drm_drv_release,
506 .gem_vm_ops = &xen_drm_drv_vm_ops,
507 .gem_free_object_unlocked = xen_drm_drv_free_object_unlocked,
508 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
509 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
510 .gem_prime_import = drm_gem_prime_import,
511 .gem_prime_export = drm_gem_prime_export,
512 .gem_prime_import_sg_table = xen_drm_front_gem_import_sg_table,
513 .gem_prime_get_sg_table = xen_drm_front_gem_get_sg_table,
514 .gem_prime_vmap = xen_drm_front_gem_prime_vmap,
515 .gem_prime_vunmap = xen_drm_front_gem_prime_vunmap,
516 .gem_prime_mmap = xen_drm_front_gem_prime_mmap,
517 .dumb_create = xen_drm_drv_dumb_create,
518 .fops = &xen_drm_dev_fops,
519 .name = "xendrm-du",
520 .desc = "Xen PV DRM Display Unit",
521 .date = "20180221",
522 .major = 1,
523 .minor = 0,
524
525};
526
527static int xen_drm_drv_init(struct xen_drm_front_info *front_info)
528{
529 struct device *dev = &front_info->xb_dev->dev;
530 struct xen_drm_front_drm_info *drm_info;
531 struct drm_device *drm_dev;
532 int ret;
533
534 DRM_INFO("Creating %s\n", xen_drm_driver.desc);
535
536 drm_info = kzalloc(sizeof(*drm_info), GFP_KERNEL);
537 if (!drm_info) {
538 ret = -ENOMEM;
539 goto fail;
540 }
541
542 drm_info->front_info = front_info;
543 front_info->drm_info = drm_info;
544
545 drm_dev = drm_dev_alloc(&xen_drm_driver, dev);
546 if (!drm_dev) {
547 ret = -ENOMEM;
548 goto fail;
549 }
550
551 drm_info->drm_dev = drm_dev;
552
553 drm_dev->dev_private = drm_info;
554
555 ret = xen_drm_front_kms_init(drm_info);
556 if (ret) {
557 DRM_ERROR("Failed to initialize DRM/KMS, ret %d\n", ret);
558 goto fail_modeset;
559 }
560
561 ret = drm_dev_register(drm_dev, 0);
562 if (ret)
563 goto fail_register;
564
565 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
566 xen_drm_driver.name, xen_drm_driver.major,
567 xen_drm_driver.minor, xen_drm_driver.patchlevel,
568 xen_drm_driver.date, drm_dev->primary->index);
569
570 return 0;
571
572fail_register:
573 drm_dev_unregister(drm_dev);
574fail_modeset:
575 drm_kms_helper_poll_fini(drm_dev);
576 drm_mode_config_cleanup(drm_dev);
577fail:
578 kfree(drm_info);
579 return ret;
580}
581
582static void xen_drm_drv_fini(struct xen_drm_front_info *front_info)
583{
584 struct xen_drm_front_drm_info *drm_info = front_info->drm_info;
585 struct drm_device *dev;
586
587 if (!drm_info)
588 return;
589
590 dev = drm_info->drm_dev;
591 if (!dev)
592 return;
593
594 /* Nothing to do if device is already unplugged */
595 if (drm_dev_is_unplugged(dev))
596 return;
597
598 drm_kms_helper_poll_fini(dev);
599 drm_dev_unplug(dev);
600
601 front_info->drm_info = NULL;
602
603 xen_drm_front_evtchnl_free_all(front_info);
604 dbuf_free_all(&front_info->dbuf_list);
605
606 /*
607 * If we are not using backend allocated buffers, then tell the
608 * backend we are ready to (re)initialize. Otherwise, wait for
609 * drm_driver.release.
610 */
611 if (!front_info->cfg.be_alloc)
612 xenbus_switch_state(front_info->xb_dev,
613 XenbusStateInitialising);
614}
615
616static int displback_initwait(struct xen_drm_front_info *front_info)
617{
618 struct xen_drm_front_cfg *cfg = &front_info->cfg;
619 int ret;
620
621 cfg->front_info = front_info;
622 ret = xen_drm_front_cfg_card(front_info, cfg);
623 if (ret < 0)
624 return ret;
625
626 DRM_INFO("Have %d conector(s)\n", cfg->num_connectors);
627 /* Create event channels for all connectors and publish */
628 ret = xen_drm_front_evtchnl_create_all(front_info);
629 if (ret < 0)
630 return ret;
631
632 return xen_drm_front_evtchnl_publish_all(front_info);
633}
634
635static int displback_connect(struct xen_drm_front_info *front_info)
636{
637 xen_drm_front_evtchnl_set_state(front_info, EVTCHNL_STATE_CONNECTED);
638 return xen_drm_drv_init(front_info);
639}
640
641static void displback_disconnect(struct xen_drm_front_info *front_info)
642{
643 if (!front_info->drm_info)
644 return;
645
646 /* Tell the backend to wait until we release the DRM driver. */
647 xenbus_switch_state(front_info->xb_dev, XenbusStateReconfiguring);
648
649 xen_drm_drv_fini(front_info);
650}
651
652static void displback_changed(struct xenbus_device *xb_dev,
653 enum xenbus_state backend_state)
654{
655 struct xen_drm_front_info *front_info = dev_get_drvdata(&xb_dev->dev);
656 int ret;
657
658 DRM_DEBUG("Backend state is %s, front is %s\n",
659 xenbus_strstate(backend_state),
660 xenbus_strstate(xb_dev->state));
661
662 switch (backend_state) {
663 case XenbusStateReconfiguring:
664 /* fall through */
665 case XenbusStateReconfigured:
666 /* fall through */
667 case XenbusStateInitialised:
668 break;
669
670 case XenbusStateInitialising:
671 if (xb_dev->state == XenbusStateReconfiguring)
672 break;
673
674 /* recovering after backend unexpected closure */
675 displback_disconnect(front_info);
676 break;
677
678 case XenbusStateInitWait:
679 if (xb_dev->state == XenbusStateReconfiguring)
680 break;
681
682 /* recovering after backend unexpected closure */
683 displback_disconnect(front_info);
684 if (xb_dev->state != XenbusStateInitialising)
685 break;
686
687 ret = displback_initwait(front_info);
688 if (ret < 0)
689 xenbus_dev_fatal(xb_dev, ret, "initializing frontend");
690 else
691 xenbus_switch_state(xb_dev, XenbusStateInitialised);
692 break;
693
694 case XenbusStateConnected:
695 if (xb_dev->state != XenbusStateInitialised)
696 break;
697
698 ret = displback_connect(front_info);
699 if (ret < 0) {
700 displback_disconnect(front_info);
701 xenbus_dev_fatal(xb_dev, ret, "connecting backend");
702 } else {
703 xenbus_switch_state(xb_dev, XenbusStateConnected);
704 }
705 break;
706
707 case XenbusStateClosing:
708 /*
709 * in this state backend starts freeing resources,
710 * so let it go into closed state, so we can also
711 * remove ours
712 */
713 break;
714
715 case XenbusStateUnknown:
716 /* fall through */
717 case XenbusStateClosed:
718 if (xb_dev->state == XenbusStateClosed)
719 break;
720
721 displback_disconnect(front_info);
722 break;
723 }
724}
725
726static int xen_drv_probe(struct xenbus_device *xb_dev,
727 const struct xenbus_device_id *id)
728{
729 struct xen_drm_front_info *front_info;
730 struct device *dev = &xb_dev->dev;
731 int ret;
732
733 /*
734 * The device is not spawn from a device tree, so arch_setup_dma_ops
735 * is not called, thus leaving the device with dummy DMA ops.
736 * This makes the device return error on PRIME buffer import, which
737 * is not correct: to fix this call of_dma_configure() with a NULL
738 * node to set default DMA ops.
739 */
740 dev->bus->force_dma = true;
741 dev->coherent_dma_mask = DMA_BIT_MASK(32);
742 ret = of_dma_configure(dev, NULL);
743 if (ret < 0) {
744 DRM_ERROR("Cannot setup DMA ops, ret %d", ret);
745 return ret;
746 }
747
748 front_info = devm_kzalloc(&xb_dev->dev,
749 sizeof(*front_info), GFP_KERNEL);
750 if (!front_info)
751 return -ENOMEM;
752
753 front_info->xb_dev = xb_dev;
754 spin_lock_init(&front_info->io_lock);
755 INIT_LIST_HEAD(&front_info->dbuf_list);
756 dev_set_drvdata(&xb_dev->dev, front_info);
757
758 return xenbus_switch_state(xb_dev, XenbusStateInitialising);
759}
760
761static int xen_drv_remove(struct xenbus_device *dev)
762{
763 struct xen_drm_front_info *front_info = dev_get_drvdata(&dev->dev);
764 int to = 100;
765
766 xenbus_switch_state(dev, XenbusStateClosing);
767
768 /*
769 * On driver removal it is disconnected from XenBus,
770 * so no backend state change events come via .otherend_changed
771 * callback. This prevents us from exiting gracefully, e.g.
772 * signaling the backend to free event channels, waiting for its
773 * state to change to XenbusStateClosed and cleaning at our end.
774 * Normally when front driver removed backend will finally go into
775 * XenbusStateInitWait state.
776 *
777 * Workaround: read backend's state manually and wait with time-out.
778 */
779 while ((xenbus_read_unsigned(front_info->xb_dev->otherend, "state",
780 XenbusStateUnknown) != XenbusStateInitWait) &&
781 to--)
782 msleep(10);
783
784 if (!to) {
785 unsigned int state;
786
787 state = xenbus_read_unsigned(front_info->xb_dev->otherend,
788 "state", XenbusStateUnknown);
789 DRM_ERROR("Backend state is %s while removing driver\n",
790 xenbus_strstate(state));
791 }
792
793 xen_drm_drv_fini(front_info);
794 xenbus_frontend_closed(dev);
795 return 0;
796}
797
798static const struct xenbus_device_id xen_driver_ids[] = {
799 { XENDISPL_DRIVER_NAME },
800 { "" }
801};
802
803static struct xenbus_driver xen_driver = {
804 .ids = xen_driver_ids,
805 .probe = xen_drv_probe,
806 .remove = xen_drv_remove,
807 .otherend_changed = displback_changed,
808};
809
810static int __init xen_drv_init(void)
811{
812 /* At the moment we only support case with XEN_PAGE_SIZE == PAGE_SIZE */
813 if (XEN_PAGE_SIZE != PAGE_SIZE) {
814 DRM_ERROR(XENDISPL_DRIVER_NAME ": different kernel and Xen page sizes are not supported: XEN_PAGE_SIZE (%lu) != PAGE_SIZE (%lu)\n",
815 XEN_PAGE_SIZE, PAGE_SIZE);
816 return -ENODEV;
817 }
818
819 if (!xen_domain())
820 return -ENODEV;
821
822 if (!xen_has_pv_devices())
823 return -ENODEV;
824
825 DRM_INFO("Registering XEN PV " XENDISPL_DRIVER_NAME "\n");
826 return xenbus_register_frontend(&xen_driver);
827}
828
829static void __exit xen_drv_fini(void)
830{
831 DRM_INFO("Unregistering XEN PV " XENDISPL_DRIVER_NAME "\n");
832 xenbus_unregister_driver(&xen_driver);
833}
834
835module_init(xen_drv_init);
836module_exit(xen_drv_fini);
837
838MODULE_DESCRIPTION("Xen para-virtualized display device frontend");
839MODULE_LICENSE("GPL");
840MODULE_ALIAS("xen:" XENDISPL_DRIVER_NAME);
diff --git a/drivers/gpu/drm/xen/xen_drm_front.h b/drivers/gpu/drm/xen/xen_drm_front.h
new file mode 100644
index 000000000000..2c2479b571ae
--- /dev/null
+++ b/drivers/gpu/drm/xen/xen_drm_front.h
@@ -0,0 +1,158 @@
1/* SPDX-License-Identifier: GPL-2.0 OR MIT */
2
3/*
4 * Xen para-virtual DRM device
5 *
6 * Copyright (C) 2016-2018 EPAM Systems Inc.
7 *
8 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
9 */
10
11#ifndef __XEN_DRM_FRONT_H_
12#define __XEN_DRM_FRONT_H_
13
14#include <drm/drmP.h>
15#include <drm/drm_simple_kms_helper.h>
16
17#include <linux/scatterlist.h>
18
19#include "xen_drm_front_cfg.h"
20
21/**
22 * DOC: Driver modes of operation in terms of display buffers used
23 *
24 * Depending on the requirements for the para-virtualized environment, namely
25 * requirements dictated by the accompanying DRM/(v)GPU drivers running in both
26 * host and guest environments, display buffers can be allocated by either
27 * frontend driver or backend.
28 */
29
30/**
31 * DOC: Buffers allocated by the frontend driver
32 *
33 * In this mode of operation driver allocates buffers from system memory.
34 *
35 * Note! If used with accompanying DRM/(v)GPU drivers this mode of operation
36 * may require IOMMU support on the platform, so accompanying DRM/vGPU
37 * hardware can still reach display buffer memory while importing PRIME
38 * buffers from the frontend driver.
39 */
40
41/**
42 * DOC: Buffers allocated by the backend
43 *
44 * This mode of operation is run-time configured via guest domain configuration
45 * through XenStore entries.
46 *
47 * For systems which do not provide IOMMU support, but having specific
48 * requirements for display buffers it is possible to allocate such buffers
49 * at backend side and share those with the frontend.
50 * For example, if host domain is 1:1 mapped and has DRM/GPU hardware expecting
51 * physically contiguous memory, this allows implementing zero-copying
52 * use-cases.
53 *
54 * Note, while using this scenario the following should be considered:
55 *
56 * #. If guest domain dies then pages/grants received from the backend
57 * cannot be claimed back
58 *
59 * #. Misbehaving guest may send too many requests to the
60 * backend exhausting its grant references and memory
61 * (consider this from security POV)
62 */
63
64/**
65 * DOC: Driver limitations
66 *
67 * #. Only primary plane without additional properties is supported.
68 *
69 * #. Only one video mode per connector supported which is configured
70 * via XenStore.
71 *
72 * #. All CRTCs operate at fixed frequency of 60Hz.
73 */
74
75/* timeout in ms to wait for backend to respond */
76#define XEN_DRM_FRONT_WAIT_BACK_MS 3000
77
78#ifndef GRANT_INVALID_REF
79/*
80 * Note on usage of grant reference 0 as invalid grant reference:
81 * grant reference 0 is valid, but never exposed to a PV driver,
82 * because of the fact it is already in use/reserved by the PV console.
83 */
84#define GRANT_INVALID_REF 0
85#endif
86
/* Per xenbus-device state of the frontend driver */
struct xen_drm_front_info {
	struct xenbus_device *xb_dev;
	struct xen_drm_front_drm_info *drm_info;

	/* to protect data between backend IO code and interrupt handler */
	spinlock_t io_lock;

	/* one request/event channel pair per configured connector */
	int num_evt_pairs;
	struct xen_drm_front_evtchnl_pair *evt_pairs;
	struct xen_drm_front_cfg cfg;

	/* display buffers */
	struct list_head dbuf_list;
};
101
/* Per-connector display pipeline state */
struct xen_drm_front_drm_pipeline {
	struct xen_drm_front_drm_info *drm_info;

	/* connector index within the XenStore configuration */
	int index;

	struct drm_simple_display_pipe pipe;

	struct drm_connector conn;
	/* These are only for connector mode checking */
	int width, height;

	/* page-flip event to deliver once the flip completes */
	struct drm_pending_vblank_event *pending_event;

	/* NOTE(review): presumably the page-flip time-out worker — confirm in kms.c */
	struct delayed_work pflip_to_worker;

	bool conn_connected;
};
119
/* DRM device state shared by all pipelines of the card */
struct xen_drm_front_drm_info {
	struct xen_drm_front_info *front_info;
	struct drm_device *drm_dev;

	struct xen_drm_front_drm_pipeline pipeline[XEN_DRM_FRONT_MAX_CRTCS];
};
126
127static inline u64 xen_drm_front_fb_to_cookie(struct drm_framebuffer *fb)
128{
129 return (u64)fb;
130}
131
132static inline u64 xen_drm_front_dbuf_to_cookie(struct drm_gem_object *gem_obj)
133{
134 return (u64)gem_obj;
135}
136
137int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline,
138 u32 x, u32 y, u32 width, u32 height,
139 u32 bpp, u64 fb_cookie);
140
141int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
142 u64 dbuf_cookie, u32 width, u32 height,
143 u32 bpp, u64 size, struct page **pages);
144
145int xen_drm_front_fb_attach(struct xen_drm_front_info *front_info,
146 u64 dbuf_cookie, u64 fb_cookie, u32 width,
147 u32 height, u32 pixel_format);
148
149int xen_drm_front_fb_detach(struct xen_drm_front_info *front_info,
150 u64 fb_cookie);
151
152int xen_drm_front_page_flip(struct xen_drm_front_info *front_info,
153 int conn_idx, u64 fb_cookie);
154
155void xen_drm_front_on_frame_done(struct xen_drm_front_info *front_info,
156 int conn_idx, u64 fb_cookie);
157
158#endif /* __XEN_DRM_FRONT_H_ */
diff --git a/drivers/gpu/drm/xen/xen_drm_front_cfg.c b/drivers/gpu/drm/xen/xen_drm_front_cfg.c
new file mode 100644
index 000000000000..5baf2b9de93c
--- /dev/null
+++ b/drivers/gpu/drm/xen/xen_drm_front_cfg.c
@@ -0,0 +1,77 @@
1// SPDX-License-Identifier: GPL-2.0 OR MIT
2
3/*
4 * Xen para-virtual DRM device
5 *
6 * Copyright (C) 2016-2018 EPAM Systems Inc.
7 *
8 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
9 */
10
11#include <drm/drmP.h>
12
13#include <linux/device.h>
14
15#include <xen/interface/io/displif.h>
16#include <xen/xenbus.h>
17
18#include "xen_drm_front.h"
19#include "xen_drm_front_cfg.h"
20
21static int cfg_connector(struct xen_drm_front_info *front_info,
22 struct xen_drm_front_cfg_connector *connector,
23 const char *path, int index)
24{
25 char *connector_path;
26
27 connector_path = devm_kasprintf(&front_info->xb_dev->dev,
28 GFP_KERNEL, "%s/%d", path, index);
29 if (!connector_path)
30 return -ENOMEM;
31
32 if (xenbus_scanf(XBT_NIL, connector_path, XENDISPL_FIELD_RESOLUTION,
33 "%d" XENDISPL_RESOLUTION_SEPARATOR "%d",
34 &connector->width, &connector->height) < 0) {
35 /* either no entry configured or wrong resolution set */
36 connector->width = 0;
37 connector->height = 0;
38 return -EINVAL;
39 }
40
41 connector->xenstore_path = connector_path;
42
43 DRM_INFO("Connector %s: resolution %dx%d\n",
44 connector_path, connector->width, connector->height);
45 return 0;
46}
47
/*
 * Read the whole card configuration from XenStore: the backend-allocation
 * flag and one connector entry per index until the first missing/invalid
 * one.  Returns 0 on success or -ENODEV if no connector is configured.
 */
int xen_drm_front_cfg_card(struct xen_drm_front_info *front_info,
			   struct xen_drm_front_cfg *cfg)
{
	struct xenbus_device *xb_dev = front_info->xb_dev;
	int ret, i;

	if (xenbus_read_unsigned(front_info->xb_dev->nodename,
				 XENDISPL_FIELD_BE_ALLOC, 0)) {
		DRM_INFO("Backend can provide display buffers\n");
		cfg->be_alloc = true;
	}

	/* connectors are numbered consecutively; stop at the first gap */
	cfg->num_connectors = 0;
	for (i = 0; i < ARRAY_SIZE(cfg->connectors); i++) {
		ret = cfg_connector(front_info, &cfg->connectors[i],
				    xb_dev->nodename, i);
		if (ret < 0)
			break;
		cfg->num_connectors++;
	}

	if (!cfg->num_connectors) {
		DRM_ERROR("No connector(s) configured at %s\n",
			  xb_dev->nodename);
		return -ENODEV;
	}

	return 0;
}
77
diff --git a/drivers/gpu/drm/xen/xen_drm_front_cfg.h b/drivers/gpu/drm/xen/xen_drm_front_cfg.h
new file mode 100644
index 000000000000..aa8490ba9146
--- /dev/null
+++ b/drivers/gpu/drm/xen/xen_drm_front_cfg.h
@@ -0,0 +1,37 @@
1/* SPDX-License-Identifier: GPL-2.0 OR MIT */
2
3/*
4 * Xen para-virtual DRM device
5 *
6 * Copyright (C) 2016-2018 EPAM Systems Inc.
7 *
8 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
9 */
10
11#ifndef __XEN_DRM_FRONT_CFG_H_
12#define __XEN_DRM_FRONT_CFG_H_
13
14#include <linux/types.h>
15
16#define XEN_DRM_FRONT_MAX_CRTCS 4
17
18struct xen_drm_front_cfg_connector {
19 int width;
20 int height;
21 char *xenstore_path;
22};
23
24struct xen_drm_front_cfg {
25 struct xen_drm_front_info *front_info;
26 /* number of connectors in this configuration */
27 int num_connectors;
28 /* connector configurations */
29 struct xen_drm_front_cfg_connector connectors[XEN_DRM_FRONT_MAX_CRTCS];
30 /* set if dumb buffers are allocated externally on backend side */
31 bool be_alloc;
32};
33
34int xen_drm_front_cfg_card(struct xen_drm_front_info *front_info,
35 struct xen_drm_front_cfg *cfg);
36
37#endif /* __XEN_DRM_FRONT_CFG_H_ */
diff --git a/drivers/gpu/drm/xen/xen_drm_front_conn.c b/drivers/gpu/drm/xen/xen_drm_front_conn.c
new file mode 100644
index 000000000000..c91ae532fa55
--- /dev/null
+++ b/drivers/gpu/drm/xen/xen_drm_front_conn.c
@@ -0,0 +1,115 @@
1// SPDX-License-Identifier: GPL-2.0 OR MIT
2
3/*
4 * Xen para-virtual DRM device
5 *
6 * Copyright (C) 2016-2018 EPAM Systems Inc.
7 *
8 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
9 */
10
11#include <drm/drm_atomic_helper.h>
12#include <drm/drm_crtc_helper.h>
13
14#include <video/videomode.h>
15
16#include "xen_drm_front.h"
17#include "xen_drm_front_conn.h"
18#include "xen_drm_front_kms.h"
19
/* Map a connector back to the pipeline embedding it */
static struct xen_drm_front_drm_pipeline *
to_xen_drm_pipeline(struct drm_connector *connector)
{
	return container_of(connector, struct xen_drm_front_drm_pipeline, conn);
}

/* pixel formats supported by the primary plane */
static const u32 plane_formats[] = {
	DRM_FORMAT_RGB565,
	DRM_FORMAT_RGB888,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_XRGB4444,
	DRM_FORMAT_ARGB4444,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_ARGB1555,
};

/* Return the plane format table and store its length in *format_count */
const u32 *xen_drm_front_conn_get_formats(int *format_count)
{
	*format_count = ARRAY_SIZE(plane_formats);
	return plane_formats;
}
42
/*
 * .detect_ctx callback: report the cached connection state, forcing it to
 * disconnected once the DRM device has been unplugged (backend gone).
 * Returns a connector_status_* value as required by the helper contract.
 */
static int connector_detect(struct drm_connector *connector,
			    struct drm_modeset_acquire_ctx *ctx,
			    bool force)
{
	struct xen_drm_front_drm_pipeline *pipeline =
			to_xen_drm_pipeline(connector);

	if (drm_dev_is_unplugged(connector->dev))
		pipeline->conn_connected = false;

	return pipeline->conn_connected ? connector_status_connected :
			connector_status_disconnected;
}
56
/* all CRTCs run at a fixed 60Hz refresh rate */
#define XEN_DRM_CRTC_VREFRESH_HZ	60

/*
 * .get_modes callback: expose exactly one mode built from the resolution
 * configured in XenStore.  Returns the number of modes added (1), or 0 if
 * the mode object could not be allocated.
 */
static int connector_get_modes(struct drm_connector *connector)
{
	struct xen_drm_front_drm_pipeline *pipeline =
			to_xen_drm_pipeline(connector);
	struct drm_display_mode *mode;
	struct videomode videomode;
	int width, height;

	mode = drm_mode_create(connector->dev);
	if (!mode)
		return 0;

	/* all porches/sync lengths stay zero, so width/height == active size */
	memset(&videomode, 0, sizeof(videomode));
	videomode.hactive = pipeline->width;
	videomode.vactive = pipeline->height;
	width = videomode.hactive + videomode.hfront_porch +
			videomode.hback_porch + videomode.hsync_len;
	height = videomode.vactive + videomode.vfront_porch +
			videomode.vback_porch + videomode.vsync_len;
	/* pixel clock (Hz) for the fixed refresh rate */
	videomode.pixelclock = width * height * XEN_DRM_CRTC_VREFRESH_HZ;
	mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;

	drm_display_mode_from_videomode(&videomode, mode);
	drm_mode_probed_add(connector, mode);
	return 1;
}
85
static const struct drm_connector_helper_funcs connector_helper_funcs = {
	.get_modes = connector_get_modes,
	.detect_ctx = connector_detect,
};

/* all state handling is delegated to the atomic helpers */
static const struct drm_connector_funcs connector_funcs = {
	.dpms = drm_helper_connector_dpms,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = drm_connector_cleanup,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};

/*
 * Initialize a virtual connector: attach the helper funcs, mark it
 * connected and enable polling so hot (un)plug is noticed.
 * Returns the result of drm_connector_init().
 */
int xen_drm_front_conn_init(struct xen_drm_front_drm_info *drm_info,
			    struct drm_connector *connector)
{
	struct xen_drm_front_drm_pipeline *pipeline =
			to_xen_drm_pipeline(connector);

	drm_connector_helper_add(connector, &connector_helper_funcs);

	pipeline->conn_connected = true;

	connector->polled = DRM_CONNECTOR_POLL_CONNECT |
			DRM_CONNECTOR_POLL_DISCONNECT;

	return drm_connector_init(drm_info->drm_dev, connector,
				  &connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL);
}
diff --git a/drivers/gpu/drm/xen/xen_drm_front_conn.h b/drivers/gpu/drm/xen/xen_drm_front_conn.h
new file mode 100644
index 000000000000..39de7cf5adbe
--- /dev/null
+++ b/drivers/gpu/drm/xen/xen_drm_front_conn.h
@@ -0,0 +1,27 @@
1/* SPDX-License-Identifier: GPL-2.0 OR MIT */
2
3/*
4 * Xen para-virtual DRM device
5 *
6 * Copyright (C) 2016-2018 EPAM Systems Inc.
7 *
8 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
9 */
10
11#ifndef __XEN_DRM_FRONT_CONN_H_
12#define __XEN_DRM_FRONT_CONN_H_
13
14#include <drm/drmP.h>
15#include <drm/drm_crtc.h>
16#include <drm/drm_encoder.h>
17
18#include <linux/wait.h>
19
20struct xen_drm_front_drm_info;
21
22int xen_drm_front_conn_init(struct xen_drm_front_drm_info *drm_info,
23 struct drm_connector *connector);
24
25const u32 *xen_drm_front_conn_get_formats(int *format_count);
26
27#endif /* __XEN_DRM_FRONT_CONN_H_ */
diff --git a/drivers/gpu/drm/xen/xen_drm_front_evtchnl.c b/drivers/gpu/drm/xen/xen_drm_front_evtchnl.c
new file mode 100644
index 000000000000..945226a95e9b
--- /dev/null
+++ b/drivers/gpu/drm/xen/xen_drm_front_evtchnl.c
@@ -0,0 +1,387 @@
1// SPDX-License-Identifier: GPL-2.0 OR MIT
2
3/*
4 * Xen para-virtual DRM device
5 *
6 * Copyright (C) 2016-2018 EPAM Systems Inc.
7 *
8 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
9 */
10
11#include <drm/drmP.h>
12
13#include <linux/errno.h>
14#include <linux/irq.h>
15
16#include <xen/xenbus.h>
17#include <xen/events.h>
18#include <xen/grant_table.h>
19
20#include "xen_drm_front.h"
21#include "xen_drm_front_evtchnl.h"
22
/*
 * Interrupt handler for the request/response ("ctrl") ring: consume all
 * pending responses, publish the status of the matching request and wake
 * the waiter.  Serialized against request submission via io_lock.
 */
static irqreturn_t evtchnl_interrupt_ctrl(int irq, void *dev_id)
{
	struct xen_drm_front_evtchnl *evtchnl = dev_id;
	struct xen_drm_front_info *front_info = evtchnl->front_info;
	struct xendispl_resp *resp;
	RING_IDX i, rp;
	unsigned long flags;

	/* ignore interrupts while the channel is not fully connected */
	if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
		return IRQ_HANDLED;

	spin_lock_irqsave(&front_info->io_lock, flags);

again:
	rp = evtchnl->u.req.ring.sring->rsp_prod;
	/* ensure we see queued responses up to rp */
	virt_rmb();

	for (i = evtchnl->u.req.ring.rsp_cons; i != rp; i++) {
		resp = RING_GET_RESPONSE(&evtchnl->u.req.ring, i);
		/* skip responses that do not match the outstanding request id */
		if (unlikely(resp->id != evtchnl->evt_id))
			continue;

		switch (resp->operation) {
		case XENDISPL_OP_PG_FLIP:
		case XENDISPL_OP_FB_ATTACH:
		case XENDISPL_OP_FB_DETACH:
		case XENDISPL_OP_DBUF_CREATE:
		case XENDISPL_OP_DBUF_DESTROY:
		case XENDISPL_OP_SET_CONFIG:
			evtchnl->u.req.resp_status = resp->status;
			complete(&evtchnl->u.req.completion);
			break;

		default:
			DRM_ERROR("Operation %d is not supported\n",
				  resp->operation);
			break;
		}
	}

	evtchnl->u.req.ring.rsp_cons = i;

	/* re-check for responses that raced with the consumer update */
	if (i != evtchnl->u.req.ring.req_prod_pvt) {
		int more_to_do;

		RING_FINAL_CHECK_FOR_RESPONSES(&evtchnl->u.req.ring,
					       more_to_do);
		if (more_to_do)
			goto again;
	} else {
		evtchnl->u.req.ring.sring->rsp_event = i + 1;
	}

	spin_unlock_irqrestore(&front_info->io_lock, flags);
	return IRQ_HANDLED;
}
80
/*
 * Interrupt handler for the incoming-event ("evt") channel: consume all
 * queued backend events from the shared event page; currently only
 * page-flip completion events are handled.
 */
static irqreturn_t evtchnl_interrupt_evt(int irq, void *dev_id)
{
	struct xen_drm_front_evtchnl *evtchnl = dev_id;
	struct xen_drm_front_info *front_info = evtchnl->front_info;
	struct xendispl_event_page *page = evtchnl->u.evt.page;
	u32 cons, prod;
	unsigned long flags;

	/* ignore interrupts while the channel is not fully connected */
	if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
		return IRQ_HANDLED;

	spin_lock_irqsave(&front_info->io_lock, flags);

	prod = page->in_prod;
	/* ensure we see ring contents up to prod */
	virt_rmb();
	if (prod == page->in_cons)
		goto out;

	for (cons = page->in_cons; cons != prod; cons++) {
		struct xendispl_evt *event;

		event = &XENDISPL_IN_RING_REF(page, cons);
		/* skip events arriving out of sequence; evt_id tracks the next one */
		if (unlikely(event->id != evtchnl->evt_id++))
			continue;

		switch (event->type) {
		case XENDISPL_EVT_PG_FLIP:
			xen_drm_front_on_frame_done(front_info, evtchnl->index,
						    event->op.pg_flip.fb_cookie);
			break;
		}
	}
	page->in_cons = cons;
	/* ensure ring contents */
	virt_wmb();

out:
	spin_unlock_irqrestore(&front_info->io_lock, flags);
	return IRQ_HANDLED;
}
122
/*
 * Tear down one event channel: wake any waiter with -EIO, unbind the IRQ,
 * free the Xen event channel, end grant access on the shared page and
 * clear the descriptor.  Safe to call on a partially initialized channel.
 */
static void evtchnl_free(struct xen_drm_front_info *front_info,
			 struct xen_drm_front_evtchnl *evtchnl)
{
	unsigned long page = 0;

	if (evtchnl->type == EVTCHNL_TYPE_REQ)
		page = (unsigned long)evtchnl->u.req.ring.sring;
	else if (evtchnl->type == EVTCHNL_TYPE_EVT)
		page = (unsigned long)evtchnl->u.evt.page;
	/* nothing was allocated for this channel */
	if (!page)
		return;

	evtchnl->state = EVTCHNL_STATE_DISCONNECTED;

	if (evtchnl->type == EVTCHNL_TYPE_REQ) {
		/* release all who still waits for response if any */
		evtchnl->u.req.resp_status = -EIO;
		complete_all(&evtchnl->u.req.completion);
	}

	if (evtchnl->irq)
		unbind_from_irqhandler(evtchnl->irq, evtchnl);

	if (evtchnl->port)
		xenbus_free_evtchn(front_info->xb_dev, evtchnl->port);

	/* end access and free the page */
	if (evtchnl->gref != GRANT_INVALID_REF)
		gnttab_end_foreign_access(evtchnl->gref, 0, page);

	memset(evtchnl, 0, sizeof(*evtchnl));
}
155
/*
 * Allocate and wire up one event channel of the given type: a shared page
 * (request ring or event page), a grant reference for it, a Xen event
 * channel and its IRQ handler.  On failure the channel is left in a state
 * evtchnl_free() can safely clean up.
 */
static int evtchnl_alloc(struct xen_drm_front_info *front_info, int index,
			 struct xen_drm_front_evtchnl *evtchnl,
			 enum xen_drm_front_evtchnl_type type)
{
	struct xenbus_device *xb_dev = front_info->xb_dev;
	unsigned long page;
	grant_ref_t gref;
	irq_handler_t handler;
	int ret;

	memset(evtchnl, 0, sizeof(*evtchnl));
	evtchnl->type = type;
	evtchnl->index = index;
	evtchnl->front_info = front_info;
	evtchnl->state = EVTCHNL_STATE_DISCONNECTED;
	evtchnl->gref = GRANT_INVALID_REF;

	page = get_zeroed_page(GFP_NOIO | __GFP_HIGH);
	if (!page) {
		ret = -ENOMEM;
		goto fail;
	}

	if (type == EVTCHNL_TYPE_REQ) {
		struct xen_displif_sring *sring;

		/* request channel: shared ring + completion for the waiter */
		init_completion(&evtchnl->u.req.completion);
		mutex_init(&evtchnl->u.req.req_io_lock);
		sring = (struct xen_displif_sring *)page;
		SHARED_RING_INIT(sring);
		FRONT_RING_INIT(&evtchnl->u.req.ring, sring, XEN_PAGE_SIZE);

		ret = xenbus_grant_ring(xb_dev, sring, 1, &gref);
		if (ret < 0) {
			evtchnl->u.req.ring.sring = NULL;
			free_page(page);
			goto fail;
		}

		handler = evtchnl_interrupt_ctrl;
	} else {
		/* event channel: grant the raw event page to the backend */
		ret = gnttab_grant_foreign_access(xb_dev->otherend_id,
						  virt_to_gfn((void *)page), 0);
		if (ret < 0) {
			free_page(page);
			goto fail;
		}

		evtchnl->u.evt.page = (struct xendispl_event_page *)page;
		gref = ret;
		handler = evtchnl_interrupt_evt;
	}
	evtchnl->gref = gref;

	ret = xenbus_alloc_evtchn(xb_dev, &evtchnl->port);
	if (ret < 0)
		goto fail;

	ret = bind_evtchn_to_irqhandler(evtchnl->port,
					handler, 0, xb_dev->devicetype,
					evtchnl);
	if (ret < 0)
		goto fail;

	evtchnl->irq = ret;
	return 0;

fail:
	DRM_ERROR("Failed to allocate ring: %d\n", ret);
	return ret;
}
227
/*
 * Allocate one request/event channel pair per configured connector.
 * On any failure everything allocated so far is freed again.
 * Returns 0 on success or a negative errno.
 */
int xen_drm_front_evtchnl_create_all(struct xen_drm_front_info *front_info)
{
	struct xen_drm_front_cfg *cfg;
	int ret, conn;

	cfg = &front_info->cfg;

	front_info->evt_pairs =
			kcalloc(cfg->num_connectors,
				sizeof(struct xen_drm_front_evtchnl_pair),
				GFP_KERNEL);
	if (!front_info->evt_pairs) {
		ret = -ENOMEM;
		goto fail;
	}

	for (conn = 0; conn < cfg->num_connectors; conn++) {
		ret = evtchnl_alloc(front_info, conn,
				    &front_info->evt_pairs[conn].req,
				    EVTCHNL_TYPE_REQ);
		if (ret < 0) {
			DRM_ERROR("Error allocating control channel\n");
			goto fail;
		}

		ret = evtchnl_alloc(front_info, conn,
				    &front_info->evt_pairs[conn].evt,
				    EVTCHNL_TYPE_EVT);
		if (ret < 0) {
			DRM_ERROR("Error allocating in-event channel\n");
			goto fail;
		}
	}
	front_info->num_evt_pairs = cfg->num_connectors;
	return 0;

fail:
	xen_drm_front_evtchnl_free_all(front_info);
	return ret;
}
268
/*
 * Publish one channel's grant reference and event channel port under the
 * given XenStore path, within the caller's transaction.
 */
static int evtchnl_publish(struct xenbus_transaction xbt,
			   struct xen_drm_front_evtchnl *evtchnl,
			   const char *path, const char *node_ring,
			   const char *node_chnl)
{
	struct xenbus_device *xb_dev = evtchnl->front_info->xb_dev;
	int ret;

	/* write shared page grant reference */
	ret = xenbus_printf(xbt, path, node_ring, "%u", evtchnl->gref);
	if (ret < 0) {
		xenbus_dev_error(xb_dev, ret, "writing ring-ref");
		return ret;
	}

	/* write event channel port number */
	ret = xenbus_printf(xbt, path, node_chnl, "%u", evtchnl->port);
	if (ret < 0) {
		xenbus_dev_error(xb_dev, ret, "writing event channel");
		return ret;
	}

	return 0;
}
293
/*
 * Publish all channel pairs to XenStore in a single transaction so the
 * backend sees a consistent configuration.  The transaction is retried on
 * -EAGAIN and aborted (and reported fatal) on any other error.
 */
int xen_drm_front_evtchnl_publish_all(struct xen_drm_front_info *front_info)
{
	struct xenbus_transaction xbt;
	struct xen_drm_front_cfg *plat_data;
	int ret, conn;

	plat_data = &front_info->cfg;

again:
	ret = xenbus_transaction_start(&xbt);
	if (ret < 0) {
		xenbus_dev_fatal(front_info->xb_dev, ret,
				 "starting transaction");
		return ret;
	}

	for (conn = 0; conn < plat_data->num_connectors; conn++) {
		ret = evtchnl_publish(xbt, &front_info->evt_pairs[conn].req,
				      plat_data->connectors[conn].xenstore_path,
				      XENDISPL_FIELD_REQ_RING_REF,
				      XENDISPL_FIELD_REQ_CHANNEL);
		if (ret < 0)
			goto fail;

		ret = evtchnl_publish(xbt, &front_info->evt_pairs[conn].evt,
				      plat_data->connectors[conn].xenstore_path,
				      XENDISPL_FIELD_EVT_RING_REF,
				      XENDISPL_FIELD_EVT_CHANNEL);
		if (ret < 0)
			goto fail;
	}

	ret = xenbus_transaction_end(xbt, 0);
	if (ret < 0) {
		/* XenStore asked us to restart the transaction */
		if (ret == -EAGAIN)
			goto again;

		xenbus_dev_fatal(front_info->xb_dev, ret,
				 "completing transaction");
		goto fail_to_end;
	}

	return 0;

fail:
	/* abort the transaction before reporting the failure */
	xenbus_transaction_end(xbt, 1);

fail_to_end:
	xenbus_dev_fatal(front_info->xb_dev, ret, "writing Xen store");
	return ret;
}
345
/*
 * Push the just-written request onto the shared ring and notify the
 * backend if it requested a notification.
 */
void xen_drm_front_evtchnl_flush(struct xen_drm_front_evtchnl *evtchnl)
{
	int notify;

	evtchnl->u.req.ring.req_prod_pvt++;
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&evtchnl->u.req.ring, notify);
	if (notify)
		notify_remote_via_irq(evtchnl->irq);
}

/*
 * Atomically set the state of all channels; the interrupt handlers ignore
 * channels which are not in CONNECTED state.
 */
void xen_drm_front_evtchnl_set_state(struct xen_drm_front_info *front_info,
				     enum xen_drm_front_evtchnl_state state)
{
	unsigned long flags;
	int i;

	if (!front_info->evt_pairs)
		return;

	spin_lock_irqsave(&front_info->io_lock, flags);
	for (i = 0; i < front_info->num_evt_pairs; i++) {
		front_info->evt_pairs[i].req.state = state;
		front_info->evt_pairs[i].evt.state = state;
	}
	spin_unlock_irqrestore(&front_info->io_lock, flags);
}

/* Free every allocated channel pair and the pair array itself */
void xen_drm_front_evtchnl_free_all(struct xen_drm_front_info *front_info)
{
	int i;

	if (!front_info->evt_pairs)
		return;

	for (i = 0; i < front_info->num_evt_pairs; i++) {
		evtchnl_free(front_info, &front_info->evt_pairs[i].req);
		evtchnl_free(front_info, &front_info->evt_pairs[i].evt);
	}

	kfree(front_info->evt_pairs);
	front_info->evt_pairs = NULL;
}
diff --git a/drivers/gpu/drm/xen/xen_drm_front_evtchnl.h b/drivers/gpu/drm/xen/xen_drm_front_evtchnl.h
new file mode 100644
index 000000000000..b0af6994332b
--- /dev/null
+++ b/drivers/gpu/drm/xen/xen_drm_front_evtchnl.h
@@ -0,0 +1,81 @@
1/* SPDX-License-Identifier: GPL-2.0 OR MIT */
2
3/*
4 * Xen para-virtual DRM device
5 *
6 * Copyright (C) 2016-2018 EPAM Systems Inc.
7 *
8 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
9 */
10
11#ifndef __XEN_DRM_FRONT_EVTCHNL_H_
12#define __XEN_DRM_FRONT_EVTCHNL_H_
13
14#include <linux/completion.h>
15#include <linux/types.h>
16
17#include <xen/interface/io/ring.h>
18#include <xen/interface/io/displif.h>
19
20/*
21 * All operations which are not connector oriented use this ctrl event channel,
22 * e.g. fb_attach/destroy which belong to a DRM device, not to a CRTC.
23 */
24#define GENERIC_OP_EVT_CHNL 0
25
26enum xen_drm_front_evtchnl_state {
27 EVTCHNL_STATE_DISCONNECTED,
28 EVTCHNL_STATE_CONNECTED,
29};
30
31enum xen_drm_front_evtchnl_type {
32 EVTCHNL_TYPE_REQ,
33 EVTCHNL_TYPE_EVT,
34};
35
36struct xen_drm_front_drm_info;
37
38struct xen_drm_front_evtchnl {
39 struct xen_drm_front_info *front_info;
40 int gref;
41 int port;
42 int irq;
43 int index;
44 enum xen_drm_front_evtchnl_state state;
45 enum xen_drm_front_evtchnl_type type;
46 /* either response id or incoming event id */
47 u16 evt_id;
48 /* next request id or next expected event id */
49 u16 evt_next_id;
50 union {
51 struct {
52 struct xen_displif_front_ring ring;
53 struct completion completion;
54 /* latest response status */
55 int resp_status;
56 /* serializer for backend IO: request/response */
57 struct mutex req_io_lock;
58 } req;
59 struct {
60 struct xendispl_event_page *page;
61 } evt;
62 } u;
63};
64
65struct xen_drm_front_evtchnl_pair {
66 struct xen_drm_front_evtchnl req;
67 struct xen_drm_front_evtchnl evt;
68};
69
70int xen_drm_front_evtchnl_create_all(struct xen_drm_front_info *front_info);
71
72int xen_drm_front_evtchnl_publish_all(struct xen_drm_front_info *front_info);
73
74void xen_drm_front_evtchnl_flush(struct xen_drm_front_evtchnl *evtchnl);
75
76void xen_drm_front_evtchnl_set_state(struct xen_drm_front_info *front_info,
77 enum xen_drm_front_evtchnl_state state);
78
79void xen_drm_front_evtchnl_free_all(struct xen_drm_front_info *front_info);
80
81#endif /* __XEN_DRM_FRONT_EVTCHNL_H_ */
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
new file mode 100644
index 000000000000..c85bfe7571cb
--- /dev/null
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
@@ -0,0 +1,308 @@
1// SPDX-License-Identifier: GPL-2.0 OR MIT
2
3/*
4 * Xen para-virtual DRM device
5 *
6 * Copyright (C) 2016-2018 EPAM Systems Inc.
7 *
8 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
9 */
10
11#include "xen_drm_front_gem.h"
12
13#include <drm/drmP.h>
14#include <drm/drm_crtc_helper.h>
15#include <drm/drm_fb_helper.h>
16#include <drm/drm_gem.h>
17
18#include <linux/dma-buf.h>
19#include <linux/scatterlist.h>
20#include <linux/shmem_fs.h>
21
22#include <xen/balloon.h>
23
24#include "xen_drm_front.h"
25#include "xen_drm_front_shbuf.h"
26
/* Driver-private GEM object wrapping the common drm_gem_object */
struct xen_gem_object {
	struct drm_gem_object base;

	size_t num_pages;
	struct page **pages;

	/* set for buffers allocated by the backend */
	bool be_alloc;

	/* this is for imported PRIME buffer */
	struct sg_table *sgt_imported;
};

/* Map a generic GEM object back to the wrapping xen_gem_object */
static inline struct xen_gem_object *
to_xen_gem_obj(struct drm_gem_object *gem_obj)
{
	return container_of(gem_obj, struct xen_gem_object, base);
}

/*
 * Allocate the pages[] pointer array (not the pages themselves) sized to
 * describe a buffer of buf_size bytes.  Returns 0 or -ENOMEM.
 */
static int gem_alloc_pages_array(struct xen_gem_object *xen_obj,
				 size_t buf_size)
{
	xen_obj->num_pages = DIV_ROUND_UP(buf_size, PAGE_SIZE);
	xen_obj->pages = kvmalloc_array(xen_obj->num_pages,
					sizeof(struct page *), GFP_KERNEL);
	return !xen_obj->pages ? -ENOMEM : 0;
}

/* Free the pages[] pointer array allocated by gem_alloc_pages_array() */
static void gem_free_pages_array(struct xen_gem_object *xen_obj)
{
	kvfree(xen_obj->pages);
	xen_obj->pages = NULL;
}
60
/*
 * Allocate a bare xen_gem_object and initialize its GEM base for the given
 * size.  Returns the new object or an ERR_PTR() — never NULL.
 */
static struct xen_gem_object *gem_create_obj(struct drm_device *dev,
					     size_t size)
{
	struct xen_gem_object *xen_obj;
	int ret;

	xen_obj = kzalloc(sizeof(*xen_obj), GFP_KERNEL);
	if (!xen_obj)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_object_init(dev, &xen_obj->base, size);
	if (ret < 0) {
		kfree(xen_obj);
		return ERR_PTR(ret);
	}

	return xen_obj;
}
79
80static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
81{
82 struct xen_drm_front_drm_info *drm_info = dev->dev_private;
83 struct xen_gem_object *xen_obj;
84 int ret;
85
86 size = round_up(size, PAGE_SIZE);
87 xen_obj = gem_create_obj(dev, size);
88 if (IS_ERR_OR_NULL(xen_obj))
89 return xen_obj;
90
91 if (drm_info->front_info->cfg.be_alloc) {
92 /*
93 * backend will allocate space for this buffer, so
94 * only allocate array of pointers to pages
95 */
96 ret = gem_alloc_pages_array(xen_obj, size);
97 if (ret < 0)
98 goto fail;
99
100 /*
101 * allocate ballooned pages which will be used to map
102 * grant references provided by the backend
103 */
104 ret = alloc_xenballooned_pages(xen_obj->num_pages,
105 xen_obj->pages);
106 if (ret < 0) {
107 DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n",
108 xen_obj->num_pages, ret);
109 gem_free_pages_array(xen_obj);
110 goto fail;
111 }
112
113 xen_obj->be_alloc = true;
114 return xen_obj;
115 }
116 /*
117 * need to allocate backing pages now, so we can share those
118 * with the backend
119 */
120 xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
121 xen_obj->pages = drm_gem_get_pages(&xen_obj->base);
122 if (IS_ERR_OR_NULL(xen_obj->pages)) {
123 ret = PTR_ERR(xen_obj->pages);
124 xen_obj->pages = NULL;
125 goto fail;
126 }
127
128 return xen_obj;
129
130fail:
131 DRM_ERROR("Failed to allocate buffer with size %zu\n", size);
132 return ERR_PTR(ret);
133}
134
135struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
136 size_t size)
137{
138 struct xen_gem_object *xen_obj;
139
140 xen_obj = gem_create(dev, size);
141 if (IS_ERR_OR_NULL(xen_obj))
142 return ERR_CAST(xen_obj);
143
144 return &xen_obj->base;
145}
146
/*
 * Destroy a GEM object, releasing its backing storage according to how it
 * was created: imported PRIME buffer, ballooned backend pages or shmem
 * pages from drm_gem_get_pages().
 */
void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (xen_obj->base.import_attach) {
		drm_prime_gem_destroy(&xen_obj->base, xen_obj->sgt_imported);
		gem_free_pages_array(xen_obj);
	} else {
		if (xen_obj->pages) {
			if (xen_obj->be_alloc) {
				free_xenballooned_pages(xen_obj->num_pages,
							xen_obj->pages);
				gem_free_pages_array(xen_obj);
			} else {
				drm_gem_put_pages(&xen_obj->base,
						  xen_obj->pages, true, false);
			}
		}
	}
	drm_gem_object_release(gem_obj);
	kfree(xen_obj);
}
169
/* Return the backing pages array of a GEM object (may be NULL) */
struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	return xen_obj->pages;
}

/*
 * Build a scatter/gather table over the object's backing pages for PRIME
 * export; returns NULL if the object has no pages.
 */
struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (!xen_obj->pages)
		return NULL;

	return drm_prime_pages_to_sg(xen_obj->pages, xen_obj->num_pages);
}
186
187struct drm_gem_object *
188xen_drm_front_gem_import_sg_table(struct drm_device *dev,
189 struct dma_buf_attachment *attach,
190 struct sg_table *sgt)
191{
192 struct xen_drm_front_drm_info *drm_info = dev->dev_private;
193 struct xen_gem_object *xen_obj;
194 size_t size;
195 int ret;
196
197 size = attach->dmabuf->size;
198 xen_obj = gem_create_obj(dev, size);
199 if (IS_ERR_OR_NULL(xen_obj))
200 return ERR_CAST(xen_obj);
201
202 ret = gem_alloc_pages_array(xen_obj, size);
203 if (ret < 0)
204 return ERR_PTR(ret);
205
206 xen_obj->sgt_imported = sgt;
207
208 ret = drm_prime_sg_to_page_addr_arrays(sgt, xen_obj->pages,
209 NULL, xen_obj->num_pages);
210 if (ret < 0)
211 return ERR_PTR(ret);
212
213 ret = xen_drm_front_dbuf_create(drm_info->front_info,
214 xen_drm_front_dbuf_to_cookie(&xen_obj->base),
215 0, 0, 0, size, xen_obj->pages);
216 if (ret < 0)
217 return ERR_PTR(ret);
218
219 DRM_DEBUG("Imported buffer of size %zu with nents %u\n",
220 size, sgt->nents);
221
222 return &xen_obj->base;
223}
224
/*
 * Map all backing pages of the object into the given VMA up front, with
 * write-combine attributes.  Returns 0 or the first vm_insert_page()
 * error.
 */
static int gem_mmap_obj(struct xen_gem_object *xen_obj,
			struct vm_area_struct *vma)
{
	unsigned long addr = vma->vm_start;
	int i;

	/*
	 * clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
	 * the whole buffer.
	 */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_pgoff = 0;
	vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/*
	 * vm_operations_struct.fault handler will be called if CPU access
	 * to VM is here. For GPUs this isn't the case, because CPU
	 * doesn't touch the memory. Insert pages now, so both CPU and GPU are
	 * happy.
	 * FIXME: as we insert all the pages now then no .fault handler must
	 * be called, so don't provide one
	 */
	for (i = 0; i < xen_obj->num_pages; i++) {
		int ret;

		ret = vm_insert_page(vma, addr, xen_obj->pages[i]);
		if (ret < 0) {
			DRM_ERROR("Failed to insert pages into vma: %d\n", ret);
			return ret;
		}

		addr += PAGE_SIZE;
	}
	return 0;
}
263
/*
 * .mmap file operation: let drm_gem_mmap() resolve the object from the
 * fake offset, then populate the VMA with the object's pages.
 */
int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct xen_gem_object *xen_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret < 0)
		return ret;

	/* drm_gem_mmap() stored the GEM object in vm_private_data */
	gem_obj = vma->vm_private_data;
	xen_obj = to_xen_gem_obj(gem_obj);
	return gem_mmap_obj(xen_obj, vma);
}

/*
 * PRIME vmap: map the object's pages into kernel space with write-combine
 * attributes; returns NULL if the object has no pages.
 */
void *xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (!xen_obj->pages)
		return NULL;

	return vmap(xen_obj->pages, xen_obj->num_pages,
		    VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}

/* PRIME vunmap: undo xen_drm_front_gem_prime_vmap() */
void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj,
				    void *vaddr)
{
	vunmap(vaddr);
}

/*
 * PRIME mmap: set up the VMA for the object directly (no fake offset
 * lookup), then populate it with the object's pages.
 */
int xen_drm_front_gem_prime_mmap(struct drm_gem_object *gem_obj,
				 struct vm_area_struct *vma)
{
	struct xen_gem_object *xen_obj;
	int ret;

	ret = drm_gem_mmap_obj(gem_obj, gem_obj->size, vma);
	if (ret < 0)
		return ret;

	xen_obj = to_xen_gem_obj(gem_obj);
	return gem_mmap_obj(xen_obj, vma);
}
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.h b/drivers/gpu/drm/xen/xen_drm_front_gem.h
new file mode 100644
index 000000000000..d5ab734fdafe
--- /dev/null
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.h
@@ -0,0 +1,40 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */

/*
 * Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#ifndef __XEN_DRM_FRONT_GEM_H
#define __XEN_DRM_FRONT_GEM_H

#include <drm/drmP.h>

/* Create a GEM object of @size bytes. */
struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
		size_t size);

/* Import a PRIME buffer described by @sgt into a new GEM object. */
struct drm_gem_object *
xen_drm_front_gem_import_sg_table(struct drm_device *dev,
		struct dma_buf_attachment *attach,
		struct sg_table *sgt);

/* Export the object's backing pages as a scatter-gather table. */
struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj);

/* Return the array of pages backing @obj. */
struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *obj);

/* Release the GEM object and its backing storage. */
void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj);

/* mmap file operation for GEM objects. */
int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma);

/* PRIME kernel virtual mapping of the buffer (and its teardown). */
void *xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj);

void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj,
		void *vaddr);

/* PRIME mmap of the buffer into user-space. */
int xen_drm_front_gem_prime_mmap(struct drm_gem_object *gem_obj,
		struct vm_area_struct *vma);

#endif /* __XEN_DRM_FRONT_GEM_H */
diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.c b/drivers/gpu/drm/xen/xen_drm_front_kms.c
new file mode 100644
index 000000000000..a3479eb72d79
--- /dev/null
+++ b/drivers/gpu/drm/xen/xen_drm_front_kms.c
@@ -0,0 +1,366 @@
1// SPDX-License-Identifier: GPL-2.0 OR MIT
2
3/*
4 * Xen para-virtual DRM device
5 *
6 * Copyright (C) 2016-2018 EPAM Systems Inc.
7 *
8 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
9 */
10
11#include "xen_drm_front_kms.h"
12
13#include <drm/drmP.h>
14#include <drm/drm_atomic.h>
15#include <drm/drm_atomic_helper.h>
16#include <drm/drm_crtc_helper.h>
17#include <drm/drm_gem.h>
18#include <drm/drm_gem_framebuffer_helper.h>
19
20#include "xen_drm_front.h"
21#include "xen_drm_front_conn.h"
22
23/*
24 * Timeout in ms to wait for frame done event from the backend:
25 * must be a bit more than IO time-out
26 */
27#define FRAME_DONE_TO_MS (XEN_DRM_FRONT_WAIT_BACK_MS + 100)
28
/* Resolve a simple display pipe into its owning xen_drm_front pipeline. */
static struct xen_drm_front_drm_pipeline *
to_xen_drm_pipeline(struct drm_simple_display_pipe *pipe)
{
	return container_of(pipe, struct xen_drm_front_drm_pipeline, pipe);
}
34
/*
 * Framebuffer .destroy: tell the backend to detach the FB, then let the
 * GEM FB helper free it.
 */
static void fb_destroy(struct drm_framebuffer *fb)
{
	struct xen_drm_front_drm_info *drm_info = fb->dev->dev_private;
	int idx;

	/*
	 * Only talk to the backend while the device is alive:
	 * drm_dev_enter() fails once the device has been unplugged.
	 */
	if (drm_dev_enter(fb->dev, &idx)) {
		xen_drm_front_fb_detach(drm_info->front_info,
				xen_drm_front_fb_to_cookie(fb));
		drm_dev_exit(idx);
	}
	drm_gem_fb_destroy(fb);
}
47
48static struct drm_framebuffer_funcs fb_funcs = {
49 .destroy = fb_destroy,
50};
51
52static struct drm_framebuffer *
53fb_create(struct drm_device *dev, struct drm_file *filp,
54 const struct drm_mode_fb_cmd2 *mode_cmd)
55{
56 struct xen_drm_front_drm_info *drm_info = dev->dev_private;
57 static struct drm_framebuffer *fb;
58 struct drm_gem_object *gem_obj;
59 int ret;
60
61 fb = drm_gem_fb_create_with_funcs(dev, filp, mode_cmd, &fb_funcs);
62 if (IS_ERR_OR_NULL(fb))
63 return fb;
64
65 gem_obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
66 if (!gem_obj) {
67 DRM_ERROR("Failed to lookup GEM object\n");
68 ret = -ENOENT;
69 goto fail;
70 }
71
72 drm_gem_object_put_unlocked(gem_obj);
73
74 ret = xen_drm_front_fb_attach(drm_info->front_info,
75 xen_drm_front_dbuf_to_cookie(gem_obj),
76 xen_drm_front_fb_to_cookie(fb),
77 fb->width, fb->height,
78 fb->format->format);
79 if (ret < 0) {
80 DRM_ERROR("Back failed to attach FB %p: %d\n", fb, ret);
81 goto fail;
82 }
83
84 return fb;
85
86fail:
87 drm_gem_fb_destroy(fb);
88 return ERR_PTR(ret);
89}
90
/* Atomic mode-config: custom FB creation, stock atomic check/commit. */
static const struct drm_mode_config_funcs mode_config_funcs = {
	.fb_create = fb_create,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};
96
/*
 * Deliver the cached page-flip completion event (if any) to user-space
 * and clear it. Safe to call when no event is pending.
 */
static void send_pending_event(struct xen_drm_front_drm_pipeline *pipeline)
{
	struct drm_crtc *crtc = &pipeline->pipe.crtc;
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	/* event_lock serializes against caching in display_update() */
	spin_lock_irqsave(&dev->event_lock, flags);
	if (pipeline->pending_event)
		drm_crtc_send_vblank_event(crtc, pipeline->pending_event);
	pipeline->pending_event = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);
}
109
/*
 * Simple display pipe .enable: send the current mode and framebuffer
 * to the backend.
 */
static void display_enable(struct drm_simple_display_pipe *pipe,
		struct drm_crtc_state *crtc_state,
		struct drm_plane_state *plane_state)
{
	struct xen_drm_front_drm_pipeline *pipeline =
			to_xen_drm_pipeline(pipe);
	struct drm_crtc *crtc = &pipe->crtc;
	struct drm_framebuffer *fb = plane_state->fb;
	int ret, idx;

	/* device unplugged: silently skip talking to the backend */
	if (!drm_dev_enter(pipe->crtc.dev, &idx))
		return;

	ret = xen_drm_front_mode_set(pipeline, crtc->x, crtc->y,
			fb->width, fb->height,
			fb->format->cpp[0] * 8,
			xen_drm_front_fb_to_cookie(fb));

	if (ret) {
		DRM_ERROR("Failed to enable display: %d\n", ret);
		/*
		 * Mark the pipeline failed; presumably surfaced through
		 * connector detection — confirm against connector code.
		 */
		pipeline->conn_connected = false;
	}

	drm_dev_exit(idx);
}
135
/* Simple display pipe .disable: ask the backend to turn the display off. */
static void display_disable(struct drm_simple_display_pipe *pipe)
{
	struct xen_drm_front_drm_pipeline *pipeline =
			to_xen_drm_pipeline(pipe);
	int ret = 0, idx;

	if (drm_dev_enter(pipe->crtc.dev, &idx)) {
		/* all-zero mode with a NULL FB cookie means "display off" */
		ret = xen_drm_front_mode_set(pipeline, 0, 0, 0, 0, 0,
				xen_drm_front_fb_to_cookie(NULL));
		drm_dev_exit(idx);
	}
	if (ret)
		DRM_ERROR("Failed to disable display: %d\n", ret);

	/* Make sure we can restart with enabled connector next time */
	pipeline->conn_connected = true;

	/* release stalled event if any */
	send_pending_event(pipeline);
}
156
/*
 * Frame-done notification from the backend: cancel the page-flip
 * time-out worker and deliver the pending event to user-space.
 * @fb_cookie is currently unused; events are tracked per pipeline.
 */
void xen_drm_front_kms_on_frame_done(struct xen_drm_front_drm_pipeline *pipeline,
		u64 fb_cookie)
{
	/*
	 * This runs in interrupt context, e.g. under
	 * drm_info->front_info->io_lock, so we cannot call _sync version
	 * to cancel the work
	 */
	cancel_delayed_work(&pipeline->pflip_to_worker);

	send_pending_event(pipeline);
}
169
170static void pflip_to_worker(struct work_struct *work)
171{
172 struct delayed_work *delayed_work = to_delayed_work(work);
173 struct xen_drm_front_drm_pipeline *pipeline =
174 container_of(delayed_work,
175 struct xen_drm_front_drm_pipeline,
176 pflip_to_worker);
177
178 DRM_ERROR("Frame done timed-out, releasing");
179 send_pending_event(pipeline);
180}
181
/*
 * Send a page-flip request to the backend if this atomic update is a
 * real flip (both old and new plane states carry a framebuffer).
 * Returns true if the flip was sent — the pending event will then be
 * delivered on the backend's frame-done notification; false means the
 * caller must complete the event itself.
 */
static bool display_send_page_flip(struct drm_simple_display_pipe *pipe,
		struct drm_plane_state *old_plane_state)
{
	struct drm_plane_state *plane_state =
			drm_atomic_get_new_plane_state(old_plane_state->state,
					&pipe->plane);

	/*
	 * If old_plane_state->fb is NULL and plane_state->fb is not,
	 * then this is an atomic commit which will enable display.
	 * If old_plane_state->fb is not NULL and plane_state->fb is,
	 * then this is an atomic commit which will disable display.
	 * Ignore these and do not send page flip as this framebuffer will be
	 * sent to the backend as a part of display_set_config call.
	 */
	if (old_plane_state->fb && plane_state->fb) {
		struct xen_drm_front_drm_pipeline *pipeline =
				to_xen_drm_pipeline(pipe);
		struct xen_drm_front_drm_info *drm_info = pipeline->drm_info;
		int ret;

		/* arm the time-out before the request can possibly complete */
		schedule_delayed_work(&pipeline->pflip_to_worker,
				msecs_to_jiffies(FRAME_DONE_TO_MS));

		ret = xen_drm_front_page_flip(drm_info->front_info,
				pipeline->index,
				xen_drm_front_fb_to_cookie(plane_state->fb));
		if (ret) {
			DRM_ERROR("Failed to send page flip request to backend: %d\n", ret);

			pipeline->conn_connected = false;
			/*
			 * Report the flip not handled, so pending event is
			 * sent, unblocking user-space.
			 */
			return false;
		}
		/*
		 * Signal that page flip was handled, pending event will be sent
		 * on frame done event from the backend.
		 */
		return true;
	}

	return false;
}
228
/*
 * Simple display pipe .update: cache the CRTC completion event (if any)
 * and then either send a page flip to the backend or, when no flip is
 * needed/possible, deliver the event immediately.
 */
static void display_update(struct drm_simple_display_pipe *pipe,
		struct drm_plane_state *old_plane_state)
{
	struct xen_drm_front_drm_pipeline *pipeline =
			to_xen_drm_pipeline(pipe);
	struct drm_crtc *crtc = &pipe->crtc;
	struct drm_pending_vblank_event *event;
	int idx;

	event = crtc->state->event;
	if (event) {
		struct drm_device *dev = crtc->dev;
		unsigned long flags;

		/* only one event may be outstanding per pipeline */
		WARN_ON(pipeline->pending_event);

		spin_lock_irqsave(&dev->event_lock, flags);
		crtc->state->event = NULL;

		pipeline->pending_event = event;
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	/* device unplugged: nothing to send, just complete the event */
	if (!drm_dev_enter(pipe->crtc.dev, &idx)) {
		send_pending_event(pipeline);
		return;
	}

	/*
	 * Send page flip request to the backend *after* we have event cached
	 * above, so on page flip done event from the backend we can
	 * deliver it and there is no race condition between this code and
	 * event from the backend.
	 * If this is not a page flip, e.g. no flip done event from the backend
	 * is expected, then send now.
	 */
	if (!display_send_page_flip(pipe, old_plane_state))
		send_pending_event(pipeline);

	drm_dev_exit(idx);
}
270
271static enum drm_mode_status
272display_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode)
273{
274 struct xen_drm_front_drm_pipeline *pipeline =
275 container_of(crtc, struct xen_drm_front_drm_pipeline,
276 pipe.crtc);
277
278 if (mode->hdisplay != pipeline->width)
279 return MODE_ERROR;
280
281 if (mode->vdisplay != pipeline->height)
282 return MODE_ERROR;
283
284 return MODE_OK;
285}
286
/* Simple display pipe hooks; prepare_fb comes from the GEM FB helper. */
static const struct drm_simple_display_pipe_funcs display_funcs = {
	.mode_valid = display_mode_valid,
	.enable = display_enable,
	.disable = display_disable,
	.prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
	.update = display_update,
};
294
/*
 * Create one connector + simple display pipe pair for connector config
 * @cfg at @index, storing run-time state in @pipeline.
 */
static int display_pipe_init(struct xen_drm_front_drm_info *drm_info,
		int index, struct xen_drm_front_cfg_connector *cfg,
		struct xen_drm_front_drm_pipeline *pipeline)
{
	struct drm_device *dev = drm_info->drm_dev;
	const u32 *formats;
	int format_count;
	int ret;

	pipeline->drm_info = drm_info;
	pipeline->index = index;
	pipeline->height = cfg->height;
	pipeline->width = cfg->width;

	INIT_DELAYED_WORK(&pipeline->pflip_to_worker, pflip_to_worker);

	ret = xen_drm_front_conn_init(drm_info, &pipeline->conn);
	if (ret)
		return ret;

	formats = xen_drm_front_conn_get_formats(&format_count);

	return drm_simple_display_pipe_init(dev, &pipeline->pipe,
			&display_funcs, formats,
			format_count, NULL,
			&pipeline->conn);
}
322
/*
 * Initialize mode setting: configure mode-config limits and create one
 * display pipeline per configured connector.
 */
int xen_drm_front_kms_init(struct xen_drm_front_drm_info *drm_info)
{
	struct drm_device *dev = drm_info->drm_dev;
	int i, ret;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	/* NOTE(review): 4095x2047 limits look backend-driven — confirm */
	dev->mode_config.max_width = 4095;
	dev->mode_config.max_height = 2047;
	dev->mode_config.funcs = &mode_config_funcs;

	for (i = 0; i < drm_info->front_info->cfg.num_connectors; i++) {
		struct xen_drm_front_cfg_connector *cfg =
				&drm_info->front_info->cfg.connectors[i];
		struct xen_drm_front_drm_pipeline *pipeline =
				&drm_info->pipeline[i];

		ret = display_pipe_init(drm_info, i, cfg, pipeline);
		if (ret) {
			drm_mode_config_cleanup(dev);
			return ret;
		}
	}

	drm_mode_config_reset(dev);
	drm_kms_helper_poll_init(dev);
	return 0;
}
353
/*
 * Tear down KMS state: stop the time-out workers and release any
 * pending events so user-space is not left blocked.
 */
void xen_drm_front_kms_fini(struct xen_drm_front_drm_info *drm_info)
{
	int i;

	for (i = 0; i < drm_info->front_info->cfg.num_connectors; i++) {
		struct xen_drm_front_drm_pipeline *pipeline =
				&drm_info->pipeline[i];

		cancel_delayed_work_sync(&pipeline->pflip_to_worker);

		send_pending_event(pipeline);
	}
}
diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.h b/drivers/gpu/drm/xen/xen_drm_front_kms.h
new file mode 100644
index 000000000000..ab2fbad4fbbf
--- /dev/null
+++ b/drivers/gpu/drm/xen/xen_drm_front_kms.h
@@ -0,0 +1,26 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */

/*
 * Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#ifndef __XEN_DRM_FRONT_KMS_H_
#define __XEN_DRM_FRONT_KMS_H_

#include <linux/types.h>

struct xen_drm_front_drm_info;
struct xen_drm_front_drm_pipeline;

/* Initialize mode setting: one display pipeline per configured connector. */
int xen_drm_front_kms_init(struct xen_drm_front_drm_info *drm_info);

/* Tear down KMS state: stop workers and release pending events. */
void xen_drm_front_kms_fini(struct xen_drm_front_drm_info *drm_info);

/* Notification from the backend that the frame for @fb_cookie is done. */
void xen_drm_front_kms_on_frame_done(struct xen_drm_front_drm_pipeline *pipeline,
		u64 fb_cookie);

#endif /* __XEN_DRM_FRONT_KMS_H_ */
diff --git a/drivers/gpu/drm/xen/xen_drm_front_shbuf.c b/drivers/gpu/drm/xen/xen_drm_front_shbuf.c
new file mode 100644
index 000000000000..d5705251a0d6
--- /dev/null
+++ b/drivers/gpu/drm/xen/xen_drm_front_shbuf.c
@@ -0,0 +1,414 @@
1// SPDX-License-Identifier: GPL-2.0 OR MIT
2
3/*
4 * Xen para-virtual DRM device
5 *
6 * Copyright (C) 2016-2018 EPAM Systems Inc.
7 *
8 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
9 */
10
11#include <drm/drmP.h>
12
13#if defined(CONFIG_X86)
14#include <drm/drm_cache.h>
15#endif
16#include <linux/errno.h>
17#include <linux/mm.h>
18
19#include <asm/xen/hypervisor.h>
20#include <xen/balloon.h>
21#include <xen/xen.h>
22#include <xen/xenbus.h>
23#include <xen/interface/io/ring.h>
24#include <xen/interface/io/displif.h>
25
26#include "xen_drm_front.h"
27#include "xen_drm_front_shbuf.h"
28
/*
 * Operations that differ between locally-granted buffers and buffers
 * allocated by the backend (be_alloc mode); selected at alloc time.
 */
struct xen_drm_front_shbuf_ops {
	/*
	 * Calculate number of grefs required to handle this buffer,
	 * e.g. if grefs are required for page directory only or the buffer
	 * pages as well.
	 */
	void (*calc_num_grefs)(struct xen_drm_front_shbuf *buf);
	/* Fill page directory according to para-virtual display protocol. */
	void (*fill_page_dir)(struct xen_drm_front_shbuf *buf);
	/* Claim grant references for the pages of the buffer. */
	int (*grant_refs_for_buffer)(struct xen_drm_front_shbuf *buf,
			grant_ref_t *priv_gref_head, int gref_idx);
	/* Map grant references of the buffer. */
	int (*map)(struct xen_drm_front_shbuf *buf);
	/* Unmap grant references of the buffer. */
	int (*unmap)(struct xen_drm_front_shbuf *buf);
};
46
47grant_ref_t xen_drm_front_shbuf_get_dir_start(struct xen_drm_front_shbuf *buf)
48{
49 if (!buf->grefs)
50 return GRANT_INVALID_REF;
51
52 return buf->grefs[0];
53}
54
55int xen_drm_front_shbuf_map(struct xen_drm_front_shbuf *buf)
56{
57 if (buf->ops->map)
58 return buf->ops->map(buf);
59
60 /* no need to map own grant references */
61 return 0;
62}
63
64int xen_drm_front_shbuf_unmap(struct xen_drm_front_shbuf *buf)
65{
66 if (buf->ops->unmap)
67 return buf->ops->unmap(buf);
68
69 /* no need to unmap own grant references */
70 return 0;
71}
72
/*
 * Flush CPU caches for the buffer pages; implemented for x86 only,
 * a no-op elsewhere.
 */
void xen_drm_front_shbuf_flush(struct xen_drm_front_shbuf *buf)
{
#if defined(CONFIG_X86)
	drm_clflush_pages(buf->pages, buf->num_pages);
#endif
}
79
/*
 * End foreign access for all granted references and free the buffer
 * descriptor. Tolerates partially-initialized buffers (the fail paths
 * of xen_drm_front_shbuf_alloc()).
 */
void xen_drm_front_shbuf_free(struct xen_drm_front_shbuf *buf)
{
	if (buf->grefs) {
		int i;

		for (i = 0; i < buf->num_grefs; i++)
			if (buf->grefs[i] != GRANT_INVALID_REF)
				gnttab_end_foreign_access(buf->grefs[i],
						0, 0UL);
	}
	kfree(buf->grefs);
	kfree(buf->directory);
	kfree(buf);
}
94
/*
 * number of grefs a page can hold with respect to the
 * struct xendispl_page_directory header
 */
#define XEN_DRM_NUM_GREFS_PER_PAGE ((PAGE_SIZE - \
		offsetof(struct xendispl_page_directory, gref)) / \
		sizeof(grant_ref_t))

/* Number of pages the page directory itself consumes for this buffer. */
static int get_num_pages_dir(struct xen_drm_front_shbuf *buf)
{
	/* number of pages the page directory consumes itself */
	return DIV_ROUND_UP(buf->num_pages, XEN_DRM_NUM_GREFS_PER_PAGE);
}
108
/* Backend-allocated buffer: grefs are needed for the directory only. */
static void backend_calc_num_grefs(struct xen_drm_front_shbuf *buf)
{
	/* only for pages the page directory consumes itself */
	buf->num_grefs = get_num_pages_dir(buf);
}
114
/* Locally-granted buffer: grefs for the directory plus every buffer page. */
static void guest_calc_num_grefs(struct xen_drm_front_shbuf *buf)
{
	/*
	 * number of pages the page directory consumes itself
	 * plus grefs for the buffer pages
	 */
	buf->num_grefs = get_num_pages_dir(buf) + buf->num_pages;
}
123
/* Kernel virtual address of a page, going via the Xen pseudo-physical frame. */
#define xen_page_to_vaddr(page) \
		((phys_addr_t)pfn_to_kaddr(page_to_xen_pfn(page)))

/*
 * Unmap grant references of a backend-allocated buffer previously
 * mapped by backend_map(). Returns 0 if there is nothing to unmap.
 */
static int backend_unmap(struct xen_drm_front_shbuf *buf)
{
	struct gnttab_unmap_grant_ref *unmap_ops;
	int i, ret;

	if (!buf->pages || !buf->backend_map_handles || !buf->grefs)
		return 0;

	unmap_ops = kcalloc(buf->num_pages, sizeof(*unmap_ops),
			GFP_KERNEL);
	if (!unmap_ops) {
		DRM_ERROR("Failed to get memory while unmapping\n");
		return -ENOMEM;
	}

	for (i = 0; i < buf->num_pages; i++) {
		phys_addr_t addr;

		addr = xen_page_to_vaddr(buf->pages[i]);
		gnttab_set_unmap_op(&unmap_ops[i], addr, GNTMAP_host_map,
				buf->backend_map_handles[i]);
	}

	ret = gnttab_unmap_refs(unmap_ops, NULL, buf->pages,
			buf->num_pages);

	/* report per-page failures even when the batch call succeeded */
	for (i = 0; i < buf->num_pages; i++) {
		if (unlikely(unmap_ops[i].status != GNTST_okay))
			DRM_ERROR("Failed to unmap page %d: %d\n",
					i, unmap_ops[i].status);
	}

	if (ret)
		DRM_ERROR("Failed to unmap grant references, ret %d", ret);

	kfree(unmap_ops);
	kfree(buf->backend_map_handles);
	buf->backend_map_handles = NULL;
	return ret;
}
167
/*
 * Map the grant references the backend published in the page directory
 * onto the local pages of the buffer. On failure, already-established
 * mappings are torn down via backend_unmap().
 */
static int backend_map(struct xen_drm_front_shbuf *buf)
{
	struct gnttab_map_grant_ref *map_ops = NULL;
	unsigned char *ptr;
	int ret, cur_gref, cur_dir_page, cur_page, grefs_left;

	map_ops = kcalloc(buf->num_pages, sizeof(*map_ops), GFP_KERNEL);
	if (!map_ops)
		return -ENOMEM;

	buf->backend_map_handles = kcalloc(buf->num_pages,
			sizeof(*buf->backend_map_handles),
			GFP_KERNEL);
	if (!buf->backend_map_handles) {
		kfree(map_ops);
		return -ENOMEM;
	}

	/*
	 * read page directory to get grefs from the backend: for external
	 * buffer we only allocate buf->grefs for the page directory,
	 * so buf->num_grefs has number of pages in the page directory itself
	 */
	ptr = buf->directory;
	grefs_left = buf->num_pages;
	cur_page = 0;
	for (cur_dir_page = 0; cur_dir_page < buf->num_grefs; cur_dir_page++) {
		struct xendispl_page_directory *page_dir =
				(struct xendispl_page_directory *)ptr;
		int to_copy = XEN_DRM_NUM_GREFS_PER_PAGE;

		/* last directory page may be partially filled */
		if (to_copy > grefs_left)
			to_copy = grefs_left;

		for (cur_gref = 0; cur_gref < to_copy; cur_gref++) {
			phys_addr_t addr;

			addr = xen_page_to_vaddr(buf->pages[cur_page]);
			gnttab_set_map_op(&map_ops[cur_page], addr,
					GNTMAP_host_map,
					page_dir->gref[cur_gref],
					buf->xb_dev->otherend_id);
			cur_page++;
		}

		grefs_left -= to_copy;
		ptr += PAGE_SIZE;
	}
	ret = gnttab_map_refs(map_ops, NULL, buf->pages, buf->num_pages);

	/* save handles even if error, so we can unmap */
	for (cur_page = 0; cur_page < buf->num_pages; cur_page++) {
		buf->backend_map_handles[cur_page] = map_ops[cur_page].handle;
		if (unlikely(map_ops[cur_page].status != GNTST_okay))
			DRM_ERROR("Failed to map page %d: %d\n",
					cur_page, map_ops[cur_page].status);
	}

	if (ret) {
		DRM_ERROR("Failed to map grant references, ret %d", ret);
		backend_unmap(buf);
	}

	kfree(map_ops);
	return ret;
}
234
/*
 * Chain the page-directory pages via gref_dir_next_page only: the
 * buffer page grefs themselves will be provided by the backend.
 */
static void backend_fill_page_dir(struct xen_drm_front_shbuf *buf)
{
	struct xendispl_page_directory *page_dir;
	unsigned char *ptr;
	int i, num_pages_dir;

	ptr = buf->directory;
	num_pages_dir = get_num_pages_dir(buf);

	/* fill only grefs for the page directory itself */
	for (i = 0; i < num_pages_dir - 1; i++) {
		page_dir = (struct xendispl_page_directory *)ptr;

		page_dir->gref_dir_next_page = buf->grefs[i + 1];
		ptr += PAGE_SIZE;
	}
	/* last page must say there is no more pages */
	page_dir = (struct xendispl_page_directory *)ptr;
	page_dir->gref_dir_next_page = GRANT_INVALID_REF;
}
255
/*
 * Fill the page directory with grefs of both the directory pages
 * (chained via gref_dir_next_page) and the locally-granted buffer pages.
 */
static void guest_fill_page_dir(struct xen_drm_front_shbuf *buf)
{
	unsigned char *ptr;
	int cur_gref, grefs_left, to_copy, i, num_pages_dir;

	ptr = buf->directory;
	num_pages_dir = get_num_pages_dir(buf);

	/*
	 * while copying, skip grefs at start, they are for pages
	 * granted for the page directory itself
	 */
	cur_gref = num_pages_dir;
	grefs_left = buf->num_pages;
	for (i = 0; i < num_pages_dir; i++) {
		struct xendispl_page_directory *page_dir =
				(struct xendispl_page_directory *)ptr;

		if (grefs_left <= XEN_DRM_NUM_GREFS_PER_PAGE) {
			/* last directory page: terminate the chain */
			to_copy = grefs_left;
			page_dir->gref_dir_next_page = GRANT_INVALID_REF;
		} else {
			to_copy = XEN_DRM_NUM_GREFS_PER_PAGE;
			page_dir->gref_dir_next_page = buf->grefs[i + 1];
		}
		memcpy(&page_dir->gref, &buf->grefs[cur_gref],
				to_copy * sizeof(grant_ref_t));
		ptr += PAGE_SIZE;
		grefs_left -= to_copy;
		cur_gref += to_copy;
	}
}
288
/*
 * Grant the backend access to every buffer page, storing the claimed
 * references into buf->grefs starting at @gref_idx (after the
 * directory grefs).
 */
static int guest_grant_refs_for_buffer(struct xen_drm_front_shbuf *buf,
		grant_ref_t *priv_gref_head,
		int gref_idx)
{
	int i, cur_ref, otherend_id;

	otherend_id = buf->xb_dev->otherend_id;
	for (i = 0; i < buf->num_pages; i++) {
		cur_ref = gnttab_claim_grant_reference(priv_gref_head);
		if (cur_ref < 0)
			return cur_ref;

		gnttab_grant_foreign_access_ref(cur_ref, otherend_id,
				xen_page_to_gfn(buf->pages[i]),
				0);
		buf->grefs[gref_idx++] = cur_ref;
	}
	return 0;
}
308
309static int grant_references(struct xen_drm_front_shbuf *buf)
310{
311 grant_ref_t priv_gref_head;
312 int ret, i, j, cur_ref;
313 int otherend_id, num_pages_dir;
314
315 ret = gnttab_alloc_grant_references(buf->num_grefs, &priv_gref_head);
316 if (ret < 0) {
317 DRM_ERROR("Cannot allocate grant references\n");
318 return ret;
319 }
320
321 otherend_id = buf->xb_dev->otherend_id;
322 j = 0;
323 num_pages_dir = get_num_pages_dir(buf);
324 for (i = 0; i < num_pages_dir; i++) {
325 unsigned long frame;
326
327 cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
328 if (cur_ref < 0)
329 return cur_ref;
330
331 frame = xen_page_to_gfn(virt_to_page(buf->directory +
332 PAGE_SIZE * i));
333 gnttab_grant_foreign_access_ref(cur_ref, otherend_id, frame, 0);
334 buf->grefs[j++] = cur_ref;
335 }
336
337 if (buf->ops->grant_refs_for_buffer) {
338 ret = buf->ops->grant_refs_for_buffer(buf, &priv_gref_head, j);
339 if (ret)
340 return ret;
341 }
342
343 gnttab_free_grant_references(priv_gref_head);
344 return 0;
345}
346
347static int alloc_storage(struct xen_drm_front_shbuf *buf)
348{
349 buf->grefs = kcalloc(buf->num_grefs, sizeof(*buf->grefs), GFP_KERNEL);
350 if (!buf->grefs)
351 return -ENOMEM;
352
353 buf->directory = kcalloc(get_num_pages_dir(buf), PAGE_SIZE, GFP_KERNEL);
354 if (!buf->directory)
355 return -ENOMEM;
356
357 return 0;
358}
359
/*
 * For be allocated buffers we don't need grant_refs_for_buffer as those
 * grant references are allocated at backend side
 */
static const struct xen_drm_front_shbuf_ops backend_ops = {
	.calc_num_grefs = backend_calc_num_grefs,
	.fill_page_dir = backend_fill_page_dir,
	.map = backend_map,
	.unmap = backend_unmap
};

/* For locally granted references we do not need to map/unmap the references */
static const struct xen_drm_front_shbuf_ops local_ops = {
	.calc_num_grefs = guest_calc_num_grefs,
	.fill_page_dir = guest_fill_page_dir,
	.grant_refs_for_buffer = guest_grant_refs_for_buffer,
};
377
/*
 * Allocate a shared-buffer descriptor for @cfg, grant the required
 * references and fill the page directory.
 *
 * NOTE(review): returns NULL when the descriptor allocation fails but
 * ERR_PTR() on later failures — callers must check both; confirm this
 * mixed convention is intended.
 */
struct xen_drm_front_shbuf *
xen_drm_front_shbuf_alloc(struct xen_drm_front_shbuf_cfg *cfg)
{
	struct xen_drm_front_shbuf *buf;
	int ret;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return NULL;

	/* select the op set matching who allocates the buffer pages */
	if (cfg->be_alloc)
		buf->ops = &backend_ops;
	else
		buf->ops = &local_ops;

	buf->xb_dev = cfg->xb_dev;
	buf->num_pages = DIV_ROUND_UP(cfg->size, PAGE_SIZE);
	buf->pages = cfg->pages;

	buf->ops->calc_num_grefs(buf);

	ret = alloc_storage(buf);
	if (ret)
		goto fail;

	ret = grant_references(buf);
	if (ret)
		goto fail;

	buf->ops->fill_page_dir(buf);

	return buf;

fail:
	/* releases grefs, directory and the descriptor itself */
	xen_drm_front_shbuf_free(buf);
	return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/xen/xen_drm_front_shbuf.h b/drivers/gpu/drm/xen/xen_drm_front_shbuf.h
new file mode 100644
index 000000000000..7545c692539e
--- /dev/null
+++ b/drivers/gpu/drm/xen/xen_drm_front_shbuf.h
@@ -0,0 +1,64 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */

/*
 * Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#ifndef __XEN_DRM_FRONT_SHBUF_H_
#define __XEN_DRM_FRONT_SHBUF_H_

#include <linux/kernel.h>
#include <linux/scatterlist.h>

#include <xen/grant_table.h>

/* A buffer shared with the backend via Xen grant references. */
struct xen_drm_front_shbuf {
	/*
	 * number of references granted for the backend use:
	 * - for allocated/imported dma-buf's this holds number of grant
	 *   references for the page directory and pages of the buffer
	 * - for the buffer provided by the backend this holds number of
	 *   grant references for the page directory as grant references for
	 *   the buffer will be provided by the backend
	 */
	int num_grefs;
	grant_ref_t *grefs;
	/* page directory pages, chained per the displif protocol */
	unsigned char *directory;

	int num_pages;
	struct page **pages;

	struct xenbus_device *xb_dev;

	/* these are the ops used internally depending on be_alloc mode */
	const struct xen_drm_front_shbuf_ops *ops;

	/* Xen map handles for the buffer allocated by the backend */
	grant_handle_t *backend_map_handles;
};

/* Parameters for xen_drm_front_shbuf_alloc(). */
struct xen_drm_front_shbuf_cfg {
	struct xenbus_device *xb_dev;
	size_t size;
	struct page **pages;
	/* true if the backend allocates the buffer pages */
	bool be_alloc;
};

/* Allocate a shared buffer and grant references per @cfg. */
struct xen_drm_front_shbuf *
xen_drm_front_shbuf_alloc(struct xen_drm_front_shbuf_cfg *cfg);

/* First gref of the page directory (GRANT_INVALID_REF if none). */
grant_ref_t xen_drm_front_shbuf_get_dir_start(struct xen_drm_front_shbuf *buf);

/* Map/unmap backend grant references (no-ops for locally-granted buffers). */
int xen_drm_front_shbuf_map(struct xen_drm_front_shbuf *buf);

int xen_drm_front_shbuf_unmap(struct xen_drm_front_shbuf *buf);

/* Flush CPU caches for the buffer pages (x86 only). */
void xen_drm_front_shbuf_flush(struct xen_drm_front_shbuf *buf);

/* End foreign access and free the buffer descriptor. */
void xen_drm_front_shbuf_free(struct xen_drm_front_shbuf *buf);

#endif /* __XEN_DRM_FRONT_SHBUF_H_ */
diff --git a/drivers/gpu/drm/zte/zx_plane.c b/drivers/gpu/drm/zte/zx_plane.c
index 94545adac50d..d1931f5ea0b2 100644
--- a/drivers/gpu/drm/zte/zx_plane.c
+++ b/drivers/gpu/drm/zte/zx_plane.c
@@ -268,7 +268,7 @@ static void zx_plane_atomic_disable(struct drm_plane *plane,
268 struct zx_plane *zplane = to_zx_plane(plane); 268 struct zx_plane *zplane = to_zx_plane(plane);
269 void __iomem *hbsc = zplane->hbsc; 269 void __iomem *hbsc = zplane->hbsc;
270 270
271 zx_vou_layer_disable(plane); 271 zx_vou_layer_disable(plane, old_state);
272 272
273 /* Disable HBSC block */ 273 /* Disable HBSC block */
274 zx_writel_mask(hbsc + HBSC_CTRL0, HBSC_CTRL_EN, 0); 274 zx_writel_mask(hbsc + HBSC_CTRL0, HBSC_CTRL_EN, 0);
diff --git a/drivers/gpu/drm/zte/zx_vou.c b/drivers/gpu/drm/zte/zx_vou.c
index 7491813131f3..442311d31110 100644
--- a/drivers/gpu/drm/zte/zx_vou.c
+++ b/drivers/gpu/drm/zte/zx_vou.c
@@ -627,9 +627,10 @@ void zx_vou_layer_enable(struct drm_plane *plane)
627 zx_writel_mask(vou->osd + OSD_CTRL0, bits->enable, bits->enable); 627 zx_writel_mask(vou->osd + OSD_CTRL0, bits->enable, bits->enable);
628} 628}
629 629
630void zx_vou_layer_disable(struct drm_plane *plane) 630void zx_vou_layer_disable(struct drm_plane *plane,
631 struct drm_plane_state *old_state)
631{ 632{
632 struct zx_crtc *zcrtc = to_zx_crtc(plane->crtc); 633 struct zx_crtc *zcrtc = to_zx_crtc(old_state->crtc);
633 struct zx_vou_hw *vou = zcrtc->vou; 634 struct zx_vou_hw *vou = zcrtc->vou;
634 struct zx_plane *zplane = to_zx_plane(plane); 635 struct zx_plane *zplane = to_zx_plane(plane);
635 const struct vou_layer_bits *bits = zplane->bits; 636 const struct vou_layer_bits *bits = zplane->bits;
diff --git a/drivers/gpu/drm/zte/zx_vou.h b/drivers/gpu/drm/zte/zx_vou.h
index 97d72bfce982..5b7f84fbb112 100644
--- a/drivers/gpu/drm/zte/zx_vou.h
+++ b/drivers/gpu/drm/zte/zx_vou.h
@@ -62,6 +62,7 @@ void zx_vou_config_dividers(struct drm_crtc *crtc,
62 struct vou_div_config *configs, int num); 62 struct vou_div_config *configs, int num);
63 63
64void zx_vou_layer_enable(struct drm_plane *plane); 64void zx_vou_layer_enable(struct drm_plane *plane);
65void zx_vou_layer_disable(struct drm_plane *plane); 65void zx_vou_layer_disable(struct drm_plane *plane,
66 struct drm_plane_state *old_state);
66 67
67#endif /* __ZX_VOU_H__ */ 68#endif /* __ZX_VOU_H__ */
diff --git a/drivers/staging/vboxvideo/vbox_drv.c b/drivers/staging/vboxvideo/vbox_drv.c
index e18642e5027e..f6d26beffa54 100644
--- a/drivers/staging/vboxvideo/vbox_drv.c
+++ b/drivers/staging/vboxvideo/vbox_drv.c
@@ -242,7 +242,7 @@ static struct drm_driver driver = {
242 .minor = DRIVER_MINOR, 242 .minor = DRIVER_MINOR,
243 .patchlevel = DRIVER_PATCHLEVEL, 243 .patchlevel = DRIVER_PATCHLEVEL,
244 244
245 .gem_free_object = vbox_gem_free_object, 245 .gem_free_object_unlocked = vbox_gem_free_object,
246 .dumb_create = vbox_dumb_create, 246 .dumb_create = vbox_dumb_create,
247 .dumb_map_offset = vbox_dumb_mmap_offset, 247 .dumb_map_offset = vbox_dumb_mmap_offset,
248 .dumb_destroy = drm_gem_dumb_destroy, 248 .dumb_destroy = drm_gem_dumb_destroy,
diff --git a/include/drm/bridge/analogix_dp.h b/include/drm/bridge/analogix_dp.h
index e9a1116d2f8e..475b706b49de 100644
--- a/include/drm/bridge/analogix_dp.h
+++ b/include/drm/bridge/analogix_dp.h
@@ -33,7 +33,8 @@ struct analogix_dp_plat_data {
33 struct drm_connector *connector; 33 struct drm_connector *connector;
34 bool skip_connector; 34 bool skip_connector;
35 35
36 int (*power_on)(struct analogix_dp_plat_data *); 36 int (*power_on_start)(struct analogix_dp_plat_data *);
37 int (*power_on_end)(struct analogix_dp_plat_data *);
37 int (*power_off)(struct analogix_dp_plat_data *); 38 int (*power_off)(struct analogix_dp_plat_data *);
38 int (*attach)(struct analogix_dp_plat_data *, struct drm_bridge *, 39 int (*attach)(struct analogix_dp_plat_data *, struct drm_bridge *,
39 struct drm_connector *); 40 struct drm_connector *);
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index c6666cd09347..f5099c12c6a6 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -95,14 +95,6 @@ struct dma_buf_attachment;
95struct pci_dev; 95struct pci_dev;
96struct pci_controller; 96struct pci_controller;
97 97
98/***********************************************************************/
99/** \name DRM template customization defaults */
100/*@{*/
101
102/***********************************************************************/
103/** \name Internal types and structures */
104/*@{*/
105
106#define DRM_IF_VERSION(maj, min) (maj << 16 | min) 98#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
107 99
108/** 100/**
@@ -123,27 +115,13 @@ static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev)
123#define DRM_SWITCH_POWER_CHANGING 2 115#define DRM_SWITCH_POWER_CHANGING 2
124#define DRM_SWITCH_POWER_DYNAMIC_OFF 3 116#define DRM_SWITCH_POWER_DYNAMIC_OFF 3
125 117
126static __inline__ int drm_core_check_feature(struct drm_device *dev, 118static inline bool drm_core_check_feature(struct drm_device *dev, int feature)
127 int feature)
128{ 119{
129 return ((dev->driver->driver_features & feature) ? 1 : 0); 120 return dev->driver->driver_features & feature;
130} 121}
131 122
132/******************************************************************/
133/** \name Internal function definitions */
134/*@{*/
135
136 /* Driver support (drm_drv.h) */
137
138/*
139 * These are exported to drivers so that they can implement fencing using
140 * DMA quiscent + idle. DMA quiescent usually requires the hardware lock.
141 */
142
143/*@}*/
144
145/* returns true if currently okay to sleep */ 123/* returns true if currently okay to sleep */
146static __inline__ bool drm_can_sleep(void) 124static inline bool drm_can_sleep(void)
147{ 125{
148 if (in_atomic() || in_dbg_master() || irqs_disabled()) 126 if (in_atomic() || in_dbg_master() || irqs_disabled())
149 return false; 127 return false;
diff --git a/include/drm/drm_blend.h b/include/drm/drm_blend.h
index 17606026590b..330c561c4c11 100644
--- a/include/drm/drm_blend.h
+++ b/include/drm/drm_blend.h
@@ -36,6 +36,9 @@ static inline bool drm_rotation_90_or_270(unsigned int rotation)
36 return rotation & (DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270); 36 return rotation & (DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270);
37} 37}
38 38
39#define DRM_BLEND_ALPHA_OPAQUE 0xffff
40
41int drm_plane_create_alpha_property(struct drm_plane *plane);
39int drm_plane_create_rotation_property(struct drm_plane *plane, 42int drm_plane_create_rotation_property(struct drm_plane *plane,
40 unsigned int rotation, 43 unsigned int rotation,
41 unsigned int supported_rotations); 44 unsigned int supported_rotations);
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index 7c4fa32f3fc6..3a0eac2885b7 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -46,7 +46,14 @@ struct drm_device {
46 /* currently active master for this device. Protected by master_mutex */ 46 /* currently active master for this device. Protected by master_mutex */
47 struct drm_master *master; 47 struct drm_master *master;
48 48
49 atomic_t unplugged; /**< Flag whether dev is dead */ 49 /**
50 * @unplugged:
51 *
52 * Flag to tell if the device has been unplugged.
53 * See drm_dev_enter() and drm_dev_is_unplugged().
54 */
55 bool unplugged;
56
50 struct inode *anon_inode; /**< inode for private address-space */ 57 struct inode *anon_inode; /**< inode for private address-space */
51 char *unique; /**< unique name of the device */ 58 char *unique; /**< unique name of the device */
52 /*@} */ 59 /*@} */
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index d23dcdd1bd95..7e545f5f94d3 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -624,6 +624,8 @@ void drm_dev_get(struct drm_device *dev);
624void drm_dev_put(struct drm_device *dev); 624void drm_dev_put(struct drm_device *dev);
625void drm_dev_unref(struct drm_device *dev); 625void drm_dev_unref(struct drm_device *dev);
626void drm_put_dev(struct drm_device *dev); 626void drm_put_dev(struct drm_device *dev);
627bool drm_dev_enter(struct drm_device *dev, int *idx);
628void drm_dev_exit(int idx);
627void drm_dev_unplug(struct drm_device *dev); 629void drm_dev_unplug(struct drm_device *dev);
628 630
629/** 631/**
@@ -635,11 +637,16 @@ void drm_dev_unplug(struct drm_device *dev);
635 * unplugged, these two functions guarantee that any store before calling 637 * unplugged, these two functions guarantee that any store before calling
636 * drm_dev_unplug() is visible to callers of this function after it completes 638 * drm_dev_unplug() is visible to callers of this function after it completes
637 */ 639 */
638static inline int drm_dev_is_unplugged(struct drm_device *dev) 640static inline bool drm_dev_is_unplugged(struct drm_device *dev)
639{ 641{
640 int ret = atomic_read(&dev->unplugged); 642 int idx;
641 smp_rmb(); 643
642 return ret; 644 if (drm_dev_enter(dev, &idx)) {
645 drm_dev_exit(idx);
646 return false;
647 }
648
649 return true;
643} 650}
644 651
645 652
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 8d89a9c3748d..b25d12ef120a 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -465,8 +465,6 @@ struct edid *drm_get_edid(struct drm_connector *connector,
465struct edid *drm_get_edid_switcheroo(struct drm_connector *connector, 465struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
466 struct i2c_adapter *adapter); 466 struct i2c_adapter *adapter);
467struct edid *drm_edid_duplicate(const struct edid *edid); 467struct edid *drm_edid_duplicate(const struct edid *edid);
468void drm_reset_display_info(struct drm_connector *connector);
469u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid);
470int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid); 468int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
471 469
472u8 drm_match_cea_mode(const struct drm_display_mode *to_match); 470u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
diff --git a/include/drm/drm_gem_framebuffer_helper.h b/include/drm/drm_gem_framebuffer_helper.h
index 5ca7cdc3f527..a38de7eb55b4 100644
--- a/include/drm/drm_gem_framebuffer_helper.h
+++ b/include/drm/drm_gem_framebuffer_helper.h
@@ -10,6 +10,7 @@ struct drm_gem_object;
10struct drm_mode_fb_cmd2; 10struct drm_mode_fb_cmd2;
11struct drm_plane; 11struct drm_plane;
12struct drm_plane_state; 12struct drm_plane_state;
13struct drm_simple_display_pipe;
13 14
14struct drm_gem_object *drm_gem_fb_get_obj(struct drm_framebuffer *fb, 15struct drm_gem_object *drm_gem_fb_get_obj(struct drm_framebuffer *fb,
15 unsigned int plane); 16 unsigned int plane);
@@ -27,6 +28,8 @@ drm_gem_fb_create(struct drm_device *dev, struct drm_file *file,
27 28
28int drm_gem_fb_prepare_fb(struct drm_plane *plane, 29int drm_gem_fb_prepare_fb(struct drm_plane *plane,
29 struct drm_plane_state *state); 30 struct drm_plane_state *state);
31int drm_gem_fb_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
32 struct drm_plane_state *plane_state);
30 33
31struct drm_framebuffer * 34struct drm_framebuffer *
32drm_gem_fbdev_fb_create(struct drm_device *dev, 35drm_gem_fbdev_fb_create(struct drm_device *dev,
diff --git a/include/drm/drm_legacy.h b/include/drm/drm_legacy.h
index cf0e7d89bcdf..8fad66f88e4f 100644
--- a/include/drm/drm_legacy.h
+++ b/include/drm/drm_legacy.h
@@ -194,8 +194,8 @@ void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev);
194void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev); 194void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev);
195void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev); 195void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev);
196 196
197static __inline__ struct drm_local_map *drm_legacy_findmap(struct drm_device *dev, 197static inline struct drm_local_map *drm_legacy_findmap(struct drm_device *dev,
198 unsigned int token) 198 unsigned int token)
199{ 199{
200 struct drm_map_list *_entry; 200 struct drm_map_list *_entry;
201 list_for_each_entry(_entry, &dev->maplist, head) 201 list_for_each_entry(_entry, &dev->maplist, head)
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index 7569f22ffef6..33b3a96d66d0 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -796,6 +796,14 @@ struct drm_mode_config {
796 bool allow_fb_modifiers; 796 bool allow_fb_modifiers;
797 797
798 /** 798 /**
799 * @normalize_zpos:
800 *
801 * If true the drm core will call drm_atomic_normalize_zpos() as part of
802 * atomic mode checking from drm_atomic_helper_check()
803 */
804 bool normalize_zpos;
805
806 /**
799 * @modifiers_property: Plane property to list support modifier/format 807 * @modifiers_property: Plane property to list support modifier/format
800 * combination. 808 * combination.
801 */ 809 */
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index 3e76ca805b0f..35e2a3a79fc5 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -1004,11 +1004,14 @@ struct drm_plane_helper_funcs {
1004 * This function must not block for outstanding rendering, since it is 1004 * This function must not block for outstanding rendering, since it is
1005 * called in the context of the atomic IOCTL even for async commits to 1005 * called in the context of the atomic IOCTL even for async commits to
1006 * be able to return any errors to userspace. Instead the recommended 1006 * be able to return any errors to userspace. Instead the recommended
1007 * way is to fill out the fence member of the passed-in 1007 * way is to fill out the &drm_plane_state.fence of the passed-in
1008 * &drm_plane_state. If the driver doesn't support native fences then 1008 * &drm_plane_state. If the driver doesn't support native fences then
1009 * equivalent functionality should be implemented through private 1009 * equivalent functionality should be implemented through private
1010 * members in the plane structure. 1010 * members in the plane structure.
1011 * 1011 *
1012 * Drivers which always have their buffers pinned should use
1013 * drm_gem_fb_prepare_fb() for this hook.
1014 *
1012 * The helpers will call @cleanup_fb with matching arguments for every 1015 * The helpers will call @cleanup_fb with matching arguments for every
1013 * successful call to this hook. 1016 * successful call to this hook.
1014 * 1017 *
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index f7bf4a48b1c3..26fa50c2a50e 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -43,6 +43,7 @@ struct drm_modeset_acquire_ctx;
43 * plane (in 16.16) 43 * plane (in 16.16)
44 * @src_w: width of visible portion of plane (in 16.16) 44 * @src_w: width of visible portion of plane (in 16.16)
45 * @src_h: height of visible portion of plane (in 16.16) 45 * @src_h: height of visible portion of plane (in 16.16)
46 * @alpha: opacity of the plane
46 * @rotation: rotation of the plane 47 * @rotation: rotation of the plane
47 * @zpos: priority of the given plane on crtc (optional) 48 * @zpos: priority of the given plane on crtc (optional)
48 * Note that multiple active planes on the same crtc can have an identical 49 * Note that multiple active planes on the same crtc can have an identical
@@ -51,8 +52,8 @@ struct drm_modeset_acquire_ctx;
51 * plane with a lower ID. 52 * plane with a lower ID.
52 * @normalized_zpos: normalized value of zpos: unique, range from 0 to N-1 53 * @normalized_zpos: normalized value of zpos: unique, range from 0 to N-1
53 * where N is the number of active planes for given crtc. Note that 54 * where N is the number of active planes for given crtc. Note that
54 * the driver must call drm_atomic_normalize_zpos() to update this before 55 * the driver must set drm_mode_config.normalize_zpos or call
55 * it can be trusted. 56 * drm_atomic_normalize_zpos() to update this before it can be trusted.
56 * @src: clipped source coordinates of the plane (in 16.16) 57 * @src: clipped source coordinates of the plane (in 16.16)
57 * @dst: clipped destination coordinates of the plane 58 * @dst: clipped destination coordinates of the plane
58 * @state: backpointer to global drm_atomic_state 59 * @state: backpointer to global drm_atomic_state
@@ -79,8 +80,15 @@ struct drm_plane_state {
79 /** 80 /**
80 * @fence: 81 * @fence:
81 * 82 *
82 * Optional fence to wait for before scanning out @fb. Do not write this 83 * Optional fence to wait for before scanning out @fb. The core atomic
83 * directly, use drm_atomic_set_fence_for_plane() 84 * code will set this when userspace is using explicit fencing. Do not
85 * write this directly for a driver's implicit fence, use
86 * drm_atomic_set_fence_for_plane() to ensure that an explicit fence is
87 * preserved.
88 *
89 * Drivers should store any implicit fence in this from their
90 * &drm_plane_helper.prepare_fb callback. See drm_gem_fb_prepare_fb()
91 * and drm_gem_fb_simple_display_pipe_prepare_fb() for suitable helpers.
84 */ 92 */
85 struct dma_fence *fence; 93 struct dma_fence *fence;
86 94
@@ -106,6 +114,9 @@ struct drm_plane_state {
106 uint32_t src_x, src_y; 114 uint32_t src_x, src_y;
107 uint32_t src_h, src_w; 115 uint32_t src_h, src_w;
108 116
117 /* Plane opacity */
118 u16 alpha;
119
109 /* Plane rotation */ 120 /* Plane rotation */
110 unsigned int rotation; 121 unsigned int rotation;
111 122
@@ -496,6 +507,7 @@ enum drm_plane_type {
496 * @funcs: helper functions 507 * @funcs: helper functions
497 * @properties: property tracking for this plane 508 * @properties: property tracking for this plane
498 * @type: type of plane (overlay, primary, cursor) 509 * @type: type of plane (overlay, primary, cursor)
510 * @alpha_property: alpha property for this plane
499 * @zpos_property: zpos property for this plane 511 * @zpos_property: zpos property for this plane
500 * @rotation_property: rotation property for this plane 512 * @rotation_property: rotation property for this plane
501 * @helper_private: mid-layer private data 513 * @helper_private: mid-layer private data
@@ -571,6 +583,7 @@ struct drm_plane {
571 */ 583 */
572 struct drm_plane_state *state; 584 struct drm_plane_state *state;
573 585
586 struct drm_property *alpha_property;
574 struct drm_property *zpos_property; 587 struct drm_property *zpos_property;
575 struct drm_property *rotation_property; 588 struct drm_property *rotation_property;
576 589
diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h
index d1423c7f3c73..ab8167baade5 100644
--- a/include/drm/drm_property.h
+++ b/include/drm/drm_property.h
@@ -281,32 +281,6 @@ struct drm_property_blob *drm_property_blob_get(struct drm_property_blob *blob);
281void drm_property_blob_put(struct drm_property_blob *blob); 281void drm_property_blob_put(struct drm_property_blob *blob);
282 282
283/** 283/**
284 * drm_property_reference_blob - acquire a blob property reference
285 * @blob: DRM blob property
286 *
287 * This is a compatibility alias for drm_property_blob_get() and should not be
288 * used by new code.
289 */
290static inline struct drm_property_blob *
291drm_property_reference_blob(struct drm_property_blob *blob)
292{
293 return drm_property_blob_get(blob);
294}
295
296/**
297 * drm_property_unreference_blob - release a blob property reference
298 * @blob: DRM blob property
299 *
300 * This is a compatibility alias for drm_property_blob_put() and should not be
301 * used by new code.
302 */
303static inline void
304drm_property_unreference_blob(struct drm_property_blob *blob)
305{
306 drm_property_blob_put(blob);
307}
308
309/**
310 * drm_property_find - find property object 284 * drm_property_find - find property object
311 * @dev: DRM device 285 * @dev: DRM device
312 * @file_priv: drm file to check for lease against. 286 * @file_priv: drm file to check for lease against.
diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h
index 1b4e352143fd..451960438a29 100644
--- a/include/drm/drm_simple_kms_helper.h
+++ b/include/drm/drm_simple_kms_helper.h
@@ -64,7 +64,8 @@ struct drm_simple_display_pipe_funcs {
64 * This hook is optional. 64 * This hook is optional.
65 */ 65 */
66 void (*enable)(struct drm_simple_display_pipe *pipe, 66 void (*enable)(struct drm_simple_display_pipe *pipe,
67 struct drm_crtc_state *crtc_state); 67 struct drm_crtc_state *crtc_state,
68 struct drm_plane_state *plane_state);
68 /** 69 /**
69 * @disable: 70 * @disable:
70 * 71 *
@@ -115,6 +116,9 @@ struct drm_simple_display_pipe_funcs {
115 * Optional, called by &drm_plane_helper_funcs.prepare_fb. Please read 116 * Optional, called by &drm_plane_helper_funcs.prepare_fb. Please read
116 * the documentation for the &drm_plane_helper_funcs.prepare_fb hook for 117 * the documentation for the &drm_plane_helper_funcs.prepare_fb hook for
117 * more details. 118 * more details.
119 *
120 * Drivers which always have their buffers pinned should use
121 * drm_gem_fb_simple_display_pipe_prepare_fb() for this hook.
118 */ 122 */
119 int (*prepare_fb)(struct drm_simple_display_pipe *pipe, 123 int (*prepare_fb)(struct drm_simple_display_pipe *pipe,
120 struct drm_plane_state *plane_state); 124 struct drm_plane_state *plane_state);
diff --git a/include/drm/tinydrm/mipi-dbi.h b/include/drm/tinydrm/mipi-dbi.h
index 44e824af2ef6..b8ba58861986 100644
--- a/include/drm/tinydrm/mipi-dbi.h
+++ b/include/drm/tinydrm/mipi-dbi.h
@@ -67,7 +67,9 @@ int mipi_dbi_init(struct device *dev, struct mipi_dbi *mipi,
67 const struct drm_simple_display_pipe_funcs *pipe_funcs, 67 const struct drm_simple_display_pipe_funcs *pipe_funcs,
68 struct drm_driver *driver, 68 struct drm_driver *driver,
69 const struct drm_display_mode *mode, unsigned int rotation); 69 const struct drm_display_mode *mode, unsigned int rotation);
70void mipi_dbi_enable_flush(struct mipi_dbi *mipi); 70void mipi_dbi_enable_flush(struct mipi_dbi *mipi,
71 struct drm_crtc_state *crtc_state,
72 struct drm_plane_state *plan_state);
71void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe); 73void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe);
72void mipi_dbi_hw_reset(struct mipi_dbi *mipi); 74void mipi_dbi_hw_reset(struct mipi_dbi *mipi);
73bool mipi_dbi_display_is_on(struct mipi_dbi *mipi); 75bool mipi_dbi_display_is_on(struct mipi_dbi *mipi);
diff --git a/include/drm/tinydrm/tinydrm-helpers.h b/include/drm/tinydrm/tinydrm-helpers.h
index 0a4ddbc04c60..5b96f0b12c8c 100644
--- a/include/drm/tinydrm/tinydrm-helpers.h
+++ b/include/drm/tinydrm/tinydrm-helpers.h
@@ -36,6 +36,11 @@ static inline bool tinydrm_machine_little_endian(void)
36bool tinydrm_merge_clips(struct drm_clip_rect *dst, 36bool tinydrm_merge_clips(struct drm_clip_rect *dst,
37 struct drm_clip_rect *src, unsigned int num_clips, 37 struct drm_clip_rect *src, unsigned int num_clips,
38 unsigned int flags, u32 max_width, u32 max_height); 38 unsigned int flags, u32 max_width, u32 max_height);
39int tinydrm_fb_dirty(struct drm_framebuffer *fb,
40 struct drm_file *file_priv,
41 unsigned int flags, unsigned int color,
42 struct drm_clip_rect *clips,
43 unsigned int num_clips);
39void tinydrm_memcpy(void *dst, void *vaddr, struct drm_framebuffer *fb, 44void tinydrm_memcpy(void *dst, void *vaddr, struct drm_framebuffer *fb,
40 struct drm_clip_rect *clip); 45 struct drm_clip_rect *clip);
41void tinydrm_swab16(u16 *dst, void *vaddr, struct drm_framebuffer *fb, 46void tinydrm_swab16(u16 *dst, void *vaddr, struct drm_framebuffer *fb,
diff --git a/include/drm/tinydrm/tinydrm.h b/include/drm/tinydrm/tinydrm.h
index 07a9a11fe19d..56e4a916b5e8 100644
--- a/include/drm/tinydrm/tinydrm.h
+++ b/include/drm/tinydrm/tinydrm.h
@@ -26,6 +26,10 @@ struct tinydrm_device {
26 struct drm_simple_display_pipe pipe; 26 struct drm_simple_display_pipe pipe;
27 struct mutex dirty_lock; 27 struct mutex dirty_lock;
28 const struct drm_framebuffer_funcs *fb_funcs; 28 const struct drm_framebuffer_funcs *fb_funcs;
29 int (*fb_dirty)(struct drm_framebuffer *framebuffer,
30 struct drm_file *file_priv, unsigned flags,
31 unsigned color, struct drm_clip_rect *clips,
32 unsigned num_clips);
29}; 33};
30 34
31static inline struct tinydrm_device * 35static inline struct tinydrm_device *
@@ -41,7 +45,7 @@ pipe_to_tinydrm(struct drm_simple_display_pipe *pipe)
41 * the &drm_driver structure. 45 * the &drm_driver structure.
42 */ 46 */
43#define TINYDRM_GEM_DRIVER_OPS \ 47#define TINYDRM_GEM_DRIVER_OPS \
44 .gem_free_object = tinydrm_gem_cma_free_object, \ 48 .gem_free_object_unlocked = tinydrm_gem_cma_free_object, \
45 .gem_print_info = drm_gem_cma_print_info, \ 49 .gem_print_info = drm_gem_cma_print_info, \
46 .gem_vm_ops = &drm_gem_cma_vm_ops, \ 50 .gem_vm_ops = &drm_gem_cma_vm_ops, \
47 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \ 51 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \
@@ -91,8 +95,6 @@ void tinydrm_shutdown(struct tinydrm_device *tdev);
91 95
92void tinydrm_display_pipe_update(struct drm_simple_display_pipe *pipe, 96void tinydrm_display_pipe_update(struct drm_simple_display_pipe *pipe,
93 struct drm_plane_state *old_state); 97 struct drm_plane_state *old_state);
94int tinydrm_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
95 struct drm_plane_state *plane_state);
96int 98int
97tinydrm_display_pipe_init(struct tinydrm_device *tdev, 99tinydrm_display_pipe_init(struct tinydrm_device *tdev,
98 const struct drm_simple_display_pipe_funcs *funcs, 100 const struct drm_simple_display_pipe_funcs *funcs,
diff --git a/scripts/coccinelle/api/drm-get-put.cocci b/scripts/coccinelle/api/drm-get-put.cocci
index ceb71ea7f61c..3a09c97ad87d 100644
--- a/scripts/coccinelle/api/drm-get-put.cocci
+++ b/scripts/coccinelle/api/drm-get-put.cocci
@@ -40,12 +40,6 @@ expression object;
40- drm_gem_object_unreference_unlocked(object) 40- drm_gem_object_unreference_unlocked(object)
41+ drm_gem_object_put_unlocked(object) 41+ drm_gem_object_put_unlocked(object)
42| 42|
43- drm_property_reference_blob(object)
44+ drm_property_blob_get(object)
45|
46- drm_property_unreference_blob(object)
47+ drm_property_blob_put(object)
48|
49- drm_dev_unref(object) 43- drm_dev_unref(object)
50+ drm_dev_put(object) 44+ drm_dev_put(object)
51) 45)
@@ -72,10 +66,6 @@ __drm_gem_object_unreference(object)
72| 66|
73drm_gem_object_unreference_unlocked(object) 67drm_gem_object_unreference_unlocked(object)
74| 68|
75drm_property_unreference_blob@p(object)
76|
77drm_property_reference_blob@p(object)
78|
79drm_dev_unref@p(object) 69drm_dev_unref@p(object)
80) 70)
81 71