aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDave Airlie <airlied@redhat.com>2017-10-16 20:45:05 -0400
committerDave Airlie <airlied@redhat.com>2017-10-16 20:53:43 -0400
commitbd21a37d41c3b3088aeae59f54fd82de0ddb6fdd (patch)
tree985dc5b65d2adb36a3671e78a174b3aaac4b6f50
parentd7205d5c0f2b09d900231de9026b97105c37d343 (diff)
parent6c94804fde4415f3938778155d8e665e6870a46d (diff)
Merge remote-tracking branch 'pfdo/drm-next' into drm-next
Pull in drm-next for the object find API changes. Fix the one place the API crashes. Signed-off-by: Dave Airlie <airlied@redhat.com>
-rw-r--r--Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt4
-rw-r--r--Documentation/devicetree/bindings/display/bridge/sii9234.txt49
-rw-r--r--Documentation/devicetree/bindings/display/panel/raspberrypi,7inch-touchscreen.txt49
-rw-r--r--Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt3
-rw-r--r--drivers/dma-buf/reservation.c56
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_virtual.c4
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c2
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c2
-rw-r--r--drivers/gpu/drm/bochs/bochs_kms.c2
-rw-r--r--drivers/gpu/drm/bridge/Kconfig10
-rw-r--r--drivers/gpu/drm/bridge/Makefile1
-rw-r--r--drivers/gpu/drm/bridge/adv7511/Kconfig8
-rw-r--r--drivers/gpu/drm/bridge/adv7511/Makefile1
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511.h43
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_cec.c337
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c116
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7533.c38
-rw-r--r--drivers/gpu/drm/bridge/panel.c10
-rw-r--r--drivers/gpu/drm/bridge/sii9234.c994
-rw-r--r--drivers/gpu/drm/bridge/sil-sii8620.c96
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c5
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_mode.c2
-rw-r--r--drivers/gpu/drm/drm_atomic.c11
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c7
-rw-r--r--drivers/gpu/drm/drm_color_mgmt.c4
-rw-r--r--drivers/gpu/drm/drm_connector.c2
-rw-r--r--drivers/gpu/drm/drm_crtc.c8
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c4
-rw-r--r--drivers/gpu/drm/drm_crtc_internal.h1
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c7
-rw-r--r--drivers/gpu/drm/drm_encoder.c2
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c2
-rw-r--r--drivers/gpu/drm/drm_framebuffer.c9
-rw-r--r--drivers/gpu/drm/drm_gem_framebuffer_helper.c82
-rw-r--r--drivers/gpu/drm/drm_internal.h1
-rw-r--r--drivers/gpu/drm/drm_ioctl.c2
-rw-r--r--drivers/gpu/drm/drm_mode_object.c10
-rw-r--r--drivers/gpu/drm/drm_plane.c14
-rw-r--r--drivers/gpu/drm/drm_plane_helper.c2
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c2
-rw-r--r--drivers/gpu/drm/drm_property.c6
-rw-r--r--drivers/gpu/drm/drm_syncobj.c3
-rw-r--r--drivers/gpu/drm/drm_vblank.c140
-rw-r--r--drivers/gpu/drm/etnaviv/Kconfig2
-rw-r--r--drivers/gpu/drm/etnaviv/Makefile3
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_buffer.c36
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c15
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h6
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c39
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.h6
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c19
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c69
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c217
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.h13
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_iommu.c197
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_iommu.h7
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c120
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_mmu.c106
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_mmu.h36
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_perfmon.c495
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_perfmon.h49
-rw-r--r--drivers/gpu/drm/gma500/mid_bios.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c9
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c2
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c4
-rw-r--r--drivers/gpu/drm/i915/Kconfig1
-rw-r--r--drivers/gpu/drm/i915/Makefile3
-rw-r--r--drivers/gpu/drm/i915/gvt/cfg_space.c30
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c37
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.c127
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c5
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h14
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c44
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.c47
-rw-r--r--drivers/gpu/drm/i915/gvt/render.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c114
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.h1
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c116
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c42
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h198
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c60
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c12
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c15
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c326
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h46
-rw-r--r--drivers/gpu/drm/i915/i915_gem_request.c32
-rw-r--r--drivers/gpu/drm/i915/i915_gem_request.h35
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c83
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c29
-rw-r--r--drivers/gpu/drm/i915/i915_guc_submission.c358
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c961
-rw-r--r--drivers/gpu/drm/i915/i915_oa_cflgt2.c109
-rw-r--r--drivers/gpu/drm/i915/i915_oa_cflgt2.h34
-rw-r--r--drivers/gpu/drm/i915/i915_params.c208
-rw-r--r--drivers/gpu/drm/i915/i915_params.h86
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c19
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c14
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h42
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.c27
-rw-r--r--drivers/gpu/drm/i915/intel_audio.c5
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c14
-rw-r--r--drivers/gpu/drm/i915/intel_breadcrumbs.c2
-rw-r--r--drivers/gpu/drm/i915/intel_cdclk.c6
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c4
-rw-r--r--drivers/gpu/drm/i915/intel_csr.c8
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c26
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c41
-rw-r--r--drivers/gpu/drm/i915/intel_display.c28
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c78
-rw-r--r--drivers/gpu/drm/i915/intel_dp_aux_backlight.c2
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c11
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h6
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c10
-rw-r--r--drivers/gpu/drm/i915/intel_engine_cs.c241
-rw-r--r--drivers/gpu/drm/i915/intel_fbc.c11
-rw-r--r--drivers/gpu/drm/i915/intel_fifo_underrun.c14
-rw-r--r--drivers/gpu/drm/i915/intel_guc_fwif.h10
-rw-r--r--drivers/gpu/drm/i915/intel_guc_loader.c13
-rw-r--r--drivers/gpu/drm/i915/intel_guc_log.c26
-rw-r--r--drivers/gpu/drm/i915/intel_gvt.c12
-rw-r--r--drivers/gpu/drm/i915/intel_hangcheck.c2
-rw-r--r--drivers/gpu/drm/i915/intel_huc.c42
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c459
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.h36
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c4
-rw-r--r--drivers/gpu/drm/i915/intel_modes.c17
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c2
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c2
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c8
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c310
-rw-r--r--drivers/gpu/drm/i915/intel_psr.c286
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c167
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h135
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c17
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c2
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c2
-rw-r--r--drivers/gpu/drm/i915/intel_uc.c74
-rw-r--r--drivers/gpu/drm/i915/intel_uc.h23
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c237
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.h10
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_timeline.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_random.c5
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_random.h5
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_hangcheck.c24
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c5
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c4
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-hdmi.c8
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c18
-rw-r--r--drivers/gpu/drm/omapdrm/dss/Kconfig8
-rw-r--r--drivers/gpu/drm/omapdrm/dss/Makefile1
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi.h7
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4.c62
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c381
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_cec.h55
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_core.c7
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_core.h4
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss.h1
-rw-r--r--drivers/gpu/drm/panel/Kconfig8
-rw-r--r--drivers/gpu/drm/panel/Makefile1
-rw-r--r--drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c514
-rw-r--r--drivers/gpu/drm/radeon/r100.c2
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c16
-rw-r--r--drivers/gpu/drm/rockchip/Kconfig1
-rw-r--r--drivers/gpu/drm/stm/ltdc.c16
-rw-r--r--drivers/gpu/drm/stm/ltdc.h2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.c38
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c16
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi.h112
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c38
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c228
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c227
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c68
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c91
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.h6
-rw-r--r--drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c1
-rw-r--r--drivers/gpu/drm/tinydrm/mi0283qt.c8
-rw-r--r--drivers/gpu/drm/tinydrm/repaper.c28
-rw-r--r--drivers/gpu/drm/tinydrm/st7586.c6
-rw-r--r--drivers/gpu/drm/udl/udl_connector.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_dpi.c17
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c12
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c20
-rw-r--r--drivers/gpu/drm/via/via_verifier.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c2
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-contig.c4
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c4
-rw-r--r--drivers/staging/vboxvideo/vbox_mode.c2
-rw-r--r--include/drm/bridge/mhl.h4
-rw-r--r--include/drm/drm_atomic.h94
-rw-r--r--include/drm/drm_connector.h3
-rw-r--r--include/drm/drm_crtc.h5
-rw-r--r--include/drm/drm_dp_helper.h6
-rw-r--r--include/drm/drm_drv.h2
-rw-r--r--include/drm/drm_encoder.h3
-rw-r--r--include/drm/drm_framebuffer.h1
-rw-r--r--include/drm/drm_mode_object.h2
-rw-r--r--include/drm/drm_of.h31
-rw-r--r--include/drm/drm_plane.h3
-rw-r--r--include/drm/drm_property.h3
-rw-r--r--include/drm/drm_vblank.h6
-rw-r--r--include/drm/i915_pciids.h2
-rw-r--r--include/linux/dma-fence.h5
-rw-r--r--include/linux/regmap.h39
-rw-r--r--include/linux/scatterlist.h17
-rw-r--r--include/uapi/drm/etnaviv_drm.h43
-rw-r--r--include/uapi/drm/i915_drm.h11
-rw-r--r--lib/scatterlist.c95
-rw-r--r--tools/testing/scatterlist/Makefile30
-rw-r--r--tools/testing/scatterlist/linux/mm.h125
-rw-r--r--tools/testing/scatterlist/main.c79
216 files changed, 8658 insertions, 3359 deletions
diff --git a/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt b/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt
index 06668bca7ffc..0047b1394c70 100644
--- a/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt
+++ b/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt
@@ -68,6 +68,8 @@ Optional properties:
68- adi,disable-timing-generator: Only for ADV7533. Disables the internal timing 68- adi,disable-timing-generator: Only for ADV7533. Disables the internal timing
69 generator. The chip will rely on the sync signals in the DSI data lanes, 69 generator. The chip will rely on the sync signals in the DSI data lanes,
70 rather than generate its own timings for HDMI output. 70 rather than generate its own timings for HDMI output.
71- clocks: from common clock binding: reference to the CEC clock.
72- clock-names: from common clock binding: must be "cec".
71 73
72Required nodes: 74Required nodes:
73 75
@@ -89,6 +91,8 @@ Example
89 reg = <39>; 91 reg = <39>;
90 interrupt-parent = <&gpio3>; 92 interrupt-parent = <&gpio3>;
91 interrupts = <29 IRQ_TYPE_EDGE_FALLING>; 93 interrupts = <29 IRQ_TYPE_EDGE_FALLING>;
94 clocks = <&cec_clock>;
95 clock-names = "cec";
92 96
93 adi,input-depth = <8>; 97 adi,input-depth = <8>;
94 adi,input-colorspace = "rgb"; 98 adi,input-colorspace = "rgb";
diff --git a/Documentation/devicetree/bindings/display/bridge/sii9234.txt b/Documentation/devicetree/bindings/display/bridge/sii9234.txt
new file mode 100644
index 000000000000..88041ba23d56
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/bridge/sii9234.txt
@@ -0,0 +1,49 @@
1Silicon Image SiI9234 HDMI/MHL bridge bindings
2
3Required properties:
4 - compatible : "sil,sii9234".
5 - reg : I2C address for TPI interface, use 0x39
6 - avcc33-supply : MHL/USB Switch Supply Voltage (3.3V)
7 - iovcc18-supply : I/O Supply Voltage (1.8V)
8 - avcc12-supply : TMDS Analog Supply Voltage (1.2V)
9 - cvcc12-supply : Digital Core Supply Voltage (1.2V)
10 - interrupts, interrupt-parent: interrupt specifier of INT pin
11 - reset-gpios: gpio specifier of RESET pin (active low)
12 - video interfaces: Device node can contain two video interface port
13 nodes for HDMI encoder and connector according to [1].
14 - port@0 - MHL to HDMI
15 - port@1 - MHL to connector
16
17[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
18
19
20Example:
21 sii9234@39 {
22 compatible = "sil,sii9234";
23 reg = <0x39>;
24 avcc33-supply = <&vcc33mhl>;
25 iovcc18-supply = <&vcc18mhl>;
26 avcc12-supply = <&vsil12>;
27 cvcc12-supply = <&vsil12>;
28 reset-gpios = <&gpf3 4 GPIO_ACTIVE_LOW>;
29 interrupt-parent = <&gpf3>;
30 interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
31
32 ports {
33 #address-cells = <1>;
34 #size-cells = <0>;
35
36 port@0 {
37 reg = <0>;
38 mhl_to_hdmi: endpoint {
39 remote-endpoint = <&hdmi_to_mhl>;
40 };
41 };
42 port@1 {
43 reg = <1>;
44 mhl_to_connector: endpoint {
45 remote-endpoint = <&connector_to_mhl>;
46 };
47 };
48 };
49 };
diff --git a/Documentation/devicetree/bindings/display/panel/raspberrypi,7inch-touchscreen.txt b/Documentation/devicetree/bindings/display/panel/raspberrypi,7inch-touchscreen.txt
new file mode 100644
index 000000000000..e9e19c059260
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/raspberrypi,7inch-touchscreen.txt
@@ -0,0 +1,49 @@
1This binding covers the official 7" (800x480) Raspberry Pi touchscreen
2panel.
3
4This DSI panel contains:
5
6- TC358762 DSI->DPI bridge
7- Atmel microcontroller on I2C for power sequencing the DSI bridge and
8 controlling backlight
9- Touchscreen controller on I2C for touch input
10
11and this binding covers the DSI display parts but not its touch input.
12
13Required properties:
14- compatible: Must be "raspberrypi,7inch-touchscreen-panel"
15- reg: Must be "45"
16- port: See panel-common.txt
17
18Example:
19
20dsi1: dsi@7e700000 {
21 #address-cells = <1>;
22 #size-cells = <0>;
23 <...>
24
25 port {
26 dsi_out_port: endpoint {
27 remote-endpoint = <&panel_dsi_port>;
28 };
29 };
30};
31
32i2c_dsi: i2c {
33 compatible = "i2c-gpio";
34 #address-cells = <1>;
35 #size-cells = <0>;
36 gpios = <&gpio 28 0
37 &gpio 29 0>;
38
39 lcd@45 {
40 compatible = "raspberrypi,7inch-touchscreen-panel";
41 reg = <0x45>;
42
43 port {
44 panel_dsi_port: endpoint {
45 remote-endpoint = <&dsi_out_port>;
46 };
47 };
48 };
49};
diff --git a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
index 92441086caba..46df3b78ae9e 100644
--- a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
+++ b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
@@ -41,14 +41,17 @@ CEC. It is one end of the pipeline.
41Required properties: 41Required properties:
42 - compatible: value must be one of: 42 - compatible: value must be one of:
43 * allwinner,sun5i-a10s-hdmi 43 * allwinner,sun5i-a10s-hdmi
44 * allwinner,sun6i-a31-hdmi
44 - reg: base address and size of memory-mapped region 45 - reg: base address and size of memory-mapped region
45 - interrupts: interrupt associated to this IP 46 - interrupts: interrupt associated to this IP
46 - clocks: phandles to the clocks feeding the HDMI encoder 47 - clocks: phandles to the clocks feeding the HDMI encoder
47 * ahb: the HDMI interface clock 48 * ahb: the HDMI interface clock
48 * mod: the HDMI module clock 49 * mod: the HDMI module clock
50 * ddc: the HDMI ddc clock (A31 only)
49 * pll-0: the first video PLL 51 * pll-0: the first video PLL
50 * pll-1: the second video PLL 52 * pll-1: the second video PLL
51 - clock-names: the clock names mentioned above 53 - clock-names: the clock names mentioned above
54 - resets: phandle to the reset control for the HDMI encoder (A31 only)
52 - dmas: phandles to the DMA channels used by the HDMI encoder 55 - dmas: phandles to the DMA channels used by the HDMI encoder
53 * ddc-tx: The channel for DDC transmission 56 * ddc-tx: The channel for DDC transmission
54 * ddc-rx: The channel for DDC reception 57 * ddc-rx: The channel for DDC reception
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index dec3a815455d..b44d9d7db347 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -266,8 +266,7 @@ EXPORT_SYMBOL(reservation_object_add_excl_fence);
266* @dst: the destination reservation object 266* @dst: the destination reservation object
267* @src: the source reservation object 267* @src: the source reservation object
268* 268*
269* Copy all fences from src to dst. Both src->lock as well as dst-lock must be 269* Copy all fences from src to dst. dst-lock must be held.
270* held.
271*/ 270*/
272int reservation_object_copy_fences(struct reservation_object *dst, 271int reservation_object_copy_fences(struct reservation_object *dst,
273 struct reservation_object *src) 272 struct reservation_object *src)
@@ -277,33 +276,62 @@ int reservation_object_copy_fences(struct reservation_object *dst,
277 size_t size; 276 size_t size;
278 unsigned i; 277 unsigned i;
279 278
280 src_list = reservation_object_get_list(src); 279 rcu_read_lock();
280 src_list = rcu_dereference(src->fence);
281 281
282retry:
282 if (src_list) { 283 if (src_list) {
283 size = offsetof(typeof(*src_list), 284 unsigned shared_count = src_list->shared_count;
284 shared[src_list->shared_count]); 285
286 size = offsetof(typeof(*src_list), shared[shared_count]);
287 rcu_read_unlock();
288
285 dst_list = kmalloc(size, GFP_KERNEL); 289 dst_list = kmalloc(size, GFP_KERNEL);
286 if (!dst_list) 290 if (!dst_list)
287 return -ENOMEM; 291 return -ENOMEM;
288 292
289 dst_list->shared_count = src_list->shared_count; 293 rcu_read_lock();
290 dst_list->shared_max = src_list->shared_count; 294 src_list = rcu_dereference(src->fence);
291 for (i = 0; i < src_list->shared_count; ++i) 295 if (!src_list || src_list->shared_count > shared_count) {
292 dst_list->shared[i] = 296 kfree(dst_list);
293 dma_fence_get(src_list->shared[i]); 297 goto retry;
298 }
299
300 dst_list->shared_count = 0;
301 dst_list->shared_max = shared_count;
302 for (i = 0; i < src_list->shared_count; ++i) {
303 struct dma_fence *fence;
304
305 fence = rcu_dereference(src_list->shared[i]);
306 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
307 &fence->flags))
308 continue;
309
310 if (!dma_fence_get_rcu(fence)) {
311 kfree(dst_list);
312 src_list = rcu_dereference(src->fence);
313 goto retry;
314 }
315
316 if (dma_fence_is_signaled(fence)) {
317 dma_fence_put(fence);
318 continue;
319 }
320
321 dst_list->shared[dst_list->shared_count++] = fence;
322 }
294 } else { 323 } else {
295 dst_list = NULL; 324 dst_list = NULL;
296 } 325 }
297 326
327 new = dma_fence_get_rcu_safe(&src->fence_excl);
328 rcu_read_unlock();
329
298 kfree(dst->staged); 330 kfree(dst->staged);
299 dst->staged = NULL; 331 dst->staged = NULL;
300 332
301 src_list = reservation_object_get_list(dst); 333 src_list = reservation_object_get_list(dst);
302
303 old = reservation_object_get_excl(dst); 334 old = reservation_object_get_excl(dst);
304 new = reservation_object_get_excl(src);
305
306 dma_fence_get(new);
307 335
308 preempt_disable(); 336 preempt_disable();
309 write_seqcount_begin(&dst->seq); 337 write_seqcount_begin(&dst->seq);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index f51b41f094ef..df9cbc78e168 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -231,7 +231,7 @@ amdgpu_connector_update_scratch_regs(struct drm_connector *connector,
231 if (connector->encoder_ids[i] == 0) 231 if (connector->encoder_ids[i] == 0)
232 break; 232 break;
233 233
234 encoder = drm_encoder_find(connector->dev, 234 encoder = drm_encoder_find(connector->dev, NULL,
235 connector->encoder_ids[i]); 235 connector->encoder_ids[i]);
236 if (!encoder) 236 if (!encoder)
237 continue; 237 continue;
@@ -256,7 +256,7 @@ amdgpu_connector_find_encoder(struct drm_connector *connector,
256 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { 256 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
257 if (connector->encoder_ids[i] == 0) 257 if (connector->encoder_ids[i] == 0)
258 break; 258 break;
259 encoder = drm_encoder_find(connector->dev, 259 encoder = drm_encoder_find(connector->dev, NULL,
260 connector->encoder_ids[i]); 260 connector->encoder_ids[i]);
261 if (!encoder) 261 if (!encoder)
262 continue; 262 continue;
@@ -372,7 +372,7 @@ amdgpu_connector_best_single_encoder(struct drm_connector *connector)
372 372
373 /* pick the encoder ids */ 373 /* pick the encoder ids */
374 if (enc_id) 374 if (enc_id)
375 return drm_encoder_find(connector->dev, enc_id); 375 return drm_encoder_find(connector->dev, NULL, enc_id);
376 return NULL; 376 return NULL;
377} 377}
378 378
@@ -1077,7 +1077,7 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
1077 if (connector->encoder_ids[i] == 0) 1077 if (connector->encoder_ids[i] == 0)
1078 break; 1078 break;
1079 1079
1080 encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]); 1080 encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
1081 if (!encoder) 1081 if (!encoder)
1082 continue; 1082 continue;
1083 1083
@@ -1134,7 +1134,7 @@ amdgpu_connector_dvi_encoder(struct drm_connector *connector)
1134 if (connector->encoder_ids[i] == 0) 1134 if (connector->encoder_ids[i] == 0)
1135 break; 1135 break;
1136 1136
1137 encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]); 1137 encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
1138 if (!encoder) 1138 if (!encoder)
1139 continue; 1139 continue;
1140 1140
@@ -1153,7 +1153,7 @@ amdgpu_connector_dvi_encoder(struct drm_connector *connector)
1153 /* then check use digitial */ 1153 /* then check use digitial */
1154 /* pick the first one */ 1154 /* pick the first one */
1155 if (enc_id) 1155 if (enc_id)
1156 return drm_encoder_find(connector->dev, enc_id); 1156 return drm_encoder_find(connector->dev, NULL, enc_id);
1157 return NULL; 1157 return NULL;
1158} 1158}
1159 1159
@@ -1294,7 +1294,7 @@ u16 amdgpu_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn
1294 if (connector->encoder_ids[i] == 0) 1294 if (connector->encoder_ids[i] == 0)
1295 break; 1295 break;
1296 1296
1297 encoder = drm_encoder_find(connector->dev, 1297 encoder = drm_encoder_find(connector->dev, NULL,
1298 connector->encoder_ids[i]); 1298 connector->encoder_ids[i]);
1299 if (!encoder) 1299 if (!encoder)
1300 continue; 1300 continue;
@@ -1323,7 +1323,7 @@ static bool amdgpu_connector_encoder_is_hbr2(struct drm_connector *connector)
1323 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { 1323 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
1324 if (connector->encoder_ids[i] == 0) 1324 if (connector->encoder_ids[i] == 0)
1325 break; 1325 break;
1326 encoder = drm_encoder_find(connector->dev, 1326 encoder = drm_encoder_find(connector->dev, NULL,
1327 connector->encoder_ids[i]); 1327 connector->encoder_ids[i]);
1328 if (!encoder) 1328 if (!encoder)
1329 continue; 1329 continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index b9ee9073cb0d..a8829af120c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -288,7 +288,7 @@ dce_virtual_encoder(struct drm_connector *connector)
288 if (connector->encoder_ids[i] == 0) 288 if (connector->encoder_ids[i] == 0)
289 break; 289 break;
290 290
291 encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]); 291 encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
292 if (!encoder) 292 if (!encoder)
293 continue; 293 continue;
294 294
@@ -298,7 +298,7 @@ dce_virtual_encoder(struct drm_connector *connector)
298 298
299 /* pick the first one */ 299 /* pick the first one */
300 if (enc_id) 300 if (enc_id)
301 return drm_encoder_find(connector->dev, enc_id); 301 return drm_encoder_find(connector->dev, NULL, enc_id);
302 return NULL; 302 return NULL;
303} 303}
304 304
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index e98bb639268d..8cc228ebdc9a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2638,7 +2638,7 @@ static struct drm_encoder *best_encoder(struct drm_connector *connector)
2638 2638
2639 /* pick the encoder ids */ 2639 /* pick the encoder ids */
2640 if (enc_id) { 2640 if (enc_id) {
2641 obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER); 2641 obj = drm_mode_object_find(connector->dev, NULL, enc_id, DRM_MODE_OBJECT_ENCODER);
2642 if (!obj) { 2642 if (!obj) {
2643 DRM_ERROR("Couldn't find a matching encoder for our connector\n"); 2643 DRM_ERROR("Couldn't find a matching encoder for our connector\n");
2644 return NULL; 2644 return NULL;
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 6f3849ec0c1d..9555a3542022 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -713,7 +713,7 @@ static struct drm_encoder *ast_best_single_encoder(struct drm_connector *connect
713 int enc_id = connector->encoder_ids[0]; 713 int enc_id = connector->encoder_ids[0];
714 /* pick the encoder ids */ 714 /* pick the encoder ids */
715 if (enc_id) 715 if (enc_id)
716 return drm_encoder_find(connector->dev, enc_id); 716 return drm_encoder_find(connector->dev, NULL, enc_id);
717 return NULL; 717 return NULL;
718} 718}
719 719
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 6a91e62da2f4..a24a18fbd65a 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -213,7 +213,7 @@ bochs_connector_best_encoder(struct drm_connector *connector)
213 int enc_id = connector->encoder_ids[0]; 213 int enc_id = connector->encoder_ids[0];
214 /* pick the encoder ids */ 214 /* pick the encoder ids */
215 if (enc_id) 215 if (enc_id)
216 return drm_encoder_find(connector->dev, enc_id); 216 return drm_encoder_find(connector->dev, NULL, enc_id);
217 return NULL; 217 return NULL;
218} 218}
219 219
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index adf9ae0e0b7c..3b99d5a06c16 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -71,7 +71,7 @@ config DRM_PARADE_PS8622
71 71
72config DRM_SIL_SII8620 72config DRM_SIL_SII8620
73 tristate "Silicon Image SII8620 HDMI/MHL bridge" 73 tristate "Silicon Image SII8620 HDMI/MHL bridge"
74 depends on OF 74 depends on OF && RC_CORE
75 select DRM_KMS_HELPER 75 select DRM_KMS_HELPER
76 help 76 help
77 Silicon Image SII8620 HDMI/MHL bridge chip driver. 77 Silicon Image SII8620 HDMI/MHL bridge chip driver.
@@ -84,6 +84,14 @@ config DRM_SII902X
84 ---help--- 84 ---help---
85 Silicon Image sii902x bridge chip driver. 85 Silicon Image sii902x bridge chip driver.
86 86
87config DRM_SII9234
88 tristate "Silicon Image SII9234 HDMI/MHL bridge"
89 depends on OF
90 ---help---
91 Say Y here if you want support for the MHL interface.
92 It is an I2C driver, that detects connection of MHL bridge
93 and starts encapsulation of HDMI signal.
94
87config DRM_TOSHIBA_TC358767 95config DRM_TOSHIBA_TC358767
88 tristate "Toshiba TC358767 eDP bridge" 96 tristate "Toshiba TC358767 eDP bridge"
89 depends on OF 97 depends on OF
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index defcf1e7ca1c..e3d5eb031f18 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
6obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o 6obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o
7obj-$(CONFIG_DRM_SIL_SII8620) += sil-sii8620.o 7obj-$(CONFIG_DRM_SIL_SII8620) += sil-sii8620.o
8obj-$(CONFIG_DRM_SII902X) += sii902x.o 8obj-$(CONFIG_DRM_SII902X) += sii902x.o
9obj-$(CONFIG_DRM_SII9234) += sii9234.o
9obj-$(CONFIG_DRM_TOSHIBA_TC358767) += tc358767.o 10obj-$(CONFIG_DRM_TOSHIBA_TC358767) += tc358767.o
10obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix/ 11obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix/
11obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/ 12obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/
diff --git a/drivers/gpu/drm/bridge/adv7511/Kconfig b/drivers/gpu/drm/bridge/adv7511/Kconfig
index 2fed567f9943..592b9d2ec034 100644
--- a/drivers/gpu/drm/bridge/adv7511/Kconfig
+++ b/drivers/gpu/drm/bridge/adv7511/Kconfig
@@ -21,3 +21,11 @@ config DRM_I2C_ADV7533
21 default y 21 default y
22 help 22 help
23 Support for the Analog Devices ADV7533 DSI to HDMI encoder. 23 Support for the Analog Devices ADV7533 DSI to HDMI encoder.
24
25config DRM_I2C_ADV7511_CEC
26 bool "ADV7511/33 HDMI CEC driver"
27 depends on DRM_I2C_ADV7511
28 select CEC_CORE
29 default y
30 help
31 When selected the HDMI transmitter will support the CEC feature.
diff --git a/drivers/gpu/drm/bridge/adv7511/Makefile b/drivers/gpu/drm/bridge/adv7511/Makefile
index 5ba675534f6e..5bb384938a71 100644
--- a/drivers/gpu/drm/bridge/adv7511/Makefile
+++ b/drivers/gpu/drm/bridge/adv7511/Makefile
@@ -1,4 +1,5 @@
1adv7511-y := adv7511_drv.o 1adv7511-y := adv7511_drv.o
2adv7511-$(CONFIG_DRM_I2C_ADV7511_AUDIO) += adv7511_audio.o 2adv7511-$(CONFIG_DRM_I2C_ADV7511_AUDIO) += adv7511_audio.o
3adv7511-$(CONFIG_DRM_I2C_ADV7511_CEC) += adv7511_cec.o
3adv7511-$(CONFIG_DRM_I2C_ADV7533) += adv7533.o 4adv7511-$(CONFIG_DRM_I2C_ADV7533) += adv7533.o
4obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511.o 5obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511.o
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
index 12ef2d8ee110..b4efcbabf7f7 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511.h
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h
@@ -195,6 +195,25 @@
195#define ADV7511_PACKET_GM(x) ADV7511_PACKET(5, x) 195#define ADV7511_PACKET_GM(x) ADV7511_PACKET(5, x)
196#define ADV7511_PACKET_SPARE(x) ADV7511_PACKET(6, x) 196#define ADV7511_PACKET_SPARE(x) ADV7511_PACKET(6, x)
197 197
198#define ADV7511_REG_CEC_TX_FRAME_HDR 0x00
199#define ADV7511_REG_CEC_TX_FRAME_DATA0 0x01
200#define ADV7511_REG_CEC_TX_FRAME_LEN 0x10
201#define ADV7511_REG_CEC_TX_ENABLE 0x11
202#define ADV7511_REG_CEC_TX_RETRY 0x12
203#define ADV7511_REG_CEC_TX_LOW_DRV_CNT 0x14
204#define ADV7511_REG_CEC_RX_FRAME_HDR 0x15
205#define ADV7511_REG_CEC_RX_FRAME_DATA0 0x16
206#define ADV7511_REG_CEC_RX_FRAME_LEN 0x25
207#define ADV7511_REG_CEC_RX_ENABLE 0x26
208#define ADV7511_REG_CEC_RX_BUFFERS 0x4a
209#define ADV7511_REG_CEC_LOG_ADDR_MASK 0x4b
210#define ADV7511_REG_CEC_LOG_ADDR_0_1 0x4c
211#define ADV7511_REG_CEC_LOG_ADDR_2 0x4d
212#define ADV7511_REG_CEC_CLK_DIV 0x4e
213#define ADV7511_REG_CEC_SOFT_RESET 0x50
214
215#define ADV7533_REG_CEC_OFFSET 0x70
216
198enum adv7511_input_clock { 217enum adv7511_input_clock {
199 ADV7511_INPUT_CLOCK_1X, 218 ADV7511_INPUT_CLOCK_1X,
200 ADV7511_INPUT_CLOCK_2X, 219 ADV7511_INPUT_CLOCK_2X,
@@ -297,6 +316,8 @@ enum adv7511_type {
297 ADV7533, 316 ADV7533,
298}; 317};
299 318
319#define ADV7511_MAX_ADDRS 3
320
300struct adv7511 { 321struct adv7511 {
301 struct i2c_client *i2c_main; 322 struct i2c_client *i2c_main;
302 struct i2c_client *i2c_edid; 323 struct i2c_client *i2c_edid;
@@ -341,15 +362,27 @@ struct adv7511 {
341 362
342 enum adv7511_type type; 363 enum adv7511_type type;
343 struct platform_device *audio_pdev; 364 struct platform_device *audio_pdev;
365
366 struct cec_adapter *cec_adap;
367 u8 cec_addr[ADV7511_MAX_ADDRS];
368 u8 cec_valid_addrs;
369 bool cec_enabled_adap;
370 struct clk *cec_clk;
371 u32 cec_clk_freq;
344}; 372};
345 373
374#ifdef CONFIG_DRM_I2C_ADV7511_CEC
375int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511,
376 unsigned int offset);
377void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1);
378#endif
379
346#ifdef CONFIG_DRM_I2C_ADV7533 380#ifdef CONFIG_DRM_I2C_ADV7533
347void adv7533_dsi_power_on(struct adv7511 *adv); 381void adv7533_dsi_power_on(struct adv7511 *adv);
348void adv7533_dsi_power_off(struct adv7511 *adv); 382void adv7533_dsi_power_off(struct adv7511 *adv);
349void adv7533_mode_set(struct adv7511 *adv, struct drm_display_mode *mode); 383void adv7533_mode_set(struct adv7511 *adv, struct drm_display_mode *mode);
350int adv7533_patch_registers(struct adv7511 *adv); 384int adv7533_patch_registers(struct adv7511 *adv);
351void adv7533_uninit_cec(struct adv7511 *adv); 385int adv7533_patch_cec_registers(struct adv7511 *adv);
352int adv7533_init_cec(struct adv7511 *adv);
353int adv7533_attach_dsi(struct adv7511 *adv); 386int adv7533_attach_dsi(struct adv7511 *adv);
354void adv7533_detach_dsi(struct adv7511 *adv); 387void adv7533_detach_dsi(struct adv7511 *adv);
355int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv); 388int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv);
@@ -372,11 +405,7 @@ static inline int adv7533_patch_registers(struct adv7511 *adv)
372 return -ENODEV; 405 return -ENODEV;
373} 406}
374 407
375static inline void adv7533_uninit_cec(struct adv7511 *adv) 408static inline int adv7533_patch_cec_registers(struct adv7511 *adv)
376{
377}
378
379static inline int adv7533_init_cec(struct adv7511 *adv)
380{ 409{
381 return -ENODEV; 410 return -ENODEV;
382} 411}
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
new file mode 100644
index 000000000000..b33d730e4d73
--- /dev/null
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
@@ -0,0 +1,337 @@
1/*
2 * adv7511_cec.c - Analog Devices ADV7511/33 cec driver
3 *
4 * Copyright 2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
5 *
6 * This program is free software; you may redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
11 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
12 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
13 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
14 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
15 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
16 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
17 * SOFTWARE.
18 *
19 */
20
21#include <linux/device.h>
22#include <linux/module.h>
23#include <linux/of_device.h>
24#include <linux/slab.h>
25#include <linux/clk.h>
26
27#include <media/cec.h>
28
29#include "adv7511.h"
30
31#define ADV7511_INT1_CEC_MASK \
32 (ADV7511_INT1_CEC_TX_READY | ADV7511_INT1_CEC_TX_ARBIT_LOST | \
33 ADV7511_INT1_CEC_TX_RETRY_TIMEOUT | ADV7511_INT1_CEC_RX_READY1)
34
/*
 * Translate the raw CEC TX interrupt bits from INT(1) into a CEC-framework
 * transmit-done notification. A read of TX_ENABLE guards against spurious
 * interrupts: if bit 0 is clear no transmission was pending, so the status
 * bits are ignored. Only one outcome is reported per call (arbitration lost,
 * retry timeout, or OK — in that priority order).
 */
35static void adv_cec_tx_raw_status(struct adv7511 *adv7511, u8 tx_raw_status)
36{
	/* ADV7533 maps the CEC register block at a fixed offset. */
37 unsigned int offset = adv7511->type == ADV7533 ?
38 ADV7533_REG_CEC_OFFSET : 0;
39 unsigned int val;
40
41 if (regmap_read(adv7511->regmap_cec,
42 ADV7511_REG_CEC_TX_ENABLE + offset, &val))
43 return;
44
	/* No transmit in flight: nothing to report to the CEC core. */
45 if ((val & 0x01) == 0)
46 return;
47
48 if (tx_raw_status & ADV7511_INT1_CEC_TX_ARBIT_LOST) {
49 cec_transmit_attempt_done(adv7511->cec_adap,
50 CEC_TX_STATUS_ARB_LOST);
51 return;
52 }
53 if (tx_raw_status & ADV7511_INT1_CEC_TX_RETRY_TIMEOUT) {
54 u8 status;
55 u8 err_cnt = 0;
56 u8 nack_cnt = 0;
57 u8 low_drive_cnt = 0;
58 unsigned int cnt;
59
60 /*
61 * We set this status bit since this hardware performs
62 * retransmissions.
63 */
64 status = CEC_TX_STATUS_MAX_RETRIES;
	/*
	 * TX_LOW_DRV_CNT packs two counters: low nibble = NACK count,
	 * high nibble = low-drive count. If the read itself fails,
	 * report a generic error instead.
	 */
65 if (regmap_read(adv7511->regmap_cec,
66 ADV7511_REG_CEC_TX_LOW_DRV_CNT + offset, &cnt)) {
67 err_cnt = 1;
68 status |= CEC_TX_STATUS_ERROR;
69 } else {
70 nack_cnt = cnt & 0xf;
71 if (nack_cnt)
72 status |= CEC_TX_STATUS_NACK;
73 low_drive_cnt = cnt >> 4;
74 if (low_drive_cnt)
75 status |= CEC_TX_STATUS_LOW_DRIVE;
76 }
77 cec_transmit_done(adv7511->cec_adap, status,
78 0, nack_cnt, low_drive_cnt, err_cnt);
79 return;
80 }
81 if (tx_raw_status & ADV7511_INT1_CEC_TX_READY) {
82 cec_transmit_attempt_done(adv7511->cec_adap, CEC_TX_STATUS_OK);
83 return;
84 }
85}
86
/*
 * CEC half of the chip's interrupt handler, called from adv7511_irq_process()
 * with the raw INT(1) status. Forwards any TX completion bits to
 * adv_cec_tx_raw_status(), then, if an RX frame is ready, reads it out of the
 * RX buffer registers, re-arms the buffer, and hands the message to the CEC
 * core.
 */
87void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1)
88{
89 unsigned int offset = adv7511->type == ADV7533 ?
90 ADV7533_REG_CEC_OFFSET : 0;
91 const u32 irq_tx_mask = ADV7511_INT1_CEC_TX_READY |
92 ADV7511_INT1_CEC_TX_ARBIT_LOST |
93 ADV7511_INT1_CEC_TX_RETRY_TIMEOUT;
94 struct cec_msg msg = {};
95 unsigned int len;
96 unsigned int val;
97 u8 i;
98
99 if (irq1 & irq_tx_mask)
100 adv_cec_tx_raw_status(adv7511, irq1);
101
102 if (!(irq1 & ADV7511_INT1_CEC_RX_READY1))
103 return;
104
105 if (regmap_read(adv7511->regmap_cec,
106 ADV7511_REG_CEC_RX_FRAME_LEN + offset, &len))
107 return;
108
	/* Length field is 5 bits wide; clamp to the 16-byte CEC maximum. */
109 msg.len = len & 0x1f;
110
111 if (msg.len > 16)
112 msg.len = 16;
113
114 if (!msg.len)
115 return;
116
	/* Header byte and data bytes are consecutive registers. */
117 for (i = 0; i < msg.len; i++) {
118 regmap_read(adv7511->regmap_cec,
119 i + ADV7511_REG_CEC_RX_FRAME_HDR + offset, &val);
120 msg.msg[i] = val;
121 }
122
123 /* toggle to re-enable rx 1 */
124 regmap_write(adv7511->regmap_cec,
125 ADV7511_REG_CEC_RX_BUFFERS + offset, 1);
126 regmap_write(adv7511->regmap_cec,
127 ADV7511_REG_CEC_RX_BUFFERS + offset, 0);
128 cec_received_msg(adv7511->cec_adap, &msg);
129}
130
/*
 * cec_adap_ops.adap_enable: power the CEC section up or down.
 * On enable: ungate the CEC clock divider, flush the RX buffers, disable TX,
 * and unmask the four CEC interrupt sources in INT_ENABLE(1).
 * On disable: mask the CEC interrupts, drop the extra logical-address masks,
 * power the section down and forget the cached logical addresses.
 * Transitions to the already-current state are no-ops apart from updating
 * the cached flag.
 */
131static int adv7511_cec_adap_enable(struct cec_adapter *adap, bool enable)
132{
133 struct adv7511 *adv7511 = cec_get_drvdata(adap);
134 unsigned int offset = adv7511->type == ADV7533 ?
135 ADV7533_REG_CEC_OFFSET : 0;
136
	/* CEC i2c dummy device was never created: adapter is unusable. */
137 if (adv7511->i2c_cec == NULL)
138 return -EIO;
139
140 if (!adv7511->cec_enabled_adap && enable) {
141 /* power up cec section */
142 regmap_update_bits(adv7511->regmap_cec,
143 ADV7511_REG_CEC_CLK_DIV + offset,
144 0x03, 0x01);
145 /* legacy mode and clear all rx buffers */
146 regmap_write(adv7511->regmap_cec,
147 ADV7511_REG_CEC_RX_BUFFERS + offset, 0x07);
148 regmap_write(adv7511->regmap_cec,
149 ADV7511_REG_CEC_RX_BUFFERS + offset, 0);
150 /* initially disable tx */
151 regmap_update_bits(adv7511->regmap_cec,
152 ADV7511_REG_CEC_TX_ENABLE + offset, 1, 0);
153 /* enabled irqs: */
154 /* tx: ready */
155 /* tx: arbitration lost */
156 /* tx: retry timeout */
157 /* rx: ready 1 */
158 regmap_update_bits(adv7511->regmap,
159 ADV7511_REG_INT_ENABLE(1), 0x3f,
160 ADV7511_INT1_CEC_MASK);
161 } else if (adv7511->cec_enabled_adap && !enable) {
162 regmap_update_bits(adv7511->regmap,
163 ADV7511_REG_INT_ENABLE(1), 0x3f, 0);
164 /* disable address mask 1-3 */
165 regmap_update_bits(adv7511->regmap_cec,
166 ADV7511_REG_CEC_LOG_ADDR_MASK + offset,
167 0x70, 0x00);
168 /* power down cec section */
169 regmap_update_bits(adv7511->regmap_cec,
170 ADV7511_REG_CEC_CLK_DIV + offset,
171 0x03, 0x00);
172 adv7511->cec_valid_addrs = 0;
173 }
174 adv7511->cec_enabled_adap = enable;
175 return 0;
176}
177
/*
 * cec_adap_ops.adap_log_addr: claim (or release) a CEC logical address.
 * The hardware supports up to ADV7511_MAX_ADDRS (3) addresses, one per
 * address-mask slot. CEC_LOG_ADDR_INVALID releases all claimed addresses.
 * A slot is reused if the address is already programmed; otherwise the first
 * free slot is taken, or -ENXIO is returned when all slots are in use.
 */
178static int adv7511_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
179{
180 struct adv7511 *adv7511 = cec_get_drvdata(adap);
181 unsigned int offset = adv7511->type == ADV7533 ?
182 ADV7533_REG_CEC_OFFSET : 0;
183 unsigned int i, free_idx = ADV7511_MAX_ADDRS;
184
	/* When the adapter is disabled only an "unconfigure" request is OK. */
185 if (!adv7511->cec_enabled_adap)
186 return addr == CEC_LOG_ADDR_INVALID ? 0 : -EIO;
187
188 if (addr == CEC_LOG_ADDR_INVALID) {
189 regmap_update_bits(adv7511->regmap_cec,
190 ADV7511_REG_CEC_LOG_ADDR_MASK + offset,
191 0x70, 0);
192 adv7511->cec_valid_addrs = 0;
193 return 0;
194 }
195
	/* Find either an existing slot for this address or the first free one. */
196 for (i = 0; i < ADV7511_MAX_ADDRS; i++) {
197 bool is_valid = adv7511->cec_valid_addrs & (1 << i);
198
199 if (free_idx == ADV7511_MAX_ADDRS && !is_valid)
200 free_idx = i;
201 if (is_valid && adv7511->cec_addr[i] == addr)
202 return 0;
203 }
204 if (i == ADV7511_MAX_ADDRS) {
205 i = free_idx;
206 if (i == ADV7511_MAX_ADDRS)
207 return -ENXIO;
208 }
209 adv7511->cec_addr[i] = addr;
210 adv7511->cec_valid_addrs |= 1 << i;
211
	/*
	 * Slots 0 and 1 share LOG_ADDR_0_1 (low/high nibble); slot 2 lives
	 * in LOG_ADDR_2. Each slot has its own enable bit in LOG_ADDR_MASK.
	 */
212 switch (i) {
213 case 0:
214 /* enable address mask 0 */
215 regmap_update_bits(adv7511->regmap_cec,
216 ADV7511_REG_CEC_LOG_ADDR_MASK + offset,
217 0x10, 0x10);
218 /* set address for mask 0 */
219 regmap_update_bits(adv7511->regmap_cec,
220 ADV7511_REG_CEC_LOG_ADDR_0_1 + offset,
221 0x0f, addr);
222 break;
223 case 1:
224 /* enable address mask 1 */
225 regmap_update_bits(adv7511->regmap_cec,
226 ADV7511_REG_CEC_LOG_ADDR_MASK + offset,
227 0x20, 0x20);
228 /* set address for mask 1 */
229 regmap_update_bits(adv7511->regmap_cec,
230 ADV7511_REG_CEC_LOG_ADDR_0_1 + offset,
231 0xf0, addr << 4);
232 break;
233 case 2:
234 /* enable address mask 2 */
235 regmap_update_bits(adv7511->regmap_cec,
236 ADV7511_REG_CEC_LOG_ADDR_MASK + offset,
237 0x40, 0x40);
238 /* set address for mask 2 */
239 regmap_update_bits(adv7511->regmap_cec,
240 ADV7511_REG_CEC_LOG_ADDR_2 + offset,
241 0x0f, addr);
242 break;
243 }
244 return 0;
245}
246
/*
 * cec_adap_ops.adap_transmit: queue one CEC message for transmission.
 * Programs the hardware retry count, clears any stale TX interrupt status,
 * copies header + payload into the TX frame registers, sets the frame
 * length and starts the transmit. Completion is signalled asynchronously
 * via adv_cec_tx_raw_status() from the interrupt path.
 */
247static int adv7511_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
248 u32 signal_free_time, struct cec_msg *msg)
249{
250 struct adv7511 *adv7511 = cec_get_drvdata(adap);
251 unsigned int offset = adv7511->type == ADV7533 ?
252 ADV7533_REG_CEC_OFFSET : 0;
253 u8 len = msg->len;
254 unsigned int i;
255
256 /*
257 * The number of retries is the number of attempts - 1, but retry
258 * at least once. It's not clear if a value of 0 is allowed, so
259 * let's do at least one retry.
260 */
261 regmap_update_bits(adv7511->regmap_cec,
262 ADV7511_REG_CEC_TX_RETRY + offset,
263 0x70, max(1, attempts - 1) << 4);
264
265 /* blocking, clear cec tx irq status */
266 regmap_update_bits(adv7511->regmap, ADV7511_REG_INT(1), 0x38, 0x38);
267
268 /* write data */
269 for (i = 0; i < len; i++)
270 regmap_write(adv7511->regmap_cec,
271 i + ADV7511_REG_CEC_TX_FRAME_HDR + offset,
272 msg->msg[i]);
273
274 /* set length (data + header) */
275 regmap_write(adv7511->regmap_cec,
276 ADV7511_REG_CEC_TX_FRAME_LEN + offset, len);
277 /* start transmit, enable tx */
278 regmap_write(adv7511->regmap_cec,
279 ADV7511_REG_CEC_TX_ENABLE + offset, 0x01);
280 return 0;
281}
282
/* Low-level adapter operations registered with the CEC framework. */
283static const struct cec_adap_ops adv7511_cec_adap_ops = {
284 .adap_enable = adv7511_cec_adap_enable,
285 .adap_log_addr = adv7511_cec_adap_log_addr,
286 .adap_transmit = adv7511_cec_adap_transmit,
287};
288
/*
 * Acquire and enable the "cec" clock from DT and cache its rate, which
 * adv7511_cec_init() uses to derive the CEC clock divider.
 *
 * NOTE(review): any devm_clk_get() failure (including -EPROBE_DEFER) is
 * returned as-is and cec_clk is reset to NULL, making CEC a hard failure
 * for the probe path — confirm that treating a missing clock as fatal
 * (rather than optional CEC) is intended.
 */
289static int adv7511_cec_parse_dt(struct device *dev, struct adv7511 *adv7511)
290{
291 adv7511->cec_clk = devm_clk_get(dev, "cec");
292 if (IS_ERR(adv7511->cec_clk)) {
293 int ret = PTR_ERR(adv7511->cec_clk);
294
295 adv7511->cec_clk = NULL;
296 return ret;
297 }
298 clk_prepare_enable(adv7511->cec_clk);
299 adv7511->cec_clk_freq = clk_get_rate(adv7511->cec_clk);
300 return 0;
301}
302
/*
 * One-time CEC setup called from probe: fetch the CEC clock, allocate a
 * CEC adapter, soft-reset the CEC block, program the clock divider for a
 * 750 kHz reference, and register the adapter with the CEC core.
 *
 * @offset: ADV7533_REG_CEC_OFFSET on ADV7533, 0 on ADV7511.
 *
 * On cec_register_adapter() failure the adapter is deleted and cec_adap
 * is cleared so later cec_* calls degrade to no-ops.
 */
303int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511,
304 unsigned int offset)
305{
306 int ret = adv7511_cec_parse_dt(dev, adv7511);
307
308 if (ret)
309 return ret;
310
311 adv7511->cec_adap = cec_allocate_adapter(&adv7511_cec_adap_ops,
312 adv7511, dev_name(dev), CEC_CAP_DEFAULTS, ADV7511_MAX_ADDRS);
313 if (IS_ERR(adv7511->cec_adap))
314 return PTR_ERR(adv7511->cec_adap);
315
	/* Take CEC out of power-down (main register map). */
316 regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset, 0);
317 /* cec soft reset */
318 regmap_write(adv7511->regmap_cec,
319 ADV7511_REG_CEC_SOFT_RESET + offset, 0x01);
320 regmap_write(adv7511->regmap_cec,
321 ADV7511_REG_CEC_SOFT_RESET + offset, 0x00);
322
323 /* legacy mode */
324 regmap_write(adv7511->regmap_cec,
325 ADV7511_REG_CEC_RX_BUFFERS + offset, 0x00);
326
	/* Divider: CEC block expects a 750 kHz timing reference. */
327 regmap_write(adv7511->regmap_cec,
328 ADV7511_REG_CEC_CLK_DIV + offset,
329 ((adv7511->cec_clk_freq / 750000) - 1) << 2);
330
331 ret = cec_register_adapter(adv7511->cec_adap, dev);
332 if (ret) {
333 cec_delete_adapter(adv7511->cec_adap);
334 adv7511->cec_adap = NULL;
335 }
336 return ret;
337}
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index bd7dbae1119e..31ca883bda83 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -11,12 +11,15 @@
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/of_device.h> 12#include <linux/of_device.h>
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <linux/clk.h>
14 15
15#include <drm/drmP.h> 16#include <drm/drmP.h>
16#include <drm/drm_atomic.h> 17#include <drm/drm_atomic.h>
17#include <drm/drm_atomic_helper.h> 18#include <drm/drm_atomic_helper.h>
18#include <drm/drm_edid.h> 19#include <drm/drm_edid.h>
19 20
21#include <media/cec.h>
22
20#include "adv7511.h" 23#include "adv7511.h"
21 24
22/* ADI recommended values for proper operation. */ 25/* ADI recommended values for proper operation. */
@@ -336,8 +339,10 @@ static void __adv7511_power_on(struct adv7511 *adv7511)
336 */ 339 */
337 regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0), 340 regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
338 ADV7511_INT0_EDID_READY | ADV7511_INT0_HPD); 341 ADV7511_INT0_EDID_READY | ADV7511_INT0_HPD);
339 regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1), 342 regmap_update_bits(adv7511->regmap,
340 ADV7511_INT1_DDC_ERROR); 343 ADV7511_REG_INT_ENABLE(1),
344 ADV7511_INT1_DDC_ERROR,
345 ADV7511_INT1_DDC_ERROR);
341 } 346 }
342 347
343 /* 348 /*
@@ -373,6 +378,9 @@ static void __adv7511_power_off(struct adv7511 *adv7511)
373 regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER, 378 regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
374 ADV7511_POWER_POWER_DOWN, 379 ADV7511_POWER_POWER_DOWN,
375 ADV7511_POWER_POWER_DOWN); 380 ADV7511_POWER_POWER_DOWN);
381 regmap_update_bits(adv7511->regmap,
382 ADV7511_REG_INT_ENABLE(1),
383 ADV7511_INT1_DDC_ERROR, 0);
376 regcache_mark_dirty(adv7511->regmap); 384 regcache_mark_dirty(adv7511->regmap);
377} 385}
378 386
@@ -423,6 +431,8 @@ static void adv7511_hpd_work(struct work_struct *work)
423 431
424 if (adv7511->connector.status != status) { 432 if (adv7511->connector.status != status) {
425 adv7511->connector.status = status; 433 adv7511->connector.status = status;
434 if (status == connector_status_disconnected)
435 cec_phys_addr_invalidate(adv7511->cec_adap);
426 drm_kms_helper_hotplug_event(adv7511->connector.dev); 436 drm_kms_helper_hotplug_event(adv7511->connector.dev);
427 } 437 }
428} 438}
@@ -453,6 +463,10 @@ static int adv7511_irq_process(struct adv7511 *adv7511, bool process_hpd)
453 wake_up_all(&adv7511->wq); 463 wake_up_all(&adv7511->wq);
454 } 464 }
455 465
466#ifdef CONFIG_DRM_I2C_ADV7511_CEC
467 adv7511_cec_irq_process(adv7511, irq1);
468#endif
469
456 return 0; 470 return 0;
457} 471}
458 472
@@ -595,6 +609,8 @@ static int adv7511_get_modes(struct adv7511 *adv7511,
595 609
596 kfree(edid); 610 kfree(edid);
597 611
612 cec_s_phys_addr_from_edid(adv7511->cec_adap, edid);
613
598 return count; 614 return count;
599} 615}
600 616
@@ -919,6 +935,65 @@ static void adv7511_uninit_regulators(struct adv7511 *adv)
919 regulator_bulk_disable(adv->num_supplies, adv->supplies); 935 regulator_bulk_disable(adv->num_supplies, adv->supplies);
920} 936}
921 937
/*
 * regmap volatile_reg callback for the CEC register map: the RX frame
 * registers, RX buffer control and TX low-drive/NACK counters are written
 * by hardware and must bypass the regcache. On ADV7533 the register is
 * normalized by subtracting the fixed CEC block offset before comparison.
 */
938static bool adv7511_cec_register_volatile(struct device *dev, unsigned int reg)
939{
940 struct i2c_client *i2c = to_i2c_client(dev);
941 struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
942
943 if (adv7511->type == ADV7533)
944 reg -= ADV7533_REG_CEC_OFFSET;
945
946 switch (reg) {
947 case ADV7511_REG_CEC_RX_FRAME_HDR:
948 case ADV7511_REG_CEC_RX_FRAME_DATA0...
949 ADV7511_REG_CEC_RX_FRAME_DATA0 + 14:
950 case ADV7511_REG_CEC_RX_FRAME_LEN:
951 case ADV7511_REG_CEC_RX_BUFFERS:
952 case ADV7511_REG_CEC_TX_LOW_DRV_CNT:
953 return true;
954 }
955
956 return false;
957}
958
/* regmap configuration for the dedicated CEC i2c address (cached, 8-bit). */
959static const struct regmap_config adv7511_cec_regmap_config = {
960 .reg_bits = 8,
961 .val_bits = 8,
962
963 .max_register = 0xff,
964 .cache_type = REGCACHE_RBTREE,
965 .volatile_reg = adv7511_cec_register_volatile,
966};
967
/*
 * Create the dummy i2c client for the CEC register page (main address - 1)
 * and wrap it in a regmap. On ADV7533 the fixed CEC register patch is also
 * applied. On any failure the dummy client is unregistered again; the
 * regmap itself is devm-managed.
 */
968static int adv7511_init_cec_regmap(struct adv7511 *adv)
969{
970 int ret;
971
972 adv->i2c_cec = i2c_new_dummy(adv->i2c_main->adapter,
973 adv->i2c_main->addr - 1);
974 if (!adv->i2c_cec)
975 return -ENOMEM;
	/* Needed by adv7511_cec_register_volatile() to find the adv7511. */
976 i2c_set_clientdata(adv->i2c_cec, adv);
977
978 adv->regmap_cec = devm_regmap_init_i2c(adv->i2c_cec,
979 &adv7511_cec_regmap_config);
980 if (IS_ERR(adv->regmap_cec)) {
981 ret = PTR_ERR(adv->regmap_cec);
982 goto err;
983 }
984
985 if (adv->type == ADV7533) {
986 ret = adv7533_patch_cec_registers(adv);
987 if (ret)
988 goto err;
989 }
990
991 return 0;
992err:
993 i2c_unregister_device(adv->i2c_cec);
994 return ret;
995}
996
922static int adv7511_parse_dt(struct device_node *np, 997static int adv7511_parse_dt(struct device_node *np,
923 struct adv7511_link_config *config) 998 struct adv7511_link_config *config)
924{ 999{
@@ -1009,6 +1084,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
1009 struct device *dev = &i2c->dev; 1084 struct device *dev = &i2c->dev;
1010 unsigned int main_i2c_addr = i2c->addr << 1; 1085 unsigned int main_i2c_addr = i2c->addr << 1;
1011 unsigned int edid_i2c_addr = main_i2c_addr + 4; 1086 unsigned int edid_i2c_addr = main_i2c_addr + 4;
1087 unsigned int offset;
1012 unsigned int val; 1088 unsigned int val;
1013 int ret; 1089 int ret;
1014 1090
@@ -1092,11 +1168,9 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
1092 goto uninit_regulators; 1168 goto uninit_regulators;
1093 } 1169 }
1094 1170
1095 if (adv7511->type == ADV7533) { 1171 ret = adv7511_init_cec_regmap(adv7511);
1096 ret = adv7533_init_cec(adv7511); 1172 if (ret)
1097 if (ret) 1173 goto err_i2c_unregister_edid;
1098 goto err_i2c_unregister_edid;
1099 }
1100 1174
1101 INIT_WORK(&adv7511->hpd_work, adv7511_hpd_work); 1175 INIT_WORK(&adv7511->hpd_work, adv7511_hpd_work);
1102 1176
@@ -1111,10 +1185,6 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
1111 goto err_unregister_cec; 1185 goto err_unregister_cec;
1112 } 1186 }
1113 1187
1114 /* CEC is unused for now */
1115 regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL,
1116 ADV7511_CEC_CTRL_POWER_DOWN);
1117
1118 adv7511_power_off(adv7511); 1188 adv7511_power_off(adv7511);
1119 1189
1120 i2c_set_clientdata(i2c, adv7511); 1190 i2c_set_clientdata(i2c, adv7511);
@@ -1129,10 +1199,23 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
1129 1199
1130 adv7511_audio_init(dev, adv7511); 1200 adv7511_audio_init(dev, adv7511);
1131 1201
1202 offset = adv7511->type == ADV7533 ? ADV7533_REG_CEC_OFFSET : 0;
1203
1204#ifdef CONFIG_DRM_I2C_ADV7511_CEC
1205 ret = adv7511_cec_init(dev, adv7511, offset);
1206 if (ret)
1207 goto err_unregister_cec;
1208#else
1209 regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset,
1210 ADV7511_CEC_CTRL_POWER_DOWN);
1211#endif
1212
1132 return 0; 1213 return 0;
1133 1214
1134err_unregister_cec: 1215err_unregister_cec:
1135 adv7533_uninit_cec(adv7511); 1216 i2c_unregister_device(adv7511->i2c_cec);
1217 if (adv7511->cec_clk)
1218 clk_disable_unprepare(adv7511->cec_clk);
1136err_i2c_unregister_edid: 1219err_i2c_unregister_edid:
1137 i2c_unregister_device(adv7511->i2c_edid); 1220 i2c_unregister_device(adv7511->i2c_edid);
1138uninit_regulators: 1221uninit_regulators:
@@ -1145,10 +1228,11 @@ static int adv7511_remove(struct i2c_client *i2c)
1145{ 1228{
1146 struct adv7511 *adv7511 = i2c_get_clientdata(i2c); 1229 struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
1147 1230
1148 if (adv7511->type == ADV7533) { 1231 if (adv7511->type == ADV7533)
1149 adv7533_detach_dsi(adv7511); 1232 adv7533_detach_dsi(adv7511);
1150 adv7533_uninit_cec(adv7511); 1233 i2c_unregister_device(adv7511->i2c_cec);
1151 } 1234 if (adv7511->cec_clk)
1235 clk_disable_unprepare(adv7511->cec_clk);
1152 1236
1153 adv7511_uninit_regulators(adv7511); 1237 adv7511_uninit_regulators(adv7511);
1154 1238
@@ -1156,6 +1240,8 @@ static int adv7511_remove(struct i2c_client *i2c)
1156 1240
1157 adv7511_audio_exit(adv7511); 1241 adv7511_audio_exit(adv7511);
1158 1242
1243 cec_unregister_adapter(adv7511->cec_adap);
1244
1159 i2c_unregister_device(adv7511->i2c_edid); 1245 i2c_unregister_device(adv7511->i2c_edid);
1160 1246
1161 return 0; 1247 return 0;
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c
index ac804f81e2f6..185b6d842166 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7533.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c
@@ -32,14 +32,6 @@ static const struct reg_sequence adv7533_cec_fixed_registers[] = {
32 { 0x05, 0xc8 }, 32 { 0x05, 0xc8 },
33}; 33};
34 34
35static const struct regmap_config adv7533_cec_regmap_config = {
36 .reg_bits = 8,
37 .val_bits = 8,
38
39 .max_register = 0xff,
40 .cache_type = REGCACHE_RBTREE,
41};
42
43static void adv7511_dsi_config_timing_gen(struct adv7511 *adv) 35static void adv7511_dsi_config_timing_gen(struct adv7511 *adv)
44{ 36{
45 struct mipi_dsi_device *dsi = adv->dsi; 37 struct mipi_dsi_device *dsi = adv->dsi;
@@ -145,37 +137,11 @@ int adv7533_patch_registers(struct adv7511 *adv)
145 ARRAY_SIZE(adv7533_fixed_registers)); 137 ARRAY_SIZE(adv7533_fixed_registers));
146} 138}
147 139
148void adv7533_uninit_cec(struct adv7511 *adv) 140int adv7533_patch_cec_registers(struct adv7511 *adv)
149{
150 i2c_unregister_device(adv->i2c_cec);
151}
152
153int adv7533_init_cec(struct adv7511 *adv)
154{ 141{
155 int ret; 142 return regmap_register_patch(adv->regmap_cec,
156
157 adv->i2c_cec = i2c_new_dummy(adv->i2c_main->adapter,
158 adv->i2c_main->addr - 1);
159 if (!adv->i2c_cec)
160 return -ENOMEM;
161
162 adv->regmap_cec = devm_regmap_init_i2c(adv->i2c_cec,
163 &adv7533_cec_regmap_config);
164 if (IS_ERR(adv->regmap_cec)) {
165 ret = PTR_ERR(adv->regmap_cec);
166 goto err;
167 }
168
169 ret = regmap_register_patch(adv->regmap_cec,
170 adv7533_cec_fixed_registers, 143 adv7533_cec_fixed_registers,
171 ARRAY_SIZE(adv7533_cec_fixed_registers)); 144 ARRAY_SIZE(adv7533_cec_fixed_registers));
172 if (ret)
173 goto err;
174
175 return 0;
176err:
177 adv7533_uninit_cec(adv);
178 return ret;
179} 145}
180 146
181int adv7533_attach_dsi(struct adv7511 *adv) 147int adv7533_attach_dsi(struct adv7511 *adv)
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index e0cca19b4044..6d99d4a3beb3 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -188,7 +188,15 @@ EXPORT_SYMBOL(drm_panel_bridge_add);
188 */ 188 */
189void drm_panel_bridge_remove(struct drm_bridge *bridge) 189void drm_panel_bridge_remove(struct drm_bridge *bridge)
190{ 190{
191 struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge); 191 struct panel_bridge *panel_bridge;
192
193 if (!bridge)
194 return;
195
196 if (bridge->funcs != &panel_bridge_bridge_funcs)
197 return;
198
199 panel_bridge = drm_bridge_to_panel_bridge(bridge);
192 200
193 drm_bridge_remove(bridge); 201 drm_bridge_remove(bridge);
194 devm_kfree(panel_bridge->panel->dev, bridge); 202 devm_kfree(panel_bridge->panel->dev, bridge);
diff --git a/drivers/gpu/drm/bridge/sii9234.c b/drivers/gpu/drm/bridge/sii9234.c
new file mode 100644
index 000000000000..c77000626c22
--- /dev/null
+++ b/drivers/gpu/drm/bridge/sii9234.c
@@ -0,0 +1,994 @@
1/*
2 * Copyright (C) 2017 Samsung Electronics
3 *
4 * Authors:
5 * Tomasz Stanislawski <t.stanislaws@samsung.com>
6 * Maciej Purski <m.purski@samsung.com>
7 *
8 * Based on sii9234 driver created by:
9 * Adam Hampson <ahampson@sta.samsung.com>
10 * Erik Gilling <konkers@android.com>
11 * Shankar Bandal <shankar.b@samsung.com>
12 * Dharam Kumar <dharam.kr@samsung.com>
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program
26 *
27 */
28#include <drm/bridge/mhl.h>
29#include <drm/drm_crtc.h>
30#include <drm/drm_edid.h>
31
32#include <linux/delay.h>
33#include <linux/err.h>
34#include <linux/gpio/consumer.h>
35#include <linux/i2c.h>
36#include <linux/interrupt.h>
37#include <linux/irq.h>
38#include <linux/kernel.h>
39#include <linux/module.h>
40#include <linux/mutex.h>
41#include <linux/regulator/consumer.h>
42#include <linux/slab.h>
43
44#define CBUS_DEVCAP_OFFSET 0x80
45
46#define SII9234_MHL_VERSION 0x11
47#define SII9234_SCRATCHPAD_SIZE 0x10
48#define SII9234_INT_STAT_SIZE 0x33
49
50#define BIT_TMDS_CCTRL_TMDS_OE BIT(4)
51#define MHL_HPD_OUT_OVR_EN BIT(4)
52#define MHL_HPD_OUT_OVR_VAL BIT(5)
53#define MHL_INIT_TIMEOUT 0x0C
54
55/* MHL Tx registers and bits */
56#define MHL_TX_SRST 0x05
57#define MHL_TX_SYSSTAT_REG 0x09
58#define MHL_TX_INTR1_REG 0x71
59#define MHL_TX_INTR4_REG 0x74
60#define MHL_TX_INTR1_ENABLE_REG 0x75
61#define MHL_TX_INTR4_ENABLE_REG 0x78
62#define MHL_TX_INT_CTRL_REG 0x79
63#define MHL_TX_TMDS_CCTRL 0x80
64#define MHL_TX_DISC_CTRL1_REG 0x90
65#define MHL_TX_DISC_CTRL2_REG 0x91
66#define MHL_TX_DISC_CTRL3_REG 0x92
67#define MHL_TX_DISC_CTRL4_REG 0x93
68#define MHL_TX_DISC_CTRL5_REG 0x94
69#define MHL_TX_DISC_CTRL6_REG 0x95
70#define MHL_TX_DISC_CTRL7_REG 0x96
71#define MHL_TX_DISC_CTRL8_REG 0x97
72#define MHL_TX_STAT2_REG 0x99
73#define MHL_TX_MHLTX_CTL1_REG 0xA0
74#define MHL_TX_MHLTX_CTL2_REG 0xA1
75#define MHL_TX_MHLTX_CTL4_REG 0xA3
76#define MHL_TX_MHLTX_CTL6_REG 0xA5
77#define MHL_TX_MHLTX_CTL7_REG 0xA6
78
79#define RSEN_STATUS BIT(2)
80#define HPD_CHANGE_INT BIT(6)
81#define RSEN_CHANGE_INT BIT(5)
82#define RGND_READY_INT BIT(6)
83#define VBUS_LOW_INT BIT(5)
84#define CBUS_LKOUT_INT BIT(4)
85#define MHL_DISC_FAIL_INT BIT(3)
86#define MHL_EST_INT BIT(2)
87#define HPD_CHANGE_INT_MASK BIT(6)
88#define RSEN_CHANGE_INT_MASK BIT(5)
89
90#define RGND_READY_MASK BIT(6)
91#define CBUS_LKOUT_MASK BIT(4)
92#define MHL_DISC_FAIL_MASK BIT(3)
93#define MHL_EST_MASK BIT(2)
94
95#define SKIP_GND BIT(6)
96
97#define ATT_THRESH_SHIFT 0x04
98#define ATT_THRESH_MASK (0x03 << ATT_THRESH_SHIFT)
99#define USB_D_OEN BIT(3)
100#define DEGLITCH_TIME_MASK 0x07
101#define DEGLITCH_TIME_2MS 0
102#define DEGLITCH_TIME_4MS 1
103#define DEGLITCH_TIME_8MS 2
104#define DEGLITCH_TIME_16MS 3
105#define DEGLITCH_TIME_40MS 4
106#define DEGLITCH_TIME_50MS 5
107#define DEGLITCH_TIME_60MS 6
108#define DEGLITCH_TIME_128MS 7
109
110#define USB_D_OVR BIT(7)
111#define USB_ID_OVR BIT(6)
112#define DVRFLT_SEL BIT(5)
113#define BLOCK_RGND_INT BIT(4)
114#define SKIP_DEG BIT(3)
115#define CI2CA_POL BIT(2)
116#define CI2CA_WKUP BIT(1)
117#define SINGLE_ATT BIT(0)
118
119#define USB_D_ODN BIT(5)
120#define VBUS_CHECK BIT(2)
121#define RGND_INTP_MASK 0x03
122#define RGND_INTP_OPEN 0
123#define RGND_INTP_2K 1
124#define RGND_INTP_1K 2
125#define RGND_INTP_SHORT 3
126
127/* HDMI registers */
128#define HDMI_RX_TMDS0_CCTRL1_REG 0x10
129#define HDMI_RX_TMDS_CLK_EN_REG 0x11
130#define HDMI_RX_TMDS_CH_EN_REG 0x12
131#define HDMI_RX_PLL_CALREFSEL_REG 0x17
132#define HDMI_RX_PLL_VCOCAL_REG 0x1A
133#define HDMI_RX_EQ_DATA0_REG 0x22
134#define HDMI_RX_EQ_DATA1_REG 0x23
135#define HDMI_RX_EQ_DATA2_REG 0x24
136#define HDMI_RX_EQ_DATA3_REG 0x25
137#define HDMI_RX_EQ_DATA4_REG 0x26
138#define HDMI_RX_TMDS_ZONE_CTRL_REG 0x4C
139#define HDMI_RX_TMDS_MODE_CTRL_REG 0x4D
140
141/* CBUS registers */
142#define CBUS_INT_STATUS_1_REG 0x08
143#define CBUS_INTR1_ENABLE_REG 0x09
144#define CBUS_MSC_REQ_ABORT_REASON_REG 0x0D
145#define CBUS_INT_STATUS_2_REG 0x1E
146#define CBUS_INTR2_ENABLE_REG 0x1F
147#define CBUS_LINK_CONTROL_2_REG 0x31
148#define CBUS_MHL_STATUS_REG_0 0xB0
149#define CBUS_MHL_STATUS_REG_1 0xB1
150
151#define BIT_CBUS_RESET BIT(3)
152#define SET_HPD_DOWNSTREAM BIT(6)
153
154/* TPI registers */
155#define TPI_DPD_REG 0x3D
156
157/* Timeouts in msec */
158#define T_SRC_VBUS_CBUS_TO_STABLE 200
159#define T_SRC_CBUS_FLOAT 100
160#define T_SRC_CBUS_DEGLITCH 2
161#define T_SRC_RXSENSE_DEGLITCH 110
162
163#define MHL1_MAX_CLK 75000 /* in kHz */
164
165#define I2C_TPI_ADDR 0x3D
166#define I2C_HDMI_ADDR 0x49
167#define I2C_CBUS_ADDR 0x64
168
/* States of the MHL discovery/connection state machine. */
169enum sii9234_state {
170 ST_OFF,
171 ST_D3,
172 ST_RGND_INIT,
173 ST_RGND_1K,
174 ST_RSEN_HIGH,
175 ST_MHL_ESTABLISHED,
176 ST_FAILURE_DISCOVERY,
177 ST_FAILURE,
178};
179
/* Per-device context for the SiI9234 MHL bridge. */
180struct sii9234 {
	/* One client per register page, indexed by enum sii9234_client_id. */
181 struct i2c_client *client[4];
182 struct drm_bridge bridge;
183 struct device *dev;
184 struct gpio_desc *gpio_reset;
	/* Sticky error for the i2c accessor helpers; see sii9234_clear_error(). */
185 int i2c_error;
186 struct regulator_bulk_data supplies[4];
187
188 struct mutex lock; /* Protects fields below and device registers */
189 enum sii9234_state state;
190};
191
/* Indices into sii9234.client[] — one i2c address per register page. */
192enum sii9234_client_id {
193 I2C_MHL,
194 I2C_TPI,
195 I2C_HDMI,
196 I2C_CBUS,
197};
198
/* Human-readable page names for the error messages in the i2c helpers. */
199static const char * const sii9234_client_name[] = {
200 [I2C_MHL] = "MHL",
201 [I2C_TPI] = "TPI",
202 [I2C_HDMI] = "HDMI",
203 [I2C_CBUS] = "CBUS",
204};
205
/*
 * Write one register byte on the given register page. Uses a sticky-error
 * scheme: once ctx->i2c_error is set, further accesses are skipped until
 * sii9234_clear_error() resets it, so a sequence of writes only needs one
 * error check at the end.
 */
206static int sii9234_writeb(struct sii9234 *ctx, int id, int offset,
207 int value)
208{
209 int ret;
210 struct i2c_client *client = ctx->client[id];
211
212 if (ctx->i2c_error)
213 return ctx->i2c_error;
214
215 ret = i2c_smbus_write_byte_data(client, offset, value);
216 if (ret < 0)
217 dev_err(ctx->dev, "writeb: %4s[0x%02x] <- 0x%02x\n",
218 sii9234_client_name[id], offset, value);
	/* ret is 0 on success, so this also keeps i2c_error clear. */
219 ctx->i2c_error = ret;
220
221 return ret;
222}
223
/*
 * Masked register write (read-modify-write): only the bits in @mask take
 * their value from @value; the rest are preserved from the current register
 * contents. Implemented as write-address / read-byte / write-data, and uses
 * the same sticky-error scheme as sii9234_writeb().
 */
224static int sii9234_writebm(struct sii9234 *ctx, int id, int offset,
225 int value, int mask)
226{
227 int ret;
228 struct i2c_client *client = ctx->client[id];
229
230 if (ctx->i2c_error)
231 return ctx->i2c_error;
232
	/* Select the register to read back. */
233 ret = i2c_smbus_write_byte(client, offset);
234 if (ret < 0) {
235 dev_err(ctx->dev, "writebm: %4s[0x%02x] <- 0x%02x\n",
236 sii9234_client_name[id], offset, value);
237 ctx->i2c_error = ret;
238 return ret;
239 }
240
241 ret = i2c_smbus_read_byte(client);
242 if (ret < 0) {
243 dev_err(ctx->dev, "writebm: %4s[0x%02x] <- 0x%02x\n",
244 sii9234_client_name[id], offset, value);
245 ctx->i2c_error = ret;
246 return ret;
247 }
248
	/* Merge the masked bits of @value into the current register value. */
249 value = (value & mask) | (ret & ~mask);
250
251 ret = i2c_smbus_write_byte_data(client, offset, value);
252 if (ret < 0) {
253 dev_err(ctx->dev, "writebm: %4s[0x%02x] <- 0x%02x\n",
254 sii9234_client_name[id], offset, value);
255 ctx->i2c_error = ret;
256 }
257
258 return ret;
259}
260
/*
 * Read one register byte on the given register page (write-address then
 * read-byte). Returns the byte value (>= 0) or a negative errno; follows
 * the same sticky-error scheme as the write helpers.
 */
261static int sii9234_readb(struct sii9234 *ctx, int id, int offset)
262{
263 int ret;
264 struct i2c_client *client = ctx->client[id];
265
266 if (ctx->i2c_error)
267 return ctx->i2c_error;
268
269 ret = i2c_smbus_write_byte(client, offset);
270 if (ret < 0) {
271 dev_err(ctx->dev, "readb: %4s[0x%02x]\n",
272 sii9234_client_name[id], offset);
273 ctx->i2c_error = ret;
274 return ret;
275 }
276
277 ret = i2c_smbus_read_byte(client);
278 if (ret < 0) {
279 dev_err(ctx->dev, "readb: %4s[0x%02x]\n",
280 sii9234_client_name[id], offset);
281 ctx->i2c_error = ret;
282 }
283
284 return ret;
285}
286
/*
 * Fetch and reset the sticky i2c error accumulated by the accessor helpers.
 * Returns 0 if the preceding register sequence completed without error,
 * otherwise the first negative errno that occurred.
 */
287static int sii9234_clear_error(struct sii9234 *ctx)
288{
289 int ret = ctx->i2c_error;
290
291 ctx->i2c_error = 0;
292
293 return ret;
294}
295
/*
 * Convenience wrappers binding the raw accessors above to the four
 * logical register banks of the chip (each reached through its own
 * I2C client: MHL TX, CBUS, HDMI RX and TPI).
 */
#define mhl_tx_writeb(sii9234, offset, value) \
	sii9234_writeb(sii9234, I2C_MHL, offset, value)
#define mhl_tx_writebm(sii9234, offset, value, mask) \
	sii9234_writebm(sii9234, I2C_MHL, offset, value, mask)
#define mhl_tx_readb(sii9234, offset) \
	sii9234_readb(sii9234, I2C_MHL, offset)
#define cbus_writeb(sii9234, offset, value) \
	sii9234_writeb(sii9234, I2C_CBUS, offset, value)
#define cbus_writebm(sii9234, offset, value, mask) \
	sii9234_writebm(sii9234, I2C_CBUS, offset, value, mask)
#define cbus_readb(sii9234, offset) \
	sii9234_readb(sii9234, I2C_CBUS, offset)
#define hdmi_writeb(sii9234, offset, value) \
	sii9234_writeb(sii9234, I2C_HDMI, offset, value)
#define hdmi_writebm(sii9234, offset, value, mask) \
	sii9234_writebm(sii9234, I2C_HDMI, offset, value, mask)
#define hdmi_readb(sii9234, offset) \
	sii9234_readb(sii9234, I2C_HDMI, offset)
#define tpi_writeb(sii9234, offset, value) \
	sii9234_writeb(sii9234, I2C_TPI, offset, value)
#define tpi_writebm(sii9234, offset, value, mask) \
	sii9234_writebm(sii9234, I2C_TPI, offset, value, mask)
#define tpi_readb(sii9234, offset) \
	sii9234_readb(sii9234, I2C_TPI, offset)
320
/*
 * Switch the TMDS output and the HPD override towards the sink on or
 * off together.
 * NOTE(review): the return type is u8 while sii9234_clear_error() can
 * return a negative errno, which is truncated here; current callers
 * ignore the value, but an int return would be safer — confirm before
 * relying on the result.
 */
static u8 sii9234_tmds_control(struct sii9234 *ctx, bool enable)
{
	mhl_tx_writebm(ctx, MHL_TX_TMDS_CCTRL, enable ? ~0 : 0,
		       BIT_TMDS_CCTRL_TMDS_OE);
	mhl_tx_writebm(ctx, MHL_TX_INT_CTRL_REG, enable ? ~0 : 0,
		       MHL_HPD_OUT_OVR_EN | MHL_HPD_OUT_OVR_VAL);
	return sii9234_clear_error(ctx);
}
329
330static int sii9234_cbus_reset(struct sii9234 *ctx)
331{
332 int i;
333
334 mhl_tx_writebm(ctx, MHL_TX_SRST, ~0, BIT_CBUS_RESET);
335 msleep(T_SRC_CBUS_DEGLITCH);
336 mhl_tx_writebm(ctx, MHL_TX_SRST, 0, BIT_CBUS_RESET);
337
338 for (i = 0; i < 4; i++) {
339 /*
340 * Enable WRITE_STAT interrupt for writes to all
341 * 4 MSC Status registers.
342 */
343 cbus_writeb(ctx, 0xE0 + i, 0xF2);
344 /*
345 * Enable SET_INT interrupt for writes to all
346 * 4 MSC Interrupt registers.
347 */
348 cbus_writeb(ctx, 0xF0 + i, 0xF2);
349 }
350
351 return sii9234_clear_error(ctx);
352}
353
/*
 * One-time CBUS setup: vendor link-tuning values plus our Device
 * Capability (DEVCAP) table.  The bare hex register/value pairs come
 * from the vendor init sequence — check them against the Samsung /
 * Silicon Image MHL documentation before changing anything here.
 */
static int sii9234_cbus_init(struct sii9234 *ctx)
{
	cbus_writeb(ctx, 0x07, 0xF2);
	cbus_writeb(ctx, 0x40, 0x03);
	cbus_writeb(ctx, 0x42, 0x06);
	cbus_writeb(ctx, 0x36, 0x0C);
	cbus_writeb(ctx, 0x3D, 0xFD);
	cbus_writeb(ctx, 0x1C, 0x01);
	cbus_writeb(ctx, 0x1D, 0x0F);
	cbus_writeb(ctx, 0x44, 0x02);
	/* Setup our devcap (what we advertise to the MHL sink) */
	cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_DEV_STATE, 0x00);
	cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_MHL_VERSION,
		    SII9234_MHL_VERSION);
	cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_CAT,
		    MHL_DCAP_CAT_SOURCE);
	cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_ADOPTER_ID_H, 0x01);
	cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_ADOPTER_ID_L, 0x41);
	cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_VID_LINK_MODE,
		    MHL_DCAP_VID_LINK_RGB444 | MHL_DCAP_VID_LINK_YCBCR444);
	cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_VIDEO_TYPE,
		    MHL_DCAP_VT_GRAPHICS);
	cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_LOG_DEV_MAP,
		    MHL_DCAP_LD_GUI);
	cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_BANDWIDTH, 0x0F);
	cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_FEATURE_FLAG,
		    MHL_DCAP_FEATURE_RCP_SUPPORT | MHL_DCAP_FEATURE_RAP_SUPPORT
			| MHL_DCAP_FEATURE_SP_SUPPORT);
	cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_DEVICE_ID_H, 0x0);
	cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_DEVICE_ID_L, 0x0);
	cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_SCRATCHPAD_SIZE,
		    SII9234_SCRATCHPAD_SIZE);
	cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_INT_STAT_SIZE,
		    SII9234_INT_STAT_SIZE);
	cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_RESERVED, 0);
	/* Vendor-specified link tuning; meanings not documented here. */
	cbus_writebm(ctx, 0x31, 0x0C, 0x0C);
	cbus_writeb(ctx, 0x30, 0x01);
	cbus_writebm(ctx, 0x3C, 0x30, 0x38);
	cbus_writebm(ctx, 0x22, 0x0D, 0x0F);
	cbus_writebm(ctx, 0x2E, 0x15, 0x15);
	cbus_writeb(ctx, CBUS_INTR1_ENABLE_REG, 0);
	cbus_writeb(ctx, CBUS_INTR2_ENABLE_REG, 0);

	return sii9234_clear_error(ctx);
}
400
/*
 * Park the USB-ID switch open so that RGND impedance can be measured
 * cleanly; CBUS discovery is disabled first so it cannot interfere.
 * Paired with release_usb_id_switch_open().
 */
static void force_usb_id_switch_open(struct sii9234 *ctx)
{
	/* Disable CBUS discovery */
	mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL1_REG, 0, 0x01);
	/* Force USB ID switch to open */
	mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL6_REG, ~0, USB_ID_OVR);
	mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL3_REG, ~0, 0x86);
	/* Force upstream HPD to 0 when not in MHL mode. */
	mhl_tx_writebm(ctx, MHL_TX_INT_CTRL_REG, 0, 0x30);
}
411
/*
 * Undo force_usb_id_switch_open(): wait out the CBUS float time, close
 * the USB-ID switch override and re-enable CBUS discovery.
 */
static void release_usb_id_switch_open(struct sii9234 *ctx)
{
	msleep(T_SRC_CBUS_FLOAT);
	/* Clear USB ID switch to open */
	mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL6_REG, 0, USB_ID_OVR);
	/* Enable CBUS discovery */
	mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL1_REG, ~0, 0x01);
}
420
/*
 * Bring the chip out of power-down into D0 and power up the TMDS
 * clock/data path.  Write order follows the vendor power-up sequence.
 * Returns 0 or the first latched I2C error.
 */
static int sii9234_power_init(struct sii9234 *ctx)
{
	/* Force the SiI9234 into the D0 state. */
	tpi_writeb(ctx, TPI_DPD_REG, 0x3F);
	/* Enable TxPLL Clock */
	hdmi_writeb(ctx, HDMI_RX_TMDS_CLK_EN_REG, 0x01);
	/* Enable Tx Clock Path & Equalizer */
	hdmi_writeb(ctx, HDMI_RX_TMDS_CH_EN_REG, 0x15);
	/* Power Up TMDS */
	mhl_tx_writeb(ctx, 0x08, 0x35);
	return sii9234_clear_error(ctx);
}
433
/*
 * Program the HDMI RX analog front end (PLL calibration, equalizer
 * taps, TMDS zone/mode) with vendor-recommended values.  Registers
 * 0x45/0x31 have no named constants — presumably more analog tuning;
 * verify against the datasheet before changing.  Returns 0 or the
 * first latched I2C error.
 */
static int sii9234_hdmi_init(struct sii9234 *ctx)
{
	hdmi_writeb(ctx, HDMI_RX_TMDS0_CCTRL1_REG, 0xC1);
	hdmi_writeb(ctx, HDMI_RX_PLL_CALREFSEL_REG, 0x03);
	hdmi_writeb(ctx, HDMI_RX_PLL_VCOCAL_REG, 0x20);
	hdmi_writeb(ctx, HDMI_RX_EQ_DATA0_REG, 0x8A);
	hdmi_writeb(ctx, HDMI_RX_EQ_DATA1_REG, 0x6A);
	hdmi_writeb(ctx, HDMI_RX_EQ_DATA2_REG, 0xAA);
	hdmi_writeb(ctx, HDMI_RX_EQ_DATA3_REG, 0xCA);
	hdmi_writeb(ctx, HDMI_RX_EQ_DATA4_REG, 0xEA);
	hdmi_writeb(ctx, HDMI_RX_TMDS_ZONE_CTRL_REG, 0xA0);
	hdmi_writeb(ctx, HDMI_RX_TMDS_MODE_CTRL_REG, 0x00);
	mhl_tx_writeb(ctx, MHL_TX_TMDS_CCTRL, 0x34);
	hdmi_writeb(ctx, 0x45, 0x44);
	hdmi_writeb(ctx, 0x31, 0x0A);
	hdmi_writeb(ctx, HDMI_RX_TMDS0_CCTRL1_REG, 0xC1);

	return sii9234_clear_error(ctx);
}
453
/*
 * Program the MHL TX control registers with vendor-recommended values.
 * Returns 0 or the first latched I2C error.
 */
static int sii9234_mhl_tx_ctl_int(struct sii9234 *ctx)
{
	mhl_tx_writeb(ctx, MHL_TX_MHLTX_CTL1_REG, 0xD0);
	mhl_tx_writeb(ctx, MHL_TX_MHLTX_CTL2_REG, 0xFC);
	mhl_tx_writeb(ctx, MHL_TX_MHLTX_CTL4_REG, 0xEB);
	mhl_tx_writeb(ctx, MHL_TX_MHLTX_CTL7_REG, 0x0C);

	return sii9234_clear_error(ctx);
}
463
/*
 * Full chip (re)initialisation: power, CBUS, HDMI RX and MHL TX
 * blocks, then the discovery-related tuning writes.  The register
 * order below follows the vendor bring-up sequence and must not be
 * shuffled.  Returns 0 or a negative errno from the first failing
 * stage.
 */
static int sii9234_reset(struct sii9234 *ctx)
{
	int ret;

	/* Start from a clean error state; prior failures are stale. */
	sii9234_clear_error(ctx);

	ret = sii9234_power_init(ctx);
	if (ret < 0)
		return ret;
	ret = sii9234_cbus_reset(ctx);
	if (ret < 0)
		return ret;
	ret = sii9234_hdmi_init(ctx);
	if (ret < 0)
		return ret;
	ret = sii9234_mhl_tx_ctl_int(ctx);
	if (ret < 0)
		return ret;

	/* Enable HDCP Compliance safety */
	mhl_tx_writeb(ctx, 0x2B, 0x01);
	/* CBUS discovery cycle time for each drive and float = 150us */
	mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL1_REG, 0x04, 0x06);
	/* Clear bit 6 (reg_skip_rgnd) */
	mhl_tx_writeb(ctx, MHL_TX_DISC_CTRL2_REG, (1 << 7) /* Reserved */
		      | 2 << ATT_THRESH_SHIFT | DEGLITCH_TIME_50MS);
	/*
	 * Changed from 66 to 65 for 94[1:0] = 01 = 5k reg_cbusmhl_pup_sel
	 * 1.8V CBUS VTH & GND threshold
	 * to meet CTS 3.3.7.2 spec
	 */
	mhl_tx_writeb(ctx, MHL_TX_DISC_CTRL5_REG, 0x77);
	cbus_writebm(ctx, CBUS_LINK_CONTROL_2_REG, ~0, MHL_INIT_TIMEOUT);
	mhl_tx_writeb(ctx, MHL_TX_MHLTX_CTL6_REG, 0xA0);
	/* RGND & single discovery attempt (RGND blocking) */
	mhl_tx_writeb(ctx, MHL_TX_DISC_CTRL6_REG, BLOCK_RGND_INT |
		      DVRFLT_SEL | SINGLE_ATT);
	/* Use VBUS path of discovery state machine */
	mhl_tx_writeb(ctx, MHL_TX_DISC_CTRL8_REG, 0);
	/* 0x92[3] sets the CBUS / ID switch */
	mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL6_REG, ~0, USB_ID_OVR);
	/*
	 * To allow RGND engine to operate correctly.
	 * When moving the chip from D2 to D0 (power up, init regs)
	 * the values should be
	 * 94[1:0] = 01   reg_cbusmhl_pup_sel[1:0] should be set for 5k
	 * 93[7:6] = 10   reg_cbusdisc_pup_sel[1:0] should be
	 *                set for 10k (default)
	 * 93[5:4] = 00   reg_cbusidle_pup_sel[1:0] = open (default)
	 */
	mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL3_REG, ~0, 0x86);
	/*
	 * Change from CC to 8C to match 5K
	 * to meet CTS 3.3.72 spec
	 */
	mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL4_REG, ~0, 0x8C);
	/* Configure the interrupt as active high */
	mhl_tx_writebm(ctx, MHL_TX_INT_CTRL_REG, 0, 0x06);

	msleep(25);

	/* Release usb_id switch */
	mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL6_REG, 0, USB_ID_OVR);
	mhl_tx_writeb(ctx, MHL_TX_DISC_CTRL1_REG, 0x27);

	ret = sii9234_clear_error(ctx);
	if (ret < 0)
		return ret;
	ret = sii9234_cbus_init(ctx);
	if (ret < 0)
		return ret;

	/* Enable Auto soft reset on SCDT = 0 */
	mhl_tx_writeb(ctx, 0x05, 0x04);
	/* HDMI Transcode mode enable */
	mhl_tx_writeb(ctx, 0x0D, 0x1C);
	mhl_tx_writeb(ctx, MHL_TX_INTR4_ENABLE_REG,
		      RGND_READY_MASK | CBUS_LKOUT_MASK
			| MHL_DISC_FAIL_MASK | MHL_EST_MASK);
	mhl_tx_writeb(ctx, MHL_TX_INTR1_ENABLE_REG, 0x60);

	/* This point is very important before measure RGND impedance */
	force_usb_id_switch_open(ctx);
	mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL4_REG, 0, 0xF0);
	mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL5_REG, 0, 0x03);
	release_usb_id_switch_open(ctx);

	/* Force upstream HPD to 0 when not in MHL mode */
	mhl_tx_writebm(ctx, MHL_TX_INT_CTRL_REG, 0, 1 << 5);
	mhl_tx_writebm(ctx, MHL_TX_INT_CTRL_REG, ~0, 1 << 4);

	return sii9234_clear_error(ctx);
}
557
558static int sii9234_goto_d3(struct sii9234 *ctx)
559{
560 int ret;
561
562 dev_dbg(ctx->dev, "sii9234: detection started d3\n");
563
564 ret = sii9234_reset(ctx);
565 if (ret < 0)
566 goto exit;
567
568 hdmi_writeb(ctx, 0x01, 0x03);
569 tpi_writebm(ctx, TPI_DPD_REG, 0, 1);
570 /* I2C above is expected to fail because power goes down */
571 sii9234_clear_error(ctx);
572
573 ctx->state = ST_D3;
574
575 return 0;
576 exit:
577 dev_err(ctx->dev, "%s failed\n", __func__);
578 return -1;
579}
580
/* Enable all chip supplies at once; returns the regulator core status. */
static int sii9234_hw_on(struct sii9234 *ctx)
{
	return regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
}
585
/* Assert reset, give it time to latch, then cut all supplies. */
static void sii9234_hw_off(struct sii9234 *ctx)
{
	gpiod_set_value(ctx->gpio_reset, 1);
	msleep(20);
	regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
}
592
/* Pulse the reset line: assert, hold 20 ms, deassert. */
static void sii9234_hw_reset(struct sii9234 *ctx)
{
	gpiod_set_value(ctx->gpio_reset, 1);
	msleep(20);
	gpiod_set_value(ctx->gpio_reset, 0);
}
599
600static void sii9234_cable_in(struct sii9234 *ctx)
601{
602 int ret;
603
604 mutex_lock(&ctx->lock);
605 if (ctx->state != ST_OFF)
606 goto unlock;
607 ret = sii9234_hw_on(ctx);
608 if (ret < 0)
609 goto unlock;
610
611 sii9234_hw_reset(ctx);
612 sii9234_goto_d3(ctx);
613 /* To avoid irq storm, when hw is in meta state */
614 enable_irq(to_i2c_client(ctx->dev)->irq);
615
616unlock:
617 mutex_unlock(&ctx->lock);
618}
619
620static void sii9234_cable_out(struct sii9234 *ctx)
621{
622 mutex_lock(&ctx->lock);
623
624 if (ctx->state == ST_OFF)
625 goto unlock;
626
627 disable_irq(to_i2c_client(ctx->dev)->irq);
628 tpi_writeb(ctx, TPI_DPD_REG, 0);
629 /* Turn on&off hpd festure for only QCT HDMI */
630 sii9234_hw_off(ctx);
631
632 ctx->state = ST_OFF;
633
634unlock:
635 mutex_unlock(&ctx->lock);
636}
637
/*
 * Handle the RGND-ready interrupt: from D3, re-initialise the chip and
 * move to ST_RGND_INIT; from ST_RGND_INIT, read the measured RGND
 * impedance and, if it is the MHL-mandated 1k, prepare discovery and
 * advance to ST_RGND_1K.  Any other state is unexpected and fails.
 * Returns the next state for the caller to store.
 */
static enum sii9234_state sii9234_rgnd_ready_irq(struct sii9234 *ctx)
{
	int value;

	if (ctx->state == ST_D3) {
		int ret;

		dev_dbg(ctx->dev, "RGND_READY_INT\n");
		sii9234_hw_reset(ctx);

		ret = sii9234_reset(ctx);
		if (ret < 0) {
			dev_err(ctx->dev, "sii9234_reset() failed\n");
			return ST_FAILURE;
		}

		return ST_RGND_INIT;
	}

	/* Got interrupt in inappropriate state */
	if (ctx->state != ST_RGND_INIT)
		return ST_FAILURE;

	value = mhl_tx_readb(ctx, MHL_TX_STAT2_REG);
	if (sii9234_clear_error(ctx))
		return ST_FAILURE;

	/* 1k RGND impedance identifies an MHL sink; anything else is USB. */
	if ((value & RGND_INTP_MASK) != RGND_INTP_1K) {
		dev_warn(ctx->dev, "RGND is not 1k\n");
		return ST_RGND_INIT;
	}
	dev_dbg(ctx->dev, "RGND 1K!!\n");
	mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL4_REG, ~0, 0x8C);
	mhl_tx_writeb(ctx, MHL_TX_DISC_CTRL5_REG, 0x77);
	mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL6_REG, ~0, 0x05);
	if (sii9234_clear_error(ctx))
		return ST_FAILURE;

	/* Give VBUS/CBUS time to stabilise before discovery proceeds. */
	msleep(T_SRC_VBUS_CBUS_TO_STABLE);
	return ST_RGND_1K;
}
679
/*
 * Handle the MHL-established interrupt: apply the post-discovery
 * register overrides, keep discovery running (RGND interrupt is still
 * needed) and unmask RSEN/HPD change interrupts.  Returns the next
 * state (ST_MHL_ESTABLISHED) or ST_FAILURE on an I2C error.
 */
static enum sii9234_state sii9234_mhl_established(struct sii9234 *ctx)
{
	dev_dbg(ctx->dev, "mhl est interrupt\n");

	/* Discovery override */
	mhl_tx_writeb(ctx, MHL_TX_MHLTX_CTL1_REG, 0x10);
	/* Increase DDC translation layer timer (byte mode) */
	cbus_writeb(ctx, 0x07, 0x32);
	cbus_writebm(ctx, 0x44, ~0, 1 << 1);
	/* Keep the discovery enabled. Need RGND interrupt */
	mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL1_REG, ~0, 1);
	mhl_tx_writeb(ctx, MHL_TX_INTR1_ENABLE_REG,
		      RSEN_CHANGE_INT_MASK | HPD_CHANGE_INT_MASK);

	if (sii9234_clear_error(ctx))
		return ST_FAILURE;

	return ST_MHL_ESTABLISHED;
}
699
700static enum sii9234_state sii9234_hpd_change(struct sii9234 *ctx)
701{
702 int value;
703
704 value = cbus_readb(ctx, CBUS_MSC_REQ_ABORT_REASON_REG);
705 if (sii9234_clear_error(ctx))
706 return ST_FAILURE;
707
708 if (value & SET_HPD_DOWNSTREAM) {
709 /* Downstream HPD High, Enable TMDS */
710 sii9234_tmds_control(ctx, true);
711 } else {
712 /* Downstream HPD Low, Disable TMDS */
713 sii9234_tmds_control(ctx, false);
714 }
715
716 return ctx->state;
717}
718
/*
 * Handle the RSEN (receiver-sense) change interrupt.  RSEN high means
 * the sink is present; a low reading is re-checked after a deglitch
 * delay before being treated as a real disconnect, in which case TMDS
 * is shut down and the USB-ID switch is cycled.  Returns the next
 * state.
 */
static enum sii9234_state sii9234_rsen_change(struct sii9234 *ctx)
{
	int value;

	/* Work_around code to handle wrong interrupt */
	if (ctx->state != ST_RGND_1K) {
		dev_err(ctx->dev, "RSEN_HIGH without RGND_1K\n");
		return ST_FAILURE;
	}
	value = mhl_tx_readb(ctx, MHL_TX_SYSSTAT_REG);
	if (value < 0)
		return ST_FAILURE;

	if (value & RSEN_STATUS) {
		dev_dbg(ctx->dev, "MHL cable connected.. RSEN High\n");
		return ST_RSEN_HIGH;
	}
	dev_dbg(ctx->dev, "RSEN lost\n");
	/*
	 * Once RSEN loss is confirmed,we need to check
	 * based on cable status and chip power status,whether
	 * it is SINK Loss(HDMI cable not connected, TV Off)
	 * or MHL cable disconnection
	 * TODO: Define the below mhl_disconnection()
	 */
	msleep(T_SRC_RXSENSE_DEGLITCH);
	value = mhl_tx_readb(ctx, MHL_TX_SYSSTAT_REG);
	if (value < 0)
		return ST_FAILURE;
	dev_dbg(ctx->dev, "sys_stat: %x\n", value);

	/* RSEN came back within the deglitch window: spurious loss. */
	if (value & RSEN_STATUS) {
		dev_dbg(ctx->dev, "RSEN recovery\n");
		return ST_RSEN_HIGH;
	}
	dev_dbg(ctx->dev, "RSEN Really LOW\n");
	/* To meet CTS 3.3.22.2 spec */
	sii9234_tmds_control(ctx, false);
	force_usb_id_switch_open(ctx);
	release_usb_id_switch_open(ctx);

	return ST_FAILURE;
}
762
/*
 * Threaded IRQ handler and heart of the state machine.  Reads all
 * pending interrupt sources first, dispatches them in a fixed order
 * (RGND before RSEN before MHL-EST before HPD — the chip can raise
 * several at once), then acknowledges everything that was read.  A
 * plain failure triggers a full reset back to D3; a discovery failure
 * just powers the chip down to D3 without reset.
 */
static irqreturn_t sii9234_irq_thread(int irq, void *data)
{
	struct sii9234 *ctx = data;
	int intr1, intr4;
	int intr1_en, intr4_en;
	int cbus_intr1, cbus_intr2;

	dev_dbg(ctx->dev, "%s\n", __func__);

	mutex_lock(&ctx->lock);

	/* Snapshot all status/enable registers before acting on them. */
	intr1 = mhl_tx_readb(ctx, MHL_TX_INTR1_REG);
	intr4 = mhl_tx_readb(ctx, MHL_TX_INTR4_REG);
	intr1_en = mhl_tx_readb(ctx, MHL_TX_INTR1_ENABLE_REG);
	intr4_en = mhl_tx_readb(ctx, MHL_TX_INTR4_ENABLE_REG);
	cbus_intr1 = cbus_readb(ctx, CBUS_INT_STATUS_1_REG);
	cbus_intr2 = cbus_readb(ctx, CBUS_INT_STATUS_2_REG);

	if (sii9234_clear_error(ctx))
		goto done;

	dev_dbg(ctx->dev, "irq %02x/%02x %02x/%02x %02x/%02x\n",
		intr1, intr1_en, intr4, intr4_en, cbus_intr1, cbus_intr2);

	if (intr4 & RGND_READY_INT)
		ctx->state = sii9234_rgnd_ready_irq(ctx);
	if (intr1 & RSEN_CHANGE_INT)
		ctx->state = sii9234_rsen_change(ctx);
	if (intr4 & MHL_EST_INT)
		ctx->state = sii9234_mhl_established(ctx);
	if (intr1 & HPD_CHANGE_INT)
		ctx->state = sii9234_hpd_change(ctx);
	if (intr4 & CBUS_LKOUT_INT)
		ctx->state = ST_FAILURE;
	if (intr4 & MHL_DISC_FAIL_INT)
		ctx->state = ST_FAILURE_DISCOVERY;

 done:
	/* Clean interrupt status and pending flags */
	mhl_tx_writeb(ctx, MHL_TX_INTR1_REG, intr1);
	mhl_tx_writeb(ctx, MHL_TX_INTR4_REG, intr4);
	cbus_writeb(ctx, CBUS_MHL_STATUS_REG_0, 0xFF);
	cbus_writeb(ctx, CBUS_MHL_STATUS_REG_1, 0xFF);
	cbus_writeb(ctx, CBUS_INT_STATUS_1_REG, cbus_intr1);
	cbus_writeb(ctx, CBUS_INT_STATUS_2_REG, cbus_intr2);

	/* Errors during the ack writes are not recoverable here. */
	sii9234_clear_error(ctx);

	if (ctx->state == ST_FAILURE) {
		dev_dbg(ctx->dev, "try to reset after failure\n");
		sii9234_hw_reset(ctx);
		sii9234_goto_d3(ctx);
	}

	if (ctx->state == ST_FAILURE_DISCOVERY) {
		dev_err(ctx->dev, "discovery failed, no power for MHL?\n");
		tpi_writebm(ctx, TPI_DPD_REG, 0, 1);
		ctx->state = ST_D3;
	}

	mutex_unlock(&ctx->lock);

	return IRQ_HANDLED;
}
827
828static int sii9234_init_resources(struct sii9234 *ctx,
829 struct i2c_client *client)
830{
831 struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
832 int ret;
833
834 if (!ctx->dev->of_node) {
835 dev_err(ctx->dev, "not DT device\n");
836 return -ENODEV;
837 }
838
839 ctx->gpio_reset = devm_gpiod_get(ctx->dev, "reset", GPIOD_OUT_LOW);
840 if (IS_ERR(ctx->gpio_reset)) {
841 dev_err(ctx->dev, "failed to get reset gpio from DT\n");
842 return PTR_ERR(ctx->gpio_reset);
843 }
844
845 ctx->supplies[0].supply = "avcc12";
846 ctx->supplies[1].supply = "avcc33";
847 ctx->supplies[2].supply = "iovcc18";
848 ctx->supplies[3].supply = "cvcc12";
849 ret = devm_regulator_bulk_get(ctx->dev, 4, ctx->supplies);
850 if (ret) {
851 dev_err(ctx->dev, "regulator_bulk failed\n");
852 return ret;
853 }
854
855 ctx->client[I2C_MHL] = client;
856
857 ctx->client[I2C_TPI] = i2c_new_dummy(adapter, I2C_TPI_ADDR);
858 if (!ctx->client[I2C_TPI]) {
859 dev_err(ctx->dev, "failed to create TPI client\n");
860 return -ENODEV;
861 }
862
863 ctx->client[I2C_HDMI] = i2c_new_dummy(adapter, I2C_HDMI_ADDR);
864 if (!ctx->client[I2C_HDMI]) {
865 dev_err(ctx->dev, "failed to create HDMI RX client\n");
866 goto fail_tpi;
867 }
868
869 ctx->client[I2C_CBUS] = i2c_new_dummy(adapter, I2C_CBUS_ADDR);
870 if (!ctx->client[I2C_CBUS]) {
871 dev_err(ctx->dev, "failed to create CBUS client\n");
872 goto fail_hdmi;
873 }
874
875 return 0;
876
877fail_hdmi:
878 i2c_unregister_device(ctx->client[I2C_HDMI]);
879fail_tpi:
880 i2c_unregister_device(ctx->client[I2C_TPI]);
881
882 return -ENODEV;
883}
884
/* Unregister the dummy I2C clients in reverse order of creation. */
static void sii9234_deinit_resources(struct sii9234 *ctx)
{
	i2c_unregister_device(ctx->client[I2C_CBUS]);
	i2c_unregister_device(ctx->client[I2C_HDMI]);
	i2c_unregister_device(ctx->client[I2C_TPI]);
}
891
/* Recover the driver context from the embedded drm_bridge member. */
static inline struct sii9234 *bridge_to_sii9234(struct drm_bridge *bridge)
{
	return container_of(bridge, struct sii9234, bridge);
}
896
897static enum drm_mode_status sii9234_mode_valid(struct drm_bridge *bridge,
898 const struct drm_display_mode *mode)
899{
900 if (mode->clock > MHL1_MAX_CLK)
901 return MODE_CLOCK_HIGH;
902
903 return MODE_OK;
904}
905
/* Only mode filtering is needed; the chip is transparent otherwise. */
static const struct drm_bridge_funcs sii9234_bridge_funcs = {
	.mode_valid = sii9234_mode_valid,
};
909
/*
 * I2C probe: allocate the context, validate the adapter and IRQ,
 * request the (initially disabled) threaded interrupt, grab GPIO /
 * regulators / dummy clients, register the DRM bridge and finally
 * power the chip up into D3.  Order matters: IRQ_NOAUTOEN must be set
 * before the request so the handler cannot run until
 * sii9234_cable_in() enables it.
 */
static int sii9234_probe(struct i2c_client *client,
			 const struct i2c_device_id *id)
{
	struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
	struct sii9234 *ctx;
	struct device *dev = &client->dev;
	int ret;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->dev = dev;
	mutex_init(&ctx->lock);

	/* All register access goes through SMBus byte-data transfers. */
	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
		dev_err(dev, "I2C adapter lacks SMBUS feature\n");
		return -EIO;
	}

	if (!client->irq) {
		dev_err(dev, "no irq provided\n");
		return -EINVAL;
	}

	/* Keep the IRQ off until the hardware is powered and in D3. */
	irq_set_status_flags(client->irq, IRQ_NOAUTOEN);
	ret = devm_request_threaded_irq(dev, client->irq, NULL,
					sii9234_irq_thread,
					IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
					"sii9234", ctx);
	if (ret < 0) {
		dev_err(dev, "failed to install IRQ handler\n");
		return ret;
	}

	ret = sii9234_init_resources(ctx, client);
	if (ret < 0)
		return ret;

	i2c_set_clientdata(client, ctx);

	ctx->bridge.funcs = &sii9234_bridge_funcs;
	ctx->bridge.of_node = dev->of_node;
	drm_bridge_add(&ctx->bridge);

	sii9234_cable_in(ctx);

	return 0;
}
959
/*
 * I2C remove: power down (also disables the IRQ), unregister the
 * bridge, then free the dummy clients — reverse of probe order.
 */
static int sii9234_remove(struct i2c_client *client)
{
	struct sii9234 *ctx = i2c_get_clientdata(client);

	sii9234_cable_out(ctx);
	drm_bridge_remove(&ctx->bridge);
	sii9234_deinit_resources(ctx);

	return 0;
}
970
/* Device-tree match table ("sil,sii9234" per the binding document). */
static const struct of_device_id sii9234_dt_match[] = {
	{ .compatible = "sil,sii9234" },
	{ },
};
MODULE_DEVICE_TABLE(of, sii9234_dt_match);

/* Legacy (non-DT) I2C device-id table. */
static const struct i2c_device_id sii9234_id[] = {
	{ "SII9234", 0 },
	{ },
};
MODULE_DEVICE_TABLE(i2c, sii9234_id);

static struct i2c_driver sii9234_driver = {
	.driver = {
		.name	= "sii9234",
		.of_match_table = sii9234_dt_match,
	},
	.probe = sii9234_probe,
	.remove = sii9234_remove,
	.id_table = sii9234_id,
};

module_i2c_driver(sii9234_driver);
MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index 5131bfb94f06..b7eb704d0a8a 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -28,6 +28,8 @@
28#include <linux/regulator/consumer.h> 28#include <linux/regulator/consumer.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30 30
31#include <media/rc-core.h>
32
31#include "sil-sii8620.h" 33#include "sil-sii8620.h"
32 34
33#define SII8620_BURST_BUF_LEN 288 35#define SII8620_BURST_BUF_LEN 288
@@ -58,6 +60,7 @@ enum sii8620_mt_state {
58struct sii8620 { 60struct sii8620 {
59 struct drm_bridge bridge; 61 struct drm_bridge bridge;
60 struct device *dev; 62 struct device *dev;
63 struct rc_dev *rc_dev;
61 struct clk *clk_xtal; 64 struct clk *clk_xtal;
62 struct gpio_desc *gpio_reset; 65 struct gpio_desc *gpio_reset;
63 struct gpio_desc *gpio_int; 66 struct gpio_desc *gpio_int;
@@ -431,6 +434,16 @@ static void sii8620_mt_rap(struct sii8620 *ctx, u8 code)
431 sii8620_mt_msc_msg(ctx, MHL_MSC_MSG_RAP, code); 434 sii8620_mt_msc_msg(ctx, MHL_MSC_MSG_RAP, code);
432} 435}
433 436
437static void sii8620_mt_rcpk(struct sii8620 *ctx, u8 code)
438{
439 sii8620_mt_msc_msg(ctx, MHL_MSC_MSG_RCPK, code);
440}
441
442static void sii8620_mt_rcpe(struct sii8620 *ctx, u8 code)
443{
444 sii8620_mt_msc_msg(ctx, MHL_MSC_MSG_RCPE, code);
445}
446
434static void sii8620_mt_read_devcap_send(struct sii8620 *ctx, 447static void sii8620_mt_read_devcap_send(struct sii8620 *ctx,
435 struct sii8620_mt_msg *msg) 448 struct sii8620_mt_msg *msg)
436{ 449{
@@ -1753,6 +1766,25 @@ static void sii8620_send_features(struct sii8620 *ctx)
1753 sii8620_write_buf(ctx, REG_MDT_XMIT_WRITE_PORT, buf, ARRAY_SIZE(buf)); 1766 sii8620_write_buf(ctx, REG_MDT_XMIT_WRITE_PORT, buf, ARRAY_SIZE(buf));
1754} 1767}
1755 1768
1769static bool sii8620_rcp_consume(struct sii8620 *ctx, u8 scancode)
1770{
1771 bool pressed = !(scancode & MHL_RCP_KEY_RELEASED_MASK);
1772
1773 scancode &= MHL_RCP_KEY_ID_MASK;
1774
1775 if (!ctx->rc_dev) {
1776 dev_dbg(ctx->dev, "RCP input device not initialized\n");
1777 return false;
1778 }
1779
1780 if (pressed)
1781 rc_keydown(ctx->rc_dev, RC_PROTO_CEC, scancode, 0);
1782 else
1783 rc_keyup(ctx->rc_dev);
1784
1785 return true;
1786}
1787
1756static void sii8620_msc_mr_set_int(struct sii8620 *ctx) 1788static void sii8620_msc_mr_set_int(struct sii8620 *ctx)
1757{ 1789{
1758 u8 ints[MHL_INT_SIZE]; 1790 u8 ints[MHL_INT_SIZE];
@@ -1804,19 +1836,25 @@ static void sii8620_msc_mt_done(struct sii8620 *ctx)
1804 1836
1805static void sii8620_msc_mr_msc_msg(struct sii8620 *ctx) 1837static void sii8620_msc_mr_msc_msg(struct sii8620 *ctx)
1806{ 1838{
1807 struct sii8620_mt_msg *msg = sii8620_msc_msg_first(ctx); 1839 struct sii8620_mt_msg *msg;
1808 u8 buf[2]; 1840 u8 buf[2];
1809 1841
1810 if (!msg)
1811 return;
1812
1813 sii8620_read_buf(ctx, REG_MSC_MR_MSC_MSG_RCVD_1ST_DATA, buf, 2); 1842 sii8620_read_buf(ctx, REG_MSC_MR_MSC_MSG_RCVD_1ST_DATA, buf, 2);
1814 1843
1815 switch (buf[0]) { 1844 switch (buf[0]) {
1816 case MHL_MSC_MSG_RAPK: 1845 case MHL_MSC_MSG_RAPK:
1846 msg = sii8620_msc_msg_first(ctx);
1847 if (!msg)
1848 return;
1817 msg->ret = buf[1]; 1849 msg->ret = buf[1];
1818 ctx->mt_state = MT_STATE_DONE; 1850 ctx->mt_state = MT_STATE_DONE;
1819 break; 1851 break;
1852 case MHL_MSC_MSG_RCP:
1853 if (!sii8620_rcp_consume(ctx, buf[1]))
1854 sii8620_mt_rcpe(ctx,
1855 MHL_RCPE_STATUS_INEFFECTIVE_KEY_CODE);
1856 sii8620_mt_rcpk(ctx, buf[1]);
1857 break;
1820 default: 1858 default:
1821 dev_err(ctx->dev, "%s message type %d,%d not supported", 1859 dev_err(ctx->dev, "%s message type %d,%d not supported",
1822 __func__, buf[0], buf[1]); 1860 __func__, buf[0], buf[1]);
@@ -2102,11 +2140,57 @@ static void sii8620_cable_in(struct sii8620 *ctx)
2102 enable_irq(to_i2c_client(ctx->dev)->irq); 2140 enable_irq(to_i2c_client(ctx->dev)->irq);
2103} 2141}
2104 2142
2143static void sii8620_init_rcp_input_dev(struct sii8620 *ctx)
2144{
2145 struct rc_dev *rc_dev;
2146 int ret;
2147
2148 rc_dev = rc_allocate_device(RC_DRIVER_SCANCODE);
2149 if (!rc_dev) {
2150 dev_err(ctx->dev, "Failed to allocate RC device\n");
2151 ctx->error = -ENOMEM;
2152 return;
2153 }
2154
2155 rc_dev->input_phys = "sii8620/input0";
2156 rc_dev->input_id.bustype = BUS_VIRTUAL;
2157 rc_dev->map_name = RC_MAP_CEC;
2158 rc_dev->allowed_protocols = RC_PROTO_BIT_CEC;
2159 rc_dev->driver_name = "sii8620";
2160 rc_dev->device_name = "sii8620";
2161
2162 ret = rc_register_device(rc_dev);
2163
2164 if (ret) {
2165 dev_err(ctx->dev, "Failed to register RC device\n");
2166 ctx->error = ret;
2167 rc_free_device(ctx->rc_dev);
2168 return;
2169 }
2170 ctx->rc_dev = rc_dev;
2171}
2172
2105static inline struct sii8620 *bridge_to_sii8620(struct drm_bridge *bridge) 2173static inline struct sii8620 *bridge_to_sii8620(struct drm_bridge *bridge)
2106{ 2174{
2107 return container_of(bridge, struct sii8620, bridge); 2175 return container_of(bridge, struct sii8620, bridge);
2108} 2176}
2109 2177
2178static int sii8620_attach(struct drm_bridge *bridge)
2179{
2180 struct sii8620 *ctx = bridge_to_sii8620(bridge);
2181
2182 sii8620_init_rcp_input_dev(ctx);
2183
2184 return sii8620_clear_error(ctx);
2185}
2186
2187static void sii8620_detach(struct drm_bridge *bridge)
2188{
2189 struct sii8620 *ctx = bridge_to_sii8620(bridge);
2190
2191 rc_unregister_device(ctx->rc_dev);
2192}
2193
2110static bool sii8620_mode_fixup(struct drm_bridge *bridge, 2194static bool sii8620_mode_fixup(struct drm_bridge *bridge,
2111 const struct drm_display_mode *mode, 2195 const struct drm_display_mode *mode,
2112 struct drm_display_mode *adjusted_mode) 2196 struct drm_display_mode *adjusted_mode)
@@ -2151,6 +2235,8 @@ end:
2151} 2235}
2152 2236
2153static const struct drm_bridge_funcs sii8620_bridge_funcs = { 2237static const struct drm_bridge_funcs sii8620_bridge_funcs = {
2238 .attach = sii8620_attach,
2239 .detach = sii8620_detach,
2154 .mode_fixup = sii8620_mode_fixup, 2240 .mode_fixup = sii8620_mode_fixup,
2155}; 2241};
2156 2242
@@ -2217,8 +2303,8 @@ static int sii8620_remove(struct i2c_client *client)
2217 struct sii8620 *ctx = i2c_get_clientdata(client); 2303 struct sii8620 *ctx = i2c_get_clientdata(client);
2218 2304
2219 disable_irq(to_i2c_client(ctx->dev)->irq); 2305 disable_irq(to_i2c_client(ctx->dev)->irq);
2220 drm_bridge_remove(&ctx->bridge);
2221 sii8620_hw_off(ctx); 2306 sii8620_hw_off(ctx);
2307 drm_bridge_remove(&ctx->bridge);
2222 2308
2223 return 0; 2309 return 0;
2224} 2310}
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index f4f633a0dffa..d9cca4fd66ec 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -221,7 +221,6 @@ struct dw_mipi_dsi {
221 struct drm_bridge bridge; 221 struct drm_bridge bridge;
222 struct mipi_dsi_host dsi_host; 222 struct mipi_dsi_host dsi_host;
223 struct drm_bridge *panel_bridge; 223 struct drm_bridge *panel_bridge;
224 bool is_panel_bridge;
225 struct device *dev; 224 struct device *dev;
226 void __iomem *base; 225 void __iomem *base;
227 226
@@ -297,7 +296,6 @@ static int dw_mipi_dsi_host_attach(struct mipi_dsi_host *host,
297 bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_DSI); 296 bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_DSI);
298 if (IS_ERR(bridge)) 297 if (IS_ERR(bridge))
299 return PTR_ERR(bridge); 298 return PTR_ERR(bridge);
300 dsi->is_panel_bridge = true;
301 } 299 }
302 300
303 dsi->panel_bridge = bridge; 301 dsi->panel_bridge = bridge;
@@ -312,8 +310,7 @@ static int dw_mipi_dsi_host_detach(struct mipi_dsi_host *host,
312{ 310{
313 struct dw_mipi_dsi *dsi = host_to_dsi(host); 311 struct dw_mipi_dsi *dsi = host_to_dsi(host);
314 312
315 if (dsi->is_panel_bridge) 313 drm_of_panel_bridge_remove(host->dev->of_node, 1, 0);
316 drm_panel_bridge_remove(dsi->panel_bridge);
317 314
318 drm_bridge_remove(&dsi->bridge); 315 drm_bridge_remove(&dsi->bridge);
319 316
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index a4c4a465b385..cd23b1b28259 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -457,7 +457,7 @@ static struct drm_encoder *cirrus_connector_best_encoder(struct drm_connector
457 int enc_id = connector->encoder_ids[0]; 457 int enc_id = connector->encoder_ids[0];
458 /* pick the encoder ids */ 458 /* pick the encoder ids */
459 if (enc_id) 459 if (enc_id)
460 return drm_encoder_find(connector->dev, enc_id); 460 return drm_encoder_find(connector->dev, NULL, enc_id);
461 return NULL; 461 return NULL;
462} 462}
463 463
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 366c56fe5f58..562494873ca5 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -182,9 +182,6 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
182 for (i = 0; i < state->num_private_objs; i++) { 182 for (i = 0; i < state->num_private_objs; i++) {
183 struct drm_private_obj *obj = state->private_objs[i].ptr; 183 struct drm_private_obj *obj = state->private_objs[i].ptr;
184 184
185 if (!obj)
186 continue;
187
188 obj->funcs->atomic_destroy_state(obj, 185 obj->funcs->atomic_destroy_state(obj,
189 state->private_objs[i].state); 186 state->private_objs[i].state);
190 state->private_objs[i].ptr = NULL; 187 state->private_objs[i].ptr = NULL;
@@ -718,7 +715,7 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
718 struct drm_mode_config *config = &dev->mode_config; 715 struct drm_mode_config *config = &dev->mode_config;
719 716
720 if (property == config->prop_fb_id) { 717 if (property == config->prop_fb_id) {
721 struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, val); 718 struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val);
722 drm_atomic_set_fb_for_plane(state, fb); 719 drm_atomic_set_fb_for_plane(state, fb);
723 if (fb) 720 if (fb)
724 drm_framebuffer_put(fb); 721 drm_framebuffer_put(fb);
@@ -734,7 +731,7 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
734 return -EINVAL; 731 return -EINVAL;
735 732
736 } else if (property == config->prop_crtc_id) { 733 } else if (property == config->prop_crtc_id) {
737 struct drm_crtc *crtc = drm_crtc_find(dev, val); 734 struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val);
738 return drm_atomic_set_crtc_for_plane(state, crtc); 735 return drm_atomic_set_crtc_for_plane(state, crtc);
739 } else if (property == config->prop_crtc_x) { 736 } else if (property == config->prop_crtc_x) {
740 state->crtc_x = U642I64(val); 737 state->crtc_x = U642I64(val);
@@ -1149,7 +1146,7 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
1149 struct drm_mode_config *config = &dev->mode_config; 1146 struct drm_mode_config *config = &dev->mode_config;
1150 1147
1151 if (property == config->prop_crtc_id) { 1148 if (property == config->prop_crtc_id) {
1152 struct drm_crtc *crtc = drm_crtc_find(dev, val); 1149 struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val);
1153 return drm_atomic_set_crtc_for_connector(state, crtc); 1150 return drm_atomic_set_crtc_for_connector(state, crtc);
1154 } else if (property == config->dpms_property) { 1151 } else if (property == config->dpms_property) {
1155 /* setting DPMS property requires special handling, which 1152 /* setting DPMS property requires special handling, which
@@ -2259,7 +2256,7 @@ retry:
2259 goto out; 2256 goto out;
2260 } 2257 }
2261 2258
2262 obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_ANY); 2259 obj = drm_mode_object_find(dev, file_priv, obj_id, DRM_MODE_OBJECT_ANY);
2263 if (!obj) { 2260 if (!obj) {
2264 ret = -ENOENT; 2261 ret = -ENOENT;
2265 goto out; 2262 goto out;
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 01c34bc5b5b0..ae56d91433ff 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1704,7 +1704,7 @@ crtc_or_fake_commit(struct drm_atomic_state *state, struct drm_crtc *crtc)
1704 * drm_atomic_helper_commit_cleanup_done(). 1704 * drm_atomic_helper_commit_cleanup_done().
1705 * 1705 *
1706 * This is all implemented by in drm_atomic_helper_commit(), giving drivers a 1706 * This is all implemented by in drm_atomic_helper_commit(), giving drivers a
1707 * complete and esay-to-use default implementation of the atomic_commit() hook. 1707 * complete and easy-to-use default implementation of the atomic_commit() hook.
1708 * 1708 *
1709 * The tracking of asynchronously executed and still pending commits is done 1709 * The tracking of asynchronously executed and still pending commits is done
1710 * using the core structure &drm_crtc_commit. 1710 * using the core structure &drm_crtc_commit.
@@ -1819,7 +1819,7 @@ EXPORT_SYMBOL(drm_atomic_helper_setup_commit);
1819 * This function waits for all preceeding commits that touch the same CRTC as 1819 * This function waits for all preceeding commits that touch the same CRTC as
1820 * @old_state to both be committed to the hardware (as signalled by 1820 * @old_state to both be committed to the hardware (as signalled by
1821 * drm_atomic_helper_commit_hw_done) and executed by the hardware (as signalled 1821 * drm_atomic_helper_commit_hw_done) and executed by the hardware (as signalled
1822 * by calling drm_crtc_vblank_send_event() on the &drm_crtc_state.event). 1822 * by calling drm_crtc_send_vblank_event() on the &drm_crtc_state.event).
1823 * 1823 *
1824 * This is part of the atomic helper support for nonblocking commits, see 1824 * This is part of the atomic helper support for nonblocking commits, see
1825 * drm_atomic_helper_setup_commit() for an overview. 1825 * drm_atomic_helper_setup_commit() for an overview.
@@ -3052,6 +3052,7 @@ out:
3052 drm_modeset_backoff(&ctx); 3052 drm_modeset_backoff(&ctx);
3053 } 3053 }
3054 3054
3055 drm_atomic_state_put(state);
3055 drm_modeset_drop_locks(&ctx); 3056 drm_modeset_drop_locks(&ctx);
3056 drm_modeset_acquire_fini(&ctx); 3057 drm_modeset_acquire_fini(&ctx);
3057 3058
@@ -3206,7 +3207,7 @@ struct drm_encoder *
3206drm_atomic_helper_best_encoder(struct drm_connector *connector) 3207drm_atomic_helper_best_encoder(struct drm_connector *connector)
3207{ 3208{
3208 WARN_ON(connector->encoder_ids[1]); 3209 WARN_ON(connector->encoder_ids[1]);
3209 return drm_encoder_find(connector->dev, connector->encoder_ids[0]); 3210 return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
3210} 3211}
3211EXPORT_SYMBOL(drm_atomic_helper_best_encoder); 3212EXPORT_SYMBOL(drm_atomic_helper_best_encoder);
3212 3213
diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
index fe0982708e95..0d002b045bd2 100644
--- a/drivers/gpu/drm/drm_color_mgmt.c
+++ b/drivers/gpu/drm/drm_color_mgmt.c
@@ -230,7 +230,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
230 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 230 if (!drm_core_check_feature(dev, DRIVER_MODESET))
231 return -EINVAL; 231 return -EINVAL;
232 232
233 crtc = drm_crtc_find(dev, crtc_lut->crtc_id); 233 crtc = drm_crtc_find(dev, file_priv, crtc_lut->crtc_id);
234 if (!crtc) 234 if (!crtc)
235 return -ENOENT; 235 return -ENOENT;
236 236
@@ -308,7 +308,7 @@ int drm_mode_gamma_get_ioctl(struct drm_device *dev,
308 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 308 if (!drm_core_check_feature(dev, DRIVER_MODESET))
309 return -EINVAL; 309 return -EINVAL;
310 310
311 crtc = drm_crtc_find(dev, crtc_lut->crtc_id); 311 crtc = drm_crtc_find(dev, file_priv, crtc_lut->crtc_id);
312 if (!crtc) 312 if (!crtc)
313 return -ENOENT; 313 return -ENOENT;
314 314
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index d8ca526ca4ee..704fc8934616 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -1310,7 +1310,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
1310 1310
1311 memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo)); 1311 memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
1312 1312
1313 connector = drm_connector_lookup(dev, out_resp->connector_id); 1313 connector = drm_connector_lookup(dev, file_priv, out_resp->connector_id);
1314 if (!connector) 1314 if (!connector)
1315 return -ENOENT; 1315 return -ENOENT;
1316 1316
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 68b4e976d5e0..f0556e654116 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -402,7 +402,7 @@ int drm_mode_getcrtc(struct drm_device *dev,
402 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 402 if (!drm_core_check_feature(dev, DRIVER_MODESET))
403 return -EINVAL; 403 return -EINVAL;
404 404
405 crtc = drm_crtc_find(dev, crtc_resp->crtc_id); 405 crtc = drm_crtc_find(dev, file_priv, crtc_resp->crtc_id);
406 if (!crtc) 406 if (!crtc)
407 return -ENOENT; 407 return -ENOENT;
408 408
@@ -569,7 +569,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
569 if (crtc_req->x & 0xffff0000 || crtc_req->y & 0xffff0000) 569 if (crtc_req->x & 0xffff0000 || crtc_req->y & 0xffff0000)
570 return -ERANGE; 570 return -ERANGE;
571 571
572 crtc = drm_crtc_find(dev, crtc_req->crtc_id); 572 crtc = drm_crtc_find(dev, file_priv, crtc_req->crtc_id);
573 if (!crtc) { 573 if (!crtc) {
574 DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id); 574 DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id);
575 return -ENOENT; 575 return -ENOENT;
@@ -595,7 +595,7 @@ retry:
595 /* Make refcounting symmetric with the lookup path. */ 595 /* Make refcounting symmetric with the lookup path. */
596 drm_framebuffer_get(fb); 596 drm_framebuffer_get(fb);
597 } else { 597 } else {
598 fb = drm_framebuffer_lookup(dev, crtc_req->fb_id); 598 fb = drm_framebuffer_lookup(dev, file_priv, crtc_req->fb_id);
599 if (!fb) { 599 if (!fb) {
600 DRM_DEBUG_KMS("Unknown FB ID%d\n", 600 DRM_DEBUG_KMS("Unknown FB ID%d\n",
601 crtc_req->fb_id); 601 crtc_req->fb_id);
@@ -680,7 +680,7 @@ retry:
680 goto out; 680 goto out;
681 } 681 }
682 682
683 connector = drm_connector_lookup(dev, out_id); 683 connector = drm_connector_lookup(dev, file_priv, out_id);
684 if (!connector) { 684 if (!connector) {
685 DRM_DEBUG_KMS("Connector id %d unknown\n", 685 DRM_DEBUG_KMS("Connector id %d unknown\n",
686 out_id); 686 out_id);
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index eab36a460638..5a84c3bc915d 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -562,12 +562,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set,
562 * Allocate space for the backup of all (non-pointer) encoder and 562 * Allocate space for the backup of all (non-pointer) encoder and
563 * connector data. 563 * connector data.
564 */ 564 */
565 save_encoder_crtcs = kzalloc(dev->mode_config.num_encoder * 565 save_encoder_crtcs = kcalloc(dev->mode_config.num_encoder,
566 sizeof(struct drm_crtc *), GFP_KERNEL); 566 sizeof(struct drm_crtc *), GFP_KERNEL);
567 if (!save_encoder_crtcs) 567 if (!save_encoder_crtcs)
568 return -ENOMEM; 568 return -ENOMEM;
569 569
570 save_connector_encoders = kzalloc(dev->mode_config.num_connector * 570 save_connector_encoders = kcalloc(dev->mode_config.num_connector,
571 sizeof(struct drm_encoder *), GFP_KERNEL); 571 sizeof(struct drm_encoder *), GFP_KERNEL);
572 if (!save_connector_encoders) { 572 if (!save_connector_encoders) {
573 kfree(save_encoder_crtcs); 573 kfree(save_encoder_crtcs);
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index a43582076b20..9ebb8841778c 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -106,6 +106,7 @@ int drm_mode_object_add(struct drm_device *dev, struct drm_mode_object *obj,
106void drm_mode_object_register(struct drm_device *dev, 106void drm_mode_object_register(struct drm_device *dev,
107 struct drm_mode_object *obj); 107 struct drm_mode_object *obj);
108struct drm_mode_object *__drm_mode_object_find(struct drm_device *dev, 108struct drm_mode_object *__drm_mode_object_find(struct drm_device *dev,
109 struct drm_file *file_priv,
109 uint32_t id, uint32_t type); 110 uint32_t id, uint32_t type);
110void drm_mode_object_unregister(struct drm_device *dev, 111void drm_mode_object_unregister(struct drm_device *dev,
111 struct drm_mode_object *object); 112 struct drm_mode_object *object);
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 08af8d6b844b..b3d68964b407 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -137,8 +137,10 @@ EXPORT_SYMBOL(drm_dp_link_train_channel_eq_delay);
137u8 drm_dp_link_rate_to_bw_code(int link_rate) 137u8 drm_dp_link_rate_to_bw_code(int link_rate)
138{ 138{
139 switch (link_rate) { 139 switch (link_rate) {
140 case 162000:
141 default: 140 default:
141 WARN(1, "unknown DP link rate %d, using %x\n", link_rate,
142 DP_LINK_BW_1_62);
143 case 162000:
142 return DP_LINK_BW_1_62; 144 return DP_LINK_BW_1_62;
143 case 270000: 145 case 270000:
144 return DP_LINK_BW_2_7; 146 return DP_LINK_BW_2_7;
@@ -151,8 +153,9 @@ EXPORT_SYMBOL(drm_dp_link_rate_to_bw_code);
151int drm_dp_bw_code_to_link_rate(u8 link_bw) 153int drm_dp_bw_code_to_link_rate(u8 link_bw)
152{ 154{
153 switch (link_bw) { 155 switch (link_bw) {
154 case DP_LINK_BW_1_62:
155 default: 156 default:
157 WARN(1, "unknown DP link BW code %x, using 162000\n", link_bw);
158 case DP_LINK_BW_1_62:
156 return 162000; 159 return 162000;
157 case DP_LINK_BW_2_7: 160 case DP_LINK_BW_2_7:
158 return 270000; 161 return 270000;
diff --git a/drivers/gpu/drm/drm_encoder.c b/drivers/gpu/drm/drm_encoder.c
index 0708779840d2..43f644844b83 100644
--- a/drivers/gpu/drm/drm_encoder.c
+++ b/drivers/gpu/drm/drm_encoder.c
@@ -220,7 +220,7 @@ int drm_mode_getencoder(struct drm_device *dev, void *data,
220 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 220 if (!drm_core_check_feature(dev, DRIVER_MODESET))
221 return -EINVAL; 221 return -EINVAL;
222 222
223 encoder = drm_encoder_find(dev, enc_resp->encoder_id); 223 encoder = drm_encoder_find(dev, file_priv, enc_resp->encoder_id);
224 if (!encoder) 224 if (!encoder)
225 return -ENOENT; 225 return -ENOENT;
226 226
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 6a31d13f2f81..116d1f1337c7 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -2266,7 +2266,7 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
2266 if (modes[n] == NULL) 2266 if (modes[n] == NULL)
2267 return best_score; 2267 return best_score;
2268 2268
2269 crtcs = kzalloc(fb_helper->connector_count * 2269 crtcs = kcalloc(fb_helper->connector_count,
2270 sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL); 2270 sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
2271 if (!crtcs) 2271 if (!crtcs)
2272 return best_score; 2272 return best_score;
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index af279844d7ce..2affe53f3fda 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -381,7 +381,7 @@ int drm_mode_rmfb(struct drm_device *dev,
381 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 381 if (!drm_core_check_feature(dev, DRIVER_MODESET))
382 return -EINVAL; 382 return -EINVAL;
383 383
384 fb = drm_framebuffer_lookup(dev, *id); 384 fb = drm_framebuffer_lookup(dev, file_priv, *id);
385 if (!fb) 385 if (!fb)
386 return -ENOENT; 386 return -ENOENT;
387 387
@@ -450,7 +450,7 @@ int drm_mode_getfb(struct drm_device *dev,
450 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 450 if (!drm_core_check_feature(dev, DRIVER_MODESET))
451 return -EINVAL; 451 return -EINVAL;
452 452
453 fb = drm_framebuffer_lookup(dev, r->fb_id); 453 fb = drm_framebuffer_lookup(dev, file_priv, r->fb_id);
454 if (!fb) 454 if (!fb)
455 return -ENOENT; 455 return -ENOENT;
456 456
@@ -515,7 +515,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
515 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 515 if (!drm_core_check_feature(dev, DRIVER_MODESET))
516 return -EINVAL; 516 return -EINVAL;
517 517
518 fb = drm_framebuffer_lookup(dev, r->fb_id); 518 fb = drm_framebuffer_lookup(dev, file_priv, r->fb_id);
519 if (!fb) 519 if (!fb)
520 return -ENOENT; 520 return -ENOENT;
521 521
@@ -688,12 +688,13 @@ EXPORT_SYMBOL(drm_framebuffer_init);
688 * again, using drm_framebuffer_put(). 688 * again, using drm_framebuffer_put().
689 */ 689 */
690struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev, 690struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
691 struct drm_file *file_priv,
691 uint32_t id) 692 uint32_t id)
692{ 693{
693 struct drm_mode_object *obj; 694 struct drm_mode_object *obj;
694 struct drm_framebuffer *fb = NULL; 695 struct drm_framebuffer *fb = NULL;
695 696
696 obj = __drm_mode_object_find(dev, id, DRM_MODE_OBJECT_FB); 697 obj = __drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_FB);
697 if (obj) 698 if (obj)
698 fb = obj_to_fb(obj); 699 fb = obj_to_fb(obj);
699 return fb; 700 return fb;
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index fc7e995541c9..aa8cb9bfa499 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -27,19 +27,24 @@
27 * DOC: overview 27 * DOC: overview
28 * 28 *
29 * This library provides helpers for drivers that don't subclass 29 * This library provides helpers for drivers that don't subclass
30 * &drm_framebuffer and and use &drm_gem_object for their backing storage. 30 * &drm_framebuffer and use &drm_gem_object for their backing storage.
31 * 31 *
32 * Drivers without additional needs to validate framebuffers can simply use 32 * Drivers without additional needs to validate framebuffers can simply use
33 * drm_gem_fb_create() and everything is wired up automatically. But all 33 * drm_gem_fb_create() and everything is wired up automatically. Other drivers
34 * parts can be used individually. 34 * can use all parts independently.
35 */ 35 */
36 36
37/** 37/**
38 * drm_gem_fb_get_obj() - Get GEM object for framebuffer 38 * drm_gem_fb_get_obj() - Get GEM object backing the framebuffer
39 * @fb: The framebuffer 39 * @fb: Framebuffer
40 * @plane: Which plane 40 * @plane: Plane index
41 * 41 *
42 * Returns the GEM object for given framebuffer. 42 * No additional reference is taken beyond the one that the &drm_frambuffer
43 * already holds.
44 *
45 * Returns:
46 * Pointer to &drm_gem_object for the given framebuffer and plane index or NULL
47 * if it does not exist.
43 */ 48 */
44struct drm_gem_object *drm_gem_fb_get_obj(struct drm_framebuffer *fb, 49struct drm_gem_object *drm_gem_fb_get_obj(struct drm_framebuffer *fb,
45 unsigned int plane) 50 unsigned int plane)
@@ -82,7 +87,7 @@ drm_gem_fb_alloc(struct drm_device *dev,
82 87
83/** 88/**
84 * drm_gem_fb_destroy - Free GEM backed framebuffer 89 * drm_gem_fb_destroy - Free GEM backed framebuffer
85 * @fb: DRM framebuffer 90 * @fb: Framebuffer
86 * 91 *
87 * Frees a GEM backed framebuffer with its backing buffer(s) and the structure 92 * Frees a GEM backed framebuffer with its backing buffer(s) and the structure
88 * itself. Drivers can use this as their &drm_framebuffer_funcs->destroy 93 * itself. Drivers can use this as their &drm_framebuffer_funcs->destroy
@@ -102,12 +107,13 @@ EXPORT_SYMBOL(drm_gem_fb_destroy);
102 107
103/** 108/**
104 * drm_gem_fb_create_handle - Create handle for GEM backed framebuffer 109 * drm_gem_fb_create_handle - Create handle for GEM backed framebuffer
105 * @fb: DRM framebuffer 110 * @fb: Framebuffer
106 * @file: drm file 111 * @file: DRM file to register the handle for
107 * @handle: handle created 112 * @handle: Pointer to return the created handle
108 * 113 *
114 * This function creates a handle for the GEM object backing the framebuffer.
109 * Drivers can use this as their &drm_framebuffer_funcs->create_handle 115 * Drivers can use this as their &drm_framebuffer_funcs->create_handle
110 * callback. 116 * callback. The GETFB IOCTL calls into this callback.
111 * 117 *
112 * Returns: 118 * Returns:
113 * 0 on success or a negative error code on failure. 119 * 0 on success or a negative error code on failure.
@@ -120,18 +126,21 @@ int drm_gem_fb_create_handle(struct drm_framebuffer *fb, struct drm_file *file,
120EXPORT_SYMBOL(drm_gem_fb_create_handle); 126EXPORT_SYMBOL(drm_gem_fb_create_handle);
121 127
122/** 128/**
123 * drm_gem_fb_create_with_funcs() - helper function for the 129 * drm_gem_fb_create_with_funcs() - Helper function for the
124 * &drm_mode_config_funcs.fb_create 130 * &drm_mode_config_funcs.fb_create
125 * callback 131 * callback
126 * @dev: DRM device 132 * @dev: DRM device
127 * @file: drm file for the ioctl call 133 * @file: DRM file that holds the GEM handle(s) backing the framebuffer
128 * @mode_cmd: metadata from the userspace fb creation request 134 * @mode_cmd: Metadata from the userspace framebuffer creation request
129 * @funcs: vtable to be used for the new framebuffer object 135 * @funcs: vtable to be used for the new framebuffer object
130 * 136 *
131 * This can be used to set &drm_framebuffer_funcs for drivers that need the 137 * This can be used to set &drm_framebuffer_funcs for drivers that need the
132 * &drm_framebuffer_funcs.dirty callback. Use drm_gem_fb_create() if you don't 138 * &drm_framebuffer_funcs.dirty callback. Use drm_gem_fb_create() if you don't
133 * need to change &drm_framebuffer_funcs. 139 * need to change &drm_framebuffer_funcs.
134 * The function does buffer size validation. 140 * The function does buffer size validation.
141 *
142 * Returns:
143 * Pointer to a &drm_framebuffer on success or an error pointer on failure.
135 */ 144 */
136struct drm_framebuffer * 145struct drm_framebuffer *
137drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file, 146drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
@@ -192,15 +201,26 @@ static const struct drm_framebuffer_funcs drm_gem_fb_funcs = {
192}; 201};
193 202
194/** 203/**
195 * drm_gem_fb_create() - &drm_mode_config_funcs.fb_create callback function 204 * drm_gem_fb_create() - Helper function for the
205 * &drm_mode_config_funcs.fb_create callback
196 * @dev: DRM device 206 * @dev: DRM device
197 * @file: drm file for the ioctl call 207 * @file: DRM file that holds the GEM handle(s) backing the framebuffer
198 * @mode_cmd: metadata from the userspace fb creation request 208 * @mode_cmd: Metadata from the userspace framebuffer creation request
209 *
210 * This function creates a new framebuffer object described by
211 * &drm_mode_fb_cmd2. This description includes handles for the buffer(s)
212 * backing the framebuffer.
199 * 213 *
200 * If your hardware has special alignment or pitch requirements these should be 214 * If your hardware has special alignment or pitch requirements these should be
201 * checked before calling this function. The function does buffer size 215 * checked before calling this function. The function does buffer size
202 * validation. Use drm_gem_fb_create_with_funcs() if you need to set 216 * validation. Use drm_gem_fb_create_with_funcs() if you need to set
203 * &drm_framebuffer_funcs.dirty. 217 * &drm_framebuffer_funcs.dirty.
218 *
219 * Drivers can use this as their &drm_mode_config_funcs.fb_create callback.
220 * The ADDFB2 IOCTL calls into this callback.
221 *
222 * Returns:
223 * Pointer to a &drm_framebuffer on success or an error pointer on failure.
204 */ 224 */
205struct drm_framebuffer * 225struct drm_framebuffer *
206drm_gem_fb_create(struct drm_device *dev, struct drm_file *file, 226drm_gem_fb_create(struct drm_device *dev, struct drm_file *file,
@@ -212,15 +232,15 @@ drm_gem_fb_create(struct drm_device *dev, struct drm_file *file,
212EXPORT_SYMBOL_GPL(drm_gem_fb_create); 232EXPORT_SYMBOL_GPL(drm_gem_fb_create);
213 233
214/** 234/**
215 * drm_gem_fb_prepare_fb() - Prepare gem framebuffer 235 * drm_gem_fb_prepare_fb() - Prepare a GEM backed framebuffer
216 * @plane: Which plane 236 * @plane: Plane
217 * @state: Plane state attach fence to 237 * @state: Plane state the fence will be attached to
218 * 238 *
219 * This can be used as the &drm_plane_helper_funcs.prepare_fb hook. 239 * This function prepares a GEM backed framebuffer for scanout by checking if
220 * 240 * the plane framebuffer has a DMA-BUF attached. If it does, it extracts the
221 * This function checks if the plane FB has an dma-buf attached, extracts 241 * exclusive fence and attaches it to the plane state for the atomic helper to
222 * the exclusive fence and attaches it to plane state for the atomic helper 242 * wait on. This function can be used as the &drm_plane_helper_funcs.prepare_fb
223 * to wait on. 243 * callback.
224 * 244 *
225 * There is no need for &drm_plane_helper_funcs.cleanup_fb hook for simple 245 * There is no need for &drm_plane_helper_funcs.cleanup_fb hook for simple
226 * gem based framebuffer drivers which have their buffers always pinned in 246 * gem based framebuffer drivers which have their buffers always pinned in
@@ -246,17 +266,19 @@ int drm_gem_fb_prepare_fb(struct drm_plane *plane,
246EXPORT_SYMBOL_GPL(drm_gem_fb_prepare_fb); 266EXPORT_SYMBOL_GPL(drm_gem_fb_prepare_fb);
247 267
248/** 268/**
249 * drm_gem_fbdev_fb_create - Create a drm_framebuffer for fbdev emulation 269 * drm_gem_fbdev_fb_create - Create a GEM backed &drm_framebuffer for fbdev
270 * emulation
250 * @dev: DRM device 271 * @dev: DRM device
251 * @sizes: fbdev size description 272 * @sizes: fbdev size description
252 * @pitch_align: optional pitch alignment 273 * @pitch_align: Optional pitch alignment
253 * @obj: GEM object backing the framebuffer 274 * @obj: GEM object backing the framebuffer
254 * @funcs: vtable to be used for the new framebuffer object 275 * @funcs: vtable to be used for the new framebuffer object
255 * 276 *
256 * This function creates a framebuffer for use with fbdev emulation. 277 * This function creates a framebuffer from a &drm_fb_helper_surface_size
278 * description for use in the &drm_fb_helper_funcs.fb_probe callback.
257 * 279 *
258 * Returns: 280 * Returns:
259 * Pointer to a drm_framebuffer on success or an error pointer on failure. 281 * Pointer to a &drm_framebuffer on success or an error pointer on failure.
260 */ 282 */
261struct drm_framebuffer * 283struct drm_framebuffer *
262drm_gem_fbdev_fb_create(struct drm_device *dev, 284drm_gem_fbdev_fb_create(struct drm_device *dev,
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index fbc3f308fa19..edd921adcf33 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -55,7 +55,6 @@ int drm_clients_info(struct seq_file *m, void* data);
55int drm_gem_name_info(struct seq_file *m, void *data); 55int drm_gem_name_info(struct seq_file *m, void *data);
56 56
57/* drm_vblank.c */ 57/* drm_vblank.c */
58extern unsigned int drm_timestamp_monotonic;
59void drm_vblank_disable_and_save(struct drm_device *dev, unsigned int pipe); 58void drm_vblank_disable_and_save(struct drm_device *dev, unsigned int pipe);
60void drm_vblank_cleanup(struct drm_device *dev); 59void drm_vblank_cleanup(struct drm_device *dev);
61 60
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index a9ae6dd2d593..a78f03155466 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -235,7 +235,7 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_
235 /* Only some caps make sense with UMS/render-only drivers. */ 235 /* Only some caps make sense with UMS/render-only drivers. */
236 switch (req->capability) { 236 switch (req->capability) {
237 case DRM_CAP_TIMESTAMP_MONOTONIC: 237 case DRM_CAP_TIMESTAMP_MONOTONIC:
238 req->value = drm_timestamp_monotonic; 238 req->value = 1;
239 return 0; 239 return 0;
240 case DRM_CAP_PRIME: 240 case DRM_CAP_PRIME:
241 req->value |= dev->driver->prime_fd_to_handle ? DRM_PRIME_CAP_IMPORT : 0; 241 req->value |= dev->driver->prime_fd_to_handle ? DRM_PRIME_CAP_IMPORT : 0;
diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
index 7a1ea91d3343..240a05d91a53 100644
--- a/drivers/gpu/drm/drm_mode_object.c
+++ b/drivers/gpu/drm/drm_mode_object.c
@@ -105,6 +105,7 @@ void drm_mode_object_unregister(struct drm_device *dev,
105} 105}
106 106
107struct drm_mode_object *__drm_mode_object_find(struct drm_device *dev, 107struct drm_mode_object *__drm_mode_object_find(struct drm_device *dev,
108 struct drm_file *file_priv,
108 uint32_t id, uint32_t type) 109 uint32_t id, uint32_t type)
109{ 110{
110 struct drm_mode_object *obj = NULL; 111 struct drm_mode_object *obj = NULL;
@@ -127,7 +128,7 @@ struct drm_mode_object *__drm_mode_object_find(struct drm_device *dev,
127 128
128/** 129/**
129 * drm_mode_object_find - look up a drm object with static lifetime 130 * drm_mode_object_find - look up a drm object with static lifetime
130 * @dev: drm device 131 * @file_priv: drm file
131 * @id: id of the mode object 132 * @id: id of the mode object
132 * @type: type of the mode object 133 * @type: type of the mode object
133 * 134 *
@@ -136,11 +137,12 @@ struct drm_mode_object *__drm_mode_object_find(struct drm_device *dev,
136 * by callind drm_mode_object_put(). 137 * by callind drm_mode_object_put().
137 */ 138 */
138struct drm_mode_object *drm_mode_object_find(struct drm_device *dev, 139struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
140 struct drm_file *file_priv,
139 uint32_t id, uint32_t type) 141 uint32_t id, uint32_t type)
140{ 142{
141 struct drm_mode_object *obj = NULL; 143 struct drm_mode_object *obj = NULL;
142 144
143 obj = __drm_mode_object_find(dev, id, type); 145 obj = __drm_mode_object_find(dev, file_priv, id, type);
144 return obj; 146 return obj;
145} 147}
146EXPORT_SYMBOL(drm_mode_object_find); 148EXPORT_SYMBOL(drm_mode_object_find);
@@ -359,7 +361,7 @@ int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
359 361
360 drm_modeset_lock_all(dev); 362 drm_modeset_lock_all(dev);
361 363
362 obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type); 364 obj = drm_mode_object_find(dev, file_priv, arg->obj_id, arg->obj_type);
363 if (!obj) { 365 if (!obj) {
364 ret = -ENOENT; 366 ret = -ENOENT;
365 goto out; 367 goto out;
@@ -481,7 +483,7 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
481 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 483 if (!drm_core_check_feature(dev, DRIVER_MODESET))
482 return -EINVAL; 484 return -EINVAL;
483 485
484 arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type); 486 arg_obj = drm_mode_object_find(dev, file_priv, arg->obj_id, arg->obj_type);
485 if (!arg_obj) 487 if (!arg_obj)
486 return -ENOENT; 488 return -ENOENT;
487 489
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 72cba9805edc..6af02c7b5da3 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -513,7 +513,7 @@ int drm_mode_getplane(struct drm_device *dev, void *data,
513 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 513 if (!drm_core_check_feature(dev, DRIVER_MODESET))
514 return -EINVAL; 514 return -EINVAL;
515 515
516 plane = drm_plane_find(dev, plane_resp->plane_id); 516 plane = drm_plane_find(dev, file_priv, plane_resp->plane_id);
517 if (!plane) 517 if (!plane)
518 return -ENOENT; 518 return -ENOENT;
519 519
@@ -703,7 +703,7 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
703 * First, find the plane, crtc, and fb objects. If not available, 703 * First, find the plane, crtc, and fb objects. If not available,
704 * we don't bother to call the driver. 704 * we don't bother to call the driver.
705 */ 705 */
706 plane = drm_plane_find(dev, plane_req->plane_id); 706 plane = drm_plane_find(dev, file_priv, plane_req->plane_id);
707 if (!plane) { 707 if (!plane) {
708 DRM_DEBUG_KMS("Unknown plane ID %d\n", 708 DRM_DEBUG_KMS("Unknown plane ID %d\n",
709 plane_req->plane_id); 709 plane_req->plane_id);
@@ -711,14 +711,14 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
711 } 711 }
712 712
713 if (plane_req->fb_id) { 713 if (plane_req->fb_id) {
714 fb = drm_framebuffer_lookup(dev, plane_req->fb_id); 714 fb = drm_framebuffer_lookup(dev, file_priv, plane_req->fb_id);
715 if (!fb) { 715 if (!fb) {
716 DRM_DEBUG_KMS("Unknown framebuffer ID %d\n", 716 DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
717 plane_req->fb_id); 717 plane_req->fb_id);
718 return -ENOENT; 718 return -ENOENT;
719 } 719 }
720 720
721 crtc = drm_crtc_find(dev, plane_req->crtc_id); 721 crtc = drm_crtc_find(dev, file_priv, plane_req->crtc_id);
722 if (!crtc) { 722 if (!crtc) {
723 drm_framebuffer_put(fb); 723 drm_framebuffer_put(fb);
724 DRM_DEBUG_KMS("Unknown crtc ID %d\n", 724 DRM_DEBUG_KMS("Unknown crtc ID %d\n",
@@ -829,7 +829,7 @@ static int drm_mode_cursor_common(struct drm_device *dev,
829 if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags)) 829 if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags))
830 return -EINVAL; 830 return -EINVAL;
831 831
832 crtc = drm_crtc_find(dev, req->crtc_id); 832 crtc = drm_crtc_find(dev, file_priv, req->crtc_id);
833 if (!crtc) { 833 if (!crtc) {
834 DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id); 834 DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
835 return -ENOENT; 835 return -ENOENT;
@@ -944,7 +944,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
944 if ((page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC) && !dev->mode_config.async_page_flip) 944 if ((page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC) && !dev->mode_config.async_page_flip)
945 return -EINVAL; 945 return -EINVAL;
946 946
947 crtc = drm_crtc_find(dev, page_flip->crtc_id); 947 crtc = drm_crtc_find(dev, file_priv, page_flip->crtc_id);
948 if (!crtc) 948 if (!crtc)
949 return -ENOENT; 949 return -ENOENT;
950 950
@@ -1005,7 +1005,7 @@ retry:
1005 goto out; 1005 goto out;
1006 } 1006 }
1007 1007
1008 fb = drm_framebuffer_lookup(dev, page_flip->fb_id); 1008 fb = drm_framebuffer_lookup(dev, file_priv, page_flip->fb_id);
1009 if (!fb) { 1009 if (!fb) {
1010 ret = -ENOENT; 1010 ret = -ENOENT;
1011 goto out; 1011 goto out;
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 06aee1741e96..759ed93f4ba8 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -354,7 +354,7 @@ int drm_primary_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
354 /* Find current connectors for CRTC */ 354 /* Find current connectors for CRTC */
355 num_connectors = get_connectors_for_crtc(crtc, NULL, 0); 355 num_connectors = get_connectors_for_crtc(crtc, NULL, 0);
356 BUG_ON(num_connectors == 0); 356 BUG_ON(num_connectors == 0);
357 connector_list = kzalloc(num_connectors * sizeof(*connector_list), 357 connector_list = kcalloc(num_connectors, sizeof(*connector_list),
358 GFP_KERNEL); 358 GFP_KERNEL);
359 if (!connector_list) 359 if (!connector_list)
360 return -ENOMEM; 360 return -ENOMEM;
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 5840aabbf24e..6dc2dde5b672 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -99,7 +99,7 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode,
99 99
100 /* Step 2: Validate against encoders and crtcs */ 100 /* Step 2: Validate against encoders and crtcs */
101 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { 101 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
102 struct drm_encoder *encoder = drm_encoder_find(dev, ids[i]); 102 struct drm_encoder *encoder = drm_encoder_find(dev, NULL, ids[i]);
103 struct drm_crtc *crtc; 103 struct drm_crtc *crtc;
104 104
105 if (!encoder) 105 if (!encoder)
diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c
index bc5128203056..bae50e6b819d 100644
--- a/drivers/gpu/drm/drm_property.c
+++ b/drivers/gpu/drm/drm_property.c
@@ -450,7 +450,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
450 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 450 if (!drm_core_check_feature(dev, DRIVER_MODESET))
451 return -EINVAL; 451 return -EINVAL;
452 452
453 property = drm_property_find(dev, out_resp->prop_id); 453 property = drm_property_find(dev, file_priv, out_resp->prop_id);
454 if (!property) 454 if (!property)
455 return -ENOENT; 455 return -ENOENT;
456 456
@@ -634,7 +634,7 @@ struct drm_property_blob *drm_property_lookup_blob(struct drm_device *dev,
634 struct drm_mode_object *obj; 634 struct drm_mode_object *obj;
635 struct drm_property_blob *blob = NULL; 635 struct drm_property_blob *blob = NULL;
636 636
637 obj = __drm_mode_object_find(dev, id, DRM_MODE_OBJECT_BLOB); 637 obj = __drm_mode_object_find(dev, NULL, id, DRM_MODE_OBJECT_BLOB);
638 if (obj) 638 if (obj)
639 blob = obj_to_blob(obj); 639 blob = obj_to_blob(obj);
640 return blob; 640 return blob;
@@ -897,7 +897,7 @@ bool drm_property_change_valid_get(struct drm_property *property,
897 if (value == 0) 897 if (value == 0)
898 return true; 898 return true;
899 899
900 *ref = __drm_mode_object_find(property->dev, value, 900 *ref = __drm_mode_object_find(property->dev, NULL, value,
901 property->values[0]); 901 property->values[0]);
902 return *ref != NULL; 902 return *ref != NULL;
903 } 903 }
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 80b6151da9ae..f776fc1cc543 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -845,7 +845,8 @@ static int drm_syncobj_array_wait(struct drm_device *dev,
845} 845}
846 846
847static int drm_syncobj_array_find(struct drm_file *file_private, 847static int drm_syncobj_array_find(struct drm_file *file_private,
848 void *user_handles, uint32_t count_handles, 848 void __user *user_handles,
849 uint32_t count_handles,
849 struct drm_syncobj ***syncobjs_out) 850 struct drm_syncobj ***syncobjs_out)
850{ 851{
851 uint32_t i, *handles; 852 uint32_t i, *handles;
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index 70f2b9593edc..3af6c20ba03b 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -78,28 +78,20 @@
78 78
79static bool 79static bool
80drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe, 80drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
81 struct timeval *tvblank, bool in_vblank_irq); 81 ktime_t *tvblank, bool in_vblank_irq);
82 82
83static unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */ 83static unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
84 84
85/*
86 * Default to use monotonic timestamps for wait-for-vblank and page-flip
87 * complete events.
88 */
89unsigned int drm_timestamp_monotonic = 1;
90
91static int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */ 85static int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
92 86
93module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600); 87module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
94module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600); 88module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
95module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
96MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs] (0: never disable, <0: disable immediately)"); 89MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs] (0: never disable, <0: disable immediately)");
97MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]"); 90MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
98MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
99 91
100static void store_vblank(struct drm_device *dev, unsigned int pipe, 92static void store_vblank(struct drm_device *dev, unsigned int pipe,
101 u32 vblank_count_inc, 93 u32 vblank_count_inc,
102 struct timeval *t_vblank, u32 last) 94 ktime_t t_vblank, u32 last)
103{ 95{
104 struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; 96 struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
105 97
@@ -108,7 +100,7 @@ static void store_vblank(struct drm_device *dev, unsigned int pipe,
108 vblank->last = last; 100 vblank->last = last;
109 101
110 write_seqlock(&vblank->seqlock); 102 write_seqlock(&vblank->seqlock);
111 vblank->time = *t_vblank; 103 vblank->time = t_vblank;
112 vblank->count += vblank_count_inc; 104 vblank->count += vblank_count_inc;
113 write_sequnlock(&vblank->seqlock); 105 write_sequnlock(&vblank->seqlock);
114} 106}
@@ -151,7 +143,7 @@ static void drm_reset_vblank_timestamp(struct drm_device *dev, unsigned int pipe
151{ 143{
152 u32 cur_vblank; 144 u32 cur_vblank;
153 bool rc; 145 bool rc;
154 struct timeval t_vblank; 146 ktime_t t_vblank;
155 int count = DRM_TIMESTAMP_MAXRETRIES; 147 int count = DRM_TIMESTAMP_MAXRETRIES;
156 148
157 spin_lock(&dev->vblank_time_lock); 149 spin_lock(&dev->vblank_time_lock);
@@ -171,13 +163,13 @@ static void drm_reset_vblank_timestamp(struct drm_device *dev, unsigned int pipe
171 * interrupt and assign 0 for now, to mark the vblanktimestamp as invalid. 163 * interrupt and assign 0 for now, to mark the vblanktimestamp as invalid.
172 */ 164 */
173 if (!rc) 165 if (!rc)
174 t_vblank = (struct timeval) {0, 0}; 166 t_vblank = 0;
175 167
176 /* 168 /*
177 * +1 to make sure user will never see the same 169 * +1 to make sure user will never see the same
178 * vblank counter value before and after a modeset 170 * vblank counter value before and after a modeset
179 */ 171 */
180 store_vblank(dev, pipe, 1, &t_vblank, cur_vblank); 172 store_vblank(dev, pipe, 1, t_vblank, cur_vblank);
181 173
182 spin_unlock(&dev->vblank_time_lock); 174 spin_unlock(&dev->vblank_time_lock);
183} 175}
@@ -200,7 +192,7 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
200 struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; 192 struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
201 u32 cur_vblank, diff; 193 u32 cur_vblank, diff;
202 bool rc; 194 bool rc;
203 struct timeval t_vblank; 195 ktime_t t_vblank;
204 int count = DRM_TIMESTAMP_MAXRETRIES; 196 int count = DRM_TIMESTAMP_MAXRETRIES;
205 int framedur_ns = vblank->framedur_ns; 197 int framedur_ns = vblank->framedur_ns;
206 198
@@ -225,11 +217,7 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
225 /* trust the hw counter when it's around */ 217 /* trust the hw counter when it's around */
226 diff = (cur_vblank - vblank->last) & dev->max_vblank_count; 218 diff = (cur_vblank - vblank->last) & dev->max_vblank_count;
227 } else if (rc && framedur_ns) { 219 } else if (rc && framedur_ns) {
228 const struct timeval *t_old; 220 u64 diff_ns = ktime_to_ns(ktime_sub(t_vblank, vblank->time));
229 u64 diff_ns;
230
231 t_old = &vblank->time;
232 diff_ns = timeval_to_ns(&t_vblank) - timeval_to_ns(t_old);
233 221
234 /* 222 /*
235 * Figure out how many vblanks we've missed based 223 * Figure out how many vblanks we've missed based
@@ -278,9 +266,9 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
278 * for now, to mark the vblanktimestamp as invalid. 266 * for now, to mark the vblanktimestamp as invalid.
279 */ 267 */
280 if (!rc && !in_vblank_irq) 268 if (!rc && !in_vblank_irq)
281 t_vblank = (struct timeval) {0, 0}; 269 t_vblank = 0;
282 270
283 store_vblank(dev, pipe, diff, &t_vblank, cur_vblank); 271 store_vblank(dev, pipe, diff, t_vblank, cur_vblank);
284} 272}
285 273
286static u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe) 274static u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe)
@@ -556,7 +544,7 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants);
556 * @pipe: index of CRTC whose vblank timestamp to retrieve 544 * @pipe: index of CRTC whose vblank timestamp to retrieve
557 * @max_error: Desired maximum allowable error in timestamps (nanosecs) 545 * @max_error: Desired maximum allowable error in timestamps (nanosecs)
558 * On return contains true maximum error of timestamp 546 * On return contains true maximum error of timestamp
559 * @vblank_time: Pointer to struct timeval which should receive the timestamp 547 * @vblank_time: Pointer to time which should receive the timestamp
560 * @in_vblank_irq: 548 * @in_vblank_irq:
561 * True when called from drm_crtc_handle_vblank(). Some drivers 549 * True when called from drm_crtc_handle_vblank(). Some drivers
562 * need to apply some workarounds for gpu-specific vblank irq quirks 550 * need to apply some workarounds for gpu-specific vblank irq quirks
@@ -584,10 +572,10 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants);
584bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, 572bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
585 unsigned int pipe, 573 unsigned int pipe,
586 int *max_error, 574 int *max_error,
587 struct timeval *vblank_time, 575 ktime_t *vblank_time,
588 bool in_vblank_irq) 576 bool in_vblank_irq)
589{ 577{
590 struct timeval tv_etime; 578 struct timespec64 ts_etime, ts_vblank_time;
591 ktime_t stime, etime; 579 ktime_t stime, etime;
592 bool vbl_status; 580 bool vbl_status;
593 struct drm_crtc *crtc; 581 struct drm_crtc *crtc;
@@ -676,41 +664,31 @@ bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
676 delta_ns = div_s64(1000000LL * (vpos * mode->crtc_htotal + hpos), 664 delta_ns = div_s64(1000000LL * (vpos * mode->crtc_htotal + hpos),
677 mode->crtc_clock); 665 mode->crtc_clock);
678 666
679 if (!drm_timestamp_monotonic)
680 etime = ktime_mono_to_real(etime);
681
682 /* save this only for debugging purposes */ 667 /* save this only for debugging purposes */
683 tv_etime = ktime_to_timeval(etime); 668 ts_etime = ktime_to_timespec64(etime);
669 ts_vblank_time = ktime_to_timespec64(*vblank_time);
684 /* Subtract time delta from raw timestamp to get final 670 /* Subtract time delta from raw timestamp to get final
685 * vblank_time timestamp for end of vblank. 671 * vblank_time timestamp for end of vblank.
686 */ 672 */
687 etime = ktime_sub_ns(etime, delta_ns); 673 etime = ktime_sub_ns(etime, delta_ns);
688 *vblank_time = ktime_to_timeval(etime); 674 *vblank_time = etime;
689 675
690 DRM_DEBUG_VBL("crtc %u : v p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n", 676 DRM_DEBUG_VBL("crtc %u : v p(%d,%d)@ %lld.%06ld -> %lld.%06ld [e %d us, %d rep]\n",
691 pipe, hpos, vpos, 677 pipe, hpos, vpos,
692 (long)tv_etime.tv_sec, (long)tv_etime.tv_usec, 678 (u64)ts_etime.tv_sec, ts_etime.tv_nsec / 1000,
693 (long)vblank_time->tv_sec, (long)vblank_time->tv_usec, 679 (u64)ts_vblank_time.tv_sec, ts_vblank_time.tv_nsec / 1000,
694 duration_ns/1000, i); 680 duration_ns / 1000, i);
695 681
696 return true; 682 return true;
697} 683}
698EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos); 684EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos);
699 685
700static struct timeval get_drm_timestamp(void)
701{
702 ktime_t now;
703
704 now = drm_timestamp_monotonic ? ktime_get() : ktime_get_real();
705 return ktime_to_timeval(now);
706}
707
708/** 686/**
709 * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent 687 * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
710 * vblank interval 688 * vblank interval
711 * @dev: DRM device 689 * @dev: DRM device
712 * @pipe: index of CRTC whose vblank timestamp to retrieve 690 * @pipe: index of CRTC whose vblank timestamp to retrieve
713 * @tvblank: Pointer to target struct timeval which should receive the timestamp 691 * @tvblank: Pointer to target time which should receive the timestamp
714 * @in_vblank_irq: 692 * @in_vblank_irq:
715 * True when called from drm_crtc_handle_vblank(). Some drivers 693 * True when called from drm_crtc_handle_vblank(). Some drivers
716 * need to apply some workarounds for gpu-specific vblank irq quirks 694 * need to apply some workarounds for gpu-specific vblank irq quirks
@@ -728,7 +706,7 @@ static struct timeval get_drm_timestamp(void)
728 */ 706 */
729static bool 707static bool
730drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe, 708drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
731 struct timeval *tvblank, bool in_vblank_irq) 709 ktime_t *tvblank, bool in_vblank_irq)
732{ 710{
733 bool ret = false; 711 bool ret = false;
734 712
@@ -744,7 +722,7 @@ drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
744 * Return current monotonic/gettimeofday timestamp as best estimate. 722 * Return current monotonic/gettimeofday timestamp as best estimate.
745 */ 723 */
746 if (!ret) 724 if (!ret)
747 *tvblank = get_drm_timestamp(); 725 *tvblank = ktime_get();
748 726
749 return ret; 727 return ret;
750} 728}
@@ -769,14 +747,14 @@ u32 drm_crtc_vblank_count(struct drm_crtc *crtc)
769EXPORT_SYMBOL(drm_crtc_vblank_count); 747EXPORT_SYMBOL(drm_crtc_vblank_count);
770 748
771static u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe, 749static u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
772 struct timeval *vblanktime) 750 ktime_t *vblanktime)
773{ 751{
774 struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; 752 struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
775 u32 vblank_count; 753 u32 vblank_count;
776 unsigned int seq; 754 unsigned int seq;
777 755
778 if (WARN_ON(pipe >= dev->num_crtcs)) { 756 if (WARN_ON(pipe >= dev->num_crtcs)) {
779 *vblanktime = (struct timeval) { 0 }; 757 *vblanktime = 0;
780 return 0; 758 return 0;
781 } 759 }
782 760
@@ -793,7 +771,7 @@ static u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
793 * drm_crtc_vblank_count_and_time - retrieve "cooked" vblank counter value 771 * drm_crtc_vblank_count_and_time - retrieve "cooked" vblank counter value
794 * and the system timestamp corresponding to that vblank counter value 772 * and the system timestamp corresponding to that vblank counter value
795 * @crtc: which counter to retrieve 773 * @crtc: which counter to retrieve
796 * @vblanktime: Pointer to struct timeval to receive the vblank timestamp. 774 * @vblanktime: Pointer to time to receive the vblank timestamp.
797 * 775 *
798 * Fetches the "cooked" vblank count value that represents the number of 776 * Fetches the "cooked" vblank count value that represents the number of
799 * vblank events since the system was booted, including lost events due to 777 * vblank events since the system was booted, including lost events due to
@@ -801,7 +779,7 @@ static u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
801 * of the vblank interval that corresponds to the current vblank counter value. 779 * of the vblank interval that corresponds to the current vblank counter value.
802 */ 780 */
803u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc, 781u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
804 struct timeval *vblanktime) 782 ktime_t *vblanktime)
805{ 783{
806 return drm_vblank_count_and_time(crtc->dev, drm_crtc_index(crtc), 784 return drm_vblank_count_and_time(crtc->dev, drm_crtc_index(crtc),
807 vblanktime); 785 vblanktime);
@@ -810,11 +788,18 @@ EXPORT_SYMBOL(drm_crtc_vblank_count_and_time);
810 788
811static void send_vblank_event(struct drm_device *dev, 789static void send_vblank_event(struct drm_device *dev,
812 struct drm_pending_vblank_event *e, 790 struct drm_pending_vblank_event *e,
813 unsigned long seq, struct timeval *now) 791 unsigned long seq, ktime_t now)
814{ 792{
793 struct timespec64 tv = ktime_to_timespec64(now);
794
815 e->event.sequence = seq; 795 e->event.sequence = seq;
816 e->event.tv_sec = now->tv_sec; 796 /*
817 e->event.tv_usec = now->tv_usec; 797 * e->event is a user space structure, with hardcoded unsigned
798 * 32-bit seconds/microseconds. This is safe as we always use
799 * monotonic timestamps since linux-4.15
800 */
801 e->event.tv_sec = tv.tv_sec;
802 e->event.tv_usec = tv.tv_nsec / 1000;
818 803
819 trace_drm_vblank_event_delivered(e->base.file_priv, e->pipe, 804 trace_drm_vblank_event_delivered(e->base.file_priv, e->pipe,
820 e->event.sequence); 805 e->event.sequence);
@@ -869,7 +854,7 @@ void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
869 assert_spin_locked(&dev->event_lock); 854 assert_spin_locked(&dev->event_lock);
870 855
871 e->pipe = pipe; 856 e->pipe = pipe;
872 e->event.sequence = drm_vblank_count(dev, pipe); 857 e->event.sequence = drm_crtc_accurate_vblank_count(crtc) + 1;
873 e->event.crtc_id = crtc->base.id; 858 e->event.crtc_id = crtc->base.id;
874 list_add_tail(&e->base.link, &dev->vblank_event_list); 859 list_add_tail(&e->base.link, &dev->vblank_event_list);
875} 860}
@@ -891,18 +876,18 @@ void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
891{ 876{
892 struct drm_device *dev = crtc->dev; 877 struct drm_device *dev = crtc->dev;
893 unsigned int seq, pipe = drm_crtc_index(crtc); 878 unsigned int seq, pipe = drm_crtc_index(crtc);
894 struct timeval now; 879 ktime_t now;
895 880
896 if (dev->num_crtcs > 0) { 881 if (dev->num_crtcs > 0) {
897 seq = drm_vblank_count_and_time(dev, pipe, &now); 882 seq = drm_vblank_count_and_time(dev, pipe, &now);
898 } else { 883 } else {
899 seq = 0; 884 seq = 0;
900 885
901 now = get_drm_timestamp(); 886 now = ktime_get();
902 } 887 }
903 e->pipe = pipe; 888 e->pipe = pipe;
904 e->event.crtc_id = crtc->base.id; 889 e->event.crtc_id = crtc->base.id;
905 send_vblank_event(dev, e, seq, &now); 890 send_vblank_event(dev, e, seq, now);
906} 891}
907EXPORT_SYMBOL(drm_crtc_send_vblank_event); 892EXPORT_SYMBOL(drm_crtc_send_vblank_event);
908 893
@@ -1100,7 +1085,8 @@ void drm_crtc_vblank_off(struct drm_crtc *crtc)
1100 unsigned int pipe = drm_crtc_index(crtc); 1085 unsigned int pipe = drm_crtc_index(crtc);
1101 struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; 1086 struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
1102 struct drm_pending_vblank_event *e, *t; 1087 struct drm_pending_vblank_event *e, *t;
1103 struct timeval now; 1088
1089 ktime_t now;
1104 unsigned long irqflags; 1090 unsigned long irqflags;
1105 unsigned int seq; 1091 unsigned int seq;
1106 1092
@@ -1141,7 +1127,7 @@ void drm_crtc_vblank_off(struct drm_crtc *crtc)
1141 e->event.sequence, seq); 1127 e->event.sequence, seq);
1142 list_del(&e->base.link); 1128 list_del(&e->base.link);
1143 drm_vblank_put(dev, pipe); 1129 drm_vblank_put(dev, pipe);
1144 send_vblank_event(dev, e, seq, &now); 1130 send_vblank_event(dev, e, seq, now);
1145 } 1131 }
1146 spin_unlock_irqrestore(&dev->event_lock, irqflags); 1132 spin_unlock_irqrestore(&dev->event_lock, irqflags);
1147 1133
@@ -1321,7 +1307,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
1321{ 1307{
1322 struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; 1308 struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
1323 struct drm_pending_vblank_event *e; 1309 struct drm_pending_vblank_event *e;
1324 struct timeval now; 1310 ktime_t now;
1325 unsigned long flags; 1311 unsigned long flags;
1326 unsigned int seq; 1312 unsigned int seq;
1327 int ret; 1313 int ret;
@@ -1367,7 +1353,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
1367 e->event.sequence = vblwait->request.sequence; 1353 e->event.sequence = vblwait->request.sequence;
1368 if (vblank_passed(seq, vblwait->request.sequence)) { 1354 if (vblank_passed(seq, vblwait->request.sequence)) {
1369 drm_vblank_put(dev, pipe); 1355 drm_vblank_put(dev, pipe);
1370 send_vblank_event(dev, e, seq, &now); 1356 send_vblank_event(dev, e, seq, now);
1371 vblwait->reply.sequence = seq; 1357 vblwait->reply.sequence = seq;
1372 } else { 1358 } else {
1373 /* drm_handle_vblank_events will call drm_vblank_put */ 1359 /* drm_handle_vblank_events will call drm_vblank_put */
@@ -1398,6 +1384,23 @@ static bool drm_wait_vblank_is_query(union drm_wait_vblank *vblwait)
1398 _DRM_VBLANK_NEXTONMISS)); 1384 _DRM_VBLANK_NEXTONMISS));
1399} 1385}
1400 1386
1387static void drm_wait_vblank_reply(struct drm_device *dev, unsigned int pipe,
1388 struct drm_wait_vblank_reply *reply)
1389{
1390 ktime_t now;
1391 struct timespec64 ts;
1392
1393 /*
1394 * drm_wait_vblank_reply is a UAPI structure that uses 'long'
1395 * to store the seconds. This is safe as we always use monotonic
1396 * timestamps since linux-4.15.
1397 */
1398 reply->sequence = drm_vblank_count_and_time(dev, pipe, &now);
1399 ts = ktime_to_timespec64(now);
1400 reply->tval_sec = (u32)ts.tv_sec;
1401 reply->tval_usec = ts.tv_nsec / 1000;
1402}
1403
1401int drm_wait_vblank_ioctl(struct drm_device *dev, void *data, 1404int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
1402 struct drm_file *file_priv) 1405 struct drm_file *file_priv)
1403{ 1406{
@@ -1439,12 +1442,7 @@ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
1439 if (dev->vblank_disable_immediate && 1442 if (dev->vblank_disable_immediate &&
1440 drm_wait_vblank_is_query(vblwait) && 1443 drm_wait_vblank_is_query(vblwait) &&
1441 READ_ONCE(vblank->enabled)) { 1444 READ_ONCE(vblank->enabled)) {
1442 struct timeval now; 1445 drm_wait_vblank_reply(dev, pipe, &vblwait->reply);
1443
1444 vblwait->reply.sequence =
1445 drm_vblank_count_and_time(dev, pipe, &now);
1446 vblwait->reply.tval_sec = now.tv_sec;
1447 vblwait->reply.tval_usec = now.tv_usec;
1448 return 0; 1446 return 0;
1449 } 1447 }
1450 1448
@@ -1487,11 +1485,7 @@ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
1487 } 1485 }
1488 1486
1489 if (ret != -EINTR) { 1487 if (ret != -EINTR) {
1490 struct timeval now; 1488 drm_wait_vblank_reply(dev, pipe, &vblwait->reply);
1491
1492 vblwait->reply.sequence = drm_vblank_count_and_time(dev, pipe, &now);
1493 vblwait->reply.tval_sec = now.tv_sec;
1494 vblwait->reply.tval_usec = now.tv_usec;
1495 1489
1496 DRM_DEBUG("crtc %d returning %u to client\n", 1490 DRM_DEBUG("crtc %d returning %u to client\n",
1497 pipe, vblwait->reply.sequence); 1491 pipe, vblwait->reply.sequence);
@@ -1507,7 +1501,7 @@ done:
1507static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe) 1501static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe)
1508{ 1502{
1509 struct drm_pending_vblank_event *e, *t; 1503 struct drm_pending_vblank_event *e, *t;
1510 struct timeval now; 1504 ktime_t now;
1511 unsigned int seq; 1505 unsigned int seq;
1512 1506
1513 assert_spin_locked(&dev->event_lock); 1507 assert_spin_locked(&dev->event_lock);
@@ -1525,7 +1519,7 @@ static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe)
1525 1519
1526 list_del(&e->base.link); 1520 list_del(&e->base.link);
1527 drm_vblank_put(dev, pipe); 1521 drm_vblank_put(dev, pipe);
1528 send_vblank_event(dev, e, seq, &now); 1522 send_vblank_event(dev, e, seq, now);
1529 } 1523 }
1530 1524
1531 trace_drm_vblank_event(pipe, seq); 1525 trace_drm_vblank_event(pipe, seq);
diff --git a/drivers/gpu/drm/etnaviv/Kconfig b/drivers/gpu/drm/etnaviv/Kconfig
index 38b477b5fbf9..a29b8f59eb15 100644
--- a/drivers/gpu/drm/etnaviv/Kconfig
+++ b/drivers/gpu/drm/etnaviv/Kconfig
@@ -7,8 +7,6 @@ config DRM_ETNAVIV
7 select SHMEM 7 select SHMEM
8 select SYNC_FILE 8 select SYNC_FILE
9 select TMPFS 9 select TMPFS
10 select IOMMU_API
11 select IOMMU_SUPPORT
12 select WANT_DEV_COREDUMP 10 select WANT_DEV_COREDUMP
13 select CMA if HAVE_DMA_CONTIGUOUS 11 select CMA if HAVE_DMA_CONTIGUOUS
14 select DMA_CMA if HAVE_DMA_CONTIGUOUS 12 select DMA_CMA if HAVE_DMA_CONTIGUOUS
diff --git a/drivers/gpu/drm/etnaviv/Makefile b/drivers/gpu/drm/etnaviv/Makefile
index 4f76c992043f..15c3bfa89a79 100644
--- a/drivers/gpu/drm/etnaviv/Makefile
+++ b/drivers/gpu/drm/etnaviv/Makefile
@@ -10,6 +10,7 @@ etnaviv-y := \
10 etnaviv_gpu.o \ 10 etnaviv_gpu.o \
11 etnaviv_iommu_v2.o \ 11 etnaviv_iommu_v2.o \
12 etnaviv_iommu.o \ 12 etnaviv_iommu.o \
13 etnaviv_mmu.o 13 etnaviv_mmu.o \
14 etnaviv_perfmon.o
14 15
15obj-$(CONFIG_DRM_ETNAVIV) += etnaviv.o 16obj-$(CONFIG_DRM_ETNAVIV) += etnaviv.o
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
index ed9588f36bc9..9e7098e3207f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -250,6 +250,42 @@ void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
250 } 250 }
251} 251}
252 252
253/* Append a 'sync point' to the ring buffer. */
254void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event)
255{
256 struct etnaviv_cmdbuf *buffer = gpu->buffer;
257 unsigned int waitlink_offset = buffer->user_size - 16;
258 u32 dwords, target;
259
260 /*
261 * We need at most 3 dwords in the return target:
262 * 1 event + 1 end + 1 wait + 1 link.
263 */
264 dwords = 4;
265 target = etnaviv_buffer_reserve(gpu, buffer, dwords);
266
267 /* Signal sync point event */
268 CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
269 VIVS_GL_EVENT_FROM_PE);
270
271 /* Stop the FE to 'pause' the GPU */
272 CMD_END(buffer);
273
274 /* Append waitlink */
275 CMD_WAIT(buffer);
276 CMD_LINK(buffer, 2, etnaviv_cmdbuf_get_va(buffer) +
277 buffer->user_size - 4);
278
279 /*
280 * Kick off the 'sync point' command by replacing the previous
281 * WAIT with a link to the address in the ring buffer.
282 */
283 etnaviv_buffer_replace_wait(buffer, waitlink_offset,
284 VIV_FE_LINK_HEADER_OP_LINK |
285 VIV_FE_LINK_HEADER_PREFETCH(dwords),
286 target);
287}
288
253/* Append a command buffer to the ring buffer. */ 289/* Append a command buffer to the ring buffer. */
254void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event, 290void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
255 struct etnaviv_cmdbuf *cmdbuf) 291 struct etnaviv_cmdbuf *cmdbuf)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
index 633e0f07cbac..66ac79558bbd 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
@@ -19,6 +19,7 @@
19#include "etnaviv_cmdbuf.h" 19#include "etnaviv_cmdbuf.h"
20#include "etnaviv_gpu.h" 20#include "etnaviv_gpu.h"
21#include "etnaviv_mmu.h" 21#include "etnaviv_mmu.h"
22#include "etnaviv_perfmon.h"
22 23
23#define SUBALLOC_SIZE SZ_256K 24#define SUBALLOC_SIZE SZ_256K
24#define SUBALLOC_GRANULE SZ_4K 25#define SUBALLOC_GRANULE SZ_4K
@@ -87,9 +88,10 @@ void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc)
87 88
88struct etnaviv_cmdbuf * 89struct etnaviv_cmdbuf *
89etnaviv_cmdbuf_new(struct etnaviv_cmdbuf_suballoc *suballoc, u32 size, 90etnaviv_cmdbuf_new(struct etnaviv_cmdbuf_suballoc *suballoc, u32 size,
90 size_t nr_bos) 91 size_t nr_bos, size_t nr_pmrs)
91{ 92{
92 struct etnaviv_cmdbuf *cmdbuf; 93 struct etnaviv_cmdbuf *cmdbuf;
94 struct etnaviv_perfmon_request *pmrs;
93 size_t sz = size_vstruct(nr_bos, sizeof(cmdbuf->bo_map[0]), 95 size_t sz = size_vstruct(nr_bos, sizeof(cmdbuf->bo_map[0]),
94 sizeof(*cmdbuf)); 96 sizeof(*cmdbuf));
95 int granule_offs, order, ret; 97 int granule_offs, order, ret;
@@ -98,6 +100,12 @@ etnaviv_cmdbuf_new(struct etnaviv_cmdbuf_suballoc *suballoc, u32 size,
98 if (!cmdbuf) 100 if (!cmdbuf)
99 return NULL; 101 return NULL;
100 102
103 sz = sizeof(*pmrs) * nr_pmrs;
104 pmrs = kzalloc(sz, GFP_KERNEL);
105 if (!pmrs)
106 goto out_free_cmdbuf;
107
108 cmdbuf->pmrs = pmrs;
101 cmdbuf->suballoc = suballoc; 109 cmdbuf->suballoc = suballoc;
102 cmdbuf->size = size; 110 cmdbuf->size = size;
103 111
@@ -124,6 +132,10 @@ retry:
124 cmdbuf->vaddr = suballoc->vaddr + cmdbuf->suballoc_offset; 132 cmdbuf->vaddr = suballoc->vaddr + cmdbuf->suballoc_offset;
125 133
126 return cmdbuf; 134 return cmdbuf;
135
136out_free_cmdbuf:
137 kfree(cmdbuf);
138 return NULL;
127} 139}
128 140
129void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf) 141void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
@@ -139,6 +151,7 @@ void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
139 suballoc->free_space = 1; 151 suballoc->free_space = 1;
140 mutex_unlock(&suballoc->lock); 152 mutex_unlock(&suballoc->lock);
141 wake_up_all(&suballoc->free_event); 153 wake_up_all(&suballoc->free_event);
154 kfree(cmdbuf->pmrs);
142 kfree(cmdbuf); 155 kfree(cmdbuf);
143} 156}
144 157
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
index 80d78076c679..b6348b9f2a9d 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
@@ -21,6 +21,7 @@
21 21
22struct etnaviv_gpu; 22struct etnaviv_gpu;
23struct etnaviv_cmdbuf_suballoc; 23struct etnaviv_cmdbuf_suballoc;
24struct etnaviv_perfmon_request;
24 25
25struct etnaviv_cmdbuf { 26struct etnaviv_cmdbuf {
26 /* suballocator this cmdbuf is allocated from */ 27 /* suballocator this cmdbuf is allocated from */
@@ -38,6 +39,9 @@ struct etnaviv_cmdbuf {
38 u32 exec_state; 39 u32 exec_state;
39 /* per GPU in-flight list */ 40 /* per GPU in-flight list */
40 struct list_head node; 41 struct list_head node;
42 /* perfmon requests */
43 unsigned int nr_pmrs;
44 struct etnaviv_perfmon_request *pmrs;
41 /* BOs attached to this command buffer */ 45 /* BOs attached to this command buffer */
42 unsigned int nr_bos; 46 unsigned int nr_bos;
43 struct etnaviv_vram_mapping *bo_map[0]; 47 struct etnaviv_vram_mapping *bo_map[0];
@@ -49,7 +53,7 @@ void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc);
49 53
50struct etnaviv_cmdbuf * 54struct etnaviv_cmdbuf *
51etnaviv_cmdbuf_new(struct etnaviv_cmdbuf_suballoc *suballoc, u32 size, 55etnaviv_cmdbuf_new(struct etnaviv_cmdbuf_suballoc *suballoc, u32 size,
52 size_t nr_bos); 56 size_t nr_bos, size_t nr_pmrs);
53void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf); 57void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf);
54 58
55u32 etnaviv_cmdbuf_get_va(struct etnaviv_cmdbuf *buf); 59u32 etnaviv_cmdbuf_get_va(struct etnaviv_cmdbuf *buf);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 2cb4773823c2..3fadb8d45e51 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -23,6 +23,7 @@
23#include "etnaviv_gpu.h" 23#include "etnaviv_gpu.h"
24#include "etnaviv_gem.h" 24#include "etnaviv_gem.h"
25#include "etnaviv_mmu.h" 25#include "etnaviv_mmu.h"
26#include "etnaviv_perfmon.h"
26 27
27#ifdef CONFIG_DRM_ETNAVIV_REGISTER_LOGGING 28#ifdef CONFIG_DRM_ETNAVIV_REGISTER_LOGGING
28static bool reglog; 29static bool reglog;
@@ -451,6 +452,40 @@ static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
451 return ret; 452 return ret;
452} 453}
453 454
455static int etnaviv_ioctl_pm_query_dom(struct drm_device *dev, void *data,
456 struct drm_file *file)
457{
458 struct etnaviv_drm_private *priv = dev->dev_private;
459 struct drm_etnaviv_pm_domain *args = data;
460 struct etnaviv_gpu *gpu;
461
462 if (args->pipe >= ETNA_MAX_PIPES)
463 return -EINVAL;
464
465 gpu = priv->gpu[args->pipe];
466 if (!gpu)
467 return -ENXIO;
468
469 return etnaviv_pm_query_dom(gpu, args);
470}
471
472static int etnaviv_ioctl_pm_query_sig(struct drm_device *dev, void *data,
473 struct drm_file *file)
474{
475 struct etnaviv_drm_private *priv = dev->dev_private;
476 struct drm_etnaviv_pm_signal *args = data;
477 struct etnaviv_gpu *gpu;
478
479 if (args->pipe >= ETNA_MAX_PIPES)
480 return -EINVAL;
481
482 gpu = priv->gpu[args->pipe];
483 if (!gpu)
484 return -ENXIO;
485
486 return etnaviv_pm_query_sig(gpu, args);
487}
488
454static const struct drm_ioctl_desc etnaviv_ioctls[] = { 489static const struct drm_ioctl_desc etnaviv_ioctls[] = {
455#define ETNA_IOCTL(n, func, flags) \ 490#define ETNA_IOCTL(n, func, flags) \
456 DRM_IOCTL_DEF_DRV(ETNAVIV_##n, etnaviv_ioctl_##func, flags) 491 DRM_IOCTL_DEF_DRV(ETNAVIV_##n, etnaviv_ioctl_##func, flags)
@@ -463,6 +498,8 @@ static const struct drm_ioctl_desc etnaviv_ioctls[] = {
463 ETNA_IOCTL(WAIT_FENCE, wait_fence, DRM_AUTH|DRM_RENDER_ALLOW), 498 ETNA_IOCTL(WAIT_FENCE, wait_fence, DRM_AUTH|DRM_RENDER_ALLOW),
464 ETNA_IOCTL(GEM_USERPTR, gem_userptr, DRM_AUTH|DRM_RENDER_ALLOW), 499 ETNA_IOCTL(GEM_USERPTR, gem_userptr, DRM_AUTH|DRM_RENDER_ALLOW),
465 ETNA_IOCTL(GEM_WAIT, gem_wait, DRM_AUTH|DRM_RENDER_ALLOW), 500 ETNA_IOCTL(GEM_WAIT, gem_wait, DRM_AUTH|DRM_RENDER_ALLOW),
501 ETNA_IOCTL(PM_QUERY_DOM, pm_query_dom, DRM_AUTH|DRM_RENDER_ALLOW),
502 ETNA_IOCTL(PM_QUERY_SIG, pm_query_sig, DRM_AUTH|DRM_RENDER_ALLOW),
466}; 503};
467 504
468static const struct vm_operations_struct vm_ops = { 505static const struct vm_operations_struct vm_ops = {
@@ -513,7 +550,7 @@ static struct drm_driver etnaviv_drm_driver = {
513 .desc = "etnaviv DRM", 550 .desc = "etnaviv DRM",
514 .date = "20151214", 551 .date = "20151214",
515 .major = 1, 552 .major = 1,
516 .minor = 1, 553 .minor = 2,
517}; 554};
518 555
519/* 556/*
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index 058389f93b69..d249acb6da08 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -26,7 +26,6 @@
26#include <linux/pm_runtime.h> 26#include <linux/pm_runtime.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/list.h> 28#include <linux/list.h>
29#include <linux/iommu.h>
30#include <linux/types.h> 29#include <linux/types.h>
31#include <linux/sizes.h> 30#include <linux/sizes.h>
32 31
@@ -92,15 +91,12 @@ int etnaviv_gem_cpu_fini(struct drm_gem_object *obj);
92void etnaviv_gem_free_object(struct drm_gem_object *obj); 91void etnaviv_gem_free_object(struct drm_gem_object *obj);
93int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file, 92int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
94 u32 size, u32 flags, u32 *handle); 93 u32 size, u32 flags, u32 *handle);
95struct drm_gem_object *etnaviv_gem_new_locked(struct drm_device *dev,
96 u32 size, u32 flags);
97struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
98 u32 size, u32 flags);
99int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file, 94int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
100 uintptr_t ptr, u32 size, u32 flags, u32 *handle); 95 uintptr_t ptr, u32 size, u32 flags, u32 *handle);
101u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu); 96u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu);
102u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr); 97u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr);
103void etnaviv_buffer_end(struct etnaviv_gpu *gpu); 98void etnaviv_buffer_end(struct etnaviv_gpu *gpu);
99void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event);
104void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event, 100void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
105 struct etnaviv_cmdbuf *cmdbuf); 101 struct etnaviv_cmdbuf *cmdbuf);
106void etnaviv_validate_init(void); 102void etnaviv_validate_init(void);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 57881167ccd2..5884ab623e0a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -704,25 +704,6 @@ int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
704 return ret; 704 return ret;
705} 705}
706 706
707struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
708 u32 size, u32 flags)
709{
710 struct drm_gem_object *obj;
711 int ret;
712
713 obj = __etnaviv_gem_new(dev, size, flags);
714 if (IS_ERR(obj))
715 return obj;
716
717 ret = etnaviv_gem_obj_add(dev, obj);
718 if (ret < 0) {
719 drm_gem_object_put_unlocked(obj);
720 return ERR_PTR(ret);
721 }
722
723 return obj;
724}
725
726int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags, 707int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
727 struct reservation_object *robj, const struct etnaviv_gem_ops *ops, 708 struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
728 struct etnaviv_gem_object **res) 709 struct etnaviv_gem_object **res)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 46dfe0737f43..ff911541a190 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -21,6 +21,7 @@
21#include "etnaviv_drv.h" 21#include "etnaviv_drv.h"
22#include "etnaviv_gpu.h" 22#include "etnaviv_gpu.h"
23#include "etnaviv_gem.h" 23#include "etnaviv_gem.h"
24#include "etnaviv_perfmon.h"
24 25
25/* 26/*
26 * Cmdstream submission: 27 * Cmdstream submission:
@@ -283,6 +284,54 @@ static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
283 return 0; 284 return 0;
284} 285}
285 286
287static int submit_perfmon_validate(struct etnaviv_gem_submit *submit,
288 struct etnaviv_cmdbuf *cmdbuf,
289 const struct drm_etnaviv_gem_submit_pmr *pmrs,
290 u32 nr_pms)
291{
292 u32 i;
293
294 for (i = 0; i < nr_pms; i++) {
295 const struct drm_etnaviv_gem_submit_pmr *r = pmrs + i;
296 struct etnaviv_gem_submit_bo *bo;
297 int ret;
298
299 ret = submit_bo(submit, r->read_idx, &bo);
300 if (ret)
301 return ret;
302
303 /* at offset 0 a sequence number gets stored used for userspace sync */
304 if (r->read_offset == 0) {
305 DRM_ERROR("perfmon request: offset is 0");
306 return -EINVAL;
307 }
308
309 if (r->read_offset >= bo->obj->base.size - sizeof(u32)) {
310 DRM_ERROR("perfmon request: offset %u outside object", i);
311 return -EINVAL;
312 }
313
314 if (r->flags & ~(ETNA_PM_PROCESS_PRE | ETNA_PM_PROCESS_POST)) {
315 DRM_ERROR("perfmon request: flags are not valid");
316 return -EINVAL;
317 }
318
319 if (etnaviv_pm_req_validate(r, cmdbuf->exec_state)) {
320 DRM_ERROR("perfmon request: domain or signal not valid");
321 return -EINVAL;
322 }
323
324 cmdbuf->pmrs[i].flags = r->flags;
325 cmdbuf->pmrs[i].domain = r->domain;
326 cmdbuf->pmrs[i].signal = r->signal;
327 cmdbuf->pmrs[i].sequence = r->sequence;
328 cmdbuf->pmrs[i].offset = r->read_offset;
329 cmdbuf->pmrs[i].bo_vma = etnaviv_gem_vmap(&bo->obj->base);
330 }
331
332 return 0;
333}
334
286static void submit_cleanup(struct etnaviv_gem_submit *submit) 335static void submit_cleanup(struct etnaviv_gem_submit *submit)
287{ 336{
288 unsigned i; 337 unsigned i;
@@ -306,6 +355,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
306 struct etnaviv_drm_private *priv = dev->dev_private; 355 struct etnaviv_drm_private *priv = dev->dev_private;
307 struct drm_etnaviv_gem_submit *args = data; 356 struct drm_etnaviv_gem_submit *args = data;
308 struct drm_etnaviv_gem_submit_reloc *relocs; 357 struct drm_etnaviv_gem_submit_reloc *relocs;
358 struct drm_etnaviv_gem_submit_pmr *pmrs;
309 struct drm_etnaviv_gem_submit_bo *bos; 359 struct drm_etnaviv_gem_submit_bo *bos;
310 struct etnaviv_gem_submit *submit; 360 struct etnaviv_gem_submit *submit;
311 struct etnaviv_cmdbuf *cmdbuf; 361 struct etnaviv_cmdbuf *cmdbuf;
@@ -347,11 +397,12 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
347 */ 397 */
348 bos = kvmalloc_array(args->nr_bos, sizeof(*bos), GFP_KERNEL); 398 bos = kvmalloc_array(args->nr_bos, sizeof(*bos), GFP_KERNEL);
349 relocs = kvmalloc_array(args->nr_relocs, sizeof(*relocs), GFP_KERNEL); 399 relocs = kvmalloc_array(args->nr_relocs, sizeof(*relocs), GFP_KERNEL);
400 pmrs = kvmalloc_array(args->nr_pmrs, sizeof(*pmrs), GFP_KERNEL);
350 stream = kvmalloc_array(1, args->stream_size, GFP_KERNEL); 401 stream = kvmalloc_array(1, args->stream_size, GFP_KERNEL);
351 cmdbuf = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc, 402 cmdbuf = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc,
352 ALIGN(args->stream_size, 8) + 8, 403 ALIGN(args->stream_size, 8) + 8,
353 args->nr_bos); 404 args->nr_bos, args->nr_pmrs);
354 if (!bos || !relocs || !stream || !cmdbuf) { 405 if (!bos || !relocs || !pmrs || !stream || !cmdbuf) {
355 ret = -ENOMEM; 406 ret = -ENOMEM;
356 goto err_submit_cmds; 407 goto err_submit_cmds;
357 } 408 }
@@ -373,6 +424,14 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
373 goto err_submit_cmds; 424 goto err_submit_cmds;
374 } 425 }
375 426
427 ret = copy_from_user(pmrs, u64_to_user_ptr(args->pmrs),
428 args->nr_pmrs * sizeof(*pmrs));
429 if (ret) {
430 ret = -EFAULT;
431 goto err_submit_cmds;
432 }
433 cmdbuf->nr_pmrs = args->nr_pmrs;
434
376 ret = copy_from_user(stream, u64_to_user_ptr(args->stream), 435 ret = copy_from_user(stream, u64_to_user_ptr(args->stream),
377 args->stream_size); 436 args->stream_size);
378 if (ret) { 437 if (ret) {
@@ -441,6 +500,10 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
441 if (ret) 500 if (ret)
442 goto out; 501 goto out;
443 502
503 ret = submit_perfmon_validate(submit, cmdbuf, pmrs, args->nr_pmrs);
504 if (ret)
505 goto out;
506
444 memcpy(cmdbuf->vaddr, stream, args->stream_size); 507 memcpy(cmdbuf->vaddr, stream, args->stream_size);
445 cmdbuf->user_size = ALIGN(args->stream_size, 8); 508 cmdbuf->user_size = ALIGN(args->stream_size, 8);
446 509
@@ -496,6 +559,8 @@ err_submit_cmds:
496 kvfree(bos); 559 kvfree(bos);
497 if (relocs) 560 if (relocs)
498 kvfree(relocs); 561 kvfree(relocs);
562 if (pmrs)
563 kvfree(pmrs);
499 564
500 return ret; 565 return ret;
501} 566}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index fc9a6a83dfc7..8197e1d6ed11 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -25,6 +25,7 @@
25#include "etnaviv_gpu.h" 25#include "etnaviv_gpu.h"
26#include "etnaviv_gem.h" 26#include "etnaviv_gem.h"
27#include "etnaviv_mmu.h" 27#include "etnaviv_mmu.h"
28#include "etnaviv_perfmon.h"
28#include "common.xml.h" 29#include "common.xml.h"
29#include "state.xml.h" 30#include "state.xml.h"
30#include "state_hi.xml.h" 31#include "state_hi.xml.h"
@@ -420,9 +421,10 @@ static void etnaviv_gpu_update_clock(struct etnaviv_gpu *gpu)
420 gpu->base_rate_shader >> gpu->freq_scale); 421 gpu->base_rate_shader >> gpu->freq_scale);
421 } else { 422 } else {
422 unsigned int fscale = 1 << (6 - gpu->freq_scale); 423 unsigned int fscale = 1 << (6 - gpu->freq_scale);
423 u32 clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS | 424 u32 clock = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
424 VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
425 425
426 clock &= ~VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__MASK;
427 clock |= VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
426 etnaviv_gpu_load_clock(gpu, clock); 428 etnaviv_gpu_load_clock(gpu, clock);
427 } 429 }
428} 430}
@@ -433,24 +435,14 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
433 unsigned long timeout; 435 unsigned long timeout;
434 bool failed = true; 436 bool failed = true;
435 437
436 /* TODO
437 *
438 * - clock gating
439 * - puls eater
440 * - what about VG?
441 */
442
443 /* We hope that the GPU resets in under one second */ 438 /* We hope that the GPU resets in under one second */
444 timeout = jiffies + msecs_to_jiffies(1000); 439 timeout = jiffies + msecs_to_jiffies(1000);
445 440
446 while (time_is_after_jiffies(timeout)) { 441 while (time_is_after_jiffies(timeout)) {
447 /* enable clock */ 442 /* enable clock */
448 etnaviv_gpu_update_clock(gpu); 443 unsigned int fscale = 1 << (6 - gpu->freq_scale);
449 444 control = VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
450 control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL); 445 etnaviv_gpu_load_clock(gpu, control);
451
452 /* Wait for stable clock. Vivante's code waited for 1ms */
453 usleep_range(1000, 10000);
454 446
455 /* isolate the GPU. */ 447 /* isolate the GPU. */
456 control |= VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU; 448 control |= VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
@@ -461,7 +453,7 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
461 gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control); 453 gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
462 454
463 /* wait for reset. */ 455 /* wait for reset. */
464 msleep(1); 456 usleep_range(10, 20);
465 457
466 /* reset soft reset bit. */ 458 /* reset soft reset bit. */
467 control &= ~VIVS_HI_CLOCK_CONTROL_SOFT_RESET; 459 control &= ~VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
@@ -490,6 +482,10 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
490 continue; 482 continue;
491 } 483 }
492 484
485 /* disable debug registers, as they are not normally needed */
486 control |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
487 gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
488
493 failed = false; 489 failed = false;
494 break; 490 break;
495 } 491 }
@@ -721,7 +717,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
721 } 717 }
722 718
723 /* Create buffer: */ 719 /* Create buffer: */
724 gpu->buffer = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc, PAGE_SIZE, 0); 720 gpu->buffer = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc, PAGE_SIZE, 0, 0);
725 if (!gpu->buffer) { 721 if (!gpu->buffer) {
726 ret = -ENOMEM; 722 ret = -ENOMEM;
727 dev_err(gpu->dev, "could not create command buffer\n"); 723 dev_err(gpu->dev, "could not create command buffer\n");
@@ -739,10 +735,9 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
739 /* Setup event management */ 735 /* Setup event management */
740 spin_lock_init(&gpu->event_spinlock); 736 spin_lock_init(&gpu->event_spinlock);
741 init_completion(&gpu->event_free); 737 init_completion(&gpu->event_free);
742 for (i = 0; i < ARRAY_SIZE(gpu->event); i++) { 738 bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
743 gpu->event[i].used = false; 739 for (i = 0; i < ARRAY_SIZE(gpu->event); i++)
744 complete(&gpu->event_free); 740 complete(&gpu->event_free);
745 }
746 741
747 /* Now program the hardware */ 742 /* Now program the hardware */
748 mutex_lock(&gpu->lock); 743 mutex_lock(&gpu->lock);
@@ -926,7 +921,7 @@ static void recover_worker(struct work_struct *work)
926 struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu, 921 struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
927 recover_work); 922 recover_work);
928 unsigned long flags; 923 unsigned long flags;
929 unsigned int i; 924 unsigned int i = 0;
930 925
931 dev_err(gpu->dev, "hangcheck recover!\n"); 926 dev_err(gpu->dev, "hangcheck recover!\n");
932 927
@@ -945,14 +940,12 @@ static void recover_worker(struct work_struct *work)
945 940
946 /* complete all events, the GPU won't do it after the reset */ 941 /* complete all events, the GPU won't do it after the reset */
947 spin_lock_irqsave(&gpu->event_spinlock, flags); 942 spin_lock_irqsave(&gpu->event_spinlock, flags);
948 for (i = 0; i < ARRAY_SIZE(gpu->event); i++) { 943 for_each_set_bit_from(i, gpu->event_bitmap, ETNA_NR_EVENTS) {
949 if (!gpu->event[i].used)
950 continue;
951 dma_fence_signal(gpu->event[i].fence); 944 dma_fence_signal(gpu->event[i].fence);
952 gpu->event[i].fence = NULL; 945 gpu->event[i].fence = NULL;
953 gpu->event[i].used = false;
954 complete(&gpu->event_free); 946 complete(&gpu->event_free);
955 } 947 }
948 bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
956 spin_unlock_irqrestore(&gpu->event_spinlock, flags); 949 spin_unlock_irqrestore(&gpu->event_spinlock, flags);
957 gpu->completed_fence = gpu->active_fence; 950 gpu->completed_fence = gpu->active_fence;
958 951
@@ -1140,30 +1133,45 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
1140 * event management: 1133 * event management:
1141 */ 1134 */
1142 1135
1143static unsigned int event_alloc(struct etnaviv_gpu *gpu) 1136static int event_alloc(struct etnaviv_gpu *gpu, unsigned nr_events,
1137 unsigned int *events)
1144{ 1138{
1145 unsigned long ret, flags; 1139 unsigned long flags, timeout = msecs_to_jiffies(10 * 10000);
1146 unsigned int i, event = ~0U; 1140 unsigned i, acquired = 0;
1147 1141
1148 ret = wait_for_completion_timeout(&gpu->event_free, 1142 for (i = 0; i < nr_events; i++) {
1149 msecs_to_jiffies(10 * 10000)); 1143 unsigned long ret;
1150 if (!ret)
1151 dev_err(gpu->dev, "wait_for_completion_timeout failed");
1152 1144
1153 spin_lock_irqsave(&gpu->event_spinlock, flags); 1145 ret = wait_for_completion_timeout(&gpu->event_free, timeout);
1154 1146
1155 /* find first free event */ 1147 if (!ret) {
1156 for (i = 0; i < ARRAY_SIZE(gpu->event); i++) { 1148 dev_err(gpu->dev, "wait_for_completion_timeout failed");
1157 if (gpu->event[i].used == false) { 1149 goto out;
1158 gpu->event[i].used = true;
1159 event = i;
1160 break;
1161 } 1150 }
1151
1152 acquired++;
1153 timeout = ret;
1154 }
1155
1156 spin_lock_irqsave(&gpu->event_spinlock, flags);
1157
1158 for (i = 0; i < nr_events; i++) {
1159 int event = find_first_zero_bit(gpu->event_bitmap, ETNA_NR_EVENTS);
1160
1161 events[i] = event;
1162 memset(&gpu->event[event], 0, sizeof(struct etnaviv_event));
1163 set_bit(event, gpu->event_bitmap);
1162 } 1164 }
1163 1165
1164 spin_unlock_irqrestore(&gpu->event_spinlock, flags); 1166 spin_unlock_irqrestore(&gpu->event_spinlock, flags);
1165 1167
1166 return event; 1168 return 0;
1169
1170out:
1171 for (i = 0; i < acquired; i++)
1172 complete(&gpu->event_free);
1173
1174 return -EBUSY;
1167} 1175}
1168 1176
1169static void event_free(struct etnaviv_gpu *gpu, unsigned int event) 1177static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
@@ -1172,12 +1180,12 @@ static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
1172 1180
1173 spin_lock_irqsave(&gpu->event_spinlock, flags); 1181 spin_lock_irqsave(&gpu->event_spinlock, flags);
1174 1182
1175 if (gpu->event[event].used == false) { 1183 if (!test_bit(event, gpu->event_bitmap)) {
1176 dev_warn(gpu->dev, "event %u is already marked as free", 1184 dev_warn(gpu->dev, "event %u is already marked as free",
1177 event); 1185 event);
1178 spin_unlock_irqrestore(&gpu->event_spinlock, flags); 1186 spin_unlock_irqrestore(&gpu->event_spinlock, flags);
1179 } else { 1187 } else {
1180 gpu->event[event].used = false; 1188 clear_bit(event, gpu->event_bitmap);
1181 spin_unlock_irqrestore(&gpu->event_spinlock, flags); 1189 spin_unlock_irqrestore(&gpu->event_spinlock, flags);
1182 1190
1183 complete(&gpu->event_free); 1191 complete(&gpu->event_free);
@@ -1311,12 +1319,71 @@ void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu)
1311 pm_runtime_put_autosuspend(gpu->dev); 1319 pm_runtime_put_autosuspend(gpu->dev);
1312} 1320}
1313 1321
1322static void sync_point_perfmon_sample(struct etnaviv_gpu *gpu,
1323 struct etnaviv_event *event, unsigned int flags)
1324{
1325 const struct etnaviv_cmdbuf *cmdbuf = event->cmdbuf;
1326 unsigned int i;
1327
1328 for (i = 0; i < cmdbuf->nr_pmrs; i++) {
1329 const struct etnaviv_perfmon_request *pmr = cmdbuf->pmrs + i;
1330
1331 if (pmr->flags == flags)
1332 etnaviv_perfmon_process(gpu, pmr);
1333 }
1334}
1335
1336static void sync_point_perfmon_sample_pre(struct etnaviv_gpu *gpu,
1337 struct etnaviv_event *event)
1338{
1339 u32 val;
1340
1341 /* disable clock gating */
1342 val = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
1343 val &= ~VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
1344 gpu_write(gpu, VIVS_PM_POWER_CONTROLS, val);
1345
1346 /* enable debug register */
1347 val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
1348 val &= ~VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
1349 gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);
1350
1351 sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_PRE);
1352}
1353
1354static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu,
1355 struct etnaviv_event *event)
1356{
1357 const struct etnaviv_cmdbuf *cmdbuf = event->cmdbuf;
1358 unsigned int i;
1359 u32 val;
1360
1361 sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_POST);
1362
1363 for (i = 0; i < cmdbuf->nr_pmrs; i++) {
1364 const struct etnaviv_perfmon_request *pmr = cmdbuf->pmrs + i;
1365
1366 *pmr->bo_vma = pmr->sequence;
1367 }
1368
1369 /* disable debug register */
1370 val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
1371 val |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
1372 gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);
1373
1374 /* enable clock gating */
1375 val = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
1376 val |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
1377 gpu_write(gpu, VIVS_PM_POWER_CONTROLS, val);
1378}
1379
1380
1314/* add bo's to gpu's ring, and kick gpu: */ 1381/* add bo's to gpu's ring, and kick gpu: */
1315int etnaviv_gpu_submit(struct etnaviv_gpu *gpu, 1382int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
1316 struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf) 1383 struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf)
1317{ 1384{
1318 struct dma_fence *fence; 1385 struct dma_fence *fence;
1319 unsigned int event, i; 1386 unsigned int i, nr_events = 1, event[3];
1320 int ret; 1387 int ret;
1321 1388
1322 ret = etnaviv_gpu_pm_get_sync(gpu); 1389 ret = etnaviv_gpu_pm_get_sync(gpu);
@@ -1332,10 +1399,19 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
1332 * 1399 *
1333 */ 1400 */
1334 1401
1335 event = event_alloc(gpu); 1402 /*
1336 if (unlikely(event == ~0U)) { 1403 * if there are performance monitor requests we need to have
1337 DRM_ERROR("no free event\n"); 1404 * - a sync point to re-configure gpu and process ETNA_PM_PROCESS_PRE
1338 ret = -EBUSY; 1405 * requests.
1406 * - a sync point to re-configure gpu, process ETNA_PM_PROCESS_POST requests
1407 * and update the sequence number for userspace.
1408 */
1409 if (cmdbuf->nr_pmrs)
1410 nr_events = 3;
1411
1412 ret = event_alloc(gpu, nr_events, event);
1413 if (ret) {
1414 DRM_ERROR("no free events\n");
1339 goto out_pm_put; 1415 goto out_pm_put;
1340 } 1416 }
1341 1417
@@ -1343,12 +1419,14 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
1343 1419
1344 fence = etnaviv_gpu_fence_alloc(gpu); 1420 fence = etnaviv_gpu_fence_alloc(gpu);
1345 if (!fence) { 1421 if (!fence) {
1346 event_free(gpu, event); 1422 for (i = 0; i < nr_events; i++)
1423 event_free(gpu, event[i]);
1424
1347 ret = -ENOMEM; 1425 ret = -ENOMEM;
1348 goto out_unlock; 1426 goto out_unlock;
1349 } 1427 }
1350 1428
1351 gpu->event[event].fence = fence; 1429 gpu->event[event[0]].fence = fence;
1352 submit->fence = dma_fence_get(fence); 1430 submit->fence = dma_fence_get(fence);
1353 gpu->active_fence = submit->fence->seqno; 1431 gpu->active_fence = submit->fence->seqno;
1354 1432
@@ -1358,7 +1436,19 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
1358 gpu->lastctx = cmdbuf->ctx; 1436 gpu->lastctx = cmdbuf->ctx;
1359 } 1437 }
1360 1438
1361 etnaviv_buffer_queue(gpu, event, cmdbuf); 1439 if (cmdbuf->nr_pmrs) {
1440 gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
1441 gpu->event[event[1]].cmdbuf = cmdbuf;
1442 etnaviv_sync_point_queue(gpu, event[1]);
1443 }
1444
1445 etnaviv_buffer_queue(gpu, event[0], cmdbuf);
1446
1447 if (cmdbuf->nr_pmrs) {
1448 gpu->event[event[2]].sync_point = &sync_point_perfmon_sample_post;
1449 gpu->event[event[2]].cmdbuf = cmdbuf;
1450 etnaviv_sync_point_queue(gpu, event[2]);
1451 }
1362 1452
1363 cmdbuf->fence = fence; 1453 cmdbuf->fence = fence;
1364 list_add_tail(&cmdbuf->node, &gpu->active_cmd_list); 1454 list_add_tail(&cmdbuf->node, &gpu->active_cmd_list);
@@ -1394,6 +1484,24 @@ out_pm_put:
1394 return ret; 1484 return ret;
1395} 1485}
1396 1486
1487static void etnaviv_process_sync_point(struct etnaviv_gpu *gpu,
1488 struct etnaviv_event *event)
1489{
1490 u32 addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
1491
1492 event->sync_point(gpu, event);
1493 etnaviv_gpu_start_fe(gpu, addr + 2, 2);
1494}
1495
1496static void sync_point_worker(struct work_struct *work)
1497{
1498 struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
1499 sync_point_work);
1500
1501 etnaviv_process_sync_point(gpu, &gpu->event[gpu->sync_point_event]);
1502 event_free(gpu, gpu->sync_point_event);
1503}
1504
1397/* 1505/*
1398 * Init/Cleanup: 1506 * Init/Cleanup:
1399 */ 1507 */
@@ -1440,7 +1548,15 @@ static irqreturn_t irq_handler(int irq, void *data)
1440 1548
1441 dev_dbg(gpu->dev, "event %u\n", event); 1549 dev_dbg(gpu->dev, "event %u\n", event);
1442 1550
1551 if (gpu->event[event].sync_point) {
1552 gpu->sync_point_event = event;
1553 etnaviv_queue_work(gpu->drm, &gpu->sync_point_work);
1554 }
1555
1443 fence = gpu->event[event].fence; 1556 fence = gpu->event[event].fence;
1557 if (!fence)
1558 continue;
1559
1444 gpu->event[event].fence = NULL; 1560 gpu->event[event].fence = NULL;
1445 dma_fence_signal(fence); 1561 dma_fence_signal(fence);
1446 1562
@@ -1645,6 +1761,7 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
1645 1761
1646 INIT_LIST_HEAD(&gpu->active_cmd_list); 1762 INIT_LIST_HEAD(&gpu->active_cmd_list);
1647 INIT_WORK(&gpu->retire_work, retire_worker); 1763 INIT_WORK(&gpu->retire_work, retire_worker);
1764 INIT_WORK(&gpu->sync_point_work, sync_point_worker);
1648 INIT_WORK(&gpu->recover_work, recover_worker); 1765 INIT_WORK(&gpu->recover_work, recover_worker);
1649 init_waitqueue_head(&gpu->fence_event); 1766 init_waitqueue_head(&gpu->fence_event);
1650 1767
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 689cb8f3680c..4f10f147297a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -88,13 +88,17 @@ struct etnaviv_chip_identity {
88}; 88};
89 89
90struct etnaviv_event { 90struct etnaviv_event {
91 bool used;
92 struct dma_fence *fence; 91 struct dma_fence *fence;
92 struct etnaviv_cmdbuf *cmdbuf;
93
94 void (*sync_point)(struct etnaviv_gpu *gpu, struct etnaviv_event *event);
93}; 95};
94 96
95struct etnaviv_cmdbuf_suballoc; 97struct etnaviv_cmdbuf_suballoc;
96struct etnaviv_cmdbuf; 98struct etnaviv_cmdbuf;
97 99
100#define ETNA_NR_EVENTS 30
101
98struct etnaviv_gpu { 102struct etnaviv_gpu {
99 struct drm_device *drm; 103 struct drm_device *drm;
100 struct thermal_cooling_device *cooling; 104 struct thermal_cooling_device *cooling;
@@ -112,7 +116,8 @@ struct etnaviv_gpu {
112 u32 memory_base; 116 u32 memory_base;
113 117
114 /* event management: */ 118 /* event management: */
115 struct etnaviv_event event[30]; 119 DECLARE_BITMAP(event_bitmap, ETNA_NR_EVENTS);
120 struct etnaviv_event event[ETNA_NR_EVENTS];
116 struct completion event_free; 121 struct completion event_free;
117 spinlock_t event_spinlock; 122 spinlock_t event_spinlock;
118 123
@@ -133,6 +138,10 @@ struct etnaviv_gpu {
133 /* worker for handling active-list retiring: */ 138 /* worker for handling active-list retiring: */
134 struct work_struct retire_work; 139 struct work_struct retire_work;
135 140
141 /* worker for handling 'sync' points: */
142 struct work_struct sync_point_work;
143 int sync_point_event;
144
136 void __iomem *mmio; 145 void __iomem *mmio;
137 int irq; 146 int irq;
138 147
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
index 7a7c97f599d7..14e24ac6573f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
@@ -14,7 +14,6 @@
14 * this program. If not, see <http://www.gnu.org/licenses/>. 14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */ 15 */
16 16
17#include <linux/iommu.h>
18#include <linux/platform_device.h> 17#include <linux/platform_device.h>
19#include <linux/sizes.h> 18#include <linux/sizes.h>
20#include <linux/slab.h> 19#include <linux/slab.h>
@@ -31,174 +30,115 @@
31 30
32#define GPU_MEM_START 0x80000000 31#define GPU_MEM_START 0x80000000
33 32
34struct etnaviv_iommu_domain_pgtable { 33struct etnaviv_iommuv1_domain {
35 u32 *pgtable; 34 struct etnaviv_iommu_domain base;
36 dma_addr_t paddr; 35 u32 *pgtable_cpu;
36 dma_addr_t pgtable_dma;
37}; 37};
38 38
39struct etnaviv_iommu_domain { 39static struct etnaviv_iommuv1_domain *
40 struct iommu_domain domain; 40to_etnaviv_domain(struct etnaviv_iommu_domain *domain)
41 struct device *dev;
42 void *bad_page_cpu;
43 dma_addr_t bad_page_dma;
44 struct etnaviv_iommu_domain_pgtable pgtable;
45 spinlock_t map_lock;
46};
47
48static struct etnaviv_iommu_domain *to_etnaviv_domain(struct iommu_domain *domain)
49{
50 return container_of(domain, struct etnaviv_iommu_domain, domain);
51}
52
53static int pgtable_alloc(struct etnaviv_iommu_domain_pgtable *pgtable,
54 size_t size)
55{
56 pgtable->pgtable = dma_alloc_coherent(NULL, size, &pgtable->paddr, GFP_KERNEL);
57 if (!pgtable->pgtable)
58 return -ENOMEM;
59
60 return 0;
61}
62
63static void pgtable_free(struct etnaviv_iommu_domain_pgtable *pgtable,
64 size_t size)
65{ 41{
66 dma_free_coherent(NULL, size, pgtable->pgtable, pgtable->paddr); 42 return container_of(domain, struct etnaviv_iommuv1_domain, base);
67}
68
69static u32 pgtable_read(struct etnaviv_iommu_domain_pgtable *pgtable,
70 unsigned long iova)
71{
72 /* calcuate index into page table */
73 unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
74 phys_addr_t paddr;
75
76 paddr = pgtable->pgtable[index];
77
78 return paddr;
79} 43}
80 44
81static void pgtable_write(struct etnaviv_iommu_domain_pgtable *pgtable, 45static int __etnaviv_iommu_init(struct etnaviv_iommuv1_domain *etnaviv_domain)
82 unsigned long iova, phys_addr_t paddr)
83{
84 /* calcuate index into page table */
85 unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
86
87 pgtable->pgtable[index] = paddr;
88}
89
90static int __etnaviv_iommu_init(struct etnaviv_iommu_domain *etnaviv_domain)
91{ 46{
92 u32 *p; 47 u32 *p;
93 int ret, i; 48 int i;
94 49
95 etnaviv_domain->bad_page_cpu = dma_alloc_coherent(etnaviv_domain->dev, 50 etnaviv_domain->base.bad_page_cpu = dma_alloc_coherent(
96 SZ_4K, 51 etnaviv_domain->base.dev,
97 &etnaviv_domain->bad_page_dma, 52 SZ_4K,
98 GFP_KERNEL); 53 &etnaviv_domain->base.bad_page_dma,
99 if (!etnaviv_domain->bad_page_cpu) 54 GFP_KERNEL);
55 if (!etnaviv_domain->base.bad_page_cpu)
100 return -ENOMEM; 56 return -ENOMEM;
101 57
102 p = etnaviv_domain->bad_page_cpu; 58 p = etnaviv_domain->base.bad_page_cpu;
103 for (i = 0; i < SZ_4K / 4; i++) 59 for (i = 0; i < SZ_4K / 4; i++)
104 *p++ = 0xdead55aa; 60 *p++ = 0xdead55aa;
105 61
106 ret = pgtable_alloc(&etnaviv_domain->pgtable, PT_SIZE); 62 etnaviv_domain->pgtable_cpu =
107 if (ret < 0) { 63 dma_alloc_coherent(etnaviv_domain->base.dev, PT_SIZE,
108 dma_free_coherent(etnaviv_domain->dev, SZ_4K, 64 &etnaviv_domain->pgtable_dma,
109 etnaviv_domain->bad_page_cpu, 65 GFP_KERNEL);
110 etnaviv_domain->bad_page_dma); 66 if (!etnaviv_domain->pgtable_cpu) {
111 return ret; 67 dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
68 etnaviv_domain->base.bad_page_cpu,
69 etnaviv_domain->base.bad_page_dma);
70 return -ENOMEM;
112 } 71 }
113 72
114 for (i = 0; i < PT_ENTRIES; i++) 73 for (i = 0; i < PT_ENTRIES; i++)
115 etnaviv_domain->pgtable.pgtable[i] = 74 etnaviv_domain->pgtable_cpu[i] =
116 etnaviv_domain->bad_page_dma; 75 etnaviv_domain->base.bad_page_dma;
117
118 spin_lock_init(&etnaviv_domain->map_lock);
119 76
120 return 0; 77 return 0;
121} 78}
122 79
123static void etnaviv_domain_free(struct iommu_domain *domain) 80static void etnaviv_iommuv1_domain_free(struct etnaviv_iommu_domain *domain)
124{ 81{
125 struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain); 82 struct etnaviv_iommuv1_domain *etnaviv_domain =
83 to_etnaviv_domain(domain);
126 84
127 pgtable_free(&etnaviv_domain->pgtable, PT_SIZE); 85 dma_free_coherent(etnaviv_domain->base.dev, PT_SIZE,
86 etnaviv_domain->pgtable_cpu,
87 etnaviv_domain->pgtable_dma);
128 88
129 dma_free_coherent(etnaviv_domain->dev, SZ_4K, 89 dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
130 etnaviv_domain->bad_page_cpu, 90 etnaviv_domain->base.bad_page_cpu,
131 etnaviv_domain->bad_page_dma); 91 etnaviv_domain->base.bad_page_dma);
132 92
133 kfree(etnaviv_domain); 93 kfree(etnaviv_domain);
134} 94}
135 95
136static int etnaviv_iommuv1_map(struct iommu_domain *domain, unsigned long iova, 96static int etnaviv_iommuv1_map(struct etnaviv_iommu_domain *domain,
137 phys_addr_t paddr, size_t size, int prot) 97 unsigned long iova, phys_addr_t paddr,
98 size_t size, int prot)
138{ 99{
139 struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain); 100 struct etnaviv_iommuv1_domain *etnaviv_domain = to_etnaviv_domain(domain);
101 unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
140 102
141 if (size != SZ_4K) 103 if (size != SZ_4K)
142 return -EINVAL; 104 return -EINVAL;
143 105
144 spin_lock(&etnaviv_domain->map_lock); 106 etnaviv_domain->pgtable_cpu[index] = paddr;
145 pgtable_write(&etnaviv_domain->pgtable, iova, paddr);
146 spin_unlock(&etnaviv_domain->map_lock);
147 107
148 return 0; 108 return 0;
149} 109}
150 110
151static size_t etnaviv_iommuv1_unmap(struct iommu_domain *domain, 111static size_t etnaviv_iommuv1_unmap(struct etnaviv_iommu_domain *domain,
152 unsigned long iova, size_t size) 112 unsigned long iova, size_t size)
153{ 113{
154 struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain); 114 struct etnaviv_iommuv1_domain *etnaviv_domain =
115 to_etnaviv_domain(domain);
116 unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
155 117
156 if (size != SZ_4K) 118 if (size != SZ_4K)
157 return -EINVAL; 119 return -EINVAL;
158 120
159 spin_lock(&etnaviv_domain->map_lock); 121 etnaviv_domain->pgtable_cpu[index] = etnaviv_domain->base.bad_page_dma;
160 pgtable_write(&etnaviv_domain->pgtable, iova,
161 etnaviv_domain->bad_page_dma);
162 spin_unlock(&etnaviv_domain->map_lock);
163 122
164 return SZ_4K; 123 return SZ_4K;
165} 124}
166 125
167static phys_addr_t etnaviv_iommu_iova_to_phys(struct iommu_domain *domain, 126static size_t etnaviv_iommuv1_dump_size(struct etnaviv_iommu_domain *domain)
168 dma_addr_t iova)
169{
170 struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
171
172 return pgtable_read(&etnaviv_domain->pgtable, iova);
173}
174
175static size_t etnaviv_iommuv1_dump_size(struct iommu_domain *domain)
176{ 127{
177 return PT_SIZE; 128 return PT_SIZE;
178} 129}
179 130
180static void etnaviv_iommuv1_dump(struct iommu_domain *domain, void *buf) 131static void etnaviv_iommuv1_dump(struct etnaviv_iommu_domain *domain, void *buf)
181{ 132{
182 struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain); 133 struct etnaviv_iommuv1_domain *etnaviv_domain =
134 to_etnaviv_domain(domain);
183 135
184 memcpy(buf, etnaviv_domain->pgtable.pgtable, PT_SIZE); 136 memcpy(buf, etnaviv_domain->pgtable_cpu, PT_SIZE);
185} 137}
186 138
187static const struct etnaviv_iommu_ops etnaviv_iommu_ops = {
188 .ops = {
189 .domain_free = etnaviv_domain_free,
190 .map = etnaviv_iommuv1_map,
191 .unmap = etnaviv_iommuv1_unmap,
192 .iova_to_phys = etnaviv_iommu_iova_to_phys,
193 .pgsize_bitmap = SZ_4K,
194 },
195 .dump_size = etnaviv_iommuv1_dump_size,
196 .dump = etnaviv_iommuv1_dump,
197};
198
199void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu) 139void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
200{ 140{
201 struct etnaviv_iommu_domain *etnaviv_domain = 141 struct etnaviv_iommuv1_domain *etnaviv_domain =
202 to_etnaviv_domain(gpu->mmu->domain); 142 to_etnaviv_domain(gpu->mmu->domain);
203 u32 pgtable; 143 u32 pgtable;
204 144
@@ -210,7 +150,7 @@ void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
210 gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base); 150 gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base);
211 151
212 /* set page table address in MC */ 152 /* set page table address in MC */
213 pgtable = (u32)etnaviv_domain->pgtable.paddr; 153 pgtable = (u32)etnaviv_domain->pgtable_dma;
214 154
215 gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, pgtable); 155 gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, pgtable);
216 gpu_write(gpu, VIVS_MC_MMU_TX_PAGE_TABLE, pgtable); 156 gpu_write(gpu, VIVS_MC_MMU_TX_PAGE_TABLE, pgtable);
@@ -219,28 +159,37 @@ void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
219 gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable); 159 gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable);
220} 160}
221 161
222struct iommu_domain *etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu) 162const struct etnaviv_iommu_domain_ops etnaviv_iommuv1_ops = {
163 .free = etnaviv_iommuv1_domain_free,
164 .map = etnaviv_iommuv1_map,
165 .unmap = etnaviv_iommuv1_unmap,
166 .dump_size = etnaviv_iommuv1_dump_size,
167 .dump = etnaviv_iommuv1_dump,
168};
169
170struct etnaviv_iommu_domain *
171etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu)
223{ 172{
224 struct etnaviv_iommu_domain *etnaviv_domain; 173 struct etnaviv_iommuv1_domain *etnaviv_domain;
174 struct etnaviv_iommu_domain *domain;
225 int ret; 175 int ret;
226 176
227 etnaviv_domain = kzalloc(sizeof(*etnaviv_domain), GFP_KERNEL); 177 etnaviv_domain = kzalloc(sizeof(*etnaviv_domain), GFP_KERNEL);
228 if (!etnaviv_domain) 178 if (!etnaviv_domain)
229 return NULL; 179 return NULL;
230 180
231 etnaviv_domain->dev = gpu->dev; 181 domain = &etnaviv_domain->base;
232 182
233 etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING; 183 domain->dev = gpu->dev;
234 etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops; 184 domain->base = GPU_MEM_START;
235 etnaviv_domain->domain.pgsize_bitmap = SZ_4K; 185 domain->size = PT_ENTRIES * SZ_4K;
236 etnaviv_domain->domain.geometry.aperture_start = GPU_MEM_START; 186 domain->ops = &etnaviv_iommuv1_ops;
237 etnaviv_domain->domain.geometry.aperture_end = GPU_MEM_START + PT_ENTRIES * SZ_4K - 1;
238 187
239 ret = __etnaviv_iommu_init(etnaviv_domain); 188 ret = __etnaviv_iommu_init(etnaviv_domain);
240 if (ret) 189 if (ret)
241 goto out_free; 190 goto out_free;
242 191
243 return &etnaviv_domain->domain; 192 return &etnaviv_domain->base;
244 193
245out_free: 194out_free:
246 kfree(etnaviv_domain); 195 kfree(etnaviv_domain);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.h b/drivers/gpu/drm/etnaviv/etnaviv_iommu.h
index 8b51e7c16feb..01d59bf70d78 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.h
@@ -18,11 +18,14 @@
18#define __ETNAVIV_IOMMU_H__ 18#define __ETNAVIV_IOMMU_H__
19 19
20struct etnaviv_gpu; 20struct etnaviv_gpu;
21struct etnaviv_iommu_domain;
21 22
22struct iommu_domain *etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu); 23struct etnaviv_iommu_domain *
24etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu);
23void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu); 25void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu);
24 26
25struct iommu_domain *etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu); 27struct etnaviv_iommu_domain *
28etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu);
26void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu); 29void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu);
27 30
28#endif /* __ETNAVIV_IOMMU_H__ */ 31#endif /* __ETNAVIV_IOMMU_H__ */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
index cbe447ac5974..fc60fc8ddbf0 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
@@ -14,7 +14,6 @@
14 * this program. If not, see <http://www.gnu.org/licenses/>. 14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */ 15 */
16 16
17#include <linux/iommu.h>
18#include <linux/platform_device.h> 17#include <linux/platform_device.h>
19#include <linux/sizes.h> 18#include <linux/sizes.h>
20#include <linux/slab.h> 19#include <linux/slab.h>
@@ -40,10 +39,7 @@
40#define MMUv2_MAX_STLB_ENTRIES 1024 39#define MMUv2_MAX_STLB_ENTRIES 1024
41 40
42struct etnaviv_iommuv2_domain { 41struct etnaviv_iommuv2_domain {
43 struct iommu_domain domain; 42 struct etnaviv_iommu_domain base;
44 struct device *dev;
45 void *bad_page_cpu;
46 dma_addr_t bad_page_dma;
47 /* M(aster) TLB aka first level pagetable */ 43 /* M(aster) TLB aka first level pagetable */
48 u32 *mtlb_cpu; 44 u32 *mtlb_cpu;
49 dma_addr_t mtlb_dma; 45 dma_addr_t mtlb_dma;
@@ -52,13 +48,15 @@ struct etnaviv_iommuv2_domain {
52 dma_addr_t stlb_dma[1024]; 48 dma_addr_t stlb_dma[1024];
53}; 49};
54 50
55static struct etnaviv_iommuv2_domain *to_etnaviv_domain(struct iommu_domain *domain) 51static struct etnaviv_iommuv2_domain *
52to_etnaviv_domain(struct etnaviv_iommu_domain *domain)
56{ 53{
57 return container_of(domain, struct etnaviv_iommuv2_domain, domain); 54 return container_of(domain, struct etnaviv_iommuv2_domain, base);
58} 55}
59 56
60static int etnaviv_iommuv2_map(struct iommu_domain *domain, unsigned long iova, 57static int etnaviv_iommuv2_map(struct etnaviv_iommu_domain *domain,
61 phys_addr_t paddr, size_t size, int prot) 58 unsigned long iova, phys_addr_t paddr,
59 size_t size, int prot)
62{ 60{
63 struct etnaviv_iommuv2_domain *etnaviv_domain = 61 struct etnaviv_iommuv2_domain *etnaviv_domain =
64 to_etnaviv_domain(domain); 62 to_etnaviv_domain(domain);
@@ -68,7 +66,7 @@ static int etnaviv_iommuv2_map(struct iommu_domain *domain, unsigned long iova,
68 if (size != SZ_4K) 66 if (size != SZ_4K)
69 return -EINVAL; 67 return -EINVAL;
70 68
71 if (prot & IOMMU_WRITE) 69 if (prot & ETNAVIV_PROT_WRITE)
72 entry |= MMUv2_PTE_WRITEABLE; 70 entry |= MMUv2_PTE_WRITEABLE;
73 71
74 mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT; 72 mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
@@ -79,8 +77,8 @@ static int etnaviv_iommuv2_map(struct iommu_domain *domain, unsigned long iova,
79 return 0; 77 return 0;
80} 78}
81 79
82static size_t etnaviv_iommuv2_unmap(struct iommu_domain *domain, 80static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_domain *domain,
83 unsigned long iova, size_t size) 81 unsigned long iova, size_t size)
84{ 82{
85 struct etnaviv_iommuv2_domain *etnaviv_domain = 83 struct etnaviv_iommuv2_domain *etnaviv_domain =
86 to_etnaviv_domain(domain); 84 to_etnaviv_domain(domain);
@@ -97,38 +95,26 @@ static size_t etnaviv_iommuv2_unmap(struct iommu_domain *domain,
97 return SZ_4K; 95 return SZ_4K;
98} 96}
99 97
100static phys_addr_t etnaviv_iommuv2_iova_to_phys(struct iommu_domain *domain,
101 dma_addr_t iova)
102{
103 struct etnaviv_iommuv2_domain *etnaviv_domain =
104 to_etnaviv_domain(domain);
105 int mtlb_entry, stlb_entry;
106
107 mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
108 stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;
109
110 return etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] & ~(SZ_4K - 1);
111}
112
113static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain) 98static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
114{ 99{
115 u32 *p; 100 u32 *p;
116 int ret, i, j; 101 int ret, i, j;
117 102
118 /* allocate scratch page */ 103 /* allocate scratch page */
119 etnaviv_domain->bad_page_cpu = dma_alloc_coherent(etnaviv_domain->dev, 104 etnaviv_domain->base.bad_page_cpu = dma_alloc_coherent(
120 SZ_4K, 105 etnaviv_domain->base.dev,
121 &etnaviv_domain->bad_page_dma, 106 SZ_4K,
122 GFP_KERNEL); 107 &etnaviv_domain->base.bad_page_dma,
123 if (!etnaviv_domain->bad_page_cpu) { 108 GFP_KERNEL);
109 if (!etnaviv_domain->base.bad_page_cpu) {
124 ret = -ENOMEM; 110 ret = -ENOMEM;
125 goto fail_mem; 111 goto fail_mem;
126 } 112 }
127 p = etnaviv_domain->bad_page_cpu; 113 p = etnaviv_domain->base.bad_page_cpu;
128 for (i = 0; i < SZ_4K / 4; i++) 114 for (i = 0; i < SZ_4K / 4; i++)
129 *p++ = 0xdead55aa; 115 *p++ = 0xdead55aa;
130 116
131 etnaviv_domain->mtlb_cpu = dma_alloc_coherent(etnaviv_domain->dev, 117 etnaviv_domain->mtlb_cpu = dma_alloc_coherent(etnaviv_domain->base.dev,
132 SZ_4K, 118 SZ_4K,
133 &etnaviv_domain->mtlb_dma, 119 &etnaviv_domain->mtlb_dma,
134 GFP_KERNEL); 120 GFP_KERNEL);
@@ -140,7 +126,7 @@ static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
140 /* pre-populate STLB pages (may want to switch to on-demand later) */ 126 /* pre-populate STLB pages (may want to switch to on-demand later) */
141 for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) { 127 for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
142 etnaviv_domain->stlb_cpu[i] = 128 etnaviv_domain->stlb_cpu[i] =
143 dma_alloc_coherent(etnaviv_domain->dev, 129 dma_alloc_coherent(etnaviv_domain->base.dev,
144 SZ_4K, 130 SZ_4K,
145 &etnaviv_domain->stlb_dma[i], 131 &etnaviv_domain->stlb_dma[i],
146 GFP_KERNEL); 132 GFP_KERNEL);
@@ -159,19 +145,19 @@ static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
159 return 0; 145 return 0;
160 146
161fail_mem: 147fail_mem:
162 if (etnaviv_domain->bad_page_cpu) 148 if (etnaviv_domain->base.bad_page_cpu)
163 dma_free_coherent(etnaviv_domain->dev, SZ_4K, 149 dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
164 etnaviv_domain->bad_page_cpu, 150 etnaviv_domain->base.bad_page_cpu,
165 etnaviv_domain->bad_page_dma); 151 etnaviv_domain->base.bad_page_dma);
166 152
167 if (etnaviv_domain->mtlb_cpu) 153 if (etnaviv_domain->mtlb_cpu)
168 dma_free_coherent(etnaviv_domain->dev, SZ_4K, 154 dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
169 etnaviv_domain->mtlb_cpu, 155 etnaviv_domain->mtlb_cpu,
170 etnaviv_domain->mtlb_dma); 156 etnaviv_domain->mtlb_dma);
171 157
172 for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) { 158 for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
173 if (etnaviv_domain->stlb_cpu[i]) 159 if (etnaviv_domain->stlb_cpu[i])
174 dma_free_coherent(etnaviv_domain->dev, SZ_4K, 160 dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
175 etnaviv_domain->stlb_cpu[i], 161 etnaviv_domain->stlb_cpu[i],
176 etnaviv_domain->stlb_dma[i]); 162 etnaviv_domain->stlb_dma[i]);
177 } 163 }
@@ -179,23 +165,23 @@ fail_mem:
179 return ret; 165 return ret;
180} 166}
181 167
182static void etnaviv_iommuv2_domain_free(struct iommu_domain *domain) 168static void etnaviv_iommuv2_domain_free(struct etnaviv_iommu_domain *domain)
183{ 169{
184 struct etnaviv_iommuv2_domain *etnaviv_domain = 170 struct etnaviv_iommuv2_domain *etnaviv_domain =
185 to_etnaviv_domain(domain); 171 to_etnaviv_domain(domain);
186 int i; 172 int i;
187 173
188 dma_free_coherent(etnaviv_domain->dev, SZ_4K, 174 dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
189 etnaviv_domain->bad_page_cpu, 175 etnaviv_domain->base.bad_page_cpu,
190 etnaviv_domain->bad_page_dma); 176 etnaviv_domain->base.bad_page_dma);
191 177
192 dma_free_coherent(etnaviv_domain->dev, SZ_4K, 178 dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
193 etnaviv_domain->mtlb_cpu, 179 etnaviv_domain->mtlb_cpu,
194 etnaviv_domain->mtlb_dma); 180 etnaviv_domain->mtlb_dma);
195 181
196 for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) { 182 for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
197 if (etnaviv_domain->stlb_cpu[i]) 183 if (etnaviv_domain->stlb_cpu[i])
198 dma_free_coherent(etnaviv_domain->dev, SZ_4K, 184 dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
199 etnaviv_domain->stlb_cpu[i], 185 etnaviv_domain->stlb_cpu[i],
200 etnaviv_domain->stlb_dma[i]); 186 etnaviv_domain->stlb_dma[i]);
201 } 187 }
@@ -203,7 +189,7 @@ static void etnaviv_iommuv2_domain_free(struct iommu_domain *domain)
203 vfree(etnaviv_domain); 189 vfree(etnaviv_domain);
204} 190}
205 191
206static size_t etnaviv_iommuv2_dump_size(struct iommu_domain *domain) 192static size_t etnaviv_iommuv2_dump_size(struct etnaviv_iommu_domain *domain)
207{ 193{
208 struct etnaviv_iommuv2_domain *etnaviv_domain = 194 struct etnaviv_iommuv2_domain *etnaviv_domain =
209 to_etnaviv_domain(domain); 195 to_etnaviv_domain(domain);
@@ -217,7 +203,7 @@ static size_t etnaviv_iommuv2_dump_size(struct iommu_domain *domain)
217 return dump_size; 203 return dump_size;
218} 204}
219 205
220static void etnaviv_iommuv2_dump(struct iommu_domain *domain, void *buf) 206static void etnaviv_iommuv2_dump(struct etnaviv_iommu_domain *domain, void *buf)
221{ 207{
222 struct etnaviv_iommuv2_domain *etnaviv_domain = 208 struct etnaviv_iommuv2_domain *etnaviv_domain =
223 to_etnaviv_domain(domain); 209 to_etnaviv_domain(domain);
@@ -230,18 +216,6 @@ static void etnaviv_iommuv2_dump(struct iommu_domain *domain, void *buf)
230 memcpy(buf, etnaviv_domain->stlb_cpu[i], SZ_4K); 216 memcpy(buf, etnaviv_domain->stlb_cpu[i], SZ_4K);
231} 217}
232 218
233static const struct etnaviv_iommu_ops etnaviv_iommu_ops = {
234 .ops = {
235 .domain_free = etnaviv_iommuv2_domain_free,
236 .map = etnaviv_iommuv2_map,
237 .unmap = etnaviv_iommuv2_unmap,
238 .iova_to_phys = etnaviv_iommuv2_iova_to_phys,
239 .pgsize_bitmap = SZ_4K,
240 },
241 .dump_size = etnaviv_iommuv2_dump_size,
242 .dump = etnaviv_iommuv2_dump,
243};
244
245void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu) 219void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
246{ 220{
247 struct etnaviv_iommuv2_domain *etnaviv_domain = 221 struct etnaviv_iommuv2_domain *etnaviv_domain =
@@ -254,35 +228,45 @@ void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
254 228
255 prefetch = etnaviv_buffer_config_mmuv2(gpu, 229 prefetch = etnaviv_buffer_config_mmuv2(gpu,
256 (u32)etnaviv_domain->mtlb_dma, 230 (u32)etnaviv_domain->mtlb_dma,
257 (u32)etnaviv_domain->bad_page_dma); 231 (u32)etnaviv_domain->base.bad_page_dma);
258 etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(gpu->buffer), 232 etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(gpu->buffer),
259 prefetch); 233 prefetch);
260 etnaviv_gpu_wait_idle(gpu, 100); 234 etnaviv_gpu_wait_idle(gpu, 100);
261 235
262 gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE); 236 gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
263} 237}
264struct iommu_domain *etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu) 238
239const struct etnaviv_iommu_domain_ops etnaviv_iommuv2_ops = {
240 .free = etnaviv_iommuv2_domain_free,
241 .map = etnaviv_iommuv2_map,
242 .unmap = etnaviv_iommuv2_unmap,
243 .dump_size = etnaviv_iommuv2_dump_size,
244 .dump = etnaviv_iommuv2_dump,
245};
246
247struct etnaviv_iommu_domain *
248etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu)
265{ 249{
266 struct etnaviv_iommuv2_domain *etnaviv_domain; 250 struct etnaviv_iommuv2_domain *etnaviv_domain;
251 struct etnaviv_iommu_domain *domain;
267 int ret; 252 int ret;
268 253
269 etnaviv_domain = vzalloc(sizeof(*etnaviv_domain)); 254 etnaviv_domain = vzalloc(sizeof(*etnaviv_domain));
270 if (!etnaviv_domain) 255 if (!etnaviv_domain)
271 return NULL; 256 return NULL;
272 257
273 etnaviv_domain->dev = gpu->dev; 258 domain = &etnaviv_domain->base;
274 259
275 etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING; 260 domain->dev = gpu->dev;
276 etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops; 261 domain->base = 0;
277 etnaviv_domain->domain.pgsize_bitmap = SZ_4K; 262 domain->size = (u64)SZ_1G * 4;
278 etnaviv_domain->domain.geometry.aperture_start = 0; 263 domain->ops = &etnaviv_iommuv2_ops;
279 etnaviv_domain->domain.geometry.aperture_end = ~0UL & ~(SZ_4K - 1);
280 264
281 ret = etnaviv_iommuv2_init(etnaviv_domain); 265 ret = etnaviv_iommuv2_init(etnaviv_domain);
282 if (ret) 266 if (ret)
283 goto out_free; 267 goto out_free;
284 268
285 return &etnaviv_domain->domain; 269 return &etnaviv_domain->base;
286 270
287out_free: 271out_free:
288 vfree(etnaviv_domain); 272 vfree(etnaviv_domain);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index f103e787de94..35074b944778 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -22,17 +22,64 @@
22#include "etnaviv_iommu.h" 22#include "etnaviv_iommu.h"
23#include "etnaviv_mmu.h" 23#include "etnaviv_mmu.h"
24 24
25static int etnaviv_fault_handler(struct iommu_domain *iommu, struct device *dev, 25static void etnaviv_domain_unmap(struct etnaviv_iommu_domain *domain,
26 unsigned long iova, int flags, void *arg) 26 unsigned long iova, size_t size)
27{ 27{
28 DBG("*** fault: iova=%08lx, flags=%d", iova, flags); 28 size_t unmapped_page, unmapped = 0;
29 return 0; 29 size_t pgsize = SZ_4K;
30
31 if (!IS_ALIGNED(iova | size, pgsize)) {
32 pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
33 iova, size, pgsize);
34 return;
35 }
36
37 while (unmapped < size) {
38 unmapped_page = domain->ops->unmap(domain, iova, pgsize);
39 if (!unmapped_page)
40 break;
41
42 iova += unmapped_page;
43 unmapped += unmapped_page;
44 }
30} 45}
31 46
32int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova, 47static int etnaviv_domain_map(struct etnaviv_iommu_domain *domain,
33 struct sg_table *sgt, unsigned len, int prot) 48 unsigned long iova, phys_addr_t paddr,
49 size_t size, int prot)
34{ 50{
35 struct iommu_domain *domain = iommu->domain; 51 unsigned long orig_iova = iova;
52 size_t pgsize = SZ_4K;
53 size_t orig_size = size;
54 int ret = 0;
55
56 if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
57 pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
58 iova, &paddr, size, pgsize);
59 return -EINVAL;
60 }
61
62 while (size) {
63 ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
64 if (ret)
65 break;
66
67 iova += pgsize;
68 paddr += pgsize;
69 size -= pgsize;
70 }
71
72 /* unroll mapping in case something went wrong */
73 if (ret)
74 etnaviv_domain_unmap(domain, orig_iova, orig_size - size);
75
76 return ret;
77}
78
79static int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
80 struct sg_table *sgt, unsigned len, int prot)
81{
82 struct etnaviv_iommu_domain *domain = iommu->domain;
36 struct scatterlist *sg; 83 struct scatterlist *sg;
37 unsigned int da = iova; 84 unsigned int da = iova;
38 unsigned int i, j; 85 unsigned int i, j;
@@ -47,7 +94,7 @@ int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
47 94
48 VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes); 95 VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);
49 96
50 ret = iommu_map(domain, da, pa, bytes, prot); 97 ret = etnaviv_domain_map(domain, da, pa, bytes, prot);
51 if (ret) 98 if (ret)
52 goto fail; 99 goto fail;
53 100
@@ -62,27 +109,24 @@ fail:
62 for_each_sg(sgt->sgl, sg, i, j) { 109 for_each_sg(sgt->sgl, sg, i, j) {
63 size_t bytes = sg_dma_len(sg) + sg->offset; 110 size_t bytes = sg_dma_len(sg) + sg->offset;
64 111
65 iommu_unmap(domain, da, bytes); 112 etnaviv_domain_unmap(domain, da, bytes);
66 da += bytes; 113 da += bytes;
67 } 114 }
68 return ret; 115 return ret;
69} 116}
70 117
71int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova, 118static void etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
72 struct sg_table *sgt, unsigned len) 119 struct sg_table *sgt, unsigned len)
73{ 120{
74 struct iommu_domain *domain = iommu->domain; 121 struct etnaviv_iommu_domain *domain = iommu->domain;
75 struct scatterlist *sg; 122 struct scatterlist *sg;
76 unsigned int da = iova; 123 unsigned int da = iova;
77 int i; 124 int i;
78 125
79 for_each_sg(sgt->sgl, sg, sgt->nents, i) { 126 for_each_sg(sgt->sgl, sg, sgt->nents, i) {
80 size_t bytes = sg_dma_len(sg) + sg->offset; 127 size_t bytes = sg_dma_len(sg) + sg->offset;
81 size_t unmapped;
82 128
83 unmapped = iommu_unmap(domain, da, bytes); 129 etnaviv_domain_unmap(domain, da, bytes);
84 if (unmapped < bytes)
85 return unmapped;
86 130
87 VERB("unmap[%d]: %08x(%zx)", i, iova, bytes); 131 VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
88 132
@@ -90,8 +134,6 @@ int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
90 134
91 da += bytes; 135 da += bytes;
92 } 136 }
93
94 return 0;
95} 137}
96 138
97static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu, 139static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu,
@@ -237,7 +279,7 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
237 mmu->last_iova = node->start + etnaviv_obj->base.size; 279 mmu->last_iova = node->start + etnaviv_obj->base.size;
238 mapping->iova = node->start; 280 mapping->iova = node->start;
239 ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size, 281 ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size,
240 IOMMU_READ | IOMMU_WRITE); 282 ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);
241 283
242 if (ret < 0) { 284 if (ret < 0) {
243 drm_mm_remove_node(node); 285 drm_mm_remove_node(node);
@@ -271,7 +313,7 @@ void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
271void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu) 313void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
272{ 314{
273 drm_mm_takedown(&mmu->mm); 315 drm_mm_takedown(&mmu->mm);
274 iommu_domain_free(mmu->domain); 316 mmu->domain->ops->free(mmu->domain);
275 kfree(mmu); 317 kfree(mmu);
276} 318}
277 319
@@ -303,11 +345,7 @@ struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu)
303 mutex_init(&mmu->lock); 345 mutex_init(&mmu->lock);
304 INIT_LIST_HEAD(&mmu->mappings); 346 INIT_LIST_HEAD(&mmu->mappings);
305 347
306 drm_mm_init(&mmu->mm, mmu->domain->geometry.aperture_start, 348 drm_mm_init(&mmu->mm, mmu->domain->base, mmu->domain->size);
307 mmu->domain->geometry.aperture_end -
308 mmu->domain->geometry.aperture_start + 1);
309
310 iommu_set_fault_handler(mmu->domain, etnaviv_fault_handler, gpu->dev);
311 349
312 return mmu; 350 return mmu;
313} 351}
@@ -338,8 +376,8 @@ int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr,
338 mutex_unlock(&mmu->lock); 376 mutex_unlock(&mmu->lock);
339 return ret; 377 return ret;
340 } 378 }
341 ret = iommu_map(mmu->domain, vram_node->start, paddr, size, 379 ret = etnaviv_domain_map(mmu->domain, vram_node->start, paddr,
342 IOMMU_READ); 380 size, ETNAVIV_PROT_READ);
343 if (ret < 0) { 381 if (ret < 0) {
344 drm_mm_remove_node(vram_node); 382 drm_mm_remove_node(vram_node);
345 mutex_unlock(&mmu->lock); 383 mutex_unlock(&mmu->lock);
@@ -362,25 +400,17 @@ void etnaviv_iommu_put_suballoc_va(struct etnaviv_gpu *gpu,
362 400
363 if (mmu->version == ETNAVIV_IOMMU_V2) { 401 if (mmu->version == ETNAVIV_IOMMU_V2) {
364 mutex_lock(&mmu->lock); 402 mutex_lock(&mmu->lock);
365 iommu_unmap(mmu->domain,iova, size); 403 etnaviv_domain_unmap(mmu->domain, iova, size);
366 drm_mm_remove_node(vram_node); 404 drm_mm_remove_node(vram_node);
367 mutex_unlock(&mmu->lock); 405 mutex_unlock(&mmu->lock);
368 } 406 }
369} 407}
370size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu) 408size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu)
371{ 409{
372 struct etnaviv_iommu_ops *ops; 410 return iommu->domain->ops->dump_size(iommu->domain);
373
374 ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);
375
376 return ops->dump_size(iommu->domain);
377} 411}
378 412
379void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf) 413void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf)
380{ 414{
381 struct etnaviv_iommu_ops *ops; 415 iommu->domain->ops->dump(iommu->domain, buf);
382
383 ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);
384
385 ops->dump(iommu->domain, buf);
386} 416}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
index 54be289e5981..ab603f5166b1 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
@@ -17,7 +17,8 @@
17#ifndef __ETNAVIV_MMU_H__ 17#ifndef __ETNAVIV_MMU_H__
18#define __ETNAVIV_MMU_H__ 18#define __ETNAVIV_MMU_H__
19 19
20#include <linux/iommu.h> 20#define ETNAVIV_PROT_READ (1 << 0)
21#define ETNAVIV_PROT_WRITE (1 << 1)
21 22
22enum etnaviv_iommu_version { 23enum etnaviv_iommu_version {
23 ETNAVIV_IOMMU_V1 = 0, 24 ETNAVIV_IOMMU_V1 = 0,
@@ -26,16 +27,31 @@ enum etnaviv_iommu_version {
26 27
27struct etnaviv_gpu; 28struct etnaviv_gpu;
28struct etnaviv_vram_mapping; 29struct etnaviv_vram_mapping;
30struct etnaviv_iommu_domain;
29 31
30struct etnaviv_iommu_ops { 32struct etnaviv_iommu_domain_ops {
31 struct iommu_ops ops; 33 void (*free)(struct etnaviv_iommu_domain *);
32 size_t (*dump_size)(struct iommu_domain *); 34 int (*map)(struct etnaviv_iommu_domain *domain, unsigned long iova,
33 void (*dump)(struct iommu_domain *, void *); 35 phys_addr_t paddr, size_t size, int prot);
36 size_t (*unmap)(struct etnaviv_iommu_domain *domain, unsigned long iova,
37 size_t size);
38 size_t (*dump_size)(struct etnaviv_iommu_domain *);
39 void (*dump)(struct etnaviv_iommu_domain *, void *);
40};
41
42struct etnaviv_iommu_domain {
43 struct device *dev;
44 void *bad_page_cpu;
45 dma_addr_t bad_page_dma;
46 u64 base;
47 u64 size;
48
49 const struct etnaviv_iommu_domain_ops *ops;
34}; 50};
35 51
36struct etnaviv_iommu { 52struct etnaviv_iommu {
37 struct etnaviv_gpu *gpu; 53 struct etnaviv_gpu *gpu;
38 struct iommu_domain *domain; 54 struct etnaviv_iommu_domain *domain;
39 55
40 enum etnaviv_iommu_version version; 56 enum etnaviv_iommu_version version;
41 57
@@ -49,18 +65,11 @@ struct etnaviv_iommu {
49 65
50struct etnaviv_gem_object; 66struct etnaviv_gem_object;
51 67
52int etnaviv_iommu_attach(struct etnaviv_iommu *iommu, const char **names,
53 int cnt);
54int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
55 struct sg_table *sgt, unsigned len, int prot);
56int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
57 struct sg_table *sgt, unsigned len);
58int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu, 68int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
59 struct etnaviv_gem_object *etnaviv_obj, u32 memory_base, 69 struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
60 struct etnaviv_vram_mapping *mapping); 70 struct etnaviv_vram_mapping *mapping);
61void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu, 71void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
62 struct etnaviv_vram_mapping *mapping); 72 struct etnaviv_vram_mapping *mapping);
63void etnaviv_iommu_destroy(struct etnaviv_iommu *iommu);
64 73
65int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr, 74int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr,
66 struct drm_mm_node *vram_node, size_t size, 75 struct drm_mm_node *vram_node, size_t size,
@@ -73,6 +82,7 @@ size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu);
73void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf); 82void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf);
74 83
75struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu); 84struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu);
85void etnaviv_iommu_destroy(struct etnaviv_iommu *iommu);
76void etnaviv_iommu_restore(struct etnaviv_gpu *gpu); 86void etnaviv_iommu_restore(struct etnaviv_gpu *gpu);
77 87
78#endif /* __ETNAVIV_MMU_H__ */ 88#endif /* __ETNAVIV_MMU_H__ */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
new file mode 100644
index 000000000000..768f5aafdd18
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
@@ -0,0 +1,495 @@
1/*
2 * Copyright (C) 2017 Etnaviv Project
3 * Copyright (C) 2017 Zodiac Inflight Innovations
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "etnaviv_gpu.h"
19#include "etnaviv_perfmon.h"
20#include "state_hi.xml.h"
21
22struct etnaviv_pm_domain;
23
24struct etnaviv_pm_signal {
25 char name[64];
26 u32 data;
27
28 u32 (*sample)(struct etnaviv_gpu *gpu,
29 const struct etnaviv_pm_domain *domain,
30 const struct etnaviv_pm_signal *signal);
31};
32
33struct etnaviv_pm_domain {
34 char name[64];
35
36 /* profile register */
37 u32 profile_read;
38 u32 profile_config;
39
40 u8 nr_signals;
41 const struct etnaviv_pm_signal *signal;
42};
43
44struct etnaviv_pm_domain_meta {
45 const struct etnaviv_pm_domain *domains;
46 u32 nr_domains;
47};
48
49static u32 simple_reg_read(struct etnaviv_gpu *gpu,
50 const struct etnaviv_pm_domain *domain,
51 const struct etnaviv_pm_signal *signal)
52{
53 return gpu_read(gpu, signal->data);
54}
55
56static u32 perf_reg_read(struct etnaviv_gpu *gpu,
57 const struct etnaviv_pm_domain *domain,
58 const struct etnaviv_pm_signal *signal)
59{
60 gpu_write(gpu, domain->profile_config, signal->data);
61
62 return gpu_read(gpu, domain->profile_read);
63}
64
65static u32 pipe_reg_read(struct etnaviv_gpu *gpu,
66 const struct etnaviv_pm_domain *domain,
67 const struct etnaviv_pm_signal *signal)
68{
69 u32 clock = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
70 u32 value = 0;
71 unsigned i;
72
73 for (i = 0; i < gpu->identity.pixel_pipes; i++) {
74 clock &= ~(VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__MASK);
75 clock |= VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE(i);
76 gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
77 gpu_write(gpu, domain->profile_config, signal->data);
78 value += gpu_read(gpu, domain->profile_read);
79 }
80
81 /* switch back to pixel pipe 0 to prevent GPU hang */
82 clock &= ~(VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__MASK);
83 clock |= VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE(0);
84 gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
85
86 return value;
87}
88
89static const struct etnaviv_pm_domain doms_3d[] = {
90 {
91 .name = "HI",
92 .profile_read = VIVS_MC_PROFILE_HI_READ,
93 .profile_config = VIVS_MC_PROFILE_CONFIG2,
94 .nr_signals = 5,
95 .signal = (const struct etnaviv_pm_signal[]) {
96 {
97 "TOTAL_CYCLES",
98 VIVS_HI_PROFILE_TOTAL_CYCLES,
99 &simple_reg_read
100 },
101 {
102 "IDLE_CYCLES",
103 VIVS_HI_PROFILE_IDLE_CYCLES,
104 &simple_reg_read
105 },
106 {
107 "AXI_CYCLES_READ_REQUEST_STALLED",
108 VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_READ_REQUEST_STALLED,
109 &perf_reg_read
110 },
111 {
112 "AXI_CYCLES_WRITE_REQUEST_STALLED",
113 VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_WRITE_REQUEST_STALLED,
114 &perf_reg_read
115 },
116 {
117 "AXI_CYCLES_WRITE_DATA_STALLED",
118 VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_WRITE_DATA_STALLED,
119 &perf_reg_read
120 }
121 }
122 },
123 {
124 .name = "PE",
125 .profile_read = VIVS_MC_PROFILE_PE_READ,
126 .profile_config = VIVS_MC_PROFILE_CONFIG0,
127 .nr_signals = 5,
128 .signal = (const struct etnaviv_pm_signal[]) {
129 {
130 "PIXEL_COUNT_KILLED_BY_COLOR_PIPE",
131 VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_KILLED_BY_COLOR_PIPE,
132 &pipe_reg_read
133 },
134 {
135 "PIXEL_COUNT_KILLED_BY_DEPTH_PIPE",
136 VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_KILLED_BY_DEPTH_PIPE,
137 &pipe_reg_read
138 },
139 {
140 "PIXEL_COUNT_DRAWN_BY_COLOR_PIPE",
141 VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_DRAWN_BY_COLOR_PIPE,
142 &pipe_reg_read
143 },
144 {
145 "PIXEL_COUNT_DRAWN_BY_DEPTH_PIPE",
146 VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_DRAWN_BY_DEPTH_PIPE,
147 &pipe_reg_read
148 }
149 }
150 },
151 {
152 .name = "SH",
153 .profile_read = VIVS_MC_PROFILE_SH_READ,
154 .profile_config = VIVS_MC_PROFILE_CONFIG0,
155 .nr_signals = 9,
156 .signal = (const struct etnaviv_pm_signal[]) {
157 {
158 "SHADER_CYCLES",
159 VIVS_MC_PROFILE_CONFIG0_SH_SHADER_CYCLES,
160 &perf_reg_read
161 },
162 {
163 "PS_INST_COUNTER",
164 VIVS_MC_PROFILE_CONFIG0_SH_PS_INST_COUNTER,
165 &perf_reg_read
166 },
167 {
168 "RENDERED_PIXEL_COUNTER",
169 VIVS_MC_PROFILE_CONFIG0_SH_RENDERED_PIXEL_COUNTER,
170 &perf_reg_read
171 },
172 {
173 "VS_INST_COUNTER",
174 VIVS_MC_PROFILE_CONFIG0_SH_VS_INST_COUNTER,
175 &pipe_reg_read
176 },
177 {
178 "RENDERED_VERTICE_COUNTER",
179 VIVS_MC_PROFILE_CONFIG0_SH_RENDERED_VERTICE_COUNTER,
180 &pipe_reg_read
181 },
182 {
183 "VTX_BRANCH_INST_COUNTER",
184 VIVS_MC_PROFILE_CONFIG0_SH_VTX_BRANCH_INST_COUNTER,
185 &pipe_reg_read
186 },
187 {
188 "VTX_TEXLD_INST_COUNTER",
189 VIVS_MC_PROFILE_CONFIG0_SH_VTX_TEXLD_INST_COUNTER,
190 &pipe_reg_read
191 },
192 {
193 "PXL_BRANCH_INST_COUNTER",
194 VIVS_MC_PROFILE_CONFIG0_SH_PXL_BRANCH_INST_COUNTER,
195 &pipe_reg_read
196 },
197 {
198 "PXL_TEXLD_INST_COUNTER",
199 VIVS_MC_PROFILE_CONFIG0_SH_PXL_TEXLD_INST_COUNTER,
200 &pipe_reg_read
201 }
202 }
203 },
204 {
205 .name = "PA",
206 .profile_read = VIVS_MC_PROFILE_PA_READ,
207 .profile_config = VIVS_MC_PROFILE_CONFIG1,
208 .nr_signals = 6,
209 .signal = (const struct etnaviv_pm_signal[]) {
210 {
211 "INPUT_VTX_COUNTER",
212 VIVS_MC_PROFILE_CONFIG1_PA_INPUT_VTX_COUNTER,
213 &perf_reg_read
214 },
215 {
216 "INPUT_PRIM_COUNTER",
217 VIVS_MC_PROFILE_CONFIG1_PA_INPUT_PRIM_COUNTER,
218 &perf_reg_read
219 },
220 {
221 "OUTPUT_PRIM_COUNTER",
222 VIVS_MC_PROFILE_CONFIG1_PA_OUTPUT_PRIM_COUNTER,
223 &perf_reg_read
224 },
225 {
226 "DEPTH_CLIPPED_COUNTER",
227 VIVS_MC_PROFILE_CONFIG1_PA_DEPTH_CLIPPED_COUNTER,
228 &pipe_reg_read
229 },
230 {
231 "TRIVIAL_REJECTED_COUNTER",
232 VIVS_MC_PROFILE_CONFIG1_PA_TRIVIAL_REJECTED_COUNTER,
233 &pipe_reg_read
234 },
235 {
236 "CULLED_COUNTER",
237 VIVS_MC_PROFILE_CONFIG1_PA_CULLED_COUNTER,
238 &pipe_reg_read
239 }
240 }
241 },
242 {
243 .name = "SE",
244 .profile_read = VIVS_MC_PROFILE_SE_READ,
245 .profile_config = VIVS_MC_PROFILE_CONFIG1,
246 .nr_signals = 2,
247 .signal = (const struct etnaviv_pm_signal[]) {
248 {
249 "CULLED_TRIANGLE_COUNT",
250 VIVS_MC_PROFILE_CONFIG1_SE_CULLED_TRIANGLE_COUNT,
251 &perf_reg_read
252 },
253 {
254 "CULLED_LINES_COUNT",
255 VIVS_MC_PROFILE_CONFIG1_SE_CULLED_LINES_COUNT,
256 &perf_reg_read
257 }
258 }
259 },
260 {
261 .name = "RA",
262 .profile_read = VIVS_MC_PROFILE_RA_READ,
263 .profile_config = VIVS_MC_PROFILE_CONFIG1,
264 .nr_signals = 7,
265 .signal = (const struct etnaviv_pm_signal[]) {
266 {
267 "VALID_PIXEL_COUNT",
268 VIVS_MC_PROFILE_CONFIG1_RA_VALID_PIXEL_COUNT,
269 &perf_reg_read
270 },
271 {
272 "TOTAL_QUAD_COUNT",
273 VIVS_MC_PROFILE_CONFIG1_RA_TOTAL_QUAD_COUNT,
274 &perf_reg_read
275 },
276 {
277 "VALID_QUAD_COUNT_AFTER_EARLY_Z",
278 VIVS_MC_PROFILE_CONFIG1_RA_VALID_QUAD_COUNT_AFTER_EARLY_Z,
279 &perf_reg_read
280 },
281 {
282 "TOTAL_PRIMITIVE_COUNT",
283 VIVS_MC_PROFILE_CONFIG1_RA_TOTAL_PRIMITIVE_COUNT,
284 &perf_reg_read
285 },
286 {
287 "PIPE_CACHE_MISS_COUNTER",
288 VIVS_MC_PROFILE_CONFIG1_RA_PIPE_CACHE_MISS_COUNTER,
289 &perf_reg_read
290 },
291 {
292 "PREFETCH_CACHE_MISS_COUNTER",
293 VIVS_MC_PROFILE_CONFIG1_RA_PREFETCH_CACHE_MISS_COUNTER,
294 &perf_reg_read
295 },
296 {
297 "CULLED_QUAD_COUNT",
298 VIVS_MC_PROFILE_CONFIG1_RA_CULLED_QUAD_COUNT,
299 &perf_reg_read
300 }
301 }
302 },
303 {
304 .name = "TX",
305 .profile_read = VIVS_MC_PROFILE_TX_READ,
306 .profile_config = VIVS_MC_PROFILE_CONFIG1,
307 .nr_signals = 9,
308 .signal = (const struct etnaviv_pm_signal[]) {
309 {
310 "TOTAL_BILINEAR_REQUESTS",
311 VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_BILINEAR_REQUESTS,
312 &perf_reg_read
313 },
314 {
315 "TOTAL_TRILINEAR_REQUESTS",
316 VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_TRILINEAR_REQUESTS,
317 &perf_reg_read
318 },
319 {
320 "TOTAL_DISCARDED_TEXTURE_REQUESTS",
321 VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_DISCARDED_TEXTURE_REQUESTS,
322 &perf_reg_read
323 },
324 {
325 "TOTAL_TEXTURE_REQUESTS",
326 VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_TEXTURE_REQUESTS,
327 &perf_reg_read
328 },
329 {
330 "MEM_READ_COUNT",
331 VIVS_MC_PROFILE_CONFIG1_TX_MEM_READ_COUNT,
332 &perf_reg_read
333 },
334 {
335 "MEM_READ_IN_8B_COUNT",
336 VIVS_MC_PROFILE_CONFIG1_TX_MEM_READ_IN_8B_COUNT,
337 &perf_reg_read
338 },
339 {
340 "CACHE_MISS_COUNT",
341 VIVS_MC_PROFILE_CONFIG1_TX_CACHE_MISS_COUNT,
342 &perf_reg_read
343 },
344 {
345 "CACHE_HIT_TEXEL_COUNT",
346 VIVS_MC_PROFILE_CONFIG1_TX_CACHE_HIT_TEXEL_COUNT,
347 &perf_reg_read
348 },
349 {
350 "CACHE_MISS_TEXEL_COUNT",
351 VIVS_MC_PROFILE_CONFIG1_TX_CACHE_MISS_TEXEL_COUNT,
352 &perf_reg_read
353 }
354 }
355 },
356 {
357 .name = "MC",
358 .profile_read = VIVS_MC_PROFILE_MC_READ,
359 .profile_config = VIVS_MC_PROFILE_CONFIG2,
360 .nr_signals = 3,
361 .signal = (const struct etnaviv_pm_signal[]) {
362 {
363 "TOTAL_READ_REQ_8B_FROM_PIPELINE",
364 VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_8B_FROM_PIPELINE,
365 &perf_reg_read
366 },
367 {
368 "TOTAL_READ_REQ_8B_FROM_IP",
369 VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_8B_FROM_IP,
370 &perf_reg_read
371 },
372 {
373 "TOTAL_WRITE_REQ_8B_FROM_PIPELINE",
374 VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_WRITE_REQ_8B_FROM_PIPELINE,
375 &perf_reg_read
376 }
377 }
378 }
379};
380
381static const struct etnaviv_pm_domain doms_2d[] = {
382 {
383 .name = "PE",
384 .profile_read = VIVS_MC_PROFILE_PE_READ,
385 .profile_config = VIVS_MC_PROFILE_CONFIG0,
386 .nr_signals = 1,
387 .signal = (const struct etnaviv_pm_signal[]) {
388 {
389 "PIXELS_RENDERED_2D",
390 VIVS_MC_PROFILE_CONFIG0_PE_PIXELS_RENDERED_2D,
391 &pipe_reg_read
392 }
393 }
394 }
395};
396
397static const struct etnaviv_pm_domain doms_vg[] = {
398};
399
400static const struct etnaviv_pm_domain_meta doms_meta[] = {
401 {
402 .nr_domains = ARRAY_SIZE(doms_3d),
403 .domains = &doms_3d[0]
404 },
405 {
406 .nr_domains = ARRAY_SIZE(doms_2d),
407 .domains = &doms_2d[0]
408 },
409 {
410 .nr_domains = ARRAY_SIZE(doms_vg),
411 .domains = &doms_vg[0]
412 }
413};
414
415int etnaviv_pm_query_dom(struct etnaviv_gpu *gpu,
416 struct drm_etnaviv_pm_domain *domain)
417{
418 const struct etnaviv_pm_domain_meta *meta = &doms_meta[domain->pipe];
419 const struct etnaviv_pm_domain *dom;
420
421 if (domain->iter >= meta->nr_domains)
422 return -EINVAL;
423
424 dom = meta->domains + domain->iter;
425
426 domain->id = domain->iter;
427 domain->nr_signals = dom->nr_signals;
428 strncpy(domain->name, dom->name, sizeof(domain->name));
429
430 domain->iter++;
431 if (domain->iter == meta->nr_domains)
432 domain->iter = 0xff;
433
434 return 0;
435}
436
437int etnaviv_pm_query_sig(struct etnaviv_gpu *gpu,
438 struct drm_etnaviv_pm_signal *signal)
439{
440 const struct etnaviv_pm_domain_meta *meta = &doms_meta[signal->pipe];
441 const struct etnaviv_pm_domain *dom;
442 const struct etnaviv_pm_signal *sig;
443
444 if (signal->domain >= meta->nr_domains)
445 return -EINVAL;
446
447 dom = meta->domains + signal->domain;
448
449 if (signal->iter > dom->nr_signals)
450 return -EINVAL;
451
452 sig = &dom->signal[signal->iter];
453
454 signal->id = signal->iter;
455 strncpy(signal->name, sig->name, sizeof(signal->name));
456
457 signal->iter++;
458 if (signal->iter == dom->nr_signals)
459 signal->iter = 0xffff;
460
461 return 0;
462}
463
464int etnaviv_pm_req_validate(const struct drm_etnaviv_gem_submit_pmr *r,
465 u32 exec_state)
466{
467 const struct etnaviv_pm_domain_meta *meta = &doms_meta[exec_state];
468 const struct etnaviv_pm_domain *dom;
469
470 if (r->domain >= meta->nr_domains)
471 return -EINVAL;
472
473 dom = meta->domains + r->domain;
474
475 if (r->signal > dom->nr_signals)
476 return -EINVAL;
477
478 return 0;
479}
480
481void etnaviv_perfmon_process(struct etnaviv_gpu *gpu,
482 const struct etnaviv_perfmon_request *pmr)
483{
484 const struct etnaviv_pm_domain_meta *meta = &doms_meta[gpu->exec_state];
485 const struct etnaviv_pm_domain *dom;
486 const struct etnaviv_pm_signal *sig;
487 u32 *bo = pmr->bo_vma;
488 u32 val;
489
490 dom = meta->domains + pmr->domain;
491 sig = &dom->signal[pmr->signal];
492 val = sig->sample(gpu, dom, sig);
493
494 *(bo + pmr->offset) = val;
495}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.h b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.h
new file mode 100644
index 000000000000..35dce194cb00
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.h
@@ -0,0 +1,49 @@
1/*
2 * Copyright (C) 2017 Etnaviv Project
3 * Copyright (C) 2017 Zodiac Inflight Innovations
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __ETNAVIV_PERFMON_H__
19#define __ETNAVIV_PERFMON_H__
20
21struct etnaviv_gpu;
22struct drm_etnaviv_pm_domain;
23struct drm_etnaviv_pm_signal;
24
25struct etnaviv_perfmon_request
26{
27 u32 flags;
28 u8 domain;
29 u8 signal;
30 u32 sequence;
31
32 /* bo to store a value */
33 u32 *bo_vma;
34 u32 offset;
35};
36
37int etnaviv_pm_query_dom(struct etnaviv_gpu *gpu,
38 struct drm_etnaviv_pm_domain *domain);
39
40int etnaviv_pm_query_sig(struct etnaviv_gpu *gpu,
41 struct drm_etnaviv_pm_signal *signal);
42
43int etnaviv_pm_req_validate(const struct drm_etnaviv_gem_submit_pmr *r,
44 u32 exec_state);
45
46void etnaviv_perfmon_process(struct etnaviv_gpu *gpu,
47 const struct etnaviv_perfmon_request *pmr);
48
49#endif /* __ETNAVIV_PERFMON_H__ */
diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c
index d75ecb3bdee7..1fa163373a47 100644
--- a/drivers/gpu/drm/gma500/mid_bios.c
+++ b/drivers/gpu/drm/gma500/mid_bios.c
@@ -237,7 +237,7 @@ static int mid_get_vbt_data_r10(struct drm_psb_private *dev_priv, u32 addr)
237 237
238 gct = kmalloc(sizeof(*gct) * vbt.panel_count, GFP_KERNEL); 238 gct = kmalloc(sizeof(*gct) * vbt.panel_count, GFP_KERNEL);
239 if (!gct) 239 if (!gct)
240 return -1; 240 return -ENOMEM;
241 241
242 gct_virtual = ioremap(addr + sizeof(vbt), 242 gct_virtual = ioremap(addr + sizeof(vbt),
243 sizeof(*gct) * vbt.panel_count); 243 sizeof(*gct) * vbt.panel_count);
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index e787d376ba67..84507912be84 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -37,6 +37,7 @@
37#include "psb_drv.h" 37#include "psb_drv.h"
38#include "psb_intel_sdvo_regs.h" 38#include "psb_intel_sdvo_regs.h"
39#include "psb_intel_reg.h" 39#include "psb_intel_reg.h"
40#include <linux/kernel.h>
40 41
41#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1) 42#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
42#define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1) 43#define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)
@@ -62,8 +63,6 @@ static const char *tv_format_names[] = {
62 "SECAM_60" 63 "SECAM_60"
63}; 64};
64 65
65#define TV_FORMAT_NUM (sizeof(tv_format_names) / sizeof(*tv_format_names))
66
67struct psb_intel_sdvo { 66struct psb_intel_sdvo {
68 struct gma_encoder base; 67 struct gma_encoder base;
69 68
@@ -148,7 +147,7 @@ struct psb_intel_sdvo_connector {
148 int force_audio; 147 int force_audio;
149 148
150 /* This contains all current supported TV format */ 149 /* This contains all current supported TV format */
151 u8 tv_format_supported[TV_FORMAT_NUM]; 150 u8 tv_format_supported[ARRAY_SIZE(tv_format_names)];
152 int format_supported_num; 151 int format_supported_num;
153 struct drm_property *tv_format; 152 struct drm_property *tv_format;
154 153
@@ -1709,7 +1708,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
1709 } 1708 }
1710 1709
1711 if (property == psb_intel_sdvo_connector->tv_format) { 1710 if (property == psb_intel_sdvo_connector->tv_format) {
1712 if (val >= TV_FORMAT_NUM) 1711 if (val >= ARRAY_SIZE(tv_format_names))
1713 return -EINVAL; 1712 return -EINVAL;
1714 1713
1715 if (psb_intel_sdvo->tv_format_index == 1714 if (psb_intel_sdvo->tv_format_index ==
@@ -2269,7 +2268,7 @@ static bool psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_s
2269 return false; 2268 return false;
2270 2269
2271 psb_intel_sdvo_connector->format_supported_num = 0; 2270 psb_intel_sdvo_connector->format_supported_num = 0;
2272 for (i = 0 ; i < TV_FORMAT_NUM; i++) 2271 for (i = 0 ; i < ARRAY_SIZE(tv_format_names); i++)
2273 if (format_map & (1 << i)) 2272 if (format_map & (1 << i))
2274 psb_intel_sdvo_connector->tv_format_supported[psb_intel_sdvo_connector->format_supported_num++] = i; 2273 psb_intel_sdvo_connector->tv_format_supported[psb_intel_sdvo_connector->format_supported_num++] = i;
2275 2274
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
index ec4dd9df9150..f4eba87c96f3 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
@@ -36,7 +36,7 @@ static int hibmc_connector_mode_valid(struct drm_connector *connector,
36static struct drm_encoder * 36static struct drm_encoder *
37hibmc_connector_best_encoder(struct drm_connector *connector) 37hibmc_connector_best_encoder(struct drm_connector *connector)
38{ 38{
39 return drm_encoder_find(connector->dev, connector->encoder_ids[0]); 39 return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
40} 40}
41 41
42static const struct drm_connector_helper_funcs 42static const struct drm_connector_helper_funcs
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
index c19ab4f91ae7..ddb0403f1975 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
@@ -237,8 +237,8 @@ static int kirin_drm_platform_probe(struct platform_device *pdev)
237 } 237 }
238 238
239 remote = of_graph_get_remote_node(np, 0, 0); 239 remote = of_graph_get_remote_node(np, 0, 0);
240 if (IS_ERR(remote)) 240 if (!remote)
241 return PTR_ERR(remote); 241 return -ENODEV;
242 242
243 drm_of_component_match_add(dev, &match, compare_of, remote); 243 drm_of_component_match_add(dev, &match, compare_of, remote);
244 of_node_put(remote); 244 of_node_put(remote);
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index e9e64e8e9765..dfd95889f4b7 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -12,6 +12,7 @@ config DRM_I915
12 select DRM_PANEL 12 select DRM_PANEL
13 select DRM_MIPI_DSI 13 select DRM_MIPI_DSI
14 select RELAY 14 select RELAY
15 select IRQ_WORK
15 # i915 depends on ACPI_VIDEO when ACPI is enabled 16 # i915 depends on ACPI_VIDEO when ACPI is enabled
16 # but for select to work, need to select ACPI_VIDEO's dependencies, ick 17 # but for select to work, need to select ACPI_VIDEO's dependencies, ick
17 select BACKLIGHT_LCD_SUPPORT if ACPI 18 select BACKLIGHT_LCD_SUPPORT if ACPI
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 1cb8059a3a16..5182e3d5557d 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -139,7 +139,8 @@ i915-y += i915_perf.o \
139 i915_oa_bxt.o \ 139 i915_oa_bxt.o \
140 i915_oa_kblgt2.o \ 140 i915_oa_kblgt2.o \
141 i915_oa_kblgt3.o \ 141 i915_oa_kblgt3.o \
142 i915_oa_glk.o 142 i915_oa_glk.o \
143 i915_oa_cflgt2.o
143 144
144ifeq ($(CONFIG_DRM_I915_GVT),y) 145ifeq ($(CONFIG_DRM_I915_GVT),y)
145i915-y += intel_gvt.o 146i915-y += intel_gvt.o
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index ff3154fe6588..ab19545d59a1 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -101,7 +101,7 @@ int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
101 if (WARN_ON(bytes > 4)) 101 if (WARN_ON(bytes > 4))
102 return -EINVAL; 102 return -EINVAL;
103 103
104 if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ)) 104 if (WARN_ON(offset + bytes > vgpu->gvt->device_info.cfg_space_size))
105 return -EINVAL; 105 return -EINVAL;
106 106
107 memcpy(p_data, vgpu_cfg_space(vgpu) + offset, bytes); 107 memcpy(p_data, vgpu_cfg_space(vgpu) + offset, bytes);
@@ -110,13 +110,25 @@ int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
110 110
111static int map_aperture(struct intel_vgpu *vgpu, bool map) 111static int map_aperture(struct intel_vgpu *vgpu, bool map)
112{ 112{
113 u64 first_gfn, first_mfn; 113 phys_addr_t aperture_pa = vgpu_aperture_pa_base(vgpu);
114 unsigned long aperture_sz = vgpu_aperture_sz(vgpu);
115 u64 first_gfn;
114 u64 val; 116 u64 val;
115 int ret; 117 int ret;
116 118
117 if (map == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked) 119 if (map == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked)
118 return 0; 120 return 0;
119 121
122 if (map) {
123 vgpu->gm.aperture_va = memremap(aperture_pa, aperture_sz,
124 MEMREMAP_WC);
125 if (!vgpu->gm.aperture_va)
126 return -ENOMEM;
127 } else {
128 memunmap(vgpu->gm.aperture_va);
129 vgpu->gm.aperture_va = NULL;
130 }
131
120 val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_2]; 132 val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_2];
121 if (val & PCI_BASE_ADDRESS_MEM_TYPE_64) 133 if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
122 val = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2); 134 val = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
@@ -124,14 +136,16 @@ static int map_aperture(struct intel_vgpu *vgpu, bool map)
124 val = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2); 136 val = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
125 137
126 first_gfn = (val + vgpu_aperture_offset(vgpu)) >> PAGE_SHIFT; 138 first_gfn = (val + vgpu_aperture_offset(vgpu)) >> PAGE_SHIFT;
127 first_mfn = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;
128 139
129 ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, first_gfn, 140 ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, first_gfn,
130 first_mfn, 141 aperture_pa >> PAGE_SHIFT,
131 vgpu_aperture_sz(vgpu) >> 142 aperture_sz >> PAGE_SHIFT,
132 PAGE_SHIFT, map); 143 map);
133 if (ret) 144 if (ret) {
145 memunmap(vgpu->gm.aperture_va);
146 vgpu->gm.aperture_va = NULL;
134 return ret; 147 return ret;
148 }
135 149
136 vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map; 150 vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map;
137 return 0; 151 return 0;
@@ -275,7 +289,7 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
275 if (WARN_ON(bytes > 4)) 289 if (WARN_ON(bytes > 4))
276 return -EINVAL; 290 return -EINVAL;
277 291
278 if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ)) 292 if (WARN_ON(offset + bytes > vgpu->gvt->device_info.cfg_space_size))
279 return -EINVAL; 293 return -EINVAL;
280 294
281 /* First check if it's PCI_COMMAND */ 295 /* First check if it's PCI_COMMAND */
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 21c36e256884..2c0ccbb817dc 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1576,11 +1576,11 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
1576 return 1; 1576 return 1;
1577} 1577}
1578 1578
1579static uint32_t find_bb_size(struct parser_exec_state *s) 1579static int find_bb_size(struct parser_exec_state *s)
1580{ 1580{
1581 unsigned long gma = 0; 1581 unsigned long gma = 0;
1582 struct cmd_info *info; 1582 struct cmd_info *info;
1583 uint32_t bb_size = 0; 1583 int bb_size = 0;
1584 uint32_t cmd_len = 0; 1584 uint32_t cmd_len = 0;
1585 bool met_bb_end = false; 1585 bool met_bb_end = false;
1586 struct intel_vgpu *vgpu = s->vgpu; 1586 struct intel_vgpu *vgpu = s->vgpu;
@@ -1637,6 +1637,8 @@ static int perform_bb_shadow(struct parser_exec_state *s)
1637 1637
1638 /* get the size of the batch buffer */ 1638 /* get the size of the batch buffer */
1639 bb_size = find_bb_size(s); 1639 bb_size = find_bb_size(s);
1640 if (bb_size < 0)
1641 return -EINVAL;
1640 1642
1641 /* allocate shadow batch buffer */ 1643 /* allocate shadow batch buffer */
1642 entry_obj = kmalloc(sizeof(*entry_obj), GFP_KERNEL); 1644 entry_obj = kmalloc(sizeof(*entry_obj), GFP_KERNEL);
@@ -2603,7 +2605,8 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
2603{ 2605{
2604 struct intel_vgpu *vgpu = workload->vgpu; 2606 struct intel_vgpu *vgpu = workload->vgpu;
2605 unsigned long gma_head, gma_tail, gma_top, guest_rb_size; 2607 unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
2606 u32 *cs; 2608 void *shadow_ring_buffer_va;
2609 int ring_id = workload->ring_id;
2607 int ret; 2610 int ret;
2608 2611
2609 guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl); 2612 guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
@@ -2616,34 +2619,42 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
2616 gma_tail = workload->rb_start + workload->rb_tail; 2619 gma_tail = workload->rb_start + workload->rb_tail;
2617 gma_top = workload->rb_start + guest_rb_size; 2620 gma_top = workload->rb_start + guest_rb_size;
2618 2621
2619 /* allocate shadow ring buffer */ 2622 if (workload->rb_len > vgpu->reserve_ring_buffer_size[ring_id]) {
2620 cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32)); 2623 void *va = vgpu->reserve_ring_buffer_va[ring_id];
2621 if (IS_ERR(cs)) 2624 /* realloc the new ring buffer if needed */
2622 return PTR_ERR(cs); 2625 vgpu->reserve_ring_buffer_va[ring_id] =
2626 krealloc(va, workload->rb_len, GFP_KERNEL);
2627 if (!vgpu->reserve_ring_buffer_va[ring_id]) {
2628 gvt_vgpu_err("fail to alloc reserve ring buffer\n");
2629 return -ENOMEM;
2630 }
2631 vgpu->reserve_ring_buffer_size[ring_id] = workload->rb_len;
2632 }
2633
2634 shadow_ring_buffer_va = vgpu->reserve_ring_buffer_va[ring_id];
2623 2635
2624 /* get shadow ring buffer va */ 2636 /* get shadow ring buffer va */
2625 workload->shadow_ring_buffer_va = cs; 2637 workload->shadow_ring_buffer_va = shadow_ring_buffer_va;
2626 2638
2627 /* head > tail --> copy head <-> top */ 2639 /* head > tail --> copy head <-> top */
2628 if (gma_head > gma_tail) { 2640 if (gma_head > gma_tail) {
2629 ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, 2641 ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
2630 gma_head, gma_top, cs); 2642 gma_head, gma_top, shadow_ring_buffer_va);
2631 if (ret < 0) { 2643 if (ret < 0) {
2632 gvt_vgpu_err("fail to copy guest ring buffer\n"); 2644 gvt_vgpu_err("fail to copy guest ring buffer\n");
2633 return ret; 2645 return ret;
2634 } 2646 }
2635 cs += ret / sizeof(u32); 2647 shadow_ring_buffer_va += ret;
2636 gma_head = workload->rb_start; 2648 gma_head = workload->rb_start;
2637 } 2649 }
2638 2650
2639 /* copy head or start <-> tail */ 2651 /* copy head or start <-> tail */
2640 ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail, cs); 2652 ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail,
2653 shadow_ring_buffer_va);
2641 if (ret < 0) { 2654 if (ret < 0) {
2642 gvt_vgpu_err("fail to copy guest ring buffer\n"); 2655 gvt_vgpu_err("fail to copy guest ring buffer\n");
2643 return ret; 2656 return ret;
2644 } 2657 }
2645 cs += ret / sizeof(u32);
2646 intel_ring_advance(workload->req, cs);
2647 return 0; 2658 return 0;
2648} 2659}
2649 2660
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 91b4300f3b39..5ec07ecf33ad 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -368,7 +368,7 @@ static void free_workload(struct intel_vgpu_workload *workload)
368#define get_desc_from_elsp_dwords(ed, i) \ 368#define get_desc_from_elsp_dwords(ed, i) \
369 ((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2])) 369 ((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))
370 370
371static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) 371static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
372{ 372{
373 const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd; 373 const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
374 struct intel_shadow_bb_entry *entry_obj; 374 struct intel_shadow_bb_entry *entry_obj;
@@ -379,7 +379,7 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
379 379
380 vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0); 380 vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
381 if (IS_ERR(vma)) { 381 if (IS_ERR(vma)) {
382 return; 382 return PTR_ERR(vma);
383 } 383 }
384 384
385 /* FIXME: we are not tracking our pinned VMA leaving it 385 /* FIXME: we are not tracking our pinned VMA leaving it
@@ -392,6 +392,7 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
392 if (gmadr_bytes == 8) 392 if (gmadr_bytes == 8)
393 entry_obj->bb_start_cmd_va[2] = 0; 393 entry_obj->bb_start_cmd_va[2] = 0;
394 } 394 }
395 return 0;
395} 396}
396 397
397static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx) 398static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
@@ -420,7 +421,7 @@ static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
420 return 0; 421 return 0;
421} 422}
422 423
423static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) 424static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
424{ 425{
425 struct i915_vma *vma; 426 struct i915_vma *vma;
426 unsigned char *per_ctx_va = 427 unsigned char *per_ctx_va =
@@ -428,12 +429,12 @@ static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
428 wa_ctx->indirect_ctx.size; 429 wa_ctx->indirect_ctx.size;
429 430
430 if (wa_ctx->indirect_ctx.size == 0) 431 if (wa_ctx->indirect_ctx.size == 0)
431 return; 432 return 0;
432 433
433 vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL, 434 vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
434 0, CACHELINE_BYTES, 0); 435 0, CACHELINE_BYTES, 0);
435 if (IS_ERR(vma)) { 436 if (IS_ERR(vma)) {
436 return; 437 return PTR_ERR(vma);
437 } 438 }
438 439
439 /* FIXME: we are not tracking our pinned VMA leaving it 440 /* FIXME: we are not tracking our pinned VMA leaving it
@@ -447,26 +448,7 @@ static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
447 memset(per_ctx_va, 0, CACHELINE_BYTES); 448 memset(per_ctx_va, 0, CACHELINE_BYTES);
448 449
449 update_wa_ctx_2_shadow_ctx(wa_ctx); 450 update_wa_ctx_2_shadow_ctx(wa_ctx);
450} 451 return 0;
451
452static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
453{
454 struct intel_vgpu *vgpu = workload->vgpu;
455 struct execlist_ctx_descriptor_format ctx[2];
456 int ring_id = workload->ring_id;
457
458 intel_vgpu_pin_mm(workload->shadow_mm);
459 intel_vgpu_sync_oos_pages(workload->vgpu);
460 intel_vgpu_flush_post_shadow(workload->vgpu);
461 prepare_shadow_batch_buffer(workload);
462 prepare_shadow_wa_ctx(&workload->wa_ctx);
463 if (!workload->emulate_schedule_in)
464 return 0;
465
466 ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
467 ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
468
469 return emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx);
470} 452}
471 453
472static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload) 454static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
@@ -489,13 +471,62 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
489 } 471 }
490} 472}
491 473
492static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) 474static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
493{ 475{
494 if (!wa_ctx->indirect_ctx.obj) 476 struct intel_vgpu *vgpu = workload->vgpu;
495 return; 477 struct execlist_ctx_descriptor_format ctx[2];
478 int ring_id = workload->ring_id;
479 int ret;
480
481 ret = intel_vgpu_pin_mm(workload->shadow_mm);
482 if (ret) {
483 gvt_vgpu_err("fail to vgpu pin mm\n");
484 goto out;
485 }
486
487 ret = intel_vgpu_sync_oos_pages(workload->vgpu);
488 if (ret) {
489 gvt_vgpu_err("fail to vgpu sync oos pages\n");
490 goto err_unpin_mm;
491 }
496 492
497 i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj); 493 ret = intel_vgpu_flush_post_shadow(workload->vgpu);
498 i915_gem_object_put(wa_ctx->indirect_ctx.obj); 494 if (ret) {
495 gvt_vgpu_err("fail to flush post shadow\n");
496 goto err_unpin_mm;
497 }
498
499 ret = prepare_shadow_batch_buffer(workload);
500 if (ret) {
501 gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
502 goto err_unpin_mm;
503 }
504
505 ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
506 if (ret) {
507 gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n");
508 goto err_shadow_batch;
509 }
510
511 if (!workload->emulate_schedule_in)
512 return 0;
513
514 ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
515 ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
516
517 ret = emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx);
518 if (!ret)
519 goto out;
520 else
521 gvt_vgpu_err("fail to emulate execlist schedule in\n");
522
523 release_shadow_wa_ctx(&workload->wa_ctx);
524err_shadow_batch:
525 release_shadow_batch_buffer(workload);
526err_unpin_mm:
527 intel_vgpu_unpin_mm(workload->shadow_mm);
528out:
529 return ret;
499} 530}
500 531
501static int complete_execlist_workload(struct intel_vgpu_workload *workload) 532static int complete_execlist_workload(struct intel_vgpu_workload *workload)
@@ -511,8 +542,10 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
511 gvt_dbg_el("complete workload %p status %d\n", workload, 542 gvt_dbg_el("complete workload %p status %d\n", workload,
512 workload->status); 543 workload->status);
513 544
514 release_shadow_batch_buffer(workload); 545 if (!workload->status) {
515 release_shadow_wa_ctx(&workload->wa_ctx); 546 release_shadow_batch_buffer(workload);
547 release_shadow_wa_ctx(&workload->wa_ctx);
548 }
516 549
517 if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) { 550 if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
518 /* if workload->status is not successful means HW GPU 551 /* if workload->status is not successful means HW GPU
@@ -820,10 +853,21 @@ static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
820 853
821void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu) 854void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
822{ 855{
856 enum intel_engine_id i;
857 struct intel_engine_cs *engine;
858
823 clean_workloads(vgpu, ALL_ENGINES); 859 clean_workloads(vgpu, ALL_ENGINES);
824 kmem_cache_destroy(vgpu->workloads); 860 kmem_cache_destroy(vgpu->workloads);
861
862 for_each_engine(engine, vgpu->gvt->dev_priv, i) {
863 kfree(vgpu->reserve_ring_buffer_va[i]);
864 vgpu->reserve_ring_buffer_va[i] = NULL;
865 vgpu->reserve_ring_buffer_size[i] = 0;
866 }
867
825} 868}
826 869
870#define RESERVE_RING_BUFFER_SIZE ((1 * PAGE_SIZE)/8)
827int intel_vgpu_init_execlist(struct intel_vgpu *vgpu) 871int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
828{ 872{
829 enum intel_engine_id i; 873 enum intel_engine_id i;
@@ -843,7 +887,26 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
843 if (!vgpu->workloads) 887 if (!vgpu->workloads)
844 return -ENOMEM; 888 return -ENOMEM;
845 889
890 /* each ring has a shadow ring buffer until vgpu destroyed */
891 for_each_engine(engine, vgpu->gvt->dev_priv, i) {
892 vgpu->reserve_ring_buffer_va[i] =
893 kmalloc(RESERVE_RING_BUFFER_SIZE, GFP_KERNEL);
894 if (!vgpu->reserve_ring_buffer_va[i]) {
895 gvt_vgpu_err("fail to alloc reserve ring buffer\n");
896 goto out;
897 }
898 vgpu->reserve_ring_buffer_size[i] = RESERVE_RING_BUFFER_SIZE;
899 }
846 return 0; 900 return 0;
901out:
902 for_each_engine(engine, vgpu->gvt->dev_priv, i) {
903 if (vgpu->reserve_ring_buffer_size[i]) {
904 kfree(vgpu->reserve_ring_buffer_va[i]);
905 vgpu->reserve_ring_buffer_va[i] = NULL;
906 vgpu->reserve_ring_buffer_size[i] = 0;
907 }
908 }
909 return -ENOMEM;
847} 910}
848 911
849void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu, 912void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index e6dfc3331f4b..2801d70579d8 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1647,14 +1647,13 @@ int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
1647 if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT)) 1647 if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
1648 return 0; 1648 return 0;
1649 1649
1650 atomic_inc(&mm->pincount);
1651
1652 if (!mm->shadowed) { 1650 if (!mm->shadowed) {
1653 ret = shadow_mm(mm); 1651 ret = shadow_mm(mm);
1654 if (ret) 1652 if (ret)
1655 return ret; 1653 return ret;
1656 } 1654 }
1657 1655
1656 atomic_inc(&mm->pincount);
1658 list_del_init(&mm->lru_list); 1657 list_del_init(&mm->lru_list);
1659 list_add_tail(&mm->lru_list, &mm->vgpu->gvt->gtt.mm_lru_list_head); 1658 list_add_tail(&mm->lru_list, &mm->vgpu->gvt->gtt.mm_lru_list_head);
1660 return 0; 1659 return 0;
@@ -1972,7 +1971,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
1972 */ 1971 */
1973 se.val64 |= _PAGE_PRESENT | _PAGE_RW; 1972 se.val64 |= _PAGE_PRESENT | _PAGE_RW;
1974 if (type == GTT_TYPE_PPGTT_PDE_PT) 1973 if (type == GTT_TYPE_PPGTT_PDE_PT)
1975 se.val64 |= PPAT_CACHED_INDEX; 1974 se.val64 |= PPAT_CACHED;
1976 1975
1977 for (i = 0; i < page_entry_num; i++) 1976 for (i = 0; i < page_entry_num; i++)
1978 ops->set_entry(scratch_pt, &se, i, false, 0, vgpu); 1977 ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index c27c6838eaca..aaa347f8620c 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -111,7 +111,7 @@ static void init_device_info(struct intel_gvt *gvt)
111 if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv) 111 if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
112 || IS_KABYLAKE(gvt->dev_priv)) { 112 || IS_KABYLAKE(gvt->dev_priv)) {
113 info->max_support_vgpus = 8; 113 info->max_support_vgpus = 8;
114 info->cfg_space_size = 256; 114 info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
115 info->mmio_size = 2 * 1024 * 1024; 115 info->mmio_size = 2 * 1024 * 1024;
116 info->mmio_bar = 0; 116 info->mmio_bar = 0;
117 info->gtt_start_offset = 8 * 1024 * 1024; 117 info->gtt_start_offset = 8 * 1024 * 1024;
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 44b719eda8c4..9c2e7c0aa38f 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -80,6 +80,7 @@ struct intel_gvt_device_info {
80struct intel_vgpu_gm { 80struct intel_vgpu_gm {
81 u64 aperture_sz; 81 u64 aperture_sz;
82 u64 hidden_sz; 82 u64 hidden_sz;
83 void *aperture_va;
83 struct drm_mm_node low_gm_node; 84 struct drm_mm_node low_gm_node;
84 struct drm_mm_node high_gm_node; 85 struct drm_mm_node high_gm_node;
85}; 86};
@@ -99,7 +100,6 @@ struct intel_vgpu_mmio {
99 bool disable_warn_untrack; 100 bool disable_warn_untrack;
100}; 101};
101 102
102#define INTEL_GVT_MAX_CFG_SPACE_SZ 256
103#define INTEL_GVT_MAX_BAR_NUM 4 103#define INTEL_GVT_MAX_BAR_NUM 4
104 104
105struct intel_vgpu_pci_bar { 105struct intel_vgpu_pci_bar {
@@ -108,7 +108,7 @@ struct intel_vgpu_pci_bar {
108}; 108};
109 109
110struct intel_vgpu_cfg_space { 110struct intel_vgpu_cfg_space {
111 unsigned char virtual_cfg_space[INTEL_GVT_MAX_CFG_SPACE_SZ]; 111 unsigned char virtual_cfg_space[PCI_CFG_SPACE_EXP_SIZE];
112 struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM]; 112 struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM];
113}; 113};
114 114
@@ -165,6 +165,9 @@ struct intel_vgpu {
165 struct list_head workload_q_head[I915_NUM_ENGINES]; 165 struct list_head workload_q_head[I915_NUM_ENGINES];
166 struct kmem_cache *workloads; 166 struct kmem_cache *workloads;
167 atomic_t running_workload_num; 167 atomic_t running_workload_num;
168 /* 1/2K for each reserve ring buffer */
169 void *reserve_ring_buffer_va[I915_NUM_ENGINES];
170 int reserve_ring_buffer_size[I915_NUM_ENGINES];
168 DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES); 171 DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
169 struct i915_gem_context *shadow_ctx; 172 struct i915_gem_context *shadow_ctx;
170 DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES); 173 DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
@@ -474,6 +477,13 @@ int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
474int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset, 477int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
475 void *p_data, unsigned int bytes); 478 void *p_data, unsigned int bytes);
476 479
480static inline u64 intel_vgpu_get_bar_gpa(struct intel_vgpu *vgpu, int bar)
481{
482 /* We are 64bit bar. */
483 return (*(u64 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
484 PCI_BASE_ADDRESS_MEM_MASK;
485}
486
477void intel_gvt_clean_opregion(struct intel_gvt *gvt); 487void intel_gvt_clean_opregion(struct intel_gvt *gvt);
478int intel_gvt_init_opregion(struct intel_gvt *gvt); 488int intel_gvt_init_opregion(struct intel_gvt *gvt);
479 489
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 83e88c70272a..96060920a6fe 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -609,21 +609,20 @@ static void intel_vgpu_release_work(struct work_struct *work)
609 __intel_vgpu_release(vgpu); 609 __intel_vgpu_release(vgpu);
610} 610}
611 611
612static uint64_t intel_vgpu_get_bar0_addr(struct intel_vgpu *vgpu) 612static uint64_t intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
613{ 613{
614 u32 start_lo, start_hi; 614 u32 start_lo, start_hi;
615 u32 mem_type; 615 u32 mem_type;
616 int pos = PCI_BASE_ADDRESS_0;
617 616
618 start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + pos)) & 617 start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
619 PCI_BASE_ADDRESS_MEM_MASK; 618 PCI_BASE_ADDRESS_MEM_MASK;
620 mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + pos)) & 619 mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
621 PCI_BASE_ADDRESS_MEM_TYPE_MASK; 620 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
622 621
623 switch (mem_type) { 622 switch (mem_type) {
624 case PCI_BASE_ADDRESS_MEM_TYPE_64: 623 case PCI_BASE_ADDRESS_MEM_TYPE_64:
625 start_hi = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space 624 start_hi = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space
626 + pos + 4)); 625 + bar + 4));
627 break; 626 break;
628 case PCI_BASE_ADDRESS_MEM_TYPE_32: 627 case PCI_BASE_ADDRESS_MEM_TYPE_32:
629 case PCI_BASE_ADDRESS_MEM_TYPE_1M: 628 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
@@ -637,6 +636,21 @@ static uint64_t intel_vgpu_get_bar0_addr(struct intel_vgpu *vgpu)
637 return ((u64)start_hi << 32) | start_lo; 636 return ((u64)start_hi << 32) | start_lo;
638} 637}
639 638
639static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, uint64_t off,
640 void *buf, unsigned int count, bool is_write)
641{
642 uint64_t bar_start = intel_vgpu_get_bar_addr(vgpu, bar);
643 int ret;
644
645 if (is_write)
646 ret = intel_gvt_ops->emulate_mmio_write(vgpu,
647 bar_start + off, buf, count);
648 else
649 ret = intel_gvt_ops->emulate_mmio_read(vgpu,
650 bar_start + off, buf, count);
651 return ret;
652}
653
640static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf, 654static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
641 size_t count, loff_t *ppos, bool is_write) 655 size_t count, loff_t *ppos, bool is_write)
642{ 656{
@@ -661,20 +675,14 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
661 buf, count); 675 buf, count);
662 break; 676 break;
663 case VFIO_PCI_BAR0_REGION_INDEX: 677 case VFIO_PCI_BAR0_REGION_INDEX:
664 case VFIO_PCI_BAR1_REGION_INDEX: 678 ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_0, pos,
665 if (is_write) { 679 buf, count, is_write);
666 uint64_t bar0_start = intel_vgpu_get_bar0_addr(vgpu);
667
668 ret = intel_gvt_ops->emulate_mmio_write(vgpu,
669 bar0_start + pos, buf, count);
670 } else {
671 uint64_t bar0_start = intel_vgpu_get_bar0_addr(vgpu);
672
673 ret = intel_gvt_ops->emulate_mmio_read(vgpu,
674 bar0_start + pos, buf, count);
675 }
676 break; 680 break;
677 case VFIO_PCI_BAR2_REGION_INDEX: 681 case VFIO_PCI_BAR2_REGION_INDEX:
682 ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_2, pos,
683 buf, count, is_write);
684 break;
685 case VFIO_PCI_BAR1_REGION_INDEX:
678 case VFIO_PCI_BAR3_REGION_INDEX: 686 case VFIO_PCI_BAR3_REGION_INDEX:
679 case VFIO_PCI_BAR4_REGION_INDEX: 687 case VFIO_PCI_BAR4_REGION_INDEX:
680 case VFIO_PCI_BAR5_REGION_INDEX: 688 case VFIO_PCI_BAR5_REGION_INDEX:
@@ -970,7 +978,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
970 switch (info.index) { 978 switch (info.index) {
971 case VFIO_PCI_CONFIG_REGION_INDEX: 979 case VFIO_PCI_CONFIG_REGION_INDEX:
972 info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); 980 info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
973 info.size = INTEL_GVT_MAX_CFG_SPACE_SZ; 981 info.size = vgpu->gvt->device_info.cfg_space_size;
974 info.flags = VFIO_REGION_INFO_FLAG_READ | 982 info.flags = VFIO_REGION_INFO_FLAG_READ |
975 VFIO_REGION_INFO_FLAG_WRITE; 983 VFIO_REGION_INFO_FLAG_WRITE;
976 break; 984 break;
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 980ec8906b1e..1e1310f50289 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -45,8 +45,7 @@
45 */ 45 */
46int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa) 46int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
47{ 47{
48 u64 gttmmio_gpa = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0) & 48 u64 gttmmio_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0);
49 ~GENMASK(3, 0);
50 return gpa - gttmmio_gpa; 49 return gpa - gttmmio_gpa;
51} 50}
52 51
@@ -57,6 +56,38 @@ int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
57 (reg >= gvt->device_info.gtt_start_offset \ 56 (reg >= gvt->device_info.gtt_start_offset \
58 && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt)) 57 && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))
59 58
59static bool vgpu_gpa_is_aperture(struct intel_vgpu *vgpu, uint64_t gpa)
60{
61 u64 aperture_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_2);
62 u64 aperture_sz = vgpu_aperture_sz(vgpu);
63
64 return gpa >= aperture_gpa && gpa < aperture_gpa + aperture_sz;
65}
66
67static int vgpu_aperture_rw(struct intel_vgpu *vgpu, uint64_t gpa,
68 void *pdata, unsigned int size, bool is_read)
69{
70 u64 aperture_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_2);
71 u64 offset = gpa - aperture_gpa;
72
73 if (!vgpu_gpa_is_aperture(vgpu, gpa + size - 1)) {
74 gvt_vgpu_err("Aperture rw out of range, offset %llx, size %d\n",
75 offset, size);
76 return -EINVAL;
77 }
78
79 if (!vgpu->gm.aperture_va) {
80 gvt_vgpu_err("BAR is not enabled\n");
81 return -ENXIO;
82 }
83
84 if (is_read)
85 memcpy(pdata, vgpu->gm.aperture_va + offset, size);
86 else
87 memcpy(vgpu->gm.aperture_va + offset, pdata, size);
88 return 0;
89}
90
60static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa, 91static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
61 void *p_data, unsigned int bytes, bool read) 92 void *p_data, unsigned int bytes, bool read)
62{ 93{
@@ -133,6 +164,12 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
133 } 164 }
134 mutex_lock(&gvt->lock); 165 mutex_lock(&gvt->lock);
135 166
167 if (vgpu_gpa_is_aperture(vgpu, pa)) {
168 ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, true);
169 mutex_unlock(&gvt->lock);
170 return ret;
171 }
172
136 if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) { 173 if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
137 struct intel_vgpu_guest_page *gp; 174 struct intel_vgpu_guest_page *gp;
138 175
@@ -224,6 +261,12 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
224 261
225 mutex_lock(&gvt->lock); 262 mutex_lock(&gvt->lock);
226 263
264 if (vgpu_gpa_is_aperture(vgpu, pa)) {
265 ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, false);
266 mutex_unlock(&gvt->lock);
267 return ret;
268 }
269
227 if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) { 270 if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
228 struct intel_vgpu_guest_page *gp; 271 struct intel_vgpu_guest_page *gp;
229 272
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
index 2ea542257f03..6d066cf35478 100644
--- a/drivers/gpu/drm/i915/gvt/render.c
+++ b/drivers/gpu/drm/i915/gvt/render.c
@@ -293,7 +293,7 @@ static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
293 */ 293 */
294 if (mmio->in_context && 294 if (mmio->in_context &&
295 ((ctx_ctrl & inhibit_mask) != inhibit_mask) && 295 ((ctx_ctrl & inhibit_mask) != inhibit_mask) &&
296 i915.enable_execlists) 296 i915_modparams.enable_execlists)
297 continue; 297 continue;
298 298
299 if (mmio->mask) 299 if (mmio->mask)
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 391800d2067b..d5892d24f0b6 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -87,7 +87,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
87 return -EINVAL; 87 return -EINVAL;
88 } 88 }
89 89
90 page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i); 90 page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
91 dst = kmap(page); 91 dst = kmap(page);
92 intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst, 92 intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
93 GTT_PAGE_SIZE); 93 GTT_PAGE_SIZE);
@@ -201,6 +201,43 @@ static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
201 ce->lrc_desc = desc; 201 ce->lrc_desc = desc;
202} 202}
203 203
204static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
205{
206 struct intel_vgpu *vgpu = workload->vgpu;
207 void *shadow_ring_buffer_va;
208 u32 *cs;
209
210 /* allocate shadow ring buffer */
211 cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
212 if (IS_ERR(cs)) {
213 gvt_vgpu_err("fail to alloc size =%ld shadow ring buffer\n",
214 workload->rb_len);
215 return PTR_ERR(cs);
216 }
217
218 shadow_ring_buffer_va = workload->shadow_ring_buffer_va;
219
220 /* get shadow ring buffer va */
221 workload->shadow_ring_buffer_va = cs;
222
223 memcpy(cs, shadow_ring_buffer_va,
224 workload->rb_len);
225
226 cs += workload->rb_len / sizeof(u32);
227 intel_ring_advance(workload->req, cs);
228
229 return 0;
230}
231
232void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
233{
234 if (!wa_ctx->indirect_ctx.obj)
235 return;
236
237 i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
238 i915_gem_object_put(wa_ctx->indirect_ctx.obj);
239}
240
204/** 241/**
205 * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and 242 * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
206 * shadow it as well, include ringbuffer,wa_ctx and ctx. 243 * shadow it as well, include ringbuffer,wa_ctx and ctx.
@@ -214,8 +251,10 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
214 int ring_id = workload->ring_id; 251 int ring_id = workload->ring_id;
215 struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx; 252 struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
216 struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; 253 struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
254 struct intel_engine_cs *engine = dev_priv->engine[ring_id];
217 struct drm_i915_gem_request *rq; 255 struct drm_i915_gem_request *rq;
218 struct intel_vgpu *vgpu = workload->vgpu; 256 struct intel_vgpu *vgpu = workload->vgpu;
257 struct intel_ring *ring;
219 int ret; 258 int ret;
220 259
221 lockdep_assert_held(&dev_priv->drm.struct_mutex); 260 lockdep_assert_held(&dev_priv->drm.struct_mutex);
@@ -231,35 +270,56 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
231 shadow_context_descriptor_update(shadow_ctx, 270 shadow_context_descriptor_update(shadow_ctx,
232 dev_priv->engine[ring_id]); 271 dev_priv->engine[ring_id]);
233 272
234 rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
235 if (IS_ERR(rq)) {
236 gvt_vgpu_err("fail to allocate gem request\n");
237 ret = PTR_ERR(rq);
238 goto out;
239 }
240
241 gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
242
243 workload->req = i915_gem_request_get(rq);
244
245 ret = intel_gvt_scan_and_shadow_ringbuffer(workload); 273 ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
246 if (ret) 274 if (ret)
247 goto out; 275 goto err_scan;
248 276
249 if ((workload->ring_id == RCS) && 277 if ((workload->ring_id == RCS) &&
250 (workload->wa_ctx.indirect_ctx.size != 0)) { 278 (workload->wa_ctx.indirect_ctx.size != 0)) {
251 ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx); 279 ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
252 if (ret) 280 if (ret)
253 goto out; 281 goto err_scan;
282 }
283
284 /* pin shadow context by gvt even the shadow context will be pinned
285 * when i915 alloc request. That is because gvt will update the guest
286 * context from shadow context when workload is completed, and at that
287 * moment, i915 may already unpined the shadow context to make the
288 * shadow_ctx pages invalid. So gvt need to pin itself. After update
289 * the guest context, gvt can unpin the shadow_ctx safely.
290 */
291 ring = engine->context_pin(engine, shadow_ctx);
292 if (IS_ERR(ring)) {
293 ret = PTR_ERR(ring);
294 gvt_vgpu_err("fail to pin shadow context\n");
295 goto err_shadow;
254 } 296 }
255 297
256 ret = populate_shadow_context(workload); 298 ret = populate_shadow_context(workload);
257 if (ret) 299 if (ret)
258 goto out; 300 goto err_unpin;
259 301
302 rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
303 if (IS_ERR(rq)) {
304 gvt_vgpu_err("fail to allocate gem request\n");
305 ret = PTR_ERR(rq);
306 goto err_unpin;
307 }
308
309 gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
310
311 workload->req = i915_gem_request_get(rq);
312 ret = copy_workload_to_ring_buffer(workload);
313 if (ret)
314 goto err_unpin;
260 workload->shadowed = true; 315 workload->shadowed = true;
316 return 0;
261 317
262out: 318err_unpin:
319 engine->context_unpin(engine, shadow_ctx);
320err_shadow:
321 release_shadow_wa_ctx(&workload->wa_ctx);
322err_scan:
263 return ret; 323 return ret;
264} 324}
265 325
@@ -269,8 +329,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
269 struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx; 329 struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
270 struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; 330 struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
271 struct intel_engine_cs *engine = dev_priv->engine[ring_id]; 331 struct intel_engine_cs *engine = dev_priv->engine[ring_id];
272 struct intel_vgpu *vgpu = workload->vgpu;
273 struct intel_ring *ring;
274 int ret = 0; 332 int ret = 0;
275 333
276 gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n", 334 gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
@@ -284,22 +342,10 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
284 342
285 if (workload->prepare) { 343 if (workload->prepare) {
286 ret = workload->prepare(workload); 344 ret = workload->prepare(workload);
287 if (ret) 345 if (ret) {
346 engine->context_unpin(engine, shadow_ctx);
288 goto out; 347 goto out;
289 } 348 }
290
291 /* pin shadow context by gvt even the shadow context will be pinned
292 * when i915 alloc request. That is because gvt will update the guest
293 * context from shadow context when workload is completed, and at that
294 * moment, i915 may already unpined the shadow context to make the
295 * shadow_ctx pages invalid. So gvt need to pin itself. After update
296 * the guest context, gvt can unpin the shadow_ctx safely.
297 */
298 ring = engine->context_pin(engine, shadow_ctx);
299 if (IS_ERR(ring)) {
300 ret = PTR_ERR(ring);
301 gvt_vgpu_err("fail to pin shadow context\n");
302 goto out;
303 } 349 }
304 350
305out: 351out:
@@ -408,7 +454,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
408 return; 454 return;
409 } 455 }
410 456
411 page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i); 457 page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
412 src = kmap(page); 458 src = kmap(page);
413 intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src, 459 intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
414 GTT_PAGE_SIZE); 460 GTT_PAGE_SIZE);
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 0d431a968a32..f36b85fd6d01 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -140,4 +140,5 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu);
140 140
141void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu); 141void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu);
142 142
143void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
143#endif 144#endif
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e4d4b6b41e26..b4a6ac60e7c6 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -67,7 +67,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
67#undef PRINT_FLAG 67#undef PRINT_FLAG
68 68
69 kernel_param_lock(THIS_MODULE); 69 kernel_param_lock(THIS_MODULE);
70#define PRINT_PARAM(T, x) seq_print_param(m, #x, #T, &i915.x); 70#define PRINT_PARAM(T, x, ...) seq_print_param(m, #x, #T, &i915_modparams.x);
71 I915_PARAMS_FOR_EACH(PRINT_PARAM); 71 I915_PARAMS_FOR_EACH(PRINT_PARAM);
72#undef PRINT_PARAM 72#undef PRINT_PARAM
73 kernel_param_unlock(THIS_MODULE); 73 kernel_param_unlock(THIS_MODULE);
@@ -1267,7 +1267,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
1267 if (waitqueue_active(&dev_priv->gpu_error.reset_queue)) 1267 if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
1268 seq_puts(m, "struct_mutex blocked for reset\n"); 1268 seq_puts(m, "struct_mutex blocked for reset\n");
1269 1269
1270 if (!i915.enable_hangcheck) { 1270 if (!i915_modparams.enable_hangcheck) {
1271 seq_puts(m, "Hangcheck disabled\n"); 1271 seq_puts(m, "Hangcheck disabled\n");
1272 return 0; 1272 return 0;
1273 } 1273 }
@@ -1422,6 +1422,9 @@ static int i915_forcewake_domains(struct seq_file *m, void *data)
1422 struct intel_uncore_forcewake_domain *fw_domain; 1422 struct intel_uncore_forcewake_domain *fw_domain;
1423 unsigned int tmp; 1423 unsigned int tmp;
1424 1424
1425 seq_printf(m, "user.bypass_count = %u\n",
1426 i915->uncore.user_forcewake.count);
1427
1425 for_each_fw_domain(fw_domain, i915, tmp) 1428 for_each_fw_domain(fw_domain, i915, tmp)
1426 seq_printf(m, "%s.wake_count = %u\n", 1429 seq_printf(m, "%s.wake_count = %u\n",
1427 intel_uncore_forcewake_domain_to_str(fw_domain->id), 1430 intel_uncore_forcewake_domain_to_str(fw_domain->id),
@@ -1699,7 +1702,7 @@ static int i915_ips_status(struct seq_file *m, void *unused)
1699 intel_runtime_pm_get(dev_priv); 1702 intel_runtime_pm_get(dev_priv);
1700 1703
1701 seq_printf(m, "Enabled by kernel parameter: %s\n", 1704 seq_printf(m, "Enabled by kernel parameter: %s\n",
1702 yesno(i915.enable_ips)); 1705 yesno(i915_modparams.enable_ips));
1703 1706
1704 if (INTEL_GEN(dev_priv) >= 8) { 1707 if (INTEL_GEN(dev_priv) >= 8) {
1705 seq_puts(m, "Currently: unknown\n"); 1708 seq_puts(m, "Currently: unknown\n");
@@ -2014,7 +2017,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
2014 enum intel_engine_id id; 2017 enum intel_engine_id id;
2015 int ret; 2018 int ret;
2016 2019
2017 if (!i915.enable_execlists) { 2020 if (!i915_modparams.enable_execlists) {
2018 seq_printf(m, "Logical Ring Contexts are disabled\n"); 2021 seq_printf(m, "Logical Ring Contexts are disabled\n");
2019 return 0; 2022 return 0;
2020 } 2023 }
@@ -2443,12 +2446,8 @@ static void i915_guc_client_info(struct seq_file *m,
2443 2446
2444 seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n", 2447 seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
2445 client->priority, client->stage_id, client->proc_desc_offset); 2448 client->priority, client->stage_id, client->proc_desc_offset);
2446 seq_printf(m, "\tDoorbell id %d, offset: 0x%lx, cookie 0x%x\n", 2449 seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
2447 client->doorbell_id, client->doorbell_offset, client->doorbell_cookie); 2450 client->doorbell_id, client->doorbell_offset);
2448 seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n",
2449 client->wq_size, client->wq_offset, client->wq_tail);
2450
2451 seq_printf(m, "\tWork queue full: %u\n", client->no_wq_space);
2452 2451
2453 for_each_engine(engine, dev_priv, id) { 2452 for_each_engine(engine, dev_priv, id) {
2454 u64 submissions = client->submissions[id]; 2453 u64 submissions = client->submissions[id];
@@ -2594,7 +2593,7 @@ static int i915_guc_log_control_get(void *data, u64 *val)
2594 if (!dev_priv->guc.log.vma) 2593 if (!dev_priv->guc.log.vma)
2595 return -EINVAL; 2594 return -EINVAL;
2596 2595
2597 *val = i915.guc_log_level; 2596 *val = i915_modparams.guc_log_level;
2598 2597
2599 return 0; 2598 return 0;
2600} 2599}
@@ -3312,7 +3311,9 @@ static int i915_engine_info(struct seq_file *m, void *unused)
3312 seq_printf(m, "\tBBADDR: 0x%08x_%08x\n", 3311 seq_printf(m, "\tBBADDR: 0x%08x_%08x\n",
3313 upper_32_bits(addr), lower_32_bits(addr)); 3312 upper_32_bits(addr), lower_32_bits(addr));
3314 3313
3315 if (i915.enable_execlists) { 3314 if (i915_modparams.enable_execlists) {
3315 const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
3316 struct intel_engine_execlists * const execlists = &engine->execlists;
3316 u32 ptr, read, write; 3317 u32 ptr, read, write;
3317 unsigned int idx; 3318 unsigned int idx;
3318 3319
@@ -3323,8 +3324,10 @@ static int i915_engine_info(struct seq_file *m, void *unused)
3323 ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine)); 3324 ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
3324 read = GEN8_CSB_READ_PTR(ptr); 3325 read = GEN8_CSB_READ_PTR(ptr);
3325 write = GEN8_CSB_WRITE_PTR(ptr); 3326 write = GEN8_CSB_WRITE_PTR(ptr);
3326 seq_printf(m, "\tExeclist CSB read %d, write %d, interrupt posted? %s\n", 3327 seq_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], interrupt posted? %s\n",
3327 read, write, 3328 read, execlists->csb_head,
3329 write,
3330 intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)),
3328 yesno(test_bit(ENGINE_IRQ_EXECLIST, 3331 yesno(test_bit(ENGINE_IRQ_EXECLIST,
3329 &engine->irq_posted))); 3332 &engine->irq_posted)));
3330 if (read >= GEN8_CSB_ENTRIES) 3333 if (read >= GEN8_CSB_ENTRIES)
@@ -3335,18 +3338,19 @@ static int i915_engine_info(struct seq_file *m, void *unused)
3335 write += GEN8_CSB_ENTRIES; 3338 write += GEN8_CSB_ENTRIES;
3336 while (read < write) { 3339 while (read < write) {
3337 idx = ++read % GEN8_CSB_ENTRIES; 3340 idx = ++read % GEN8_CSB_ENTRIES;
3338 seq_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n", 3341 seq_printf(m, "\tExeclist CSB[%d]: 0x%08x [0x%08x in hwsp], context: %d [%d in hwsp]\n",
3339 idx, 3342 idx,
3340 I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)), 3343 I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
3341 I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx))); 3344 hws[idx * 2],
3345 I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)),
3346 hws[idx * 2 + 1]);
3342 } 3347 }
3343 3348
3344 rcu_read_lock(); 3349 rcu_read_lock();
3345 for (idx = 0; idx < ARRAY_SIZE(engine->execlist_port); idx++) { 3350 for (idx = 0; idx < execlists_num_ports(execlists); idx++) {
3346 unsigned int count; 3351 unsigned int count;
3347 3352
3348 rq = port_unpack(&engine->execlist_port[idx], 3353 rq = port_unpack(&execlists->port[idx], &count);
3349 &count);
3350 if (rq) { 3354 if (rq) {
3351 seq_printf(m, "\t\tELSP[%d] count=%d, ", 3355 seq_printf(m, "\t\tELSP[%d] count=%d, ",
3352 idx, count); 3356 idx, count);
@@ -3359,7 +3363,7 @@ static int i915_engine_info(struct seq_file *m, void *unused)
3359 rcu_read_unlock(); 3363 rcu_read_unlock();
3360 3364
3361 spin_lock_irq(&engine->timeline->lock); 3365 spin_lock_irq(&engine->timeline->lock);
3362 for (rb = engine->execlist_first; rb; rb = rb_next(rb)){ 3366 for (rb = execlists->first; rb; rb = rb_next(rb)) {
3363 struct i915_priolist *p = 3367 struct i915_priolist *p =
3364 rb_entry(rb, typeof(*p), node); 3368 rb_entry(rb, typeof(*p), node);
3365 3369
@@ -3403,7 +3407,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
3403 enum intel_engine_id id; 3407 enum intel_engine_id id;
3404 int j, ret; 3408 int j, ret;
3405 3409
3406 if (!i915.semaphores) { 3410 if (!i915_modparams.semaphores) {
3407 seq_puts(m, "Semaphores are disabled\n"); 3411 seq_puts(m, "Semaphores are disabled\n");
3408 return 0; 3412 return 0;
3409 } 3413 }
@@ -3523,6 +3527,57 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
3523 return 0; 3527 return 0;
3524} 3528}
3525 3529
3530static int i915_ipc_status_show(struct seq_file *m, void *data)
3531{
3532 struct drm_i915_private *dev_priv = m->private;
3533
3534 seq_printf(m, "Isochronous Priority Control: %s\n",
3535 yesno(dev_priv->ipc_enabled));
3536 return 0;
3537}
3538
3539static int i915_ipc_status_open(struct inode *inode, struct file *file)
3540{
3541 struct drm_i915_private *dev_priv = inode->i_private;
3542
3543 if (!HAS_IPC(dev_priv))
3544 return -ENODEV;
3545
3546 return single_open(file, i915_ipc_status_show, dev_priv);
3547}
3548
3549static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
3550 size_t len, loff_t *offp)
3551{
3552 struct seq_file *m = file->private_data;
3553 struct drm_i915_private *dev_priv = m->private;
3554 int ret;
3555 bool enable;
3556
3557 ret = kstrtobool_from_user(ubuf, len, &enable);
3558 if (ret < 0)
3559 return ret;
3560
3561 intel_runtime_pm_get(dev_priv);
3562 if (!dev_priv->ipc_enabled && enable)
3563 DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
3564 dev_priv->wm.distrust_bios_wm = true;
3565 dev_priv->ipc_enabled = enable;
3566 intel_enable_ipc(dev_priv);
3567 intel_runtime_pm_put(dev_priv);
3568
3569 return len;
3570}
3571
3572static const struct file_operations i915_ipc_status_fops = {
3573 .owner = THIS_MODULE,
3574 .open = i915_ipc_status_open,
3575 .read = seq_read,
3576 .llseek = seq_lseek,
3577 .release = single_release,
3578 .write = i915_ipc_status_write
3579};
3580
3526static int i915_ddb_info(struct seq_file *m, void *unused) 3581static int i915_ddb_info(struct seq_file *m, void *unused)
3527{ 3582{
3528 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3583 struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -4674,26 +4729,26 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
4674 4729
4675static int i915_forcewake_open(struct inode *inode, struct file *file) 4730static int i915_forcewake_open(struct inode *inode, struct file *file)
4676{ 4731{
4677 struct drm_i915_private *dev_priv = inode->i_private; 4732 struct drm_i915_private *i915 = inode->i_private;
4678 4733
4679 if (INTEL_GEN(dev_priv) < 6) 4734 if (INTEL_GEN(i915) < 6)
4680 return 0; 4735 return 0;
4681 4736
4682 intel_runtime_pm_get(dev_priv); 4737 intel_runtime_pm_get(i915);
4683 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 4738 intel_uncore_forcewake_user_get(i915);
4684 4739
4685 return 0; 4740 return 0;
4686} 4741}
4687 4742
4688static int i915_forcewake_release(struct inode *inode, struct file *file) 4743static int i915_forcewake_release(struct inode *inode, struct file *file)
4689{ 4744{
4690 struct drm_i915_private *dev_priv = inode->i_private; 4745 struct drm_i915_private *i915 = inode->i_private;
4691 4746
4692 if (INTEL_GEN(dev_priv) < 6) 4747 if (INTEL_GEN(i915) < 6)
4693 return 0; 4748 return 0;
4694 4749
4695 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 4750 intel_uncore_forcewake_user_put(i915);
4696 intel_runtime_pm_put(dev_priv); 4751 intel_runtime_pm_put(i915);
4697 4752
4698 return 0; 4753 return 0;
4699} 4754}
@@ -4859,7 +4914,8 @@ static const struct i915_debugfs_files {
4859 {"i915_dp_test_type", &i915_displayport_test_type_fops}, 4914 {"i915_dp_test_type", &i915_displayport_test_type_fops},
4860 {"i915_dp_test_active", &i915_displayport_test_active_fops}, 4915 {"i915_dp_test_active", &i915_displayport_test_active_fops},
4861 {"i915_guc_log_control", &i915_guc_log_control_fops}, 4916 {"i915_guc_log_control", &i915_guc_log_control_fops},
4862 {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops} 4917 {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
4918 {"i915_ipc_status", &i915_ipc_status_fops}
4863}; 4919};
4864 4920
4865int i915_debugfs_register(struct drm_i915_private *dev_priv) 4921int i915_debugfs_register(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ff70fc45ba7c..59ac9199b35d 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -58,12 +58,12 @@ static unsigned int i915_load_fail_count;
58 58
59bool __i915_inject_load_failure(const char *func, int line) 59bool __i915_inject_load_failure(const char *func, int line)
60{ 60{
61 if (i915_load_fail_count >= i915.inject_load_failure) 61 if (i915_load_fail_count >= i915_modparams.inject_load_failure)
62 return false; 62 return false;
63 63
64 if (++i915_load_fail_count == i915.inject_load_failure) { 64 if (++i915_load_fail_count == i915_modparams.inject_load_failure) {
65 DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n", 65 DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
66 i915.inject_load_failure, func, line); 66 i915_modparams.inject_load_failure, func, line);
67 return true; 67 return true;
68 } 68 }
69 69
@@ -106,8 +106,8 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
106 106
107static bool i915_error_injected(struct drm_i915_private *dev_priv) 107static bool i915_error_injected(struct drm_i915_private *dev_priv)
108{ 108{
109 return i915.inject_load_failure && 109 return i915_modparams.inject_load_failure &&
110 i915_load_fail_count == i915.inject_load_failure; 110 i915_load_fail_count == i915_modparams.inject_load_failure;
111} 111}
112 112
113#define i915_load_error(dev_priv, fmt, ...) \ 113#define i915_load_error(dev_priv, fmt, ...) \
@@ -321,7 +321,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
321 value = USES_PPGTT(dev_priv); 321 value = USES_PPGTT(dev_priv);
322 break; 322 break;
323 case I915_PARAM_HAS_SEMAPHORES: 323 case I915_PARAM_HAS_SEMAPHORES:
324 value = i915.semaphores; 324 value = i915_modparams.semaphores;
325 break; 325 break;
326 case I915_PARAM_HAS_SECURE_BATCHES: 326 case I915_PARAM_HAS_SECURE_BATCHES:
327 value = capable(CAP_SYS_ADMIN); 327 value = capable(CAP_SYS_ADMIN);
@@ -340,7 +340,8 @@ static int i915_getparam(struct drm_device *dev, void *data,
340 return -ENODEV; 340 return -ENODEV;
341 break; 341 break;
342 case I915_PARAM_HAS_GPU_RESET: 342 case I915_PARAM_HAS_GPU_RESET:
343 value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv); 343 value = i915_modparams.enable_hangcheck &&
344 intel_has_gpu_reset(dev_priv);
344 if (value && intel_has_reset_engine(dev_priv)) 345 if (value && intel_has_reset_engine(dev_priv))
345 value = 2; 346 value = 2;
346 break; 347 break;
@@ -869,6 +870,10 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
869 memcpy(device_info, match_info, sizeof(*device_info)); 870 memcpy(device_info, match_info, sizeof(*device_info));
870 device_info->device_id = dev_priv->drm.pdev->device; 871 device_info->device_id = dev_priv->drm.pdev->device;
871 872
873 BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
874 sizeof(device_info->platform_mask) * BITS_PER_BYTE);
875 device_info->platform_mask = BIT(device_info->platform);
876
872 BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE); 877 BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
873 device_info->gen_mask = BIT(device_info->gen - 1); 878 device_info->gen_mask = BIT(device_info->gen - 1);
874 879
@@ -1031,9 +1036,9 @@ static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
1031 1036
1032static void intel_sanitize_options(struct drm_i915_private *dev_priv) 1037static void intel_sanitize_options(struct drm_i915_private *dev_priv)
1033{ 1038{
1034 i915.enable_execlists = 1039 i915_modparams.enable_execlists =
1035 intel_sanitize_enable_execlists(dev_priv, 1040 intel_sanitize_enable_execlists(dev_priv,
1036 i915.enable_execlists); 1041 i915_modparams.enable_execlists);
1037 1042
1038 /* 1043 /*
1039 * i915.enable_ppgtt is read-only, so do an early pass to validate the 1044 * i915.enable_ppgtt is read-only, so do an early pass to validate the
@@ -1041,12 +1046,15 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
1041 * do this now so that we can print out any log messages once rather 1046 * do this now so that we can print out any log messages once rather
1042 * than every time we check intel_enable_ppgtt(). 1047 * than every time we check intel_enable_ppgtt().
1043 */ 1048 */
1044 i915.enable_ppgtt = 1049 i915_modparams.enable_ppgtt =
1045 intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt); 1050 intel_sanitize_enable_ppgtt(dev_priv,
1046 DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt); 1051 i915_modparams.enable_ppgtt);
1052 DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915_modparams.enable_ppgtt);
1047 1053
1048 i915.semaphores = intel_sanitize_semaphores(dev_priv, i915.semaphores); 1054 i915_modparams.semaphores =
1049 DRM_DEBUG_DRIVER("use GPU semaphores? %s\n", yesno(i915.semaphores)); 1055 intel_sanitize_semaphores(dev_priv, i915_modparams.semaphores);
1056 DRM_DEBUG_DRIVER("use GPU semaphores? %s\n",
1057 yesno(i915_modparams.semaphores));
1050 1058
1051 intel_uc_sanitize_options(dev_priv); 1059 intel_uc_sanitize_options(dev_priv);
1052 1060
@@ -1277,7 +1285,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
1277 int ret; 1285 int ret;
1278 1286
1279 /* Enable nuclear pageflip on ILK+ */ 1287 /* Enable nuclear pageflip on ILK+ */
1280 if (!i915.nuclear_pageflip && match_info->gen < 5) 1288 if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
1281 driver.driver_features &= ~DRIVER_ATOMIC; 1289 driver.driver_features &= ~DRIVER_ATOMIC;
1282 1290
1283 ret = -ENOMEM; 1291 ret = -ENOMEM;
@@ -1341,7 +1349,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
1341 1349
1342 intel_runtime_pm_enable(dev_priv); 1350 intel_runtime_pm_enable(dev_priv);
1343 1351
1344 dev_priv->ipc_enabled = false; 1352 intel_init_ipc(dev_priv);
1345 1353
1346 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG)) 1354 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
1347 DRM_INFO("DRM_I915_DEBUG enabled\n"); 1355 DRM_INFO("DRM_I915_DEBUG enabled\n");
@@ -2609,6 +2617,8 @@ static int intel_runtime_resume(struct device *kdev)
2609 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) 2617 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
2610 intel_hpd_init(dev_priv); 2618 intel_hpd_init(dev_priv);
2611 2619
2620 intel_enable_ipc(dev_priv);
2621
2612 enable_rpm_wakeref_asserts(dev_priv); 2622 enable_rpm_wakeref_asserts(dev_priv);
2613 2623
2614 if (ret) 2624 if (ret)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b1fa81348ee9..7ca11318ac69 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -80,8 +80,8 @@
80 80
81#define DRIVER_NAME "i915" 81#define DRIVER_NAME "i915"
82#define DRIVER_DESC "Intel Graphics" 82#define DRIVER_DESC "Intel Graphics"
83#define DRIVER_DATE "20170907" 83#define DRIVER_DATE "20170929"
84#define DRIVER_TIMESTAMP 1504772900 84#define DRIVER_TIMESTAMP 1506682238
85 85
86/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and 86/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
87 * WARN_ON()) for hw state sanity checks to check for unexpected conditions 87 * WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -93,7 +93,7 @@
93#define I915_STATE_WARN(condition, format...) ({ \ 93#define I915_STATE_WARN(condition, format...) ({ \
94 int __ret_warn_on = !!(condition); \ 94 int __ret_warn_on = !!(condition); \
95 if (unlikely(__ret_warn_on)) \ 95 if (unlikely(__ret_warn_on)) \
96 if (!WARN(i915.verbose_state_checks, format)) \ 96 if (!WARN(i915_modparams.verbose_state_checks, format)) \
97 DRM_ERROR(format); \ 97 DRM_ERROR(format); \
98 unlikely(__ret_warn_on); \ 98 unlikely(__ret_warn_on); \
99}) 99})
@@ -126,7 +126,7 @@ static inline uint_fixed_16_16_t u32_to_fixed16(uint32_t val)
126{ 126{
127 uint_fixed_16_16_t fp; 127 uint_fixed_16_16_t fp;
128 128
129 WARN_ON(val >> 16); 129 WARN_ON(val > U16_MAX);
130 130
131 fp.val = val << 16; 131 fp.val = val << 16;
132 return fp; 132 return fp;
@@ -163,8 +163,8 @@ static inline uint_fixed_16_16_t max_fixed16(uint_fixed_16_16_t max1,
163static inline uint_fixed_16_16_t clamp_u64_to_fixed16(uint64_t val) 163static inline uint_fixed_16_16_t clamp_u64_to_fixed16(uint64_t val)
164{ 164{
165 uint_fixed_16_16_t fp; 165 uint_fixed_16_16_t fp;
166 WARN_ON(val >> 32); 166 WARN_ON(val > U32_MAX);
167 fp.val = clamp_t(uint32_t, val, 0, ~0); 167 fp.val = (uint32_t) val;
168 return fp; 168 return fp;
169} 169}
170 170
@@ -181,8 +181,8 @@ static inline uint32_t mul_round_up_u32_fixed16(uint32_t val,
181 181
182 intermediate_val = (uint64_t) val * mul.val; 182 intermediate_val = (uint64_t) val * mul.val;
183 intermediate_val = DIV_ROUND_UP_ULL(intermediate_val, 1 << 16); 183 intermediate_val = DIV_ROUND_UP_ULL(intermediate_val, 1 << 16);
184 WARN_ON(intermediate_val >> 32); 184 WARN_ON(intermediate_val > U32_MAX);
185 return clamp_t(uint32_t, intermediate_val, 0, ~0); 185 return (uint32_t) intermediate_val;
186} 186}
187 187
188static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val, 188static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val,
@@ -211,8 +211,8 @@ static inline uint32_t div_round_up_u32_fixed16(uint32_t val,
211 211
212 interm_val = (uint64_t)val << 16; 212 interm_val = (uint64_t)val << 16;
213 interm_val = DIV_ROUND_UP_ULL(interm_val, d.val); 213 interm_val = DIV_ROUND_UP_ULL(interm_val, d.val);
214 WARN_ON(interm_val >> 32); 214 WARN_ON(interm_val > U32_MAX);
215 return clamp_t(uint32_t, interm_val, 0, ~0); 215 return (uint32_t) interm_val;
216} 216}
217 217
218static inline uint_fixed_16_16_t mul_u32_fixed16(uint32_t val, 218static inline uint_fixed_16_16_t mul_u32_fixed16(uint32_t val,
@@ -776,7 +776,6 @@ struct intel_csr {
776 func(has_fpga_dbg); \ 776 func(has_fpga_dbg); \
777 func(has_full_ppgtt); \ 777 func(has_full_ppgtt); \
778 func(has_full_48bit_ppgtt); \ 778 func(has_full_48bit_ppgtt); \
779 func(has_gmbus_irq); \
780 func(has_gmch_display); \ 779 func(has_gmch_display); \
781 func(has_guc); \ 780 func(has_guc); \
782 func(has_guc_ct); \ 781 func(has_guc_ct); \
@@ -797,7 +796,8 @@ struct intel_csr {
797 func(cursor_needs_physical); \ 796 func(cursor_needs_physical); \
798 func(hws_needs_physical); \ 797 func(hws_needs_physical); \
799 func(overlay_needs_physical); \ 798 func(overlay_needs_physical); \
800 func(supports_tv); 799 func(supports_tv); \
800 func(has_ipc);
801 801
802struct sseu_dev_info { 802struct sseu_dev_info {
803 u8 slice_mask; 803 u8 slice_mask;
@@ -851,21 +851,28 @@ enum intel_platform {
851}; 851};
852 852
853struct intel_device_info { 853struct intel_device_info {
854 u32 display_mmio_offset;
855 u16 device_id; 854 u16 device_id;
856 u8 num_pipes;
857 u8 num_sprites[I915_MAX_PIPES];
858 u8 num_scalers[I915_MAX_PIPES];
859 u8 gen;
860 u16 gen_mask; 855 u16 gen_mask;
861 enum intel_platform platform; 856
857 u8 gen;
862 u8 gt; /* GT number, 0 if undefined */ 858 u8 gt; /* GT number, 0 if undefined */
863 u8 ring_mask; /* Rings supported by the HW */
864 u8 num_rings; 859 u8 num_rings;
860 u8 ring_mask; /* Rings supported by the HW */
861
862 enum intel_platform platform;
863 u32 platform_mask;
864
865 u32 display_mmio_offset;
866
867 u8 num_pipes;
868 u8 num_sprites[I915_MAX_PIPES];
869 u8 num_scalers[I915_MAX_PIPES];
870
865#define DEFINE_FLAG(name) u8 name:1 871#define DEFINE_FLAG(name) u8 name:1
866 DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG); 872 DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
867#undef DEFINE_FLAG 873#undef DEFINE_FLAG
868 u16 ddb_size; /* in blocks */ 874 u16 ddb_size; /* in blocks */
875
869 /* Register offsets for the various display pipes and transcoders */ 876 /* Register offsets for the various display pipes and transcoders */
870 int pipe_offsets[I915_MAX_TRANSCODERS]; 877 int pipe_offsets[I915_MAX_TRANSCODERS];
871 int trans_offsets[I915_MAX_TRANSCODERS]; 878 int trans_offsets[I915_MAX_TRANSCODERS];
@@ -1000,7 +1007,8 @@ struct i915_gpu_state {
1000 u32 seqno; 1007 u32 seqno;
1001 u32 head; 1008 u32 head;
1002 u32 tail; 1009 u32 tail;
1003 } *requests, execlist[2]; 1010 } *requests, execlist[EXECLIST_MAX_PORTS];
1011 unsigned int num_ports;
1004 1012
1005 struct drm_i915_error_waiter { 1013 struct drm_i915_error_waiter {
1006 char comm[TASK_COMM_LEN]; 1014 char comm[TASK_COMM_LEN];
@@ -1178,6 +1186,14 @@ struct i915_psr {
1178 bool y_cord_support; 1186 bool y_cord_support;
1179 bool colorimetry_support; 1187 bool colorimetry_support;
1180 bool alpm; 1188 bool alpm;
1189
1190 void (*enable_source)(struct intel_dp *,
1191 const struct intel_crtc_state *);
1192 void (*disable_source)(struct intel_dp *,
1193 const struct intel_crtc_state *);
1194 void (*enable_sink)(struct intel_dp *);
1195 void (*activate)(struct intel_dp *);
1196 void (*setup_vsc)(struct intel_dp *, const struct intel_crtc_state *);
1181}; 1197};
1182 1198
1183enum intel_pch { 1199enum intel_pch {
@@ -1836,6 +1852,20 @@ struct skl_wm_level {
1836 uint8_t plane_res_l; 1852 uint8_t plane_res_l;
1837}; 1853};
1838 1854
1855/* Stores plane specific WM parameters */
1856struct skl_wm_params {
1857 bool x_tiled, y_tiled;
1858 bool rc_surface;
1859 uint32_t width;
1860 uint8_t cpp;
1861 uint32_t plane_pixel_rate;
1862 uint32_t y_min_scanlines;
1863 uint32_t plane_bytes_per_line;
1864 uint_fixed_16_16_t plane_blocks_per_line;
1865 uint_fixed_16_16_t y_tile_minimum;
1866 uint32_t linetime_us;
1867};
1868
1839/* 1869/*
1840 * This struct helps tracking the state needed for runtime PM, which puts the 1870 * This struct helps tracking the state needed for runtime PM, which puts the
1841 * device in PCI D3 state. Notice that when this happens, nothing on the 1871 * device in PCI D3 state. Notice that when this happens, nothing on the
@@ -2331,6 +2361,8 @@ struct drm_i915_private {
2331 DECLARE_HASHTABLE(mm_structs, 7); 2361 DECLARE_HASHTABLE(mm_structs, 7);
2332 struct mutex mm_lock; 2362 struct mutex mm_lock;
2333 2363
2364 struct intel_ppat ppat;
2365
2334 /* Kernel Modesetting */ 2366 /* Kernel Modesetting */
2335 2367
2336 struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES]; 2368 struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
@@ -2811,8 +2843,8 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
2811#define for_each_sgt_dma(__dmap, __iter, __sgt) \ 2843#define for_each_sgt_dma(__dmap, __iter, __sgt) \
2812 for ((__iter) = __sgt_iter((__sgt)->sgl, true); \ 2844 for ((__iter) = __sgt_iter((__sgt)->sgl, true); \
2813 ((__dmap) = (__iter).dma + (__iter).curr); \ 2845 ((__dmap) = (__iter).dma + (__iter).curr); \
2814 (((__iter).curr += PAGE_SIZE) < (__iter).max) || \ 2846 (((__iter).curr += PAGE_SIZE) >= (__iter).max) ? \
2815 ((__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0)) 2847 (__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0)
2816 2848
2817/** 2849/**
2818 * for_each_sgt_page - iterate over the pages of the given sg_table 2850 * for_each_sgt_page - iterate over the pages of the given sg_table
@@ -2824,8 +2856,23 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
2824 for ((__iter) = __sgt_iter((__sgt)->sgl, false); \ 2856 for ((__iter) = __sgt_iter((__sgt)->sgl, false); \
2825 ((__pp) = (__iter).pfn == 0 ? NULL : \ 2857 ((__pp) = (__iter).pfn == 0 ? NULL : \
2826 pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \ 2858 pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \
2827 (((__iter).curr += PAGE_SIZE) < (__iter).max) || \ 2859 (((__iter).curr += PAGE_SIZE) >= (__iter).max) ? \
2828 ((__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0)) 2860 (__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0)
2861
2862static inline unsigned int i915_sg_segment_size(void)
2863{
2864 unsigned int size = swiotlb_max_segment();
2865
2866 if (size == 0)
2867 return SCATTERLIST_MAX_SEGMENT;
2868
2869 size = rounddown(size, PAGE_SIZE);
2870 /* swiotlb_max_segment_size can return 1 byte when it means one page. */
2871 if (size < PAGE_SIZE)
2872 size = PAGE_SIZE;
2873
2874 return size;
2875}
2829 2876
2830static inline const struct intel_device_info * 2877static inline const struct intel_device_info *
2831intel_info(const struct drm_i915_private *dev_priv) 2878intel_info(const struct drm_i915_private *dev_priv)
@@ -2842,23 +2889,21 @@ intel_info(const struct drm_i915_private *dev_priv)
2842#define INTEL_REVID(dev_priv) ((dev_priv)->drm.pdev->revision) 2889#define INTEL_REVID(dev_priv) ((dev_priv)->drm.pdev->revision)
2843 2890
2844#define GEN_FOREVER (0) 2891#define GEN_FOREVER (0)
2892
2893#define INTEL_GEN_MASK(s, e) ( \
2894 BUILD_BUG_ON_ZERO(!__builtin_constant_p(s)) + \
2895 BUILD_BUG_ON_ZERO(!__builtin_constant_p(e)) + \
2896 GENMASK((e) != GEN_FOREVER ? (e) - 1 : BITS_PER_LONG - 1, \
2897 (s) != GEN_FOREVER ? (s) - 1 : 0) \
2898)
2899
2845/* 2900/*
2846 * Returns true if Gen is in inclusive range [Start, End]. 2901 * Returns true if Gen is in inclusive range [Start, End].
2847 * 2902 *
2848 * Use GEN_FOREVER for unbound start and or end. 2903 * Use GEN_FOREVER for unbound start and or end.
2849 */ 2904 */
2850#define IS_GEN(dev_priv, s, e) ({ \ 2905#define IS_GEN(dev_priv, s, e) \
2851 unsigned int __s = (s), __e = (e); \ 2906 (!!((dev_priv)->info.gen_mask & INTEL_GEN_MASK((s), (e))))
2852 BUILD_BUG_ON(!__builtin_constant_p(s)); \
2853 BUILD_BUG_ON(!__builtin_constant_p(e)); \
2854 if ((__s) != GEN_FOREVER) \
2855 __s = (s) - 1; \
2856 if ((__e) == GEN_FOREVER) \
2857 __e = BITS_PER_LONG - 1; \
2858 else \
2859 __e = (e) - 1; \
2860 !!((dev_priv)->info.gen_mask & GENMASK((__e), (__s))); \
2861})
2862 2907
2863/* 2908/*
2864 * Return true if revision is in range [since,until] inclusive. 2909 * Return true if revision is in range [since,until] inclusive.
@@ -2868,37 +2913,39 @@ intel_info(const struct drm_i915_private *dev_priv)
2868#define IS_REVID(p, since, until) \ 2913#define IS_REVID(p, since, until) \
2869 (INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until)) 2914 (INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
2870 2915
2871#define IS_I830(dev_priv) ((dev_priv)->info.platform == INTEL_I830) 2916#define IS_PLATFORM(dev_priv, p) ((dev_priv)->info.platform_mask & BIT(p))
2872#define IS_I845G(dev_priv) ((dev_priv)->info.platform == INTEL_I845G) 2917
2873#define IS_I85X(dev_priv) ((dev_priv)->info.platform == INTEL_I85X) 2918#define IS_I830(dev_priv) IS_PLATFORM(dev_priv, INTEL_I830)
2874#define IS_I865G(dev_priv) ((dev_priv)->info.platform == INTEL_I865G) 2919#define IS_I845G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I845G)
2875#define IS_I915G(dev_priv) ((dev_priv)->info.platform == INTEL_I915G) 2920#define IS_I85X(dev_priv) IS_PLATFORM(dev_priv, INTEL_I85X)
2876#define IS_I915GM(dev_priv) ((dev_priv)->info.platform == INTEL_I915GM) 2921#define IS_I865G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I865G)
2877#define IS_I945G(dev_priv) ((dev_priv)->info.platform == INTEL_I945G) 2922#define IS_I915G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I915G)
2878#define IS_I945GM(dev_priv) ((dev_priv)->info.platform == INTEL_I945GM) 2923#define IS_I915GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I915GM)
2879#define IS_I965G(dev_priv) ((dev_priv)->info.platform == INTEL_I965G) 2924#define IS_I945G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I945G)
2880#define IS_I965GM(dev_priv) ((dev_priv)->info.platform == INTEL_I965GM) 2925#define IS_I945GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I945GM)
2881#define IS_G45(dev_priv) ((dev_priv)->info.platform == INTEL_G45) 2926#define IS_I965G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I965G)
2882#define IS_GM45(dev_priv) ((dev_priv)->info.platform == INTEL_GM45) 2927#define IS_I965GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I965GM)
2928#define IS_G45(dev_priv) IS_PLATFORM(dev_priv, INTEL_G45)
2929#define IS_GM45(dev_priv) IS_PLATFORM(dev_priv, INTEL_GM45)
2883#define IS_G4X(dev_priv) (IS_G45(dev_priv) || IS_GM45(dev_priv)) 2930#define IS_G4X(dev_priv) (IS_G45(dev_priv) || IS_GM45(dev_priv))
2884#define IS_PINEVIEW_G(dev_priv) (INTEL_DEVID(dev_priv) == 0xa001) 2931#define IS_PINEVIEW_G(dev_priv) (INTEL_DEVID(dev_priv) == 0xa001)
2885#define IS_PINEVIEW_M(dev_priv) (INTEL_DEVID(dev_priv) == 0xa011) 2932#define IS_PINEVIEW_M(dev_priv) (INTEL_DEVID(dev_priv) == 0xa011)
2886#define IS_PINEVIEW(dev_priv) ((dev_priv)->info.platform == INTEL_PINEVIEW) 2933#define IS_PINEVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_PINEVIEW)
2887#define IS_G33(dev_priv) ((dev_priv)->info.platform == INTEL_G33) 2934#define IS_G33(dev_priv) IS_PLATFORM(dev_priv, INTEL_G33)
2888#define IS_IRONLAKE_M(dev_priv) (INTEL_DEVID(dev_priv) == 0x0046) 2935#define IS_IRONLAKE_M(dev_priv) (INTEL_DEVID(dev_priv) == 0x0046)
2889#define IS_IVYBRIDGE(dev_priv) ((dev_priv)->info.platform == INTEL_IVYBRIDGE) 2936#define IS_IVYBRIDGE(dev_priv) IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE)
2890#define IS_IVB_GT1(dev_priv) (IS_IVYBRIDGE(dev_priv) && \ 2937#define IS_IVB_GT1(dev_priv) (IS_IVYBRIDGE(dev_priv) && \
2891 (dev_priv)->info.gt == 1) 2938 (dev_priv)->info.gt == 1)
2892#define IS_VALLEYVIEW(dev_priv) ((dev_priv)->info.platform == INTEL_VALLEYVIEW) 2939#define IS_VALLEYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW)
2893#define IS_CHERRYVIEW(dev_priv) ((dev_priv)->info.platform == INTEL_CHERRYVIEW) 2940#define IS_CHERRYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW)
2894#define IS_HASWELL(dev_priv) ((dev_priv)->info.platform == INTEL_HASWELL) 2941#define IS_HASWELL(dev_priv) IS_PLATFORM(dev_priv, INTEL_HASWELL)
2895#define IS_BROADWELL(dev_priv) ((dev_priv)->info.platform == INTEL_BROADWELL) 2942#define IS_BROADWELL(dev_priv) IS_PLATFORM(dev_priv, INTEL_BROADWELL)
2896#define IS_SKYLAKE(dev_priv) ((dev_priv)->info.platform == INTEL_SKYLAKE) 2943#define IS_SKYLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_SKYLAKE)
2897#define IS_BROXTON(dev_priv) ((dev_priv)->info.platform == INTEL_BROXTON) 2944#define IS_BROXTON(dev_priv) IS_PLATFORM(dev_priv, INTEL_BROXTON)
2898#define IS_KABYLAKE(dev_priv) ((dev_priv)->info.platform == INTEL_KABYLAKE) 2945#define IS_KABYLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_KABYLAKE)
2899#define IS_GEMINILAKE(dev_priv) ((dev_priv)->info.platform == INTEL_GEMINILAKE) 2946#define IS_GEMINILAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_GEMINILAKE)
2900#define IS_COFFEELAKE(dev_priv) ((dev_priv)->info.platform == INTEL_COFFEELAKE) 2947#define IS_COFFEELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
2901#define IS_CANNONLAKE(dev_priv) ((dev_priv)->info.platform == INTEL_CANNONLAKE) 2948#define IS_CANNONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_CANNONLAKE)
2902#define IS_MOBILE(dev_priv) ((dev_priv)->info.is_mobile) 2949#define IS_MOBILE(dev_priv) ((dev_priv)->info.is_mobile)
2903#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \ 2950#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
2904 (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00) 2951 (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
@@ -2946,6 +2993,8 @@ intel_info(const struct drm_i915_private *dev_priv)
2946 (dev_priv)->info.gt == 3) 2993 (dev_priv)->info.gt == 3)
2947#define IS_CFL_ULT(dev_priv) (IS_COFFEELAKE(dev_priv) && \ 2994#define IS_CFL_ULT(dev_priv) (IS_COFFEELAKE(dev_priv) && \
2948 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x00A0) 2995 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x00A0)
2996#define IS_CFL_GT2(dev_priv) (IS_COFFEELAKE(dev_priv) && \
2997 (dev_priv)->info.gt == 2)
2949 2998
2950#define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support) 2999#define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support)
2951 3000
@@ -3036,9 +3085,9 @@ intel_info(const struct drm_i915_private *dev_priv)
3036 3085
3037#define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \ 3086#define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
3038 ((dev_priv)->info.has_logical_ring_contexts) 3087 ((dev_priv)->info.has_logical_ring_contexts)
3039#define USES_PPGTT(dev_priv) (i915.enable_ppgtt) 3088#define USES_PPGTT(dev_priv) (i915_modparams.enable_ppgtt)
3040#define USES_FULL_PPGTT(dev_priv) (i915.enable_ppgtt >= 2) 3089#define USES_FULL_PPGTT(dev_priv) (i915_modparams.enable_ppgtt >= 2)
3041#define USES_FULL_48BIT_PPGTT(dev_priv) (i915.enable_ppgtt == 3) 3090#define USES_FULL_48BIT_PPGTT(dev_priv) (i915_modparams.enable_ppgtt == 3)
3042 3091
3043#define HAS_OVERLAY(dev_priv) ((dev_priv)->info.has_overlay) 3092#define HAS_OVERLAY(dev_priv) ((dev_priv)->info.has_overlay)
3044#define OVERLAY_NEEDS_PHYSICAL(dev_priv) \ 3093#define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
@@ -3056,9 +3105,12 @@ intel_info(const struct drm_i915_private *dev_priv)
3056 * even when in MSI mode. This results in spurious interrupt warnings if the 3105 * even when in MSI mode. This results in spurious interrupt warnings if the
3057 * legacy irq no. is shared with another device. The kernel then disables that 3106 * legacy irq no. is shared with another device. The kernel then disables that
3058 * interrupt source and so prevents the other device from working properly. 3107 * interrupt source and so prevents the other device from working properly.
3108 *
3109 * Since we don't enable MSI anymore on gen4, we can always use GMBUS/AUX
3110 * interrupts.
3059 */ 3111 */
3060#define HAS_AUX_IRQ(dev_priv) ((dev_priv)->info.gen >= 5) 3112#define HAS_AUX_IRQ(dev_priv) true
3061#define HAS_GMBUS_IRQ(dev_priv) ((dev_priv)->info.has_gmbus_irq) 3113#define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4)
3062 3114
3063/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte 3115/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
3064 * rows, which changed the alignment requirements and fence programming. 3116 * rows, which changed the alignment requirements and fence programming.
@@ -3089,6 +3141,8 @@ intel_info(const struct drm_i915_private *dev_priv)
3089#define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm) 3141#define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm)
3090#define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc) 3142#define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc)
3091 3143
3144#define HAS_IPC(dev_priv) ((dev_priv)->info.has_ipc)
3145
3092/* 3146/*
3093 * For now, anything with a GuC requires uCode loading, and then supports 3147 * For now, anything with a GuC requires uCode loading, and then supports
3094 * command submission once loaded. But these are logically independent 3148 * command submission once loaded. But these are logically independent
@@ -3234,7 +3288,7 @@ static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
3234{ 3288{
3235 unsigned long delay; 3289 unsigned long delay;
3236 3290
3237 if (unlikely(!i915.enable_hangcheck)) 3291 if (unlikely(!i915_modparams.enable_hangcheck))
3238 return; 3292 return;
3239 3293
3240 /* Don't continually defer the hangcheck so that it is always run at 3294 /* Don't continually defer the hangcheck so that it is always run at
@@ -3267,6 +3321,8 @@ static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
3267 return dev_priv->vgpu.active; 3321 return dev_priv->vgpu.active;
3268} 3322}
3269 3323
3324u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
3325 enum pipe pipe);
3270void 3326void
3271i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 3327i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
3272 u32 status_mask); 3328 u32 status_mask);
@@ -4360,4 +4416,12 @@ int remap_io_mapping(struct vm_area_struct *vma,
4360 unsigned long addr, unsigned long pfn, unsigned long size, 4416 unsigned long addr, unsigned long pfn, unsigned long size,
4361 struct io_mapping *iomap); 4417 struct io_mapping *iomap);
4362 4418
4419static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
4420{
4421 if (INTEL_GEN(i915) >= 10)
4422 return CNL_HWS_CSB_WRITE_INDEX;
4423 else
4424 return I915_HWS_CSB_WRITE_INDEX;
4425}
4426
4363#endif 4427#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8f074c7f6253..73eeb6b1f1cd 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -179,7 +179,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
179 * the alignment of the buddy allocation will naturally match. 179 * the alignment of the buddy allocation will naturally match.
180 */ 180 */
181 phys = drm_pci_alloc(obj->base.dev, 181 phys = drm_pci_alloc(obj->base.dev,
182 obj->base.size, 182 roundup_pow_of_two(obj->base.size),
183 roundup_pow_of_two(obj->base.size)); 183 roundup_pow_of_two(obj->base.size));
184 if (!phys) 184 if (!phys)
185 return ERR_PTR(-ENOMEM); 185 return ERR_PTR(-ENOMEM);
@@ -694,10 +694,10 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
694 694
695 switch (obj->base.write_domain) { 695 switch (obj->base.write_domain) {
696 case I915_GEM_DOMAIN_GTT: 696 case I915_GEM_DOMAIN_GTT:
697 if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv)) { 697 if (!HAS_LLC(dev_priv)) {
698 intel_runtime_pm_get(dev_priv); 698 intel_runtime_pm_get(dev_priv);
699 spin_lock_irq(&dev_priv->uncore.lock); 699 spin_lock_irq(&dev_priv->uncore.lock);
700 POSTING_READ_FW(RING_ACTHD(dev_priv->engine[RCS]->mmio_base)); 700 POSTING_READ_FW(RING_HEAD(dev_priv->engine[RCS]->mmio_base));
701 spin_unlock_irq(&dev_priv->uncore.lock); 701 spin_unlock_irq(&dev_priv->uncore.lock);
702 intel_runtime_pm_put(dev_priv); 702 intel_runtime_pm_put(dev_priv);
703 } 703 }
@@ -2303,7 +2303,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2303 struct sgt_iter sgt_iter; 2303 struct sgt_iter sgt_iter;
2304 struct page *page; 2304 struct page *page;
2305 unsigned long last_pfn = 0; /* suppress gcc warning */ 2305 unsigned long last_pfn = 0; /* suppress gcc warning */
2306 unsigned int max_segment; 2306 unsigned int max_segment = i915_sg_segment_size();
2307 gfp_t noreclaim; 2307 gfp_t noreclaim;
2308 int ret; 2308 int ret;
2309 2309
@@ -2314,10 +2314,6 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2314 GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS); 2314 GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2315 GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); 2315 GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2316 2316
2317 max_segment = swiotlb_max_segment();
2318 if (!max_segment)
2319 max_segment = rounddown(UINT_MAX, PAGE_SIZE);
2320
2321 st = kmalloc(sizeof(*st), GFP_KERNEL); 2317 st = kmalloc(sizeof(*st), GFP_KERNEL);
2322 if (st == NULL) 2318 if (st == NULL)
2323 return ERR_PTR(-ENOMEM); 2319 return ERR_PTR(-ENOMEM);
@@ -2819,8 +2815,8 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
2819 * Turning off the engine->irq_tasklet until the reset is over 2815 * Turning off the engine->irq_tasklet until the reset is over
2820 * prevents the race. 2816 * prevents the race.
2821 */ 2817 */
2822 tasklet_kill(&engine->irq_tasklet); 2818 tasklet_kill(&engine->execlists.irq_tasklet);
2823 tasklet_disable(&engine->irq_tasklet); 2819 tasklet_disable(&engine->execlists.irq_tasklet);
2824 2820
2825 if (engine->irq_seqno_barrier) 2821 if (engine->irq_seqno_barrier)
2826 engine->irq_seqno_barrier(engine); 2822 engine->irq_seqno_barrier(engine);
@@ -2999,7 +2995,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
2999 2995
3000void i915_gem_reset_finish_engine(struct intel_engine_cs *engine) 2996void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
3001{ 2997{
3002 tasklet_enable(&engine->irq_tasklet); 2998 tasklet_enable(&engine->execlists.irq_tasklet);
3003 kthread_unpark(engine->breadcrumbs.signaler); 2999 kthread_unpark(engine->breadcrumbs.signaler);
3004} 3000}
3005 3001
@@ -3026,9 +3022,6 @@ static void nop_submit_request(struct drm_i915_gem_request *request)
3026 3022
3027static void engine_set_wedged(struct intel_engine_cs *engine) 3023static void engine_set_wedged(struct intel_engine_cs *engine)
3028{ 3024{
3029 struct drm_i915_gem_request *request;
3030 unsigned long flags;
3031
3032 /* We need to be sure that no thread is running the old callback as 3025 /* We need to be sure that no thread is running the old callback as
3033 * we install the nop handler (otherwise we would submit a request 3026 * we install the nop handler (otherwise we would submit a request
3034 * to hardware that will never complete). In order to prevent this 3027 * to hardware that will never complete). In order to prevent this
@@ -3038,40 +3031,7 @@ static void engine_set_wedged(struct intel_engine_cs *engine)
3038 engine->submit_request = nop_submit_request; 3031 engine->submit_request = nop_submit_request;
3039 3032
3040 /* Mark all executing requests as skipped */ 3033 /* Mark all executing requests as skipped */
3041 spin_lock_irqsave(&engine->timeline->lock, flags); 3034 engine->cancel_requests(engine);
3042 list_for_each_entry(request, &engine->timeline->requests, link)
3043 if (!i915_gem_request_completed(request))
3044 dma_fence_set_error(&request->fence, -EIO);
3045 spin_unlock_irqrestore(&engine->timeline->lock, flags);
3046
3047 /*
3048 * Clear the execlists queue up before freeing the requests, as those
3049 * are the ones that keep the context and ringbuffer backing objects
3050 * pinned in place.
3051 */
3052
3053 if (i915.enable_execlists) {
3054 struct execlist_port *port = engine->execlist_port;
3055 unsigned long flags;
3056 unsigned int n;
3057
3058 spin_lock_irqsave(&engine->timeline->lock, flags);
3059
3060 for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++)
3061 i915_gem_request_put(port_request(&port[n]));
3062 memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
3063 engine->execlist_queue = RB_ROOT;
3064 engine->execlist_first = NULL;
3065
3066 spin_unlock_irqrestore(&engine->timeline->lock, flags);
3067
3068 /* The port is checked prior to scheduling a tasklet, but
3069 * just in case we have suspended the tasklet to do the
3070 * wedging make sure that when it wakes, it decides there
3071 * is no work to do by clearing the irq_posted bit.
3072 */
3073 clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
3074 }
3075 3035
3076 /* Mark all pending requests as complete so that any concurrent 3036 /* Mark all pending requests as complete so that any concurrent
3077 * (lockless) lookup doesn't try and wait upon the request as we 3037 * (lockless) lookup doesn't try and wait upon the request as we
@@ -4778,7 +4738,7 @@ bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
4778 return false; 4738 return false;
4779 4739
4780 /* TODO: make semaphores and Execlists play nicely together */ 4740 /* TODO: make semaphores and Execlists play nicely together */
4781 if (i915.enable_execlists) 4741 if (i915_modparams.enable_execlists)
4782 return false; 4742 return false;
4783 4743
4784 if (value >= 0) 4744 if (value >= 0)
@@ -4799,7 +4759,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
4799 4759
4800 dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1); 4760 dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
4801 4761
4802 if (!i915.enable_execlists) { 4762 if (!i915_modparams.enable_execlists) {
4803 dev_priv->gt.resume = intel_legacy_submission_resume; 4763 dev_priv->gt.resume = intel_legacy_submission_resume;
4804 dev_priv->gt.cleanup_engine = intel_engine_cleanup; 4764 dev_priv->gt.cleanup_engine = intel_engine_cleanup;
4805 } else { 4765 } else {
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 58a2a44f88bd..921ee369c74d 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -314,7 +314,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
314 * present or not in use we still need a small bias as ring wraparound 314 * present or not in use we still need a small bias as ring wraparound
315 * at offset 0 sometimes hangs. No idea why. 315 * at offset 0 sometimes hangs. No idea why.
316 */ 316 */
317 if (HAS_GUC(dev_priv) && i915.enable_guc_loading) 317 if (HAS_GUC(dev_priv) && i915_modparams.enable_guc_loading)
318 ctx->ggtt_offset_bias = GUC_WOPCM_TOP; 318 ctx->ggtt_offset_bias = GUC_WOPCM_TOP;
319 else 319 else
320 ctx->ggtt_offset_bias = I915_GTT_PAGE_SIZE; 320 ctx->ggtt_offset_bias = I915_GTT_PAGE_SIZE;
@@ -407,7 +407,7 @@ i915_gem_context_create_gvt(struct drm_device *dev)
407 i915_gem_context_set_closed(ctx); /* not user accessible */ 407 i915_gem_context_set_closed(ctx); /* not user accessible */
408 i915_gem_context_clear_bannable(ctx); 408 i915_gem_context_clear_bannable(ctx);
409 i915_gem_context_set_force_single_submission(ctx); 409 i915_gem_context_set_force_single_submission(ctx);
410 if (!i915.enable_guc_submission) 410 if (!i915_modparams.enable_guc_submission)
411 ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */ 411 ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
412 412
413 GEM_BUG_ON(i915_gem_context_is_kernel(ctx)); 413 GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
@@ -431,7 +431,7 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
431 431
432 if (intel_vgpu_active(dev_priv) && 432 if (intel_vgpu_active(dev_priv) &&
433 HAS_LOGICAL_RING_CONTEXTS(dev_priv)) { 433 HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
434 if (!i915.enable_execlists) { 434 if (!i915_modparams.enable_execlists) {
435 DRM_INFO("Only EXECLIST mode is supported in vgpu.\n"); 435 DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
436 return -EINVAL; 436 return -EINVAL;
437 } 437 }
@@ -483,7 +483,7 @@ void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
483 } 483 }
484 484
485 /* Force the GPU state to be restored on enabling */ 485 /* Force the GPU state to be restored on enabling */
486 if (!i915.enable_execlists) { 486 if (!i915_modparams.enable_execlists) {
487 struct i915_gem_context *ctx; 487 struct i915_gem_context *ctx;
488 488
489 list_for_each_entry(ctx, &dev_priv->contexts.list, link) { 489 list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
@@ -568,7 +568,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 flags)
568 enum intel_engine_id id; 568 enum intel_engine_id id;
569 const int num_rings = 569 const int num_rings =
570 /* Use an extended w/a on gen7 if signalling from other rings */ 570 /* Use an extended w/a on gen7 if signalling from other rings */
571 (i915.semaphores && INTEL_GEN(dev_priv) == 7) ? 571 (i915_modparams.semaphores && INTEL_GEN(dev_priv) == 7) ?
572 INTEL_INFO(dev_priv)->num_rings - 1 : 572 INTEL_INFO(dev_priv)->num_rings - 1 :
573 0; 573 0;
574 int len; 574 int len;
@@ -837,7 +837,7 @@ int i915_switch_context(struct drm_i915_gem_request *req)
837 struct intel_engine_cs *engine = req->engine; 837 struct intel_engine_cs *engine = req->engine;
838 838
839 lockdep_assert_held(&req->i915->drm.struct_mutex); 839 lockdep_assert_held(&req->i915->drm.struct_mutex);
840 if (i915.enable_execlists) 840 if (i915_modparams.enable_execlists)
841 return 0; 841 return 0;
842 842
843 if (!req->ctx->engine[engine->id].state) { 843 if (!req->ctx->engine[engine->id].state) {
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index ca0eab343644..d733c4d5a500 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -58,6 +58,7 @@ enum {
58 58
59#define __EXEC_HAS_RELOC BIT(31) 59#define __EXEC_HAS_RELOC BIT(31)
60#define __EXEC_VALIDATED BIT(30) 60#define __EXEC_VALIDATED BIT(30)
61#define __EXEC_INTERNAL_FLAGS (~0u << 30)
61#define UPDATE PIN_OFFSET_FIXED 62#define UPDATE PIN_OFFSET_FIXED
62 63
63#define BATCH_OFFSET_BIAS (256*1024) 64#define BATCH_OFFSET_BIAS (256*1024)
@@ -679,7 +680,7 @@ static int eb_select_context(struct i915_execbuffer *eb)
679static int eb_lookup_vmas(struct i915_execbuffer *eb) 680static int eb_lookup_vmas(struct i915_execbuffer *eb)
680{ 681{
681 struct radix_tree_root *handles_vma = &eb->ctx->handles_vma; 682 struct radix_tree_root *handles_vma = &eb->ctx->handles_vma;
682 struct drm_i915_gem_object *uninitialized_var(obj); 683 struct drm_i915_gem_object *obj;
683 unsigned int i; 684 unsigned int i;
684 int err; 685 int err;
685 686
@@ -725,19 +726,17 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
725 goto err_obj; 726 goto err_obj;
726 } 727 }
727 728
729 /* transfer ref to ctx */
728 vma->open_count++; 730 vma->open_count++;
729 list_add(&lut->obj_link, &obj->lut_list); 731 list_add(&lut->obj_link, &obj->lut_list);
730 list_add(&lut->ctx_link, &eb->ctx->handles_list); 732 list_add(&lut->ctx_link, &eb->ctx->handles_list);
731 lut->ctx = eb->ctx; 733 lut->ctx = eb->ctx;
732 lut->handle = handle; 734 lut->handle = handle;
733 735
734 /* transfer ref to ctx */
735 obj = NULL;
736
737add_vma: 736add_vma:
738 err = eb_add_vma(eb, i, vma); 737 err = eb_add_vma(eb, i, vma);
739 if (unlikely(err)) 738 if (unlikely(err))
740 goto err_obj; 739 goto err_vma;
741 740
742 GEM_BUG_ON(vma != eb->vma[i]); 741 GEM_BUG_ON(vma != eb->vma[i]);
743 GEM_BUG_ON(vma->exec_flags != &eb->flags[i]); 742 GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
@@ -766,8 +765,7 @@ add_vma:
766 return eb_reserve(eb); 765 return eb_reserve(eb);
767 766
768err_obj: 767err_obj:
769 if (obj) 768 i915_gem_object_put(obj);
770 i915_gem_object_put(obj);
771err_vma: 769err_vma:
772 eb->vma[i] = NULL; 770 eb->vma[i] = NULL;
773 return err; 771 return err;
@@ -1587,7 +1585,7 @@ static int eb_prefault_relocations(const struct i915_execbuffer *eb)
1587 const unsigned int count = eb->buffer_count; 1585 const unsigned int count = eb->buffer_count;
1588 unsigned int i; 1586 unsigned int i;
1589 1587
1590 if (unlikely(i915.prefault_disable)) 1588 if (unlikely(i915_modparams.prefault_disable))
1591 return 0; 1589 return 0;
1592 1590
1593 for (i = 0; i < count; i++) { 1591 for (i = 0; i < count; i++) {
@@ -2188,6 +2186,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
2188 int out_fence_fd = -1; 2186 int out_fence_fd = -1;
2189 int err; 2187 int err;
2190 2188
2189 BUILD_BUG_ON(__EXEC_INTERNAL_FLAGS & ~__I915_EXEC_ILLEGAL_FLAGS);
2191 BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS & 2190 BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS &
2192 ~__EXEC_OBJECT_UNKNOWN_FLAGS); 2191 ~__EXEC_OBJECT_UNKNOWN_FLAGS);
2193 2192
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 40d446ba0b85..4c82ceb8d318 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -180,7 +180,7 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
180 return 0; 180 return 0;
181 } 181 }
182 182
183 if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists) { 183 if (INTEL_GEN(dev_priv) >= 8 && i915_modparams.enable_execlists) {
184 if (has_full_48bit_ppgtt) 184 if (has_full_48bit_ppgtt)
185 return 3; 185 return 3;
186 186
@@ -230,13 +230,13 @@ static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
230 230
231 switch (level) { 231 switch (level) {
232 case I915_CACHE_NONE: 232 case I915_CACHE_NONE:
233 pte |= PPAT_UNCACHED_INDEX; 233 pte |= PPAT_UNCACHED;
234 break; 234 break;
235 case I915_CACHE_WT: 235 case I915_CACHE_WT:
236 pte |= PPAT_DISPLAY_ELLC_INDEX; 236 pte |= PPAT_DISPLAY_ELLC;
237 break; 237 break;
238 default: 238 default:
239 pte |= PPAT_CACHED_INDEX; 239 pte |= PPAT_CACHED;
240 break; 240 break;
241 } 241 }
242 242
@@ -249,9 +249,9 @@ static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
249 gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW; 249 gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
250 pde |= addr; 250 pde |= addr;
251 if (level != I915_CACHE_NONE) 251 if (level != I915_CACHE_NONE)
252 pde |= PPAT_CACHED_PDE_INDEX; 252 pde |= PPAT_CACHED_PDE;
253 else 253 else
254 pde |= PPAT_UNCACHED_INDEX; 254 pde |= PPAT_UNCACHED;
255 return pde; 255 return pde;
256} 256}
257 257
@@ -481,10 +481,8 @@ static void fill_page_dma(struct i915_address_space *vm,
481 const u64 val) 481 const u64 val)
482{ 482{
483 u64 * const vaddr = kmap_atomic(p->page); 483 u64 * const vaddr = kmap_atomic(p->page);
484 int i;
485 484
486 for (i = 0; i < 512; i++) 485 memset64(vaddr, val, PAGE_SIZE / sizeof(val));
487 vaddr[i] = val;
488 486
489 kunmap_atomic(vaddr); 487 kunmap_atomic(vaddr);
490} 488}
@@ -1168,19 +1166,22 @@ static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
1168 unsigned int pde; 1166 unsigned int pde;
1169 1167
1170 gen8_for_each_pde(pt, pd, start, length, pde) { 1168 gen8_for_each_pde(pt, pd, start, length, pde) {
1169 int count = gen8_pte_count(start, length);
1170
1171 if (pt == vm->scratch_pt) { 1171 if (pt == vm->scratch_pt) {
1172 pt = alloc_pt(vm); 1172 pt = alloc_pt(vm);
1173 if (IS_ERR(pt)) 1173 if (IS_ERR(pt))
1174 goto unwind; 1174 goto unwind;
1175 1175
1176 gen8_initialize_pt(vm, pt); 1176 if (count < GEN8_PTES)
1177 gen8_initialize_pt(vm, pt);
1177 1178
1178 gen8_ppgtt_set_pde(vm, pd, pt, pde); 1179 gen8_ppgtt_set_pde(vm, pd, pt, pde);
1179 pd->used_pdes++; 1180 pd->used_pdes++;
1180 GEM_BUG_ON(pd->used_pdes > I915_PDES); 1181 GEM_BUG_ON(pd->used_pdes > I915_PDES);
1181 } 1182 }
1182 1183
1183 pt->used_ptes += gen8_pte_count(start, length); 1184 pt->used_ptes += count;
1184 } 1185 }
1185 return 0; 1186 return 0;
1186 1187
@@ -1969,7 +1970,7 @@ int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
1969 /* In the case of execlists, PPGTT is enabled by the context descriptor 1970 /* In the case of execlists, PPGTT is enabled by the context descriptor
1970 * and the PDPs are contained within the context itself. We don't 1971 * and the PDPs are contained within the context itself. We don't
1971 * need to do anything here. */ 1972 * need to do anything here. */
1972 if (i915.enable_execlists) 1973 if (i915_modparams.enable_execlists)
1973 return 0; 1974 return 0;
1974 1975
1975 if (!USES_PPGTT(dev_priv)) 1976 if (!USES_PPGTT(dev_priv))
@@ -2816,41 +2817,209 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
2816 return 0; 2817 return 0;
2817} 2818}
2818 2819
2819static void cnl_setup_private_ppat(struct drm_i915_private *dev_priv) 2820static struct intel_ppat_entry *
2821__alloc_ppat_entry(struct intel_ppat *ppat, unsigned int index, u8 value)
2822{
2823 struct intel_ppat_entry *entry = &ppat->entries[index];
2824
2825 GEM_BUG_ON(index >= ppat->max_entries);
2826 GEM_BUG_ON(test_bit(index, ppat->used));
2827
2828 entry->ppat = ppat;
2829 entry->value = value;
2830 kref_init(&entry->ref);
2831 set_bit(index, ppat->used);
2832 set_bit(index, ppat->dirty);
2833
2834 return entry;
2835}
2836
2837static void __free_ppat_entry(struct intel_ppat_entry *entry)
2838{
2839 struct intel_ppat *ppat = entry->ppat;
2840 unsigned int index = entry - ppat->entries;
2841
2842 GEM_BUG_ON(index >= ppat->max_entries);
2843 GEM_BUG_ON(!test_bit(index, ppat->used));
2844
2845 entry->value = ppat->clear_value;
2846 clear_bit(index, ppat->used);
2847 set_bit(index, ppat->dirty);
2848}
2849
2850/**
2851 * intel_ppat_get - get a usable PPAT entry
2852 * @i915: i915 device instance
2853 * @value: the PPAT value required by the caller
2854 *
2855 * The function tries to search if there is an existing PPAT entry which
2856 * matches with the required value. If perfectly matched, the existing PPAT
2857 * entry will be used. If only partially matched, it will try to check if
2858 * there is any available PPAT index. If yes, it will allocate a new PPAT
2859 * index for the required entry and update the HW. If not, the partially
2860 * matched entry will be used.
2861 */
2862const struct intel_ppat_entry *
2863intel_ppat_get(struct drm_i915_private *i915, u8 value)
2820{ 2864{
2865 struct intel_ppat *ppat = &i915->ppat;
2866 struct intel_ppat_entry *entry;
2867 unsigned int scanned, best_score;
2868 int i;
2869
2870 GEM_BUG_ON(!ppat->max_entries);
2871
2872 scanned = best_score = 0;
2873 for_each_set_bit(i, ppat->used, ppat->max_entries) {
2874 unsigned int score;
2875
2876 score = ppat->match(ppat->entries[i].value, value);
2877 if (score > best_score) {
2878 entry = &ppat->entries[i];
2879 if (score == INTEL_PPAT_PERFECT_MATCH) {
2880 kref_get(&entry->ref);
2881 return entry;
2882 }
2883 best_score = score;
2884 }
2885 scanned++;
2886 }
2887
2888 if (scanned == ppat->max_entries) {
2889 if (!best_score)
2890 return ERR_PTR(-ENOSPC);
2891
2892 kref_get(&entry->ref);
2893 return entry;
2894 }
2895
2896 i = find_first_zero_bit(ppat->used, ppat->max_entries);
2897 entry = __alloc_ppat_entry(ppat, i, value);
2898 ppat->update_hw(i915);
2899 return entry;
2900}
2901
2902static void release_ppat(struct kref *kref)
2903{
2904 struct intel_ppat_entry *entry =
2905 container_of(kref, struct intel_ppat_entry, ref);
2906 struct drm_i915_private *i915 = entry->ppat->i915;
2907
2908 __free_ppat_entry(entry);
2909 entry->ppat->update_hw(i915);
2910}
2911
2912/**
2913 * intel_ppat_put - put back the PPAT entry got from intel_ppat_get()
2914 * @entry: an intel PPAT entry
2915 *
2916 * Put back the PPAT entry got from intel_ppat_get(). If the PPAT index of the
2917 * entry is dynamically allocated, its reference count will be decreased. Once
2918 * the reference count becomes into zero, the PPAT index becomes free again.
2919 */
2920void intel_ppat_put(const struct intel_ppat_entry *entry)
2921{
2922 struct intel_ppat *ppat = entry->ppat;
2923 unsigned int index = entry - ppat->entries;
2924
2925 GEM_BUG_ON(!ppat->max_entries);
2926
2927 kref_put(&ppat->entries[index].ref, release_ppat);
2928}
2929
2930static void cnl_private_pat_update_hw(struct drm_i915_private *dev_priv)
2931{
2932 struct intel_ppat *ppat = &dev_priv->ppat;
2933 int i;
2934
2935 for_each_set_bit(i, ppat->dirty, ppat->max_entries) {
2936 I915_WRITE(GEN10_PAT_INDEX(i), ppat->entries[i].value);
2937 clear_bit(i, ppat->dirty);
2938 }
2939}
2940
2941static void bdw_private_pat_update_hw(struct drm_i915_private *dev_priv)
2942{
2943 struct intel_ppat *ppat = &dev_priv->ppat;
2944 u64 pat = 0;
2945 int i;
2946
2947 for (i = 0; i < ppat->max_entries; i++)
2948 pat |= GEN8_PPAT(i, ppat->entries[i].value);
2949
2950 bitmap_clear(ppat->dirty, 0, ppat->max_entries);
2951
2952 I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
2953 I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
2954}
2955
2956static unsigned int bdw_private_pat_match(u8 src, u8 dst)
2957{
2958 unsigned int score = 0;
2959 enum {
2960 AGE_MATCH = BIT(0),
2961 TC_MATCH = BIT(1),
2962 CA_MATCH = BIT(2),
2963 };
2964
2965 /* Cache attribute has to be matched. */
2966 if (GEN8_PPAT_GET_CA(src) != GEN8_PPAT_GET_CA(dst))
2967 return 0;
2968
2969 score |= CA_MATCH;
2970
2971 if (GEN8_PPAT_GET_TC(src) == GEN8_PPAT_GET_TC(dst))
2972 score |= TC_MATCH;
2973
2974 if (GEN8_PPAT_GET_AGE(src) == GEN8_PPAT_GET_AGE(dst))
2975 score |= AGE_MATCH;
2976
2977 if (score == (AGE_MATCH | TC_MATCH | CA_MATCH))
2978 return INTEL_PPAT_PERFECT_MATCH;
2979
2980 return score;
2981}
2982
2983static unsigned int chv_private_pat_match(u8 src, u8 dst)
2984{
2985 return (CHV_PPAT_GET_SNOOP(src) == CHV_PPAT_GET_SNOOP(dst)) ?
2986 INTEL_PPAT_PERFECT_MATCH : 0;
2987}
2988
2989static void cnl_setup_private_ppat(struct intel_ppat *ppat)
2990{
2991 ppat->max_entries = 8;
2992 ppat->update_hw = cnl_private_pat_update_hw;
2993 ppat->match = bdw_private_pat_match;
2994 ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
2995
2821 /* XXX: spec is unclear if this is still needed for CNL+ */ 2996 /* XXX: spec is unclear if this is still needed for CNL+ */
2822 if (!USES_PPGTT(dev_priv)) { 2997 if (!USES_PPGTT(ppat->i915)) {
2823 I915_WRITE(GEN10_PAT_INDEX(0), GEN8_PPAT_UC); 2998 __alloc_ppat_entry(ppat, 0, GEN8_PPAT_UC);
2824 return; 2999 return;
2825 } 3000 }
2826 3001
2827 I915_WRITE(GEN10_PAT_INDEX(0), GEN8_PPAT_WB | GEN8_PPAT_LLC); 3002 __alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC);
2828 I915_WRITE(GEN10_PAT_INDEX(1), GEN8_PPAT_WC | GEN8_PPAT_LLCELLC); 3003 __alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
2829 I915_WRITE(GEN10_PAT_INDEX(2), GEN8_PPAT_WT | GEN8_PPAT_LLCELLC); 3004 __alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
2830 I915_WRITE(GEN10_PAT_INDEX(3), GEN8_PPAT_UC); 3005 __alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC);
2831 I915_WRITE(GEN10_PAT_INDEX(4), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)); 3006 __alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
2832 I915_WRITE(GEN10_PAT_INDEX(5), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)); 3007 __alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
2833 I915_WRITE(GEN10_PAT_INDEX(6), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)); 3008 __alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
2834 I915_WRITE(GEN10_PAT_INDEX(7), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); 3009 __alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
2835} 3010}
2836 3011
2837/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability 3012/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
2838 * bits. When using advanced contexts each context stores its own PAT, but 3013 * bits. When using advanced contexts each context stores its own PAT, but
2839 * writing this data shouldn't be harmful even in those cases. */ 3014 * writing this data shouldn't be harmful even in those cases. */
2840static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv) 3015static void bdw_setup_private_ppat(struct intel_ppat *ppat)
2841{ 3016{
2842 u64 pat; 3017 ppat->max_entries = 8;
3018 ppat->update_hw = bdw_private_pat_update_hw;
3019 ppat->match = bdw_private_pat_match;
3020 ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
2843 3021
2844 pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */ 3022 if (!USES_PPGTT(ppat->i915)) {
2845 GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
2846 GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
2847 GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */
2848 GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
2849 GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
2850 GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
2851 GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
2852
2853 if (!USES_PPGTT(dev_priv))
2854 /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry, 3023 /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
2855 * so RTL will always use the value corresponding to 3024 * so RTL will always use the value corresponding to
2856 * pat_sel = 000". 3025 * pat_sel = 000".
@@ -2864,17 +3033,26 @@ static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
2864 * So we can still hold onto all our assumptions wrt cpu 3033 * So we can still hold onto all our assumptions wrt cpu
2865 * clflushing on LLC machines. 3034 * clflushing on LLC machines.
2866 */ 3035 */
2867 pat = GEN8_PPAT(0, GEN8_PPAT_UC); 3036 __alloc_ppat_entry(ppat, 0, GEN8_PPAT_UC);
3037 return;
3038 }
2868 3039
2869 /* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b 3040 __alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC); /* for normal objects, no eLLC */
2870 * write would work. */ 3041 __alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC); /* for something pointing to ptes? */
2871 I915_WRITE(GEN8_PRIVATE_PAT_LO, pat); 3042 __alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC); /* for scanout with eLLC */
2872 I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32); 3043 __alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC); /* Uncached objects, mostly for scanout */
3044 __alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
3045 __alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
3046 __alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
3047 __alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
2873} 3048}
2874 3049
2875static void chv_setup_private_ppat(struct drm_i915_private *dev_priv) 3050static void chv_setup_private_ppat(struct intel_ppat *ppat)
2876{ 3051{
2877 u64 pat; 3052 ppat->max_entries = 8;
3053 ppat->update_hw = bdw_private_pat_update_hw;
3054 ppat->match = chv_private_pat_match;
3055 ppat->clear_value = CHV_PPAT_SNOOP;
2878 3056
2879 /* 3057 /*
2880 * Map WB on BDW to snooped on CHV. 3058 * Map WB on BDW to snooped on CHV.
@@ -2894,17 +3072,15 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
2894 * Which means we must set the snoop bit in PAT entry 0 3072 * Which means we must set the snoop bit in PAT entry 0
2895 * in order to keep the global status page working. 3073 * in order to keep the global status page working.
2896 */ 3074 */
2897 pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
2898 GEN8_PPAT(1, 0) |
2899 GEN8_PPAT(2, 0) |
2900 GEN8_PPAT(3, 0) |
2901 GEN8_PPAT(4, CHV_PPAT_SNOOP) |
2902 GEN8_PPAT(5, CHV_PPAT_SNOOP) |
2903 GEN8_PPAT(6, CHV_PPAT_SNOOP) |
2904 GEN8_PPAT(7, CHV_PPAT_SNOOP);
2905 3075
2906 I915_WRITE(GEN8_PRIVATE_PAT_LO, pat); 3076 __alloc_ppat_entry(ppat, 0, CHV_PPAT_SNOOP);
2907 I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32); 3077 __alloc_ppat_entry(ppat, 1, 0);
3078 __alloc_ppat_entry(ppat, 2, 0);
3079 __alloc_ppat_entry(ppat, 3, 0);
3080 __alloc_ppat_entry(ppat, 4, CHV_PPAT_SNOOP);
3081 __alloc_ppat_entry(ppat, 5, CHV_PPAT_SNOOP);
3082 __alloc_ppat_entry(ppat, 6, CHV_PPAT_SNOOP);
3083 __alloc_ppat_entry(ppat, 7, CHV_PPAT_SNOOP);
2908} 3084}
2909 3085
2910static void gen6_gmch_remove(struct i915_address_space *vm) 3086static void gen6_gmch_remove(struct i915_address_space *vm)
@@ -2915,6 +3091,31 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
2915 cleanup_scratch_page(vm); 3091 cleanup_scratch_page(vm);
2916} 3092}
2917 3093
3094static void setup_private_pat(struct drm_i915_private *dev_priv)
3095{
3096 struct intel_ppat *ppat = &dev_priv->ppat;
3097 int i;
3098
3099 ppat->i915 = dev_priv;
3100
3101 if (INTEL_GEN(dev_priv) >= 10)
3102 cnl_setup_private_ppat(ppat);
3103 else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
3104 chv_setup_private_ppat(ppat);
3105 else
3106 bdw_setup_private_ppat(ppat);
3107
3108 GEM_BUG_ON(ppat->max_entries > INTEL_MAX_PPAT_ENTRIES);
3109
3110 for_each_clear_bit(i, ppat->used, ppat->max_entries) {
3111 ppat->entries[i].value = ppat->clear_value;
3112 ppat->entries[i].ppat = ppat;
3113 set_bit(i, ppat->dirty);
3114 }
3115
3116 ppat->update_hw(dev_priv);
3117}
3118
2918static int gen8_gmch_probe(struct i915_ggtt *ggtt) 3119static int gen8_gmch_probe(struct i915_ggtt *ggtt)
2919{ 3120{
2920 struct drm_i915_private *dev_priv = ggtt->base.i915; 3121 struct drm_i915_private *dev_priv = ggtt->base.i915;
@@ -2947,14 +3148,6 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
2947 } 3148 }
2948 3149
2949 ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT; 3150 ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
2950
2951 if (INTEL_GEN(dev_priv) >= 10)
2952 cnl_setup_private_ppat(dev_priv);
2953 else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
2954 chv_setup_private_ppat(dev_priv);
2955 else
2956 bdw_setup_private_ppat(dev_priv);
2957
2958 ggtt->base.cleanup = gen6_gmch_remove; 3151 ggtt->base.cleanup = gen6_gmch_remove;
2959 ggtt->base.bind_vma = ggtt_bind_vma; 3152 ggtt->base.bind_vma = ggtt_bind_vma;
2960 ggtt->base.unbind_vma = ggtt_unbind_vma; 3153 ggtt->base.unbind_vma = ggtt_unbind_vma;
@@ -2975,6 +3168,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
2975 3168
2976 ggtt->invalidate = gen6_ggtt_invalidate; 3169 ggtt->invalidate = gen6_ggtt_invalidate;
2977 3170
3171 setup_private_pat(dev_priv);
3172
2978 return ggtt_probe_common(ggtt, size); 3173 return ggtt_probe_common(ggtt, size);
2979} 3174}
2980 3175
@@ -3095,7 +3290,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
3095 * currently don't have any bits spare to pass in this upper 3290 * currently don't have any bits spare to pass in this upper
3096 * restriction! 3291 * restriction!
3097 */ 3292 */
3098 if (HAS_GUC(dev_priv) && i915.enable_guc_loading) { 3293 if (HAS_GUC(dev_priv) && i915_modparams.enable_guc_loading) {
3099 ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP); 3294 ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP);
3100 ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total); 3295 ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
3101 } 3296 }
@@ -3232,13 +3427,10 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
3232 ggtt->base.closed = false; 3427 ggtt->base.closed = false;
3233 3428
3234 if (INTEL_GEN(dev_priv) >= 8) { 3429 if (INTEL_GEN(dev_priv) >= 8) {
3235 if (INTEL_GEN(dev_priv) >= 10) 3430 struct intel_ppat *ppat = &dev_priv->ppat;
3236 cnl_setup_private_ppat(dev_priv);
3237 else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
3238 chv_setup_private_ppat(dev_priv);
3239 else
3240 bdw_setup_private_ppat(dev_priv);
3241 3431
3432 bitmap_set(ppat->dirty, 0, ppat->max_entries);
3433 dev_priv->ppat.update_hw(dev_priv);
3242 return; 3434 return;
3243 } 3435 }
3244 3436
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index b4e3aa7c0ce1..f62fb903dc24 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -126,13 +126,13 @@ typedef u64 gen8_ppgtt_pml4e_t;
126 * tables */ 126 * tables */
127#define GEN8_PDPE_MASK 0x1ff 127#define GEN8_PDPE_MASK 0x1ff
128 128
129#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD) 129#define PPAT_UNCACHED (_PAGE_PWT | _PAGE_PCD)
130#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */ 130#define PPAT_CACHED_PDE 0 /* WB LLC */
131#define PPAT_CACHED_INDEX _PAGE_PAT /* WB LLCeLLC */ 131#define PPAT_CACHED _PAGE_PAT /* WB LLCeLLC */
132#define PPAT_DISPLAY_ELLC_INDEX _PAGE_PCD /* WT eLLC */ 132#define PPAT_DISPLAY_ELLC _PAGE_PCD /* WT eLLC */
133 133
134#define CHV_PPAT_SNOOP (1<<6) 134#define CHV_PPAT_SNOOP (1<<6)
135#define GEN8_PPAT_AGE(x) (x<<4) 135#define GEN8_PPAT_AGE(x) ((x)<<4)
136#define GEN8_PPAT_LLCeLLC (3<<2) 136#define GEN8_PPAT_LLCeLLC (3<<2)
137#define GEN8_PPAT_LLCELLC (2<<2) 137#define GEN8_PPAT_LLCELLC (2<<2)
138#define GEN8_PPAT_LLC (1<<2) 138#define GEN8_PPAT_LLC (1<<2)
@@ -143,6 +143,11 @@ typedef u64 gen8_ppgtt_pml4e_t;
143#define GEN8_PPAT_ELLC_OVERRIDE (0<<2) 143#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
144#define GEN8_PPAT(i, x) ((u64)(x) << ((i) * 8)) 144#define GEN8_PPAT(i, x) ((u64)(x) << ((i) * 8))
145 145
146#define GEN8_PPAT_GET_CA(x) ((x) & 3)
147#define GEN8_PPAT_GET_TC(x) ((x) & (3 << 2))
148#define GEN8_PPAT_GET_AGE(x) ((x) & (3 << 4))
149#define CHV_PPAT_GET_SNOOP(x) ((x) & (1 << 6))
150
146struct sg_table; 151struct sg_table;
147 152
148struct intel_rotation_info { 153struct intel_rotation_info {
@@ -536,6 +541,37 @@ i915_vm_to_ggtt(struct i915_address_space *vm)
536 return container_of(vm, struct i915_ggtt, base); 541 return container_of(vm, struct i915_ggtt, base);
537} 542}
538 543
544#define INTEL_MAX_PPAT_ENTRIES 8
545#define INTEL_PPAT_PERFECT_MATCH (~0U)
546
547struct intel_ppat;
548
549struct intel_ppat_entry {
550 struct intel_ppat *ppat;
551 struct kref ref;
552 u8 value;
553};
554
555struct intel_ppat {
556 struct intel_ppat_entry entries[INTEL_MAX_PPAT_ENTRIES];
557 DECLARE_BITMAP(used, INTEL_MAX_PPAT_ENTRIES);
558 DECLARE_BITMAP(dirty, INTEL_MAX_PPAT_ENTRIES);
559 unsigned int max_entries;
560 u8 clear_value;
561 /*
562 * Return a score to show how two PPAT values match,
563 * a INTEL_PPAT_PERFECT_MATCH indicates a perfect match
564 */
565 unsigned int (*match)(u8 src, u8 dst);
566 void (*update_hw)(struct drm_i915_private *i915);
567
568 struct drm_i915_private *i915;
569};
570
571const struct intel_ppat_entry *
572intel_ppat_get(struct drm_i915_private *i915, u8 value);
573void intel_ppat_put(const struct intel_ppat_entry *entry);
574
539int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915); 575int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915);
540void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915); 576void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915);
541 577
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 813a3b546d6e..4eb1a76731b2 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -1021,12 +1021,28 @@ static bool busywait_stop(unsigned long timeout, unsigned int cpu)
1021 return this_cpu != cpu; 1021 return this_cpu != cpu;
1022} 1022}
1023 1023
1024bool __i915_spin_request(const struct drm_i915_gem_request *req, 1024static bool __i915_spin_request(const struct drm_i915_gem_request *req,
1025 u32 seqno, int state, unsigned long timeout_us) 1025 u32 seqno, int state, unsigned long timeout_us)
1026{ 1026{
1027 struct intel_engine_cs *engine = req->engine; 1027 struct intel_engine_cs *engine = req->engine;
1028 unsigned int irq, cpu; 1028 unsigned int irq, cpu;
1029 1029
1030 GEM_BUG_ON(!seqno);
1031
1032 /*
1033 * Only wait for the request if we know it is likely to complete.
1034 *
1035 * We don't track the timestamps around requests, nor the average
1036 * request length, so we do not have a good indicator that this
1037 * request will complete within the timeout. What we do know is the
1038 * order in which requests are executed by the engine and so we can
1039 * tell if the request has started. If the request hasn't started yet,
1040 * it is a fair assumption that it will not complete within our
1041 * relatively short timeout.
1042 */
1043 if (!i915_seqno_passed(intel_engine_get_seqno(engine), seqno - 1))
1044 return false;
1045
1030 /* When waiting for high frequency requests, e.g. during synchronous 1046 /* When waiting for high frequency requests, e.g. during synchronous
1031 * rendering split between the CPU and GPU, the finite amount of time 1047 * rendering split between the CPU and GPU, the finite amount of time
1032 * required to set up the irq and wait upon it limits the response 1048 * required to set up the irq and wait upon it limits the response
@@ -1040,12 +1056,8 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
1040 irq = atomic_read(&engine->irq_count); 1056 irq = atomic_read(&engine->irq_count);
1041 timeout_us += local_clock_us(&cpu); 1057 timeout_us += local_clock_us(&cpu);
1042 do { 1058 do {
1043 if (seqno != i915_gem_request_global_seqno(req)) 1059 if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
1044 break; 1060 return seqno == i915_gem_request_global_seqno(req);
1045
1046 if (i915_seqno_passed(intel_engine_get_seqno(req->engine),
1047 seqno))
1048 return true;
1049 1061
1050 /* Seqno are meant to be ordered *before* the interrupt. If 1062 /* Seqno are meant to be ordered *before* the interrupt. If
1051 * we see an interrupt without a corresponding seqno advance, 1063 * we see an interrupt without a corresponding seqno advance,
@@ -1156,7 +1168,7 @@ restart:
1156 GEM_BUG_ON(!i915_sw_fence_signaled(&req->submit)); 1168 GEM_BUG_ON(!i915_sw_fence_signaled(&req->submit));
1157 1169
1158 /* Optimistic short spin before touching IRQs */ 1170 /* Optimistic short spin before touching IRQs */
1159 if (i915_spin_request(req, state, 5)) 1171 if (__i915_spin_request(req, wait.seqno, state, 5))
1160 goto complete; 1172 goto complete;
1161 1173
1162 set_current_state(state); 1174 set_current_state(state);
@@ -1213,7 +1225,7 @@ wakeup:
1213 continue; 1225 continue;
1214 1226
1215 /* Only spin if we know the GPU is processing this request */ 1227 /* Only spin if we know the GPU is processing this request */
1216 if (i915_spin_request(req, state, 2)) 1228 if (__i915_spin_request(req, wait.seqno, state, 2))
1217 break; 1229 break;
1218 1230
1219 if (!intel_wait_check_request(&wait, req)) { 1231 if (!intel_wait_check_request(&wait, req)) {
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index 49a4c8994ff0..96eb52471dad 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -313,26 +313,6 @@ static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
313} 313}
314 314
315static inline bool 315static inline bool
316__i915_gem_request_started(const struct drm_i915_gem_request *req, u32 seqno)
317{
318 GEM_BUG_ON(!seqno);
319 return i915_seqno_passed(intel_engine_get_seqno(req->engine),
320 seqno - 1);
321}
322
323static inline bool
324i915_gem_request_started(const struct drm_i915_gem_request *req)
325{
326 u32 seqno;
327
328 seqno = i915_gem_request_global_seqno(req);
329 if (!seqno)
330 return false;
331
332 return __i915_gem_request_started(req, seqno);
333}
334
335static inline bool
336__i915_gem_request_completed(const struct drm_i915_gem_request *req, u32 seqno) 316__i915_gem_request_completed(const struct drm_i915_gem_request *req, u32 seqno)
337{ 317{
338 GEM_BUG_ON(!seqno); 318 GEM_BUG_ON(!seqno);
@@ -352,21 +332,6 @@ i915_gem_request_completed(const struct drm_i915_gem_request *req)
352 return __i915_gem_request_completed(req, seqno); 332 return __i915_gem_request_completed(req, seqno);
353} 333}
354 334
355bool __i915_spin_request(const struct drm_i915_gem_request *request,
356 u32 seqno, int state, unsigned long timeout_us);
357static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
358 int state, unsigned long timeout_us)
359{
360 u32 seqno;
361
362 seqno = i915_gem_request_global_seqno(request);
363 if (!seqno)
364 return 0;
365
366 return (__i915_gem_request_started(request, seqno) &&
367 __i915_spin_request(request, seqno, state, timeout_us));
368}
369
370/* We treat requests as fences. This is not be to confused with our 335/* We treat requests as fences. This is not be to confused with our
371 * "fence registers" but pipeline synchronisation objects ala GL_ARB_sync. 336 * "fence registers" but pipeline synchronisation objects ala GL_ARB_sync.
372 * We use the fences to synchronize access from the CPU with activity on the 337 * We use the fences to synchronize access from the CPU with activity on the
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 709efe2357ea..2d4996de7331 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -399,64 +399,42 @@ struct get_pages_work {
399 struct task_struct *task; 399 struct task_struct *task;
400}; 400};
401 401
402#if IS_ENABLED(CONFIG_SWIOTLB)
403#define swiotlb_active() swiotlb_nr_tbl()
404#else
405#define swiotlb_active() 0
406#endif
407
408static int
409st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
410{
411 struct scatterlist *sg;
412 int ret, n;
413
414 *st = kmalloc(sizeof(**st), GFP_KERNEL);
415 if (*st == NULL)
416 return -ENOMEM;
417
418 if (swiotlb_active()) {
419 ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
420 if (ret)
421 goto err;
422
423 for_each_sg((*st)->sgl, sg, num_pages, n)
424 sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
425 } else {
426 ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
427 0, num_pages << PAGE_SHIFT,
428 GFP_KERNEL);
429 if (ret)
430 goto err;
431 }
432
433 return 0;
434
435err:
436 kfree(*st);
437 *st = NULL;
438 return ret;
439}
440
441static struct sg_table * 402static struct sg_table *
442__i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj, 403__i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
443 struct page **pvec, int num_pages) 404 struct page **pvec, int num_pages)
444{ 405{
445 struct sg_table *pages; 406 unsigned int max_segment = i915_sg_segment_size();
407 struct sg_table *st;
446 int ret; 408 int ret;
447 409
448 ret = st_set_pages(&pages, pvec, num_pages); 410 st = kmalloc(sizeof(*st), GFP_KERNEL);
449 if (ret) 411 if (!st)
412 return ERR_PTR(-ENOMEM);
413
414alloc_table:
415 ret = __sg_alloc_table_from_pages(st, pvec, num_pages,
416 0, num_pages << PAGE_SHIFT,
417 max_segment,
418 GFP_KERNEL);
419 if (ret) {
420 kfree(st);
450 return ERR_PTR(ret); 421 return ERR_PTR(ret);
422 }
451 423
452 ret = i915_gem_gtt_prepare_pages(obj, pages); 424 ret = i915_gem_gtt_prepare_pages(obj, st);
453 if (ret) { 425 if (ret) {
454 sg_free_table(pages); 426 sg_free_table(st);
455 kfree(pages); 427
428 if (max_segment > PAGE_SIZE) {
429 max_segment = PAGE_SIZE;
430 goto alloc_table;
431 }
432
433 kfree(st);
456 return ERR_PTR(ret); 434 return ERR_PTR(ret);
457 } 435 }
458 436
459 return pages; 437 return st;
460} 438}
461 439
462static int 440static int
@@ -540,7 +518,8 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
540 struct sg_table *pages = ERR_PTR(ret); 518 struct sg_table *pages = ERR_PTR(ret);
541 519
542 if (pinned == npages) { 520 if (pinned == npages) {
543 pages = __i915_gem_userptr_set_pages(obj, pvec, npages); 521 pages = __i915_gem_userptr_alloc_pages(obj, pvec,
522 npages);
544 if (!IS_ERR(pages)) { 523 if (!IS_ERR(pages)) {
545 __i915_gem_object_set_pages(obj, pages); 524 __i915_gem_object_set_pages(obj, pages);
546 pinned = 0; 525 pinned = 0;
@@ -661,7 +640,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
661 pages = __i915_gem_userptr_get_pages_schedule(obj); 640 pages = __i915_gem_userptr_get_pages_schedule(obj);
662 active = pages == ERR_PTR(-EAGAIN); 641 active = pages == ERR_PTR(-EAGAIN);
663 } else { 642 } else {
664 pages = __i915_gem_userptr_set_pages(obj, pvec, num_pages); 643 pages = __i915_gem_userptr_alloc_pages(obj, pvec, num_pages);
665 active = !IS_ERR(pages); 644 active = !IS_ERR(pages);
666 } 645 }
667 if (active) 646 if (active)
@@ -834,7 +813,9 @@ int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
834 hash_init(dev_priv->mm_structs); 813 hash_init(dev_priv->mm_structs);
835 814
836 dev_priv->mm.userptr_wq = 815 dev_priv->mm.userptr_wq =
837 alloc_workqueue("i915-userptr-acquire", WQ_HIGHPRI, 0); 816 alloc_workqueue("i915-userptr-acquire",
817 WQ_HIGHPRI | WQ_MEM_RECLAIM,
818 0);
838 if (!dev_priv->mm.userptr_wq) 819 if (!dev_priv->mm.userptr_wq)
839 return -ENOMEM; 820 return -ENOMEM;
840 821
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 0c779671fe2d..c14552ab270b 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -396,6 +396,8 @@ static void error_print_context(struct drm_i915_error_state_buf *m,
396static void error_print_engine(struct drm_i915_error_state_buf *m, 396static void error_print_engine(struct drm_i915_error_state_buf *m,
397 const struct drm_i915_error_engine *ee) 397 const struct drm_i915_error_engine *ee)
398{ 398{
399 int n;
400
399 err_printf(m, "%s command stream:\n", engine_str(ee->engine_id)); 401 err_printf(m, "%s command stream:\n", engine_str(ee->engine_id));
400 err_printf(m, " START: 0x%08x\n", ee->start); 402 err_printf(m, " START: 0x%08x\n", ee->start);
401 err_printf(m, " HEAD: 0x%08x [0x%08x]\n", ee->head, ee->rq_head); 403 err_printf(m, " HEAD: 0x%08x [0x%08x]\n", ee->head, ee->rq_head);
@@ -465,8 +467,11 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
465 jiffies_to_msecs(jiffies - ee->hangcheck_timestamp)); 467 jiffies_to_msecs(jiffies - ee->hangcheck_timestamp));
466 err_printf(m, " engine reset count: %u\n", ee->reset_count); 468 err_printf(m, " engine reset count: %u\n", ee->reset_count);
467 469
468 error_print_request(m, " ELSP[0]: ", &ee->execlist[0]); 470 for (n = 0; n < ee->num_ports; n++) {
469 error_print_request(m, " ELSP[1]: ", &ee->execlist[1]); 471 err_printf(m, " ELSP[%d]:", n);
472 error_print_request(m, " ", &ee->execlist[n]);
473 }
474
470 error_print_context(m, " Active context: ", &ee->context); 475 error_print_context(m, " Active context: ", &ee->context);
471} 476}
472 477
@@ -567,7 +572,7 @@ static __always_inline void err_print_param(struct drm_i915_error_state_buf *m,
567static void err_print_params(struct drm_i915_error_state_buf *m, 572static void err_print_params(struct drm_i915_error_state_buf *m,
568 const struct i915_params *p) 573 const struct i915_params *p)
569{ 574{
570#define PRINT(T, x) err_print_param(m, #x, #T, &p->x); 575#define PRINT(T, x, ...) err_print_param(m, #x, #T, &p->x);
571 I915_PARAMS_FOR_EACH(PRINT); 576 I915_PARAMS_FOR_EACH(PRINT);
572#undef PRINT 577#undef PRINT
573} 578}
@@ -861,7 +866,7 @@ void __i915_gpu_state_free(struct kref *error_ref)
861 kfree(error->overlay); 866 kfree(error->overlay);
862 kfree(error->display); 867 kfree(error->display);
863 868
864#define FREE(T, x) free_param(#T, &error->params.x); 869#define FREE(T, x, ...) free_param(#T, &error->params.x);
865 I915_PARAMS_FOR_EACH(FREE); 870 I915_PARAMS_FOR_EACH(FREE);
866#undef FREE 871#undef FREE
867 872
@@ -1327,17 +1332,19 @@ static void engine_record_requests(struct intel_engine_cs *engine,
1327static void error_record_engine_execlists(struct intel_engine_cs *engine, 1332static void error_record_engine_execlists(struct intel_engine_cs *engine,
1328 struct drm_i915_error_engine *ee) 1333 struct drm_i915_error_engine *ee)
1329{ 1334{
1330 const struct execlist_port *port = engine->execlist_port; 1335 const struct intel_engine_execlists * const execlists = &engine->execlists;
1331 unsigned int n; 1336 unsigned int n;
1332 1337
1333 for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) { 1338 for (n = 0; n < execlists_num_ports(execlists); n++) {
1334 struct drm_i915_gem_request *rq = port_request(&port[n]); 1339 struct drm_i915_gem_request *rq = port_request(&execlists->port[n]);
1335 1340
1336 if (!rq) 1341 if (!rq)
1337 break; 1342 break;
1338 1343
1339 record_request(rq, &ee->execlist[n]); 1344 record_request(rq, &ee->execlist[n]);
1340 } 1345 }
1346
1347 ee->num_ports = n;
1341} 1348}
1342 1349
1343static void record_context(struct drm_i915_error_context *e, 1350static void record_context(struct drm_i915_error_context *e,
@@ -1554,7 +1561,7 @@ static void i915_gem_capture_guc_log_buffer(struct drm_i915_private *dev_priv,
1554 struct i915_gpu_state *error) 1561 struct i915_gpu_state *error)
1555{ 1562{
1556 /* Capturing log buf contents won't be useful if logging was disabled */ 1563 /* Capturing log buf contents won't be useful if logging was disabled */
1557 if (!dev_priv->guc.log.vma || (i915.guc_log_level < 0)) 1564 if (!dev_priv->guc.log.vma || (i915_modparams.guc_log_level < 0))
1558 return; 1565 return;
1559 1566
1560 error->guc_log = i915_error_object_create(dev_priv, 1567 error->guc_log = i915_error_object_create(dev_priv,
@@ -1696,8 +1703,8 @@ static int capture(void *data)
1696 ktime_to_timeval(ktime_sub(ktime_get(), 1703 ktime_to_timeval(ktime_sub(ktime_get(),
1697 error->i915->gt.last_init_time)); 1704 error->i915->gt.last_init_time));
1698 1705
1699 error->params = i915; 1706 error->params = i915_modparams;
1700#define DUP(T, x) dup_param(#T, &error->params.x); 1707#define DUP(T, x, ...) dup_param(#T, &error->params.x);
1701 I915_PARAMS_FOR_EACH(DUP); 1708 I915_PARAMS_FOR_EACH(DUP);
1702#undef DUP 1709#undef DUP
1703 1710
@@ -1751,7 +1758,7 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
1751 struct i915_gpu_state *error; 1758 struct i915_gpu_state *error;
1752 unsigned long flags; 1759 unsigned long flags;
1753 1760
1754 if (!i915.error_capture) 1761 if (!i915_modparams.error_capture)
1755 return; 1762 return;
1756 1763
1757 if (READ_ONCE(dev_priv->gpu_error.first_error)) 1764 if (READ_ONCE(dev_priv->gpu_error.first_error))
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 48a1e9349a2c..04f1281d81a5 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -192,13 +192,12 @@ static int __create_doorbell(struct i915_guc_client *client)
192 192
193 doorbell = __get_doorbell(client); 193 doorbell = __get_doorbell(client);
194 doorbell->db_status = GUC_DOORBELL_ENABLED; 194 doorbell->db_status = GUC_DOORBELL_ENABLED;
195 doorbell->cookie = client->doorbell_cookie; 195 doorbell->cookie = 0;
196 196
197 err = __guc_allocate_doorbell(client->guc, client->stage_id); 197 err = __guc_allocate_doorbell(client->guc, client->stage_id);
198 if (err) { 198 if (err)
199 doorbell->db_status = GUC_DOORBELL_DISABLED; 199 doorbell->db_status = GUC_DOORBELL_DISABLED;
200 doorbell->cookie = 0; 200
201 }
202 return err; 201 return err;
203} 202}
204 203
@@ -306,7 +305,7 @@ static void guc_proc_desc_init(struct intel_guc *guc,
306 desc->db_base_addr = 0; 305 desc->db_base_addr = 0;
307 306
308 desc->stage_id = client->stage_id; 307 desc->stage_id = client->stage_id;
309 desc->wq_size_bytes = client->wq_size; 308 desc->wq_size_bytes = GUC_WQ_SIZE;
310 desc->wq_status = WQ_STATUS_ACTIVE; 309 desc->wq_status = WQ_STATUS_ACTIVE;
311 desc->priority = client->priority; 310 desc->priority = client->priority;
312} 311}
@@ -391,8 +390,8 @@ static void guc_stage_desc_init(struct intel_guc *guc,
391 desc->db_trigger_cpu = (uintptr_t)__get_doorbell(client); 390 desc->db_trigger_cpu = (uintptr_t)__get_doorbell(client);
392 desc->db_trigger_uk = gfx_addr + client->doorbell_offset; 391 desc->db_trigger_uk = gfx_addr + client->doorbell_offset;
393 desc->process_desc = gfx_addr + client->proc_desc_offset; 392 desc->process_desc = gfx_addr + client->proc_desc_offset;
394 desc->wq_addr = gfx_addr + client->wq_offset; 393 desc->wq_addr = gfx_addr + GUC_DB_SIZE;
395 desc->wq_size = client->wq_size; 394 desc->wq_size = GUC_WQ_SIZE;
396 395
397 desc->desc_private = (uintptr_t)client; 396 desc->desc_private = (uintptr_t)client;
398} 397}
@@ -406,82 +405,23 @@ static void guc_stage_desc_fini(struct intel_guc *guc,
406 memset(desc, 0, sizeof(*desc)); 405 memset(desc, 0, sizeof(*desc));
407} 406}
408 407
409/**
410 * i915_guc_wq_reserve() - reserve space in the GuC's workqueue
411 * @request: request associated with the commands
412 *
413 * Return: 0 if space is available
414 * -EAGAIN if space is not currently available
415 *
416 * This function must be called (and must return 0) before a request
417 * is submitted to the GuC via i915_guc_submit() below. Once a result
418 * of 0 has been returned, it must be balanced by a corresponding
419 * call to submit().
420 *
421 * Reservation allows the caller to determine in advance that space
422 * will be available for the next submission before committing resources
423 * to it, and helps avoid late failures with complicated recovery paths.
424 */
425int i915_guc_wq_reserve(struct drm_i915_gem_request *request)
426{
427 const size_t wqi_size = sizeof(struct guc_wq_item);
428 struct i915_guc_client *client = request->i915->guc.execbuf_client;
429 struct guc_process_desc *desc = __get_process_desc(client);
430 u32 freespace;
431 int ret;
432
433 spin_lock_irq(&client->wq_lock);
434 freespace = CIRC_SPACE(client->wq_tail, desc->head, client->wq_size);
435 freespace -= client->wq_rsvd;
436 if (likely(freespace >= wqi_size)) {
437 client->wq_rsvd += wqi_size;
438 ret = 0;
439 } else {
440 client->no_wq_space++;
441 ret = -EAGAIN;
442 }
443 spin_unlock_irq(&client->wq_lock);
444
445 return ret;
446}
447
448static void guc_client_update_wq_rsvd(struct i915_guc_client *client, int size)
449{
450 unsigned long flags;
451
452 spin_lock_irqsave(&client->wq_lock, flags);
453 client->wq_rsvd += size;
454 spin_unlock_irqrestore(&client->wq_lock, flags);
455}
456
457void i915_guc_wq_unreserve(struct drm_i915_gem_request *request)
458{
459 const int wqi_size = sizeof(struct guc_wq_item);
460 struct i915_guc_client *client = request->i915->guc.execbuf_client;
461
462 GEM_BUG_ON(READ_ONCE(client->wq_rsvd) < wqi_size);
463 guc_client_update_wq_rsvd(client, -wqi_size);
464}
465
466/* Construct a Work Item and append it to the GuC's Work Queue */ 408/* Construct a Work Item and append it to the GuC's Work Queue */
467static void guc_wq_item_append(struct i915_guc_client *client, 409static void guc_wq_item_append(struct i915_guc_client *client,
468 struct drm_i915_gem_request *rq) 410 struct drm_i915_gem_request *rq)
469{ 411{
470 /* wqi_len is in DWords, and does not include the one-word header */ 412 /* wqi_len is in DWords, and does not include the one-word header */
471 const size_t wqi_size = sizeof(struct guc_wq_item); 413 const size_t wqi_size = sizeof(struct guc_wq_item);
472 const u32 wqi_len = wqi_size/sizeof(u32) - 1; 414 const u32 wqi_len = wqi_size / sizeof(u32) - 1;
473 struct intel_engine_cs *engine = rq->engine; 415 struct intel_engine_cs *engine = rq->engine;
416 struct i915_gem_context *ctx = rq->ctx;
474 struct guc_process_desc *desc = __get_process_desc(client); 417 struct guc_process_desc *desc = __get_process_desc(client);
475 struct guc_wq_item *wqi; 418 struct guc_wq_item *wqi;
476 u32 freespace, tail, wq_off; 419 u32 ring_tail, wq_off;
477 420
478 /* Free space is guaranteed, see i915_guc_wq_reserve() above */ 421 lockdep_assert_held(&client->wq_lock);
479 freespace = CIRC_SPACE(client->wq_tail, desc->head, client->wq_size);
480 GEM_BUG_ON(freespace < wqi_size);
481 422
482 /* The GuC firmware wants the tail index in QWords, not bytes */ 423 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);
483 tail = intel_ring_set_tail(rq->ring, rq->tail) >> 3; 424 GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX);
484 GEM_BUG_ON(tail > WQ_RING_TAIL_MAX);
485 425
486 /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we 426 /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we
487 * should not have the case where structure wqi is across page, neither 427 * should not have the case where structure wqi is across page, neither
@@ -491,29 +431,29 @@ static void guc_wq_item_append(struct i915_guc_client *client,
491 * workqueue buffer dw by dw. 431 * workqueue buffer dw by dw.
492 */ 432 */
493 BUILD_BUG_ON(wqi_size != 16); 433 BUILD_BUG_ON(wqi_size != 16);
494 GEM_BUG_ON(client->wq_rsvd < wqi_size);
495 434
496 /* postincrement WQ tail for next time */ 435 /* Free space is guaranteed. */
497 wq_off = client->wq_tail; 436 wq_off = READ_ONCE(desc->tail);
437 GEM_BUG_ON(CIRC_SPACE(wq_off, READ_ONCE(desc->head),
438 GUC_WQ_SIZE) < wqi_size);
498 GEM_BUG_ON(wq_off & (wqi_size - 1)); 439 GEM_BUG_ON(wq_off & (wqi_size - 1));
499 client->wq_tail += wqi_size;
500 client->wq_tail &= client->wq_size - 1;
501 client->wq_rsvd -= wqi_size;
502 440
503 /* WQ starts from the page after doorbell / process_desc */ 441 /* WQ starts from the page after doorbell / process_desc */
504 wqi = client->vaddr + wq_off + GUC_DB_SIZE; 442 wqi = client->vaddr + wq_off + GUC_DB_SIZE;
505 443
506 /* Now fill in the 4-word work queue item */ 444 /* Now fill in the 4-word work queue item */
507 wqi->header = WQ_TYPE_INORDER | 445 wqi->header = WQ_TYPE_INORDER |
508 (wqi_len << WQ_LEN_SHIFT) | 446 (wqi_len << WQ_LEN_SHIFT) |
509 (engine->guc_id << WQ_TARGET_SHIFT) | 447 (engine->guc_id << WQ_TARGET_SHIFT) |
510 WQ_NO_WCFLUSH_WAIT; 448 WQ_NO_WCFLUSH_WAIT;
511 449
512 /* The GuC wants only the low-order word of the context descriptor */ 450 wqi->context_desc = lower_32_bits(intel_lr_context_descriptor(ctx, engine));
513 wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, engine);
514 451
515 wqi->submit_element_info = tail << WQ_RING_TAIL_SHIFT; 452 wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT;
516 wqi->fence_id = rq->global_seqno; 453 wqi->fence_id = rq->global_seqno;
454
455 /* Postincrement WQ tail for next time. */
456 WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1));
517} 457}
518 458
519static void guc_reset_wq(struct i915_guc_client *client) 459static void guc_reset_wq(struct i915_guc_client *client)
@@ -522,106 +462,64 @@ static void guc_reset_wq(struct i915_guc_client *client)
522 462
523 desc->head = 0; 463 desc->head = 0;
524 desc->tail = 0; 464 desc->tail = 0;
525
526 client->wq_tail = 0;
527} 465}
528 466
529static int guc_ring_doorbell(struct i915_guc_client *client) 467static void guc_ring_doorbell(struct i915_guc_client *client)
530{ 468{
531 struct guc_process_desc *desc = __get_process_desc(client); 469 struct guc_doorbell_info *db;
532 union guc_doorbell_qw db_cmp, db_exc, db_ret; 470 u32 cookie;
533 union guc_doorbell_qw *db;
534 int attempt = 2, ret = -EAGAIN;
535
536 /* Update the tail so it is visible to GuC */
537 desc->tail = client->wq_tail;
538
539 /* current cookie */
540 db_cmp.db_status = GUC_DOORBELL_ENABLED;
541 db_cmp.cookie = client->doorbell_cookie;
542 471
543 /* cookie to be updated */ 472 lockdep_assert_held(&client->wq_lock);
544 db_exc.db_status = GUC_DOORBELL_ENABLED;
545 db_exc.cookie = client->doorbell_cookie + 1;
546 if (db_exc.cookie == 0)
547 db_exc.cookie = 1;
548 473
549 /* pointer of current doorbell cacheline */ 474 /* pointer of current doorbell cacheline */
550 db = (union guc_doorbell_qw *)__get_doorbell(client); 475 db = __get_doorbell(client);
551
552 while (attempt--) {
553 /* lets ring the doorbell */
554 db_ret.value_qw = atomic64_cmpxchg((atomic64_t *)db,
555 db_cmp.value_qw, db_exc.value_qw);
556
557 /* if the exchange was successfully executed */
558 if (db_ret.value_qw == db_cmp.value_qw) {
559 /* db was successfully rung */
560 client->doorbell_cookie = db_exc.cookie;
561 ret = 0;
562 break;
563 }
564
565 /* XXX: doorbell was lost and need to acquire it again */
566 if (db_ret.db_status == GUC_DOORBELL_DISABLED)
567 break;
568 476
569 DRM_WARN("Cookie mismatch. Expected %d, found %d\n", 477 /* we're not expecting the doorbell cookie to change behind our back */
570 db_cmp.cookie, db_ret.cookie); 478 cookie = READ_ONCE(db->cookie);
571 479 WARN_ON_ONCE(xchg(&db->cookie, cookie + 1) != cookie);
572 /* update the cookie to newly read cookie from GuC */
573 db_cmp.cookie = db_ret.cookie;
574 db_exc.cookie = db_ret.cookie + 1;
575 if (db_exc.cookie == 0)
576 db_exc.cookie = 1;
577 }
578 480
579 return ret; 481 /* XXX: doorbell was lost and need to acquire it again */
482 GEM_BUG_ON(db->db_status != GUC_DOORBELL_ENABLED);
580} 483}
581 484
582/** 485/**
583 * __i915_guc_submit() - Submit commands through GuC 486 * i915_guc_submit() - Submit commands through GuC
584 * @rq: request associated with the commands 487 * @engine: engine associated with the commands
585 *
586 * The caller must have already called i915_guc_wq_reserve() above with
587 * a result of 0 (success), guaranteeing that there is space in the work
588 * queue for the new request, so enqueuing the item cannot fail.
589 *
590 * Bad Things Will Happen if the caller violates this protocol e.g. calls
591 * submit() when _reserve() says there's no space, or calls _submit()
592 * a different number of times from (successful) calls to _reserve().
593 * 488 *
594 * The only error here arises if the doorbell hardware isn't functioning 489 * The only error here arises if the doorbell hardware isn't functioning
595 * as expected, which really shouln't happen. 490 * as expected, which really shouln't happen.
596 */ 491 */
597static void __i915_guc_submit(struct drm_i915_gem_request *rq) 492static void i915_guc_submit(struct intel_engine_cs *engine)
598{ 493{
599 struct drm_i915_private *dev_priv = rq->i915; 494 struct drm_i915_private *dev_priv = engine->i915;
600 struct intel_engine_cs *engine = rq->engine; 495 struct intel_guc *guc = &dev_priv->guc;
601 unsigned int engine_id = engine->id;
602 struct intel_guc *guc = &rq->i915->guc;
603 struct i915_guc_client *client = guc->execbuf_client; 496 struct i915_guc_client *client = guc->execbuf_client;
604 unsigned long flags; 497 struct intel_engine_execlists * const execlists = &engine->execlists;
605 int b_ret; 498 struct execlist_port *port = execlists->port;
499 const unsigned int engine_id = engine->id;
500 unsigned int n;
606 501
607 /* WA to flush out the pending GMADR writes to ring buffer. */ 502 for (n = 0; n < ARRAY_SIZE(execlists->port); n++) {
608 if (i915_vma_is_map_and_fenceable(rq->ring->vma)) 503 struct drm_i915_gem_request *rq;
609 POSTING_READ_FW(GUC_STATUS); 504 unsigned int count;
610 505
611 spin_lock_irqsave(&client->wq_lock, flags); 506 rq = port_unpack(&port[n], &count);
507 if (rq && count == 0) {
508 port_set(&port[n], port_pack(rq, ++count));
612 509
613 guc_wq_item_append(client, rq); 510 if (i915_vma_is_map_and_fenceable(rq->ring->vma))
614 b_ret = guc_ring_doorbell(client); 511 POSTING_READ_FW(GUC_STATUS);
615 512
616 client->submissions[engine_id] += 1; 513 spin_lock(&client->wq_lock);
617 514
618 spin_unlock_irqrestore(&client->wq_lock, flags); 515 guc_wq_item_append(client, rq);
619} 516 guc_ring_doorbell(client);
620 517
621static void i915_guc_submit(struct drm_i915_gem_request *rq) 518 client->submissions[engine_id] += 1;
622{ 519
623 __i915_gem_request_submit(rq); 520 spin_unlock(&client->wq_lock);
624 __i915_guc_submit(rq); 521 }
522 }
625} 523}
626 524
627static void nested_enable_signaling(struct drm_i915_gem_request *rq) 525static void nested_enable_signaling(struct drm_i915_gem_request *rq)
@@ -655,27 +553,33 @@ static void port_assign(struct execlist_port *port,
655 if (port_isset(port)) 553 if (port_isset(port))
656 i915_gem_request_put(port_request(port)); 554 i915_gem_request_put(port_request(port));
657 555
658 port_set(port, i915_gem_request_get(rq)); 556 port_set(port, port_pack(i915_gem_request_get(rq), port_count(port)));
659 nested_enable_signaling(rq); 557 nested_enable_signaling(rq);
660} 558}
661 559
662static bool i915_guc_dequeue(struct intel_engine_cs *engine) 560static void i915_guc_dequeue(struct intel_engine_cs *engine)
663{ 561{
664 struct execlist_port *port = engine->execlist_port; 562 struct intel_engine_execlists * const execlists = &engine->execlists;
665 struct drm_i915_gem_request *last = port_request(port); 563 struct execlist_port *port = execlists->port;
666 struct rb_node *rb; 564 struct drm_i915_gem_request *last = NULL;
565 const struct execlist_port * const last_port =
566 &execlists->port[execlists->port_mask];
667 bool submit = false; 567 bool submit = false;
568 struct rb_node *rb;
569
570 if (port_isset(port))
571 port++;
668 572
669 spin_lock_irq(&engine->timeline->lock); 573 spin_lock_irq(&engine->timeline->lock);
670 rb = engine->execlist_first; 574 rb = execlists->first;
671 GEM_BUG_ON(rb_first(&engine->execlist_queue) != rb); 575 GEM_BUG_ON(rb_first(&execlists->queue) != rb);
672 while (rb) { 576 while (rb) {
673 struct i915_priolist *p = rb_entry(rb, typeof(*p), node); 577 struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
674 struct drm_i915_gem_request *rq, *rn; 578 struct drm_i915_gem_request *rq, *rn;
675 579
676 list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) { 580 list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
677 if (last && rq->ctx != last->ctx) { 581 if (last && rq->ctx != last->ctx) {
678 if (port != engine->execlist_port) { 582 if (port == last_port) {
679 __list_del_many(&p->requests, 583 __list_del_many(&p->requests,
680 &rq->priotree.link); 584 &rq->priotree.link);
681 goto done; 585 goto done;
@@ -689,50 +593,48 @@ static bool i915_guc_dequeue(struct intel_engine_cs *engine)
689 INIT_LIST_HEAD(&rq->priotree.link); 593 INIT_LIST_HEAD(&rq->priotree.link);
690 rq->priotree.priority = INT_MAX; 594 rq->priotree.priority = INT_MAX;
691 595
692 i915_guc_submit(rq); 596 __i915_gem_request_submit(rq);
693 trace_i915_gem_request_in(rq, port_index(port, engine)); 597 trace_i915_gem_request_in(rq, port_index(port, execlists));
694 last = rq; 598 last = rq;
695 submit = true; 599 submit = true;
696 } 600 }
697 601
698 rb = rb_next(rb); 602 rb = rb_next(rb);
699 rb_erase(&p->node, &engine->execlist_queue); 603 rb_erase(&p->node, &execlists->queue);
700 INIT_LIST_HEAD(&p->requests); 604 INIT_LIST_HEAD(&p->requests);
701 if (p->priority != I915_PRIORITY_NORMAL) 605 if (p->priority != I915_PRIORITY_NORMAL)
702 kmem_cache_free(engine->i915->priorities, p); 606 kmem_cache_free(engine->i915->priorities, p);
703 } 607 }
704done: 608done:
705 engine->execlist_first = rb; 609 execlists->first = rb;
706 if (submit) 610 if (submit) {
707 port_assign(port, last); 611 port_assign(port, last);
612 i915_guc_submit(engine);
613 }
708 spin_unlock_irq(&engine->timeline->lock); 614 spin_unlock_irq(&engine->timeline->lock);
709
710 return submit;
711} 615}
712 616
713static void i915_guc_irq_handler(unsigned long data) 617static void i915_guc_irq_handler(unsigned long data)
714{ 618{
715 struct intel_engine_cs *engine = (struct intel_engine_cs *)data; 619 struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
716 struct execlist_port *port = engine->execlist_port; 620 struct intel_engine_execlists * const execlists = &engine->execlists;
621 struct execlist_port *port = execlists->port;
622 const struct execlist_port * const last_port =
623 &execlists->port[execlists->port_mask];
717 struct drm_i915_gem_request *rq; 624 struct drm_i915_gem_request *rq;
718 bool submit;
719 625
720 do { 626 rq = port_request(&port[0]);
721 rq = port_request(&port[0]); 627 while (rq && i915_gem_request_completed(rq)) {
722 while (rq && i915_gem_request_completed(rq)) { 628 trace_i915_gem_request_out(rq);
723 trace_i915_gem_request_out(rq); 629 i915_gem_request_put(rq);
724 i915_gem_request_put(rq);
725 630
726 port[0] = port[1]; 631 execlists_port_complete(execlists, port);
727 memset(&port[1], 0, sizeof(port[1]));
728 632
729 rq = port_request(&port[0]); 633 rq = port_request(&port[0]);
730 } 634 }
731 635
732 submit = false; 636 if (!port_isset(last_port))
733 if (!port_count(&port[1])) 637 i915_guc_dequeue(engine);
734 submit = i915_guc_dequeue(engine);
735 } while (submit);
736} 638}
737 639
738/* 640/*
@@ -913,8 +815,6 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
913 client->engines = engines; 815 client->engines = engines;
914 client->priority = priority; 816 client->priority = priority;
915 client->doorbell_id = GUC_DOORBELL_INVALID; 817 client->doorbell_id = GUC_DOORBELL_INVALID;
916 client->wq_offset = GUC_DB_SIZE;
917 client->wq_size = GUC_WQ_SIZE;
918 spin_lock_init(&client->wq_lock); 818 spin_lock_init(&client->wq_lock);
919 819
920 ret = ida_simple_get(&guc->stage_ids, 0, GUC_MAX_STAGE_DESCRIPTORS, 820 ret = ida_simple_get(&guc->stage_ids, 0, GUC_MAX_STAGE_DESCRIPTORS,
@@ -996,28 +896,39 @@ static void guc_client_free(struct i915_guc_client *client)
996 kfree(client); 896 kfree(client);
997} 897}
998 898
899static void guc_policy_init(struct guc_policy *policy)
900{
901 policy->execution_quantum = POLICY_DEFAULT_EXECUTION_QUANTUM_US;
902 policy->preemption_time = POLICY_DEFAULT_PREEMPTION_TIME_US;
903 policy->fault_time = POLICY_DEFAULT_FAULT_TIME_US;
904 policy->policy_flags = 0;
905}
906
999static void guc_policies_init(struct guc_policies *policies) 907static void guc_policies_init(struct guc_policies *policies)
1000{ 908{
1001 struct guc_policy *policy; 909 struct guc_policy *policy;
1002 u32 p, i; 910 u32 p, i;
1003 911
1004 policies->dpc_promote_time = 500000; 912 policies->dpc_promote_time = POLICY_DEFAULT_DPC_PROMOTE_TIME_US;
1005 policies->max_num_work_items = POLICY_MAX_NUM_WI; 913 policies->max_num_work_items = POLICY_MAX_NUM_WI;
1006 914
1007 for (p = 0; p < GUC_CLIENT_PRIORITY_NUM; p++) { 915 for (p = 0; p < GUC_CLIENT_PRIORITY_NUM; p++) {
1008 for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) { 916 for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) {
1009 policy = &policies->policy[p][i]; 917 policy = &policies->policy[p][i];
1010 918
1011 policy->execution_quantum = 1000000; 919 guc_policy_init(policy);
1012 policy->preemption_time = 500000;
1013 policy->fault_time = 250000;
1014 policy->policy_flags = 0;
1015 } 920 }
1016 } 921 }
1017 922
1018 policies->is_valid = 1; 923 policies->is_valid = 1;
1019} 924}
1020 925
926/*
927 * The first 80 dwords of the register state context, containing the
928 * execlists and ppgtt registers.
929 */
930#define LR_HW_CONTEXT_SIZE (80 * sizeof(u32))
931
1021static int guc_ads_create(struct intel_guc *guc) 932static int guc_ads_create(struct intel_guc *guc)
1022{ 933{
1023 struct drm_i915_private *dev_priv = guc_to_i915(guc); 934 struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -1032,6 +943,8 @@ static int guc_ads_create(struct intel_guc *guc)
1032 } __packed *blob; 943 } __packed *blob;
1033 struct intel_engine_cs *engine; 944 struct intel_engine_cs *engine;
1034 enum intel_engine_id id; 945 enum intel_engine_id id;
946 const u32 skipped_offset = LRC_HEADER_PAGES * PAGE_SIZE;
947 const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE;
1035 u32 base; 948 u32 base;
1036 949
1037 GEM_BUG_ON(guc->ads_vma); 950 GEM_BUG_ON(guc->ads_vma);
@@ -1062,13 +975,20 @@ static int guc_ads_create(struct intel_guc *guc)
1062 * engines after a reset. Here we use the Render ring default 975 * engines after a reset. Here we use the Render ring default
1063 * context, which must already exist and be pinned in the GGTT, 976 * context, which must already exist and be pinned in the GGTT,
1064 * so its address won't change after we've told the GuC where 977 * so its address won't change after we've told the GuC where
1065 * to find it. 978 * to find it. Note that we have to skip our header (1 page),
979 * because our GuC shared data is there.
1066 */ 980 */
1067 blob->ads.golden_context_lrca = 981 blob->ads.golden_context_lrca =
1068 dev_priv->engine[RCS]->status_page.ggtt_offset; 982 guc_ggtt_offset(dev_priv->kernel_context->engine[RCS].state) + skipped_offset;
1069 983
984 /*
985 * The GuC expects us to exclude the portion of the context image that
986 * it skips from the size it is to read. It starts reading from after
987 * the execlist context (so skipping the first page [PPHWSP] and 80
988 * dwords). Weird guc is weird.
989 */
1070 for_each_engine(engine, dev_priv, id) 990 for_each_engine(engine, dev_priv, id)
1071 blob->ads.eng_state_size[engine->guc_id] = engine->context_size; 991 blob->ads.eng_state_size[engine->guc_id] = engine->context_size - skipped_size;
1072 992
1073 base = guc_ggtt_offset(vma); 993 base = guc_ggtt_offset(vma);
1074 blob->ads.scheduler_policies = base + ptr_offset(blob, policies); 994 blob->ads.scheduler_policies = base + ptr_offset(blob, policies);
@@ -1221,6 +1141,19 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
1221 enum intel_engine_id id; 1141 enum intel_engine_id id;
1222 int err; 1142 int err;
1223 1143
1144 /*
1145 * We're using GuC work items for submitting work through GuC. Since
1146 * we're coalescing multiple requests from a single context into a
1147 * single work item prior to assigning it to execlist_port, we can
1148 * never have more work items than the total number of ports (for all
1149 * engines). The GuC firmware is controlling the HEAD of work queue,
1150 * and it is guaranteed that it will remove the work item from the
1151 * queue before our request is completed.
1152 */
1153 BUILD_BUG_ON(ARRAY_SIZE(engine->execlists.port) *
1154 sizeof(struct guc_wq_item) *
1155 I915_NUM_ENGINES > GUC_WQ_SIZE);
1156
1224 if (!client) { 1157 if (!client) {
1225 client = guc_client_alloc(dev_priv, 1158 client = guc_client_alloc(dev_priv,
1226 INTEL_INFO(dev_priv)->ring_mask, 1159 INTEL_INFO(dev_priv)->ring_mask,
@@ -1248,24 +1181,15 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
1248 guc_interrupts_capture(dev_priv); 1181 guc_interrupts_capture(dev_priv);
1249 1182
1250 for_each_engine(engine, dev_priv, id) { 1183 for_each_engine(engine, dev_priv, id) {
1251 const int wqi_size = sizeof(struct guc_wq_item); 1184 struct intel_engine_execlists * const execlists = &engine->execlists;
1252 struct drm_i915_gem_request *rq;
1253
1254 /* The tasklet was initialised by execlists, and may be in 1185 /* The tasklet was initialised by execlists, and may be in
1255 * a state of flux (across a reset) and so we just want to 1186 * a state of flux (across a reset) and so we just want to
1256 * take over the callback without changing any other state 1187 * take over the callback without changing any other state
1257 * in the tasklet. 1188 * in the tasklet.
1258 */ 1189 */
1259 engine->irq_tasklet.func = i915_guc_irq_handler; 1190 execlists->irq_tasklet.func = i915_guc_irq_handler;
1260 clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); 1191 clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
1261 1192 tasklet_schedule(&execlists->irq_tasklet);
1262 /* Replay the current set of previously submitted requests */
1263 spin_lock_irq(&engine->timeline->lock);
1264 list_for_each_entry(rq, &engine->timeline->requests, link) {
1265 guc_client_update_wq_rsvd(client, wqi_size);
1266 __i915_guc_submit(rq);
1267 }
1268 spin_unlock_irq(&engine->timeline->lock);
1269 } 1193 }
1270 1194
1271 return 0; 1195 return 0;
@@ -1310,7 +1234,7 @@ int intel_guc_suspend(struct drm_i915_private *dev_priv)
1310 /* any value greater than GUC_POWER_D0 */ 1234 /* any value greater than GUC_POWER_D0 */
1311 data[1] = GUC_POWER_D1; 1235 data[1] = GUC_POWER_D1;
1312 /* first page is shared data with GuC */ 1236 /* first page is shared data with GuC */
1313 data[2] = guc_ggtt_offset(ctx->engine[RCS].state); 1237 data[2] = guc_ggtt_offset(ctx->engine[RCS].state) + LRC_GUCSHR_PN * PAGE_SIZE;
1314 1238
1315 return intel_guc_send(guc, data, ARRAY_SIZE(data)); 1239 return intel_guc_send(guc, data, ARRAY_SIZE(data));
1316} 1240}
@@ -1328,7 +1252,7 @@ int intel_guc_resume(struct drm_i915_private *dev_priv)
1328 if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS) 1252 if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
1329 return 0; 1253 return 0;
1330 1254
1331 if (i915.guc_log_level >= 0) 1255 if (i915_modparams.guc_log_level >= 0)
1332 gen9_enable_guc_interrupts(dev_priv); 1256 gen9_enable_guc_interrupts(dev_priv);
1333 1257
1334 ctx = dev_priv->kernel_context; 1258 ctx = dev_priv->kernel_context;
@@ -1336,7 +1260,7 @@ int intel_guc_resume(struct drm_i915_private *dev_priv)
1336 data[0] = INTEL_GUC_ACTION_EXIT_S_STATE; 1260 data[0] = INTEL_GUC_ACTION_EXIT_S_STATE;
1337 data[1] = GUC_POWER_D0; 1261 data[1] = GUC_POWER_D0;
1338 /* first page is shared data with GuC */ 1262 /* first page is shared data with GuC */
1339 data[2] = guc_ggtt_offset(ctx->engine[RCS].state); 1263 data[2] = guc_ggtt_offset(ctx->engine[RCS].state) + LRC_GUCSHR_PN * PAGE_SIZE;
1340 1264
1341 return intel_guc_send(guc, data, ARRAY_SIZE(data)); 1265 return intel_guc_send(guc, data, ARRAY_SIZE(data));
1342} 1266}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 2fe92d2e0f62..efd7827ff181 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -126,7 +126,7 @@ static const u32 hpd_bxt[HPD_NUM_PINS] = {
126 POSTING_READ(GEN8_##type##_IIR(which)); \ 126 POSTING_READ(GEN8_##type##_IIR(which)); \
127} while (0) 127} while (0)
128 128
129#define GEN5_IRQ_RESET(type) do { \ 129#define GEN3_IRQ_RESET(type) do { \
130 I915_WRITE(type##IMR, 0xffffffff); \ 130 I915_WRITE(type##IMR, 0xffffffff); \
131 POSTING_READ(type##IMR); \ 131 POSTING_READ(type##IMR); \
132 I915_WRITE(type##IER, 0); \ 132 I915_WRITE(type##IER, 0); \
@@ -136,10 +136,20 @@ static const u32 hpd_bxt[HPD_NUM_PINS] = {
136 POSTING_READ(type##IIR); \ 136 POSTING_READ(type##IIR); \
137} while (0) 137} while (0)
138 138
139#define GEN2_IRQ_RESET(type) do { \
140 I915_WRITE16(type##IMR, 0xffff); \
141 POSTING_READ16(type##IMR); \
142 I915_WRITE16(type##IER, 0); \
143 I915_WRITE16(type##IIR, 0xffff); \
144 POSTING_READ16(type##IIR); \
145 I915_WRITE16(type##IIR, 0xffff); \
146 POSTING_READ16(type##IIR); \
147} while (0)
148
139/* 149/*
140 * We should clear IMR at preinstall/uninstall, and just check at postinstall. 150 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
141 */ 151 */
142static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv, 152static void gen3_assert_iir_is_zero(struct drm_i915_private *dev_priv,
143 i915_reg_t reg) 153 i915_reg_t reg)
144{ 154{
145 u32 val = I915_READ(reg); 155 u32 val = I915_READ(reg);
@@ -155,20 +165,43 @@ static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
155 POSTING_READ(reg); 165 POSTING_READ(reg);
156} 166}
157 167
168static void gen2_assert_iir_is_zero(struct drm_i915_private *dev_priv,
169 i915_reg_t reg)
170{
171 u16 val = I915_READ16(reg);
172
173 if (val == 0)
174 return;
175
176 WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
177 i915_mmio_reg_offset(reg), val);
178 I915_WRITE16(reg, 0xffff);
179 POSTING_READ16(reg);
180 I915_WRITE16(reg, 0xffff);
181 POSTING_READ16(reg);
182}
183
158#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \ 184#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
159 gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \ 185 gen3_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
160 I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \ 186 I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
161 I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \ 187 I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
162 POSTING_READ(GEN8_##type##_IMR(which)); \ 188 POSTING_READ(GEN8_##type##_IMR(which)); \
163} while (0) 189} while (0)
164 190
165#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \ 191#define GEN3_IRQ_INIT(type, imr_val, ier_val) do { \
166 gen5_assert_iir_is_zero(dev_priv, type##IIR); \ 192 gen3_assert_iir_is_zero(dev_priv, type##IIR); \
167 I915_WRITE(type##IER, (ier_val)); \ 193 I915_WRITE(type##IER, (ier_val)); \
168 I915_WRITE(type##IMR, (imr_val)); \ 194 I915_WRITE(type##IMR, (imr_val)); \
169 POSTING_READ(type##IMR); \ 195 POSTING_READ(type##IMR); \
170} while (0) 196} while (0)
171 197
198#define GEN2_IRQ_INIT(type, imr_val, ier_val) do { \
199 gen2_assert_iir_is_zero(dev_priv, type##IIR); \
200 I915_WRITE16(type##IER, (ier_val)); \
201 I915_WRITE16(type##IMR, (imr_val)); \
202 POSTING_READ16(type##IMR); \
203} while (0)
204
172static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir); 205static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
173static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir); 206static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
174 207
@@ -534,62 +567,16 @@ void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
534 POSTING_READ(SDEIMR); 567 POSTING_READ(SDEIMR);
535} 568}
536 569
537static void 570u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
538__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 571 enum pipe pipe)
539 u32 enable_mask, u32 status_mask)
540{ 572{
541 i915_reg_t reg = PIPESTAT(pipe); 573 u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
542 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; 574 u32 enable_mask = status_mask << 16;
543
544 lockdep_assert_held(&dev_priv->irq_lock);
545 WARN_ON(!intel_irqs_enabled(dev_priv));
546
547 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
548 status_mask & ~PIPESTAT_INT_STATUS_MASK,
549 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
550 pipe_name(pipe), enable_mask, status_mask))
551 return;
552
553 if ((pipestat & enable_mask) == enable_mask)
554 return;
555
556 dev_priv->pipestat_irq_mask[pipe] |= status_mask;
557
558 /* Enable the interrupt, clear any pending status */
559 pipestat |= enable_mask | status_mask;
560 I915_WRITE(reg, pipestat);
561 POSTING_READ(reg);
562}
563
564static void
565__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
566 u32 enable_mask, u32 status_mask)
567{
568 i915_reg_t reg = PIPESTAT(pipe);
569 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
570 575
571 lockdep_assert_held(&dev_priv->irq_lock); 576 lockdep_assert_held(&dev_priv->irq_lock);
572 WARN_ON(!intel_irqs_enabled(dev_priv));
573
574 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
575 status_mask & ~PIPESTAT_INT_STATUS_MASK,
576 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
577 pipe_name(pipe), enable_mask, status_mask))
578 return;
579
580 if ((pipestat & enable_mask) == 0)
581 return;
582
583 dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
584 577
585 pipestat &= ~enable_mask; 578 if (INTEL_GEN(dev_priv) < 5)
586 I915_WRITE(reg, pipestat); 579 goto out;
587 POSTING_READ(reg);
588}
589
590static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
591{
592 u32 enable_mask = status_mask << 16;
593 580
594 /* 581 /*
595 * On pipe A we don't support the PSR interrupt yet, 582 * On pipe A we don't support the PSR interrupt yet,
@@ -612,35 +599,59 @@ static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
612 if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV) 599 if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
613 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV; 600 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
614 601
602out:
603 WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
604 status_mask & ~PIPESTAT_INT_STATUS_MASK,
605 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
606 pipe_name(pipe), enable_mask, status_mask);
607
615 return enable_mask; 608 return enable_mask;
616} 609}
617 610
618void 611void i915_enable_pipestat(struct drm_i915_private *dev_priv,
619i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 612 enum pipe pipe, u32 status_mask)
620 u32 status_mask)
621{ 613{
614 i915_reg_t reg = PIPESTAT(pipe);
622 u32 enable_mask; 615 u32 enable_mask;
623 616
624 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 617 WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
625 enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm, 618 "pipe %c: status_mask=0x%x\n",
626 status_mask); 619 pipe_name(pipe), status_mask);
627 else 620
628 enable_mask = status_mask << 16; 621 lockdep_assert_held(&dev_priv->irq_lock);
629 __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask); 622 WARN_ON(!intel_irqs_enabled(dev_priv));
623
624 if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
625 return;
626
627 dev_priv->pipestat_irq_mask[pipe] |= status_mask;
628 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
629
630 I915_WRITE(reg, enable_mask | status_mask);
631 POSTING_READ(reg);
630} 632}
631 633
632void 634void i915_disable_pipestat(struct drm_i915_private *dev_priv,
633i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 635 enum pipe pipe, u32 status_mask)
634 u32 status_mask)
635{ 636{
637 i915_reg_t reg = PIPESTAT(pipe);
636 u32 enable_mask; 638 u32 enable_mask;
637 639
638 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 640 WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
639 enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm, 641 "pipe %c: status_mask=0x%x\n",
640 status_mask); 642 pipe_name(pipe), status_mask);
641 else 643
642 enable_mask = status_mask << 16; 644 lockdep_assert_held(&dev_priv->irq_lock);
643 __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask); 645 WARN_ON(!intel_irqs_enabled(dev_priv));
646
647 if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
648 return;
649
650 dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
651 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
652
653 I915_WRITE(reg, enable_mask | status_mask);
654 POSTING_READ(reg);
644} 655}
645 656
646/** 657/**
@@ -772,6 +783,57 @@ static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
772 return I915_READ(PIPE_FRMCOUNT_G4X(pipe)); 783 return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
773} 784}
774 785
786/*
787 * On certain encoders on certain platforms, pipe
788 * scanline register will not work to get the scanline,
789 * since the timings are driven from the PORT or issues
790 * with scanline register updates.
791 * This function will use Framestamp and current
792 * timestamp registers to calculate the scanline.
793 */
794static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
795{
796 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
797 struct drm_vblank_crtc *vblank =
798 &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
799 const struct drm_display_mode *mode = &vblank->hwmode;
800 u32 vblank_start = mode->crtc_vblank_start;
801 u32 vtotal = mode->crtc_vtotal;
802 u32 htotal = mode->crtc_htotal;
803 u32 clock = mode->crtc_clock;
804 u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;
805
806 /*
807 * To avoid the race condition where we might cross into the
808 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
809 * reads. We make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
810 * during the same frame.
811 */
812 do {
813 /*
814 * This field provides read back of the display
815 * pipe frame time stamp. The time stamp value
816 * is sampled at every start of vertical blank.
817 */
818 scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
819
820 /*
821 * The TIMESTAMP_CTR register has the current
822 * time stamp value.
823 */
824 scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR);
825
826 scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
827 } while (scan_post_time != scan_prev_time);
828
829 scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
830 clock), 1000 * htotal);
831 scanline = min(scanline, vtotal - 1);
832 scanline = (scanline + vblank_start) % vtotal;
833
834 return scanline;
835}
836
775/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */ 837/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
776static int __intel_get_crtc_scanline(struct intel_crtc *crtc) 838static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
777{ 839{
@@ -788,6 +850,9 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
788 vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)]; 850 vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
789 mode = &vblank->hwmode; 851 mode = &vblank->hwmode;
790 852
853 if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
854 return __intel_get_crtc_scanline_from_timestamp(crtc);
855
791 vtotal = mode->crtc_vtotal; 856 vtotal = mode->crtc_vtotal;
792 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 857 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
793 vtotal /= 2; 858 vtotal /= 2;
@@ -1005,6 +1070,8 @@ static void notify_ring(struct intel_engine_cs *engine)
1005 spin_lock(&engine->breadcrumbs.irq_lock); 1070 spin_lock(&engine->breadcrumbs.irq_lock);
1006 wait = engine->breadcrumbs.irq_wait; 1071 wait = engine->breadcrumbs.irq_wait;
1007 if (wait) { 1072 if (wait) {
1073 bool wakeup = engine->irq_seqno_barrier;
1074
1008 /* We use a callback from the dma-fence to submit 1075 /* We use a callback from the dma-fence to submit
1009 * requests after waiting on our own requests. To 1076 * requests after waiting on our own requests. To
1010 * ensure minimum delay in queuing the next request to 1077 * ensure minimum delay in queuing the next request to
@@ -1017,12 +1084,18 @@ static void notify_ring(struct intel_engine_cs *engine)
1017 * and many waiters. 1084 * and many waiters.
1018 */ 1085 */
1019 if (i915_seqno_passed(intel_engine_get_seqno(engine), 1086 if (i915_seqno_passed(intel_engine_get_seqno(engine),
1020 wait->seqno) && 1087 wait->seqno)) {
1021 !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, 1088 struct drm_i915_gem_request *waiter = wait->request;
1022 &wait->request->fence.flags)) 1089
1023 rq = i915_gem_request_get(wait->request); 1090 wakeup = true;
1091 if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
1092 &waiter->fence.flags) &&
1093 intel_wait_check_request(wait, waiter))
1094 rq = i915_gem_request_get(waiter);
1095 }
1024 1096
1025 wake_up_process(wait->tsk); 1097 if (wakeup)
1098 wake_up_process(wait->tsk);
1026 } else { 1099 } else {
1027 __intel_engine_disarm_breadcrumbs(engine); 1100 __intel_engine_disarm_breadcrumbs(engine);
1028 } 1101 }
@@ -1305,10 +1378,11 @@ static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
1305static void 1378static void
1306gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift) 1379gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
1307{ 1380{
1381 struct intel_engine_execlists * const execlists = &engine->execlists;
1308 bool tasklet = false; 1382 bool tasklet = false;
1309 1383
1310 if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) { 1384 if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) {
1311 if (port_count(&engine->execlist_port[0])) { 1385 if (port_count(&execlists->port[0])) {
1312 __set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); 1386 __set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
1313 tasklet = true; 1387 tasklet = true;
1314 } 1388 }
@@ -1316,11 +1390,11 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
1316 1390
1317 if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) { 1391 if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) {
1318 notify_ring(engine); 1392 notify_ring(engine);
1319 tasklet |= i915.enable_guc_submission; 1393 tasklet |= i915_modparams.enable_guc_submission;
1320 } 1394 }
1321 1395
1322 if (tasklet) 1396 if (tasklet)
1323 tasklet_hi_schedule(&engine->irq_tasklet); 1397 tasklet_hi_schedule(&execlists->irq_tasklet);
1324} 1398}
1325 1399
1326static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv, 1400static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
@@ -1573,11 +1647,11 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1573 * bonkers. So let's just wait for the next vblank and read 1647 * bonkers. So let's just wait for the next vblank and read
1574 * out the buggy result. 1648 * out the buggy result.
1575 * 1649 *
1576 * On CHV sometimes the second CRC is bonkers as well, so 1650 * On GEN8+ sometimes the second CRC is bonkers as well, so
1577 * don't trust that one either. 1651 * don't trust that one either.
1578 */ 1652 */
1579 if (pipe_crc->skipped == 0 || 1653 if (pipe_crc->skipped == 0 ||
1580 (IS_CHERRYVIEW(dev_priv) && pipe_crc->skipped == 1)) { 1654 (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
1581 pipe_crc->skipped++; 1655 pipe_crc->skipped++;
1582 spin_unlock(&pipe_crc->lock); 1656 spin_unlock(&pipe_crc->lock);
1583 return; 1657 return;
@@ -1706,8 +1780,21 @@ static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
1706 } 1780 }
1707} 1781}
1708 1782
1709static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv, 1783static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
1710 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 1784{
1785 enum pipe pipe;
1786
1787 for_each_pipe(dev_priv, pipe) {
1788 I915_WRITE(PIPESTAT(pipe),
1789 PIPESTAT_INT_STATUS_MASK |
1790 PIPE_FIFO_UNDERRUN_STATUS);
1791
1792 dev_priv->pipestat_irq_mask[pipe] = 0;
1793 }
1794}
1795
1796static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
1797 u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1711{ 1798{
1712 int pipe; 1799 int pipe;
1713 1800
@@ -1720,7 +1807,7 @@ static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
1720 1807
1721 for_each_pipe(dev_priv, pipe) { 1808 for_each_pipe(dev_priv, pipe) {
1722 i915_reg_t reg; 1809 i915_reg_t reg;
1723 u32 mask, iir_bit = 0; 1810 u32 status_mask, enable_mask, iir_bit = 0;
1724 1811
1725 /* 1812 /*
1726 * PIPESTAT bits get signalled even when the interrupt is 1813 * PIPESTAT bits get signalled even when the interrupt is
@@ -1731,7 +1818,7 @@ static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
1731 */ 1818 */
1732 1819
1733 /* fifo underruns are filterered in the underrun handler. */ 1820 /* fifo underruns are filterered in the underrun handler. */
1734 mask = PIPE_FIFO_UNDERRUN_STATUS; 1821 status_mask = PIPE_FIFO_UNDERRUN_STATUS;
1735 1822
1736 switch (pipe) { 1823 switch (pipe) {
1737 case PIPE_A: 1824 case PIPE_A:
@@ -1745,25 +1832,92 @@ static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
1745 break; 1832 break;
1746 } 1833 }
1747 if (iir & iir_bit) 1834 if (iir & iir_bit)
1748 mask |= dev_priv->pipestat_irq_mask[pipe]; 1835 status_mask |= dev_priv->pipestat_irq_mask[pipe];
1749 1836
1750 if (!mask) 1837 if (!status_mask)
1751 continue; 1838 continue;
1752 1839
1753 reg = PIPESTAT(pipe); 1840 reg = PIPESTAT(pipe);
1754 mask |= PIPESTAT_INT_ENABLE_MASK; 1841 pipe_stats[pipe] = I915_READ(reg) & status_mask;
1755 pipe_stats[pipe] = I915_READ(reg) & mask; 1842 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
1756 1843
1757 /* 1844 /*
1758 * Clear the PIPE*STAT regs before the IIR 1845 * Clear the PIPE*STAT regs before the IIR
1759 */ 1846 */
1760 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS | 1847 if (pipe_stats[pipe])
1761 PIPESTAT_INT_STATUS_MASK)) 1848 I915_WRITE(reg, enable_mask | pipe_stats[pipe]);
1762 I915_WRITE(reg, pipe_stats[pipe]);
1763 } 1849 }
1764 spin_unlock(&dev_priv->irq_lock); 1850 spin_unlock(&dev_priv->irq_lock);
1765} 1851}
1766 1852
1853static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1854 u16 iir, u32 pipe_stats[I915_MAX_PIPES])
1855{
1856 enum pipe pipe;
1857
1858 for_each_pipe(dev_priv, pipe) {
1859 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1860 drm_handle_vblank(&dev_priv->drm, pipe);
1861
1862 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1863 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1864
1865 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1866 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1867 }
1868}
1869
1870static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1871 u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1872{
1873 bool blc_event = false;
1874 enum pipe pipe;
1875
1876 for_each_pipe(dev_priv, pipe) {
1877 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1878 drm_handle_vblank(&dev_priv->drm, pipe);
1879
1880 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1881 blc_event = true;
1882
1883 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1884 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1885
1886 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1887 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1888 }
1889
1890 if (blc_event || (iir & I915_ASLE_INTERRUPT))
1891 intel_opregion_asle_intr(dev_priv);
1892}
1893
1894static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1895 u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1896{
1897 bool blc_event = false;
1898 enum pipe pipe;
1899
1900 for_each_pipe(dev_priv, pipe) {
1901 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1902 drm_handle_vblank(&dev_priv->drm, pipe);
1903
1904 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1905 blc_event = true;
1906
1907 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1908 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1909
1910 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1911 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1912 }
1913
1914 if (blc_event || (iir & I915_ASLE_INTERRUPT))
1915 intel_opregion_asle_intr(dev_priv);
1916
1917 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1918 gmbus_irq_handler(dev_priv);
1919}
1920
1767static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1921static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1768 u32 pipe_stats[I915_MAX_PIPES]) 1922 u32 pipe_stats[I915_MAX_PIPES])
1769{ 1923{
@@ -1879,7 +2033,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1879 2033
1880 /* Call regardless, as some status bits might not be 2034 /* Call regardless, as some status bits might not be
1881 * signalled in iir */ 2035 * signalled in iir */
1882 valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats); 2036 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1883 2037
1884 if (iir & (I915_LPE_PIPE_A_INTERRUPT | 2038 if (iir & (I915_LPE_PIPE_A_INTERRUPT |
1885 I915_LPE_PIPE_B_INTERRUPT)) 2039 I915_LPE_PIPE_B_INTERRUPT))
@@ -1963,7 +2117,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1963 2117
1964 /* Call regardless, as some status bits might not be 2118 /* Call regardless, as some status bits might not be
1965 * signalled in iir */ 2119 * signalled in iir */
1966 valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats); 2120 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1967 2121
1968 if (iir & (I915_LPE_PIPE_A_INTERRUPT | 2122 if (iir & (I915_LPE_PIPE_A_INTERRUPT |
1969 I915_LPE_PIPE_B_INTERRUPT | 2123 I915_LPE_PIPE_B_INTERRUPT |
@@ -2860,7 +3014,7 @@ static void ibx_irq_reset(struct drm_i915_private *dev_priv)
2860 if (HAS_PCH_NOP(dev_priv)) 3014 if (HAS_PCH_NOP(dev_priv))
2861 return; 3015 return;
2862 3016
2863 GEN5_IRQ_RESET(SDE); 3017 GEN3_IRQ_RESET(SDE);
2864 3018
2865 if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 3019 if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
2866 I915_WRITE(SERR_INT, 0xffffffff); 3020 I915_WRITE(SERR_INT, 0xffffffff);
@@ -2888,15 +3042,13 @@ static void ibx_irq_pre_postinstall(struct drm_device *dev)
2888 3042
2889static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv) 3043static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
2890{ 3044{
2891 GEN5_IRQ_RESET(GT); 3045 GEN3_IRQ_RESET(GT);
2892 if (INTEL_GEN(dev_priv) >= 6) 3046 if (INTEL_GEN(dev_priv) >= 6)
2893 GEN5_IRQ_RESET(GEN6_PM); 3047 GEN3_IRQ_RESET(GEN6_PM);
2894} 3048}
2895 3049
2896static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 3050static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
2897{ 3051{
2898 enum pipe pipe;
2899
2900 if (IS_CHERRYVIEW(dev_priv)) 3052 if (IS_CHERRYVIEW(dev_priv))
2901 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 3053 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
2902 else 3054 else
@@ -2905,14 +3057,9 @@ static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
2905 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); 3057 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
2906 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3058 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2907 3059
2908 for_each_pipe(dev_priv, pipe) { 3060 i9xx_pipestat_irq_reset(dev_priv);
2909 I915_WRITE(PIPESTAT(pipe),
2910 PIPE_FIFO_UNDERRUN_STATUS |
2911 PIPESTAT_INT_STATUS_MASK);
2912 dev_priv->pipestat_irq_mask[pipe] = 0;
2913 }
2914 3061
2915 GEN5_IRQ_RESET(VLV_); 3062 GEN3_IRQ_RESET(VLV_);
2916 dev_priv->irq_mask = ~0; 3063 dev_priv->irq_mask = ~0;
2917} 3064}
2918 3065
@@ -2922,8 +3069,7 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
2922 u32 enable_mask; 3069 u32 enable_mask;
2923 enum pipe pipe; 3070 enum pipe pipe;
2924 3071
2925 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | 3072 pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
2926 PIPE_CRC_DONE_INTERRUPT_STATUS;
2927 3073
2928 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3074 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
2929 for_each_pipe(dev_priv, pipe) 3075 for_each_pipe(dev_priv, pipe)
@@ -2943,7 +3089,7 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
2943 3089
2944 dev_priv->irq_mask = ~enable_mask; 3090 dev_priv->irq_mask = ~enable_mask;
2945 3091
2946 GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask); 3092 GEN3_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
2947} 3093}
2948 3094
2949/* drm_dma.h hooks 3095/* drm_dma.h hooks
@@ -2952,9 +3098,10 @@ static void ironlake_irq_reset(struct drm_device *dev)
2952{ 3098{
2953 struct drm_i915_private *dev_priv = to_i915(dev); 3099 struct drm_i915_private *dev_priv = to_i915(dev);
2954 3100
2955 I915_WRITE(HWSTAM, 0xffffffff); 3101 if (IS_GEN5(dev_priv))
3102 I915_WRITE(HWSTAM, 0xffffffff);
2956 3103
2957 GEN5_IRQ_RESET(DE); 3104 GEN3_IRQ_RESET(DE);
2958 if (IS_GEN7(dev_priv)) 3105 if (IS_GEN7(dev_priv))
2959 I915_WRITE(GEN7_ERR_INT, 0xffffffff); 3106 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
2960 3107
@@ -2963,7 +3110,7 @@ static void ironlake_irq_reset(struct drm_device *dev)
2963 ibx_irq_reset(dev_priv); 3110 ibx_irq_reset(dev_priv);
2964} 3111}
2965 3112
2966static void valleyview_irq_preinstall(struct drm_device *dev) 3113static void valleyview_irq_reset(struct drm_device *dev)
2967{ 3114{
2968 struct drm_i915_private *dev_priv = to_i915(dev); 3115 struct drm_i915_private *dev_priv = to_i915(dev);
2969 3116
@@ -3001,9 +3148,9 @@ static void gen8_irq_reset(struct drm_device *dev)
3001 POWER_DOMAIN_PIPE(pipe))) 3148 POWER_DOMAIN_PIPE(pipe)))
3002 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3149 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3003 3150
3004 GEN5_IRQ_RESET(GEN8_DE_PORT_); 3151 GEN3_IRQ_RESET(GEN8_DE_PORT_);
3005 GEN5_IRQ_RESET(GEN8_DE_MISC_); 3152 GEN3_IRQ_RESET(GEN8_DE_MISC_);
3006 GEN5_IRQ_RESET(GEN8_PCU_); 3153 GEN3_IRQ_RESET(GEN8_PCU_);
3007 3154
3008 if (HAS_PCH_SPLIT(dev_priv)) 3155 if (HAS_PCH_SPLIT(dev_priv))
3009 ibx_irq_reset(dev_priv); 3156 ibx_irq_reset(dev_priv);
@@ -3037,7 +3184,7 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
3037 synchronize_irq(dev_priv->drm.irq); 3184 synchronize_irq(dev_priv->drm.irq);
3038} 3185}
3039 3186
3040static void cherryview_irq_preinstall(struct drm_device *dev) 3187static void cherryview_irq_reset(struct drm_device *dev)
3041{ 3188{
3042 struct drm_i915_private *dev_priv = to_i915(dev); 3189 struct drm_i915_private *dev_priv = to_i915(dev);
3043 3190
@@ -3046,7 +3193,7 @@ static void cherryview_irq_preinstall(struct drm_device *dev)
3046 3193
3047 gen8_gt_irq_reset(dev_priv); 3194 gen8_gt_irq_reset(dev_priv);
3048 3195
3049 GEN5_IRQ_RESET(GEN8_PCU_); 3196 GEN3_IRQ_RESET(GEN8_PCU_);
3050 3197
3051 spin_lock_irq(&dev_priv->irq_lock); 3198 spin_lock_irq(&dev_priv->irq_lock);
3052 if (dev_priv->display_irqs_enabled) 3199 if (dev_priv->display_irqs_enabled)
@@ -3111,7 +3258,15 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
3111 3258
3112static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3259static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3113{ 3260{
3114 u32 hotplug; 3261 u32 val, hotplug;
3262
3263 /* Display WA #1179 WaHardHangonHotPlug: cnp */
3264 if (HAS_PCH_CNP(dev_priv)) {
3265 val = I915_READ(SOUTH_CHICKEN1);
3266 val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
3267 val |= CHASSIS_CLK_REQ_DURATION(0xf);
3268 I915_WRITE(SOUTH_CHICKEN1, val);
3269 }
3115 3270
3116 /* Enable digital hotplug on the PCH */ 3271 /* Enable digital hotplug on the PCH */
3117 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3272 hotplug = I915_READ(PCH_PORT_HOTPLUG);
@@ -3238,10 +3393,12 @@ static void ibx_irq_postinstall(struct drm_device *dev)
3238 3393
3239 if (HAS_PCH_IBX(dev_priv)) 3394 if (HAS_PCH_IBX(dev_priv))
3240 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 3395 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3241 else 3396 else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3242 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 3397 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3398 else
3399 mask = SDE_GMBUS_CPT;
3243 3400
3244 gen5_assert_iir_is_zero(dev_priv, SDEIIR); 3401 gen3_assert_iir_is_zero(dev_priv, SDEIIR);
3245 I915_WRITE(SDEIMR, ~mask); 3402 I915_WRITE(SDEIMR, ~mask);
3246 3403
3247 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) || 3404 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
@@ -3272,7 +3429,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
3272 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 3429 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3273 } 3430 }
3274 3431
3275 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); 3432 GEN3_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3276 3433
3277 if (INTEL_GEN(dev_priv) >= 6) { 3434 if (INTEL_GEN(dev_priv) >= 6) {
3278 /* 3435 /*
@@ -3285,7 +3442,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
3285 } 3442 }
3286 3443
3287 dev_priv->pm_imr = 0xffffffff; 3444 dev_priv->pm_imr = 0xffffffff;
3288 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs); 3445 GEN3_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs);
3289 } 3446 }
3290} 3447}
3291 3448
@@ -3296,18 +3453,14 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
3296 3453
3297 if (INTEL_GEN(dev_priv) >= 7) { 3454 if (INTEL_GEN(dev_priv) >= 7) {
3298 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 3455 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3299 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | 3456 DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
3300 DE_PLANEB_FLIP_DONE_IVB |
3301 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3302 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 3457 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3303 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB | 3458 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3304 DE_DP_A_HOTPLUG_IVB); 3459 DE_DP_A_HOTPLUG_IVB);
3305 } else { 3460 } else {
3306 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 3461 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3307 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 3462 DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
3308 DE_AUX_CHANNEL_A | 3463 DE_PIPEA_CRC_DONE | DE_POISON);
3309 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3310 DE_POISON);
3311 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 3464 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3312 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | 3465 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3313 DE_DP_A_HOTPLUG); 3466 DE_DP_A_HOTPLUG);
@@ -3315,11 +3468,9 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
3315 3468
3316 dev_priv->irq_mask = ~display_mask; 3469 dev_priv->irq_mask = ~display_mask;
3317 3470
3318 I915_WRITE(HWSTAM, 0xeffe);
3319
3320 ibx_irq_pre_postinstall(dev); 3471 ibx_irq_pre_postinstall(dev);
3321 3472
3322 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask); 3473 GEN3_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3323 3474
3324 gen5_gt_irq_postinstall(dev); 3475 gen5_gt_irq_postinstall(dev);
3325 3476
@@ -3429,15 +3580,13 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3429 enum pipe pipe; 3580 enum pipe pipe;
3430 3581
3431 if (INTEL_GEN(dev_priv) >= 9) { 3582 if (INTEL_GEN(dev_priv) >= 9) {
3432 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE | 3583 de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3433 GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3434 de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | 3584 de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
3435 GEN9_AUX_CHANNEL_D; 3585 GEN9_AUX_CHANNEL_D;
3436 if (IS_GEN9_LP(dev_priv)) 3586 if (IS_GEN9_LP(dev_priv))
3437 de_port_masked |= BXT_DE_PORT_GMBUS; 3587 de_port_masked |= BXT_DE_PORT_GMBUS;
3438 } else { 3588 } else {
3439 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE | 3589 de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3440 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3441 } 3590 }
3442 3591
3443 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | 3592 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
@@ -3460,8 +3609,8 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3460 dev_priv->de_irq_mask[pipe], 3609 dev_priv->de_irq_mask[pipe],
3461 de_pipe_enables); 3610 de_pipe_enables);
3462 3611
3463 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables); 3612 GEN3_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
3464 GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked); 3613 GEN3_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
3465 3614
3466 if (IS_GEN9_LP(dev_priv)) 3615 if (IS_GEN9_LP(dev_priv))
3467 bxt_hpd_detection_setup(dev_priv); 3616 bxt_hpd_detection_setup(dev_priv);
@@ -3505,98 +3654,36 @@ static int cherryview_irq_postinstall(struct drm_device *dev)
3505 return 0; 3654 return 0;
3506} 3655}
3507 3656
3508static void gen8_irq_uninstall(struct drm_device *dev) 3657static void i8xx_irq_reset(struct drm_device *dev)
3509{
3510 struct drm_i915_private *dev_priv = to_i915(dev);
3511
3512 if (!dev_priv)
3513 return;
3514
3515 gen8_irq_reset(dev);
3516}
3517
3518static void valleyview_irq_uninstall(struct drm_device *dev)
3519{
3520 struct drm_i915_private *dev_priv = to_i915(dev);
3521
3522 if (!dev_priv)
3523 return;
3524
3525 I915_WRITE(VLV_MASTER_IER, 0);
3526 POSTING_READ(VLV_MASTER_IER);
3527
3528 gen5_gt_irq_reset(dev_priv);
3529
3530 I915_WRITE(HWSTAM, 0xffffffff);
3531
3532 spin_lock_irq(&dev_priv->irq_lock);
3533 if (dev_priv->display_irqs_enabled)
3534 vlv_display_irq_reset(dev_priv);
3535 spin_unlock_irq(&dev_priv->irq_lock);
3536}
3537
3538static void cherryview_irq_uninstall(struct drm_device *dev)
3539{ 3658{
3540 struct drm_i915_private *dev_priv = to_i915(dev); 3659 struct drm_i915_private *dev_priv = to_i915(dev);
3541 3660
3542 if (!dev_priv) 3661 i9xx_pipestat_irq_reset(dev_priv);
3543 return;
3544
3545 I915_WRITE(GEN8_MASTER_IRQ, 0);
3546 POSTING_READ(GEN8_MASTER_IRQ);
3547
3548 gen8_gt_irq_reset(dev_priv);
3549
3550 GEN5_IRQ_RESET(GEN8_PCU_);
3551
3552 spin_lock_irq(&dev_priv->irq_lock);
3553 if (dev_priv->display_irqs_enabled)
3554 vlv_display_irq_reset(dev_priv);
3555 spin_unlock_irq(&dev_priv->irq_lock);
3556}
3557
3558static void ironlake_irq_uninstall(struct drm_device *dev)
3559{
3560 struct drm_i915_private *dev_priv = to_i915(dev);
3561 3662
3562 if (!dev_priv) 3663 I915_WRITE16(HWSTAM, 0xffff);
3563 return;
3564
3565 ironlake_irq_reset(dev);
3566}
3567
3568static void i8xx_irq_preinstall(struct drm_device * dev)
3569{
3570 struct drm_i915_private *dev_priv = to_i915(dev);
3571 int pipe;
3572 3664
3573 for_each_pipe(dev_priv, pipe) 3665 GEN2_IRQ_RESET();
3574 I915_WRITE(PIPESTAT(pipe), 0);
3575 I915_WRITE16(IMR, 0xffff);
3576 I915_WRITE16(IER, 0x0);
3577 POSTING_READ16(IER);
3578} 3666}
3579 3667
3580static int i8xx_irq_postinstall(struct drm_device *dev) 3668static int i8xx_irq_postinstall(struct drm_device *dev)
3581{ 3669{
3582 struct drm_i915_private *dev_priv = to_i915(dev); 3670 struct drm_i915_private *dev_priv = to_i915(dev);
3671 u16 enable_mask;
3583 3672
3584 I915_WRITE16(EMR, 3673 I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE |
3585 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3674 I915_ERROR_MEMORY_REFRESH));
3586 3675
3587 /* Unmask the interrupts that we always want on. */ 3676 /* Unmask the interrupts that we always want on. */
3588 dev_priv->irq_mask = 3677 dev_priv->irq_mask =
3589 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3678 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3590 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3679 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
3591 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3680
3592 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 3681 enable_mask =
3593 I915_WRITE16(IMR, dev_priv->irq_mask); 3682 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3683 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3684 I915_USER_INTERRUPT;
3594 3685
3595 I915_WRITE16(IER, 3686 GEN2_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
3596 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3597 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3598 I915_USER_INTERRUPT);
3599 POSTING_READ16(IER);
3600 3687
3601 /* Interrupt setup is already guaranteed to be single-threaded, this is 3688 /* Interrupt setup is already guaranteed to be single-threaded, this is
3602 * just to make the assert_spin_locked check happy. */ 3689 * just to make the assert_spin_locked check happy. */
@@ -3608,17 +3695,11 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
3608 return 0; 3695 return 0;
3609} 3696}
3610 3697
3611/*
3612 * Returns true when a page flip has completed.
3613 */
3614static irqreturn_t i8xx_irq_handler(int irq, void *arg) 3698static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3615{ 3699{
3616 struct drm_device *dev = arg; 3700 struct drm_device *dev = arg;
3617 struct drm_i915_private *dev_priv = to_i915(dev); 3701 struct drm_i915_private *dev_priv = to_i915(dev);
3618 u16 iir, new_iir; 3702 irqreturn_t ret = IRQ_NONE;
3619 u32 pipe_stats[2];
3620 int pipe;
3621 irqreturn_t ret;
3622 3703
3623 if (!intel_irqs_enabled(dev_priv)) 3704 if (!intel_irqs_enabled(dev_priv))
3624 return IRQ_NONE; 3705 return IRQ_NONE;
@@ -3626,96 +3707,50 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3626 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 3707 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
3627 disable_rpm_wakeref_asserts(dev_priv); 3708 disable_rpm_wakeref_asserts(dev_priv);
3628 3709
3629 ret = IRQ_NONE; 3710 do {
3630 iir = I915_READ16(IIR); 3711 u32 pipe_stats[I915_MAX_PIPES] = {};
3631 if (iir == 0) 3712 u16 iir;
3632 goto out;
3633 3713
3634 while (iir) { 3714 iir = I915_READ16(IIR);
3635 /* Can't rely on pipestat interrupt bit in iir as it might 3715 if (iir == 0)
3636 * have been cleared after the pipestat interrupt was received. 3716 break;
3637 * It doesn't set the bit in iir again, but it still produces
3638 * interrupts (for non-MSI).
3639 */
3640 spin_lock(&dev_priv->irq_lock);
3641 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3642 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3643 3717
3644 for_each_pipe(dev_priv, pipe) { 3718 ret = IRQ_HANDLED;
3645 i915_reg_t reg = PIPESTAT(pipe);
3646 pipe_stats[pipe] = I915_READ(reg);
3647 3719
3648 /* 3720 /* Call regardless, as some status bits might not be
3649 * Clear the PIPE*STAT regs before the IIR 3721 * signalled in iir */
3650 */ 3722 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
3651 if (pipe_stats[pipe] & 0x8000ffff)
3652 I915_WRITE(reg, pipe_stats[pipe]);
3653 }
3654 spin_unlock(&dev_priv->irq_lock);
3655 3723
3656 I915_WRITE16(IIR, iir); 3724 I915_WRITE16(IIR, iir);
3657 new_iir = I915_READ16(IIR); /* Flush posted writes */
3658 3725
3659 if (iir & I915_USER_INTERRUPT) 3726 if (iir & I915_USER_INTERRUPT)
3660 notify_ring(dev_priv->engine[RCS]); 3727 notify_ring(dev_priv->engine[RCS]);
3661 3728
3662 for_each_pipe(dev_priv, pipe) { 3729 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3663 int plane = pipe; 3730 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3664 if (HAS_FBC(dev_priv))
3665 plane = !plane;
3666
3667 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
3668 drm_handle_vblank(&dev_priv->drm, pipe);
3669
3670 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3671 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
3672
3673 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3674 intel_cpu_fifo_underrun_irq_handler(dev_priv,
3675 pipe);
3676 }
3677 3731
3678 iir = new_iir; 3732 i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
3679 } 3733 } while (0);
3680 ret = IRQ_HANDLED;
3681 3734
3682out:
3683 enable_rpm_wakeref_asserts(dev_priv); 3735 enable_rpm_wakeref_asserts(dev_priv);
3684 3736
3685 return ret; 3737 return ret;
3686} 3738}
3687 3739
3688static void i8xx_irq_uninstall(struct drm_device * dev) 3740static void i915_irq_reset(struct drm_device *dev)
3689{
3690 struct drm_i915_private *dev_priv = to_i915(dev);
3691 int pipe;
3692
3693 for_each_pipe(dev_priv, pipe) {
3694 /* Clear enable bits; then clear status bits */
3695 I915_WRITE(PIPESTAT(pipe), 0);
3696 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3697 }
3698 I915_WRITE16(IMR, 0xffff);
3699 I915_WRITE16(IER, 0x0);
3700 I915_WRITE16(IIR, I915_READ16(IIR));
3701}
3702
3703static void i915_irq_preinstall(struct drm_device * dev)
3704{ 3741{
3705 struct drm_i915_private *dev_priv = to_i915(dev); 3742 struct drm_i915_private *dev_priv = to_i915(dev);
3706 int pipe;
3707 3743
3708 if (I915_HAS_HOTPLUG(dev_priv)) { 3744 if (I915_HAS_HOTPLUG(dev_priv)) {
3709 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 3745 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3710 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3746 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3711 } 3747 }
3712 3748
3713 I915_WRITE16(HWSTAM, 0xeffe); 3749 i9xx_pipestat_irq_reset(dev_priv);
3714 for_each_pipe(dev_priv, pipe) 3750
3715 I915_WRITE(PIPESTAT(pipe), 0); 3751 I915_WRITE(HWSTAM, 0xffffffff);
3716 I915_WRITE(IMR, 0xffffffff); 3752
3717 I915_WRITE(IER, 0x0); 3753 GEN3_IRQ_RESET();
3718 POSTING_READ(IER);
3719} 3754}
3720 3755
3721static int i915_irq_postinstall(struct drm_device *dev) 3756static int i915_irq_postinstall(struct drm_device *dev)
@@ -3723,15 +3758,14 @@ static int i915_irq_postinstall(struct drm_device *dev)
3723 struct drm_i915_private *dev_priv = to_i915(dev); 3758 struct drm_i915_private *dev_priv = to_i915(dev);
3724 u32 enable_mask; 3759 u32 enable_mask;
3725 3760
3726 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3761 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
3762 I915_ERROR_MEMORY_REFRESH));
3727 3763
3728 /* Unmask the interrupts that we always want on. */ 3764 /* Unmask the interrupts that we always want on. */
3729 dev_priv->irq_mask = 3765 dev_priv->irq_mask =
3730 ~(I915_ASLE_INTERRUPT | 3766 ~(I915_ASLE_INTERRUPT |
3731 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3767 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3732 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3768 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
3733 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3734 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3735 3769
3736 enable_mask = 3770 enable_mask =
3737 I915_ASLE_INTERRUPT | 3771 I915_ASLE_INTERRUPT |
@@ -3740,20 +3774,13 @@ static int i915_irq_postinstall(struct drm_device *dev)
3740 I915_USER_INTERRUPT; 3774 I915_USER_INTERRUPT;
3741 3775
3742 if (I915_HAS_HOTPLUG(dev_priv)) { 3776 if (I915_HAS_HOTPLUG(dev_priv)) {
3743 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3744 POSTING_READ(PORT_HOTPLUG_EN);
3745
3746 /* Enable in IER... */ 3777 /* Enable in IER... */
3747 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 3778 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3748 /* and unmask in IMR */ 3779 /* and unmask in IMR */
3749 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 3780 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3750 } 3781 }
3751 3782
3752 I915_WRITE(IMR, dev_priv->irq_mask); 3783 GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
3753 I915_WRITE(IER, enable_mask);
3754 POSTING_READ(IER);
3755
3756 i915_enable_asle_pipestat(dev_priv);
3757 3784
3758 /* Interrupt setup is already guaranteed to be single-threaded, this is 3785 /* Interrupt setup is already guaranteed to be single-threaded, this is
3759 * just to make the assert_spin_locked check happy. */ 3786 * just to make the assert_spin_locked check happy. */
@@ -3762,6 +3789,8 @@ static int i915_irq_postinstall(struct drm_device *dev)
3762 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3789 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3763 spin_unlock_irq(&dev_priv->irq_lock); 3790 spin_unlock_irq(&dev_priv->irq_lock);
3764 3791
3792 i915_enable_asle_pipestat(dev_priv);
3793
3765 return 0; 3794 return 0;
3766} 3795}
3767 3796
@@ -3769,8 +3798,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
3769{ 3798{
3770 struct drm_device *dev = arg; 3799 struct drm_device *dev = arg;
3771 struct drm_i915_private *dev_priv = to_i915(dev); 3800 struct drm_i915_private *dev_priv = to_i915(dev);
3772 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 3801 irqreturn_t ret = IRQ_NONE;
3773 int pipe, ret = IRQ_NONE;
3774 3802
3775 if (!intel_irqs_enabled(dev_priv)) 3803 if (!intel_irqs_enabled(dev_priv))
3776 return IRQ_NONE; 3804 return IRQ_NONE;
@@ -3778,131 +3806,56 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
3778 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 3806 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
3779 disable_rpm_wakeref_asserts(dev_priv); 3807 disable_rpm_wakeref_asserts(dev_priv);
3780 3808
3781 iir = I915_READ(IIR);
3782 do { 3809 do {
3783 bool irq_received = (iir) != 0; 3810 u32 pipe_stats[I915_MAX_PIPES] = {};
3784 bool blc_event = false; 3811 u32 hotplug_status = 0;
3785 3812 u32 iir;
3786 /* Can't rely on pipestat interrupt bit in iir as it might
3787 * have been cleared after the pipestat interrupt was received.
3788 * It doesn't set the bit in iir again, but it still produces
3789 * interrupts (for non-MSI).
3790 */
3791 spin_lock(&dev_priv->irq_lock);
3792 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3793 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3794
3795 for_each_pipe(dev_priv, pipe) {
3796 i915_reg_t reg = PIPESTAT(pipe);
3797 pipe_stats[pipe] = I915_READ(reg);
3798
3799 /* Clear the PIPE*STAT regs before the IIR */
3800 if (pipe_stats[pipe] & 0x8000ffff) {
3801 I915_WRITE(reg, pipe_stats[pipe]);
3802 irq_received = true;
3803 }
3804 }
3805 spin_unlock(&dev_priv->irq_lock);
3806 3813
3807 if (!irq_received) 3814 iir = I915_READ(IIR);
3815 if (iir == 0)
3808 break; 3816 break;
3809 3817
3810 /* Consume port. Then clear IIR or we'll miss events */ 3818 ret = IRQ_HANDLED;
3819
3811 if (I915_HAS_HOTPLUG(dev_priv) && 3820 if (I915_HAS_HOTPLUG(dev_priv) &&
3812 iir & I915_DISPLAY_PORT_INTERRUPT) { 3821 iir & I915_DISPLAY_PORT_INTERRUPT)
3813 u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 3822 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
3814 if (hotplug_status) 3823
3815 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 3824 /* Call regardless, as some status bits might not be
3816 } 3825 * signalled in iir */
3826 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
3817 3827
3818 I915_WRITE(IIR, iir); 3828 I915_WRITE(IIR, iir);
3819 new_iir = I915_READ(IIR); /* Flush posted writes */
3820 3829
3821 if (iir & I915_USER_INTERRUPT) 3830 if (iir & I915_USER_INTERRUPT)
3822 notify_ring(dev_priv->engine[RCS]); 3831 notify_ring(dev_priv->engine[RCS]);
3823 3832
3824 for_each_pipe(dev_priv, pipe) { 3833 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3825 int plane = pipe; 3834 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3826 if (HAS_FBC(dev_priv))
3827 plane = !plane;
3828
3829 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
3830 drm_handle_vblank(&dev_priv->drm, pipe);
3831
3832 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3833 blc_event = true;
3834
3835 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3836 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
3837
3838 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3839 intel_cpu_fifo_underrun_irq_handler(dev_priv,
3840 pipe);
3841 }
3842 3835
3843 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 3836 if (hotplug_status)
3844 intel_opregion_asle_intr(dev_priv); 3837 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
3845 3838
3846 /* With MSI, interrupts are only generated when iir 3839 i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
3847 * transitions from zero to nonzero. If another bit got 3840 } while (0);
3848 * set while we were handling the existing iir bits, then
3849 * we would never get another interrupt.
3850 *
3851 * This is fine on non-MSI as well, as if we hit this path
3852 * we avoid exiting the interrupt handler only to generate
3853 * another one.
3854 *
3855 * Note that for MSI this could cause a stray interrupt report
3856 * if an interrupt landed in the time between writing IIR and
3857 * the posting read. This should be rare enough to never
3858 * trigger the 99% of 100,000 interrupts test for disabling
3859 * stray interrupts.
3860 */
3861 ret = IRQ_HANDLED;
3862 iir = new_iir;
3863 } while (iir);
3864 3841
3865 enable_rpm_wakeref_asserts(dev_priv); 3842 enable_rpm_wakeref_asserts(dev_priv);
3866 3843
3867 return ret; 3844 return ret;
3868} 3845}
3869 3846
3870static void i915_irq_uninstall(struct drm_device * dev) 3847static void i965_irq_reset(struct drm_device *dev)
3871{
3872 struct drm_i915_private *dev_priv = to_i915(dev);
3873 int pipe;
3874
3875 if (I915_HAS_HOTPLUG(dev_priv)) {
3876 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3877 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3878 }
3879
3880 I915_WRITE16(HWSTAM, 0xffff);
3881 for_each_pipe(dev_priv, pipe) {
3882 /* Clear enable bits; then clear status bits */
3883 I915_WRITE(PIPESTAT(pipe), 0);
3884 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3885 }
3886 I915_WRITE(IMR, 0xffffffff);
3887 I915_WRITE(IER, 0x0);
3888
3889 I915_WRITE(IIR, I915_READ(IIR));
3890}
3891
3892static void i965_irq_preinstall(struct drm_device * dev)
3893{ 3848{
3894 struct drm_i915_private *dev_priv = to_i915(dev); 3849 struct drm_i915_private *dev_priv = to_i915(dev);
3895 int pipe;
3896 3850
3897 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 3851 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3898 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3852 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3899 3853
3900 I915_WRITE(HWSTAM, 0xeffe); 3854 i9xx_pipestat_irq_reset(dev_priv);
3901 for_each_pipe(dev_priv, pipe) 3855
3902 I915_WRITE(PIPESTAT(pipe), 0); 3856 I915_WRITE(HWSTAM, 0xffffffff);
3903 I915_WRITE(IMR, 0xffffffff); 3857
3904 I915_WRITE(IER, 0x0); 3858 GEN3_IRQ_RESET();
3905 POSTING_READ(IER);
3906} 3859}
3907 3860
3908static int i965_irq_postinstall(struct drm_device *dev) 3861static int i965_irq_postinstall(struct drm_device *dev)
@@ -3911,31 +3864,6 @@ static int i965_irq_postinstall(struct drm_device *dev)
3911 u32 enable_mask; 3864 u32 enable_mask;
3912 u32 error_mask; 3865 u32 error_mask;
3913 3866
3914 /* Unmask the interrupts that we always want on. */
3915 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
3916 I915_DISPLAY_PORT_INTERRUPT |
3917 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3918 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3919 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3920 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3921 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3922
3923 enable_mask = ~dev_priv->irq_mask;
3924 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3925 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3926 enable_mask |= I915_USER_INTERRUPT;
3927
3928 if (IS_G4X(dev_priv))
3929 enable_mask |= I915_BSD_USER_INTERRUPT;
3930
3931 /* Interrupt setup is already guaranteed to be single-threaded, this is
3932 * just to make the assert_spin_locked check happy. */
3933 spin_lock_irq(&dev_priv->irq_lock);
3934 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3935 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3936 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3937 spin_unlock_irq(&dev_priv->irq_lock);
3938
3939 /* 3867 /*
3940 * Enable some error detection, note the instruction error mask 3868 * Enable some error detection, note the instruction error mask
3941 * bit is reserved, so we leave it masked. 3869 * bit is reserved, so we leave it masked.
@@ -3951,12 +3879,34 @@ static int i965_irq_postinstall(struct drm_device *dev)
3951 } 3879 }
3952 I915_WRITE(EMR, error_mask); 3880 I915_WRITE(EMR, error_mask);
3953 3881
3954 I915_WRITE(IMR, dev_priv->irq_mask); 3882 /* Unmask the interrupts that we always want on. */
3955 I915_WRITE(IER, enable_mask); 3883 dev_priv->irq_mask =
3956 POSTING_READ(IER); 3884 ~(I915_ASLE_INTERRUPT |
3885 I915_DISPLAY_PORT_INTERRUPT |
3886 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3887 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3888 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3957 3889
3958 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 3890 enable_mask =
3959 POSTING_READ(PORT_HOTPLUG_EN); 3891 I915_ASLE_INTERRUPT |
3892 I915_DISPLAY_PORT_INTERRUPT |
3893 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3894 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3895 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3896 I915_USER_INTERRUPT;
3897
3898 if (IS_G4X(dev_priv))
3899 enable_mask |= I915_BSD_USER_INTERRUPT;
3900
3901 GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
3902
3903 /* Interrupt setup is already guaranteed to be single-threaded, this is
3904 * just to make the assert_spin_locked check happy. */
3905 spin_lock_irq(&dev_priv->irq_lock);
3906 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3907 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3908 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3909 spin_unlock_irq(&dev_priv->irq_lock);
3960 3910
3961 i915_enable_asle_pipestat(dev_priv); 3911 i915_enable_asle_pipestat(dev_priv);
3962 3912
@@ -3992,9 +3942,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
3992{ 3942{
3993 struct drm_device *dev = arg; 3943 struct drm_device *dev = arg;
3994 struct drm_i915_private *dev_priv = to_i915(dev); 3944 struct drm_i915_private *dev_priv = to_i915(dev);
3995 u32 iir, new_iir; 3945 irqreturn_t ret = IRQ_NONE;
3996 u32 pipe_stats[I915_MAX_PIPES];
3997 int ret = IRQ_NONE, pipe;
3998 3946
3999 if (!intel_irqs_enabled(dev_priv)) 3947 if (!intel_irqs_enabled(dev_priv))
4000 return IRQ_NONE; 3948 return IRQ_NONE;
@@ -4002,121 +3950,46 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
4002 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 3950 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
4003 disable_rpm_wakeref_asserts(dev_priv); 3951 disable_rpm_wakeref_asserts(dev_priv);
4004 3952
4005 iir = I915_READ(IIR); 3953 do {
4006 3954 u32 pipe_stats[I915_MAX_PIPES] = {};
4007 for (;;) { 3955 u32 hotplug_status = 0;
4008 bool irq_received = (iir) != 0; 3956 u32 iir;
4009 bool blc_event = false;
4010
4011 /* Can't rely on pipestat interrupt bit in iir as it might
4012 * have been cleared after the pipestat interrupt was received.
4013 * It doesn't set the bit in iir again, but it still produces
4014 * interrupts (for non-MSI).
4015 */
4016 spin_lock(&dev_priv->irq_lock);
4017 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4018 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4019
4020 for_each_pipe(dev_priv, pipe) {
4021 i915_reg_t reg = PIPESTAT(pipe);
4022 pipe_stats[pipe] = I915_READ(reg);
4023
4024 /*
4025 * Clear the PIPE*STAT regs before the IIR
4026 */
4027 if (pipe_stats[pipe] & 0x8000ffff) {
4028 I915_WRITE(reg, pipe_stats[pipe]);
4029 irq_received = true;
4030 }
4031 }
4032 spin_unlock(&dev_priv->irq_lock);
4033 3957
4034 if (!irq_received) 3958 iir = I915_READ(IIR);
3959 if (iir == 0)
4035 break; 3960 break;
4036 3961
4037 ret = IRQ_HANDLED; 3962 ret = IRQ_HANDLED;
4038 3963
4039 /* Consume port. Then clear IIR or we'll miss events */ 3964 if (iir & I915_DISPLAY_PORT_INTERRUPT)
4040 if (iir & I915_DISPLAY_PORT_INTERRUPT) { 3965 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4041 u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 3966
4042 if (hotplug_status) 3967 /* Call regardless, as some status bits might not be
4043 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 3968 * signalled in iir */
4044 } 3969 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4045 3970
4046 I915_WRITE(IIR, iir); 3971 I915_WRITE(IIR, iir);
4047 new_iir = I915_READ(IIR); /* Flush posted writes */
4048 3972
4049 if (iir & I915_USER_INTERRUPT) 3973 if (iir & I915_USER_INTERRUPT)
4050 notify_ring(dev_priv->engine[RCS]); 3974 notify_ring(dev_priv->engine[RCS]);
3975
4051 if (iir & I915_BSD_USER_INTERRUPT) 3976 if (iir & I915_BSD_USER_INTERRUPT)
4052 notify_ring(dev_priv->engine[VCS]); 3977 notify_ring(dev_priv->engine[VCS]);
4053 3978
4054 for_each_pipe(dev_priv, pipe) { 3979 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4055 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 3980 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4056 drm_handle_vblank(&dev_priv->drm, pipe);
4057
4058 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4059 blc_event = true;
4060
4061 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4062 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
4063
4064 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4065 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
4066 }
4067
4068 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4069 intel_opregion_asle_intr(dev_priv);
4070 3981
4071 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 3982 if (hotplug_status)
4072 gmbus_irq_handler(dev_priv); 3983 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4073 3984
4074 /* With MSI, interrupts are only generated when iir 3985 i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4075 * transitions from zero to nonzero. If another bit got 3986 } while (0);
4076 * set while we were handling the existing iir bits, then
4077 * we would never get another interrupt.
4078 *
4079 * This is fine on non-MSI as well, as if we hit this path
4080 * we avoid exiting the interrupt handler only to generate
4081 * another one.
4082 *
4083 * Note that for MSI this could cause a stray interrupt report
4084 * if an interrupt landed in the time between writing IIR and
4085 * the posting read. This should be rare enough to never
4086 * trigger the 99% of 100,000 interrupts test for disabling
4087 * stray interrupts.
4088 */
4089 iir = new_iir;
4090 }
4091 3987
4092 enable_rpm_wakeref_asserts(dev_priv); 3988 enable_rpm_wakeref_asserts(dev_priv);
4093 3989
4094 return ret; 3990 return ret;
4095} 3991}
4096 3992
4097static void i965_irq_uninstall(struct drm_device * dev)
4098{
4099 struct drm_i915_private *dev_priv = to_i915(dev);
4100 int pipe;
4101
4102 if (!dev_priv)
4103 return;
4104
4105 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4106 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4107
4108 I915_WRITE(HWSTAM, 0xffffffff);
4109 for_each_pipe(dev_priv, pipe)
4110 I915_WRITE(PIPESTAT(pipe), 0);
4111 I915_WRITE(IMR, 0xffffffff);
4112 I915_WRITE(IER, 0x0);
4113
4114 for_each_pipe(dev_priv, pipe)
4115 I915_WRITE(PIPESTAT(pipe),
4116 I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4117 I915_WRITE(IIR, I915_READ(IIR));
4118}
4119
4120/** 3993/**
4121 * intel_irq_init - initializes irq support 3994 * intel_irq_init - initializes irq support
4122 * @dev_priv: i915 device instance 3995 * @dev_priv: i915 device instance
@@ -4197,17 +4070,17 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
4197 4070
4198 if (IS_CHERRYVIEW(dev_priv)) { 4071 if (IS_CHERRYVIEW(dev_priv)) {
4199 dev->driver->irq_handler = cherryview_irq_handler; 4072 dev->driver->irq_handler = cherryview_irq_handler;
4200 dev->driver->irq_preinstall = cherryview_irq_preinstall; 4073 dev->driver->irq_preinstall = cherryview_irq_reset;
4201 dev->driver->irq_postinstall = cherryview_irq_postinstall; 4074 dev->driver->irq_postinstall = cherryview_irq_postinstall;
4202 dev->driver->irq_uninstall = cherryview_irq_uninstall; 4075 dev->driver->irq_uninstall = cherryview_irq_reset;
4203 dev->driver->enable_vblank = i965_enable_vblank; 4076 dev->driver->enable_vblank = i965_enable_vblank;
4204 dev->driver->disable_vblank = i965_disable_vblank; 4077 dev->driver->disable_vblank = i965_disable_vblank;
4205 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4078 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4206 } else if (IS_VALLEYVIEW(dev_priv)) { 4079 } else if (IS_VALLEYVIEW(dev_priv)) {
4207 dev->driver->irq_handler = valleyview_irq_handler; 4080 dev->driver->irq_handler = valleyview_irq_handler;
4208 dev->driver->irq_preinstall = valleyview_irq_preinstall; 4081 dev->driver->irq_preinstall = valleyview_irq_reset;
4209 dev->driver->irq_postinstall = valleyview_irq_postinstall; 4082 dev->driver->irq_postinstall = valleyview_irq_postinstall;
4210 dev->driver->irq_uninstall = valleyview_irq_uninstall; 4083 dev->driver->irq_uninstall = valleyview_irq_reset;
4211 dev->driver->enable_vblank = i965_enable_vblank; 4084 dev->driver->enable_vblank = i965_enable_vblank;
4212 dev->driver->disable_vblank = i965_disable_vblank; 4085 dev->driver->disable_vblank = i965_disable_vblank;
4213 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4086 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
@@ -4215,7 +4088,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
4215 dev->driver->irq_handler = gen8_irq_handler; 4088 dev->driver->irq_handler = gen8_irq_handler;
4216 dev->driver->irq_preinstall = gen8_irq_reset; 4089 dev->driver->irq_preinstall = gen8_irq_reset;
4217 dev->driver->irq_postinstall = gen8_irq_postinstall; 4090 dev->driver->irq_postinstall = gen8_irq_postinstall;
4218 dev->driver->irq_uninstall = gen8_irq_uninstall; 4091 dev->driver->irq_uninstall = gen8_irq_reset;
4219 dev->driver->enable_vblank = gen8_enable_vblank; 4092 dev->driver->enable_vblank = gen8_enable_vblank;
4220 dev->driver->disable_vblank = gen8_disable_vblank; 4093 dev->driver->disable_vblank = gen8_disable_vblank;
4221 if (IS_GEN9_LP(dev_priv)) 4094 if (IS_GEN9_LP(dev_priv))
@@ -4229,29 +4102,29 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
4229 dev->driver->irq_handler = ironlake_irq_handler; 4102 dev->driver->irq_handler = ironlake_irq_handler;
4230 dev->driver->irq_preinstall = ironlake_irq_reset; 4103 dev->driver->irq_preinstall = ironlake_irq_reset;
4231 dev->driver->irq_postinstall = ironlake_irq_postinstall; 4104 dev->driver->irq_postinstall = ironlake_irq_postinstall;
4232 dev->driver->irq_uninstall = ironlake_irq_uninstall; 4105 dev->driver->irq_uninstall = ironlake_irq_reset;
4233 dev->driver->enable_vblank = ironlake_enable_vblank; 4106 dev->driver->enable_vblank = ironlake_enable_vblank;
4234 dev->driver->disable_vblank = ironlake_disable_vblank; 4107 dev->driver->disable_vblank = ironlake_disable_vblank;
4235 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; 4108 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4236 } else { 4109 } else {
4237 if (IS_GEN2(dev_priv)) { 4110 if (IS_GEN2(dev_priv)) {
4238 dev->driver->irq_preinstall = i8xx_irq_preinstall; 4111 dev->driver->irq_preinstall = i8xx_irq_reset;
4239 dev->driver->irq_postinstall = i8xx_irq_postinstall; 4112 dev->driver->irq_postinstall = i8xx_irq_postinstall;
4240 dev->driver->irq_handler = i8xx_irq_handler; 4113 dev->driver->irq_handler = i8xx_irq_handler;
4241 dev->driver->irq_uninstall = i8xx_irq_uninstall; 4114 dev->driver->irq_uninstall = i8xx_irq_reset;
4242 dev->driver->enable_vblank = i8xx_enable_vblank; 4115 dev->driver->enable_vblank = i8xx_enable_vblank;
4243 dev->driver->disable_vblank = i8xx_disable_vblank; 4116 dev->driver->disable_vblank = i8xx_disable_vblank;
4244 } else if (IS_GEN3(dev_priv)) { 4117 } else if (IS_GEN3(dev_priv)) {
4245 dev->driver->irq_preinstall = i915_irq_preinstall; 4118 dev->driver->irq_preinstall = i915_irq_reset;
4246 dev->driver->irq_postinstall = i915_irq_postinstall; 4119 dev->driver->irq_postinstall = i915_irq_postinstall;
4247 dev->driver->irq_uninstall = i915_irq_uninstall; 4120 dev->driver->irq_uninstall = i915_irq_reset;
4248 dev->driver->irq_handler = i915_irq_handler; 4121 dev->driver->irq_handler = i915_irq_handler;
4249 dev->driver->enable_vblank = i8xx_enable_vblank; 4122 dev->driver->enable_vblank = i8xx_enable_vblank;
4250 dev->driver->disable_vblank = i8xx_disable_vblank; 4123 dev->driver->disable_vblank = i8xx_disable_vblank;
4251 } else { 4124 } else {
4252 dev->driver->irq_preinstall = i965_irq_preinstall; 4125 dev->driver->irq_preinstall = i965_irq_reset;
4253 dev->driver->irq_postinstall = i965_irq_postinstall; 4126 dev->driver->irq_postinstall = i965_irq_postinstall;
4254 dev->driver->irq_uninstall = i965_irq_uninstall; 4127 dev->driver->irq_uninstall = i965_irq_reset;
4255 dev->driver->irq_handler = i965_irq_handler; 4128 dev->driver->irq_handler = i965_irq_handler;
4256 dev->driver->enable_vblank = i965_enable_vblank; 4129 dev->driver->enable_vblank = i965_enable_vblank;
4257 dev->driver->disable_vblank = i965_disable_vblank; 4130 dev->driver->disable_vblank = i965_disable_vblank;
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt2.c b/drivers/gpu/drm/i915/i915_oa_cflgt2.c
new file mode 100644
index 000000000000..368c87d7ee9a
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_oa_cflgt2.c
@@ -0,0 +1,109 @@
1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 *
27 */
28
29#include <linux/sysfs.h>
30
31#include "i915_drv.h"
32#include "i915_oa_cflgt2.h"
33
34static const struct i915_oa_reg b_counter_config_test_oa[] = {
35 { _MMIO(0x2740), 0x00000000 },
36 { _MMIO(0x2744), 0x00800000 },
37 { _MMIO(0x2714), 0xf0800000 },
38 { _MMIO(0x2710), 0x00000000 },
39 { _MMIO(0x2724), 0xf0800000 },
40 { _MMIO(0x2720), 0x00000000 },
41 { _MMIO(0x2770), 0x00000004 },
42 { _MMIO(0x2774), 0x00000000 },
43 { _MMIO(0x2778), 0x00000003 },
44 { _MMIO(0x277c), 0x00000000 },
45 { _MMIO(0x2780), 0x00000007 },
46 { _MMIO(0x2784), 0x00000000 },
47 { _MMIO(0x2788), 0x00100002 },
48 { _MMIO(0x278c), 0x0000fff7 },
49 { _MMIO(0x2790), 0x00100002 },
50 { _MMIO(0x2794), 0x0000ffcf },
51 { _MMIO(0x2798), 0x00100082 },
52 { _MMIO(0x279c), 0x0000ffef },
53 { _MMIO(0x27a0), 0x001000c2 },
54 { _MMIO(0x27a4), 0x0000ffe7 },
55 { _MMIO(0x27a8), 0x00100001 },
56 { _MMIO(0x27ac), 0x0000ffe7 },
57};
58
59static const struct i915_oa_reg flex_eu_config_test_oa[] = {
60};
61
62static const struct i915_oa_reg mux_config_test_oa[] = {
63 { _MMIO(0x9840), 0x00000080 },
64 { _MMIO(0x9888), 0x11810000 },
65 { _MMIO(0x9888), 0x07810013 },
66 { _MMIO(0x9888), 0x1f810000 },
67 { _MMIO(0x9888), 0x1d810000 },
68 { _MMIO(0x9888), 0x1b930040 },
69 { _MMIO(0x9888), 0x07e54000 },
70 { _MMIO(0x9888), 0x1f908000 },
71 { _MMIO(0x9888), 0x11900000 },
72 { _MMIO(0x9888), 0x37900000 },
73 { _MMIO(0x9888), 0x53900000 },
74 { _MMIO(0x9888), 0x45900000 },
75 { _MMIO(0x9888), 0x33900000 },
76};
77
78static ssize_t
79show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
80{
81 return sprintf(buf, "1\n");
82}
83
84void
85i915_perf_load_test_config_cflgt2(struct drm_i915_private *dev_priv)
86{
87 strncpy(dev_priv->perf.oa.test_config.uuid,
88 "74fb4902-d3d3-4237-9e90-cbdc68d0a446",
89 UUID_STRING_LEN);
90 dev_priv->perf.oa.test_config.id = 1;
91
92 dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
93 dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
94
95 dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
96 dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
97
98 dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
99 dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
100
101 dev_priv->perf.oa.test_config.sysfs_metric.name = "74fb4902-d3d3-4237-9e90-cbdc68d0a446";
102 dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
103
104 dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
105
106 dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
107 dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
108 dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
109}
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt2.h b/drivers/gpu/drm/i915/i915_oa_cflgt2.h
new file mode 100644
index 000000000000..1f3268ef2ea2
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_oa_cflgt2.h
@@ -0,0 +1,34 @@
1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 *
27 */
28
29#ifndef __I915_OA_CFLGT2_H__
30#define __I915_OA_CFLGT2_H__
31
32extern void i915_perf_load_test_config_cflgt2(struct drm_i915_private *dev_priv);
33
34#endif
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 8ab003dca113..9dff323a83d3 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -25,235 +25,171 @@
25#include "i915_params.h" 25#include "i915_params.h"
26#include "i915_drv.h" 26#include "i915_drv.h"
27 27
28struct i915_params i915 __read_mostly = { 28#define i915_param_named(name, T, perm, desc) \
29 .modeset = -1, 29 module_param_named(name, i915_modparams.name, T, perm); \
30 .panel_ignore_lid = 1, 30 MODULE_PARM_DESC(name, desc)
31 .semaphores = -1, 31#define i915_param_named_unsafe(name, T, perm, desc) \
32 .lvds_channel_mode = 0, 32 module_param_named_unsafe(name, i915_modparams.name, T, perm); \
33 .panel_use_ssc = -1, 33 MODULE_PARM_DESC(name, desc)
34 .vbt_sdvo_panel_type = -1, 34
35 .enable_rc6 = -1, 35struct i915_params i915_modparams __read_mostly = {
36 .enable_dc = -1, 36#define MEMBER(T, member, value) .member = (value),
37 .enable_fbc = -1, 37 I915_PARAMS_FOR_EACH(MEMBER)
38 .enable_execlists = -1, 38#undef MEMBER
39 .enable_hangcheck = true,
40 .enable_ppgtt = -1,
41 .enable_psr = -1,
42 .alpha_support = IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT),
43 .disable_power_well = -1,
44 .enable_ips = 1,
45 .fastboot = 0,
46 .prefault_disable = 0,
47 .load_detect_test = 0,
48 .force_reset_modeset_test = 0,
49 .reset = 2,
50 .error_capture = true,
51 .invert_brightness = 0,
52 .disable_display = 0,
53 .enable_cmd_parser = true,
54 .use_mmio_flip = 0,
55 .mmio_debug = 0,
56 .verbose_state_checks = 1,
57 .nuclear_pageflip = 0,
58 .edp_vswing = 0,
59 .enable_guc_loading = 0,
60 .enable_guc_submission = 0,
61 .guc_log_level = -1,
62 .guc_firmware_path = NULL,
63 .huc_firmware_path = NULL,
64 .enable_dp_mst = true,
65 .inject_load_failure = 0,
66 .enable_dpcd_backlight = false,
67 .enable_gvt = false,
68}; 39};
69 40
70module_param_named(modeset, i915.modeset, int, 0400); 41i915_param_named(modeset, int, 0400,
71MODULE_PARM_DESC(modeset,
72 "Use kernel modesetting [KMS] (0=disable, " 42 "Use kernel modesetting [KMS] (0=disable, "
73 "1=on, -1=force vga console preference [default])"); 43 "1=on, -1=force vga console preference [default])");
74 44
75module_param_named_unsafe(panel_ignore_lid, i915.panel_ignore_lid, int, 0600); 45i915_param_named_unsafe(panel_ignore_lid, int, 0600,
76MODULE_PARM_DESC(panel_ignore_lid,
77 "Override lid status (0=autodetect, 1=autodetect disabled [default], " 46 "Override lid status (0=autodetect, 1=autodetect disabled [default], "
78 "-1=force lid closed, -2=force lid open)"); 47 "-1=force lid closed, -2=force lid open)");
79 48
80module_param_named_unsafe(semaphores, i915.semaphores, int, 0400); 49i915_param_named_unsafe(semaphores, int, 0400,
81MODULE_PARM_DESC(semaphores,
82 "Use semaphores for inter-ring sync " 50 "Use semaphores for inter-ring sync "
83 "(default: -1 (use per-chip defaults))"); 51 "(default: -1 (use per-chip defaults))");
84 52
85module_param_named_unsafe(enable_rc6, i915.enable_rc6, int, 0400); 53i915_param_named_unsafe(enable_rc6, int, 0400,
86MODULE_PARM_DESC(enable_rc6,
87 "Enable power-saving render C-state 6. " 54 "Enable power-saving render C-state 6. "
88 "Different stages can be selected via bitmask values " 55 "Different stages can be selected via bitmask values "
89 "(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). " 56 "(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). "
90 "For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. " 57 "For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
91 "default: -1 (use per-chip default)"); 58 "default: -1 (use per-chip default)");
92 59
93module_param_named_unsafe(enable_dc, i915.enable_dc, int, 0400); 60i915_param_named_unsafe(enable_dc, int, 0400,
94MODULE_PARM_DESC(enable_dc,
95 "Enable power-saving display C-states. " 61 "Enable power-saving display C-states. "
96 "(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6)"); 62 "(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6)");
97 63
98module_param_named_unsafe(enable_fbc, i915.enable_fbc, int, 0600); 64i915_param_named_unsafe(enable_fbc, int, 0600,
99MODULE_PARM_DESC(enable_fbc,
100 "Enable frame buffer compression for power savings " 65 "Enable frame buffer compression for power savings "
101 "(default: -1 (use per-chip default))"); 66 "(default: -1 (use per-chip default))");
102 67
103module_param_named_unsafe(lvds_channel_mode, i915.lvds_channel_mode, int, 0400); 68i915_param_named_unsafe(lvds_channel_mode, int, 0400,
104MODULE_PARM_DESC(lvds_channel_mode,
105 "Specify LVDS channel mode " 69 "Specify LVDS channel mode "
106 "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)"); 70 "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
107 71
108module_param_named_unsafe(lvds_use_ssc, i915.panel_use_ssc, int, 0600); 72i915_param_named_unsafe(panel_use_ssc, int, 0600,
109MODULE_PARM_DESC(lvds_use_ssc,
110 "Use Spread Spectrum Clock with panels [LVDS/eDP] " 73 "Use Spread Spectrum Clock with panels [LVDS/eDP] "
111 "(default: auto from VBT)"); 74 "(default: auto from VBT)");
112 75
113module_param_named_unsafe(vbt_sdvo_panel_type, i915.vbt_sdvo_panel_type, int, 0400); 76i915_param_named_unsafe(vbt_sdvo_panel_type, int, 0400,
114MODULE_PARM_DESC(vbt_sdvo_panel_type,
115 "Override/Ignore selection of SDVO panel mode in the VBT " 77 "Override/Ignore selection of SDVO panel mode in the VBT "
116 "(-2=ignore, -1=auto [default], index in VBT BIOS table)"); 78 "(-2=ignore, -1=auto [default], index in VBT BIOS table)");
117 79
118module_param_named_unsafe(reset, i915.reset, int, 0600); 80i915_param_named_unsafe(reset, int, 0600,
119MODULE_PARM_DESC(reset, "Attempt GPU resets (0=disabled, 1=full gpu reset, 2=engine reset [default])"); 81 "Attempt GPU resets (0=disabled, 1=full gpu reset, 2=engine reset [default])");
120 82
121module_param_named_unsafe(vbt_firmware, i915.vbt_firmware, charp, 0400); 83i915_param_named_unsafe(vbt_firmware, charp, 0400,
122MODULE_PARM_DESC(vbt_firmware, 84 "Load VBT from specified file under /lib/firmware");
123 "Load VBT from specified file under /lib/firmware");
124 85
125#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) 86#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
126module_param_named(error_capture, i915.error_capture, bool, 0600); 87i915_param_named(error_capture, bool, 0600,
127MODULE_PARM_DESC(error_capture,
128 "Record the GPU state following a hang. " 88 "Record the GPU state following a hang. "
129 "This information in /sys/class/drm/card<N>/error is vital for " 89 "This information in /sys/class/drm/card<N>/error is vital for "
130 "triaging and debugging hangs."); 90 "triaging and debugging hangs.");
131#endif 91#endif
132 92
133module_param_named_unsafe(enable_hangcheck, i915.enable_hangcheck, bool, 0644); 93i915_param_named_unsafe(enable_hangcheck, bool, 0644,
134MODULE_PARM_DESC(enable_hangcheck,
135 "Periodically check GPU activity for detecting hangs. " 94 "Periodically check GPU activity for detecting hangs. "
136 "WARNING: Disabling this can cause system wide hangs. " 95 "WARNING: Disabling this can cause system wide hangs. "
137 "(default: true)"); 96 "(default: true)");
138 97
139module_param_named_unsafe(enable_ppgtt, i915.enable_ppgtt, int, 0400); 98i915_param_named_unsafe(enable_ppgtt, int, 0400,
140MODULE_PARM_DESC(enable_ppgtt,
141 "Override PPGTT usage. " 99 "Override PPGTT usage. "
142 "(-1=auto [default], 0=disabled, 1=aliasing, 2=full, 3=full with extended address space)"); 100 "(-1=auto [default], 0=disabled, 1=aliasing, 2=full, 3=full with extended address space)");
143 101
144module_param_named_unsafe(enable_execlists, i915.enable_execlists, int, 0400); 102i915_param_named_unsafe(enable_execlists, int, 0400,
145MODULE_PARM_DESC(enable_execlists,
146 "Override execlists usage. " 103 "Override execlists usage. "
147 "(-1=auto [default], 0=disabled, 1=enabled)"); 104 "(-1=auto [default], 0=disabled, 1=enabled)");
148 105
149module_param_named_unsafe(enable_psr, i915.enable_psr, int, 0600); 106i915_param_named_unsafe(enable_psr, int, 0600,
150MODULE_PARM_DESC(enable_psr, "Enable PSR " 107 "Enable PSR "
151 "(0=disabled, 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode) " 108 "(0=disabled, 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode) "
152 "Default: -1 (use per-chip default)"); 109 "Default: -1 (use per-chip default)");
153 110
154module_param_named_unsafe(alpha_support, i915.alpha_support, bool, 0400); 111i915_param_named_unsafe(alpha_support, bool, 0400,
155MODULE_PARM_DESC(alpha_support,
156 "Enable alpha quality driver support for latest hardware. " 112 "Enable alpha quality driver support for latest hardware. "
157 "See also CONFIG_DRM_I915_ALPHA_SUPPORT."); 113 "See also CONFIG_DRM_I915_ALPHA_SUPPORT.");
158 114
159module_param_named_unsafe(disable_power_well, i915.disable_power_well, int, 0400); 115i915_param_named_unsafe(disable_power_well, int, 0400,
160MODULE_PARM_DESC(disable_power_well,
161 "Disable display power wells when possible " 116 "Disable display power wells when possible "
162 "(-1=auto [default], 0=power wells always on, 1=power wells disabled when possible)"); 117 "(-1=auto [default], 0=power wells always on, 1=power wells disabled when possible)");
163 118
164module_param_named_unsafe(enable_ips, i915.enable_ips, int, 0600); 119i915_param_named_unsafe(enable_ips, int, 0600, "Enable IPS (default: true)");
165MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
166 120
167module_param_named(fastboot, i915.fastboot, bool, 0600); 121i915_param_named(fastboot, bool, 0600,
168MODULE_PARM_DESC(fastboot,
169 "Try to skip unnecessary mode sets at boot time (default: false)"); 122 "Try to skip unnecessary mode sets at boot time (default: false)");
170 123
171module_param_named_unsafe(prefault_disable, i915.prefault_disable, bool, 0600); 124i915_param_named_unsafe(prefault_disable, bool, 0600,
172MODULE_PARM_DESC(prefault_disable,
173 "Disable page prefaulting for pread/pwrite/reloc (default:false). " 125 "Disable page prefaulting for pread/pwrite/reloc (default:false). "
174 "For developers only."); 126 "For developers only.");
175 127
176module_param_named_unsafe(load_detect_test, i915.load_detect_test, bool, 0600); 128i915_param_named_unsafe(load_detect_test, bool, 0600,
177MODULE_PARM_DESC(load_detect_test,
178 "Force-enable the VGA load detect code for testing (default:false). " 129 "Force-enable the VGA load detect code for testing (default:false). "
179 "For developers only."); 130 "For developers only.");
180 131
181module_param_named_unsafe(force_reset_modeset_test, i915.force_reset_modeset_test, bool, 0600); 132i915_param_named_unsafe(force_reset_modeset_test, bool, 0600,
182MODULE_PARM_DESC(force_reset_modeset_test,
183 "Force a modeset during gpu reset for testing (default:false). " 133 "Force a modeset during gpu reset for testing (default:false). "
184 "For developers only."); 134 "For developers only.");
185 135
186module_param_named_unsafe(invert_brightness, i915.invert_brightness, int, 0600); 136i915_param_named_unsafe(invert_brightness, int, 0600,
187MODULE_PARM_DESC(invert_brightness,
188 "Invert backlight brightness " 137 "Invert backlight brightness "
189 "(-1 force normal, 0 machine defaults, 1 force inversion), please " 138 "(-1 force normal, 0 machine defaults, 1 force inversion), please "
190 "report PCI device ID, subsystem vendor and subsystem device ID " 139 "report PCI device ID, subsystem vendor and subsystem device ID "
191 "to dri-devel@lists.freedesktop.org, if your machine needs it. " 140 "to dri-devel@lists.freedesktop.org, if your machine needs it. "
192 "It will then be included in an upcoming module version."); 141 "It will then be included in an upcoming module version.");
193 142
194module_param_named(disable_display, i915.disable_display, bool, 0400); 143i915_param_named(disable_display, bool, 0400,
195MODULE_PARM_DESC(disable_display, "Disable display (default: false)"); 144 "Disable display (default: false)");
196 145
197module_param_named_unsafe(enable_cmd_parser, i915.enable_cmd_parser, bool, 0400); 146i915_param_named_unsafe(enable_cmd_parser, bool, 0400,
198MODULE_PARM_DESC(enable_cmd_parser, 147 "Enable command parsing (true=enabled [default], false=disabled)");
199 "Enable command parsing (true=enabled [default], false=disabled)");
200 148
201module_param_named_unsafe(use_mmio_flip, i915.use_mmio_flip, int, 0600); 149i915_param_named_unsafe(use_mmio_flip, int, 0600,
202MODULE_PARM_DESC(use_mmio_flip, 150 "use MMIO flips (-1=never, 0=driver discretion [default], 1=always)");
203 "use MMIO flips (-1=never, 0=driver discretion [default], 1=always)");
204 151
205module_param_named(mmio_debug, i915.mmio_debug, int, 0600); 152i915_param_named(mmio_debug, int, 0600,
206MODULE_PARM_DESC(mmio_debug,
207 "Enable the MMIO debug code for the first N failures (default: off). " 153 "Enable the MMIO debug code for the first N failures (default: off). "
208 "This may negatively affect performance."); 154 "This may negatively affect performance.");
209 155
210module_param_named(verbose_state_checks, i915.verbose_state_checks, bool, 0600); 156i915_param_named(verbose_state_checks, bool, 0600,
211MODULE_PARM_DESC(verbose_state_checks,
212 "Enable verbose logs (ie. WARN_ON()) in case of unexpected hw state conditions."); 157 "Enable verbose logs (ie. WARN_ON()) in case of unexpected hw state conditions.");
213 158
214module_param_named_unsafe(nuclear_pageflip, i915.nuclear_pageflip, bool, 0400); 159i915_param_named_unsafe(nuclear_pageflip, bool, 0400,
215MODULE_PARM_DESC(nuclear_pageflip, 160 "Force enable atomic functionality on platforms that don't have full support yet.");
216 "Force enable atomic functionality on platforms that don't have full support yet.");
217 161
218/* WA to get away with the default setting in VBT for early platforms.Will be removed */ 162/* WA to get away with the default setting in VBT for early platforms.Will be removed */
219module_param_named_unsafe(edp_vswing, i915.edp_vswing, int, 0400); 163i915_param_named_unsafe(edp_vswing, int, 0400,
220MODULE_PARM_DESC(edp_vswing, 164 "Ignore/Override vswing pre-emph table selection from VBT "
221 "Ignore/Override vswing pre-emph table selection from VBT " 165 "(0=use value from vbt [default], 1=low power swing(200mV),"
222 "(0=use value from vbt [default], 1=low power swing(200mV)," 166 "2=default swing(400mV))");
223 "2=default swing(400mV))"); 167
224 168i915_param_named_unsafe(enable_guc_loading, int, 0400,
225module_param_named_unsafe(enable_guc_loading, i915.enable_guc_loading, int, 0400); 169 "Enable GuC firmware loading "
226MODULE_PARM_DESC(enable_guc_loading, 170 "(-1=auto, 0=never [default], 1=if available, 2=required)");
227 "Enable GuC firmware loading " 171
228 "(-1=auto, 0=never [default], 1=if available, 2=required)"); 172i915_param_named_unsafe(enable_guc_submission, int, 0400,
229 173 "Enable GuC submission "
230module_param_named_unsafe(enable_guc_submission, i915.enable_guc_submission, int, 0400); 174 "(-1=auto, 0=never [default], 1=if available, 2=required)");
231MODULE_PARM_DESC(enable_guc_submission, 175
232 "Enable GuC submission " 176i915_param_named(guc_log_level, int, 0400,
233 "(-1=auto, 0=never [default], 1=if available, 2=required)");
234
235module_param_named(guc_log_level, i915.guc_log_level, int, 0400);
236MODULE_PARM_DESC(guc_log_level,
237 "GuC firmware logging level (-1:disabled (default), 0-3:enabled)"); 177 "GuC firmware logging level (-1:disabled (default), 0-3:enabled)");
238 178
239module_param_named_unsafe(guc_firmware_path, i915.guc_firmware_path, charp, 0400); 179i915_param_named_unsafe(guc_firmware_path, charp, 0400,
240MODULE_PARM_DESC(guc_firmware_path,
241 "GuC firmware path to use instead of the default one"); 180 "GuC firmware path to use instead of the default one");
242 181
243module_param_named_unsafe(huc_firmware_path, i915.huc_firmware_path, charp, 0400); 182i915_param_named_unsafe(huc_firmware_path, charp, 0400,
244MODULE_PARM_DESC(huc_firmware_path,
245 "HuC firmware path to use instead of the default one"); 183 "HuC firmware path to use instead of the default one");
246 184
247module_param_named_unsafe(enable_dp_mst, i915.enable_dp_mst, bool, 0600); 185i915_param_named_unsafe(enable_dp_mst, bool, 0600,
248MODULE_PARM_DESC(enable_dp_mst,
249 "Enable multi-stream transport (MST) for new DisplayPort sinks. (default: true)"); 186 "Enable multi-stream transport (MST) for new DisplayPort sinks. (default: true)");
250module_param_named_unsafe(inject_load_failure, i915.inject_load_failure, uint, 0400); 187
251MODULE_PARM_DESC(inject_load_failure, 188i915_param_named_unsafe(inject_load_failure, uint, 0400,
252 "Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)"); 189 "Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)");
253module_param_named(enable_dpcd_backlight, i915.enable_dpcd_backlight, bool, 0600); 190
254MODULE_PARM_DESC(enable_dpcd_backlight, 191i915_param_named(enable_dpcd_backlight, bool, 0600,
255 "Enable support for DPCD backlight control (default:false)"); 192 "Enable support for DPCD backlight control (default:false)");
256 193
257module_param_named(enable_gvt, i915.enable_gvt, bool, 0400); 194i915_param_named(enable_gvt, bool, 0400,
258MODULE_PARM_DESC(enable_gvt,
259 "Enable support for Intel GVT-g graphics virtualization host support(default:false)"); 195 "Enable support for Intel GVT-g graphics virtualization host support(default:false)");
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index ac844709c97e..4f3f8d650194 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -27,56 +27,56 @@
27 27
28#include <linux/cache.h> /* for __read_mostly */ 28#include <linux/cache.h> /* for __read_mostly */
29 29
30#define I915_PARAMS_FOR_EACH(func) \ 30#define I915_PARAMS_FOR_EACH(param) \
31 func(char *, vbt_firmware); \ 31 param(char *, vbt_firmware, NULL) \
32 func(int, modeset); \ 32 param(int, modeset, -1) \
33 func(int, panel_ignore_lid); \ 33 param(int, panel_ignore_lid, 1) \
34 func(int, semaphores); \ 34 param(int, semaphores, -1) \
35 func(int, lvds_channel_mode); \ 35 param(int, lvds_channel_mode, 0) \
36 func(int, panel_use_ssc); \ 36 param(int, panel_use_ssc, -1) \
37 func(int, vbt_sdvo_panel_type); \ 37 param(int, vbt_sdvo_panel_type, -1) \
38 func(int, enable_rc6); \ 38 param(int, enable_rc6, -1) \
39 func(int, enable_dc); \ 39 param(int, enable_dc, -1) \
40 func(int, enable_fbc); \ 40 param(int, enable_fbc, -1) \
41 func(int, enable_ppgtt); \ 41 param(int, enable_ppgtt, -1) \
42 func(int, enable_execlists); \ 42 param(int, enable_execlists, -1) \
43 func(int, enable_psr); \ 43 param(int, enable_psr, -1) \
44 func(int, disable_power_well); \ 44 param(int, disable_power_well, -1) \
45 func(int, enable_ips); \ 45 param(int, enable_ips, 1) \
46 func(int, invert_brightness); \ 46 param(int, invert_brightness, 0) \
47 func(int, enable_guc_loading); \ 47 param(int, enable_guc_loading, 0) \
48 func(int, enable_guc_submission); \ 48 param(int, enable_guc_submission, 0) \
49 func(int, guc_log_level); \ 49 param(int, guc_log_level, -1) \
50 func(char *, guc_firmware_path); \ 50 param(char *, guc_firmware_path, NULL) \
51 func(char *, huc_firmware_path); \ 51 param(char *, huc_firmware_path, NULL) \
52 func(int, use_mmio_flip); \ 52 param(int, use_mmio_flip, 0) \
53 func(int, mmio_debug); \ 53 param(int, mmio_debug, 0) \
54 func(int, edp_vswing); \ 54 param(int, edp_vswing, 0) \
55 func(int, reset); \ 55 param(int, reset, 2) \
56 func(unsigned int, inject_load_failure); \ 56 param(unsigned int, inject_load_failure, 0) \
57 /* leave bools at the end to not create holes */ \ 57 /* leave bools at the end to not create holes */ \
58 func(bool, alpha_support); \ 58 param(bool, alpha_support, IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT)) \
59 func(bool, enable_cmd_parser); \ 59 param(bool, enable_cmd_parser, true) \
60 func(bool, enable_hangcheck); \ 60 param(bool, enable_hangcheck, true) \
61 func(bool, fastboot); \ 61 param(bool, fastboot, false) \
62 func(bool, prefault_disable); \ 62 param(bool, prefault_disable, false) \
63 func(bool, load_detect_test); \ 63 param(bool, load_detect_test, false) \
64 func(bool, force_reset_modeset_test); \ 64 param(bool, force_reset_modeset_test, false) \
65 func(bool, error_capture); \ 65 param(bool, error_capture, true) \
66 func(bool, disable_display); \ 66 param(bool, disable_display, false) \
67 func(bool, verbose_state_checks); \ 67 param(bool, verbose_state_checks, true) \
68 func(bool, nuclear_pageflip); \ 68 param(bool, nuclear_pageflip, false) \
69 func(bool, enable_dp_mst); \ 69 param(bool, enable_dp_mst, true) \
70 func(bool, enable_dpcd_backlight); \ 70 param(bool, enable_dpcd_backlight, false) \
71 func(bool, enable_gvt) 71 param(bool, enable_gvt, false)
72 72
73#define MEMBER(T, member) T member 73#define MEMBER(T, member, ...) T member;
74struct i915_params { 74struct i915_params {
75 I915_PARAMS_FOR_EACH(MEMBER); 75 I915_PARAMS_FOR_EACH(MEMBER);
76}; 76};
77#undef MEMBER 77#undef MEMBER
78 78
79extern struct i915_params i915 __read_mostly; 79extern struct i915_params i915_modparams __read_mostly;
80 80
81#endif 81#endif
82 82
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 881b5d6708aa..da60866b6628 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -168,6 +168,7 @@ static const struct intel_device_info intel_i965g_info __initconst = {
168 .platform = INTEL_I965G, 168 .platform = INTEL_I965G,
169 .has_overlay = 1, 169 .has_overlay = 1,
170 .hws_needs_physical = 1, 170 .hws_needs_physical = 1,
171 .has_snoop = false,
171}; 172};
172 173
173static const struct intel_device_info intel_i965gm_info __initconst = { 174static const struct intel_device_info intel_i965gm_info __initconst = {
@@ -177,6 +178,7 @@ static const struct intel_device_info intel_i965gm_info __initconst = {
177 .has_overlay = 1, 178 .has_overlay = 1,
178 .supports_tv = 1, 179 .supports_tv = 1,
179 .hws_needs_physical = 1, 180 .hws_needs_physical = 1,
181 .has_snoop = false,
180}; 182};
181 183
182static const struct intel_device_info intel_g45_info __initconst = { 184static const struct intel_device_info intel_g45_info __initconst = {
@@ -198,7 +200,6 @@ static const struct intel_device_info intel_gm45_info __initconst = {
198#define GEN5_FEATURES \ 200#define GEN5_FEATURES \
199 .gen = 5, .num_pipes = 2, \ 201 .gen = 5, .num_pipes = 2, \
200 .has_hotplug = 1, \ 202 .has_hotplug = 1, \
201 .has_gmbus_irq = 1, \
202 .ring_mask = RENDER_RING | BSD_RING, \ 203 .ring_mask = RENDER_RING | BSD_RING, \
203 .has_snoop = true, \ 204 .has_snoop = true, \
204 GEN_DEFAULT_PIPEOFFSETS, \ 205 GEN_DEFAULT_PIPEOFFSETS, \
@@ -223,7 +224,6 @@ static const struct intel_device_info intel_ironlake_m_info __initconst = {
223 .has_llc = 1, \ 224 .has_llc = 1, \
224 .has_rc6 = 1, \ 225 .has_rc6 = 1, \
225 .has_rc6p = 1, \ 226 .has_rc6p = 1, \
226 .has_gmbus_irq = 1, \
227 .has_aliasing_ppgtt = 1, \ 227 .has_aliasing_ppgtt = 1, \
228 GEN_DEFAULT_PIPEOFFSETS, \ 228 GEN_DEFAULT_PIPEOFFSETS, \
229 CURSOR_OFFSETS 229 CURSOR_OFFSETS
@@ -266,7 +266,6 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info __initconst =
266 .has_llc = 1, \ 266 .has_llc = 1, \
267 .has_rc6 = 1, \ 267 .has_rc6 = 1, \
268 .has_rc6p = 1, \ 268 .has_rc6p = 1, \
269 .has_gmbus_irq = 1, \
270 .has_aliasing_ppgtt = 1, \ 269 .has_aliasing_ppgtt = 1, \
271 .has_full_ppgtt = 1, \ 270 .has_full_ppgtt = 1, \
272 GEN_DEFAULT_PIPEOFFSETS, \ 271 GEN_DEFAULT_PIPEOFFSETS, \
@@ -319,7 +318,6 @@ static const struct intel_device_info intel_valleyview_info __initconst = {
319 .has_psr = 1, 318 .has_psr = 1,
320 .has_runtime_pm = 1, 319 .has_runtime_pm = 1,
321 .has_rc6 = 1, 320 .has_rc6 = 1,
322 .has_gmbus_irq = 1,
323 .has_gmch_display = 1, 321 .has_gmch_display = 1,
324 .has_hotplug = 1, 322 .has_hotplug = 1,
325 .has_aliasing_ppgtt = 1, 323 .has_aliasing_ppgtt = 1,
@@ -410,7 +408,6 @@ static const struct intel_device_info intel_cherryview_info __initconst = {
410 .has_runtime_pm = 1, 408 .has_runtime_pm = 1,
411 .has_resource_streamer = 1, 409 .has_resource_streamer = 1,
412 .has_rc6 = 1, 410 .has_rc6 = 1,
413 .has_gmbus_irq = 1,
414 .has_logical_ring_contexts = 1, 411 .has_logical_ring_contexts = 1,
415 .has_gmch_display = 1, 412 .has_gmch_display = 1,
416 .has_aliasing_ppgtt = 1, 413 .has_aliasing_ppgtt = 1,
@@ -472,7 +469,6 @@ static const struct intel_device_info intel_skylake_gt4_info __initconst = {
472 .has_resource_streamer = 1, \ 469 .has_resource_streamer = 1, \
473 .has_rc6 = 1, \ 470 .has_rc6 = 1, \
474 .has_dp_mst = 1, \ 471 .has_dp_mst = 1, \
475 .has_gmbus_irq = 1, \
476 .has_logical_ring_contexts = 1, \ 472 .has_logical_ring_contexts = 1, \
477 .has_guc = 1, \ 473 .has_guc = 1, \
478 .has_aliasing_ppgtt = 1, \ 474 .has_aliasing_ppgtt = 1, \
@@ -480,6 +476,7 @@ static const struct intel_device_info intel_skylake_gt4_info __initconst = {
480 .has_full_48bit_ppgtt = 1, \ 476 .has_full_48bit_ppgtt = 1, \
481 .has_reset_engine = 1, \ 477 .has_reset_engine = 1, \
482 .has_snoop = true, \ 478 .has_snoop = true, \
479 .has_ipc = 1, \
483 GEN_DEFAULT_PIPEOFFSETS, \ 480 GEN_DEFAULT_PIPEOFFSETS, \
484 IVB_CURSOR_OFFSETS, \ 481 IVB_CURSOR_OFFSETS, \
485 BDW_COLORS 482 BDW_COLORS
@@ -503,6 +500,7 @@ static const struct intel_device_info intel_geminilake_info __initconst = {
503 .platform = INTEL_KABYLAKE, \ 500 .platform = INTEL_KABYLAKE, \
504 .has_csr = 1, \ 501 .has_csr = 1, \
505 .has_guc = 1, \ 502 .has_guc = 1, \
503 .has_ipc = 1, \
506 .ddb_size = 896 504 .ddb_size = 896
507 505
508static const struct intel_device_info intel_kabylake_gt1_info __initconst = { 506static const struct intel_device_info intel_kabylake_gt1_info __initconst = {
@@ -522,12 +520,12 @@ static const struct intel_device_info intel_kabylake_gt3_info __initconst = {
522}; 520};
523 521
524#define CFL_PLATFORM \ 522#define CFL_PLATFORM \
525 .is_alpha_support = 1, \
526 BDW_FEATURES, \ 523 BDW_FEATURES, \
527 .gen = 9, \ 524 .gen = 9, \
528 .platform = INTEL_COFFEELAKE, \ 525 .platform = INTEL_COFFEELAKE, \
529 .has_csr = 1, \ 526 .has_csr = 1, \
530 .has_guc = 1, \ 527 .has_guc = 1, \
528 .has_ipc = 1, \
531 .ddb_size = 896 529 .ddb_size = 896
532 530
533static const struct intel_device_info intel_coffeelake_gt1_info __initconst = { 531static const struct intel_device_info intel_coffeelake_gt1_info __initconst = {
@@ -554,6 +552,7 @@ static const struct intel_device_info intel_cannonlake_gt2_info __initconst = {
554 .gt = 2, 552 .gt = 2,
555 .ddb_size = 1024, 553 .ddb_size = 1024,
556 .has_csr = 1, 554 .has_csr = 1,
555 .has_ipc = 1,
557 .color = { .degamma_lut_size = 0, .gamma_lut_size = 1024 } 556 .color = { .degamma_lut_size = 0, .gamma_lut_size = 1024 }
558}; 557};
559 558
@@ -632,7 +631,7 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
632 (struct intel_device_info *) ent->driver_data; 631 (struct intel_device_info *) ent->driver_data;
633 int err; 632 int err;
634 633
635 if (IS_ALPHA_SUPPORT(intel_info) && !i915.alpha_support) { 634 if (IS_ALPHA_SUPPORT(intel_info) && !i915_modparams.alpha_support) {
636 DRM_INFO("The driver support for your hardware in this kernel version is alpha quality\n" 635 DRM_INFO("The driver support for your hardware in this kernel version is alpha quality\n"
637 "See CONFIG_DRM_I915_ALPHA_SUPPORT or i915.alpha_support module parameter\n" 636 "See CONFIG_DRM_I915_ALPHA_SUPPORT or i915.alpha_support module parameter\n"
638 "to enable support in this kernel version, or check for kernel updates.\n"); 637 "to enable support in this kernel version, or check for kernel updates.\n");
@@ -690,10 +689,10 @@ static int __init i915_init(void)
690 * vga_text_mode_force boot option. 689 * vga_text_mode_force boot option.
691 */ 690 */
692 691
693 if (i915.modeset == 0) 692 if (i915_modparams.modeset == 0)
694 use_kms = false; 693 use_kms = false;
695 694
696 if (vgacon_text_force() && i915.modeset == -1) 695 if (vgacon_text_force() && i915_modparams.modeset == -1)
697 use_kms = false; 696 use_kms = false;
698 697
699 if (!use_kms) { 698 if (!use_kms) {
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 94185d610673..1383a2995a69 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -206,6 +206,7 @@
206#include "i915_oa_kblgt2.h" 206#include "i915_oa_kblgt2.h"
207#include "i915_oa_kblgt3.h" 207#include "i915_oa_kblgt3.h"
208#include "i915_oa_glk.h" 208#include "i915_oa_glk.h"
209#include "i915_oa_cflgt2.h"
209 210
210/* HW requires this to be a power of two, between 128k and 16M, though driver 211/* HW requires this to be a power of two, between 128k and 16M, though driver
211 * is currently generally designed assuming the largest 16M size is used such 212 * is currently generally designed assuming the largest 16M size is used such
@@ -1213,7 +1214,7 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
1213{ 1214{
1214 struct drm_i915_private *dev_priv = stream->dev_priv; 1215 struct drm_i915_private *dev_priv = stream->dev_priv;
1215 1216
1216 if (i915.enable_execlists) 1217 if (i915_modparams.enable_execlists)
1217 dev_priv->perf.oa.specific_ctx_id = stream->ctx->hw_id; 1218 dev_priv->perf.oa.specific_ctx_id = stream->ctx->hw_id;
1218 else { 1219 else {
1219 struct intel_engine_cs *engine = dev_priv->engine[RCS]; 1220 struct intel_engine_cs *engine = dev_priv->engine[RCS];
@@ -1259,7 +1260,7 @@ static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
1259{ 1260{
1260 struct drm_i915_private *dev_priv = stream->dev_priv; 1261 struct drm_i915_private *dev_priv = stream->dev_priv;
1261 1262
1262 if (i915.enable_execlists) { 1263 if (i915_modparams.enable_execlists) {
1263 dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID; 1264 dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
1264 } else { 1265 } else {
1265 struct intel_engine_cs *engine = dev_priv->engine[RCS]; 1266 struct intel_engine_cs *engine = dev_priv->engine[RCS];
@@ -1850,8 +1851,7 @@ static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
1850 * be read back from automatically triggered reports, as part of the 1851 * be read back from automatically triggered reports, as part of the
1851 * RPT_ID field. 1852 * RPT_ID field.
1852 */ 1853 */
1853 if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) || 1854 if (IS_GEN9(dev_priv)) {
1854 IS_KABYLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) {
1855 I915_WRITE(GEN8_OA_DEBUG, 1855 I915_WRITE(GEN8_OA_DEBUG,
1856 _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS | 1856 _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
1857 GEN9_OA_DEBUG_INCLUDE_CLK_RATIO)); 1857 GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
@@ -2927,6 +2927,9 @@ void i915_perf_register(struct drm_i915_private *dev_priv)
2927 i915_perf_load_test_config_kblgt3(dev_priv); 2927 i915_perf_load_test_config_kblgt3(dev_priv);
2928 } else if (IS_GEMINILAKE(dev_priv)) { 2928 } else if (IS_GEMINILAKE(dev_priv)) {
2929 i915_perf_load_test_config_glk(dev_priv); 2929 i915_perf_load_test_config_glk(dev_priv);
2930 } else if (IS_COFFEELAKE(dev_priv)) {
2931 if (IS_CFL_GT2(dev_priv))
2932 i915_perf_load_test_config_cflgt2(dev_priv);
2930 } 2933 }
2931 2934
2932 if (dev_priv->perf.oa.test_config.id == 0) 2935 if (dev_priv->perf.oa.test_config.id == 0)
@@ -3405,7 +3408,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
3405 dev_priv->perf.oa.timestamp_frequency = 12500000; 3408 dev_priv->perf.oa.timestamp_frequency = 12500000;
3406 3409
3407 dev_priv->perf.oa.oa_formats = hsw_oa_formats; 3410 dev_priv->perf.oa.oa_formats = hsw_oa_formats;
3408 } else if (i915.enable_execlists) { 3411 } else if (i915_modparams.enable_execlists) {
3409 /* Note: that although we could theoretically also support the 3412 /* Note: that although we could theoretically also support the
3410 * legacy ringbuffer mode on BDW (and earlier iterations of 3413 * legacy ringbuffer mode on BDW (and earlier iterations of
3411 * this driver, before upstreaming did this) it didn't seem 3414 * this driver, before upstreaming did this) it didn't seem
@@ -3453,6 +3456,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
3453 break; 3456 break;
3454 case INTEL_SKYLAKE: 3457 case INTEL_SKYLAKE:
3455 case INTEL_KABYLAKE: 3458 case INTEL_KABYLAKE:
3459 case INTEL_COFFEELAKE:
3456 dev_priv->perf.oa.timestamp_frequency = 12000000; 3460 dev_priv->perf.oa.timestamp_frequency = 12000000;
3457 break; 3461 break;
3458 default: 3462 default:
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 2eff98cdcfad..ee0d4f14ac98 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2336,7 +2336,7 @@ enum i915_power_well_id {
2336#define DONE_REG _MMIO(0x40b0) 2336#define DONE_REG _MMIO(0x40b0)
2337#define GEN8_PRIVATE_PAT_LO _MMIO(0x40e0) 2337#define GEN8_PRIVATE_PAT_LO _MMIO(0x40e0)
2338#define GEN8_PRIVATE_PAT_HI _MMIO(0x40e0 + 4) 2338#define GEN8_PRIVATE_PAT_HI _MMIO(0x40e0 + 4)
2339#define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + index*4) 2339#define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + (index)*4)
2340#define BSD_HWS_PGA_GEN7 _MMIO(0x04180) 2340#define BSD_HWS_PGA_GEN7 _MMIO(0x04180)
2341#define BLT_HWS_PGA_GEN7 _MMIO(0x04280) 2341#define BLT_HWS_PGA_GEN7 _MMIO(0x04280)
2342#define VEBOX_HWS_PGA_GEN7 _MMIO(0x04380) 2342#define VEBOX_HWS_PGA_GEN7 _MMIO(0x04380)
@@ -2730,6 +2730,11 @@ enum i915_power_well_id {
2730#define GEN9_F2_SS_DIS_SHIFT 20 2730#define GEN9_F2_SS_DIS_SHIFT 20
2731#define GEN9_F2_SS_DIS_MASK (0xf << GEN9_F2_SS_DIS_SHIFT) 2731#define GEN9_F2_SS_DIS_MASK (0xf << GEN9_F2_SS_DIS_SHIFT)
2732 2732
2733#define GEN10_F2_S_ENA_SHIFT 22
2734#define GEN10_F2_S_ENA_MASK (0x3f << GEN10_F2_S_ENA_SHIFT)
2735#define GEN10_F2_SS_DIS_SHIFT 18
2736#define GEN10_F2_SS_DIS_MASK (0xf << GEN10_F2_SS_DIS_SHIFT)
2737
2733#define GEN8_EU_DISABLE0 _MMIO(0x9134) 2738#define GEN8_EU_DISABLE0 _MMIO(0x9134)
2734#define GEN8_EU_DIS0_S0_MASK 0xffffff 2739#define GEN8_EU_DIS0_S0_MASK 0xffffff
2735#define GEN8_EU_DIS0_S1_SHIFT 24 2740#define GEN8_EU_DIS0_S1_SHIFT 24
@@ -2745,6 +2750,9 @@ enum i915_power_well_id {
2745 2750
2746#define GEN9_EU_DISABLE(slice) _MMIO(0x9134 + (slice)*0x4) 2751#define GEN9_EU_DISABLE(slice) _MMIO(0x9134 + (slice)*0x4)
2747 2752
2753#define GEN10_EU_DISABLE3 _MMIO(0x9140)
2754#define GEN10_EU_DIS_SS_MASK 0xff
2755
2748#define GEN6_BSD_SLEEP_PSMI_CONTROL _MMIO(0x12050) 2756#define GEN6_BSD_SLEEP_PSMI_CONTROL _MMIO(0x12050)
2749#define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0) 2757#define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0)
2750#define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2) 2758#define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2)
@@ -4047,7 +4055,7 @@ enum {
4047#define EDP_PSR2_FRAME_BEFORE_SU_SHIFT 4 4055#define EDP_PSR2_FRAME_BEFORE_SU_SHIFT 4
4048#define EDP_PSR2_FRAME_BEFORE_SU_MASK (0xf<<4) 4056#define EDP_PSR2_FRAME_BEFORE_SU_MASK (0xf<<4)
4049#define EDP_PSR2_IDLE_MASK 0xf 4057#define EDP_PSR2_IDLE_MASK 0xf
4050#define EDP_FRAMES_BEFORE_SU_ENTRY (1<<4) 4058#define EDP_PSR2_FRAME_BEFORE_SU(a) ((a)<<4)
4051 4059
4052#define EDP_PSR2_STATUS_CTL _MMIO(0x6f940) 4060#define EDP_PSR2_STATUS_CTL _MMIO(0x6f940)
4053#define EDP_PSR2_STATUS_STATE_MASK (0xf<<28) 4061#define EDP_PSR2_STATUS_STATE_MASK (0xf<<28)
@@ -6913,7 +6921,7 @@ enum {
6913# define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2) 6921# define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2)
6914 6922
6915#define CHICKEN_PAR1_1 _MMIO(0x42080) 6923#define CHICKEN_PAR1_1 _MMIO(0x42080)
6916#define SKL_RC_HASH_OUTSIDE (1 << 15) 6924#define SKL_DE_COMPRESSED_HASH_MODE (1 << 15)
6917#define DPA_MASK_VBLANK_SRD (1 << 15) 6925#define DPA_MASK_VBLANK_SRD (1 << 15)
6918#define FORCE_ARB_IDLE_PLANES (1 << 14) 6926#define FORCE_ARB_IDLE_PLANES (1 << 14)
6919#define SKL_EDP_PSR_FIX_RDWRAP (1 << 3) 6927#define SKL_EDP_PSR_FIX_RDWRAP (1 << 3)
@@ -6949,6 +6957,7 @@ enum {
6949#define DISP_FBC_WM_DIS (1<<15) 6957#define DISP_FBC_WM_DIS (1<<15)
6950#define DISP_ARB_CTL2 _MMIO(0x45004) 6958#define DISP_ARB_CTL2 _MMIO(0x45004)
6951#define DISP_DATA_PARTITION_5_6 (1<<6) 6959#define DISP_DATA_PARTITION_5_6 (1<<6)
6960#define DISP_IPC_ENABLE (1<<3)
6952#define DBUF_CTL _MMIO(0x45008) 6961#define DBUF_CTL _MMIO(0x45008)
6953#define DBUF_POWER_REQUEST (1<<31) 6962#define DBUF_POWER_REQUEST (1<<31)
6954#define DBUF_POWER_STATE (1<<30) 6963#define DBUF_POWER_STATE (1<<30)
@@ -6990,6 +6999,7 @@ enum {
6990# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26)) 6999# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26))
6991# define GEN9_RHWO_OPTIMIZATION_DISABLE (1<<14) 7000# define GEN9_RHWO_OPTIMIZATION_DISABLE (1<<14)
6992#define COMMON_SLICE_CHICKEN2 _MMIO(0x7014) 7001#define COMMON_SLICE_CHICKEN2 _MMIO(0x7014)
7002# define GEN9_PBE_COMPRESSED_HASH_SELECTION (1<<13)
6993# define GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE (1<<12) 7003# define GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE (1<<12)
6994# define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1<<8) 7004# define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1<<8)
6995# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0) 7005# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0)
@@ -7469,6 +7479,8 @@ enum {
7469#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2))) 7479#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
7470#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2))) 7480#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
7471#define FDI_BC_BIFURCATION_SELECT (1 << 12) 7481#define FDI_BC_BIFURCATION_SELECT (1 << 12)
7482#define CHASSIS_CLK_REQ_DURATION_MASK (0xf << 8)
7483#define CHASSIS_CLK_REQ_DURATION(x) ((x) << 8)
7472#define SPT_PWM_GRANULARITY (1<<0) 7484#define SPT_PWM_GRANULARITY (1<<0)
7473#define SOUTH_CHICKEN2 _MMIO(0xc2004) 7485#define SOUTH_CHICKEN2 _MMIO(0xc2004)
7474#define FDI_MPHY_IOSFSB_RESET_STATUS (1<<13) 7486#define FDI_MPHY_IOSFSB_RESET_STATUS (1<<13)
@@ -7953,8 +7965,8 @@ enum {
7953#define GEN7_PCODE_TIMEOUT 0x2 7965#define GEN7_PCODE_TIMEOUT 0x2
7954#define GEN7_PCODE_ILLEGAL_DATA 0x3 7966#define GEN7_PCODE_ILLEGAL_DATA 0x3
7955#define GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE 0x10 7967#define GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE 0x10
7956#define GEN6_PCODE_WRITE_RC6VIDS 0x4 7968#define GEN6_PCODE_WRITE_RC6VIDS 0x4
7957#define GEN6_PCODE_READ_RC6VIDS 0x5 7969#define GEN6_PCODE_READ_RC6VIDS 0x5
7958#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5) 7970#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
7959#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245) 7971#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
7960#define BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ 0x18 7972#define BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ 0x18
@@ -7973,7 +7985,9 @@ enum {
7973#define GEN6_PCODE_WRITE_D_COMP 0x11 7985#define GEN6_PCODE_WRITE_D_COMP 0x11
7974#define HSW_PCODE_DE_WRITE_FREQ_REQ 0x17 7986#define HSW_PCODE_DE_WRITE_FREQ_REQ 0x17
7975#define DISPLAY_IPS_CONTROL 0x19 7987#define DISPLAY_IPS_CONTROL 0x19
7976#define HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL 0x1A 7988 /* See also IPS_CTL */
7989#define IPS_PCODE_CONTROL (1 << 30)
7990#define HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL 0x1A
7977#define GEN9_PCODE_SAGV_CONTROL 0x21 7991#define GEN9_PCODE_SAGV_CONTROL 0x21
7978#define GEN9_SAGV_DISABLE 0x0 7992#define GEN9_SAGV_DISABLE 0x0
7979#define GEN9_SAGV_IS_DISABLED 0x1 7993#define GEN9_SAGV_IS_DISABLED 0x1
@@ -8082,6 +8096,7 @@ enum {
8082#define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1) 8096#define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1)
8083 8097
8084#define GEN9_HALF_SLICE_CHICKEN7 _MMIO(0xe194) 8098#define GEN9_HALF_SLICE_CHICKEN7 _MMIO(0xe194)
8099#define GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR (1<<8)
8085#define GEN9_ENABLE_YV12_BUGFIX (1<<4) 8100#define GEN9_ENABLE_YV12_BUGFIX (1<<4)
8086#define GEN9_ENABLE_GPGPU_PREEMPTION (1<<2) 8101#define GEN9_ENABLE_GPGPU_PREEMPTION (1<<2)
8087 8102
@@ -8594,7 +8609,7 @@ enum skl_power_gate {
8594#define DPLL_CFGCR0_LINK_RATE_3240 (6 << 25) 8609#define DPLL_CFGCR0_LINK_RATE_3240 (6 << 25)
8595#define DPLL_CFGCR0_LINK_RATE_4050 (7 << 25) 8610#define DPLL_CFGCR0_LINK_RATE_4050 (7 << 25)
8596#define DPLL_CFGCR0_DCO_FRACTION_MASK (0x7fff << 10) 8611#define DPLL_CFGCR0_DCO_FRACTION_MASK (0x7fff << 10)
8597#define DPLL_CFGCR0_DCO_FRAC_SHIFT (10) 8612#define DPLL_CFGCR0_DCO_FRACTION_SHIFT (10)
8598#define DPLL_CFGCR0_DCO_FRACTION(x) ((x) << 10) 8613#define DPLL_CFGCR0_DCO_FRACTION(x) ((x) << 10)
8599#define DPLL_CFGCR0_DCO_INTEGER_MASK (0x3ff) 8614#define DPLL_CFGCR0_DCO_INTEGER_MASK (0x3ff)
8600#define CNL_DPLL_CFGCR0(pll) _MMIO_PLL(pll, _CNL_DPLL0_CFGCR0, _CNL_DPLL1_CFGCR0) 8615#define CNL_DPLL_CFGCR0(pll) _MMIO_PLL(pll, _CNL_DPLL0_CFGCR0, _CNL_DPLL1_CFGCR0)
@@ -8801,6 +8816,15 @@ enum skl_power_gate {
8801#define MIPIO_TXESC_CLK_DIV2 _MMIO(0x160008) 8816#define MIPIO_TXESC_CLK_DIV2 _MMIO(0x160008)
8802#define GLK_TX_ESC_CLK_DIV2_MASK 0x3FF 8817#define GLK_TX_ESC_CLK_DIV2_MASK 0x3FF
8803 8818
8819/* Gen4+ Timestamp and Pipe Frame time stamp registers */
8820#define GEN4_TIMESTAMP _MMIO(0x2358)
8821#define ILK_TIMESTAMP_HI _MMIO(0x70070)
8822#define IVB_TIMESTAMP_CTR _MMIO(0x44070)
8823
8824#define _PIPE_FRMTMSTMP_A 0x70048
8825#define PIPE_FRMTMSTMP(pipe) \
8826 _MMIO_PIPE2(pipe, _PIPE_FRMTMSTMP_A)
8827
8804/* BXT MIPI clock controls */ 8828/* BXT MIPI clock controls */
8805#define BXT_MAX_VAR_OUTPUT_KHZ 39500 8829#define BXT_MAX_VAR_OUTPUT_KHZ 39500
8806 8830
@@ -9382,4 +9406,8 @@ enum skl_power_gate {
9382#define GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL 0x67F1427F /* " " */ 9406#define GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL 0x67F1427F /* " " */
9383#define GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT 0x5FF101FF /* " " */ 9407#define GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT 0x5FF101FF /* " " */
9384 9408
9409#define MMCD_MISC_CTRL _MMIO(0x4ddc) /* skl+ */
9410#define MMCD_PCLA (1 << 31)
9411#define MMCD_HOTSPOT_EN (1 << 27)
9412
9385#endif /* _I915_REG_H_ */ 9413#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index f29540f922af..808ea4d5b962 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -9,6 +9,7 @@
9 9
10#include <linux/slab.h> 10#include <linux/slab.h>
11#include <linux/dma-fence.h> 11#include <linux/dma-fence.h>
12#include <linux/irq_work.h>
12#include <linux/reservation.h> 13#include <linux/reservation.h>
13 14
14#include "i915_sw_fence.h" 15#include "i915_sw_fence.h"
@@ -356,31 +357,44 @@ struct i915_sw_dma_fence_cb {
356 struct i915_sw_fence *fence; 357 struct i915_sw_fence *fence;
357 struct dma_fence *dma; 358 struct dma_fence *dma;
358 struct timer_list timer; 359 struct timer_list timer;
360 struct irq_work work;
359}; 361};
360 362
361static void timer_i915_sw_fence_wake(unsigned long data) 363static void timer_i915_sw_fence_wake(unsigned long data)
362{ 364{
363 struct i915_sw_dma_fence_cb *cb = (struct i915_sw_dma_fence_cb *)data; 365 struct i915_sw_dma_fence_cb *cb = (struct i915_sw_dma_fence_cb *)data;
366 struct i915_sw_fence *fence;
367
368 fence = xchg(&cb->fence, NULL);
369 if (!fence)
370 return;
364 371
365 pr_warn("asynchronous wait on fence %s:%s:%x timed out\n", 372 pr_warn("asynchronous wait on fence %s:%s:%x timed out\n",
366 cb->dma->ops->get_driver_name(cb->dma), 373 cb->dma->ops->get_driver_name(cb->dma),
367 cb->dma->ops->get_timeline_name(cb->dma), 374 cb->dma->ops->get_timeline_name(cb->dma),
368 cb->dma->seqno); 375 cb->dma->seqno);
369 dma_fence_put(cb->dma);
370 cb->dma = NULL;
371 376
372 i915_sw_fence_complete(cb->fence); 377 i915_sw_fence_complete(fence);
373 cb->timer.function = NULL;
374} 378}
375 379
376static void dma_i915_sw_fence_wake(struct dma_fence *dma, 380static void dma_i915_sw_fence_wake(struct dma_fence *dma,
377 struct dma_fence_cb *data) 381 struct dma_fence_cb *data)
378{ 382{
379 struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base); 383 struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base);
384 struct i915_sw_fence *fence;
385
386 fence = xchg(&cb->fence, NULL);
387 if (fence)
388 i915_sw_fence_complete(fence);
389
390 irq_work_queue(&cb->work);
391}
392
393static void irq_i915_sw_fence_work(struct irq_work *wrk)
394{
395 struct i915_sw_dma_fence_cb *cb = container_of(wrk, typeof(*cb), work);
380 396
381 del_timer_sync(&cb->timer); 397 del_timer_sync(&cb->timer);
382 if (cb->timer.function)
383 i915_sw_fence_complete(cb->fence);
384 dma_fence_put(cb->dma); 398 dma_fence_put(cb->dma);
385 399
386 kfree(cb); 400 kfree(cb);
@@ -414,6 +428,7 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
414 __setup_timer(&cb->timer, 428 __setup_timer(&cb->timer,
415 timer_i915_sw_fence_wake, (unsigned long)cb, 429 timer_i915_sw_fence_wake, (unsigned long)cb,
416 TIMER_IRQSAFE); 430 TIMER_IRQSAFE);
431 init_irq_work(&cb->work, irq_i915_sw_fence_work);
417 if (timeout) { 432 if (timeout) {
418 cb->dma = dma_fence_get(dma); 433 cb->dma = dma_fence_get(dma);
419 mod_timer(&cb->timer, round_jiffies_up(jiffies + timeout)); 434 mod_timer(&cb->timer, round_jiffies_up(jiffies + timeout));
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index d805b6e6fe71..27743be5b768 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -606,11 +606,6 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder,
606 connector->encoder->base.id, 606 connector->encoder->base.id,
607 connector->encoder->name); 607 connector->encoder->name);
608 608
609 /* ELD Conn_Type */
610 connector->eld[5] &= ~(3 << 2);
611 if (intel_crtc_has_dp_encoder(crtc_state))
612 connector->eld[5] |= (1 << 2);
613
614 connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2; 609 connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
615 610
616 if (dev_priv->display.audio_codec_enable) 611 if (dev_priv->display.audio_codec_enable)
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 5949750a35ee..b881ce8596ee 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -356,7 +356,7 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
356 struct drm_display_mode *panel_fixed_mode; 356 struct drm_display_mode *panel_fixed_mode;
357 int index; 357 int index;
358 358
359 index = i915.vbt_sdvo_panel_type; 359 index = i915_modparams.vbt_sdvo_panel_type;
360 if (index == -2) { 360 if (index == -2) {
361 DRM_DEBUG_KMS("Ignore SDVO panel mode from BIOS VBT tables.\n"); 361 DRM_DEBUG_KMS("Ignore SDVO panel mode from BIOS VBT tables.\n");
362 return; 362 return;
@@ -675,8 +675,9 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
675 uint8_t vswing; 675 uint8_t vswing;
676 676
677 /* Don't read from VBT if module parameter has valid value*/ 677 /* Don't read from VBT if module parameter has valid value*/
678 if (i915.edp_vswing) { 678 if (i915_modparams.edp_vswing) {
679 dev_priv->vbt.edp.low_vswing = i915.edp_vswing == 1; 679 dev_priv->vbt.edp.low_vswing =
680 i915_modparams.edp_vswing == 1;
680 } else { 681 } else {
681 vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF; 682 vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF;
682 dev_priv->vbt.edp.low_vswing = vswing == 0; 683 dev_priv->vbt.edp.low_vswing = vswing == 0;
@@ -1162,6 +1163,13 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
1162 is_hdmi = is_dvi && (child->device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0; 1163 is_hdmi = is_dvi && (child->device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0;
1163 is_edp = is_dp && (child->device_type & DEVICE_TYPE_INTERNAL_CONNECTOR); 1164 is_edp = is_dp && (child->device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
1164 1165
1166 if (port == PORT_A && is_dvi) {
1167 DRM_DEBUG_KMS("VBT claims port A supports DVI%s, ignoring\n",
1168 is_hdmi ? "/HDMI" : "");
1169 is_dvi = false;
1170 is_hdmi = false;
1171 }
1172
1165 info->supports_dvi = is_dvi; 1173 info->supports_dvi = is_dvi;
1166 info->supports_hdmi = is_hdmi; 1174 info->supports_hdmi = is_hdmi;
1167 info->supports_dp = is_dp; 1175 info->supports_dp = is_dp;
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 4e00e5cb9fa1..29c62d481cef 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -64,7 +64,7 @@ static unsigned long wait_timeout(void)
64 64
65static noinline void missed_breadcrumb(struct intel_engine_cs *engine) 65static noinline void missed_breadcrumb(struct intel_engine_cs *engine)
66{ 66{
67 DRM_DEBUG_DRIVER("%s missed breadcrumb at %pF, irq posted? %s, current seqno=%x, last=%x\n", 67 DRM_DEBUG_DRIVER("%s missed breadcrumb at %pS, irq posted? %s, current seqno=%x, last=%x\n",
68 engine->name, __builtin_return_address(0), 68 engine->name, __builtin_return_address(0),
69 yesno(test_bit(ENGINE_IRQ_BREADCRUMB, 69 yesno(test_bit(ENGINE_IRQ_BREADCRUMB,
70 &engine->irq_posted)), 70 &engine->irq_posted)),
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index d32911816fc2..87fc42b19336 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -669,8 +669,12 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
669 val |= LCPLL_CD_SOURCE_FCLK; 669 val |= LCPLL_CD_SOURCE_FCLK;
670 I915_WRITE(LCPLL_CTL, val); 670 I915_WRITE(LCPLL_CTL, val);
671 671
672 /*
673 * According to the spec, it should be enough to poll for this 1 us.
674 * However, extensive testing shows that this can take longer.
675 */
672 if (wait_for_us(I915_READ(LCPLL_CTL) & 676 if (wait_for_us(I915_READ(LCPLL_CTL) &
673 LCPLL_CD_SOURCE_FCLK_DONE, 1)) 677 LCPLL_CD_SOURCE_FCLK_DONE, 100))
674 DRM_ERROR("Switching to FCLK failed\n"); 678 DRM_ERROR("Switching to FCLK failed\n");
675 679
676 val = I915_READ(LCPLL_CTL); 680 val = I915_READ(LCPLL_CTL);
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index a77dd80a97f2..954070255b4d 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -712,7 +712,7 @@ intel_crt_detect(struct drm_connector *connector,
712 * broken monitor (without edid) to work behind a broken kvm (that fails 712 * broken monitor (without edid) to work behind a broken kvm (that fails
713 * to have the right resistors for HP detection) needs to fix this up. 713 * to have the right resistors for HP detection) needs to fix this up.
714 * For now just bail out. */ 714 * For now just bail out. */
715 if (I915_HAS_HOTPLUG(dev_priv) && !i915.load_detect_test) { 715 if (I915_HAS_HOTPLUG(dev_priv) && !i915_modparams.load_detect_test) {
716 status = connector_status_disconnected; 716 status = connector_status_disconnected;
717 goto out; 717 goto out;
718 } 718 }
@@ -730,7 +730,7 @@ intel_crt_detect(struct drm_connector *connector,
730 else if (INTEL_GEN(dev_priv) < 4) 730 else if (INTEL_GEN(dev_priv) < 4)
731 status = intel_crt_load_detect(crt, 731 status = intel_crt_load_detect(crt,
732 to_intel_crtc(connector->state->crtc)->pipe); 732 to_intel_crtc(connector->state->crtc)->pipe);
733 else if (i915.load_detect_test) 733 else if (i915_modparams.load_detect_test)
734 status = connector_status_disconnected; 734 status = connector_status_disconnected;
735 else 735 else
736 status = connector_status_unknown; 736 status = connector_status_unknown;
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 965988f79a55..cdfb624eb82d 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -252,8 +252,14 @@ void intel_csr_load_program(struct drm_i915_private *dev_priv)
252 } 252 }
253 253
254 fw_size = dev_priv->csr.dmc_fw_size; 254 fw_size = dev_priv->csr.dmc_fw_size;
255 assert_rpm_wakelock_held(dev_priv);
256
257 preempt_disable();
258
255 for (i = 0; i < fw_size; i++) 259 for (i = 0; i < fw_size; i++)
256 I915_WRITE(CSR_PROGRAM(i), payload[i]); 260 I915_WRITE_FW(CSR_PROGRAM(i), payload[i]);
261
262 preempt_enable();
257 263
258 for (i = 0; i < dev_priv->csr.mmio_count; i++) { 264 for (i = 0; i < dev_priv->csr.mmio_count; i++) {
259 I915_WRITE(dev_priv->csr.mmioaddr[i], 265 I915_WRITE(dev_priv->csr.mmioaddr[i],
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 1da3bb2cc4b4..93cbbcbbc193 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -301,10 +301,10 @@ static const struct ddi_buf_trans skl_y_ddi_translations_hdmi[] = {
301}; 301};
302 302
303struct bxt_ddi_buf_trans { 303struct bxt_ddi_buf_trans {
304 u32 margin; /* swing value */ 304 u8 margin; /* swing value */
305 u32 scale; /* scale value */ 305 u8 scale; /* scale value */
306 u32 enable; /* scale enable */ 306 u8 enable; /* scale enable */
307 u32 deemphasis; 307 u8 deemphasis;
308 bool default_index; /* true if the entry represents default value */ 308 bool default_index; /* true if the entry represents default value */
309}; 309};
310 310
@@ -354,11 +354,11 @@ static const struct bxt_ddi_buf_trans bxt_ddi_translations_hdmi[] = {
354}; 354};
355 355
356struct cnl_ddi_buf_trans { 356struct cnl_ddi_buf_trans {
357 u32 dw2_swing_sel; 357 u8 dw2_swing_sel;
358 u32 dw7_n_scalar; 358 u8 dw7_n_scalar;
359 u32 dw4_cursor_coeff; 359 u8 dw4_cursor_coeff;
360 u32 dw4_post_cursor_2; 360 u8 dw4_post_cursor_2;
361 u32 dw4_post_cursor_1; 361 u8 dw4_post_cursor_1;
362}; 362};
363 363
364/* Voltage Swing Programming for VccIO 0.85V for DP */ 364/* Voltage Swing Programming for VccIO 0.85V for DP */
@@ -1212,7 +1212,7 @@ static int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
1212 dco_freq = (cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) * ref_clock; 1212 dco_freq = (cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) * ref_clock;
1213 1213
1214 dco_freq += (((cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >> 1214 dco_freq += (((cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
1215 DPLL_CFGCR0_DCO_FRAC_SHIFT) * ref_clock) / 0x8000; 1215 DPLL_CFGCR0_DCO_FRACTION_SHIFT) * ref_clock) / 0x8000;
1216 1216
1217 return dco_freq / (p0 * p1 * p2 * 5); 1217 return dco_freq / (p0 * p1 * p2 * 5);
1218} 1218}
@@ -1939,7 +1939,7 @@ static void cnl_ddi_vswing_program(struct drm_i915_private *dev_priv,
1939 val |= RCOMP_SCALAR(0x98); 1939 val |= RCOMP_SCALAR(0x98);
1940 I915_WRITE(CNL_PORT_TX_DW2_GRP(port), val); 1940 I915_WRITE(CNL_PORT_TX_DW2_GRP(port), val);
1941 1941
1942 /* Program PORT_TX_DW4 */ 1942 /* Program PORT_TX_DW4 */
1943 /* We cannot write to GRP. It would overrite individual loadgen */ 1943 /* We cannot write to GRP. It would overrite individual loadgen */
1944 for (ln = 0; ln < 4; ln++) { 1944 for (ln = 0; ln < 4; ln++) {
1945 val = I915_READ(CNL_PORT_TX_DW4_LN(port, ln)); 1945 val = I915_READ(CNL_PORT_TX_DW4_LN(port, ln));
@@ -1951,7 +1951,7 @@ static void cnl_ddi_vswing_program(struct drm_i915_private *dev_priv,
1951 I915_WRITE(CNL_PORT_TX_DW4_LN(port, ln), val); 1951 I915_WRITE(CNL_PORT_TX_DW4_LN(port, ln), val);
1952 } 1952 }
1953 1953
1954 /* Program PORT_TX_DW5 */ 1954 /* Program PORT_TX_DW5 */
1955 /* All DW5 values are fixed for every table entry */ 1955 /* All DW5 values are fixed for every table entry */
1956 val = I915_READ(CNL_PORT_TX_DW5_LN0(port)); 1956 val = I915_READ(CNL_PORT_TX_DW5_LN0(port));
1957 val &= ~RTERM_SELECT_MASK; 1957 val &= ~RTERM_SELECT_MASK;
@@ -1959,7 +1959,7 @@ static void cnl_ddi_vswing_program(struct drm_i915_private *dev_priv,
1959 val |= TAP3_DISABLE; 1959 val |= TAP3_DISABLE;
1960 I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val); 1960 I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val);
1961 1961
1962 /* Program PORT_TX_DW7 */ 1962 /* Program PORT_TX_DW7 */
1963 val = I915_READ(CNL_PORT_TX_DW7_LN0(port)); 1963 val = I915_READ(CNL_PORT_TX_DW7_LN0(port));
1964 val &= ~N_SCALAR_MASK; 1964 val &= ~N_SCALAR_MASK;
1965 val |= N_SCALAR(ddi_translations[level].dw7_n_scalar); 1965 val |= N_SCALAR(ddi_translations[level].dw7_n_scalar);
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index b17f7045c8f8..875d428ea75f 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -82,6 +82,39 @@ void intel_device_info_dump(struct drm_i915_private *dev_priv)
82#undef PRINT_FLAG 82#undef PRINT_FLAG
83} 83}
84 84
85static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
86{
87 struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
88 const u32 fuse2 = I915_READ(GEN8_FUSE2);
89
90 sseu->slice_mask = (fuse2 & GEN10_F2_S_ENA_MASK) >>
91 GEN10_F2_S_ENA_SHIFT;
92 sseu->subslice_mask = (1 << 4) - 1;
93 sseu->subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >>
94 GEN10_F2_SS_DIS_SHIFT);
95
96 sseu->eu_total = hweight32(~I915_READ(GEN8_EU_DISABLE0));
97 sseu->eu_total += hweight32(~I915_READ(GEN8_EU_DISABLE1));
98 sseu->eu_total += hweight32(~I915_READ(GEN8_EU_DISABLE2));
99 sseu->eu_total += hweight8(~(I915_READ(GEN10_EU_DISABLE3) &
100 GEN10_EU_DIS_SS_MASK));
101
102 /*
103 * CNL is expected to always have a uniform distribution
104 * of EU across subslices with the exception that any one
105 * EU in any one subslice may be fused off for die
106 * recovery.
107 */
108 sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
109 DIV_ROUND_UP(sseu->eu_total,
110 sseu_subslice_total(sseu)) : 0;
111
112 /* No restrictions on Power Gating */
113 sseu->has_slice_pg = 1;
114 sseu->has_subslice_pg = 1;
115 sseu->has_eu_pg = 1;
116}
117
85static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv) 118static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
86{ 119{
87 struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu; 120 struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
@@ -343,7 +376,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
343 info->num_sprites[pipe] = 1; 376 info->num_sprites[pipe] = 1;
344 } 377 }
345 378
346 if (i915.disable_display) { 379 if (i915_modparams.disable_display) {
347 DRM_INFO("Display disabled (module parameter)\n"); 380 DRM_INFO("Display disabled (module parameter)\n");
348 info->num_pipes = 0; 381 info->num_pipes = 0;
349 } else if (info->num_pipes > 0 && 382 } else if (info->num_pipes > 0 &&
@@ -409,10 +442,10 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
409 cherryview_sseu_info_init(dev_priv); 442 cherryview_sseu_info_init(dev_priv);
410 else if (IS_BROADWELL(dev_priv)) 443 else if (IS_BROADWELL(dev_priv))
411 broadwell_sseu_info_init(dev_priv); 444 broadwell_sseu_info_init(dev_priv);
412 else if (INTEL_INFO(dev_priv)->gen >= 9) 445 else if (INTEL_GEN(dev_priv) == 9)
413 gen9_sseu_info_init(dev_priv); 446 gen9_sseu_info_init(dev_priv);
414 447 else if (INTEL_GEN(dev_priv) >= 10)
415 WARN_ON(info->has_snoop != !info->has_llc); 448 gen10_sseu_info_init(dev_priv);
416 449
417 DRM_DEBUG_DRIVER("slice mask: %04x\n", info->sseu.slice_mask); 450 DRM_DEBUG_DRIVER("slice mask: %04x\n", info->sseu.slice_mask);
418 DRM_DEBUG_DRIVER("slice total: %u\n", hweight8(info->sseu.slice_mask)); 451 DRM_DEBUG_DRIVER("slice total: %u\n", hweight8(info->sseu.slice_mask));
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f780f39e0758..615c58e48613 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3701,7 +3701,7 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
3701 3701
3702 3702
3703 /* reset doesn't touch the display */ 3703 /* reset doesn't touch the display */
3704 if (!i915.force_reset_modeset_test && 3704 if (!i915_modparams.force_reset_modeset_test &&
3705 !gpu_reset_clobbers_display(dev_priv)) 3705 !gpu_reset_clobbers_display(dev_priv))
3706 return; 3706 return;
3707 3707
@@ -3757,7 +3757,7 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
3757 int ret; 3757 int ret;
3758 3758
3759 /* reset doesn't touch the display */ 3759 /* reset doesn't touch the display */
3760 if (!i915.force_reset_modeset_test && 3760 if (!i915_modparams.force_reset_modeset_test &&
3761 !gpu_reset_clobbers_display(dev_priv)) 3761 !gpu_reset_clobbers_display(dev_priv))
3762 return; 3762 return;
3763 3763
@@ -4956,7 +4956,8 @@ void hsw_enable_ips(struct intel_crtc *crtc)
4956 assert_plane_enabled(dev_priv, crtc->plane); 4956 assert_plane_enabled(dev_priv, crtc->plane);
4957 if (IS_BROADWELL(dev_priv)) { 4957 if (IS_BROADWELL(dev_priv)) {
4958 mutex_lock(&dev_priv->rps.hw_lock); 4958 mutex_lock(&dev_priv->rps.hw_lock);
4959 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000)); 4959 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
4960 IPS_ENABLE | IPS_PCODE_CONTROL));
4960 mutex_unlock(&dev_priv->rps.hw_lock); 4961 mutex_unlock(&dev_priv->rps.hw_lock);
4961 /* Quoting Art Runyan: "its not safe to expect any particular 4962 /* Quoting Art Runyan: "its not safe to expect any particular
4962 * value in IPS_CTL bit 31 after enabling IPS through the 4963 * value in IPS_CTL bit 31 after enabling IPS through the
@@ -6312,7 +6313,7 @@ static void hsw_compute_ips_config(struct intel_crtc *crtc,
6312 struct drm_device *dev = crtc->base.dev; 6313 struct drm_device *dev = crtc->base.dev;
6313 struct drm_i915_private *dev_priv = to_i915(dev); 6314 struct drm_i915_private *dev_priv = to_i915(dev);
6314 6315
6315 pipe_config->ips_enabled = i915.enable_ips && 6316 pipe_config->ips_enabled = i915_modparams.enable_ips &&
6316 hsw_crtc_supports_ips(crtc) && 6317 hsw_crtc_supports_ips(crtc) &&
6317 pipe_config_supports_ips(dev_priv, pipe_config); 6318 pipe_config_supports_ips(dev_priv, pipe_config);
6318} 6319}
@@ -6493,8 +6494,8 @@ intel_link_compute_m_n(int bits_per_pixel, int nlanes,
6493 6494
6494static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) 6495static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
6495{ 6496{
6496 if (i915.panel_use_ssc >= 0) 6497 if (i915_modparams.panel_use_ssc >= 0)
6497 return i915.panel_use_ssc != 0; 6498 return i915_modparams.panel_use_ssc != 0;
6498 return dev_priv->vbt.lvds_use_ssc 6499 return dev_priv->vbt.lvds_use_ssc
6499 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); 6500 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
6500} 6501}
@@ -9309,11 +9310,11 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
9309 pipe_config->gamma_mode = 9310 pipe_config->gamma_mode =
9310 I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK; 9311 I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
9311 9312
9312 if (IS_BROADWELL(dev_priv) || dev_priv->info.gen >= 9) { 9313 if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
9313 u32 tmp = I915_READ(PIPEMISC(crtc->pipe)); 9314 u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
9314 bool clrspace_yuv = tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV; 9315 bool clrspace_yuv = tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV;
9315 9316
9316 if (IS_GEMINILAKE(dev_priv) || dev_priv->info.gen >= 10) { 9317 if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
9317 bool blend_mode_420 = tmp & 9318 bool blend_mode_420 = tmp &
9318 PIPEMISC_YUV420_MODE_FULL_BLEND; 9319 PIPEMISC_YUV420_MODE_FULL_BLEND;
9319 9320
@@ -10223,7 +10224,7 @@ int intel_dotclock_calculate(int link_freq,
10223 if (!m_n->link_n) 10224 if (!m_n->link_n)
10224 return 0; 10225 return 0;
10225 10226
10226 return div_u64((u64)m_n->link_m * link_freq, m_n->link_n); 10227 return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
10227} 10228}
10228 10229
10229static void ironlake_pch_clock_get(struct intel_crtc *crtc, 10230static void ironlake_pch_clock_get(struct intel_crtc *crtc,
@@ -12083,7 +12084,7 @@ static int intel_atomic_check(struct drm_device *dev,
12083 return ret; 12084 return ret;
12084 } 12085 }
12085 12086
12086 if (i915.fastboot && 12087 if (i915_modparams.fastboot &&
12087 intel_pipe_config_compare(dev_priv, 12088 intel_pipe_config_compare(dev_priv,
12088 to_intel_crtc_state(old_crtc_state), 12089 to_intel_crtc_state(old_crtc_state),
12089 pipe_config, true)) { 12090 pipe_config, true)) {
@@ -12298,7 +12299,6 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
12298 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 12299 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
12299 struct drm_crtc *crtc; 12300 struct drm_crtc *crtc;
12300 struct intel_crtc_state *intel_cstate; 12301 struct intel_crtc_state *intel_cstate;
12301 bool hw_check = intel_state->modeset;
12302 u64 put_domains[I915_MAX_PIPES] = {}; 12302 u64 put_domains[I915_MAX_PIPES] = {};
12303 int i; 12303 int i;
12304 12304
@@ -12314,7 +12314,6 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
12314 12314
12315 if (needs_modeset(new_crtc_state) || 12315 if (needs_modeset(new_crtc_state) ||
12316 to_intel_crtc_state(new_crtc_state)->update_pipe) { 12316 to_intel_crtc_state(new_crtc_state)->update_pipe) {
12317 hw_check = true;
12318 12317
12319 put_domains[to_intel_crtc(crtc)->pipe] = 12318 put_domains[to_intel_crtc(crtc)->pipe] =
12320 modeset_get_crtc_power_domains(crtc, 12319 modeset_get_crtc_power_domains(crtc,
@@ -13511,7 +13510,7 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
13511 struct drm_crtc *drmmode_crtc; 13510 struct drm_crtc *drmmode_crtc;
13512 struct intel_crtc *crtc; 13511 struct intel_crtc *crtc;
13513 13512
13514 drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id); 13513 drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
13515 if (!drmmode_crtc) 13514 if (!drmmode_crtc)
13516 return -ENOENT; 13515 return -ENOENT;
13517 13516
@@ -14218,7 +14217,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
14218 dev_priv->display.fdi_link_train = hsw_fdi_link_train; 14217 dev_priv->display.fdi_link_train = hsw_fdi_link_train;
14219 } 14218 }
14220 14219
14221 if (dev_priv->info.gen >= 9) 14220 if (INTEL_GEN(dev_priv) >= 9)
14222 dev_priv->display.update_crtcs = skl_update_crtcs; 14221 dev_priv->display.update_crtcs = skl_update_crtcs;
14223 else 14222 else
14224 dev_priv->display.update_crtcs = intel_update_crtcs; 14223 dev_priv->display.update_crtcs = intel_update_crtcs;
@@ -15192,6 +15191,7 @@ void intel_display_resume(struct drm_device *dev)
15192 if (!ret) 15191 if (!ret)
15193 ret = __intel_display_resume(dev, state, &ctx); 15192 ret = __intel_display_resume(dev, state, &ctx);
15194 15193
15194 intel_enable_ipc(dev_priv);
15195 drm_modeset_drop_locks(&ctx); 15195 drm_modeset_drop_locks(&ctx);
15196 drm_modeset_acquire_fini(&ctx); 15196 drm_modeset_acquire_fini(&ctx);
15197 15197
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 887953c0f495..90e756c76f10 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -42,6 +42,7 @@
42#include "i915_drv.h" 42#include "i915_drv.h"
43 43
44#define DP_LINK_CHECK_TIMEOUT (10 * 1000) 44#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
45#define DP_DPRX_ESI_LEN 14
45 46
46/* Compliance test status bits */ 47/* Compliance test status bits */
47#define INTEL_DP_RESOLUTION_SHIFT_MASK 0 48#define INTEL_DP_RESOLUTION_SHIFT_MASK 0
@@ -2692,24 +2693,46 @@ static void intel_disable_dp(struct intel_encoder *encoder,
2692 const struct drm_connector_state *old_conn_state) 2693 const struct drm_connector_state *old_conn_state)
2693{ 2694{
2694 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2695 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2695 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2696 2696
2697 if (old_crtc_state->has_audio) 2697 if (old_crtc_state->has_audio)
2698 intel_audio_codec_disable(encoder); 2698 intel_audio_codec_disable(encoder);
2699 2699
2700 if (HAS_PSR(dev_priv) && !HAS_DDI(dev_priv))
2701 intel_psr_disable(intel_dp, old_crtc_state);
2702
2703 /* Make sure the panel is off before trying to change the mode. But also 2700 /* Make sure the panel is off before trying to change the mode. But also
2704 * ensure that we have vdd while we switch off the panel. */ 2701 * ensure that we have vdd while we switch off the panel. */
2705 intel_edp_panel_vdd_on(intel_dp); 2702 intel_edp_panel_vdd_on(intel_dp);
2706 intel_edp_backlight_off(old_conn_state); 2703 intel_edp_backlight_off(old_conn_state);
2707 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); 2704 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2708 intel_edp_panel_off(intel_dp); 2705 intel_edp_panel_off(intel_dp);
2706}
2707
2708static void g4x_disable_dp(struct intel_encoder *encoder,
2709 const struct intel_crtc_state *old_crtc_state,
2710 const struct drm_connector_state *old_conn_state)
2711{
2712 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2713
2714 intel_disable_dp(encoder, old_crtc_state, old_conn_state);
2709 2715
2710 /* disable the port before the pipe on g4x */ 2716 /* disable the port before the pipe on g4x */
2711 if (INTEL_GEN(dev_priv) < 5) 2717 intel_dp_link_down(intel_dp);
2712 intel_dp_link_down(intel_dp); 2718}
2719
2720static void ilk_disable_dp(struct intel_encoder *encoder,
2721 const struct intel_crtc_state *old_crtc_state,
2722 const struct drm_connector_state *old_conn_state)
2723{
2724 intel_disable_dp(encoder, old_crtc_state, old_conn_state);
2725}
2726
2727static void vlv_disable_dp(struct intel_encoder *encoder,
2728 const struct intel_crtc_state *old_crtc_state,
2729 const struct drm_connector_state *old_conn_state)
2730{
2731 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2732
2733 intel_psr_disable(intel_dp, old_crtc_state);
2734
2735 intel_disable_dp(encoder, old_crtc_state, old_conn_state);
2713} 2736}
2714 2737
2715static void ilk_post_disable_dp(struct intel_encoder *encoder, 2738static void ilk_post_disable_dp(struct intel_encoder *encoder,
@@ -3826,7 +3849,7 @@ intel_dp_can_mst(struct intel_dp *intel_dp)
3826{ 3849{
3827 u8 mstm_cap; 3850 u8 mstm_cap;
3828 3851
3829 if (!i915.enable_dp_mst) 3852 if (!i915_modparams.enable_dp_mst)
3830 return false; 3853 return false;
3831 3854
3832 if (!intel_dp->can_mst) 3855 if (!intel_dp->can_mst)
@@ -3844,7 +3867,7 @@ intel_dp_can_mst(struct intel_dp *intel_dp)
3844static void 3867static void
3845intel_dp_configure_mst(struct intel_dp *intel_dp) 3868intel_dp_configure_mst(struct intel_dp *intel_dp)
3846{ 3869{
3847 if (!i915.enable_dp_mst) 3870 if (!i915_modparams.enable_dp_mst)
3848 return; 3871 return;
3849 3872
3850 if (!intel_dp->can_mst) 3873 if (!intel_dp->can_mst)
@@ -3991,15 +4014,9 @@ intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3991static bool 4014static bool
3992intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector) 4015intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3993{ 4016{
3994 int ret; 4017 return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
3995 4018 sink_irq_vector, DP_DPRX_ESI_LEN) ==
3996 ret = drm_dp_dpcd_read(&intel_dp->aux, 4019 DP_DPRX_ESI_LEN;
3997 DP_SINK_COUNT_ESI,
3998 sink_irq_vector, 14);
3999 if (ret != 14)
4000 return false;
4001
4002 return true;
4003} 4020}
4004 4021
4005static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp) 4022static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
@@ -4199,7 +4216,7 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
4199 bool bret; 4216 bool bret;
4200 4217
4201 if (intel_dp->is_mst) { 4218 if (intel_dp->is_mst) {
4202 u8 esi[16] = { 0 }; 4219 u8 esi[DP_DPRX_ESI_LEN] = { 0 };
4203 int ret = 0; 4220 int ret = 0;
4204 int retry; 4221 int retry;
4205 bool handled; 4222 bool handled;
@@ -4736,10 +4753,6 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
4736 if (intel_encoder->type != INTEL_OUTPUT_EDP) 4753 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4737 intel_encoder->type = INTEL_OUTPUT_DP; 4754 intel_encoder->type = INTEL_OUTPUT_DP;
4738 4755
4739 DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
4740 yesno(intel_dp_source_supports_hbr2(intel_dp)),
4741 yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
4742
4743 if (intel_dp->reset_link_params) { 4756 if (intel_dp->reset_link_params) {
4744 /* Initial max link lane count */ 4757 /* Initial max link lane count */
4745 intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp); 4758 intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
@@ -5467,11 +5480,6 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
5467 return; 5480 return;
5468 } 5481 }
5469 5482
5470 /*
5471 * FIXME: This needs proper synchronization with psr state for some
5472 * platforms that cannot have PSR and DRRS enabled at the same time.
5473 */
5474
5475 dig_port = dp_to_dig_port(intel_dp); 5483 dig_port = dp_to_dig_port(intel_dp);
5476 encoder = &dig_port->base; 5484 encoder = &dig_port->base;
5477 intel_crtc = to_intel_crtc(encoder->base.crtc); 5485 intel_crtc = to_intel_crtc(encoder->base.crtc);
@@ -5555,6 +5563,11 @@ void intel_edp_drrs_enable(struct intel_dp *intel_dp,
5555 return; 5563 return;
5556 } 5564 }
5557 5565
5566 if (dev_priv->psr.enabled) {
5567 DRM_DEBUG_KMS("PSR enabled. Not enabling DRRS.\n");
5568 return;
5569 }
5570
5558 mutex_lock(&dev_priv->drrs.mutex); 5571 mutex_lock(&dev_priv->drrs.mutex);
5559 if (WARN_ON(dev_priv->drrs.dp)) { 5572 if (WARN_ON(dev_priv->drrs.dp)) {
5560 DRM_ERROR("DRRS already enabled\n"); 5573 DRM_ERROR("DRRS already enabled\n");
@@ -6145,7 +6158,6 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
6145 goto err_encoder_init; 6158 goto err_encoder_init;
6146 6159
6147 intel_encoder->compute_config = intel_dp_compute_config; 6160 intel_encoder->compute_config = intel_dp_compute_config;
6148 intel_encoder->disable = intel_disable_dp;
6149 intel_encoder->get_hw_state = intel_dp_get_hw_state; 6161 intel_encoder->get_hw_state = intel_dp_get_hw_state;
6150 intel_encoder->get_config = intel_dp_get_config; 6162 intel_encoder->get_config = intel_dp_get_config;
6151 intel_encoder->suspend = intel_dp_encoder_suspend; 6163 intel_encoder->suspend = intel_dp_encoder_suspend;
@@ -6153,18 +6165,24 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
6153 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable; 6165 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
6154 intel_encoder->pre_enable = chv_pre_enable_dp; 6166 intel_encoder->pre_enable = chv_pre_enable_dp;
6155 intel_encoder->enable = vlv_enable_dp; 6167 intel_encoder->enable = vlv_enable_dp;
6168 intel_encoder->disable = vlv_disable_dp;
6156 intel_encoder->post_disable = chv_post_disable_dp; 6169 intel_encoder->post_disable = chv_post_disable_dp;
6157 intel_encoder->post_pll_disable = chv_dp_post_pll_disable; 6170 intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
6158 } else if (IS_VALLEYVIEW(dev_priv)) { 6171 } else if (IS_VALLEYVIEW(dev_priv)) {
6159 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable; 6172 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
6160 intel_encoder->pre_enable = vlv_pre_enable_dp; 6173 intel_encoder->pre_enable = vlv_pre_enable_dp;
6161 intel_encoder->enable = vlv_enable_dp; 6174 intel_encoder->enable = vlv_enable_dp;
6175 intel_encoder->disable = vlv_disable_dp;
6162 intel_encoder->post_disable = vlv_post_disable_dp; 6176 intel_encoder->post_disable = vlv_post_disable_dp;
6177 } else if (INTEL_GEN(dev_priv) >= 5) {
6178 intel_encoder->pre_enable = g4x_pre_enable_dp;
6179 intel_encoder->enable = g4x_enable_dp;
6180 intel_encoder->disable = ilk_disable_dp;
6181 intel_encoder->post_disable = ilk_post_disable_dp;
6163 } else { 6182 } else {
6164 intel_encoder->pre_enable = g4x_pre_enable_dp; 6183 intel_encoder->pre_enable = g4x_pre_enable_dp;
6165 intel_encoder->enable = g4x_enable_dp; 6184 intel_encoder->enable = g4x_enable_dp;
6166 if (INTEL_GEN(dev_priv) >= 5) 6185 intel_encoder->disable = g4x_disable_dp;
6167 intel_encoder->post_disable = ilk_post_disable_dp;
6168 } 6186 }
6169 6187
6170 intel_dig_port->port = port; 6188 intel_dig_port->port = port;
diff --git a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
index d2830ba3162e..2bb2ceb9d463 100644
--- a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
@@ -264,7 +264,7 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
264{ 264{
265 struct intel_panel *panel = &intel_connector->panel; 265 struct intel_panel *panel = &intel_connector->panel;
266 266
267 if (!i915.enable_dpcd_backlight) 267 if (!i915_modparams.enable_dpcd_backlight)
268 return -ENODEV; 268 return -ENODEV;
269 269
270 if (!intel_dp_aux_display_control_capable(intel_connector)) 270 if (!intel_dp_aux_display_control_capable(intel_connector))
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 8e3aad0ea60b..9a396f483f8b 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -133,7 +133,7 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder,
133 to_intel_connector(old_conn_state->connector); 133 to_intel_connector(old_conn_state->connector);
134 int ret; 134 int ret;
135 135
136 DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links); 136 DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
137 137
138 drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, connector->port); 138 drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, connector->port);
139 139
@@ -155,8 +155,6 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
155 struct intel_connector *connector = 155 struct intel_connector *connector =
156 to_intel_connector(old_conn_state->connector); 156 to_intel_connector(old_conn_state->connector);
157 157
158 DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
159
160 /* this can fail */ 158 /* this can fail */
161 drm_dp_check_act_status(&intel_dp->mst_mgr); 159 drm_dp_check_act_status(&intel_dp->mst_mgr);
162 /* and this can also fail */ 160 /* and this can also fail */
@@ -173,6 +171,7 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
173 171
174 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); 172 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
175 } 173 }
174 DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
176} 175}
177 176
178static void intel_mst_pre_enable_dp(struct intel_encoder *encoder, 177static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
@@ -195,7 +194,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
195 connector->encoder = encoder; 194 connector->encoder = encoder;
196 intel_mst->connector = connector; 195 intel_mst->connector = connector;
197 196
198 DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links); 197 DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
199 198
200 if (intel_dp->active_mst_links == 0) 199 if (intel_dp->active_mst_links == 0)
201 intel_dig_port->base.pre_enable(&intel_dig_port->base, 200 intel_dig_port->base.pre_enable(&intel_dig_port->base,
@@ -229,7 +228,7 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,
229 enum port port = intel_dig_port->port; 228 enum port port = intel_dig_port->port;
230 int ret; 229 int ret;
231 230
232 DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links); 231 DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
233 232
234 if (intel_wait_for_register(dev_priv, 233 if (intel_wait_for_register(dev_priv,
235 DP_TP_STATUS(port), 234 DP_TP_STATUS(port),
@@ -494,6 +493,7 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
494 struct intel_connector *intel_connector = to_intel_connector(connector); 493 struct intel_connector *intel_connector = to_intel_connector(connector);
495 struct drm_i915_private *dev_priv = to_i915(connector->dev); 494 struct drm_i915_private *dev_priv = to_i915(connector->dev);
496 495
496 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name);
497 drm_connector_unregister(connector); 497 drm_connector_unregister(connector);
498 498
499 if (dev_priv->fbdev) 499 if (dev_priv->fbdev)
@@ -505,7 +505,6 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
505 drm_modeset_unlock(&connector->dev->mode_config.connection_mutex); 505 drm_modeset_unlock(&connector->dev->mode_config.connection_mutex);
506 506
507 drm_connector_unreference(connector); 507 drm_connector_unreference(connector);
508 DRM_DEBUG_KMS("\n");
509} 508}
510 509
511static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) 510static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 463ed152e6b1..0cab667fff57 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -494,6 +494,8 @@ struct intel_crtc_scaler_state {
494 494
495/* drm_mode->private_flags */ 495/* drm_mode->private_flags */
496#define I915_MODE_FLAG_INHERITED 1 496#define I915_MODE_FLAG_INHERITED 1
497/* Flag to get scanline using frame time stamps */
498#define I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP (1<<1)
497 499
498struct intel_pipe_wm { 500struct intel_pipe_wm {
499 struct intel_wm_level wm[5]; 501 struct intel_wm_level wm[5];
@@ -1898,9 +1900,11 @@ bool ilk_disable_lp_wm(struct drm_device *dev);
1898int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6); 1900int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6);
1899int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc, 1901int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
1900 struct intel_crtc_state *cstate); 1902 struct intel_crtc_state *cstate);
1903void intel_init_ipc(struct drm_i915_private *dev_priv);
1904void intel_enable_ipc(struct drm_i915_private *dev_priv);
1901static inline int intel_enable_rc6(void) 1905static inline int intel_enable_rc6(void)
1902{ 1906{
1903 return i915.enable_rc6; 1907 return i915_modparams.enable_rc6;
1904} 1908}
1905 1909
1906/* intel_sdvo.c */ 1910/* intel_sdvo.c */
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index fc25d7d2d942..20a7b004ffd7 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -263,7 +263,7 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
263 263
264 /* XXX: old code skips write if control unchanged */ 264 /* XXX: old code skips write if control unchanged */
265 if (cmd == I915_READ(MIPI_DPI_CONTROL(port))) 265 if (cmd == I915_READ(MIPI_DPI_CONTROL(port)))
266 DRM_ERROR("Same special packet %02x twice in a row.\n", cmd); 266 DRM_DEBUG_KMS("Same special packet %02x twice in a row.\n", cmd);
267 267
268 I915_WRITE(MIPI_DPI_CONTROL(port), cmd); 268 I915_WRITE(MIPI_DPI_CONTROL(port), cmd);
269 269
@@ -330,6 +330,10 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
330 adjusted_mode->flags = 0; 330 adjusted_mode->flags = 0;
331 331
332 if (IS_GEN9_LP(dev_priv)) { 332 if (IS_GEN9_LP(dev_priv)) {
333 /* Enable Frame time stamp based scanline reporting */
334 adjusted_mode->private_flags |=
335 I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP;
336
333 /* Dual link goes to DSI transcoder A. */ 337 /* Dual link goes to DSI transcoder A. */
334 if (intel_dsi->ports == BIT(PORT_C)) 338 if (intel_dsi->ports == BIT(PORT_C))
335 pipe_config->cpu_transcoder = TRANSCODER_DSI_C; 339 pipe_config->cpu_transcoder = TRANSCODER_DSI_C;
@@ -1102,6 +1106,10 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
1102 pixel_format_from_register_bits(fmt)); 1106 pixel_format_from_register_bits(fmt));
1103 bpp = pipe_config->pipe_bpp; 1107 bpp = pipe_config->pipe_bpp;
1104 1108
1109 /* Enable Frame time stamo based scanline reporting */
1110 adjusted_mode->private_flags |=
1111 I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP;
1112
1105 /* In terms of pixels */ 1113 /* In terms of pixels */
1106 adjusted_mode->crtc_hdisplay = 1114 adjusted_mode->crtc_hdisplay =
1107 I915_READ(BXT_MIPI_TRANS_HACTIVE(port)); 1115 I915_READ(BXT_MIPI_TRANS_HACTIVE(port));
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index b8e9a234af2d..a28e2a864cf1 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -39,6 +39,7 @@
39 39
40#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE) 40#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
41#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE) 41#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
42#define GEN10_LR_CONTEXT_RENDER_SIZE (19 * PAGE_SIZE)
42 43
43#define GEN8_LR_CONTEXT_OTHER_SIZE ( 2 * PAGE_SIZE) 44#define GEN8_LR_CONTEXT_OTHER_SIZE ( 2 * PAGE_SIZE)
44 45
@@ -150,10 +151,11 @@ __intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
150 default: 151 default:
151 MISSING_CASE(INTEL_GEN(dev_priv)); 152 MISSING_CASE(INTEL_GEN(dev_priv));
152 case 10: 153 case 10:
154 return GEN10_LR_CONTEXT_RENDER_SIZE;
153 case 9: 155 case 9:
154 return GEN9_LR_CONTEXT_RENDER_SIZE; 156 return GEN9_LR_CONTEXT_RENDER_SIZE;
155 case 8: 157 case 8:
156 return i915.enable_execlists ? 158 return i915_modparams.enable_execlists ?
157 GEN8_LR_CONTEXT_RENDER_SIZE : 159 GEN8_LR_CONTEXT_RENDER_SIZE :
158 GEN8_CXT_TOTAL_SIZE; 160 GEN8_CXT_TOTAL_SIZE;
159 case 7: 161 case 7:
@@ -301,7 +303,7 @@ int intel_engines_init(struct drm_i915_private *dev_priv)
301 &intel_engine_classes[engine->class]; 303 &intel_engine_classes[engine->class];
302 int (*init)(struct intel_engine_cs *engine); 304 int (*init)(struct intel_engine_cs *engine);
303 305
304 if (i915.enable_execlists) 306 if (i915_modparams.enable_execlists)
305 init = class_info->init_execlists; 307 init = class_info->init_execlists;
306 else 308 else
307 init = class_info->init_legacy; 309 init = class_info->init_legacy;
@@ -380,6 +382,37 @@ static void intel_engine_init_timeline(struct intel_engine_cs *engine)
380 engine->timeline = &engine->i915->gt.global_timeline.engine[engine->id]; 382 engine->timeline = &engine->i915->gt.global_timeline.engine[engine->id];
381} 383}
382 384
385static bool csb_force_mmio(struct drm_i915_private *i915)
386{
387 /* GVT emulation depends upon intercepting CSB mmio */
388 if (intel_vgpu_active(i915))
389 return true;
390
391 /*
392 * IOMMU adds unpredictable latency causing the CSB write (from the
393 * GPU into the HWSP) to only be visible some time after the interrupt
394 * (missed breadcrumb syndrome).
395 */
396 if (intel_vtd_active())
397 return true;
398
399 return false;
400}
401
402static void intel_engine_init_execlist(struct intel_engine_cs *engine)
403{
404 struct intel_engine_execlists * const execlists = &engine->execlists;
405
406 execlists->csb_use_mmio = csb_force_mmio(engine->i915);
407
408 execlists->port_mask = 1;
409 BUILD_BUG_ON_NOT_POWER_OF_2(execlists_num_ports(execlists));
410 GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);
411
412 execlists->queue = RB_ROOT;
413 execlists->first = NULL;
414}
415
383/** 416/**
384 * intel_engines_setup_common - setup engine state not requiring hw access 417 * intel_engines_setup_common - setup engine state not requiring hw access
385 * @engine: Engine to setup. 418 * @engine: Engine to setup.
@@ -391,8 +424,7 @@ static void intel_engine_init_timeline(struct intel_engine_cs *engine)
391 */ 424 */
392void intel_engine_setup_common(struct intel_engine_cs *engine) 425void intel_engine_setup_common(struct intel_engine_cs *engine)
393{ 426{
394 engine->execlist_queue = RB_ROOT; 427 intel_engine_init_execlist(engine);
395 engine->execlist_first = NULL;
396 428
397 intel_engine_init_timeline(engine); 429 intel_engine_init_timeline(engine);
398 intel_engine_init_hangcheck(engine); 430 intel_engine_init_hangcheck(engine);
@@ -442,6 +474,116 @@ static void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
442 i915_vma_unpin_and_release(&engine->scratch); 474 i915_vma_unpin_and_release(&engine->scratch);
443} 475}
444 476
477static void cleanup_phys_status_page(struct intel_engine_cs *engine)
478{
479 struct drm_i915_private *dev_priv = engine->i915;
480
481 if (!dev_priv->status_page_dmah)
482 return;
483
484 drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah);
485 engine->status_page.page_addr = NULL;
486}
487
488static void cleanup_status_page(struct intel_engine_cs *engine)
489{
490 struct i915_vma *vma;
491 struct drm_i915_gem_object *obj;
492
493 vma = fetch_and_zero(&engine->status_page.vma);
494 if (!vma)
495 return;
496
497 obj = vma->obj;
498
499 i915_vma_unpin(vma);
500 i915_vma_close(vma);
501
502 i915_gem_object_unpin_map(obj);
503 __i915_gem_object_release_unless_active(obj);
504}
505
506static int init_status_page(struct intel_engine_cs *engine)
507{
508 struct drm_i915_gem_object *obj;
509 struct i915_vma *vma;
510 unsigned int flags;
511 void *vaddr;
512 int ret;
513
514 obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
515 if (IS_ERR(obj)) {
516 DRM_ERROR("Failed to allocate status page\n");
517 return PTR_ERR(obj);
518 }
519
520 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
521 if (ret)
522 goto err;
523
524 vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
525 if (IS_ERR(vma)) {
526 ret = PTR_ERR(vma);
527 goto err;
528 }
529
530 flags = PIN_GLOBAL;
531 if (!HAS_LLC(engine->i915))
532 /* On g33, we cannot place HWS above 256MiB, so
533 * restrict its pinning to the low mappable arena.
534 * Though this restriction is not documented for
535 * gen4, gen5, or byt, they also behave similarly
536 * and hang if the HWS is placed at the top of the
537 * GTT. To generalise, it appears that all !llc
538 * platforms have issues with us placing the HWS
539 * above the mappable region (even though we never
540 * actually map it).
541 */
542 flags |= PIN_MAPPABLE;
543 else
544 flags |= PIN_HIGH;
545 ret = i915_vma_pin(vma, 0, 4096, flags);
546 if (ret)
547 goto err;
548
549 vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
550 if (IS_ERR(vaddr)) {
551 ret = PTR_ERR(vaddr);
552 goto err_unpin;
553 }
554
555 engine->status_page.vma = vma;
556 engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
557 engine->status_page.page_addr = memset(vaddr, 0, PAGE_SIZE);
558
559 DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
560 engine->name, i915_ggtt_offset(vma));
561 return 0;
562
563err_unpin:
564 i915_vma_unpin(vma);
565err:
566 i915_gem_object_put(obj);
567 return ret;
568}
569
570static int init_phys_status_page(struct intel_engine_cs *engine)
571{
572 struct drm_i915_private *dev_priv = engine->i915;
573
574 GEM_BUG_ON(engine->id != RCS);
575
576 dev_priv->status_page_dmah =
577 drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
578 if (!dev_priv->status_page_dmah)
579 return -ENOMEM;
580
581 engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
582 memset(engine->status_page.page_addr, 0, PAGE_SIZE);
583
584 return 0;
585}
586
445/** 587/**
446 * intel_engines_init_common - initialize cengine state which might require hw access 588 * intel_engines_init_common - initialize cengine state which might require hw access
447 * @engine: Engine to initialize. 589 * @engine: Engine to initialize.
@@ -477,10 +619,21 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
477 619
478 ret = i915_gem_render_state_init(engine); 620 ret = i915_gem_render_state_init(engine);
479 if (ret) 621 if (ret)
480 goto err_unpin; 622 goto err_breadcrumbs;
623
624 if (HWS_NEEDS_PHYSICAL(engine->i915))
625 ret = init_phys_status_page(engine);
626 else
627 ret = init_status_page(engine);
628 if (ret)
629 goto err_rs_fini;
481 630
482 return 0; 631 return 0;
483 632
633err_rs_fini:
634 i915_gem_render_state_fini(engine);
635err_breadcrumbs:
636 intel_engine_fini_breadcrumbs(engine);
484err_unpin: 637err_unpin:
485 engine->context_unpin(engine, engine->i915->kernel_context); 638 engine->context_unpin(engine, engine->i915->kernel_context);
486 return ret; 639 return ret;
@@ -497,6 +650,11 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
497{ 650{
498 intel_engine_cleanup_scratch(engine); 651 intel_engine_cleanup_scratch(engine);
499 652
653 if (HWS_NEEDS_PHYSICAL(engine->i915))
654 cleanup_phys_status_page(engine);
655 else
656 cleanup_status_page(engine);
657
500 i915_gem_render_state_fini(engine); 658 i915_gem_render_state_fini(engine);
501 intel_engine_fini_breadcrumbs(engine); 659 intel_engine_fini_breadcrumbs(engine);
502 intel_engine_cleanup_cmd_parser(engine); 660 intel_engine_cleanup_cmd_parser(engine);
@@ -812,6 +970,19 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
812 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | 970 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
813 ECOCHK_DIS_TLB); 971 ECOCHK_DIS_TLB);
814 972
973 if (HAS_LLC(dev_priv)) {
974 /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
975 *
976 * Must match Display Engine. See
977 * WaCompressedResourceDisplayNewHashMode.
978 */
979 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
980 GEN9_PBE_COMPRESSED_HASH_SELECTION);
981 WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
982 GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);
983 WA_SET_BIT(MMCD_MISC_CTRL, MMCD_PCLA | MMCD_HOTSPOT_EN);
984 }
985
815 /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */ 986 /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
816 /* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */ 987 /* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
817 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, 988 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
@@ -981,12 +1152,14 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
981 GEN9_GAPS_TSV_CREDIT_DISABLE)); 1152 GEN9_GAPS_TSV_CREDIT_DISABLE));
982 1153
983 /* WaDisableGafsUnitClkGating:skl */ 1154 /* WaDisableGafsUnitClkGating:skl */
984 WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); 1155 I915_WRITE(GEN7_UCGCTL4, (I915_READ(GEN7_UCGCTL4) |
1156 GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE));
985 1157
986 /* WaInPlaceDecompressionHang:skl */ 1158 /* WaInPlaceDecompressionHang:skl */
987 if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER)) 1159 if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
988 WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA, 1160 I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
989 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); 1161 (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
1162 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));
990 1163
991 /* WaDisableLSQCROPERFforOCL:skl */ 1164 /* WaDisableLSQCROPERFforOCL:skl */
992 ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4); 1165 ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
@@ -1022,8 +1195,8 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
1022 1195
1023 /* WaDisablePooledEuLoadBalancingFix:bxt */ 1196 /* WaDisablePooledEuLoadBalancingFix:bxt */
1024 if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) { 1197 if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
1025 WA_SET_BIT_MASKED(FF_SLICE_CS_CHICKEN2, 1198 I915_WRITE(FF_SLICE_CS_CHICKEN2,
1026 GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE); 1199 _MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE));
1027 } 1200 }
1028 1201
1029 /* WaDisableSbeCacheDispatchPortSharing:bxt */ 1202 /* WaDisableSbeCacheDispatchPortSharing:bxt */
@@ -1059,8 +1232,9 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
1059 1232
1060 /* WaInPlaceDecompressionHang:bxt */ 1233 /* WaInPlaceDecompressionHang:bxt */
1061 if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER)) 1234 if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
1062 WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA, 1235 I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
1063 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); 1236 (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
1237 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));
1064 1238
1065 return 0; 1239 return 0;
1066} 1240}
@@ -1070,10 +1244,11 @@ static int cnl_init_workarounds(struct intel_engine_cs *engine)
1070 struct drm_i915_private *dev_priv = engine->i915; 1244 struct drm_i915_private *dev_priv = engine->i915;
1071 int ret; 1245 int ret;
1072 1246
1073 /* WaDisableI2mCycleOnWRPort: cnl (pre-prod) */ 1247 /* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
1074 if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0)) 1248 if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
1075 WA_SET_BIT(GAMT_CHKN_BIT_REG, 1249 I915_WRITE(GAMT_CHKN_BIT_REG,
1076 GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT); 1250 (I915_READ(GAMT_CHKN_BIT_REG) |
1251 GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT));
1077 1252
1078 /* WaForceContextSaveRestoreNonCoherent:cnl */ 1253 /* WaForceContextSaveRestoreNonCoherent:cnl */
1079 WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0, 1254 WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0,
@@ -1093,11 +1268,12 @@ static int cnl_init_workarounds(struct intel_engine_cs *engine)
1093 GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE); 1268 GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE);
1094 1269
1095 /* WaInPlaceDecompressionHang:cnl */ 1270 /* WaInPlaceDecompressionHang:cnl */
1096 WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA, 1271 I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
1097 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); 1272 (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
1273 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));
1098 1274
1099 /* WaPushConstantDereferenceHoldDisable:cnl */ 1275 /* WaPushConstantDereferenceHoldDisable:cnl */
1100 WA_SET_BIT(GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE); 1276 WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE);
1101 1277
1102 /* FtrEnableFastAnisoL1BankingFix: cnl */ 1278 /* FtrEnableFastAnisoL1BankingFix: cnl */
1103 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, CNL_FAST_ANISO_L1_BANKING_FIX); 1279 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, CNL_FAST_ANISO_L1_BANKING_FIX);
@@ -1125,8 +1301,9 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine)
1125 1301
1126 /* WaDisableDynamicCreditSharing:kbl */ 1302 /* WaDisableDynamicCreditSharing:kbl */
1127 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0)) 1303 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
1128 WA_SET_BIT(GAMT_CHKN_BIT_REG, 1304 I915_WRITE(GAMT_CHKN_BIT_REG,
1129 GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING); 1305 (I915_READ(GAMT_CHKN_BIT_REG) |
1306 GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING));
1130 1307
1131 /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */ 1308 /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
1132 if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0)) 1309 if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
@@ -1139,7 +1316,8 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine)
1139 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); 1316 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
1140 1317
1141 /* WaDisableGafsUnitClkGating:kbl */ 1318 /* WaDisableGafsUnitClkGating:kbl */
1142 WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); 1319 I915_WRITE(GEN7_UCGCTL4, (I915_READ(GEN7_UCGCTL4) |
1320 GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE));
1143 1321
1144 /* WaDisableSbeCacheDispatchPortSharing:kbl */ 1322 /* WaDisableSbeCacheDispatchPortSharing:kbl */
1145 WA_SET_BIT_MASKED( 1323 WA_SET_BIT_MASKED(
@@ -1147,8 +1325,9 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine)
1147 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); 1325 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
1148 1326
1149 /* WaInPlaceDecompressionHang:kbl */ 1327 /* WaInPlaceDecompressionHang:kbl */
1150 WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA, 1328 I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
1151 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); 1329 (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
1330 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));
1152 1331
1153 /* WaDisableLSQCROPERFforOCL:kbl */ 1332 /* WaDisableLSQCROPERFforOCL:kbl */
1154 ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4); 1333 ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
@@ -1192,7 +1371,8 @@ static int cfl_init_workarounds(struct intel_engine_cs *engine)
1192 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); 1371 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
1193 1372
1194 /* WaDisableGafsUnitClkGating:cfl */ 1373 /* WaDisableGafsUnitClkGating:cfl */
1195 WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); 1374 I915_WRITE(GEN7_UCGCTL4, (I915_READ(GEN7_UCGCTL4) |
1375 GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE));
1196 1376
1197 /* WaDisableSbeCacheDispatchPortSharing:cfl */ 1377 /* WaDisableSbeCacheDispatchPortSharing:cfl */
1198 WA_SET_BIT_MASKED( 1378 WA_SET_BIT_MASKED(
@@ -1200,8 +1380,9 @@ static int cfl_init_workarounds(struct intel_engine_cs *engine)
1200 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); 1380 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
1201 1381
1202 /* WaInPlaceDecompressionHang:cfl */ 1382 /* WaInPlaceDecompressionHang:cfl */
1203 WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA, 1383 I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
1204 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); 1384 (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
1385 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));
1205 1386
1206 return 0; 1387 return 0;
1207} 1388}
@@ -1324,11 +1505,11 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
1324 return false; 1505 return false;
1325 1506
1326 /* Both ports drained, no more ELSP submission? */ 1507 /* Both ports drained, no more ELSP submission? */
1327 if (port_request(&engine->execlist_port[0])) 1508 if (port_request(&engine->execlists.port[0]))
1328 return false; 1509 return false;
1329 1510
1330 /* ELSP is empty, but there are ready requests? */ 1511 /* ELSP is empty, but there are ready requests? */
1331 if (READ_ONCE(engine->execlist_first)) 1512 if (READ_ONCE(engine->execlists.first))
1332 return false; 1513 return false;
1333 1514
1334 /* Ring stopped? */ 1515 /* Ring stopped? */
@@ -1377,8 +1558,8 @@ void intel_engines_mark_idle(struct drm_i915_private *i915)
1377 for_each_engine(engine, i915, id) { 1558 for_each_engine(engine, i915, id) {
1378 intel_engine_disarm_breadcrumbs(engine); 1559 intel_engine_disarm_breadcrumbs(engine);
1379 i915_gem_batch_pool_fini(&engine->batch_pool); 1560 i915_gem_batch_pool_fini(&engine->batch_pool);
1380 tasklet_kill(&engine->irq_tasklet); 1561 tasklet_kill(&engine->execlists.irq_tasklet);
1381 engine->no_priolist = false; 1562 engine->execlists.no_priolist = false;
1382 } 1563 }
1383} 1564}
1384 1565
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 58a772de6672..8e3a05505f49 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -859,7 +859,7 @@ static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
859 return false; 859 return false;
860 } 860 }
861 861
862 if (!i915.enable_fbc) { 862 if (!i915_modparams.enable_fbc) {
863 fbc->no_fbc_reason = "disabled per module param or by default"; 863 fbc->no_fbc_reason = "disabled per module param or by default";
864 return false; 864 return false;
865 } 865 }
@@ -1310,8 +1310,8 @@ void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
1310 */ 1310 */
1311static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv) 1311static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
1312{ 1312{
1313 if (i915.enable_fbc >= 0) 1313 if (i915_modparams.enable_fbc >= 0)
1314 return !!i915.enable_fbc; 1314 return !!i915_modparams.enable_fbc;
1315 1315
1316 if (!HAS_FBC(dev_priv)) 1316 if (!HAS_FBC(dev_priv))
1317 return 0; 1317 return 0;
@@ -1355,8 +1355,9 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
1355 if (need_fbc_vtd_wa(dev_priv)) 1355 if (need_fbc_vtd_wa(dev_priv))
1356 mkwrite_device_info(dev_priv)->has_fbc = false; 1356 mkwrite_device_info(dev_priv)->has_fbc = false;
1357 1357
1358 i915.enable_fbc = intel_sanitize_fbc_option(dev_priv); 1358 i915_modparams.enable_fbc = intel_sanitize_fbc_option(dev_priv);
1359 DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n", i915.enable_fbc); 1359 DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n",
1360 i915_modparams.enable_fbc);
1360 1361
1361 if (!HAS_FBC(dev_priv)) { 1362 if (!HAS_FBC(dev_priv)) {
1362 fbc->no_fbc_reason = "unsupported by this chipset"; 1363 fbc->no_fbc_reason = "unsupported by this chipset";
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
index 04689600e337..77c123cc8817 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -88,14 +88,15 @@ static void i9xx_check_fifo_underruns(struct intel_crtc *crtc)
88{ 88{
89 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 89 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
90 i915_reg_t reg = PIPESTAT(crtc->pipe); 90 i915_reg_t reg = PIPESTAT(crtc->pipe);
91 u32 pipestat = I915_READ(reg) & 0xffff0000; 91 u32 enable_mask;
92 92
93 lockdep_assert_held(&dev_priv->irq_lock); 93 lockdep_assert_held(&dev_priv->irq_lock);
94 94
95 if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0) 95 if ((I915_READ(reg) & PIPE_FIFO_UNDERRUN_STATUS) == 0)
96 return; 96 return;
97 97
98 I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS); 98 enable_mask = i915_pipestat_enable_mask(dev_priv, crtc->pipe);
99 I915_WRITE(reg, enable_mask | PIPE_FIFO_UNDERRUN_STATUS);
99 POSTING_READ(reg); 100 POSTING_READ(reg);
100 101
101 trace_intel_cpu_fifo_underrun(dev_priv, crtc->pipe); 102 trace_intel_cpu_fifo_underrun(dev_priv, crtc->pipe);
@@ -108,15 +109,16 @@ static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
108{ 109{
109 struct drm_i915_private *dev_priv = to_i915(dev); 110 struct drm_i915_private *dev_priv = to_i915(dev);
110 i915_reg_t reg = PIPESTAT(pipe); 111 i915_reg_t reg = PIPESTAT(pipe);
111 u32 pipestat = I915_READ(reg) & 0xffff0000;
112 112
113 lockdep_assert_held(&dev_priv->irq_lock); 113 lockdep_assert_held(&dev_priv->irq_lock);
114 114
115 if (enable) { 115 if (enable) {
116 I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS); 116 u32 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
117
118 I915_WRITE(reg, enable_mask | PIPE_FIFO_UNDERRUN_STATUS);
117 POSTING_READ(reg); 119 POSTING_READ(reg);
118 } else { 120 } else {
119 if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS) 121 if (old && I915_READ(reg) & PIPE_FIFO_UNDERRUN_STATUS)
120 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); 122 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
121 } 123 }
122} 124}
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index 5fa286074811..7eb6b4fa1d6f 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -56,10 +56,6 @@
56#define WQ_LEN_SHIFT 16 56#define WQ_LEN_SHIFT 16
57#define WQ_NO_WCFLUSH_WAIT (1 << 27) 57#define WQ_NO_WCFLUSH_WAIT (1 << 27)
58#define WQ_PRESENT_WORKLOAD (1 << 28) 58#define WQ_PRESENT_WORKLOAD (1 << 28)
59#define WQ_WORKLOAD_SHIFT 29
60#define WQ_WORKLOAD_GENERAL (0 << WQ_WORKLOAD_SHIFT)
61#define WQ_WORKLOAD_GPGPU (1 << WQ_WORKLOAD_SHIFT)
62#define WQ_WORKLOAD_TOUCH (2 << WQ_WORKLOAD_SHIFT)
63 59
64#define WQ_RING_TAIL_SHIFT 20 60#define WQ_RING_TAIL_SHIFT 20
65#define WQ_RING_TAIL_MAX 0x7FF /* 2^11 QWords */ 61#define WQ_RING_TAIL_MAX 0x7FF /* 2^11 QWords */
@@ -388,7 +384,11 @@ struct guc_ct_buffer_desc {
388/* Preempt to idle on quantum expiry */ 384/* Preempt to idle on quantum expiry */
389#define POLICY_PREEMPT_TO_IDLE (1<<1) 385#define POLICY_PREEMPT_TO_IDLE (1<<1)
390 386
391#define POLICY_MAX_NUM_WI 15 387#define POLICY_MAX_NUM_WI 15
388#define POLICY_DEFAULT_DPC_PROMOTE_TIME_US 500000
389#define POLICY_DEFAULT_EXECUTION_QUANTUM_US 1000000
390#define POLICY_DEFAULT_PREEMPTION_TIME_US 500000
391#define POLICY_DEFAULT_FAULT_TIME_US 250000
392 392
393struct guc_policy { 393struct guc_policy {
394 /* Time for one workload to execute. (in micro seconds) */ 394 /* Time for one workload to execute. (in micro seconds) */
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index 8b0ae7fce7f2..c9e25be4db40 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -131,14 +131,14 @@ static void guc_params_init(struct drm_i915_private *dev_priv)
131 131
132 params[GUC_CTL_LOG_PARAMS] = guc->log.flags; 132 params[GUC_CTL_LOG_PARAMS] = guc->log.flags;
133 133
134 if (i915.guc_log_level >= 0) { 134 if (i915_modparams.guc_log_level >= 0) {
135 params[GUC_CTL_DEBUG] = 135 params[GUC_CTL_DEBUG] =
136 i915.guc_log_level << GUC_LOG_VERBOSITY_SHIFT; 136 i915_modparams.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
137 } else 137 } else
138 params[GUC_CTL_DEBUG] = GUC_LOG_DISABLED; 138 params[GUC_CTL_DEBUG] = GUC_LOG_DISABLED;
139 139
140 /* If GuC submission is enabled, set up additional parameters here */ 140 /* If GuC submission is enabled, set up additional parameters here */
141 if (i915.enable_guc_submission) { 141 if (i915_modparams.enable_guc_submission) {
142 u32 ads = guc_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT; 142 u32 ads = guc_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT;
143 u32 pgs = guc_ggtt_offset(dev_priv->guc.stage_desc_pool); 143 u32 pgs = guc_ggtt_offset(dev_priv->guc.stage_desc_pool);
144 u32 ctx_in_16 = GUC_MAX_STAGE_DESCRIPTORS / 16; 144 u32 ctx_in_16 = GUC_MAX_STAGE_DESCRIPTORS / 16;
@@ -368,7 +368,8 @@ int intel_guc_init_hw(struct intel_guc *guc)
368 guc->fw.load_status = INTEL_UC_FIRMWARE_SUCCESS; 368 guc->fw.load_status = INTEL_UC_FIRMWARE_SUCCESS;
369 369
370 DRM_INFO("GuC %s (firmware %s [version %u.%u])\n", 370 DRM_INFO("GuC %s (firmware %s [version %u.%u])\n",
371 i915.enable_guc_submission ? "submission enabled" : "loaded", 371 i915_modparams.enable_guc_submission ? "submission enabled" :
372 "loaded",
372 guc->fw.path, 373 guc->fw.path,
373 guc->fw.major_ver_found, guc->fw.minor_ver_found); 374 guc->fw.major_ver_found, guc->fw.minor_ver_found);
374 375
@@ -390,8 +391,8 @@ int intel_guc_select_fw(struct intel_guc *guc)
390 guc->fw.load_status = INTEL_UC_FIRMWARE_NONE; 391 guc->fw.load_status = INTEL_UC_FIRMWARE_NONE;
391 guc->fw.type = INTEL_UC_FW_TYPE_GUC; 392 guc->fw.type = INTEL_UC_FW_TYPE_GUC;
392 393
393 if (i915.guc_firmware_path) { 394 if (i915_modparams.guc_firmware_path) {
394 guc->fw.path = i915.guc_firmware_path; 395 guc->fw.path = i915_modparams.guc_firmware_path;
395 guc->fw.major_ver_wanted = 0; 396 guc->fw.major_ver_wanted = 0;
396 guc->fw.minor_ver_wanted = 0; 397 guc->fw.minor_ver_wanted = 0;
397 } else if (IS_SKYLAKE(dev_priv)) { 398 } else if (IS_SKYLAKE(dev_priv)) {
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index 16d3b8719cab..6571d96704ad 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -144,7 +144,7 @@ static int guc_log_relay_file_create(struct intel_guc *guc)
144 struct dentry *log_dir; 144 struct dentry *log_dir;
145 int ret; 145 int ret;
146 146
147 if (i915.guc_log_level < 0) 147 if (i915_modparams.guc_log_level < 0)
148 return 0; 148 return 0;
149 149
150 /* For now create the log file in /sys/kernel/debug/dri/0 dir */ 150 /* For now create the log file in /sys/kernel/debug/dri/0 dir */
@@ -480,7 +480,7 @@ err_runtime:
480 guc_log_runtime_destroy(guc); 480 guc_log_runtime_destroy(guc);
481err: 481err:
482 /* logging will remain off */ 482 /* logging will remain off */
483 i915.guc_log_level = -1; 483 i915_modparams.guc_log_level = -1;
484 return ret; 484 return ret;
485} 485}
486 486
@@ -502,7 +502,8 @@ static void guc_flush_logs(struct intel_guc *guc)
502{ 502{
503 struct drm_i915_private *dev_priv = guc_to_i915(guc); 503 struct drm_i915_private *dev_priv = guc_to_i915(guc);
504 504
505 if (!i915.enable_guc_submission || (i915.guc_log_level < 0)) 505 if (!i915_modparams.enable_guc_submission ||
506 (i915_modparams.guc_log_level < 0))
506 return; 507 return;
507 508
508 /* First disable the interrupts, will be renabled afterwards */ 509 /* First disable the interrupts, will be renabled afterwards */
@@ -529,8 +530,8 @@ int intel_guc_log_create(struct intel_guc *guc)
529 530
530 GEM_BUG_ON(guc->log.vma); 531 GEM_BUG_ON(guc->log.vma);
531 532
532 if (i915.guc_log_level > GUC_LOG_VERBOSITY_MAX) 533 if (i915_modparams.guc_log_level > GUC_LOG_VERBOSITY_MAX)
533 i915.guc_log_level = GUC_LOG_VERBOSITY_MAX; 534 i915_modparams.guc_log_level = GUC_LOG_VERBOSITY_MAX;
534 535
535 /* The first page is to save log buffer state. Allocate one 536 /* The first page is to save log buffer state. Allocate one
536 * extra page for others in case for overlap */ 537 * extra page for others in case for overlap */
@@ -555,7 +556,7 @@ int intel_guc_log_create(struct intel_guc *guc)
555 556
556 guc->log.vma = vma; 557 guc->log.vma = vma;
557 558
558 if (i915.guc_log_level >= 0) { 559 if (i915_modparams.guc_log_level >= 0) {
559 ret = guc_log_runtime_create(guc); 560 ret = guc_log_runtime_create(guc);
560 if (ret < 0) 561 if (ret < 0)
561 goto err_vma; 562 goto err_vma;
@@ -576,7 +577,7 @@ err_vma:
576 i915_vma_unpin_and_release(&guc->log.vma); 577 i915_vma_unpin_and_release(&guc->log.vma);
577err: 578err:
578 /* logging will be off */ 579 /* logging will be off */
579 i915.guc_log_level = -1; 580 i915_modparams.guc_log_level = -1;
580 return ret; 581 return ret;
581} 582}
582 583
@@ -600,7 +601,7 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
600 return -EINVAL; 601 return -EINVAL;
601 602
602 /* This combination doesn't make sense & won't have any effect */ 603 /* This combination doesn't make sense & won't have any effect */
603 if (!log_param.logging_enabled && (i915.guc_log_level < 0)) 604 if (!log_param.logging_enabled && (i915_modparams.guc_log_level < 0))
604 return 0; 605 return 0;
605 606
606 ret = guc_log_control(guc, log_param.value); 607 ret = guc_log_control(guc, log_param.value);
@@ -610,7 +611,7 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
610 } 611 }
611 612
612 if (log_param.logging_enabled) { 613 if (log_param.logging_enabled) {
613 i915.guc_log_level = log_param.verbosity; 614 i915_modparams.guc_log_level = log_param.verbosity;
614 615
615 /* If log_level was set as -1 at boot time, then the relay channel file 616 /* If log_level was set as -1 at boot time, then the relay channel file
616 * wouldn't have been created by now and interrupts also would not have 617 * wouldn't have been created by now and interrupts also would not have
@@ -633,7 +634,7 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
633 guc_flush_logs(guc); 634 guc_flush_logs(guc);
634 635
635 /* As logging is disabled, update log level to reflect that */ 636 /* As logging is disabled, update log level to reflect that */
636 i915.guc_log_level = -1; 637 i915_modparams.guc_log_level = -1;
637 } 638 }
638 639
639 return ret; 640 return ret;
@@ -641,7 +642,8 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
641 642
642void i915_guc_log_register(struct drm_i915_private *dev_priv) 643void i915_guc_log_register(struct drm_i915_private *dev_priv)
643{ 644{
644 if (!i915.enable_guc_submission || i915.guc_log_level < 0) 645 if (!i915_modparams.enable_guc_submission ||
646 (i915_modparams.guc_log_level < 0))
645 return; 647 return;
646 648
647 mutex_lock(&dev_priv->drm.struct_mutex); 649 mutex_lock(&dev_priv->drm.struct_mutex);
@@ -651,7 +653,7 @@ void i915_guc_log_register(struct drm_i915_private *dev_priv)
651 653
652void i915_guc_log_unregister(struct drm_i915_private *dev_priv) 654void i915_guc_log_unregister(struct drm_i915_private *dev_priv)
653{ 655{
654 if (!i915.enable_guc_submission) 656 if (!i915_modparams.enable_guc_submission)
655 return; 657 return;
656 658
657 mutex_lock(&dev_priv->drm.struct_mutex); 659 mutex_lock(&dev_priv->drm.struct_mutex);
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index c17ed0e62b67..b4a7f31f0214 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -58,7 +58,7 @@ static bool is_supported_device(struct drm_i915_private *dev_priv)
58 */ 58 */
59void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv) 59void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv)
60{ 60{
61 if (!i915.enable_gvt) 61 if (!i915_modparams.enable_gvt)
62 return; 62 return;
63 63
64 if (intel_vgpu_active(dev_priv)) { 64 if (intel_vgpu_active(dev_priv)) {
@@ -73,7 +73,7 @@ void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv)
73 73
74 return; 74 return;
75bail: 75bail:
76 i915.enable_gvt = 0; 76 i915_modparams.enable_gvt = 0;
77} 77}
78 78
79/** 79/**
@@ -90,17 +90,17 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
90{ 90{
91 int ret; 91 int ret;
92 92
93 if (!i915.enable_gvt) { 93 if (!i915_modparams.enable_gvt) {
94 DRM_DEBUG_DRIVER("GVT-g is disabled by kernel params\n"); 94 DRM_DEBUG_DRIVER("GVT-g is disabled by kernel params\n");
95 return 0; 95 return 0;
96 } 96 }
97 97
98 if (!i915.enable_execlists) { 98 if (!i915_modparams.enable_execlists) {
99 DRM_ERROR("i915 GVT-g loading failed due to disabled execlists mode\n"); 99 DRM_ERROR("i915 GVT-g loading failed due to disabled execlists mode\n");
100 return -EIO; 100 return -EIO;
101 } 101 }
102 102
103 if (i915.enable_guc_submission) { 103 if (i915_modparams.enable_guc_submission) {
104 DRM_ERROR("i915 GVT-g loading failed due to Graphics virtualization is not yet supported with GuC submission\n"); 104 DRM_ERROR("i915 GVT-g loading failed due to Graphics virtualization is not yet supported with GuC submission\n");
105 return -EIO; 105 return -EIO;
106 } 106 }
@@ -123,7 +123,7 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
123 return 0; 123 return 0;
124 124
125bail: 125bail:
126 i915.enable_gvt = 0; 126 i915_modparams.enable_gvt = 0;
127 return 0; 127 return 0;
128} 128}
129 129
diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c
index d9d87d96fb69..12ac270a5f93 100644
--- a/drivers/gpu/drm/i915/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/intel_hangcheck.c
@@ -428,7 +428,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
428 unsigned int hung = 0, stuck = 0; 428 unsigned int hung = 0, stuck = 0;
429 int busy_count = 0; 429 int busy_count = 0;
430 430
431 if (!i915.enable_hangcheck) 431 if (!i915_modparams.enable_hangcheck)
432 return; 432 return;
433 433
434 if (!READ_ONCE(dev_priv->gt.awake)) 434 if (!READ_ONCE(dev_priv->gt.awake))
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
index 6145fa0d6773..8b4b53525422 100644
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ b/drivers/gpu/drm/i915/intel_huc.c
@@ -155,8 +155,8 @@ void intel_huc_select_fw(struct intel_huc *huc)
155 huc->fw.load_status = INTEL_UC_FIRMWARE_NONE; 155 huc->fw.load_status = INTEL_UC_FIRMWARE_NONE;
156 huc->fw.type = INTEL_UC_FW_TYPE_HUC; 156 huc->fw.type = INTEL_UC_FW_TYPE_HUC;
157 157
158 if (i915.huc_firmware_path) { 158 if (i915_modparams.huc_firmware_path) {
159 huc->fw.path = i915.huc_firmware_path; 159 huc->fw.path = i915_modparams.huc_firmware_path;
160 huc->fw.major_ver_wanted = 0; 160 huc->fw.major_ver_wanted = 0;
161 huc->fw.minor_ver_wanted = 0; 161 huc->fw.minor_ver_wanted = 0;
162 } else if (IS_SKYLAKE(dev_priv)) { 162 } else if (IS_SKYLAKE(dev_priv)) {
@@ -225,19 +225,22 @@ void intel_huc_init_hw(struct intel_huc *huc)
225} 225}
226 226
227/** 227/**
228 * intel_guc_auth_huc() - authenticate ucode 228 * intel_huc_auth() - Authenticate HuC uCode
229 * @dev_priv: the drm_i915_device 229 * @huc: intel_huc structure
230 *
231 * Called after HuC and GuC firmware loading during intel_uc_init_hw().
230 * 232 *
231 * Triggers a HuC fw authentication request to the GuC via intel_guc_action_ 233 * This function pins HuC firmware image object into GGTT.
232 * authenticate_huc interface. 234 * Then it invokes GuC action to authenticate passing the offset to RSA
235 * signature through intel_guc_auth_huc(). It then waits for 50ms for
236 * firmware verification ACK and unpins the object.
233 */ 237 */
234void intel_guc_auth_huc(struct drm_i915_private *dev_priv) 238void intel_huc_auth(struct intel_huc *huc)
235{ 239{
236 struct intel_guc *guc = &dev_priv->guc; 240 struct drm_i915_private *i915 = huc_to_i915(huc);
237 struct intel_huc *huc = &dev_priv->huc; 241 struct intel_guc *guc = &i915->guc;
238 struct i915_vma *vma; 242 struct i915_vma *vma;
239 int ret; 243 int ret;
240 u32 data[2];
241 244
242 if (huc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS) 245 if (huc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
243 return; 246 return;
@@ -250,23 +253,19 @@ void intel_guc_auth_huc(struct drm_i915_private *dev_priv)
250 return; 253 return;
251 } 254 }
252 255
253 /* Specify auth action and where public signature is. */ 256 ret = intel_guc_auth_huc(guc,
254 data[0] = INTEL_GUC_ACTION_AUTHENTICATE_HUC; 257 guc_ggtt_offset(vma) + huc->fw.rsa_offset);
255 data[1] = guc_ggtt_offset(vma) + huc->fw.rsa_offset;
256
257 ret = intel_guc_send(guc, data, ARRAY_SIZE(data));
258 if (ret) { 258 if (ret) {
259 DRM_ERROR("HuC: GuC did not ack Auth request %d\n", ret); 259 DRM_ERROR("HuC: GuC did not ack Auth request %d\n", ret);
260 goto out; 260 goto out;
261 } 261 }
262 262
263 /* Check authentication status, it should be done by now */ 263 /* Check authentication status, it should be done by now */
264 ret = intel_wait_for_register(dev_priv, 264 ret = intel_wait_for_register(i915,
265 HUC_STATUS2, 265 HUC_STATUS2,
266 HUC_FW_VERIFIED, 266 HUC_FW_VERIFIED,
267 HUC_FW_VERIFIED, 267 HUC_FW_VERIFIED,
268 50); 268 50);
269
270 if (ret) { 269 if (ret) {
271 DRM_ERROR("HuC: Authentication failed %d\n", ret); 270 DRM_ERROR("HuC: Authentication failed %d\n", ret);
272 goto out; 271 goto out;
@@ -275,4 +274,3 @@ void intel_guc_auth_huc(struct drm_i915_private *dev_priv)
275out: 274out:
276 i915_vma_unpin(vma); 275 i915_vma_unpin(vma);
277} 276}
278
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index d89e1b8e1cc5..61cac26a8b05 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -244,7 +244,7 @@ int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enabl
244 244
245 if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && 245 if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) &&
246 USES_PPGTT(dev_priv) && 246 USES_PPGTT(dev_priv) &&
247 i915.use_mmio_flip >= 0) 247 i915_modparams.use_mmio_flip >= 0)
248 return 1; 248 return 1;
249 249
250 return 0; 250 return 0;
@@ -279,17 +279,73 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
279 BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<<GEN8_CTX_ID_WIDTH)); 279 BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<<GEN8_CTX_ID_WIDTH));
280 280
281 desc = ctx->desc_template; /* bits 0-11 */ 281 desc = ctx->desc_template; /* bits 0-11 */
282 desc |= i915_ggtt_offset(ce->state) + LRC_PPHWSP_PN * PAGE_SIZE; 282 desc |= i915_ggtt_offset(ce->state) + LRC_HEADER_PAGES * PAGE_SIZE;
283 /* bits 12-31 */ 283 /* bits 12-31 */
284 desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */ 284 desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */
285 285
286 ce->lrc_desc = desc; 286 ce->lrc_desc = desc;
287} 287}
288 288
289uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx, 289static struct i915_priolist *
290 struct intel_engine_cs *engine) 290lookup_priolist(struct intel_engine_cs *engine,
291 struct i915_priotree *pt,
292 int prio)
291{ 293{
292 return ctx->engine[engine->id].lrc_desc; 294 struct intel_engine_execlists * const execlists = &engine->execlists;
295 struct i915_priolist *p;
296 struct rb_node **parent, *rb;
297 bool first = true;
298
299 if (unlikely(execlists->no_priolist))
300 prio = I915_PRIORITY_NORMAL;
301
302find_priolist:
303 /* most positive priority is scheduled first, equal priorities fifo */
304 rb = NULL;
305 parent = &execlists->queue.rb_node;
306 while (*parent) {
307 rb = *parent;
308 p = rb_entry(rb, typeof(*p), node);
309 if (prio > p->priority) {
310 parent = &rb->rb_left;
311 } else if (prio < p->priority) {
312 parent = &rb->rb_right;
313 first = false;
314 } else {
315 return p;
316 }
317 }
318
319 if (prio == I915_PRIORITY_NORMAL) {
320 p = &execlists->default_priolist;
321 } else {
322 p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
323 /* Convert an allocation failure to a priority bump */
324 if (unlikely(!p)) {
325 prio = I915_PRIORITY_NORMAL; /* recurses just once */
326
327 /* To maintain ordering with all rendering, after an
328 * allocation failure we have to disable all scheduling.
329 * Requests will then be executed in fifo, and schedule
330 * will ensure that dependencies are emitted in fifo.
331 * There will be still some reordering with existing
332 * requests, so if userspace lied about their
333 * dependencies that reordering may be visible.
334 */
335 execlists->no_priolist = true;
336 goto find_priolist;
337 }
338 }
339
340 p->priority = prio;
341 INIT_LIST_HEAD(&p->requests);
342 rb_link_node(&p->node, rb, parent);
343 rb_insert_color(&p->node, &execlists->queue);
344
345 if (first)
346 execlists->first = &p->node;
347
348 return ptr_pack_bits(p, first, 1);
293} 349}
294 350
295static inline void 351static inline void
@@ -338,12 +394,12 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq)
338 394
339static void execlists_submit_ports(struct intel_engine_cs *engine) 395static void execlists_submit_ports(struct intel_engine_cs *engine)
340{ 396{
341 struct execlist_port *port = engine->execlist_port; 397 struct execlist_port *port = engine->execlists.port;
342 u32 __iomem *elsp = 398 u32 __iomem *elsp =
343 engine->i915->regs + i915_mmio_reg_offset(RING_ELSP(engine)); 399 engine->i915->regs + i915_mmio_reg_offset(RING_ELSP(engine));
344 unsigned int n; 400 unsigned int n;
345 401
346 for (n = ARRAY_SIZE(engine->execlist_port); n--; ) { 402 for (n = execlists_num_ports(&engine->execlists); n--; ) {
347 struct drm_i915_gem_request *rq; 403 struct drm_i915_gem_request *rq;
348 unsigned int count; 404 unsigned int count;
349 u64 desc; 405 u64 desc;
@@ -398,7 +454,10 @@ static void port_assign(struct execlist_port *port,
398static void execlists_dequeue(struct intel_engine_cs *engine) 454static void execlists_dequeue(struct intel_engine_cs *engine)
399{ 455{
400 struct drm_i915_gem_request *last; 456 struct drm_i915_gem_request *last;
401 struct execlist_port *port = engine->execlist_port; 457 struct intel_engine_execlists * const execlists = &engine->execlists;
458 struct execlist_port *port = execlists->port;
459 const struct execlist_port * const last_port =
460 &execlists->port[execlists->port_mask];
402 struct rb_node *rb; 461 struct rb_node *rb;
403 bool submit = false; 462 bool submit = false;
404 463
@@ -412,8 +471,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
412 */ 471 */
413 last->tail = last->wa_tail; 472 last->tail = last->wa_tail;
414 473
415 GEM_BUG_ON(port_isset(&port[1]));
416
417 /* Hardware submission is through 2 ports. Conceptually each port 474 /* Hardware submission is through 2 ports. Conceptually each port
418 * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is 475 * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is
419 * static for a context, and unique to each, so we only execute 476 * static for a context, and unique to each, so we only execute
@@ -436,8 +493,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
436 */ 493 */
437 494
438 spin_lock_irq(&engine->timeline->lock); 495 spin_lock_irq(&engine->timeline->lock);
439 rb = engine->execlist_first; 496 rb = execlists->first;
440 GEM_BUG_ON(rb_first(&engine->execlist_queue) != rb); 497 GEM_BUG_ON(rb_first(&execlists->queue) != rb);
441 while (rb) { 498 while (rb) {
442 struct i915_priolist *p = rb_entry(rb, typeof(*p), node); 499 struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
443 struct drm_i915_gem_request *rq, *rn; 500 struct drm_i915_gem_request *rq, *rn;
@@ -460,7 +517,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
460 * combine this request with the last, then we 517 * combine this request with the last, then we
461 * are done. 518 * are done.
462 */ 519 */
463 if (port != engine->execlist_port) { 520 if (port == last_port) {
464 __list_del_many(&p->requests, 521 __list_del_many(&p->requests,
465 &rq->priotree.link); 522 &rq->priotree.link);
466 goto done; 523 goto done;
@@ -485,25 +542,27 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
485 if (submit) 542 if (submit)
486 port_assign(port, last); 543 port_assign(port, last);
487 port++; 544 port++;
545
546 GEM_BUG_ON(port_isset(port));
488 } 547 }
489 548
490 INIT_LIST_HEAD(&rq->priotree.link); 549 INIT_LIST_HEAD(&rq->priotree.link);
491 rq->priotree.priority = INT_MAX; 550 rq->priotree.priority = INT_MAX;
492 551
493 __i915_gem_request_submit(rq); 552 __i915_gem_request_submit(rq);
494 trace_i915_gem_request_in(rq, port_index(port, engine)); 553 trace_i915_gem_request_in(rq, port_index(port, execlists));
495 last = rq; 554 last = rq;
496 submit = true; 555 submit = true;
497 } 556 }
498 557
499 rb = rb_next(rb); 558 rb = rb_next(rb);
500 rb_erase(&p->node, &engine->execlist_queue); 559 rb_erase(&p->node, &execlists->queue);
501 INIT_LIST_HEAD(&p->requests); 560 INIT_LIST_HEAD(&p->requests);
502 if (p->priority != I915_PRIORITY_NORMAL) 561 if (p->priority != I915_PRIORITY_NORMAL)
503 kmem_cache_free(engine->i915->priorities, p); 562 kmem_cache_free(engine->i915->priorities, p);
504 } 563 }
505done: 564done:
506 engine->execlist_first = rb; 565 execlists->first = rb;
507 if (submit) 566 if (submit)
508 port_assign(port, last); 567 port_assign(port, last);
509 spin_unlock_irq(&engine->timeline->lock); 568 spin_unlock_irq(&engine->timeline->lock);
@@ -512,9 +571,83 @@ done:
512 execlists_submit_ports(engine); 571 execlists_submit_ports(engine);
513} 572}
514 573
574static void
575execlist_cancel_port_requests(struct intel_engine_execlists *execlists)
576{
577 struct execlist_port *port = execlists->port;
578 unsigned int num_ports = ARRAY_SIZE(execlists->port);
579
580 while (num_ports-- && port_isset(port)) {
581 struct drm_i915_gem_request *rq = port_request(port);
582
583 execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
584 i915_gem_request_put(rq);
585
586 memset(port, 0, sizeof(*port));
587 port++;
588 }
589}
590
591static void execlists_cancel_requests(struct intel_engine_cs *engine)
592{
593 struct intel_engine_execlists * const execlists = &engine->execlists;
594 struct drm_i915_gem_request *rq, *rn;
595 struct rb_node *rb;
596 unsigned long flags;
597
598 spin_lock_irqsave(&engine->timeline->lock, flags);
599
600 /* Cancel the requests on the HW and clear the ELSP tracker. */
601 execlist_cancel_port_requests(execlists);
602
603 /* Mark all executing requests as skipped. */
604 list_for_each_entry(rq, &engine->timeline->requests, link) {
605 GEM_BUG_ON(!rq->global_seqno);
606 if (!i915_gem_request_completed(rq))
607 dma_fence_set_error(&rq->fence, -EIO);
608 }
609
610 /* Flush the queued requests to the timeline list (for retiring). */
611 rb = execlists->first;
612 while (rb) {
613 struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
614
615 list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
616 INIT_LIST_HEAD(&rq->priotree.link);
617 rq->priotree.priority = INT_MAX;
618
619 dma_fence_set_error(&rq->fence, -EIO);
620 __i915_gem_request_submit(rq);
621 }
622
623 rb = rb_next(rb);
624 rb_erase(&p->node, &execlists->queue);
625 INIT_LIST_HEAD(&p->requests);
626 if (p->priority != I915_PRIORITY_NORMAL)
627 kmem_cache_free(engine->i915->priorities, p);
628 }
629
630 /* Remaining _unready_ requests will be nop'ed when submitted */
631
632
633 execlists->queue = RB_ROOT;
634 execlists->first = NULL;
635 GEM_BUG_ON(port_isset(execlists->port));
636
637 /*
638 * The port is checked prior to scheduling a tasklet, but
639 * just in case we have suspended the tasklet to do the
640 * wedging make sure that when it wakes, it decides there
641 * is no work to do by clearing the irq_posted bit.
642 */
643 clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
644
645 spin_unlock_irqrestore(&engine->timeline->lock, flags);
646}
647
515static bool execlists_elsp_ready(const struct intel_engine_cs *engine) 648static bool execlists_elsp_ready(const struct intel_engine_cs *engine)
516{ 649{
517 const struct execlist_port *port = engine->execlist_port; 650 const struct execlist_port *port = engine->execlists.port;
518 651
519 return port_count(&port[0]) + port_count(&port[1]) < 2; 652 return port_count(&port[0]) + port_count(&port[1]) < 2;
520} 653}
@@ -525,8 +658,9 @@ static bool execlists_elsp_ready(const struct intel_engine_cs *engine)
525 */ 658 */
526static void intel_lrc_irq_handler(unsigned long data) 659static void intel_lrc_irq_handler(unsigned long data)
527{ 660{
528 struct intel_engine_cs *engine = (struct intel_engine_cs *)data; 661 struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
529 struct execlist_port *port = engine->execlist_port; 662 struct intel_engine_execlists * const execlists = &engine->execlists;
663 struct execlist_port *port = execlists->port;
530 struct drm_i915_private *dev_priv = engine->i915; 664 struct drm_i915_private *dev_priv = engine->i915;
531 665
532 /* We can skip acquiring intel_runtime_pm_get() here as it was taken 666 /* We can skip acquiring intel_runtime_pm_get() here as it was taken
@@ -538,19 +672,25 @@ static void intel_lrc_irq_handler(unsigned long data)
538 */ 672 */
539 GEM_BUG_ON(!dev_priv->gt.awake); 673 GEM_BUG_ON(!dev_priv->gt.awake);
540 674
541 intel_uncore_forcewake_get(dev_priv, engine->fw_domains); 675 intel_uncore_forcewake_get(dev_priv, execlists->fw_domains);
542 676
543 /* Prefer doing test_and_clear_bit() as a two stage operation to avoid 677 /* Prefer doing test_and_clear_bit() as a two stage operation to avoid
544 * imposing the cost of a locked atomic transaction when submitting a 678 * imposing the cost of a locked atomic transaction when submitting a
545 * new request (outside of the context-switch interrupt). 679 * new request (outside of the context-switch interrupt).
546 */ 680 */
547 while (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) { 681 while (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) {
548 u32 __iomem *csb_mmio = 682 /* The HWSP contains a (cacheable) mirror of the CSB */
549 dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)); 683 const u32 *buf =
550 u32 __iomem *buf = 684 &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
551 dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0));
552 unsigned int head, tail; 685 unsigned int head, tail;
553 686
687 /* However GVT emulation depends upon intercepting CSB mmio */
688 if (unlikely(execlists->csb_use_mmio)) {
689 buf = (u32 * __force)
690 (dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0)));
691 execlists->csb_head = -1; /* force mmio read of CSB ptrs */
692 }
693
554 /* The write will be ordered by the uncached read (itself 694 /* The write will be ordered by the uncached read (itself
555 * a memory barrier), so we do not need another in the form 695 * a memory barrier), so we do not need another in the form
556 * of a locked instruction. The race between the interrupt 696 * of a locked instruction. The race between the interrupt
@@ -562,9 +702,20 @@ static void intel_lrc_irq_handler(unsigned long data)
562 * is set and we do a new loop. 702 * is set and we do a new loop.
563 */ 703 */
564 __clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); 704 __clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
565 head = readl(csb_mmio); 705 if (unlikely(execlists->csb_head == -1)) { /* following a reset */
566 tail = GEN8_CSB_WRITE_PTR(head); 706 head = readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)));
567 head = GEN8_CSB_READ_PTR(head); 707 tail = GEN8_CSB_WRITE_PTR(head);
708 head = GEN8_CSB_READ_PTR(head);
709 execlists->csb_head = head;
710 } else {
711 const int write_idx =
712 intel_hws_csb_write_index(dev_priv) -
713 I915_HWS_CSB_BUF0_INDEX;
714
715 head = execlists->csb_head;
716 tail = READ_ONCE(buf[write_idx]);
717 }
718
568 while (head != tail) { 719 while (head != tail) {
569 struct drm_i915_gem_request *rq; 720 struct drm_i915_gem_request *rq;
570 unsigned int status; 721 unsigned int status;
@@ -590,13 +741,12 @@ static void intel_lrc_irq_handler(unsigned long data)
590 * status notifier. 741 * status notifier.
591 */ 742 */
592 743
593 status = readl(buf + 2 * head); 744 status = READ_ONCE(buf[2 * head]); /* maybe mmio! */
594 if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK)) 745 if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK))
595 continue; 746 continue;
596 747
597 /* Check the context/desc id for this event matches */ 748 /* Check the context/desc id for this event matches */
598 GEM_DEBUG_BUG_ON(readl(buf + 2 * head + 1) != 749 GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id);
599 port->context_id);
600 750
601 rq = port_unpack(port, &count); 751 rq = port_unpack(port, &count);
602 GEM_BUG_ON(count == 0); 752 GEM_BUG_ON(count == 0);
@@ -608,8 +758,7 @@ static void intel_lrc_irq_handler(unsigned long data)
608 trace_i915_gem_request_out(rq); 758 trace_i915_gem_request_out(rq);
609 i915_gem_request_put(rq); 759 i915_gem_request_put(rq);
610 760
611 port[0] = port[1]; 761 execlists_port_complete(execlists, port);
612 memset(&port[1], 0, sizeof(port[1]));
613 } else { 762 } else {
614 port_set(port, port_pack(rq, count)); 763 port_set(port, port_pack(rq, count));
615 } 764 }
@@ -619,78 +768,28 @@ static void intel_lrc_irq_handler(unsigned long data)
619 !(status & GEN8_CTX_STATUS_ACTIVE_IDLE)); 768 !(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
620 } 769 }
621 770
622 writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, head << 8), 771 if (head != execlists->csb_head) {
623 csb_mmio); 772 execlists->csb_head = head;
773 writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, head << 8),
774 dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)));
775 }
624 } 776 }
625 777
626 if (execlists_elsp_ready(engine)) 778 if (execlists_elsp_ready(engine))
627 execlists_dequeue(engine); 779 execlists_dequeue(engine);
628 780
629 intel_uncore_forcewake_put(dev_priv, engine->fw_domains); 781 intel_uncore_forcewake_put(dev_priv, execlists->fw_domains);
630} 782}
631 783
632static bool 784static void insert_request(struct intel_engine_cs *engine,
633insert_request(struct intel_engine_cs *engine, 785 struct i915_priotree *pt,
634 struct i915_priotree *pt, 786 int prio)
635 int prio)
636{ 787{
637 struct i915_priolist *p; 788 struct i915_priolist *p = lookup_priolist(engine, pt, prio);
638 struct rb_node **parent, *rb;
639 bool first = true;
640 789
641 if (unlikely(engine->no_priolist)) 790 list_add_tail(&pt->link, &ptr_mask_bits(p, 1)->requests);
642 prio = I915_PRIORITY_NORMAL; 791 if (ptr_unmask_bits(p, 1) && execlists_elsp_ready(engine))
643 792 tasklet_hi_schedule(&engine->execlists.irq_tasklet);
644find_priolist:
645 /* most positive priority is scheduled first, equal priorities fifo */
646 rb = NULL;
647 parent = &engine->execlist_queue.rb_node;
648 while (*parent) {
649 rb = *parent;
650 p = rb_entry(rb, typeof(*p), node);
651 if (prio > p->priority) {
652 parent = &rb->rb_left;
653 } else if (prio < p->priority) {
654 parent = &rb->rb_right;
655 first = false;
656 } else {
657 list_add_tail(&pt->link, &p->requests);
658 return false;
659 }
660 }
661
662 if (prio == I915_PRIORITY_NORMAL) {
663 p = &engine->default_priolist;
664 } else {
665 p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
666 /* Convert an allocation failure to a priority bump */
667 if (unlikely(!p)) {
668 prio = I915_PRIORITY_NORMAL; /* recurses just once */
669
670 /* To maintain ordering with all rendering, after an
671 * allocation failure we have to disable all scheduling.
672 * Requests will then be executed in fifo, and schedule
673 * will ensure that dependencies are emitted in fifo.
674 * There will be still some reordering with existing
675 * requests, so if userspace lied about their
676 * dependencies that reordering may be visible.
677 */
678 engine->no_priolist = true;
679 goto find_priolist;
680 }
681 }
682
683 p->priority = prio;
684 rb_link_node(&p->node, rb, parent);
685 rb_insert_color(&p->node, &engine->execlist_queue);
686
687 INIT_LIST_HEAD(&p->requests);
688 list_add_tail(&pt->link, &p->requests);
689
690 if (first)
691 engine->execlist_first = &p->node;
692
693 return first;
694} 793}
695 794
696static void execlists_submit_request(struct drm_i915_gem_request *request) 795static void execlists_submit_request(struct drm_i915_gem_request *request)
@@ -701,14 +800,9 @@ static void execlists_submit_request(struct drm_i915_gem_request *request)
701 /* Will be called from irq-context when using foreign fences. */ 800 /* Will be called from irq-context when using foreign fences. */
702 spin_lock_irqsave(&engine->timeline->lock, flags); 801 spin_lock_irqsave(&engine->timeline->lock, flags);
703 802
704 if (insert_request(engine, 803 insert_request(engine, &request->priotree, request->priotree.priority);
705 &request->priotree,
706 request->priotree.priority)) {
707 if (execlists_elsp_ready(engine))
708 tasklet_hi_schedule(&engine->irq_tasklet);
709 }
710 804
711 GEM_BUG_ON(!engine->execlist_first); 805 GEM_BUG_ON(!engine->execlists.first);
712 GEM_BUG_ON(list_empty(&request->priotree.link)); 806 GEM_BUG_ON(list_empty(&request->priotree.link));
713 807
714 spin_unlock_irqrestore(&engine->timeline->lock, flags); 808 spin_unlock_irqrestore(&engine->timeline->lock, flags);
@@ -914,27 +1008,14 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request)
914 */ 1008 */
915 request->reserved_space += EXECLISTS_REQUEST_SIZE; 1009 request->reserved_space += EXECLISTS_REQUEST_SIZE;
916 1010
917 if (i915.enable_guc_submission) {
918 /*
919 * Check that the GuC has space for the request before
920 * going any further, as the i915_add_request() call
921 * later on mustn't fail ...
922 */
923 ret = i915_guc_wq_reserve(request);
924 if (ret)
925 goto err;
926 }
927
928 cs = intel_ring_begin(request, 0); 1011 cs = intel_ring_begin(request, 0);
929 if (IS_ERR(cs)) { 1012 if (IS_ERR(cs))
930 ret = PTR_ERR(cs); 1013 return PTR_ERR(cs);
931 goto err_unreserve;
932 }
933 1014
934 if (!ce->initialised) { 1015 if (!ce->initialised) {
935 ret = engine->init_context(request); 1016 ret = engine->init_context(request);
936 if (ret) 1017 if (ret)
937 goto err_unreserve; 1018 return ret;
938 1019
939 ce->initialised = true; 1020 ce->initialised = true;
940 } 1021 }
@@ -948,12 +1029,6 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request)
948 1029
949 request->reserved_space -= EXECLISTS_REQUEST_SIZE; 1030 request->reserved_space -= EXECLISTS_REQUEST_SIZE;
950 return 0; 1031 return 0;
951
952err_unreserve:
953 if (i915.enable_guc_submission)
954 i915_guc_wq_unreserve(request);
955err:
956 return ret;
957} 1032}
958 1033
959/* 1034/*
@@ -1116,13 +1191,6 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
1116 return batch; 1191 return batch;
1117} 1192}
1118 1193
1119static u32 *gen9_init_perctx_bb(struct intel_engine_cs *engine, u32 *batch)
1120{
1121 *batch++ = MI_BATCH_BUFFER_END;
1122
1123 return batch;
1124}
1125
1126#define CTX_WA_BB_OBJ_SIZE (PAGE_SIZE) 1194#define CTX_WA_BB_OBJ_SIZE (PAGE_SIZE)
1127 1195
1128static int lrc_setup_wa_ctx(struct intel_engine_cs *engine) 1196static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
@@ -1179,7 +1247,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
1179 return 0; 1247 return 0;
1180 case 9: 1248 case 9:
1181 wa_bb_fn[0] = gen9_init_indirectctx_bb; 1249 wa_bb_fn[0] = gen9_init_indirectctx_bb;
1182 wa_bb_fn[1] = gen9_init_perctx_bb; 1250 wa_bb_fn[1] = NULL;
1183 break; 1251 break;
1184 case 8: 1252 case 8:
1185 wa_bb_fn[0] = gen8_init_indirectctx_bb; 1253 wa_bb_fn[0] = gen8_init_indirectctx_bb;
@@ -1210,7 +1278,8 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
1210 ret = -EINVAL; 1278 ret = -EINVAL;
1211 break; 1279 break;
1212 } 1280 }
1213 batch_ptr = wa_bb_fn[i](engine, batch_ptr); 1281 if (wa_bb_fn[i])
1282 batch_ptr = wa_bb_fn[i](engine, batch_ptr);
1214 wa_bb[i]->size = batch_ptr - (batch + wa_bb[i]->offset); 1283 wa_bb[i]->size = batch_ptr - (batch + wa_bb[i]->offset);
1215 } 1284 }
1216 1285
@@ -1234,9 +1303,7 @@ static u8 gtiir[] = {
1234static int gen8_init_common_ring(struct intel_engine_cs *engine) 1303static int gen8_init_common_ring(struct intel_engine_cs *engine)
1235{ 1304{
1236 struct drm_i915_private *dev_priv = engine->i915; 1305 struct drm_i915_private *dev_priv = engine->i915;
1237 struct execlist_port *port = engine->execlist_port; 1306 struct intel_engine_execlists * const execlists = &engine->execlists;
1238 unsigned int n;
1239 bool submit;
1240 int ret; 1307 int ret;
1241 1308
1242 ret = intel_mocs_init_engine(engine); 1309 ret = intel_mocs_init_engine(engine);
@@ -1269,24 +1336,11 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
1269 I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]), 1336 I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]),
1270 GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift); 1337 GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift);
1271 clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); 1338 clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
1339 execlists->csb_head = -1;
1272 1340
1273 /* After a GPU reset, we may have requests to replay */ 1341 /* After a GPU reset, we may have requests to replay */
1274 submit = false; 1342 if (!i915_modparams.enable_guc_submission && execlists->first)
1275 for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) { 1343 tasklet_schedule(&execlists->irq_tasklet);
1276 if (!port_isset(&port[n]))
1277 break;
1278
1279 DRM_DEBUG_DRIVER("Restarting %s:%d from 0x%x\n",
1280 engine->name, n,
1281 port_request(&port[n])->global_seqno);
1282
1283 /* Discard the current inflight count */
1284 port_set(&port[n], port_request(&port[n]));
1285 submit = true;
1286 }
1287
1288 if (submit && !i915.enable_guc_submission)
1289 execlists_submit_ports(engine);
1290 1344
1291 return 0; 1345 return 0;
1292} 1346}
@@ -1327,9 +1381,12 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
1327static void reset_common_ring(struct intel_engine_cs *engine, 1381static void reset_common_ring(struct intel_engine_cs *engine,
1328 struct drm_i915_gem_request *request) 1382 struct drm_i915_gem_request *request)
1329{ 1383{
1330 struct execlist_port *port = engine->execlist_port; 1384 struct intel_engine_execlists * const execlists = &engine->execlists;
1385 struct drm_i915_gem_request *rq, *rn;
1331 struct intel_context *ce; 1386 struct intel_context *ce;
1332 unsigned int n; 1387 unsigned long flags;
1388
1389 spin_lock_irqsave(&engine->timeline->lock, flags);
1333 1390
1334 /* 1391 /*
1335 * Catch up with any missed context-switch interrupts. 1392 * Catch up with any missed context-switch interrupts.
@@ -1340,20 +1397,26 @@ static void reset_common_ring(struct intel_engine_cs *engine,
1340 * guessing the missed context-switch events by looking at what 1397 * guessing the missed context-switch events by looking at what
1341 * requests were completed. 1398 * requests were completed.
1342 */ 1399 */
1343 if (!request) { 1400 execlist_cancel_port_requests(execlists);
1344 for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++)
1345 i915_gem_request_put(port_request(&port[n]));
1346 memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
1347 return;
1348 }
1349 1401
1350 if (request->ctx != port_request(port)->ctx) { 1402 /* Push back any incomplete requests for replay after the reset. */
1351 i915_gem_request_put(port_request(port)); 1403 list_for_each_entry_safe_reverse(rq, rn,
1352 port[0] = port[1]; 1404 &engine->timeline->requests, link) {
1353 memset(&port[1], 0, sizeof(port[1])); 1405 struct i915_priolist *p;
1406
1407 if (i915_gem_request_completed(rq))
1408 break;
1409
1410 __i915_gem_request_unsubmit(rq);
1411
1412 p = lookup_priolist(engine,
1413 &rq->priotree,
1414 rq->priotree.priority);
1415 list_add(&rq->priotree.link,
1416 &ptr_mask_bits(p, 1)->requests);
1354 } 1417 }
1355 1418
1356 GEM_BUG_ON(request->ctx != port_request(port)->ctx); 1419 spin_unlock_irqrestore(&engine->timeline->lock, flags);
1357 1420
1358 /* If the request was innocent, we leave the request in the ELSP 1421 /* If the request was innocent, we leave the request in the ELSP
1359 * and will try to replay it on restarting. The context image may 1422 * and will try to replay it on restarting. The context image may
@@ -1365,7 +1428,7 @@ static void reset_common_ring(struct intel_engine_cs *engine,
1365 * and have to at least restore the RING register in the context 1428 * and have to at least restore the RING register in the context
1366 * image back to the expected values to skip over the guilty request. 1429 * image back to the expected values to skip over the guilty request.
1367 */ 1430 */
1368 if (request->fence.error != -EIO) 1431 if (!request || request->fence.error != -EIO)
1369 return; 1432 return;
1370 1433
1371 /* We want a simple context + ring to execute the breadcrumb update. 1434 /* We want a simple context + ring to execute the breadcrumb update.
@@ -1668,8 +1731,8 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
1668 * Tasklet cannot be active at this point due intel_mark_active/idle 1731 * Tasklet cannot be active at this point due intel_mark_active/idle
1669 * so this is just for documentation. 1732 * so this is just for documentation.
1670 */ 1733 */
1671 if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state))) 1734 if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->execlists.irq_tasklet.state)))
1672 tasklet_kill(&engine->irq_tasklet); 1735 tasklet_kill(&engine->execlists.irq_tasklet);
1673 1736
1674 dev_priv = engine->i915; 1737 dev_priv = engine->i915;
1675 1738
@@ -1680,11 +1743,6 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
1680 if (engine->cleanup) 1743 if (engine->cleanup)
1681 engine->cleanup(engine); 1744 engine->cleanup(engine);
1682 1745
1683 if (engine->status_page.vma) {
1684 i915_gem_object_unpin_map(engine->status_page.vma->obj);
1685 engine->status_page.vma = NULL;
1686 }
1687
1688 intel_engine_cleanup_common(engine); 1746 intel_engine_cleanup_common(engine);
1689 1747
1690 lrc_destroy_wa_ctx(engine); 1748 lrc_destroy_wa_ctx(engine);
@@ -1696,8 +1754,9 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
1696static void execlists_set_default_submission(struct intel_engine_cs *engine) 1754static void execlists_set_default_submission(struct intel_engine_cs *engine)
1697{ 1755{
1698 engine->submit_request = execlists_submit_request; 1756 engine->submit_request = execlists_submit_request;
1757 engine->cancel_requests = execlists_cancel_requests;
1699 engine->schedule = execlists_schedule; 1758 engine->schedule = execlists_schedule;
1700 engine->irq_tasklet.func = intel_lrc_irq_handler; 1759 engine->execlists.irq_tasklet.func = intel_lrc_irq_handler;
1701} 1760}
1702 1761
1703static void 1762static void
@@ -1731,24 +1790,6 @@ logical_ring_default_irqs(struct intel_engine_cs *engine)
1731 engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift; 1790 engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
1732} 1791}
1733 1792
1734static int
1735lrc_setup_hws(struct intel_engine_cs *engine, struct i915_vma *vma)
1736{
1737 const int hws_offset = LRC_PPHWSP_PN * PAGE_SIZE;
1738 void *hws;
1739
1740 /* The HWSP is part of the default context object in LRC mode. */
1741 hws = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
1742 if (IS_ERR(hws))
1743 return PTR_ERR(hws);
1744
1745 engine->status_page.page_addr = hws + hws_offset;
1746 engine->status_page.ggtt_offset = i915_ggtt_offset(vma) + hws_offset;
1747 engine->status_page.vma = vma;
1748
1749 return 0;
1750}
1751
1752static void 1793static void
1753logical_ring_setup(struct intel_engine_cs *engine) 1794logical_ring_setup(struct intel_engine_cs *engine)
1754{ 1795{
@@ -1772,32 +1813,23 @@ logical_ring_setup(struct intel_engine_cs *engine)
1772 RING_CONTEXT_STATUS_BUF_BASE(engine), 1813 RING_CONTEXT_STATUS_BUF_BASE(engine),
1773 FW_REG_READ); 1814 FW_REG_READ);
1774 1815
1775 engine->fw_domains = fw_domains; 1816 engine->execlists.fw_domains = fw_domains;
1776 1817
1777 tasklet_init(&engine->irq_tasklet, 1818 tasklet_init(&engine->execlists.irq_tasklet,
1778 intel_lrc_irq_handler, (unsigned long)engine); 1819 intel_lrc_irq_handler, (unsigned long)engine);
1779 1820
1780 logical_ring_default_vfuncs(engine); 1821 logical_ring_default_vfuncs(engine);
1781 logical_ring_default_irqs(engine); 1822 logical_ring_default_irqs(engine);
1782} 1823}
1783 1824
1784static int 1825static int logical_ring_init(struct intel_engine_cs *engine)
1785logical_ring_init(struct intel_engine_cs *engine)
1786{ 1826{
1787 struct i915_gem_context *dctx = engine->i915->kernel_context;
1788 int ret; 1827 int ret;
1789 1828
1790 ret = intel_engine_init_common(engine); 1829 ret = intel_engine_init_common(engine);
1791 if (ret) 1830 if (ret)
1792 goto error; 1831 goto error;
1793 1832
1794 /* And setup the hardware status page. */
1795 ret = lrc_setup_hws(engine, dctx->engine[engine->id].state);
1796 if (ret) {
1797 DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret);
1798 goto error;
1799 }
1800
1801 return 0; 1833 return 0;
1802 1834
1803error: 1835error:
@@ -1955,13 +1987,12 @@ static void execlists_init_reg_state(u32 *regs,
1955 CTX_REG(regs, CTX_SECOND_BB_HEAD_L, RING_SBBADDR(base), 0); 1987 CTX_REG(regs, CTX_SECOND_BB_HEAD_L, RING_SBBADDR(base), 0);
1956 CTX_REG(regs, CTX_SECOND_BB_STATE, RING_SBBSTATE(base), 0); 1988 CTX_REG(regs, CTX_SECOND_BB_STATE, RING_SBBSTATE(base), 0);
1957 if (rcs) { 1989 if (rcs) {
1958 CTX_REG(regs, CTX_BB_PER_CTX_PTR, RING_BB_PER_CTX_PTR(base), 0); 1990 struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
1991
1959 CTX_REG(regs, CTX_RCS_INDIRECT_CTX, RING_INDIRECT_CTX(base), 0); 1992 CTX_REG(regs, CTX_RCS_INDIRECT_CTX, RING_INDIRECT_CTX(base), 0);
1960 CTX_REG(regs, CTX_RCS_INDIRECT_CTX_OFFSET, 1993 CTX_REG(regs, CTX_RCS_INDIRECT_CTX_OFFSET,
1961 RING_INDIRECT_CTX_OFFSET(base), 0); 1994 RING_INDIRECT_CTX_OFFSET(base), 0);
1962 1995 if (wa_ctx->indirect_ctx.size) {
1963 if (engine->wa_ctx.vma) {
1964 struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
1965 u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma); 1996 u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
1966 1997
1967 regs[CTX_RCS_INDIRECT_CTX + 1] = 1998 regs[CTX_RCS_INDIRECT_CTX + 1] =
@@ -1970,6 +2001,11 @@ static void execlists_init_reg_state(u32 *regs,
1970 2001
1971 regs[CTX_RCS_INDIRECT_CTX_OFFSET + 1] = 2002 regs[CTX_RCS_INDIRECT_CTX_OFFSET + 1] =
1972 intel_lr_indirect_ctx_offset(engine) << 6; 2003 intel_lr_indirect_ctx_offset(engine) << 6;
2004 }
2005
2006 CTX_REG(regs, CTX_BB_PER_CTX_PTR, RING_BB_PER_CTX_PTR(base), 0);
2007 if (wa_ctx->per_ctx.size) {
2008 u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
1973 2009
1974 regs[CTX_BB_PER_CTX_PTR + 1] = 2010 regs[CTX_BB_PER_CTX_PTR + 1] =
1975 (ggtt_offset + wa_ctx->per_ctx.offset) | 0x01; 2011 (ggtt_offset + wa_ctx->per_ctx.offset) | 0x01;
@@ -2054,8 +2090,11 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
2054 2090
2055 context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE); 2091 context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE);
2056 2092
2057 /* One extra page as the sharing data between driver and GuC */ 2093 /*
2058 context_size += PAGE_SIZE * LRC_PPHWSP_PN; 2094 * Before the actual start of the context image, we insert a few pages
2095 * for our own use and for sharing with the GuC.
2096 */
2097 context_size += LRC_HEADER_PAGES * PAGE_SIZE;
2059 2098
2060 ctx_obj = i915_gem_object_create(ctx->i915, context_size); 2099 ctx_obj = i915_gem_object_create(ctx->i915, context_size);
2061 if (IS_ERR(ctx_obj)) { 2100 if (IS_ERR(ctx_obj)) {
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 57ef5833c427..314adee7127a 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -25,6 +25,7 @@
25#define _INTEL_LRC_H_ 25#define _INTEL_LRC_H_
26 26
27#include "intel_ringbuffer.h" 27#include "intel_ringbuffer.h"
28#include "i915_gem_context.h"
28 29
29#define GEN8_LR_CONTEXT_ALIGN I915_GTT_MIN_ALIGNMENT 30#define GEN8_LR_CONTEXT_ALIGN I915_GTT_MIN_ALIGNMENT
30 31
@@ -69,17 +70,42 @@ int logical_xcs_ring_init(struct intel_engine_cs *engine);
69 70
70/* Logical Ring Contexts */ 71/* Logical Ring Contexts */
71 72
72/* One extra page is added before LRC for GuC as shared data */ 73/*
74 * We allocate a header at the start of the context image for our own
75 * use, therefore the actual location of the logical state is offset
76 * from the start of the VMA. The layout is
77 *
78 * | [guc] | [hwsp] [logical state] |
79 * |<- our header ->|<- context image ->|
80 *
81 */
82/* The first page is used for sharing data with the GuC */
73#define LRC_GUCSHR_PN (0) 83#define LRC_GUCSHR_PN (0)
74#define LRC_PPHWSP_PN (LRC_GUCSHR_PN + 1) 84#define LRC_GUCSHR_SZ (1)
75#define LRC_STATE_PN (LRC_PPHWSP_PN + 1) 85/* At the start of the context image is its per-process HWS page */
86#define LRC_PPHWSP_PN (LRC_GUCSHR_PN + LRC_GUCSHR_SZ)
87#define LRC_PPHWSP_SZ (1)
88/* Finally we have the logical state for the context */
89#define LRC_STATE_PN (LRC_PPHWSP_PN + LRC_PPHWSP_SZ)
90
91/*
92 * Currently we include the PPHWSP in __intel_engine_context_size() so
93 * the size of the header is synonymous with the start of the PPHWSP.
94 */
95#define LRC_HEADER_PAGES LRC_PPHWSP_PN
76 96
77struct drm_i915_private; 97struct drm_i915_private;
78struct i915_gem_context; 98struct i915_gem_context;
79 99
80void intel_lr_context_resume(struct drm_i915_private *dev_priv); 100void intel_lr_context_resume(struct drm_i915_private *dev_priv);
81uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx, 101
82 struct intel_engine_cs *engine); 102static inline uint64_t
103intel_lr_context_descriptor(struct i915_gem_context *ctx,
104 struct intel_engine_cs *engine)
105{
106 return ctx->engine[engine->id].lrc_desc;
107}
108
83 109
84/* Execlists */ 110/* Execlists */
85int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, 111int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index a9813aea89d8..a55954a89148 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -880,8 +880,8 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
880 struct drm_i915_private *dev_priv = to_i915(dev); 880 struct drm_i915_private *dev_priv = to_i915(dev);
881 881
882 /* use the module option value if specified */ 882 /* use the module option value if specified */
883 if (i915.lvds_channel_mode > 0) 883 if (i915_modparams.lvds_channel_mode > 0)
884 return i915.lvds_channel_mode == 2; 884 return i915_modparams.lvds_channel_mode == 2;
885 885
886 /* single channel LVDS is limited to 112 MHz */ 886 /* single channel LVDS is limited to 112 MHz */
887 if (lvds_encoder->attached_connector->base.panel.fixed_mode->clock 887 if (lvds_encoder->attached_connector->base.panel.fixed_mode->clock
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index 951e834dd274..28a778b785ac 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -30,6 +30,21 @@
30#include "intel_drv.h" 30#include "intel_drv.h"
31#include "i915_drv.h" 31#include "i915_drv.h"
32 32
33static void intel_connector_update_eld_conn_type(struct drm_connector *connector)
34{
35 u8 conn_type;
36
37 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
38 connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
39 conn_type = DRM_ELD_CONN_TYPE_DP;
40 } else {
41 conn_type = DRM_ELD_CONN_TYPE_HDMI;
42 }
43
44 connector->eld[DRM_ELD_SAD_COUNT_CONN_TYPE] &= ~DRM_ELD_CONN_TYPE_MASK;
45 connector->eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= conn_type;
46}
47
33/** 48/**
34 * intel_connector_update_modes - update connector from edid 49 * intel_connector_update_modes - update connector from edid
35 * @connector: DRM connector device to use 50 * @connector: DRM connector device to use
@@ -44,6 +59,8 @@ int intel_connector_update_modes(struct drm_connector *connector,
44 ret = drm_add_edid_modes(connector, edid); 59 ret = drm_add_edid_modes(connector, edid);
45 drm_edid_to_eld(connector, edid); 60 drm_edid_to_eld(connector, edid);
46 61
62 intel_connector_update_eld_conn_type(connector);
63
47 return ret; 64 return ret;
48} 65}
49 66
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 98154efcb2f4..1d946240e55f 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -921,7 +921,7 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
921{ 921{
922 struct intel_opregion *opregion = &dev_priv->opregion; 922 struct intel_opregion *opregion = &dev_priv->opregion;
923 const struct firmware *fw = NULL; 923 const struct firmware *fw = NULL;
924 const char *name = i915.vbt_firmware; 924 const char *name = i915_modparams.vbt_firmware;
925 int ret; 925 int ret;
926 926
927 if (!name || !*name) 927 if (!name || !*name)
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index aace22e7ccac..1b397b41cb4f 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -1134,7 +1134,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
1134 if (!params) 1134 if (!params)
1135 return -ENOMEM; 1135 return -ENOMEM;
1136 1136
1137 drmmode_crtc = drm_crtc_find(dev, put_image_rec->crtc_id); 1137 drmmode_crtc = drm_crtc_find(dev, file_priv, put_image_rec->crtc_id);
1138 if (!drmmode_crtc) { 1138 if (!drmmode_crtc) {
1139 ret = -ENOENT; 1139 ret = -ENOENT;
1140 goto out_free; 1140 goto out_free;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 3b1c5d783ee7..adc51e452e3e 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -379,13 +379,13 @@ enum drm_connector_status
379intel_panel_detect(struct drm_i915_private *dev_priv) 379intel_panel_detect(struct drm_i915_private *dev_priv)
380{ 380{
381 /* Assume that the BIOS does not lie through the OpRegion... */ 381 /* Assume that the BIOS does not lie through the OpRegion... */
382 if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) { 382 if (!i915_modparams.panel_ignore_lid && dev_priv->opregion.lid_state) {
383 return *dev_priv->opregion.lid_state & 0x1 ? 383 return *dev_priv->opregion.lid_state & 0x1 ?
384 connector_status_connected : 384 connector_status_connected :
385 connector_status_disconnected; 385 connector_status_disconnected;
386 } 386 }
387 387
388 switch (i915.panel_ignore_lid) { 388 switch (i915_modparams.panel_ignore_lid) {
389 case -2: 389 case -2:
390 return connector_status_connected; 390 return connector_status_connected;
391 case -1: 391 case -1:
@@ -465,10 +465,10 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector,
465 465
466 WARN_ON(panel->backlight.max == 0); 466 WARN_ON(panel->backlight.max == 0);
467 467
468 if (i915.invert_brightness < 0) 468 if (i915_modparams.invert_brightness < 0)
469 return val; 469 return val;
470 470
471 if (i915.invert_brightness > 0 || 471 if (i915_modparams.invert_brightness > 0 ||
472 dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) { 472 dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
473 return panel->backlight.max - val + panel->backlight.min; 473 return panel->backlight.max - val + panel->backlight.min;
474 } 474 }
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 0201816a4229..c66af09e27a7 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -58,24 +58,23 @@
58 58
59static void gen9_init_clock_gating(struct drm_i915_private *dev_priv) 59static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
60{ 60{
61 if (HAS_LLC(dev_priv)) {
62 /*
63 * WaCompressedResourceDisplayNewHashMode:skl,kbl
64 * Display WA#0390: skl,kbl
65 *
66 * Must match Sampler, Pixel Back End, and Media. See
67 * WaCompressedResourceSamplerPbeMediaNewHashMode.
68 */
69 I915_WRITE(CHICKEN_PAR1_1,
70 I915_READ(CHICKEN_PAR1_1) |
71 SKL_DE_COMPRESSED_HASH_MODE);
72 }
73
61 /* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl,cfl */ 74 /* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl,cfl */
62 I915_WRITE(CHICKEN_PAR1_1, 75 I915_WRITE(CHICKEN_PAR1_1,
63 I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP); 76 I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
64 77
65 /*
66 * Display WA#0390: skl,bxt,kbl,glk
67 *
68 * Must match Sampler, Pixel Back End, and Media
69 * (0xE194 bit 8, 0x7014 bit 13, 0x4DDC bits 27 and 31).
70 *
71 * Including bits outside the page in the hash would
72 * require 2 (or 4?) MiB alignment of resources. Just
73 * assume the defaul hashing mode which only uses bits
74 * within the page.
75 */
76 I915_WRITE(CHICKEN_PAR1_1,
77 I915_READ(CHICKEN_PAR1_1) & ~SKL_RC_HASH_OUTSIDE);
78
79 I915_WRITE(GEN8_CONFIG0, 78 I915_WRITE(GEN8_CONFIG0,
80 I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES); 79 I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES);
81 80
@@ -4376,134 +4375,147 @@ skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
4376 downscale_amount); 4375 downscale_amount);
4377} 4376}
4378 4377
4379static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, 4378static int
4380 struct intel_crtc_state *cstate, 4379skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
4381 const struct intel_plane_state *intel_pstate, 4380 struct intel_crtc_state *cstate,
4382 uint16_t ddb_allocation, 4381 const struct intel_plane_state *intel_pstate,
4383 int level, 4382 struct skl_wm_params *wp)
4384 uint16_t *out_blocks, /* out */
4385 uint8_t *out_lines, /* out */
4386 bool *enabled /* out */)
4387{ 4383{
4388 struct intel_plane *plane = to_intel_plane(intel_pstate->base.plane); 4384 struct intel_plane *plane = to_intel_plane(intel_pstate->base.plane);
4389 const struct drm_plane_state *pstate = &intel_pstate->base; 4385 const struct drm_plane_state *pstate = &intel_pstate->base;
4390 const struct drm_framebuffer *fb = pstate->fb; 4386 const struct drm_framebuffer *fb = pstate->fb;
4391 uint32_t latency = dev_priv->wm.skl_latency[level];
4392 uint_fixed_16_16_t method1, method2;
4393 uint_fixed_16_16_t plane_blocks_per_line;
4394 uint_fixed_16_16_t selected_result;
4395 uint32_t interm_pbpl; 4387 uint32_t interm_pbpl;
4396 uint32_t plane_bytes_per_line;
4397 uint32_t res_blocks, res_lines;
4398 uint8_t cpp;
4399 uint32_t width = 0;
4400 uint32_t plane_pixel_rate;
4401 uint_fixed_16_16_t y_tile_minimum;
4402 uint32_t y_min_scanlines;
4403 struct intel_atomic_state *state = 4388 struct intel_atomic_state *state =
4404 to_intel_atomic_state(cstate->base.state); 4389 to_intel_atomic_state(cstate->base.state);
4405 bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state); 4390 bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
4406 bool y_tiled, x_tiled;
4407 4391
4408 if (latency == 0 || 4392 if (!intel_wm_plane_visible(cstate, intel_pstate))
4409 !intel_wm_plane_visible(cstate, intel_pstate)) {
4410 *enabled = false;
4411 return 0; 4393 return 0;
4412 }
4413 4394
4414 y_tiled = fb->modifier == I915_FORMAT_MOD_Y_TILED || 4395 wp->y_tiled = fb->modifier == I915_FORMAT_MOD_Y_TILED ||
4415 fb->modifier == I915_FORMAT_MOD_Yf_TILED || 4396 fb->modifier == I915_FORMAT_MOD_Yf_TILED ||
4416 fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || 4397 fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
4417 fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS; 4398 fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
4418 x_tiled = fb->modifier == I915_FORMAT_MOD_X_TILED; 4399 wp->x_tiled = fb->modifier == I915_FORMAT_MOD_X_TILED;
4419 4400 wp->rc_surface = fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
4420 /* Display WA #1141: kbl,cfl */ 4401 fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
4421 if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) &&
4422 dev_priv->ipc_enabled)
4423 latency += 4;
4424
4425 if (apply_memory_bw_wa && x_tiled)
4426 latency += 15;
4427 4402
4428 if (plane->id == PLANE_CURSOR) { 4403 if (plane->id == PLANE_CURSOR) {
4429 width = intel_pstate->base.crtc_w; 4404 wp->width = intel_pstate->base.crtc_w;
4430 } else { 4405 } else {
4431 /* 4406 /*
4432 * Src coordinates are already rotated by 270 degrees for 4407 * Src coordinates are already rotated by 270 degrees for
4433 * the 90/270 degree plane rotation cases (to match the 4408 * the 90/270 degree plane rotation cases (to match the
4434 * GTT mapping), hence no need to account for rotation here. 4409 * GTT mapping), hence no need to account for rotation here.
4435 */ 4410 */
4436 width = drm_rect_width(&intel_pstate->base.src) >> 16; 4411 wp->width = drm_rect_width(&intel_pstate->base.src) >> 16;
4437 } 4412 }
4438 4413
4439 cpp = (fb->format->format == DRM_FORMAT_NV12) ? fb->format->cpp[1] : 4414 wp->cpp = (fb->format->format == DRM_FORMAT_NV12) ? fb->format->cpp[1] :
4440 fb->format->cpp[0]; 4415 fb->format->cpp[0];
4441 plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate); 4416 wp->plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate,
4417 intel_pstate);
4442 4418
4443 if (drm_rotation_90_or_270(pstate->rotation)) { 4419 if (drm_rotation_90_or_270(pstate->rotation)) {
4444 4420
4445 switch (cpp) { 4421 switch (wp->cpp) {
4446 case 1: 4422 case 1:
4447 y_min_scanlines = 16; 4423 wp->y_min_scanlines = 16;
4448 break; 4424 break;
4449 case 2: 4425 case 2:
4450 y_min_scanlines = 8; 4426 wp->y_min_scanlines = 8;
4451 break; 4427 break;
4452 case 4: 4428 case 4:
4453 y_min_scanlines = 4; 4429 wp->y_min_scanlines = 4;
4454 break; 4430 break;
4455 default: 4431 default:
4456 MISSING_CASE(cpp); 4432 MISSING_CASE(wp->cpp);
4457 return -EINVAL; 4433 return -EINVAL;
4458 } 4434 }
4459 } else { 4435 } else {
4460 y_min_scanlines = 4; 4436 wp->y_min_scanlines = 4;
4461 } 4437 }
4462 4438
4463 if (apply_memory_bw_wa) 4439 if (apply_memory_bw_wa)
4464 y_min_scanlines *= 2; 4440 wp->y_min_scanlines *= 2;
4465 4441
4466 plane_bytes_per_line = width * cpp; 4442 wp->plane_bytes_per_line = wp->width * wp->cpp;
4467 if (y_tiled) { 4443 if (wp->y_tiled) {
4468 interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line * 4444 interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line *
4469 y_min_scanlines, 512); 4445 wp->y_min_scanlines, 512);
4470 4446
4471 if (INTEL_GEN(dev_priv) >= 10) 4447 if (INTEL_GEN(dev_priv) >= 10)
4472 interm_pbpl++; 4448 interm_pbpl++;
4473 4449
4474 plane_blocks_per_line = div_fixed16(interm_pbpl, 4450 wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
4475 y_min_scanlines); 4451 wp->y_min_scanlines);
4476 } else if (x_tiled && INTEL_GEN(dev_priv) == 9) { 4452 } else if (wp->x_tiled && IS_GEN9(dev_priv)) {
4477 interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line, 512); 4453 interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line, 512);
4478 plane_blocks_per_line = u32_to_fixed16(interm_pbpl); 4454 wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
4479 } else { 4455 } else {
4480 interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1; 4456 interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line, 512) + 1;
4481 plane_blocks_per_line = u32_to_fixed16(interm_pbpl); 4457 wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
4458 }
4459
4460 wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines,
4461 wp->plane_blocks_per_line);
4462 wp->linetime_us = fixed16_to_u32_round_up(
4463 intel_get_linetime_us(cstate));
4464
4465 return 0;
4466}
4467
4468static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
4469 struct intel_crtc_state *cstate,
4470 const struct intel_plane_state *intel_pstate,
4471 uint16_t ddb_allocation,
4472 int level,
4473 const struct skl_wm_params *wp,
4474 uint16_t *out_blocks, /* out */
4475 uint8_t *out_lines, /* out */
4476 bool *enabled /* out */)
4477{
4478 const struct drm_plane_state *pstate = &intel_pstate->base;
4479 uint32_t latency = dev_priv->wm.skl_latency[level];
4480 uint_fixed_16_16_t method1, method2;
4481 uint_fixed_16_16_t selected_result;
4482 uint32_t res_blocks, res_lines;
4483 struct intel_atomic_state *state =
4484 to_intel_atomic_state(cstate->base.state);
4485 bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
4486
4487 if (latency == 0 ||
4488 !intel_wm_plane_visible(cstate, intel_pstate)) {
4489 *enabled = false;
4490 return 0;
4482 } 4491 }
4483 4492
4484 method1 = skl_wm_method1(dev_priv, plane_pixel_rate, cpp, latency); 4493 /* Display WA #1141: kbl,cfl */
4485 method2 = skl_wm_method2(plane_pixel_rate, 4494 if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) ||
4495 IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_B0)) &&
4496 dev_priv->ipc_enabled)
4497 latency += 4;
4498
4499 if (apply_memory_bw_wa && wp->x_tiled)
4500 latency += 15;
4501
4502 method1 = skl_wm_method1(dev_priv, wp->plane_pixel_rate,
4503 wp->cpp, latency);
4504 method2 = skl_wm_method2(wp->plane_pixel_rate,
4486 cstate->base.adjusted_mode.crtc_htotal, 4505 cstate->base.adjusted_mode.crtc_htotal,
4487 latency, 4506 latency,
4488 plane_blocks_per_line); 4507 wp->plane_blocks_per_line);
4489 4508
4490 y_tile_minimum = mul_u32_fixed16(y_min_scanlines, 4509 if (wp->y_tiled) {
4491 plane_blocks_per_line); 4510 selected_result = max_fixed16(method2, wp->y_tile_minimum);
4492
4493 if (y_tiled) {
4494 selected_result = max_fixed16(method2, y_tile_minimum);
4495 } else { 4511 } else {
4496 uint32_t linetime_us; 4512 if ((wp->cpp * cstate->base.adjusted_mode.crtc_htotal /
4497 4513 512 < 1) && (wp->plane_bytes_per_line / 512 < 1))
4498 linetime_us = fixed16_to_u32_round_up(
4499 intel_get_linetime_us(cstate));
4500 if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) &&
4501 (plane_bytes_per_line / 512 < 1))
4502 selected_result = method2; 4514 selected_result = method2;
4503 else if (ddb_allocation >= 4515 else if (ddb_allocation >=
4504 fixed16_to_u32_round_up(plane_blocks_per_line)) 4516 fixed16_to_u32_round_up(wp->plane_blocks_per_line))
4505 selected_result = min_fixed16(method1, method2); 4517 selected_result = min_fixed16(method1, method2);
4506 else if (latency >= linetime_us) 4518 else if (latency >= wp->linetime_us)
4507 selected_result = min_fixed16(method1, method2); 4519 selected_result = min_fixed16(method1, method2);
4508 else 4520 else
4509 selected_result = method1; 4521 selected_result = method1;
@@ -4511,19 +4523,18 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
4511 4523
4512 res_blocks = fixed16_to_u32_round_up(selected_result) + 1; 4524 res_blocks = fixed16_to_u32_round_up(selected_result) + 1;
4513 res_lines = div_round_up_fixed16(selected_result, 4525 res_lines = div_round_up_fixed16(selected_result,
4514 plane_blocks_per_line); 4526 wp->plane_blocks_per_line);
4515 4527
4516 /* Display WA #1125: skl,bxt,kbl,glk */ 4528 /* Display WA #1125: skl,bxt,kbl,glk */
4517 if (level == 0 && 4529 if (level == 0 && wp->rc_surface)
4518 (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || 4530 res_blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
4519 fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS))
4520 res_blocks += fixed16_to_u32_round_up(y_tile_minimum);
4521 4531
4522 /* Display WA #1126: skl,bxt,kbl,glk */ 4532 /* Display WA #1126: skl,bxt,kbl,glk */
4523 if (level >= 1 && level <= 7) { 4533 if (level >= 1 && level <= 7) {
4524 if (y_tiled) { 4534 if (wp->y_tiled) {
4525 res_blocks += fixed16_to_u32_round_up(y_tile_minimum); 4535 res_blocks += fixed16_to_u32_round_up(
4526 res_lines += y_min_scanlines; 4536 wp->y_tile_minimum);
4537 res_lines += wp->y_min_scanlines;
4527 } else { 4538 } else {
4528 res_blocks++; 4539 res_blocks++;
4529 } 4540 }
@@ -4561,6 +4572,7 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
4561 struct skl_ddb_allocation *ddb, 4572 struct skl_ddb_allocation *ddb,
4562 struct intel_crtc_state *cstate, 4573 struct intel_crtc_state *cstate,
4563 const struct intel_plane_state *intel_pstate, 4574 const struct intel_plane_state *intel_pstate,
4575 const struct skl_wm_params *wm_params,
4564 struct skl_plane_wm *wm) 4576 struct skl_plane_wm *wm)
4565{ 4577{
4566 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); 4578 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
@@ -4584,6 +4596,7 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
4584 intel_pstate, 4596 intel_pstate,
4585 ddb_blocks, 4597 ddb_blocks,
4586 level, 4598 level,
4599 wm_params,
4587 &result->plane_res_b, 4600 &result->plane_res_b,
4588 &result->plane_res_l, 4601 &result->plane_res_l,
4589 &result->plane_en); 4602 &result->plane_en);
@@ -4609,20 +4622,65 @@ skl_compute_linetime_wm(struct intel_crtc_state *cstate)
4609 4622
4610 linetime_wm = fixed16_to_u32_round_up(mul_u32_fixed16(8, linetime_us)); 4623 linetime_wm = fixed16_to_u32_round_up(mul_u32_fixed16(8, linetime_us));
4611 4624
4612 /* Display WA #1135: bxt. */ 4625 /* Display WA #1135: bxt:ALL GLK:ALL */
4613 if (IS_BROXTON(dev_priv) && dev_priv->ipc_enabled) 4626 if ((IS_BROXTON(dev_priv) || IS_GEMINILAKE(dev_priv)) &&
4614 linetime_wm = DIV_ROUND_UP(linetime_wm, 2); 4627 dev_priv->ipc_enabled)
4628 linetime_wm /= 2;
4615 4629
4616 return linetime_wm; 4630 return linetime_wm;
4617} 4631}
4618 4632
4619static void skl_compute_transition_wm(struct intel_crtc_state *cstate, 4633static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
4634 struct skl_wm_params *wp,
4635 struct skl_wm_level *wm_l0,
4636 uint16_t ddb_allocation,
4620 struct skl_wm_level *trans_wm /* out */) 4637 struct skl_wm_level *trans_wm /* out */)
4621{ 4638{
4639 struct drm_device *dev = cstate->base.crtc->dev;
4640 const struct drm_i915_private *dev_priv = to_i915(dev);
4641 uint16_t trans_min, trans_y_tile_min;
4642 const uint16_t trans_amount = 10; /* This is configurable amount */
4643 uint16_t trans_offset_b, res_blocks;
4644
4622 if (!cstate->base.active) 4645 if (!cstate->base.active)
4646 goto exit;
4647
4648 /* Transition WM are not recommended by HW team for GEN9 */
4649 if (INTEL_GEN(dev_priv) <= 9)
4650 goto exit;
4651
4652 /* Transition WM don't make any sense if ipc is disabled */
4653 if (!dev_priv->ipc_enabled)
4654 goto exit;
4655
4656 if (INTEL_GEN(dev_priv) >= 10)
4657 trans_min = 4;
4658
4659 trans_offset_b = trans_min + trans_amount;
4660
4661 if (wp->y_tiled) {
4662 trans_y_tile_min = (uint16_t) mul_round_up_u32_fixed16(2,
4663 wp->y_tile_minimum);
4664 res_blocks = max(wm_l0->plane_res_b, trans_y_tile_min) +
4665 trans_offset_b;
4666 } else {
4667 res_blocks = wm_l0->plane_res_b + trans_offset_b;
4668
4669 /* WA BUG:1938466 add one block for non y-tile planes */
4670 if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))
4671 res_blocks += 1;
4672
4673 }
4674
4675 res_blocks += 1;
4676
4677 if (res_blocks < ddb_allocation) {
4678 trans_wm->plane_res_b = res_blocks;
4679 trans_wm->plane_en = true;
4623 return; 4680 return;
4681 }
4624 4682
4625 /* Until we know more, just disable transition WMs */ 4683exit:
4626 trans_wm->plane_en = false; 4684 trans_wm->plane_en = false;
4627} 4685}
4628 4686
@@ -4648,14 +4706,25 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
4648 const struct intel_plane_state *intel_pstate = 4706 const struct intel_plane_state *intel_pstate =
4649 to_intel_plane_state(pstate); 4707 to_intel_plane_state(pstate);
4650 enum plane_id plane_id = to_intel_plane(plane)->id; 4708 enum plane_id plane_id = to_intel_plane(plane)->id;
4709 struct skl_wm_params wm_params;
4710 enum pipe pipe = to_intel_crtc(cstate->base.crtc)->pipe;
4711 uint16_t ddb_blocks;
4651 4712
4652 wm = &pipe_wm->planes[plane_id]; 4713 wm = &pipe_wm->planes[plane_id];
4714 ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]);
4715 memset(&wm_params, 0, sizeof(struct skl_wm_params));
4716
4717 ret = skl_compute_plane_wm_params(dev_priv, cstate,
4718 intel_pstate, &wm_params);
4719 if (ret)
4720 return ret;
4653 4721
4654 ret = skl_compute_wm_levels(dev_priv, ddb, cstate, 4722 ret = skl_compute_wm_levels(dev_priv, ddb, cstate,
4655 intel_pstate, wm); 4723 intel_pstate, &wm_params, wm);
4656 if (ret) 4724 if (ret)
4657 return ret; 4725 return ret;
4658 skl_compute_transition_wm(cstate, &wm->trans_wm); 4726 skl_compute_transition_wm(cstate, &wm_params, &wm->wm[0],
4727 ddb_blocks, &wm->trans_wm);
4659 } 4728 }
4660 pipe_wm->linetime = skl_compute_linetime_wm(cstate); 4729 pipe_wm->linetime = skl_compute_linetime_wm(cstate);
4661 4730
@@ -5754,6 +5823,30 @@ void intel_update_watermarks(struct intel_crtc *crtc)
5754 dev_priv->display.update_wm(crtc); 5823 dev_priv->display.update_wm(crtc);
5755} 5824}
5756 5825
5826void intel_enable_ipc(struct drm_i915_private *dev_priv)
5827{
5828 u32 val;
5829
5830 val = I915_READ(DISP_ARB_CTL2);
5831
5832 if (dev_priv->ipc_enabled)
5833 val |= DISP_IPC_ENABLE;
5834 else
5835 val &= ~DISP_IPC_ENABLE;
5836
5837 I915_WRITE(DISP_ARB_CTL2, val);
5838}
5839
5840void intel_init_ipc(struct drm_i915_private *dev_priv)
5841{
5842 dev_priv->ipc_enabled = false;
5843 if (!HAS_IPC(dev_priv))
5844 return;
5845
5846 dev_priv->ipc_enabled = true;
5847 intel_enable_ipc(dev_priv);
5848}
5849
5757/* 5850/*
5758 * Lock protecting IPS related data structures 5851 * Lock protecting IPS related data structures
5759 */ 5852 */
@@ -7732,7 +7825,7 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
7732 * RPM depends on RC6 to save restore the GT HW context, so make RC6 a 7825 * RPM depends on RC6 to save restore the GT HW context, so make RC6 a
7733 * requirement. 7826 * requirement.
7734 */ 7827 */
7735 if (!i915.enable_rc6) { 7828 if (!i915_modparams.enable_rc6) {
7736 DRM_INFO("RC6 disabled, disabling runtime PM support\n"); 7829 DRM_INFO("RC6 disabled, disabling runtime PM support\n");
7737 intel_runtime_pm_get(dev_priv); 7830 intel_runtime_pm_get(dev_priv);
7738 } 7831 }
@@ -7789,7 +7882,7 @@ void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
7789 if (IS_VALLEYVIEW(dev_priv)) 7882 if (IS_VALLEYVIEW(dev_priv))
7790 valleyview_cleanup_gt_powersave(dev_priv); 7883 valleyview_cleanup_gt_powersave(dev_priv);
7791 7884
7792 if (!i915.enable_rc6) 7885 if (!i915_modparams.enable_rc6)
7793 intel_runtime_pm_put(dev_priv); 7886 intel_runtime_pm_put(dev_priv);
7794} 7887}
7795 7888
@@ -7911,7 +8004,7 @@ static void __intel_autoenable_gt_powersave(struct work_struct *work)
7911 if (IS_ERR(req)) 8004 if (IS_ERR(req))
7912 goto unlock; 8005 goto unlock;
7913 8006
7914 if (!i915.enable_execlists && i915_switch_context(req) == 0) 8007 if (!i915_modparams.enable_execlists && i915_switch_context(req) == 0)
7915 rcs->init_context(req); 8008 rcs->init_context(req);
7916 8009
7917 /* Mark the device busy, calling intel_enable_gt_powersave() */ 8010 /* Mark the device busy, calling intel_enable_gt_powersave() */
@@ -8276,7 +8369,8 @@ static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
8276 return; 8369 return;
8277 8370
8278 /* Wa #1181 */ 8371 /* Wa #1181 */
8279 I915_WRITE(SOUTH_DSPCLK_GATE_D, CNP_PWM_CGE_GATING_DISABLE); 8372 I915_WRITE(SOUTH_DSPCLK_GATE_D, I915_READ(SOUTH_DSPCLK_GATE_D) |
8373 CNP_PWM_CGE_GATING_DISABLE);
8280} 8374}
8281 8375
8282static void cnl_init_clock_gating(struct drm_i915_private *dev_priv) 8376static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index f62ab05d3d62..5419cda83ba8 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -117,46 +117,41 @@ static void vlv_psr_setup_vsc(struct intel_dp *intel_dp,
117 I915_WRITE(VLV_VSCSDP(crtc->pipe), val); 117 I915_WRITE(VLV_VSCSDP(crtc->pipe), val);
118} 118}
119 119
120static void skl_psr_setup_su_vsc(struct intel_dp *intel_dp, 120static void hsw_psr_setup_vsc(struct intel_dp *intel_dp,
121 const struct intel_crtc_state *crtc_state) 121 const struct intel_crtc_state *crtc_state)
122{ 122{
123 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 123 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
124 struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); 124 struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
125 struct edp_vsc_psr psr_vsc; 125 struct edp_vsc_psr psr_vsc;
126 126
127 /* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */ 127 if (dev_priv->psr.psr2_support) {
128 memset(&psr_vsc, 0, sizeof(psr_vsc)); 128 /* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
129 psr_vsc.sdp_header.HB0 = 0; 129 memset(&psr_vsc, 0, sizeof(psr_vsc));
130 psr_vsc.sdp_header.HB1 = 0x7; 130 psr_vsc.sdp_header.HB0 = 0;
131 if (dev_priv->psr.colorimetry_support && 131 psr_vsc.sdp_header.HB1 = 0x7;
132 dev_priv->psr.y_cord_support) { 132 if (dev_priv->psr.colorimetry_support &&
133 psr_vsc.sdp_header.HB2 = 0x5; 133 dev_priv->psr.y_cord_support) {
134 psr_vsc.sdp_header.HB3 = 0x13; 134 psr_vsc.sdp_header.HB2 = 0x5;
135 } else if (dev_priv->psr.y_cord_support) { 135 psr_vsc.sdp_header.HB3 = 0x13;
136 psr_vsc.sdp_header.HB2 = 0x4; 136 } else if (dev_priv->psr.y_cord_support) {
137 psr_vsc.sdp_header.HB3 = 0xe; 137 psr_vsc.sdp_header.HB2 = 0x4;
138 psr_vsc.sdp_header.HB3 = 0xe;
139 } else {
140 psr_vsc.sdp_header.HB2 = 0x3;
141 psr_vsc.sdp_header.HB3 = 0xc;
142 }
138 } else { 143 } else {
139 psr_vsc.sdp_header.HB2 = 0x3; 144 /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
140 psr_vsc.sdp_header.HB3 = 0xc; 145 memset(&psr_vsc, 0, sizeof(psr_vsc));
146 psr_vsc.sdp_header.HB0 = 0;
147 psr_vsc.sdp_header.HB1 = 0x7;
148 psr_vsc.sdp_header.HB2 = 0x2;
149 psr_vsc.sdp_header.HB3 = 0x8;
141 } 150 }
142 151
143 intel_psr_write_vsc(intel_dp, &psr_vsc); 152 intel_psr_write_vsc(intel_dp, &psr_vsc);
144} 153}
145 154
146static void hsw_psr_setup_vsc(struct intel_dp *intel_dp,
147 const struct intel_crtc_state *crtc_state)
148{
149 struct edp_vsc_psr psr_vsc;
150
151 /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
152 memset(&psr_vsc, 0, sizeof(psr_vsc));
153 psr_vsc.sdp_header.HB0 = 0;
154 psr_vsc.sdp_header.HB1 = 0x7;
155 psr_vsc.sdp_header.HB2 = 0x2;
156 psr_vsc.sdp_header.HB3 = 0x8;
157 intel_psr_write_vsc(intel_dp, &psr_vsc);
158}
159
160static void vlv_psr_enable_sink(struct intel_dp *intel_dp) 155static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
161{ 156{
162 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 157 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
@@ -239,7 +234,7 @@ static void vlv_psr_enable_source(struct intel_dp *intel_dp,
239 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 234 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
240 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 235 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
241 236
242 /* Transition from PSR_state 0 to PSR_state 1, i.e. PSR Inactive */ 237 /* Transition from PSR_state 0 (disabled) to PSR_state 1 (inactive) */
243 I915_WRITE(VLV_PSRCTL(crtc->pipe), 238 I915_WRITE(VLV_PSRCTL(crtc->pipe),
244 VLV_EDP_PSR_MODE_SW_TIMER | 239 VLV_EDP_PSR_MODE_SW_TIMER |
245 VLV_EDP_PSR_SRC_TRANSMITTER_STATE | 240 VLV_EDP_PSR_SRC_TRANSMITTER_STATE |
@@ -254,16 +249,17 @@ static void vlv_psr_activate(struct intel_dp *intel_dp)
254 struct drm_crtc *crtc = dig_port->base.base.crtc; 249 struct drm_crtc *crtc = dig_port->base.base.crtc;
255 enum pipe pipe = to_intel_crtc(crtc)->pipe; 250 enum pipe pipe = to_intel_crtc(crtc)->pipe;
256 251
257 /* Let's do the transition from PSR_state 1 to PSR_state 2 252 /*
258 * that is PSR transition to active - static frame transmission. 253 * Let's do the transition from PSR_state 1 (inactive) to
259 * Then Hardware is responsible for the transition to PSR_state 3 254 * PSR_state 2 (transition to active - static frame transmission).
260 * that is PSR active - no Remote Frame Buffer (RFB) update. 255 * Then Hardware is responsible for the transition to
256 * PSR_state 3 (active - no Remote Frame Buffer (RFB) update).
261 */ 257 */
262 I915_WRITE(VLV_PSRCTL(pipe), I915_READ(VLV_PSRCTL(pipe)) | 258 I915_WRITE(VLV_PSRCTL(pipe), I915_READ(VLV_PSRCTL(pipe)) |
263 VLV_EDP_PSR_ACTIVE_ENTRY); 259 VLV_EDP_PSR_ACTIVE_ENTRY);
264} 260}
265 261
266static void intel_enable_source_psr1(struct intel_dp *intel_dp) 262static void hsw_activate_psr1(struct intel_dp *intel_dp)
267{ 263{
268 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 264 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
269 struct drm_device *dev = dig_port->base.base.dev; 265 struct drm_device *dev = dig_port->base.base.dev;
@@ -317,7 +313,7 @@ static void intel_enable_source_psr1(struct intel_dp *intel_dp)
317 I915_WRITE(EDP_PSR_CTL, val); 313 I915_WRITE(EDP_PSR_CTL, val);
318} 314}
319 315
320static void intel_enable_source_psr2(struct intel_dp *intel_dp) 316static void hsw_activate_psr2(struct intel_dp *intel_dp)
321{ 317{
322 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 318 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
323 struct drm_device *dev = dig_port->base.base.dev; 319 struct drm_device *dev = dig_port->base.base.dev;
@@ -331,6 +327,7 @@ static void intel_enable_source_psr2(struct intel_dp *intel_dp)
331 */ 327 */
332 uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames); 328 uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
333 uint32_t val; 329 uint32_t val;
330 uint8_t sink_latency;
334 331
335 val = idle_frames << EDP_PSR_IDLE_FRAME_SHIFT; 332 val = idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
336 333
@@ -338,8 +335,16 @@ static void intel_enable_source_psr2(struct intel_dp *intel_dp)
338 * mesh at all with our frontbuffer tracking. And the hw alone isn't 335 * mesh at all with our frontbuffer tracking. And the hw alone isn't
339 * good enough. */ 336 * good enough. */
340 val |= EDP_PSR2_ENABLE | 337 val |= EDP_PSR2_ENABLE |
341 EDP_SU_TRACK_ENABLE | 338 EDP_SU_TRACK_ENABLE;
342 EDP_FRAMES_BEFORE_SU_ENTRY; 339
340 if (drm_dp_dpcd_readb(&intel_dp->aux,
341 DP_SYNCHRONIZATION_LATENCY_IN_SINK,
342 &sink_latency) == 1) {
343 sink_latency &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
344 } else {
345 sink_latency = 0;
346 }
347 val |= EDP_PSR2_FRAME_BEFORE_SU(sink_latency + 1);
343 348
344 if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5) 349 if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
345 val |= EDP_PSR2_TP2_TIME_2500; 350 val |= EDP_PSR2_TP2_TIME_2500;
@@ -353,17 +358,22 @@ static void intel_enable_source_psr2(struct intel_dp *intel_dp)
353 I915_WRITE(EDP_PSR2_CTL, val); 358 I915_WRITE(EDP_PSR2_CTL, val);
354} 359}
355 360
356static void hsw_psr_enable_source(struct intel_dp *intel_dp) 361static void hsw_psr_activate(struct intel_dp *intel_dp)
357{ 362{
358 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 363 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
359 struct drm_device *dev = dig_port->base.base.dev; 364 struct drm_device *dev = dig_port->base.base.dev;
360 struct drm_i915_private *dev_priv = to_i915(dev); 365 struct drm_i915_private *dev_priv = to_i915(dev);
361 366
367 /* On HSW+ after we enable PSR on source it will activate it
368 * as soon as it match configure idle_frame count. So
369 * we just actually enable it here on activation time.
370 */
371
362 /* psr1 and psr2 are mutually exclusive.*/ 372 /* psr1 and psr2 are mutually exclusive.*/
363 if (dev_priv->psr.psr2_support) 373 if (dev_priv->psr.psr2_support)
364 intel_enable_source_psr2(intel_dp); 374 hsw_activate_psr2(intel_dp);
365 else 375 else
366 intel_enable_source_psr1(intel_dp); 376 hsw_activate_psr1(intel_dp);
367} 377}
368 378
369static bool intel_psr_match_conditions(struct intel_dp *intel_dp) 379static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
@@ -395,7 +405,7 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
395 return false; 405 return false;
396 } 406 }
397 407
398 if (!i915.enable_psr) { 408 if (!i915_modparams.enable_psr) {
399 DRM_DEBUG_KMS("PSR disable by flag\n"); 409 DRM_DEBUG_KMS("PSR disable by flag\n");
400 return false; 410 return false;
401 } 411 }
@@ -467,19 +477,46 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
467 WARN_ON(dev_priv->psr.active); 477 WARN_ON(dev_priv->psr.active);
468 lockdep_assert_held(&dev_priv->psr.lock); 478 lockdep_assert_held(&dev_priv->psr.lock);
469 479
470 /* Enable/Re-enable PSR on the host */ 480 dev_priv->psr.activate(intel_dp);
471 if (HAS_DDI(dev_priv))
472 /* On HSW+ after we enable PSR on source it will activate it
473 * as soon as it match configure idle_frame count. So
474 * we just actually enable it here on activation time.
475 */
476 hsw_psr_enable_source(intel_dp);
477 else
478 vlv_psr_activate(intel_dp);
479
480 dev_priv->psr.active = true; 481 dev_priv->psr.active = true;
481} 482}
482 483
484static void hsw_psr_enable_source(struct intel_dp *intel_dp,
485 const struct intel_crtc_state *crtc_state)
486{
487 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
488 struct drm_device *dev = dig_port->base.base.dev;
489 struct drm_i915_private *dev_priv = to_i915(dev);
490 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
491 u32 chicken;
492
493 if (dev_priv->psr.psr2_support) {
494 chicken = PSR2_VSC_ENABLE_PROG_HEADER;
495 if (dev_priv->psr.y_cord_support)
496 chicken |= PSR2_ADD_VERTICAL_LINE_COUNT;
497 I915_WRITE(CHICKEN_TRANS(cpu_transcoder), chicken);
498
499 I915_WRITE(EDP_PSR_DEBUG_CTL,
500 EDP_PSR_DEBUG_MASK_MEMUP |
501 EDP_PSR_DEBUG_MASK_HPD |
502 EDP_PSR_DEBUG_MASK_LPSP |
503 EDP_PSR_DEBUG_MASK_MAX_SLEEP |
504 EDP_PSR_DEBUG_MASK_DISP_REG_WRITE);
505 } else {
506 /*
507 * Per Spec: Avoid continuous PSR exit by masking MEMUP
508 * and HPD. also mask LPSP to avoid dependency on other
509 * drivers that might block runtime_pm besides
510 * preventing other hw tracking issues now we can rely
511 * on frontbuffer tracking.
512 */
513 I915_WRITE(EDP_PSR_DEBUG_CTL,
514 EDP_PSR_DEBUG_MASK_MEMUP |
515 EDP_PSR_DEBUG_MASK_HPD |
516 EDP_PSR_DEBUG_MASK_LPSP);
517 }
518}
519
483/** 520/**
484 * intel_psr_enable - Enable PSR 521 * intel_psr_enable - Enable PSR
485 * @intel_dp: Intel DP 522 * @intel_dp: Intel DP
@@ -493,19 +530,16 @@ void intel_psr_enable(struct intel_dp *intel_dp,
493 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 530 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
494 struct drm_device *dev = intel_dig_port->base.base.dev; 531 struct drm_device *dev = intel_dig_port->base.base.dev;
495 struct drm_i915_private *dev_priv = to_i915(dev); 532 struct drm_i915_private *dev_priv = to_i915(dev);
496 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
497 u32 chicken;
498 533
499 if (!HAS_PSR(dev_priv)) { 534 if (!HAS_PSR(dev_priv))
500 DRM_DEBUG_KMS("PSR not supported on this platform\n");
501 return; 535 return;
502 }
503 536
504 if (!is_edp_psr(intel_dp)) { 537 if (!is_edp_psr(intel_dp)) {
505 DRM_DEBUG_KMS("PSR not supported by this panel\n"); 538 DRM_DEBUG_KMS("PSR not supported by this panel\n");
506 return; 539 return;
507 } 540 }
508 541
542 WARN_ON(dev_priv->drrs.dp);
509 mutex_lock(&dev_priv->psr.lock); 543 mutex_lock(&dev_priv->psr.lock);
510 if (dev_priv->psr.enabled) { 544 if (dev_priv->psr.enabled) {
511 DRM_DEBUG_KMS("PSR already in use\n"); 545 DRM_DEBUG_KMS("PSR already in use\n");
@@ -517,72 +551,28 @@ void intel_psr_enable(struct intel_dp *intel_dp,
517 551
518 dev_priv->psr.busy_frontbuffer_bits = 0; 552 dev_priv->psr.busy_frontbuffer_bits = 0;
519 553
520 if (HAS_DDI(dev_priv)) { 554 dev_priv->psr.setup_vsc(intel_dp, crtc_state);
521 if (dev_priv->psr.psr2_support) { 555 dev_priv->psr.enable_sink(intel_dp);
522 skl_psr_setup_su_vsc(intel_dp, crtc_state); 556 dev_priv->psr.enable_source(intel_dp, crtc_state);
523 557 dev_priv->psr.enabled = intel_dp;
524 chicken = PSR2_VSC_ENABLE_PROG_HEADER;
525 if (dev_priv->psr.y_cord_support)
526 chicken |= PSR2_ADD_VERTICAL_LINE_COUNT;
527 I915_WRITE(CHICKEN_TRANS(cpu_transcoder), chicken);
528
529 I915_WRITE(EDP_PSR_DEBUG_CTL,
530 EDP_PSR_DEBUG_MASK_MEMUP |
531 EDP_PSR_DEBUG_MASK_HPD |
532 EDP_PSR_DEBUG_MASK_LPSP |
533 EDP_PSR_DEBUG_MASK_MAX_SLEEP |
534 EDP_PSR_DEBUG_MASK_DISP_REG_WRITE);
535 } else {
536 /* set up vsc header for psr1 */
537 hsw_psr_setup_vsc(intel_dp, crtc_state);
538
539 /*
540 * Per Spec: Avoid continuous PSR exit by masking MEMUP
541 * and HPD. also mask LPSP to avoid dependency on other
542 * drivers that might block runtime_pm besides
543 * preventing other hw tracking issues now we can rely
544 * on frontbuffer tracking.
545 */
546 I915_WRITE(EDP_PSR_DEBUG_CTL,
547 EDP_PSR_DEBUG_MASK_MEMUP |
548 EDP_PSR_DEBUG_MASK_HPD |
549 EDP_PSR_DEBUG_MASK_LPSP);
550 }
551
552 /* Enable PSR on the panel */
553 hsw_psr_enable_sink(intel_dp);
554 558
555 if (INTEL_GEN(dev_priv) >= 9) 559 if (INTEL_GEN(dev_priv) >= 9) {
556 intel_psr_activate(intel_dp); 560 intel_psr_activate(intel_dp);
557 } else { 561 } else {
558 vlv_psr_setup_vsc(intel_dp, crtc_state); 562 /*
559 563 * FIXME: Activation should happen immediately since this
560 /* Enable PSR on the panel */ 564 * function is just called after pipe is fully trained and
561 vlv_psr_enable_sink(intel_dp); 565 * enabled.
562 566 * However on some platforms we face issues when first
563 /* On HSW+ enable_source also means go to PSR entry/active 567 * activation follows a modeset so quickly.
564 * state as soon as idle_frame achieved and here would be 568 * - On VLV/CHV we get bank screen on first activation
565 * to soon. However on VLV enable_source just enable PSR 569 * - On HSW/BDW we get a recoverable frozen screen until
566 * but let it on inactive state. So we might do this prior 570 * next exit-activate sequence.
567 * to active transition, i.e. here.
568 */ 571 */
569 vlv_psr_enable_source(intel_dp, crtc_state);
570 }
571
572 /*
573 * FIXME: Activation should happen immediately since this function
574 * is just called after pipe is fully trained and enabled.
575 * However on every platform we face issues when first activation
576 * follows a modeset so quickly.
577 * - On VLV/CHV we get bank screen on first activation
578 * - On HSW/BDW we get a recoverable frozen screen until next
579 * exit-activate sequence.
580 */
581 if (INTEL_GEN(dev_priv) < 9)
582 schedule_delayed_work(&dev_priv->psr.work, 572 schedule_delayed_work(&dev_priv->psr.work,
583 msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5)); 573 msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
574 }
584 575
585 dev_priv->psr.enabled = intel_dp;
586unlock: 576unlock:
587 mutex_unlock(&dev_priv->psr.lock); 577 mutex_unlock(&dev_priv->psr.lock);
588} 578}
@@ -597,7 +587,7 @@ static void vlv_psr_disable(struct intel_dp *intel_dp,
597 uint32_t val; 587 uint32_t val;
598 588
599 if (dev_priv->psr.active) { 589 if (dev_priv->psr.active) {
600 /* Put VLV PSR back to PSR_state 0 that is PSR Disabled. */ 590 /* Put VLV PSR back to PSR_state 0 (disabled). */
601 if (intel_wait_for_register(dev_priv, 591 if (intel_wait_for_register(dev_priv,
602 VLV_PSRSTAT(crtc->pipe), 592 VLV_PSRSTAT(crtc->pipe),
603 VLV_EDP_PSR_IN_TRANS, 593 VLV_EDP_PSR_IN_TRANS,
@@ -678,17 +668,16 @@ void intel_psr_disable(struct intel_dp *intel_dp,
678 struct drm_device *dev = intel_dig_port->base.base.dev; 668 struct drm_device *dev = intel_dig_port->base.base.dev;
679 struct drm_i915_private *dev_priv = to_i915(dev); 669 struct drm_i915_private *dev_priv = to_i915(dev);
680 670
671 if (!HAS_PSR(dev_priv))
672 return;
673
681 mutex_lock(&dev_priv->psr.lock); 674 mutex_lock(&dev_priv->psr.lock);
682 if (!dev_priv->psr.enabled) { 675 if (!dev_priv->psr.enabled) {
683 mutex_unlock(&dev_priv->psr.lock); 676 mutex_unlock(&dev_priv->psr.lock);
684 return; 677 return;
685 } 678 }
686 679
687 /* Disable PSR on Source */ 680 dev_priv->psr.disable_source(intel_dp, old_crtc_state);
688 if (HAS_DDI(dev_priv))
689 hsw_psr_disable(intel_dp, old_crtc_state);
690 else
691 vlv_psr_disable(intel_dp, old_crtc_state);
692 681
693 /* Disable PSR on Sink */ 682 /* Disable PSR on Sink */
694 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0); 683 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
@@ -788,17 +777,20 @@ static void intel_psr_exit(struct drm_i915_private *dev_priv)
788 } else { 777 } else {
789 val = I915_READ(VLV_PSRCTL(pipe)); 778 val = I915_READ(VLV_PSRCTL(pipe));
790 779
791 /* Here we do the transition from PSR_state 3 to PSR_state 5 780 /*
792 * directly once PSR State 4 that is active with single frame 781 * Here we do the transition drirectly from
793 * update can be skipped. PSR_state 5 that is PSR exit then 782 * PSR_state 3 (active - no Remote Frame Buffer (RFB) update) to
794 * Hardware is responsible to transition back to PSR_state 1 783 * PSR_state 5 (exit).
795 * that is PSR inactive. Same state after 784 * PSR State 4 (active with single frame update) can be skipped.
796 * vlv_edp_psr_enable_source. 785 * On PSR_state 5 (exit) Hardware is responsible to transition
786 * back to PSR_state 1 (inactive).
787 * Now we are at Same state after vlv_psr_enable_source.
797 */ 788 */
798 val &= ~VLV_EDP_PSR_ACTIVE_ENTRY; 789 val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
799 I915_WRITE(VLV_PSRCTL(pipe), val); 790 I915_WRITE(VLV_PSRCTL(pipe), val);
800 791
801 /* Send AUX wake up - Spec says after transitioning to PSR 792 /*
793 * Send AUX wake up - Spec says after transitioning to PSR
802 * active we have to send AUX wake up by writing 01h in DPCD 794 * active we have to send AUX wake up by writing 01h in DPCD
803 * 600h of sink device. 795 * 600h of sink device.
804 * XXX: This might slow down the transition, but without this 796 * XXX: This might slow down the transition, but without this
@@ -829,6 +821,9 @@ void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
829 enum pipe pipe; 821 enum pipe pipe;
830 u32 val; 822 u32 val;
831 823
824 if (!HAS_PSR(dev_priv))
825 return;
826
832 /* 827 /*
833 * Single frame update is already supported on BDW+ but it requires 828 * Single frame update is already supported on BDW+ but it requires
834 * many W/A and it isn't really needed. 829 * many W/A and it isn't really needed.
@@ -875,6 +870,9 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
875 struct drm_crtc *crtc; 870 struct drm_crtc *crtc;
876 enum pipe pipe; 871 enum pipe pipe;
877 872
873 if (!HAS_PSR(dev_priv))
874 return;
875
878 mutex_lock(&dev_priv->psr.lock); 876 mutex_lock(&dev_priv->psr.lock);
879 if (!dev_priv->psr.enabled) { 877 if (!dev_priv->psr.enabled) {
880 mutex_unlock(&dev_priv->psr.lock); 878 mutex_unlock(&dev_priv->psr.lock);
@@ -912,6 +910,9 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
912 struct drm_crtc *crtc; 910 struct drm_crtc *crtc;
913 enum pipe pipe; 911 enum pipe pipe;
914 912
913 if (!HAS_PSR(dev_priv))
914 return;
915
915 mutex_lock(&dev_priv->psr.lock); 916 mutex_lock(&dev_priv->psr.lock);
916 if (!dev_priv->psr.enabled) { 917 if (!dev_priv->psr.enabled) {
917 mutex_unlock(&dev_priv->psr.lock); 918 mutex_unlock(&dev_priv->psr.lock);
@@ -944,12 +945,15 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
944 */ 945 */
945void intel_psr_init(struct drm_i915_private *dev_priv) 946void intel_psr_init(struct drm_i915_private *dev_priv)
946{ 947{
948 if (!HAS_PSR(dev_priv))
949 return;
950
947 dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ? 951 dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
948 HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE; 952 HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
949 953
950 /* Per platform default: all disabled. */ 954 /* Per platform default: all disabled. */
951 if (i915.enable_psr == -1) 955 if (i915_modparams.enable_psr == -1)
952 i915.enable_psr = 0; 956 i915_modparams.enable_psr = 0;
953 957
954 /* Set link_standby x link_off defaults */ 958 /* Set link_standby x link_off defaults */
955 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 959 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
@@ -963,15 +967,29 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
963 dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link; 967 dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
964 968
965 /* Override link_standby x link_off defaults */ 969 /* Override link_standby x link_off defaults */
966 if (i915.enable_psr == 2 && !dev_priv->psr.link_standby) { 970 if (i915_modparams.enable_psr == 2 && !dev_priv->psr.link_standby) {
967 DRM_DEBUG_KMS("PSR: Forcing link standby\n"); 971 DRM_DEBUG_KMS("PSR: Forcing link standby\n");
968 dev_priv->psr.link_standby = true; 972 dev_priv->psr.link_standby = true;
969 } 973 }
970 if (i915.enable_psr == 3 && dev_priv->psr.link_standby) { 974 if (i915_modparams.enable_psr == 3 && dev_priv->psr.link_standby) {
971 DRM_DEBUG_KMS("PSR: Forcing main link off\n"); 975 DRM_DEBUG_KMS("PSR: Forcing main link off\n");
972 dev_priv->psr.link_standby = false; 976 dev_priv->psr.link_standby = false;
973 } 977 }
974 978
975 INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work); 979 INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
976 mutex_init(&dev_priv->psr.lock); 980 mutex_init(&dev_priv->psr.lock);
981
982 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
983 dev_priv->psr.enable_source = vlv_psr_enable_source;
984 dev_priv->psr.disable_source = vlv_psr_disable;
985 dev_priv->psr.enable_sink = vlv_psr_enable_sink;
986 dev_priv->psr.activate = vlv_psr_activate;
987 dev_priv->psr.setup_vsc = vlv_psr_setup_vsc;
988 } else {
989 dev_priv->psr.enable_source = hsw_psr_enable_source;
990 dev_priv->psr.disable_source = hsw_psr_disable;
991 dev_priv->psr.enable_sink = hsw_psr_enable_sink;
992 dev_priv->psr.activate = hsw_psr_activate;
993 dev_priv->psr.setup_vsc = hsw_psr_setup_vsc;
994 }
977} 995}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index cdf084ef5aae..05c08b0bc172 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -402,17 +402,18 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
402 */ 402 */
403 if (IS_GEN7(dev_priv)) { 403 if (IS_GEN7(dev_priv)) {
404 switch (engine->id) { 404 switch (engine->id) {
405 /*
406 * No more rings exist on Gen7. Default case is only to shut up
407 * gcc switch check warning.
408 */
409 default:
410 GEM_BUG_ON(engine->id);
405 case RCS: 411 case RCS:
406 mmio = RENDER_HWS_PGA_GEN7; 412 mmio = RENDER_HWS_PGA_GEN7;
407 break; 413 break;
408 case BCS: 414 case BCS:
409 mmio = BLT_HWS_PGA_GEN7; 415 mmio = BLT_HWS_PGA_GEN7;
410 break; 416 break;
411 /*
412 * VCS2 actually doesn't exist on Gen7. Only shut up
413 * gcc switch check warning
414 */
415 case VCS2:
416 case VCS: 417 case VCS:
417 mmio = BSD_HWS_PGA_GEN7; 418 mmio = BSD_HWS_PGA_GEN7;
418 break; 419 break;
@@ -427,6 +428,9 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
427 mmio = RING_HWS_PGA(engine->mmio_base); 428 mmio = RING_HWS_PGA(engine->mmio_base);
428 } 429 }
429 430
431 if (INTEL_GEN(dev_priv) >= 6)
432 I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
433
430 I915_WRITE(mmio, engine->status_page.ggtt_offset); 434 I915_WRITE(mmio, engine->status_page.ggtt_offset);
431 POSTING_READ(mmio); 435 POSTING_READ(mmio);
432 436
@@ -778,6 +782,24 @@ static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *cs)
778 return cs; 782 return cs;
779} 783}
780 784
785static void cancel_requests(struct intel_engine_cs *engine)
786{
787 struct drm_i915_gem_request *request;
788 unsigned long flags;
789
790 spin_lock_irqsave(&engine->timeline->lock, flags);
791
792 /* Mark all submitted requests as skipped. */
793 list_for_each_entry(request, &engine->timeline->requests, link) {
794 GEM_BUG_ON(!request->global_seqno);
795 if (!i915_gem_request_completed(request))
796 dma_fence_set_error(&request->fence, -EIO);
797 }
798 /* Remaining _unready_ requests will be nop'ed when submitted */
799
800 spin_unlock_irqrestore(&engine->timeline->lock, flags);
801}
802
781static void i9xx_submit_request(struct drm_i915_gem_request *request) 803static void i9xx_submit_request(struct drm_i915_gem_request *request)
782{ 804{
783 struct drm_i915_private *dev_priv = request->i915; 805 struct drm_i915_private *dev_priv = request->i915;
@@ -1174,113 +1196,7 @@ i915_emit_bb_start(struct drm_i915_gem_request *req,
1174 return 0; 1196 return 0;
1175} 1197}
1176 1198
1177static void cleanup_phys_status_page(struct intel_engine_cs *engine)
1178{
1179 struct drm_i915_private *dev_priv = engine->i915;
1180
1181 if (!dev_priv->status_page_dmah)
1182 return;
1183
1184 drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah);
1185 engine->status_page.page_addr = NULL;
1186}
1187
1188static void cleanup_status_page(struct intel_engine_cs *engine)
1189{
1190 struct i915_vma *vma;
1191 struct drm_i915_gem_object *obj;
1192
1193 vma = fetch_and_zero(&engine->status_page.vma);
1194 if (!vma)
1195 return;
1196
1197 obj = vma->obj;
1198
1199 i915_vma_unpin(vma);
1200 i915_vma_close(vma);
1201
1202 i915_gem_object_unpin_map(obj);
1203 __i915_gem_object_release_unless_active(obj);
1204}
1205
1206static int init_status_page(struct intel_engine_cs *engine)
1207{
1208 struct drm_i915_gem_object *obj;
1209 struct i915_vma *vma;
1210 unsigned int flags;
1211 void *vaddr;
1212 int ret;
1213
1214 obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
1215 if (IS_ERR(obj)) {
1216 DRM_ERROR("Failed to allocate status page\n");
1217 return PTR_ERR(obj);
1218 }
1219 1199
1220 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
1221 if (ret)
1222 goto err;
1223
1224 vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
1225 if (IS_ERR(vma)) {
1226 ret = PTR_ERR(vma);
1227 goto err;
1228 }
1229
1230 flags = PIN_GLOBAL;
1231 if (!HAS_LLC(engine->i915))
1232 /* On g33, we cannot place HWS above 256MiB, so
1233 * restrict its pinning to the low mappable arena.
1234 * Though this restriction is not documented for
1235 * gen4, gen5, or byt, they also behave similarly
1236 * and hang if the HWS is placed at the top of the
1237 * GTT. To generalise, it appears that all !llc
1238 * platforms have issues with us placing the HWS
1239 * above the mappable region (even though we never
1240 * actualy map it).
1241 */
1242 flags |= PIN_MAPPABLE;
1243 ret = i915_vma_pin(vma, 0, 4096, flags);
1244 if (ret)
1245 goto err;
1246
1247 vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
1248 if (IS_ERR(vaddr)) {
1249 ret = PTR_ERR(vaddr);
1250 goto err_unpin;
1251 }
1252
1253 engine->status_page.vma = vma;
1254 engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
1255 engine->status_page.page_addr = memset(vaddr, 0, PAGE_SIZE);
1256
1257 DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
1258 engine->name, i915_ggtt_offset(vma));
1259 return 0;
1260
1261err_unpin:
1262 i915_vma_unpin(vma);
1263err:
1264 i915_gem_object_put(obj);
1265 return ret;
1266}
1267
1268static int init_phys_status_page(struct intel_engine_cs *engine)
1269{
1270 struct drm_i915_private *dev_priv = engine->i915;
1271
1272 GEM_BUG_ON(engine->id != RCS);
1273
1274 dev_priv->status_page_dmah =
1275 drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
1276 if (!dev_priv->status_page_dmah)
1277 return -ENOMEM;
1278
1279 engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1280 memset(engine->status_page.page_addr, 0, PAGE_SIZE);
1281
1282 return 0;
1283}
1284 1200
1285int intel_ring_pin(struct intel_ring *ring, 1201int intel_ring_pin(struct intel_ring *ring,
1286 struct drm_i915_private *i915, 1202 struct drm_i915_private *i915,
@@ -1567,17 +1483,10 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
1567 if (err) 1483 if (err)
1568 goto err; 1484 goto err;
1569 1485
1570 if (HWS_NEEDS_PHYSICAL(engine->i915))
1571 err = init_phys_status_page(engine);
1572 else
1573 err = init_status_page(engine);
1574 if (err)
1575 goto err;
1576
1577 ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE); 1486 ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
1578 if (IS_ERR(ring)) { 1487 if (IS_ERR(ring)) {
1579 err = PTR_ERR(ring); 1488 err = PTR_ERR(ring);
1580 goto err_hws; 1489 goto err;
1581 } 1490 }
1582 1491
1583 /* Ring wraparound at offset 0 sometimes hangs. No idea why. */ 1492 /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
@@ -1592,11 +1501,6 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
1592 1501
1593err_ring: 1502err_ring:
1594 intel_ring_free(ring); 1503 intel_ring_free(ring);
1595err_hws:
1596 if (HWS_NEEDS_PHYSICAL(engine->i915))
1597 cleanup_phys_status_page(engine);
1598 else
1599 cleanup_status_page(engine);
1600err: 1504err:
1601 intel_engine_cleanup_common(engine); 1505 intel_engine_cleanup_common(engine);
1602 return err; 1506 return err;
@@ -1615,11 +1519,6 @@ void intel_engine_cleanup(struct intel_engine_cs *engine)
1615 if (engine->cleanup) 1519 if (engine->cleanup)
1616 engine->cleanup(engine); 1520 engine->cleanup(engine);
1617 1521
1618 if (HWS_NEEDS_PHYSICAL(dev_priv))
1619 cleanup_phys_status_page(engine);
1620 else
1621 cleanup_status_page(engine);
1622
1623 intel_engine_cleanup_common(engine); 1522 intel_engine_cleanup_common(engine);
1624 1523
1625 dev_priv->engine[engine->id] = NULL; 1524 dev_priv->engine[engine->id] = NULL;
@@ -1983,7 +1882,7 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
1983 struct drm_i915_gem_object *obj; 1882 struct drm_i915_gem_object *obj;
1984 int ret, i; 1883 int ret, i;
1985 1884
1986 if (!i915.semaphores) 1885 if (!i915_modparams.semaphores)
1987 return; 1886 return;
1988 1887
1989 if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) { 1888 if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) {
@@ -2083,7 +1982,7 @@ err_obj:
2083 i915_gem_object_put(obj); 1982 i915_gem_object_put(obj);
2084err: 1983err:
2085 DRM_DEBUG_DRIVER("Failed to allocate space for semaphores, disabling\n"); 1984 DRM_DEBUG_DRIVER("Failed to allocate space for semaphores, disabling\n");
2086 i915.semaphores = 0; 1985 i915_modparams.semaphores = 0;
2087} 1986}
2088 1987
2089static void intel_ring_init_irq(struct drm_i915_private *dev_priv, 1988static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
@@ -2115,11 +2014,13 @@ static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
2115static void i9xx_set_default_submission(struct intel_engine_cs *engine) 2014static void i9xx_set_default_submission(struct intel_engine_cs *engine)
2116{ 2015{
2117 engine->submit_request = i9xx_submit_request; 2016 engine->submit_request = i9xx_submit_request;
2017 engine->cancel_requests = cancel_requests;
2118} 2018}
2119 2019
2120static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine) 2020static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
2121{ 2021{
2122 engine->submit_request = gen6_bsd_submit_request; 2022 engine->submit_request = gen6_bsd_submit_request;
2023 engine->cancel_requests = cancel_requests;
2123} 2024}
2124 2025
2125static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv, 2026static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
@@ -2138,7 +2039,7 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
2138 2039
2139 engine->emit_breadcrumb = i9xx_emit_breadcrumb; 2040 engine->emit_breadcrumb = i9xx_emit_breadcrumb;
2140 engine->emit_breadcrumb_sz = i9xx_emit_breadcrumb_sz; 2041 engine->emit_breadcrumb_sz = i9xx_emit_breadcrumb_sz;
2141 if (i915.semaphores) { 2042 if (i915_modparams.semaphores) {
2142 int num_rings; 2043 int num_rings;
2143 2044
2144 engine->emit_breadcrumb = gen6_sema_emit_breadcrumb; 2045 engine->emit_breadcrumb = gen6_sema_emit_breadcrumb;
@@ -2182,7 +2083,7 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
2182 engine->emit_breadcrumb = gen8_render_emit_breadcrumb; 2083 engine->emit_breadcrumb = gen8_render_emit_breadcrumb;
2183 engine->emit_breadcrumb_sz = gen8_render_emit_breadcrumb_sz; 2084 engine->emit_breadcrumb_sz = gen8_render_emit_breadcrumb_sz;
2184 engine->emit_flush = gen8_render_ring_flush; 2085 engine->emit_flush = gen8_render_ring_flush;
2185 if (i915.semaphores) { 2086 if (i915_modparams.semaphores) {
2186 int num_rings; 2087 int num_rings;
2187 2088
2188 engine->semaphore.signal = gen8_rcs_signal; 2089 engine->semaphore.signal = gen8_rcs_signal;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 79c0021f3700..56d7ae9f298b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -184,6 +184,91 @@ struct i915_priolist {
184 int priority; 184 int priority;
185}; 185};
186 186
187/**
188 * struct intel_engine_execlists - execlist submission queue and port state
189 *
190 * The struct intel_engine_execlists represents the combined logical state of
191 * driver and the hardware state for execlist mode of submission.
192 */
193struct intel_engine_execlists {
194 /**
195 * @irq_tasklet: softirq tasklet for bottom handler
196 */
197 struct tasklet_struct irq_tasklet;
198
199 /**
200 * @default_priolist: priority list for I915_PRIORITY_NORMAL
201 */
202 struct i915_priolist default_priolist;
203
204 /**
205 * @no_priolist: priority lists disabled
206 */
207 bool no_priolist;
208
209 /**
210 * @port: execlist port states
211 *
212 * For each hardware ELSP (ExecList Submission Port) we keep
213 * track of the last request and the number of times we submitted
214 * that port to hw. We then count the number of times the hw reports
215 * a context completion or preemption. As only one context can
216 * be active on hw, we limit resubmission of context to port[0]. This
217 * is called Lite Restore, of the context.
218 */
219 struct execlist_port {
220 /**
221 * @request_count: combined request and submission count
222 */
223 struct drm_i915_gem_request *request_count;
224#define EXECLIST_COUNT_BITS 2
225#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
226#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
227#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
228#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
229#define port_set(p, packed) ((p)->request_count = (packed))
230#define port_isset(p) ((p)->request_count)
231#define port_index(p, execlists) ((p) - (execlists)->port)
232
233 /**
234 * @context_id: context ID for port
235 */
236 GEM_DEBUG_DECL(u32 context_id);
237
238#define EXECLIST_MAX_PORTS 2
239 } port[EXECLIST_MAX_PORTS];
240
241 /**
242 * @port_mask: number of execlist ports - 1
243 */
244 unsigned int port_mask;
245
246 /**
247 * @queue: queue of requests, in priority lists
248 */
249 struct rb_root queue;
250
251 /**
252 * @first: leftmost level in priority @queue
253 */
254 struct rb_node *first;
255
256 /**
257 * @fw_domains: forcewake domains for irq tasklet
258 */
259 unsigned int fw_domains;
260
261 /**
262 * @csb_head: context status buffer head
263 */
264 unsigned int csb_head;
265
266 /**
267 * @csb_use_mmio: access csb through mmio, instead of hwsp
268 */
269 bool csb_use_mmio;
270};
271
187#define INTEL_ENGINE_CS_MAX_NAME 8 272#define INTEL_ENGINE_CS_MAX_NAME 8
188 273
189struct intel_engine_cs { 274struct intel_engine_cs {
@@ -306,6 +391,14 @@ struct intel_engine_cs {
306 void (*schedule)(struct drm_i915_gem_request *request, 391 void (*schedule)(struct drm_i915_gem_request *request,
307 int priority); 392 int priority);
308 393
394 /*
395 * Cancel all requests on the hardware, or queued for execution.
396 * This should only cancel the ready requests that have been
397 * submitted to the engine (via the engine->submit_request callback).
398 * This is called when marking the device as wedged.
399 */
400 void (*cancel_requests)(struct intel_engine_cs *engine);
401
309 /* Some chipsets are not quite as coherent as advertised and need 402 /* Some chipsets are not quite as coherent as advertised and need
310 * an expensive kick to force a true read of the up-to-date seqno. 403 * an expensive kick to force a true read of the up-to-date seqno.
311 * However, the up-to-date seqno is not always required and the last 404 * However, the up-to-date seqno is not always required and the last
@@ -372,25 +465,7 @@ struct intel_engine_cs {
372 u32 *(*signal)(struct drm_i915_gem_request *req, u32 *cs); 465 u32 *(*signal)(struct drm_i915_gem_request *req, u32 *cs);
373 } semaphore; 466 } semaphore;
374 467
375 /* Execlists */ 468 struct intel_engine_execlists execlists;
376 struct tasklet_struct irq_tasklet;
377 struct i915_priolist default_priolist;
378 bool no_priolist;
379 struct execlist_port {
380 struct drm_i915_gem_request *request_count;
381#define EXECLIST_COUNT_BITS 2
382#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
383#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
384#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
385#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
386#define port_set(p, packed) ((p)->request_count = (packed))
387#define port_isset(p) ((p)->request_count)
388#define port_index(p, e) ((p) - (e)->execlist_port)
389 GEM_DEBUG_DECL(u32 context_id);
390 } execlist_port[2];
391 struct rb_root execlist_queue;
392 struct rb_node *execlist_first;
393 unsigned int fw_domains;
394 469
395 /* Contexts are pinned whilst they are active on the GPU. The last 470 /* Contexts are pinned whilst they are active on the GPU. The last
396 * context executed remains active whilst the GPU is idle - the 471 * context executed remains active whilst the GPU is idle - the
@@ -444,6 +519,24 @@ struct intel_engine_cs {
444}; 519};
445 520
446static inline unsigned int 521static inline unsigned int
522execlists_num_ports(const struct intel_engine_execlists * const execlists)
523{
524 return execlists->port_mask + 1;
525}
526
527static inline void
528execlists_port_complete(struct intel_engine_execlists * const execlists,
529 struct execlist_port * const port)
530{
531 const unsigned int m = execlists->port_mask;
532
533 GEM_BUG_ON(port_index(port, execlists) != 0);
534
535 memmove(port, port + 1, m * sizeof(struct execlist_port));
536 memset(port + m, 0, sizeof(struct execlist_port));
537}
538
539static inline unsigned int
447intel_engine_flag(const struct intel_engine_cs *engine) 540intel_engine_flag(const struct intel_engine_cs *engine)
448{ 541{
449 return BIT(engine->id); 542 return BIT(engine->id);
@@ -496,6 +589,10 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
496#define I915_GEM_HWS_SCRATCH_INDEX 0x40 589#define I915_GEM_HWS_SCRATCH_INDEX 0x40
497#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT) 590#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
498 591
592#define I915_HWS_CSB_BUF0_INDEX 0x10
593#define I915_HWS_CSB_WRITE_INDEX 0x1f
594#define CNL_HWS_CSB_WRITE_INDEX 0x2f
595
499struct intel_ring * 596struct intel_ring *
500intel_engine_create_ring(struct intel_engine_cs *engine, int size); 597intel_engine_create_ring(struct intel_engine_cs *engine, int size);
501int intel_ring_pin(struct intel_ring *ring, 598int intel_ring_pin(struct intel_ring *ring,
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index a3bfb9f27e7a..7933d1bc6a1c 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -2413,7 +2413,7 @@ static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
2413 mask = 0; 2413 mask = 0;
2414 } 2414 }
2415 2415
2416 if (!i915.disable_power_well) 2416 if (!i915_modparams.disable_power_well)
2417 max_dc = 0; 2417 max_dc = 0;
2418 2418
2419 if (enable_dc >= 0 && enable_dc <= max_dc) { 2419 if (enable_dc >= 0 && enable_dc <= max_dc) {
@@ -2471,10 +2471,11 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
2471{ 2471{
2472 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2472 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2473 2473
2474 i915.disable_power_well = sanitize_disable_power_well_option(dev_priv, 2474 i915_modparams.disable_power_well =
2475 i915.disable_power_well); 2475 sanitize_disable_power_well_option(dev_priv,
2476 dev_priv->csr.allowed_dc_mask = get_allowed_dc_mask(dev_priv, 2476 i915_modparams.disable_power_well);
2477 i915.enable_dc); 2477 dev_priv->csr.allowed_dc_mask =
2478 get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);
2478 2479
2479 BUILD_BUG_ON(POWER_DOMAIN_NUM > 64); 2480 BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
2480 2481
@@ -2535,7 +2536,7 @@ void intel_power_domains_fini(struct drm_i915_private *dev_priv)
2535 intel_display_set_init_power(dev_priv, true); 2536 intel_display_set_init_power(dev_priv, true);
2536 2537
2537 /* Remove the refcount we took to keep power well support disabled. */ 2538 /* Remove the refcount we took to keep power well support disabled. */
2538 if (!i915.disable_power_well) 2539 if (!i915_modparams.disable_power_well)
2539 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); 2540 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
2540 2541
2541 /* 2542 /*
@@ -2995,7 +2996,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
2995 /* For now, we need the power well to be always enabled. */ 2996 /* For now, we need the power well to be always enabled. */
2996 intel_display_set_init_power(dev_priv, true); 2997 intel_display_set_init_power(dev_priv, true);
2997 /* Disable power support if the user asked so. */ 2998 /* Disable power support if the user asked so. */
2998 if (!i915.disable_power_well) 2999 if (!i915_modparams.disable_power_well)
2999 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); 3000 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
3000 intel_power_domains_sync_hw(dev_priv); 3001 intel_power_domains_sync_hw(dev_priv);
3001 power_domains->initializing = false; 3002 power_domains->initializing = false;
@@ -3014,7 +3015,7 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
3014 * Even if power well support was disabled we still want to disable 3015 * Even if power well support was disabled we still want to disable
3015 * power wells while we are system suspended. 3016 * power wells while we are system suspended.
3016 */ 3017 */
3017 if (!i915.disable_power_well) 3018 if (!i915_modparams.disable_power_well)
3018 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); 3019 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
3019 3020
3020 if (IS_CANNONLAKE(dev_priv)) 3021 if (IS_CANNONLAKE(dev_priv))
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index b0d6e3e28d07..28a1209d87e2 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -994,7 +994,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
994 set->flags & I915_SET_COLORKEY_DESTINATION) 994 set->flags & I915_SET_COLORKEY_DESTINATION)
995 return -EINVAL; 995 return -EINVAL;
996 996
997 plane = drm_plane_find(dev, set->plane_id); 997 plane = drm_plane_find(dev, file_priv, set->plane_id);
998 if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) 998 if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY)
999 return -ENOENT; 999 return -ENOENT;
1000 1000
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 0cc999fa09c5..a79a7591b2cf 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1385,7 +1385,7 @@ intel_tv_get_modes(struct drm_connector *connector)
1385 mode_ptr->vsync_end = mode_ptr->vsync_start + 1; 1385 mode_ptr->vsync_end = mode_ptr->vsync_start + 1;
1386 mode_ptr->vtotal = vactive_s + 33; 1386 mode_ptr->vtotal = vactive_s + 33;
1387 1387
1388 tmp = (u64) tv_mode->refresh * mode_ptr->vtotal; 1388 tmp = mul_u32_u32(tv_mode->refresh, mode_ptr->vtotal);
1389 tmp *= mode_ptr->htotal; 1389 tmp *= mode_ptr->htotal;
1390 tmp = div_u64(tmp, 1000000); 1390 tmp = div_u64(tmp, 1000000);
1391 mode_ptr->clock = (int) tmp; 1391 mode_ptr->clock = (int) tmp;
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index 0178ba42a0e5..277477890240 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -63,35 +63,35 @@ static int __intel_uc_reset_hw(struct drm_i915_private *dev_priv)
63void intel_uc_sanitize_options(struct drm_i915_private *dev_priv) 63void intel_uc_sanitize_options(struct drm_i915_private *dev_priv)
64{ 64{
65 if (!HAS_GUC(dev_priv)) { 65 if (!HAS_GUC(dev_priv)) {
66 if (i915.enable_guc_loading > 0 || 66 if (i915_modparams.enable_guc_loading > 0 ||
67 i915.enable_guc_submission > 0) 67 i915_modparams.enable_guc_submission > 0)
68 DRM_INFO("Ignoring GuC options, no hardware\n"); 68 DRM_INFO("Ignoring GuC options, no hardware\n");
69 69
70 i915.enable_guc_loading = 0; 70 i915_modparams.enable_guc_loading = 0;
71 i915.enable_guc_submission = 0; 71 i915_modparams.enable_guc_submission = 0;
72 return; 72 return;
73 } 73 }
74 74
75 /* A negative value means "use platform default" */ 75 /* A negative value means "use platform default" */
76 if (i915.enable_guc_loading < 0) 76 if (i915_modparams.enable_guc_loading < 0)
77 i915.enable_guc_loading = HAS_GUC_UCODE(dev_priv); 77 i915_modparams.enable_guc_loading = HAS_GUC_UCODE(dev_priv);
78 78
79 /* Verify firmware version */ 79 /* Verify firmware version */
80 if (i915.enable_guc_loading) { 80 if (i915_modparams.enable_guc_loading) {
81 if (HAS_HUC_UCODE(dev_priv)) 81 if (HAS_HUC_UCODE(dev_priv))
82 intel_huc_select_fw(&dev_priv->huc); 82 intel_huc_select_fw(&dev_priv->huc);
83 83
84 if (intel_guc_select_fw(&dev_priv->guc)) 84 if (intel_guc_select_fw(&dev_priv->guc))
85 i915.enable_guc_loading = 0; 85 i915_modparams.enable_guc_loading = 0;
86 } 86 }
87 87
88 /* Can't enable guc submission without guc loaded */ 88 /* Can't enable guc submission without guc loaded */
89 if (!i915.enable_guc_loading) 89 if (!i915_modparams.enable_guc_loading)
90 i915.enable_guc_submission = 0; 90 i915_modparams.enable_guc_submission = 0;
91 91
92 /* A negative value means "use platform default" */ 92 /* A negative value means "use platform default" */
93 if (i915.enable_guc_submission < 0) 93 if (i915_modparams.enable_guc_submission < 0)
94 i915.enable_guc_submission = HAS_GUC_SCHED(dev_priv); 94 i915_modparams.enable_guc_submission = HAS_GUC_SCHED(dev_priv);
95} 95}
96 96
97static void gen8_guc_raise_irq(struct intel_guc *guc) 97static void gen8_guc_raise_irq(struct intel_guc *guc)
@@ -290,7 +290,7 @@ static void guc_init_send_regs(struct intel_guc *guc)
290 290
291static void guc_capture_load_err_log(struct intel_guc *guc) 291static void guc_capture_load_err_log(struct intel_guc *guc)
292{ 292{
293 if (!guc->log.vma || i915.guc_log_level < 0) 293 if (!guc->log.vma || i915_modparams.guc_log_level < 0)
294 return; 294 return;
295 295
296 if (!guc->load_err_log) 296 if (!guc->load_err_log)
@@ -328,12 +328,33 @@ static void guc_disable_communication(struct intel_guc *guc)
328 guc->send = intel_guc_send_nop; 328 guc->send = intel_guc_send_nop;
329} 329}
330 330
331/**
332 * intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode
333 * @guc: intel_guc structure
334 * @rsa_offset: rsa offset w.r.t ggtt base of huc vma
335 *
336 * Triggers a HuC firmware authentication request to the GuC via intel_guc_send
337 * INTEL_GUC_ACTION_AUTHENTICATE_HUC interface. This function is invoked by
338 * intel_huc_auth().
339 *
340 * Return: non-zero code on error
341 */
342int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
343{
344 u32 action[] = {
345 INTEL_GUC_ACTION_AUTHENTICATE_HUC,
346 rsa_offset
347 };
348
349 return intel_guc_send(guc, action, ARRAY_SIZE(action));
350}
351
331int intel_uc_init_hw(struct drm_i915_private *dev_priv) 352int intel_uc_init_hw(struct drm_i915_private *dev_priv)
332{ 353{
333 struct intel_guc *guc = &dev_priv->guc; 354 struct intel_guc *guc = &dev_priv->guc;
334 int ret, attempts; 355 int ret, attempts;
335 356
336 if (!i915.enable_guc_loading) 357 if (!i915_modparams.enable_guc_loading)
337 return 0; 358 return 0;
338 359
339 guc_disable_communication(guc); 360 guc_disable_communication(guc);
@@ -342,7 +363,7 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
342 /* We need to notify the guc whenever we change the GGTT */ 363 /* We need to notify the guc whenever we change the GGTT */
343 i915_ggtt_enable_guc(dev_priv); 364 i915_ggtt_enable_guc(dev_priv);
344 365
345 if (i915.enable_guc_submission) { 366 if (i915_modparams.enable_guc_submission) {
346 /* 367 /*
347 * This is stuff we need to have available at fw load time 368 * This is stuff we need to have available at fw load time
348 * if we are planning to enable submission later 369 * if we are planning to enable submission later
@@ -390,9 +411,9 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
390 if (ret) 411 if (ret)
391 goto err_log_capture; 412 goto err_log_capture;
392 413
393 intel_guc_auth_huc(dev_priv); 414 intel_huc_auth(&dev_priv->huc);
394 if (i915.enable_guc_submission) { 415 if (i915_modparams.enable_guc_submission) {
395 if (i915.guc_log_level >= 0) 416 if (i915_modparams.guc_log_level >= 0)
396 gen9_enable_guc_interrupts(dev_priv); 417 gen9_enable_guc_interrupts(dev_priv);
397 418
398 ret = i915_guc_submission_enable(dev_priv); 419 ret = i915_guc_submission_enable(dev_priv);
@@ -417,23 +438,24 @@ err_interrupts:
417err_log_capture: 438err_log_capture:
418 guc_capture_load_err_log(guc); 439 guc_capture_load_err_log(guc);
419err_submission: 440err_submission:
420 if (i915.enable_guc_submission) 441 if (i915_modparams.enable_guc_submission)
421 i915_guc_submission_fini(dev_priv); 442 i915_guc_submission_fini(dev_priv);
422err_guc: 443err_guc:
423 i915_ggtt_disable_guc(dev_priv); 444 i915_ggtt_disable_guc(dev_priv);
424 445
425 DRM_ERROR("GuC init failed\n"); 446 DRM_ERROR("GuC init failed\n");
426 if (i915.enable_guc_loading > 1 || i915.enable_guc_submission > 1) 447 if (i915_modparams.enable_guc_loading > 1 ||
448 i915_modparams.enable_guc_submission > 1)
427 ret = -EIO; 449 ret = -EIO;
428 else 450 else
429 ret = 0; 451 ret = 0;
430 452
431 if (i915.enable_guc_submission) { 453 if (i915_modparams.enable_guc_submission) {
432 i915.enable_guc_submission = 0; 454 i915_modparams.enable_guc_submission = 0;
433 DRM_NOTE("Falling back from GuC submission to execlist mode\n"); 455 DRM_NOTE("Falling back from GuC submission to execlist mode\n");
434 } 456 }
435 457
436 i915.enable_guc_loading = 0; 458 i915_modparams.enable_guc_loading = 0;
437 DRM_NOTE("GuC firmware loading disabled\n"); 459 DRM_NOTE("GuC firmware loading disabled\n");
438 460
439 return ret; 461 return ret;
@@ -443,15 +465,15 @@ void intel_uc_fini_hw(struct drm_i915_private *dev_priv)
443{ 465{
444 guc_free_load_err_log(&dev_priv->guc); 466 guc_free_load_err_log(&dev_priv->guc);
445 467
446 if (!i915.enable_guc_loading) 468 if (!i915_modparams.enable_guc_loading)
447 return; 469 return;
448 470
449 if (i915.enable_guc_submission) 471 if (i915_modparams.enable_guc_submission)
450 i915_guc_submission_disable(dev_priv); 472 i915_guc_submission_disable(dev_priv);
451 473
452 guc_disable_communication(&dev_priv->guc); 474 guc_disable_communication(&dev_priv->guc);
453 475
454 if (i915.enable_guc_submission) { 476 if (i915_modparams.enable_guc_submission) {
455 gen9_disable_guc_interrupts(dev_priv); 477 gen9_disable_guc_interrupts(dev_priv);
456 i915_guc_submission_fini(dev_priv); 478 i915_guc_submission_fini(dev_priv);
457 } 479 }
diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h
index 22ae52b17b0f..6966349ed737 100644
--- a/drivers/gpu/drm/i915/intel_uc.h
+++ b/drivers/gpu/drm/i915/intel_uc.h
@@ -52,17 +52,6 @@ struct drm_i915_gem_request;
52 * GuC). The subsequent pages of the client object constitute the work 52 * GuC). The subsequent pages of the client object constitute the work
53 * queue (a circular array of work items), again described in the process 53 * queue (a circular array of work items), again described in the process
54 * descriptor. Work queue pages are mapped momentarily as required. 54 * descriptor. Work queue pages are mapped momentarily as required.
55 *
56 * We also keep a few statistics on failures. Ideally, these should all
57 * be zero!
58 * no_wq_space: times that the submission pre-check found no space was
59 * available in the work queue (note, the queue is shared,
60 * not per-engine). It is OK for this to be nonzero, but
61 * it should not be huge!
62 * b_fail: failed to ring the doorbell. This should never happen, unless
63 * somehow the hardware misbehaves, or maybe if the GuC firmware
64 * crashes? We probably need to reset the GPU to recover.
65 * retcode: errno from last guc_submit()
66 */ 55 */
67struct i915_guc_client { 56struct i915_guc_client {
68 struct i915_vma *vma; 57 struct i915_vma *vma;
@@ -77,15 +66,8 @@ struct i915_guc_client {
77 66
78 u16 doorbell_id; 67 u16 doorbell_id;
79 unsigned long doorbell_offset; 68 unsigned long doorbell_offset;
80 u32 doorbell_cookie;
81 69
82 spinlock_t wq_lock; 70 spinlock_t wq_lock;
83 uint32_t wq_offset;
84 uint32_t wq_size;
85 uint32_t wq_tail;
86 uint32_t wq_rsvd;
87 uint32_t no_wq_space;
88
89 /* Per-engine counts of GuC submissions */ 71 /* Per-engine counts of GuC submissions */
90 uint64_t submissions[I915_NUM_ENGINES]; 72 uint64_t submissions[I915_NUM_ENGINES];
91}; 73};
@@ -229,6 +211,7 @@ void intel_uc_fini_hw(struct drm_i915_private *dev_priv);
229int intel_guc_sample_forcewake(struct intel_guc *guc); 211int intel_guc_sample_forcewake(struct intel_guc *guc);
230int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len); 212int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len);
231int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len); 213int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len);
214int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
232 215
233static inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len) 216static inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
234{ 217{
@@ -250,8 +233,6 @@ u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv);
250/* i915_guc_submission.c */ 233/* i915_guc_submission.c */
251int i915_guc_submission_init(struct drm_i915_private *dev_priv); 234int i915_guc_submission_init(struct drm_i915_private *dev_priv);
252int i915_guc_submission_enable(struct drm_i915_private *dev_priv); 235int i915_guc_submission_enable(struct drm_i915_private *dev_priv);
253int i915_guc_wq_reserve(struct drm_i915_gem_request *rq);
254void i915_guc_wq_unreserve(struct drm_i915_gem_request *request);
255void i915_guc_submission_disable(struct drm_i915_private *dev_priv); 236void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
256void i915_guc_submission_fini(struct drm_i915_private *dev_priv); 237void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
257struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size); 238struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
@@ -274,6 +255,6 @@ static inline u32 guc_ggtt_offset(struct i915_vma *vma)
274/* intel_huc.c */ 255/* intel_huc.c */
275void intel_huc_select_fw(struct intel_huc *huc); 256void intel_huc_select_fw(struct intel_huc *huc);
276void intel_huc_init_hw(struct intel_huc *huc); 257void intel_huc_init_hw(struct intel_huc *huc);
277void intel_guc_auth_huc(struct drm_i915_private *dev_priv); 258void intel_huc_auth(struct intel_huc *huc);
278 259
279#endif 260#endif
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 0529af7cfbb8..b3c3f94fc7e4 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -436,7 +436,8 @@ void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
436 436
437void intel_uncore_sanitize(struct drm_i915_private *dev_priv) 437void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
438{ 438{
439 i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6); 439 i915_modparams.enable_rc6 =
440 sanitize_rc6_option(dev_priv, i915_modparams.enable_rc6);
440 441
441 /* BIOS often leaves RC6 enabled, but disable it for hw init */ 442 /* BIOS often leaves RC6 enabled, but disable it for hw init */
442 intel_sanitize_gt_powersave(dev_priv); 443 intel_sanitize_gt_powersave(dev_priv);
@@ -490,6 +491,57 @@ void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
490} 491}
491 492
492/** 493/**
494 * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
495 * @dev_priv: i915 device instance
496 *
497 * This function is a wrapper around intel_uncore_forcewake_get() to acquire
498 * the GT powerwell and in the process disable our debugging for the
499 * duration of userspace's bypass.
500 */
501void intel_uncore_forcewake_user_get(struct drm_i915_private *dev_priv)
502{
503 spin_lock_irq(&dev_priv->uncore.lock);
504 if (!dev_priv->uncore.user_forcewake.count++) {
505 intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
506
507 /* Save and disable mmio debugging for the user bypass */
508 dev_priv->uncore.user_forcewake.saved_mmio_check =
509 dev_priv->uncore.unclaimed_mmio_check;
510 dev_priv->uncore.user_forcewake.saved_mmio_debug =
511 i915_modparams.mmio_debug;
512
513 dev_priv->uncore.unclaimed_mmio_check = 0;
514 i915_modparams.mmio_debug = 0;
515 }
516 spin_unlock_irq(&dev_priv->uncore.lock);
517}
518
519/**
520 * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
521 * @dev_priv: i915 device instance
522 *
523 * This function complements intel_uncore_forcewake_user_get() and releases
524 * the GT powerwell taken on behalf of the userspace bypass.
525 */
526void intel_uncore_forcewake_user_put(struct drm_i915_private *dev_priv)
527{
528 spin_lock_irq(&dev_priv->uncore.lock);
529 if (!--dev_priv->uncore.user_forcewake.count) {
530 if (intel_uncore_unclaimed_mmio(dev_priv))
531 dev_info(dev_priv->drm.dev,
532 "Invalid mmio detected during user access\n");
533
534 dev_priv->uncore.unclaimed_mmio_check =
535 dev_priv->uncore.user_forcewake.saved_mmio_check;
536 i915_modparams.mmio_debug =
537 dev_priv->uncore.user_forcewake.saved_mmio_debug;
538
539 intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
540 }
541 spin_unlock_irq(&dev_priv->uncore.lock);
542}
543
544/**
493 * intel_uncore_forcewake_get__locked - grab forcewake domain references 545 * intel_uncore_forcewake_get__locked - grab forcewake domain references
494 * @dev_priv: i915 device instance 546 * @dev_priv: i915 device instance
495 * @fw_domains: forcewake domains to get reference on 547 * @fw_domains: forcewake domains to get reference on
@@ -790,7 +842,8 @@ __unclaimed_reg_debug(struct drm_i915_private *dev_priv,
790 "Unclaimed %s register 0x%x\n", 842 "Unclaimed %s register 0x%x\n",
791 read ? "read from" : "write to", 843 read ? "read from" : "write to",
792 i915_mmio_reg_offset(reg))) 844 i915_mmio_reg_offset(reg)))
793 i915.mmio_debug--; /* Only report the first N failures */ 845 /* Only report the first N failures */
846 i915_modparams.mmio_debug--;
794} 847}
795 848
796static inline void 849static inline void
@@ -799,7 +852,7 @@ unclaimed_reg_debug(struct drm_i915_private *dev_priv,
799 const bool read, 852 const bool read,
800 const bool before) 853 const bool before)
801{ 854{
802 if (likely(!i915.mmio_debug)) 855 if (likely(!i915_modparams.mmio_debug))
803 return; 856 return;
804 857
805 __unclaimed_reg_debug(dev_priv, reg, read, before); 858 __unclaimed_reg_debug(dev_priv, reg, read, before);
@@ -1241,102 +1294,101 @@ void intel_uncore_fini(struct drm_i915_private *dev_priv)
1241 intel_uncore_forcewake_reset(dev_priv, false); 1294 intel_uncore_forcewake_reset(dev_priv, false);
1242} 1295}
1243 1296
1244#define GEN_RANGE(l, h) GENMASK((h) - 1, (l) - 1) 1297static const struct reg_whitelist {
1245 1298 i915_reg_t offset_ldw;
1246static const struct register_whitelist { 1299 i915_reg_t offset_udw;
1247 i915_reg_t offset_ldw, offset_udw; 1300 u16 gen_mask;
1248 uint32_t size; 1301 u8 size;
1249 /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */ 1302} reg_read_whitelist[] = { {
1250 uint32_t gen_bitmask; 1303 .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
1251} whitelist[] = { 1304 .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
1252 { .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE), 1305 .gen_mask = INTEL_GEN_MASK(4, 10),
1253 .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE), 1306 .size = 8
1254 .size = 8, .gen_bitmask = GEN_RANGE(4, 10) }, 1307} };
1255};
1256 1308
1257int i915_reg_read_ioctl(struct drm_device *dev, 1309int i915_reg_read_ioctl(struct drm_device *dev,
1258 void *data, struct drm_file *file) 1310 void *data, struct drm_file *file)
1259{ 1311{
1260 struct drm_i915_private *dev_priv = to_i915(dev); 1312 struct drm_i915_private *dev_priv = to_i915(dev);
1261 struct drm_i915_reg_read *reg = data; 1313 struct drm_i915_reg_read *reg = data;
1262 struct register_whitelist const *entry = whitelist; 1314 struct reg_whitelist const *entry;
1263 unsigned size; 1315 unsigned int flags;
1264 i915_reg_t offset_ldw, offset_udw; 1316 int remain;
1265 int i, ret = 0; 1317 int ret = 0;
1266 1318
1267 for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) { 1319 entry = reg_read_whitelist;
1268 if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) && 1320 remain = ARRAY_SIZE(reg_read_whitelist);
1269 (INTEL_INFO(dev_priv)->gen_mask & entry->gen_bitmask)) 1321 while (remain) {
1322 u32 entry_offset = i915_mmio_reg_offset(entry->offset_ldw);
1323
1324 GEM_BUG_ON(!is_power_of_2(entry->size));
1325 GEM_BUG_ON(entry->size > 8);
1326 GEM_BUG_ON(entry_offset & (entry->size - 1));
1327
1328 if (INTEL_INFO(dev_priv)->gen_mask & entry->gen_mask &&
1329 entry_offset == (reg->offset & -entry->size))
1270 break; 1330 break;
1331 entry++;
1332 remain--;
1271 } 1333 }
1272 1334
1273 if (i == ARRAY_SIZE(whitelist)) 1335 if (!remain)
1274 return -EINVAL; 1336 return -EINVAL;
1275 1337
1276 /* We use the low bits to encode extra flags as the register should 1338 flags = reg->offset & (entry->size - 1);
1277 * be naturally aligned (and those that are not so aligned merely
1278 * limit the available flags for that register).
1279 */
1280 offset_ldw = entry->offset_ldw;
1281 offset_udw = entry->offset_udw;
1282 size = entry->size;
1283 size |= reg->offset ^ i915_mmio_reg_offset(offset_ldw);
1284 1339
1285 intel_runtime_pm_get(dev_priv); 1340 intel_runtime_pm_get(dev_priv);
1286 1341 if (entry->size == 8 && flags == I915_REG_READ_8B_WA)
1287 switch (size) { 1342 reg->val = I915_READ64_2x32(entry->offset_ldw,
1288 case 8 | 1: 1343 entry->offset_udw);
1289 reg->val = I915_READ64_2x32(offset_ldw, offset_udw); 1344 else if (entry->size == 8 && flags == 0)
1290 break; 1345 reg->val = I915_READ64(entry->offset_ldw);
1291 case 8: 1346 else if (entry->size == 4 && flags == 0)
1292 reg->val = I915_READ64(offset_ldw); 1347 reg->val = I915_READ(entry->offset_ldw);
1293 break; 1348 else if (entry->size == 2 && flags == 0)
1294 case 4: 1349 reg->val = I915_READ16(entry->offset_ldw);
1295 reg->val = I915_READ(offset_ldw); 1350 else if (entry->size == 1 && flags == 0)
1296 break; 1351 reg->val = I915_READ8(entry->offset_ldw);
1297 case 2: 1352 else
1298 reg->val = I915_READ16(offset_ldw);
1299 break;
1300 case 1:
1301 reg->val = I915_READ8(offset_ldw);
1302 break;
1303 default:
1304 ret = -EINVAL; 1353 ret = -EINVAL;
1305 goto out;
1306 }
1307
1308out:
1309 intel_runtime_pm_put(dev_priv); 1354 intel_runtime_pm_put(dev_priv);
1355
1310 return ret; 1356 return ret;
1311} 1357}
1312 1358
1313static void gen3_stop_rings(struct drm_i915_private *dev_priv) 1359static void gen3_stop_engine(struct intel_engine_cs *engine)
1360{
1361 struct drm_i915_private *dev_priv = engine->i915;
1362 const u32 base = engine->mmio_base;
1363 const i915_reg_t mode = RING_MI_MODE(base);
1364
1365 I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING));
1366 if (intel_wait_for_register_fw(dev_priv,
1367 mode,
1368 MODE_IDLE,
1369 MODE_IDLE,
1370 500))
1371 DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n",
1372 engine->name);
1373
1374 I915_WRITE_FW(RING_CTL(base), 0);
1375 I915_WRITE_FW(RING_HEAD(base), 0);
1376 I915_WRITE_FW(RING_TAIL(base), 0);
1377
1378 /* Check acts as a post */
1379 if (I915_READ_FW(RING_HEAD(base)) != 0)
1380 DRM_DEBUG_DRIVER("%s: ring head not parked\n",
1381 engine->name);
1382}
1383
1384static void i915_stop_engines(struct drm_i915_private *dev_priv,
1385 unsigned engine_mask)
1314{ 1386{
1315 struct intel_engine_cs *engine; 1387 struct intel_engine_cs *engine;
1316 enum intel_engine_id id; 1388 enum intel_engine_id id;
1317 1389
1318 for_each_engine(engine, dev_priv, id) { 1390 for_each_engine_masked(engine, dev_priv, engine_mask, id)
1319 const u32 base = engine->mmio_base; 1391 gen3_stop_engine(engine);
1320 const i915_reg_t mode = RING_MI_MODE(base);
1321
1322 I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING));
1323 if (intel_wait_for_register_fw(dev_priv,
1324 mode,
1325 MODE_IDLE,
1326 MODE_IDLE,
1327 500))
1328 DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n",
1329 engine->name);
1330
1331 I915_WRITE_FW(RING_CTL(base), 0);
1332 I915_WRITE_FW(RING_HEAD(base), 0);
1333 I915_WRITE_FW(RING_TAIL(base), 0);
1334
1335 /* Check acts as a post */
1336 if (I915_READ_FW(RING_HEAD(base)) != 0)
1337 DRM_DEBUG_DRIVER("%s: ring head not parked\n",
1338 engine->name);
1339 }
1340} 1392}
1341 1393
1342static bool i915_reset_complete(struct pci_dev *pdev) 1394static bool i915_reset_complete(struct pci_dev *pdev)
@@ -1371,9 +1423,6 @@ static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
1371{ 1423{
1372 struct pci_dev *pdev = dev_priv->drm.pdev; 1424 struct pci_dev *pdev = dev_priv->drm.pdev;
1373 1425
1374 /* Stop engines before we reset; see g4x_do_reset() below for why. */
1375 gen3_stop_rings(dev_priv);
1376
1377 pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE); 1426 pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
1378 return wait_for(g4x_reset_complete(pdev), 500); 1427 return wait_for(g4x_reset_complete(pdev), 500);
1379} 1428}
@@ -1388,12 +1437,6 @@ static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
1388 I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE); 1437 I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
1389 POSTING_READ(VDECCLK_GATE_D); 1438 POSTING_READ(VDECCLK_GATE_D);
1390 1439
1391 /* We stop engines, otherwise we might get failed reset and a
1392 * dead gpu (on elk).
1393 * WaMediaResetMainRingCleanup:ctg,elk (presumably)
1394 */
1395 gen3_stop_rings(dev_priv);
1396
1397 pci_write_config_byte(pdev, I915_GDRST, 1440 pci_write_config_byte(pdev, I915_GDRST,
1398 GRDOM_MEDIA | GRDOM_RESET_ENABLE); 1441 GRDOM_MEDIA | GRDOM_RESET_ENABLE);
1399 ret = wait_for(g4x_reset_complete(pdev), 500); 1442 ret = wait_for(g4x_reset_complete(pdev), 500);
@@ -1662,7 +1705,7 @@ typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask);
1662 1705
1663static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv) 1706static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
1664{ 1707{
1665 if (!i915.reset) 1708 if (!i915_modparams.reset)
1666 return NULL; 1709 return NULL;
1667 1710
1668 if (INTEL_INFO(dev_priv)->gen >= 8) 1711 if (INTEL_INFO(dev_priv)->gen >= 8)
@@ -1698,6 +1741,20 @@ int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
1698 */ 1741 */
1699 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 1742 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1700 for (retry = 0; retry < 3; retry++) { 1743 for (retry = 0; retry < 3; retry++) {
1744
1745 /* We stop engines, otherwise we might get failed reset and a
1746 * dead gpu (on elk). Also as modern gpu as kbl can suffer
1747 * from system hang if batchbuffer is progressing when
1748 * the reset is issued, regardless of READY_TO_RESET ack.
1749 * Thus assume it is best to stop engines on all gens
1750 * where we have a gpu reset.
1751 *
1752 * WaMediaResetMainRingCleanup:ctg,elk (presumably)
1753 *
1754 * FIXME: Wa for more modern gens needs to be validated
1755 */
1756 i915_stop_engines(dev_priv, engine_mask);
1757
1701 ret = reset(dev_priv, engine_mask); 1758 ret = reset(dev_priv, engine_mask);
1702 if (ret != -ETIMEDOUT) 1759 if (ret != -ETIMEDOUT)
1703 break; 1760 break;
@@ -1722,7 +1779,7 @@ bool intel_has_reset_engine(struct drm_i915_private *dev_priv)
1722{ 1779{
1723 return (dev_priv->info.has_reset_engine && 1780 return (dev_priv->info.has_reset_engine &&
1724 !dev_priv->guc.execbuf_client && 1781 !dev_priv->guc.execbuf_client &&
1725 i915.reset >= 2); 1782 i915_modparams.reset >= 2);
1726} 1783}
1727 1784
1728int intel_guc_reset(struct drm_i915_private *dev_priv) 1785int intel_guc_reset(struct drm_i915_private *dev_priv)
@@ -1747,7 +1804,7 @@ bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
1747bool 1804bool
1748intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv) 1805intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
1749{ 1806{
1750 if (unlikely(i915.mmio_debug || 1807 if (unlikely(i915_modparams.mmio_debug ||
1751 dev_priv->uncore.unclaimed_mmio_check <= 0)) 1808 dev_priv->uncore.unclaimed_mmio_check <= 0))
1752 return false; 1809 return false;
1753 1810
@@ -1755,7 +1812,7 @@ intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
1755 DRM_DEBUG("Unclaimed register detected, " 1812 DRM_DEBUG("Unclaimed register detected, "
1756 "enabling oneshot unclaimed register reporting. " 1813 "enabling oneshot unclaimed register reporting. "
1757 "Please use i915.mmio_debug=N for more information.\n"); 1814 "Please use i915.mmio_debug=N for more information.\n");
1758 i915.mmio_debug++; 1815 i915_modparams.mmio_debug++;
1759 dev_priv->uncore.unclaimed_mmio_check--; 1816 dev_priv->uncore.unclaimed_mmio_check--;
1760 return true; 1817 return true;
1761 } 1818 }
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index 5f90278da461..03786f931905 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -102,6 +102,13 @@ struct intel_uncore {
102 i915_reg_t reg_ack; 102 i915_reg_t reg_ack;
103 } fw_domain[FW_DOMAIN_ID_COUNT]; 103 } fw_domain[FW_DOMAIN_ID_COUNT];
104 104
105 struct {
106 unsigned int count;
107
108 int saved_mmio_check;
109 int saved_mmio_debug;
110 } user_forcewake;
111
105 int unclaimed_mmio_check; 112 int unclaimed_mmio_check;
106}; 113};
107 114
@@ -144,6 +151,9 @@ void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
144void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv, 151void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
145 enum forcewake_domains domains); 152 enum forcewake_domains domains);
146 153
154void intel_uncore_forcewake_user_get(struct drm_i915_private *dev_priv);
155void intel_uncore_forcewake_user_put(struct drm_i915_private *dev_priv);
156
147int intel_wait_for_register(struct drm_i915_private *dev_priv, 157int intel_wait_for_register(struct drm_i915_private *dev_priv,
148 i915_reg_t reg, 158 i915_reg_t reg,
149 u32 mask, 159 u32 mask,
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c b/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c
index 7a44dab631b8..4795877abe56 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c
@@ -121,7 +121,7 @@ out:
121 121
122static unsigned int random_engine(struct rnd_state *rnd) 122static unsigned int random_engine(struct rnd_state *rnd)
123{ 123{
124 return ((u64)prandom_u32_state(rnd) * I915_NUM_ENGINES) >> 32; 124 return i915_prandom_u32_max_state(I915_NUM_ENGINES, rnd);
125} 125}
126 126
127static int bench_sync(void *arg) 127static int bench_sync(void *arg)
diff --git a/drivers/gpu/drm/i915/selftests/i915_random.c b/drivers/gpu/drm/i915/selftests/i915_random.c
index 222c511bea49..b85872cc7fbe 100644
--- a/drivers/gpu/drm/i915/selftests/i915_random.c
+++ b/drivers/gpu/drm/i915/selftests/i915_random.c
@@ -41,11 +41,6 @@ u64 i915_prandom_u64_state(struct rnd_state *rnd)
41 return x; 41 return x;
42} 42}
43 43
44static inline u32 i915_prandom_u32_max_state(u32 ep_ro, struct rnd_state *state)
45{
46 return upper_32_bits((u64)prandom_u32_state(state) * ep_ro);
47}
48
49void i915_random_reorder(unsigned int *order, unsigned int count, 44void i915_random_reorder(unsigned int *order, unsigned int count,
50 struct rnd_state *state) 45 struct rnd_state *state)
51{ 46{
diff --git a/drivers/gpu/drm/i915/selftests/i915_random.h b/drivers/gpu/drm/i915/selftests/i915_random.h
index 6c9379871384..7dffedc501ca 100644
--- a/drivers/gpu/drm/i915/selftests/i915_random.h
+++ b/drivers/gpu/drm/i915/selftests/i915_random.h
@@ -43,6 +43,11 @@
43 43
44u64 i915_prandom_u64_state(struct rnd_state *rnd); 44u64 i915_prandom_u64_state(struct rnd_state *rnd);
45 45
46static inline u32 i915_prandom_u32_max_state(u32 ep_ro, struct rnd_state *state)
47{
48 return upper_32_bits(mul_u32_u32(prandom_u32_state(state), ep_ro));
49}
50
46unsigned int *i915_random_order(unsigned int count, 51unsigned int *i915_random_order(unsigned int count,
47 struct rnd_state *state); 52 struct rnd_state *state);
48void i915_random_reorder(unsigned int *order, 53void i915_random_reorder(unsigned int *order,
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index 02e52a146ed8..377c1de766ce 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -621,7 +621,12 @@ static int igt_wait_reset(void *arg)
621 __i915_add_request(rq, true); 621 __i915_add_request(rq, true);
622 622
623 if (!wait_for_hang(&h, rq)) { 623 if (!wait_for_hang(&h, rq)) {
624 pr_err("Failed to start request %x\n", rq->fence.seqno); 624 pr_err("Failed to start request %x, at %x\n",
625 rq->fence.seqno, hws_seqno(&h, rq));
626
627 i915_reset(i915, 0);
628 i915_gem_set_wedged(i915);
629
625 err = -EIO; 630 err = -EIO;
626 goto out_rq; 631 goto out_rq;
627 } 632 }
@@ -708,10 +713,14 @@ static int igt_reset_queue(void *arg)
708 __i915_add_request(rq, true); 713 __i915_add_request(rq, true);
709 714
710 if (!wait_for_hang(&h, prev)) { 715 if (!wait_for_hang(&h, prev)) {
711 pr_err("Failed to start request %x\n", 716 pr_err("Failed to start request %x, at %x\n",
712 prev->fence.seqno); 717 prev->fence.seqno, hws_seqno(&h, prev));
713 i915_gem_request_put(rq); 718 i915_gem_request_put(rq);
714 i915_gem_request_put(prev); 719 i915_gem_request_put(prev);
720
721 i915_reset(i915, 0);
722 i915_gem_set_wedged(i915);
723
715 err = -EIO; 724 err = -EIO;
716 goto fini; 725 goto fini;
717 } 726 }
@@ -806,7 +815,12 @@ static int igt_handle_error(void *arg)
806 __i915_add_request(rq, true); 815 __i915_add_request(rq, true);
807 816
808 if (!wait_for_hang(&h, rq)) { 817 if (!wait_for_hang(&h, rq)) {
809 pr_err("Failed to start request %x\n", rq->fence.seqno); 818 pr_err("Failed to start request %x, at %x\n",
819 rq->fence.seqno, hws_seqno(&h, rq));
820
821 i915_reset(i915, 0);
822 i915_gem_set_wedged(i915);
823
810 err = -EIO; 824 err = -EIO;
811 goto err_request; 825 goto err_request;
812 } 826 }
@@ -843,8 +857,8 @@ err_unlock:
843int intel_hangcheck_live_selftests(struct drm_i915_private *i915) 857int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
844{ 858{
845 static const struct i915_subtest tests[] = { 859 static const struct i915_subtest tests[] = {
860 SUBTEST(igt_global_reset), /* attempt to recover GPU first */
846 SUBTEST(igt_hang_sanitycheck), 861 SUBTEST(igt_hang_sanitycheck),
847 SUBTEST(igt_global_reset),
848 SUBTEST(igt_reset_engine), 862 SUBTEST(igt_reset_engine),
849 SUBTEST(igt_reset_active_engines), 863 SUBTEST(igt_reset_active_engines),
850 SUBTEST(igt_wait_reset), 864 SUBTEST(igt_wait_reset),
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 678723430d78..2388424a14da 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -146,6 +146,11 @@ struct drm_i915_private *mock_gem_device(void)
146 dev_set_name(&pdev->dev, "mock"); 146 dev_set_name(&pdev->dev, "mock");
147 dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 147 dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
148 148
149#if IS_ENABLED(CONFIG_IOMMU_API)
150 /* hack to disable iommu for the fake device; force identity mapping */
151 pdev->dev.archdata.iommu = (void *)-1;
152#endif
153
149 dev_pm_domain_set(&pdev->dev, &pm_domain); 154 dev_pm_domain_set(&pdev->dev, &pm_domain);
150 pm_runtime_enable(&pdev->dev); 155 pm_runtime_enable(&pdev->dev);
151 pm_runtime_dont_use_autosuspend(&pdev->dev); 156 pm_runtime_dont_use_autosuspend(&pdev->dev);
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 5e9cd4c0e8b6..68e5d9c94475 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1670,7 +1670,7 @@ static struct drm_encoder *mga_connector_best_encoder(struct drm_connector
1670 int enc_id = connector->encoder_ids[0]; 1670 int enc_id = connector->encoder_ids[0];
1671 /* pick the encoder ids */ 1671 /* pick the encoder ids */
1672 if (enc_id) 1672 if (enc_id)
1673 return drm_encoder_find(connector->dev, enc_id); 1673 return drm_encoder_find(connector->dev, NULL, enc_id);
1674 return NULL; 1674 return NULL;
1675} 1675}
1676 1676
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index f7c0698fec40..7e829a8d1cb1 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -599,7 +599,7 @@ static u32 mdp5_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
599 struct drm_crtc *crtc; 599 struct drm_crtc *crtc;
600 struct drm_encoder *encoder; 600 struct drm_encoder *encoder;
601 601
602 if (pipe < 0 || pipe >= priv->num_crtcs) 602 if (pipe >= priv->num_crtcs)
603 return 0; 603 return 0;
604 604
605 crtc = priv->crtcs[pipe]; 605 crtc = priv->crtcs[pipe];
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 70d8e0d69ad5..69d6e61a01ec 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -373,7 +373,7 @@ find_encoder(struct drm_connector *connector, int type)
373 if (!id) 373 if (!id)
374 break; 374 break;
375 375
376 enc = drm_encoder_find(dev, id); 376 enc = drm_encoder_find(dev, NULL, id);
377 if (!enc) 377 if (!enc)
378 continue; 378 continue;
379 nv_encoder = nouveau_encoder(enc); 379 nv_encoder = nouveau_encoder(enc);
@@ -441,7 +441,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
441 if (id == 0) 441 if (id == 0)
442 break; 442 break;
443 443
444 encoder = drm_encoder_find(dev, id); 444 encoder = drm_encoder_find(dev, NULL, id);
445 if (!encoder) 445 if (!encoder)
446 continue; 446 continue;
447 nv_encoder = nouveau_encoder(encoder); 447 nv_encoder = nouveau_encoder(encoder);
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
index d9d25df6fc1b..4600d3841c25 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
@@ -165,11 +165,15 @@ static bool hdmic_detect(struct omap_dss_device *dssdev)
165{ 165{
166 struct panel_drv_data *ddata = to_panel_data(dssdev); 166 struct panel_drv_data *ddata = to_panel_data(dssdev);
167 struct omap_dss_device *in = ddata->in; 167 struct omap_dss_device *in = ddata->in;
168 bool connected;
168 169
169 if (gpio_is_valid(ddata->hpd_gpio)) 170 if (gpio_is_valid(ddata->hpd_gpio))
170 return gpio_get_value_cansleep(ddata->hpd_gpio); 171 connected = gpio_get_value_cansleep(ddata->hpd_gpio);
171 else 172 else
172 return in->ops.hdmi->detect(in); 173 connected = in->ops.hdmi->detect(in);
174 if (!connected && in->ops.hdmi->lost_hotplug)
175 in->ops.hdmi->lost_hotplug(in);
176 return connected;
173} 177}
174 178
175static int hdmic_register_hpd_cb(struct omap_dss_device *dssdev, 179static int hdmic_register_hpd_cb(struct omap_dss_device *dssdev,
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
index a9e9d667c55e..e3d98d78fc40 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
@@ -51,6 +51,8 @@ static int tpd_connect(struct omap_dss_device *dssdev,
51 dssdev->dst = dst; 51 dssdev->dst = dst;
52 52
53 gpiod_set_value_cansleep(ddata->ct_cp_hpd_gpio, 1); 53 gpiod_set_value_cansleep(ddata->ct_cp_hpd_gpio, 1);
54 gpiod_set_value_cansleep(ddata->ls_oe_gpio, 1);
55
54 /* DC-DC converter needs at max 300us to get to 90% of 5V */ 56 /* DC-DC converter needs at max 300us to get to 90% of 5V */
55 udelay(300); 57 udelay(300);
56 58
@@ -69,6 +71,7 @@ static void tpd_disconnect(struct omap_dss_device *dssdev,
69 return; 71 return;
70 72
71 gpiod_set_value_cansleep(ddata->ct_cp_hpd_gpio, 0); 73 gpiod_set_value_cansleep(ddata->ct_cp_hpd_gpio, 0);
74 gpiod_set_value_cansleep(ddata->ls_oe_gpio, 0);
72 75
73 dst->src = NULL; 76 dst->src = NULL;
74 dssdev->dst = NULL; 77 dssdev->dst = NULL;
@@ -146,25 +149,22 @@ static int tpd_read_edid(struct omap_dss_device *dssdev,
146{ 149{
147 struct panel_drv_data *ddata = to_panel_data(dssdev); 150 struct panel_drv_data *ddata = to_panel_data(dssdev);
148 struct omap_dss_device *in = ddata->in; 151 struct omap_dss_device *in = ddata->in;
149 int r;
150 152
151 if (!gpiod_get_value_cansleep(ddata->hpd_gpio)) 153 if (!gpiod_get_value_cansleep(ddata->hpd_gpio))
152 return -ENODEV; 154 return -ENODEV;
153 155
154 gpiod_set_value_cansleep(ddata->ls_oe_gpio, 1); 156 return in->ops.hdmi->read_edid(in, edid, len);
155
156 r = in->ops.hdmi->read_edid(in, edid, len);
157
158 gpiod_set_value_cansleep(ddata->ls_oe_gpio, 0);
159
160 return r;
161} 157}
162 158
163static bool tpd_detect(struct omap_dss_device *dssdev) 159static bool tpd_detect(struct omap_dss_device *dssdev)
164{ 160{
165 struct panel_drv_data *ddata = to_panel_data(dssdev); 161 struct panel_drv_data *ddata = to_panel_data(dssdev);
162 struct omap_dss_device *in = ddata->in;
163 bool connected = gpiod_get_value_cansleep(ddata->hpd_gpio);
166 164
167 return gpiod_get_value_cansleep(ddata->hpd_gpio); 165 if (!connected && in->ops.hdmi->lost_hotplug)
166 in->ops.hdmi->lost_hotplug(in);
167 return connected;
168} 168}
169 169
170static int tpd_register_hpd_cb(struct omap_dss_device *dssdev, 170static int tpd_register_hpd_cb(struct omap_dss_device *dssdev,
diff --git a/drivers/gpu/drm/omapdrm/dss/Kconfig b/drivers/gpu/drm/omapdrm/dss/Kconfig
index 8b87d5cf45fc..f24ebf7f61dd 100644
--- a/drivers/gpu/drm/omapdrm/dss/Kconfig
+++ b/drivers/gpu/drm/omapdrm/dss/Kconfig
@@ -65,6 +65,14 @@ config OMAP4_DSS_HDMI
65 help 65 help
66 HDMI support for OMAP4 based SoCs. 66 HDMI support for OMAP4 based SoCs.
67 67
68config OMAP4_DSS_HDMI_CEC
69 bool "Enable HDMI CEC support for OMAP4"
70 depends on OMAP4_DSS_HDMI
71 select CEC_CORE
72 default y
73 ---help---
74 When selected the HDMI transmitter will support the CEC feature.
75
68config OMAP5_DSS_HDMI 76config OMAP5_DSS_HDMI
69 bool "HDMI support for OMAP5" 77 bool "HDMI support for OMAP5"
70 default n 78 default n
diff --git a/drivers/gpu/drm/omapdrm/dss/Makefile b/drivers/gpu/drm/omapdrm/dss/Makefile
index 142ce5a02542..3c5644c3fc38 100644
--- a/drivers/gpu/drm/omapdrm/dss/Makefile
+++ b/drivers/gpu/drm/omapdrm/dss/Makefile
@@ -14,5 +14,6 @@ omapdss-$(CONFIG_OMAP2_DSS_DSI) += dsi.o
14omapdss-$(CONFIG_OMAP2_DSS_HDMI_COMMON) += hdmi_common.o hdmi_wp.o hdmi_pll.o \ 14omapdss-$(CONFIG_OMAP2_DSS_HDMI_COMMON) += hdmi_common.o hdmi_wp.o hdmi_pll.o \
15 hdmi_phy.o 15 hdmi_phy.o
16omapdss-$(CONFIG_OMAP4_DSS_HDMI) += hdmi4.o hdmi4_core.o 16omapdss-$(CONFIG_OMAP4_DSS_HDMI) += hdmi4.o hdmi4_core.o
17omapdss-$(CONFIG_OMAP4_DSS_HDMI_CEC) += hdmi4_cec.o
17omapdss-$(CONFIG_OMAP5_DSS_HDMI) += hdmi5.o hdmi5_core.o 18omapdss-$(CONFIG_OMAP5_DSS_HDMI) += hdmi5.o hdmi5_core.o
18ccflags-$(CONFIG_OMAP2_DSS_DEBUG) += -DDEBUG 19ccflags-$(CONFIG_OMAP2_DSS_DEBUG) += -DDEBUG
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi.h b/drivers/gpu/drm/omapdrm/dss/hdmi.h
index a820b394af09..c2609c448ddc 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi.h
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi.h
@@ -24,6 +24,7 @@
24#include <linux/platform_device.h> 24#include <linux/platform_device.h>
25#include <linux/hdmi.h> 25#include <linux/hdmi.h>
26#include <sound/omap-hdmi-audio.h> 26#include <sound/omap-hdmi-audio.h>
27#include <media/cec.h>
27 28
28#include "omapdss.h" 29#include "omapdss.h"
29#include "dss.h" 30#include "dss.h"
@@ -264,6 +265,10 @@ struct hdmi_core_data {
264 void __iomem *base; 265 void __iomem *base;
265 bool cts_swmode; 266 bool cts_swmode;
266 bool audio_use_mclk; 267 bool audio_use_mclk;
268
269 struct hdmi_wp_data *wp;
270 unsigned int core_pwr_cnt;
271 struct cec_adapter *adap;
267}; 272};
268 273
269static inline void hdmi_write_reg(void __iomem *base_addr, const u32 idx, 274static inline void hdmi_write_reg(void __iomem *base_addr, const u32 idx,
@@ -373,7 +378,7 @@ struct omap_hdmi {
373 bool audio_configured; 378 bool audio_configured;
374 struct omap_dss_audio audio_config; 379 struct omap_dss_audio audio_config;
375 380
376 /* This lock should be taken when booleans bellow are touched. */ 381 /* This lock should be taken when booleans below are touched. */
377 spinlock_t audio_playing_lock; 382 spinlock_t audio_playing_lock;
378 bool audio_playing; 383 bool audio_playing;
379 bool display_enabled; 384 bool display_enabled;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index f169348da377..a598dfdeb585 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -36,9 +36,11 @@
36#include <linux/of.h> 36#include <linux/of.h>
37#include <linux/of_graph.h> 37#include <linux/of_graph.h>
38#include <sound/omap-hdmi-audio.h> 38#include <sound/omap-hdmi-audio.h>
39#include <media/cec.h>
39 40
40#include "omapdss.h" 41#include "omapdss.h"
41#include "hdmi4_core.h" 42#include "hdmi4_core.h"
43#include "hdmi4_cec.h"
42#include "dss.h" 44#include "dss.h"
43#include "hdmi.h" 45#include "hdmi.h"
44 46
@@ -70,7 +72,8 @@ static void hdmi_runtime_put(void)
70 72
71static irqreturn_t hdmi_irq_handler(int irq, void *data) 73static irqreturn_t hdmi_irq_handler(int irq, void *data)
72{ 74{
73 struct hdmi_wp_data *wp = data; 75 struct omap_hdmi *hdmi = data;
76 struct hdmi_wp_data *wp = &hdmi->wp;
74 u32 irqstatus; 77 u32 irqstatus;
75 78
76 irqstatus = hdmi_wp_get_irqstatus(wp); 79 irqstatus = hdmi_wp_get_irqstatus(wp);
@@ -95,6 +98,13 @@ static irqreturn_t hdmi_irq_handler(int irq, void *data)
95 } else if (irqstatus & HDMI_IRQ_LINK_DISCONNECT) { 98 } else if (irqstatus & HDMI_IRQ_LINK_DISCONNECT) {
96 hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_LDOON); 99 hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_LDOON);
97 } 100 }
101 if (irqstatus & HDMI_IRQ_CORE) {
102 u32 intr4 = hdmi_read_reg(hdmi->core.base, HDMI_CORE_SYS_INTR4);
103
104 hdmi_write_reg(hdmi->core.base, HDMI_CORE_SYS_INTR4, intr4);
105 if (intr4 & 8)
106 hdmi4_cec_irq(&hdmi->core);
107 }
98 108
99 return IRQ_HANDLED; 109 return IRQ_HANDLED;
100} 110}
@@ -123,14 +133,19 @@ static int hdmi_power_on_core(struct omap_dss_device *dssdev)
123{ 133{
124 int r; 134 int r;
125 135
136 if (hdmi.core.core_pwr_cnt++)
137 return 0;
138
126 r = regulator_enable(hdmi.vdda_reg); 139 r = regulator_enable(hdmi.vdda_reg);
127 if (r) 140 if (r)
128 return r; 141 goto err_reg_enable;
129 142
130 r = hdmi_runtime_get(); 143 r = hdmi_runtime_get();
131 if (r) 144 if (r)
132 goto err_runtime_get; 145 goto err_runtime_get;
133 146
147 hdmi4_core_powerdown_disable(&hdmi.core);
148
134 /* Make selection of HDMI in DSS */ 149 /* Make selection of HDMI in DSS */
135 dss_select_hdmi_venc_clk_source(DSS_HDMI_M_PCLK); 150 dss_select_hdmi_venc_clk_source(DSS_HDMI_M_PCLK);
136 151
@@ -140,12 +155,17 @@ static int hdmi_power_on_core(struct omap_dss_device *dssdev)
140 155
141err_runtime_get: 156err_runtime_get:
142 regulator_disable(hdmi.vdda_reg); 157 regulator_disable(hdmi.vdda_reg);
158err_reg_enable:
159 hdmi.core.core_pwr_cnt--;
143 160
144 return r; 161 return r;
145} 162}
146 163
147static void hdmi_power_off_core(struct omap_dss_device *dssdev) 164static void hdmi_power_off_core(struct omap_dss_device *dssdev)
148{ 165{
166 if (--hdmi.core.core_pwr_cnt)
167 return;
168
149 hdmi.core_enabled = false; 169 hdmi.core_enabled = false;
150 170
151 hdmi_runtime_put(); 171 hdmi_runtime_put();
@@ -166,8 +186,8 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
166 return r; 186 return r;
167 187
168 /* disable and clear irqs */ 188 /* disable and clear irqs */
169 hdmi_wp_clear_irqenable(wp, 0xffffffff); 189 hdmi_wp_clear_irqenable(wp, ~HDMI_IRQ_CORE);
170 hdmi_wp_set_irqstatus(wp, 0xffffffff); 190 hdmi_wp_set_irqstatus(wp, ~HDMI_IRQ_CORE);
171 191
172 vm = &hdmi.cfg.vm; 192 vm = &hdmi.cfg.vm;
173 193
@@ -242,7 +262,7 @@ static void hdmi_power_off_full(struct omap_dss_device *dssdev)
242{ 262{
243 enum omap_channel channel = dssdev->dispc_channel; 263 enum omap_channel channel = dssdev->dispc_channel;
244 264
245 hdmi_wp_clear_irqenable(&hdmi.wp, 0xffffffff); 265 hdmi_wp_clear_irqenable(&hdmi.wp, ~HDMI_IRQ_CORE);
246 266
247 hdmi_wp_video_stop(&hdmi.wp); 267 hdmi_wp_video_stop(&hdmi.wp);
248 268
@@ -393,11 +413,11 @@ static void hdmi_display_disable(struct omap_dss_device *dssdev)
393 mutex_unlock(&hdmi.lock); 413 mutex_unlock(&hdmi.lock);
394} 414}
395 415
396static int hdmi_core_enable(struct omap_dss_device *dssdev) 416int hdmi4_core_enable(struct omap_dss_device *dssdev)
397{ 417{
398 int r = 0; 418 int r = 0;
399 419
400 DSSDBG("ENTER omapdss_hdmi_core_enable\n"); 420 DSSDBG("ENTER omapdss_hdmi4_core_enable\n");
401 421
402 mutex_lock(&hdmi.lock); 422 mutex_lock(&hdmi.lock);
403 423
@@ -415,9 +435,9 @@ err0:
415 return r; 435 return r;
416} 436}
417 437
418static void hdmi_core_disable(struct omap_dss_device *dssdev) 438void hdmi4_core_disable(struct omap_dss_device *dssdev)
419{ 439{
420 DSSDBG("Enter omapdss_hdmi_core_disable\n"); 440 DSSDBG("Enter omapdss_hdmi4_core_disable\n");
421 441
422 mutex_lock(&hdmi.lock); 442 mutex_lock(&hdmi.lock);
423 443
@@ -475,19 +495,28 @@ static int hdmi_read_edid(struct omap_dss_device *dssdev,
475 need_enable = hdmi.core_enabled == false; 495 need_enable = hdmi.core_enabled == false;
476 496
477 if (need_enable) { 497 if (need_enable) {
478 r = hdmi_core_enable(dssdev); 498 r = hdmi4_core_enable(dssdev);
479 if (r) 499 if (r)
480 return r; 500 return r;
481 } 501 }
482 502
483 r = read_edid(edid, len); 503 r = read_edid(edid, len);
484 504 if (r >= 256)
505 hdmi4_cec_set_phys_addr(&hdmi.core,
506 cec_get_edid_phys_addr(edid, r, NULL));
507 else
508 hdmi4_cec_set_phys_addr(&hdmi.core, CEC_PHYS_ADDR_INVALID);
485 if (need_enable) 509 if (need_enable)
486 hdmi_core_disable(dssdev); 510 hdmi4_core_disable(dssdev);
487 511
488 return r; 512 return r;
489} 513}
490 514
515static void hdmi_lost_hotplug(struct omap_dss_device *dssdev)
516{
517 hdmi4_cec_set_phys_addr(&hdmi.core, CEC_PHYS_ADDR_INVALID);
518}
519
491static int hdmi_set_infoframe(struct omap_dss_device *dssdev, 520static int hdmi_set_infoframe(struct omap_dss_device *dssdev,
492 const struct hdmi_avi_infoframe *avi) 521 const struct hdmi_avi_infoframe *avi)
493{ 522{
@@ -514,6 +543,7 @@ static const struct omapdss_hdmi_ops hdmi_ops = {
514 .get_timings = hdmi_display_get_timings, 543 .get_timings = hdmi_display_get_timings,
515 544
516 .read_edid = hdmi_read_edid, 545 .read_edid = hdmi_read_edid,
546 .lost_hotplug = hdmi_lost_hotplug,
517 .set_infoframe = hdmi_set_infoframe, 547 .set_infoframe = hdmi_set_infoframe,
518 .set_hdmi_mode = hdmi_set_hdmi_mode, 548 .set_hdmi_mode = hdmi_set_hdmi_mode,
519}; 549};
@@ -715,6 +745,10 @@ static int hdmi4_bind(struct device *dev, struct device *master, void *data)
715 if (r) 745 if (r)
716 goto err; 746 goto err;
717 747
748 r = hdmi4_cec_init(pdev, &hdmi.core, &hdmi.wp);
749 if (r)
750 goto err;
751
718 irq = platform_get_irq(pdev, 0); 752 irq = platform_get_irq(pdev, 0);
719 if (irq < 0) { 753 if (irq < 0) {
720 DSSERR("platform_get_irq failed\n"); 754 DSSERR("platform_get_irq failed\n");
@@ -724,7 +758,7 @@ static int hdmi4_bind(struct device *dev, struct device *master, void *data)
724 758
725 r = devm_request_threaded_irq(&pdev->dev, irq, 759 r = devm_request_threaded_irq(&pdev->dev, irq,
726 NULL, hdmi_irq_handler, 760 NULL, hdmi_irq_handler,
727 IRQF_ONESHOT, "OMAP HDMI", &hdmi.wp); 761 IRQF_ONESHOT, "OMAP HDMI", &hdmi);
728 if (r) { 762 if (r) {
729 DSSERR("HDMI IRQ request failed\n"); 763 DSSERR("HDMI IRQ request failed\n");
730 goto err; 764 goto err;
@@ -759,6 +793,8 @@ static void hdmi4_unbind(struct device *dev, struct device *master, void *data)
759 793
760 hdmi_uninit_output(pdev); 794 hdmi_uninit_output(pdev);
761 795
796 hdmi4_cec_uninit(&hdmi.core);
797
762 hdmi_pll_uninit(&hdmi.pll); 798 hdmi_pll_uninit(&hdmi.pll);
763 799
764 pm_runtime_disable(&pdev->dev); 800 pm_runtime_disable(&pdev->dev);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
new file mode 100644
index 000000000000..d86873f2abe6
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
@@ -0,0 +1,381 @@
1/*
2 * HDMI CEC
3 *
4 * Based on the CEC code from hdmi_ti_4xxx_ip.c from Android.
5 *
6 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
7 * Authors: Yong Zhi
8 * Mythri pk <mythripk@ti.com>
9 *
10 * Heavily modified to use the linux CEC framework:
11 *
12 * Copyright 2016-2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
13 *
14 * This program is free software; you may redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; version 2 of the License.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
21 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
22 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
23 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
24 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
25 * SOFTWARE.
26 */
27
28#include <linux/kernel.h>
29#include <linux/err.h>
30#include <linux/io.h>
31#include <linux/platform_device.h>
32#include <linux/slab.h>
33
34#include "dss.h"
35#include "hdmi.h"
36#include "hdmi4_core.h"
37#include "hdmi4_cec.h"
38
39/* HDMI CEC */
40#define HDMI_CEC_DEV_ID 0x900
41#define HDMI_CEC_SPEC 0x904
42
43/* Not really a debug register, more a low-level control register */
44#define HDMI_CEC_DBG_3 0x91C
45#define HDMI_CEC_TX_INIT 0x920
46#define HDMI_CEC_TX_DEST 0x924
47#define HDMI_CEC_SETUP 0x938
48#define HDMI_CEC_TX_COMMAND 0x93C
49#define HDMI_CEC_TX_OPERAND 0x940
50#define HDMI_CEC_TRANSMIT_DATA 0x97C
51#define HDMI_CEC_CA_7_0 0x988
52#define HDMI_CEC_CA_15_8 0x98C
53#define HDMI_CEC_INT_STATUS_0 0x998
54#define HDMI_CEC_INT_STATUS_1 0x99C
55#define HDMI_CEC_INT_ENABLE_0 0x990
56#define HDMI_CEC_INT_ENABLE_1 0x994
57#define HDMI_CEC_RX_CONTROL 0x9B0
58#define HDMI_CEC_RX_COUNT 0x9B4
59#define HDMI_CEC_RX_CMD_HEADER 0x9B8
60#define HDMI_CEC_RX_COMMAND 0x9BC
61#define HDMI_CEC_RX_OPERAND 0x9C0
62
63#define HDMI_CEC_TX_FIFO_INT_MASK 0x64
64#define HDMI_CEC_RETRANSMIT_CNT_INT_MASK 0x2
65
66#define HDMI_CORE_CEC_RETRY 200
67
68static void hdmi_cec_received_msg(struct hdmi_core_data *core)
69{
70 u32 cnt = hdmi_read_reg(core->base, HDMI_CEC_RX_COUNT) & 0xff;
71
72 /* While there are CEC frames in the FIFO */
73 while (cnt & 0x70) {
74 /* and the frame doesn't have an error */
75 if (!(cnt & 0x80)) {
76 struct cec_msg msg = {};
77 unsigned int i;
78
79 /* then read the message */
80 msg.len = cnt & 0xf;
81 msg.msg[0] = hdmi_read_reg(core->base,
82 HDMI_CEC_RX_CMD_HEADER);
83 msg.msg[1] = hdmi_read_reg(core->base,
84 HDMI_CEC_RX_COMMAND);
85 for (i = 0; i < msg.len; i++) {
86 unsigned int reg = HDMI_CEC_RX_OPERAND + i * 4;
87
88 msg.msg[2 + i] =
89 hdmi_read_reg(core->base, reg);
90 }
91 msg.len += 2;
92 cec_received_msg(core->adap, &msg);
93 }
94 /* Clear the current frame from the FIFO */
95 hdmi_write_reg(core->base, HDMI_CEC_RX_CONTROL, 1);
96 /* Wait until the current frame is cleared */
97 while (hdmi_read_reg(core->base, HDMI_CEC_RX_CONTROL) & 1)
98 udelay(1);
99 /*
100 * Re-read the count register and loop to see if there are
101 * more messages in the FIFO.
102 */
103 cnt = hdmi_read_reg(core->base, HDMI_CEC_RX_COUNT) & 0xff;
104 }
105}
106
107static void hdmi_cec_transmit_fifo_empty(struct hdmi_core_data *core, u32 stat1)
108{
109 if (stat1 & 2) {
110 u32 dbg3 = hdmi_read_reg(core->base, HDMI_CEC_DBG_3);
111
112 cec_transmit_done(core->adap,
113 CEC_TX_STATUS_NACK |
114 CEC_TX_STATUS_MAX_RETRIES,
115 0, (dbg3 >> 4) & 7, 0, 0);
116 } else if (stat1 & 1) {
117 cec_transmit_done(core->adap,
118 CEC_TX_STATUS_ARB_LOST |
119 CEC_TX_STATUS_MAX_RETRIES,
120 0, 0, 0, 0);
121 } else if (stat1 == 0) {
122 cec_transmit_done(core->adap, CEC_TX_STATUS_OK,
123 0, 0, 0, 0);
124 }
125}
126
127void hdmi4_cec_irq(struct hdmi_core_data *core)
128{
129 u32 stat0 = hdmi_read_reg(core->base, HDMI_CEC_INT_STATUS_0);
130 u32 stat1 = hdmi_read_reg(core->base, HDMI_CEC_INT_STATUS_1);
131
132 hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_0, stat0);
133 hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_1, stat1);
134
135 if (stat0 & 0x40)
136 REG_FLD_MOD(core->base, HDMI_CEC_DBG_3, 0x1, 7, 7);
137 else if (stat0 & 0x24)
138 hdmi_cec_transmit_fifo_empty(core, stat1);
139 if (stat1 & 2) {
140 u32 dbg3 = hdmi_read_reg(core->base, HDMI_CEC_DBG_3);
141
142 cec_transmit_done(core->adap,
143 CEC_TX_STATUS_NACK |
144 CEC_TX_STATUS_MAX_RETRIES,
145 0, (dbg3 >> 4) & 7, 0, 0);
146 } else if (stat1 & 1) {
147 cec_transmit_done(core->adap,
148 CEC_TX_STATUS_ARB_LOST |
149 CEC_TX_STATUS_MAX_RETRIES,
150 0, 0, 0, 0);
151 }
152 if (stat0 & 0x02)
153 hdmi_cec_received_msg(core);
154 if (stat1 & 0x3)
155 REG_FLD_MOD(core->base, HDMI_CEC_DBG_3, 0x1, 7, 7);
156}
157
158static bool hdmi_cec_clear_tx_fifo(struct cec_adapter *adap)
159{
160 struct hdmi_core_data *core = cec_get_drvdata(adap);
161 int retry = HDMI_CORE_CEC_RETRY;
162 int temp;
163
164 REG_FLD_MOD(core->base, HDMI_CEC_DBG_3, 0x1, 7, 7);
165 while (retry) {
166 temp = hdmi_read_reg(core->base, HDMI_CEC_DBG_3);
167 if (FLD_GET(temp, 7, 7) == 0)
168 break;
169 retry--;
170 }
171 return retry != 0;
172}
173
174static bool hdmi_cec_clear_rx_fifo(struct cec_adapter *adap)
175{
176 struct hdmi_core_data *core = cec_get_drvdata(adap);
177 int retry = HDMI_CORE_CEC_RETRY;
178 int temp;
179
180 hdmi_write_reg(core->base, HDMI_CEC_RX_CONTROL, 0x3);
181 retry = HDMI_CORE_CEC_RETRY;
182 while (retry) {
183 temp = hdmi_read_reg(core->base, HDMI_CEC_RX_CONTROL);
184 if (FLD_GET(temp, 1, 0) == 0)
185 break;
186 retry--;
187 }
188 return retry != 0;
189}
190
191static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
192{
193 struct hdmi_core_data *core = cec_get_drvdata(adap);
194 int temp, err;
195
196 if (!enable) {
197 hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_0, 0);
198 hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_1, 0);
199 REG_FLD_MOD(core->base, HDMI_CORE_SYS_INTR_UNMASK4, 0, 3, 3);
200 hdmi_wp_clear_irqenable(core->wp, HDMI_IRQ_CORE);
201 hdmi_wp_set_irqstatus(core->wp, HDMI_IRQ_CORE);
202 hdmi4_core_disable(NULL);
203 return 0;
204 }
205 err = hdmi4_core_enable(NULL);
206 if (err)
207 return err;
208
209 /* Clear TX FIFO */
210 if (!hdmi_cec_clear_tx_fifo(adap)) {
211 pr_err("cec-%s: could not clear TX FIFO\n", adap->name);
212 return -EIO;
213 }
214
215 /* Clear RX FIFO */
216 if (!hdmi_cec_clear_rx_fifo(adap)) {
217 pr_err("cec-%s: could not clear RX FIFO\n", adap->name);
218 return -EIO;
219 }
220
221 /* Clear CEC interrupts */
222 hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_1,
223 hdmi_read_reg(core->base, HDMI_CEC_INT_STATUS_1));
224 hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_0,
225 hdmi_read_reg(core->base, HDMI_CEC_INT_STATUS_0));
226
227 /* Enable HDMI core interrupts */
228 hdmi_wp_set_irqenable(core->wp, HDMI_IRQ_CORE);
229 /* Unmask CEC interrupt */
230 REG_FLD_MOD(core->base, HDMI_CORE_SYS_INTR_UNMASK4, 0x1, 3, 3);
231 /*
232 * Enable CEC interrupts:
233 * Transmit Buffer Full/Empty Change event
234 * Transmitter FIFO Empty event
235 * Receiver FIFO Not Empty event
236 */
237 hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_0, 0x26);
238 /*
239 * Enable CEC interrupts:
240 * RX FIFO Overrun Error event
241 * Short Pulse Detected event
242 * Frame Retransmit Count Exceeded event
243 * Start Bit Irregularity event
244 */
245 hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_1, 0x0f);
246
247 /* cec calibration enable (self clearing) */
248 hdmi_write_reg(core->base, HDMI_CEC_SETUP, 0x03);
249 msleep(20);
250 hdmi_write_reg(core->base, HDMI_CEC_SETUP, 0x04);
251
252 temp = hdmi_read_reg(core->base, HDMI_CEC_SETUP);
253 if (FLD_GET(temp, 4, 4) != 0) {
254 temp = FLD_MOD(temp, 0, 4, 4);
255 hdmi_write_reg(core->base, HDMI_CEC_SETUP, temp);
256
257 /*
258 * If we enabled CEC in middle of a CEC message on the bus,
259 * we could have start bit irregularity and/or short
260 * pulse event. Clear them now.
261 */
262 temp = hdmi_read_reg(core->base, HDMI_CEC_INT_STATUS_1);
263 temp = FLD_MOD(0x0, 0x5, 2, 0);
264 hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_1, temp);
265 }
266 return 0;
267}
268
269static int hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
270{
271 struct hdmi_core_data *core = cec_get_drvdata(adap);
272 u32 v;
273
274 if (log_addr == CEC_LOG_ADDR_INVALID) {
275 hdmi_write_reg(core->base, HDMI_CEC_CA_7_0, 0);
276 hdmi_write_reg(core->base, HDMI_CEC_CA_15_8, 0);
277 return 0;
278 }
279 if (log_addr <= 7) {
280 v = hdmi_read_reg(core->base, HDMI_CEC_CA_7_0);
281 v |= 1 << log_addr;
282 hdmi_write_reg(core->base, HDMI_CEC_CA_7_0, v);
283 } else {
284 v = hdmi_read_reg(core->base, HDMI_CEC_CA_15_8);
285 v |= 1 << (log_addr - 8);
286 hdmi_write_reg(core->base, HDMI_CEC_CA_15_8, v);
287 }
288 return 0;
289}
290
291static int hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
292 u32 signal_free_time, struct cec_msg *msg)
293{
294 struct hdmi_core_data *core = cec_get_drvdata(adap);
295 int temp;
296 u32 i;
297
298 /* Clear TX FIFO */
299 if (!hdmi_cec_clear_tx_fifo(adap)) {
300 pr_err("cec-%s: could not clear TX FIFO for transmit\n",
301 adap->name);
302 return -EIO;
303 }
304
305 /* Clear TX interrupts */
306 hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_0,
307 HDMI_CEC_TX_FIFO_INT_MASK);
308
309 hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_1,
310 HDMI_CEC_RETRANSMIT_CNT_INT_MASK);
311
312 /* Set the retry count */
313 REG_FLD_MOD(core->base, HDMI_CEC_DBG_3, attempts - 1, 6, 4);
314
315 /* Set the initiator addresses */
316 hdmi_write_reg(core->base, HDMI_CEC_TX_INIT, cec_msg_initiator(msg));
317
318 /* Set destination id */
319 temp = cec_msg_destination(msg);
320 if (msg->len == 1)
321 temp |= 0x80;
322 hdmi_write_reg(core->base, HDMI_CEC_TX_DEST, temp);
323 if (msg->len == 1)
324 return 0;
325
326 /* Setup command and arguments for the command */
327 hdmi_write_reg(core->base, HDMI_CEC_TX_COMMAND, msg->msg[1]);
328
329 for (i = 0; i < msg->len - 2; i++)
330 hdmi_write_reg(core->base, HDMI_CEC_TX_OPERAND + i * 4,
331 msg->msg[2 + i]);
332
333 /* Operand count */
334 hdmi_write_reg(core->base, HDMI_CEC_TRANSMIT_DATA,
335 (msg->len - 2) | 0x10);
336 return 0;
337}
338
339static const struct cec_adap_ops hdmi_cec_adap_ops = {
340 .adap_enable = hdmi_cec_adap_enable,
341 .adap_log_addr = hdmi_cec_adap_log_addr,
342 .adap_transmit = hdmi_cec_adap_transmit,
343};
344
345void hdmi4_cec_set_phys_addr(struct hdmi_core_data *core, u16 pa)
346{
347 cec_s_phys_addr(core->adap, pa, false);
348}
349
350int hdmi4_cec_init(struct platform_device *pdev, struct hdmi_core_data *core,
351 struct hdmi_wp_data *wp)
352{
353 const u32 caps = CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS |
354 CEC_CAP_PASSTHROUGH | CEC_CAP_RC;
355 unsigned int ret;
356
357 core->adap = cec_allocate_adapter(&hdmi_cec_adap_ops, core,
358 "omap4", caps, CEC_MAX_LOG_ADDRS);
359 ret = PTR_ERR_OR_ZERO(core->adap);
360 if (ret < 0)
361 return ret;
362 core->wp = wp;
363
364 /*
365 * Initialize CEC clock divider: CEC needs 2MHz clock hence
366 * set the devider to 24 to get 48/24=2MHz clock
367 */
368 REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0x18, 5, 0);
369
370 ret = cec_register_adapter(core->adap, &pdev->dev);
371 if (ret < 0) {
372 cec_delete_adapter(core->adap);
373 return ret;
374 }
375 return 0;
376}
377
378void hdmi4_cec_uninit(struct hdmi_core_data *core)
379{
380 cec_unregister_adapter(core->adap);
381}
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.h b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.h
new file mode 100644
index 000000000000..0292337c97cc
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.h
@@ -0,0 +1,55 @@
/*
 * HDMI header definition for OMAP4 HDMI CEC IP
 *
 * Copyright 2016-2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _HDMI4_CEC_H_
#define _HDMI4_CEC_H_

/* Forward declarations: only pointers are used, so no headers needed. */
struct hdmi_core_data;
struct hdmi_wp_data;
struct platform_device;

/* HDMI CEC funcs */
#ifdef CONFIG_OMAP4_DSS_HDMI_CEC
void hdmi4_cec_set_phys_addr(struct hdmi_core_data *core, u16 pa);
void hdmi4_cec_irq(struct hdmi_core_data *core);
int hdmi4_cec_init(struct platform_device *pdev, struct hdmi_core_data *core,
		  struct hdmi_wp_data *wp);
void hdmi4_cec_uninit(struct hdmi_core_data *core);
#else
/* CEC support compiled out: no-op stubs so callers need no #ifdefs. */
static inline void hdmi4_cec_set_phys_addr(struct hdmi_core_data *core, u16 pa)
{
}

static inline void hdmi4_cec_irq(struct hdmi_core_data *core)
{
}

static inline int hdmi4_cec_init(struct platform_device *pdev,
				 struct hdmi_core_data *core,
				 struct hdmi_wp_data *wp)
{
	return 0;
}

static inline void hdmi4_cec_uninit(struct hdmi_core_data *core)
{
}
#endif

#endif
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
index 365cf07daa01..62e451162d96 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
@@ -208,9 +208,9 @@ static void hdmi_core_init(struct hdmi_core_video_config *video_cfg)
208 video_cfg->tclk_sel_clkmult = HDMI_FPLL10IDCK; 208 video_cfg->tclk_sel_clkmult = HDMI_FPLL10IDCK;
209} 209}
210 210
211static void hdmi_core_powerdown_disable(struct hdmi_core_data *core) 211void hdmi4_core_powerdown_disable(struct hdmi_core_data *core)
212{ 212{
213 DSSDBG("Enter hdmi_core_powerdown_disable\n"); 213 DSSDBG("Enter hdmi4_core_powerdown_disable\n");
214 REG_FLD_MOD(core->base, HDMI_CORE_SYS_SYS_CTRL1, 0x1, 0, 0); 214 REG_FLD_MOD(core->base, HDMI_CORE_SYS_SYS_CTRL1, 0x1, 0, 0);
215} 215}
216 216
@@ -335,9 +335,6 @@ void hdmi4_configure(struct hdmi_core_data *core,
335 */ 335 */
336 hdmi_core_swreset_assert(core); 336 hdmi_core_swreset_assert(core);
337 337
338 /* power down off */
339 hdmi_core_powerdown_disable(core);
340
341 v_core_cfg.pkt_mode = HDMI_PACKETMODE24BITPERPIXEL; 338 v_core_cfg.pkt_mode = HDMI_PACKETMODE24BITPERPIXEL;
342 v_core_cfg.hdmi_dvi = cfg->hdmi_dvi_mode; 339 v_core_cfg.hdmi_dvi = cfg->hdmi_dvi_mode;
343 340
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h
index a069f96ec6f6..b6ab579e44d2 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h
@@ -266,6 +266,10 @@ void hdmi4_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
266void hdmi4_core_dump(struct hdmi_core_data *core, struct seq_file *s); 266void hdmi4_core_dump(struct hdmi_core_data *core, struct seq_file *s);
267int hdmi4_core_init(struct platform_device *pdev, struct hdmi_core_data *core); 267int hdmi4_core_init(struct platform_device *pdev, struct hdmi_core_data *core);
268 268
269int hdmi4_core_enable(struct omap_dss_device *dssdev);
270void hdmi4_core_disable(struct omap_dss_device *dssdev);
271void hdmi4_core_powerdown_disable(struct hdmi_core_data *core);
272
269int hdmi4_audio_start(struct hdmi_core_data *core, struct hdmi_wp_data *wp); 273int hdmi4_audio_start(struct hdmi_core_data *core, struct hdmi_wp_data *wp);
270void hdmi4_audio_stop(struct hdmi_core_data *core, struct hdmi_wp_data *wp); 274void hdmi4_audio_stop(struct hdmi_core_data *core, struct hdmi_wp_data *wp);
271int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp, 275int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h
index 47a331670963..990422b35784 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss.h
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h
@@ -395,6 +395,7 @@ struct omapdss_hdmi_ops {
395 struct videomode *vm); 395 struct videomode *vm);
396 396
397 int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len); 397 int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
398 void (*lost_hotplug)(struct omap_dss_device *dssdev);
398 bool (*detect)(struct omap_dss_device *dssdev); 399 bool (*detect)(struct omap_dss_device *dssdev);
399 400
400 int (*register_hpd_cb)(struct omap_dss_device *dssdev, 401 int (*register_hpd_cb)(struct omap_dss_device *dssdev,
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 718d8ce15b1f..726f3fb3312d 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -82,6 +82,14 @@ config DRM_PANEL_PANASONIC_VVX10F034N00
82 WUXGA (1920x1200) Novatek NT1397-based DSI panel as found in some 82 WUXGA (1920x1200) Novatek NT1397-based DSI panel as found in some
83 Xperia Z2 tablets 83 Xperia Z2 tablets
84 84
85config DRM_PANEL_RASPBERRYPI_TOUCHSCREEN
86 tristate "Raspberry Pi 7-inch touchscreen panel"
87 depends on DRM_MIPI_DSI
88 help
89 Say Y here if you want to enable support for the Raspberry
90 Pi 7" Touchscreen. To compile this driver as a module,
91 choose M here.
92
85config DRM_PANEL_SAMSUNG_S6E3HA2 93config DRM_PANEL_SAMSUNG_S6E3HA2
86 tristate "Samsung S6E3HA2 DSI video mode panel" 94 tristate "Samsung S6E3HA2 DSI video mode panel"
87 depends on OF 95 depends on OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index c8483fdd5b9b..77ede3467324 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o
5obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o 5obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
6obj-$(CONFIG_DRM_PANEL_ORISETECH_OTM8009A) += panel-orisetech-otm8009a.o 6obj-$(CONFIG_DRM_PANEL_ORISETECH_OTM8009A) += panel-orisetech-otm8009a.o
7obj-$(CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00) += panel-panasonic-vvx10f034n00.o 7obj-$(CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00) += panel-panasonic-vvx10f034n00.o
8obj-$(CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN) += panel-raspberrypi-touchscreen.o
8obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o 9obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o
9obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o 10obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o
10obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03) += panel-samsung-s6e63j0x03.o 11obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03) += panel-samsung-s6e63j0x03.o
diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
new file mode 100644
index 000000000000..890fd6ff397c
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
@@ -0,0 +1,514 @@
1/*
2 * Copyright © 2016-2017 Broadcom
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * Portions of this file (derived from panel-simple.c) are:
9 *
10 * Copyright (C) 2013, NVIDIA Corporation. All rights reserved.
11 *
12 * Permission is hereby granted, free of charge, to any person obtaining a
13 * copy of this software and associated documentation files (the "Software"),
14 * to deal in the Software without restriction, including without limitation
15 * the rights to use, copy, modify, merge, publish, distribute, sub license,
16 * and/or sell copies of the Software, and to permit persons to whom the
17 * Software is furnished to do so, subject to the following conditions:
18 *
19 * The above copyright notice and this permission notice (including the
20 * next paragraph) shall be included in all copies or substantial portions
21 * of the Software.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
26 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
29 * DEALINGS IN THE SOFTWARE.
30 */
31
32/**
33 * Raspberry Pi 7" touchscreen panel driver.
34 *
35 * The 7" touchscreen consists of a DPI LCD panel, a Toshiba
36 * TC358762XBG DSI-DPI bridge, and an I2C-connected Atmel ATTINY88-MUR
37 * controlling power management, the LCD PWM, and initial register
38 * setup of the Tohsiba.
39 *
40 * This driver controls the TC358762 and ATTINY88, presenting a DSI
41 * device with a drm_panel.
42 */
43
44#include <linux/delay.h>
45#include <linux/err.h>
46#include <linux/fb.h>
47#include <linux/gpio.h>
48#include <linux/gpio/consumer.h>
49#include <linux/i2c.h>
50#include <linux/module.h>
51#include <linux/of.h>
52#include <linux/of_device.h>
53#include <linux/of_graph.h>
54#include <linux/pm.h>
55
56#include <drm/drm_panel.h>
57#include <drm/drmP.h>
58#include <drm/drm_crtc.h>
59#include <drm/drm_mipi_dsi.h>
60#include <drm/drm_panel.h>
61
62#define RPI_DSI_DRIVER_NAME "rpi-ts-dsi"
63
64/* I2C registers of the Atmel microcontroller. */
65enum REG_ADDR {
66 REG_ID = 0x80,
67 REG_PORTA, /* BIT(2) for horizontal flip, BIT(3) for vertical flip */
68 REG_PORTB,
69 REG_PORTC,
70 REG_PORTD,
71 REG_POWERON,
72 REG_PWM,
73 REG_DDRA,
74 REG_DDRB,
75 REG_DDRC,
76 REG_DDRD,
77 REG_TEST,
78 REG_WR_ADDRL,
79 REG_WR_ADDRH,
80 REG_READH,
81 REG_READL,
82 REG_WRITEH,
83 REG_WRITEL,
84 REG_ID2,
85};
86
87/* DSI D-PHY Layer Registers */
88#define D0W_DPHYCONTTX 0x0004
89#define CLW_DPHYCONTRX 0x0020
90#define D0W_DPHYCONTRX 0x0024
91#define D1W_DPHYCONTRX 0x0028
92#define COM_DPHYCONTRX 0x0038
93#define CLW_CNTRL 0x0040
94#define D0W_CNTRL 0x0044
95#define D1W_CNTRL 0x0048
96#define DFTMODE_CNTRL 0x0054
97
98/* DSI PPI Layer Registers */
99#define PPI_STARTPPI 0x0104
100#define PPI_BUSYPPI 0x0108
101#define PPI_LINEINITCNT 0x0110
102#define PPI_LPTXTIMECNT 0x0114
103#define PPI_CLS_ATMR 0x0140
104#define PPI_D0S_ATMR 0x0144
105#define PPI_D1S_ATMR 0x0148
106#define PPI_D0S_CLRSIPOCOUNT 0x0164
107#define PPI_D1S_CLRSIPOCOUNT 0x0168
108#define CLS_PRE 0x0180
109#define D0S_PRE 0x0184
110#define D1S_PRE 0x0188
111#define CLS_PREP 0x01A0
112#define D0S_PREP 0x01A4
113#define D1S_PREP 0x01A8
114#define CLS_ZERO 0x01C0
115#define D0S_ZERO 0x01C4
116#define D1S_ZERO 0x01C8
117#define PPI_CLRFLG 0x01E0
118#define PPI_CLRSIPO 0x01E4
119#define HSTIMEOUT 0x01F0
120#define HSTIMEOUTENABLE 0x01F4
121
122/* DSI Protocol Layer Registers */
123#define DSI_STARTDSI 0x0204
124#define DSI_BUSYDSI 0x0208
125#define DSI_LANEENABLE 0x0210
126# define DSI_LANEENABLE_CLOCK BIT(0)
127# define DSI_LANEENABLE_D0 BIT(1)
128# define DSI_LANEENABLE_D1 BIT(2)
129
130#define DSI_LANESTATUS0 0x0214
131#define DSI_LANESTATUS1 0x0218
132#define DSI_INTSTATUS 0x0220
133#define DSI_INTMASK 0x0224
134#define DSI_INTCLR 0x0228
135#define DSI_LPTXTO 0x0230
136#define DSI_MODE 0x0260
137#define DSI_PAYLOAD0 0x0268
138#define DSI_PAYLOAD1 0x026C
139#define DSI_SHORTPKTDAT 0x0270
140#define DSI_SHORTPKTREQ 0x0274
141#define DSI_BTASTA 0x0278
142#define DSI_BTACLR 0x027C
143
144/* DSI General Registers */
145#define DSIERRCNT 0x0300
146#define DSISIGMOD 0x0304
147
148/* DSI Application Layer Registers */
149#define APLCTRL 0x0400
150#define APLSTAT 0x0404
151#define APLERR 0x0408
152#define PWRMOD 0x040C
153#define RDPKTLN 0x0410
154#define PXLFMT 0x0414
155#define MEMWRCMD 0x0418
156
157/* LCDC/DPI Host Registers */
158#define LCDCTRL 0x0420
159#define HSR 0x0424
160#define HDISPR 0x0428
161#define VSR 0x042C
162#define VDISPR 0x0430
163#define VFUEN 0x0434
164
165/* DBI-B Host Registers */
166#define DBIBCTRL 0x0440
167
168/* SPI Master Registers */
169#define SPICMR 0x0450
170#define SPITCR 0x0454
171
172/* System Controller Registers */
173#define SYSSTAT 0x0460
174#define SYSCTRL 0x0464
175#define SYSPLL1 0x0468
176#define SYSPLL2 0x046C
177#define SYSPLL3 0x0470
178#define SYSPMCTRL 0x047C
179
180/* GPIO Registers */
181#define GPIOC 0x0480
182#define GPIOO 0x0484
183#define GPIOI 0x0488
184
185/* I2C Registers */
186#define I2CCLKCTRL 0x0490
187
188/* Chip/Rev Registers */
189#define IDREG 0x04A0
190
191/* Debug Registers */
192#define WCMDQUEUE 0x0500
193#define RCMDQUEUE 0x0504
194
/* Driver state: the DRM panel plus its two control paths — the DSI video
 * device and the Atmel microcontroller's I2C client. */
struct rpi_touchscreen {
	struct drm_panel base;
	struct mipi_dsi_device *dsi;
	struct i2c_client *i2c;
};
200
/* The single fixed 800x480@60 mode this panel supports. */
static const struct drm_display_mode rpi_touchscreen_modes[] = {
	{
		/* Modeline comes from the Raspberry Pi firmware, with HFP=1
		 * plugged in and clock re-computed from that.
		 */
		.clock = 25979400 / 1000,
		.hdisplay = 800,
		.hsync_start = 800 + 1,
		.hsync_end = 800 + 1 + 2,
		.htotal = 800 + 1 + 2 + 46,
		.vdisplay = 480,
		.vsync_start = 480 + 7,
		.vsync_end = 480 + 7 + 2,
		.vtotal = 480 + 7 + 2 + 21,
		.vrefresh = 60,
	},
};
218
/* Recover the driver state from the embedded drm_panel. */
static struct rpi_touchscreen *panel_to_ts(struct drm_panel *panel)
{
	return container_of(panel, struct rpi_touchscreen, base);
}
223
224static u8 rpi_touchscreen_i2c_read(struct rpi_touchscreen *ts, u8 reg)
225{
226 return i2c_smbus_read_byte_data(ts->i2c, reg);
227}
228
/* Write one register of the Atmel microcontroller; failures are logged
 * but not propagated (best-effort, callers have no recovery path). */
static void rpi_touchscreen_i2c_write(struct rpi_touchscreen *ts,
				      u8 reg, u8 val)
{
	int ret;

	ret = i2c_smbus_write_byte_data(ts->i2c, reg, val);
	if (ret)
		dev_err(&ts->dsi->dev, "I2C write failed: %d\n", ret);
}
238
/*
 * Write a Toshiba TC358762 bridge register, tunnelled through the Atmel
 * microcontroller over I2C (address then data, high byte first).
 *
 * NOTE(review): on the I2C path only the low 16 bits of @val reach the
 * hardware (REG_WRITEH/REG_WRITEL), yet callers pass 32-bit values such
 * as LCDCTRL = 0x00100150 — confirm the upper bits are intentionally
 * dropped or handled elsewhere.
 */
static int rpi_touchscreen_write(struct rpi_touchscreen *ts, u16 reg, u32 val)
{
#if 0
	/* The firmware uses LP DSI transactions like this to bring up
	 * the hardware, which should be faster than using I2C to then
	 * pass to the Toshiba.  However, I was unable to get it to
	 * work.
	 */
	u8 msg[] = {
		reg,
		reg >> 8,
		val,
		val >> 8,
		val >> 16,
		val >> 24,
	};

	mipi_dsi_dcs_write_buffer(ts->dsi, msg, sizeof(msg));
#else
	rpi_touchscreen_i2c_write(ts, REG_WR_ADDRH, reg >> 8);
	rpi_touchscreen_i2c_write(ts, REG_WR_ADDRL, reg);
	rpi_touchscreen_i2c_write(ts, REG_WRITEH, val >> 8);
	rpi_touchscreen_i2c_write(ts, REG_WRITEL, val);
#endif

	return 0;
}
266
/* Panel .disable hook: kill the backlight PWM, then cut panel power. */
static int rpi_touchscreen_disable(struct drm_panel *panel)
{
	struct rpi_touchscreen *ts = panel_to_ts(panel);

	rpi_touchscreen_i2c_write(ts, REG_PWM, 0);

	rpi_touchscreen_i2c_write(ts, REG_POWERON, 0);
	udelay(1);

	return 0;
}
278
/* Shared stub for the .prepare/.unprepare hooks, which need no work. */
static int rpi_touchscreen_noop(struct drm_panel *panel)
{
	return 0;
}
283
/*
 * Panel .enable hook: power on via the Atmel, program the TC358762
 * bridge (lane setup, timing counters, DPI host), start its PPI/DSI
 * blocks, then turn on the backlight. Register writes are
 * order-sensitive; do not reorder.
 */
static int rpi_touchscreen_enable(struct drm_panel *panel)
{
	struct rpi_touchscreen *ts = panel_to_ts(panel);
	int i;

	rpi_touchscreen_i2c_write(ts, REG_POWERON, 1);
	/* Wait for nPWRDWN to go low to indicate poweron is done. */
	/* NOTE(review): bounded busy-poll of PORTB bit 0 (each iteration
	 * is an I2C transaction); no explicit error path on timeout —
	 * the bridge writes below proceed regardless. */
	for (i = 0; i < 100; i++) {
		if (rpi_touchscreen_i2c_read(ts, REG_PORTB) & 1)
			break;
	}

	/* Only the clock lane and data lane 0 are wired up. */
	rpi_touchscreen_write(ts, DSI_LANEENABLE,
			      DSI_LANEENABLE_CLOCK |
			      DSI_LANEENABLE_D0);
	rpi_touchscreen_write(ts, PPI_D0S_CLRSIPOCOUNT, 0x05);
	rpi_touchscreen_write(ts, PPI_D1S_CLRSIPOCOUNT, 0x05);
	rpi_touchscreen_write(ts, PPI_D0S_ATMR, 0x00);
	rpi_touchscreen_write(ts, PPI_D1S_ATMR, 0x00);
	rpi_touchscreen_write(ts, PPI_LPTXTIMECNT, 0x03);

	rpi_touchscreen_write(ts, SPICMR, 0x00);
	rpi_touchscreen_write(ts, LCDCTRL, 0x00100150);
	rpi_touchscreen_write(ts, SYSCTRL, 0x040f);
	msleep(100);

	rpi_touchscreen_write(ts, PPI_STARTPPI, 0x01);
	rpi_touchscreen_write(ts, DSI_STARTDSI, 0x01);
	msleep(100);

	/* Turn on the backlight. */
	rpi_touchscreen_i2c_write(ts, REG_PWM, 255);

	/* Default to the same orientation as the closed source
	 * firmware used for the panel.  Runtime rotation
	 * configuration will be supported using VC4's plane
	 * orientation bits.
	 */
	rpi_touchscreen_i2c_write(ts, REG_PORTA, BIT(2));

	return 0;
}
326
/*
 * Panel .get_modes hook: duplicate the fixed mode table onto the
 * connector, mark the first entry preferred, and fill in the physical
 * display info (8 bpc, 154x86 mm, RGB888 bus format).
 *
 * Returns the number of modes added.
 */
static int rpi_touchscreen_get_modes(struct drm_panel *panel)
{
	struct drm_connector *connector = panel->connector;
	struct drm_device *drm = panel->drm;
	unsigned int i, num = 0;
	static const u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;

	for (i = 0; i < ARRAY_SIZE(rpi_touchscreen_modes); i++) {
		const struct drm_display_mode *m = &rpi_touchscreen_modes[i];
		struct drm_display_mode *mode;

		mode = drm_mode_duplicate(drm, m);
		if (!mode) {
			/* Allocation failure for one mode: skip it, keep going. */
			dev_err(drm->dev, "failed to add mode %ux%u@%u\n",
				m->hdisplay, m->vdisplay, m->vrefresh);
			continue;
		}

		mode->type |= DRM_MODE_TYPE_DRIVER;

		if (i == 0)
			mode->type |= DRM_MODE_TYPE_PREFERRED;

		drm_mode_set_name(mode);

		drm_mode_probed_add(connector, mode);
		num++;
	}

	connector->display_info.bpc = 8;
	connector->display_info.width_mm = 154;
	connector->display_info.height_mm = 86;
	drm_display_info_set_bus_formats(&connector->display_info,
					 &bus_format, 1);

	return num;
}
364
/* drm_panel operations; prepare/unprepare have nothing to do here. */
static const struct drm_panel_funcs rpi_touchscreen_funcs = {
	.disable = rpi_touchscreen_disable,
	.unprepare = rpi_touchscreen_noop,
	.prepare = rpi_touchscreen_noop,
	.enable = rpi_touchscreen_enable,
	.get_modes = rpi_touchscreen_get_modes,
};
372
373static int rpi_touchscreen_probe(struct i2c_client *i2c,
374 const struct i2c_device_id *id)
375{
376 struct device *dev = &i2c->dev;
377 struct rpi_touchscreen *ts;
378 struct device_node *endpoint, *dsi_host_node;
379 struct mipi_dsi_host *host;
380 int ret, ver;
381 struct mipi_dsi_device_info info = {
382 .type = RPI_DSI_DRIVER_NAME,
383 .channel = 0,
384 .node = NULL,
385 };
386
387 ts = devm_kzalloc(dev, sizeof(*ts), GFP_KERNEL);
388 if (!ts)
389 return -ENOMEM;
390
391 i2c_set_clientdata(i2c, ts);
392
393 ts->i2c = i2c;
394
395 ver = rpi_touchscreen_i2c_read(ts, REG_ID);
396 if (ver < 0) {
397 dev_err(dev, "Atmel I2C read failed: %d\n", ver);
398 return -ENODEV;
399 }
400
401 switch (ver) {
402 case 0xde: /* ver 1 */
403 case 0xc3: /* ver 2 */
404 break;
405 default:
406 dev_err(dev, "Unknown Atmel firmware revision: 0x%02x\n", ver);
407 return -ENODEV;
408 }
409
410 /* Turn off at boot, so we can cleanly sequence powering on. */
411 rpi_touchscreen_i2c_write(ts, REG_POWERON, 0);
412
413 /* Look up the DSI host. It needs to probe before we do. */
414 endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
415 dsi_host_node = of_graph_get_remote_port_parent(endpoint);
416 host = of_find_mipi_dsi_host_by_node(dsi_host_node);
417 of_node_put(dsi_host_node);
418 if (!host) {
419 of_node_put(endpoint);
420 return -EPROBE_DEFER;
421 }
422
423 info.node = of_graph_get_remote_port(endpoint);
424 of_node_put(endpoint);
425
426 ts->dsi = mipi_dsi_device_register_full(host, &info);
427 if (IS_ERR(ts->dsi)) {
428 dev_err(dev, "DSI device registration failed: %ld\n",
429 PTR_ERR(ts->dsi));
430 return PTR_ERR(ts->dsi);
431 }
432
433 ts->base.dev = dev;
434 ts->base.funcs = &rpi_touchscreen_funcs;
435
436 /* This appears last, as it's what will unblock the DSI host
437 * driver's component bind function.
438 */
439 ret = drm_panel_add(&ts->base);
440 if (ret)
441 return ret;
442
443 return 0;
444}
445
446static int rpi_touchscreen_remove(struct i2c_client *i2c)
447{
448 struct rpi_touchscreen *ts = i2c_get_clientdata(i2c);
449
450 mipi_dsi_detach(ts->dsi);
451
452 drm_panel_remove(&ts->base);
453
454 mipi_dsi_device_unregister(ts->dsi);
455 kfree(ts->dsi);
456
457 return 0;
458}
459
/*
 * Probe for the DSI side of the device (registered from the I2C probe):
 * configure video mode with sync pulses, LP commands, RGB888, one data
 * lane, and attach to the host.
 */
static int rpi_touchscreen_dsi_probe(struct mipi_dsi_device *dsi)
{
	int ret;

	dsi->mode_flags = (MIPI_DSI_MODE_VIDEO |
			   MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
			   MIPI_DSI_MODE_LPM);
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->lanes = 1;

	ret = mipi_dsi_attach(dsi);

	if (ret)
		dev_err(&dsi->dev, "failed to attach dsi to host: %d\n", ret);

	return ret;
}
477
/* DSI driver matched by name against the device created in probe. */
static struct mipi_dsi_driver rpi_touchscreen_dsi_driver = {
	.driver.name = RPI_DSI_DRIVER_NAME,
	.probe = rpi_touchscreen_dsi_probe,
};
482
/* Device-tree match table for the I2C driver. */
static const struct of_device_id rpi_touchscreen_of_ids[] = {
	{ .compatible = "raspberrypi,7inch-touchscreen-panel" },
	{ } /* sentinel */
};
MODULE_DEVICE_TABLE(of, rpi_touchscreen_of_ids);
488
/* I2C driver for the Atmel controller; the primary probe entry point. */
static struct i2c_driver rpi_touchscreen_driver = {
	.driver = {
		.name = "rpi_touchscreen",
		.of_match_table = rpi_touchscreen_of_ids,
	},
	.probe = rpi_touchscreen_probe,
	.remove = rpi_touchscreen_remove,
};
497
498static int __init rpi_touchscreen_init(void)
499{
500 mipi_dsi_driver_register(&rpi_touchscreen_dsi_driver);
501 return i2c_add_driver(&rpi_touchscreen_driver);
502}
503module_init(rpi_touchscreen_init);
504
/* Module exit: unregister in reverse order of registration. */
static void __exit rpi_touchscreen_exit(void)
{
	i2c_del_driver(&rpi_touchscreen_driver);
	mipi_dsi_driver_unregister(&rpi_touchscreen_dsi_driver);
}
module_exit(rpi_touchscreen_exit);
511
512MODULE_AUTHOR("Eric Anholt <eric@anholt.net>");
513MODULE_DESCRIPTION("Raspberry Pi 7-inch touchscreen driver");
514MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index c31e660e35db..7d39ed63e5be 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1456,7 +1456,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
1456 header = radeon_get_ib_value(p, h_idx); 1456 header = radeon_get_ib_value(p, h_idx);
1457 crtc_id = radeon_get_ib_value(p, h_idx + 5); 1457 crtc_id = radeon_get_ib_value(p, h_idx + 5);
1458 reg = R100_CP_PACKET0_GET_REG(header); 1458 reg = R100_CP_PACKET0_GET_REG(header);
1459 crtc = drm_crtc_find(p->rdev->ddev, crtc_id); 1459 crtc = drm_crtc_find(p->rdev->ddev, p->filp, crtc_id);
1460 if (!crtc) { 1460 if (!crtc) {
1461 DRM_ERROR("cannot find crtc %d\n", crtc_id); 1461 DRM_ERROR("cannot find crtc %d\n", crtc_id);
1462 return -ENOENT; 1462 return -ENOENT;
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 97fd58e97043..c96b31950ca7 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -887,7 +887,7 @@ int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
887 crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1); 887 crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
888 reg = R600_CP_PACKET0_GET_REG(header); 888 reg = R600_CP_PACKET0_GET_REG(header);
889 889
890 crtc = drm_crtc_find(p->rdev->ddev, crtc_id); 890 crtc = drm_crtc_find(p->rdev->ddev, p->filp, crtc_id);
891 if (!crtc) { 891 if (!crtc) {
892 DRM_ERROR("cannot find crtc %d\n", crtc_id); 892 DRM_ERROR("cannot find crtc %d\n", crtc_id);
893 return -ENOENT; 893 return -ENOENT;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 2f642cbefd8e..59dcefb2df3b 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -263,7 +263,7 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c
263 if (connector->encoder_ids[i] == 0) 263 if (connector->encoder_ids[i] == 0)
264 break; 264 break;
265 265
266 encoder = drm_encoder_find(connector->dev, 266 encoder = drm_encoder_find(connector->dev, NULL,
267 connector->encoder_ids[i]); 267 connector->encoder_ids[i]);
268 if (!encoder) 268 if (!encoder)
269 continue; 269 continue;
@@ -290,7 +290,7 @@ static struct drm_encoder *radeon_find_encoder(struct drm_connector *connector,
290 if (connector->encoder_ids[i] == 0) 290 if (connector->encoder_ids[i] == 0)
291 break; 291 break;
292 292
293 encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]); 293 encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
294 if (!encoder) 294 if (!encoder)
295 continue; 295 continue;
296 296
@@ -404,7 +404,7 @@ static struct drm_encoder *radeon_best_single_encoder(struct drm_connector *conn
404 int enc_id = connector->encoder_ids[0]; 404 int enc_id = connector->encoder_ids[0];
405 /* pick the encoder ids */ 405 /* pick the encoder ids */
406 if (enc_id) 406 if (enc_id)
407 return drm_encoder_find(connector->dev, enc_id); 407 return drm_encoder_find(connector->dev, NULL, enc_id);
408 return NULL; 408 return NULL;
409} 409}
410 410
@@ -1368,7 +1368,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
1368 if (connector->encoder_ids[i] == 0) 1368 if (connector->encoder_ids[i] == 0)
1369 break; 1369 break;
1370 1370
1371 encoder = drm_encoder_find(connector->dev, 1371 encoder = drm_encoder_find(connector->dev, NULL,
1372 connector->encoder_ids[i]); 1372 connector->encoder_ids[i]);
1373 if (!encoder) 1373 if (!encoder)
1374 continue; 1374 continue;
@@ -1454,7 +1454,7 @@ static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
1454 if (connector->encoder_ids[i] == 0) 1454 if (connector->encoder_ids[i] == 0)
1455 break; 1455 break;
1456 1456
1457 encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]); 1457 encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
1458 if (!encoder) 1458 if (!encoder)
1459 continue; 1459 continue;
1460 1460
@@ -1473,7 +1473,7 @@ static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
1473 /* then check use digitial */ 1473 /* then check use digitial */
1474 /* pick the first one */ 1474 /* pick the first one */
1475 if (enc_id) 1475 if (enc_id)
1476 return drm_encoder_find(connector->dev, enc_id); 1476 return drm_encoder_find(connector->dev, NULL, enc_id);
1477 return NULL; 1477 return NULL;
1478} 1478}
1479 1479
@@ -1620,7 +1620,7 @@ u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn
1620 if (connector->encoder_ids[i] == 0) 1620 if (connector->encoder_ids[i] == 0)
1621 break; 1621 break;
1622 1622
1623 encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]); 1623 encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
1624 if (!encoder) 1624 if (!encoder)
1625 continue; 1625 continue;
1626 1626
@@ -1649,7 +1649,7 @@ static bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
1649 if (connector->encoder_ids[i] == 0) 1649 if (connector->encoder_ids[i] == 0)
1650 break; 1650 break;
1651 1651
1652 encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]); 1652 encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
1653 if (!encoder) 1653 if (!encoder)
1654 continue; 1654 continue;
1655 1655
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 0c31f0a27b9c..3c70c6224bd2 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -60,6 +60,7 @@ config ROCKCHIP_INNO_HDMI
60config ROCKCHIP_LVDS 60config ROCKCHIP_LVDS
61 bool "Rockchip LVDS support" 61 bool "Rockchip LVDS support"
62 depends on DRM_ROCKCHIP 62 depends on DRM_ROCKCHIP
63 depends on PINCTRL
63 help 64 help
64 Choose this option to enable support for Rockchip LVDS controllers. 65 Choose this option to enable support for Rockchip LVDS controllers.
65 Rockchip rk3288 SoC has LVDS TX Controller can be used, and it 66 Rockchip rk3288 SoC has LVDS TX Controller can be used, and it
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index d394a03632c4..735c9081202a 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -791,9 +791,8 @@ static const struct drm_encoder_funcs ltdc_encoder_funcs = {
791 .destroy = drm_encoder_cleanup, 791 .destroy = drm_encoder_cleanup,
792}; 792};
793 793
794static int ltdc_encoder_init(struct drm_device *ddev) 794static int ltdc_encoder_init(struct drm_device *ddev, struct drm_bridge *bridge)
795{ 795{
796 struct ltdc_device *ldev = ddev->dev_private;
797 struct drm_encoder *encoder; 796 struct drm_encoder *encoder;
798 int ret; 797 int ret;
799 798
@@ -807,7 +806,7 @@ static int ltdc_encoder_init(struct drm_device *ddev)
807 drm_encoder_init(ddev, encoder, &ltdc_encoder_funcs, 806 drm_encoder_init(ddev, encoder, &ltdc_encoder_funcs,
808 DRM_MODE_ENCODER_DPI, NULL); 807 DRM_MODE_ENCODER_DPI, NULL);
809 808
810 ret = drm_bridge_attach(encoder, ldev->bridge, NULL); 809 ret = drm_bridge_attach(encoder, bridge, NULL);
811 if (ret) { 810 if (ret) {
812 drm_encoder_cleanup(encoder); 811 drm_encoder_cleanup(encoder);
813 return -EINVAL; 812 return -EINVAL;
@@ -936,12 +935,9 @@ int ltdc_load(struct drm_device *ddev)
936 ret = PTR_ERR(bridge); 935 ret = PTR_ERR(bridge);
937 goto err; 936 goto err;
938 } 937 }
939 ldev->is_panel_bridge = true;
940 } 938 }
941 939
942 ldev->bridge = bridge; 940 ret = ltdc_encoder_init(ddev, bridge);
943
944 ret = ltdc_encoder_init(ddev);
945 if (ret) { 941 if (ret) {
946 DRM_ERROR("Failed to init encoder\n"); 942 DRM_ERROR("Failed to init encoder\n");
947 goto err; 943 goto err;
@@ -972,8 +968,7 @@ int ltdc_load(struct drm_device *ddev)
972 return 0; 968 return 0;
973 969
974err: 970err:
975 if (ldev->is_panel_bridge) 971 drm_panel_bridge_remove(bridge);
976 drm_panel_bridge_remove(bridge);
977 972
978 clk_disable_unprepare(ldev->pixel_clk); 973 clk_disable_unprepare(ldev->pixel_clk);
979 974
@@ -986,8 +981,7 @@ void ltdc_unload(struct drm_device *ddev)
986 981
987 DRM_DEBUG_DRIVER("\n"); 982 DRM_DEBUG_DRIVER("\n");
988 983
989 if (ldev->is_panel_bridge) 984 drm_of_panel_bridge_remove(ddev->dev->of_node, 0, 0);
990 drm_panel_bridge_remove(ldev->bridge);
991 985
992 clk_disable_unprepare(ldev->pixel_clk); 986 clk_disable_unprepare(ldev->pixel_clk);
993} 987}
diff --git a/drivers/gpu/drm/stm/ltdc.h b/drivers/gpu/drm/stm/ltdc.h
index bc6d6f6419a9..ae437557d715 100644
--- a/drivers/gpu/drm/stm/ltdc.h
+++ b/drivers/gpu/drm/stm/ltdc.h
@@ -24,8 +24,6 @@ struct ltdc_device {
24 struct drm_fbdev_cma *fbdev; 24 struct drm_fbdev_cma *fbdev;
25 void __iomem *regs; 25 void __iomem *regs;
26 struct clk *pixel_clk; /* lcd pixel clock */ 26 struct clk *pixel_clk; /* lcd pixel clock */
27 struct drm_bridge *bridge;
28 bool is_panel_bridge;
29 struct mutex err_lock; /* protecting error_status */ 27 struct mutex err_lock; /* protecting error_status */
30 struct ltdc_caps caps; 28 struct ltdc_caps caps;
31 u32 error_status; 29 u32 error_status;
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index ec5943627aa5..4fefd8add714 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -209,22 +209,11 @@ int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
209{ 209{
210 struct drm_plane_state *state = plane->state; 210 struct drm_plane_state *state = plane->state;
211 struct drm_framebuffer *fb = state->fb; 211 struct drm_framebuffer *fb = state->fb;
212 struct drm_gem_cma_object *gem;
213 u32 lo_paddr, hi_paddr; 212 u32 lo_paddr, hi_paddr;
214 dma_addr_t paddr; 213 dma_addr_t paddr;
215 int bpp;
216
217 /* Get the physical address of the buffer in memory */
218 gem = drm_fb_cma_get_gem_obj(fb, 0);
219
220 DRM_DEBUG_DRIVER("Using GEM @ %pad\n", &gem->paddr);
221
222 /* Compute the start of the displayed memory */
223 bpp = fb->format->cpp[0];
224 paddr = gem->paddr + fb->offsets[0];
225 paddr += (state->src_x >> 16) * bpp;
226 paddr += (state->src_y >> 16) * fb->pitches[0];
227 214
215 /* Get the start of the displayed memory */
216 paddr = drm_fb_cma_get_gem_addr(fb, state, 0);
228 DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &paddr); 217 DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &paddr);
229 218
230 /* Write the 32 lower bits of the address (in bits) */ 219 /* Write the 32 lower bits of the address (in bits) */
@@ -369,13 +358,6 @@ static int sun4i_backend_bind(struct device *dev, struct device *master,
369 if (IS_ERR(regs)) 358 if (IS_ERR(regs))
370 return PTR_ERR(regs); 359 return PTR_ERR(regs);
371 360
372 backend->engine.regs = devm_regmap_init_mmio(dev, regs,
373 &sun4i_backend_regmap_config);
374 if (IS_ERR(backend->engine.regs)) {
375 dev_err(dev, "Couldn't create the backend regmap\n");
376 return PTR_ERR(backend->engine.regs);
377 }
378
379 backend->reset = devm_reset_control_get(dev, NULL); 361 backend->reset = devm_reset_control_get(dev, NULL);
380 if (IS_ERR(backend->reset)) { 362 if (IS_ERR(backend->reset)) {
381 dev_err(dev, "Couldn't get our reset line\n"); 363 dev_err(dev, "Couldn't get our reset line\n");
@@ -421,9 +403,23 @@ static int sun4i_backend_bind(struct device *dev, struct device *master,
421 } 403 }
422 } 404 }
423 405
406 backend->engine.regs = devm_regmap_init_mmio(dev, regs,
407 &sun4i_backend_regmap_config);
408 if (IS_ERR(backend->engine.regs)) {
409 dev_err(dev, "Couldn't create the backend regmap\n");
410 return PTR_ERR(backend->engine.regs);
411 }
412
424 list_add_tail(&backend->engine.list, &drv->engine_list); 413 list_add_tail(&backend->engine.list, &drv->engine_list);
425 414
426 /* Reset the registers */ 415 /*
416 * Many of the backend's layer configuration registers have
417 * undefined default values. This poses a risk as we use
418 * regmap_update_bits in some places, and don't overwrite
419 * the whole register.
420 *
421 * Clear the registers here to have something predictable.
422 */
427 for (i = 0x800; i < 0x1000; i += 4) 423 for (i = 0x800; i < 0x1000; i += 4)
428 regmap_write(backend->engine.regs, i, 0); 424 regmap_write(backend->engine.regs, i, 0);
429 425
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index a2012638d5f7..b5879d4620d8 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -226,6 +226,18 @@ struct endpoint_list {
226 struct list_head list; 226 struct list_head list;
227}; 227};
228 228
229static bool node_is_in_list(struct list_head *endpoints,
230 struct device_node *node)
231{
232 struct endpoint_list *endpoint;
233
234 list_for_each_entry(endpoint, endpoints, list)
235 if (endpoint->node == node)
236 return true;
237
238 return false;
239}
240
229static int sun4i_drv_add_endpoints(struct device *dev, 241static int sun4i_drv_add_endpoints(struct device *dev,
230 struct list_head *endpoints, 242 struct list_head *endpoints,
231 struct component_match **match, 243 struct component_match **match,
@@ -292,6 +304,10 @@ static int sun4i_drv_add_endpoints(struct device *dev,
292 } 304 }
293 } 305 }
294 306
307 /* skip downstream node if it is already in the queue */
308 if (node_is_in_list(endpoints, remote))
309 continue;
310
295 /* Add downstream nodes to the queue */ 311 /* Add downstream nodes to the queue */
296 endpoint = kzalloc(sizeof(*endpoint), GFP_KERNEL); 312 endpoint = kzalloc(sizeof(*endpoint), GFP_KERNEL);
297 if (!endpoint) { 313 if (!endpoint) {
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi.h b/drivers/gpu/drm/sun4i/sun4i_hdmi.h
index a1f8cba251a2..b685ee11623d 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi.h
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi.h
@@ -14,6 +14,7 @@
14 14
15#include <drm/drm_connector.h> 15#include <drm/drm_connector.h>
16#include <drm/drm_encoder.h> 16#include <drm/drm_encoder.h>
17#include <linux/regmap.h>
17 18
18#include <media/cec-pin.h> 19#include <media/cec-pin.h>
19 20
@@ -58,16 +59,24 @@
58#define SUN4I_HDMI_PAD_CTRL0_TXEN BIT(23) 59#define SUN4I_HDMI_PAD_CTRL0_TXEN BIT(23)
59 60
60#define SUN4I_HDMI_PAD_CTRL1_REG 0x204 61#define SUN4I_HDMI_PAD_CTRL1_REG 0x204
62#define SUN4I_HDMI_PAD_CTRL1_UNKNOWN BIT(24) /* set on A31 */
61#define SUN4I_HDMI_PAD_CTRL1_AMP_OPT BIT(23) 63#define SUN4I_HDMI_PAD_CTRL1_AMP_OPT BIT(23)
62#define SUN4I_HDMI_PAD_CTRL1_AMPCK_OPT BIT(22) 64#define SUN4I_HDMI_PAD_CTRL1_AMPCK_OPT BIT(22)
63#define SUN4I_HDMI_PAD_CTRL1_EMP_OPT BIT(20) 65#define SUN4I_HDMI_PAD_CTRL1_EMP_OPT BIT(20)
64#define SUN4I_HDMI_PAD_CTRL1_EMPCK_OPT BIT(19) 66#define SUN4I_HDMI_PAD_CTRL1_EMPCK_OPT BIT(19)
67#define SUN4I_HDMI_PAD_CTRL1_PWSCK BIT(18)
68#define SUN4I_HDMI_PAD_CTRL1_PWSDT BIT(17)
65#define SUN4I_HDMI_PAD_CTRL1_REG_DEN BIT(15) 69#define SUN4I_HDMI_PAD_CTRL1_REG_DEN BIT(15)
66#define SUN4I_HDMI_PAD_CTRL1_REG_DENCK BIT(14) 70#define SUN4I_HDMI_PAD_CTRL1_REG_DENCK BIT(14)
67#define SUN4I_HDMI_PAD_CTRL1_REG_EMP(n) (((n) & 7) << 10) 71#define SUN4I_HDMI_PAD_CTRL1_REG_EMP(n) (((n) & 7) << 10)
68#define SUN4I_HDMI_PAD_CTRL1_HALVE_CLK BIT(6) 72#define SUN4I_HDMI_PAD_CTRL1_HALVE_CLK BIT(6)
69#define SUN4I_HDMI_PAD_CTRL1_REG_AMP(n) (((n) & 7) << 3) 73#define SUN4I_HDMI_PAD_CTRL1_REG_AMP(n) (((n) & 7) << 3)
70 74
75/* These bits seem to invert the TMDS data channels */
76#define SUN4I_HDMI_PAD_CTRL1_INVERT_R BIT(2)
77#define SUN4I_HDMI_PAD_CTRL1_INVERT_G BIT(1)
78#define SUN4I_HDMI_PAD_CTRL1_INVERT_B BIT(0)
79
71#define SUN4I_HDMI_PLL_CTRL_REG 0x208 80#define SUN4I_HDMI_PLL_CTRL_REG 0x208
72#define SUN4I_HDMI_PLL_CTRL_PLL_EN BIT(31) 81#define SUN4I_HDMI_PLL_CTRL_PLL_EN BIT(31)
73#define SUN4I_HDMI_PLL_CTRL_BWS BIT(30) 82#define SUN4I_HDMI_PLL_CTRL_BWS BIT(30)
@@ -152,21 +161,106 @@
152 161
153#define SUN4I_HDMI_DDC_FIFO_SIZE 16 162#define SUN4I_HDMI_DDC_FIFO_SIZE 16
154 163
164/* A31 specific */
165#define SUN6I_HDMI_DDC_CTRL_REG 0x500
166#define SUN6I_HDMI_DDC_CTRL_RESET BIT(31)
167#define SUN6I_HDMI_DDC_CTRL_START_CMD BIT(27)
168#define SUN6I_HDMI_DDC_CTRL_SDA_ENABLE BIT(6)
169#define SUN6I_HDMI_DDC_CTRL_SCL_ENABLE BIT(4)
170#define SUN6I_HDMI_DDC_CTRL_ENABLE BIT(0)
171
172#define SUN6I_HDMI_DDC_CMD_REG 0x508
173#define SUN6I_HDMI_DDC_CMD_BYTE_COUNT(count) ((count) << 16)
174/* command types in lower 3 bits are the same as sun4i */
175
176#define SUN6I_HDMI_DDC_ADDR_REG 0x50c
177#define SUN6I_HDMI_DDC_ADDR_SEGMENT(seg) (((seg) & 0xff) << 24)
178#define SUN6I_HDMI_DDC_ADDR_EDDC(addr) (((addr) & 0xff) << 16)
179#define SUN6I_HDMI_DDC_ADDR_OFFSET(off) (((off) & 0xff) << 8)
180#define SUN6I_HDMI_DDC_ADDR_SLAVE(addr) (((addr) & 0xff) << 1)
181
182#define SUN6I_HDMI_DDC_INT_STATUS_REG 0x514
183#define SUN6I_HDMI_DDC_INT_STATUS_TIMEOUT BIT(8)
184/* lower 8 bits are the same as sun4i */
185
186#define SUN6I_HDMI_DDC_FIFO_CTRL_REG 0x518
187#define SUN6I_HDMI_DDC_FIFO_CTRL_CLEAR BIT(15)
188/* lower 9 bits are the same as sun4i */
189
190#define SUN6I_HDMI_DDC_CLK_REG 0x520
191/* DDC CLK bit fields are the same, but the formula is not */
192
193#define SUN6I_HDMI_DDC_FIFO_DATA_REG 0x580
194
155enum sun4i_hdmi_pkt_type { 195enum sun4i_hdmi_pkt_type {
156 SUN4I_HDMI_PKT_AVI = 2, 196 SUN4I_HDMI_PKT_AVI = 2,
157 SUN4I_HDMI_PKT_END = 15, 197 SUN4I_HDMI_PKT_END = 15,
158}; 198};
159 199
200struct sun4i_hdmi_variant {
201 bool has_ddc_parent_clk;
202 bool has_reset_control;
203
204 u32 pad_ctrl0_init_val;
205 u32 pad_ctrl1_init_val;
206 u32 pll_ctrl_init_val;
207
208 struct reg_field ddc_clk_reg;
209 u8 ddc_clk_pre_divider;
210 u8 ddc_clk_m_offset;
211
212 u8 tmds_clk_div_offset;
213
214 /* Register fields for I2C adapter */
215 struct reg_field field_ddc_en;
216 struct reg_field field_ddc_start;
217 struct reg_field field_ddc_reset;
218 struct reg_field field_ddc_addr_reg;
219 struct reg_field field_ddc_slave_addr;
220 struct reg_field field_ddc_int_mask;
221 struct reg_field field_ddc_int_status;
222 struct reg_field field_ddc_fifo_clear;
223 struct reg_field field_ddc_fifo_rx_thres;
224 struct reg_field field_ddc_fifo_tx_thres;
225 struct reg_field field_ddc_byte_count;
226 struct reg_field field_ddc_cmd;
227 struct reg_field field_ddc_sda_en;
228 struct reg_field field_ddc_sck_en;
229
230 /* DDC FIFO register offset */
231 u32 ddc_fifo_reg;
232
233 /*
234 * DDC FIFO threshold boundary conditions
235 *
236 * This is used to cope with the threshold boundary condition
237 * being slightly different on sun5i and sun6i.
238 *
239 * On sun5i the threshold is exclusive, i.e. does not include,
240 * the value of the threshold. ( > for RX; < for TX )
241 * On sun6i the threshold is inclusive, i.e. includes, the
242 * value of the threshold. ( >= for RX; <= for TX )
243 */
244 bool ddc_fifo_thres_incl;
245
246 bool ddc_fifo_has_dir;
247};
248
160struct sun4i_hdmi { 249struct sun4i_hdmi {
161 struct drm_connector connector; 250 struct drm_connector connector;
162 struct drm_encoder encoder; 251 struct drm_encoder encoder;
163 struct device *dev; 252 struct device *dev;
164 253
165 void __iomem *base; 254 void __iomem *base;
255 struct regmap *regmap;
256
257 /* Reset control */
258 struct reset_control *reset;
166 259
167 /* Parent clocks */ 260 /* Parent clocks */
168 struct clk *bus_clk; 261 struct clk *bus_clk;
169 struct clk *mod_clk; 262 struct clk *mod_clk;
263 struct clk *ddc_parent_clk;
170 struct clk *pll0_clk; 264 struct clk *pll0_clk;
171 struct clk *pll1_clk; 265 struct clk *pll1_clk;
172 266
@@ -176,10 +270,28 @@ struct sun4i_hdmi {
176 270
177 struct i2c_adapter *i2c; 271 struct i2c_adapter *i2c;
178 272
273 /* Regmap fields for I2C adapter */
274 struct regmap_field *field_ddc_en;
275 struct regmap_field *field_ddc_start;
276 struct regmap_field *field_ddc_reset;
277 struct regmap_field *field_ddc_addr_reg;
278 struct regmap_field *field_ddc_slave_addr;
279 struct regmap_field *field_ddc_int_mask;
280 struct regmap_field *field_ddc_int_status;
281 struct regmap_field *field_ddc_fifo_clear;
282 struct regmap_field *field_ddc_fifo_rx_thres;
283 struct regmap_field *field_ddc_fifo_tx_thres;
284 struct regmap_field *field_ddc_byte_count;
285 struct regmap_field *field_ddc_cmd;
286 struct regmap_field *field_ddc_sda_en;
287 struct regmap_field *field_ddc_sck_en;
288
179 struct sun4i_drv *drv; 289 struct sun4i_drv *drv;
180 290
181 bool hdmi_monitor; 291 bool hdmi_monitor;
182 struct cec_adapter *cec_adap; 292 struct cec_adapter *cec_adap;
293
294 const struct sun4i_hdmi_variant *variant;
183}; 295};
184 296
185int sun4i_ddc_create(struct sun4i_hdmi *hdmi, struct clk *clk); 297int sun4i_ddc_create(struct sun4i_hdmi *hdmi, struct clk *clk);
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c
index 4692e8c345ed..04f85b1cf922 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c
@@ -11,6 +11,7 @@
11 */ 11 */
12 12
13#include <linux/clk-provider.h> 13#include <linux/clk-provider.h>
14#include <linux/regmap.h>
14 15
15#include "sun4i_tcon.h" 16#include "sun4i_tcon.h"
16#include "sun4i_hdmi.h" 17#include "sun4i_hdmi.h"
@@ -18,6 +19,9 @@
18struct sun4i_ddc { 19struct sun4i_ddc {
19 struct clk_hw hw; 20 struct clk_hw hw;
20 struct sun4i_hdmi *hdmi; 21 struct sun4i_hdmi *hdmi;
22 struct regmap_field *reg;
23 u8 pre_div;
24 u8 m_offset;
21}; 25};
22 26
23static inline struct sun4i_ddc *hw_to_ddc(struct clk_hw *hw) 27static inline struct sun4i_ddc *hw_to_ddc(struct clk_hw *hw)
@@ -27,6 +31,8 @@ static inline struct sun4i_ddc *hw_to_ddc(struct clk_hw *hw)
27 31
28static unsigned long sun4i_ddc_calc_divider(unsigned long rate, 32static unsigned long sun4i_ddc_calc_divider(unsigned long rate,
29 unsigned long parent_rate, 33 unsigned long parent_rate,
34 const u8 pre_div,
35 const u8 m_offset,
30 u8 *m, u8 *n) 36 u8 *m, u8 *n)
31{ 37{
32 unsigned long best_rate = 0; 38 unsigned long best_rate = 0;
@@ -36,7 +42,8 @@ static unsigned long sun4i_ddc_calc_divider(unsigned long rate,
36 for (_n = 0; _n < 8; _n++) { 42 for (_n = 0; _n < 8; _n++) {
37 unsigned long tmp_rate; 43 unsigned long tmp_rate;
38 44
39 tmp_rate = (((parent_rate / 2) / 10) >> _n) / (_m + 1); 45 tmp_rate = (((parent_rate / pre_div) / 10) >> _n) /
46 (_m + m_offset);
40 47
41 if (tmp_rate > rate) 48 if (tmp_rate > rate)
42 continue; 49 continue;
@@ -60,21 +67,25 @@ static unsigned long sun4i_ddc_calc_divider(unsigned long rate,
60static long sun4i_ddc_round_rate(struct clk_hw *hw, unsigned long rate, 67static long sun4i_ddc_round_rate(struct clk_hw *hw, unsigned long rate,
61 unsigned long *prate) 68 unsigned long *prate)
62{ 69{
63 return sun4i_ddc_calc_divider(rate, *prate, NULL, NULL); 70 struct sun4i_ddc *ddc = hw_to_ddc(hw);
71
72 return sun4i_ddc_calc_divider(rate, *prate, ddc->pre_div,
73 ddc->m_offset, NULL, NULL);
64} 74}
65 75
66static unsigned long sun4i_ddc_recalc_rate(struct clk_hw *hw, 76static unsigned long sun4i_ddc_recalc_rate(struct clk_hw *hw,
67 unsigned long parent_rate) 77 unsigned long parent_rate)
68{ 78{
69 struct sun4i_ddc *ddc = hw_to_ddc(hw); 79 struct sun4i_ddc *ddc = hw_to_ddc(hw);
70 u32 reg; 80 unsigned int reg;
71 u8 m, n; 81 u8 m, n;
72 82
73 reg = readl(ddc->hdmi->base + SUN4I_HDMI_DDC_CLK_REG); 83 regmap_field_read(ddc->reg, &reg);
74 m = (reg >> 3) & 0x7; 84 m = (reg >> 3) & 0xf;
75 n = reg & 0x7; 85 n = reg & 0x7;
76 86
77 return (((parent_rate / 2) / 10) >> n) / (m + 1); 87 return (((parent_rate / ddc->pre_div) / 10) >> n) /
88 (m + ddc->m_offset);
78} 89}
79 90
80static int sun4i_ddc_set_rate(struct clk_hw *hw, unsigned long rate, 91static int sun4i_ddc_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -83,10 +94,12 @@ static int sun4i_ddc_set_rate(struct clk_hw *hw, unsigned long rate,
83 struct sun4i_ddc *ddc = hw_to_ddc(hw); 94 struct sun4i_ddc *ddc = hw_to_ddc(hw);
84 u8 div_m, div_n; 95 u8 div_m, div_n;
85 96
86 sun4i_ddc_calc_divider(rate, parent_rate, &div_m, &div_n); 97 sun4i_ddc_calc_divider(rate, parent_rate, ddc->pre_div,
98 ddc->m_offset, &div_m, &div_n);
87 99
88 writel(SUN4I_HDMI_DDC_CLK_M(div_m) | SUN4I_HDMI_DDC_CLK_N(div_n), 100 regmap_field_write(ddc->reg,
89 ddc->hdmi->base + SUN4I_HDMI_DDC_CLK_REG); 101 SUN4I_HDMI_DDC_CLK_M(div_m) |
102 SUN4I_HDMI_DDC_CLK_N(div_n));
90 103
91 return 0; 104 return 0;
92} 105}
@@ -111,6 +124,11 @@ int sun4i_ddc_create(struct sun4i_hdmi *hdmi, struct clk *parent)
111 if (!ddc) 124 if (!ddc)
112 return -ENOMEM; 125 return -ENOMEM;
113 126
127 ddc->reg = devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
128 hdmi->variant->ddc_clk_reg);
129 if (IS_ERR(ddc->reg))
130 return PTR_ERR(ddc->reg);
131
114 init.name = "hdmi-ddc"; 132 init.name = "hdmi-ddc";
115 init.ops = &sun4i_ddc_ops; 133 init.ops = &sun4i_ddc_ops;
116 init.parent_names = &parent_name; 134 init.parent_names = &parent_name;
@@ -118,6 +136,8 @@ int sun4i_ddc_create(struct sun4i_hdmi *hdmi, struct clk *parent)
118 136
119 ddc->hdmi = hdmi; 137 ddc->hdmi = hdmi;
120 ddc->hw.init = &init; 138 ddc->hw.init = &init;
139 ddc->pre_div = hdmi->variant->ddc_clk_pre_divider;
140 ddc->m_offset = hdmi->variant->ddc_clk_m_offset;
121 141
122 hdmi->ddc_clk = devm_clk_register(hdmi->dev, &ddc->hw); 142 hdmi->ddc_clk = devm_clk_register(hdmi->dev, &ddc->hw);
123 if (IS_ERR(hdmi->ddc_clk)) 143 if (IS_ERR(hdmi->ddc_clk))
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index 9ea6cd5a1370..6ca6e6a74c4a 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -20,8 +20,11 @@
20#include <linux/clk.h> 20#include <linux/clk.h>
21#include <linux/component.h> 21#include <linux/component.h>
22#include <linux/iopoll.h> 22#include <linux/iopoll.h>
23#include <linux/of_device.h>
23#include <linux/platform_device.h> 24#include <linux/platform_device.h>
24#include <linux/pm_runtime.h> 25#include <linux/pm_runtime.h>
26#include <linux/regmap.h>
27#include <linux/reset.h>
25 28
26#include "sun4i_backend.h" 29#include "sun4i_backend.h"
27#include "sun4i_crtc.h" 30#include "sun4i_crtc.h"
@@ -141,6 +144,22 @@ static void sun4i_hdmi_mode_set(struct drm_encoder *encoder,
141 writel(SUN4I_HDMI_UNKNOWN_INPUT_SYNC, 144 writel(SUN4I_HDMI_UNKNOWN_INPUT_SYNC,
142 hdmi->base + SUN4I_HDMI_UNKNOWN_REG); 145 hdmi->base + SUN4I_HDMI_UNKNOWN_REG);
143 146
147 /*
148 * Setup output pad (?) controls
149 *
150 * This is done here instead of at probe/bind time because
151 * the controller seems to toggle some of the bits on its own.
152 *
153 * We can't just initialize the register there, we need to
154 * protect the clock bits that have already been read out and
155 * cached by the clock framework.
156 */
157 val = readl(hdmi->base + SUN4I_HDMI_PAD_CTRL1_REG);
158 val &= SUN4I_HDMI_PAD_CTRL1_HALVE_CLK;
159 val |= hdmi->variant->pad_ctrl1_init_val;
160 writel(val, hdmi->base + SUN4I_HDMI_PAD_CTRL1_REG);
161 val = readl(hdmi->base + SUN4I_HDMI_PAD_CTRL1_REG);
162
144 /* Setup timing registers */ 163 /* Setup timing registers */
145 writel(SUN4I_HDMI_VID_TIMING_X(mode->hdisplay) | 164 writel(SUN4I_HDMI_VID_TIMING_X(mode->hdisplay) |
146 SUN4I_HDMI_VID_TIMING_Y(mode->vdisplay), 165 SUN4I_HDMI_VID_TIMING_Y(mode->vdisplay),
@@ -267,6 +286,124 @@ static const struct cec_pin_ops sun4i_hdmi_cec_pin_ops = {
267}; 286};
268#endif 287#endif
269 288
289#define SUN4I_HDMI_PAD_CTRL1_MASK (GENMASK(24, 7) | GENMASK(5, 0))
290#define SUN4I_HDMI_PLL_CTRL_MASK (GENMASK(31, 8) | GENMASK(3, 0))
291
292static const struct sun4i_hdmi_variant sun5i_variant = {
293 .pad_ctrl0_init_val = SUN4I_HDMI_PAD_CTRL0_TXEN |
294 SUN4I_HDMI_PAD_CTRL0_CKEN |
295 SUN4I_HDMI_PAD_CTRL0_PWENG |
296 SUN4I_HDMI_PAD_CTRL0_PWEND |
297 SUN4I_HDMI_PAD_CTRL0_PWENC |
298 SUN4I_HDMI_PAD_CTRL0_LDODEN |
299 SUN4I_HDMI_PAD_CTRL0_LDOCEN |
300 SUN4I_HDMI_PAD_CTRL0_BIASEN,
301 .pad_ctrl1_init_val = SUN4I_HDMI_PAD_CTRL1_REG_AMP(6) |
302 SUN4I_HDMI_PAD_CTRL1_REG_EMP(2) |
303 SUN4I_HDMI_PAD_CTRL1_REG_DENCK |
304 SUN4I_HDMI_PAD_CTRL1_REG_DEN |
305 SUN4I_HDMI_PAD_CTRL1_EMPCK_OPT |
306 SUN4I_HDMI_PAD_CTRL1_EMP_OPT |
307 SUN4I_HDMI_PAD_CTRL1_AMPCK_OPT |
308 SUN4I_HDMI_PAD_CTRL1_AMP_OPT,
309 .pll_ctrl_init_val = SUN4I_HDMI_PLL_CTRL_VCO_S(8) |
310 SUN4I_HDMI_PLL_CTRL_CS(7) |
311 SUN4I_HDMI_PLL_CTRL_CP_S(15) |
312 SUN4I_HDMI_PLL_CTRL_S(7) |
313 SUN4I_HDMI_PLL_CTRL_VCO_GAIN(4) |
314 SUN4I_HDMI_PLL_CTRL_SDIV2 |
315 SUN4I_HDMI_PLL_CTRL_LDO2_EN |
316 SUN4I_HDMI_PLL_CTRL_LDO1_EN |
317 SUN4I_HDMI_PLL_CTRL_HV_IS_33 |
318 SUN4I_HDMI_PLL_CTRL_BWS |
319 SUN4I_HDMI_PLL_CTRL_PLL_EN,
320
321 .ddc_clk_reg = REG_FIELD(SUN4I_HDMI_DDC_CLK_REG, 0, 6),
322 .ddc_clk_pre_divider = 2,
323 .ddc_clk_m_offset = 1,
324
325 .field_ddc_en = REG_FIELD(SUN4I_HDMI_DDC_CTRL_REG, 31, 31),
326 .field_ddc_start = REG_FIELD(SUN4I_HDMI_DDC_CTRL_REG, 30, 30),
327 .field_ddc_reset = REG_FIELD(SUN4I_HDMI_DDC_CTRL_REG, 0, 0),
328 .field_ddc_addr_reg = REG_FIELD(SUN4I_HDMI_DDC_ADDR_REG, 0, 31),
329 .field_ddc_slave_addr = REG_FIELD(SUN4I_HDMI_DDC_ADDR_REG, 0, 6),
330 .field_ddc_int_status = REG_FIELD(SUN4I_HDMI_DDC_INT_STATUS_REG, 0, 8),
331 .field_ddc_fifo_clear = REG_FIELD(SUN4I_HDMI_DDC_FIFO_CTRL_REG, 31, 31),
332 .field_ddc_fifo_rx_thres = REG_FIELD(SUN4I_HDMI_DDC_FIFO_CTRL_REG, 4, 7),
333 .field_ddc_fifo_tx_thres = REG_FIELD(SUN4I_HDMI_DDC_FIFO_CTRL_REG, 0, 3),
334 .field_ddc_byte_count = REG_FIELD(SUN4I_HDMI_DDC_BYTE_COUNT_REG, 0, 9),
335 .field_ddc_cmd = REG_FIELD(SUN4I_HDMI_DDC_CMD_REG, 0, 2),
336 .field_ddc_sda_en = REG_FIELD(SUN4I_HDMI_DDC_LINE_CTRL_REG, 9, 9),
337 .field_ddc_sck_en = REG_FIELD(SUN4I_HDMI_DDC_LINE_CTRL_REG, 8, 8),
338
339 .ddc_fifo_reg = SUN4I_HDMI_DDC_FIFO_DATA_REG,
340 .ddc_fifo_has_dir = true,
341};
342
343static const struct sun4i_hdmi_variant sun6i_variant = {
344 .has_ddc_parent_clk = true,
345 .has_reset_control = true,
346 .pad_ctrl0_init_val = 0xff |
347 SUN4I_HDMI_PAD_CTRL0_TXEN |
348 SUN4I_HDMI_PAD_CTRL0_CKEN |
349 SUN4I_HDMI_PAD_CTRL0_PWENG |
350 SUN4I_HDMI_PAD_CTRL0_PWEND |
351 SUN4I_HDMI_PAD_CTRL0_PWENC |
352 SUN4I_HDMI_PAD_CTRL0_LDODEN |
353 SUN4I_HDMI_PAD_CTRL0_LDOCEN,
354 .pad_ctrl1_init_val = SUN4I_HDMI_PAD_CTRL1_REG_AMP(6) |
355 SUN4I_HDMI_PAD_CTRL1_REG_EMP(4) |
356 SUN4I_HDMI_PAD_CTRL1_REG_DENCK |
357 SUN4I_HDMI_PAD_CTRL1_REG_DEN |
358 SUN4I_HDMI_PAD_CTRL1_EMPCK_OPT |
359 SUN4I_HDMI_PAD_CTRL1_EMP_OPT |
360 SUN4I_HDMI_PAD_CTRL1_PWSDT |
361 SUN4I_HDMI_PAD_CTRL1_PWSCK |
362 SUN4I_HDMI_PAD_CTRL1_AMPCK_OPT |
363 SUN4I_HDMI_PAD_CTRL1_AMP_OPT |
364 SUN4I_HDMI_PAD_CTRL1_UNKNOWN,
365 .pll_ctrl_init_val = SUN4I_HDMI_PLL_CTRL_VCO_S(8) |
366 SUN4I_HDMI_PLL_CTRL_CS(3) |
367 SUN4I_HDMI_PLL_CTRL_CP_S(10) |
368 SUN4I_HDMI_PLL_CTRL_S(4) |
369 SUN4I_HDMI_PLL_CTRL_VCO_GAIN(4) |
370 SUN4I_HDMI_PLL_CTRL_SDIV2 |
371 SUN4I_HDMI_PLL_CTRL_LDO2_EN |
372 SUN4I_HDMI_PLL_CTRL_LDO1_EN |
373 SUN4I_HDMI_PLL_CTRL_HV_IS_33 |
374 SUN4I_HDMI_PLL_CTRL_PLL_EN,
375
376 .ddc_clk_reg = REG_FIELD(SUN6I_HDMI_DDC_CLK_REG, 0, 6),
377 .ddc_clk_pre_divider = 1,
378 .ddc_clk_m_offset = 2,
379
380 .tmds_clk_div_offset = 1,
381
382 .field_ddc_en = REG_FIELD(SUN6I_HDMI_DDC_CTRL_REG, 0, 0),
383 .field_ddc_start = REG_FIELD(SUN6I_HDMI_DDC_CTRL_REG, 27, 27),
384 .field_ddc_reset = REG_FIELD(SUN6I_HDMI_DDC_CTRL_REG, 31, 31),
385 .field_ddc_addr_reg = REG_FIELD(SUN6I_HDMI_DDC_ADDR_REG, 1, 31),
386 .field_ddc_slave_addr = REG_FIELD(SUN6I_HDMI_DDC_ADDR_REG, 1, 7),
387 .field_ddc_int_status = REG_FIELD(SUN6I_HDMI_DDC_INT_STATUS_REG, 0, 8),
388 .field_ddc_fifo_clear = REG_FIELD(SUN6I_HDMI_DDC_FIFO_CTRL_REG, 18, 18),
389 .field_ddc_fifo_rx_thres = REG_FIELD(SUN6I_HDMI_DDC_FIFO_CTRL_REG, 4, 7),
390 .field_ddc_fifo_tx_thres = REG_FIELD(SUN6I_HDMI_DDC_FIFO_CTRL_REG, 0, 3),
391 .field_ddc_byte_count = REG_FIELD(SUN6I_HDMI_DDC_CMD_REG, 16, 25),
392 .field_ddc_cmd = REG_FIELD(SUN6I_HDMI_DDC_CMD_REG, 0, 2),
393 .field_ddc_sda_en = REG_FIELD(SUN6I_HDMI_DDC_CTRL_REG, 6, 6),
394 .field_ddc_sck_en = REG_FIELD(SUN6I_HDMI_DDC_CTRL_REG, 4, 4),
395
396 .ddc_fifo_reg = SUN6I_HDMI_DDC_FIFO_DATA_REG,
397 .ddc_fifo_thres_incl = true,
398};
399
400static const struct regmap_config sun4i_hdmi_regmap_config = {
401 .reg_bits = 32,
402 .val_bits = 32,
403 .reg_stride = 4,
404 .max_register = 0x580,
405};
406
270static int sun4i_hdmi_bind(struct device *dev, struct device *master, 407static int sun4i_hdmi_bind(struct device *dev, struct device *master,
271 void *data) 408 void *data)
272{ 409{
@@ -285,6 +422,10 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
285 hdmi->dev = dev; 422 hdmi->dev = dev;
286 hdmi->drv = drv; 423 hdmi->drv = drv;
287 424
425 hdmi->variant = of_device_get_match_data(dev);
426 if (!hdmi->variant)
427 return -EINVAL;
428
288 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 429 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
289 hdmi->base = devm_ioremap_resource(dev, res); 430 hdmi->base = devm_ioremap_resource(dev, res);
290 if (IS_ERR(hdmi->base)) { 431 if (IS_ERR(hdmi->base)) {
@@ -292,77 +433,87 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
292 return PTR_ERR(hdmi->base); 433 return PTR_ERR(hdmi->base);
293 } 434 }
294 435
436 if (hdmi->variant->has_reset_control) {
437 hdmi->reset = devm_reset_control_get(dev, NULL);
438 if (IS_ERR(hdmi->reset)) {
439 dev_err(dev, "Couldn't get the HDMI reset control\n");
440 return PTR_ERR(hdmi->reset);
441 }
442
443 ret = reset_control_deassert(hdmi->reset);
444 if (ret) {
445 dev_err(dev, "Couldn't deassert HDMI reset\n");
446 return ret;
447 }
448 }
449
295 hdmi->bus_clk = devm_clk_get(dev, "ahb"); 450 hdmi->bus_clk = devm_clk_get(dev, "ahb");
296 if (IS_ERR(hdmi->bus_clk)) { 451 if (IS_ERR(hdmi->bus_clk)) {
297 dev_err(dev, "Couldn't get the HDMI bus clock\n"); 452 dev_err(dev, "Couldn't get the HDMI bus clock\n");
298 return PTR_ERR(hdmi->bus_clk); 453 ret = PTR_ERR(hdmi->bus_clk);
454 goto err_assert_reset;
299 } 455 }
300 clk_prepare_enable(hdmi->bus_clk); 456 clk_prepare_enable(hdmi->bus_clk);
301 457
302 hdmi->mod_clk = devm_clk_get(dev, "mod"); 458 hdmi->mod_clk = devm_clk_get(dev, "mod");
303 if (IS_ERR(hdmi->mod_clk)) { 459 if (IS_ERR(hdmi->mod_clk)) {
304 dev_err(dev, "Couldn't get the HDMI mod clock\n"); 460 dev_err(dev, "Couldn't get the HDMI mod clock\n");
305 return PTR_ERR(hdmi->mod_clk); 461 ret = PTR_ERR(hdmi->mod_clk);
462 goto err_disable_bus_clk;
306 } 463 }
307 clk_prepare_enable(hdmi->mod_clk); 464 clk_prepare_enable(hdmi->mod_clk);
308 465
309 hdmi->pll0_clk = devm_clk_get(dev, "pll-0"); 466 hdmi->pll0_clk = devm_clk_get(dev, "pll-0");
310 if (IS_ERR(hdmi->pll0_clk)) { 467 if (IS_ERR(hdmi->pll0_clk)) {
311 dev_err(dev, "Couldn't get the HDMI PLL 0 clock\n"); 468 dev_err(dev, "Couldn't get the HDMI PLL 0 clock\n");
312 return PTR_ERR(hdmi->pll0_clk); 469 ret = PTR_ERR(hdmi->pll0_clk);
470 goto err_disable_mod_clk;
313 } 471 }
314 472
315 hdmi->pll1_clk = devm_clk_get(dev, "pll-1"); 473 hdmi->pll1_clk = devm_clk_get(dev, "pll-1");
316 if (IS_ERR(hdmi->pll1_clk)) { 474 if (IS_ERR(hdmi->pll1_clk)) {
317 dev_err(dev, "Couldn't get the HDMI PLL 1 clock\n"); 475 dev_err(dev, "Couldn't get the HDMI PLL 1 clock\n");
318 return PTR_ERR(hdmi->pll1_clk); 476 ret = PTR_ERR(hdmi->pll1_clk);
477 goto err_disable_mod_clk;
478 }
479
480 hdmi->regmap = devm_regmap_init_mmio(dev, hdmi->base,
481 &sun4i_hdmi_regmap_config);
482 if (IS_ERR(hdmi->regmap)) {
483 dev_err(dev, "Couldn't create HDMI encoder regmap\n");
484 return PTR_ERR(hdmi->regmap);
319 } 485 }
320 486
321 ret = sun4i_tmds_create(hdmi); 487 ret = sun4i_tmds_create(hdmi);
322 if (ret) { 488 if (ret) {
323 dev_err(dev, "Couldn't create the TMDS clock\n"); 489 dev_err(dev, "Couldn't create the TMDS clock\n");
324 return ret; 490 goto err_disable_mod_clk;
491 }
492
493 if (hdmi->variant->has_ddc_parent_clk) {
494 hdmi->ddc_parent_clk = devm_clk_get(dev, "ddc");
495 if (IS_ERR(hdmi->ddc_parent_clk)) {
496 dev_err(dev, "Couldn't get the HDMI DDC clock\n");
497 return PTR_ERR(hdmi->ddc_parent_clk);
498 }
499 } else {
500 hdmi->ddc_parent_clk = hdmi->tmds_clk;
325 } 501 }
326 502
327 writel(SUN4I_HDMI_CTRL_ENABLE, hdmi->base + SUN4I_HDMI_CTRL_REG); 503 writel(SUN4I_HDMI_CTRL_ENABLE, hdmi->base + SUN4I_HDMI_CTRL_REG);
328 504
329 writel(SUN4I_HDMI_PAD_CTRL0_TXEN | SUN4I_HDMI_PAD_CTRL0_CKEN | 505 writel(hdmi->variant->pad_ctrl0_init_val,
330 SUN4I_HDMI_PAD_CTRL0_PWENG | SUN4I_HDMI_PAD_CTRL0_PWEND |
331 SUN4I_HDMI_PAD_CTRL0_PWENC | SUN4I_HDMI_PAD_CTRL0_LDODEN |
332 SUN4I_HDMI_PAD_CTRL0_LDOCEN | SUN4I_HDMI_PAD_CTRL0_BIASEN,
333 hdmi->base + SUN4I_HDMI_PAD_CTRL0_REG); 506 hdmi->base + SUN4I_HDMI_PAD_CTRL0_REG);
334 507
335 /*
336 * We can't just initialize the register there, we need to
337 * protect the clock bits that have already been read out and
338 * cached by the clock framework.
339 */
340 reg = readl(hdmi->base + SUN4I_HDMI_PAD_CTRL1_REG);
341 reg &= SUN4I_HDMI_PAD_CTRL1_HALVE_CLK;
342 reg |= SUN4I_HDMI_PAD_CTRL1_REG_AMP(6) |
343 SUN4I_HDMI_PAD_CTRL1_REG_EMP(2) |
344 SUN4I_HDMI_PAD_CTRL1_REG_DENCK |
345 SUN4I_HDMI_PAD_CTRL1_REG_DEN |
346 SUN4I_HDMI_PAD_CTRL1_EMPCK_OPT |
347 SUN4I_HDMI_PAD_CTRL1_EMP_OPT |
348 SUN4I_HDMI_PAD_CTRL1_AMPCK_OPT |
349 SUN4I_HDMI_PAD_CTRL1_AMP_OPT;
350 writel(reg, hdmi->base + SUN4I_HDMI_PAD_CTRL1_REG);
351
352 reg = readl(hdmi->base + SUN4I_HDMI_PLL_CTRL_REG); 508 reg = readl(hdmi->base + SUN4I_HDMI_PLL_CTRL_REG);
353 reg &= SUN4I_HDMI_PLL_CTRL_DIV_MASK; 509 reg &= SUN4I_HDMI_PLL_CTRL_DIV_MASK;
354 reg |= SUN4I_HDMI_PLL_CTRL_VCO_S(8) | SUN4I_HDMI_PLL_CTRL_CS(7) | 510 reg |= hdmi->variant->pll_ctrl_init_val;
355 SUN4I_HDMI_PLL_CTRL_CP_S(15) | SUN4I_HDMI_PLL_CTRL_S(7) |
356 SUN4I_HDMI_PLL_CTRL_VCO_GAIN(4) | SUN4I_HDMI_PLL_CTRL_SDIV2 |
357 SUN4I_HDMI_PLL_CTRL_LDO2_EN | SUN4I_HDMI_PLL_CTRL_LDO1_EN |
358 SUN4I_HDMI_PLL_CTRL_HV_IS_33 | SUN4I_HDMI_PLL_CTRL_BWS |
359 SUN4I_HDMI_PLL_CTRL_PLL_EN;
360 writel(reg, hdmi->base + SUN4I_HDMI_PLL_CTRL_REG); 511 writel(reg, hdmi->base + SUN4I_HDMI_PLL_CTRL_REG);
361 512
362 ret = sun4i_hdmi_i2c_create(dev, hdmi); 513 ret = sun4i_hdmi_i2c_create(dev, hdmi);
363 if (ret) { 514 if (ret) {
364 dev_err(dev, "Couldn't create the HDMI I2C adapter\n"); 515 dev_err(dev, "Couldn't create the HDMI I2C adapter\n");
365 return ret; 516 goto err_disable_mod_clk;
366 } 517 }
367 518
368 drm_encoder_helper_add(&hdmi->encoder, 519 drm_encoder_helper_add(&hdmi->encoder,
@@ -422,6 +573,12 @@ err_cleanup_connector:
422 drm_encoder_cleanup(&hdmi->encoder); 573 drm_encoder_cleanup(&hdmi->encoder);
423err_del_i2c_adapter: 574err_del_i2c_adapter:
424 i2c_del_adapter(hdmi->i2c); 575 i2c_del_adapter(hdmi->i2c);
576err_disable_mod_clk:
577 clk_disable_unprepare(hdmi->mod_clk);
578err_disable_bus_clk:
579 clk_disable_unprepare(hdmi->bus_clk);
580err_assert_reset:
581 reset_control_assert(hdmi->reset);
425 return ret; 582 return ret;
426} 583}
427 584
@@ -434,6 +591,8 @@ static void sun4i_hdmi_unbind(struct device *dev, struct device *master,
434 drm_connector_cleanup(&hdmi->connector); 591 drm_connector_cleanup(&hdmi->connector);
435 drm_encoder_cleanup(&hdmi->encoder); 592 drm_encoder_cleanup(&hdmi->encoder);
436 i2c_del_adapter(hdmi->i2c); 593 i2c_del_adapter(hdmi->i2c);
594 clk_disable_unprepare(hdmi->mod_clk);
595 clk_disable_unprepare(hdmi->bus_clk);
437} 596}
438 597
439static const struct component_ops sun4i_hdmi_ops = { 598static const struct component_ops sun4i_hdmi_ops = {
@@ -454,7 +613,8 @@ static int sun4i_hdmi_remove(struct platform_device *pdev)
454} 613}
455 614
456static const struct of_device_id sun4i_hdmi_of_table[] = { 615static const struct of_device_id sun4i_hdmi_of_table[] = {
457 { .compatible = "allwinner,sun5i-a10s-hdmi" }, 616 { .compatible = "allwinner,sun5i-a10s-hdmi", .data = &sun5i_variant, },
617 { .compatible = "allwinner,sun6i-a31-hdmi", .data = &sun6i_variant, },
458 { } 618 { }
459}; 619};
460MODULE_DEVICE_TABLE(of, sun4i_hdmi_of_table); 620MODULE_DEVICE_TABLE(of, sun4i_hdmi_of_table);
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c
index 2e42d09ab42e..58e9d37e8c17 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c
@@ -25,8 +25,6 @@
25 25
26/* FIFO request bit is set when FIFO level is above RX_THRESHOLD during read */ 26/* FIFO request bit is set when FIFO level is above RX_THRESHOLD during read */
27#define RX_THRESHOLD SUN4I_HDMI_DDC_FIFO_CTRL_RX_THRES_MAX 27#define RX_THRESHOLD SUN4I_HDMI_DDC_FIFO_CTRL_RX_THRES_MAX
28/* FIFO request bit is set when FIFO level is below TX_THRESHOLD during write */
29#define TX_THRESHOLD 1
30 28
31static int fifo_transfer(struct sun4i_hdmi *hdmi, u8 *buf, int len, bool read) 29static int fifo_transfer(struct sun4i_hdmi *hdmi, u8 *buf, int len, bool read)
32{ 30{
@@ -39,27 +37,36 @@ static int fifo_transfer(struct sun4i_hdmi *hdmi, u8 *buf, int len, bool read)
39 SUN4I_HDMI_DDC_INT_STATUS_FIFO_REQUEST | 37 SUN4I_HDMI_DDC_INT_STATUS_FIFO_REQUEST |
40 SUN4I_HDMI_DDC_INT_STATUS_TRANSFER_COMPLETE; 38 SUN4I_HDMI_DDC_INT_STATUS_TRANSFER_COMPLETE;
41 u32 reg; 39 u32 reg;
40 /*
41 * If threshold is inclusive, then the FIFO may only have
42 * RX_THRESHOLD number of bytes, instead of RX_THRESHOLD + 1.
43 */
44 int read_len = RX_THRESHOLD +
45 (hdmi->variant->ddc_fifo_thres_incl ? 0 : 1);
42 46
43 /* Limit transfer length by FIFO threshold */ 47 /*
44 len = min_t(int, len, read ? (RX_THRESHOLD + 1) : 48 * Limit transfer length by FIFO threshold or FIFO size.
45 (SUN4I_HDMI_DDC_FIFO_SIZE - TX_THRESHOLD + 1)); 49 * For TX the threshold is for an empty FIFO.
50 */
51 len = min_t(int, len, read ? read_len : SUN4I_HDMI_DDC_FIFO_SIZE);
46 52
47 /* Wait until error, FIFO request bit set or transfer complete */ 53 /* Wait until error, FIFO request bit set or transfer complete */
48 if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_DDC_INT_STATUS_REG, reg, 54 if (regmap_field_read_poll_timeout(hdmi->field_ddc_int_status, reg,
49 reg & mask, len * byte_time_ns, 100000)) 55 reg & mask, len * byte_time_ns,
56 100000))
50 return -ETIMEDOUT; 57 return -ETIMEDOUT;
51 58
52 if (reg & SUN4I_HDMI_DDC_INT_STATUS_ERROR_MASK) 59 if (reg & SUN4I_HDMI_DDC_INT_STATUS_ERROR_MASK)
53 return -EIO; 60 return -EIO;
54 61
55 if (read) 62 if (read)
56 readsb(hdmi->base + SUN4I_HDMI_DDC_FIFO_DATA_REG, buf, len); 63 readsb(hdmi->base + hdmi->variant->ddc_fifo_reg, buf, len);
57 else 64 else
58 writesb(hdmi->base + SUN4I_HDMI_DDC_FIFO_DATA_REG, buf, len); 65 writesb(hdmi->base + hdmi->variant->ddc_fifo_reg, buf, len);
59 66
60 /* Clear FIFO request bit */ 67 /* Clear FIFO request bit by forcing a write to that bit */
61 writel(SUN4I_HDMI_DDC_INT_STATUS_FIFO_REQUEST, 68 regmap_field_force_write(hdmi->field_ddc_int_status,
62 hdmi->base + SUN4I_HDMI_DDC_INT_STATUS_REG); 69 SUN4I_HDMI_DDC_INT_STATUS_FIFO_REQUEST);
63 70
64 return len; 71 return len;
65} 72}
@@ -70,50 +77,52 @@ static int xfer_msg(struct sun4i_hdmi *hdmi, struct i2c_msg *msg)
70 u32 reg; 77 u32 reg;
71 78
72 /* Set FIFO direction */ 79 /* Set FIFO direction */
73 reg = readl(hdmi->base + SUN4I_HDMI_DDC_CTRL_REG); 80 if (hdmi->variant->ddc_fifo_has_dir) {
74 reg &= ~SUN4I_HDMI_DDC_CTRL_FIFO_DIR_MASK; 81 reg = readl(hdmi->base + SUN4I_HDMI_DDC_CTRL_REG);
75 reg |= (msg->flags & I2C_M_RD) ? 82 reg &= ~SUN4I_HDMI_DDC_CTRL_FIFO_DIR_MASK;
76 SUN4I_HDMI_DDC_CTRL_FIFO_DIR_READ : 83 reg |= (msg->flags & I2C_M_RD) ?
77 SUN4I_HDMI_DDC_CTRL_FIFO_DIR_WRITE; 84 SUN4I_HDMI_DDC_CTRL_FIFO_DIR_READ :
78 writel(reg, hdmi->base + SUN4I_HDMI_DDC_CTRL_REG); 85 SUN4I_HDMI_DDC_CTRL_FIFO_DIR_WRITE;
86 writel(reg, hdmi->base + SUN4I_HDMI_DDC_CTRL_REG);
87 }
88
89 /* Clear address register (not cleared by soft reset) */
90 regmap_field_write(hdmi->field_ddc_addr_reg, 0);
79 91
80 /* Set I2C address */ 92 /* Set I2C address */
81 writel(SUN4I_HDMI_DDC_ADDR_SLAVE(msg->addr), 93 regmap_field_write(hdmi->field_ddc_slave_addr, msg->addr);
82 hdmi->base + SUN4I_HDMI_DDC_ADDR_REG); 94
83 95 /*
84 /* Set FIFO RX/TX thresholds and clear FIFO */ 96 * Set FIFO RX/TX thresholds and clear FIFO
85 reg = readl(hdmi->base + SUN4I_HDMI_DDC_FIFO_CTRL_REG); 97 *
86 reg |= SUN4I_HDMI_DDC_FIFO_CTRL_CLEAR; 98 * If threshold is inclusive, we can set the TX threshold to
87 reg &= ~SUN4I_HDMI_DDC_FIFO_CTRL_RX_THRES_MASK; 99 * 0 instead of 1.
88 reg |= SUN4I_HDMI_DDC_FIFO_CTRL_RX_THRES(RX_THRESHOLD); 100 */
89 reg &= ~SUN4I_HDMI_DDC_FIFO_CTRL_TX_THRES_MASK; 101 regmap_field_write(hdmi->field_ddc_fifo_tx_thres,
90 reg |= SUN4I_HDMI_DDC_FIFO_CTRL_TX_THRES(TX_THRESHOLD); 102 hdmi->variant->ddc_fifo_thres_incl ? 0 : 1);
91 writel(reg, hdmi->base + SUN4I_HDMI_DDC_FIFO_CTRL_REG); 103 regmap_field_write(hdmi->field_ddc_fifo_rx_thres, RX_THRESHOLD);
92 if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_DDC_FIFO_CTRL_REG, 104 regmap_field_write(hdmi->field_ddc_fifo_clear, 1);
93 reg, 105 if (regmap_field_read_poll_timeout(hdmi->field_ddc_fifo_clear,
94 !(reg & SUN4I_HDMI_DDC_FIFO_CTRL_CLEAR), 106 reg, !reg, 100, 2000))
95 100, 2000))
96 return -EIO; 107 return -EIO;
97 108
98 /* Set transfer length */ 109 /* Set transfer length */
99 writel(msg->len, hdmi->base + SUN4I_HDMI_DDC_BYTE_COUNT_REG); 110 regmap_field_write(hdmi->field_ddc_byte_count, msg->len);
100 111
101 /* Set command */ 112 /* Set command */
102 writel(msg->flags & I2C_M_RD ? 113 regmap_field_write(hdmi->field_ddc_cmd,
103 SUN4I_HDMI_DDC_CMD_IMPLICIT_READ : 114 msg->flags & I2C_M_RD ?
104 SUN4I_HDMI_DDC_CMD_IMPLICIT_WRITE, 115 SUN4I_HDMI_DDC_CMD_IMPLICIT_READ :
105 hdmi->base + SUN4I_HDMI_DDC_CMD_REG); 116 SUN4I_HDMI_DDC_CMD_IMPLICIT_WRITE);
106 117
107 /* Clear interrupt status bits */ 118 /* Clear interrupt status bits by forcing a write */
108 writel(SUN4I_HDMI_DDC_INT_STATUS_ERROR_MASK | 119 regmap_field_force_write(hdmi->field_ddc_int_status,
109 SUN4I_HDMI_DDC_INT_STATUS_FIFO_REQUEST | 120 SUN4I_HDMI_DDC_INT_STATUS_ERROR_MASK |
110 SUN4I_HDMI_DDC_INT_STATUS_TRANSFER_COMPLETE, 121 SUN4I_HDMI_DDC_INT_STATUS_FIFO_REQUEST |
111 hdmi->base + SUN4I_HDMI_DDC_INT_STATUS_REG); 122 SUN4I_HDMI_DDC_INT_STATUS_TRANSFER_COMPLETE);
112 123
113 /* Start command */ 124 /* Start command */
114 reg = readl(hdmi->base + SUN4I_HDMI_DDC_CTRL_REG); 125 regmap_field_write(hdmi->field_ddc_start, 1);
115 writel(reg | SUN4I_HDMI_DDC_CTRL_START_CMD,
116 hdmi->base + SUN4I_HDMI_DDC_CTRL_REG);
117 126
118 /* Transfer bytes */ 127 /* Transfer bytes */
119 for (i = 0; i < msg->len; i += len) { 128 for (i = 0; i < msg->len; i += len) {
@@ -124,14 +133,12 @@ static int xfer_msg(struct sun4i_hdmi *hdmi, struct i2c_msg *msg)
124 } 133 }
125 134
126 /* Wait for command to finish */ 135 /* Wait for command to finish */
127 if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_DDC_CTRL_REG, 136 if (regmap_field_read_poll_timeout(hdmi->field_ddc_start,
128 reg, 137 reg, !reg, 100, 100000))
129 !(reg & SUN4I_HDMI_DDC_CTRL_START_CMD),
130 100, 100000))
131 return -EIO; 138 return -EIO;
132 139
133 /* Check for errors */ 140 /* Check for errors */
134 reg = readl(hdmi->base + SUN4I_HDMI_DDC_INT_STATUS_REG); 141 regmap_field_read(hdmi->field_ddc_int_status, &reg);
135 if ((reg & SUN4I_HDMI_DDC_INT_STATUS_ERROR_MASK) || 142 if ((reg & SUN4I_HDMI_DDC_INT_STATUS_ERROR_MASK) ||
136 !(reg & SUN4I_HDMI_DDC_INT_STATUS_TRANSFER_COMPLETE)) { 143 !(reg & SUN4I_HDMI_DDC_INT_STATUS_TRANSFER_COMPLETE)) {
137 return -EIO; 144 return -EIO;
@@ -154,20 +161,21 @@ static int sun4i_hdmi_i2c_xfer(struct i2c_adapter *adap,
154 return -EINVAL; 161 return -EINVAL;
155 } 162 }
156 163
164 /* DDC clock needs to be enabled for the module to work */
165 clk_prepare_enable(hdmi->ddc_clk);
166 clk_set_rate(hdmi->ddc_clk, 100000);
167
157 /* Reset I2C controller */ 168 /* Reset I2C controller */
158 writel(SUN4I_HDMI_DDC_CTRL_ENABLE | SUN4I_HDMI_DDC_CTRL_RESET, 169 regmap_field_write(hdmi->field_ddc_en, 1);
159 hdmi->base + SUN4I_HDMI_DDC_CTRL_REG); 170 regmap_field_write(hdmi->field_ddc_reset, 1);
160 if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_DDC_CTRL_REG, reg, 171 if (regmap_field_read_poll_timeout(hdmi->field_ddc_reset,
161 !(reg & SUN4I_HDMI_DDC_CTRL_RESET), 172 reg, !reg, 100, 2000)) {
162 100, 2000)) 173 clk_disable_unprepare(hdmi->ddc_clk);
163 return -EIO; 174 return -EIO;
175 }
164 176
165 writel(SUN4I_HDMI_DDC_LINE_CTRL_SDA_ENABLE | 177 regmap_field_write(hdmi->field_ddc_sck_en, 1);
166 SUN4I_HDMI_DDC_LINE_CTRL_SCL_ENABLE, 178 regmap_field_write(hdmi->field_ddc_sda_en, 1);
167 hdmi->base + SUN4I_HDMI_DDC_LINE_CTRL_REG);
168
169 clk_prepare_enable(hdmi->ddc_clk);
170 clk_set_rate(hdmi->ddc_clk, 100000);
171 179
172 for (i = 0; i < num; i++) { 180 for (i = 0; i < num; i++) {
173 err = xfer_msg(hdmi, &msgs[i]); 181 err = xfer_msg(hdmi, &msgs[i]);
@@ -191,12 +199,105 @@ static const struct i2c_algorithm sun4i_hdmi_i2c_algorithm = {
191 .functionality = sun4i_hdmi_i2c_func, 199 .functionality = sun4i_hdmi_i2c_func,
192}; 200};
193 201
202static int sun4i_hdmi_init_regmap_fields(struct sun4i_hdmi *hdmi)
203{
204 hdmi->field_ddc_en =
205 devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
206 hdmi->variant->field_ddc_en);
207 if (IS_ERR(hdmi->field_ddc_en))
208 return PTR_ERR(hdmi->field_ddc_en);
209
210 hdmi->field_ddc_start =
211 devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
212 hdmi->variant->field_ddc_start);
213 if (IS_ERR(hdmi->field_ddc_start))
214 return PTR_ERR(hdmi->field_ddc_start);
215
216 hdmi->field_ddc_reset =
217 devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
218 hdmi->variant->field_ddc_reset);
219 if (IS_ERR(hdmi->field_ddc_reset))
220 return PTR_ERR(hdmi->field_ddc_reset);
221
222 hdmi->field_ddc_addr_reg =
223 devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
224 hdmi->variant->field_ddc_addr_reg);
225 if (IS_ERR(hdmi->field_ddc_addr_reg))
226 return PTR_ERR(hdmi->field_ddc_addr_reg);
227
228 hdmi->field_ddc_slave_addr =
229 devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
230 hdmi->variant->field_ddc_slave_addr);
231 if (IS_ERR(hdmi->field_ddc_slave_addr))
232 return PTR_ERR(hdmi->field_ddc_slave_addr);
233
234 hdmi->field_ddc_int_mask =
235 devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
236 hdmi->variant->field_ddc_int_mask);
237 if (IS_ERR(hdmi->field_ddc_int_mask))
238 return PTR_ERR(hdmi->field_ddc_int_mask);
239
240 hdmi->field_ddc_int_status =
241 devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
242 hdmi->variant->field_ddc_int_status);
243 if (IS_ERR(hdmi->field_ddc_int_status))
244 return PTR_ERR(hdmi->field_ddc_int_status);
245
246 hdmi->field_ddc_fifo_clear =
247 devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
248 hdmi->variant->field_ddc_fifo_clear);
249 if (IS_ERR(hdmi->field_ddc_fifo_clear))
250 return PTR_ERR(hdmi->field_ddc_fifo_clear);
251
252 hdmi->field_ddc_fifo_rx_thres =
253 devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
254 hdmi->variant->field_ddc_fifo_rx_thres);
255 if (IS_ERR(hdmi->field_ddc_fifo_rx_thres))
256 return PTR_ERR(hdmi->field_ddc_fifo_rx_thres);
257
258 hdmi->field_ddc_fifo_tx_thres =
259 devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
260 hdmi->variant->field_ddc_fifo_tx_thres);
261 if (IS_ERR(hdmi->field_ddc_fifo_tx_thres))
262 return PTR_ERR(hdmi->field_ddc_fifo_tx_thres);
263
264 hdmi->field_ddc_byte_count =
265 devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
266 hdmi->variant->field_ddc_byte_count);
267 if (IS_ERR(hdmi->field_ddc_byte_count))
268 return PTR_ERR(hdmi->field_ddc_byte_count);
269
270 hdmi->field_ddc_cmd =
271 devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
272 hdmi->variant->field_ddc_cmd);
273 if (IS_ERR(hdmi->field_ddc_cmd))
274 return PTR_ERR(hdmi->field_ddc_cmd);
275
276 hdmi->field_ddc_sda_en =
277 devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
278 hdmi->variant->field_ddc_sda_en);
279 if (IS_ERR(hdmi->field_ddc_sda_en))
280 return PTR_ERR(hdmi->field_ddc_sda_en);
281
282 hdmi->field_ddc_sck_en =
283 devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
284 hdmi->variant->field_ddc_sck_en);
285 if (IS_ERR(hdmi->field_ddc_sck_en))
286 return PTR_ERR(hdmi->field_ddc_sck_en);
287
288 return 0;
289}
290
194int sun4i_hdmi_i2c_create(struct device *dev, struct sun4i_hdmi *hdmi) 291int sun4i_hdmi_i2c_create(struct device *dev, struct sun4i_hdmi *hdmi)
195{ 292{
196 struct i2c_adapter *adap; 293 struct i2c_adapter *adap;
197 int ret = 0; 294 int ret = 0;
198 295
199 ret = sun4i_ddc_create(hdmi, hdmi->tmds_clk); 296 ret = sun4i_ddc_create(hdmi, hdmi->ddc_parent_clk);
297 if (ret)
298 return ret;
299
300 ret = sun4i_hdmi_init_regmap_fields(hdmi);
200 if (ret) 301 if (ret)
201 return ret; 302 return ret;
202 303
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
index 5cf2527bffc8..1b6b37aefc38 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
@@ -18,6 +18,8 @@
18struct sun4i_tmds { 18struct sun4i_tmds {
19 struct clk_hw hw; 19 struct clk_hw hw;
20 struct sun4i_hdmi *hdmi; 20 struct sun4i_hdmi *hdmi;
21
22 u8 div_offset;
21}; 23};
22 24
23static inline struct sun4i_tmds *hw_to_tmds(struct clk_hw *hw) 25static inline struct sun4i_tmds *hw_to_tmds(struct clk_hw *hw)
@@ -28,6 +30,7 @@ static inline struct sun4i_tmds *hw_to_tmds(struct clk_hw *hw)
28 30
29static unsigned long sun4i_tmds_calc_divider(unsigned long rate, 31static unsigned long sun4i_tmds_calc_divider(unsigned long rate,
30 unsigned long parent_rate, 32 unsigned long parent_rate,
33 u8 div_offset,
31 u8 *div, 34 u8 *div,
32 bool *half) 35 bool *half)
33{ 36{
@@ -35,7 +38,7 @@ static unsigned long sun4i_tmds_calc_divider(unsigned long rate,
35 u8 best_m = 0, m; 38 u8 best_m = 0, m;
36 bool is_double; 39 bool is_double;
37 40
38 for (m = 1; m < 16; m++) { 41 for (m = div_offset ?: 1; m < (16 + div_offset); m++) {
39 u8 d; 42 u8 d;
40 43
41 for (d = 1; d < 3; d++) { 44 for (d = 1; d < 3; d++) {
@@ -67,11 +70,12 @@ static unsigned long sun4i_tmds_calc_divider(unsigned long rate,
67static int sun4i_tmds_determine_rate(struct clk_hw *hw, 70static int sun4i_tmds_determine_rate(struct clk_hw *hw,
68 struct clk_rate_request *req) 71 struct clk_rate_request *req)
69{ 72{
70 struct clk_hw *parent; 73 struct sun4i_tmds *tmds = hw_to_tmds(hw);
74 struct clk_hw *parent = NULL;
71 unsigned long best_parent = 0; 75 unsigned long best_parent = 0;
72 unsigned long rate = req->rate; 76 unsigned long rate = req->rate;
73 int best_div = 1, best_half = 1; 77 int best_div = 1, best_half = 1;
74 int i, j; 78 int i, j, p;
75 79
76 /* 80 /*
77 * We only consider PLL3, since the TCON is very likely to be 81 * We only consider PLL3, since the TCON is very likely to be
@@ -79,32 +83,38 @@ static int sun4i_tmds_determine_rate(struct clk_hw *hw,
79 * clock, so we should not need to do anything. 83 * clock, so we should not need to do anything.
80 */ 84 */
81 85
82 parent = clk_hw_get_parent_by_index(hw, 0); 86 for (p = 0; p < clk_hw_get_num_parents(hw); p++) {
83 if (!parent) 87 parent = clk_hw_get_parent_by_index(hw, p);
84 return -EINVAL; 88 if (!parent)
85 89 continue;
86 for (i = 1; i < 3; i++) { 90
87 for (j = 1; j < 16; j++) { 91 for (i = 1; i < 3; i++) {
88 unsigned long ideal = rate * i * j; 92 for (j = tmds->div_offset ?: 1;
89 unsigned long rounded; 93 j < (16 + tmds->div_offset); j++) {
90 94 unsigned long ideal = rate * i * j;
91 rounded = clk_hw_round_rate(parent, ideal); 95 unsigned long rounded;
92 96
93 if (rounded == ideal) { 97 rounded = clk_hw_round_rate(parent, ideal);
94 best_parent = rounded; 98
95 best_half = i; 99 if (rounded == ideal) {
96 best_div = j; 100 best_parent = rounded;
97 goto out; 101 best_half = i;
98 } 102 best_div = j;
99 103 goto out;
100 if (abs(rate - rounded / i) < 104 }
101 abs(rate - best_parent / best_div)) { 105
102 best_parent = rounded; 106 if (abs(rate - rounded / i) <
103 best_div = i; 107 abs(rate - best_parent / best_div)) {
108 best_parent = rounded;
109 best_div = i;
110 }
104 } 111 }
105 } 112 }
106 } 113 }
107 114
115 if (!parent)
116 return -EINVAL;
117
108out: 118out:
109 req->rate = best_parent / best_half / best_div; 119 req->rate = best_parent / best_half / best_div;
110 req->best_parent_rate = best_parent; 120 req->best_parent_rate = best_parent;
@@ -124,7 +134,7 @@ static unsigned long sun4i_tmds_recalc_rate(struct clk_hw *hw,
124 parent_rate /= 2; 134 parent_rate /= 2;
125 135
126 reg = readl(tmds->hdmi->base + SUN4I_HDMI_PLL_CTRL_REG); 136 reg = readl(tmds->hdmi->base + SUN4I_HDMI_PLL_CTRL_REG);
127 reg = (reg >> 4) & 0xf; 137 reg = ((reg >> 4) & 0xf) + tmds->div_offset;
128 if (!reg) 138 if (!reg)
129 reg = 1; 139 reg = 1;
130 140
@@ -139,7 +149,8 @@ static int sun4i_tmds_set_rate(struct clk_hw *hw, unsigned long rate,
139 u32 reg; 149 u32 reg;
140 u8 div; 150 u8 div;
141 151
142 sun4i_tmds_calc_divider(rate, parent_rate, &div, &half); 152 sun4i_tmds_calc_divider(rate, parent_rate, tmds->div_offset,
153 &div, &half);
143 154
144 reg = readl(tmds->hdmi->base + SUN4I_HDMI_PAD_CTRL1_REG); 155 reg = readl(tmds->hdmi->base + SUN4I_HDMI_PAD_CTRL1_REG);
145 reg &= ~SUN4I_HDMI_PAD_CTRL1_HALVE_CLK; 156 reg &= ~SUN4I_HDMI_PAD_CTRL1_HALVE_CLK;
@@ -149,7 +160,7 @@ static int sun4i_tmds_set_rate(struct clk_hw *hw, unsigned long rate,
149 160
150 reg = readl(tmds->hdmi->base + SUN4I_HDMI_PLL_CTRL_REG); 161 reg = readl(tmds->hdmi->base + SUN4I_HDMI_PLL_CTRL_REG);
151 reg &= ~SUN4I_HDMI_PLL_CTRL_DIV_MASK; 162 reg &= ~SUN4I_HDMI_PLL_CTRL_DIV_MASK;
152 writel(reg | SUN4I_HDMI_PLL_CTRL_DIV(div), 163 writel(reg | SUN4I_HDMI_PLL_CTRL_DIV(div - tmds->div_offset),
153 tmds->hdmi->base + SUN4I_HDMI_PLL_CTRL_REG); 164 tmds->hdmi->base + SUN4I_HDMI_PLL_CTRL_REG);
154 165
155 return 0; 166 return 0;
@@ -216,6 +227,7 @@ int sun4i_tmds_create(struct sun4i_hdmi *hdmi)
216 227
217 tmds->hdmi = hdmi; 228 tmds->hdmi = hdmi;
218 tmds->hw.init = &init; 229 tmds->hw.init = &init;
230 tmds->div_offset = hdmi->variant->tmds_clk_div_offset;
219 231
220 hdmi->tmds_clk = devm_clk_register(hdmi->dev, &tmds->hw); 232 hdmi->tmds_clk = devm_clk_register(hdmi->dev, &tmds->hw);
221 if (IS_ERR(hdmi->tmds_clk)) 233 if (IS_ERR(hdmi->tmds_clk))
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index e853dfe51389..68751b999877 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -14,9 +14,12 @@
14#include <drm/drm_atomic_helper.h> 14#include <drm/drm_atomic_helper.h>
15#include <drm/drm_crtc.h> 15#include <drm/drm_crtc.h>
16#include <drm/drm_crtc_helper.h> 16#include <drm/drm_crtc_helper.h>
17#include <drm/drm_encoder.h>
17#include <drm/drm_modes.h> 18#include <drm/drm_modes.h>
18#include <drm/drm_of.h> 19#include <drm/drm_of.h>
19 20
21#include <uapi/drm/drm_mode.h>
22
20#include <linux/component.h> 23#include <linux/component.h>
21#include <linux/ioport.h> 24#include <linux/ioport.h>
22#include <linux/of_address.h> 25#include <linux/of_address.h>
@@ -109,26 +112,37 @@ void sun4i_tcon_enable_vblank(struct sun4i_tcon *tcon, bool enable)
109} 112}
110EXPORT_SYMBOL(sun4i_tcon_enable_vblank); 113EXPORT_SYMBOL(sun4i_tcon_enable_vblank);
111 114
112void sun4i_tcon_set_mux(struct sun4i_tcon *tcon, int channel, 115/*
113 struct drm_encoder *encoder) 116 * This function is a helper for TCON output muxing. The TCON output
117 * muxing control register in earlier SoCs (without the TCON TOP block)
118 * are located in TCON0. This helper returns a pointer to TCON0's
119 * sun4i_tcon structure, or NULL if not found.
120 */
121static struct sun4i_tcon *sun4i_get_tcon0(struct drm_device *drm)
114{ 122{
115 u32 val; 123 struct sun4i_drv *drv = drm->dev_private;
124 struct sun4i_tcon *tcon;
116 125
117 if (!tcon->quirks->has_unknown_mux) 126 list_for_each_entry(tcon, &drv->tcon_list, list)
118 return; 127 if (tcon->id == 0)
128 return tcon;
119 129
120 if (channel != 1) 130 dev_warn(drm->dev,
121 return; 131 "TCON0 not found, display output muxing may not work\n");
122 132
123 if (encoder->encoder_type == DRM_MODE_ENCODER_TVDAC) 133 return NULL;
124 val = 1; 134}
125 else
126 val = 0;
127 135
128 /* 136void sun4i_tcon_set_mux(struct sun4i_tcon *tcon, int channel,
129 * FIXME: Undocumented bits 137 struct drm_encoder *encoder)
130 */ 138{
131 regmap_write(tcon->regs, SUN4I_TCON_MUX_CTRL_REG, val); 139 int ret = -ENOTSUPP;
140
141 if (tcon->quirks->set_mux)
142 ret = tcon->quirks->set_mux(tcon, encoder);
143
144 DRM_DEBUG_DRIVER("Muxing encoder %s to CRTC %s: %d\n",
145 encoder->name, encoder->crtc->name, ret);
132} 146}
133EXPORT_SYMBOL(sun4i_tcon_set_mux); 147EXPORT_SYMBOL(sun4i_tcon_set_mux);
134 148
@@ -767,14 +781,57 @@ static int sun4i_tcon_remove(struct platform_device *pdev)
767 return 0; 781 return 0;
768} 782}
769 783
784/* platform specific TCON muxing callbacks */
785static int sun5i_a13_tcon_set_mux(struct sun4i_tcon *tcon,
786 struct drm_encoder *encoder)
787{
788 u32 val;
789
790 if (encoder->encoder_type == DRM_MODE_ENCODER_TVDAC)
791 val = 1;
792 else
793 val = 0;
794
795 /*
796 * FIXME: Undocumented bits
797 */
798 return regmap_write(tcon->regs, SUN4I_TCON_MUX_CTRL_REG, val);
799}
800
801static int sun6i_tcon_set_mux(struct sun4i_tcon *tcon,
802 struct drm_encoder *encoder)
803{
804 struct sun4i_tcon *tcon0 = sun4i_get_tcon0(encoder->dev);
805 u32 shift;
806
807 if (!tcon0)
808 return -EINVAL;
809
810 switch (encoder->encoder_type) {
811 case DRM_MODE_ENCODER_TMDS:
812 /* HDMI */
813 shift = 8;
814 break;
815 default:
816 /* TODO A31 has MIPI DSI but A31s does not */
817 return -EINVAL;
818 }
819
820 regmap_update_bits(tcon0->regs, SUN4I_TCON_MUX_CTRL_REG,
821 0x3 << shift, tcon->id << shift);
822
823 return 0;
824}
825
770static const struct sun4i_tcon_quirks sun5i_a13_quirks = { 826static const struct sun4i_tcon_quirks sun5i_a13_quirks = {
771 .has_unknown_mux = true, 827 .has_channel_1 = true,
772 .has_channel_1 = true, 828 .set_mux = sun5i_a13_tcon_set_mux,
773}; 829};
774 830
775static const struct sun4i_tcon_quirks sun6i_a31_quirks = { 831static const struct sun4i_tcon_quirks sun6i_a31_quirks = {
776 .has_channel_1 = true, 832 .has_channel_1 = true,
777 .needs_de_be_mux = true, 833 .needs_de_be_mux = true,
834 .set_mux = sun6i_tcon_set_mux,
778}; 835};
779 836
780static const struct sun4i_tcon_quirks sun6i_a31s_quirks = { 837static const struct sun4i_tcon_quirks sun6i_a31s_quirks = {
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index 5a219d1ccc26..d9e1357cc8ae 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -145,10 +145,14 @@
145 145
146#define SUN4I_TCON_MAX_CHANNELS 2 146#define SUN4I_TCON_MAX_CHANNELS 2
147 147
148struct sun4i_tcon;
149
148struct sun4i_tcon_quirks { 150struct sun4i_tcon_quirks {
149 bool has_unknown_mux; /* sun5i has undocumented mux */
150 bool has_channel_1; /* a33 does not have channel 1 */ 151 bool has_channel_1; /* a33 does not have channel 1 */
151 bool needs_de_be_mux; /* sun6i needs mux to select backend */ 152 bool needs_de_be_mux; /* sun6i needs mux to select backend */
153
154 /* callback to handle tcon muxing options */
155 int (*set_mux)(struct sun4i_tcon *, struct drm_encoder *);
152}; 156};
153 157
154struct sun4i_tcon { 158struct sun4i_tcon {
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
index fc447c9a1a27..f41fc506ff87 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
@@ -51,7 +51,6 @@ static int tinydrm_connector_get_modes(struct drm_connector *connector)
51 51
52static const struct drm_connector_helper_funcs tinydrm_connector_hfuncs = { 52static const struct drm_connector_helper_funcs tinydrm_connector_hfuncs = {
53 .get_modes = tinydrm_connector_get_modes, 53 .get_modes = tinydrm_connector_get_modes,
54 .best_encoder = drm_atomic_helper_best_encoder,
55}; 54};
56 55
57static enum drm_connector_status 56static enum drm_connector_status
diff --git a/drivers/gpu/drm/tinydrm/mi0283qt.c b/drivers/gpu/drm/tinydrm/mi0283qt.c
index 7fd26912f2ba..6a83b3093254 100644
--- a/drivers/gpu/drm/tinydrm/mi0283qt.c
+++ b/drivers/gpu/drm/tinydrm/mi0283qt.c
@@ -31,7 +31,7 @@ static int mi0283qt_init(struct mipi_dbi *mipi)
31 31
32 ret = regulator_enable(mipi->regulator); 32 ret = regulator_enable(mipi->regulator);
33 if (ret) { 33 if (ret) {
34 dev_err(dev, "Failed to enable regulator %d\n", ret); 34 DRM_DEV_ERROR(dev, "Failed to enable regulator %d\n", ret);
35 return ret; 35 return ret;
36 } 36 }
37 37
@@ -42,7 +42,7 @@ static int mi0283qt_init(struct mipi_dbi *mipi)
42 mipi_dbi_hw_reset(mipi); 42 mipi_dbi_hw_reset(mipi);
43 ret = mipi_dbi_command(mipi, MIPI_DCS_SOFT_RESET); 43 ret = mipi_dbi_command(mipi, MIPI_DCS_SOFT_RESET);
44 if (ret) { 44 if (ret) {
45 dev_err(dev, "Error sending command %d\n", ret); 45 DRM_DEV_ERROR(dev, "Error sending command %d\n", ret);
46 regulator_disable(mipi->regulator); 46 regulator_disable(mipi->regulator);
47 return ret; 47 return ret;
48 } 48 }
@@ -174,13 +174,13 @@ static int mi0283qt_probe(struct spi_device *spi)
174 174
175 mipi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); 175 mipi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
176 if (IS_ERR(mipi->reset)) { 176 if (IS_ERR(mipi->reset)) {
177 dev_err(dev, "Failed to get gpio 'reset'\n"); 177 DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
178 return PTR_ERR(mipi->reset); 178 return PTR_ERR(mipi->reset);
179 } 179 }
180 180
181 dc = devm_gpiod_get_optional(dev, "dc", GPIOD_OUT_LOW); 181 dc = devm_gpiod_get_optional(dev, "dc", GPIOD_OUT_LOW);
182 if (IS_ERR(dc)) { 182 if (IS_ERR(dc)) {
183 dev_err(dev, "Failed to get gpio 'dc'\n"); 183 DRM_DEV_ERROR(dev, "Failed to get gpio 'dc'\n");
184 return PTR_ERR(dc); 184 return PTR_ERR(dc);
185 } 185 }
186 186
diff --git a/drivers/gpu/drm/tinydrm/repaper.c b/drivers/gpu/drm/tinydrm/repaper.c
index 340198f5afea..75740630c410 100644
--- a/drivers/gpu/drm/tinydrm/repaper.c
+++ b/drivers/gpu/drm/tinydrm/repaper.c
@@ -474,8 +474,7 @@ static void repaper_get_temperature(struct repaper_epd *epd)
474 474
475 ret = thermal_zone_get_temp(epd->thermal, &temperature); 475 ret = thermal_zone_get_temp(epd->thermal, &temperature);
476 if (ret) { 476 if (ret) {
477 dev_err(&epd->spi->dev, "Failed to get temperature (%d)\n", 477 DRM_DEV_ERROR(&epd->spi->dev, "Failed to get temperature (%d)\n", ret);
478 ret);
479 return; 478 return;
480 } 479 }
481 480
@@ -630,7 +629,7 @@ out_unlock:
630 mutex_unlock(&tdev->dirty_lock); 629 mutex_unlock(&tdev->dirty_lock);
631 630
632 if (ret) 631 if (ret)
633 dev_err(fb->dev->dev, "Failed to update display (%d)\n", ret); 632 DRM_DEV_ERROR(fb->dev->dev, "Failed to update display (%d)\n", ret);
634 kfree(buf); 633 kfree(buf);
635 634
636 return ret; 635 return ret;
@@ -704,7 +703,7 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
704 } 703 }
705 704
706 if (!i) { 705 if (!i) {
707 dev_err(dev, "timeout waiting for panel to become ready.\n"); 706 DRM_DEV_ERROR(dev, "timeout waiting for panel to become ready.\n");
708 power_off(epd); 707 power_off(epd);
709 return; 708 return;
710 } 709 }
@@ -726,9 +725,9 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
726 ret = repaper_read_val(spi, 0x0f); 725 ret = repaper_read_val(spi, 0x0f);
727 if (ret < 0 || !(ret & 0x80)) { 726 if (ret < 0 || !(ret & 0x80)) {
728 if (ret < 0) 727 if (ret < 0)
729 dev_err(dev, "failed to read chip (%d)\n", ret); 728 DRM_DEV_ERROR(dev, "failed to read chip (%d)\n", ret);
730 else 729 else
731 dev_err(dev, "panel is reported broken\n"); 730 DRM_DEV_ERROR(dev, "panel is reported broken\n");
732 power_off(epd); 731 power_off(epd);
733 return; 732 return;
734 } 733 }
@@ -768,7 +767,7 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
768 /* check DC/DC */ 767 /* check DC/DC */
769 ret = repaper_read_val(spi, 0x0f); 768 ret = repaper_read_val(spi, 0x0f);
770 if (ret < 0) { 769 if (ret < 0) {
771 dev_err(dev, "failed to read chip (%d)\n", ret); 770 DRM_DEV_ERROR(dev, "failed to read chip (%d)\n", ret);
772 power_off(epd); 771 power_off(epd);
773 return; 772 return;
774 } 773 }
@@ -780,7 +779,7 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
780 } 779 }
781 780
782 if (!dc_ok) { 781 if (!dc_ok) {
783 dev_err(dev, "dc/dc failed\n"); 782 DRM_DEV_ERROR(dev, "dc/dc failed\n");
784 power_off(epd); 783 power_off(epd);
785 return; 784 return;
786 } 785 }
@@ -960,7 +959,7 @@ static int repaper_probe(struct spi_device *spi)
960 if (IS_ERR(epd->panel_on)) { 959 if (IS_ERR(epd->panel_on)) {
961 ret = PTR_ERR(epd->panel_on); 960 ret = PTR_ERR(epd->panel_on);
962 if (ret != -EPROBE_DEFER) 961 if (ret != -EPROBE_DEFER)
963 dev_err(dev, "Failed to get gpio 'panel-on'\n"); 962 DRM_DEV_ERROR(dev, "Failed to get gpio 'panel-on'\n");
964 return ret; 963 return ret;
965 } 964 }
966 965
@@ -968,7 +967,7 @@ static int repaper_probe(struct spi_device *spi)
968 if (IS_ERR(epd->discharge)) { 967 if (IS_ERR(epd->discharge)) {
969 ret = PTR_ERR(epd->discharge); 968 ret = PTR_ERR(epd->discharge);
970 if (ret != -EPROBE_DEFER) 969 if (ret != -EPROBE_DEFER)
971 dev_err(dev, "Failed to get gpio 'discharge'\n"); 970 DRM_DEV_ERROR(dev, "Failed to get gpio 'discharge'\n");
972 return ret; 971 return ret;
973 } 972 }
974 973
@@ -976,7 +975,7 @@ static int repaper_probe(struct spi_device *spi)
976 if (IS_ERR(epd->reset)) { 975 if (IS_ERR(epd->reset)) {
977 ret = PTR_ERR(epd->reset); 976 ret = PTR_ERR(epd->reset);
978 if (ret != -EPROBE_DEFER) 977 if (ret != -EPROBE_DEFER)
979 dev_err(dev, "Failed to get gpio 'reset'\n"); 978 DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
980 return ret; 979 return ret;
981 } 980 }
982 981
@@ -984,7 +983,7 @@ static int repaper_probe(struct spi_device *spi)
984 if (IS_ERR(epd->busy)) { 983 if (IS_ERR(epd->busy)) {
985 ret = PTR_ERR(epd->busy); 984 ret = PTR_ERR(epd->busy);
986 if (ret != -EPROBE_DEFER) 985 if (ret != -EPROBE_DEFER)
987 dev_err(dev, "Failed to get gpio 'busy'\n"); 986 DRM_DEV_ERROR(dev, "Failed to get gpio 'busy'\n");
988 return ret; 987 return ret;
989 } 988 }
990 989
@@ -992,8 +991,7 @@ static int repaper_probe(struct spi_device *spi)
992 &thermal_zone)) { 991 &thermal_zone)) {
993 epd->thermal = thermal_zone_get_zone_by_name(thermal_zone); 992 epd->thermal = thermal_zone_get_zone_by_name(thermal_zone);
994 if (IS_ERR(epd->thermal)) { 993 if (IS_ERR(epd->thermal)) {
995 dev_err(dev, "Failed to get thermal zone: %s\n", 994 DRM_DEV_ERROR(dev, "Failed to get thermal zone: %s\n", thermal_zone);
996 thermal_zone);
997 return PTR_ERR(epd->thermal); 995 return PTR_ERR(epd->thermal);
998 } 996 }
999 } 997 }
@@ -1034,7 +1032,7 @@ static int repaper_probe(struct spi_device *spi)
1034 if (IS_ERR(epd->border)) { 1032 if (IS_ERR(epd->border)) {
1035 ret = PTR_ERR(epd->border); 1033 ret = PTR_ERR(epd->border);
1036 if (ret != -EPROBE_DEFER) 1034 if (ret != -EPROBE_DEFER)
1037 dev_err(dev, "Failed to get gpio 'border'\n"); 1035 DRM_DEV_ERROR(dev, "Failed to get gpio 'border'\n");
1038 return ret; 1036 return ret;
1039 } 1037 }
1040 1038
diff --git a/drivers/gpu/drm/tinydrm/st7586.c b/drivers/gpu/drm/tinydrm/st7586.c
index da9c0d83045f..0a2c60da5c0e 100644
--- a/drivers/gpu/drm/tinydrm/st7586.c
+++ b/drivers/gpu/drm/tinydrm/st7586.c
@@ -188,7 +188,7 @@ static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
188 mipi_dbi_hw_reset(mipi); 188 mipi_dbi_hw_reset(mipi);
189 ret = mipi_dbi_command(mipi, ST7586_AUTO_READ_CTRL, 0x9f); 189 ret = mipi_dbi_command(mipi, ST7586_AUTO_READ_CTRL, 0x9f);
190 if (ret) { 190 if (ret) {
191 dev_err(dev, "Error sending command %d\n", ret); 191 DRM_DEV_ERROR(dev, "Error sending command %d\n", ret);
192 return; 192 return;
193 } 193 }
194 194
@@ -355,13 +355,13 @@ static int st7586_probe(struct spi_device *spi)
355 355
356 mipi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); 356 mipi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
357 if (IS_ERR(mipi->reset)) { 357 if (IS_ERR(mipi->reset)) {
358 dev_err(dev, "Failed to get gpio 'reset'\n"); 358 DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
359 return PTR_ERR(mipi->reset); 359 return PTR_ERR(mipi->reset);
360 } 360 }
361 361
362 a0 = devm_gpiod_get(dev, "a0", GPIOD_OUT_LOW); 362 a0 = devm_gpiod_get(dev, "a0", GPIOD_OUT_LOW);
363 if (IS_ERR(a0)) { 363 if (IS_ERR(a0)) {
364 dev_err(dev, "Failed to get gpio 'a0'\n"); 364 DRM_DEV_ERROR(dev, "Failed to get gpio 'a0'\n");
365 return PTR_ERR(a0); 365 return PTR_ERR(a0);
366 } 366 }
367 367
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index 9f9a49748d17..091ca81658eb 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -105,7 +105,7 @@ static struct drm_encoder*
105udl_best_single_encoder(struct drm_connector *connector) 105udl_best_single_encoder(struct drm_connector *connector)
106{ 106{
107 int enc_id = connector->encoder_ids[0]; 107 int enc_id = connector->encoder_ids[0];
108 return drm_encoder_find(connector->dev, enc_id); 108 return drm_encoder_find(connector->dev, NULL, enc_id);
109} 109}
110 110
111static int udl_connector_set_property(struct drm_connector *connector, 111static int udl_connector_set_property(struct drm_connector *connector,
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
index 519cefef800d..72c9dbd81d7f 100644
--- a/drivers/gpu/drm/vc4/vc4_dpi.c
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -97,8 +97,6 @@ struct vc4_dpi {
97 97
98 struct drm_encoder *encoder; 98 struct drm_encoder *encoder;
99 struct drm_connector *connector; 99 struct drm_connector *connector;
100 struct drm_bridge *bridge;
101 bool is_panel_bridge;
102 100
103 void __iomem *regs; 101 void __iomem *regs;
104 102
@@ -251,10 +249,11 @@ static int vc4_dpi_init_bridge(struct vc4_dpi *dpi)
251{ 249{
252 struct device *dev = &dpi->pdev->dev; 250 struct device *dev = &dpi->pdev->dev;
253 struct drm_panel *panel; 251 struct drm_panel *panel;
252 struct drm_bridge *bridge;
254 int ret; 253 int ret;
255 254
256 ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0, 255 ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
257 &panel, &dpi->bridge); 256 &panel, &bridge);
258 if (ret) { 257 if (ret) {
259 /* If nothing was connected in the DT, that's not an 258 /* If nothing was connected in the DT, that's not an
260 * error. 259 * error.
@@ -265,13 +264,10 @@ static int vc4_dpi_init_bridge(struct vc4_dpi *dpi)
265 return ret; 264 return ret;
266 } 265 }
267 266
268 if (panel) { 267 if (panel)
269 dpi->bridge = drm_panel_bridge_add(panel, 268 bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_DPI);
270 DRM_MODE_CONNECTOR_DPI);
271 dpi->is_panel_bridge = true;
272 }
273 269
274 return drm_bridge_attach(dpi->encoder, dpi->bridge, NULL); 270 return drm_bridge_attach(dpi->encoder, bridge, NULL);
275} 271}
276 272
277static int vc4_dpi_bind(struct device *dev, struct device *master, void *data) 273static int vc4_dpi_bind(struct device *dev, struct device *master, void *data)
@@ -352,8 +348,7 @@ static void vc4_dpi_unbind(struct device *dev, struct device *master,
352 struct vc4_dev *vc4 = to_vc4_dev(drm); 348 struct vc4_dev *vc4 = to_vc4_dev(drm);
353 struct vc4_dpi *dpi = dev_get_drvdata(dev); 349 struct vc4_dpi *dpi = dev_get_drvdata(dev);
354 350
355 if (dpi->is_panel_bridge) 351 drm_of_panel_bridge_remove(dev->of_node, 0, 0);
356 drm_panel_bridge_remove(dpi->bridge);
357 352
358 drm_encoder_cleanup(dpi->encoder); 353 drm_encoder_cleanup(dpi->encoder);
359 354
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index 925c726ac694..554605af344e 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -859,11 +859,7 @@ static bool vc4_dsi_encoder_mode_fixup(struct drm_encoder *encoder,
859 pll_clock = parent_rate / divider; 859 pll_clock = parent_rate / divider;
860 pixel_clock_hz = pll_clock / dsi->divider; 860 pixel_clock_hz = pll_clock / dsi->divider;
861 861
862 /* Round up the clk_set_rate() request slightly, since 862 adjusted_mode->clock = pixel_clock_hz / 1000;
863 * PLLD_DSI1 is an integer divider and its rate selection will
864 * never round up.
865 */
866 adjusted_mode->clock = pixel_clock_hz / 1000 + 1;
867 863
868 /* Given the new pixel clock, adjust HFP to keep vrefresh the same. */ 864 /* Given the new pixel clock, adjust HFP to keep vrefresh the same. */
869 adjusted_mode->htotal = adjusted_mode->clock * mode->htotal / 865 adjusted_mode->htotal = adjusted_mode->clock * mode->htotal /
@@ -901,7 +897,11 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
901 vc4_dsi_dump_regs(dsi); 897 vc4_dsi_dump_regs(dsi);
902 } 898 }
903 899
904 phy_clock = pixel_clock_hz * dsi->divider; 900 /* Round up the clk_set_rate() request slightly, since
901 * PLLD_DSI1 is an integer divider and its rate selection will
902 * never round up.
903 */
904 phy_clock = (pixel_clock_hz + 1000) * dsi->divider;
905 ret = clk_set_rate(dsi->pll_phy_clock, phy_clock); 905 ret = clk_set_rate(dsi->pll_phy_clock, phy_clock);
906 if (ret) { 906 if (ret) {
907 dev_err(&dsi->pdev->dev, 907 dev_err(&dsi->pdev->dev,
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 2968b3ebb895..3a767a038f72 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -547,14 +547,24 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
547 tiling = SCALER_CTL0_TILING_LINEAR; 547 tiling = SCALER_CTL0_TILING_LINEAR;
548 pitch0 = VC4_SET_FIELD(fb->pitches[0], SCALER_SRC_PITCH); 548 pitch0 = VC4_SET_FIELD(fb->pitches[0], SCALER_SRC_PITCH);
549 break; 549 break;
550 case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED: 550
551 case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED: {
552 /* For T-tiled, the FB pitch is "how many bytes from
553 * one row to the next, such that pitch * tile_h ==
554 * tile_size * tiles_per_row."
555 */
556 u32 tile_size_shift = 12; /* T tiles are 4kb */
557 u32 tile_h_shift = 5; /* 16 and 32bpp are 32 pixels high */
558 u32 tiles_w = fb->pitches[0] >> (tile_size_shift - tile_h_shift);
559
551 tiling = SCALER_CTL0_TILING_256B_OR_T; 560 tiling = SCALER_CTL0_TILING_256B_OR_T;
552 561
553 pitch0 = (VC4_SET_FIELD(0, SCALER_PITCH0_TILE_Y_OFFSET), 562 pitch0 = (VC4_SET_FIELD(0, SCALER_PITCH0_TILE_Y_OFFSET) |
554 VC4_SET_FIELD(0, SCALER_PITCH0_TILE_WIDTH_L), 563 VC4_SET_FIELD(0, SCALER_PITCH0_TILE_WIDTH_L) |
555 VC4_SET_FIELD((vc4_state->src_w[0] + 31) >> 5, 564 VC4_SET_FIELD(tiles_w, SCALER_PITCH0_TILE_WIDTH_R));
556 SCALER_PITCH0_TILE_WIDTH_R));
557 break; 565 break;
566 }
567
558 default: 568 default:
559 DRM_DEBUG_KMS("Unsupported FB tiling flag 0x%16llx", 569 DRM_DEBUG_KMS("Unsupported FB tiling flag 0x%16llx",
560 (long long)fb->modifier); 570 (long long)fb->modifier);
diff --git a/drivers/gpu/drm/via/via_verifier.c b/drivers/gpu/drm/via/via_verifier.c
index 0677bbf4ec7e..fb2609434df7 100644
--- a/drivers/gpu/drm/via/via_verifier.c
+++ b/drivers/gpu/drm/via/via_verifier.c
@@ -34,6 +34,7 @@
34#include <drm/drm_legacy.h> 34#include <drm/drm_legacy.h>
35#include "via_verifier.h" 35#include "via_verifier.h"
36#include "via_drv.h" 36#include "via_drv.h"
37#include <linux/kernel.h>
37 38
38typedef enum { 39typedef enum {
39 state_command, 40 state_command,
@@ -1102,10 +1103,7 @@ setup_hazard_table(hz_init_t init_table[], hazard_t table[], int size)
1102 1103
1103void via_init_command_verifier(void) 1104void via_init_command_verifier(void)
1104{ 1105{
1105 setup_hazard_table(init_table1, table1, 1106 setup_hazard_table(init_table1, table1, ARRAY_SIZE(init_table1));
1106 sizeof(init_table1) / sizeof(hz_init_t)); 1107 setup_hazard_table(init_table2, table2, ARRAY_SIZE(init_table2));
1107 setup_hazard_table(init_table2, table2, 1108 setup_hazard_table(init_table3, table3, ARRAY_SIZE(init_table3));
1108 sizeof(init_table2) / sizeof(hz_init_t));
1109 setup_hazard_table(init_table3, table3,
1110 sizeof(init_table3) / sizeof(hz_init_t));
1111} 1109}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 5ec24fd801cd..01be355525e4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -286,7 +286,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
286 286
287 drm_modeset_lock_all(dev); 287 drm_modeset_lock_all(dev);
288 288
289 fb = drm_framebuffer_lookup(dev, arg->fb_id); 289 fb = drm_framebuffer_lookup(dev, file_priv, arg->fb_id);
290 if (!fb) { 290 if (!fb) {
291 DRM_ERROR("Invalid framebuffer id.\n"); 291 DRM_ERROR("Invalid framebuffer id.\n");
292 ret = -ENOENT; 292 ret = -ENOENT;
@@ -369,7 +369,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
369 369
370 drm_modeset_lock_all(dev); 370 drm_modeset_lock_all(dev);
371 371
372 fb = drm_framebuffer_lookup(dev, arg->fb_id); 372 fb = drm_framebuffer_lookup(dev, file_priv, arg->fb_id);
373 if (!fb) { 373 if (!fb) {
374 DRM_ERROR("Invalid framebuffer id.\n"); 374 DRM_ERROR("Invalid framebuffer id.\n");
375 ret = -ENOENT; 375 ret = -ENOENT;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index b850562fbdd6..0545740b3724 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1726,7 +1726,7 @@ int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
1726 return 0; 1726 return 0;
1727 } 1727 }
1728 1728
1729 crtc = drm_crtc_find(dev, arg->crtc_id); 1729 crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
1730 if (!crtc) { 1730 if (!crtc) {
1731 ret = -ENOENT; 1731 ret = -ENOENT;
1732 goto out; 1732 goto out;
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index 9f389f36566d..a9806ba6116d 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -479,7 +479,7 @@ static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
479{ 479{
480 struct vb2_dc_buf *buf; 480 struct vb2_dc_buf *buf;
481 struct frame_vector *vec; 481 struct frame_vector *vec;
482 unsigned long offset; 482 unsigned int offset;
483 int n_pages, i; 483 int n_pages, i;
484 int ret = 0; 484 int ret = 0;
485 struct sg_table *sgt; 485 struct sg_table *sgt;
@@ -507,7 +507,7 @@ static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
507 buf->dev = dev; 507 buf->dev = dev;
508 buf->dma_dir = dma_dir; 508 buf->dma_dir = dma_dir;
509 509
510 offset = vaddr & ~PAGE_MASK; 510 offset = lower_32_bits(offset_in_page(vaddr));
511 vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE || 511 vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE ||
512 dma_dir == DMA_BIDIRECTIONAL); 512 dma_dir == DMA_BIDIRECTIONAL);
513 if (IS_ERR(vec)) { 513 if (IS_ERR(vec)) {
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index 5beb0c361076..5c1b6388122a 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -876,10 +876,10 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
876 * offset within the internal buffer specified by handle parameter. 876 * offset within the internal buffer specified by handle parameter.
877 */ 877 */
878 if (xfer->loc_addr) { 878 if (xfer->loc_addr) {
879 unsigned long offset; 879 unsigned int offset;
880 long pinned; 880 long pinned;
881 881
882 offset = (unsigned long)(uintptr_t)xfer->loc_addr & ~PAGE_MASK; 882 offset = lower_32_bits(offset_in_page(xfer->loc_addr));
883 nr_pages = PAGE_ALIGN(xfer->length + offset) >> PAGE_SHIFT; 883 nr_pages = PAGE_ALIGN(xfer->length + offset) >> PAGE_SHIFT;
884 884
885 page_list = kmalloc_array(nr_pages, 885 page_list = kmalloc_array(nr_pages,
diff --git a/drivers/staging/vboxvideo/vbox_mode.c b/drivers/staging/vboxvideo/vbox_mode.c
index 257a77830410..c745a0402c68 100644
--- a/drivers/staging/vboxvideo/vbox_mode.c
+++ b/drivers/staging/vboxvideo/vbox_mode.c
@@ -377,7 +377,7 @@ static struct drm_encoder *vbox_best_single_encoder(struct drm_connector
377 377
378 /* pick the encoder ids */ 378 /* pick the encoder ids */
379 if (enc_id) 379 if (enc_id)
380 return drm_encoder_find(connector->dev, enc_id); 380 return drm_encoder_find(connector->dev, NULL, enc_id);
381 381
382 return NULL; 382 return NULL;
383} 383}
diff --git a/include/drm/bridge/mhl.h b/include/drm/bridge/mhl.h
index fbdfc8d7f3c7..96a5e0f6ff12 100644
--- a/include/drm/bridge/mhl.h
+++ b/include/drm/bridge/mhl.h
@@ -262,6 +262,10 @@ enum {
262#define MHL_RAPK_UNSUPPORTED 0x02 /* Rcvd RAP action code not supported */ 262#define MHL_RAPK_UNSUPPORTED 0x02 /* Rcvd RAP action code not supported */
263#define MHL_RAPK_BUSY 0x03 /* Responder too busy to respond */ 263#define MHL_RAPK_BUSY 0x03 /* Responder too busy to respond */
264 264
265/* Bit masks for RCP messages */
266#define MHL_RCP_KEY_RELEASED_MASK 0x80
267#define MHL_RCP_KEY_ID_MASK 0x7F
268
265/* 269/*
266 * Error status codes for RCPE messages 270 * Error status codes for RCPE messages
267 */ 271 */
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 5834580d75bc..5afd6e364fb6 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -585,12 +585,12 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
585 */ 585 */
586#define for_each_oldnew_connector_in_state(__state, connector, old_connector_state, new_connector_state, __i) \ 586#define for_each_oldnew_connector_in_state(__state, connector, old_connector_state, new_connector_state, __i) \
587 for ((__i) = 0; \ 587 for ((__i) = 0; \
588 (__i) < (__state)->num_connector && \ 588 (__i) < (__state)->num_connector; \
589 ((connector) = (__state)->connectors[__i].ptr, \ 589 (__i)++) \
590 (old_connector_state) = (__state)->connectors[__i].old_state, \ 590 for_each_if ((__state)->connectors[__i].ptr && \
591 (new_connector_state) = (__state)->connectors[__i].new_state, 1); \ 591 ((connector) = (__state)->connectors[__i].ptr, \
592 (__i)++) \ 592 (old_connector_state) = (__state)->connectors[__i].old_state, \
593 for_each_if (connector) 593 (new_connector_state) = (__state)->connectors[__i].new_state, 1))
594 594
595/** 595/**
596 * for_each_old_connector_in_state - iterate over all connectors in an atomic update 596 * for_each_old_connector_in_state - iterate over all connectors in an atomic update
@@ -606,11 +606,11 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
606 */ 606 */
607#define for_each_old_connector_in_state(__state, connector, old_connector_state, __i) \ 607#define for_each_old_connector_in_state(__state, connector, old_connector_state, __i) \
608 for ((__i) = 0; \ 608 for ((__i) = 0; \
609 (__i) < (__state)->num_connector && \ 609 (__i) < (__state)->num_connector; \
610 ((connector) = (__state)->connectors[__i].ptr, \ 610 (__i)++) \
611 (old_connector_state) = (__state)->connectors[__i].old_state, 1); \ 611 for_each_if ((__state)->connectors[__i].ptr && \
612 (__i)++) \ 612 ((connector) = (__state)->connectors[__i].ptr, \
613 for_each_if (connector) 613 (old_connector_state) = (__state)->connectors[__i].old_state, 1))
614 614
615/** 615/**
616 * for_each_new_connector_in_state - iterate over all connectors in an atomic update 616 * for_each_new_connector_in_state - iterate over all connectors in an atomic update
@@ -626,11 +626,11 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
626 */ 626 */
627#define for_each_new_connector_in_state(__state, connector, new_connector_state, __i) \ 627#define for_each_new_connector_in_state(__state, connector, new_connector_state, __i) \
628 for ((__i) = 0; \ 628 for ((__i) = 0; \
629 (__i) < (__state)->num_connector && \ 629 (__i) < (__state)->num_connector; \
630 ((connector) = (__state)->connectors[__i].ptr, \ 630 (__i)++) \
631 (new_connector_state) = (__state)->connectors[__i].new_state, 1); \ 631 for_each_if ((__state)->connectors[__i].ptr && \
632 (__i)++) \ 632 ((connector) = (__state)->connectors[__i].ptr, \
633 for_each_if (connector) 633 (new_connector_state) = (__state)->connectors[__i].new_state, 1))
634 634
635/** 635/**
636 * for_each_oldnew_crtc_in_state - iterate over all CRTCs in an atomic update 636 * for_each_oldnew_crtc_in_state - iterate over all CRTCs in an atomic update
@@ -646,12 +646,12 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
646 */ 646 */
647#define for_each_oldnew_crtc_in_state(__state, crtc, old_crtc_state, new_crtc_state, __i) \ 647#define for_each_oldnew_crtc_in_state(__state, crtc, old_crtc_state, new_crtc_state, __i) \
648 for ((__i) = 0; \ 648 for ((__i) = 0; \
649 (__i) < (__state)->dev->mode_config.num_crtc && \ 649 (__i) < (__state)->dev->mode_config.num_crtc; \
650 ((crtc) = (__state)->crtcs[__i].ptr, \
651 (old_crtc_state) = (__state)->crtcs[__i].old_state, \
652 (new_crtc_state) = (__state)->crtcs[__i].new_state, 1); \
653 (__i)++) \ 650 (__i)++) \
654 for_each_if (crtc) 651 for_each_if ((__state)->crtcs[__i].ptr && \
652 ((crtc) = (__state)->crtcs[__i].ptr, \
653 (old_crtc_state) = (__state)->crtcs[__i].old_state, \
654 (new_crtc_state) = (__state)->crtcs[__i].new_state, 1))
655 655
656/** 656/**
657 * for_each_old_crtc_in_state - iterate over all CRTCs in an atomic update 657 * for_each_old_crtc_in_state - iterate over all CRTCs in an atomic update
@@ -666,11 +666,11 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
666 */ 666 */
667#define for_each_old_crtc_in_state(__state, crtc, old_crtc_state, __i) \ 667#define for_each_old_crtc_in_state(__state, crtc, old_crtc_state, __i) \
668 for ((__i) = 0; \ 668 for ((__i) = 0; \
669 (__i) < (__state)->dev->mode_config.num_crtc && \ 669 (__i) < (__state)->dev->mode_config.num_crtc; \
670 ((crtc) = (__state)->crtcs[__i].ptr, \
671 (old_crtc_state) = (__state)->crtcs[__i].old_state, 1); \
672 (__i)++) \ 670 (__i)++) \
673 for_each_if (crtc) 671 for_each_if ((__state)->crtcs[__i].ptr && \
672 ((crtc) = (__state)->crtcs[__i].ptr, \
673 (old_crtc_state) = (__state)->crtcs[__i].old_state, 1))
674 674
675/** 675/**
676 * for_each_new_crtc_in_state - iterate over all CRTCs in an atomic update 676 * for_each_new_crtc_in_state - iterate over all CRTCs in an atomic update
@@ -685,11 +685,11 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
685 */ 685 */
686#define for_each_new_crtc_in_state(__state, crtc, new_crtc_state, __i) \ 686#define for_each_new_crtc_in_state(__state, crtc, new_crtc_state, __i) \
687 for ((__i) = 0; \ 687 for ((__i) = 0; \
688 (__i) < (__state)->dev->mode_config.num_crtc && \ 688 (__i) < (__state)->dev->mode_config.num_crtc; \
689 ((crtc) = (__state)->crtcs[__i].ptr, \
690 (new_crtc_state) = (__state)->crtcs[__i].new_state, 1); \
691 (__i)++) \ 689 (__i)++) \
692 for_each_if (crtc) 690 for_each_if ((__state)->crtcs[__i].ptr && \
691 ((crtc) = (__state)->crtcs[__i].ptr, \
692 (new_crtc_state) = (__state)->crtcs[__i].new_state, 1))
693 693
694/** 694/**
695 * for_each_oldnew_plane_in_state - iterate over all planes in an atomic update 695 * for_each_oldnew_plane_in_state - iterate over all planes in an atomic update
@@ -705,12 +705,12 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
705 */ 705 */
706#define for_each_oldnew_plane_in_state(__state, plane, old_plane_state, new_plane_state, __i) \ 706#define for_each_oldnew_plane_in_state(__state, plane, old_plane_state, new_plane_state, __i) \
707 for ((__i) = 0; \ 707 for ((__i) = 0; \
708 (__i) < (__state)->dev->mode_config.num_total_plane && \ 708 (__i) < (__state)->dev->mode_config.num_total_plane; \
709 ((plane) = (__state)->planes[__i].ptr, \
710 (old_plane_state) = (__state)->planes[__i].old_state, \
711 (new_plane_state) = (__state)->planes[__i].new_state, 1); \
712 (__i)++) \ 709 (__i)++) \
713 for_each_if (plane) 710 for_each_if ((__state)->planes[__i].ptr && \
711 ((plane) = (__state)->planes[__i].ptr, \
712 (old_plane_state) = (__state)->planes[__i].old_state,\
713 (new_plane_state) = (__state)->planes[__i].new_state, 1))
714 714
715/** 715/**
716 * for_each_old_plane_in_state - iterate over all planes in an atomic update 716 * for_each_old_plane_in_state - iterate over all planes in an atomic update
@@ -725,12 +725,11 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
725 */ 725 */
726#define for_each_old_plane_in_state(__state, plane, old_plane_state, __i) \ 726#define for_each_old_plane_in_state(__state, plane, old_plane_state, __i) \
727 for ((__i) = 0; \ 727 for ((__i) = 0; \
728 (__i) < (__state)->dev->mode_config.num_total_plane && \ 728 (__i) < (__state)->dev->mode_config.num_total_plane; \
729 ((plane) = (__state)->planes[__i].ptr, \
730 (old_plane_state) = (__state)->planes[__i].old_state, 1); \
731 (__i)++) \ 729 (__i)++) \
732 for_each_if (plane) 730 for_each_if ((__state)->planes[__i].ptr && \
733 731 ((plane) = (__state)->planes[__i].ptr, \
732 (old_plane_state) = (__state)->planes[__i].old_state, 1))
734/** 733/**
735 * for_each_new_plane_in_state - iterate over all planes in an atomic update 734 * for_each_new_plane_in_state - iterate over all planes in an atomic update
736 * @__state: &struct drm_atomic_state pointer 735 * @__state: &struct drm_atomic_state pointer
@@ -744,11 +743,11 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
744 */ 743 */
745#define for_each_new_plane_in_state(__state, plane, new_plane_state, __i) \ 744#define for_each_new_plane_in_state(__state, plane, new_plane_state, __i) \
746 for ((__i) = 0; \ 745 for ((__i) = 0; \
747 (__i) < (__state)->dev->mode_config.num_total_plane && \ 746 (__i) < (__state)->dev->mode_config.num_total_plane; \
748 ((plane) = (__state)->planes[__i].ptr, \
749 (new_plane_state) = (__state)->planes[__i].new_state, 1); \
750 (__i)++) \ 747 (__i)++) \
751 for_each_if (plane) 748 for_each_if ((__state)->planes[__i].ptr && \
749 ((plane) = (__state)->planes[__i].ptr, \
750 (new_plane_state) = (__state)->planes[__i].new_state, 1))
752 751
753/** 752/**
754 * for_each_oldnew_private_obj_in_state - iterate over all private objects in an atomic update 753 * for_each_oldnew_private_obj_in_state - iterate over all private objects in an atomic update
@@ -768,8 +767,7 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
768 ((obj) = (__state)->private_objs[__i].ptr, \ 767 ((obj) = (__state)->private_objs[__i].ptr, \
769 (old_obj_state) = (__state)->private_objs[__i].old_state, \ 768 (old_obj_state) = (__state)->private_objs[__i].old_state, \
770 (new_obj_state) = (__state)->private_objs[__i].new_state, 1); \ 769 (new_obj_state) = (__state)->private_objs[__i].new_state, 1); \
771 (__i)++) \ 770 (__i)++)
772 for_each_if (obj)
773 771
774/** 772/**
775 * for_each_old_private_obj_in_state - iterate over all private objects in an atomic update 773 * for_each_old_private_obj_in_state - iterate over all private objects in an atomic update
@@ -787,8 +785,7 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
787 (__i) < (__state)->num_private_objs && \ 785 (__i) < (__state)->num_private_objs && \
788 ((obj) = (__state)->private_objs[__i].ptr, \ 786 ((obj) = (__state)->private_objs[__i].ptr, \
789 (old_obj_state) = (__state)->private_objs[__i].old_state, 1); \ 787 (old_obj_state) = (__state)->private_objs[__i].old_state, 1); \
790 (__i)++) \ 788 (__i)++)
791 for_each_if (obj)
792 789
793/** 790/**
794 * for_each_new_private_obj_in_state - iterate over all private objects in an atomic update 791 * for_each_new_private_obj_in_state - iterate over all private objects in an atomic update
@@ -806,8 +803,7 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
806 (__i) < (__state)->num_private_objs && \ 803 (__i) < (__state)->num_private_objs && \
807 ((obj) = (__state)->private_objs[__i].ptr, \ 804 ((obj) = (__state)->private_objs[__i].ptr, \
808 (new_obj_state) = (__state)->private_objs[__i].new_state, 1); \ 805 (new_obj_state) = (__state)->private_objs[__i].new_state, 1); \
809 (__i)++) \ 806 (__i)++)
810 for_each_if (obj)
811 807
812/** 808/**
813 * drm_atomic_crtc_needs_modeset - compute combined modeset need 809 * drm_atomic_crtc_needs_modeset - compute combined modeset need
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index b34904dc8b9b..b4285c40e1e4 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -939,10 +939,11 @@ static inline unsigned drm_connector_index(struct drm_connector *connector)
939 * add takes a reference to it. 939 * add takes a reference to it.
940 */ 940 */
941static inline struct drm_connector *drm_connector_lookup(struct drm_device *dev, 941static inline struct drm_connector *drm_connector_lookup(struct drm_device *dev,
942 struct drm_file *file_priv,
942 uint32_t id) 943 uint32_t id)
943{ 944{
944 struct drm_mode_object *mo; 945 struct drm_mode_object *mo;
945 mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_CONNECTOR); 946 mo = drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_CONNECTOR);
946 return mo ? obj_to_connector(mo) : NULL; 947 return mo ? obj_to_connector(mo) : NULL;
947} 948}
948 949
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 80c97210eda5..f7fcceef46d9 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -959,10 +959,11 @@ struct drm_crtc *drm_crtc_from_index(struct drm_device *dev, int idx);
959 * userspace interface should be done using &drm_property. 959 * userspace interface should be done using &drm_property.
960 */ 960 */
961static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev, 961static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev,
962 uint32_t id) 962 struct drm_file *file_priv,
963 uint32_t id)
963{ 964{
964 struct drm_mode_object *mo; 965 struct drm_mode_object *mo;
965 mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_CRTC); 966 mo = drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_CRTC);
966 return mo ? obj_to_crtc(mo) : NULL; 967 return mo ? obj_to_crtc(mo) : NULL;
967} 968}
968 969
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 3123988e819b..2623a1255481 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -752,6 +752,12 @@
752# define DP_PSR_SINK_INTERNAL_ERROR 7 752# define DP_PSR_SINK_INTERNAL_ERROR 7
753# define DP_PSR_SINK_STATE_MASK 0x07 753# define DP_PSR_SINK_STATE_MASK 0x07
754 754
755#define DP_SYNCHRONIZATION_LATENCY_IN_SINK 0x2009 /* edp 1.4 */
756# define DP_MAX_RESYNC_FRAME_COUNT_MASK (0xf << 0)
757# define DP_MAX_RESYNC_FRAME_COUNT_SHIFT 0
758# define DP_LAST_ACTUAL_SYNCHRONIZATION_LATENCY_MASK (0xf << 4)
759# define DP_LAST_ACTUAL_SYNCHRONIZATION_LATENCY_SHIFT 4
760
755#define DP_RECEIVER_ALPM_STATUS 0x200b /* eDP 1.4 */ 761#define DP_RECEIVER_ALPM_STATUS 0x200b /* eDP 1.4 */
756# define DP_ALPM_LOCK_TIMEOUT_ERROR (1 << 0) 762# define DP_ALPM_LOCK_TIMEOUT_ERROR (1 << 0)
757 763
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index ee06ecd6c01f..412e83a4d3db 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -324,7 +324,7 @@ struct drm_driver {
324 */ 324 */
325 bool (*get_vblank_timestamp) (struct drm_device *dev, unsigned int pipe, 325 bool (*get_vblank_timestamp) (struct drm_device *dev, unsigned int pipe,
326 int *max_error, 326 int *max_error,
327 struct timeval *vblank_time, 327 ktime_t *vblank_time,
328 bool in_vblank_irq); 328 bool in_vblank_irq);
329 329
330 /** 330 /**
diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h
index 8d8245ec0181..86db0da8bdcb 100644
--- a/include/drm/drm_encoder.h
+++ b/include/drm/drm_encoder.h
@@ -214,11 +214,12 @@ static inline bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
214 * drm_mode_object_find(). 214 * drm_mode_object_find().
215 */ 215 */
216static inline struct drm_encoder *drm_encoder_find(struct drm_device *dev, 216static inline struct drm_encoder *drm_encoder_find(struct drm_device *dev,
217 struct drm_file *file_priv,
217 uint32_t id) 218 uint32_t id)
218{ 219{
219 struct drm_mode_object *mo; 220 struct drm_mode_object *mo;
220 221
221 mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER); 222 mo = drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_ENCODER);
222 223
223 return mo ? obj_to_encoder(mo) : NULL; 224 return mo ? obj_to_encoder(mo) : NULL;
224} 225}
diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h
index b6996ddb19d6..4c5ee4ae54df 100644
--- a/include/drm/drm_framebuffer.h
+++ b/include/drm/drm_framebuffer.h
@@ -205,6 +205,7 @@ int drm_framebuffer_init(struct drm_device *dev,
205 struct drm_framebuffer *fb, 205 struct drm_framebuffer *fb,
206 const struct drm_framebuffer_funcs *funcs); 206 const struct drm_framebuffer_funcs *funcs);
207struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev, 207struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
208 struct drm_file *file_priv,
208 uint32_t id); 209 uint32_t id);
209void drm_framebuffer_remove(struct drm_framebuffer *fb); 210void drm_framebuffer_remove(struct drm_framebuffer *fb);
210void drm_framebuffer_cleanup(struct drm_framebuffer *fb); 211void drm_framebuffer_cleanup(struct drm_framebuffer *fb);
diff --git a/include/drm/drm_mode_object.h b/include/drm/drm_mode_object.h
index a767b4a30a6d..b2f920b518e3 100644
--- a/include/drm/drm_mode_object.h
+++ b/include/drm/drm_mode_object.h
@@ -27,6 +27,7 @@
27struct drm_object_properties; 27struct drm_object_properties;
28struct drm_property; 28struct drm_property;
29struct drm_device; 29struct drm_device;
30struct drm_file;
30 31
31/** 32/**
32 * struct drm_mode_object - base structure for modeset objects 33 * struct drm_mode_object - base structure for modeset objects
@@ -113,6 +114,7 @@ struct drm_object_properties {
113 } 114 }
114 115
115struct drm_mode_object *drm_mode_object_find(struct drm_device *dev, 116struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
117 struct drm_file *file_priv,
116 uint32_t id, uint32_t type); 118 uint32_t id, uint32_t type);
117void drm_mode_object_get(struct drm_mode_object *obj); 119void drm_mode_object_get(struct drm_mode_object *obj);
118void drm_mode_object_put(struct drm_mode_object *obj); 120void drm_mode_object_put(struct drm_mode_object *obj);
diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h
index 104dd517fdbe..d20ec4e0431d 100644
--- a/include/drm/drm_of.h
+++ b/include/drm/drm_of.h
@@ -2,6 +2,9 @@
2#define __DRM_OF_H__ 2#define __DRM_OF_H__
3 3
4#include <linux/of_graph.h> 4#include <linux/of_graph.h>
5#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DRM_PANEL_BRIDGE)
6#include <drm/drm_bridge.h>
7#endif
5 8
6struct component_master_ops; 9struct component_master_ops;
7struct component_match; 10struct component_match;
@@ -67,6 +70,34 @@ static inline int drm_of_find_panel_or_bridge(const struct device_node *np,
67} 70}
68#endif 71#endif
69 72
73/*
74 * drm_of_panel_bridge_remove - remove panel bridge
75 * @np: device tree node containing panel bridge output ports
76 *
77 * Remove the panel bridge of a given DT node's port and endpoint number
78 *
79 * Returns zero if successful, or one of the standard error codes if it fails.
80 */
81static inline int drm_of_panel_bridge_remove(const struct device_node *np,
82 int port, int endpoint)
83{
84#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DRM_PANEL_BRIDGE)
85 struct drm_bridge *bridge;
86 struct device_node *remote;
87
88 remote = of_graph_get_remote_node(np, port, endpoint);
89 if (!remote)
90 return -ENODEV;
91
92 bridge = of_drm_find_bridge(remote);
93 drm_panel_bridge_remove(bridge);
94
95 return 0;
96#else
97 return -EINVAL;
98#endif
99}
100
70static inline int drm_of_encoder_active_endpoint_id(struct device_node *node, 101static inline int drm_of_encoder_active_endpoint_id(struct device_node *node,
71 struct drm_encoder *encoder) 102 struct drm_encoder *encoder)
72{ 103{
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 82a217bd77f0..069c4c8ce360 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -597,10 +597,11 @@ int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
597 * drm_mode_object_find(). 597 * drm_mode_object_find().
598 */ 598 */
599static inline struct drm_plane *drm_plane_find(struct drm_device *dev, 599static inline struct drm_plane *drm_plane_find(struct drm_device *dev,
600 struct drm_file *file_priv,
600 uint32_t id) 601 uint32_t id)
601{ 602{
602 struct drm_mode_object *mo; 603 struct drm_mode_object *mo;
603 mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_PLANE); 604 mo = drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_PLANE);
604 return mo ? obj_to_plane(mo) : NULL; 605 return mo ? obj_to_plane(mo) : NULL;
605} 606}
606 607
diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h
index 37355c623e6c..429d8218f740 100644
--- a/include/drm/drm_property.h
+++ b/include/drm/drm_property.h
@@ -312,10 +312,11 @@ drm_property_unreference_blob(struct drm_property_blob *blob)
312 * This function looks up the property object specified by id and returns it. 312 * This function looks up the property object specified by id and returns it.
313 */ 313 */
314static inline struct drm_property *drm_property_find(struct drm_device *dev, 314static inline struct drm_property *drm_property_find(struct drm_device *dev,
315 struct drm_file *file_priv,
315 uint32_t id) 316 uint32_t id)
316{ 317{
317 struct drm_mode_object *mo; 318 struct drm_mode_object *mo;
318 mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_PROPERTY); 319 mo = drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_PROPERTY);
319 return mo ? obj_to_property(mo) : NULL; 320 return mo ? obj_to_property(mo) : NULL;
320} 321}
321 322
diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h
index 7fba9efe4951..6a58e2e91a0f 100644
--- a/include/drm/drm_vblank.h
+++ b/include/drm/drm_vblank.h
@@ -92,7 +92,7 @@ struct drm_vblank_crtc {
92 /** 92 /**
93 * @time: Vblank timestamp corresponding to @count. 93 * @time: Vblank timestamp corresponding to @count.
94 */ 94 */
95 struct timeval time; 95 ktime_t time;
96 96
97 /** 97 /**
98 * @refcount: Number of users/waiters of the vblank interrupt. Only when 98 * @refcount: Number of users/waiters of the vblank interrupt. Only when
@@ -154,7 +154,7 @@ struct drm_vblank_crtc {
154int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs); 154int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs);
155u32 drm_crtc_vblank_count(struct drm_crtc *crtc); 155u32 drm_crtc_vblank_count(struct drm_crtc *crtc);
156u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc, 156u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
157 struct timeval *vblanktime); 157 ktime_t *vblanktime);
158void drm_crtc_send_vblank_event(struct drm_crtc *crtc, 158void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
159 struct drm_pending_vblank_event *e); 159 struct drm_pending_vblank_event *e);
160void drm_crtc_arm_vblank_event(struct drm_crtc *crtc, 160void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
@@ -172,7 +172,7 @@ u32 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc);
172 172
173bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, 173bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
174 unsigned int pipe, int *max_error, 174 unsigned int pipe, int *max_error,
175 struct timeval *vblank_time, 175 ktime_t *vblank_time,
176 bool in_vblank_irq); 176 bool in_vblank_irq);
177void drm_calc_timestamping_constants(struct drm_crtc *crtc, 177void drm_calc_timestamping_constants(struct drm_crtc *crtc,
178 const struct drm_display_mode *mode); 178 const struct drm_display_mode *mode);
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 1257e15c1a03..972a25633525 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -339,7 +339,6 @@
339#define INTEL_KBL_GT1_IDS(info) \ 339#define INTEL_KBL_GT1_IDS(info) \
340 INTEL_VGA_DEVICE(0x5913, info), /* ULT GT1.5 */ \ 340 INTEL_VGA_DEVICE(0x5913, info), /* ULT GT1.5 */ \
341 INTEL_VGA_DEVICE(0x5915, info), /* ULX GT1.5 */ \ 341 INTEL_VGA_DEVICE(0x5915, info), /* ULX GT1.5 */ \
342 INTEL_VGA_DEVICE(0x5917, info), /* DT GT1.5 */ \
343 INTEL_VGA_DEVICE(0x5906, info), /* ULT GT1 */ \ 342 INTEL_VGA_DEVICE(0x5906, info), /* ULT GT1 */ \
344 INTEL_VGA_DEVICE(0x590E, info), /* ULX GT1 */ \ 343 INTEL_VGA_DEVICE(0x590E, info), /* ULX GT1 */ \
345 INTEL_VGA_DEVICE(0x5902, info), /* DT GT1 */ \ 344 INTEL_VGA_DEVICE(0x5902, info), /* DT GT1 */ \
@@ -349,6 +348,7 @@
349 348
350#define INTEL_KBL_GT2_IDS(info) \ 349#define INTEL_KBL_GT2_IDS(info) \
351 INTEL_VGA_DEVICE(0x5916, info), /* ULT GT2 */ \ 350 INTEL_VGA_DEVICE(0x5916, info), /* ULT GT2 */ \
351 INTEL_VGA_DEVICE(0x5917, info), /* Mobile GT2 */ \
352 INTEL_VGA_DEVICE(0x5921, info), /* ULT GT2F */ \ 352 INTEL_VGA_DEVICE(0x5921, info), /* ULT GT2F */ \
353 INTEL_VGA_DEVICE(0x591E, info), /* ULX GT2 */ \ 353 INTEL_VGA_DEVICE(0x591E, info), /* ULX GT2 */ \
354 INTEL_VGA_DEVICE(0x5912, info), /* DT GT2 */ \ 354 INTEL_VGA_DEVICE(0x5912, info), /* DT GT2 */ \
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index 171895072435..ca974224d92e 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -248,9 +248,12 @@ dma_fence_get_rcu_safe(struct dma_fence * __rcu *fencep)
248 struct dma_fence *fence; 248 struct dma_fence *fence;
249 249
250 fence = rcu_dereference(*fencep); 250 fence = rcu_dereference(*fencep);
251 if (!fence || !dma_fence_get_rcu(fence)) 251 if (!fence)
252 return NULL; 252 return NULL;
253 253
254 if (!dma_fence_get_rcu(fence))
255 continue;
256
254 /* The atomic_inc_not_zero() inside dma_fence_get_rcu() 257 /* The atomic_inc_not_zero() inside dma_fence_get_rcu()
255 * provides a full memory barrier upon success (such as now). 258 * provides a full memory barrier upon success (such as now).
256 * This is paired with the write barrier from assigning 259 * This is paired with the write barrier from assigning
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 978abfbac617..93a4663d7acb 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -139,6 +139,45 @@ struct reg_sequence {
139 pollret ?: ((cond) ? 0 : -ETIMEDOUT); \ 139 pollret ?: ((cond) ? 0 : -ETIMEDOUT); \
140}) 140})
141 141
142/**
143 * regmap_field_read_poll_timeout - Poll until a condition is met or timeout
144 *
145 * @field: Regmap field to read from
146 * @val: Unsigned integer variable to read the value into
147 * @cond: Break condition (usually involving @val)
148 * @sleep_us: Maximum time to sleep between reads in us (0
149 * tight-loops). Should be less than ~20ms since usleep_range
150 * is used (see Documentation/timers/timers-howto.txt).
151 * @timeout_us: Timeout in us, 0 means never timeout
152 *
153 * Returns 0 on success and -ETIMEDOUT upon a timeout or the regmap_field_read
154 * error return value in case of a error read. In the two former cases,
155 * the last read value at @addr is stored in @val. Must not be called
156 * from atomic context if sleep_us or timeout_us are used.
157 *
158 * This is modelled after the readx_poll_timeout macros in linux/iopoll.h.
159 */
160#define regmap_field_read_poll_timeout(field, val, cond, sleep_us, timeout_us) \
161({ \
162 ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \
163 int pollret; \
164 might_sleep_if(sleep_us); \
165 for (;;) { \
166 pollret = regmap_field_read((field), &(val)); \
167 if (pollret) \
168 break; \
169 if (cond) \
170 break; \
171 if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
172 pollret = regmap_field_read((field), &(val)); \
173 break; \
174 } \
175 if (sleep_us) \
176 usleep_range((sleep_us >> 2) + 1, sleep_us); \
177 } \
178 pollret ?: ((cond) ? 0 : -ETIMEDOUT); \
179})
180
142#ifdef CONFIG_REGMAP 181#ifdef CONFIG_REGMAP
143 182
144enum regmap_endian { 183enum regmap_endian {
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 4b3286ac60c8..874b50c232de 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -21,6 +21,12 @@ struct scatterlist {
21}; 21};
22 22
23/* 23/*
24 * Since the above length field is an unsigned int, below we define the maximum
25 * length in bytes that can be stored in one scatterlist entry.
26 */
27#define SCATTERLIST_MAX_SEGMENT (UINT_MAX & PAGE_MASK)
28
29/*
24 * These macros should be used after a dma_map_sg call has been done 30 * These macros should be used after a dma_map_sg call has been done
25 * to get bus addresses of each of the SG entries and their lengths. 31 * to get bus addresses of each of the SG entries and their lengths.
26 * You should only work with the number of sg entries dma_map_sg 32 * You should only work with the number of sg entries dma_map_sg
@@ -261,10 +267,13 @@ void sg_free_table(struct sg_table *);
261int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int, 267int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int,
262 struct scatterlist *, gfp_t, sg_alloc_fn *); 268 struct scatterlist *, gfp_t, sg_alloc_fn *);
263int sg_alloc_table(struct sg_table *, unsigned int, gfp_t); 269int sg_alloc_table(struct sg_table *, unsigned int, gfp_t);
264int sg_alloc_table_from_pages(struct sg_table *sgt, 270int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
265 struct page **pages, unsigned int n_pages, 271 unsigned int n_pages, unsigned int offset,
266 unsigned long offset, unsigned long size, 272 unsigned long size, unsigned int max_segment,
267 gfp_t gfp_mask); 273 gfp_t gfp_mask);
274int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
275 unsigned int n_pages, unsigned int offset,
276 unsigned long size, gfp_t gfp_mask);
268 277
269size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf, 278size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
270 size_t buflen, off_t skip, bool to_buffer); 279 size_t buflen, off_t skip, bool to_buffer);
diff --git a/include/uapi/drm/etnaviv_drm.h b/include/uapi/drm/etnaviv_drm.h
index 76f6f78a352b..110cc73bf549 100644
--- a/include/uapi/drm/etnaviv_drm.h
+++ b/include/uapi/drm/etnaviv_drm.h
@@ -150,6 +150,19 @@ struct drm_etnaviv_gem_submit_bo {
150 __u64 presumed; /* in/out, presumed buffer address */ 150 __u64 presumed; /* in/out, presumed buffer address */
151}; 151};
152 152
153/* performance monitor request (pmr) */
154#define ETNA_PM_PROCESS_PRE 0x0001
155#define ETNA_PM_PROCESS_POST 0x0002
156struct drm_etnaviv_gem_submit_pmr {
157 __u32 flags; /* in, when to process request (ETNA_PM_PROCESS_x) */
158 __u8 domain; /* in, pm domain */
159 __u8 pad;
160 __u16 signal; /* in, pm signal */
161 __u32 sequence; /* in, sequence number */
162 __u32 read_offset; /* in, offset from read_bo */
163 __u32 read_idx; /* in, index of read_bo buffer */
164};
165
153/* Each cmdstream submit consists of a table of buffers involved, and 166/* Each cmdstream submit consists of a table of buffers involved, and
154 * one or more cmdstream buffers. This allows for conditional execution 167 * one or more cmdstream buffers. This allows for conditional execution
155 * (context-restore), and IB buffers needed for per tile/bin draw cmds. 168 * (context-restore), and IB buffers needed for per tile/bin draw cmds.
@@ -175,6 +188,9 @@ struct drm_etnaviv_gem_submit {
175 __u64 stream; /* in, ptr to cmdstream */ 188 __u64 stream; /* in, ptr to cmdstream */
176 __u32 flags; /* in, mask of ETNA_SUBMIT_x */ 189 __u32 flags; /* in, mask of ETNA_SUBMIT_x */
177 __s32 fence_fd; /* in/out, fence fd (see ETNA_SUBMIT_FENCE_FD_x) */ 190 __s32 fence_fd; /* in/out, fence fd (see ETNA_SUBMIT_FENCE_FD_x) */
191 __u64 pmrs; /* in, ptr to array of submit_pmr's */
192 __u32 nr_pmrs; /* in, number of submit_pmr's */
193 __u32 pad;
178}; 194};
179 195
180/* The normal way to synchronize with the GPU is just to CPU_PREP on 196/* The normal way to synchronize with the GPU is just to CPU_PREP on
@@ -210,6 +226,27 @@ struct drm_etnaviv_gem_wait {
210 struct drm_etnaviv_timespec timeout; /* in */ 226 struct drm_etnaviv_timespec timeout; /* in */
211}; 227};
212 228
229/*
230 * Performance Monitor (PM):
231 */
232
233struct drm_etnaviv_pm_domain {
234 __u32 pipe; /* in */
235 __u8 iter; /* in/out, select pm domain at index iter */
236 __u8 id; /* out, id of domain */
237 __u16 nr_signals; /* out, how many signals does this domain provide */
238 char name[64]; /* out, name of domain */
239};
240
241struct drm_etnaviv_pm_signal {
242 __u32 pipe; /* in */
243 __u8 domain; /* in, pm domain index */
244 __u8 pad;
245 __u16 iter; /* in/out, select pm source at index iter */
246 __u16 id; /* out, id of signal */
247 char name[64]; /* out, name of domain */
248};
249
213#define DRM_ETNAVIV_GET_PARAM 0x00 250#define DRM_ETNAVIV_GET_PARAM 0x00
214/* placeholder: 251/* placeholder:
215#define DRM_ETNAVIV_SET_PARAM 0x01 252#define DRM_ETNAVIV_SET_PARAM 0x01
@@ -222,7 +259,9 @@ struct drm_etnaviv_gem_wait {
222#define DRM_ETNAVIV_WAIT_FENCE 0x07 259#define DRM_ETNAVIV_WAIT_FENCE 0x07
223#define DRM_ETNAVIV_GEM_USERPTR 0x08 260#define DRM_ETNAVIV_GEM_USERPTR 0x08
224#define DRM_ETNAVIV_GEM_WAIT 0x09 261#define DRM_ETNAVIV_GEM_WAIT 0x09
225#define DRM_ETNAVIV_NUM_IOCTLS 0x0a 262#define DRM_ETNAVIV_PM_QUERY_DOM 0x0a
263#define DRM_ETNAVIV_PM_QUERY_SIG 0x0b
264#define DRM_ETNAVIV_NUM_IOCTLS 0x0c
226 265
227#define DRM_IOCTL_ETNAVIV_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GET_PARAM, struct drm_etnaviv_param) 266#define DRM_IOCTL_ETNAVIV_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GET_PARAM, struct drm_etnaviv_param)
228#define DRM_IOCTL_ETNAVIV_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_NEW, struct drm_etnaviv_gem_new) 267#define DRM_IOCTL_ETNAVIV_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_NEW, struct drm_etnaviv_gem_new)
@@ -233,6 +272,8 @@ struct drm_etnaviv_gem_wait {
233#define DRM_IOCTL_ETNAVIV_WAIT_FENCE DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_WAIT_FENCE, struct drm_etnaviv_wait_fence) 272#define DRM_IOCTL_ETNAVIV_WAIT_FENCE DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_WAIT_FENCE, struct drm_etnaviv_wait_fence)
234#define DRM_IOCTL_ETNAVIV_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_USERPTR, struct drm_etnaviv_gem_userptr) 273#define DRM_IOCTL_ETNAVIV_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_USERPTR, struct drm_etnaviv_gem_userptr)
235#define DRM_IOCTL_ETNAVIV_GEM_WAIT DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_WAIT, struct drm_etnaviv_gem_wait) 274#define DRM_IOCTL_ETNAVIV_GEM_WAIT DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_WAIT, struct drm_etnaviv_gem_wait)
275#define DRM_IOCTL_ETNAVIV_PM_QUERY_DOM DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_PM_QUERY_DOM, struct drm_etnaviv_pm_domain)
276#define DRM_IOCTL_ETNAVIV_PM_QUERY_SIG DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_PM_QUERY_SIG, struct drm_etnaviv_pm_signal)
236 277
237#if defined(__cplusplus) 278#if defined(__cplusplus)
238} 279}
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index d8d10d932759..fe25a01c81f2 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -1308,14 +1308,16 @@ struct drm_i915_reg_read {
1308 * be specified 1308 * be specified
1309 */ 1309 */
1310 __u64 offset; 1310 __u64 offset;
1311#define I915_REG_READ_8B_WA BIT(0)
1312
1311 __u64 val; /* Return value */ 1313 __u64 val; /* Return value */
1312}; 1314};
1313/* Known registers: 1315/* Known registers:
1314 * 1316 *
1315 * Render engine timestamp - 0x2358 + 64bit - gen7+ 1317 * Render engine timestamp - 0x2358 + 64bit - gen7+
1316 * - Note this register returns an invalid value if using the default 1318 * - Note this register returns an invalid value if using the default
1317 * single instruction 8byte read, in order to workaround that use 1319 * single instruction 8byte read, in order to workaround that pass
1318 * offset (0x2538 | 1) instead. 1320 * flag I915_REG_READ_8B_WA in offset field.
1319 * 1321 *
1320 */ 1322 */
1321 1323
@@ -1509,6 +1511,11 @@ struct drm_i915_perf_oa_config {
1509 __u32 n_boolean_regs; 1511 __u32 n_boolean_regs;
1510 __u32 n_flex_regs; 1512 __u32 n_flex_regs;
1511 1513
1514 /*
1515 * These fields are pointers to tuples of u32 values (register
1516 * address, value). For example the expected length of the buffer
1517 * pointed by mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).
1518 */
1512 __u64 mux_regs_ptr; 1519 __u64 mux_regs_ptr;
1513 __u64 boolean_regs_ptr; 1520 __u64 boolean_regs_ptr;
1514 __u64 flex_regs_ptr; 1521 __u64 flex_regs_ptr;
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index be7b4dd6b68d..7c1c55f7daaa 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -370,41 +370,49 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
370EXPORT_SYMBOL(sg_alloc_table); 370EXPORT_SYMBOL(sg_alloc_table);
371 371
372/** 372/**
373 * sg_alloc_table_from_pages - Allocate and initialize an sg table from 373 * __sg_alloc_table_from_pages - Allocate and initialize an sg table from
374 * an array of pages 374 * an array of pages
375 * @sgt: The sg table header to use 375 * @sgt: The sg table header to use
376 * @pages: Pointer to an array of page pointers 376 * @pages: Pointer to an array of page pointers
377 * @n_pages: Number of pages in the pages array 377 * @n_pages: Number of pages in the pages array
378 * @offset: Offset from start of the first page to the start of a buffer 378 * @offset: Offset from start of the first page to the start of a buffer
379 * @size: Number of valid bytes in the buffer (after offset) 379 * @size: Number of valid bytes in the buffer (after offset)
380 * @gfp_mask: GFP allocation mask 380 * @max_segment: Maximum size of a scatterlist node in bytes (page aligned)
381 * @gfp_mask: GFP allocation mask
381 * 382 *
382 * Description: 383 * Description:
383 * Allocate and initialize an sg table from a list of pages. Contiguous 384 * Allocate and initialize an sg table from a list of pages. Contiguous
384 * ranges of the pages are squashed into a single scatterlist node. A user 385 * ranges of the pages are squashed into a single scatterlist node up to the
385 * may provide an offset at a start and a size of valid data in a buffer 386 * maximum size specified in @max_segment. An user may provide an offset at a
386 * specified by the page array. The returned sg table is released by 387 * start and a size of valid data in a buffer specified by the page array.
387 * sg_free_table. 388 * The returned sg table is released by sg_free_table.
388 * 389 *
389 * Returns: 390 * Returns:
390 * 0 on success, negative error on failure 391 * 0 on success, negative error on failure
391 */ 392 */
392int sg_alloc_table_from_pages(struct sg_table *sgt, 393int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
393 struct page **pages, unsigned int n_pages, 394 unsigned int n_pages, unsigned int offset,
394 unsigned long offset, unsigned long size, 395 unsigned long size, unsigned int max_segment,
395 gfp_t gfp_mask) 396 gfp_t gfp_mask)
396{ 397{
397 unsigned int chunks; 398 unsigned int chunks, cur_page, seg_len, i;
398 unsigned int i;
399 unsigned int cur_page;
400 int ret; 399 int ret;
401 struct scatterlist *s; 400 struct scatterlist *s;
402 401
402 if (WARN_ON(!max_segment || offset_in_page(max_segment)))
403 return -EINVAL;
404
403 /* compute number of contiguous chunks */ 405 /* compute number of contiguous chunks */
404 chunks = 1; 406 chunks = 1;
405 for (i = 1; i < n_pages; ++i) 407 seg_len = 0;
406 if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) 408 for (i = 1; i < n_pages; i++) {
407 ++chunks; 409 seg_len += PAGE_SIZE;
410 if (seg_len >= max_segment ||
411 page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
412 chunks++;
413 seg_len = 0;
414 }
415 }
408 416
409 ret = sg_alloc_table(sgt, chunks, gfp_mask); 417 ret = sg_alloc_table(sgt, chunks, gfp_mask);
410 if (unlikely(ret)) 418 if (unlikely(ret))
@@ -413,17 +421,21 @@ int sg_alloc_table_from_pages(struct sg_table *sgt,
413 /* merging chunks and putting them into the scatterlist */ 421 /* merging chunks and putting them into the scatterlist */
414 cur_page = 0; 422 cur_page = 0;
415 for_each_sg(sgt->sgl, s, sgt->orig_nents, i) { 423 for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
416 unsigned long chunk_size; 424 unsigned int j, chunk_size;
417 unsigned int j;
418 425
419 /* look for the end of the current chunk */ 426 /* look for the end of the current chunk */
420 for (j = cur_page + 1; j < n_pages; ++j) 427 seg_len = 0;
421 if (page_to_pfn(pages[j]) != 428 for (j = cur_page + 1; j < n_pages; j++) {
429 seg_len += PAGE_SIZE;
430 if (seg_len >= max_segment ||
431 page_to_pfn(pages[j]) !=
422 page_to_pfn(pages[j - 1]) + 1) 432 page_to_pfn(pages[j - 1]) + 1)
423 break; 433 break;
434 }
424 435
425 chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset; 436 chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
426 sg_set_page(s, pages[cur_page], min(size, chunk_size), offset); 437 sg_set_page(s, pages[cur_page],
438 min_t(unsigned long, size, chunk_size), offset);
427 size -= chunk_size; 439 size -= chunk_size;
428 offset = 0; 440 offset = 0;
429 cur_page = j; 441 cur_page = j;
@@ -431,6 +443,35 @@ int sg_alloc_table_from_pages(struct sg_table *sgt,
431 443
432 return 0; 444 return 0;
433} 445}
446EXPORT_SYMBOL(__sg_alloc_table_from_pages);
447
448/**
449 * sg_alloc_table_from_pages - Allocate and initialize an sg table from
450 * an array of pages
451 * @sgt: The sg table header to use
452 * @pages: Pointer to an array of page pointers
453 * @n_pages: Number of pages in the pages array
454 * @offset: Offset from start of the first page to the start of a buffer
455 * @size: Number of valid bytes in the buffer (after offset)
456 * @gfp_mask: GFP allocation mask
457 *
458 * Description:
459 * Allocate and initialize an sg table from a list of pages. Contiguous
460 * ranges of the pages are squashed into a single scatterlist node. A user
461 * may provide an offset at a start and a size of valid data in a buffer
462 * specified by the page array. The returned sg table is released by
463 * sg_free_table.
464 *
465 * Returns:
466 * 0 on success, negative error on failure
467 */
468int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
469 unsigned int n_pages, unsigned int offset,
470 unsigned long size, gfp_t gfp_mask)
471{
472 return __sg_alloc_table_from_pages(sgt, pages, n_pages, offset, size,
473 SCATTERLIST_MAX_SEGMENT, gfp_mask);
474}
434EXPORT_SYMBOL(sg_alloc_table_from_pages); 475EXPORT_SYMBOL(sg_alloc_table_from_pages);
435 476
436void __sg_page_iter_start(struct sg_page_iter *piter, 477void __sg_page_iter_start(struct sg_page_iter *piter,
diff --git a/tools/testing/scatterlist/Makefile b/tools/testing/scatterlist/Makefile
new file mode 100644
index 000000000000..933c3a6e4d77
--- /dev/null
+++ b/tools/testing/scatterlist/Makefile
@@ -0,0 +1,30 @@
1CFLAGS += -I. -I../../include -g -O2 -Wall -fsanitize=address
2LDFLAGS += -fsanitize=address -fsanitize=undefined
3TARGETS = main
4OFILES = main.o scatterlist.o
5
6ifeq ($(BUILD), 32)
7 CFLAGS += -m32
8 LDFLAGS += -m32
9endif
10
11targets: include $(TARGETS)
12
13main: $(OFILES)
14
15clean:
16 $(RM) $(TARGETS) $(OFILES) scatterlist.c linux/scatterlist.h linux/highmem.h linux/kmemleak.h asm/io.h
17 @rmdir asm
18
19scatterlist.c: ../../../lib/scatterlist.c
20 @sed -e 's/^static //' -e 's/__always_inline //' -e 's/inline //' < $< > $@
21
22.PHONY: include
23
24include: ../../../include/linux/scatterlist.h
25 @mkdir -p linux
26 @mkdir -p asm
27 @touch asm/io.h
28 @touch linux/highmem.h
29 @touch linux/kmemleak.h
30 @cp $< linux/scatterlist.h
diff --git a/tools/testing/scatterlist/linux/mm.h b/tools/testing/scatterlist/linux/mm.h
new file mode 100644
index 000000000000..6f9ac14aa800
--- /dev/null
+++ b/tools/testing/scatterlist/linux/mm.h
@@ -0,0 +1,125 @@
1#ifndef _LINUX_MM_H
2#define _LINUX_MM_H
3
4#include <assert.h>
5#include <string.h>
6#include <stdlib.h>
7#include <errno.h>
8#include <limits.h>
9#include <stdio.h>
10
11typedef unsigned long dma_addr_t;
12
13#define unlikely
14
15#define BUG_ON(x) assert(!(x))
16
17#define WARN_ON(condition) ({ \
18 int __ret_warn_on = !!(condition); \
19 unlikely(__ret_warn_on); \
20})
21
22#define WARN_ON_ONCE(condition) ({ \
23 int __ret_warn_on = !!(condition); \
24 if (unlikely(__ret_warn_on)) \
25 assert(0); \
26 unlikely(__ret_warn_on); \
27})
28
29#define PAGE_SIZE (4096)
30#define PAGE_SHIFT (12)
31#define PAGE_MASK (~(PAGE_SIZE-1))
32
33#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
34#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
35#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
36
37#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
38
39#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
40
41#define virt_to_page(x) ((void *)x)
42#define page_address(x) ((void *)x)
43
44static inline unsigned long page_to_phys(struct page *page)
45{
46 assert(0);
47
48 return 0;
49}
50
51#define page_to_pfn(page) ((unsigned long)(page) / PAGE_SIZE)
52#define pfn_to_page(pfn) (void *)((pfn) * PAGE_SIZE)
53#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
54
55#define __min(t1, t2, min1, min2, x, y) ({ \
56 t1 min1 = (x); \
57 t2 min2 = (y); \
58 (void) (&min1 == &min2); \
59 min1 < min2 ? min1 : min2; })
60
61#define ___PASTE(a,b) a##b
62#define __PASTE(a,b) ___PASTE(a,b)
63
64#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
65
66#define min(x, y) \
67 __min(typeof(x), typeof(y), \
68 __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \
69 x, y)
70
71#define min_t(type, x, y) \
72 __min(type, type, \
73 __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \
74 x, y)
75
76#define preemptible() (1)
77
78static inline void *kmap(struct page *page)
79{
80 assert(0);
81
82 return NULL;
83}
84
85static inline void *kmap_atomic(struct page *page)
86{
87 assert(0);
88
89 return NULL;
90}
91
92static inline void kunmap(void *addr)
93{
94 assert(0);
95}
96
97static inline void kunmap_atomic(void *addr)
98{
99 assert(0);
100}
101
102static inline unsigned long __get_free_page(unsigned int flags)
103{
104 return (unsigned long)malloc(PAGE_SIZE);
105}
106
107static inline void free_page(unsigned long page)
108{
109 free((void *)page);
110}
111
112static inline void *kmalloc(unsigned int size, unsigned int flags)
113{
114 return malloc(size);
115}
116
117#define kfree(x) free(x)
118
119#define kmemleak_alloc(a, b, c, d)
120#define kmemleak_free(a)
121
122#define PageSlab(p) (0)
123#define flush_kernel_dcache_page(p)
124
125#endif
diff --git a/tools/testing/scatterlist/main.c b/tools/testing/scatterlist/main.c
new file mode 100644
index 000000000000..0a1464181226
--- /dev/null
+++ b/tools/testing/scatterlist/main.c
@@ -0,0 +1,79 @@
1#include <stdio.h>
2#include <assert.h>
3
4#include <linux/scatterlist.h>
5
6#define MAX_PAGES (64)
7
8static void set_pages(struct page **pages, const unsigned *array, unsigned num)
9{
10 unsigned int i;
11
12 assert(num < MAX_PAGES);
13 for (i = 0; i < num; i++)
14 pages[i] = (struct page *)(unsigned long)
15 ((1 + array[i]) * PAGE_SIZE);
16}
17
18#define pfn(...) (unsigned []){ __VA_ARGS__ }
19
20int main(void)
21{
22 const unsigned int sgmax = SCATTERLIST_MAX_SEGMENT;
23 struct test {
24 int alloc_ret;
25 unsigned num_pages;
26 unsigned *pfn;
27 unsigned size;
28 unsigned int max_seg;
29 unsigned int expected_segments;
30 } *test, tests[] = {
31 { -EINVAL, 1, pfn(0), PAGE_SIZE, PAGE_SIZE + 1, 1 },
32 { -EINVAL, 1, pfn(0), PAGE_SIZE, 0, 1 },
33 { -EINVAL, 1, pfn(0), PAGE_SIZE, sgmax + 1, 1 },
34 { 0, 1, pfn(0), PAGE_SIZE, sgmax, 1 },
35 { 0, 1, pfn(0), 1, sgmax, 1 },
36 { 0, 2, pfn(0, 1), 2 * PAGE_SIZE, sgmax, 1 },
37 { 0, 2, pfn(1, 0), 2 * PAGE_SIZE, sgmax, 2 },
38 { 0, 3, pfn(0, 1, 2), 3 * PAGE_SIZE, sgmax, 1 },
39 { 0, 3, pfn(0, 2, 1), 3 * PAGE_SIZE, sgmax, 3 },
40 { 0, 3, pfn(0, 1, 3), 3 * PAGE_SIZE, sgmax, 2 },
41 { 0, 3, pfn(1, 2, 4), 3 * PAGE_SIZE, sgmax, 2 },
42 { 0, 3, pfn(1, 3, 4), 3 * PAGE_SIZE, sgmax, 2 },
43 { 0, 4, pfn(0, 1, 3, 4), 4 * PAGE_SIZE, sgmax, 2 },
44 { 0, 5, pfn(0, 1, 3, 4, 5), 5 * PAGE_SIZE, sgmax, 2 },
45 { 0, 5, pfn(0, 1, 3, 4, 6), 5 * PAGE_SIZE, sgmax, 3 },
46 { 0, 5, pfn(0, 1, 2, 3, 4), 5 * PAGE_SIZE, sgmax, 1 },
47 { 0, 5, pfn(0, 1, 2, 3, 4), 5 * PAGE_SIZE, 2 * PAGE_SIZE, 3 },
48 { 0, 6, pfn(0, 1, 2, 3, 4, 5), 6 * PAGE_SIZE, 2 * PAGE_SIZE, 3 },
49 { 0, 6, pfn(0, 2, 3, 4, 5, 6), 6 * PAGE_SIZE, 2 * PAGE_SIZE, 4 },
50 { 0, 6, pfn(0, 1, 3, 4, 5, 6), 6 * PAGE_SIZE, 2 * PAGE_SIZE, 3 },
51 { 0, 0, NULL, 0, 0, 0 },
52 };
53 unsigned int i;
54
55 for (i = 0, test = tests; test->expected_segments; test++, i++) {
56 struct page *pages[MAX_PAGES];
57 struct sg_table st;
58 int ret;
59
60 set_pages(pages, test->pfn, test->num_pages);
61
62 ret = __sg_alloc_table_from_pages(&st, pages, test->num_pages,
63 0, test->size, test->max_seg,
64 GFP_KERNEL);
65 assert(ret == test->alloc_ret);
66
67 if (test->alloc_ret)
68 continue;
69
70 assert(st.nents == test->expected_segments);
71 assert(st.orig_nents == test->expected_segments);
72
73 sg_free_table(&st);
74 }
75
76 assert(i == (sizeof(tests) / sizeof(tests[0])) - 1);
77
78 return 0;
79}