author    Maxime Ripard <maxime.ripard@bootlin.com>  2019-01-24 05:03:16 -0500
committer Maxime Ripard <maxime.ripard@bootlin.com>  2019-01-24 05:03:16 -0500
commit    d2c20b5d37820876a8beea4d8d0bc59147077dd5
tree      ae032c4de299cb8f14275514c5cec634bc46b591
parent    b30b61ff6b1dc37f276cf56a8328b80086a3ffca
parent    8ca4fd0406b41a872055048d694f3702d8eddb76
Merge drm/drm-next into drm-misc-next

danvet needs a backmerge to ease the upcoming drmP.h rework.

Signed-off-by: Maxime Ripard <maxime.ripard@bootlin.com>
-rw-r--r-- Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt | 3
-rw-r--r-- Documentation/devicetree/bindings/display/renesas,du.txt | 2
-rw-r--r-- drivers/acpi/pmic/intel_pmic.c | 61
-rw-r--r-- drivers/acpi/pmic/intel_pmic.h | 4
-rw-r--r-- drivers/acpi/pmic/intel_pmic_chtwc.c | 19
-rw-r--r-- drivers/acpi/pmic/intel_pmic_xpower.c | 1
-rw-r--r-- drivers/gpu/drm/bridge/adv7511/adv7511.h | 4
-rw-r--r-- drivers/gpu/drm/bridge/adv7511/adv7511_drv.c | 8
-rw-r--r-- drivers/gpu/drm/bridge/adv7511/adv7533.c | 2
-rw-r--r-- drivers/gpu/drm/bridge/analogix-anx78xx.c | 4
-rw-r--r-- drivers/gpu/drm/bridge/analogix/analogix_dp_core.c | 4
-rw-r--r-- drivers/gpu/drm/bridge/sii902x.c | 4
-rw-r--r-- drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 4
-rw-r--r-- drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c | 18
-rw-r--r-- drivers/gpu/drm/bridge/tc358767.c | 9
-rw-r--r-- drivers/gpu/drm/drm_bridge.c | 4
-rw-r--r-- drivers/gpu/drm/drm_dp_helper.c | 2
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_mic.c | 4
-rw-r--r-- drivers/gpu/drm/i2c/tda998x_drv.c | 6
-rw-r--r-- drivers/gpu/drm/i915/Makefile | 2
-rw-r--r-- drivers/gpu/drm/i915/dvo.h | 1
-rw-r--r-- drivers/gpu/drm/i915/gvt/vgpu.c | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_cmd_parser.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_debugfs.c | 440
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 50
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 250
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 221
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_context.c | 3
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_dmabuf.c | 1
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_evict.c | 1
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 5
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_fence_reg.c | 11
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_gtt.c | 166
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_gtt.h | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_internal.c | 1
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_object.h | 3
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_shrinker.c | 104
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_stolen.c | 8
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_tiling.c | 5
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_userptr.c | 1
-rw-r--r-- drivers/gpu/drm/i915/i915_gpu_error.c | 76
-rw-r--r-- drivers/gpu/drm/i915/i915_gpu_error.h | 1
-rw-r--r-- drivers/gpu/drm/i915/i915_ioc32.c | 1
-rw-r--r-- drivers/gpu/drm/i915/i915_irq.c | 42
-rw-r--r-- drivers/gpu/drm/i915/i915_params.c | 32
-rw-r--r-- drivers/gpu/drm/i915/i915_params.h | 11
-rw-r--r-- drivers/gpu/drm/i915/i915_pci.c | 6
-rw-r--r-- drivers/gpu/drm/i915/i915_perf.c | 12
-rw-r--r-- drivers/gpu/drm/i915/i915_query.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 222
-rw-r--r-- drivers/gpu/drm/i915/i915_request.c | 208
-rw-r--r-- drivers/gpu/drm/i915/i915_request.h | 1
-rw-r--r-- drivers/gpu/drm/i915/i915_suspend.c | 13
-rw-r--r-- drivers/gpu/drm/i915/i915_sysfs.c | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_timeline.h | 8
-rw-r--r-- drivers/gpu/drm/i915/i915_trace.h | 30
-rw-r--r-- drivers/gpu/drm/i915/intel_acpi.c | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_atomic.c | 3
-rw-r--r-- drivers/gpu/drm/i915/intel_atomic_plane.c | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_audio.c | 3
-rw-r--r-- drivers/gpu/drm/i915/intel_bios.c | 14
-rw-r--r-- drivers/gpu/drm/i915/intel_breadcrumbs.c | 26
-rw-r--r-- drivers/gpu/drm/i915/intel_cdclk.c | 10
-rw-r--r-- drivers/gpu/drm/i915/intel_color.c | 218
-rw-r--r-- drivers/gpu/drm/i915/intel_connector.c | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_crt.c | 7
-rw-r--r-- drivers/gpu/drm/i915/intel_ddi.c | 328
-rw-r--r-- drivers/gpu/drm/i915/intel_device_info.c | 107
-rw-r--r-- drivers/gpu/drm/i915/intel_device_info.h | 31
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 167
-rw-r--r-- drivers/gpu/drm/i915/intel_display.h | 6
-rw-r--r-- drivers/gpu/drm/i915/intel_dp.c | 142
-rw-r--r-- drivers/gpu/drm/i915/intel_dp_mst.c | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_drv.h | 29
-rw-r--r-- drivers/gpu/drm/i915/intel_dsi.h | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_dsi_vbt.c | 24
-rw-r--r-- drivers/gpu/drm/i915/intel_dvo.c | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_engine_cs.c | 88
-rw-r--r-- drivers/gpu/drm/i915/intel_fbc.c | 31
-rw-r--r-- drivers/gpu/drm/i915/intel_fbdev.c | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_fifo_underrun.c | 6
-rw-r--r-- drivers/gpu/drm/i915/intel_frontbuffer.c | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_guc_fw.c | 6
-rw-r--r-- drivers/gpu/drm/i915/intel_guc_submission.c | 3
-rw-r--r-- drivers/gpu/drm/i915/intel_hangcheck.c | 167
-rw-r--r-- drivers/gpu/drm/i915/intel_hdcp.c | 17
-rw-r--r-- drivers/gpu/drm/i915/intel_hdmi.c | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_hotplug.c | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_huc_fw.c | 7
-rw-r--r-- drivers/gpu/drm/i915/intel_i2c.c | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_lrc.c | 250
-rw-r--r-- drivers/gpu/drm/i915/intel_lvds.c | 5
-rw-r--r-- drivers/gpu/drm/i915/intel_mocs.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_mocs.h | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_opregion.c | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_overlay.c | 11
-rw-r--r-- drivers/gpu/drm/i915/intel_panel.c | 8
-rw-r--r-- drivers/gpu/drm/i915/intel_pipe_crc.c | 8
-rw-r--r-- drivers/gpu/drm/i915/intel_pm.c | 738
-rw-r--r-- drivers/gpu/drm/i915/intel_psr.c | 75
-rw-r--r-- drivers/gpu/drm/i915/intel_ringbuffer.c | 505
-rw-r--r-- drivers/gpu/drm/i915/intel_ringbuffer.h | 93
-rw-r--r-- drivers/gpu/drm/i915/intel_runtime_pm.c | 4
-rw-r--r-- drivers/gpu/drm/i915/intel_sdvo.c | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_sprite.c | 7
-rw-r--r-- drivers/gpu/drm/i915/intel_tv.c | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_uc.c | 14
-rw-r--r-- drivers/gpu/drm/i915/intel_uc.h | 6
-rw-r--r-- drivers/gpu/drm/i915/intel_uc_fw.c | 11
-rw-r--r-- drivers/gpu/drm/i915/intel_uncore.c | 147
-rw-r--r-- drivers/gpu/drm/i915/intel_vdsc.c | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_wopcm.c | 6
-rw-r--r-- drivers/gpu/drm/i915/intel_workarounds.c | 131
-rw-r--r-- drivers/gpu/drm/i915/selftests/huge_pages.c | 35
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_gem.c | 2
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_gem_context.c | 6
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_gem_object.c | 1
-rw-r--r-- drivers/gpu/drm/i915/selftests/igt_spinner.c | 86
-rw-r--r-- drivers/gpu/drm/i915/selftests/intel_hangcheck.c | 320
-rw-r--r-- drivers/gpu/drm/i915/selftests/intel_lrc.c | 4
-rw-r--r-- drivers/gpu/drm/i915/selftests/intel_workarounds.c | 82
-rw-r--r-- drivers/gpu/drm/i915/selftests/mock_engine.c | 2
-rw-r--r-- drivers/gpu/drm/i915/selftests/mock_gem_device.c | 2
-rw-r--r-- drivers/gpu/drm/i915/vlv_dsi.c | 1
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_hdmi.c | 4
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi.h | 2
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi_host.c | 2
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi_manager.c | 4
-rw-r--r-- drivers/gpu/drm/msm/edp/edp_bridge.c | 4
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi_bridge.c | 4
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 43
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_crtc.h | 8
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_drv.c | 42
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_drv.h | 9
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_encoder.c | 11
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_encoder.h | 2
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_group.c | 51
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_kms.c | 23
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7790.dts | 93
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7791.dts | 53
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7793.dts | 53
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7795.dts | 53
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7796.dts | 53
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_plane.c | 1
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_plane.h | 3
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_vsp.c | 1
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_vsp.h | 3
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c | 16
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_lvds.c | 5
-rw-r--r-- drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c | 2
-rw-r--r-- drivers/gpu/drm/shmobile/shmob_drm_drv.c | 4
-rw-r--r-- drivers/gpu/drm/sti/sti_dvo.c | 4
-rw-r--r-- drivers/gpu/drm/sti/sti_hda.c | 4
-rw-r--r-- drivers/gpu/drm/sti/sti_hdmi.c | 4
-rw-r--r-- drivers/gpu/drm/stm/dw_mipi_dsi-stm.c | 2
-rw-r--r-- include/drm/bridge/dw_mipi_dsi.h | 3
-rw-r--r-- include/drm/drm_bridge.h | 8
-rw-r--r-- include/drm/drm_dp_helper.h | 11
-rw-r--r-- include/linux/mfd/intel_soc_pmic.h | 3
159 files changed, 3169 insertions(+), 3550 deletions(-)
diff --git a/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt b/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt
index ba5469dd09f3..27a054e1bb5f 100644
--- a/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt
+++ b/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt
@@ -8,6 +8,7 @@ Required properties:
 
 - compatible : Shall contain one of
   - "renesas,r8a7743-lvds" for R8A7743 (RZ/G1M) compatible LVDS encoders
+  - "renesas,r8a774c0-lvds" for R8A774C0 (RZ/G2E) compatible LVDS encoders
   - "renesas,r8a7790-lvds" for R8A7790 (R-Car H2) compatible LVDS encoders
   - "renesas,r8a7791-lvds" for R8A7791 (R-Car M2-W) compatible LVDS encoders
   - "renesas,r8a7793-lvds" for R8A7793 (R-Car M2-N) compatible LVDS encoders
@@ -25,7 +26,7 @@ Required properties:
 - clock-names: Name of the clocks. This property is model-dependent.
   - The functional clock, which mandatory for all models, shall be listed
     first, and shall be named "fck".
-  - On R8A77990 and R8A77995, the LVDS encoder can use the EXTAL or
+  - On R8A77990, R8A77995 and R8A774C0, the LVDS encoder can use the EXTAL or
     DU_DOTCLKINx clocks. Those clocks are optional. When supplied they must be
     named "extal" and "dclkin.x" respectively, with "x" being the DU_DOTCLKIN
     numerical index.
diff --git a/Documentation/devicetree/bindings/display/renesas,du.txt b/Documentation/devicetree/bindings/display/renesas,du.txt
index 3c855d9f2719..aedb22b4d161 100644
--- a/Documentation/devicetree/bindings/display/renesas,du.txt
+++ b/Documentation/devicetree/bindings/display/renesas,du.txt
@@ -7,6 +7,7 @@ Required Properties:
     - "renesas,du-r8a7744" for R8A7744 (RZ/G1N) compatible DU
     - "renesas,du-r8a7745" for R8A7745 (RZ/G1E) compatible DU
     - "renesas,du-r8a77470" for R8A77470 (RZ/G1C) compatible DU
+    - "renesas,du-r8a774c0" for R8A774C0 (RZ/G2E) compatible DU
     - "renesas,du-r8a7779" for R8A7779 (R-Car H1) compatible DU
     - "renesas,du-r8a7790" for R8A7790 (R-Car H2) compatible DU
     - "renesas,du-r8a7791" for R8A7791 (R-Car M2-W) compatible DU
@@ -57,6 +58,7 @@ corresponding to each DU output.
  R8A7744 (RZ/G1N)      DPAD 0   LVDS 0   -        -
  R8A7745 (RZ/G1E)      DPAD 0   DPAD 1   -        -
  R8A77470 (RZ/G1C)     DPAD 0   DPAD 1   LVDS 0   -
+ R8A774C0 (RZ/G2E)     DPAD 0   LVDS 0   LVDS 1   -
  R8A7779 (R-Car H1)    DPAD 0   DPAD 1   -        -
  R8A7790 (R-Car H2)    DPAD 0   LVDS 0   LVDS 1   -
  R8A7791 (R-Car M2-W)  DPAD 0   LVDS 0   -        -
diff --git a/drivers/acpi/pmic/intel_pmic.c b/drivers/acpi/pmic/intel_pmic.c
index ca18e0d23df9..c14cfaea92e2 100644
--- a/drivers/acpi/pmic/intel_pmic.c
+++ b/drivers/acpi/pmic/intel_pmic.c
@@ -15,6 +15,7 @@
 
 #include <linux/export.h>
 #include <linux/acpi.h>
+#include <linux/mfd/intel_soc_pmic.h>
 #include <linux/regmap.h>
 #include <acpi/acpi_lpat.h>
 #include "intel_pmic.h"
@@ -36,6 +37,8 @@ struct intel_pmic_opregion {
 	struct intel_pmic_regs_handler_ctx ctx;
 };
 
+static struct intel_pmic_opregion *intel_pmic_opregion;
+
 static int pmic_get_reg_bit(int address, struct pmic_table *table,
 			    int count, int *reg, int *bit)
 {
@@ -304,6 +307,7 @@ int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle,
 	}
 
 	opregion->data = d;
+	intel_pmic_opregion = opregion;
 	return 0;
 
 out_remove_thermal_handler:
@@ -319,3 +323,60 @@ out_error:
 	return ret;
 }
 EXPORT_SYMBOL_GPL(intel_pmic_install_opregion_handler);
+
+/**
+ * intel_soc_pmic_exec_mipi_pmic_seq_element - Execute PMIC MIPI sequence
+ * @i2c_address: I2C client address for the PMIC
+ * @reg_address: PMIC register address
+ * @value:       New value for the register bits to change
+ * @mask:        Mask indicating which register bits to change
+ *
+ * DSI LCD panels describe an initialization sequence in the i915 VBT (Video
+ * BIOS Tables) using so called MIPI sequences. One possible element in these
+ * sequences is a PMIC specific element of 15 bytes.
+ *
+ * This function executes these PMIC specific elements sending the embedded
+ * commands to the PMIC.
+ *
+ * Return 0 on success, < 0 on failure.
+ */
+int intel_soc_pmic_exec_mipi_pmic_seq_element(u16 i2c_address, u32 reg_address,
+					      u32 value, u32 mask)
+{
+	struct intel_pmic_opregion_data *d;
+	int ret;
+
+	if (!intel_pmic_opregion) {
+		pr_warn("%s: No PMIC registered\n", __func__);
+		return -ENXIO;
+	}
+
+	d = intel_pmic_opregion->data;
+
+	mutex_lock(&intel_pmic_opregion->lock);
+
+	if (d->exec_mipi_pmic_seq_element) {
+		ret = d->exec_mipi_pmic_seq_element(intel_pmic_opregion->regmap,
+						    i2c_address, reg_address,
+						    value, mask);
+	} else if (d->pmic_i2c_address) {
+		if (i2c_address == d->pmic_i2c_address) {
+			ret = regmap_update_bits(intel_pmic_opregion->regmap,
+						 reg_address, mask, value);
+		} else {
+			pr_err("%s: Unexpected i2c-addr: 0x%02x (reg-addr 0x%x value 0x%x mask 0x%x)\n",
+			       __func__, i2c_address, reg_address, value, mask);
+			ret = -ENXIO;
+		}
+	} else {
+		pr_warn("%s: Not implemented\n", __func__);
+		pr_warn("%s: i2c-addr: 0x%x reg-addr 0x%x value 0x%x mask 0x%x\n",
+			__func__, i2c_address, reg_address, value, mask);
+		ret = -EOPNOTSUPP;
+	}
+
+	mutex_unlock(&intel_pmic_opregion->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(intel_soc_pmic_exec_mipi_pmic_seq_element);
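
[Editor's note] Taken together, the intel_pmic.c hunks above add a registration-time global, a vtable hook, and one exported entry point the i915 VBT code can call for the 15-byte PMIC sequence elements. A minimal sketch of a caller, assuming the element has already been parsed into address/value/mask fields (struct pmic_seq_element and play_pmic_element() are illustrative names, not part of the patch):

    /* Hypothetical parsed form of one 15-byte VBT PMIC sequence element. */
    struct pmic_seq_element {
            u16 i2c_address;        /* PMIC I2C client address */
            u32 reg_address;        /* PMIC register to modify */
            u32 value;              /* new value for the masked bits */
            u32 mask;               /* which bits to change */
    };

    static int play_pmic_element(const struct pmic_seq_element *elem)
    {
            /* Dispatches to the PMIC's exec_mipi_pmic_seq_element hook, or
             * falls back to regmap_update_bits() when only pmic_i2c_address
             * is set; returns -ENXIO if no PMIC opregion is registered.
             */
            return intel_soc_pmic_exec_mipi_pmic_seq_element(elem->i2c_address,
                                                             elem->reg_address,
                                                             elem->value,
                                                             elem->mask);
    }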
diff --git a/drivers/acpi/pmic/intel_pmic.h b/drivers/acpi/pmic/intel_pmic.h
index 095afc96952e..89379476a1df 100644
--- a/drivers/acpi/pmic/intel_pmic.h
+++ b/drivers/acpi/pmic/intel_pmic.h
@@ -15,10 +15,14 @@ struct intel_pmic_opregion_data {
 	int (*update_aux)(struct regmap *r, int reg, int raw_temp);
 	int (*get_policy)(struct regmap *r, int reg, int bit, u64 *value);
 	int (*update_policy)(struct regmap *r, int reg, int bit, int enable);
+	int (*exec_mipi_pmic_seq_element)(struct regmap *r, u16 i2c_address,
+					  u32 reg_address, u32 value, u32 mask);
 	struct pmic_table *power_table;
 	int power_table_count;
 	struct pmic_table *thermal_table;
 	int thermal_table_count;
+	/* For generic exec_mipi_pmic_seq_element handling */
+	int pmic_i2c_address;
 };
 
 int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle, struct regmap *regmap, struct intel_pmic_opregion_data *d);
diff --git a/drivers/acpi/pmic/intel_pmic_chtwc.c b/drivers/acpi/pmic/intel_pmic_chtwc.c
index 078b0448f30a..7ffd5624b8e1 100644
--- a/drivers/acpi/pmic/intel_pmic_chtwc.c
+++ b/drivers/acpi/pmic/intel_pmic_chtwc.c
@@ -231,6 +231,24 @@ static int intel_cht_wc_pmic_update_power(struct regmap *regmap, int reg,
 	return regmap_update_bits(regmap, reg, bitmask, on ? 1 : 0);
 }
 
+static int intel_cht_wc_exec_mipi_pmic_seq_element(struct regmap *regmap,
+						   u16 i2c_client_address,
+						   u32 reg_address,
+						   u32 value, u32 mask)
+{
+	u32 address;
+
+	if (i2c_client_address > 0xff || reg_address > 0xff) {
+		pr_warn("%s warning addresses too big client 0x%x reg 0x%x\n",
+			__func__, i2c_client_address, reg_address);
+		return -ERANGE;
+	}
+
+	address = (i2c_client_address << 8) | reg_address;
+
+	return regmap_update_bits(regmap, address, mask, value);
+}
+
 /*
  * The thermal table and ops are empty, we do not support the Thermal opregion
  * (DPTF) due to lacking documentation.
@@ -238,6 +256,7 @@ static int intel_cht_wc_pmic_update_power(struct regmap *regmap, int reg,
 static struct intel_pmic_opregion_data intel_cht_wc_pmic_opregion_data = {
 	.get_power = intel_cht_wc_pmic_get_power,
 	.update_power = intel_cht_wc_pmic_update_power,
+	.exec_mipi_pmic_seq_element = intel_cht_wc_exec_mipi_pmic_seq_element,
 	.power_table = power_table,
 	.power_table_count = ARRAY_SIZE(power_table),
 };
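
[Editor's note] The Whiskey Cove handler packs the I2C client address into the high byte of the regmap address and the register offset into the low byte, rejecting anything wider with -ERANGE. A standalone sketch of that packing, with made-up example values:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the packing in intel_cht_wc_exec_mipi_pmic_seq_element();
     * the values are illustrative, not taken from real hardware.
     */
    int main(void)
    {
            uint32_t i2c_client_address = 0x6e;
            uint32_t reg_address = 0x5d;

            if (i2c_client_address > 0xff || reg_address > 0xff)
                    return 1;       /* the driver returns -ERANGE here */

            printf("regmap address: 0x%04x\n",
                   (i2c_client_address << 8) | reg_address);   /* 0x6e5d */
            return 0;
    }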
diff --git a/drivers/acpi/pmic/intel_pmic_xpower.c b/drivers/acpi/pmic/intel_pmic_xpower.c
index 2579675b7082..1b49cbb1e21e 100644
--- a/drivers/acpi/pmic/intel_pmic_xpower.c
+++ b/drivers/acpi/pmic/intel_pmic_xpower.c
@@ -240,6 +240,7 @@ static struct intel_pmic_opregion_data intel_xpower_pmic_opregion_data = {
 	.power_table_count = ARRAY_SIZE(power_table),
 	.thermal_table = thermal_table,
 	.thermal_table_count = ARRAY_SIZE(thermal_table),
+	.pmic_i2c_address = 0x34,
 };
 
 static acpi_status intel_xpower_pmic_gpio_handler(u32 function,
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
index 73d8ccb97742..2b6e0832d1cf 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511.h
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h
@@ -395,7 +395,7 @@ static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
 #ifdef CONFIG_DRM_I2C_ADV7533
 void adv7533_dsi_power_on(struct adv7511 *adv);
 void adv7533_dsi_power_off(struct adv7511 *adv);
-void adv7533_mode_set(struct adv7511 *adv, struct drm_display_mode *mode);
+void adv7533_mode_set(struct adv7511 *adv, const struct drm_display_mode *mode);
 int adv7533_patch_registers(struct adv7511 *adv);
 int adv7533_patch_cec_registers(struct adv7511 *adv);
 int adv7533_attach_dsi(struct adv7511 *adv);
@@ -411,7 +411,7 @@ static inline void adv7533_dsi_power_off(struct adv7511 *adv)
 }
 
 static inline void adv7533_mode_set(struct adv7511 *adv,
-				    struct drm_display_mode *mode)
+				    const struct drm_display_mode *mode)
 {
 }
 
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 85c2d407a52e..d0e98caa2e2a 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -676,8 +676,8 @@ static enum drm_mode_status adv7511_mode_valid(struct adv7511 *adv7511,
 }
 
 static void adv7511_mode_set(struct adv7511 *adv7511,
-			     struct drm_display_mode *mode,
-			     struct drm_display_mode *adj_mode)
+			     const struct drm_display_mode *mode,
+			     const struct drm_display_mode *adj_mode)
 {
 	unsigned int low_refresh_rate;
 	unsigned int hsync_polarity = 0;
@@ -839,8 +839,8 @@ static void adv7511_bridge_disable(struct drm_bridge *bridge)
 }
 
 static void adv7511_bridge_mode_set(struct drm_bridge *bridge,
-				    struct drm_display_mode *mode,
-				    struct drm_display_mode *adj_mode)
+				    const struct drm_display_mode *mode,
+				    const struct drm_display_mode *adj_mode)
 {
 	struct adv7511 *adv = bridge_to_adv7511(bridge);
 
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c
index 185b6d842166..5d5e7d9eded2 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7533.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c
@@ -108,7 +108,7 @@ void adv7533_dsi_power_off(struct adv7511 *adv)
 	regmap_write(adv->regmap_cec, 0x27, 0x0b);
 }
 
-void adv7533_mode_set(struct adv7511 *adv, struct drm_display_mode *mode)
+void adv7533_mode_set(struct adv7511 *adv, const struct drm_display_mode *mode)
 {
 	struct mipi_dsi_device *dsi = adv->dsi;
 	int lanes, ret;
diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix-anx78xx.c
index e11309e9bc4f..4cf7bc17ae14 100644
--- a/drivers/gpu/drm/bridge/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix-anx78xx.c
@@ -1082,8 +1082,8 @@ static void anx78xx_bridge_disable(struct drm_bridge *bridge)
 }
 
 static void anx78xx_bridge_mode_set(struct drm_bridge *bridge,
-				    struct drm_display_mode *mode,
-				    struct drm_display_mode *adjusted_mode)
+				    const struct drm_display_mode *mode,
+				    const struct drm_display_mode *adjusted_mode)
 {
 	struct anx78xx *anx78xx = bridge_to_anx78xx(bridge);
 	struct hdmi_avi_infoframe frame;
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 753e96129ab7..4d5b47585834 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1361,8 +1361,8 @@ static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
 }
 
 static void analogix_dp_bridge_mode_set(struct drm_bridge *bridge,
-					struct drm_display_mode *orig_mode,
-					struct drm_display_mode *mode)
+					const struct drm_display_mode *orig_mode,
+					const struct drm_display_mode *mode)
 {
 	struct analogix_dp_device *dp = bridge->driver_private;
 	struct drm_display_info *display_info = &dp->connector.display_info;
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index a9b4f45ae87c..a5d58f7035c1 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -232,8 +232,8 @@ static void sii902x_bridge_enable(struct drm_bridge *bridge)
 }
 
 static void sii902x_bridge_mode_set(struct drm_bridge *bridge,
-				    struct drm_display_mode *mode,
-				    struct drm_display_mode *adj)
+				    const struct drm_display_mode *mode,
+				    const struct drm_display_mode *adj)
 {
 	struct sii902x *sii902x = bridge_to_sii902x(bridge);
 	struct regmap *regmap = sii902x->regmap;
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 88b720b63126..129f464cbeb1 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -1999,8 +1999,8 @@ dw_hdmi_bridge_mode_valid(struct drm_bridge *bridge,
 }
 
 static void dw_hdmi_bridge_mode_set(struct drm_bridge *bridge,
-				    struct drm_display_mode *orig_mode,
-				    struct drm_display_mode *mode)
+				    const struct drm_display_mode *orig_mode,
+				    const struct drm_display_mode *mode)
 {
 	struct dw_hdmi *hdmi = bridge->driver_private;
 
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index 2f4b145b73af..23a5977a3b0a 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -248,7 +248,7 @@ static inline bool dw_mipi_is_dual_mode(struct dw_mipi_dsi *dsi)
  * The controller should generate 2 frames before
  * preparing the peripheral.
  */
-static void dw_mipi_dsi_wait_for_two_frames(struct drm_display_mode *mode)
+static void dw_mipi_dsi_wait_for_two_frames(const struct drm_display_mode *mode)
 {
 	int refresh, two_frames;
 
@@ -564,7 +564,7 @@ static void dw_mipi_dsi_init(struct dw_mipi_dsi *dsi)
 }
 
 static void dw_mipi_dsi_dpi_config(struct dw_mipi_dsi *dsi,
-				   struct drm_display_mode *mode)
+				   const struct drm_display_mode *mode)
 {
 	u32 val = 0, color = 0;
 
@@ -607,7 +607,7 @@ static void dw_mipi_dsi_packet_handler_config(struct dw_mipi_dsi *dsi)
 }
 
 static void dw_mipi_dsi_video_packet_config(struct dw_mipi_dsi *dsi,
-					    struct drm_display_mode *mode)
+					    const struct drm_display_mode *mode)
 {
 	/*
 	 * TODO dw drv improvements
@@ -642,7 +642,7 @@ static void dw_mipi_dsi_command_mode_config(struct dw_mipi_dsi *dsi)
 
 /* Get lane byte clock cycles. */
 static u32 dw_mipi_dsi_get_hcomponent_lbcc(struct dw_mipi_dsi *dsi,
-					   struct drm_display_mode *mode,
+					   const struct drm_display_mode *mode,
 					   u32 hcomponent)
 {
 	u32 frac, lbcc;
@@ -658,7 +658,7 @@ static u32 dw_mipi_dsi_get_hcomponent_lbcc(struct dw_mipi_dsi *dsi,
 }
 
 static void dw_mipi_dsi_line_timer_config(struct dw_mipi_dsi *dsi,
-					  struct drm_display_mode *mode)
+					  const struct drm_display_mode *mode)
 {
 	u32 htotal, hsa, hbp, lbcc;
 
@@ -681,7 +681,7 @@ static void dw_mipi_dsi_line_timer_config(struct dw_mipi_dsi *dsi,
 }
 
 static void dw_mipi_dsi_vertical_timing_config(struct dw_mipi_dsi *dsi,
-					       struct drm_display_mode *mode)
+					       const struct drm_display_mode *mode)
 {
 	u32 vactive, vsa, vfp, vbp;
 
@@ -818,7 +818,7 @@ static unsigned int dw_mipi_dsi_get_lanes(struct dw_mipi_dsi *dsi)
 }
 
 static void dw_mipi_dsi_mode_set(struct dw_mipi_dsi *dsi,
-				 struct drm_display_mode *adjusted_mode)
+				 const struct drm_display_mode *adjusted_mode)
 {
 	const struct dw_mipi_dsi_phy_ops *phy_ops = dsi->plat_data->phy_ops;
 	void *priv_data = dsi->plat_data->priv_data;
@@ -861,8 +861,8 @@ static void dw_mipi_dsi_mode_set(struct dw_mipi_dsi *dsi,
 }
 
 static void dw_mipi_dsi_bridge_mode_set(struct drm_bridge *bridge,
-					struct drm_display_mode *mode,
-					struct drm_display_mode *adjusted_mode)
+					const struct drm_display_mode *mode,
+					const struct drm_display_mode *adjusted_mode)
 {
 	struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
 
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 8e28e738cb52..4df07f4cbff5 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -203,7 +203,7 @@ struct tc_data {
 	/* display edid */
 	struct edid *edid;
 	/* current mode */
-	struct drm_display_mode *mode;
+	const struct drm_display_mode *mode;
 
 	u32 rev;
 	u8 assr;
@@ -648,7 +648,8 @@ err_dpcd_read:
 	return ret;
 }
 
-static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
+static int tc_set_video_mode(struct tc_data *tc,
+			     const struct drm_display_mode *mode)
 {
 	int ret;
 	int vid_sync_dly;
@@ -1113,8 +1114,8 @@ static enum drm_mode_status tc_connector_mode_valid(struct drm_connector *connec
 }
 
 static void tc_bridge_mode_set(struct drm_bridge *bridge,
-			       struct drm_display_mode *mode,
-			       struct drm_display_mode *adj)
+			       const struct drm_display_mode *mode,
+			       const struct drm_display_mode *adj)
 {
 	struct tc_data *tc = bridge_to_tc(bridge);
 
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index ba7025041e46..138b2711d389 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -294,8 +294,8 @@ EXPORT_SYMBOL(drm_bridge_post_disable);
  * Note: the bridge passed should be the one closest to the encoder
  */
 void drm_bridge_mode_set(struct drm_bridge *bridge,
-			 struct drm_display_mode *mode,
-			 struct drm_display_mode *adjusted_mode)
+			 const struct drm_display_mode *mode,
+			 const struct drm_display_mode *adjusted_mode)
 {
 	if (!bridge)
 		return;
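
[Editor's note] This drm_bridge.c change is the root of the const-ification thread running through every bridge driver above: drm_bridge_mode_set() now hands ->mode_set() const mode pointers, so a bridge may inspect but no longer mutate the requested or adjusted mode. A sketch of a driver-side implementation against the new signature ("foo" and bridge_to_foo() are made-up names):

    static void foo_bridge_mode_set(struct drm_bridge *bridge,
                                    const struct drm_display_mode *mode,
                                    const struct drm_display_mode *adjusted_mode)
    {
            struct foo *foo = bridge_to_foo(bridge);

            /* Read-only use of the mode is fine... */
            foo->pixel_clock_khz = adjusted_mode->clock;

            /* ...but a write such as "adjusted_mode->clock = 148500;" now
             * fails to compile; mode adjustments belong in ->mode_fixup().
             */
    }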
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index d4ecedccbb31..54120b6319e7 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -1277,6 +1277,8 @@ static const struct dpcd_quirk dpcd_quirk_list[] = {
 	{ OUI(0x00, 0x22, 0xb9), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_CONSTANT_N) },
 	/* LG LP140WF6-SPM1 eDP panel */
 	{ OUI(0x00, 0x22, 0xb9), DEVICE_ID('s', 'i', 'v', 'a', 'r', 'T'), false, BIT(DP_DPCD_QUIRK_CONSTANT_N) },
+	/* Apple panels need some additional handling to support PSR */
+	{ OUI(0x00, 0x10, 0xfa), DEVICE_ID_ANY, false, BIT(DP_DPCD_QUIRK_NO_PSR) }
 };
 
 #undef OUI
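
[Editor's note] The new quirk entry matches every device behind the Apple OUI (00:10:fa) and sets DP_DPCD_QUIRK_NO_PSR. A sink driver would consult it roughly as below; this is a sketch against the drm_dp_read_desc()/drm_dp_has_quirk() helpers as they existed at the time, and sink_can_psr() is an illustrative name:

    static bool sink_can_psr(struct drm_dp_aux *aux,
                             const u8 dpcd[DP_RECEIVER_CAP_SIZE])
    {
            struct drm_dp_desc desc;

            if (drm_dp_read_desc(aux, &desc, drm_dp_is_branch(dpcd)) < 0)
                    return false;

            /* Apple panels (OUI 00:10:fa) carry DP_DPCD_QUIRK_NO_PSR. */
            return !drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_NO_PSR);
    }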
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index 2fd299a58297..dd02e8a323ef 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -246,8 +246,8 @@ already_disabled:
 }
 
 static void mic_mode_set(struct drm_bridge *bridge,
-			 struct drm_display_mode *mode,
-			 struct drm_display_mode *adjusted_mode)
+			 const struct drm_display_mode *mode,
+			 const struct drm_display_mode *adjusted_mode)
 {
 	struct exynos_mic *mic = bridge->driver_private;
 
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 80e4ff33a37a..ecdb8070ed35 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -845,7 +845,7 @@ static int tda998x_write_aif(struct tda998x_priv *priv,
 }
 
 static void
-tda998x_write_avi(struct tda998x_priv *priv, struct drm_display_mode *mode)
+tda998x_write_avi(struct tda998x_priv *priv, const struct drm_display_mode *mode)
 {
 	union hdmi_infoframe frame;
 
@@ -1339,8 +1339,8 @@ static void tda998x_bridge_disable(struct drm_bridge *bridge)
 }
 
 static void tda998x_bridge_mode_set(struct drm_bridge *bridge,
-				    struct drm_display_mode *mode,
-				    struct drm_display_mode *adjusted_mode)
+				    const struct drm_display_mode *mode,
+				    const struct drm_display_mode *adjusted_mode)
 {
 	struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
 	unsigned long tmds_clock;
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 19b5fe5016bf..c34bee16730d 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -40,7 +40,7 @@ i915-y := i915_drv.o \
 	  i915_mm.o \
 	  i915_params.o \
 	  i915_pci.o \
 	  i915_suspend.o \
 	  i915_syncmap.o \
 	  i915_sw_fence.o \
 	  i915_sysfs.o \
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index 5e6a3013da49..16e0345b711f 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -24,7 +24,6 @@
 #define _INTEL_DVO_H
 
 #include <linux/i2c.h>
-#include <drm/drmP.h>
 #include <drm/drm_crtc.h>
 #include "intel_drv.h"
 
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index c628be05fbfe..e1c860f80eb0 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -148,10 +148,10 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
 		gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
 						   high_avail / vgpu_types[i].high_mm);
 
-		if (IS_GEN8(gvt->dev_priv))
+		if (IS_GEN(gvt->dev_priv, 8))
 			sprintf(gvt->types[i].name, "GVTg_V4_%s",
 				vgpu_types[i].name);
-		else if (IS_GEN9(gvt->dev_priv))
+		else if (IS_GEN(gvt->dev_priv, 9))
 			sprintf(gvt->types[i].name, "GVTg_V5_%s",
 				vgpu_types[i].name);
 
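
[Editor's note] The IS_GENx() conversions here, and in the debugfs hunks below, replace the per-generation macro family with a parameterised IS_GEN(i915, n), plus IS_GEN_RANGE(i915, s, e) for spans. Roughly, and only as a sketch of the idea (the real definitions live in i915_drv.h and operate on a precomputed gen_mask, so a range test costs a single mask check):

    /* Illustrative reconstruction, not the verbatim i915 macros. */
    #define GEN_BIT(n)              BIT((n) - 1)

    #define IS_GEN(i915, n) \
            (!!(INTEL_INFO(i915)->gen_mask & GEN_BIT(n)))

    #define IS_GEN_RANGE(i915, s, e) \
            (!!(INTEL_INFO(i915)->gen_mask & GENMASK((e) - 1, (s) - 1)))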
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 95478db9998b..33e8eed64423 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -865,7 +865,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
 	int cmd_table_count;
 	int ret;
 
-	if (!IS_GEN7(engine->i915))
+	if (!IS_GEN(engine->i915, 7))
 		return;
 
 	switch (engine->id) {
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 9bad6a32adae..d460ef522d9c 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -48,7 +48,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
 	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
 
 	intel_device_info_dump_flags(info, &p);
-	intel_device_info_dump_runtime(info, &p);
+	intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
 	intel_driver_caps_print(&dev_priv->caps, &p);
 
 	kernel_param_lock(THIS_MODULE);
@@ -297,11 +297,12 @@ out:
 }
 
 struct file_stats {
-	struct drm_i915_file_private *file_priv;
+	struct i915_address_space *vm;
 	unsigned long count;
 	u64 total, unbound;
 	u64 global, shared;
 	u64 active, inactive;
+	u64 closed;
 };
 
 static int per_file_stats(int id, void *ptr, void *data)
@@ -326,9 +327,7 @@ static int per_file_stats(int id, void *ptr, void *data)
 	if (i915_vma_is_ggtt(vma)) {
 		stats->global += vma->node.size;
 	} else {
-		struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);
-
-		if (ppgtt->vm.file != stats->file_priv)
+		if (vma->vm != stats->vm)
 			continue;
 	}
 
@@ -336,6 +335,9 @@ static int per_file_stats(int id, void *ptr, void *data)
 			stats->active += vma->node.size;
 		else
 			stats->inactive += vma->node.size;
+
+		if (i915_vma_is_closed(vma))
+			stats->closed += vma->node.size;
 	}
 
 	return 0;
@@ -343,7 +345,7 @@ static int per_file_stats(int id, void *ptr, void *data)
 
 #define print_file_stats(m, name, stats) do { \
 	if (stats.count) \
-		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
+		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound, %llu closed)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
@@ -351,20 +353,19 @@
 		   stats.inactive, \
 		   stats.global, \
 		   stats.shared, \
-		   stats.unbound); \
+		   stats.unbound, \
+		   stats.closed); \
 } while (0)
 
 static void print_batch_pool_stats(struct seq_file *m,
 				   struct drm_i915_private *dev_priv)
 {
 	struct drm_i915_gem_object *obj;
-	struct file_stats stats;
 	struct intel_engine_cs *engine;
+	struct file_stats stats = {};
 	enum intel_engine_id id;
 	int j;
 
-	memset(&stats, 0, sizeof(stats));
-
 	for_each_engine(engine, dev_priv, id) {
 		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
 			list_for_each_entry(obj,
@@ -377,44 +378,47 @@ static void print_batch_pool_stats(struct seq_file *m,
 	print_file_stats(m, "[k]batch pool", stats);
 }
 
-static int per_file_ctx_stats(int idx, void *ptr, void *data)
+static void print_context_stats(struct seq_file *m,
+				struct drm_i915_private *i915)
 {
-	struct i915_gem_context *ctx = ptr;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
+	struct file_stats kstats = {};
+	struct i915_gem_context *ctx;
 
-	for_each_engine(engine, ctx->i915, id) {
-		struct intel_context *ce = to_intel_context(ctx, engine);
+	list_for_each_entry(ctx, &i915->contexts.list, link) {
+		struct intel_engine_cs *engine;
+		enum intel_engine_id id;
 
-		if (ce->state)
-			per_file_stats(0, ce->state->obj, data);
-		if (ce->ring)
-			per_file_stats(0, ce->ring->vma->obj, data);
-	}
+		for_each_engine(engine, i915, id) {
+			struct intel_context *ce = to_intel_context(ctx, engine);
 
-	return 0;
-}
+			if (ce->state)
+				per_file_stats(0, ce->state->obj, &kstats);
+			if (ce->ring)
+				per_file_stats(0, ce->ring->vma->obj, &kstats);
+		}
 
-static void print_context_stats(struct seq_file *m,
-				struct drm_i915_private *dev_priv)
-{
-	struct drm_device *dev = &dev_priv->drm;
-	struct file_stats stats;
-	struct drm_file *file;
+		if (!IS_ERR_OR_NULL(ctx->file_priv)) {
+			struct file_stats stats = { .vm = &ctx->ppgtt->vm, };
+			struct drm_file *file = ctx->file_priv->file;
+			struct task_struct *task;
+			char name[80];
 
-	memset(&stats, 0, sizeof(stats));
+			spin_lock(&file->table_lock);
+			idr_for_each(&file->object_idr, per_file_stats, &stats);
+			spin_unlock(&file->table_lock);
 
-	mutex_lock(&dev->struct_mutex);
-	if (dev_priv->kernel_context)
-		per_file_ctx_stats(0, dev_priv->kernel_context, &stats);
+			rcu_read_lock();
+			task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
+			snprintf(name, sizeof(name), "%s/%d",
+				 task ? task->comm : "<unknown>",
+				 ctx->user_handle);
+			rcu_read_unlock();
 
-	list_for_each_entry(file, &dev->filelist, lhead) {
-		struct drm_i915_file_private *fpriv = file->driver_priv;
-		idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
+			print_file_stats(m, name, stats);
+		}
 	}
-	mutex_unlock(&dev->struct_mutex);
 
-	print_file_stats(m, "[k]contexts", stats);
+	print_file_stats(m, "[k]contexts", kstats);
 }
 
 static int i915_gem_object_info(struct seq_file *m, void *data)
@@ -426,14 +430,9 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
 	u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
 	struct drm_i915_gem_object *obj;
 	unsigned int page_sizes = 0;
-	struct drm_file *file;
 	char buf[80];
 	int ret;
 
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-
 	seq_printf(m, "%u objects, %llu bytes\n",
 		   dev_priv->mm.object_count,
 		   dev_priv->mm.object_memory);
@@ -514,43 +513,14 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
 		   buf, sizeof(buf)));
 
 	seq_putc(m, '\n');
-	print_batch_pool_stats(m, dev_priv);
-	mutex_unlock(&dev->struct_mutex);
-
-	mutex_lock(&dev->filelist_mutex);
-	print_context_stats(m, dev_priv);
-	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
-		struct file_stats stats;
-		struct drm_i915_file_private *file_priv = file->driver_priv;
-		struct i915_request *request;
-		struct task_struct *task;
-
-		mutex_lock(&dev->struct_mutex);
 
-		memset(&stats, 0, sizeof(stats));
-		stats.file_priv = file->driver_priv;
-		spin_lock(&file->table_lock);
-		idr_for_each(&file->object_idr, per_file_stats, &stats);
-		spin_unlock(&file->table_lock);
-		/*
-		 * Although we have a valid reference on file->pid, that does
-		 * not guarantee that the task_struct who called get_pid() is
-		 * still alive (e.g. get_pid(current) => fork() => exit()).
-		 * Therefore, we need to protect this ->comm access using RCU.
-		 */
-		request = list_first_entry_or_null(&file_priv->mm.request_list,
-						   struct i915_request,
-						   client_link);
-		rcu_read_lock();
-		task = pid_task(request && request->gem_context->pid ?
-				request->gem_context->pid : file->pid,
-				PIDTYPE_PID);
-		print_file_stats(m, task ? task->comm : "<unknown>", stats);
-		rcu_read_unlock();
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
-		mutex_unlock(&dev->struct_mutex);
-	}
-	mutex_unlock(&dev->filelist_mutex);
+	print_batch_pool_stats(m, dev_priv);
+	print_context_stats(m, dev_priv);
+	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
@@ -984,8 +954,8 @@ static int i915_gpu_info_open(struct inode *inode, struct file *file)
 	intel_runtime_pm_get(i915);
 	gpu = i915_capture_gpu_state(i915);
 	intel_runtime_pm_put(i915);
-	if (!gpu)
-		return -ENOMEM;
+	if (IS_ERR(gpu))
+		return PTR_ERR(gpu);
 
 	file->private_data = gpu;
 	return 0;
@@ -1018,7 +988,13 @@ i915_error_state_write(struct file *filp,
 
 static int i915_error_state_open(struct inode *inode, struct file *file)
 {
-	file->private_data = i915_first_error_state(inode->i_private);
+	struct i915_gpu_state *error;
+
+	error = i915_first_error_state(inode->i_private);
+	if (IS_ERR(error))
+		return PTR_ERR(error);
+
+	file->private_data = error;
 	return 0;
 }
 
@@ -1032,30 +1008,6 @@ static const struct file_operations i915_error_state_fops = {
 };
 #endif
 
-static int
-i915_next_seqno_set(void *data, u64 val)
-{
-	struct drm_i915_private *dev_priv = data;
-	struct drm_device *dev = &dev_priv->drm;
-	int ret;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-
-	intel_runtime_pm_get(dev_priv);
-	ret = i915_gem_set_global_seqno(dev, val);
-	intel_runtime_pm_put(dev_priv);
-
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
-			NULL, i915_next_seqno_set,
-			"0x%llx\n");
-
 static int i915_frequency_info(struct seq_file *m, void *unused)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -1064,7 +1016,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 
 	intel_runtime_pm_get(dev_priv);
 
-	if (IS_GEN5(dev_priv)) {
+	if (IS_GEN(dev_priv, 5)) {
 		u16 rgvswctl = I915_READ16(MEMSWCTL);
 		u16 rgvstat = I915_READ16(MEMSTAT_ILK);
 
@@ -1785,7 +1737,7 @@ static int i915_emon_status(struct seq_file *m, void *unused)
 	unsigned long temp, chipset, gfx;
 	int ret;
 
-	if (!IS_GEN5(dev_priv))
+	if (!IS_GEN(dev_priv, 5))
 		return -ENODEV;
 
 	intel_runtime_pm_get(dev_priv);
@@ -2034,7 +1986,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
 	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
 		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));
 
-	if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
+	if (IS_GEN_RANGE(dev_priv, 3, 4)) {
 		seq_printf(m, "DDC = 0x%08x\n",
 			   I915_READ(DCC));
 		seq_printf(m, "DDC2 = 0x%08x\n",
@@ -2070,124 +2022,6 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
 	return 0;
 }
 
-static int per_file_ctx(int id, void *ptr, void *data)
-{
-	struct i915_gem_context *ctx = ptr;
-	struct seq_file *m = data;
-	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
-
-	if (!ppgtt) {
-		seq_printf(m, " no ppgtt for context %d\n",
-			   ctx->user_handle);
-		return 0;
-	}
-
-	if (i915_gem_context_is_default(ctx))
-		seq_puts(m, " default context:\n");
-	else
-		seq_printf(m, " context %d:\n", ctx->user_handle);
-	ppgtt->debug_dump(ppgtt, m);
-
-	return 0;
-}
-
-static void gen8_ppgtt_info(struct seq_file *m,
-			    struct drm_i915_private *dev_priv)
-{
-	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	int i;
-
-	if (!ppgtt)
-		return;
-
-	for_each_engine(engine, dev_priv, id) {
-		seq_printf(m, "%s\n", engine->name);
-		for (i = 0; i < 4; i++) {
-			u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
-			pdp <<= 32;
-			pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
-			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
-		}
-	}
-}
-
-static void gen6_ppgtt_info(struct seq_file *m,
-			    struct drm_i915_private *dev_priv)
-{
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-
-	if (IS_GEN6(dev_priv))
-		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
-
-	for_each_engine(engine, dev_priv, id) {
-		seq_printf(m, "%s\n", engine->name);
-		if (IS_GEN7(dev_priv))
-			seq_printf(m, "GFX_MODE: 0x%08x\n",
-				   I915_READ(RING_MODE_GEN7(engine)));
-		seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
-			   I915_READ(RING_PP_DIR_BASE(engine)));
-		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
-			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
-		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
-			   I915_READ(RING_PP_DIR_DCLV(engine)));
-	}
-	if (dev_priv->mm.aliasing_ppgtt) {
-		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-
-		seq_puts(m, "aliasing PPGTT:\n");
-		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);
-
-		ppgtt->debug_dump(ppgtt, m);
-	}
-
-	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
-}
-
-static int i915_ppgtt_info(struct seq_file *m, void *data)
-{
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
-	struct drm_file *file;
-	int ret;
-
-	mutex_lock(&dev->filelist_mutex);
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		goto out_unlock;
-
-	intel_runtime_pm_get(dev_priv);
-
-	if (INTEL_GEN(dev_priv) >= 8)
-		gen8_ppgtt_info(m, dev_priv);
-	else if (INTEL_GEN(dev_priv) >= 6)
-		gen6_ppgtt_info(m, dev_priv);
-
-	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
-		struct drm_i915_file_private *file_priv = file->driver_priv;
-		struct task_struct *task;
-
-		task = get_pid_task(file->pid, PIDTYPE_PID);
-		if (!task) {
-			ret = -ESRCH;
-			goto out_rpm;
-		}
-		seq_printf(m, "\nproc: %s\n", task->comm);
-		put_task_struct(task);
-		idr_for_each(&file_priv->context_idr, per_file_ctx,
-			     (void *)(unsigned long)m);
-	}
-
-out_rpm:
-	intel_runtime_pm_put(dev_priv);
-	mutex_unlock(&dev->struct_mutex);
-out_unlock:
-	mutex_unlock(&dev->filelist_mutex);
-	return ret;
-}
-
 static int count_irq_waiters(struct drm_i915_private *i915)
 {
 	struct intel_engine_cs *engine;
@@ -3120,14 +2954,13 @@ static const char *plane_type(enum drm_plane_type type)
 	return "unknown";
 }
 
-static const char *plane_rotation(unsigned int rotation)
+static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
 {
-	static char buf[48];
 	/*
 	 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
 	 * will print them all to visualize if the values are misused
 	 */
-	snprintf(buf, sizeof(buf),
+	snprintf(buf, bufsize,
 		 "%s%s%s%s%s%s(0x%08x)",
 		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
 		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
@@ -3136,8 +2969,6 @@ static const char *plane_rotation(unsigned int rotation)
 		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
 		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
 		 rotation);
-
-	return buf;
 }
 
 static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
@@ -3150,6 +2981,7 @@ static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3150 struct drm_plane_state *state; 2981 struct drm_plane_state *state;
3151 struct drm_plane *plane = &intel_plane->base; 2982 struct drm_plane *plane = &intel_plane->base;
3152 struct drm_format_name_buf format_name; 2983 struct drm_format_name_buf format_name;
2984 char rot_str[48];
3153 2985
3154 if (!plane->state) { 2986 if (!plane->state) {
3155 seq_puts(m, "plane->state is NULL!\n"); 2987 seq_puts(m, "plane->state is NULL!\n");
@@ -3165,6 +2997,8 @@ static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3165 sprintf(format_name.str, "N/A"); 2997 sprintf(format_name.str, "N/A");
3166 } 2998 }
3167 2999
3000 plane_rotation(rot_str, sizeof(rot_str), state->rotation);
3001
3168 seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n", 3002 seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
3169 plane->base.id, 3003 plane->base.id,
3170 plane_type(intel_plane->base.type), 3004 plane_type(intel_plane->base.type),
@@ -3179,7 +3013,7 @@ static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3179 (state->src_h >> 16), 3013 (state->src_h >> 16),
3180 ((state->src_h & 0xffff) * 15625) >> 10, 3014 ((state->src_h & 0xffff) * 15625) >> 10,
3181 format_name.str, 3015 format_name.str,
3182 plane_rotation(state->rotation)); 3016 rot_str);
3183 } 3017 }
3184} 3018}
3185 3019
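An aside on the plane_rotation() change above: it is the standard cure for a formatter that returns a pointer to a function-local static buffer, which is not reentrant and misbehaves when two debugfs readers race. A minimal sketch of the before/after shape, with illustrative names rather than the driver's own:

	#include <stdio.h>
	#include <stddef.h>

	/* Before: all callers share one static buffer, so concurrent
	 * calls can interleave or overwrite each other's output. */
	static const char *fmt_rotation_racy(unsigned int rotation)
	{
		static char buf[48];

		snprintf(buf, sizeof(buf), "(0x%08x)", rotation);
		return buf;
	}

	/* After: the caller owns the storage (cf. rot_str[48] above),
	 * so every invocation formats into private memory. */
	static void fmt_rotation(char *buf, size_t bufsize, unsigned int rotation)
	{
		snprintf(buf, bufsize, "(0x%08x)", rotation);
	}
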
@@ -3286,7 +3120,7 @@ static int i915_engine_info(struct seq_file *m, void *unused)
 	seq_printf(m, "Global active requests: %d\n",
 		   dev_priv->gt.active_requests);
 	seq_printf(m, "CS timestamp frequency: %u kHz\n",
-		   dev_priv->info.cs_timestamp_frequency_khz);
+		   RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
 
 	p = drm_seq_file_printer(m);
 	for_each_engine(engine, dev_priv, id)
@@ -3302,7 +3136,7 @@ static int i915_rcs_topology(struct seq_file *m, void *unused)
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	struct drm_printer p = drm_seq_file_printer(m);
 
-	intel_device_info_dump_topology(&INTEL_INFO(dev_priv)->sseu, &p);
+	intel_device_info_dump_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
 
 	return 0;
 }
@@ -4206,9 +4040,6 @@ i915_drop_caches_set(void *data, u64 val)
					     I915_WAIT_LOCKED,
					     MAX_SCHEDULE_TIMEOUT);
 
-		if (ret == 0 && val & DROP_RESET_SEQNO)
-			ret = i915_gem_set_global_seqno(&i915->drm, 1);
-
 	if (val & DROP_RETIRE)
 		i915_retire_requests(i915);
 
@@ -4261,7 +4092,7 @@ i915_cache_sharing_get(void *data, u64 *val)
 	struct drm_i915_private *dev_priv = data;
 	u32 snpcr;
 
-	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
+	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
 		return -ENODEV;
 
 	intel_runtime_pm_get(dev_priv);
@@ -4281,7 +4112,7 @@ i915_cache_sharing_set(void *data, u64 val)
 	struct drm_i915_private *dev_priv = data;
 	u32 snpcr;
 
-	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
+	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
 		return -ENODEV;
 
 	if (val > 3)
@@ -4341,7 +4172,7 @@ static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
				     struct sseu_dev_info *sseu)
 {
 #define SS_MAX 6
-	const struct intel_device_info *info = INTEL_INFO(dev_priv);
+	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
 	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
 	int s, ss;
 
@@ -4397,7 +4228,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
				    struct sseu_dev_info *sseu)
 {
 #define SS_MAX 3
-	const struct intel_device_info *info = INTEL_INFO(dev_priv);
+	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
 	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
 	int s, ss;
 
@@ -4425,7 +4256,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
 
 		if (IS_GEN9_BC(dev_priv))
 			sseu->subslice_mask[s] =
-				INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
+				RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
 
 		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
 			unsigned int eu_cnt;
@@ -4459,10 +4290,10 @@ static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
 
 	if (sseu->slice_mask) {
 		sseu->eu_per_subslice =
-			INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
+			RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice;
 		for (s = 0; s < fls(sseu->slice_mask); s++) {
 			sseu->subslice_mask[s] =
-				INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
+				RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
 		}
 		sseu->eu_total = sseu->eu_per_subslice *
				 sseu_subslice_total(sseu);
@@ -4470,7 +4301,7 @@ static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
 		/* subtract fused off EU(s) from enabled slice(s) */
 		for (s = 0; s < fls(sseu->slice_mask); s++) {
 			u8 subslice_7eu =
-				INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];
+				RUNTIME_INFO(dev_priv)->sseu.subslice_7eu[s];
 
 			sseu->eu_total -= hweight8(subslice_7eu);
 		}
@@ -4523,14 +4354,14 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
 		return -ENODEV;
 
 	seq_puts(m, "SSEU Device Info\n");
-	i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);
+	i915_print_sseu_info(m, true, &RUNTIME_INFO(dev_priv)->sseu);
 
 	seq_puts(m, "SSEU Device Status\n");
 	memset(&sseu, 0, sizeof(sseu));
-	sseu.max_slices = INTEL_INFO(dev_priv)->sseu.max_slices;
-	sseu.max_subslices = INTEL_INFO(dev_priv)->sseu.max_subslices;
+	sseu.max_slices = RUNTIME_INFO(dev_priv)->sseu.max_slices;
+	sseu.max_subslices = RUNTIME_INFO(dev_priv)->sseu.max_subslices;
 	sseu.max_eus_per_subslice =
-		INTEL_INFO(dev_priv)->sseu.max_eus_per_subslice;
+		RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice;
 
 	intel_runtime_pm_get(dev_priv);
 
@@ -4538,7 +4369,7 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
 		cherryview_sseu_device_status(dev_priv, &sseu);
 	} else if (IS_BROADWELL(dev_priv)) {
 		broadwell_sseu_device_status(dev_priv, &sseu);
-	} else if (IS_GEN9(dev_priv)) {
+	} else if (IS_GEN(dev_priv, 9)) {
 		gen9_sseu_device_status(dev_priv, &sseu);
 	} else if (INTEL_GEN(dev_priv) >= 10) {
 		gen10_sseu_device_status(dev_priv, &sseu);
@@ -4899,7 +4730,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_context_status", i915_context_status, 0},
 	{"i915_forcewake_domains", i915_forcewake_domains, 0},
 	{"i915_swizzle_info", i915_swizzle_info, 0},
-	{"i915_ppgtt_info", i915_ppgtt_info, 0},
 	{"i915_llc", i915_llc, 0},
 	{"i915_edp_psr_status", i915_edp_psr_status, 0},
 	{"i915_energy_uJ", i915_energy_uJ, 0},
@@ -4934,7 +4764,6 @@ static const struct i915_debugfs_files {
 	{"i915_gpu_info", &i915_gpu_info_fops},
 #endif
 	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
-	{"i915_next_seqno", &i915_next_seqno_fops},
 	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
 	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
 	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
@@ -5081,6 +4910,106 @@ static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
 }
 DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
 
+static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
+{
+	struct drm_connector *connector = m->private;
+	struct drm_device *dev = connector->dev;
+	struct drm_crtc *crtc;
+	struct intel_dp *intel_dp;
+	struct drm_modeset_acquire_ctx ctx;
+	struct intel_crtc_state *crtc_state = NULL;
+	int ret = 0;
+	bool try_again = false;
+
+	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
+
+	do {
+		try_again = false;
+		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
+				       &ctx);
+		if (ret) {
+			ret = -EINTR;
+			break;
+		}
+		crtc = connector->state->crtc;
+		if (connector->status != connector_status_connected || !crtc) {
+			ret = -ENODEV;
+			break;
+		}
+		ret = drm_modeset_lock(&crtc->mutex, &ctx);
+		if (ret == -EDEADLK) {
+			ret = drm_modeset_backoff(&ctx);
+			if (!ret) {
+				try_again = true;
+				continue;
+			}
+			break;
+		} else if (ret) {
+			break;
+		}
+		intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+		crtc_state = to_intel_crtc_state(crtc->state);
+		seq_printf(m, "DSC_Enabled: %s\n",
+			   yesno(crtc_state->dsc_params.compression_enable));
+		if (intel_dp->dsc_dpcd)
+			seq_printf(m, "DSC_Sink_Support: %s\n",
+				   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
+		if (!intel_dp_is_edp(intel_dp))
+			seq_printf(m, "FEC_Sink_Support: %s\n",
+				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
+	} while (try_again);
+
+	drm_modeset_drop_locks(&ctx);
+	drm_modeset_acquire_fini(&ctx);
+
+	return ret;
+}
+
+static ssize_t i915_dsc_fec_support_write(struct file *file,
+					  const char __user *ubuf,
+					  size_t len, loff_t *offp)
+{
+	bool dsc_enable = false;
+	int ret;
+	struct drm_connector *connector =
+		((struct seq_file *)file->private_data)->private;
+	struct intel_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+	if (len == 0)
+		return 0;
+
+	DRM_DEBUG_DRIVER("Copied %zu bytes from user to force DSC\n",
+			 len);
+
+	ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
+	if (ret < 0)
+		return ret;
+
+	DRM_DEBUG_DRIVER("Got %s for DSC Enable\n",
+			 (dsc_enable) ? "true" : "false");
+	intel_dp->force_dsc_en = dsc_enable;
+
+	*offp += len;
+	return len;
+}
+
+static int i915_dsc_fec_support_open(struct inode *inode,
+				     struct file *file)
+{
+	return single_open(file, i915_dsc_fec_support_show,
+			   inode->i_private);
+}
+
+static const struct file_operations i915_dsc_fec_support_fops = {
+	.owner = THIS_MODULE,
+	.open = i915_dsc_fec_support_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.write = i915_dsc_fec_support_write
+};
+
 /**
  * i915_debugfs_connector_add - add i915 specific connector debugfs files
  * @connector: pointer to a registered drm_connector
@@ -5093,6 +5022,7 @@ DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
 int i915_debugfs_connector_add(struct drm_connector *connector)
 {
 	struct dentry *root = connector->debugfs_entry;
+	struct drm_i915_private *dev_priv = to_i915(connector->dev);
 
 	/* The connector must have been registered beforehands. */
 	if (!root)
@@ -5117,5 +5047,11 @@ int i915_debugfs_connector_add(struct drm_connector *connector)
				    connector, &i915_hdcp_sink_capability_fops);
 	}
 
+	if (INTEL_GEN(dev_priv) >= 10 &&
+	    (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+	     connector->connector_type == DRM_MODE_CONNECTOR_eDP))
+		debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
+				    connector, &i915_dsc_fec_support_fops);
+
 	return 0;
 }
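For completeness, the new per-connector i915_dsc_fec_support node is readable for DSC/FEC status and writable to force DSC on. A hypothetical userspace exerciser is sketched below; the debugfs path (card index and connector directory name) is an assumption that varies per machine, and since the node is created with S_IRUGO the write only succeeds for root:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* Assumed path; adjust "0" and "DP-1" for the local setup. */
		const char *path =
			"/sys/kernel/debug/dri/0/DP-1/i915_dsc_fec_support";
		char buf[128];
		ssize_t n;
		int fd;

		fd = open(path, O_WRONLY);	/* root only: mode is 0444 */
		if (fd >= 0) {
			/* kstrtobool_from_user() accepts "1"/"0", "y"/"n", ... */
			if (write(fd, "1", 1) != 1)
				perror("force DSC");
			close(fd);
		}

		fd = open(path, O_RDONLY);
		if (fd < 0) {
			perror(path);
			return 1;
		}
		n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0) {
			buf[n] = '\0';
			fputs(buf, stdout);	/* e.g. "DSC_Enabled: no" */
		}
		close(fd);
		return 0;
	}
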
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index b310a897a4ad..75652dc1e24c 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -41,7 +41,6 @@
 #include <linux/vt.h>
 #include <acpi/video.h>
 
-#include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/i915_drm.h>
@@ -132,15 +131,15 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
 	switch (id) {
 	case INTEL_PCH_IBX_DEVICE_ID_TYPE:
 		DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
-		WARN_ON(!IS_GEN5(dev_priv));
+		WARN_ON(!IS_GEN(dev_priv, 5));
 		return PCH_IBX;
 	case INTEL_PCH_CPT_DEVICE_ID_TYPE:
 		DRM_DEBUG_KMS("Found CougarPoint PCH\n");
-		WARN_ON(!IS_GEN6(dev_priv) && !IS_IVYBRIDGE(dev_priv));
+		WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
 		return PCH_CPT;
 	case INTEL_PCH_PPT_DEVICE_ID_TYPE:
 		DRM_DEBUG_KMS("Found PantherPoint PCH\n");
-		WARN_ON(!IS_GEN6(dev_priv) && !IS_IVYBRIDGE(dev_priv));
+		WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
 		/* PantherPoint is CPT compatible */
 		return PCH_CPT;
 	case INTEL_PCH_LPT_DEVICE_ID_TYPE:
@@ -217,9 +216,9 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
 	 * make an educated guess as to which PCH is really there.
 	 */
 
-	if (IS_GEN5(dev_priv))
+	if (IS_GEN(dev_priv, 5))
 		id = INTEL_PCH_IBX_DEVICE_ID_TYPE;
-	else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
+	else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
 		id = INTEL_PCH_CPT_DEVICE_ID_TYPE;
 	else if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
 		id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
@@ -349,7 +348,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
 		value = min_t(int, INTEL_PPGTT(dev_priv), I915_GEM_PPGTT_FULL);
 		break;
 	case I915_PARAM_HAS_SEMAPHORES:
-		value = HAS_LEGACY_SEMAPHORES(dev_priv);
+		value = 0;
 		break;
 	case I915_PARAM_HAS_SECURE_BATCHES:
 		value = capable(CAP_SYS_ADMIN);
@@ -358,12 +357,12 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
 		value = i915_cmd_parser_get_version(dev_priv);
 		break;
 	case I915_PARAM_SUBSLICE_TOTAL:
-		value = sseu_subslice_total(&INTEL_INFO(dev_priv)->sseu);
+		value = sseu_subslice_total(&RUNTIME_INFO(dev_priv)->sseu);
 		if (!value)
 			return -ENODEV;
 		break;
 	case I915_PARAM_EU_TOTAL:
-		value = INTEL_INFO(dev_priv)->sseu.eu_total;
+		value = RUNTIME_INFO(dev_priv)->sseu.eu_total;
 		if (!value)
 			return -ENODEV;
 		break;
@@ -380,7 +379,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
 		value = HAS_POOLED_EU(dev_priv);
 		break;
 	case I915_PARAM_MIN_EU_IN_POOL:
-		value = INTEL_INFO(dev_priv)->sseu.min_eu_in_pool;
+		value = RUNTIME_INFO(dev_priv)->sseu.min_eu_in_pool;
 		break;
 	case I915_PARAM_HUC_STATUS:
 		value = intel_huc_check_status(&dev_priv->huc);
@@ -430,17 +429,17 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
 		value = intel_engines_has_context_isolation(dev_priv);
 		break;
 	case I915_PARAM_SLICE_MASK:
-		value = INTEL_INFO(dev_priv)->sseu.slice_mask;
+		value = RUNTIME_INFO(dev_priv)->sseu.slice_mask;
 		if (!value)
 			return -ENODEV;
 		break;
 	case I915_PARAM_SUBSLICE_MASK:
-		value = INTEL_INFO(dev_priv)->sseu.subslice_mask[0];
+		value = RUNTIME_INFO(dev_priv)->sseu.subslice_mask[0];
 		if (!value)
 			return -ENODEV;
 		break;
 	case I915_PARAM_CS_TIMESTAMP_FREQUENCY:
-		value = 1000 * INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz;
+		value = 1000 * RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz;
 		break;
 	case I915_PARAM_MMAP_GTT_COHERENT:
 		value = INTEL_INFO(dev_priv)->has_coherent_ggtt;
@@ -966,7 +965,7 @@ static int i915_mmio_setup(struct drm_i915_private *dev_priv)
 	int mmio_bar;
 	int mmio_size;
 
-	mmio_bar = IS_GEN2(dev_priv) ? 1 : 0;
+	mmio_bar = IS_GEN(dev_priv, 2) ? 1 : 0;
 	/*
 	 * Before gen4, the registers and the GTT are behind different BARs.
 	 * However, from gen4 onwards, the registers and the GTT are shared
@@ -1341,7 +1340,7 @@ intel_get_dram_info(struct drm_i915_private *dev_priv)
 	/* Need to calculate bandwidth only for Gen9 */
 	if (IS_BROXTON(dev_priv))
 		ret = bxt_get_dram_info(dev_priv);
-	else if (IS_GEN9(dev_priv))
+	else if (IS_GEN(dev_priv, 9))
 		ret = skl_get_dram_info(dev_priv);
 	else
 		ret = skl_dram_get_channels_info(dev_priv);
@@ -1374,7 +1373,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 	if (i915_inject_load_failure())
 		return -ENODEV;
 
-	intel_device_info_runtime_init(mkwrite_device_info(dev_priv));
+	intel_device_info_runtime_init(dev_priv);
 
 	if (HAS_PPGTT(dev_priv)) {
 		if (intel_vgpu_active(dev_priv) &&
@@ -1436,7 +1435,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 	pci_set_master(pdev);
 
 	/* overlay on gen2 is broken and can't address above 1G */
-	if (IS_GEN2(dev_priv)) {
+	if (IS_GEN(dev_priv, 2)) {
 		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
 		if (ret) {
 			DRM_ERROR("failed to set DMA mask\n");
@@ -1574,7 +1573,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
 		acpi_video_register();
 	}
 
-	if (IS_GEN5(dev_priv))
+	if (IS_GEN(dev_priv, 5))
 		intel_gpu_ips_init(dev_priv);
 
 	intel_audio_init(dev_priv);
@@ -1636,8 +1635,14 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv)
 	if (drm_debug & DRM_UT_DRIVER) {
 		struct drm_printer p = drm_debug_printer("i915 device info:");
 
-		intel_device_info_dump(&dev_priv->info, &p);
-		intel_device_info_dump_runtime(&dev_priv->info, &p);
+		drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s gen=%i\n",
+			   INTEL_DEVID(dev_priv),
+			   INTEL_REVID(dev_priv),
+			   intel_platform_name(INTEL_INFO(dev_priv)->platform),
+			   INTEL_GEN(dev_priv));
+
+		intel_device_info_dump_flags(INTEL_INFO(dev_priv), &p);
+		intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
 	}
 
 	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
@@ -1674,7 +1679,7 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Setup the write-once "constant" device info */
 	device_info = mkwrite_device_info(i915);
 	memcpy(device_info, match_info, sizeof(*device_info));
-	device_info->device_id = pdev->device;
+	RUNTIME_INFO(i915)->device_id = pdev->device;
 
 	BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
 		     BITS_PER_TYPE(device_info->platform_mask));
@@ -2174,7 +2179,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
 
 	intel_power_domains_resume(dev_priv);
 
-	intel_engines_sanitize(dev_priv);
+	intel_engines_sanitize(dev_priv, true);
 
 	enable_rpm_wakeref_asserts(dev_priv);
 
@@ -2226,6 +2231,7 @@ void i915_reset(struct drm_i915_private *i915,
 
 	might_sleep();
 	lockdep_assert_held(&i915->drm.struct_mutex);
+	assert_rpm_wakelock_held(i915);
 	GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));
 
 	if (!test_bit(I915_RESET_HANDOFF, &error->flags))
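A note on the IS_GEN churn that dominates this file: the per-generation IS_GENx() macros are replaced by IS_GEN(dev_priv, n) with the generation as an argument, and hand-rolled disjunctions become IS_GEN_RANGE(). The definitions land in the i915_drv.h hunks below; in condensed form the conversion looks like this (setup_snoop_control() is a made-up stand-in for the guarded code):

	/* Old style: one macro per generation, ranges spelled out. */
	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
		setup_snoop_control();

	/* New style: IS_GEN() takes the generation and, via
	 * BUILD_BUG_ON_ZERO(!__builtin_constant_p(n)), refuses a
	 * non-constant argument; ranges get a dedicated macro. */
	if (IS_GEN_RANGE(dev_priv, 6, 7))
		setup_snoop_control();

	if (IS_GEN(dev_priv, 5))
		intel_gpu_ips_init(dev_priv);
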
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b1c31967194b..5df26ccda8a4 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -46,7 +46,6 @@
 #include <linux/reservation.h>
 #include <linux/shmem_fs.h>
 
-#include <drm/drmP.h>
 #include <drm/intel-gtt.h>
 #include <drm/drm_legacy.h> /* for struct drm_dma_handle */
 #include <drm/drm_gem.h>
@@ -54,6 +53,7 @@
 #include <drm/drm_cache.h>
 #include <drm/drm_util.h>
 #include <drm/drm_dsc.h>
+#include <drm/drm_connector.h>
 
 #include "i915_fixed.h"
 #include "i915_params.h"
@@ -90,8 +90,8 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20181204"
-#define DRIVER_TIMESTAMP	1543944377
+#define DRIVER_DATE		"20190110"
+#define DRIVER_TIMESTAMP	1547162337
 
 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
 * WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -281,16 +281,14 @@ struct drm_i915_display_funcs {
 	int (*get_fifo_size)(struct drm_i915_private *dev_priv,
			     enum i9xx_plane_id i9xx_plane);
 	int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
-	int (*compute_intermediate_wm)(struct drm_device *dev,
-				       struct intel_crtc *intel_crtc,
-				       struct intel_crtc_state *newstate);
+	int (*compute_intermediate_wm)(struct intel_crtc_state *newstate);
 	void (*initial_watermarks)(struct intel_atomic_state *state,
				   struct intel_crtc_state *cstate);
 	void (*atomic_update_watermarks)(struct intel_atomic_state *state,
					 struct intel_crtc_state *cstate);
 	void (*optimize_watermarks)(struct intel_atomic_state *state,
				    struct intel_crtc_state *cstate);
-	int (*compute_global_watermarks)(struct drm_atomic_state *state);
+	int (*compute_global_watermarks)(struct intel_atomic_state *state);
 	void (*update_wm)(struct intel_crtc *crtc);
 	int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
 	/* Returns the active state of the crtc, and if the crtc is active,
@@ -322,8 +320,8 @@ struct drm_i915_display_funcs {
 	/* display clock increase/decrease */
 	/* pll clock increase/decrease */
 
-	void (*load_csc_matrix)(struct drm_crtc_state *crtc_state);
-	void (*load_luts)(struct drm_crtc_state *crtc_state);
+	void (*load_csc_matrix)(struct intel_crtc_state *crtc_state);
+	void (*load_luts)(struct intel_crtc_state *crtc_state);
 };
 
 #define CSR_VERSION(major, minor)	((major) << 16 | (minor))
@@ -509,6 +507,7 @@ struct i915_psr {
 	ktime_t last_exit;
 	bool sink_not_reliable;
 	bool irq_aux_error;
+	u16 su_x_granularity;
 };
 
 enum intel_pch {
@@ -936,6 +935,8 @@ struct ddi_vbt_port_info {
 	uint8_t supports_hdmi:1;
 	uint8_t supports_dp:1;
 	uint8_t supports_edp:1;
+	uint8_t supports_typec_usb:1;
+	uint8_t supports_tbt:1;
 
 	uint8_t alternate_aux_channel;
 	uint8_t alternate_ddc_pin;
@@ -1430,7 +1431,8 @@ struct drm_i915_private {
 	struct kmem_cache *dependencies;
 	struct kmem_cache *priorities;
 
-	const struct intel_device_info info;
+	const struct intel_device_info __info; /* Use INTEL_INFO() to access. */
+	struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
 	struct intel_driver_caps caps;
 
 	/**
@@ -1947,7 +1949,6 @@ struct drm_i915_private {
 		struct list_head active_rings;
 		struct list_head closed_vma;
 		u32 active_requests;
-		u32 request_serial;
 
 		/**
		 * Is the GPU currently considered idle, or busy executing
@@ -2191,17 +2192,12 @@ static inline unsigned int i915_sg_segment_size(void)
 	return size;
 }
 
-static inline const struct intel_device_info *
-intel_info(const struct drm_i915_private *dev_priv)
-{
-	return &dev_priv->info;
-}
-
-#define INTEL_INFO(dev_priv) intel_info((dev_priv))
+#define INTEL_INFO(dev_priv)	(&(dev_priv)->__info)
+#define RUNTIME_INFO(dev_priv)	(&(dev_priv)->__runtime)
 #define DRIVER_CAPS(dev_priv)	(&(dev_priv)->caps)
 
-#define INTEL_GEN(dev_priv)	((dev_priv)->info.gen)
-#define INTEL_DEVID(dev_priv)	((dev_priv)->info.device_id)
+#define INTEL_GEN(dev_priv)	(INTEL_INFO(dev_priv)->gen)
+#define INTEL_DEVID(dev_priv)	(RUNTIME_INFO(dev_priv)->device_id)
 
 #define REVID_FOREVER		0xff
 #define INTEL_REVID(dev_priv)	((dev_priv)->drm.pdev->revision)
@@ -2212,8 +2208,12 @@ intel_info(const struct drm_i915_private *dev_priv)
	 GENMASK((e) - 1, (s) - 1))
 
 /* Returns true if Gen is in inclusive range [Start, End] */
-#define IS_GEN(dev_priv, s, e) \
-	(!!((dev_priv)->info.gen_mask & INTEL_GEN_MASK((s), (e))))
+#define IS_GEN_RANGE(dev_priv, s, e) \
+	(!!(INTEL_INFO(dev_priv)->gen_mask & INTEL_GEN_MASK((s), (e))))
+
+#define IS_GEN(dev_priv, n) \
+	(BUILD_BUG_ON_ZERO(!__builtin_constant_p(n)) + \
+	 INTEL_INFO(dev_priv)->gen == (n))
 
 /*
 * Return true if revision is in range [since,until] inclusive.
@@ -2223,7 +2223,7 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define IS_REVID(p, since, until) \
	(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
 
-#define IS_PLATFORM(dev_priv, p) ((dev_priv)->info.platform_mask & BIT(p))
+#define IS_PLATFORM(dev_priv, p) (INTEL_INFO(dev_priv)->platform_mask & BIT(p))
 
 #define IS_I830(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I830)
 #define IS_I845G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I845G)
@@ -2245,7 +2245,7 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define IS_IRONLAKE_M(dev_priv)	(INTEL_DEVID(dev_priv) == 0x0046)
 #define IS_IVYBRIDGE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE)
 #define IS_IVB_GT1(dev_priv)	(IS_IVYBRIDGE(dev_priv) && \
-				 (dev_priv)->info.gt == 1)
+				 INTEL_INFO(dev_priv)->gt == 1)
 #define IS_VALLEYVIEW(dev_priv)	IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW)
 #define IS_CHERRYVIEW(dev_priv)	IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW)
 #define IS_HASWELL(dev_priv)	IS_PLATFORM(dev_priv, INTEL_HASWELL)
@@ -2257,7 +2257,7 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define IS_COFFEELAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
 #define IS_CANNONLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_CANNONLAKE)
 #define IS_ICELAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_ICELAKE)
-#define IS_MOBILE(dev_priv)	((dev_priv)->info.is_mobile)
+#define IS_MOBILE(dev_priv)	(INTEL_INFO(dev_priv)->is_mobile)
 #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
				    (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
 #define IS_BDW_ULT(dev_priv)	(IS_BROADWELL(dev_priv) && \
@@ -2268,11 +2268,13 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define IS_BDW_ULX(dev_priv)	(IS_BROADWELL(dev_priv) && \
				 (INTEL_DEVID(dev_priv) & 0xf) == 0xe)
 #define IS_BDW_GT3(dev_priv)	(IS_BROADWELL(dev_priv) && \
-				 (dev_priv)->info.gt == 3)
+				 INTEL_INFO(dev_priv)->gt == 3)
 #define IS_HSW_ULT(dev_priv)	(IS_HASWELL(dev_priv) && \
				 (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0A00)
 #define IS_HSW_GT3(dev_priv)	(IS_HASWELL(dev_priv) && \
-				 (dev_priv)->info.gt == 3)
+				 INTEL_INFO(dev_priv)->gt == 3)
+#define IS_HSW_GT1(dev_priv)	(IS_HASWELL(dev_priv) && \
+				 INTEL_INFO(dev_priv)->gt == 1)
 /* ULX machines are also considered ULT. */
 #define IS_HSW_ULX(dev_priv)	(INTEL_DEVID(dev_priv) == 0x0A0E || \
				 INTEL_DEVID(dev_priv) == 0x0A1E)
@@ -2295,21 +2297,21 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define IS_AML_ULX(dev_priv)	(INTEL_DEVID(dev_priv) == 0x591C || \
				 INTEL_DEVID(dev_priv) == 0x87C0)
 #define IS_SKL_GT2(dev_priv)	(IS_SKYLAKE(dev_priv) && \
-				 (dev_priv)->info.gt == 2)
+				 INTEL_INFO(dev_priv)->gt == 2)
 #define IS_SKL_GT3(dev_priv)	(IS_SKYLAKE(dev_priv) && \
-				 (dev_priv)->info.gt == 3)
+				 INTEL_INFO(dev_priv)->gt == 3)
 #define IS_SKL_GT4(dev_priv)	(IS_SKYLAKE(dev_priv) && \
-				 (dev_priv)->info.gt == 4)
+				 INTEL_INFO(dev_priv)->gt == 4)
 #define IS_KBL_GT2(dev_priv)	(IS_KABYLAKE(dev_priv) && \
-				 (dev_priv)->info.gt == 2)
+				 INTEL_INFO(dev_priv)->gt == 2)
 #define IS_KBL_GT3(dev_priv)	(IS_KABYLAKE(dev_priv) && \
-				 (dev_priv)->info.gt == 3)
+				 INTEL_INFO(dev_priv)->gt == 3)
 #define IS_CFL_ULT(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
				 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x00A0)
 #define IS_CFL_GT2(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
-				 (dev_priv)->info.gt == 2)
+				 INTEL_INFO(dev_priv)->gt == 2)
 #define IS_CFL_GT3(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
-				 (dev_priv)->info.gt == 3)
+				 INTEL_INFO(dev_priv)->gt == 3)
 #define IS_CNL_WITH_PORT_F(dev_priv)   (IS_CANNONLAKE(dev_priv) && \
					(INTEL_DEVID(dev_priv) & 0x0004) == 0x0004)
 
@@ -2366,26 +2368,9 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define IS_ICL_REVID(p, since, until) \
	(IS_ICELAKE(p) && IS_REVID(p, since, until))
 
-/*
- * The genX designation typically refers to the render engine, so render
- * capability related checks should use IS_GEN, while display and other checks
- * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
- * chips, etc.).
- */
-#define IS_GEN2(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(1)))
-#define IS_GEN3(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(2)))
-#define IS_GEN4(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(3)))
-#define IS_GEN5(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(4)))
-#define IS_GEN6(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(5)))
-#define IS_GEN7(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(6)))
-#define IS_GEN8(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(7)))
-#define IS_GEN9(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(8)))
-#define IS_GEN10(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(9)))
-#define IS_GEN11(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(10)))
-
 #define IS_LP(dev_priv)	(INTEL_INFO(dev_priv)->is_lp)
-#define IS_GEN9_LP(dev_priv)	(IS_GEN9(dev_priv) && IS_LP(dev_priv))
-#define IS_GEN9_BC(dev_priv)	(IS_GEN9(dev_priv) && !IS_LP(dev_priv))
+#define IS_GEN9_LP(dev_priv)	(IS_GEN(dev_priv, 9) && IS_LP(dev_priv))
+#define IS_GEN9_BC(dev_priv)	(IS_GEN(dev_priv, 9) && !IS_LP(dev_priv))
 
 #define ENGINE_MASK(id)	BIT(id)
 #define RENDER_RING	ENGINE_MASK(RCS)
@@ -2399,29 +2384,27 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define ALL_ENGINES	(~0)
 
 #define HAS_ENGINE(dev_priv, id) \
-	(!!((dev_priv)->info.ring_mask & ENGINE_MASK(id)))
+	(!!(INTEL_INFO(dev_priv)->ring_mask & ENGINE_MASK(id)))
 
 #define HAS_BSD(dev_priv)	HAS_ENGINE(dev_priv, VCS)
 #define HAS_BSD2(dev_priv)	HAS_ENGINE(dev_priv, VCS2)
 #define HAS_BLT(dev_priv)	HAS_ENGINE(dev_priv, BCS)
 #define HAS_VEBOX(dev_priv)	HAS_ENGINE(dev_priv, VECS)
 
-#define HAS_LEGACY_SEMAPHORES(dev_priv) IS_GEN7(dev_priv)
-
-#define HAS_LLC(dev_priv)	((dev_priv)->info.has_llc)
-#define HAS_SNOOP(dev_priv)	((dev_priv)->info.has_snoop)
+#define HAS_LLC(dev_priv)	(INTEL_INFO(dev_priv)->has_llc)
+#define HAS_SNOOP(dev_priv)	(INTEL_INFO(dev_priv)->has_snoop)
 #define HAS_EDRAM(dev_priv)	(!!((dev_priv)->edram_cap & EDRAM_ENABLED))
 #define HAS_WT(dev_priv)	((IS_HASWELL(dev_priv) || \
				 IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))
 
-#define HWS_NEEDS_PHYSICAL(dev_priv)	((dev_priv)->info.hws_needs_physical)
+#define HWS_NEEDS_PHYSICAL(dev_priv)	(INTEL_INFO(dev_priv)->hws_needs_physical)
 
 #define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
-		((dev_priv)->info.has_logical_ring_contexts)
+		(INTEL_INFO(dev_priv)->has_logical_ring_contexts)
 #define HAS_LOGICAL_RING_ELSQ(dev_priv) \
-		((dev_priv)->info.has_logical_ring_elsq)
+		(INTEL_INFO(dev_priv)->has_logical_ring_elsq)
 #define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \
-		((dev_priv)->info.has_logical_ring_preemption)
+		(INTEL_INFO(dev_priv)->has_logical_ring_preemption)
 
 #define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)
 
@@ -2435,12 +2418,12 @@ intel_info(const struct drm_i915_private *dev_priv)
 
 #define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
	GEM_BUG_ON((sizes) == 0); \
-	((sizes) & ~(dev_priv)->info.page_sizes) == 0; \
+	((sizes) & ~INTEL_INFO(dev_priv)->page_sizes) == 0; \
 })
 
-#define HAS_OVERLAY(dev_priv)		 ((dev_priv)->info.display.has_overlay)
+#define HAS_OVERLAY(dev_priv)		 (INTEL_INFO(dev_priv)->display.has_overlay)
 #define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
-		((dev_priv)->info.display.overlay_needs_physical)
+		(INTEL_INFO(dev_priv)->display.overlay_needs_physical)
 
 /* Early gen2 have a totally busted CS tlb and require pinned batches. */
 #define HAS_BROKEN_CS_TLB(dev_priv)	(IS_I830(dev_priv) || IS_I845G(dev_priv))
@@ -2458,42 +2441,42 @@ intel_info(const struct drm_i915_private *dev_priv)
 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
 * rows, which changed the alignment requirements and fence programming.
 */
-#define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN2(dev_priv) && \
+#define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN(dev_priv, 2) && \
					 !(IS_I915G(dev_priv) || \
					   IS_I915GM(dev_priv)))
-#define SUPPORTS_TV(dev_priv)		((dev_priv)->info.display.supports_tv)
-#define I915_HAS_HOTPLUG(dev_priv)	((dev_priv)->info.display.has_hotplug)
+#define SUPPORTS_TV(dev_priv)		(INTEL_INFO(dev_priv)->display.supports_tv)
+#define I915_HAS_HOTPLUG(dev_priv)	(INTEL_INFO(dev_priv)->display.has_hotplug)
 
 #define HAS_FW_BLC(dev_priv)	(INTEL_GEN(dev_priv) > 2)
-#define HAS_FBC(dev_priv)	((dev_priv)->info.display.has_fbc)
+#define HAS_FBC(dev_priv)	(INTEL_INFO(dev_priv)->display.has_fbc)
 #define HAS_CUR_FBC(dev_priv)	(!HAS_GMCH_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 7)
 
 #define HAS_IPS(dev_priv)	(IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
 
-#define HAS_DP_MST(dev_priv)	((dev_priv)->info.display.has_dp_mst)
+#define HAS_DP_MST(dev_priv)	(INTEL_INFO(dev_priv)->display.has_dp_mst)
 
-#define HAS_DDI(dev_priv)		 ((dev_priv)->info.display.has_ddi)
-#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) ((dev_priv)->info.has_fpga_dbg)
-#define HAS_PSR(dev_priv)		 ((dev_priv)->info.display.has_psr)
+#define HAS_DDI(dev_priv)		 (INTEL_INFO(dev_priv)->display.has_ddi)
+#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->has_fpga_dbg)
+#define HAS_PSR(dev_priv)		 (INTEL_INFO(dev_priv)->display.has_psr)
 
-#define HAS_RC6(dev_priv)		 ((dev_priv)->info.has_rc6)
-#define HAS_RC6p(dev_priv)		 ((dev_priv)->info.has_rc6p)
+#define HAS_RC6(dev_priv)		 (INTEL_INFO(dev_priv)->has_rc6)
+#define HAS_RC6p(dev_priv)		 (INTEL_INFO(dev_priv)->has_rc6p)
 #define HAS_RC6pp(dev_priv)		 (false) /* HW was never validated */
 
-#define HAS_CSR(dev_priv)	((dev_priv)->info.display.has_csr)
+#define HAS_CSR(dev_priv)	(INTEL_INFO(dev_priv)->display.has_csr)
 
-#define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm)
-#define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc)
+#define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm)
+#define HAS_64BIT_RELOC(dev_priv) (INTEL_INFO(dev_priv)->has_64bit_reloc)
 
-#define HAS_IPC(dev_priv)		 ((dev_priv)->info.display.has_ipc)
+#define HAS_IPC(dev_priv)		 (INTEL_INFO(dev_priv)->display.has_ipc)
 
 /*
 * For now, anything with a GuC requires uCode loading, and then supports
 * command submission once loaded. But these are logically independent
 * properties, so we have separate macros to test them.
 */
-#define HAS_GUC(dev_priv)	((dev_priv)->info.has_guc)
-#define HAS_GUC_CT(dev_priv)	((dev_priv)->info.has_guc_ct)
+#define HAS_GUC(dev_priv)	(INTEL_INFO(dev_priv)->has_guc)
+#define HAS_GUC_CT(dev_priv)	(INTEL_INFO(dev_priv)->has_guc_ct)
 #define HAS_GUC_UCODE(dev_priv)	(HAS_GUC(dev_priv))
 #define HAS_GUC_SCHED(dev_priv)	(HAS_GUC(dev_priv))
 
@@ -2502,11 +2485,11 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define HAS_HUC_UCODE(dev_priv)	(HAS_GUC(dev_priv))
 
 /* Having a GuC is not the same as using a GuC */
-#define USES_GUC(dev_priv)		intel_uc_is_using_guc()
-#define USES_GUC_SUBMISSION(dev_priv)	intel_uc_is_using_guc_submission()
-#define USES_HUC(dev_priv)		intel_uc_is_using_huc()
+#define USES_GUC(dev_priv)		intel_uc_is_using_guc(dev_priv)
+#define USES_GUC_SUBMISSION(dev_priv)	intel_uc_is_using_guc_submission(dev_priv)
+#define USES_HUC(dev_priv)		intel_uc_is_using_huc(dev_priv)
 
-#define HAS_POOLED_EU(dev_priv)	((dev_priv)->info.has_pooled_eu)
+#define HAS_POOLED_EU(dev_priv)	(INTEL_INFO(dev_priv)->has_pooled_eu)
 
 #define INTEL_PCH_DEVICE_ID_MASK		0xff80
 #define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
@@ -2546,12 +2529,12 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
 #define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE)
 
-#define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.display.has_gmch_display)
+#define HAS_GMCH_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch_display)
 
 #define HAS_LSPCON(dev_priv) (INTEL_GEN(dev_priv) >= 9)
 
 /* DPF == dynamic parity feature */
-#define HAS_L3_DPF(dev_priv) ((dev_priv)->info.has_l3_dpf)
+#define HAS_L3_DPF(dev_priv) (INTEL_INFO(dev_priv)->has_l3_dpf)
 #define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \
				 2 : HAS_L3_DPF(dev_priv))
 
@@ -2916,9 +2899,9 @@ i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 	__i915_gem_object_unpin_pages(obj);
 }
 
-enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock */
+enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
 	I915_MM_NORMAL = 0,
-	I915_MM_SHRINKER
+	I915_MM_SHRINKER /* called "recursively" from direct-reclaim-esque */
 };
 
 void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
@@ -3204,7 +3187,8 @@ unsigned long i915_gem_shrink(struct drm_i915_private *i915,
 unsigned long i915_gem_shrink_all(struct drm_i915_private *i915);
 void i915_gem_shrinker_register(struct drm_i915_private *i915);
 void i915_gem_shrinker_unregister(struct drm_i915_private *i915);
-void i915_gem_shrinker_taints_mutex(struct mutex *mutex);
+void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
+				    struct mutex *mutex);
 
 /* i915_gem_tiling.c */
 static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
@@ -3313,7 +3297,7 @@ static inline void intel_unregister_dsm_handler(void) { return; }
 static inline struct intel_device_info *
 mkwrite_device_info(struct drm_i915_private *dev_priv)
 {
-	return (struct intel_device_info *)&dev_priv->info;
+	return (struct intel_device_info *)INTEL_INFO(dev_priv);
 }
 
 /* modesetting */
@@ -3599,90 +3583,6 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
 	}
 }
 
-static inline bool
-__i915_request_irq_complete(const struct i915_request *rq)
-{
-	struct intel_engine_cs *engine = rq->engine;
-	u32 seqno;
-
-	/* Note that the engine may have wrapped around the seqno, and
-	 * so our request->global_seqno will be ahead of the hardware,
-	 * even though it completed the request before wrapping. We catch
-	 * this by kicking all the waiters before resetting the seqno
-	 * in hardware, and also signal the fence.
-	 */
-	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
-		return true;
-
-	/* The request was dequeued before we were awoken. We check after
-	 * inspecting the hw to confirm that this was the same request
-	 * that generated the HWS update. The memory barriers within
-	 * the request execution are sufficient to ensure that a check
-	 * after reading the value from hw matches this request.
-	 */
-	seqno = i915_request_global_seqno(rq);
-	if (!seqno)
-		return false;
-
-	/* Before we do the heavier coherent read of the seqno,
-	 * check the value (hopefully) in the CPU cacheline.
-	 */
-	if (__i915_request_completed(rq, seqno))
-		return true;
-
-	/* Ensure our read of the seqno is coherent so that we
-	 * do not "miss an interrupt" (i.e. if this is the last
-	 * request and the seqno write from the GPU is not visible
-	 * by the time the interrupt fires, we will see that the
-	 * request is incomplete and go back to sleep awaiting
-	 * another interrupt that will never come.)
-	 *
-	 * Strictly, we only need to do this once after an interrupt,
-	 * but it is easier and safer to do it every time the waiter
-	 * is woken.
-	 */
-	if (engine->irq_seqno_barrier &&
-	    test_and_clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted)) {
-		struct intel_breadcrumbs *b = &engine->breadcrumbs;
-
-		/* The ordering of irq_posted versus applying the barrier
-		 * is crucial. The clearing of the current irq_posted must
-		 * be visible before we perform the barrier operation,
-		 * such that if a subsequent interrupt arrives, irq_posted
-		 * is reasserted and our task rewoken (which causes us to
-		 * do another __i915_request_irq_complete() immediately
-		 * and reapply the barrier). Conversely, if the clear
-		 * occurs after the barrier, then an interrupt that arrived
-		 * whilst we waited on the barrier would not trigger a
-		 * barrier on the next pass, and the read may not see the
-		 * seqno update.
-		 */
-		engine->irq_seqno_barrier(engine);
-
-		/* If we consume the irq, but we are no longer the bottom-half,
-		 * the real bottom-half may not have serialised their own
-		 * seqno check with the irq-barrier (i.e. may have inspected
-		 * the seqno before we believe it coherent since they see
-		 * irq_posted == false but we are still running).
-		 */
-		spin_lock_irq(&b->irq_lock);
-		if (b->irq_wait && b->irq_wait->tsk != current)
-			/* Note that if the bottom-half is changed as we
-			 * are sending the wake-up, the new bottom-half will
-			 * be woken by whomever made the change. We only have
-			 * to worry about when we steal the irq-posted for
-			 * ourself.
-			 */
-			wake_up_process(b->irq_wait->tsk);
-		spin_unlock_irq(&b->irq_lock);
-
-		if (__i915_request_completed(rq, seqno))
-			return true;
-	}
-
-	return false;
-}
-
 void i915_memcpy_init_early(struct drm_i915_private *dev_priv);
 bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);
 
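The INTEL_INFO()/RUNTIME_INFO() substitutions scattered through this patch all follow from the split above: identification and feature flags stay in a write-once const structure, while values probed at load time (device id, CS timestamp frequency, fused-off SSEU topology) move to a mutable runtime structure. A rough sketch of the resulting layout, trimmed to the fields this diff touches (the authoritative definitions live in intel_device_info.h):

	struct intel_device_info {
		/* Write-once: copied from the PCI match table at probe. */
		u8 gen;
		unsigned int platform_mask;
		u8 gt;	/* GT level, where the IS_*_GTn() macros look */
		/* ... static feature flags: has_llc, display.has_psr, ... */
	};

	struct intel_runtime_info {
		/* Filled in by intel_device_info_runtime_init(). */
		u16 device_id;
		u32 cs_timestamp_frequency_khz;
		struct sseu_dev_info sseu;	/* enabled slices/subslices/EUs */
	};

	/* Accessors as redefined above: */
	#define INTEL_INFO(dev_priv)	(&(dev_priv)->__info)
	#define RUNTIME_INFO(dev_priv)	(&(dev_priv)->__runtime)
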
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 7399ac7a5629..ea85da393662 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -25,7 +25,6 @@
 *
 */
 
-#include <drm/drmP.h>
 #include <drm/drm_vma_manager.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
@@ -859,58 +858,6 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
 	obj->write_domain = 0;
 }
 
-static inline int
-__copy_to_user_swizzled(char __user *cpu_vaddr,
-			const char *gpu_vaddr, int gpu_offset,
-			int length)
-{
-	int ret, cpu_offset = 0;
-
-	while (length > 0) {
-		int cacheline_end = ALIGN(gpu_offset + 1, 64);
-		int this_length = min(cacheline_end - gpu_offset, length);
-		int swizzled_gpu_offset = gpu_offset ^ 64;
-
-		ret = __copy_to_user(cpu_vaddr + cpu_offset,
-				     gpu_vaddr + swizzled_gpu_offset,
-				     this_length);
-		if (ret)
-			return ret + length;
-
-		cpu_offset += this_length;
-		gpu_offset += this_length;
-		length -= this_length;
-	}
-
-	return 0;
-}
-
-static inline int
-__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
-			  const char __user *cpu_vaddr,
-			  int length)
-{
-	int ret, cpu_offset = 0;
-
-	while (length > 0) {
-		int cacheline_end = ALIGN(gpu_offset + 1, 64);
-		int this_length = min(cacheline_end - gpu_offset, length);
-		int swizzled_gpu_offset = gpu_offset ^ 64;
-
-		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
-				       cpu_vaddr + cpu_offset,
-				       this_length);
-		if (ret)
-			return ret + length;
-
-		cpu_offset += this_length;
-		gpu_offset += this_length;
-		length -= this_length;
-	}
-
-	return 0;
-}
-
 /*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
@@ -1030,72 +977,23 @@ err_unpin:
 	return ret;
 }
 
-static void
-shmem_clflush_swizzled_range(char *addr, unsigned long length,
-			     bool swizzled)
-{
-	if (unlikely(swizzled)) {
-		unsigned long start = (unsigned long) addr;
-		unsigned long end = (unsigned long) addr + length;
-
-		/* For swizzling simply ensure that we always flush both
-		 * channels. Lame, but simple and it works. Swizzled
-		 * pwrite/pread is far from a hotpath - current userspace
-		 * doesn't use it at all. */
-		start = round_down(start, 128);
-		end = round_up(end, 128);
-
-		drm_clflush_virt_range((void *)start, end - start);
-	} else {
-		drm_clflush_virt_range(addr, length);
-	}
-
-}
-
-/* Only difference to the fast-path function is that this can handle bit17
- * and uses non-atomic copy and kmap functions. */
 static int
-shmem_pread_slow(struct page *page, int offset, int length,
-		 char __user *user_data,
-		 bool page_do_bit17_swizzling, bool needs_clflush)
+shmem_pread(struct page *page, int offset, int len, char __user *user_data,
+	    bool needs_clflush)
 {
 	char *vaddr;
 	int ret;
 
 	vaddr = kmap(page);
-	if (needs_clflush)
-		shmem_clflush_swizzled_range(vaddr + offset, length,
-					     page_do_bit17_swizzling);
 
-	if (page_do_bit17_swizzling)
-		ret = __copy_to_user_swizzled(user_data, vaddr, offset, length);
-	else
-		ret = __copy_to_user(user_data, vaddr + offset, length);
-	kunmap(page);
+	if (needs_clflush)
+		drm_clflush_virt_range(vaddr + offset, len);
 
-	return ret ? - EFAULT : 0;
-}
+	ret = __copy_to_user(user_data, vaddr + offset, len);
 
-static int
-shmem_pread(struct page *page, int offset, int length, char __user *user_data,
-	    bool page_do_bit17_swizzling, bool needs_clflush)
-{
-	int ret;
-
-	ret = -ENODEV;
-	if (!page_do_bit17_swizzling) {
-		char *vaddr = kmap_atomic(page);
-
-		if (needs_clflush)
-			drm_clflush_virt_range(vaddr + offset, length);
-		ret = __copy_to_user_inatomic(user_data, vaddr + offset, length);
-		kunmap_atomic(vaddr);
-	}
-	if (ret == 0)
-		return 0;
+	kunmap(page);
 
-	return shmem_pread_slow(page, offset, length, user_data,
-				page_do_bit17_swizzling, needs_clflush);
+	return ret ? -EFAULT : 0;
 }
 
 static int
@@ -1104,15 +1002,10 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
 {
 	char __user *user_data;
 	u64 remain;
-	unsigned int obj_do_bit17_swizzling;
 	unsigned int needs_clflush;
 	unsigned int idx, offset;
 	int ret;
 
-	obj_do_bit17_swizzling = 0;
-	if (i915_gem_object_needs_bit17_swizzle(obj))
-		obj_do_bit17_swizzling = BIT(17);
-
 	ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
 	if (ret)
 		return ret;
@@ -1130,7 +1023,6 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
 		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
 
 		ret = shmem_pread(page, offset, length, user_data,
-				  page_to_phys(page) & obj_do_bit17_swizzling,
1134 needs_clflush); 1026 needs_clflush);
1135 if (ret) 1027 if (ret)
1136 break; 1028 break;
@@ -1470,33 +1362,6 @@ out_unlock:
1470 return ret; 1362 return ret;
1471} 1363}
1472 1364
1473static int
1474shmem_pwrite_slow(struct page *page, int offset, int length,
1475 char __user *user_data,
1476 bool page_do_bit17_swizzling,
1477 bool needs_clflush_before,
1478 bool needs_clflush_after)
1479{
1480 char *vaddr;
1481 int ret;
1482
1483 vaddr = kmap(page);
1484 if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
1485 shmem_clflush_swizzled_range(vaddr + offset, length,
1486 page_do_bit17_swizzling);
1487 if (page_do_bit17_swizzling)
1488 ret = __copy_from_user_swizzled(vaddr, offset, user_data,
1489 length);
1490 else
1491 ret = __copy_from_user(vaddr + offset, user_data, length);
1492 if (needs_clflush_after)
1493 shmem_clflush_swizzled_range(vaddr + offset, length,
1494 page_do_bit17_swizzling);
1495 kunmap(page);
1496
1497 return ret ? -EFAULT : 0;
1498}
1499
1500/* Per-page copy function for the shmem pwrite fastpath. 1365/* Per-page copy function for the shmem pwrite fastpath.
1501 * Flushes invalid cachelines before writing to the target if 1366 * Flushes invalid cachelines before writing to the target if
1502 * needs_clflush_before is set and flushes out any written cachelines after 1367 * needs_clflush_before is set and flushes out any written cachelines after
@@ -1504,31 +1369,24 @@ shmem_pwrite_slow(struct page *page, int offset, int length,
1504 */ 1369 */
1505static int 1370static int
1506shmem_pwrite(struct page *page, int offset, int len, char __user *user_data, 1371shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
1507 bool page_do_bit17_swizzling,
1508 bool needs_clflush_before, 1372 bool needs_clflush_before,
1509 bool needs_clflush_after) 1373 bool needs_clflush_after)
1510{ 1374{
1375 char *vaddr;
1511 int ret; 1376 int ret;
1512 1377
1513 ret = -ENODEV; 1378 vaddr = kmap(page);
1514 if (!page_do_bit17_swizzling) {
1515 char *vaddr = kmap_atomic(page);
1516 1379
1517 if (needs_clflush_before) 1380 if (needs_clflush_before)
1518 drm_clflush_virt_range(vaddr + offset, len); 1381 drm_clflush_virt_range(vaddr + offset, len);
1519 ret = __copy_from_user_inatomic(vaddr + offset, user_data, len);
1520 if (needs_clflush_after)
1521 drm_clflush_virt_range(vaddr + offset, len);
1522 1382
1523 kunmap_atomic(vaddr); 1383 ret = __copy_from_user(vaddr + offset, user_data, len);
1524 } 1384 if (!ret && needs_clflush_after)
1525 if (ret == 0) 1385 drm_clflush_virt_range(vaddr + offset, len);
1526 return ret;
1527 1386
1528 return shmem_pwrite_slow(page, offset, len, user_data, 1387 kunmap(page);
1529 page_do_bit17_swizzling, 1388
1530 needs_clflush_before, 1389 return ret ? -EFAULT : 0;
1531 needs_clflush_after);
1532} 1390}
1533 1391
1534static int 1392static int
@@ -1538,7 +1396,6 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
1538 struct drm_i915_private *i915 = to_i915(obj->base.dev); 1396 struct drm_i915_private *i915 = to_i915(obj->base.dev);
1539 void __user *user_data; 1397 void __user *user_data;
1540 u64 remain; 1398 u64 remain;
1541 unsigned int obj_do_bit17_swizzling;
1542 unsigned int partial_cacheline_write; 1399 unsigned int partial_cacheline_write;
1543 unsigned int needs_clflush; 1400 unsigned int needs_clflush;
1544 unsigned int offset, idx; 1401 unsigned int offset, idx;
@@ -1553,10 +1410,6 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
1553 if (ret) 1410 if (ret)
1554 return ret; 1411 return ret;
1555 1412
1556 obj_do_bit17_swizzling = 0;
1557 if (i915_gem_object_needs_bit17_swizzle(obj))
1558 obj_do_bit17_swizzling = BIT(17);
1559
1560 /* If we don't overwrite a cacheline completely we need to be 1413 /* If we don't overwrite a cacheline completely we need to be
1561 * careful to have up-to-date data by first clflushing. Don't 1414 * careful to have up-to-date data by first clflushing. Don't
1562	 * overcomplicate things and flush the entire page.	1415
@@ -1573,7 +1426,6 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
1573 unsigned int length = min_t(u64, remain, PAGE_SIZE - offset); 1426 unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
1574 1427
1575 ret = shmem_pwrite(page, offset, length, user_data, 1428 ret = shmem_pwrite(page, offset, length, user_data,
1576 page_to_phys(page) & obj_do_bit17_swizzling,
1577 (offset | length) & partial_cacheline_write, 1429 (offset | length) & partial_cacheline_write,
1578 needs_clflush & CLFLUSH_AFTER); 1430 needs_clflush & CLFLUSH_AFTER);
1579 if (ret) 1431 if (ret)
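The `(offset | length) & partial_cacheline_write` expression in this loop is compact but subtle: with a mask of one less than the cacheline size, the OR is non-zero exactly when the write does not both start and end on a cacheline boundary, which is when the flush-before-write is needed. A small runnable check of that arithmetic; the 64-byte line size is an assumption here, while the driver derives partial_cacheline_write from the CPU's reported clflush size (not visible in this hunk):

    #include <stdio.h>

    int main(void)
    {
        const unsigned int mask = 64 - 1; /* assume 64-byte cachelines */
        const struct { unsigned int off, len; } cases[] = {
            { 0, 4096 },   /* aligned start, whole lines -> 0, no flush */
            { 0, 100 },    /* ragged end                 -> non-zero    */
            { 32, 64 },    /* ragged start               -> non-zero    */
        };

        for (unsigned int i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
            printf("off=%u len=%u partial=%u\n",
                   cases[i].off, cases[i].len,
                   (cases[i].off | cases[i].len) & mask);
        return 0;
    }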
@@ -3227,13 +3079,6 @@ void i915_gem_reset_engine(struct intel_engine_cs *engine,
3227 struct i915_request *request, 3079 struct i915_request *request,
3228 bool stalled) 3080 bool stalled)
3229{ 3081{
3230 /*
3231 * Make sure this write is visible before we re-enable the interrupt
3232 * handlers on another CPU, as tasklet_enable() resolves to just
3233 * a compiler barrier which is insufficient for our purpose here.
3234 */
3235 smp_store_mb(engine->irq_posted, 0);
3236
3237 if (request) 3082 if (request)
3238 request = i915_gem_reset_request(engine, request, stalled); 3083 request = i915_gem_reset_request(engine, request, stalled);
3239 3084
@@ -3315,7 +3160,7 @@ static void nop_submit_request(struct i915_request *request)
3315 3160
3316 spin_lock_irqsave(&request->engine->timeline.lock, flags); 3161 spin_lock_irqsave(&request->engine->timeline.lock, flags);
3317 __i915_request_submit(request); 3162 __i915_request_submit(request);
3318 intel_engine_init_global_seqno(request->engine, request->global_seqno); 3163 intel_engine_write_global_seqno(request->engine, request->global_seqno);
3319 spin_unlock_irqrestore(&request->engine->timeline.lock, flags); 3164 spin_unlock_irqrestore(&request->engine->timeline.lock, flags);
3320} 3165}
3321 3166
@@ -3356,7 +3201,7 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
3356 3201
3357 /* 3202 /*
3358 * Make sure no request can slip through without getting completed by 3203 * Make sure no request can slip through without getting completed by
3359 * either this call here to intel_engine_init_global_seqno, or the one 3204 * either this call here to intel_engine_write_global_seqno, or the one
3360 * in nop_submit_request. 3205 * in nop_submit_request.
3361 */ 3206 */
3362 synchronize_rcu(); 3207 synchronize_rcu();
@@ -3384,6 +3229,9 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
3384 if (!test_bit(I915_WEDGED, &i915->gpu_error.flags)) 3229 if (!test_bit(I915_WEDGED, &i915->gpu_error.flags))
3385 return true; 3230 return true;
3386 3231
	3232	 if (!i915->gt.scratch) /* Never fully initialised, recovery impossible */
3233 return false;
3234
3387 GEM_TRACE("start\n"); 3235 GEM_TRACE("start\n");
3388 3236
3389 /* 3237 /*
@@ -3422,8 +3270,7 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
3422 i915_retire_requests(i915); 3270 i915_retire_requests(i915);
3423 GEM_BUG_ON(i915->gt.active_requests); 3271 GEM_BUG_ON(i915->gt.active_requests);
3424 3272
3425 if (!intel_gpu_reset(i915, ALL_ENGINES)) 3273 intel_engines_sanitize(i915, false);
3426 intel_engines_sanitize(i915);
3427 3274
3428 /* 3275 /*
3429 * Undo nop_submit_request. We prevent all new i915 requests from 3276 * Undo nop_submit_request. We prevent all new i915 requests from
@@ -5027,8 +4874,6 @@ void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
5027 4874
5028void i915_gem_sanitize(struct drm_i915_private *i915) 4875void i915_gem_sanitize(struct drm_i915_private *i915)
5029{ 4876{
5030 int err;
5031
5032 GEM_TRACE("\n"); 4877 GEM_TRACE("\n");
5033 4878
5034 mutex_lock(&i915->drm.struct_mutex); 4879 mutex_lock(&i915->drm.struct_mutex);
@@ -5053,11 +4898,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
5053 * it may impact the display and we are uncertain about the stability 4898 * it may impact the display and we are uncertain about the stability
5054 * of the reset, so this could be applied to even earlier gen. 4899 * of the reset, so this could be applied to even earlier gen.
5055 */ 4900 */
5056 err = -ENODEV; 4901 intel_engines_sanitize(i915, false);
5057 if (INTEL_GEN(i915) >= 5 && intel_has_gpu_reset(i915))
5058 err = WARN_ON(intel_gpu_reset(i915, ALL_ENGINES));
5059 if (!err)
5060 intel_engines_sanitize(i915);
5061 4902
5062 intel_uncore_forcewake_put(i915, FORCEWAKE_ALL); 4903 intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
5063 intel_runtime_pm_put(i915); 4904 intel_runtime_pm_put(i915);
@@ -5223,15 +5064,15 @@ void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
5223 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | 5064 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
5224 DISP_TILE_SURFACE_SWIZZLING); 5065 DISP_TILE_SURFACE_SWIZZLING);
5225 5066
5226 if (IS_GEN5(dev_priv)) 5067 if (IS_GEN(dev_priv, 5))
5227 return; 5068 return;
5228 5069
5229 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL); 5070 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
5230 if (IS_GEN6(dev_priv)) 5071 if (IS_GEN(dev_priv, 6))
5231 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB)); 5072 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
5232 else if (IS_GEN7(dev_priv)) 5073 else if (IS_GEN(dev_priv, 7))
5233 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB)); 5074 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
5234 else if (IS_GEN8(dev_priv)) 5075 else if (IS_GEN(dev_priv, 8))
5235 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW)); 5076 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
5236 else 5077 else
5237 BUG(); 5078 BUG();
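This hunk is part of a tree-wide conversion from per-generation macros (IS_GEN5, IS_GEN6, ...) to a parameterised IS_GEN(dev_priv, n), which also enables range checks like the IS_GEN_RANGE() seen later in this diff. A sketch of how such helpers can be built on a generation bitmask so both forms fold to a single AND; the field and macro names below are illustrative, and the real i915 definitions differ in detail:

    #include <stdbool.h>

    struct device_info {
        unsigned int gen_mask; /* BIT(n) set for generation n */
    };

    #define BIT(n)          (1u << (n))
    #define GENMASK_U(h, l) ((~0u >> (31 - (h))) & (~0u << (l)))

    static inline bool is_gen(const struct device_info *info, int n)
    {
        return info->gen_mask & BIT(n);
    }

    static inline bool is_gen_range(const struct device_info *info,
                                    int from, int until)
    {
        /* One mask test covers the whole inclusive range. */
        return info->gen_mask & GENMASK_U(until, from);
    }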
@@ -5253,10 +5094,10 @@ static void init_unused_rings(struct drm_i915_private *dev_priv)
5253 init_unused_ring(dev_priv, SRB1_BASE); 5094 init_unused_ring(dev_priv, SRB1_BASE);
5254 init_unused_ring(dev_priv, SRB2_BASE); 5095 init_unused_ring(dev_priv, SRB2_BASE);
5255 init_unused_ring(dev_priv, SRB3_BASE); 5096 init_unused_ring(dev_priv, SRB3_BASE);
5256 } else if (IS_GEN2(dev_priv)) { 5097 } else if (IS_GEN(dev_priv, 2)) {
5257 init_unused_ring(dev_priv, SRB0_BASE); 5098 init_unused_ring(dev_priv, SRB0_BASE);
5258 init_unused_ring(dev_priv, SRB1_BASE); 5099 init_unused_ring(dev_priv, SRB1_BASE);
5259 } else if (IS_GEN3(dev_priv)) { 5100 } else if (IS_GEN(dev_priv, 3)) {
5260 init_unused_ring(dev_priv, PRB1_BASE); 5101 init_unused_ring(dev_priv, PRB1_BASE);
5261 init_unused_ring(dev_priv, PRB2_BASE); 5102 init_unused_ring(dev_priv, PRB2_BASE);
5262 } 5103 }
@@ -5580,7 +5421,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
5580 } 5421 }
5581 5422
5582 ret = i915_gem_init_scratch(dev_priv, 5423 ret = i915_gem_init_scratch(dev_priv,
5583 IS_GEN2(dev_priv) ? SZ_256K : PAGE_SIZE); 5424 IS_GEN(dev_priv, 2) ? SZ_256K : PAGE_SIZE);
5584 if (ret) { 5425 if (ret) {
5585 GEM_BUG_ON(ret == -EIO); 5426 GEM_BUG_ON(ret == -EIO);
5586 goto err_ggtt; 5427 goto err_ggtt;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 4ec386950f75..5933adbe3d99 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -86,7 +86,6 @@
86 */ 86 */
87 87
88#include <linux/log2.h> 88#include <linux/log2.h>
89#include <drm/drmP.h>
90#include <drm/i915_drm.h> 89#include <drm/i915_drm.h>
91#include "i915_drv.h" 90#include "i915_drv.h"
92#include "i915_trace.h" 91#include "i915_trace.h"
@@ -311,7 +310,7 @@ static u32 default_desc_template(const struct drm_i915_private *i915,
311 address_mode = INTEL_LEGACY_64B_CONTEXT; 310 address_mode = INTEL_LEGACY_64B_CONTEXT;
312 desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT; 311 desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;
313 312
314 if (IS_GEN8(i915)) 313 if (IS_GEN(i915, 8))
315 desc |= GEN8_CTX_L3LLC_COHERENT; 314 desc |= GEN8_CTX_L3LLC_COHERENT;
316 315
317 /* TODO: WaDisableLiteRestore when we start using semaphore 316 /* TODO: WaDisableLiteRestore when we start using semaphore
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 82e2ca17a441..02f7298bfe57 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -27,7 +27,6 @@
27#include <linux/dma-buf.h> 27#include <linux/dma-buf.h>
28#include <linux/reservation.h> 28#include <linux/reservation.h>
29 29
30#include <drm/drmP.h>
31 30
32#include "i915_drv.h" 31#include "i915_drv.h"
33 32
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 02b83a5ed96c..f6855401f247 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -26,7 +26,6 @@
26 * 26 *
27 */ 27 */
28 28
29#include <drm/drmP.h>
30#include <drm/i915_drm.h> 29#include <drm/i915_drm.h>
31 30
32#include "i915_drv.h" 31#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 485b259127c3..e7994505d850 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -31,7 +31,6 @@
31#include <linux/sync_file.h> 31#include <linux/sync_file.h>
32#include <linux/uaccess.h> 32#include <linux/uaccess.h>
33 33
34#include <drm/drmP.h>
35#include <drm/drm_syncobj.h> 34#include <drm/drm_syncobj.h>
36#include <drm/i915_drm.h> 35#include <drm/i915_drm.h>
37 36
@@ -1380,7 +1379,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
1380 * batchbuffers. 1379 * batchbuffers.
1381 */ 1380 */
1382 if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION && 1381 if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
1383 IS_GEN6(eb->i915)) { 1382 IS_GEN(eb->i915, 6)) {
1384 err = i915_vma_bind(target, target->obj->cache_level, 1383 err = i915_vma_bind(target, target->obj->cache_level,
1385 PIN_GLOBAL); 1384 PIN_GLOBAL);
1386 if (WARN_ONCE(err, 1385 if (WARN_ONCE(err,
@@ -1896,7 +1895,7 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
1896 u32 *cs; 1895 u32 *cs;
1897 int i; 1896 int i;
1898 1897
1899 if (!IS_GEN7(rq->i915) || rq->engine->id != RCS) { 1898 if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS) {
1900 DRM_DEBUG("sol reset is gen7/rcs only\n"); 1899 DRM_DEBUG("sol reset is gen7/rcs only\n");
1901 return -EINVAL; 1900 return -EINVAL;
1902 } 1901 }
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index d548ac05ccd7..d67c07cdd0b8 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -21,7 +21,6 @@
21 * IN THE SOFTWARE. 21 * IN THE SOFTWARE.
22 */ 22 */
23 23
24#include <drm/drmP.h>
25#include <drm/i915_drm.h> 24#include <drm/i915_drm.h>
26#include "i915_drv.h" 25#include "i915_drv.h"
27 26
@@ -193,9 +192,9 @@ static void fence_write(struct drm_i915_fence_reg *fence,
193 * and explicitly managed for internal users. 192 * and explicitly managed for internal users.
194 */ 193 */
195 194
196 if (IS_GEN2(fence->i915)) 195 if (IS_GEN(fence->i915, 2))
197 i830_write_fence_reg(fence, vma); 196 i830_write_fence_reg(fence, vma);
198 else if (IS_GEN3(fence->i915)) 197 else if (IS_GEN(fence->i915, 3))
199 i915_write_fence_reg(fence, vma); 198 i915_write_fence_reg(fence, vma);
200 else 199 else
201 i965_write_fence_reg(fence, vma); 200 i965_write_fence_reg(fence, vma);
@@ -596,13 +595,13 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
596 swizzle_y = I915_BIT_6_SWIZZLE_NONE; 595 swizzle_y = I915_BIT_6_SWIZZLE_NONE;
597 } 596 }
598 } 597 }
599 } else if (IS_GEN5(dev_priv)) { 598 } else if (IS_GEN(dev_priv, 5)) {
 600		/* On Ironlake, whatever the DRAM config, the GPU always does	 599
 601		 * the same swizzling setup.	 600
602 */ 601 */
603 swizzle_x = I915_BIT_6_SWIZZLE_9_10; 602 swizzle_x = I915_BIT_6_SWIZZLE_9_10;
604 swizzle_y = I915_BIT_6_SWIZZLE_9; 603 swizzle_y = I915_BIT_6_SWIZZLE_9;
605 } else if (IS_GEN2(dev_priv)) { 604 } else if (IS_GEN(dev_priv, 2)) {
606 /* As far as we know, the 865 doesn't have these bit 6 605 /* As far as we know, the 865 doesn't have these bit 6
607 * swizzling issues. 606 * swizzling issues.
608 */ 607 */
@@ -647,7 +646,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
647 } 646 }
648 647
649 /* check for L-shaped memory aka modified enhanced addressing */ 648 /* check for L-shaped memory aka modified enhanced addressing */
650 if (IS_GEN4(dev_priv) && 649 if (IS_GEN(dev_priv, 4) &&
651 !(I915_READ(DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) { 650 !(I915_READ(DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) {
652 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; 651 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
653 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; 652 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index add1fe7aeb93..a8807fbed0aa 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -33,7 +33,6 @@
33 33
34#include <asm/set_memory.h> 34#include <asm/set_memory.h>
35 35
36#include <drm/drmP.h>
37#include <drm/i915_drm.h> 36#include <drm/i915_drm.h>
38 37
39#include "i915_drv.h" 38#include "i915_drv.h"
@@ -483,7 +482,7 @@ static void i915_address_space_init(struct i915_address_space *vm,
483 * attempt holding the lock is immediately reported by lockdep. 482 * attempt holding the lock is immediately reported by lockdep.
484 */ 483 */
485 mutex_init(&vm->mutex); 484 mutex_init(&vm->mutex);
486 i915_gem_shrinker_taints_mutex(&vm->mutex); 485 i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
487 486
488 GEM_BUG_ON(!vm->total); 487 GEM_BUG_ON(!vm->total);
489 drm_mm_init(&vm->mm, 0, vm->total); 488 drm_mm_init(&vm->mm, 0, vm->total);
@@ -1423,8 +1422,6 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
1423 gen8_initialize_pd(vm, pd); 1422 gen8_initialize_pd(vm, pd);
1424 gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe); 1423 gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
1425 GEM_BUG_ON(pdp->used_pdpes > i915_pdpes_per_pdp(vm)); 1424 GEM_BUG_ON(pdp->used_pdpes > i915_pdpes_per_pdp(vm));
1426
1427 mark_tlbs_dirty(i915_vm_to_ppgtt(vm));
1428 } 1425 }
1429 1426
1430 ret = gen8_ppgtt_alloc_pd(vm, pd, start, length); 1427 ret = gen8_ppgtt_alloc_pd(vm, pd, start, length);
@@ -1490,84 +1487,6 @@ unwind:
1490 return -ENOMEM; 1487 return -ENOMEM;
1491} 1488}
1492 1489
1493static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
1494 struct i915_page_directory_pointer *pdp,
1495 u64 start, u64 length,
1496 gen8_pte_t scratch_pte,
1497 struct seq_file *m)
1498{
1499 struct i915_address_space *vm = &ppgtt->vm;
1500 struct i915_page_directory *pd;
1501 u32 pdpe;
1502
1503 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1504 struct i915_page_table *pt;
1505 u64 pd_len = length;
1506 u64 pd_start = start;
1507 u32 pde;
1508
1509 if (pdp->page_directory[pdpe] == ppgtt->vm.scratch_pd)
1510 continue;
1511
1512 seq_printf(m, "\tPDPE #%d\n", pdpe);
1513 gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
1514 u32 pte;
1515 gen8_pte_t *pt_vaddr;
1516
1517 if (pd->page_table[pde] == ppgtt->vm.scratch_pt)
1518 continue;
1519
1520 pt_vaddr = kmap_atomic_px(pt);
1521 for (pte = 0; pte < GEN8_PTES; pte += 4) {
1522 u64 va = (pdpe << GEN8_PDPE_SHIFT |
1523 pde << GEN8_PDE_SHIFT |
1524 pte << GEN8_PTE_SHIFT);
1525 int i;
1526 bool found = false;
1527
1528 for (i = 0; i < 4; i++)
1529 if (pt_vaddr[pte + i] != scratch_pte)
1530 found = true;
1531 if (!found)
1532 continue;
1533
1534 seq_printf(m, "\t\t0x%llx [%03d,%03d,%04d]: =", va, pdpe, pde, pte);
1535 for (i = 0; i < 4; i++) {
1536 if (pt_vaddr[pte + i] != scratch_pte)
1537 seq_printf(m, " %llx", pt_vaddr[pte + i]);
1538 else
1539 seq_puts(m, " SCRATCH ");
1540 }
1541 seq_puts(m, "\n");
1542 }
1543 kunmap_atomic(pt_vaddr);
1544 }
1545 }
1546}
1547
1548static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
1549{
1550 struct i915_address_space *vm = &ppgtt->vm;
1551 const gen8_pte_t scratch_pte = vm->scratch_pte;
1552 u64 start = 0, length = ppgtt->vm.total;
1553
1554 if (use_4lvl(vm)) {
1555 u64 pml4e;
1556 struct i915_pml4 *pml4 = &ppgtt->pml4;
1557 struct i915_page_directory_pointer *pdp;
1558
1559 gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
1560 if (pml4->pdps[pml4e] == ppgtt->vm.scratch_pdp)
1561 continue;
1562
1563 seq_printf(m, " PML4E #%llu\n", pml4e);
1564 gen8_dump_pdp(ppgtt, pdp, start, length, scratch_pte, m);
1565 }
1566 } else {
1567 gen8_dump_pdp(ppgtt, &ppgtt->pdp, start, length, scratch_pte, m);
1568 }
1569}
1570
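The gen8_dump_pdp() walker deleted above reconstructs each virtual address from its table indices as pdpe << GEN8_PDPE_SHIFT | pde << GEN8_PDE_SHIFT | pte << GEN8_PTE_SHIFT, i.e. the usual 9-bit-per-level split over 4KiB pages. A worked example of that reconstruction; the 12/21/30 shift values below are the standard x86-style layout, assumed here to match the GEN8_*_SHIFT macros:

    #include <stdio.h>

    int main(void)
    {
        const unsigned int pte_shift = 12, pde_shift = 21, pdpe_shift = 30;
        unsigned long long pdpe = 1, pde = 2, pte = 3;

        /* Indices (1, 2, 3) recombine into one virtual address. */
        unsigned long long va = (pdpe << pdpe_shift) |
                                (pde << pde_shift) |
                                (pte << pte_shift);

        printf("va = 0x%llx\n", va); /* prints 0x40403000 */
        return 0;
    }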
1571static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt) 1490static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
1572{ 1491{
1573 struct i915_address_space *vm = &ppgtt->vm; 1492 struct i915_address_space *vm = &ppgtt->vm;
@@ -1672,7 +1591,6 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
1672 gen8_ppgtt_notify_vgt(ppgtt, true); 1591 gen8_ppgtt_notify_vgt(ppgtt, true);
1673 1592
1674 ppgtt->vm.cleanup = gen8_ppgtt_cleanup; 1593 ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
1675 ppgtt->debug_dump = gen8_dump_ppgtt;
1676 1594
1677 ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma; 1595 ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma;
1678 ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma; 1596 ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
@@ -1688,60 +1606,6 @@ err_free:
1688 return ERR_PTR(err); 1606 return ERR_PTR(err);
1689} 1607}
1690 1608
1691static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m)
1692{
1693 struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
1694 const gen6_pte_t scratch_pte = base->vm.scratch_pte;
1695 struct i915_page_table *pt;
1696 u32 pte, pde;
1697
1698 gen6_for_all_pdes(pt, &base->pd, pde) {
1699 gen6_pte_t *vaddr;
1700
1701 if (pt == base->vm.scratch_pt)
1702 continue;
1703
1704 if (i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND)) {
1705 u32 expected =
1706 GEN6_PDE_ADDR_ENCODE(px_dma(pt)) |
1707 GEN6_PDE_VALID;
1708 u32 pd_entry = readl(ppgtt->pd_addr + pde);
1709
1710 if (pd_entry != expected)
1711 seq_printf(m,
1712 "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
1713 pde,
1714 pd_entry,
1715 expected);
1716
1717 seq_printf(m, "\tPDE: %x\n", pd_entry);
1718 }
1719
1720 vaddr = kmap_atomic_px(base->pd.page_table[pde]);
1721 for (pte = 0; pte < GEN6_PTES; pte += 4) {
1722 int i;
1723
1724 for (i = 0; i < 4; i++)
1725 if (vaddr[pte + i] != scratch_pte)
1726 break;
1727 if (i == 4)
1728 continue;
1729
1730 seq_printf(m, "\t\t(%03d, %04d) %08llx: ",
1731 pde, pte,
1732 (pde * GEN6_PTES + pte) * I915_GTT_PAGE_SIZE);
1733 for (i = 0; i < 4; i++) {
1734 if (vaddr[pte + i] != scratch_pte)
1735 seq_printf(m, " %08x", vaddr[pte + i]);
1736 else
1737 seq_puts(m, " SCRATCH");
1738 }
1739 seq_puts(m, "\n");
1740 }
1741 kunmap_atomic(vaddr);
1742 }
1743}
1744
1745/* Write pde (index) from the page directory @pd to the page table @pt */ 1609/* Write pde (index) from the page directory @pd to the page table @pt */
1746static inline void gen6_write_pde(const struct gen6_hw_ppgtt *ppgtt, 1610static inline void gen6_write_pde(const struct gen6_hw_ppgtt *ppgtt,
1747 const unsigned int pde, 1611 const unsigned int pde,
@@ -2075,6 +1939,7 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
2075int gen6_ppgtt_pin(struct i915_hw_ppgtt *base) 1939int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
2076{ 1940{
2077 struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base); 1941 struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
1942 int err;
2078 1943
2079 /* 1944 /*
2080 * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt 1945 * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
@@ -2090,9 +1955,17 @@ int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
2090 * allocator works in address space sizes, so it's multiplied by page 1955 * allocator works in address space sizes, so it's multiplied by page
2091 * size. We allocate at the top of the GTT to avoid fragmentation. 1956 * size. We allocate at the top of the GTT to avoid fragmentation.
2092 */ 1957 */
2093 return i915_vma_pin(ppgtt->vma, 1958 err = i915_vma_pin(ppgtt->vma,
2094 0, GEN6_PD_ALIGN, 1959 0, GEN6_PD_ALIGN,
2095 PIN_GLOBAL | PIN_HIGH); 1960 PIN_GLOBAL | PIN_HIGH);
1961 if (err)
1962 goto unpin;
1963
1964 return 0;
1965
1966unpin:
1967 ppgtt->pin_count = 0;
1968 return err;
2096} 1969}
2097 1970
2098void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base) 1971void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base)
@@ -2129,7 +2002,6 @@ static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
2129 ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range; 2002 ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
2130 ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries; 2003 ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
2131 ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup; 2004 ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;
2132 ppgtt->base.debug_dump = gen6_dump_ppgtt;
2133 2005
2134 ppgtt->base.vm.vma_ops.bind_vma = ppgtt_bind_vma; 2006 ppgtt->base.vm.vma_ops.bind_vma = ppgtt_bind_vma;
2135 ppgtt->base.vm.vma_ops.unbind_vma = ppgtt_unbind_vma; 2007 ppgtt->base.vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
@@ -2195,9 +2067,9 @@ int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
2195{ 2067{
2196 gtt_write_workarounds(dev_priv); 2068 gtt_write_workarounds(dev_priv);
2197 2069
2198 if (IS_GEN6(dev_priv)) 2070 if (IS_GEN(dev_priv, 6))
2199 gen6_ppgtt_enable(dev_priv); 2071 gen6_ppgtt_enable(dev_priv);
2200 else if (IS_GEN7(dev_priv)) 2072 else if (IS_GEN(dev_priv, 7))
2201 gen7_ppgtt_enable(dev_priv); 2073 gen7_ppgtt_enable(dev_priv);
2202 2074
2203 return 0; 2075 return 0;
@@ -2279,7 +2151,7 @@ static bool needs_idle_maps(struct drm_i915_private *dev_priv)
2279 /* Query intel_iommu to see if we need the workaround. Presumably that 2151 /* Query intel_iommu to see if we need the workaround. Presumably that
2280 * was loaded first. 2152 * was loaded first.
2281 */ 2153 */
2282 return IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_vtd_active(); 2154 return IS_GEN(dev_priv, 5) && IS_MOBILE(dev_priv) && intel_vtd_active();
2283} 2155}
2284 2156
2285static void gen6_check_faults(struct drm_i915_private *dev_priv) 2157static void gen6_check_faults(struct drm_i915_private *dev_priv)
@@ -2372,7 +2244,8 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
2372 DMA_ATTR_NO_WARN)) 2244 DMA_ATTR_NO_WARN))
2373 return 0; 2245 return 0;
2374 2246
2375 /* If the DMA remap fails, one cause can be that we have 2247 /*
2248 * If the DMA remap fails, one cause can be that we have
2376 * too many objects pinned in a small remapping table, 2249 * too many objects pinned in a small remapping table,
2377 * such as swiotlb. Incrementally purge all other objects and 2250 * such as swiotlb. Incrementally purge all other objects and
2378 * try again - if there are no more pages to remove from 2251 * try again - if there are no more pages to remove from
@@ -2382,8 +2255,7 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
2382 } while (i915_gem_shrink(to_i915(obj->base.dev), 2255 } while (i915_gem_shrink(to_i915(obj->base.dev),
2383 obj->base.size >> PAGE_SHIFT, NULL, 2256 obj->base.size >> PAGE_SHIFT, NULL,
2384 I915_SHRINK_BOUND | 2257 I915_SHRINK_BOUND |
2385 I915_SHRINK_UNBOUND | 2258 I915_SHRINK_UNBOUND));
2386 I915_SHRINK_ACTIVE));
2387 2259
2388 return -ENOSPC; 2260 return -ENOSPC;
2389} 2261}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 4874da09a3c4..e2360f16427a 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -413,8 +413,6 @@ struct i915_hw_ppgtt {
413 struct i915_page_directory_pointer pdp; /* GEN8+ */ 413 struct i915_page_directory_pointer pdp; /* GEN8+ */
414 struct i915_page_directory pd; /* GEN6-7 */ 414 struct i915_page_directory pd; /* GEN6-7 */
415 }; 415 };
416
417 void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
418}; 416};
419 417
420struct gen6_hw_ppgtt { 418struct gen6_hw_ppgtt {
diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c
index 0d0144b2104c..fddde1033e74 100644
--- a/drivers/gpu/drm/i915/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/i915_gem_internal.c
@@ -22,7 +22,6 @@
22 * 22 *
23 */ 23 */
24 24
25#include <drm/drmP.h>
26#include <drm/i915_drm.h> 25#include <drm/i915_drm.h>
27#include "i915_drv.h" 26#include "i915_drv.h"
28 27
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index a6dd7c46de0d..ff3da64470dd 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -29,7 +29,8 @@
29 29
30#include <drm/drm_vma_manager.h> 30#include <drm/drm_vma_manager.h>
31#include <drm/drm_gem.h> 31#include <drm/drm_gem.h>
32#include <drm/drmP.h> 32#include <drm/drm_file.h>
33#include <drm/drm_device.h>
33 34
34#include <drm/i915_drm.h> 35#include <drm/i915_drm.h>
35 36
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index ea90d3a0d511..6cc2b964c955 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -30,30 +30,27 @@
30#include <linux/pci.h> 30#include <linux/pci.h>
31#include <linux/dma-buf.h> 31#include <linux/dma-buf.h>
32#include <linux/vmalloc.h> 32#include <linux/vmalloc.h>
33#include <drm/drmP.h>
34#include <drm/i915_drm.h> 33#include <drm/i915_drm.h>
35 34
36#include "i915_drv.h" 35#include "i915_drv.h"
37#include "i915_trace.h" 36#include "i915_trace.h"
38 37
39static bool shrinker_lock(struct drm_i915_private *i915, bool *unlock) 38static bool shrinker_lock(struct drm_i915_private *i915,
39 unsigned int flags,
40 bool *unlock)
40{ 41{
41 switch (mutex_trylock_recursive(&i915->drm.struct_mutex)) { 42 struct mutex *m = &i915->drm.struct_mutex;
43
44 switch (mutex_trylock_recursive(m)) {
42 case MUTEX_TRYLOCK_RECURSIVE: 45 case MUTEX_TRYLOCK_RECURSIVE:
43 *unlock = false; 46 *unlock = false;
44 return true; 47 return true;
45 48
46 case MUTEX_TRYLOCK_FAILED: 49 case MUTEX_TRYLOCK_FAILED:
47 *unlock = false; 50 *unlock = false;
48 preempt_disable(); 51 if (flags & I915_SHRINK_ACTIVE &&
49 do { 52 mutex_lock_killable_nested(m, I915_MM_SHRINKER) == 0)
50 cpu_relax(); 53 *unlock = true;
51 if (mutex_trylock(&i915->drm.struct_mutex)) {
52 *unlock = true;
53 break;
54 }
55 } while (!need_resched());
56 preempt_enable();
57 return *unlock; 54 return *unlock;
58 55
59 case MUTEX_TRYLOCK_SUCCESS: 56 case MUTEX_TRYLOCK_SUCCESS:
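The change in this hunk replaces a preempt-disabled trylock spin with a policy split: non-blocking callers fail fast, while only an I915_SHRINK_ACTIVE caller may sleep for the mutex (killably, under a lockdep subclass). A userspace model of that policy using pthreads; it deliberately does not model the MUTEX_TRYLOCK_RECURSIVE case, killable sleeps, or lockdep subclasses, all of which are kernel-specific:

    #include <pthread.h>
    #include <stdbool.h>

    /* Cheap trylock first; only a caller that explicitly opts into
     * blocking (cf. I915_SHRINK_ACTIVE) waits for the mutex. */
    static bool shrinker_lock(pthread_mutex_t *m, bool may_block,
                              bool *unlock)
    {
        if (pthread_mutex_trylock(m) == 0) {
            *unlock = true;
            return true;
        }

        *unlock = false;
        if (may_block && pthread_mutex_lock(m) == 0)
            *unlock = true;

        return *unlock;
    }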
@@ -160,7 +157,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
160 unsigned long scanned = 0; 157 unsigned long scanned = 0;
161 bool unlock; 158 bool unlock;
162 159
163 if (!shrinker_lock(i915, &unlock)) 160 if (!shrinker_lock(i915, flags, &unlock))
164 return 0; 161 return 0;
165 162
166 /* 163 /*
@@ -357,7 +354,7 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
357 354
358 sc->nr_scanned = 0; 355 sc->nr_scanned = 0;
359 356
360 if (!shrinker_lock(i915, &unlock)) 357 if (!shrinker_lock(i915, 0, &unlock))
361 return SHRINK_STOP; 358 return SHRINK_STOP;
362 359
363 freed = i915_gem_shrink(i915, 360 freed = i915_gem_shrink(i915,
@@ -388,31 +385,6 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
388 return sc->nr_scanned ? freed : SHRINK_STOP; 385 return sc->nr_scanned ? freed : SHRINK_STOP;
389} 386}
390 387
391static bool
392shrinker_lock_uninterruptible(struct drm_i915_private *i915, bool *unlock,
393 int timeout_ms)
394{
395 unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms);
396
397 do {
398 if (i915_gem_wait_for_idle(i915,
399 0, MAX_SCHEDULE_TIMEOUT) == 0 &&
400 shrinker_lock(i915, unlock))
401 break;
402
403 schedule_timeout_killable(1);
404 if (fatal_signal_pending(current))
405 return false;
406
407 if (time_after(jiffies, timeout)) {
408 pr_err("Unable to lock GPU to purge memory.\n");
409 return false;
410 }
411 } while (1);
412
413 return true;
414}
415
416static int 388static int
417i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) 389i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
418{ 390{
@@ -421,7 +393,11 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
421 struct drm_i915_gem_object *obj; 393 struct drm_i915_gem_object *obj;
422 unsigned long unevictable, bound, unbound, freed_pages; 394 unsigned long unevictable, bound, unbound, freed_pages;
423 395
424 freed_pages = i915_gem_shrink_all(i915); 396 intel_runtime_pm_get(i915);
397 freed_pages = i915_gem_shrink(i915, -1UL, NULL,
398 I915_SHRINK_BOUND |
399 I915_SHRINK_UNBOUND);
400 intel_runtime_pm_put(i915);
425 401
426 /* Because we may be allocating inside our own driver, we cannot 402 /* Because we may be allocating inside our own driver, we cannot
427 * assert that there are no objects with pinned pages that are not 403 * assert that there are no objects with pinned pages that are not
@@ -447,10 +423,6 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
447 pr_info("Purging GPU memory, %lu pages freed, " 423 pr_info("Purging GPU memory, %lu pages freed, "
448 "%lu pages still pinned.\n", 424 "%lu pages still pinned.\n",
449 freed_pages, unevictable); 425 freed_pages, unevictable);
450 if (unbound || bound)
451 pr_err("%lu and %lu pages still available in the "
452 "bound and unbound GPU page lists.\n",
453 bound, unbound);
454 426
455 *(unsigned long *)ptr += freed_pages; 427 *(unsigned long *)ptr += freed_pages;
456 return NOTIFY_DONE; 428 return NOTIFY_DONE;
@@ -464,23 +436,20 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
464 struct i915_vma *vma, *next; 436 struct i915_vma *vma, *next;
465 unsigned long freed_pages = 0; 437 unsigned long freed_pages = 0;
466 bool unlock; 438 bool unlock;
467 int ret;
468 439
469 if (!shrinker_lock_uninterruptible(i915, &unlock, 5000)) 440 if (!shrinker_lock(i915, 0, &unlock))
470 return NOTIFY_DONE; 441 return NOTIFY_DONE;
471 442
472 /* Force everything onto the inactive lists */ 443 /* Force everything onto the inactive lists */
473 ret = i915_gem_wait_for_idle(i915, 444 if (i915_gem_wait_for_idle(i915,
474 I915_WAIT_LOCKED, 445 I915_WAIT_LOCKED,
475 MAX_SCHEDULE_TIMEOUT); 446 MAX_SCHEDULE_TIMEOUT))
476 if (ret)
477 goto out; 447 goto out;
478 448
479 intel_runtime_pm_get(i915); 449 intel_runtime_pm_get(i915);
480 freed_pages += i915_gem_shrink(i915, -1UL, NULL, 450 freed_pages += i915_gem_shrink(i915, -1UL, NULL,
481 I915_SHRINK_BOUND | 451 I915_SHRINK_BOUND |
482 I915_SHRINK_UNBOUND | 452 I915_SHRINK_UNBOUND |
483 I915_SHRINK_ACTIVE |
484 I915_SHRINK_VMAPS); 453 I915_SHRINK_VMAPS);
485 intel_runtime_pm_put(i915); 454 intel_runtime_pm_put(i915);
486 455
@@ -533,13 +502,40 @@ void i915_gem_shrinker_unregister(struct drm_i915_private *i915)
533 unregister_shrinker(&i915->mm.shrinker); 502 unregister_shrinker(&i915->mm.shrinker);
534} 503}
535 504
536void i915_gem_shrinker_taints_mutex(struct mutex *mutex) 505void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
506 struct mutex *mutex)
537{ 507{
508 bool unlock = false;
509
538 if (!IS_ENABLED(CONFIG_LOCKDEP)) 510 if (!IS_ENABLED(CONFIG_LOCKDEP))
539 return; 511 return;
540 512
513 if (!lockdep_is_held_type(&i915->drm.struct_mutex, -1)) {
514 mutex_acquire(&i915->drm.struct_mutex.dep_map,
515 I915_MM_NORMAL, 0, _RET_IP_);
516 unlock = true;
517 }
518
541 fs_reclaim_acquire(GFP_KERNEL); 519 fs_reclaim_acquire(GFP_KERNEL);
542 mutex_lock(mutex); 520
543 mutex_unlock(mutex); 521 /*
522 * As we invariably rely on the struct_mutex within the shrinker,
523 * but have a complicated recursion dance, taint all the mutexes used
524 * within the shrinker with the struct_mutex. For completeness, we
	525	 * taint with all subclasses of struct_mutex, even though we should
526 * only need tainting by I915_MM_NORMAL to catch possible ABBA
527 * deadlocks from using struct_mutex inside @mutex.
528 */
529 mutex_acquire(&i915->drm.struct_mutex.dep_map,
530 I915_MM_SHRINKER, 0, _RET_IP_);
531
532 mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_);
533 mutex_release(&mutex->dep_map, 0, _RET_IP_);
534
535 mutex_release(&i915->drm.struct_mutex.dep_map, 0, _RET_IP_);
536
544 fs_reclaim_release(GFP_KERNEL); 537 fs_reclaim_release(GFP_KERNEL);
538
539 if (unlock)
540 mutex_release(&i915->drm.struct_mutex.dep_map, 0, _RET_IP_);
545} 541}
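The acquire/release pairs added in i915_gem_shrinker_taints_mutex() above do no real locking; they exist to teach lockdep, at init time, that struct_mutex nests outside the tainted mutex, so any later inversion is flagged before it can deadlock against the shrinker. A runnable toy version of that idea, with a hand-rolled order tracker standing in for lockdep (everything here is invented for illustration):

    #include <assert.h>
    #include <stdbool.h>

    enum lock_id { STRUCT_MUTEX, VM_MUTEX, NR_LOCKS };

    static bool order[NR_LOCKS][NR_LOCKS]; /* order[a][b]: a before b */
    static bool held[NR_LOCKS];

    static void acquire(enum lock_id id)
    {
        for (int other = 0; other < NR_LOCKS; other++)
            if (held[other]) {
                /* Inverting a known edge means possible ABBA deadlock. */
                assert(!order[id][other]);
                order[other][id] = true;
            }
        held[id] = true;
    }

    static void release(enum lock_id id) { held[id] = false; }

    int main(void)
    {
        /* Priming pass, mirroring the taint in the hunk above. */
        acquire(STRUCT_MUTEX);
        acquire(VM_MUTEX);
        release(VM_MUTEX);
        release(STRUCT_MUTEX);

        /* From now on, taking VM_MUTEX and then STRUCT_MUTEX would
         * trip the assert, which is the inversion lockdep reports. */
        return 0;
    }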
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index f29a7ff7c362..9df615eea2d8 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -26,7 +26,6 @@
26 * 26 *
27 */ 27 */
28 28
29#include <drm/drmP.h>
30#include <drm/i915_drm.h> 29#include <drm/i915_drm.h>
31#include "i915_drv.h" 30#include "i915_drv.h"
32 31
@@ -102,7 +101,7 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
102 resource_size_t ggtt_start; 101 resource_size_t ggtt_start;
103 102
104 ggtt_start = I915_READ(PGTBL_CTL); 103 ggtt_start = I915_READ(PGTBL_CTL);
105 if (IS_GEN4(dev_priv)) 104 if (IS_GEN(dev_priv, 4))
106 ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) | 105 ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
107 (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28; 106 (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
108 else 107 else
@@ -156,7 +155,7 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
156 * GEN3 firmware likes to smash pci bridges into the stolen 155 * GEN3 firmware likes to smash pci bridges into the stolen
157 * range. Apparently this works. 156 * range. Apparently this works.
158 */ 157 */
159 if (r == NULL && !IS_GEN3(dev_priv)) { 158 if (r == NULL && !IS_GEN(dev_priv, 3)) {
160 DRM_ERROR("conflict detected with stolen region: %pR\n", 159 DRM_ERROR("conflict detected with stolen region: %pR\n",
161 dsm); 160 dsm);
162 161
@@ -194,7 +193,8 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
194 * Whether ILK really reuses the ELK register for this is unclear. 193 * Whether ILK really reuses the ELK register for this is unclear.
195 * Let's see if we catch anyone with this supposedly enabled on ILK. 194 * Let's see if we catch anyone with this supposedly enabled on ILK.
196 */ 195 */
197 WARN(IS_GEN5(dev_priv), "ILK stolen reserved found? 0x%08x\n", reg_val); 196 WARN(IS_GEN(dev_priv, 5), "ILK stolen reserved found? 0x%08x\n",
197 reg_val);
198 198
199 if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK)) 199 if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
200 return; 200 return;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index d9dc9df523b5..16cc9ddbce34 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -27,7 +27,6 @@
27 27
28#include <linux/string.h> 28#include <linux/string.h>
29#include <linux/bitops.h> 29#include <linux/bitops.h>
30#include <drm/drmP.h>
31#include <drm/i915_drm.h> 30#include <drm/i915_drm.h>
32#include "i915_drv.h" 31#include "i915_drv.h"
33 32
@@ -87,7 +86,7 @@ u32 i915_gem_fence_size(struct drm_i915_private *i915,
87 } 86 }
88 87
89 /* Previous chips need a power-of-two fence region when tiling */ 88 /* Previous chips need a power-of-two fence region when tiling */
90 if (IS_GEN3(i915)) 89 if (IS_GEN(i915, 3))
91 ggtt_size = 1024*1024; 90 ggtt_size = 1024*1024;
92 else 91 else
93 ggtt_size = 512*1024; 92 ggtt_size = 512*1024;
@@ -162,7 +161,7 @@ i915_tiling_ok(struct drm_i915_gem_object *obj,
162 return false; 161 return false;
163 } 162 }
164 163
165 if (IS_GEN2(i915) || 164 if (IS_GEN(i915, 2) ||
166 (tiling == I915_TILING_Y && HAS_128_BYTE_Y_TILING(i915))) 165 (tiling == I915_TILING_Y && HAS_128_BYTE_Y_TILING(i915)))
167 tile_width = 128; 166 tile_width = 128;
168 else 167 else
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 9558582c105e..1fb6a7bb5054 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -22,7 +22,6 @@
22 * 22 *
23 */ 23 */
24 24
25#include <drm/drmP.h>
26#include <drm/i915_drm.h> 25#include <drm/i915_drm.h>
27#include "i915_drv.h" 26#include "i915_drv.h"
28#include "i915_trace.h" 27#include "i915_trace.h"
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 07465123c166..5eaf586c4d48 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -594,13 +594,14 @@ static void print_error_obj(struct drm_i915_error_state_buf *m,
594 594
595static void err_print_capabilities(struct drm_i915_error_state_buf *m, 595static void err_print_capabilities(struct drm_i915_error_state_buf *m,
596 const struct intel_device_info *info, 596 const struct intel_device_info *info,
597 const struct intel_runtime_info *runtime,
597 const struct intel_driver_caps *caps) 598 const struct intel_driver_caps *caps)
598{ 599{
599 struct drm_printer p = i915_error_printer(m); 600 struct drm_printer p = i915_error_printer(m);
600 601
601 intel_device_info_dump_flags(info, &p); 602 intel_device_info_dump_flags(info, &p);
602 intel_driver_caps_print(caps, &p); 603 intel_driver_caps_print(caps, &p);
603 intel_device_info_dump_topology(&info->sseu, &p); 604 intel_device_info_dump_topology(&runtime->sseu, &p);
604} 605}
605 606
606static void err_print_params(struct drm_i915_error_state_buf *m, 607static void err_print_params(struct drm_i915_error_state_buf *m,
@@ -664,7 +665,9 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
664 665
665 if (*error->error_msg) 666 if (*error->error_msg)
666 err_printf(m, "%s\n", error->error_msg); 667 err_printf(m, "%s\n", error->error_msg);
667 err_printf(m, "Kernel: %s\n", init_utsname()->release); 668 err_printf(m, "Kernel: %s %s\n",
669 init_utsname()->release,
670 init_utsname()->machine);
668 ts = ktime_to_timespec64(error->time); 671 ts = ktime_to_timespec64(error->time);
669 err_printf(m, "Time: %lld s %ld us\n", 672 err_printf(m, "Time: %lld s %ld us\n",
670 (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC); 673 (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
@@ -735,7 +738,7 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
735 err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg); 738 err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
736 } 739 }
737 740
738 if (IS_GEN7(m->i915)) 741 if (IS_GEN(m->i915, 7))
739 err_printf(m, "ERR_INT: 0x%08x\n", error->err_int); 742 err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
740 743
741 for (i = 0; i < ARRAY_SIZE(error->engine); i++) { 744 for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
@@ -844,7 +847,8 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
844 if (error->display) 847 if (error->display)
845 intel_display_print_error_state(m, error->display); 848 intel_display_print_error_state(m, error->display);
846 849
847 err_print_capabilities(m, &error->device_info, &error->driver_caps); 850 err_print_capabilities(m, &error->device_info, &error->runtime_info,
851 &error->driver_caps);
848 err_print_params(m, &error->params); 852 err_print_params(m, &error->params);
849 err_print_uc(m, &error->uc); 853 err_print_uc(m, &error->uc);
850} 854}
@@ -963,17 +967,10 @@ static void i915_error_object_free(struct drm_i915_error_object *obj)
963 kfree(obj); 967 kfree(obj);
964} 968}
965 969
966static __always_inline void free_param(const char *type, void *x)
967{
968 if (!__builtin_strcmp(type, "char *"))
969 kfree(*(void **)x);
970}
971 970
972static void cleanup_params(struct i915_gpu_state *error) 971static void cleanup_params(struct i915_gpu_state *error)
973{ 972{
974#define FREE(T, x, ...) free_param(#T, &error->params.x); 973 i915_params_free(&error->params);
975 I915_PARAMS_FOR_EACH(FREE);
976#undef FREE
977} 974}
978 975
979static void cleanup_uc_state(struct i915_gpu_state *error) 976static void cleanup_uc_state(struct i915_gpu_state *error)
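The free_param() helper deleted above is a neat trick worth noting: the X-macro stringifies each parameter's declared type, and because __builtin_strcmp on two string literals folds at compile time, only the char* members end up with a real kfree() (the matching dup_param() below does the same for kstrdup()). This merge moves that logic into i915_params_copy()/i915_params_free() helpers instead. The trick in isolation, as a userspace stand-in with free() and an invented params list:

    #include <stdlib.h>
    #include <string.h>

    /* Compile-time dispatch on the stringified type (GCC/Clang). */
    static inline void free_param(const char *type, void *x)
    {
        if (!__builtin_strcmp(type, "char *"))
            free(*(void **)x);
    }

    struct params {
        int enable_guc;       /* left untouched: not a char* */
        char *firmware_path;  /* freed */
    };

    #define PARAMS_FOR_EACH(F) \
        F(int, enable_guc)     \
        F(char *, firmware_path)

    static void params_free(struct params *p)
    {
    #define FREE(T, x) free_param(#T, &p->x);
        PARAMS_FOR_EACH(FREE)
    #undef FREE
    }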
@@ -1037,7 +1034,7 @@ i915_error_object_create(struct drm_i915_private *i915,
1037 dma_addr_t dma; 1034 dma_addr_t dma;
1038 int ret; 1035 int ret;
1039 1036
1040 if (!vma) 1037 if (!vma || !vma->pages)
1041 return NULL; 1038 return NULL;
1042 1039
1043 num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT; 1040 num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT;
@@ -1314,7 +1311,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
1314 if (!HWS_NEEDS_PHYSICAL(dev_priv)) { 1311 if (!HWS_NEEDS_PHYSICAL(dev_priv)) {
1315 i915_reg_t mmio; 1312 i915_reg_t mmio;
1316 1313
1317 if (IS_GEN7(dev_priv)) { 1314 if (IS_GEN(dev_priv, 7)) {
1318 switch (engine->id) { 1315 switch (engine->id) {
1319 default: 1316 default:
1320 case RCS: 1317 case RCS:
@@ -1330,7 +1327,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
1330 mmio = VEBOX_HWS_PGA_GEN7; 1327 mmio = VEBOX_HWS_PGA_GEN7;
1331 break; 1328 break;
1332 } 1329 }
1333 } else if (IS_GEN6(engine->i915)) { 1330 } else if (IS_GEN(engine->i915, 6)) {
1334 mmio = RING_HWS_PGA_GEN6(engine->mmio_base); 1331 mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
1335 } else { 1332 } else {
1336 /* XXX: gen8 returns to sanity */ 1333 /* XXX: gen8 returns to sanity */
@@ -1352,10 +1349,10 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
1352 1349
1353 ee->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine)); 1350 ee->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
1354 1351
1355 if (IS_GEN6(dev_priv)) 1352 if (IS_GEN(dev_priv, 6))
1356 ee->vm_info.pp_dir_base = 1353 ee->vm_info.pp_dir_base =
1357 I915_READ(RING_PP_DIR_BASE_READ(engine)); 1354 I915_READ(RING_PP_DIR_BASE_READ(engine));
1358 else if (IS_GEN7(dev_priv)) 1355 else if (IS_GEN(dev_priv, 7))
1359 ee->vm_info.pp_dir_base = 1356 ee->vm_info.pp_dir_base =
1360 I915_READ(RING_PP_DIR_BASE(engine)); 1357 I915_READ(RING_PP_DIR_BASE(engine));
1361 else if (INTEL_GEN(dev_priv) >= 8) 1358 else if (INTEL_GEN(dev_priv) >= 8)
@@ -1725,7 +1722,7 @@ static void capture_reg_state(struct i915_gpu_state *error)
1725 error->forcewake = I915_READ_FW(FORCEWAKE_VLV); 1722 error->forcewake = I915_READ_FW(FORCEWAKE_VLV);
1726 } 1723 }
1727 1724
1728 if (IS_GEN7(dev_priv)) 1725 if (IS_GEN(dev_priv, 7))
1729 error->err_int = I915_READ(GEN7_ERR_INT); 1726 error->err_int = I915_READ(GEN7_ERR_INT);
1730 1727
1731 if (INTEL_GEN(dev_priv) >= 8) { 1728 if (INTEL_GEN(dev_priv) >= 8) {
@@ -1733,7 +1730,7 @@ static void capture_reg_state(struct i915_gpu_state *error)
1733 error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1); 1730 error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
1734 } 1731 }
1735 1732
1736 if (IS_GEN6(dev_priv)) { 1733 if (IS_GEN(dev_priv, 6)) {
1737 error->forcewake = I915_READ_FW(FORCEWAKE); 1734 error->forcewake = I915_READ_FW(FORCEWAKE);
1738 error->gab_ctl = I915_READ(GAB_CTL); 1735 error->gab_ctl = I915_READ(GAB_CTL);
1739 error->gfx_mode = I915_READ(GFX_MODE); 1736 error->gfx_mode = I915_READ(GFX_MODE);
@@ -1753,7 +1750,7 @@ static void capture_reg_state(struct i915_gpu_state *error)
1753 error->ccid = I915_READ(CCID); 1750 error->ccid = I915_READ(CCID);
1754 1751
1755 /* 3: Feature specific registers */ 1752 /* 3: Feature specific registers */
1756 if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) { 1753 if (IS_GEN_RANGE(dev_priv, 6, 7)) {
1757 error->gam_ecochk = I915_READ(GAM_ECOCHK); 1754 error->gam_ecochk = I915_READ(GAM_ECOCHK);
1758 error->gac_eco = I915_READ(GAC_ECO_BITS); 1755 error->gac_eco = I915_READ(GAC_ECO_BITS);
1759 } 1756 }
@@ -1777,7 +1774,7 @@ static void capture_reg_state(struct i915_gpu_state *error)
1777 error->ier = I915_READ(DEIER); 1774 error->ier = I915_READ(DEIER);
1778 error->gtier[0] = I915_READ(GTIER); 1775 error->gtier[0] = I915_READ(GTIER);
1779 error->ngtier = 1; 1776 error->ngtier = 1;
1780 } else if (IS_GEN2(dev_priv)) { 1777 } else if (IS_GEN(dev_priv, 2)) {
1781 error->ier = I915_READ16(IER); 1778 error->ier = I915_READ16(IER);
1782 } else if (!IS_VALLEYVIEW(dev_priv)) { 1779 } else if (!IS_VALLEYVIEW(dev_priv)) {
1783 error->ier = I915_READ(IER); 1780 error->ier = I915_READ(IER);
@@ -1831,21 +1828,15 @@ static void capture_gen_state(struct i915_gpu_state *error)
1831 memcpy(&error->device_info, 1828 memcpy(&error->device_info,
1832 INTEL_INFO(i915), 1829 INTEL_INFO(i915),
1833 sizeof(error->device_info)); 1830 sizeof(error->device_info));
1831 memcpy(&error->runtime_info,
1832 RUNTIME_INFO(i915),
1833 sizeof(error->runtime_info));
1834 error->driver_caps = i915->caps; 1834 error->driver_caps = i915->caps;
1835} 1835}
1836 1836
1837static __always_inline void dup_param(const char *type, void *x)
1838{
1839 if (!__builtin_strcmp(type, "char *"))
1840 *(void **)x = kstrdup(*(void **)x, GFP_ATOMIC);
1841}
1842
1843static void capture_params(struct i915_gpu_state *error) 1837static void capture_params(struct i915_gpu_state *error)
1844{ 1838{
1845 error->params = i915_modparams; 1839 i915_params_copy(&error->params, &i915_modparams);
1846#define DUP(T, x, ...) dup_param(#T, &error->params.x);
1847 I915_PARAMS_FOR_EACH(DUP);
1848#undef DUP
1849} 1840}
1850 1841
1851static unsigned long capture_find_epoch(const struct i915_gpu_state *error) 1842static unsigned long capture_find_epoch(const struct i915_gpu_state *error)
@@ -1907,9 +1898,16 @@ i915_capture_gpu_state(struct drm_i915_private *i915)
1907{ 1898{
1908 struct i915_gpu_state *error; 1899 struct i915_gpu_state *error;
1909 1900
1901 /* Check if GPU capture has been disabled */
1902 error = READ_ONCE(i915->gpu_error.first_error);
1903 if (IS_ERR(error))
1904 return error;
1905
1910 error = kzalloc(sizeof(*error), GFP_ATOMIC); 1906 error = kzalloc(sizeof(*error), GFP_ATOMIC);
1911 if (!error) 1907 if (!error) {
1912 return NULL; 1908 i915_disable_error_state(i915, -ENOMEM);
1909 return ERR_PTR(-ENOMEM);
1910 }
1913 1911
1914 kref_init(&error->ref); 1912 kref_init(&error->ref);
1915 error->i915 = i915; 1913 error->i915 = i915;
@@ -1945,11 +1943,8 @@ void i915_capture_error_state(struct drm_i915_private *i915,
1945 return; 1943 return;
1946 1944
1947 error = i915_capture_gpu_state(i915); 1945 error = i915_capture_gpu_state(i915);
1948 if (!error) { 1946 if (IS_ERR(error))
1949 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
1950 i915_disable_error_state(i915, -ENOMEM);
1951 return; 1947 return;
1952 }
1953 1948
1954 i915_error_capture_msg(i915, error, engine_mask, error_msg); 1949 i915_error_capture_msg(i915, error, engine_mask, error_msg);
1955 DRM_INFO("%s\n", error->error_msg); 1950 DRM_INFO("%s\n", error->error_msg);
@@ -1987,7 +1982,7 @@ i915_first_error_state(struct drm_i915_private *i915)
1987 1982
1988 spin_lock_irq(&i915->gpu_error.lock); 1983 spin_lock_irq(&i915->gpu_error.lock);
1989 error = i915->gpu_error.first_error; 1984 error = i915->gpu_error.first_error;
1990 if (error) 1985 if (!IS_ERR_OR_NULL(error))
1991 i915_gpu_state_get(error); 1986 i915_gpu_state_get(error);
1992 spin_unlock_irq(&i915->gpu_error.lock); 1987 spin_unlock_irq(&i915->gpu_error.lock);
1993 1988
@@ -2000,10 +1995,11 @@ void i915_reset_error_state(struct drm_i915_private *i915)
2000 1995
2001 spin_lock_irq(&i915->gpu_error.lock); 1996 spin_lock_irq(&i915->gpu_error.lock);
2002 error = i915->gpu_error.first_error; 1997 error = i915->gpu_error.first_error;
2003 i915->gpu_error.first_error = NULL; 1998 if (error != ERR_PTR(-ENODEV)) /* if disabled, always disabled */
1999 i915->gpu_error.first_error = NULL;
2004 spin_unlock_irq(&i915->gpu_error.lock); 2000 spin_unlock_irq(&i915->gpu_error.lock);
2005 2001
2006 if (!IS_ERR(error)) 2002 if (!IS_ERR_OR_NULL(error))
2007 i915_gpu_state_put(error); 2003 i915_gpu_state_put(error);
2008} 2004}
2009 2005
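The changes in this file make "error capture disabled" a sticky state by parking an ERR_PTR()-encoded errno in gpu_error.first_error: the same pointer slot now holds NULL (no state), a real capture, or an encoded error that survives i915_reset_error_state(), which is why every reader gains IS_ERR()-style checks. A userspace model of that pointer-encoding scheme, mirroring the kernel's ERR_PTR()/IS_ERR() helpers:

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long err) { return (void *)err; }
    static inline long PTR_ERR(const void *p) { return (long)p; }
    static inline int IS_ERR(const void *p)
    {
        /* Top MAX_ERRNO addresses are never valid pointers. */
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    static void *first_error; /* NULL, a capture, or ERR_PTR(-errno) */

    static void disable_capture(int err)
    {
        first_error = ERR_PTR(-err); /* sticky: reset leaves it alone */
    }

    int main(void)
    {
        disable_capture(ENODEV);
        if (IS_ERR(first_error))
            printf("capture disabled: %ld\n", PTR_ERR(first_error));
        return 0;
    }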
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index ff2652bbb0b0..6d9f45468ac1 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -45,6 +45,7 @@ struct i915_gpu_state {
45 u32 reset_count; 45 u32 reset_count;
46 u32 suspend_count; 46 u32 suspend_count;
47 struct intel_device_info device_info; 47 struct intel_device_info device_info;
48 struct intel_runtime_info runtime_info;
48 struct intel_driver_caps driver_caps; 49 struct intel_driver_caps driver_caps;
49 struct i915_params params; 50 struct i915_params params;
50 51
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index e869daf9c8a9..73c3e8f519e8 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -28,7 +28,6 @@
28 */ 28 */
29#include <linux/compat.h> 29#include <linux/compat.h>
30 30
31#include <drm/drmP.h>
32#include <drm/i915_drm.h> 31#include <drm/i915_drm.h>
33#include "i915_drv.h" 32#include "i915_drv.h"
34 33
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index d447d7d508f4..288b0662f7b7 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -31,7 +31,6 @@
 #include <linux/sysrq.h>
 #include <linux/slab.h>
 #include <linux/circ_buf.h>
-#include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 #include "i915_trace.h"
@@ -950,7 +949,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
 		vtotal /= 2;
 
-	if (IS_GEN2(dev_priv))
+	if (IS_GEN(dev_priv, 2))
 		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
 	else
 		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
@@ -1030,7 +1029,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
 	if (stime)
 		*stime = ktime_get();
 
-	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
+	if (IS_GEN(dev_priv, 2) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
 		/* No obvious pixelcount register. Only query vertical
 		 * scanout position from Display scan line register.
 		 */
@@ -1090,7 +1089,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
 	else
 		position += vtotal - vbl_end;
 
-	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
+	if (IS_GEN(dev_priv, 2) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
 		*vpos = position;
 		*hpos = 0;
 	} else {
@@ -1189,13 +1188,6 @@ static void notify_ring(struct intel_engine_cs *engine)
 		rq = i915_request_get(waiter);
 
 		tsk = wait->tsk;
-	} else {
-		if (engine->irq_seqno_barrier &&
-		    i915_seqno_passed(seqno, wait->seqno - 1)) {
-			set_bit(ENGINE_IRQ_BREADCRUMB,
-				&engine->irq_posted);
-			tsk = wait->tsk;
-		}
 	}
 
 	engine->breadcrumbs.irq_count++;
@@ -2547,7 +2539,7 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
 		I915_WRITE(SDEIIR, pch_iir);
 	}
 
-	if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT)
+	if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
 		ironlake_rps_change_irq_handler(dev_priv);
 }
 
@@ -3243,7 +3235,7 @@ void i915_clear_error_registers(struct drm_i915_private *dev_priv)
 {
 	u32 eir;
 
-	if (!IS_GEN2(dev_priv))
+	if (!IS_GEN(dev_priv, 2))
 		I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER));
 
 	if (INTEL_GEN(dev_priv) < 4)
@@ -3586,11 +3578,8 @@ static void ironlake_irq_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
-	if (IS_GEN5(dev_priv))
-		I915_WRITE(HWSTAM, 0xffffffff);
-
 	GEN3_IRQ_RESET(DE);
-	if (IS_GEN7(dev_priv))
+	if (IS_GEN(dev_priv, 7))
 		I915_WRITE(GEN7_ERR_INT, 0xffffffff);
 
 	if (IS_HASWELL(dev_priv)) {
@@ -4045,7 +4034,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
 	}
 
 	gt_irqs |= GT_RENDER_USER_INTERRUPT;
-	if (IS_GEN5(dev_priv)) {
+	if (IS_GEN(dev_priv, 5)) {
 		gt_irqs |= ILK_BSD_USER_INTERRUPT;
 	} else {
 		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
@@ -4183,9 +4172,6 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
 		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
 	};
 
-	if (HAS_L3_DPF(dev_priv))
-		gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
-
 	dev_priv->pm_ier = 0x0;
 	dev_priv->pm_imr = ~dev_priv->pm_ier;
 	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
@@ -4368,8 +4354,6 @@ static void i8xx_irq_reset(struct drm_device *dev)
 
 	i9xx_pipestat_irq_reset(dev_priv);
 
-	I915_WRITE16(HWSTAM, 0xffff);
-
 	GEN2_IRQ_RESET();
 }
 
@@ -4537,8 +4521,6 @@ static void i915_irq_reset(struct drm_device *dev)
 
 	i9xx_pipestat_irq_reset(dev_priv);
 
-	I915_WRITE(HWSTAM, 0xffffffff);
-
 	GEN3_IRQ_RESET();
 }
 
@@ -4648,8 +4630,6 @@ static void i965_irq_reset(struct drm_device *dev)
 
 	i9xx_pipestat_irq_reset(dev_priv);
 
-	I915_WRITE(HWSTAM, 0xffffffff);
-
 	GEN3_IRQ_RESET();
 }
 
@@ -4836,7 +4816,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
 	if (INTEL_GEN(dev_priv) >= 8)
 		rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
 
-	if (IS_GEN2(dev_priv)) {
+	if (IS_GEN(dev_priv, 2)) {
 		/* Gen2 doesn't have a hardware frame counter */
 		dev->max_vblank_count = 0;
 	} else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
@@ -4852,7 +4832,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
 	 * Gen2 doesn't have a hardware frame counter and so depends on
 	 * vblank interrupts to produce sane vblank seuquence numbers.
 	 */
-	if (!IS_GEN2(dev_priv))
+	if (!IS_GEN(dev_priv, 2))
 		dev->vblank_disable_immediate = true;
 
 	/* Most platforms treat the display irq block as an always-on
@@ -4924,14 +4904,14 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
 		dev->driver->disable_vblank = ironlake_disable_vblank;
 		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
 	} else {
-		if (IS_GEN2(dev_priv)) {
+		if (IS_GEN(dev_priv, 2)) {
 			dev->driver->irq_preinstall = i8xx_irq_reset;
 			dev->driver->irq_postinstall = i8xx_irq_postinstall;
 			dev->driver->irq_handler = i8xx_irq_handler;
 			dev->driver->irq_uninstall = i8xx_irq_reset;
 			dev->driver->enable_vblank = i8xx_enable_vblank;
 			dev->driver->disable_vblank = i8xx_disable_vblank;
-		} else if (IS_GEN3(dev_priv)) {
+		} else if (IS_GEN(dev_priv, 3)) {
 			dev->driver->irq_preinstall = i915_irq_reset;
 			dev->driver->irq_postinstall = i915_irq_postinstall;
 			dev->driver->irq_uninstall = i915_irq_reset;
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 2e0356561839..9f0539bdaa39 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -77,7 +77,7 @@ i915_param_named(error_capture, bool, 0600,
 	"triaging and debugging hangs.");
 #endif
 
-i915_param_named_unsafe(enable_hangcheck, bool, 0644,
+i915_param_named_unsafe(enable_hangcheck, bool, 0600,
 	"Periodically check GPU activity for detecting hangs. "
 	"WARNING: Disabling this can cause system wide hangs. "
 	"(default: true)");
@@ -203,3 +203,33 @@ void i915_params_dump(const struct i915_params *params, struct drm_printer *p)
 	I915_PARAMS_FOR_EACH(PRINT);
 #undef PRINT
 }
+
+static __always_inline void dup_param(const char *type, void *x)
+{
+	if (!__builtin_strcmp(type, "char *"))
+		*(void **)x = kstrdup(*(void **)x, GFP_ATOMIC);
+}
+
+void i915_params_copy(struct i915_params *dest, const struct i915_params *src)
+{
+	*dest = *src;
+#define DUP(T, x, ...) dup_param(#T, &dest->x);
+	I915_PARAMS_FOR_EACH(DUP);
+#undef DUP
+}
+
+static __always_inline void free_param(const char *type, void *x)
+{
+	if (!__builtin_strcmp(type, "char *")) {
+		kfree(*(void **)x);
+		*(void **)x = NULL;
+	}
+}
+
+/* free the allocated members, *not* the passed in params itself */
+void i915_params_free(struct i915_params *params)
+{
+#define FREE(T, x, ...) free_param(#T, &params->x);
+	I915_PARAMS_FOR_EACH(FREE);
+#undef FREE
+}
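The copy/free helpers above key off the stringified type argument: each X-macro expansion passes #T, and __builtin_strcmp() against "char *" is folded at compile time, so only string parameters get deep-copied with kstrdup() or freed. A reduced sketch of that dispatch (dup_one/free_one are illustrative names, not the driver's):

#include <linux/slab.h>
#include <linux/string.h>

static __always_inline void dup_one(const char *type, void *x)
{
	/* constant-folded: only "char *" members take this branch */
	if (!__builtin_strcmp(type, "char *"))
		*(void **)x = kstrdup(*(void **)x, GFP_KERNEL);
}

static __always_inline void free_one(const char *type, void *x)
{
	if (!__builtin_strcmp(type, "char *")) {
		kfree(*(void **)x);
		*(void **)x = NULL;	/* guard against a double free */
	}
}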
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 7e56c516c815..6efcf330bdab 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -33,6 +33,15 @@ struct drm_printer;
 #define ENABLE_GUC_SUBMISSION		BIT(0)
 #define ENABLE_GUC_LOAD_HUC		BIT(1)
 
+/*
+ * Invoke param, a function-like macro, for each i915 param, with arguments:
+ *
+ * param(type, name, value)
+ *
+ * type: parameter type, one of {bool, int, unsigned int, char *}
+ * name: name of the parameter
+ * value: initial/default value of the parameter
+ */
 #define I915_PARAMS_FOR_EACH(param) \
 	param(char *, vbt_firmware, NULL) \
 	param(int, modeset, -1) \
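Each consumer stamps the list out with its own one-line macro, as the new comment describes. For example, expanding param(type, name, value) into member declarations is how a params struct can be generated from the same list; a hedged two-entry illustration (DEMO_PARAMS is hypothetical, the entries are copied from the list above):

#define DEMO_PARAMS(param) \
	param(char *, vbt_firmware, NULL) \
	param(int, modeset, -1)

struct demo_params {
#define MEMBER(T, x, ...) T x;
	DEMO_PARAMS(MEMBER)
#undef MEMBER
};
/* expands to: struct demo_params { char *vbt_firmware; int modeset; }; */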
@@ -78,6 +87,8 @@ struct i915_params {
 extern struct i915_params i915_modparams __read_mostly;
 
 void i915_params_dump(const struct i915_params *params, struct drm_printer *p);
+void i915_params_copy(struct i915_params *dest, const struct i915_params *src);
+void i915_params_free(struct i915_params *params);
 
 #endif
 
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 6350db5503cd..dd4aff2b256e 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -82,6 +82,7 @@
 	.display.has_overlay = 1, \
 	.display.overlay_needs_physical = 1, \
 	.display.has_gmch_display = 1, \
+	.gpu_reset_clobbers_display = true, \
 	.hws_needs_physical = 1, \
 	.unfenced_needs_alignment = 1, \
 	.ring_mask = RENDER_RING, \
@@ -122,6 +123,7 @@ static const struct intel_device_info intel_i865g_info = {
 	GEN(3), \
 	.num_pipes = 2, \
 	.display.has_gmch_display = 1, \
+	.gpu_reset_clobbers_display = true, \
 	.ring_mask = RENDER_RING, \
 	.has_snoop = true, \
 	.has_coherent_ggtt = true, \
@@ -198,6 +200,7 @@ static const struct intel_device_info intel_pineview_info = {
 	.num_pipes = 2, \
 	.display.has_hotplug = 1, \
 	.display.has_gmch_display = 1, \
+	.gpu_reset_clobbers_display = true, \
 	.ring_mask = RENDER_RING, \
 	.has_snoop = true, \
 	.has_coherent_ggtt = true, \
@@ -228,6 +231,7 @@ static const struct intel_device_info intel_g45_info = {
 	GEN4_FEATURES,
 	PLATFORM(INTEL_G45),
 	.ring_mask = RENDER_RING | BSD_RING,
+	.gpu_reset_clobbers_display = false,
 };
 
 static const struct intel_device_info intel_gm45_info = {
@@ -237,6 +241,7 @@ static const struct intel_device_info intel_gm45_info = {
 	.display.has_fbc = 1,
 	.display.supports_tv = 1,
 	.ring_mask = RENDER_RING | BSD_RING,
+	.gpu_reset_clobbers_display = false,
 };
 
 #define GEN5_FEATURES \
@@ -532,7 +537,6 @@ static const struct intel_device_info intel_skylake_gt4_info = {
 	.display.has_fbc = 1, \
 	.display.has_psr = 1, \
 	.has_runtime_pm = 1, \
-	.has_pooled_eu = 0, \
 	.display.has_csr = 1, \
 	.has_rc6 = 1, \
 	.display.has_dp_mst = 1, \
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 2b2eb57ca71f..5b1ae5ed97b3 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1796,7 +1796,7 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream)
 	 * be read back from automatically triggered reports, as part of the
 	 * RPT_ID field.
 	 */
-	if (IS_GEN(dev_priv, 9, 11)) {
+	if (IS_GEN_RANGE(dev_priv, 9, 11)) {
 		I915_WRITE(GEN8_OA_DEBUG,
 			   _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
 					      GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
@@ -2646,7 +2646,7 @@ err:
 static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent)
 {
 	return div64_u64(1000000000ULL * (2ULL << exponent),
-			 1000ULL * INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz);
+			 1000ULL * RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
 }
 
 /**
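Only the info source changes in oa_exponent_to_ns(), but the arithmetic is worth spelling out: the OA period is 2^(exponent + 1) timestamp ticks, and with the clock in kHz the period in ns is 10^9 * (2 << exponent) / (1000 * freq_khz). A standalone check of the formula (the 24 MHz clock is an example value, not read from hardware):

#include <stdint.h>
#include <stdio.h>

static uint64_t oa_exponent_to_ns(uint64_t freq_khz, int exponent)
{
	return 1000000000ULL * (2ULL << exponent) / (1000ULL * freq_khz);
}

int main(void)
{
	uint64_t khz = 24000;	/* e.g. a 24 MHz timestamp clock */

	/* exponent 0 -> 2 ticks -> 83 ns; exponent 5 -> 64 ticks -> 2666 ns */
	printf("%llu ns\n", (unsigned long long)oa_exponent_to_ns(khz, 5));
	return 0;
}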
@@ -3415,7 +3415,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
 	dev_priv->perf.oa.ops.read = gen8_oa_read;
 	dev_priv->perf.oa.ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
 
-	if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv)) {
+	if (IS_GEN_RANGE(dev_priv, 8, 9)) {
 		dev_priv->perf.oa.ops.is_valid_b_counter_reg =
 			gen7_is_valid_b_counter_addr;
 		dev_priv->perf.oa.ops.is_valid_mux_reg =
@@ -3431,7 +3431,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
 		dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
 		dev_priv->perf.oa.ops.disable_metric_set = gen8_disable_metric_set;
 
-		if (IS_GEN8(dev_priv)) {
+		if (IS_GEN(dev_priv, 8)) {
 			dev_priv->perf.oa.ctx_oactxctrl_offset = 0x120;
 			dev_priv->perf.oa.ctx_flexeu0_offset = 0x2ce;
 
@@ -3442,7 +3442,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
 
 			dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);
 		}
-	} else if (IS_GEN(dev_priv, 10, 11)) {
+	} else if (IS_GEN_RANGE(dev_priv, 10, 11)) {
 		dev_priv->perf.oa.ops.is_valid_b_counter_reg =
 			gen7_is_valid_b_counter_addr;
 		dev_priv->perf.oa.ops.is_valid_mux_reg =
@@ -3471,7 +3471,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
 	spin_lock_init(&dev_priv->perf.oa.oa_buffer.ptr_lock);
 
 	oa_sample_rate_hard_limit = 1000 *
-		(INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz / 2);
+		(RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz / 2);
 	dev_priv->perf.sysctl_header = register_sysctl_table(dev_root);
 
 	mutex_init(&dev_priv->perf.metrics_lock);
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index fe56465cdfd6..cbcb957b7141 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -13,7 +13,7 @@
 static int query_topology_info(struct drm_i915_private *dev_priv,
 			       struct drm_i915_query_item *query_item)
 {
-	const struct sseu_dev_info *sseu = &INTEL_INFO(dev_priv)->sseu;
+	const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
 	struct drm_i915_query_topology_info topo;
 	u32 slice_length, subslice_length, eu_length, total_length;
 
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 0a7d60509ca7..44958d994bfa 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -139,6 +139,12 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 	return !i915_mmio_reg_equal(reg, INVALID_MMIO_REG);
 }
 
+#define VLV_DISPLAY_BASE	0x180000
+#define VLV_MIPI_BASE		VLV_DISPLAY_BASE
+#define BXT_MIPI_BASE		0x60000
+
+#define DISPLAY_MMIO_BASE(dev_priv)	(INTEL_INFO(dev_priv)->display_mmio_offset)
+
 /*
  * Given the first two numbers __a and __b of arbitrarily many evenly spaced
  * numbers, pick the 0-based __index'th value.
@@ -179,15 +185,15 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
  * Device info offset array based helpers for groups of registers with unevenly
  * spaced base offsets.
  */
-#define _MMIO_PIPE2(pipe, reg)	_MMIO(dev_priv->info.pipe_offsets[pipe] - \
-				      dev_priv->info.pipe_offsets[PIPE_A] + (reg) + \
-				      dev_priv->info.display_mmio_offset)
-#define _MMIO_TRANS2(pipe, reg)	_MMIO(dev_priv->info.trans_offsets[(pipe)] - \
-				      dev_priv->info.trans_offsets[TRANSCODER_A] + (reg) + \
-				      dev_priv->info.display_mmio_offset)
-#define _CURSOR2(pipe, reg)	_MMIO(dev_priv->info.cursor_offsets[(pipe)] - \
-				      dev_priv->info.cursor_offsets[PIPE_A] + (reg) + \
-				      dev_priv->info.display_mmio_offset)
+#define _MMIO_PIPE2(pipe, reg)	_MMIO(INTEL_INFO(dev_priv)->pipe_offsets[pipe] - \
+				      INTEL_INFO(dev_priv)->pipe_offsets[PIPE_A] + (reg) + \
+				      DISPLAY_MMIO_BASE(dev_priv))
+#define _MMIO_TRANS2(pipe, reg)	_MMIO(INTEL_INFO(dev_priv)->trans_offsets[(pipe)] - \
+				      INTEL_INFO(dev_priv)->trans_offsets[TRANSCODER_A] + (reg) + \
+				      DISPLAY_MMIO_BASE(dev_priv))
+#define _CURSOR2(pipe, reg)	_MMIO(INTEL_INFO(dev_priv)->cursor_offsets[(pipe)] - \
+				      INTEL_INFO(dev_priv)->cursor_offsets[PIPE_A] + (reg) + \
+				      DISPLAY_MMIO_BASE(dev_priv))
 
 #define __MASKED_FIELD(mask, value) ((mask) << 16 | (value))
 #define _MASKED_FIELD(mask, value) ({					   \
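The rewritten helpers all compute the same thing: start from a register address given for pipe A, rebase it onto the requested pipe via the per-platform offset table, then add the display MMIO base. A standalone sketch of that arithmetic (the table values are made up; the real ones come from intel_device_info):

#include <stdint.h>

enum { PIPE_A, PIPE_B, PIPE_C };

/* hypothetical per-pipe bases for illustration only */
static const uint32_t pipe_offsets[] = { 0x70000, 0x71000, 0x72000 };

static uint32_t mmio_pipe2(int pipe, uint32_t reg_for_pipe_a,
			   uint32_t display_mmio_base)
{
	/* rebase a pipe-A register onto the requested pipe */
	return pipe_offsets[pipe] - pipe_offsets[PIPE_A] +
	       reg_for_pipe_a + display_mmio_base;
}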
@@ -347,6 +353,24 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define GEN11_GRDOM_MEDIA4		(1 << 8)
 #define GEN11_GRDOM_VECS		(1 << 13)
 #define GEN11_GRDOM_VECS2		(1 << 14)
+#define GEN11_GRDOM_SFC0		(1 << 17)
+#define GEN11_GRDOM_SFC1		(1 << 18)
+
+#define GEN11_VCS_SFC_RESET_BIT(instance)	(GEN11_GRDOM_SFC0 << ((instance) >> 1))
+#define GEN11_VECS_SFC_RESET_BIT(instance)	(GEN11_GRDOM_SFC0 << (instance))
+
+#define GEN11_VCS_SFC_FORCED_LOCK(engine)	_MMIO((engine)->mmio_base + 0x88C)
+#define GEN11_VCS_SFC_FORCED_LOCK_BIT		(1 << 0)
+#define GEN11_VCS_SFC_LOCK_STATUS(engine)	_MMIO((engine)->mmio_base + 0x890)
+#define GEN11_VCS_SFC_USAGE_BIT			(1 << 0)
+#define GEN11_VCS_SFC_LOCK_ACK_BIT		(1 << 1)
+
+#define GEN11_VECS_SFC_FORCED_LOCK(engine)	_MMIO((engine)->mmio_base + 0x201C)
+#define GEN11_VECS_SFC_FORCED_LOCK_BIT		(1 << 0)
+#define GEN11_VECS_SFC_LOCK_ACK(engine)		_MMIO((engine)->mmio_base + 0x2018)
+#define GEN11_VECS_SFC_LOCK_ACK_BIT		(1 << 0)
+#define GEN11_VECS_SFC_USAGE(engine)		_MMIO((engine)->mmio_base + 0x2014)
+#define GEN11_VECS_SFC_USAGE_BIT		(1 << 0)
 
 #define RING_PP_DIR_BASE(engine)	_MMIO((engine)->mmio_base + 0x228)
 #define RING_PP_DIR_BASE_READ(engine)	_MMIO((engine)->mmio_base + 0x518)
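The shifts in the new SFC reset bits encode the Gen11 topology directly: two video-decode (VCS) engine instances share one SFC, hence instance >> 1, while each video-enhance (VECS) engine maps 1:1, hence a plain shift. A small check of the mapping (GRDOM_SFC0 copied from the definition above; the printf harness is illustrative):

#include <stdio.h>

#define GRDOM_SFC0 (1u << 17)

#define VCS_SFC_RESET_BIT(instance)	(GRDOM_SFC0 << ((instance) >> 1))
#define VECS_SFC_RESET_BIT(instance)	(GRDOM_SFC0 << (instance))

int main(void)
{
	/* VCS0/VCS1 both select SFC0 (0x20000); VCS2/VCS3 select SFC1 (0x40000) */
	printf("%#x %#x %#x %#x\n",
	       VCS_SFC_RESET_BIT(0), VCS_SFC_RESET_BIT(1),
	       VCS_SFC_RESET_BIT(2), VCS_SFC_RESET_BIT(3));
	return 0;
}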
@@ -1866,6 +1890,10 @@ enum i915_power_well_id {
 
 #define CNL_PORT_TX_DW7_GRP(port)	_MMIO(_CNL_PORT_TX_DW_GRP((port), 7))
 #define CNL_PORT_TX_DW7_LN0(port)	_MMIO(_CNL_PORT_TX_DW_LN0((port), 7))
+#define ICL_PORT_TX_DW7_AUX(port)	_MMIO(_ICL_PORT_TX_DW_AUX(7, port))
+#define ICL_PORT_TX_DW7_GRP(port)	_MMIO(_ICL_PORT_TX_DW_GRP(7, port))
+#define ICL_PORT_TX_DW7_LN0(port)	_MMIO(_ICL_PORT_TX_DW_LN(7, 0, port))
+#define ICL_PORT_TX_DW7_LN(port, ln)	_MMIO(_ICL_PORT_TX_DW_LN(7, ln, port))
 #define N_SCALAR(x)			((x) << 24)
 #define N_SCALAR_MASK			(0x7F << 24)
 
@@ -2592,10 +2620,6 @@ enum i915_power_well_id {
 
 #define GEN11_GFX_DISABLE_LEGACY_MODE	(1 << 3)
 
-#define VLV_DISPLAY_BASE 0x180000
-#define VLV_MIPI_BASE VLV_DISPLAY_BASE
-#define BXT_MIPI_BASE 0x60000
-
 #define VLV_GU_CTL0	_MMIO(VLV_DISPLAY_BASE + 0x2030)
 #define VLV_GU_CTL1	_MMIO(VLV_DISPLAY_BASE + 0x2034)
 #define SCPD0		_MMIO(0x209c) /* 915+ only */
@@ -3152,9 +3176,9 @@ enum i915_power_well_id {
 /*
  * Clock control & power management
  */
-#define _DPLL_A (dev_priv->info.display_mmio_offset + 0x6014)
-#define _DPLL_B (dev_priv->info.display_mmio_offset + 0x6018)
-#define _CHV_DPLL_C (dev_priv->info.display_mmio_offset + 0x6030)
+#define _DPLL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x6014)
+#define _DPLL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x6018)
+#define _CHV_DPLL_C (DISPLAY_MMIO_BASE(dev_priv) + 0x6030)
 #define DPLL(pipe) _MMIO_PIPE3((pipe), _DPLL_A, _DPLL_B, _CHV_DPLL_C)
 
 #define VGA0	_MMIO(0x6000)
@@ -3251,9 +3275,9 @@ enum i915_power_well_id {
 #define SDVO_MULTIPLIER_SHIFT_HIRES	4
 #define SDVO_MULTIPLIER_SHIFT_VGA	0
 
-#define _DPLL_A_MD (dev_priv->info.display_mmio_offset + 0x601c)
-#define _DPLL_B_MD (dev_priv->info.display_mmio_offset + 0x6020)
-#define _CHV_DPLL_C_MD (dev_priv->info.display_mmio_offset + 0x603c)
+#define _DPLL_A_MD (DISPLAY_MMIO_BASE(dev_priv) + 0x601c)
+#define _DPLL_B_MD (DISPLAY_MMIO_BASE(dev_priv) + 0x6020)
+#define _CHV_DPLL_C_MD (DISPLAY_MMIO_BASE(dev_priv) + 0x603c)
 #define DPLL_MD(pipe) _MMIO_PIPE3((pipe), _DPLL_A_MD, _DPLL_B_MD, _CHV_DPLL_C_MD)
 
 /*
@@ -3325,7 +3349,7 @@ enum i915_power_well_id {
 #define DSTATE_PLL_D3_OFF	(1 << 3)
 #define DSTATE_GFX_CLOCK_GATING	(1 << 1)
 #define DSTATE_DOT_CLOCK_GATING	(1 << 0)
-#define DSPCLK_GATE_D	_MMIO(dev_priv->info.display_mmio_offset + 0x6200)
+#define DSPCLK_GATE_D	_MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x6200)
 # define DPUNIT_B_CLOCK_GATE_DISABLE	(1 << 30) /* 965 */
 # define VSUNIT_CLOCK_GATE_DISABLE	(1 << 29) /* 965 */
 # define VRHUNIT_CLOCK_GATE_DISABLE	(1 << 28) /* 965 */
@@ -3465,7 +3489,7 @@ enum i915_power_well_id {
 #define _PALETTE_A		0xa000
 #define _PALETTE_B		0xa800
 #define _CHV_PALETTE_C		0xc000
-#define PALETTE(pipe, i)	_MMIO(dev_priv->info.display_mmio_offset + \
+#define PALETTE(pipe, i)	_MMIO(DISPLAY_MMIO_BASE(dev_priv) + \
 				      _PICK((pipe), _PALETTE_A,		\
 					    _PALETTE_B, _CHV_PALETTE_C) + \
 				      (i) * 4)
@@ -4298,7 +4322,7 @@ enum {
 
 
 /* Hotplug control (945+ only) */
-#define PORT_HOTPLUG_EN _MMIO(dev_priv->info.display_mmio_offset + 0x61110)
+#define PORT_HOTPLUG_EN _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61110)
 #define PORTB_HOTPLUG_INT_EN	(1 << 29)
 #define PORTC_HOTPLUG_INT_EN	(1 << 28)
 #define PORTD_HOTPLUG_INT_EN	(1 << 27)
@@ -4328,7 +4352,7 @@ enum {
 #define CRT_HOTPLUG_DETECT_VOLTAGE_325MV	(0 << 2)
 #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV	(1 << 2)
 
-#define PORT_HOTPLUG_STAT _MMIO(dev_priv->info.display_mmio_offset + 0x61114)
+#define PORT_HOTPLUG_STAT _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61114)
 /*
  * HDMI/DP bits are g4x+
  *
@@ -4410,7 +4434,7 @@ enum {
 
 #define PORT_DFT_I9XX	_MMIO(0x61150)
 #define DC_BALANCE_RESET	(1 << 25)
-#define PORT_DFT2_G4X	_MMIO(dev_priv->info.display_mmio_offset + 0x61154)
+#define PORT_DFT2_G4X	_MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61154)
 #define DC_BALANCE_RESET_VLV	(1 << 31)
 #define PIPE_SCRAMBLE_RESET_MASK	((1 << 14) | (0x3 << 0))
 #define PIPE_C_SCRAMBLE_RESET	(1 << 14) /* chv */
@@ -4695,7 +4719,7 @@ enum {
 #define PANEL_POWER_CYCLE_DELAY_SHIFT	0
 
 /* Panel fitting */
-#define PFIT_CONTROL _MMIO(dev_priv->info.display_mmio_offset + 0x61230)
+#define PFIT_CONTROL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61230)
 #define PFIT_ENABLE		(1 << 31)
 #define PFIT_PIPE_MASK		(3 << 29)
 #define PFIT_PIPE_SHIFT		29
@@ -4713,7 +4737,7 @@ enum {
 #define PFIT_SCALING_PROGRAMMED	(1 << 26)
 #define PFIT_SCALING_PILLAR	(2 << 26)
 #define PFIT_SCALING_LETTER	(3 << 26)
-#define PFIT_PGM_RATIOS _MMIO(dev_priv->info.display_mmio_offset + 0x61234)
+#define PFIT_PGM_RATIOS _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61234)
 /* Pre-965 */
 #define PFIT_VERT_SCALE_SHIFT	20
 #define PFIT_VERT_SCALE_MASK	0xfff00000
@@ -4725,25 +4749,25 @@ enum {
 #define PFIT_HORIZ_SCALE_SHIFT_965	0
 #define PFIT_HORIZ_SCALE_MASK_965	0x00001fff
 
-#define PFIT_AUTO_RATIOS _MMIO(dev_priv->info.display_mmio_offset + 0x61238)
+#define PFIT_AUTO_RATIOS _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61238)
 
-#define _VLV_BLC_PWM_CTL2_A (dev_priv->info.display_mmio_offset + 0x61250)
-#define _VLV_BLC_PWM_CTL2_B (dev_priv->info.display_mmio_offset + 0x61350)
+#define _VLV_BLC_PWM_CTL2_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61250)
+#define _VLV_BLC_PWM_CTL2_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61350)
 #define VLV_BLC_PWM_CTL2(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \
 					  _VLV_BLC_PWM_CTL2_B)
 
-#define _VLV_BLC_PWM_CTL_A (dev_priv->info.display_mmio_offset + 0x61254)
-#define _VLV_BLC_PWM_CTL_B (dev_priv->info.display_mmio_offset + 0x61354)
+#define _VLV_BLC_PWM_CTL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61254)
+#define _VLV_BLC_PWM_CTL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61354)
 #define VLV_BLC_PWM_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL_A, \
 					 _VLV_BLC_PWM_CTL_B)
 
-#define _VLV_BLC_HIST_CTL_A (dev_priv->info.display_mmio_offset + 0x61260)
-#define _VLV_BLC_HIST_CTL_B (dev_priv->info.display_mmio_offset + 0x61360)
+#define _VLV_BLC_HIST_CTL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61260)
+#define _VLV_BLC_HIST_CTL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61360)
 #define VLV_BLC_HIST_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_HIST_CTL_A, \
 					  _VLV_BLC_HIST_CTL_B)
 
 /* Backlight control */
-#define BLC_PWM_CTL2 _MMIO(dev_priv->info.display_mmio_offset + 0x61250) /* 965+ only */
+#define BLC_PWM_CTL2 _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61250) /* 965+ only */
 #define BLM_PWM_ENABLE		(1 << 31)
 #define BLM_COMBINATION_MODE	(1 << 30) /* gen4 only */
 #define BLM_PIPE_SELECT		(1 << 29)
@@ -4766,7 +4790,7 @@ enum {
 #define BLM_PHASE_IN_COUNT_MASK	(0xff << 8)
 #define BLM_PHASE_IN_INCR_SHIFT	(0)
 #define BLM_PHASE_IN_INCR_MASK	(0xff << 0)
-#define BLC_PWM_CTL _MMIO(dev_priv->info.display_mmio_offset + 0x61254)
+#define BLC_PWM_CTL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61254)
 /*
  * This is the most significant 15 bits of the number of backlight cycles in a
  * complete cycle of the modulated backlight control.
@@ -4788,7 +4812,7 @@ enum {
 #define BACKLIGHT_DUTY_CYCLE_MASK_PNV	(0xfffe)
 #define BLM_POLARITY_PNV		(1 << 0) /* pnv only */
 
-#define BLC_HIST_CTL _MMIO(dev_priv->info.display_mmio_offset + 0x61260)
+#define BLC_HIST_CTL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61260)
 #define BLM_HISTOGRAM_ENABLE	(1 << 31)
 
 /* New registers for PCH-split platforms. Safe where new bits show up, the
@@ -5412,47 +5436,47 @@ enum {
  * is 20 bytes in each direction, hence the 5 fixed
  * data registers
  */
-#define _DPA_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64010)
-#define _DPA_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64014)
-#define _DPA_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64018)
-#define _DPA_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6401c)
-#define _DPA_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64020)
-#define _DPA_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64024)
+#define _DPA_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64010)
+#define _DPA_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64014)
+#define _DPA_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64018)
+#define _DPA_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6401c)
+#define _DPA_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64020)
+#define _DPA_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64024)
 
-#define _DPB_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64110)
-#define _DPB_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64114)
-#define _DPB_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64118)
-#define _DPB_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6411c)
-#define _DPB_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64120)
-#define _DPB_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64124)
+#define _DPB_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64110)
+#define _DPB_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64114)
+#define _DPB_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64118)
+#define _DPB_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6411c)
+#define _DPB_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64120)
+#define _DPB_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64124)
 
-#define _DPC_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64210)
-#define _DPC_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64214)
-#define _DPC_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64218)
-#define _DPC_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6421c)
-#define _DPC_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64220)
-#define _DPC_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64224)
+#define _DPC_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64210)
+#define _DPC_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64214)
+#define _DPC_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64218)
+#define _DPC_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6421c)
+#define _DPC_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64220)
+#define _DPC_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64224)
 
-#define _DPD_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64310)
-#define _DPD_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64314)
-#define _DPD_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64318)
-#define _DPD_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6431c)
-#define _DPD_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64320)
-#define _DPD_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64324)
+#define _DPD_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64310)
+#define _DPD_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64314)
+#define _DPD_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64318)
+#define _DPD_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6431c)
+#define _DPD_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64320)
+#define _DPD_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64324)
 
-#define _DPE_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64410)
-#define _DPE_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64414)
-#define _DPE_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64418)
-#define _DPE_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6441c)
-#define _DPE_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64420)
-#define _DPE_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64424)
+#define _DPE_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64410)
+#define _DPE_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64414)
+#define _DPE_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64418)
+#define _DPE_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6441c)
+#define _DPE_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64420)
+#define _DPE_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64424)
 
-#define _DPF_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64510)
-#define _DPF_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64514)
-#define _DPF_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64518)
-#define _DPF_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6451c)
-#define _DPF_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64520)
-#define _DPF_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64524)
+#define _DPF_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64510)
+#define _DPF_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64514)
+#define _DPF_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64518)
+#define _DPF_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6451c)
+#define _DPF_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64520)
+#define _DPF_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64524)
 
 #define DP_AUX_CH_CTL(aux_ch)	_MMIO_PORT(aux_ch, _DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL)
 #define DP_AUX_CH_DATA(aux_ch, i)	_MMIO(_PORT(aux_ch, _DPA_AUX_CH_DATA1, _DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
@@ -5728,7 +5752,7 @@ enum {
 #define DPINVGTT_STATUS_MASK		0xff
 #define DPINVGTT_STATUS_MASK_CHV	0xfff
 
-#define DSPARB _MMIO(dev_priv->info.display_mmio_offset + 0x70030)
+#define DSPARB _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70030)
 #define DSPARB_CSTART_MASK	(0x7f << 7)
 #define DSPARB_CSTART_SHIFT	7
 #define DSPARB_BSTART_MASK	(0x7f)
@@ -5763,7 +5787,7 @@ enum {
 #define DSPARB_SPRITEF_MASK_VLV	(0xff << 8)
 
 /* pnv/gen4/g4x/vlv/chv */
-#define DSPFW1 _MMIO(dev_priv->info.display_mmio_offset + 0x70034)
+#define DSPFW1 _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70034)
 #define DSPFW_SR_SHIFT		23
 #define DSPFW_SR_MASK		(0x1ff << 23)
 #define DSPFW_CURSORB_SHIFT	16
@@ -5774,7 +5798,7 @@ enum {
 #define DSPFW_PLANEA_SHIFT	0
 #define DSPFW_PLANEA_MASK	(0x7f << 0)
 #define DSPFW_PLANEA_MASK_VLV	(0xff << 0) /* vlv/chv */
-#define DSPFW2 _MMIO(dev_priv->info.display_mmio_offset + 0x70038)
+#define DSPFW2 _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70038)
 #define DSPFW_FBC_SR_EN		(1 << 31) /* g4x */
 #define DSPFW_FBC_SR_SHIFT	28
 #define DSPFW_FBC_SR_MASK	(0x7 << 28) /* g4x */
@@ -5790,7 +5814,7 @@ enum {
 #define DSPFW_SPRITEA_SHIFT	0
 #define DSPFW_SPRITEA_MASK	(0x7f << 0) /* g4x */
 #define DSPFW_SPRITEA_MASK_VLV	(0xff << 0) /* vlv/chv */
-#define DSPFW3 _MMIO(dev_priv->info.display_mmio_offset + 0x7003c)
+#define DSPFW3 _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x7003c)
 #define DSPFW_HPLL_SR_EN	(1 << 31)
 #define PINEVIEW_SELF_REFRESH_EN	(1 << 30)
 #define DSPFW_CURSOR_SR_SHIFT	24
@@ -6206,35 +6230,35 @@ enum {
  * [10:1f] all
  * [30:32] all
  */
-#define SWF0(i)	_MMIO(dev_priv->info.display_mmio_offset + 0x70410 + (i) * 4)
-#define SWF1(i)	_MMIO(dev_priv->info.display_mmio_offset + 0x71410 + (i) * 4)
-#define SWF3(i)	_MMIO(dev_priv->info.display_mmio_offset + 0x72414 + (i) * 4)
+#define SWF0(i)	_MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70410 + (i) * 4)
+#define SWF1(i)	_MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x71410 + (i) * 4)
+#define SWF3(i)	_MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x72414 + (i) * 4)
 #define SWF_ILK(i)	_MMIO(0x4F000 + (i) * 4)
 
 /* Pipe B */
-#define _PIPEBDSL		(dev_priv->info.display_mmio_offset + 0x71000)
-#define _PIPEBCONF		(dev_priv->info.display_mmio_offset + 0x71008)
-#define _PIPEBSTAT		(dev_priv->info.display_mmio_offset + 0x71024)
+#define _PIPEBDSL		(DISPLAY_MMIO_BASE(dev_priv) + 0x71000)
+#define _PIPEBCONF		(DISPLAY_MMIO_BASE(dev_priv) + 0x71008)
+#define _PIPEBSTAT		(DISPLAY_MMIO_BASE(dev_priv) + 0x71024)
 #define _PIPEBFRAMEHIGH		0x71040
 #define _PIPEBFRAMEPIXEL	0x71044
-#define _PIPEB_FRMCOUNT_G4X	(dev_priv->info.display_mmio_offset + 0x71040)
-#define _PIPEB_FLIPCOUNT_G4X	(dev_priv->info.display_mmio_offset + 0x71044)
+#define _PIPEB_FRMCOUNT_G4X	(DISPLAY_MMIO_BASE(dev_priv) + 0x71040)
+#define _PIPEB_FLIPCOUNT_G4X	(DISPLAY_MMIO_BASE(dev_priv) + 0x71044)
 
 
 /* Display B control */
-#define _DSPBCNTR		(dev_priv->info.display_mmio_offset + 0x71180)
+#define _DSPBCNTR		(DISPLAY_MMIO_BASE(dev_priv) + 0x71180)
 #define DISPPLANE_ALPHA_TRANS_ENABLE	(1 << 15)
 #define DISPPLANE_ALPHA_TRANS_DISABLE	0
 #define DISPPLANE_SPRITE_ABOVE_DISPLAY	0
 #define DISPPLANE_SPRITE_ABOVE_OVERLAY	(1)
-#define _DSPBADDR		(dev_priv->info.display_mmio_offset + 0x71184)
-#define _DSPBSTRIDE		(dev_priv->info.display_mmio_offset + 0x71188)
-#define _DSPBPOS		(dev_priv->info.display_mmio_offset + 0x7118C)
-#define _DSPBSIZE		(dev_priv->info.display_mmio_offset + 0x71190)
-#define _DSPBSURF		(dev_priv->info.display_mmio_offset + 0x7119C)
-#define _DSPBTILEOFF		(dev_priv->info.display_mmio_offset + 0x711A4)
-#define _DSPBOFFSET		(dev_priv->info.display_mmio_offset + 0x711A4)
-#define _DSPBSURFLIVE		(dev_priv->info.display_mmio_offset + 0x711AC)
+#define _DSPBADDR		(DISPLAY_MMIO_BASE(dev_priv) + 0x71184)
+#define _DSPBSTRIDE		(DISPLAY_MMIO_BASE(dev_priv) + 0x71188)
+#define _DSPBPOS		(DISPLAY_MMIO_BASE(dev_priv) + 0x7118C)
+#define _DSPBSIZE		(DISPLAY_MMIO_BASE(dev_priv) + 0x71190)
+#define _DSPBSURF		(DISPLAY_MMIO_BASE(dev_priv) + 0x7119C)
+#define _DSPBTILEOFF		(DISPLAY_MMIO_BASE(dev_priv) + 0x711A4)
+#define _DSPBOFFSET		(DISPLAY_MMIO_BASE(dev_priv) + 0x711A4)
+#define _DSPBSURFLIVE		(DISPLAY_MMIO_BASE(dev_priv) + 0x711AC)
 
 /* ICL DSI 0 and 1 */
 #define _PIPEDSI0CONF		0x7b008
@@ -8786,7 +8810,7 @@ enum {
 #define GEN9_ENABLE_GPGPU_PREEMPTION	(1 << 2)
 
 /* Audio */
-#define G4X_AUD_VID_DID _MMIO(dev_priv->info.display_mmio_offset + 0x62020)
+#define G4X_AUD_VID_DID _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x62020)
 #define INTEL_AUDIO_DEVCL	0x808629FB
 #define INTEL_AUDIO_DEVBLC	0x80862801
 #define INTEL_AUDIO_DEVCTG	0x80862802
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index cefefc11d922..d1355154886a 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -111,99 +111,10 @@ i915_request_remove_from_client(struct i915_request *request)
 	spin_unlock(&file_priv->mm.lock);
 }
 
-static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
+static void reserve_gt(struct drm_i915_private *i915)
 {
-	struct intel_engine_cs *engine;
-	struct i915_timeline *timeline;
-	enum intel_engine_id id;
-	int ret;
-
-	/* Carefully retire all requests without writing to the rings */
-	ret = i915_gem_wait_for_idle(i915,
-				     I915_WAIT_INTERRUPTIBLE |
-				     I915_WAIT_LOCKED,
-				     MAX_SCHEDULE_TIMEOUT);
-	if (ret)
-		return ret;
-
-	GEM_BUG_ON(i915->gt.active_requests);
-
-	/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
-	for_each_engine(engine, i915, id) {
-		GEM_TRACE("%s seqno %d (current %d) -> %d\n",
-			  engine->name,
-			  engine->timeline.seqno,
-			  intel_engine_get_seqno(engine),
-			  seqno);
-
-		if (seqno == engine->timeline.seqno)
-			continue;
-
-		kthread_park(engine->breadcrumbs.signaler);
-
-		if (!i915_seqno_passed(seqno, engine->timeline.seqno)) {
-			/* Flush any waiters before we reuse the seqno */
-			intel_engine_disarm_breadcrumbs(engine);
-			intel_engine_init_hangcheck(engine);
-			GEM_BUG_ON(!list_empty(&engine->breadcrumbs.signals));
-		}
-
-		/* Check we are idle before we fiddle with hw state! */
-		GEM_BUG_ON(!intel_engine_is_idle(engine));
-		GEM_BUG_ON(i915_gem_active_isset(&engine->timeline.last_request));
-
-		/* Finally reset hw state */
-		intel_engine_init_global_seqno(engine, seqno);
-		engine->timeline.seqno = seqno;
-
-		kthread_unpark(engine->breadcrumbs.signaler);
-	}
-
-	list_for_each_entry(timeline, &i915->gt.timelines, link)
-		memset(timeline->global_sync, 0, sizeof(timeline->global_sync));
-
-	i915->gt.request_serial = seqno;
-
-	return 0;
-}
-
-int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
-{
-	struct drm_i915_private *i915 = to_i915(dev);
-
-	lockdep_assert_held(&i915->drm.struct_mutex);
-
-	if (seqno == 0)
-		return -EINVAL;
-
-	/* HWS page needs to be set less than what we will inject to ring */
-	return reset_all_global_seqno(i915, seqno - 1);
-}
-
-static int reserve_gt(struct drm_i915_private *i915)
-{
-	int ret;
-
-	/*
-	 * Reservation is fine until we may need to wrap around
-	 *
-	 * By incrementing the serial for every request, we know that no
-	 * individual engine may exceed that serial (as each is reset to 0
-	 * on any wrap). This protects even the most pessimistic of migrations
-	 * of every request from all engines onto just one.
-	 */
-	while (unlikely(++i915->gt.request_serial == 0)) {
-		ret = reset_all_global_seqno(i915, 0);
-		if (ret) {
-			i915->gt.request_serial--;
-			return ret;
-		}
-	}
-
 	if (!i915->gt.active_requests++)
 		i915_gem_unpark(i915);
-
-	return 0;
 }
 
 static void unreserve_gt(struct drm_i915_private *i915)
@@ -566,6 +477,38 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 	return NOTIFY_DONE;
 }
 
+static void ring_retire_requests(struct intel_ring *ring)
+{
+	struct i915_request *rq, *rn;
+
+	list_for_each_entry_safe(rq, rn, &ring->request_list, ring_link) {
+		if (!i915_request_completed(rq))
+			break;
+
+		i915_request_retire(rq);
+	}
+}
+
+static noinline struct i915_request *
+i915_request_alloc_slow(struct intel_context *ce)
+{
+	struct intel_ring *ring = ce->ring;
+	struct i915_request *rq;
+
+	if (list_empty(&ring->request_list))
+		goto out;
+
+	/* Ratelimit ourselves to prevent oom from malicious clients */
+	rq = list_last_entry(&ring->request_list, typeof(*rq), ring_link);
+	cond_synchronize_rcu(rq->rcustate);
+
+	/* Retire our old requests in the hope that we free some */
+	ring_retire_requests(ring);
+
+out:
+	return kmem_cache_alloc(ce->gem_context->i915->requests, GFP_KERNEL);
+}
+
 /**
  * i915_request_alloc - allocate a request structure
  *
@@ -608,13 +551,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	if (IS_ERR(ce))
 		return ERR_CAST(ce);
 
-	ret = reserve_gt(i915);
-	if (ret)
-		goto err_unpin;
-
-	ret = intel_ring_wait_for_space(ce->ring, MIN_SPACE_FOR_ADD_REQUEST);
-	if (ret)
-		goto err_unreserve;
+	reserve_gt(i915);
 
 	/* Move our oldest request to the slab-cache (if not in use!) */
 	rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
@@ -654,15 +591,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	rq = kmem_cache_alloc(i915->requests,
 			      GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
 	if (unlikely(!rq)) {
-		i915_retire_requests(i915);
-
-		/* Ratelimit ourselves to prevent oom from malicious clients */
-		rq = i915_gem_active_raw(&ce->ring->timeline->last_request,
-					 &i915->drm.struct_mutex);
-		if (rq)
-			cond_synchronize_rcu(rq->rcustate);
-
-		rq = kmem_cache_alloc(i915->requests, GFP_KERNEL);
+		rq = i915_request_alloc_slow(ce);
 		if (!rq) {
 			ret = -ENOMEM;
 			goto err_unreserve;
@@ -707,9 +636,13 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	 * i915_request_add() call can't fail. Note that the reserve may need
 	 * to be redone if the request is not actually submitted straight
 	 * away, e.g. because a GPU scheduler has deferred it.
+	 *
+	 * Note that due to how we add reserved_space to intel_ring_begin()
+	 * we need to double our request to ensure that if we need to wrap
+	 * around inside i915_request_add() there is sufficient space at
+	 * the beginning of the ring as well.
 	 */
-	rq->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
-	GEM_BUG_ON(rq->reserved_space < engine->emit_breadcrumb_sz);
+	rq->reserved_space = 2 * engine->emit_breadcrumb_sz * sizeof(u32);
 
 	/*
 	 * Record the position of the start of the request so that
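The doubled reservation is plain arithmetic: emit_breadcrumb_sz is a dword count, so the byte reservation is 2 * sz * sizeof(u32), one breadcrumb-sized slot for the emission itself plus one spare in case the write wraps and must restart at the head of the ring. A hedged check with a hypothetical 6-dword breadcrumb (the real size is per-engine):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int emit_breadcrumb_sz = 6;	/* example value only */
	unsigned int reserved = 2 * emit_breadcrumb_sz * sizeof(uint32_t);

	printf("reserved_space = %u bytes\n", reserved);	/* prints 48 */
	return 0;
}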
@@ -719,11 +652,6 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	 */
 	rq->head = rq->ring->emit;
 
-	/* Unconditionally invalidate GPU caches and TLBs. */
-	ret = engine->emit_flush(rq, EMIT_INVALIDATE);
-	if (ret)
-		goto err_unwind;
-
 	ret = engine->request_alloc(rq);
 	if (ret)
 		goto err_unwind;
@@ -748,7 +676,6 @@ err_unwind:
 	kmem_cache_free(i915->requests, rq);
 err_unreserve:
 	unreserve_gt(i915);
-err_unpin:
 	intel_context_unpin(ce);
 	return ERR_PTR(ret);
 }
@@ -776,34 +703,12 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
 		ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
 						       &from->submit,
 						       I915_FENCE_GFP);
-		return ret < 0 ? ret : 0;
-	}
-
-	if (to->engine->semaphore.sync_to) {
-		u32 seqno;
-
-		GEM_BUG_ON(!from->engine->semaphore.signal);
-
-		seqno = i915_request_global_seqno(from);
-		if (!seqno)
-			goto await_dma_fence;
-
-		if (seqno <= to->timeline->global_sync[from->engine->id])
-			return 0;
-
-		trace_i915_gem_ring_sync_to(to, from);
-		ret = to->engine->semaphore.sync_to(to, from);
-		if (ret)
-			return ret;
-
-		to->timeline->global_sync[from->engine->id] = seqno;
-		return 0;
+	} else {
+		ret = i915_sw_fence_await_dma_fence(&to->submit,
+						    &from->fence, 0,
+						    I915_FENCE_GFP);
 	}
 
-await_dma_fence:
-	ret = i915_sw_fence_await_dma_fence(&to->submit,
-					    &from->fence, 0,
-					    I915_FENCE_GFP);
 	return ret < 0 ? ret : 0;
 }
 
@@ -979,8 +884,8 @@ void i915_request_add(struct i915_request *request)
 	 * should already have been reserved in the ring buffer. Let the ring
 	 * know that it is time to use that space up.
 	 */
+	GEM_BUG_ON(request->reserved_space > request->ring->space);
 	request->reserved_space = 0;
-	engine->emit_flush(request, EMIT_FLUSH);
 
 	/*
 	 * Record the position of the start of the breadcrumb so that
@@ -1298,13 +1203,7 @@ restart:
 		set_current_state(state);
 
 wakeup:
-	/*
-	 * Carefully check if the request is complete, giving time
-	 * for the seqno to be visible following the interrupt.
-	 * We also have to check in case we are kicked by the GPU
-	 * reset in order to drop the struct_mutex.
-	 */
-	if (__i915_request_irq_complete(rq))
+	if (i915_request_completed(rq))
 		break;
 
 	/*
@@ -1343,19 +1242,6 @@ complete:
 	return timeout;
 }
 
-static void ring_retire_requests(struct intel_ring *ring)
-{
-	struct i915_request *request, *next;
-
-	list_for_each_entry_safe(request, next,
-				 &ring->request_list, ring_link) {
-		if (!i915_request_completed(request))
-			break;
-
-		i915_request_retire(request);
-	}
-}
-
 void i915_retire_requests(struct drm_i915_private *i915)
 {
 	struct intel_ring *ring, *tmp;
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 90e9d170a0cd..d014b0605445 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -30,7 +30,6 @@
 #include "i915_gem.h"
 #include "i915_scheduler.h"
 #include "i915_sw_fence.h"
-#include "i915_scheduler.h"
 
 #include <uapi/drm/i915_drm.h>
 
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 8f3aa4dc0c98..f18afa2bac8d 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -24,7 +24,6 @@
  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
 
-#include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "intel_drv.h"
 #include "i915_reg.h"
@@ -65,7 +64,7 @@ int i915_save_state(struct drm_i915_private *dev_priv)
 
 	i915_save_display(dev_priv);
 
-	if (IS_GEN4(dev_priv))
+	if (IS_GEN(dev_priv, 4))
 		pci_read_config_word(pdev, GCDGMBUS,
 				     &dev_priv->regfile.saveGCDGMBUS);
 
@@ -77,14 +76,14 @@ int i915_save_state(struct drm_i915_private *dev_priv)
 	dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
 
 	/* Scratch space */
-	if (IS_GEN2(dev_priv) && IS_MOBILE(dev_priv)) {
+	if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) {
 		for (i = 0; i < 7; i++) {
 			dev_priv->regfile.saveSWF0[i] = I915_READ(SWF0(i));
 			dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
 		}
 		for (i = 0; i < 3; i++)
 			dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i));
-	} else if (IS_GEN2(dev_priv)) {
+	} else if (IS_GEN(dev_priv, 2)) {
 		for (i = 0; i < 7; i++)
 			dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
 	} else if (HAS_GMCH_DISPLAY(dev_priv)) {
@@ -108,7 +107,7 @@ int i915_restore_state(struct drm_i915_private *dev_priv)
 
 	mutex_lock(&dev_priv->drm.struct_mutex);
 
-	if (IS_GEN4(dev_priv))
+	if (IS_GEN(dev_priv, 4))
 		pci_write_config_word(pdev, GCDGMBUS,
 				      dev_priv->regfile.saveGCDGMBUS);
 	i915_restore_display(dev_priv);
@@ -122,14 +121,14 @@ int i915_restore_state(struct drm_i915_private *dev_priv)
 	I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000);
 
 	/* Scratch space */
-	if (IS_GEN2(dev_priv) && IS_MOBILE(dev_priv)) {
+	if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) {
 		for (i = 0; i < 7; i++) {
 			I915_WRITE(SWF0(i), dev_priv->regfile.saveSWF0[i]);
 			I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
 		}
 		for (i = 0; i < 3; i++)
 			I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]);
-	} else if (IS_GEN2(dev_priv)) {
+	} else if (IS_GEN(dev_priv, 2)) {
 		for (i = 0; i < 7; i++)
 			I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
 	} else if (HAS_GMCH_DISPLAY(dev_priv)) {
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 535caebd9813..c0cfe7ae2ba5 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -521,7 +521,9 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
 	ssize_t ret;
 
 	gpu = i915_first_error_state(i915);
-	if (gpu) {
+	if (IS_ERR(gpu)) {
+		ret = PTR_ERR(gpu);
+	} else if (gpu) {
 		ret = i915_gpu_state_copy_to_buffer(gpu, buf, off, count);
 		i915_gpu_state_put(gpu);
 	} else {
diff --git a/drivers/gpu/drm/i915/i915_timeline.h b/drivers/gpu/drm/i915/i915_timeline.h
index ebd71b487220..38c1e15e927a 100644
--- a/drivers/gpu/drm/i915/i915_timeline.h
+++ b/drivers/gpu/drm/i915/i915_timeline.h
@@ -63,14 +63,6 @@ struct i915_timeline {
 	 * redundant and we can discard it without loss of generality.
 	 */
 	struct i915_syncmap *sync;
-	/**
-	 * Separately to the inter-context seqno map above, we track the last
-	 * barrier (e.g. semaphore wait) to the global engine timelines. Note
-	 * that this tracks global_seqno rather than the context.seqno, and
-	 * so it is subject to the limitations of hw wraparound and that we
-	 * may need to revoke global_seqno (on pre-emption).
-	 */
-	u32 global_sync[I915_NUM_ENGINES];
 
 	struct list_head link;
 	const char *name;
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index b50c6b829715..33d90eca9cdd 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -6,7 +6,6 @@
 #include <linux/types.h>
 #include <linux/tracepoint.h>
 
-#include <drm/drmP.h>
 #include "i915_drv.h"
 #include "intel_drv.h"
 #include "intel_ringbuffer.h"
@@ -585,35 +584,6 @@ TRACE_EVENT(i915_gem_evict_vm,
 	    TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm)
 );
 
-TRACE_EVENT(i915_gem_ring_sync_to,
-	    TP_PROTO(struct i915_request *to, struct i915_request *from),
-	    TP_ARGS(to, from),
-
-	    TP_STRUCT__entry(
-			     __field(u32, dev)
-			     __field(u32, from_class)
-			     __field(u32, from_instance)
-			     __field(u32, to_class)
-			     __field(u32, to_instance)
-			     __field(u32, seqno)
-			     ),
-
-	    TP_fast_assign(
-			   __entry->dev = from->i915->drm.primary->index;
-			   __entry->from_class = from->engine->uabi_class;
-			   __entry->from_instance = from->engine->instance;
-			   __entry->to_class = to->engine->uabi_class;
-			   __entry->to_instance = to->engine->instance;
-			   __entry->seqno = from->global_seqno;
-			   ),
-
-	    TP_printk("dev=%u, sync-from=%u:%u, sync-to=%u:%u, seqno=%u",
-		      __entry->dev,
-		      __entry->from_class, __entry->from_instance,
-		      __entry->to_class, __entry->to_instance,
-		      __entry->seqno)
-);
-
 TRACE_EVENT(i915_request_queue,
 	    TP_PROTO(struct i915_request *rq, u32 flags),
 	    TP_ARGS(rq, flags),
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
index 6ba478e57b9b..9d142d038a7d 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -6,7 +6,6 @@
  */
 #include <linux/pci.h>
 #include <linux/acpi.h>
-#include <drm/drmP.h>
 #include "i915_drv.h"
 
 #define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index 8cb02f28d30c..d8dbc9980281 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -29,7 +29,6 @@
  * See intel_atomic_plane.c for the plane-specific atomic functionality.
  */
 
-#include <drm/drmP.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_plane_helper.h>
@@ -233,7 +232,7 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_sta
 	if (plane_state && plane_state->base.fb &&
 	    plane_state->base.fb->format->is_yuv &&
 	    plane_state->base.fb->format->num_planes > 1) {
-		if (IS_GEN9(dev_priv) &&
+		if (IS_GEN(dev_priv, 9) &&
 		    !IS_GEMINILAKE(dev_priv)) {
 			mode = SKL_PS_SCALER_MODE_NV12;
 		} else if (icl_is_hdr_plane(to_intel_plane(plane_state->base.plane))) {
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index 0a73e6e65c20..683a75dad4fb 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -31,7 +31,6 @@
  * prepare/check/commit/cleanup steps.
  */
 
-#include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_plane_helper.h>
 #include "intel_drv.h"
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index ae55a6865d5c..202a58cf2d9f 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -27,7 +27,6 @@
 #include <drm/intel_lpe_audio.h>
 #include "intel_drv.h"
 
-#include <drm/drmP.h>
 #include <drm/drm_edid.h>
 #include "i915_drv.h"
 
@@ -758,7 +757,7 @@ static void i915_audio_component_codec_wake_override(struct device *kdev,
 	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
 	u32 tmp;
 
-	if (!IS_GEN9(dev_priv))
+	if (!IS_GEN(dev_priv, 9))
 		return;
 
 	i915_audio_component_get_power(kdev);
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 6d3e0260d49c..140c218128cb 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -26,7 +26,6 @@
  */
 
 #include <drm/drm_dp_helper.h>
-#include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
@@ -453,7 +452,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version)
 	 * Only parse SDVO mappings on gens that could have SDVO. This isn't
 	 * accurate and doesn't have to be, as long as it's not too strict.
 	 */
-	if (!IS_GEN(dev_priv, 3, 7)) {
+	if (!IS_GEN_RANGE(dev_priv, 3, 7)) {
 		DRM_DEBUG_KMS("Skipping SDVO device mapping\n");
 		return;
 	}
@@ -1386,8 +1385,15 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
 	info->supports_dp = is_dp;
 	info->supports_edp = is_edp;
 
-	DRM_DEBUG_KMS("Port %c VBT info: DP:%d HDMI:%d DVI:%d EDP:%d CRT:%d\n",
-		      port_name(port), is_dp, is_hdmi, is_dvi, is_edp, is_crt);
+	if (bdb_version >= 195)
+		info->supports_typec_usb = child->dp_usb_type_c;
+
+	if (bdb_version >= 209)
+		info->supports_tbt = child->tbt;
+
+	DRM_DEBUG_KMS("Port %c VBT info: DP:%d HDMI:%d DVI:%d EDP:%d CRT:%d TCUSB:%d TBT:%d\n",
+		      port_name(port), is_dp, is_hdmi, is_dvi, is_edp, is_crt,
+		      info->supports_typec_usb, info->supports_tbt);
 
 	if (is_edp && is_dvi)
 		DRM_DEBUG_KMS("Internal DP port %c is TMDS compatible\n",
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 447c5256f63a..4ed7105d7ff5 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -166,12 +166,6 @@ static void irq_enable(struct intel_engine_cs *engine)
 	 */
 	GEM_BUG_ON(!intel_irqs_enabled(engine->i915));
 
-	/* Enabling the IRQ may miss the generation of the interrupt, but
-	 * we still need to force the barrier before reading the seqno,
-	 * just in case.
-	 */
-	set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
-
 	/* Caller disables interrupts */
 	if (engine->irq_enable) {
 		spin_lock(&engine->i915->irq_lock);
@@ -683,16 +677,6 @@ static int intel_breadcrumbs_signaler(void *arg)
 		}
 
 		if (unlikely(do_schedule)) {
-			/* Before we sleep, check for a missed seqno */
-			if (current->state & TASK_NORMAL &&
-			    !list_empty(&b->signals) &&
-			    engine->irq_seqno_barrier &&
-			    test_and_clear_bit(ENGINE_IRQ_BREADCRUMB,
-					       &engine->irq_posted)) {
-				engine->irq_seqno_barrier(engine);
-				intel_engine_wakeup(engine);
-			}
-
 sleep:
 			if (kthread_should_park())
 				kthread_parkme();
@@ -859,16 +843,6 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
 	else
 		irq_disable(engine);
 
-	/*
-	 * We set the IRQ_BREADCRUMB bit when we enable the irq presuming the
-	 * GPU is active and may have already executed the MI_USER_INTERRUPT
-	 * before the CPU is ready to receive. However, the engine is currently
-	 * idle (we haven't started it yet), there is no possibility for a
-	 * missed interrupt as we enabled the irq and so we can clear the
-	 * immediate wakeup (until a real interrupt arrives for the waiter).
-	 */
-	clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
-
 	spin_unlock_irqrestore(&b->irq_lock, flags);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index 25e3aba9cded..2021e484a287 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -2140,7 +2140,7 @@ static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv,
 {
 	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
 		return DIV_ROUND_UP(pixel_rate, 2);
-	else if (IS_GEN9(dev_priv) ||
+	else if (IS_GEN(dev_priv, 9) ||
 		 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
 		return pixel_rate;
 	else if (IS_CHERRYVIEW(dev_priv))
@@ -2176,7 +2176,7 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
 	if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) {
 		/* Display WA #1145: glk,cnl */
 		min_cdclk = max(316800, min_cdclk);
-	} else if (IS_GEN9(dev_priv) || IS_BROADWELL(dev_priv)) {
+	} else if (IS_GEN(dev_priv, 9) || IS_BROADWELL(dev_priv)) {
 		/* Display WA #1144: skl,bxt */
 		min_cdclk = max(432000, min_cdclk);
 	}
@@ -2537,7 +2537,7 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
 
 	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
 		return 2 * max_cdclk_freq;
-	else if (IS_GEN9(dev_priv) ||
+	else if (IS_GEN(dev_priv, 9) ||
 		 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
 		return max_cdclk_freq;
 	else if (IS_CHERRYVIEW(dev_priv))
@@ -2785,9 +2785,9 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
 		dev_priv->display.get_cdclk = hsw_get_cdclk;
 	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 		dev_priv->display.get_cdclk = vlv_get_cdclk;
-	else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
+	else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
 		dev_priv->display.get_cdclk = fixed_400mhz_get_cdclk;
-	else if (IS_GEN5(dev_priv))
+	else if (IS_GEN(dev_priv, 5))
 		dev_priv->display.get_cdclk = fixed_450mhz_get_cdclk;
 	else if (IS_GM45(dev_priv))
 		dev_priv->display.get_cdclk = gm45_get_cdclk;
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index 5127da286a2b..37fd9ddf762e 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -74,12 +74,17 @@
 #define ILK_CSC_COEFF_1_0 \
 	((7 << 12) | ILK_CSC_COEFF_FP(CTM_COEFF_1_0, 8))
 
-static bool crtc_state_is_legacy_gamma(struct drm_crtc_state *state)
+static bool lut_is_legacy(struct drm_property_blob *lut)
 {
-	return !state->degamma_lut &&
-		!state->ctm &&
-		state->gamma_lut &&
-		drm_color_lut_size(state->gamma_lut) == LEGACY_LUT_LENGTH;
+	return drm_color_lut_size(lut) == LEGACY_LUT_LENGTH;
+}
+
+static bool crtc_state_is_legacy_gamma(struct intel_crtc_state *crtc_state)
+{
+	return !crtc_state->base.degamma_lut &&
+	       !crtc_state->base.ctm &&
+	       crtc_state->base.gamma_lut &&
+	       lut_is_legacy(crtc_state->base.gamma_lut);
 }
 
 /*
@@ -108,10 +113,10 @@ static u64 *ctm_mult_by_limited(u64 *result, const u64 *input)
 	return result;
 }
 
-static void ilk_load_ycbcr_conversion_matrix(struct intel_crtc *intel_crtc)
+static void ilk_load_ycbcr_conversion_matrix(struct intel_crtc *crtc)
 {
-	int pipe = intel_crtc->pipe;
-	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+	int pipe = crtc->pipe;
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
 	I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
 	I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
@@ -132,14 +137,12 @@ static void ilk_load_ycbcr_conversion_matrix(struct intel_crtc *intel_crtc)
 	I915_WRITE(PIPE_CSC_MODE(pipe), 0);
 }
 
-static void ilk_load_csc_matrix(struct drm_crtc_state *crtc_state)
+static void ilk_load_csc_matrix(struct intel_crtc_state *crtc_state)
 {
-	struct drm_crtc *crtc = crtc_state->crtc;
-	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int i, pipe = intel_crtc->pipe;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	int i, pipe = crtc->pipe;
 	uint16_t coeffs[9] = { 0, };
-	struct intel_crtc_state *intel_crtc_state = to_intel_crtc_state(crtc_state);
 	bool limited_color_range = false;
 
 	/*
@@ -147,14 +150,14 @@ static void ilk_load_csc_matrix(struct drm_crtc_state *crtc_state)
 	 * do the range compression using the gamma LUT instead.
 	 */
 	if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv))
-		limited_color_range = intel_crtc_state->limited_color_range;
+		limited_color_range = crtc_state->limited_color_range;
 
-	if (intel_crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
-	    intel_crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) {
-		ilk_load_ycbcr_conversion_matrix(intel_crtc);
+	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
+	    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) {
+		ilk_load_ycbcr_conversion_matrix(crtc);
 		return;
-	} else if (crtc_state->ctm) {
-		struct drm_color_ctm *ctm = crtc_state->ctm->data;
+	} else if (crtc_state->base.ctm) {
+		struct drm_color_ctm *ctm = crtc_state->base.ctm->data;
 		const u64 *input;
 		u64 temp[9];
 
@@ -253,16 +256,15 @@
 /*
  * Set up the pipe CSC unit on CherryView.
  */
-static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
+static void cherryview_load_csc_matrix(struct intel_crtc_state *crtc_state)
 {
-	struct drm_crtc *crtc = state->crtc;
-	struct drm_device *dev = crtc->dev;
+	struct drm_device *dev = crtc_state->base.crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	int pipe = to_intel_crtc(crtc)->pipe;
+	int pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
 	uint32_t mode;
 
-	if (state->ctm) {
-		struct drm_color_ctm *ctm = state->ctm->data;
+	if (crtc_state->base.ctm) {
+		struct drm_color_ctm *ctm = crtc_state->base.ctm->data;
 		uint16_t coeffs[9] = { 0, };
 		int i;
 
@@ -293,17 +295,17 @@ static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
 		I915_WRITE(CGM_PIPE_CSC_COEFF8(pipe), coeffs[8]);
 	}
 
-	mode = (state->ctm ? CGM_PIPE_MODE_CSC : 0);
-	if (!crtc_state_is_legacy_gamma(state)) {
-		mode |= (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
-			(state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0);
+	mode = (crtc_state->base.ctm ? CGM_PIPE_MODE_CSC : 0);
+	if (!crtc_state_is_legacy_gamma(crtc_state)) {
+		mode |= (crtc_state->base.degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
+			(crtc_state->base.gamma_lut ? CGM_PIPE_MODE_GAMMA : 0);
 	}
 	I915_WRITE(CGM_PIPE_MODE(pipe), mode);
 }
 
-void intel_color_set_csc(struct drm_crtc_state *crtc_state)
+void intel_color_set_csc(struct intel_crtc_state *crtc_state)
 {
-	struct drm_device *dev = crtc_state->crtc->dev;
+	struct drm_device *dev = crtc_state->base.crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	if (dev_priv->display.load_csc_matrix)
@@ -311,14 +313,12 @@ void intel_color_set_csc(struct drm_crtc_state *crtc_state)
 }
 
 /* Loads the legacy palette/gamma unit for the CRTC. */
-static void i9xx_load_luts_internal(struct drm_crtc *crtc,
-				    struct drm_property_blob *blob,
-				    struct intel_crtc_state *crtc_state)
+static void i9xx_load_luts_internal(struct intel_crtc_state *crtc_state,
+				    struct drm_property_blob *blob)
 {
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	enum pipe pipe = intel_crtc->pipe;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum pipe pipe = crtc->pipe;
 	int i;
 
 	if (HAS_GMCH_DISPLAY(dev_priv)) {
@@ -353,53 +353,48 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
 	}
 }
 
-static void i9xx_load_luts(struct drm_crtc_state *crtc_state)
+static void i9xx_load_luts(struct intel_crtc_state *crtc_state)
 {
-	i9xx_load_luts_internal(crtc_state->crtc, crtc_state->gamma_lut,
-				to_intel_crtc_state(crtc_state));
+	i9xx_load_luts_internal(crtc_state, crtc_state->base.gamma_lut);
 }
 
 /* Loads the legacy palette/gamma unit for the CRTC on Haswell. */
-static void haswell_load_luts(struct drm_crtc_state *crtc_state)
+static void haswell_load_luts(struct intel_crtc_state *crtc_state)
 {
-	struct drm_crtc *crtc = crtc_state->crtc;
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_crtc_state *intel_crtc_state =
-		to_intel_crtc_state(crtc_state);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	bool reenable_ips = false;
 
 	/*
 	 * Workaround : Do not read or write the pipe palette/gamma data while
 	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
 	 */
-	if (IS_HASWELL(dev_priv) && intel_crtc_state->ips_enabled &&
-	    (intel_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)) {
-		hsw_disable_ips(intel_crtc_state);
+	if (IS_HASWELL(dev_priv) && crtc_state->ips_enabled &&
+	    (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)) {
+		hsw_disable_ips(crtc_state);
 		reenable_ips = true;
 	}
 
-	intel_crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT;
-	I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
+	crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT;
+	I915_WRITE(GAMMA_MODE(crtc->pipe), GAMMA_MODE_MODE_8BIT);
 
 	i9xx_load_luts(crtc_state);
 
 	if (reenable_ips)
-		hsw_enable_ips(intel_crtc_state);
+		hsw_enable_ips(crtc_state);
 }
 
-static void bdw_load_degamma_lut(struct drm_crtc_state *state)
+static void bdw_load_degamma_lut(struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(state->crtc->dev);
-	enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
 	uint32_t i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
 
 	I915_WRITE(PREC_PAL_INDEX(pipe),
 		   PAL_PREC_SPLIT_MODE | PAL_PREC_AUTO_INCREMENT);
 
-	if (state->degamma_lut) {
-		struct drm_color_lut *lut = state->degamma_lut->data;
+	if (crtc_state->base.degamma_lut) {
+		struct drm_color_lut *lut = crtc_state->base.degamma_lut->data;
 
 		for (i = 0; i < lut_size; i++) {
 			uint32_t word =
@@ -419,10 +414,10 @@ static void bdw_load_degamma_lut(struct drm_crtc_state *state)
 	}
 }
 
-static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset)
+static void bdw_load_gamma_lut(struct intel_crtc_state *crtc_state, u32 offset)
 {
-	struct drm_i915_private *dev_priv = to_i915(state->crtc->dev);
-	enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
 	uint32_t i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
 
 	WARN_ON(offset & ~PAL_PREC_INDEX_VALUE_MASK);
@@ -432,8 +427,8 @@ static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset)
 		   PAL_PREC_AUTO_INCREMENT |
 		   offset);
 
-	if (state->gamma_lut) {
-		struct drm_color_lut *lut = state->gamma_lut->data;
+	if (crtc_state->base.gamma_lut) {
+		struct drm_color_lut *lut = crtc_state->base.gamma_lut->data;
 
 		for (i = 0; i < lut_size; i++) {
 			uint32_t word =
@@ -467,22 +462,21 @@ static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset)
 }
 
 /* Loads the palette/gamma unit for the CRTC on Broadwell+. */
-static void broadwell_load_luts(struct drm_crtc_state *state)
+static void broadwell_load_luts(struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(state->crtc->dev);
-	struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
-	enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
 
-	if (crtc_state_is_legacy_gamma(state)) {
-		haswell_load_luts(state);
+	if (crtc_state_is_legacy_gamma(crtc_state)) {
+		haswell_load_luts(crtc_state);
 		return;
 	}
 
-	bdw_load_degamma_lut(state);
-	bdw_load_gamma_lut(state,
+	bdw_load_degamma_lut(crtc_state);
+	bdw_load_gamma_lut(crtc_state,
 			   INTEL_INFO(dev_priv)->color.degamma_lut_size);
 
-	intel_state->gamma_mode = GAMMA_MODE_MODE_SPLIT;
+	crtc_state->gamma_mode = GAMMA_MODE_MODE_SPLIT;
 	I915_WRITE(GAMMA_MODE(pipe), GAMMA_MODE_MODE_SPLIT);
 	POSTING_READ(GAMMA_MODE(pipe));
 
@@ -493,10 +487,10 @@ static void broadwell_load_luts(struct drm_crtc_state *state)
 	I915_WRITE(PREC_PAL_INDEX(pipe), 0);
 }
 
-static void glk_load_degamma_lut(struct drm_crtc_state *state)
+static void glk_load_degamma_lut(struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(state->crtc->dev);
-	enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
 	const uint32_t lut_size = 33;
 	uint32_t i;
 
@@ -523,49 +517,46 @@ static void glk_load_degamma_lut(struct drm_crtc_state *state)
 	I915_WRITE(PRE_CSC_GAMC_DATA(pipe), (1 << 16));
 }
 
-static void glk_load_luts(struct drm_crtc_state *state)
+static void glk_load_luts(struct intel_crtc_state *crtc_state)
 {
-	struct drm_crtc *crtc = state->crtc;
-	struct drm_device *dev = crtc->dev;
+	struct drm_device *dev = crtc_state->base.crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
-	enum pipe pipe = to_intel_crtc(crtc)->pipe;
+	enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
 
-	glk_load_degamma_lut(state);
+	glk_load_degamma_lut(crtc_state);
 
-	if (crtc_state_is_legacy_gamma(state)) {
-		haswell_load_luts(state);
+	if (crtc_state_is_legacy_gamma(crtc_state)) {
+		haswell_load_luts(crtc_state);
 		return;
 	}
 
-	bdw_load_gamma_lut(state, 0);
+	bdw_load_gamma_lut(crtc_state, 0);
 
-	intel_state->gamma_mode = GAMMA_MODE_MODE_10BIT;
+	crtc_state->gamma_mode = GAMMA_MODE_MODE_10BIT;
 	I915_WRITE(GAMMA_MODE(pipe), GAMMA_MODE_MODE_10BIT);
 	POSTING_READ(GAMMA_MODE(pipe));
 }
 
 /* Loads the palette/gamma unit for the CRTC on CherryView. */
-static void cherryview_load_luts(struct drm_crtc_state *state)
+static void cherryview_load_luts(struct intel_crtc_state *crtc_state)
 {
-	struct drm_crtc *crtc = state->crtc;
+	struct drm_crtc *crtc = crtc_state->base.crtc;
 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
 	struct drm_color_lut *lut;
 	uint32_t i, lut_size;
 	uint32_t word0, word1;
 
-	if (crtc_state_is_legacy_gamma(state)) {
+	if (crtc_state_is_legacy_gamma(crtc_state)) {
 		/* Turn off degamma/gamma on CGM block. */
 		I915_WRITE(CGM_PIPE_MODE(pipe),
-			   (state->ctm ? CGM_PIPE_MODE_CSC : 0));
-		i9xx_load_luts_internal(crtc, state->gamma_lut,
-					to_intel_crtc_state(state));
+			   (crtc_state->base.ctm ? CGM_PIPE_MODE_CSC : 0));
+		i9xx_load_luts_internal(crtc_state, crtc_state->base.gamma_lut);
 		return;
 	}
 
-	if (state->degamma_lut) {
-		lut = state->degamma_lut->data;
+	if (crtc_state->base.degamma_lut) {
+		lut = crtc_state->base.degamma_lut->data;
 		lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
 		for (i = 0; i < lut_size; i++) {
 			/* Write LUT in U0.14 format. */
@@ -579,8 +570,8 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
 		}
 	}
 
-	if (state->gamma_lut) {
-		lut = state->gamma_lut->data;
+	if (crtc_state->base.gamma_lut) {
+		lut = crtc_state->base.gamma_lut->data;
 		lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
 		for (i = 0; i < lut_size; i++) {
 			/* Write LUT in U0.10 format. */
@@ -595,29 +586,28 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
 	}
 
 	I915_WRITE(CGM_PIPE_MODE(pipe),
-		   (state->ctm ? CGM_PIPE_MODE_CSC : 0) |
-		   (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
-		   (state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0));
+		   (crtc_state->base.ctm ? CGM_PIPE_MODE_CSC : 0) |
+		   (crtc_state->base.degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
+		   (crtc_state->base.gamma_lut ? CGM_PIPE_MODE_GAMMA : 0));
 
 	/*
 	 * Also program a linear LUT in the legacy block (behind the
 	 * CGM block).
 	 */
-	i9xx_load_luts_internal(crtc, NULL, to_intel_crtc_state(state));
+	i9xx_load_luts_internal(crtc_state, NULL);
 }
 
-void intel_color_load_luts(struct drm_crtc_state *crtc_state)
+void intel_color_load_luts(struct intel_crtc_state *crtc_state)
 {
-	struct drm_device *dev = crtc_state->crtc->dev;
+	struct drm_device *dev = crtc_state->base.crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	dev_priv->display.load_luts(crtc_state);
 }
 
-int intel_color_check(struct drm_crtc *crtc,
-		      struct drm_crtc_state *crtc_state)
+int intel_color_check(struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
 	size_t gamma_length, degamma_length;
 
 	degamma_length = INTEL_INFO(dev_priv)->color.degamma_lut_size;
@@ -627,10 +617,10 @@ int intel_color_check(struct drm_crtc *crtc,
 	 * We allow both degamma & gamma luts at the right size or
 	 * NULL.
 	 */
-	if ((!crtc_state->degamma_lut ||
-	     drm_color_lut_size(crtc_state->degamma_lut) == degamma_length) &&
-	    (!crtc_state->gamma_lut ||
-	     drm_color_lut_size(crtc_state->gamma_lut) == gamma_length))
+	if ((!crtc_state->base.degamma_lut ||
+	     drm_color_lut_size(crtc_state->base.degamma_lut) == degamma_length) &&
+	    (!crtc_state->base.gamma_lut ||
+	     drm_color_lut_size(crtc_state->base.gamma_lut) == gamma_length))
 		return 0;
 
 	/*
@@ -643,11 +633,11 @@ int intel_color_check(struct drm_crtc *crtc,
 	return -EINVAL;
 }
 
-void intel_color_init(struct drm_crtc *crtc)
+void intel_color_init(struct intel_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
-	drm_mode_crtc_set_gamma_size(crtc, 256);
+	drm_mode_crtc_set_gamma_size(&crtc->base, 256);
 
 	if (IS_CHERRYVIEW(dev_priv)) {
 		dev_priv->display.load_csc_matrix = cherryview_load_csc_matrix;
@@ -669,7 +659,7 @@ void intel_color_init(struct drm_crtc *crtc)
 	/* Enable color management support when we have degamma & gamma LUTs. */
 	if (INTEL_INFO(dev_priv)->color.degamma_lut_size != 0 &&
 	    INTEL_INFO(dev_priv)->color.gamma_lut_size != 0)
-		drm_crtc_enable_color_mgmt(crtc,
+		drm_crtc_enable_color_mgmt(&crtc->base,
 					   INTEL_INFO(dev_priv)->color.degamma_lut_size,
 					   true,
 					   INTEL_INFO(dev_priv)->color.gamma_lut_size);
diff --git a/drivers/gpu/drm/i915/intel_connector.c b/drivers/gpu/drm/i915/intel_connector.c
index 37d2c644f4b8..ee16758747c5 100644
--- a/drivers/gpu/drm/i915/intel_connector.c
+++ b/drivers/gpu/drm/i915/intel_connector.c
@@ -27,7 +27,6 @@
 #include <linux/i2c.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_edid.h>
-#include <drm/drmP.h>
 #include "intel_drv.h"
 #include "i915_drv.h"
 
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 2e0fd9927db2..e73458f693a5 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -27,7 +27,6 @@
 #include <linux/dmi.h>
 #include <linux/i2c.h>
 #include <linux/slab.h>
-#include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
@@ -322,7 +321,7 @@ intel_crt_mode_valid(struct drm_connector *connector,
 		 * DAC limit supposedly 355 MHz.
 		 */
 		max_clock = 270000;
-	else if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv))
+	else if (IS_GEN_RANGE(dev_priv, 3, 4))
 		max_clock = 400000;
 	else
 		max_clock = 350000;
@@ -667,7 +666,7 @@ intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe)
 	/* Set the border color to purple. */
 	I915_WRITE(bclrpat_reg, 0x500050);
 
-	if (!IS_GEN2(dev_priv)) {
+	if (!IS_GEN(dev_priv, 2)) {
 		uint32_t pipeconf = I915_READ(pipeconf_reg);
 		I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER);
 		POSTING_READ(pipeconf_reg);
@@ -982,7 +981,7 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
 	else
 		crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 
-	if (IS_GEN2(dev_priv))
+	if (IS_GEN(dev_priv, 2))
 		connector->interlace_allowed = 0;
 	else
 		connector->interlace_allowed = 1;
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index fd06d1fd39d3..b1ac89b514c1 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -494,103 +494,58 @@ static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_1_05V[] = {
494 { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */ 494 { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
495}; 495};
496 496
497struct icl_combo_phy_ddi_buf_trans { 497/* icl_combo_phy_ddi_translations */
498 u32 dw2_swing_select; 498static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hbr2[] = {
499 u32 dw2_swing_scalar; 499 /* NT mV Trans mV db */
500 u32 dw4_scaling; 500 { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
501}; 501 { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */
502 502 { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
503/* Voltage Swing Programming for VccIO 0.85V for DP */ 503 { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */
504static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_0_85V[] = { 504 { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
505 /* Voltage mV db */ 505 { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */
506 { 0x2, 0x98, 0x0018 }, /* 400 0.0 */ 506 { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
507 { 0x2, 0x98, 0x3015 }, /* 400 3.5 */ 507 { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */
508 { 0x2, 0x98, 0x6012 }, /* 400 6.0 */ 508 { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
509 { 0x2, 0x98, 0x900F }, /* 400 9.5 */ 509 { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
510 { 0xB, 0x70, 0x0018 }, /* 600 0.0 */
511 { 0xB, 0x70, 0x3015 }, /* 600 3.5 */
512 { 0xB, 0x70, 0x6012 }, /* 600 6.0 */
513 { 0x5, 0x00, 0x0018 }, /* 800 0.0 */
514 { 0x5, 0x00, 0x3015 }, /* 800 3.5 */
515 { 0x6, 0x98, 0x0018 }, /* 1200 0.0 */
516};
517
518/* FIXME - After table is updated in Bspec */
519/* Voltage Swing Programming for VccIO 0.85V for eDP */
520static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_0_85V[] = {
521 /* Voltage mV db */
522 { 0x0, 0x00, 0x00 }, /* 200 0.0 */
523 { 0x0, 0x00, 0x00 }, /* 200 1.5 */
524 { 0x0, 0x00, 0x00 }, /* 200 4.0 */
525 { 0x0, 0x00, 0x00 }, /* 200 6.0 */
526 { 0x0, 0x00, 0x00 }, /* 250 0.0 */
527 { 0x0, 0x00, 0x00 }, /* 250 1.5 */
528 { 0x0, 0x00, 0x00 }, /* 250 4.0 */
529 { 0x0, 0x00, 0x00 }, /* 300 0.0 */
530 { 0x0, 0x00, 0x00 }, /* 300 1.5 */
531 { 0x0, 0x00, 0x00 }, /* 350 0.0 */
532};
533
534/* Voltage Swing Programming for VccIO 0.95V for DP */
535static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_0_95V[] = {
536 /* Voltage mV db */
537 { 0x2, 0x98, 0x0018 }, /* 400 0.0 */
538 { 0x2, 0x98, 0x3015 }, /* 400 3.5 */
539 { 0x2, 0x98, 0x6012 }, /* 400 6.0 */
540 { 0x2, 0x98, 0x900F }, /* 400 9.5 */
541 { 0x4, 0x98, 0x0018 }, /* 600 0.0 */
542 { 0x4, 0x98, 0x3015 }, /* 600 3.5 */
543 { 0x4, 0x98, 0x6012 }, /* 600 6.0 */
544 { 0x5, 0x76, 0x0018 }, /* 800 0.0 */
545 { 0x5, 0x76, 0x3015 }, /* 800 3.5 */
546 { 0x6, 0x98, 0x0018 }, /* 1200 0.0 */
547}; 510};
548 511
549/* FIXME - After table is updated in Bspec */ 512static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr2[] = {
550/* Voltage Swing Programming for VccIO 0.95V for eDP */ 513 /* NT mV Trans mV db */
551static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_0_95V[] = { 514 { 0x0, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 200 0.0 */
552 /* Voltage mV db */ 515 { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 200 250 1.9 */
553 { 0x0, 0x00, 0x00 }, /* 200 0.0 */ 516 { 0x1, 0x7F, 0x33, 0x00, 0x0C }, /* 200 300 3.5 */
554 { 0x0, 0x00, 0x00 }, /* 200 1.5 */ 517 { 0x9, 0x7F, 0x31, 0x00, 0x0E }, /* 200 350 4.9 */
555 { 0x0, 0x00, 0x00 }, /* 200 4.0 */ 518 { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 250 0.0 */
556 { 0x0, 0x00, 0x00 }, /* 200 6.0 */ 519 { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 250 300 1.6 */
557 { 0x0, 0x00, 0x00 }, /* 250 0.0 */ 520 { 0x9, 0x7F, 0x35, 0x00, 0x0A }, /* 250 350 2.9 */
558 { 0x0, 0x00, 0x00 }, /* 250 1.5 */ 521 { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 300 300 0.0 */
559 { 0x0, 0x00, 0x00 }, /* 250 4.0 */ 522 { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 300 350 1.3 */
560 { 0x0, 0x00, 0x00 }, /* 300 0.0 */ 523 { 0x9, 0x7F, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
561 { 0x0, 0x00, 0x00 }, /* 300 1.5 */
562 { 0x0, 0x00, 0x00 }, /* 350 0.0 */
563}; 524};
564 525
565/* Voltage Swing Programming for VccIO 1.05V for DP */ 526static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr3[] = {
566static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_1_05V[] = { 527 /* NT mV Trans mV db */
567 /* Voltage mV db */ 528 { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
568 { 0x2, 0x98, 0x0018 }, /* 400 0.0 */ 529 { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */
569 { 0x2, 0x98, 0x3015 }, /* 400 3.5 */ 530 { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
570 { 0x2, 0x98, 0x6012 }, /* 400 6.0 */ 531 { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */
571 { 0x2, 0x98, 0x900F }, /* 400 9.5 */ 532 { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
572 { 0x4, 0x98, 0x0018 }, /* 600 0.0 */ 533 { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */
573 { 0x4, 0x98, 0x3015 }, /* 600 3.5 */ 534 { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
574 { 0x4, 0x98, 0x6012 }, /* 600 6.0 */ 535 { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */
575 { 0x5, 0x71, 0x0018 }, /* 800 0.0 */ 536 { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
576 { 0x5, 0x71, 0x3015 }, /* 800 3.5 */ 537 { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
577 { 0x6, 0x98, 0x0018 }, /* 1200 0.0 */
578}; 538};
579 539
580/* FIXME - After table is updated in Bspec */ 540static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_hdmi[] = {
581/* Voltage Swing Programming for VccIO 1.05V for eDP */ 541 /* NT mV Trans mV db */
582static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_1_05V[] = { 542 { 0xA, 0x60, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */
583 /* Voltage mV db */ 543 { 0xB, 0x73, 0x36, 0x00, 0x09 }, /* 450 650 3.2 */
584 { 0x0, 0x00, 0x00 }, /* 200 0.0 */ 544 { 0x6, 0x7F, 0x31, 0x00, 0x0E }, /* 450 850 5.5 */
585 { 0x0, 0x00, 0x00 }, /* 200 1.5 */ 545 { 0xB, 0x73, 0x3F, 0x00, 0x00 }, /* 650 650 0.0 ALS */
586 { 0x0, 0x00, 0x00 }, /* 200 4.0 */ 546 { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 650 850 2.3 */
587 { 0x0, 0x00, 0x00 }, /* 200 6.0 */ 547 { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 850 850 0.0 */
588 { 0x0, 0x00, 0x00 }, /* 250 0.0 */ 548 { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */
589 { 0x0, 0x00, 0x00 }, /* 250 1.5 */
590 { 0x0, 0x00, 0x00 }, /* 250 4.0 */
591 { 0x0, 0x00, 0x00 }, /* 300 0.0 */
592 { 0x0, 0x00, 0x00 }, /* 300 1.5 */
593 { 0x0, 0x00, 0x00 }, /* 350 0.0 */
594}; 549};
595 550
596struct icl_mg_phy_ddi_buf_trans { 551struct icl_mg_phy_ddi_buf_trans {
@@ -871,43 +826,23 @@ cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
871 } 826 }
872} 827}
873 828
874static const struct icl_combo_phy_ddi_buf_trans * 829static const struct cnl_ddi_buf_trans *
875icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, enum port port, 830icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, enum port port,
876 int type, int *n_entries) 831 int type, int rate, int *n_entries)
877{ 832{
878 u32 voltage = I915_READ(ICL_PORT_COMP_DW3(port)) & VOLTAGE_INFO_MASK; 833 if (type == INTEL_OUTPUT_HDMI) {
879 834 *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi);
880 if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) { 835 return icl_combo_phy_ddi_translations_hdmi;
881 switch (voltage) { 836 } else if (rate > 540000 && type == INTEL_OUTPUT_EDP) {
882 case VOLTAGE_INFO_0_85V: 837 *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr3);
883 *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_0_85V); 838 return icl_combo_phy_ddi_translations_edp_hbr3;
884 return icl_combo_phy_ddi_translations_edp_0_85V; 839 } else if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) {
885 case VOLTAGE_INFO_0_95V: 840 *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2);
886 *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_0_95V); 841 return icl_combo_phy_ddi_translations_edp_hbr2;
887 return icl_combo_phy_ddi_translations_edp_0_95V;
888 case VOLTAGE_INFO_1_05V:
889 *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_1_05V);
890 return icl_combo_phy_ddi_translations_edp_1_05V;
891 default:
892 MISSING_CASE(voltage);
893 return NULL;
894 }
895 } else {
896 switch (voltage) {
897 case VOLTAGE_INFO_0_85V:
898 *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_0_85V);
899 return icl_combo_phy_ddi_translations_dp_hdmi_0_85V;
900 case VOLTAGE_INFO_0_95V:
901 *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_0_95V);
902 return icl_combo_phy_ddi_translations_dp_hdmi_0_95V;
903 case VOLTAGE_INFO_1_05V:
904 *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_1_05V);
905 return icl_combo_phy_ddi_translations_dp_hdmi_1_05V;
906 default:
907 MISSING_CASE(voltage);
908 return NULL;
909 }
910 } 842 }
843
844 *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hbr2);
845 return icl_combo_phy_ddi_translations_dp_hbr2;
911} 846}
912 847
913static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port) 848static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port)
@@ -918,8 +853,8 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por
918 853
919 if (IS_ICELAKE(dev_priv)) { 854 if (IS_ICELAKE(dev_priv)) {
920 if (intel_port_is_combophy(dev_priv, port)) 855 if (intel_port_is_combophy(dev_priv, port))
921 icl_get_combo_buf_trans(dev_priv, port, 856 icl_get_combo_buf_trans(dev_priv, port, INTEL_OUTPUT_HDMI,
922 INTEL_OUTPUT_HDMI, &n_entries); 857 0, &n_entries);
923 else 858 else
924 n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations); 859 n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
925 default_entry = n_entries - 1; 860 default_entry = n_entries - 1;
@@ -1361,6 +1296,9 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
1361 dco_freq += (((cfgcr1_val & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) * 24 * 1296 dco_freq += (((cfgcr1_val & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) * 24 *
1362 1000) / 0x8000; 1297 1000) / 0x8000;
1363 1298
1299 if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0))
1300 return 0;
1301
1364 return dco_freq / (p0 * p1 * p2 * 5); 1302 return dco_freq / (p0 * p1 * p2 * 5);
1365} 1303}
1366 1304
@@ -1880,7 +1818,7 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
1880 temp |= TRANS_DDI_MODE_SELECT_DVI; 1818 temp |= TRANS_DDI_MODE_SELECT_DVI;
1881 1819
1882 if (crtc_state->hdmi_scrambling) 1820 if (crtc_state->hdmi_scrambling)
1883 temp |= TRANS_DDI_HDMI_SCRAMBLING_MASK; 1821 temp |= TRANS_DDI_HDMI_SCRAMBLING;
1884 if (crtc_state->hdmi_high_tmds_clock_ratio) 1822 if (crtc_state->hdmi_high_tmds_clock_ratio)
1885 temp |= TRANS_DDI_HIGH_TMDS_CHAR_RATE; 1823 temp |= TRANS_DDI_HIGH_TMDS_CHAR_RATE;
1886 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) { 1824 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
@@ -2275,13 +2213,14 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder,
2275u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder) 2213u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
2276{ 2214{
2277 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2215 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2216 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2278 enum port port = encoder->port; 2217 enum port port = encoder->port;
2279 int n_entries; 2218 int n_entries;
2280 2219
2281 if (IS_ICELAKE(dev_priv)) { 2220 if (IS_ICELAKE(dev_priv)) {
2282 if (intel_port_is_combophy(dev_priv, port)) 2221 if (intel_port_is_combophy(dev_priv, port))
2283 icl_get_combo_buf_trans(dev_priv, port, encoder->type, 2222 icl_get_combo_buf_trans(dev_priv, port, encoder->type,
2284 &n_entries); 2223 intel_dp->link_rate, &n_entries);
2285 else 2224 else
2286 n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations); 2225 n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
2287 } else if (IS_CANNONLAKE(dev_priv)) { 2226 } else if (IS_CANNONLAKE(dev_priv)) {
@@ -2462,14 +2401,15 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
2462} 2401}
2463 2402
2464static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv, 2403static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
2465 u32 level, enum port port, int type) 2404 u32 level, enum port port, int type,
2405 int rate)
2466{ 2406{
2467 const struct icl_combo_phy_ddi_buf_trans *ddi_translations = NULL; 2407 const struct cnl_ddi_buf_trans *ddi_translations = NULL;
2468 u32 n_entries, val; 2408 u32 n_entries, val;
2469 int ln; 2409 int ln;
2470 2410
2471 ddi_translations = icl_get_combo_buf_trans(dev_priv, port, type, 2411 ddi_translations = icl_get_combo_buf_trans(dev_priv, port, type,
2472 &n_entries); 2412 rate, &n_entries);
2473 if (!ddi_translations) 2413 if (!ddi_translations)
2474 return; 2414 return;
2475 2415
@@ -2478,34 +2418,23 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
2478 level = n_entries - 1; 2418 level = n_entries - 1;
2479 } 2419 }
2480 2420
2481 /* Set PORT_TX_DW5 Rterm Sel to 110b. */ 2421 /* Set PORT_TX_DW5 */
2482 val = I915_READ(ICL_PORT_TX_DW5_LN0(port)); 2422 val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
2483 val &= ~RTERM_SELECT_MASK; 2423 val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK |
2424 TAP2_DISABLE | TAP3_DISABLE);
2425 val |= SCALING_MODE_SEL(0x2);
2484 val |= RTERM_SELECT(0x6); 2426 val |= RTERM_SELECT(0x6);
2485 I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); 2427 val |= TAP3_DISABLE;
2486
2487 /* Program PORT_TX_DW5 */
2488 val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
2489 /* Set DisableTap2 and DisableTap3 if MIPI DSI
2490 * Clear DisableTap2 and DisableTap3 for all other Ports
2491 */
2492 if (type == INTEL_OUTPUT_DSI) {
2493 val |= TAP2_DISABLE;
2494 val |= TAP3_DISABLE;
2495 } else {
2496 val &= ~TAP2_DISABLE;
2497 val &= ~TAP3_DISABLE;
2498 }
2499 I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); 2428 I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
2500 2429
2501 /* Program PORT_TX_DW2 */ 2430 /* Program PORT_TX_DW2 */
2502 val = I915_READ(ICL_PORT_TX_DW2_LN0(port)); 2431 val = I915_READ(ICL_PORT_TX_DW2_LN0(port));
2503 val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | 2432 val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
2504 RCOMP_SCALAR_MASK); 2433 RCOMP_SCALAR_MASK);
2505 val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_select); 2434 val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel);
2506 val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_select); 2435 val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel);
2507 /* Program Rcomp scalar for every table entry */ 2436 /* Program Rcomp scalar for every table entry */
2508 val |= RCOMP_SCALAR(ddi_translations[level].dw2_swing_scalar); 2437 val |= RCOMP_SCALAR(0x98);
2509 I915_WRITE(ICL_PORT_TX_DW2_GRP(port), val); 2438 I915_WRITE(ICL_PORT_TX_DW2_GRP(port), val);
2510 2439
2511 /* Program PORT_TX_DW4 */ 2440 /* Program PORT_TX_DW4 */
@@ -2514,9 +2443,17 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
2514 val = I915_READ(ICL_PORT_TX_DW4_LN(port, ln)); 2443 val = I915_READ(ICL_PORT_TX_DW4_LN(port, ln));
2515 val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | 2444 val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
2516 CURSOR_COEFF_MASK); 2445 CURSOR_COEFF_MASK);
2517 val |= ddi_translations[level].dw4_scaling; 2446 val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1);
2447 val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2);
2448 val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff);
2518 I915_WRITE(ICL_PORT_TX_DW4_LN(port, ln), val); 2449 I915_WRITE(ICL_PORT_TX_DW4_LN(port, ln), val);
2519 } 2450 }
2451
2452 /* Program PORT_TX_DW7 */
2453 val = I915_READ(ICL_PORT_TX_DW7_LN0(port));
2454 val &= ~N_SCALAR_MASK;
2455 val |= N_SCALAR(ddi_translations[level].dw7_n_scalar);
2456 I915_WRITE(ICL_PORT_TX_DW7_GRP(port), val);
2520} 2457}
2521 2458
2522static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, 2459static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
@@ -2581,7 +2518,7 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
2581 I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); 2518 I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
2582 2519
2583 /* 5. Program swing and de-emphasis */ 2520 /* 5. Program swing and de-emphasis */
2584 icl_ddi_combo_vswing_program(dev_priv, level, port, type); 2521 icl_ddi_combo_vswing_program(dev_priv, level, port, type, rate);
2585 2522
2586 /* 6. Set training enable to trigger update */ 2523 /* 6. Set training enable to trigger update */
2587 val = I915_READ(ICL_PORT_TX_DW5_LN0(port)); 2524 val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
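
Throughout this sequence the driver reads a field from the lane-0 register (_LN0) and writes the merged value back through the group address (_GRP) so every lane updates at once. A generic sketch of that read-modify-write pattern against a fake MMIO array (stand-ins for the driver's I915_READ/I915_WRITE helpers):

    #include <stdint.h>

    static uint32_t fake_mmio[0x1000];  /* stand-in register file */

    static uint32_t mmio_read(uint32_t reg)
    {
            return fake_mmio[reg & 0xfff];
    }

    static void mmio_write(uint32_t reg, uint32_t val)
    {
            fake_mmio[reg & 0xfff] = val;
    }

    /* Read the current value from lane 0, replace one bitfield, then
     * broadcast through the group address so all lanes pick it up. */
    static void rmw_group(uint32_t ln0_reg, uint32_t grp_reg,
                          uint32_t mask, uint32_t bits)
    {
            uint32_t val = mmio_read(ln0_reg);

            val &= ~mask;
            val |= bits;
            mmio_write(grp_reg, val);
    }
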
@@ -3603,6 +3540,24 @@ static void intel_disable_ddi(struct intel_encoder *encoder,
3603 intel_disable_ddi_dp(encoder, old_crtc_state, old_conn_state); 3540 intel_disable_ddi_dp(encoder, old_crtc_state, old_conn_state);
3604} 3541}
3605 3542
3543static void intel_ddi_update_pipe_dp(struct intel_encoder *encoder,
3544 const struct intel_crtc_state *crtc_state,
3545 const struct drm_connector_state *conn_state)
3546{
3547 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3548
3549 intel_psr_enable(intel_dp, crtc_state);
3550 intel_edp_drrs_enable(intel_dp, crtc_state);
3551}
3552
3553static void intel_ddi_update_pipe(struct intel_encoder *encoder,
3554 const struct intel_crtc_state *crtc_state,
3555 const struct drm_connector_state *conn_state)
3556{
3557 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
3558 intel_ddi_update_pipe_dp(encoder, crtc_state, conn_state);
3559}
3560
3606static void intel_ddi_set_fia_lane_count(struct intel_encoder *encoder, 3561static void intel_ddi_set_fia_lane_count(struct intel_encoder *encoder,
3607 const struct intel_crtc_state *pipe_config, 3562 const struct intel_crtc_state *pipe_config,
3608 enum port port) 3563 enum port port)
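
The new update_pipe hook gives encoders a callback for "fastset" updates, where the pipe is reprogrammed without a full modeset; only the DP path has anything to refresh here (PSR and eDP DRRS), so HDMI crtcs are filtered out. A slimmed-down sketch of the dispatch, with invented types:

    #include <stdbool.h>
    #include <stdio.h>

    /* Invented type; the real hook also receives the encoder and
     * connector state. */
    struct crtc_state_sketch { bool is_hdmi; };

    static void update_pipe_dp(const struct crtc_state_sketch *state)
    {
            /* In the driver this re-enables PSR and eDP DRRS against
             * the new crtc state. */
            printf("refresh DP-only features for a fastset\n");
    }

    static void update_pipe(const struct crtc_state_sketch *state)
    {
            if (!state->is_hdmi)
                    update_pipe_dp(state);
    }
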
@@ -3793,8 +3748,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
3793 if (intel_dig_port->infoframe_enabled(encoder, pipe_config)) 3748 if (intel_dig_port->infoframe_enabled(encoder, pipe_config))
3794 pipe_config->has_infoframe = true; 3749 pipe_config->has_infoframe = true;
3795 3750
3796 if ((temp & TRANS_DDI_HDMI_SCRAMBLING_MASK) == 3751 if (temp & TRANS_DDI_HDMI_SCRAMBLING)
3797 TRANS_DDI_HDMI_SCRAMBLING_MASK)
3798 pipe_config->hdmi_scrambling = true; 3752 pipe_config->hdmi_scrambling = true;
3799 if (temp & TRANS_DDI_HIGH_TMDS_CHAR_RATE) 3753 if (temp & TRANS_DDI_HIGH_TMDS_CHAR_RATE)
3800 pipe_config->hdmi_high_tmds_clock_ratio = true; 3754 pipe_config->hdmi_high_tmds_clock_ratio = true;
@@ -3901,9 +3855,50 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
3901 3855
3902} 3856}
3903 3857
3858static void intel_ddi_encoder_suspend(struct intel_encoder *encoder)
3859{
3860 struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
3861 struct drm_i915_private *i915 = to_i915(encoder->base.dev);
3862
3863 intel_dp_encoder_suspend(encoder);
3864
3865 /*
3866 * TODO: disconnect also from USB DP alternate mode once we have a
3867 * way to handle the modeset restore in that mode during resume
3868 * even if the sink has disappeared while being suspended.
3869 */
3870 if (dig_port->tc_legacy_port)
3871 icl_tc_phy_disconnect(i915, dig_port);
3872}
3873
3874static void intel_ddi_encoder_reset(struct drm_encoder *drm_encoder)
3875{
3876 struct intel_digital_port *dig_port = enc_to_dig_port(drm_encoder);
3877 struct drm_i915_private *i915 = to_i915(drm_encoder->dev);
3878
3879 if (intel_port_is_tc(i915, dig_port->base.port))
3880 intel_digital_port_connected(&dig_port->base);
3881
3882 intel_dp_encoder_reset(drm_encoder);
3883}
3884
3885static void intel_ddi_encoder_destroy(struct drm_encoder *encoder)
3886{
3887 struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
3888 struct drm_i915_private *i915 = to_i915(encoder->dev);
3889
3890 intel_dp_encoder_flush_work(encoder);
3891
3892 if (intel_port_is_tc(i915, dig_port->base.port))
3893 icl_tc_phy_disconnect(i915, dig_port);
3894
3895 drm_encoder_cleanup(encoder);
3896 kfree(dig_port);
3897}
3898
3904static const struct drm_encoder_funcs intel_ddi_funcs = { 3899static const struct drm_encoder_funcs intel_ddi_funcs = {
3905 .reset = intel_dp_encoder_reset, 3900 .reset = intel_ddi_encoder_reset,
3906 .destroy = intel_dp_encoder_destroy, 3901 .destroy = intel_ddi_encoder_destroy,
3907}; 3902};
3908 3903
3909static struct intel_connector * 3904static struct intel_connector *
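
intel_ddi_encoder_reset/destroy wrap the generic DP handlers with the extra TypeC PHY work, and the vtable swap at the end of the hunk is what routes callers to the wrappers. The pattern in miniature, with invented names:

    struct encoder_sketch;

    static void dp_encoder_reset(struct encoder_sketch *enc) { (void)enc; }
    static void tc_phy_reconnect(struct encoder_sketch *enc) { (void)enc; }

    /* The wrapper owns the TypeC-specific step, then delegates to
     * the shared DP handler; the vtable entry points at the wrapper. */
    static void ddi_encoder_reset(struct encoder_sketch *enc)
    {
            tc_phy_reconnect(enc);
            dp_encoder_reset(enc);
    }

    struct encoder_funcs_sketch {
            void (*reset)(struct encoder_sketch *);
    };

    static const struct encoder_funcs_sketch ddi_funcs_sketch = {
            .reset = ddi_encoder_reset,
    };
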
@@ -4147,16 +4142,16 @@ intel_ddi_max_lanes(struct intel_digital_port *intel_dport)
4147 4142
4148void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) 4143void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
4149{ 4144{
4145 struct ddi_vbt_port_info *port_info =
4146 &dev_priv->vbt.ddi_port_info[port];
4150 struct intel_digital_port *intel_dig_port; 4147 struct intel_digital_port *intel_dig_port;
4151 struct intel_encoder *intel_encoder; 4148 struct intel_encoder *intel_encoder;
4152 struct drm_encoder *encoder; 4149 struct drm_encoder *encoder;
4153 bool init_hdmi, init_dp, init_lspcon = false; 4150 bool init_hdmi, init_dp, init_lspcon = false;
4154 enum pipe pipe; 4151 enum pipe pipe;
4155 4152
4156 4153 init_hdmi = port_info->supports_dvi || port_info->supports_hdmi;
4157 init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi || 4154 init_dp = port_info->supports_dp;
4158 dev_priv->vbt.ddi_port_info[port].supports_hdmi);
4159 init_dp = dev_priv->vbt.ddi_port_info[port].supports_dp;
4160 4155
4161 if (intel_bios_is_lspcon_present(dev_priv, port)) { 4156 if (intel_bios_is_lspcon_present(dev_priv, port)) {
4162 /* 4157 /*
@@ -4195,9 +4190,10 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
4195 intel_encoder->pre_enable = intel_ddi_pre_enable; 4190 intel_encoder->pre_enable = intel_ddi_pre_enable;
4196 intel_encoder->disable = intel_disable_ddi; 4191 intel_encoder->disable = intel_disable_ddi;
4197 intel_encoder->post_disable = intel_ddi_post_disable; 4192 intel_encoder->post_disable = intel_ddi_post_disable;
4193 intel_encoder->update_pipe = intel_ddi_update_pipe;
4198 intel_encoder->get_hw_state = intel_ddi_get_hw_state; 4194 intel_encoder->get_hw_state = intel_ddi_get_hw_state;
4199 intel_encoder->get_config = intel_ddi_get_config; 4195 intel_encoder->get_config = intel_ddi_get_config;
4200 intel_encoder->suspend = intel_dp_encoder_suspend; 4196 intel_encoder->suspend = intel_ddi_encoder_suspend;
4201 intel_encoder->get_power_domains = intel_ddi_get_power_domains; 4197 intel_encoder->get_power_domains = intel_ddi_get_power_domains;
4202 intel_encoder->type = INTEL_OUTPUT_DDI; 4198 intel_encoder->type = INTEL_OUTPUT_DDI;
4203 intel_encoder->power_domain = intel_port_to_power_domain(port); 4199 intel_encoder->power_domain = intel_port_to_power_domain(port);
@@ -4216,6 +4212,10 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
4216 intel_dig_port->max_lanes = intel_ddi_max_lanes(intel_dig_port); 4212 intel_dig_port->max_lanes = intel_ddi_max_lanes(intel_dig_port);
4217 intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port); 4213 intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
4218 4214
4215 intel_dig_port->tc_legacy_port = intel_port_is_tc(dev_priv, port) &&
4216 !port_info->supports_typec_usb &&
4217 !port_info->supports_tbt;
4218
4219 switch (port) { 4219 switch (port) {
4220 case PORT_A: 4220 case PORT_A:
4221 intel_dig_port->ddi_io_power_domain = 4221 intel_dig_port->ddi_io_power_domain =
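
The tc_legacy_port assignment above encodes a simple rule: a TypeC-capable port whose VBT entry advertises neither USB-C nor Thunderbolt is assumed to be wired as a fixed "legacy" connector. The same derivation as a standalone helper (field names copied from the hunk, surrounding types invented):

    #include <stdbool.h>

    struct ddi_vbt_port_info_sketch {
            bool supports_typec_usb;
            bool supports_tbt;
    };

    /* A TypeC port with neither USB-C nor Thunderbolt in the VBT is
     * treated as a fixed "legacy" connector. */
    static bool is_tc_legacy(bool port_is_tc,
                             const struct ddi_vbt_port_info_sketch *info)
    {
            return port_is_tc &&
                   !info->supports_typec_usb &&
                   !info->supports_tbt;
    }
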
@@ -4274,6 +4274,10 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
4274 } 4274 }
4275 4275
4276 intel_infoframe_init(intel_dig_port); 4276 intel_infoframe_init(intel_dig_port);
4277
4278 if (intel_port_is_tc(dev_priv, port))
4279 intel_digital_port_connected(intel_encoder);
4280
4277 return; 4281 return;
4278 4282
4279err: 4283err:
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 1e56319334f3..855a5074ad77 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -104,7 +104,7 @@ static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
104 drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg)); 104 drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg));
105} 105}
106 106
107void intel_device_info_dump_runtime(const struct intel_device_info *info, 107void intel_device_info_dump_runtime(const struct intel_runtime_info *info,
108 struct drm_printer *p) 108 struct drm_printer *p)
109{ 109{
110 sseu_dump(&info->sseu, p); 110 sseu_dump(&info->sseu, p);
@@ -113,21 +113,6 @@ void intel_device_info_dump_runtime(const struct intel_device_info *info,
113 info->cs_timestamp_frequency_khz); 113 info->cs_timestamp_frequency_khz);
114} 114}
115 115
116void intel_device_info_dump(const struct intel_device_info *info,
117 struct drm_printer *p)
118{
119 struct drm_i915_private *dev_priv =
120 container_of(info, struct drm_i915_private, info);
121
122 drm_printf(p, "pciid=0x%04x rev=0x%02x platform=%s gen=%i\n",
123 INTEL_DEVID(dev_priv),
124 INTEL_REVID(dev_priv),
125 intel_platform_name(info->platform),
126 info->gen);
127
128 intel_device_info_dump_flags(info, p);
129}
130
131void intel_device_info_dump_topology(const struct sseu_dev_info *sseu, 116void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
132 struct drm_printer *p) 117 struct drm_printer *p)
133{ 118{
@@ -164,7 +149,7 @@ static u16 compute_eu_total(const struct sseu_dev_info *sseu)
164 149
165static void gen11_sseu_info_init(struct drm_i915_private *dev_priv) 150static void gen11_sseu_info_init(struct drm_i915_private *dev_priv)
166{ 151{
167 struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu; 152 struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
168 u8 s_en; 153 u8 s_en;
169 u32 ss_en, ss_en_mask; 154 u32 ss_en, ss_en_mask;
170 u8 eu_en; 155 u8 eu_en;
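
This is the first of many conversions from mkwrite_device_info(dev_priv)->sseu to RUNTIME_INFO(dev_priv)->sseu: fields probed at load time move out of the mostly-const device info into a runtime block with its own accessor. A guess at the accessor shapes, for orientation only (the real definitions live in i915_drv.h):

    /* Assumed layout, illustration only: the i915 private struct
     * embeds both blocks and the macros take their addresses. */
    struct device_info_sketch  { unsigned char gen, gt; };
    struct runtime_info_sketch { unsigned char num_rings; };

    struct i915_sketch {
            struct device_info_sketch  info;         /* PCI-ID tables */
            struct runtime_info_sketch runtime_info; /* set at probe  */
    };

    #define INTEL_INFO_SKETCH(i915)   (&(i915)->info)
    #define RUNTIME_INFO_SKETCH(i915) (&(i915)->runtime_info)
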
@@ -203,7 +188,7 @@ static void gen11_sseu_info_init(struct drm_i915_private *dev_priv)
203 188
204static void gen10_sseu_info_init(struct drm_i915_private *dev_priv) 189static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
205{ 190{
206 struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu; 191 struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
207 const u32 fuse2 = I915_READ(GEN8_FUSE2); 192 const u32 fuse2 = I915_READ(GEN8_FUSE2);
208 int s, ss; 193 int s, ss;
209 const int eu_mask = 0xff; 194 const int eu_mask = 0xff;
@@ -280,7 +265,7 @@ static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
280 265
281static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv) 266static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
282{ 267{
283 struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu; 268 struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
284 u32 fuse; 269 u32 fuse;
285 270
286 fuse = I915_READ(CHV_FUSE_GT); 271 fuse = I915_READ(CHV_FUSE_GT);
@@ -334,7 +319,7 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
334static void gen9_sseu_info_init(struct drm_i915_private *dev_priv) 319static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
335{ 320{
336 struct intel_device_info *info = mkwrite_device_info(dev_priv); 321 struct intel_device_info *info = mkwrite_device_info(dev_priv);
337 struct sseu_dev_info *sseu = &info->sseu; 322 struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
338 int s, ss; 323 int s, ss;
339 u32 fuse2, eu_disable, subslice_mask; 324 u32 fuse2, eu_disable, subslice_mask;
340 const u8 eu_mask = 0xff; 325 const u8 eu_mask = 0xff;
@@ -437,7 +422,7 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
437 422
438static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv) 423static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
439{ 424{
440 struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu; 425 struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
441 int s, ss; 426 int s, ss;
442 u32 fuse2, subslice_mask, eu_disable[3]; /* s_max */ 427 u32 fuse2, subslice_mask, eu_disable[3]; /* s_max */
443 428
@@ -519,8 +504,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
519 504
520static void haswell_sseu_info_init(struct drm_i915_private *dev_priv) 505static void haswell_sseu_info_init(struct drm_i915_private *dev_priv)
521{ 506{
522 struct intel_device_info *info = mkwrite_device_info(dev_priv); 507 struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
523 struct sseu_dev_info *sseu = &info->sseu;
524 u32 fuse1; 508 u32 fuse1;
525 int s, ss; 509 int s, ss;
526 510
@@ -528,9 +512,9 @@ static void haswell_sseu_info_init(struct drm_i915_private *dev_priv)
528 * There isn't a register to tell us how many slices/subslices. We 512 * There isn't a register to tell us how many slices/subslices. We
529 * work off the PCI-ids here. 513 * work off the PCI-ids here.
530 */ 514 */
531 switch (info->gt) { 515 switch (INTEL_INFO(dev_priv)->gt) {
532 default: 516 default:
533 MISSING_CASE(info->gt); 517 MISSING_CASE(INTEL_INFO(dev_priv)->gt);
534 /* fall through */ 518 /* fall through */
535 case 1: 519 case 1:
536 sseu->slice_mask = BIT(0); 520 sseu->slice_mask = BIT(0);
@@ -725,7 +709,7 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
725 709
726/** 710/**
727 * intel_device_info_runtime_init - initialize runtime info 711 * intel_device_info_runtime_init - initialize runtime info
728 * @info: intel device info struct 712 * @dev_priv: the i915 device
729 * 713 *
730 * Determine various intel_device_info fields at runtime. 714 * Determine various intel_device_info fields at runtime.
731 * 715 *
@@ -739,29 +723,29 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
739 * - after the PCH has been detected, 723 * - after the PCH has been detected,
740 * - before the first usage of the fields it can tweak. 724 * - before the first usage of the fields it can tweak.
741 */ 725 */
742void intel_device_info_runtime_init(struct intel_device_info *info) 726void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
743{ 727{
744 struct drm_i915_private *dev_priv = 728 struct intel_device_info *info = mkwrite_device_info(dev_priv);
745 container_of(info, struct drm_i915_private, info); 729 struct intel_runtime_info *runtime = RUNTIME_INFO(dev_priv);
746 enum pipe pipe; 730 enum pipe pipe;
747 731
748 if (INTEL_GEN(dev_priv) >= 10) { 732 if (INTEL_GEN(dev_priv) >= 10) {
749 for_each_pipe(dev_priv, pipe) 733 for_each_pipe(dev_priv, pipe)
750 info->num_scalers[pipe] = 2; 734 runtime->num_scalers[pipe] = 2;
751 } else if (IS_GEN9(dev_priv)) { 735 } else if (IS_GEN(dev_priv, 9)) {
752 info->num_scalers[PIPE_A] = 2; 736 runtime->num_scalers[PIPE_A] = 2;
753 info->num_scalers[PIPE_B] = 2; 737 runtime->num_scalers[PIPE_B] = 2;
754 info->num_scalers[PIPE_C] = 1; 738 runtime->num_scalers[PIPE_C] = 1;
755 } 739 }
756 740
757 BUILD_BUG_ON(I915_NUM_ENGINES > BITS_PER_TYPE(intel_ring_mask_t)); 741 BUILD_BUG_ON(I915_NUM_ENGINES > BITS_PER_TYPE(intel_ring_mask_t));
758 742
759 if (IS_GEN11(dev_priv)) 743 if (IS_GEN(dev_priv, 11))
760 for_each_pipe(dev_priv, pipe) 744 for_each_pipe(dev_priv, pipe)
761 info->num_sprites[pipe] = 6; 745 runtime->num_sprites[pipe] = 6;
762 else if (IS_GEN10(dev_priv) || IS_GEMINILAKE(dev_priv)) 746 else if (IS_GEN(dev_priv, 10) || IS_GEMINILAKE(dev_priv))
763 for_each_pipe(dev_priv, pipe) 747 for_each_pipe(dev_priv, pipe)
764 info->num_sprites[pipe] = 3; 748 runtime->num_sprites[pipe] = 3;
765 else if (IS_BROXTON(dev_priv)) { 749 else if (IS_BROXTON(dev_priv)) {
766 /* 750 /*
767 * Skylake and Broxton currently don't expose the topmost plane as its 751 * Skylake and Broxton currently don't expose the topmost plane as its
@@ -772,22 +756,22 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
772 * down the line. 756 * down the line.
773 */ 757 */
774 758
775 info->num_sprites[PIPE_A] = 2; 759 runtime->num_sprites[PIPE_A] = 2;
776 info->num_sprites[PIPE_B] = 2; 760 runtime->num_sprites[PIPE_B] = 2;
777 info->num_sprites[PIPE_C] = 1; 761 runtime->num_sprites[PIPE_C] = 1;
778 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 762 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
779 for_each_pipe(dev_priv, pipe) 763 for_each_pipe(dev_priv, pipe)
780 info->num_sprites[pipe] = 2; 764 runtime->num_sprites[pipe] = 2;
781 } else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) { 765 } else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
782 for_each_pipe(dev_priv, pipe) 766 for_each_pipe(dev_priv, pipe)
783 info->num_sprites[pipe] = 1; 767 runtime->num_sprites[pipe] = 1;
784 } 768 }
785 769
786 if (i915_modparams.disable_display) { 770 if (i915_modparams.disable_display) {
787 DRM_INFO("Display disabled (module parameter)\n"); 771 DRM_INFO("Display disabled (module parameter)\n");
788 info->num_pipes = 0; 772 info->num_pipes = 0;
789 } else if (HAS_DISPLAY(dev_priv) && 773 } else if (HAS_DISPLAY(dev_priv) &&
790 (IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) && 774 (IS_GEN_RANGE(dev_priv, 7, 8)) &&
791 HAS_PCH_SPLIT(dev_priv)) { 775 HAS_PCH_SPLIT(dev_priv)) {
792 u32 fuse_strap = I915_READ(FUSE_STRAP); 776 u32 fuse_strap = I915_READ(FUSE_STRAP);
793 u32 sfuse_strap = I915_READ(SFUSE_STRAP); 777 u32 sfuse_strap = I915_READ(SFUSE_STRAP);
@@ -811,7 +795,7 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
811 DRM_INFO("PipeC fused off\n"); 795 DRM_INFO("PipeC fused off\n");
812 info->num_pipes -= 1; 796 info->num_pipes -= 1;
813 } 797 }
814 } else if (HAS_DISPLAY(dev_priv) && IS_GEN9(dev_priv)) { 798 } else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) {
815 u32 dfsm = I915_READ(SKL_DFSM); 799 u32 dfsm = I915_READ(SKL_DFSM);
816 u8 disabled_mask = 0; 800 u8 disabled_mask = 0;
817 bool invalid; 801 bool invalid;
@@ -851,20 +835,20 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
851 cherryview_sseu_info_init(dev_priv); 835 cherryview_sseu_info_init(dev_priv);
852 else if (IS_BROADWELL(dev_priv)) 836 else if (IS_BROADWELL(dev_priv))
853 broadwell_sseu_info_init(dev_priv); 837 broadwell_sseu_info_init(dev_priv);
854 else if (IS_GEN9(dev_priv)) 838 else if (IS_GEN(dev_priv, 9))
855 gen9_sseu_info_init(dev_priv); 839 gen9_sseu_info_init(dev_priv);
856 else if (IS_GEN10(dev_priv)) 840 else if (IS_GEN(dev_priv, 10))
857 gen10_sseu_info_init(dev_priv); 841 gen10_sseu_info_init(dev_priv);
858 else if (INTEL_GEN(dev_priv) >= 11) 842 else if (INTEL_GEN(dev_priv) >= 11)
859 gen11_sseu_info_init(dev_priv); 843 gen11_sseu_info_init(dev_priv);
860 844
861 if (IS_GEN6(dev_priv) && intel_vtd_active()) { 845 if (IS_GEN(dev_priv, 6) && intel_vtd_active()) {
862 DRM_INFO("Disabling ppGTT for VT-d support\n"); 846 DRM_INFO("Disabling ppGTT for VT-d support\n");
863 info->ppgtt = INTEL_PPGTT_NONE; 847 info->ppgtt = INTEL_PPGTT_NONE;
864 } 848 }
865 849
866 /* Initialize command stream timestamp frequency */ 850 /* Initialize command stream timestamp frequency */
867 info->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv); 851 runtime->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);
868} 852}
869 853
870void intel_driver_caps_print(const struct intel_driver_caps *caps, 854void intel_driver_caps_print(const struct intel_driver_caps *caps,
@@ -884,35 +868,44 @@ void intel_driver_caps_print(const struct intel_driver_caps *caps,
884void intel_device_info_init_mmio(struct drm_i915_private *dev_priv) 868void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
885{ 869{
886 struct intel_device_info *info = mkwrite_device_info(dev_priv); 870 struct intel_device_info *info = mkwrite_device_info(dev_priv);
887 u32 media_fuse; 871 unsigned int logical_vdbox = 0;
888 unsigned int i; 872 unsigned int i;
873 u32 media_fuse;
889 874
890 if (INTEL_GEN(dev_priv) < 11) 875 if (INTEL_GEN(dev_priv) < 11)
891 return; 876 return;
892 877
893 media_fuse = ~I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE); 878 media_fuse = ~I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);
894 879
895 info->vdbox_enable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK; 880 RUNTIME_INFO(dev_priv)->vdbox_enable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
896 info->vebox_enable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >> 881 RUNTIME_INFO(dev_priv)->vebox_enable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
897 GEN11_GT_VEBOX_DISABLE_SHIFT; 882 GEN11_GT_VEBOX_DISABLE_SHIFT;
898 883
899 DRM_DEBUG_DRIVER("vdbox enable: %04x\n", info->vdbox_enable); 884 DRM_DEBUG_DRIVER("vdbox enable: %04x\n", RUNTIME_INFO(dev_priv)->vdbox_enable);
900 for (i = 0; i < I915_MAX_VCS; i++) { 885 for (i = 0; i < I915_MAX_VCS; i++) {
901 if (!HAS_ENGINE(dev_priv, _VCS(i))) 886 if (!HAS_ENGINE(dev_priv, _VCS(i)))
902 continue; 887 continue;
903 888
904 if (!(BIT(i) & info->vdbox_enable)) { 889 if (!(BIT(i) & RUNTIME_INFO(dev_priv)->vdbox_enable)) {
905 info->ring_mask &= ~ENGINE_MASK(_VCS(i)); 890 info->ring_mask &= ~ENGINE_MASK(_VCS(i));
906 DRM_DEBUG_DRIVER("vcs%u fused off\n", i); 891 DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
892 continue;
907 } 893 }
894
895 /*
896 * In Gen11, only even numbered logical VDBOXes are
897 * hooked up to an SFC (Scaler & Format Converter) unit.
898 */
899 if (logical_vdbox++ % 2 == 0)
900 RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i);
908 } 901 }
909 902
910 DRM_DEBUG_DRIVER("vebox enable: %04x\n", info->vebox_enable); 903 DRM_DEBUG_DRIVER("vebox enable: %04x\n", RUNTIME_INFO(dev_priv)->vebox_enable);
911 for (i = 0; i < I915_MAX_VECS; i++) { 904 for (i = 0; i < I915_MAX_VECS; i++) {
912 if (!HAS_ENGINE(dev_priv, _VECS(i))) 905 if (!HAS_ENGINE(dev_priv, _VECS(i)))
913 continue; 906 continue;
914 907
915 if (!(BIT(i) & info->vebox_enable)) { 908 if (!(BIT(i) & RUNTIME_INFO(dev_priv)->vebox_enable)) {
916 info->ring_mask &= ~ENGINE_MASK(_VECS(i)); 909 info->ring_mask &= ~ENGINE_MASK(_VECS(i));
917 DRM_DEBUG_DRIVER("vecs%u fused off\n", i); 910 DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
918 } 911 }
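
The new SFC bookkeeping counts logical (i.e. not fused off) VDBOXes and, per the comment, grants SFC access to the even-numbered ones. The same loop as a standalone function; the HAS_ENGINE() filter from the original is assumed folded into the enable mask here for brevity:

    #include <stdint.h>

    /* Grant SFC access to even-numbered *logical* VDBOX instances:
     * walk the enable mask, counting only engines that are present. */
    static uint8_t compute_sfc_access(uint8_t vdbox_enable, int max_vcs)
    {
            uint8_t sfc_access = 0;
            unsigned int logical = 0;

            for (int i = 0; i < max_vcs; i++) {
                    if (!(vdbox_enable & (1u << i)))
                            continue;
                    if (logical++ % 2 == 0)
                            sfc_access |= (uint8_t)(1u << i);
            }
            return sfc_access;
    }
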
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 1caf24e2cf0b..957c6527f76b 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -89,6 +89,7 @@ enum intel_ppgtt {
89 func(is_alpha_support); \ 89 func(is_alpha_support); \
90 /* Keep has_* in alphabetical order */ \ 90 /* Keep has_* in alphabetical order */ \
91 func(has_64bit_reloc); \ 91 func(has_64bit_reloc); \
92 func(gpu_reset_clobbers_display); \
92 func(has_reset_engine); \ 93 func(has_reset_engine); \
93 func(has_fpga_dbg); \ 94 func(has_fpga_dbg); \
94 func(has_guc); \ 95 func(has_guc); \
@@ -152,12 +153,10 @@ struct sseu_dev_info {
152typedef u8 intel_ring_mask_t; 153typedef u8 intel_ring_mask_t;
153 154
154struct intel_device_info { 155struct intel_device_info {
155 u16 device_id;
156 u16 gen_mask; 156 u16 gen_mask;
157 157
158 u8 gen; 158 u8 gen;
159 u8 gt; /* GT number, 0 if undefined */ 159 u8 gt; /* GT number, 0 if undefined */
160 u8 num_rings;
161 intel_ring_mask_t ring_mask; /* Rings supported by the HW */ 160 intel_ring_mask_t ring_mask; /* Rings supported by the HW */
162 161
163 enum intel_platform platform; 162 enum intel_platform platform;
@@ -169,8 +168,6 @@ struct intel_device_info {
169 u32 display_mmio_offset; 168 u32 display_mmio_offset;
170 169
171 u8 num_pipes; 170 u8 num_pipes;
172 u8 num_sprites[I915_MAX_PIPES];
173 u8 num_scalers[I915_MAX_PIPES];
174 171
175#define DEFINE_FLAG(name) u8 name:1 172#define DEFINE_FLAG(name) u8 name:1
176 DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG); 173 DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
@@ -189,6 +186,20 @@ struct intel_device_info {
189 int trans_offsets[I915_MAX_TRANSCODERS]; 186 int trans_offsets[I915_MAX_TRANSCODERS];
190 int cursor_offsets[I915_MAX_PIPES]; 187 int cursor_offsets[I915_MAX_PIPES];
191 188
189 struct color_luts {
190 u16 degamma_lut_size;
191 u16 gamma_lut_size;
192 } color;
193};
194
195struct intel_runtime_info {
196 u16 device_id;
197
198 u8 num_sprites[I915_MAX_PIPES];
199 u8 num_scalers[I915_MAX_PIPES];
200
201 u8 num_rings;
202
192 /* Slice/subslice/EU info */ 203 /* Slice/subslice/EU info */
193 struct sseu_dev_info sseu; 204 struct sseu_dev_info sseu;
194 205
@@ -198,10 +209,8 @@ struct intel_device_info {
198 u8 vdbox_enable; 209 u8 vdbox_enable;
199 u8 vebox_enable; 210 u8 vebox_enable;
200 211
201 struct color_luts { 212 /* Media engine access to SFC per instance */
202 u16 degamma_lut_size; 213 u8 vdbox_sfc_access;
203 u16 gamma_lut_size;
204 } color;
205}; 214};
206 215
207struct intel_driver_caps { 216struct intel_driver_caps {
@@ -258,12 +267,10 @@ static inline void sseu_set_eus(struct sseu_dev_info *sseu,
258 267
259const char *intel_platform_name(enum intel_platform platform); 268const char *intel_platform_name(enum intel_platform platform);
260 269
261void intel_device_info_runtime_init(struct intel_device_info *info); 270void intel_device_info_runtime_init(struct drm_i915_private *dev_priv);
262void intel_device_info_dump(const struct intel_device_info *info,
263 struct drm_printer *p);
264void intel_device_info_dump_flags(const struct intel_device_info *info, 271void intel_device_info_dump_flags(const struct intel_device_info *info,
265 struct drm_printer *p); 272 struct drm_printer *p);
266void intel_device_info_dump_runtime(const struct intel_device_info *info, 273void intel_device_info_dump_runtime(const struct intel_runtime_info *info,
267 struct drm_printer *p); 274 struct drm_printer *p);
268void intel_device_info_dump_topology(const struct sseu_dev_info *sseu, 275void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
269 struct drm_printer *p); 276 struct drm_printer *p);
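
The header diff draws the dividing line for the split: values owned by the PCI-ID tables stay in intel_device_info, while values discovered at probe time (device id, ring count, sprite/scaler counts, sseu, the fuse-derived engine masks) move to the new intel_runtime_info. A compressed sketch of the two shapes, fields abridged:

    /* Abridged shapes, illustration only. */
    struct device_info_static_sketch {
            unsigned short gen_mask;    /* per-PCI-ID, from the tables */
            unsigned char  gen, gt;
            unsigned char  num_pipes;
    };

    struct device_info_runtime_sketch {
            unsigned short device_id;   /* read from PCI config space  */
            unsigned char  num_rings;   /* derived from fuse registers */
            unsigned char  num_sprites[3], num_scalers[3];
    };
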
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 52c63135bc65..09e12a826cf2 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -31,7 +31,6 @@
31#include <linux/slab.h> 31#include <linux/slab.h>
32#include <linux/vgaarb.h> 32#include <linux/vgaarb.h>
33#include <drm/drm_edid.h> 33#include <drm/drm_edid.h>
34#include <drm/drmP.h>
35#include "intel_drv.h" 34#include "intel_drv.h"
36#include "intel_frontbuffer.h" 35#include "intel_frontbuffer.h"
37#include <drm/i915_drm.h> 36#include <drm/i915_drm.h>
@@ -984,7 +983,7 @@ static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
984 u32 line1, line2; 983 u32 line1, line2;
985 u32 line_mask; 984 u32 line_mask;
986 985
987 if (IS_GEN2(dev_priv)) 986 if (IS_GEN(dev_priv, 2))
988 line_mask = DSL_LINEMASK_GEN2; 987 line_mask = DSL_LINEMASK_GEN2;
989 else 988 else
990 line_mask = DSL_LINEMASK_GEN3; 989 line_mask = DSL_LINEMASK_GEN3;
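
From here on the per-generation predicates (IS_GEN2(), IS_GEN5(), ...) give way to a single IS_GEN(dev_priv, n). A plausible shape for such a helper, assuming a gen_mask with bit (gen - 1) set, which keeps the test a constant-folded mask check when n is a literal:

    #include <stdbool.h>

    #define BIT_SKETCH(n) (1u << (n))

    struct info_sketch { unsigned int gen_mask; };

    /* gen_mask carries BIT(gen - 1) for the device's generation, so
     * is_gen(info, 2) compiles down to a single mask test. */
    static inline bool is_gen(const struct info_sketch *info, int gen)
    {
            return info->gen_mask & BIT_SKETCH(gen - 1);
    }
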
@@ -1110,7 +1109,7 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1110 u32 val; 1109 u32 val;
1111 1110
1112 /* ILK FDI PLL is always enabled */ 1111 /* ILK FDI PLL is always enabled */
1113 if (IS_GEN5(dev_priv)) 1112 if (IS_GEN(dev_priv, 5))
1114 return; 1113 return;
1115 1114
1116 /* On Haswell, DDI ports are responsible for the FDI PLL setup */ 1115 /* On Haswell, DDI ports are responsible for the FDI PLL setup */
@@ -1850,7 +1849,7 @@ static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
1850 1849
1851static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv) 1850static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
1852{ 1851{
1853 return IS_GEN2(dev_priv) ? 2048 : 4096; 1852 return IS_GEN(dev_priv, 2) ? 2048 : 4096;
1854} 1853}
1855 1854
1856static unsigned int 1855static unsigned int
@@ -1863,7 +1862,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
1863 case DRM_FORMAT_MOD_LINEAR: 1862 case DRM_FORMAT_MOD_LINEAR:
1864 return cpp; 1863 return cpp;
1865 case I915_FORMAT_MOD_X_TILED: 1864 case I915_FORMAT_MOD_X_TILED:
1866 if (IS_GEN2(dev_priv)) 1865 if (IS_GEN(dev_priv, 2))
1867 return 128; 1866 return 128;
1868 else 1867 else
1869 return 512; 1868 return 512;
@@ -1872,7 +1871,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
1872 return 128; 1871 return 128;
1873 /* fall through */ 1872 /* fall through */
1874 case I915_FORMAT_MOD_Y_TILED: 1873 case I915_FORMAT_MOD_Y_TILED:
1875 if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv)) 1874 if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
1876 return 128; 1875 return 128;
1877 else 1876 else
1878 return 512; 1877 return 512;
@@ -3193,8 +3192,8 @@ static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
3193 3192
3194 dspcntr = DISPLAY_PLANE_ENABLE | DISPPLANE_GAMMA_ENABLE; 3193 dspcntr = DISPLAY_PLANE_ENABLE | DISPPLANE_GAMMA_ENABLE;
3195 3194
3196 if (IS_G4X(dev_priv) || IS_GEN5(dev_priv) || 3195 if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
3197 IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) 3196 IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
3198 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; 3197 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
3199 3198
3200 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 3199 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
@@ -3746,8 +3745,8 @@ __intel_display_resume(struct drm_device *dev,
3746 3745
3747static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv) 3746static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
3748{ 3747{
3749 return intel_has_gpu_reset(dev_priv) && 3748 return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
3750 INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv); 3749 intel_has_gpu_reset(dev_priv));
3751} 3750}
3752 3751
3753void intel_prepare_reset(struct drm_i915_private *dev_priv) 3752void intel_prepare_reset(struct drm_i915_private *dev_priv)
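
gpu_reset_clobbers_display() stops re-deriving the property from generation checks and instead reads the gpu_reset_clobbers_display flag added to the device-info flag list in intel_device_info.h above. The shape of that trade, as a sketch:

    #include <stdbool.h>

    struct dev_info_sketch { bool gpu_reset_clobbers_display; };

    /* The platform table states the property once; callers read the
     * flag instead of repeating "gen < 5 and not g4x" everywhere. */
    static bool reset_clobbers_display(const struct dev_info_sketch *info,
                                       bool has_gpu_reset)
    {
            return info->gpu_reset_clobbers_display && has_gpu_reset;
    }
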
@@ -4120,7 +4119,7 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
4120 temp = I915_READ(reg); 4119 temp = I915_READ(reg);
4121 temp &= ~FDI_LINK_TRAIN_NONE; 4120 temp &= ~FDI_LINK_TRAIN_NONE;
4122 temp |= FDI_LINK_TRAIN_PATTERN_2; 4121 temp |= FDI_LINK_TRAIN_PATTERN_2;
4123 if (IS_GEN6(dev_priv)) { 4122 if (IS_GEN(dev_priv, 6)) {
4124 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 4123 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4125 /* SNB-B */ 4124 /* SNB-B */
4126 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; 4125 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
@@ -4919,10 +4918,10 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
4919 /* range checks */ 4918 /* range checks */
4920 if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H || 4919 if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
4921 dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H || 4920 dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
4922 (IS_GEN11(dev_priv) && 4921 (IS_GEN(dev_priv, 11) &&
4923 (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H || 4922 (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
4924 dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) || 4923 dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
4925 (!IS_GEN11(dev_priv) && 4924 (!IS_GEN(dev_priv, 11) &&
4926 (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H || 4925 (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
4927 dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) { 4926 dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
4928 DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u " 4927 DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
@@ -5213,7 +5212,7 @@ intel_post_enable_primary(struct drm_crtc *crtc,
5213 * FIXME: Need to fix the logic to work when we turn off all planes 5212 * FIXME: Need to fix the logic to work when we turn off all planes
5214 * but leave the pipe running. 5213 * but leave the pipe running.
5215 */ 5214 */
5216 if (IS_GEN2(dev_priv)) 5215 if (IS_GEN(dev_priv, 2))
5217 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 5216 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5218 5217
5219 /* Underruns don't always raise interrupts, so check manually. */ 5218 /* Underruns don't always raise interrupts, so check manually. */
@@ -5234,7 +5233,7 @@ intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
5234 * Gen2 reports pipe underruns whenever all planes are disabled. 5233 * Gen2 reports pipe underruns whenever all planes are disabled.
5235 * So disable underrun reporting before all the planes get disabled. 5234 * So disable underrun reporting before all the planes get disabled.
5236 */ 5235 */
5237 if (IS_GEN2(dev_priv)) 5236 if (IS_GEN(dev_priv, 2))
5238 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 5237 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
5239 5238
5240 hsw_disable_ips(to_intel_crtc_state(crtc->state)); 5239 hsw_disable_ips(to_intel_crtc_state(crtc->state));
@@ -5292,7 +5291,7 @@ static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
5292 return false; 5291 return false;
5293 5292
5294 /* WA Display #0827: Gen9:all */ 5293 /* WA Display #0827: Gen9:all */
5295 if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv)) 5294 if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
5296 return true; 5295 return true;
5297 5296
5298 return false; 5297 return false;
@@ -5365,7 +5364,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
5365 * Gen2 reports pipe underruns whenever all planes are disabled. 5364 * Gen2 reports pipe underruns whenever all planes are disabled.
5366 * So disable underrun reporting before all the planes get disabled. 5365 * So disable underrun reporting before all the planes get disabled.
5367 */ 5366 */
5368 if (IS_GEN2(dev_priv) && old_primary_state->visible && 5367 if (IS_GEN(dev_priv, 2) && old_primary_state->visible &&
5369 (modeset || !new_primary_state->base.visible)) 5368 (modeset || !new_primary_state->base.visible))
5370 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); 5369 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
5371 } 5370 }
@@ -5578,6 +5577,26 @@ static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
5578 } 5577 }
5579} 5578}
5580 5579
5580static void intel_encoders_update_pipe(struct drm_crtc *crtc,
5581 struct intel_crtc_state *crtc_state,
5582 struct drm_atomic_state *old_state)
5583{
5584 struct drm_connector_state *conn_state;
5585 struct drm_connector *conn;
5586 int i;
5587
5588 for_each_new_connector_in_state(old_state, conn, conn_state, i) {
5589 struct intel_encoder *encoder =
5590 to_intel_encoder(conn_state->best_encoder);
5591
5592 if (conn_state->crtc != crtc)
5593 continue;
5594
5595 if (encoder->update_pipe)
5596 encoder->update_pipe(encoder, crtc_state, conn_state);
5597 }
5598}
5599
5581static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config, 5600static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
5582 struct drm_atomic_state *old_state) 5601 struct drm_atomic_state *old_state)
5583{ 5602{
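
intel_encoders_update_pipe() follows the same walker shape as the neighbouring intel_encoders_*() helpers: iterate the connectors in the atomic state, skip ones bound to a different crtc, and invoke the hook only where an encoder implements it. The skeleton in isolation, with invented types:

    #include <stddef.h>

    struct conn_sketch {
            int crtc_id;                               /* bound crtc    */
            void (*update_pipe)(struct conn_sketch *); /* optional hook */
    };

    /* Walk every connector in the commit, skip ones on another crtc,
     * and call the optional hook where present. */
    static void encoders_update_pipe(struct conn_sketch **conns, size_t n,
                                     int crtc_id)
    {
            for (size_t i = 0; i < n; i++) {
                    struct conn_sketch *c = conns[i];

                    if (c->crtc_id != crtc_id)
                            continue;
                    if (c->update_pipe)
                            c->update_pipe(c);
            }
    }
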
@@ -5641,7 +5660,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
5641 * On ILK+ LUT must be loaded before the pipe is running but with 5660 * On ILK+ LUT must be loaded before the pipe is running but with
5642 * clocks enabled 5661 * clocks enabled
5643 */ 5662 */
5644 intel_color_load_luts(&pipe_config->base); 5663 intel_color_load_luts(pipe_config);
5645 5664
5646 if (dev_priv->display.initial_watermarks != NULL) 5665 if (dev_priv->display.initial_watermarks != NULL)
5647 dev_priv->display.initial_watermarks(old_intel_state, pipe_config); 5666 dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
@@ -5752,7 +5771,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
5752 5771
5753 haswell_set_pipemisc(pipe_config); 5772 haswell_set_pipemisc(pipe_config);
5754 5773
5755 intel_color_set_csc(&pipe_config->base); 5774 intel_color_set_csc(pipe_config);
5756 5775
5757 intel_crtc->active = true; 5776 intel_crtc->active = true;
5758 5777
@@ -5771,7 +5790,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
5771 * On ILK+ LUT must be loaded before the pipe is running but with 5790 * On ILK+ LUT must be loaded before the pipe is running but with
5772 * clocks enabled 5791 * clocks enabled
5773 */ 5792 */
5774 intel_color_load_luts(&pipe_config->base); 5793 intel_color_load_luts(pipe_config);
5775 5794
5776 /* 5795 /*
5777 * Display WA #1153: enable hardware to bypass the alpha math 5796 * Display WA #1153: enable hardware to bypass the alpha math
@@ -6117,7 +6136,7 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
6117 6136
6118 i9xx_set_pipeconf(pipe_config); 6137 i9xx_set_pipeconf(pipe_config);
6119 6138
6120 intel_color_set_csc(&pipe_config->base); 6139 intel_color_set_csc(pipe_config);
6121 6140
6122 intel_crtc->active = true; 6141 intel_crtc->active = true;
6123 6142
@@ -6137,7 +6156,7 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
6137 6156
6138 i9xx_pfit_enable(pipe_config); 6157 i9xx_pfit_enable(pipe_config);
6139 6158
6140 intel_color_load_luts(&pipe_config->base); 6159 intel_color_load_luts(pipe_config);
6141 6160
6142 dev_priv->display.initial_watermarks(old_intel_state, 6161 dev_priv->display.initial_watermarks(old_intel_state,
6143 pipe_config); 6162 pipe_config);
@@ -6184,7 +6203,7 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
6184 6203
6185 intel_crtc->active = true; 6204 intel_crtc->active = true;
6186 6205
6187 if (!IS_GEN2(dev_priv)) 6206 if (!IS_GEN(dev_priv, 2))
6188 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 6207 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6189 6208
6190 intel_encoders_pre_enable(crtc, pipe_config, old_state); 6209 intel_encoders_pre_enable(crtc, pipe_config, old_state);
@@ -6193,7 +6212,7 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
6193 6212
6194 i9xx_pfit_enable(pipe_config); 6213 i9xx_pfit_enable(pipe_config);
6195 6214
6196 intel_color_load_luts(&pipe_config->base); 6215 intel_color_load_luts(pipe_config);
6197 6216
6198 if (dev_priv->display.initial_watermarks != NULL) 6217 if (dev_priv->display.initial_watermarks != NULL)
6199 dev_priv->display.initial_watermarks(old_intel_state, 6218 dev_priv->display.initial_watermarks(old_intel_state,
@@ -6236,7 +6255,7 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
6236 * On gen2 planes are double buffered but the pipe isn't, so we must 6255 * On gen2 planes are double buffered but the pipe isn't, so we must
6237 * wait for planes to fully turn off before disabling the pipe. 6256 * wait for planes to fully turn off before disabling the pipe.
6238 */ 6257 */
6239 if (IS_GEN2(dev_priv)) 6258 if (IS_GEN(dev_priv, 2))
6240 intel_wait_for_vblank(dev_priv, pipe); 6259 intel_wait_for_vblank(dev_priv, pipe);
6241 6260
6242 intel_encoders_disable(crtc, old_crtc_state, old_state); 6261 intel_encoders_disable(crtc, old_crtc_state, old_state);
@@ -6261,7 +6280,7 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
6261 6280
6262 intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state); 6281 intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
6263 6282
6264 if (!IS_GEN2(dev_priv)) 6283 if (!IS_GEN(dev_priv, 2))
6265 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 6284 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
6266 6285
6267 if (!dev_priv->display.initial_watermarks) 6286 if (!dev_priv->display.initial_watermarks)
@@ -6868,7 +6887,7 @@ static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
6868 * Strictly speaking some registers are available before 6887 * Strictly speaking some registers are available before
6869 * gen7, but we only support DRRS on gen7+ 6888 * gen7, but we only support DRRS on gen7+
6870 */ 6889 */
6871 return IS_GEN7(dev_priv) || IS_CHERRYVIEW(dev_priv); 6890 return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
6872} 6891}
6873 6892
6874static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state, 6893static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
@@ -9005,7 +9024,7 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc,
9005 /* We currently do not free assignments of panel fitters on 9024 /* We currently do not free assignments of panel fitters on
9006 * ivb/hsw (since we don't use the higher upscaling modes which 9025 * ivb/hsw (since we don't use the higher upscaling modes which
9007 * differentiates them) so just WARN about this case for now. */ 9026 * differentiates them) so just WARN about this case for now. */
9008 if (IS_GEN7(dev_priv)) { 9027 if (IS_GEN(dev_priv, 7)) {
9009 WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) != 9028 WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
9010 PF_PIPE_SEL_IVB(crtc->pipe)); 9029 PF_PIPE_SEL_IVB(crtc->pipe));
9011 } 9030 }
@@ -9995,7 +10014,7 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
9995 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 10014 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
9996 u32 cntl = 0; 10015 u32 cntl = 0;
9997 10016
9998 if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) 10017 if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
9999 cntl |= MCURSOR_TRICKLE_FEED_DISABLE; 10018 cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
10000 10019
10001 if (INTEL_GEN(dev_priv) <= 10) { 10020 if (INTEL_GEN(dev_priv) <= 10) {
@@ -10468,7 +10487,7 @@ static int i9xx_pll_refclk(struct drm_device *dev,
10468 return dev_priv->vbt.lvds_ssc_freq; 10487 return dev_priv->vbt.lvds_ssc_freq;
10469 else if (HAS_PCH_SPLIT(dev_priv)) 10488 else if (HAS_PCH_SPLIT(dev_priv))
10470 return 120000; 10489 return 120000;
10471 else if (!IS_GEN2(dev_priv)) 10490 else if (!IS_GEN(dev_priv, 2))
10472 return 96000; 10491 return 96000;
10473 else 10492 else
10474 return 48000; 10493 return 48000;
@@ -10501,7 +10520,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
10501 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; 10520 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
10502 } 10521 }
10503 10522
10504 if (!IS_GEN2(dev_priv)) { 10523 if (!IS_GEN(dev_priv, 2)) {
10505 if (IS_PINEVIEW(dev_priv)) 10524 if (IS_PINEVIEW(dev_priv))
10506 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >> 10525 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
10507 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW); 10526 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
@@ -10653,20 +10672,17 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
10653 10672
10654/** 10673/**
10655 * intel_wm_need_update - Check whether watermarks need updating 10674 * intel_wm_need_update - Check whether watermarks need updating
10656 * @plane: drm plane 10675 * @cur: current plane state
10657 * @state: new plane state 10676 * @new: new plane state
10658 * 10677 *
10659 * Check current plane state versus the new one to determine whether 10678 * Check current plane state versus the new one to determine whether
10660 * watermarks need to be recalculated. 10679 * watermarks need to be recalculated.
10661 * 10680 *
10662 * Returns true or false. 10681 * Returns true or false.
10663 */ 10682 */
10664static bool intel_wm_need_update(struct drm_plane *plane, 10683static bool intel_wm_need_update(struct intel_plane_state *cur,
10665 struct drm_plane_state *state) 10684 struct intel_plane_state *new)
10666{ 10685{
10667 struct intel_plane_state *new = to_intel_plane_state(state);
10668 struct intel_plane_state *cur = to_intel_plane_state(plane->state);
10669
10670 /* Update watermarks on tiling or size changes. */ 10686 /* Update watermarks on tiling or size changes. */
10671 if (new->base.visible != cur->base.visible) 10687 if (new->base.visible != cur->base.visible)
10672 return true; 10688 return true;
@@ -10775,7 +10791,8 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
10775 /* must disable cxsr around plane enable/disable */ 10791 /* must disable cxsr around plane enable/disable */
10776 if (plane->id != PLANE_CURSOR) 10792 if (plane->id != PLANE_CURSOR)
10777 pipe_config->disable_cxsr = true; 10793 pipe_config->disable_cxsr = true;
10778 } else if (intel_wm_need_update(&plane->base, plane_state)) { 10794 } else if (intel_wm_need_update(to_intel_plane_state(plane->base.state),
10795 to_intel_plane_state(plane_state))) {
10779 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) { 10796 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
10780 /* FIXME bollocks */ 10797 /* FIXME bollocks */
10781 pipe_config->update_wm_pre = true; 10798 pipe_config->update_wm_pre = true;
@@ -10817,7 +10834,7 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
10817 * the w/a on all three platforms. 10834 * the w/a on all three platforms.
10818 */ 10835 */
10819 if (plane->id == PLANE_SPRITE0 && 10836 if (plane->id == PLANE_SPRITE0 &&
10820 (IS_GEN5(dev_priv) || IS_GEN6(dev_priv) || 10837 (IS_GEN_RANGE(dev_priv, 5, 6) ||
10821 IS_IVYBRIDGE(dev_priv)) && 10838 IS_IVYBRIDGE(dev_priv)) &&
10822 (turn_on || (!needs_scaling(old_plane_state) && 10839 (turn_on || (!needs_scaling(old_plane_state) &&
10823 needs_scaling(to_intel_plane_state(plane_state))))) 10840 needs_scaling(to_intel_plane_state(plane_state)))))
@@ -10954,8 +10971,7 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
10954static int intel_crtc_atomic_check(struct drm_crtc *crtc, 10971static int intel_crtc_atomic_check(struct drm_crtc *crtc,
10955 struct drm_crtc_state *crtc_state) 10972 struct drm_crtc_state *crtc_state)
10956{ 10973{
10957 struct drm_device *dev = crtc->dev; 10974 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
10958 struct drm_i915_private *dev_priv = to_i915(dev);
10959 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10975 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10960 struct intel_crtc_state *pipe_config = 10976 struct intel_crtc_state *pipe_config =
10961 to_intel_crtc_state(crtc_state); 10977 to_intel_crtc_state(crtc_state);
@@ -10975,7 +10991,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
10975 } 10991 }
10976 10992
10977 if (crtc_state->color_mgmt_changed) { 10993 if (crtc_state->color_mgmt_changed) {
10978 ret = intel_color_check(crtc, crtc_state); 10994 ret = intel_color_check(pipe_config);
10979 if (ret) 10995 if (ret)
10980 return ret; 10996 return ret;
10981 10997
@@ -11004,9 +11020,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
11004 * old state and the new state. We can program these 11020 * old state and the new state. We can program these
11005 * immediately. 11021 * immediately.
11006 */ 11022 */
11007 ret = dev_priv->display.compute_intermediate_wm(dev, 11023 ret = dev_priv->display.compute_intermediate_wm(pipe_config);
11008 intel_crtc,
11009 pipe_config);
11010 if (ret) { 11024 if (ret) {
11011 DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n"); 11025 DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
11012 return ret; 11026 return ret;
@@ -11014,7 +11028,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
11014 } 11028 }
11015 11029
11016 if (INTEL_GEN(dev_priv) >= 9) { 11030 if (INTEL_GEN(dev_priv) >= 9) {
11017 if (mode_changed) 11031 if (mode_changed || pipe_config->update_pipe)
11018 ret = skl_update_scaler_crtc(pipe_config); 11032 ret = skl_update_scaler_crtc(pipe_config);
11019 11033
11020 if (!ret) 11034 if (!ret)
@@ -11967,7 +11981,7 @@ static void verify_wm_state(struct drm_crtc *crtc,
11967 if (INTEL_GEN(dev_priv) < 9 || !new_state->active) 11981 if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
11968 return; 11982 return;
11969 11983
11970 skl_pipe_wm_get_hw_state(crtc, &hw_wm); 11984 skl_pipe_wm_get_hw_state(intel_crtc, &hw_wm);
11971 sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal; 11985 sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;
11972 11986
11973 skl_pipe_ddb_get_hw_state(intel_crtc, hw_ddb_y, hw_ddb_uv); 11987 skl_pipe_ddb_get_hw_state(intel_crtc, hw_ddb_y, hw_ddb_uv);
@@ -12381,7 +12395,7 @@ static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
12381 * However if queried just before the start of vblank we'll get an 12395 * However if queried just before the start of vblank we'll get an
12382 * answer that's slightly in the future. 12396 * answer that's slightly in the future.
12383 */ 12397 */
12384 if (IS_GEN2(dev_priv)) { 12398 if (IS_GEN(dev_priv, 2)) {
12385 const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode; 12399 const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
12386 int vtotal; 12400 int vtotal;
12387 12401
@@ -12622,9 +12636,9 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
12622 * phase. The code here should be run after the per-crtc and per-plane 'check' 12636 * phase. The code here should be run after the per-crtc and per-plane 'check'
12623 * handlers to ensure that all derived state has been updated. 12637 * handlers to ensure that all derived state has been updated.
12624 */ 12638 */
12625static int calc_watermark_data(struct drm_atomic_state *state) 12639static int calc_watermark_data(struct intel_atomic_state *state)
12626{ 12640{
12627 struct drm_device *dev = state->dev; 12641 struct drm_device *dev = state->base.dev;
12628 struct drm_i915_private *dev_priv = to_i915(dev); 12642 struct drm_i915_private *dev_priv = to_i915(dev);
12629 12643
12630 /* Is there platform-specific watermark information to calculate? */ 12644 /* Is there platform-specific watermark information to calculate? */
@@ -12720,7 +12734,7 @@ static int intel_atomic_check(struct drm_device *dev,
12720 return ret; 12734 return ret;
12721 12735
12722 intel_fbc_choose_crtc(dev_priv, intel_state); 12736 intel_fbc_choose_crtc(dev_priv, intel_state);
12723 return calc_watermark_data(state); 12737 return calc_watermark_data(intel_state);
12724} 12738}
12725 12739
12726static int intel_atomic_prepare_commit(struct drm_device *dev, 12740static int intel_atomic_prepare_commit(struct drm_device *dev,
@@ -12762,9 +12776,14 @@ static void intel_update_crtc(struct drm_crtc *crtc,
12762 } else { 12776 } else {
12763 intel_pre_plane_update(to_intel_crtc_state(old_crtc_state), 12777 intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
12764 pipe_config); 12778 pipe_config);
12779
12780 if (pipe_config->update_pipe)
12781 intel_encoders_update_pipe(crtc, pipe_config, state);
12765 } 12782 }
12766 12783
12767 if (new_plane_state) 12784 if (pipe_config->update_pipe && !pipe_config->enable_fbc)
12785 intel_fbc_disable(intel_crtc);
12786 else if (new_plane_state)
12768 intel_fbc_enable(intel_crtc, pipe_config, new_plane_state); 12787 intel_fbc_enable(intel_crtc, pipe_config, new_plane_state);
12769 12788
12770 intel_begin_crtc_commit(crtc, old_crtc_state); 12789 intel_begin_crtc_commit(crtc, old_crtc_state);
@@ -13559,8 +13578,8 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
13559 if (!modeset && 13578 if (!modeset &&
13560 (intel_cstate->base.color_mgmt_changed || 13579 (intel_cstate->base.color_mgmt_changed ||
13561 intel_cstate->update_pipe)) { 13580 intel_cstate->update_pipe)) {
13562 intel_color_set_csc(&intel_cstate->base); 13581 intel_color_set_csc(intel_cstate);
13563 intel_color_load_luts(&intel_cstate->base); 13582 intel_color_load_luts(intel_cstate);
13564 } 13583 }
13565 13584
13566 /* Perform vblank evasion around commit operation */ 13585 /* Perform vblank evasion around commit operation */
@@ -13585,7 +13604,7 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
13585{ 13604{
13586 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 13605 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13587 13606
13588 if (!IS_GEN2(dev_priv)) 13607 if (!IS_GEN(dev_priv, 2))
13589 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); 13608 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
13590 13609
13591 if (crtc_state->has_pch_encoder) { 13610 if (crtc_state->has_pch_encoder) {
@@ -14047,7 +14066,7 @@ static void intel_crtc_init_scalers(struct intel_crtc *crtc,
14047 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 14066 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14048 int i; 14067 int i;
14049 14068
14050 crtc->num_scalers = dev_priv->info.num_scalers[crtc->pipe]; 14069 crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[crtc->pipe];
14051 if (!crtc->num_scalers) 14070 if (!crtc->num_scalers)
14052 return; 14071 return;
14053 14072
@@ -14133,7 +14152,7 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
14133 14152
14134 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); 14153 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
14135 14154
14136 intel_color_init(&intel_crtc->base); 14155 intel_color_init(intel_crtc);
14137 14156
14138 WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe); 14157 WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
14139 14158
@@ -14192,7 +14211,7 @@ static bool has_edp_a(struct drm_i915_private *dev_priv)
14192 if ((I915_READ(DP_A) & DP_DETECTED) == 0) 14211 if ((I915_READ(DP_A) & DP_DETECTED) == 0)
14193 return false; 14212 return false;
14194 14213
14195 if (IS_GEN5(dev_priv) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE)) 14214 if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
14196 return false; 14215 return false;
14197 14216
14198 return true; 14217 return true;
@@ -14404,7 +14423,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
14404 } 14423 }
14405 14424
14406 vlv_dsi_init(dev_priv); 14425 vlv_dsi_init(dev_priv);
14407 } else if (!IS_GEN2(dev_priv) && !IS_PINEVIEW(dev_priv)) { 14426 } else if (!IS_GEN(dev_priv, 2) && !IS_PINEVIEW(dev_priv)) {
14408 bool found = false; 14427 bool found = false;
14409 14428
14410 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { 14429 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
@@ -14438,7 +14457,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
14438 14457
14439 if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED)) 14458 if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
14440 intel_dp_init(dev_priv, DP_D, PORT_D); 14459 intel_dp_init(dev_priv, DP_D, PORT_D);
14441 } else if (IS_GEN2(dev_priv)) 14460 } else if (IS_GEN(dev_priv, 2))
14442 intel_dvo_init(dev_priv); 14461 intel_dvo_init(dev_priv);
14443 14462
14444 if (SUPPORTS_TV(dev_priv)) 14463 if (SUPPORTS_TV(dev_priv))
@@ -14636,7 +14655,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
14636 * require the entire fb to accommodate that to avoid 14655 * require the entire fb to accommodate that to avoid
14637 * potential runtime errors at plane configuration time. 14656 * potential runtime errors at plane configuration time.
14638 */ 14657 */
14639 if (IS_GEN9(dev_priv) && i == 0 && fb->width > 3840 && 14658 if (IS_GEN(dev_priv, 9) && i == 0 && fb->width > 3840 &&
14640 is_ccs_modifier(fb->modifier)) 14659 is_ccs_modifier(fb->modifier))
14641 stride_alignment *= 4; 14660 stride_alignment *= 4;
14642 14661
@@ -14841,7 +14860,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
14841 dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock; 14860 dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
14842 dev_priv->display.crtc_enable = i9xx_crtc_enable; 14861 dev_priv->display.crtc_enable = i9xx_crtc_enable;
14843 dev_priv->display.crtc_disable = i9xx_crtc_disable; 14862 dev_priv->display.crtc_disable = i9xx_crtc_disable;
14844 } else if (!IS_GEN2(dev_priv)) { 14863 } else if (!IS_GEN(dev_priv, 2)) {
14845 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 14864 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14846 dev_priv->display.get_initial_plane_config = 14865 dev_priv->display.get_initial_plane_config =
14847 i9xx_get_initial_plane_config; 14866 i9xx_get_initial_plane_config;
@@ -14857,9 +14876,9 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
14857 dev_priv->display.crtc_disable = i9xx_crtc_disable; 14876 dev_priv->display.crtc_disable = i9xx_crtc_disable;
14858 } 14877 }
14859 14878
14860 if (IS_GEN5(dev_priv)) { 14879 if (IS_GEN(dev_priv, 5)) {
14861 dev_priv->display.fdi_link_train = ironlake_fdi_link_train; 14880 dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
14862 } else if (IS_GEN6(dev_priv)) { 14881 } else if (IS_GEN(dev_priv, 6)) {
14863 dev_priv->display.fdi_link_train = gen6_fdi_link_train; 14882 dev_priv->display.fdi_link_train = gen6_fdi_link_train;
14864 } else if (IS_IVYBRIDGE(dev_priv)) { 14883 } else if (IS_IVYBRIDGE(dev_priv)) {
14865 /* FIXME: detect B0+ stepping and use auto training */ 14884 /* FIXME: detect B0+ stepping and use auto training */
@@ -14991,12 +15010,12 @@ fail:
14991 15010
14992static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv) 15011static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
14993{ 15012{
14994 if (IS_GEN5(dev_priv)) { 15013 if (IS_GEN(dev_priv, 5)) {
14995 u32 fdi_pll_clk = 15014 u32 fdi_pll_clk =
14996 I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK; 15015 I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
14997 15016
14998 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000; 15017 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
14999 } else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) { 15018 } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
15000 dev_priv->fdi_pll_freq = 270000; 15019 dev_priv->fdi_pll_freq = 270000;
15001 } else { 15020 } else {
15002 return; 15021 return;
@@ -15112,10 +15131,10 @@ int intel_modeset_init(struct drm_device *dev)
15112 } 15131 }
15113 15132
15114 /* maximum framebuffer dimensions */ 15133 /* maximum framebuffer dimensions */
15115 if (IS_GEN2(dev_priv)) { 15134 if (IS_GEN(dev_priv, 2)) {
15116 dev->mode_config.max_width = 2048; 15135 dev->mode_config.max_width = 2048;
15117 dev->mode_config.max_height = 2048; 15136 dev->mode_config.max_height = 2048;
15118 } else if (IS_GEN3(dev_priv)) { 15137 } else if (IS_GEN(dev_priv, 3)) {
15119 dev->mode_config.max_width = 4096; 15138 dev->mode_config.max_width = 4096;
15120 dev->mode_config.max_height = 4096; 15139 dev->mode_config.max_height = 4096;
15121 } else { 15140 } else {
@@ -15126,7 +15145,7 @@ int intel_modeset_init(struct drm_device *dev)
15126 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) { 15145 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
15127 dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512; 15146 dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512;
15128 dev->mode_config.cursor_height = 1023; 15147 dev->mode_config.cursor_height = 1023;
15129 } else if (IS_GEN2(dev_priv)) { 15148 } else if (IS_GEN(dev_priv, 2)) {
15130 dev->mode_config.cursor_width = 64; 15149 dev->mode_config.cursor_width = 64;
15131 dev->mode_config.cursor_height = 64; 15150 dev->mode_config.cursor_height = 64;
15132 } else { 15151 } else {
@@ -15850,15 +15869,15 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
15850 } 15869 }
15851 15870
15852 if (IS_G4X(dev_priv)) { 15871 if (IS_G4X(dev_priv)) {
15853 g4x_wm_get_hw_state(dev); 15872 g4x_wm_get_hw_state(dev_priv);
15854 g4x_wm_sanitize(dev_priv); 15873 g4x_wm_sanitize(dev_priv);
15855 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 15874 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
15856 vlv_wm_get_hw_state(dev); 15875 vlv_wm_get_hw_state(dev_priv);
15857 vlv_wm_sanitize(dev_priv); 15876 vlv_wm_sanitize(dev_priv);
15858 } else if (INTEL_GEN(dev_priv) >= 9) { 15877 } else if (INTEL_GEN(dev_priv) >= 9) {
15859 skl_wm_get_hw_state(dev); 15878 skl_wm_get_hw_state(dev_priv);
15860 } else if (HAS_PCH_SPLIT(dev_priv)) { 15879 } else if (HAS_PCH_SPLIT(dev_priv)) {
15861 ilk_wm_get_hw_state(dev); 15880 ilk_wm_get_hw_state(dev_priv);
15862 } 15881 }
15863 15882
15864 for_each_intel_crtc(dev, crtc) { 15883 for_each_intel_crtc(dev, crtc) {
diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h
index 4262452963b3..c7c068662288 100644
--- a/drivers/gpu/drm/i915/intel_display.h
+++ b/drivers/gpu/drm/i915/intel_display.h
@@ -121,7 +121,7 @@ enum i9xx_plane_id {
121}; 121};
122 122
123#define plane_name(p) ((p) + 'A') 123#define plane_name(p) ((p) + 'A')
124#define sprite_name(p, s) ((p) * INTEL_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A') 124#define sprite_name(p, s) ((p) * RUNTIME_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')
125 125
126/* 126/*
127 * Per-pipe plane identifier. 127 * Per-pipe plane identifier.
@@ -311,12 +311,12 @@ struct intel_link_m_n {
311 311
312#define for_each_universal_plane(__dev_priv, __pipe, __p) \ 312#define for_each_universal_plane(__dev_priv, __pipe, __p) \
313 for ((__p) = 0; \ 313 for ((__p) = 0; \
314 (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \ 314 (__p) < RUNTIME_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
315 (__p)++) 315 (__p)++)
316 316
317#define for_each_sprite(__dev_priv, __p, __s) \ 317#define for_each_sprite(__dev_priv, __p, __s) \
318 for ((__s) = 0; \ 318 for ((__s) = 0; \
319 (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)]; \ 319 (__s) < RUNTIME_INFO(__dev_priv)->num_sprites[(__p)]; \
320 (__s)++) 320 (__s)++)
321 321
322#define for_each_port_masked(__port, __ports_mask) \ 322#define for_each_port_masked(__port, __ports_mask) \
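sprite_name() and the plane/sprite iterators above now read num_sprites through RUNTIME_INFO() instead of INTEL_INFO(). A toy model of that static/runtime split is sketched below: fields probed at load time live in a mutable per-device block, while INTEL_INFO() stays a const per-platform table. All structure and field names here are illustrative, not the driver's.

#include <stdio.h>

struct intel_static_info {
	int gen;			/* fixed per platform */
};

struct intel_runtime_info {
	int num_sprites[3];		/* filled in at probe time */
	int num_rings;
};

struct toy_i915 {
	const struct intel_static_info *info;	/* shared, read-only table */
	struct intel_runtime_info rt;		/* owned by this device */
};

#define INTEL_INFO(d)	((d)->info)
#define RUNTIME_INFO(d)	(&(d)->rt)

int main(void)
{
	static const struct intel_static_info skl = { .gen = 9 };
	struct toy_i915 dev = { .info = &skl };

	RUNTIME_INFO(&dev)->num_sprites[0] = 3;	/* a probe result, not static data */
	printf("gen %d, %d sprites on pipe A\n",
	       INTEL_INFO(&dev)->gen, RUNTIME_INFO(&dev)->num_sprites[0]);
	return 0;
}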
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index d18b72b5f0b8..c1eda64ada9d 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -32,7 +32,6 @@
32#include <linux/notifier.h> 32#include <linux/notifier.h>
33#include <linux/reboot.h> 33#include <linux/reboot.h>
34#include <asm/byteorder.h> 34#include <asm/byteorder.h>
35#include <drm/drmP.h>
36#include <drm/drm_atomic_helper.h> 35#include <drm/drm_atomic_helper.h>
37#include <drm/drm_crtc.h> 36#include <drm/drm_crtc.h>
38#include <drm/drm_crtc_helper.h> 37#include <drm/drm_crtc_helper.h>
@@ -304,9 +303,11 @@ static int cnl_max_source_rate(struct intel_dp *intel_dp)
304static int icl_max_source_rate(struct intel_dp *intel_dp) 303static int icl_max_source_rate(struct intel_dp *intel_dp)
305{ 304{
306 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 305 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
306 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
307 enum port port = dig_port->base.port; 307 enum port port = dig_port->base.port;
308 308
309 if (port == PORT_B) 309 if (intel_port_is_combophy(dev_priv, port) &&
310 !intel_dp_is_edp(intel_dp))
310 return 540000; 311 return 540000;
311 312
312 return 810000; 313 return 810000;
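The icl_max_source_rate() change above keys the 540000 kHz cap on whether the port is driven by a combo PHY carrying external DP, instead of hardcoding port B. A reduced standalone model of that decision follows; the combo-PHY test is a stub standing in for intel_port_is_combophy(), with an assumed port mapping.

#include <stdbool.h>
#include <stdio.h>

enum port { PORT_A, PORT_B, PORT_C, PORT_D };

static bool port_is_combophy(enum port port)
{
	return port == PORT_A || port == PORT_B;	/* assumed mapping */
}

static int icl_max_source_rate(enum port port, bool is_edp)
{
	if (port_is_combophy(port) && !is_edp)
		return 540000;	/* external DP on a combo PHY is capped */

	return 810000;		/* eDP and TypeC ports allow the full rate */
}

int main(void)
{
	printf("%d\n", icl_max_source_rate(PORT_B, false));	/* 540000 */
	printf("%d\n", icl_max_source_rate(PORT_C, false));	/* 810000 */
	return 0;
}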
@@ -344,7 +345,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
344 if (INTEL_GEN(dev_priv) >= 10) { 345 if (INTEL_GEN(dev_priv) >= 10) {
345 source_rates = cnl_rates; 346 source_rates = cnl_rates;
346 size = ARRAY_SIZE(cnl_rates); 347 size = ARRAY_SIZE(cnl_rates);
347 if (IS_GEN10(dev_priv)) 348 if (IS_GEN(dev_priv, 10))
348 max_rate = cnl_max_source_rate(intel_dp); 349 max_rate = cnl_max_source_rate(intel_dp);
349 else 350 else
350 max_rate = icl_max_source_rate(intel_dp); 351 max_rate = icl_max_source_rate(intel_dp);
@@ -1128,7 +1129,7 @@ static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
1128 to_i915(intel_dig_port->base.base.dev); 1129 to_i915(intel_dig_port->base.base.dev);
1129 uint32_t precharge, timeout; 1130 uint32_t precharge, timeout;
1130 1131
1131 if (IS_GEN6(dev_priv)) 1132 if (IS_GEN(dev_priv, 6))
1132 precharge = 3; 1133 precharge = 3;
1133 else 1134 else
1134 precharge = 5; 1135 precharge = 5;
@@ -2055,7 +2056,8 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
2055 &limits); 2056 &limits);
2056 2057
2057 /* enable compression if the mode doesn't fit available BW */ 2058 /* enable compression if the mode doesn't fit available BW */
 2059 DRM_DEBUG_KMS("Force DSC en = %d\n", intel_dp->force_dsc_en);
2058 if (ret) { 2060 if (ret || intel_dp->force_dsc_en) {
2059 ret = intel_dp_dsc_compute_config(intel_dp, pipe_config, 2061 ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
2060 conn_state, &limits); 2062 conn_state, &limits);
2061 if (ret < 0) 2063 if (ret < 0)

@@ -2590,7 +2592,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
2590 2592
2591 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 2593 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2592 pp = ironlake_get_pp_control(intel_dp); 2594 pp = ironlake_get_pp_control(intel_dp);
2593 if (IS_GEN5(dev_priv)) { 2595 if (IS_GEN(dev_priv, 5)) {
2594 /* ILK workaround: disable reset around power sequence */ 2596 /* ILK workaround: disable reset around power sequence */
2595 pp &= ~PANEL_POWER_RESET; 2597 pp &= ~PANEL_POWER_RESET;
2596 I915_WRITE(pp_ctrl_reg, pp); 2598 I915_WRITE(pp_ctrl_reg, pp);
@@ -2598,7 +2600,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
2598 } 2600 }
2599 2601
2600 pp |= PANEL_POWER_ON; 2602 pp |= PANEL_POWER_ON;
2601 if (!IS_GEN5(dev_priv)) 2603 if (!IS_GEN(dev_priv, 5))
2602 pp |= PANEL_POWER_RESET; 2604 pp |= PANEL_POWER_RESET;
2603 2605
2604 I915_WRITE(pp_ctrl_reg, pp); 2606 I915_WRITE(pp_ctrl_reg, pp);
@@ -2607,7 +2609,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
2607 wait_panel_on(intel_dp); 2609 wait_panel_on(intel_dp);
2608 intel_dp->last_power_on = jiffies; 2610 intel_dp->last_power_on = jiffies;
2609 2611
2610 if (IS_GEN5(dev_priv)) { 2612 if (IS_GEN(dev_priv, 5)) {
2611 pp |= PANEL_POWER_RESET; /* restore panel reset bit */ 2613 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
2612 I915_WRITE(pp_ctrl_reg, pp); 2614 I915_WRITE(pp_ctrl_reg, pp);
2613 POSTING_READ(pp_ctrl_reg); 2615 POSTING_READ(pp_ctrl_reg);
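The three edp_panel_on() hunks above carry the same ILK (gen5) workaround through the IS_GEN() conversion: the panel reset bit is kept clear across the power-on write and restored only afterwards. A condensed standalone model of that sequence is below; the register accessors are printf stubs, not the real I915_WRITE()/POSTING_READ(), and the bit positions are illustrative.

#include <stdbool.h>
#include <stdio.h>

#define PANEL_POWER_ON		(1u << 0)
#define PANEL_POWER_RESET	(1u << 1)	/* illustrative bit positions */

static unsigned int pp_reg;

static unsigned int read_pp(void) { return pp_reg; }
static void write_pp(unsigned int v) { pp_reg = v; printf("pp <- %#x\n", v); }
static void wait_panel_on(void) { /* the real driver polls panel status here */ }

static void edp_panel_on_sketch(bool is_gen5)
{
	unsigned int pp = read_pp();

	if (is_gen5) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		write_pp(pp);
	}

	pp |= PANEL_POWER_ON;
	if (!is_gen5)
		pp |= PANEL_POWER_RESET;
	write_pp(pp);

	wait_panel_on();

	if (is_gen5) {
		pp |= PANEL_POWER_RESET;	/* restore panel reset bit */
		write_pp(pp);
	}
}

int main(void) { edp_panel_on_sketch(true); return 0; }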
@@ -2836,7 +2838,7 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
2836 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI 2838 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
2837 * 2. Program DP PLL enable 2839 * 2. Program DP PLL enable
2838 */ 2840 */
2839 if (IS_GEN5(dev_priv)) 2841 if (IS_GEN(dev_priv, 5))
2840 intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe); 2842 intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);
2841 2843
2842 intel_dp->DP |= DP_PLL_ENABLE; 2844 intel_dp->DP |= DP_PLL_ENABLE;
@@ -3854,7 +3856,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp)
3854 } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) { 3856 } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
3855 signal_levels = ivb_cpu_edp_signal_levels(train_set); 3857 signal_levels = ivb_cpu_edp_signal_levels(train_set);
3856 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB; 3858 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3857 } else if (IS_GEN6(dev_priv) && port == PORT_A) { 3859 } else if (IS_GEN(dev_priv, 6) && port == PORT_A) {
3858 signal_levels = snb_cpu_edp_signal_levels(train_set); 3860 signal_levels = snb_cpu_edp_signal_levels(train_set);
3859 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB; 3861 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3860 } else { 3862 } else {
@@ -3996,6 +3998,42 @@ intel_dp_link_down(struct intel_encoder *encoder,
3996 } 3998 }
3997} 3999}
3998 4000
4001static void
4002intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp)
4003{
4004 u8 dpcd_ext[6];
4005
4006 /*
4007 * Prior to DP1.3 the bit represented by
4008 * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved.
 4009 * If it is set, DP_DPCD_REV at 0000h could be at a value less than
4010 * the true capability of the panel. The only way to check is to
4011 * then compare 0000h and 2200h.
4012 */
4013 if (!(intel_dp->dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
4014 DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
4015 return;
4016
4017 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DP13_DPCD_REV,
4018 &dpcd_ext, sizeof(dpcd_ext)) != sizeof(dpcd_ext)) {
4019 DRM_ERROR("DPCD failed read at extended capabilities\n");
4020 return;
4021 }
4022
4023 if (intel_dp->dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
4024 DRM_DEBUG_KMS("DPCD extended DPCD rev less than base DPCD rev\n");
4025 return;
4026 }
4027
4028 if (!memcmp(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext)))
4029 return;
4030
4031 DRM_DEBUG_KMS("Base DPCD: %*ph\n",
4032 (int)sizeof(intel_dp->dpcd), intel_dp->dpcd);
4033
4034 memcpy(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext));
4035}
4036
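A host-side model of intel_dp_extended_receiver_capabilities() above: when the DP 1.3 extended-cap bit is set, the mirror block at 2200h is read and adopted unless it reports an older DPCD revision than the base block at 0000h. This sketch stubs the AUX read with canned data; the EXT_CAP_PRESENT bit position matches the code above but is restated here as an assumption.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DP_DPCD_REV		0
#define EXT_CAP_PRESENT		(1u << 7)	/* assumed bit position */

static bool aux_read_2200h(uint8_t *buf, size_t len)
{
	static const uint8_t mirror[6] = { 0x14, 0x1e, 0xc4, 0x81, 0x01, 0x01 };

	memcpy(buf, mirror, len);	/* pretend the sink answered over AUX */
	return true;
}

static void fixup_receiver_caps(uint8_t dpcd[6], uint8_t rd_interval)
{
	uint8_t ext[6];

	if (!(rd_interval & EXT_CAP_PRESENT))
		return;
	if (!aux_read_2200h(ext, sizeof(ext)))
		return;
	if (dpcd[DP_DPCD_REV] > ext[DP_DPCD_REV])
		return;			/* extended block is older: keep base caps */

	memcpy(dpcd, ext, 6);
}

int main(void)
{
	uint8_t dpcd[6] = { 0x12, 0x0a, 0x84, 0x01, 0x01, 0x01 };

	fixup_receiver_caps(dpcd, EXT_CAP_PRESENT);
	printf("DPCD rev now %#x\n", dpcd[DP_DPCD_REV]);	/* prints 0x14 */
	return 0;
}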
3999bool 4037bool
4000intel_dp_read_dpcd(struct intel_dp *intel_dp) 4038intel_dp_read_dpcd(struct intel_dp *intel_dp)
4001{ 4039{
@@ -4003,6 +4041,8 @@ intel_dp_read_dpcd(struct intel_dp *intel_dp)
4003 sizeof(intel_dp->dpcd)) < 0) 4041 sizeof(intel_dp->dpcd)) < 0)
4004 return false; /* aux transfer failed */ 4042 return false; /* aux transfer failed */
4005 4043
4044 intel_dp_extended_receiver_capabilities(intel_dp);
4045
4006 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd); 4046 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
4007 4047
4008 return intel_dp->dpcd[DP_DPCD_REV] != 0; 4048 return intel_dp->dpcd[DP_DPCD_REV] != 0;
@@ -5033,28 +5073,38 @@ static bool icl_combo_port_connected(struct drm_i915_private *dev_priv,
5033 return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(port); 5073 return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(port);
5034} 5074}
5035 5075
5076static const char *tc_type_name(enum tc_port_type type)
5077{
5078 static const char * const names[] = {
5079 [TC_PORT_UNKNOWN] = "unknown",
5080 [TC_PORT_LEGACY] = "legacy",
5081 [TC_PORT_TYPEC] = "typec",
5082 [TC_PORT_TBT] = "tbt",
5083 };
5084
5085 if (WARN_ON(type >= ARRAY_SIZE(names)))
5086 type = TC_PORT_UNKNOWN;
5087
5088 return names[type];
5089}
5090
5036static void icl_update_tc_port_type(struct drm_i915_private *dev_priv, 5091static void icl_update_tc_port_type(struct drm_i915_private *dev_priv,
5037 struct intel_digital_port *intel_dig_port, 5092 struct intel_digital_port *intel_dig_port,
5038 bool is_legacy, bool is_typec, bool is_tbt) 5093 bool is_legacy, bool is_typec, bool is_tbt)
5039{ 5094{
5040 enum port port = intel_dig_port->base.port; 5095 enum port port = intel_dig_port->base.port;
5041 enum tc_port_type old_type = intel_dig_port->tc_type; 5096 enum tc_port_type old_type = intel_dig_port->tc_type;
5042 const char *type_str;
5043 5097
5044 WARN_ON(is_legacy + is_typec + is_tbt != 1); 5098 WARN_ON(is_legacy + is_typec + is_tbt != 1);
5045 5099
5046 if (is_legacy) { 5100 if (is_legacy)
5047 intel_dig_port->tc_type = TC_PORT_LEGACY; 5101 intel_dig_port->tc_type = TC_PORT_LEGACY;
5048 type_str = "legacy"; 5102 else if (is_typec)
5049 } else if (is_typec) {
5050 intel_dig_port->tc_type = TC_PORT_TYPEC; 5103 intel_dig_port->tc_type = TC_PORT_TYPEC;
5051 type_str = "typec"; 5104 else if (is_tbt)
5052 } else if (is_tbt) {
5053 intel_dig_port->tc_type = TC_PORT_TBT; 5105 intel_dig_port->tc_type = TC_PORT_TBT;
5054 type_str = "tbt"; 5106 else
5055 } else {
5056 return; 5107 return;
5057 }
5058 5108
5059 /* Types are not supposed to be changed at runtime. */ 5109 /* Types are not supposed to be changed at runtime. */
5060 WARN_ON(old_type != TC_PORT_UNKNOWN && 5110 WARN_ON(old_type != TC_PORT_UNKNOWN &&
@@ -5062,12 +5112,9 @@ static void icl_update_tc_port_type(struct drm_i915_private *dev_priv,
5062 5112
5063 if (old_type != intel_dig_port->tc_type) 5113 if (old_type != intel_dig_port->tc_type)
5064 DRM_DEBUG_KMS("Port %c has TC type %s\n", port_name(port), 5114 DRM_DEBUG_KMS("Port %c has TC type %s\n", port_name(port),
5065 type_str); 5115 tc_type_name(intel_dig_port->tc_type));
5066} 5116}
5067 5117
5068static void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
5069 struct intel_digital_port *dig_port);
5070
5071/* 5118/*
5072 * This function implements the first part of the Connect Flow described by our 5119 * This function implements the first part of the Connect Flow described by our
5073 * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading 5120 * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
@@ -5102,6 +5149,7 @@ static bool icl_tc_phy_connect(struct drm_i915_private *dev_priv,
5102 val = I915_READ(PORT_TX_DFLEXDPPMS); 5149 val = I915_READ(PORT_TX_DFLEXDPPMS);
5103 if (!(val & DP_PHY_MODE_STATUS_COMPLETED(tc_port))) { 5150 if (!(val & DP_PHY_MODE_STATUS_COMPLETED(tc_port))) {
5104 DRM_DEBUG_KMS("DP PHY for TC port %d not ready\n", tc_port); 5151 DRM_DEBUG_KMS("DP PHY for TC port %d not ready\n", tc_port);
5152 WARN_ON(dig_port->tc_legacy_port);
5105 return false; 5153 return false;
5106 } 5154 }
5107 5155
@@ -5133,8 +5181,8 @@ static bool icl_tc_phy_connect(struct drm_i915_private *dev_priv,
5133 * See the comment at the connect function. This implements the Disconnect 5181 * See the comment at the connect function. This implements the Disconnect
5134 * Flow. 5182 * Flow.
5135 */ 5183 */
5136static void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv, 5184void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
5137 struct intel_digital_port *dig_port) 5185 struct intel_digital_port *dig_port)
5138{ 5186{
5139 enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port); 5187 enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
5140 5188
@@ -5154,6 +5202,10 @@ static void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
5154 I915_WRITE(PORT_TX_DFLEXDPCSSS, val); 5202 I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
5155 } 5203 }
5156 5204
5205 DRM_DEBUG_KMS("Port %c TC type %s disconnected\n",
5206 port_name(dig_port->base.port),
5207 tc_type_name(dig_port->tc_type));
5208
5157 dig_port->tc_type = TC_PORT_UNKNOWN; 5209 dig_port->tc_type = TC_PORT_UNKNOWN;
5158} 5210}
5159 5211
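A toy version of the TypeC port-type bookkeeping in the two hunks above: connect latches exactly one of legacy/typec/tbt, the type never changes while connected, and disconnect returns the port to unknown so a later connect may latch a different type. The asserts mirror the WARN_ON'd invariants; none of this is the hardware flow itself.

#include <assert.h>
#include <stdbool.h>

enum tc_port_type { TC_UNKNOWN, TC_LEGACY, TC_TYPEC, TC_TBT };

struct tc_port { enum tc_port_type type; };

static void tc_update_type(struct tc_port *p,
			   bool is_legacy, bool is_typec, bool is_tbt)
{
	enum tc_port_type new_type;

	assert(is_legacy + is_typec + is_tbt == 1);

	new_type = is_legacy ? TC_LEGACY : is_typec ? TC_TYPEC : TC_TBT;

	/* Types are not supposed to be changed at runtime. */
	assert(p->type == TC_UNKNOWN || p->type == new_type);
	p->type = new_type;
}

static void tc_disconnect(struct tc_port *p)
{
	p->type = TC_UNKNOWN;	/* next connect may latch a different type */
}

int main(void)
{
	struct tc_port p = { TC_UNKNOWN };

	tc_update_type(&p, false, true, false);
	tc_disconnect(&p);
	tc_update_type(&p, true, false, false);	/* legal after a disconnect */
	return 0;
}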
@@ -5175,7 +5227,14 @@ static bool icl_tc_port_connected(struct drm_i915_private *dev_priv,
5175 bool is_legacy, is_typec, is_tbt; 5227 bool is_legacy, is_typec, is_tbt;
5176 u32 dpsp; 5228 u32 dpsp;
5177 5229
5178 is_legacy = I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port); 5230 /*
5231 * WARN if we got a legacy port HPD, but VBT didn't mark the port as
5232 * legacy. Treat the port as legacy from now on.
5233 */
5234 if (WARN_ON(!intel_dig_port->tc_legacy_port &&
5235 I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port)))
5236 intel_dig_port->tc_legacy_port = true;
5237 is_legacy = intel_dig_port->tc_legacy_port;
5179 5238
5180 /* 5239 /*
5181 * The spec says we shouldn't be using the ISR bits for detecting 5240 * The spec says we shouldn't be using the ISR bits for detecting
@@ -5187,6 +5246,7 @@ static bool icl_tc_port_connected(struct drm_i915_private *dev_priv,
5187 5246
5188 if (!is_legacy && !is_typec && !is_tbt) { 5247 if (!is_legacy && !is_typec && !is_tbt) {
5189 icl_tc_phy_disconnect(dev_priv, intel_dig_port); 5248 icl_tc_phy_disconnect(dev_priv, intel_dig_port);
5249
5190 return false; 5250 return false;
5191 } 5251 }
5192 5252
@@ -5238,17 +5298,17 @@ bool intel_digital_port_connected(struct intel_encoder *encoder)
5238 5298
5239 if (INTEL_GEN(dev_priv) >= 11) 5299 if (INTEL_GEN(dev_priv) >= 11)
5240 return icl_digital_port_connected(encoder); 5300 return icl_digital_port_connected(encoder);
5241 else if (IS_GEN10(dev_priv) || IS_GEN9_BC(dev_priv)) 5301 else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv))
5242 return spt_digital_port_connected(encoder); 5302 return spt_digital_port_connected(encoder);
5243 else if (IS_GEN9_LP(dev_priv)) 5303 else if (IS_GEN9_LP(dev_priv))
5244 return bxt_digital_port_connected(encoder); 5304 return bxt_digital_port_connected(encoder);
5245 else if (IS_GEN8(dev_priv)) 5305 else if (IS_GEN(dev_priv, 8))
5246 return bdw_digital_port_connected(encoder); 5306 return bdw_digital_port_connected(encoder);
5247 else if (IS_GEN7(dev_priv)) 5307 else if (IS_GEN(dev_priv, 7))
5248 return ivb_digital_port_connected(encoder); 5308 return ivb_digital_port_connected(encoder);
5249 else if (IS_GEN6(dev_priv)) 5309 else if (IS_GEN(dev_priv, 6))
5250 return snb_digital_port_connected(encoder); 5310 return snb_digital_port_connected(encoder);
5251 else if (IS_GEN5(dev_priv)) 5311 else if (IS_GEN(dev_priv, 5))
5252 return ilk_digital_port_connected(encoder); 5312 return ilk_digital_port_connected(encoder);
5253 5313
5254 MISSING_CASE(INTEL_GEN(dev_priv)); 5314 MISSING_CASE(INTEL_GEN(dev_priv));
@@ -5495,7 +5555,7 @@ intel_dp_connector_unregister(struct drm_connector *connector)
5495 intel_connector_unregister(connector); 5555 intel_connector_unregister(connector);
5496} 5556}
5497 5557
5498void intel_dp_encoder_destroy(struct drm_encoder *encoder) 5558void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
5499{ 5559{
5500 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 5560 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
5501 struct intel_dp *intel_dp = &intel_dig_port->dp; 5561 struct intel_dp *intel_dp = &intel_dig_port->dp;
@@ -5518,9 +5578,14 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
5518 } 5578 }
5519 5579
5520 intel_dp_aux_fini(intel_dp); 5580 intel_dp_aux_fini(intel_dp);
5581}
5582
5583static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
5584{
5585 intel_dp_encoder_flush_work(encoder);
5521 5586
5522 drm_encoder_cleanup(encoder); 5587 drm_encoder_cleanup(encoder);
5523 kfree(intel_dig_port); 5588 kfree(enc_to_dig_port(encoder));
5524} 5589}
5525 5590
5526void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) 5591void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
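The hunk above splits encoder teardown in two: the work-flushing half becomes callable on its own (exported as intel_dp_encoder_flush_work() in the header change later in this diff), while destroy stays flush + cleanup + free. A bare-pattern sketch, with the types reduced to stubs:

#include <stdlib.h>

struct toy_encoder { void *dig_port; };

static void encoder_flush_work(struct toy_encoder *enc)
{
	/* cancel delayed work, power off the panel, tear down AUX */
	(void)enc;
}

static void encoder_destroy(struct toy_encoder *enc)
{
	encoder_flush_work(enc);	/* reuse the standalone half */
	free(enc->dig_port);		/* then release the object */
	free(enc);
}

int main(void)
{
	struct toy_encoder *enc = calloc(1, sizeof(*enc));

	encoder_destroy(enc);
	return 0;
}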
@@ -5583,7 +5648,12 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
5583 } 5648 }
5584 5649
5585 reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK; 5650 reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK;
5586 return reply == DP_AUX_NATIVE_REPLY_ACK ? 0 : -EIO; 5651 if (reply != DP_AUX_NATIVE_REPLY_ACK) {
5652 DRM_DEBUG_KMS("Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n",
5653 reply);
5654 return -EIO;
5655 }
5656 return 0;
5587} 5657}
5588 5658
5589static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port, 5659static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
@@ -6366,8 +6436,8 @@ void intel_edp_drrs_enable(struct intel_dp *intel_dp,
6366 } 6436 }
6367 6437
6368 mutex_lock(&dev_priv->drrs.mutex); 6438 mutex_lock(&dev_priv->drrs.mutex);
6369 if (WARN_ON(dev_priv->drrs.dp)) { 6439 if (dev_priv->drrs.dp) {
6370 DRM_ERROR("DRRS already enabled\n"); 6440 DRM_DEBUG_KMS("DRRS already enabled\n");
6371 goto unlock; 6441 goto unlock;
6372 } 6442 }
6373 6443
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 5899debe2184..a19699023db1 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -23,7 +23,6 @@
23 * 23 *
24 */ 24 */
25 25
26#include <drm/drmP.h>
27#include "i915_drv.h" 26#include "i915_drv.h"
28#include "intel_drv.h" 27#include "intel_drv.h"
29#include <drm/drm_atomic_helper.h> 28#include <drm/drm_atomic_helper.h>
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 19d9abd2666e..d4ee09fb275a 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -243,6 +243,9 @@ struct intel_encoder {
243 void (*post_pll_disable)(struct intel_encoder *, 243 void (*post_pll_disable)(struct intel_encoder *,
244 const struct intel_crtc_state *, 244 const struct intel_crtc_state *,
245 const struct drm_connector_state *); 245 const struct drm_connector_state *);
246 void (*update_pipe)(struct intel_encoder *,
247 const struct intel_crtc_state *,
248 const struct drm_connector_state *);
246 /* Read out the current hw state of this connector, returning true if 249 /* Read out the current hw state of this connector, returning true if
247 * the encoder is active. If the encoder is enabled it also set the pipe 250 * the encoder is active. If the encoder is enabled it also set the pipe
248 * it is connected to in the pipe parameter. */ 251 * it is connected to in the pipe parameter. */
@@ -1208,6 +1211,9 @@ struct intel_dp {
1208 1211
1209 /* Displayport compliance testing */ 1212 /* Displayport compliance testing */
1210 struct intel_dp_compliance compliance; 1213 struct intel_dp_compliance compliance;
1214
1215 /* Display stream compression testing */
1216 bool force_dsc_en;
1211}; 1217};
1212 1218
1213enum lspcon_vendor { 1219enum lspcon_vendor {
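The new force_dsc_en flag pairs with the compute-link-config hunk earlier in this diff: link configuration normally falls back to DSC only when the uncompressed mode does not fit the link, and the flag (driven from debugfs) forces that fallback for compliance testing. A reduced model of the decision, with toy bandwidth numbers in place of the real limits:

#include <stdbool.h>
#include <stdio.h>

static int compute_link_config(int required_bw, int link_bw)
{
	return required_bw <= link_bw ? 0 : -1;	/* toy bandwidth check */
}

static int compute_dsc_config(void)
{
	return 0;	/* assume compression always finds a config here */
}

static int compute_config(int required_bw, int link_bw, bool force_dsc_en)
{
	int ret = compute_link_config(required_bw, link_bw);

	/* enable compression if the mode doesn't fit available BW */
	if (ret || force_dsc_en)
		ret = compute_dsc_config();

	return ret;
}

int main(void)
{
	printf("%d\n", compute_config(100, 80, false));	/* falls back to DSC */
	printf("%d\n", compute_config(50, 80, true));	/* forced DSC path */
	return 0;
}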
@@ -1233,6 +1239,7 @@ struct intel_digital_port {
1233 /* Used for DP and ICL+ TypeC/DP and TypeC/HDMI ports. */ 1239 /* Used for DP and ICL+ TypeC/DP and TypeC/HDMI ports. */
1234 enum aux_ch aux_ch; 1240 enum aux_ch aux_ch;
1235 enum intel_display_power_domain ddi_io_power_domain; 1241 enum intel_display_power_domain ddi_io_power_domain;
1242 bool tc_legacy_port:1;
1236 enum tc_port_type tc_type; 1243 enum tc_port_type tc_type;
1237 1244
1238 void (*write_infoframe)(struct intel_encoder *encoder, 1245 void (*write_infoframe)(struct intel_encoder *encoder,
@@ -1805,7 +1812,7 @@ void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
1805 bool enable); 1812 bool enable);
1806void intel_dp_encoder_reset(struct drm_encoder *encoder); 1813void intel_dp_encoder_reset(struct drm_encoder *encoder);
1807void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder); 1814void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
1808void intel_dp_encoder_destroy(struct drm_encoder *encoder); 1815void intel_dp_encoder_flush_work(struct drm_encoder *encoder);
1809int intel_dp_compute_config(struct intel_encoder *encoder, 1816int intel_dp_compute_config(struct intel_encoder *encoder,
1810 struct intel_crtc_state *pipe_config, 1817 struct intel_crtc_state *pipe_config,
1811 struct drm_connector_state *conn_state); 1818 struct drm_connector_state *conn_state);
@@ -1873,6 +1880,8 @@ bool intel_dp_read_dpcd(struct intel_dp *intel_dp);
1873int intel_dp_link_required(int pixel_clock, int bpp); 1880int intel_dp_link_required(int pixel_clock, int bpp);
1874int intel_dp_max_data_rate(int max_link_clock, int max_lanes); 1881int intel_dp_max_data_rate(int max_link_clock, int max_lanes);
1875bool intel_digital_port_connected(struct intel_encoder *encoder); 1882bool intel_digital_port_connected(struct intel_encoder *encoder);
1883void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
1884 struct intel_digital_port *dig_port);
1876 1885
1877/* intel_dp_aux_backlight.c */ 1886/* intel_dp_aux_backlight.c */
1878int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector); 1887int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector);
@@ -2199,16 +2208,16 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv);
2199void gen6_rps_reset_ei(struct drm_i915_private *dev_priv); 2208void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
2200void gen6_rps_idle(struct drm_i915_private *dev_priv); 2209void gen6_rps_idle(struct drm_i915_private *dev_priv);
2201void gen6_rps_boost(struct i915_request *rq, struct intel_rps_client *rps); 2210void gen6_rps_boost(struct i915_request *rq, struct intel_rps_client *rps);
2202void g4x_wm_get_hw_state(struct drm_device *dev); 2211void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv);
2203void vlv_wm_get_hw_state(struct drm_device *dev); 2212void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv);
2204void ilk_wm_get_hw_state(struct drm_device *dev); 2213void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv);
2205void skl_wm_get_hw_state(struct drm_device *dev); 2214void skl_wm_get_hw_state(struct drm_i915_private *dev_priv);
2206void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc, 2215void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
2207 struct skl_ddb_entry *ddb_y, 2216 struct skl_ddb_entry *ddb_y,
2208 struct skl_ddb_entry *ddb_uv); 2217 struct skl_ddb_entry *ddb_uv);
2209void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, 2218void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
2210 struct skl_ddb_allocation *ddb /* out */); 2219 struct skl_ddb_allocation *ddb /* out */);
2211void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc, 2220void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
2212 struct skl_pipe_wm *out); 2221 struct skl_pipe_wm *out);
2213void g4x_wm_sanitize(struct drm_i915_private *dev_priv); 2222void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
2214void vlv_wm_sanitize(struct drm_i915_private *dev_priv); 2223void vlv_wm_sanitize(struct drm_i915_private *dev_priv);
@@ -2326,10 +2335,10 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
2326 struct intel_plane_state *intel_state); 2335 struct intel_plane_state *intel_state);
2327 2336
2328/* intel_color.c */ 2337/* intel_color.c */
2329void intel_color_init(struct drm_crtc *crtc); 2338void intel_color_init(struct intel_crtc *crtc);
2330int intel_color_check(struct drm_crtc *crtc, struct drm_crtc_state *state); 2339int intel_color_check(struct intel_crtc_state *crtc_state);
2331void intel_color_set_csc(struct drm_crtc_state *crtc_state); 2340void intel_color_set_csc(struct intel_crtc_state *crtc_state);
2332void intel_color_load_luts(struct drm_crtc_state *crtc_state); 2341void intel_color_load_luts(struct intel_crtc_state *crtc_state);
2333 2342
2334/* intel_lspcon.c */ 2343/* intel_lspcon.c */
2335bool lspcon_init(struct intel_digital_port *intel_dig_port); 2344bool lspcon_init(struct intel_digital_port *intel_dig_port);
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index d968f1f13e09..fc7a09049f81 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -24,7 +24,6 @@
24#ifndef _INTEL_DSI_H 24#ifndef _INTEL_DSI_H
25#define _INTEL_DSI_H 25#define _INTEL_DSI_H
26 26
27#include <drm/drmP.h>
28#include <drm/drm_crtc.h> 27#include <drm/drm_crtc.h>
29#include <drm/drm_mipi_dsi.h> 28#include <drm/drm_mipi_dsi.h>
30#include "intel_drv.h" 29#include "intel_drv.h"
diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c
index a1a8b3790e61..06a11c35a784 100644
--- a/drivers/gpu/drm/i915/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c
@@ -24,15 +24,15 @@
24 * 24 *
25 */ 25 */
26 26
27#include <drm/drmP.h>
28#include <drm/drm_crtc.h> 27#include <drm/drm_crtc.h>
29#include <drm/drm_edid.h> 28#include <drm/drm_edid.h>
30#include <drm/i915_drm.h> 29#include <drm/i915_drm.h>
31#include <linux/gpio/consumer.h> 30#include <linux/gpio/consumer.h>
31#include <linux/mfd/intel_soc_pmic.h>
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <video/mipi_display.h> 33#include <video/mipi_display.h>
34#include <asm/intel-mid.h> 34#include <asm/intel-mid.h>
35#include <video/mipi_display.h> 35#include <asm/unaligned.h>
36#include "i915_drv.h" 36#include "i915_drv.h"
37#include "intel_drv.h" 37#include "intel_drv.h"
38#include "intel_dsi.h" 38#include "intel_dsi.h"
@@ -393,7 +393,25 @@ static const u8 *mipi_exec_spi(struct intel_dsi *intel_dsi, const u8 *data)
393 393
394static const u8 *mipi_exec_pmic(struct intel_dsi *intel_dsi, const u8 *data) 394static const u8 *mipi_exec_pmic(struct intel_dsi *intel_dsi, const u8 *data)
395{ 395{
396 DRM_DEBUG_KMS("Skipping PMIC element execution\n"); 396#ifdef CONFIG_PMIC_OPREGION
397 u32 value, mask, reg_address;
398 u16 i2c_address;
399 int ret;
400
401 /* byte 0 aka PMIC Flag is reserved */
402 i2c_address = get_unaligned_le16(data + 1);
403 reg_address = get_unaligned_le32(data + 3);
404 value = get_unaligned_le32(data + 7);
405 mask = get_unaligned_le32(data + 11);
406
407 ret = intel_soc_pmic_exec_mipi_pmic_seq_element(i2c_address,
408 reg_address,
409 value, mask);
410 if (ret)
411 DRM_ERROR("%s failed, error: %d\n", __func__, ret);
412#else
413 DRM_ERROR("Your hardware requires CONFIG_PMIC_OPREGION and it is not set\n");
414#endif
397 415
398 return data + 15; 416 return data + 15;
399} 417}
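The mipi_exec_pmic() hunk above finally decodes the 15-byte PMIC sequence element: byte 0 is a reserved flag, then a little-endian 16-bit I2C address followed by three little-endian 32-bit fields (register, value, mask). A standalone parser for the same layout is below; the printf stands in for intel_soc_pmic_exec_mipi_pmic_seq_element().

#include <stdint.h>
#include <stdio.h>

static uint16_t le16(const uint8_t *p) { return p[0] | (uint16_t)p[1] << 8; }
static uint32_t le32(const uint8_t *p)
{
	return p[0] | (uint32_t)p[1] << 8 | (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

static const uint8_t *exec_pmic(const uint8_t *data)
{
	uint16_t i2c_address = le16(data + 1);	/* byte 0 aka PMIC Flag is reserved */
	uint32_t reg = le32(data + 3);
	uint32_t value = le32(data + 7);
	uint32_t mask = le32(data + 11);

	printf("pmic: i2c %#x reg %#x val %#x mask %#x\n",
	       (unsigned)i2c_address, (unsigned)reg, (unsigned)value, (unsigned)mask);
	return data + 15;	/* fixed element size */
}

int main(void)
{
	const uint8_t elem[15] = {
		0x00,			/* reserved flag */
		0x34, 0x12,		/* i2c address 0x1234 (LE) */
		0x78, 0x56, 0x00, 0x00,	/* register 0x5678 */
		0x01, 0x00, 0x00, 0x00,	/* value 1 */
		0xff, 0x00, 0x00, 0x00,	/* mask 0xff */
	};

	exec_pmic(elem);
	return 0;
}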
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 17a16917e134..a6c82482a841 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -26,7 +26,6 @@
26 */ 26 */
27#include <linux/i2c.h> 27#include <linux/i2c.h>
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <drm/drmP.h>
30#include <drm/drm_atomic_helper.h> 29#include <drm/drm_atomic_helper.h>
31#include <drm/drm_crtc.h> 30#include <drm/drm_crtc.h>
32#include "intel_drv.h" 31#include "intel_drv.h"
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index ff5b7bc692ce..236cd040f271 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -261,6 +261,31 @@ static void __sprint_engine_name(char *name, const struct engine_info *info)
261 info->instance) >= INTEL_ENGINE_CS_MAX_NAME); 261 info->instance) >= INTEL_ENGINE_CS_MAX_NAME);
262} 262}
263 263
264void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask)
265{
266 struct drm_i915_private *dev_priv = engine->i915;
267 i915_reg_t hwstam;
268
269 /*
270 * Though they added more rings on g4x/ilk, they did not add
271 * per-engine HWSTAM until gen6.
272 */
273 if (INTEL_GEN(dev_priv) < 6 && engine->class != RENDER_CLASS)
274 return;
275
276 hwstam = RING_HWSTAM(engine->mmio_base);
277 if (INTEL_GEN(dev_priv) >= 3)
278 I915_WRITE(hwstam, mask);
279 else
280 I915_WRITE16(hwstam, mask);
281}
282
283static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
284{
285 /* Mask off all writes into the unknown HWSP */
286 intel_engine_set_hwsp_writemask(engine, ~0u);
287}
288
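A standalone model of intel_engine_set_hwsp_writemask() above: before gen6 only the render engine has a per-engine HWSTAM register, and before gen3 it is only 16 bits wide, so the write width follows the generation. MMIO is reduced to printf stubs here.

#include <stdint.h>
#include <stdio.h>

enum { RENDER_CLASS, OTHER_CLASS };

static void write32(const char *reg, uint32_t v)
{
	printf("%s <- %#x\n", reg, (unsigned)v);
}

static void write16(const char *reg, uint16_t v)
{
	printf("%s <- %#x (16-bit)\n", reg, (unsigned)v);
}

static void set_hwsp_writemask(int gen, int engine_class, uint32_t mask)
{
	/* no per-engine HWSTAM before gen6, except on the render ring */
	if (gen < 6 && engine_class != RENDER_CLASS)
		return;

	if (gen >= 3)
		write32("HWSTAM", mask);
	else
		write16("HWSTAM", (uint16_t)mask);
}

int main(void)
{
	set_hwsp_writemask(2, RENDER_CLASS, ~0u);	/* masked 16-bit write */
	set_hwsp_writemask(5, OTHER_CLASS, ~0u);	/* silently skipped */
	return 0;
}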
264static int 289static int
265intel_engine_setup(struct drm_i915_private *dev_priv, 290intel_engine_setup(struct drm_i915_private *dev_priv,
266 enum intel_engine_id id) 291 enum intel_engine_id id)
@@ -312,6 +337,9 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
312 337
313 ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier); 338 ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
314 339
340 /* Scrub mmio state on takeover */
341 intel_engine_sanitize_mmio(engine);
342
315 dev_priv->engine_class[info->class][info->instance] = engine; 343 dev_priv->engine_class[info->class][info->instance] = engine;
316 dev_priv->engine[id] = engine; 344 dev_priv->engine[id] = engine;
317 return 0; 345 return 0;
@@ -365,7 +393,7 @@ int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
365 goto cleanup; 393 goto cleanup;
366 } 394 }
367 395
368 device_info->num_rings = hweight32(mask); 396 RUNTIME_INFO(dev_priv)->num_rings = hweight32(mask);
369 397
370 i915_check_and_clear_faults(dev_priv); 398 i915_check_and_clear_faults(dev_priv);
371 399
@@ -426,27 +454,9 @@ cleanup:
426 return err; 454 return err;
427} 455}
428 456
429void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno) 457void intel_engine_write_global_seqno(struct intel_engine_cs *engine, u32 seqno)
430{ 458{
431 struct drm_i915_private *dev_priv = engine->i915;
432
433 /* Our semaphore implementation is strictly monotonic (i.e. we proceed
434 * so long as the semaphore value in the register/page is greater
435 * than the sync value), so whenever we reset the seqno,
436 * so long as we reset the tracking semaphore value to 0, it will
437 * always be before the next request's seqno. If we don't reset
438 * the semaphore value, then when the seqno moves backwards all
439 * future waits will complete instantly (causing rendering corruption).
440 */
441 if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
442 I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
443 I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
444 if (HAS_VEBOX(dev_priv))
445 I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
446 }
447
448 intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno); 459 intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
449 clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
450 460
451 /* After manually advancing the seqno, fake the interrupt in case 461 /* After manually advancing the seqno, fake the interrupt in case
452 * there are any waiters for that seqno. 462 * there are any waiters for that seqno.
@@ -495,6 +505,9 @@ void intel_engine_setup_common(struct intel_engine_cs *engine)
495 505
496static void cleanup_status_page(struct intel_engine_cs *engine) 506static void cleanup_status_page(struct intel_engine_cs *engine)
497{ 507{
508 /* Prevent writes into HWSP after returning the page to the system */
509 intel_engine_set_hwsp_writemask(engine, ~0u);
510
498 if (HWS_NEEDS_PHYSICAL(engine->i915)) { 511 if (HWS_NEEDS_PHYSICAL(engine->i915)) {
499 void *addr = fetch_and_zero(&engine->status_page.page_addr); 512 void *addr = fetch_and_zero(&engine->status_page.page_addr);
500 513
@@ -769,12 +782,12 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
769 782
770u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv) 783u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv)
771{ 784{
772 const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu); 785 const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
773 u32 mcr_s_ss_select; 786 u32 mcr_s_ss_select;
774 u32 slice = fls(sseu->slice_mask); 787 u32 slice = fls(sseu->slice_mask);
775 u32 subslice = fls(sseu->subslice_mask[slice]); 788 u32 subslice = fls(sseu->subslice_mask[slice]);
776 789
777 if (IS_GEN10(dev_priv)) 790 if (IS_GEN(dev_priv, 10))
778 mcr_s_ss_select = GEN8_MCR_SLICE(slice) | 791 mcr_s_ss_select = GEN8_MCR_SLICE(slice) |
779 GEN8_MCR_SUBSLICE(subslice); 792 GEN8_MCR_SUBSLICE(subslice);
780 else if (INTEL_GEN(dev_priv) >= 11) 793 else if (INTEL_GEN(dev_priv) >= 11)
@@ -1030,22 +1043,34 @@ void intel_engines_reset_default_submission(struct drm_i915_private *i915)
1030 engine->set_default_submission(engine); 1043 engine->set_default_submission(engine);
1031} 1044}
1032 1045
1046static bool reset_engines(struct drm_i915_private *i915)
1047{
1048 if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
1049 return false;
1050
1051 return intel_gpu_reset(i915, ALL_ENGINES) == 0;
1052}
1053
1033/** 1054/**
1034 * intel_engines_sanitize: called after the GPU has lost power 1055 * intel_engines_sanitize: called after the GPU has lost power
1035 * @i915: the i915 device 1056 * @i915: the i915 device
1057 * @force: ignore a failed reset and sanitize engine state anyway
1036 * 1058 *
1037 * Anytime we reset the GPU, either with an explicit GPU reset or through a 1059 * Anytime we reset the GPU, either with an explicit GPU reset or through a
1038 * PCI power cycle, the GPU loses state and we must reset our state tracking 1060 * PCI power cycle, the GPU loses state and we must reset our state tracking
1039 * to match. Note that calling intel_engines_sanitize() if the GPU has not 1061 * to match. Note that calling intel_engines_sanitize() if the GPU has not
1040 * been reset results in much confusion! 1062 * been reset results in much confusion!
1041 */ 1063 */
1042void intel_engines_sanitize(struct drm_i915_private *i915) 1064void intel_engines_sanitize(struct drm_i915_private *i915, bool force)
1043{ 1065{
1044 struct intel_engine_cs *engine; 1066 struct intel_engine_cs *engine;
1045 enum intel_engine_id id; 1067 enum intel_engine_id id;
1046 1068
1047 GEM_TRACE("\n"); 1069 GEM_TRACE("\n");
1048 1070
1071 if (!reset_engines(i915) && !force)
1072 return;
1073
1049 for_each_engine(engine, i915, id) { 1074 for_each_engine(engine, i915, id) {
1050 if (engine->reset.reset) 1075 if (engine->reset.reset)
1051 engine->reset.reset(engine, NULL); 1076 engine->reset.reset(engine, NULL);
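A model of the gating added to intel_engines_sanitize() above: engine state is only scrubbed when the device could actually be reset, unless the caller forces it (e.g. resume paths where power is known to have been lost). The toy types stand in for drm_i915_private and intel_gpu_reset().

#include <stdbool.h>
#include <stdio.h>

struct toy_gpu { bool reset_clobbers_display; };

static bool reset_engines(struct toy_gpu *gpu)
{
	if (gpu->reset_clobbers_display)
		return false;		/* can't reset without display damage */
	return true;			/* pretend intel_gpu_reset() succeeded */
}

static void engines_sanitize(struct toy_gpu *gpu, bool force)
{
	if (!reset_engines(gpu) && !force)
		return;			/* nothing was reset: keep state */

	printf("scrubbing per-engine state\n");
}

int main(void)
{
	struct toy_gpu gen3 = { .reset_clobbers_display = true };

	engines_sanitize(&gen3, false);	/* skipped */
	engines_sanitize(&gen3, true);	/* forced, e.g. on resume */
	return 0;
}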
@@ -1248,7 +1273,7 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
1248 &engine->execlists; 1273 &engine->execlists;
1249 u64 addr; 1274 u64 addr;
1250 1275
1251 if (engine->id == RCS && IS_GEN(dev_priv, 4, 7)) 1276 if (engine->id == RCS && IS_GEN_RANGE(dev_priv, 4, 7))
1252 drm_printf(m, "\tCCID: 0x%08x\n", I915_READ(CCID)); 1277 drm_printf(m, "\tCCID: 0x%08x\n", I915_READ(CCID));
1253 drm_printf(m, "\tRING_START: 0x%08x\n", 1278 drm_printf(m, "\tRING_START: 0x%08x\n",
1254 I915_READ(RING_START(engine->mmio_base))); 1279 I915_READ(RING_START(engine->mmio_base)));
@@ -1269,16 +1294,6 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
1269 drm_printf(m, "\tRING_IMR: %08x\n", I915_READ_IMR(engine)); 1294 drm_printf(m, "\tRING_IMR: %08x\n", I915_READ_IMR(engine));
1270 } 1295 }
1271 1296
1272 if (HAS_LEGACY_SEMAPHORES(dev_priv)) {
1273 drm_printf(m, "\tSYNC_0: 0x%08x\n",
1274 I915_READ(RING_SYNC_0(engine->mmio_base)));
1275 drm_printf(m, "\tSYNC_1: 0x%08x\n",
1276 I915_READ(RING_SYNC_1(engine->mmio_base)));
1277 if (HAS_VEBOX(dev_priv))
1278 drm_printf(m, "\tSYNC_2: 0x%08x\n",
1279 I915_READ(RING_SYNC_2(engine->mmio_base)));
1280 }
1281
1282 addr = intel_engine_get_active_head(engine); 1297 addr = intel_engine_get_active_head(engine);
1283 drm_printf(m, "\tACTHD: 0x%08x_%08x\n", 1298 drm_printf(m, "\tACTHD: 0x%08x_%08x\n",
1284 upper_32_bits(addr), lower_32_bits(addr)); 1299 upper_32_bits(addr), lower_32_bits(addr));
@@ -1532,11 +1547,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
1532 spin_unlock(&b->rb_lock); 1547 spin_unlock(&b->rb_lock);
1533 local_irq_restore(flags); 1548 local_irq_restore(flags);
1534 1549
1535 drm_printf(m, "IRQ? 0x%lx (breadcrumbs? %s)\n",
1536 engine->irq_posted,
1537 yesno(test_bit(ENGINE_IRQ_BREADCRUMB,
1538 &engine->irq_posted)));
1539
1540 drm_printf(m, "HWSP:\n"); 1550 drm_printf(m, "HWSP:\n");
1541 hexdump(m, engine->status_page.page_addr, PAGE_SIZE); 1551 hexdump(m, engine->status_page.page_addr, PAGE_SIZE);
1542 1552
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index f23570c44323..ccd5e110a19c 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -84,7 +84,7 @@ static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
84 int lines; 84 int lines;
85 85
86 intel_fbc_get_plane_source_size(cache, NULL, &lines); 86 intel_fbc_get_plane_source_size(cache, NULL, &lines);
87 if (IS_GEN7(dev_priv)) 87 if (IS_GEN(dev_priv, 7))
88 lines = min(lines, 2048); 88 lines = min(lines, 2048);
89 else if (INTEL_GEN(dev_priv) >= 8) 89 else if (INTEL_GEN(dev_priv) >= 8)
90 lines = min(lines, 2560); 90 lines = min(lines, 2560);
@@ -127,7 +127,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
127 cfb_pitch = params->fb.stride; 127 cfb_pitch = params->fb.stride;
128 128
129 /* FBC_CTL wants 32B or 64B units */ 129 /* FBC_CTL wants 32B or 64B units */
130 if (IS_GEN2(dev_priv)) 130 if (IS_GEN(dev_priv, 2))
131 cfb_pitch = (cfb_pitch / 32) - 1; 131 cfb_pitch = (cfb_pitch / 32) - 1;
132 else 132 else
133 cfb_pitch = (cfb_pitch / 64) - 1; 133 cfb_pitch = (cfb_pitch / 64) - 1;
@@ -136,7 +136,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
136 for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++) 136 for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
137 I915_WRITE(FBC_TAG(i), 0); 137 I915_WRITE(FBC_TAG(i), 0);
138 138
139 if (IS_GEN4(dev_priv)) { 139 if (IS_GEN(dev_priv, 4)) {
140 u32 fbc_ctl2; 140 u32 fbc_ctl2;
141 141
142 /* Set it up... */ 142 /* Set it up... */
@@ -233,9 +233,9 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
233 233
234 if (params->flags & PLANE_HAS_FENCE) { 234 if (params->flags & PLANE_HAS_FENCE) {
235 dpfc_ctl |= DPFC_CTL_FENCE_EN; 235 dpfc_ctl |= DPFC_CTL_FENCE_EN;
236 if (IS_GEN5(dev_priv)) 236 if (IS_GEN(dev_priv, 5))
237 dpfc_ctl |= params->vma->fence->id; 237 dpfc_ctl |= params->vma->fence->id;
238 if (IS_GEN6(dev_priv)) { 238 if (IS_GEN(dev_priv, 6)) {
239 I915_WRITE(SNB_DPFC_CTL_SA, 239 I915_WRITE(SNB_DPFC_CTL_SA,
240 SNB_CPU_FENCE_ENABLE | 240 SNB_CPU_FENCE_ENABLE |
241 params->vma->fence->id); 241 params->vma->fence->id);
@@ -243,7 +243,7 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
243 params->crtc.fence_y_offset); 243 params->crtc.fence_y_offset);
244 } 244 }
245 } else { 245 } else {
246 if (IS_GEN6(dev_priv)) { 246 if (IS_GEN(dev_priv, 6)) {
247 I915_WRITE(SNB_DPFC_CTL_SA, 0); 247 I915_WRITE(SNB_DPFC_CTL_SA, 0);
248 I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0); 248 I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
249 } 249 }
@@ -282,7 +282,7 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
282 int threshold = dev_priv->fbc.threshold; 282 int threshold = dev_priv->fbc.threshold;
283 283
284 /* Display WA #0529: skl, kbl, bxt. */ 284 /* Display WA #0529: skl, kbl, bxt. */
285 if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv)) { 285 if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv)) {
286 u32 val = I915_READ(CHICKEN_MISC_4); 286 u32 val = I915_READ(CHICKEN_MISC_4);
287 287
288 val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK); 288 val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK);
@@ -581,10 +581,10 @@ static bool stride_is_valid(struct drm_i915_private *dev_priv,
581 if (stride < 512) 581 if (stride < 512)
582 return false; 582 return false;
583 583
584 if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv)) 584 if (IS_GEN(dev_priv, 2) || IS_GEN(dev_priv, 3))
585 return stride == 4096 || stride == 8192; 585 return stride == 4096 || stride == 8192;
586 586
587 if (IS_GEN4(dev_priv) && !IS_G4X(dev_priv) && stride < 2048) 587 if (IS_GEN(dev_priv, 4) && !IS_G4X(dev_priv) && stride < 2048)
588 return false; 588 return false;
589 589
590 if (stride > 16384) 590 if (stride > 16384)
@@ -603,7 +603,7 @@ static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
603 case DRM_FORMAT_XRGB1555: 603 case DRM_FORMAT_XRGB1555:
604 case DRM_FORMAT_RGB565: 604 case DRM_FORMAT_RGB565:
605 /* 16bpp not supported on gen2 */ 605 /* 16bpp not supported on gen2 */
606 if (IS_GEN2(dev_priv)) 606 if (IS_GEN(dev_priv, 2))
607 return false; 607 return false;
608 /* WaFbcOnly1to1Ratio:ctg */ 608 /* WaFbcOnly1to1Ratio:ctg */
609 if (IS_G4X(dev_priv)) 609 if (IS_G4X(dev_priv))
@@ -626,7 +626,10 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
626 struct intel_fbc *fbc = &dev_priv->fbc; 626 struct intel_fbc *fbc = &dev_priv->fbc;
627 unsigned int effective_w, effective_h, max_w, max_h; 627 unsigned int effective_w, effective_h, max_w, max_h;
628 628
629 if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) { 629 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
630 max_w = 5120;
631 max_h = 4096;
632 } else if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) {
630 max_w = 4096; 633 max_w = 4096;
631 max_h = 4096; 634 max_h = 4096;
632 } else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { 635 } else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
@@ -784,7 +787,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
784 * having a Y offset that isn't divisible by 4 causes FIFO underrun 787 * having a Y offset that isn't divisible by 4 causes FIFO underrun
785 * and screen flicker. 788 * and screen flicker.
786 */ 789 */
787 if (IS_GEN(dev_priv, 9, 10) && 790 if (IS_GEN_RANGE(dev_priv, 9, 10) &&
788 (fbc->state_cache.plane.adjusted_y & 3)) { 791 (fbc->state_cache.plane.adjusted_y & 3)) {
789 fbc->no_fbc_reason = "plane Y offset is misaligned"; 792 fbc->no_fbc_reason = "plane Y offset is misaligned";
790 return false; 793 return false;
@@ -839,7 +842,7 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
839 842
840 params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache); 843 params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);
841 844
842 if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv)) 845 if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
843 params->gen9_wa_cfb_stride = DIV_ROUND_UP(cache->plane.src_w, 846 params->gen9_wa_cfb_stride = DIV_ROUND_UP(cache->plane.src_w,
844 32 * fbc->threshold) * 8; 847 32 * fbc->threshold) * 8;
845} 848}
@@ -1126,8 +1129,6 @@ void intel_fbc_disable(struct intel_crtc *crtc)
1126 if (!fbc_supported(dev_priv)) 1129 if (!fbc_supported(dev_priv))
1127 return; 1130 return;
1128 1131
1129 WARN_ON(crtc->active);
1130
1131 mutex_lock(&fbc->lock); 1132 mutex_lock(&fbc->lock);
1132 if (fbc->crtc == crtc) 1133 if (fbc->crtc == crtc)
1133 __intel_fbc_disable(dev_priv); 1134 __intel_fbc_disable(dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index fb5bb5b32a60..a0c5046e170c 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -37,7 +37,6 @@
37#include <linux/init.h> 37#include <linux/init.h>
38#include <linux/vga_switcheroo.h> 38#include <linux/vga_switcheroo.h>
39 39
40#include <drm/drmP.h>
41#include <drm/drm_crtc.h> 40#include <drm/drm_crtc.h>
42#include <drm/drm_fb_helper.h> 41#include <drm/drm_fb_helper.h>
43#include "intel_drv.h" 42#include "intel_drv.h"
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
index 77c123cc8817..9b39975c8389 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -260,9 +260,9 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
260 260
261 if (HAS_GMCH_DISPLAY(dev_priv)) 261 if (HAS_GMCH_DISPLAY(dev_priv))
262 i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old); 262 i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
263 else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv)) 263 else if (IS_GEN_RANGE(dev_priv, 5, 6))
264 ironlake_set_fifo_underrun_reporting(dev, pipe, enable); 264 ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
265 else if (IS_GEN7(dev_priv)) 265 else if (IS_GEN(dev_priv, 7))
266 ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old); 266 ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
267 else if (INTEL_GEN(dev_priv) >= 8) 267 else if (INTEL_GEN(dev_priv) >= 8)
268 broadwell_set_fifo_underrun_reporting(dev, pipe, enable); 268 broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
@@ -423,7 +423,7 @@ void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv)
423 423
424 if (HAS_GMCH_DISPLAY(dev_priv)) 424 if (HAS_GMCH_DISPLAY(dev_priv))
425 i9xx_check_fifo_underruns(crtc); 425 i9xx_check_fifo_underruns(crtc);
426 else if (IS_GEN7(dev_priv)) 426 else if (IS_GEN(dev_priv, 7))
427 ivybridge_check_fifo_underruns(crtc); 427 ivybridge_check_fifo_underruns(crtc);
428 } 428 }
429 429
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index c3379bde266f..16f253deaf8d 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -60,7 +60,6 @@
60 * functions is deprecated and should be avoided. 60 * functions is deprecated and should be avoided.
61 */ 61 */
62 62
63#include <drm/drmP.h>
64 63
65#include "intel_drv.h" 64#include "intel_drv.h"
66#include "intel_frontbuffer.h" 65#include "intel_frontbuffer.h"
diff --git a/drivers/gpu/drm/i915/intel_guc_fw.c b/drivers/gpu/drm/i915/intel_guc_fw.c
index a67144ee5ceb..13ff7003c6be 100644
--- a/drivers/gpu/drm/i915/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/intel_guc_fw.c
@@ -77,10 +77,6 @@ static void guc_fw_select(struct intel_uc_fw *guc_fw)
77 guc_fw->path = I915_KBL_GUC_UCODE; 77 guc_fw->path = I915_KBL_GUC_UCODE;
78 guc_fw->major_ver_wanted = KBL_FW_MAJOR; 78 guc_fw->major_ver_wanted = KBL_FW_MAJOR;
79 guc_fw->minor_ver_wanted = KBL_FW_MINOR; 79 guc_fw->minor_ver_wanted = KBL_FW_MINOR;
80 } else {
81 dev_info(dev_priv->drm.dev,
82 "%s: No firmware known for this platform!\n",
83 intel_uc_fw_type_repr(guc_fw->type));
84 } 80 }
85} 81}
86 82
@@ -115,7 +111,7 @@ static void guc_prepare_xfer(struct intel_guc *guc)
115 else 111 else
116 I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE); 112 I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
117 113
118 if (IS_GEN9(dev_priv)) { 114 if (IS_GEN(dev_priv, 9)) {
119 /* DOP Clock Gating Enable for GuC clocks */ 115 /* DOP Clock Gating Enable for GuC clocks */
120 I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE | 116 I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
121 I915_READ(GEN7_MISCCPCTL))); 117 I915_READ(GEN7_MISCCPCTL)));
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 1570dcbe249c..ab1c49b106f2 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -572,7 +572,8 @@ static void inject_preempt_context(struct work_struct *work)
572 if (engine->id == RCS) { 572 if (engine->id == RCS) {
573 cs = gen8_emit_ggtt_write_rcs(cs, 573 cs = gen8_emit_ggtt_write_rcs(cs,
574 GUC_PREEMPT_FINISHED, 574 GUC_PREEMPT_FINISHED,
575 addr); 575 addr,
576 PIPE_CONTROL_CS_STALL);
576 } else { 577 } else {
577 cs = gen8_emit_ggtt_write(cs, 578 cs = gen8_emit_ggtt_write(cs,
578 GUC_PREEMPT_FINISHED, 579 GUC_PREEMPT_FINISHED,
diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c
index e26d05a46451..51e9efec5116 100644
--- a/drivers/gpu/drm/i915/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/intel_hangcheck.c
@@ -24,144 +24,6 @@
24 24
25#include "i915_drv.h" 25#include "i915_drv.h"
26 26
27static bool
28ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
29{
30 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
31 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
32 MI_SEMAPHORE_REGISTER);
33}
34
35static struct intel_engine_cs *
36semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
37 u64 offset)
38{
39 struct drm_i915_private *dev_priv = engine->i915;
40 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
41 struct intel_engine_cs *signaller;
42 enum intel_engine_id id;
43
44 for_each_engine(signaller, dev_priv, id) {
45 if (engine == signaller)
46 continue;
47
48 if (sync_bits == signaller->semaphore.mbox.wait[engine->hw_id])
49 return signaller;
50 }
51
52 DRM_DEBUG_DRIVER("No signaller ring found for %s, ipehr 0x%08x\n",
53 engine->name, ipehr);
54
55 return ERR_PTR(-ENODEV);
56}
57
58static struct intel_engine_cs *
59semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
60{
61 struct drm_i915_private *dev_priv = engine->i915;
62 void __iomem *vaddr;
63 u32 cmd, ipehr, head;
64 u64 offset = 0;
65 int i, backwards;
66
67 /*
68 * This function does not support execlist mode - any attempt to
69 * proceed further into this function will result in a kernel panic
70 * when dereferencing ring->buffer, which is not set up in execlist
71 * mode.
72 *
73 * The correct way of doing it would be to derive the currently
74 * executing ring buffer from the current context, which is derived
75 * from the currently running request. Unfortunately, to get the
76 * current request we would have to grab the struct_mutex before doing
77 * anything else, which would be ill-advised since some other thread
78 * might have grabbed it already and managed to hang itself, causing
79 * the hang checker to deadlock.
80 *
81 * Therefore, this function does not support execlist mode in its
82 * current form. Just return NULL and move on.
83 */
84 if (engine->buffer == NULL)
85 return NULL;
86
87 ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
88 if (!ipehr_is_semaphore_wait(engine, ipehr))
89 return NULL;
90
91 /*
92 * HEAD is likely pointing to the dword after the actual command,
93 * so scan backwards until we find the MBOX. But limit it to just 3
94 * or 4 dwords depending on the semaphore wait command size.
95 * Note that we don't care about ACTHD here since that might
 96	 * point at a batch, and semaphores are always emitted into the
97 * ringbuffer itself.
98 */
99 head = I915_READ_HEAD(engine) & HEAD_ADDR;
100 backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
101 vaddr = (void __iomem *)engine->buffer->vaddr;
102
103 for (i = backwards; i; --i) {
104 /*
105 * Be paranoid and presume the hw has gone off into the wild -
106 * our ring is smaller than what the hardware (and hence
107 * HEAD_ADDR) allows. Also handles wrap-around.
108 */
109 head &= engine->buffer->size - 1;
110
111 /* This here seems to blow up */
112 cmd = ioread32(vaddr + head);
113 if (cmd == ipehr)
114 break;
115
116 head -= 4;
117 }
118
119 if (!i)
120 return NULL;
121
122 *seqno = ioread32(vaddr + head + 4) + 1;
123 return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
124}
125
126static int semaphore_passed(struct intel_engine_cs *engine)
127{
128 struct drm_i915_private *dev_priv = engine->i915;
129 struct intel_engine_cs *signaller;
130 u32 seqno;
131
132 engine->hangcheck.deadlock++;
133
134 signaller = semaphore_waits_for(engine, &seqno);
135 if (signaller == NULL)
136 return -1;
137
138 if (IS_ERR(signaller))
139 return 0;
140
141 /* Prevent pathological recursion due to driver bugs */
142 if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
143 return -1;
144
145 if (intel_engine_signaled(signaller, seqno))
146 return 1;
147
148 /* cursory check for an unkickable deadlock */
149 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
150 semaphore_passed(signaller) < 0)
151 return -1;
152
153 return 0;
154}
155
156static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
157{
158 struct intel_engine_cs *engine;
159 enum intel_engine_id id;
160
161 for_each_engine(engine, dev_priv, id)
162 engine->hangcheck.deadlock = 0;
163}
164
165static bool instdone_unchanged(u32 current_instdone, u32 *old_instdone) 27static bool instdone_unchanged(u32 current_instdone, u32 *old_instdone)
166{ 28{
167 u32 tmp = current_instdone | *old_instdone; 29 u32 tmp = current_instdone | *old_instdone;
@@ -236,7 +98,7 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
236 if (ha != ENGINE_DEAD) 98 if (ha != ENGINE_DEAD)
237 return ha; 99 return ha;
238 100
239 if (IS_GEN2(dev_priv)) 101 if (IS_GEN(dev_priv, 2))
240 return ENGINE_DEAD; 102 return ENGINE_DEAD;
241 103
242 /* Is the chip hanging on a WAIT_FOR_EVENT? 104 /* Is the chip hanging on a WAIT_FOR_EVENT?
@@ -252,37 +114,12 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
252 return ENGINE_WAIT_KICK; 114 return ENGINE_WAIT_KICK;
253 } 115 }
254 116
255 if (IS_GEN(dev_priv, 6, 7) && tmp & RING_WAIT_SEMAPHORE) {
256 switch (semaphore_passed(engine)) {
257 default:
258 return ENGINE_DEAD;
259 case 1:
260 i915_handle_error(dev_priv, ALL_ENGINES, 0,
261 "stuck semaphore on %s",
262 engine->name);
263 I915_WRITE_CTL(engine, tmp);
264 return ENGINE_WAIT_KICK;
265 case 0:
266 return ENGINE_WAIT;
267 }
268 }
269
270 return ENGINE_DEAD; 117 return ENGINE_DEAD;
271} 118}
272 119
273static void hangcheck_load_sample(struct intel_engine_cs *engine, 120static void hangcheck_load_sample(struct intel_engine_cs *engine,
274 struct intel_engine_hangcheck *hc) 121 struct intel_engine_hangcheck *hc)
275{ 122{
276 /* We don't strictly need an irq-barrier here, as we are not
 277	 * serving an interrupt request, but be paranoid in case the
278 * barrier has side-effects (such as preventing a broken
279 * cacheline snoop) and so be sure that we can see the seqno
280 * advance. If the seqno should stick, due to a stale
281 * cacheline, we would erroneously declare the GPU hung.
282 */
283 if (engine->irq_seqno_barrier)
284 engine->irq_seqno_barrier(engine);
285
286 hc->acthd = intel_engine_get_active_head(engine); 123 hc->acthd = intel_engine_get_active_head(engine);
287 hc->seqno = intel_engine_get_seqno(engine); 124 hc->seqno = intel_engine_get_seqno(engine);
288} 125}
@@ -433,8 +270,6 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
433 for_each_engine(engine, dev_priv, id) { 270 for_each_engine(engine, dev_priv, id) {
434 struct intel_engine_hangcheck hc; 271 struct intel_engine_hangcheck hc;
435 272
436 semaphore_clear_deadlocks(dev_priv);
437
438 hangcheck_load_sample(engine, &hc); 273 hangcheck_load_sample(engine, &hc);
439 hangcheck_accumulate_sample(engine, &hc); 274 hangcheck_accumulate_sample(engine, &hc);
440 hangcheck_store_sample(engine, &hc); 275 hangcheck_store_sample(engine, &hc);
diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/intel_hdcp.c
index 1bf487f94254..3fcb3b775948 100644
--- a/drivers/gpu/drm/i915/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/intel_hdcp.c
@@ -6,7 +6,6 @@
6 * Sean Paul <seanpaul@chromium.org> 6 * Sean Paul <seanpaul@chromium.org>
7 */ 7 */
8 8
9#include <drm/drmP.h>
10#include <drm/drm_hdcp.h> 9#include <drm/drm_hdcp.h>
11#include <linux/i2c.h> 10#include <linux/i2c.h>
12#include <linux/random.h> 11#include <linux/random.h>
@@ -15,6 +14,7 @@
15#include "i915_reg.h" 14#include "i915_reg.h"
16 15
17#define KEY_LOAD_TRIES 5 16#define KEY_LOAD_TRIES 5
17#define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS 50
18 18
19static 19static
20bool intel_hdcp_is_ksv_valid(u8 *ksv) 20bool intel_hdcp_is_ksv_valid(u8 *ksv)
@@ -157,10 +157,11 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
157 /* 157 /*
158 * Initiate loading the HDCP key from fuses. 158 * Initiate loading the HDCP key from fuses.
159 * 159 *
 160	 * BXT+ platforms, HDCP key needs to be loaded by SW. Only SKL and KBL 160	 * On BXT+ platforms, the HDCP key needs to be loaded by SW. Only Gen 9
 161	 * differ in the key load trigger process from other platforms. 161	 * platforms, except BXT and GLK, differ in the key load trigger process
 162	 * from other platforms, so GEN9_BC uses the GT Driver Mailbox i/f.
162 */ 163 */
163 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { 164 if (IS_GEN9_BC(dev_priv)) {
164 mutex_lock(&dev_priv->pcu_lock); 165 mutex_lock(&dev_priv->pcu_lock);
165 ret = sandybridge_pcode_write(dev_priv, 166 ret = sandybridge_pcode_write(dev_priv,
166 SKL_PCODE_LOAD_HDCP_KEYS, 1); 167 SKL_PCODE_LOAD_HDCP_KEYS, 1);
@@ -636,7 +637,8 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
636 637
637 /* Wait for encryption confirmation */ 638 /* Wait for encryption confirmation */
638 if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port), 639 if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port),
639 HDCP_STATUS_ENC, HDCP_STATUS_ENC, 20)) { 640 HDCP_STATUS_ENC, HDCP_STATUS_ENC,
641 ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
640 DRM_ERROR("Timed out waiting for encryption\n"); 642 DRM_ERROR("Timed out waiting for encryption\n");
641 return -ETIMEDOUT; 643 return -ETIMEDOUT;
642 } 644 }
@@ -666,7 +668,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
666 668
667 I915_WRITE(PORT_HDCP_CONF(port), 0); 669 I915_WRITE(PORT_HDCP_CONF(port), 0);
668 if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port), ~0, 0, 670 if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port), ~0, 0,
669 20)) { 671 ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
670 DRM_ERROR("Failed to disable HDCP, timeout clearing status\n"); 672 DRM_ERROR("Failed to disable HDCP, timeout clearing status\n");
671 return -ETIMEDOUT; 673 return -ETIMEDOUT;
672 } 674 }
@@ -768,8 +770,7 @@ static void intel_hdcp_prop_work(struct work_struct *work)
768bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port) 770bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
769{ 771{
770 /* PORT E doesn't have HDCP, and PORT F is disabled */ 772 /* PORT E doesn't have HDCP, and PORT F is disabled */
771 return ((INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) && 773 return INTEL_GEN(dev_priv) >= 9 && port < PORT_E;
772 !IS_CHERRYVIEW(dev_priv) && port < PORT_E);
773} 774}
774 775
775int intel_hdcp_init(struct intel_connector *connector, 776int intel_hdcp_init(struct intel_connector *connector,
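
Both wait sites above now share ENCRYPT_STATUS_CHANGE_TIMEOUT_MS rather than a bare 20 ms literal, so the HDCP enable and disable paths cannot drift apart. For context, intel_wait_for_register() implements a poll-until-masked-value-matches loop; a self-contained sketch of that shape (illustrative, not the driver's actual body):

	/* Poll a masked register until it matches or the deadline passes. */
	static int poll_register(void __iomem *reg, u32 mask, u32 value,
				 unsigned int timeout_ms)
	{
		unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

		for (;;) {
			if ((readl(reg) & mask) == value)
				return 0;
			if (time_after(jiffies, deadline))
				return -ETIMEDOUT;
			usleep_range(100, 1000);	/* back off between reads */
		}
	}
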
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 1da7bb148fca..ca195e6203c5 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -30,7 +30,6 @@
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/delay.h> 31#include <linux/delay.h>
32#include <linux/hdmi.h> 32#include <linux/hdmi.h>
33#include <drm/drmP.h>
34#include <drm/drm_atomic_helper.h> 33#include <drm/drm_atomic_helper.h>
35#include <drm/drm_crtc.h> 34#include <drm/drm_crtc.h>
36#include <drm/drm_edid.h> 35#include <drm/drm_edid.h>
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index e24174d08fed..ae92d6560165 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -23,7 +23,6 @@
23 23
24#include <linux/kernel.h> 24#include <linux/kernel.h>
25 25
26#include <drm/drmP.h>
27#include <drm/i915_drm.h> 26#include <drm/i915_drm.h>
28 27
29#include "i915_drv.h" 28#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/intel_huc_fw.c b/drivers/gpu/drm/i915/intel_huc_fw.c
index f93d2384d482..7d7bfc7f7ca7 100644
--- a/drivers/gpu/drm/i915/intel_huc_fw.c
+++ b/drivers/gpu/drm/i915/intel_huc_fw.c
@@ -23,8 +23,8 @@
23 */ 23 */
24 24
25#define BXT_HUC_FW_MAJOR 01 25#define BXT_HUC_FW_MAJOR 01
26#define BXT_HUC_FW_MINOR 07 26#define BXT_HUC_FW_MINOR 8
27#define BXT_BLD_NUM 1398 27#define BXT_BLD_NUM 2893
28 28
29#define SKL_HUC_FW_MAJOR 01 29#define SKL_HUC_FW_MAJOR 01
30#define SKL_HUC_FW_MINOR 07 30#define SKL_HUC_FW_MINOR 07
@@ -76,9 +76,6 @@ static void huc_fw_select(struct intel_uc_fw *huc_fw)
76 huc_fw->path = I915_KBL_HUC_UCODE; 76 huc_fw->path = I915_KBL_HUC_UCODE;
77 huc_fw->major_ver_wanted = KBL_HUC_FW_MAJOR; 77 huc_fw->major_ver_wanted = KBL_HUC_FW_MAJOR;
78 huc_fw->minor_ver_wanted = KBL_HUC_FW_MINOR; 78 huc_fw->minor_ver_wanted = KBL_HUC_FW_MINOR;
79 } else {
80 DRM_WARN("%s: No firmware known for this platform!\n",
81 intel_uc_fw_type_repr(huc_fw->type));
82 } 79 }
83} 80}
84 81
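
The version defines above are not just bookkeeping: they are stringified into the firmware filename that the driver requests, so this bump makes BXT load i915/bxt_huc_ver01_8_2893.bin. A sketch of the composition, assuming the usual __stringify()-based path macro in this file:

	/* Sketch: major/minor/build compose directly into the blob name. */
	#define HUC_FW_PATH(platform, major, minor, bld_num) \
		"i915/" __stringify(platform) "_huc_ver" __stringify(major) "_" \
		__stringify(minor) "_" __stringify(bld_num) ".bin"

	#define I915_BXT_HUC_UCODE \
		HUC_FW_PATH(bxt, BXT_HUC_FW_MAJOR, BXT_HUC_FW_MINOR, BXT_BLD_NUM)
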
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 802d0394ccc4..c6159aff9dc8 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -29,7 +29,6 @@
29#include <linux/i2c.h> 29#include <linux/i2c.h>
30#include <linux/i2c-algo-bit.h> 30#include <linux/i2c-algo-bit.h>
31#include <linux/export.h> 31#include <linux/export.h>
32#include <drm/drmP.h>
33#include <drm/drm_hdcp.h> 32#include <drm/drm_hdcp.h>
34#include "intel_drv.h" 33#include "intel_drv.h"
35#include <drm/i915_drm.h> 34#include <drm/i915_drm.h>
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index d84c7815ee0c..1effbf49fa08 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -133,7 +133,6 @@
133 */ 133 */
134#include <linux/interrupt.h> 134#include <linux/interrupt.h>
135 135
136#include <drm/drmP.h>
137#include <drm/i915_drm.h> 136#include <drm/i915_drm.h>
138#include "i915_drv.h" 137#include "i915_drv.h"
139#include "i915_gem_render_state.h" 138#include "i915_gem_render_state.h"
@@ -363,31 +362,12 @@ execlists_context_schedule_out(struct i915_request *rq, unsigned long status)
363 trace_i915_request_out(rq); 362 trace_i915_request_out(rq);
364} 363}
365 364
366static void
367execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
368{
369 ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
370 ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
371 ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
372 ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
373}
374
375static u64 execlists_update_context(struct i915_request *rq) 365static u64 execlists_update_context(struct i915_request *rq)
376{ 366{
377 struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt;
378 struct intel_context *ce = rq->hw_context; 367 struct intel_context *ce = rq->hw_context;
379 u32 *reg_state = ce->lrc_reg_state;
380 368
381 reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail); 369 ce->lrc_reg_state[CTX_RING_TAIL + 1] =
382 370 intel_ring_set_tail(rq->ring, rq->tail);
383 /*
384 * True 32b PPGTT with dynamic page allocation: update PDP
385 * registers and point the unallocated PDPs to scratch page.
386 * PML4 is allocated during ppgtt init, so this is not needed
387 * in 48-bit mode.
388 */
389 if (!i915_vm_is_48bit(&ppgtt->vm))
390 execlists_update_context_pdps(ppgtt, reg_state);
391 371
392 /* 372 /*
393 * Make sure the context image is complete before we submit it to HW. 373 * Make sure the context image is complete before we submit it to HW.
@@ -770,6 +750,13 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
770 execlists_clear_all_active(execlists); 750 execlists_clear_all_active(execlists);
771} 751}
772 752
753static inline void
754invalidate_csb_entries(const u32 *first, const u32 *last)
755{
756 clflush((void *)first);
757 clflush((void *)last);
758}
759
773static void reset_csb_pointers(struct intel_engine_execlists *execlists) 760static void reset_csb_pointers(struct intel_engine_execlists *execlists)
774{ 761{
775 const unsigned int reset_value = GEN8_CSB_ENTRIES - 1; 762 const unsigned int reset_value = GEN8_CSB_ENTRIES - 1;
@@ -785,6 +772,9 @@ static void reset_csb_pointers(struct intel_engine_execlists *execlists)
785 */ 772 */
786 execlists->csb_head = reset_value; 773 execlists->csb_head = reset_value;
787 WRITE_ONCE(*execlists->csb_write, reset_value); 774 WRITE_ONCE(*execlists->csb_write, reset_value);
775
776 invalidate_csb_entries(&execlists->csb_status[0],
777 &execlists->csb_status[GEN8_CSB_ENTRIES - 1]);
788} 778}
789 779
790static void nop_submission_tasklet(unsigned long data) 780static void nop_submission_tasklet(unsigned long data)
@@ -1020,6 +1010,19 @@ static void process_csb(struct intel_engine_cs *engine)
1020 } while (head != tail); 1010 } while (head != tail);
1021 1011
1022 execlists->csb_head = head; 1012 execlists->csb_head = head;
1013
1014 /*
1015 * Gen11 has proven to fail wrt global observation point between
1016 * entry and tail update, failing on the ordering and thus
1017 * we see an old entry in the context status buffer.
1018 *
1019 * Forcibly evict out entries for the next gpu csb update,
1020 * to increase the odds that we get a fresh entries with non
1021 * working hardware. The cost for doing so comes out mostly with
1022 * the wash as hardware, working or not, will need to do the
1023 * invalidation before.
1024 */
1025 invalidate_csb_entries(&buf[0], &buf[GEN8_CSB_ENTRIES - 1]);
1023} 1026}
1024 1027
1025static void __execlists_submission_tasklet(struct intel_engine_cs *const engine) 1028static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
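
A note on invalidate_csb_entries() above: clflush() evicts the cache line holding its argument, so the next CPU read of the context status buffer is refetched from memory instead of being served from a stale line (the const is cast away only because clflush() takes a mutable pointer). Flushing just the two endpoints covers the whole table on the assumption that the six-entry, eight-bytes-per-entry buffer does not extend beyond the lines those endpoints occupy; a size sanity check in that spirit:

	/* Sketch: 6 entries * 8 bytes = 48 bytes against two 64-byte lines;
	 * could sit in any init path as a compile-time assumption check. */
	BUILD_BUG_ON(GEN8_CSB_ENTRIES * 2 * sizeof(u32) > 2 * L1_CACHE_BYTES);
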
@@ -1247,29 +1250,88 @@ execlists_context_pin(struct intel_engine_cs *engine,
1247 return __execlists_context_pin(engine, ctx, ce); 1250 return __execlists_context_pin(engine, ctx, ce);
1248} 1251}
1249 1252
1253static int emit_pdps(struct i915_request *rq)
1254{
1255 const struct intel_engine_cs * const engine = rq->engine;
1256 struct i915_hw_ppgtt * const ppgtt = rq->gem_context->ppgtt;
1257 int err, i;
1258 u32 *cs;
1259
1260 GEM_BUG_ON(intel_vgpu_active(rq->i915));
1261
1262 /*
1263 * Beware ye of the dragons, this sequence is magic!
1264 *
1265 * Small changes to this sequence can cause anything from
1266 * GPU hangs to forcewake errors and machine lockups!
1267 */
1268
1269 /* Flush any residual operations from the context load */
1270 err = engine->emit_flush(rq, EMIT_FLUSH);
1271 if (err)
1272 return err;
1273
1274 /* Magic required to prevent forcewake errors! */
1275 err = engine->emit_flush(rq, EMIT_INVALIDATE);
1276 if (err)
1277 return err;
1278
1279 cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
1280 if (IS_ERR(cs))
1281 return PTR_ERR(cs);
1282
1283 /* Ensure the LRI have landed before we invalidate & continue */
1284 *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
1285 for (i = GEN8_3LVL_PDPES; i--; ) {
1286 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
1287
1288 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, i));
1289 *cs++ = upper_32_bits(pd_daddr);
1290 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, i));
1291 *cs++ = lower_32_bits(pd_daddr);
1292 }
1293 *cs++ = MI_NOOP;
1294
1295 intel_ring_advance(rq, cs);
1296
1297 /* Be doubly sure the LRI have landed before proceeding */
1298 err = engine->emit_flush(rq, EMIT_FLUSH);
1299 if (err)
1300 return err;
1301
1302 /* Re-invalidate the TLB for luck */
1303 return engine->emit_flush(rq, EMIT_INVALIDATE);
1304}
1305
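	/*
	 * Aside: the intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2) budget
	 * above falls straight out of the LRI layout the loop emits, with
	 * GEN8_3LVL_PDPES == 4:
	 *     1 dword   MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) header
	 *  + 16 dwords  4 PDPs x 2 registers (UDW/LDW) x (offset + value)
	 *  +  1 dword   MI_NOOP
	 *  = 18 dwords == 4 * GEN8_3LVL_PDPES + 2
	 */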
1250static int execlists_request_alloc(struct i915_request *request) 1306static int execlists_request_alloc(struct i915_request *request)
1251{ 1307{
1252 int ret; 1308 int ret;
1253 1309
1254 GEM_BUG_ON(!request->hw_context->pin_count); 1310 GEM_BUG_ON(!request->hw_context->pin_count);
1255 1311
1256 /* Flush enough space to reduce the likelihood of waiting after 1312 /*
1313 * Flush enough space to reduce the likelihood of waiting after
1257 * we start building the request - in which case we will just 1314 * we start building the request - in which case we will just
1258 * have to repeat work. 1315 * have to repeat work.
1259 */ 1316 */
1260 request->reserved_space += EXECLISTS_REQUEST_SIZE; 1317 request->reserved_space += EXECLISTS_REQUEST_SIZE;
1261 1318
1262 ret = intel_ring_wait_for_space(request->ring, request->reserved_space); 1319 /*
1263 if (ret) 1320 * Note that after this point, we have committed to using
1264 return ret;
1265
1266 /* Note that after this point, we have committed to using
1267 * this request as it is being used to both track the 1321 * this request as it is being used to both track the
1268 * state of engine initialisation and liveness of the 1322 * state of engine initialisation and liveness of the
1269 * golden renderstate above. Think twice before you try 1323 * golden renderstate above. Think twice before you try
1270 * to cancel/unwind this request now. 1324 * to cancel/unwind this request now.
1271 */ 1325 */
1272 1326
1327 /* Unconditionally invalidate GPU caches and TLBs. */
1328 if (i915_vm_is_48bit(&request->gem_context->ppgtt->vm))
1329 ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
1330 else
1331 ret = emit_pdps(request);
1332 if (ret)
1333 return ret;
1334
1273 request->reserved_space -= EXECLISTS_REQUEST_SIZE; 1335 request->reserved_space -= EXECLISTS_REQUEST_SIZE;
1274 return 0; 1336 return 0;
1275} 1337}
@@ -1592,7 +1654,7 @@ static void enable_execlists(struct intel_engine_cs *engine)
1592{ 1654{
1593 struct drm_i915_private *dev_priv = engine->i915; 1655 struct drm_i915_private *dev_priv = engine->i915;
1594 1656
1595 I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff); 1657 intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
1596 1658
1597 /* 1659 /*
1598 * Make sure we're not enabling the new 12-deep CSB 1660 * Make sure we're not enabling the new 12-deep CSB
@@ -1633,6 +1695,7 @@ static bool unexpected_starting_state(struct intel_engine_cs *engine)
1633static int gen8_init_common_ring(struct intel_engine_cs *engine) 1695static int gen8_init_common_ring(struct intel_engine_cs *engine)
1634{ 1696{
1635 intel_engine_apply_workarounds(engine); 1697 intel_engine_apply_workarounds(engine);
1698 intel_engine_apply_whitelist(engine);
1636 1699
1637 intel_mocs_init_engine(engine); 1700 intel_mocs_init_engine(engine);
1638 1701
@@ -1649,43 +1712,6 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
1649 return 0; 1712 return 0;
1650} 1713}
1651 1714
1652static int gen8_init_render_ring(struct intel_engine_cs *engine)
1653{
1654 struct drm_i915_private *dev_priv = engine->i915;
1655 int ret;
1656
1657 ret = gen8_init_common_ring(engine);
1658 if (ret)
1659 return ret;
1660
1661 intel_engine_apply_whitelist(engine);
1662
1663 /* We need to disable the AsyncFlip performance optimisations in order
1664 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
1665 * programmed to '1' on all products.
1666 *
1667 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
1668 */
1669 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
1670
1671 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
1672
1673 return 0;
1674}
1675
1676static int gen9_init_render_ring(struct intel_engine_cs *engine)
1677{
1678 int ret;
1679
1680 ret = gen8_init_common_ring(engine);
1681 if (ret)
1682 return ret;
1683
1684 intel_engine_apply_whitelist(engine);
1685
1686 return 0;
1687}
1688
1689static struct i915_request * 1715static struct i915_request *
1690execlists_reset_prepare(struct intel_engine_cs *engine) 1716execlists_reset_prepare(struct intel_engine_cs *engine)
1691{ 1717{
@@ -1841,56 +1867,11 @@ static void execlists_reset_finish(struct intel_engine_cs *engine)
1841 atomic_read(&execlists->tasklet.count)); 1867 atomic_read(&execlists->tasklet.count));
1842} 1868}
1843 1869
1844static int intel_logical_ring_emit_pdps(struct i915_request *rq)
1845{
1846 struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt;
1847 struct intel_engine_cs *engine = rq->engine;
1848 const int num_lri_cmds = GEN8_3LVL_PDPES * 2;
1849 u32 *cs;
1850 int i;
1851
1852 cs = intel_ring_begin(rq, num_lri_cmds * 2 + 2);
1853 if (IS_ERR(cs))
1854 return PTR_ERR(cs);
1855
1856 *cs++ = MI_LOAD_REGISTER_IMM(num_lri_cmds);
1857 for (i = GEN8_3LVL_PDPES - 1; i >= 0; i--) {
1858 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
1859
1860 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, i));
1861 *cs++ = upper_32_bits(pd_daddr);
1862 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, i));
1863 *cs++ = lower_32_bits(pd_daddr);
1864 }
1865
1866 *cs++ = MI_NOOP;
1867 intel_ring_advance(rq, cs);
1868
1869 return 0;
1870}
1871
1872static int gen8_emit_bb_start(struct i915_request *rq, 1870static int gen8_emit_bb_start(struct i915_request *rq,
1873 u64 offset, u32 len, 1871 u64 offset, u32 len,
1874 const unsigned int flags) 1872 const unsigned int flags)
1875{ 1873{
1876 u32 *cs; 1874 u32 *cs;
1877 int ret;
1878
1879 /* Don't rely in hw updating PDPs, specially in lite-restore.
1880 * Ideally, we should set Force PD Restore in ctx descriptor,
1881 * but we can't. Force Restore would be a second option, but
1882 * it is unsafe in case of lite-restore (because the ctx is
1883 * not idle). PML4 is allocated during ppgtt init so this is
1884 * not needed in 48-bit.*/
1885 if ((intel_engine_flag(rq->engine) & rq->gem_context->ppgtt->pd_dirty_rings) &&
1886 !i915_vm_is_48bit(&rq->gem_context->ppgtt->vm) &&
1887 !intel_vgpu_active(rq->i915)) {
1888 ret = intel_logical_ring_emit_pdps(rq);
1889 if (ret)
1890 return ret;
1891
1892 rq->gem_context->ppgtt->pd_dirty_rings &= ~intel_engine_flag(rq->engine);
1893 }
1894 1875
1895 cs = intel_ring_begin(rq, 6); 1876 cs = intel_ring_begin(rq, 6);
1896 if (IS_ERR(cs)) 1877 if (IS_ERR(cs))
@@ -1923,6 +1904,7 @@ static int gen8_emit_bb_start(struct i915_request *rq,
1923 1904
1924 *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; 1905 *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
1925 *cs++ = MI_NOOP; 1906 *cs++ = MI_NOOP;
1907
1926 intel_ring_advance(rq, cs); 1908 intel_ring_advance(rq, cs);
1927 1909
1928 return 0; 1910 return 0;
@@ -2007,7 +1989,7 @@ static int gen8_emit_flush_render(struct i915_request *request,
2007 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL 1989 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
2008 * pipe control. 1990 * pipe control.
2009 */ 1991 */
2010 if (IS_GEN9(request->i915)) 1992 if (IS_GEN(request->i915, 9))
2011 vf_flush_wa = true; 1993 vf_flush_wa = true;
2012 1994
2013 /* WaForGAMHang:kbl */ 1995 /* WaForGAMHang:kbl */
@@ -2078,10 +2060,18 @@ static void gen8_emit_breadcrumb_rcs(struct i915_request *request, u32 *cs)
2078 /* We're using qword write, seqno should be aligned to 8 bytes. */ 2060 /* We're using qword write, seqno should be aligned to 8 bytes. */
2079 BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1); 2061 BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
2080 2062
2081 cs = gen8_emit_ggtt_write_rcs(cs, request->global_seqno, 2063 cs = gen8_emit_ggtt_write_rcs(cs,
2082 intel_hws_seqno_address(request->engine)); 2064 request->global_seqno,
2065 intel_hws_seqno_address(request->engine),
2066 PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
2067 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
2068 PIPE_CONTROL_DC_FLUSH_ENABLE |
2069 PIPE_CONTROL_FLUSH_ENABLE |
2070 PIPE_CONTROL_CS_STALL);
2071
2083 *cs++ = MI_USER_INTERRUPT; 2072 *cs++ = MI_USER_INTERRUPT;
2084 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; 2073 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
2074
2085 request->tail = intel_ring_offset(request, cs); 2075 request->tail = intel_ring_offset(request, cs);
2086 assert_ring_tail_valid(request->ring, request->tail); 2076 assert_ring_tail_valid(request->ring, request->tail);
2087 2077
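
The RCS breadcrumb now folds its post-sync flush into the GGTT write helper rather than relying on flags implied at each call site. A helper of this shape typically expands to one six-dword PIPE_CONTROL carrying the flags plus a qword write; the sketch below mirrors gen8_emit_ggtt_write_rcs() but is not its verbatim body:

	static inline u32 *
	emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
	{
		*cs++ = GFX_OP_PIPE_CONTROL(6);
		*cs++ = flags | PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_GLOBAL_GTT_IVB;
		*cs++ = gtt_offset;
		*cs++ = 0;
		*cs++ = value;
		*cs++ = 0;	/* upper half of the qword write */
		return cs;
	}
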
@@ -2244,6 +2234,8 @@ static int logical_ring_init(struct intel_engine_cs *engine)
2244 if (ret) 2234 if (ret)
2245 return ret; 2235 return ret;
2246 2236
2237 intel_engine_init_workarounds(engine);
2238
2247 if (HAS_LOGICAL_RING_ELSQ(i915)) { 2239 if (HAS_LOGICAL_RING_ELSQ(i915)) {
2248 execlists->submit_reg = i915->regs + 2240 execlists->submit_reg = i915->regs +
2249 i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(engine)); 2241 i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(engine));
@@ -2276,19 +2268,11 @@ static int logical_ring_init(struct intel_engine_cs *engine)
2276 2268
2277int logical_render_ring_init(struct intel_engine_cs *engine) 2269int logical_render_ring_init(struct intel_engine_cs *engine)
2278{ 2270{
2279 struct drm_i915_private *dev_priv = engine->i915;
2280 int ret; 2271 int ret;
2281 2272
2282 logical_ring_setup(engine); 2273 logical_ring_setup(engine);
2283 2274
2284 if (HAS_L3_DPF(dev_priv))
2285 engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2286
2287 /* Override some for render ring. */ 2275 /* Override some for render ring. */
2288 if (INTEL_GEN(dev_priv) >= 9)
2289 engine->init_hw = gen9_init_render_ring;
2290 else
2291 engine->init_hw = gen8_init_render_ring;
2292 engine->init_context = gen8_init_rcs_context; 2276 engine->init_context = gen8_init_rcs_context;
2293 engine->emit_flush = gen8_emit_flush_render; 2277 engine->emit_flush = gen8_emit_flush_render;
2294 engine->emit_breadcrumb = gen8_emit_breadcrumb_rcs; 2278 engine->emit_breadcrumb = gen8_emit_breadcrumb_rcs;
@@ -2310,7 +2294,6 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
2310 } 2294 }
2311 2295
2312 intel_engine_init_whitelist(engine); 2296 intel_engine_init_whitelist(engine);
2313 intel_engine_init_workarounds(engine);
2314 2297
2315 return 0; 2298 return 0;
2316} 2299}
@@ -2325,9 +2308,9 @@ int logical_xcs_ring_init(struct intel_engine_cs *engine)
2325static u32 2308static u32
2326make_rpcs(struct drm_i915_private *dev_priv) 2309make_rpcs(struct drm_i915_private *dev_priv)
2327{ 2310{
2328 bool subslice_pg = INTEL_INFO(dev_priv)->sseu.has_subslice_pg; 2311 bool subslice_pg = RUNTIME_INFO(dev_priv)->sseu.has_subslice_pg;
2329 u8 slices = hweight8(INTEL_INFO(dev_priv)->sseu.slice_mask); 2312 u8 slices = hweight8(RUNTIME_INFO(dev_priv)->sseu.slice_mask);
2330 u8 subslices = hweight8(INTEL_INFO(dev_priv)->sseu.subslice_mask[0]); 2313 u8 subslices = hweight8(RUNTIME_INFO(dev_priv)->sseu.subslice_mask[0]);
2331 u32 rpcs = 0; 2314 u32 rpcs = 0;
2332 2315
2333 /* 2316 /*
@@ -2362,7 +2345,7 @@ make_rpcs(struct drm_i915_private *dev_priv)
2362 * subslices are enabled, or a count between one and four on the first 2345 * subslices are enabled, or a count between one and four on the first
2363 * slice. 2346 * slice.
2364 */ 2347 */
2365 if (IS_GEN11(dev_priv) && slices == 1 && subslices >= 4) { 2348 if (IS_GEN(dev_priv, 11) && slices == 1 && subslices >= 4) {
2366 GEM_BUG_ON(subslices & 1); 2349 GEM_BUG_ON(subslices & 1);
2367 2350
2368 subslice_pg = false; 2351 subslice_pg = false;
@@ -2375,7 +2358,7 @@ make_rpcs(struct drm_i915_private *dev_priv)
2375 * must make an explicit request through RPCS for full 2358 * must make an explicit request through RPCS for full
2376 * enablement. 2359 * enablement.
2377 */ 2360 */
2378 if (INTEL_INFO(dev_priv)->sseu.has_slice_pg) { 2361 if (RUNTIME_INFO(dev_priv)->sseu.has_slice_pg) {
2379 u32 mask, val = slices; 2362 u32 mask, val = slices;
2380 2363
2381 if (INTEL_GEN(dev_priv) >= 11) { 2364 if (INTEL_GEN(dev_priv) >= 11) {
@@ -2403,17 +2386,17 @@ make_rpcs(struct drm_i915_private *dev_priv)
2403 rpcs |= GEN8_RPCS_ENABLE | GEN8_RPCS_SS_CNT_ENABLE | val; 2386 rpcs |= GEN8_RPCS_ENABLE | GEN8_RPCS_SS_CNT_ENABLE | val;
2404 } 2387 }
2405 2388
2406 if (INTEL_INFO(dev_priv)->sseu.has_eu_pg) { 2389 if (RUNTIME_INFO(dev_priv)->sseu.has_eu_pg) {
2407 u32 val; 2390 u32 val;
2408 2391
2409 val = INTEL_INFO(dev_priv)->sseu.eu_per_subslice << 2392 val = RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice <<
2410 GEN8_RPCS_EU_MIN_SHIFT; 2393 GEN8_RPCS_EU_MIN_SHIFT;
2411 GEM_BUG_ON(val & ~GEN8_RPCS_EU_MIN_MASK); 2394 GEM_BUG_ON(val & ~GEN8_RPCS_EU_MIN_MASK);
2412 val &= GEN8_RPCS_EU_MIN_MASK; 2395 val &= GEN8_RPCS_EU_MIN_MASK;
2413 2396
2414 rpcs |= val; 2397 rpcs |= val;
2415 2398
2416 val = INTEL_INFO(dev_priv)->sseu.eu_per_subslice << 2399 val = RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice <<
2417 GEN8_RPCS_EU_MAX_SHIFT; 2400 GEN8_RPCS_EU_MAX_SHIFT;
2418 GEM_BUG_ON(val & ~GEN8_RPCS_EU_MAX_MASK); 2401 GEM_BUG_ON(val & ~GEN8_RPCS_EU_MAX_MASK);
2419 val &= GEN8_RPCS_EU_MAX_MASK; 2402 val &= GEN8_RPCS_EU_MAX_MASK;
@@ -2538,6 +2521,11 @@ static void execlists_init_reg_state(u32 *regs,
2538 * other PDP Descriptors are ignored. 2521 * other PDP Descriptors are ignored.
2539 */ 2522 */
2540 ASSIGN_CTX_PML4(ctx->ppgtt, regs); 2523 ASSIGN_CTX_PML4(ctx->ppgtt, regs);
2524 } else {
2525 ASSIGN_CTX_PDP(ctx->ppgtt, regs, 3);
2526 ASSIGN_CTX_PDP(ctx->ppgtt, regs, 2);
2527 ASSIGN_CTX_PDP(ctx->ppgtt, regs, 1);
2528 ASSIGN_CTX_PDP(ctx->ppgtt, regs, 0);
2541 } 2529 }
2542 2530
2543 if (rcs) { 2531 if (rcs) {
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 3377d813dbb3..904d16af89a8 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -32,7 +32,6 @@
32#include <linux/i2c.h> 32#include <linux/i2c.h>
33#include <linux/slab.h> 33#include <linux/slab.h>
34#include <linux/vga_switcheroo.h> 34#include <linux/vga_switcheroo.h>
35#include <drm/drmP.h>
36#include <drm/drm_atomic_helper.h> 35#include <drm/drm_atomic_helper.h>
37#include <drm/drm_crtc.h> 36#include <drm/drm_crtc.h>
38#include <drm/drm_edid.h> 37#include <drm/drm_edid.h>
@@ -279,7 +278,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder,
279 * special lvds dither control bit on pch-split platforms, dithering is 278 * special lvds dither control bit on pch-split platforms, dithering is
280 * only controlled through the PIPECONF reg. 279 * only controlled through the PIPECONF reg.
281 */ 280 */
282 if (IS_GEN4(dev_priv)) { 281 if (IS_GEN(dev_priv, 4)) {
283 /* 282 /*
284 * Bspec wording suggests that LVDS port dithering only exists 283 * Bspec wording suggests that LVDS port dithering only exists
285 * for 18bpp panels. 284 * for 18bpp panels.
@@ -919,7 +918,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
919 intel_encoder->cloneable = 0; 918 intel_encoder->cloneable = 0;
920 if (HAS_PCH_SPLIT(dev_priv)) 919 if (HAS_PCH_SPLIT(dev_priv))
921 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); 920 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
922 else if (IS_GEN4(dev_priv)) 921 else if (IS_GEN(dev_priv, 4))
923 intel_encoder->crtc_mask = (1 << 0) | (1 << 1); 922 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
924 else 923 else
925 intel_encoder->crtc_mask = (1 << 1); 924 intel_encoder->crtc_mask = (1 << 1);
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 77e9871a8c9a..e976c5ce5479 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -193,7 +193,7 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv,
193 } 193 }
194 194
195 /* WaDisableSkipCaching:skl,bxt,kbl,glk */ 195 /* WaDisableSkipCaching:skl,bxt,kbl,glk */
196 if (IS_GEN9(dev_priv)) { 196 if (IS_GEN(dev_priv, 9)) {
197 int i; 197 int i;
198 198
199 for (i = 0; i < table->size; i++) 199 for (i = 0; i < table->size; i++)
diff --git a/drivers/gpu/drm/i915/intel_mocs.h b/drivers/gpu/drm/i915/intel_mocs.h
index d89080d75b80..3d99d1271b2b 100644
--- a/drivers/gpu/drm/i915/intel_mocs.h
+++ b/drivers/gpu/drm/i915/intel_mocs.h
@@ -49,7 +49,6 @@
49 * context handling keep the MOCS in step. 49 * context handling keep the MOCS in step.
50 */ 50 */
51 51
52#include <drm/drmP.h>
53#include "i915_drv.h" 52#include "i915_drv.h"
54 53
55int intel_rcs_context_init_mocs(struct i915_request *rq); 54int intel_rcs_context_init_mocs(struct i915_request *rq);
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index b8f106d9ecf8..30ae96c5c97c 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -30,7 +30,6 @@
30#include <linux/firmware.h> 30#include <linux/firmware.h>
31#include <acpi/video.h> 31#include <acpi/video.h>
32 32
33#include <drm/drmP.h>
34#include <drm/i915_drm.h> 33#include <drm/i915_drm.h>
35 34
36#include "intel_opregion.h" 35#include "intel_opregion.h"
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 20ea7c99d13a..c81db81e4416 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -25,7 +25,6 @@
25 * 25 *
26 * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c 26 * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c
27 */ 27 */
28#include <drm/drmP.h>
29#include <drm/i915_drm.h> 28#include <drm/i915_drm.h>
30#include "i915_drv.h" 29#include "i915_drv.h"
31#include "i915_reg.h" 30#include "i915_reg.h"
@@ -541,7 +540,7 @@ static u32 calc_swidthsw(struct drm_i915_private *dev_priv, u32 offset, u32 widt
541{ 540{
542 u32 sw; 541 u32 sw;
543 542
544 if (IS_GEN2(dev_priv)) 543 if (IS_GEN(dev_priv, 2))
545 sw = ALIGN((offset & 31) + width, 32); 544 sw = ALIGN((offset & 31) + width, 32);
546 else 545 else
547 sw = ALIGN((offset & 63) + width, 64); 546 sw = ALIGN((offset & 63) + width, 64);
@@ -778,7 +777,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
778 u32 oconfig; 777 u32 oconfig;
779 778
780 oconfig = OCONF_CC_OUT_8BIT; 779 oconfig = OCONF_CC_OUT_8BIT;
781 if (IS_GEN4(dev_priv)) 780 if (IS_GEN(dev_priv, 4))
782 oconfig |= OCONF_CSC_MODE_BT709; 781 oconfig |= OCONF_CSC_MODE_BT709;
783 oconfig |= pipe == 0 ? 782 oconfig |= pipe == 0 ?
784 OCONF_PIPE_A : OCONF_PIPE_B; 783 OCONF_PIPE_A : OCONF_PIPE_B;
@@ -1012,7 +1011,7 @@ static int check_overlay_src(struct drm_i915_private *dev_priv,
1012 1011
1013 if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask) 1012 if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
1014 return -EINVAL; 1013 return -EINVAL;
1015 if (IS_GEN4(dev_priv) && rec->stride_Y < 512) 1014 if (IS_GEN(dev_priv, 4) && rec->stride_Y < 512)
1016 return -EINVAL; 1015 return -EINVAL;
1017 1016
1018 tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ? 1017 tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
@@ -1246,7 +1245,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
1246 attrs->contrast = overlay->contrast; 1245 attrs->contrast = overlay->contrast;
1247 attrs->saturation = overlay->saturation; 1246 attrs->saturation = overlay->saturation;
1248 1247
1249 if (!IS_GEN2(dev_priv)) { 1248 if (!IS_GEN(dev_priv, 2)) {
1250 attrs->gamma0 = I915_READ(OGAMC0); 1249 attrs->gamma0 = I915_READ(OGAMC0);
1251 attrs->gamma1 = I915_READ(OGAMC1); 1250 attrs->gamma1 = I915_READ(OGAMC1);
1252 attrs->gamma2 = I915_READ(OGAMC2); 1251 attrs->gamma2 = I915_READ(OGAMC2);
@@ -1270,7 +1269,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
1270 update_reg_attrs(overlay, overlay->regs); 1269 update_reg_attrs(overlay, overlay->regs);
1271 1270
1272 if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) { 1271 if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
1273 if (IS_GEN2(dev_priv)) 1272 if (IS_GEN(dev_priv, 2))
1274 goto out_unlock; 1273 goto out_unlock;
1275 1274
1276 if (overlay->active) { 1275 if (overlay->active) {
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index e6cd7b55c018..ee3e0842d542 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -563,7 +563,7 @@ static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32
563 pci_write_config_byte(dev_priv->drm.pdev, LBPC, lbpc); 563 pci_write_config_byte(dev_priv->drm.pdev, LBPC, lbpc);
564 } 564 }
565 565
566 if (IS_GEN4(dev_priv)) { 566 if (IS_GEN(dev_priv, 4)) {
567 mask = BACKLIGHT_DUTY_CYCLE_MASK; 567 mask = BACKLIGHT_DUTY_CYCLE_MASK;
568 } else { 568 } else {
569 level <<= 1; 569 level <<= 1;
@@ -929,7 +929,7 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
929 * 855gm only, but checking for gen2 is safe, as 855gm is the only gen2 929 * 855gm only, but checking for gen2 is safe, as 855gm is the only gen2
930 * that has backlight. 930 * that has backlight.
931 */ 931 */
932 if (IS_GEN2(dev_priv)) 932 if (IS_GEN(dev_priv, 2))
933 I915_WRITE(BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE); 933 I915_WRITE(BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE);
934} 934}
935 935
@@ -1557,7 +1557,7 @@ static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unu
1557 1557
1558 ctl = I915_READ(BLC_PWM_CTL); 1558 ctl = I915_READ(BLC_PWM_CTL);
1559 1559
1560 if (IS_GEN2(dev_priv) || IS_I915GM(dev_priv) || IS_I945GM(dev_priv)) 1560 if (IS_GEN(dev_priv, 2) || IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
1561 panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE; 1561 panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE;
1562 1562
1563 if (IS_PINEVIEW(dev_priv)) 1563 if (IS_PINEVIEW(dev_priv))
@@ -1886,7 +1886,7 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
1886 panel->backlight.get = vlv_get_backlight; 1886 panel->backlight.get = vlv_get_backlight;
1887 panel->backlight.hz_to_pwm = vlv_hz_to_pwm; 1887 panel->backlight.hz_to_pwm = vlv_hz_to_pwm;
1888 } 1888 }
1889 } else if (IS_GEN4(dev_priv)) { 1889 } else if (IS_GEN(dev_priv, 4)) {
1890 panel->backlight.setup = i965_setup_backlight; 1890 panel->backlight.setup = i965_setup_backlight;
1891 panel->backlight.enable = i965_enable_backlight; 1891 panel->backlight.enable = i965_enable_backlight;
1892 panel->backlight.disable = i965_disable_backlight; 1892 panel->backlight.disable = i965_disable_backlight;
diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.c b/drivers/gpu/drm/i915/intel_pipe_crc.c
index f3c9010e332a..bdabcfab8090 100644
--- a/drivers/gpu/drm/i915/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/intel_pipe_crc.c
@@ -427,13 +427,13 @@ static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv,
427 enum intel_pipe_crc_source *source, u32 *val, 427 enum intel_pipe_crc_source *source, u32 *val,
428 bool set_wa) 428 bool set_wa)
429{ 429{
430 if (IS_GEN2(dev_priv)) 430 if (IS_GEN(dev_priv, 2))
431 return i8xx_pipe_crc_ctl_reg(source, val); 431 return i8xx_pipe_crc_ctl_reg(source, val);
432 else if (INTEL_GEN(dev_priv) < 5) 432 else if (INTEL_GEN(dev_priv) < 5)
433 return i9xx_pipe_crc_ctl_reg(dev_priv, pipe, source, val); 433 return i9xx_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
434 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 434 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
435 return vlv_pipe_crc_ctl_reg(dev_priv, pipe, source, val); 435 return vlv_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
436 else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv)) 436 else if (IS_GEN_RANGE(dev_priv, 5, 6))
437 return ilk_pipe_crc_ctl_reg(source, val); 437 return ilk_pipe_crc_ctl_reg(source, val);
438 else 438 else
439 return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val, set_wa); 439 return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val, set_wa);
@@ -544,13 +544,13 @@ static int
544intel_is_valid_crc_source(struct drm_i915_private *dev_priv, 544intel_is_valid_crc_source(struct drm_i915_private *dev_priv,
545 const enum intel_pipe_crc_source source) 545 const enum intel_pipe_crc_source source)
546{ 546{
547 if (IS_GEN2(dev_priv)) 547 if (IS_GEN(dev_priv, 2))
548 return i8xx_crc_source_valid(dev_priv, source); 548 return i8xx_crc_source_valid(dev_priv, source);
549 else if (INTEL_GEN(dev_priv) < 5) 549 else if (INTEL_GEN(dev_priv) < 5)
550 return i9xx_crc_source_valid(dev_priv, source); 550 return i9xx_crc_source_valid(dev_priv, source);
551 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 551 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
552 return vlv_crc_source_valid(dev_priv, source); 552 return vlv_crc_source_valid(dev_priv, source);
553 else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv)) 553 else if (IS_GEN_RANGE(dev_priv, 5, 6))
554 return ilk_crc_source_valid(dev_priv, source); 554 return ilk_crc_source_valid(dev_priv, source);
555 else 555 else
556 return ivb_crc_source_valid(dev_priv, source); 556 return ivb_crc_source_valid(dev_priv, source);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index a26b4eddda25..83b01cde8113 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -1399,10 +1399,9 @@ static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
1399 return 0; 1399 return 0;
1400} 1400}
1401 1401
1402static int g4x_compute_intermediate_wm(struct drm_device *dev, 1402static int g4x_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
1403 struct intel_crtc *crtc,
1404 struct intel_crtc_state *new_crtc_state)
1405{ 1403{
1404 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
1406 struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate; 1405 struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate;
1407 const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal; 1406 const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal;
1408 struct intel_atomic_state *intel_state = 1407 struct intel_atomic_state *intel_state =
@@ -2032,10 +2031,9 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
2032 2031
2033#undef VLV_FIFO 2032#undef VLV_FIFO
2034 2033
2035static int vlv_compute_intermediate_wm(struct drm_device *dev, 2034static int vlv_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
2036 struct intel_crtc *crtc,
2037 struct intel_crtc_state *new_crtc_state)
2038{ 2035{
2036 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
2039 struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate; 2037 struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate;
2040 const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal; 2038 const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal;
2041 struct intel_atomic_state *intel_state = 2039 struct intel_atomic_state *intel_state =
@@ -2273,7 +2271,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
2273 2271
2274 if (IS_I945GM(dev_priv)) 2272 if (IS_I945GM(dev_priv))
2275 wm_info = &i945_wm_info; 2273 wm_info = &i945_wm_info;
2276 else if (!IS_GEN2(dev_priv)) 2274 else if (!IS_GEN(dev_priv, 2))
2277 wm_info = &i915_wm_info; 2275 wm_info = &i915_wm_info;
2278 else 2276 else
2279 wm_info = &i830_a_wm_info; 2277 wm_info = &i830_a_wm_info;
@@ -2287,7 +2285,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
2287 crtc->base.primary->state->fb; 2285 crtc->base.primary->state->fb;
2288 int cpp; 2286 int cpp;
2289 2287
2290 if (IS_GEN2(dev_priv)) 2288 if (IS_GEN(dev_priv, 2))
2291 cpp = 4; 2289 cpp = 4;
2292 else 2290 else
2293 cpp = fb->format->cpp[0]; 2291 cpp = fb->format->cpp[0];
@@ -2302,7 +2300,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
2302 planea_wm = wm_info->max_wm; 2300 planea_wm = wm_info->max_wm;
2303 } 2301 }
2304 2302
2305 if (IS_GEN2(dev_priv)) 2303 if (IS_GEN(dev_priv, 2))
2306 wm_info = &i830_bc_wm_info; 2304 wm_info = &i830_bc_wm_info;
2307 2305
2308 fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_B); 2306 fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_B);
@@ -2314,7 +2312,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
2314 crtc->base.primary->state->fb; 2312 crtc->base.primary->state->fb;
2315 int cpp; 2313 int cpp;
2316 2314
2317 if (IS_GEN2(dev_priv)) 2315 if (IS_GEN(dev_priv, 2))
2318 cpp = 4; 2316 cpp = 4;
2319 else 2317 else
2320 cpp = fb->format->cpp[0]; 2318 cpp = fb->format->cpp[0];
@@ -2626,13 +2624,12 @@ static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
2626} 2624}
2627 2625
2628/* Calculate the maximum primary/sprite plane watermark */ 2626/* Calculate the maximum primary/sprite plane watermark */
2629static unsigned int ilk_plane_wm_max(const struct drm_device *dev, 2627static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
2630 int level, 2628 int level,
2631 const struct intel_wm_config *config, 2629 const struct intel_wm_config *config,
2632 enum intel_ddb_partitioning ddb_partitioning, 2630 enum intel_ddb_partitioning ddb_partitioning,
2633 bool is_sprite) 2631 bool is_sprite)
2634{ 2632{
2635 struct drm_i915_private *dev_priv = to_i915(dev);
2636 unsigned int fifo_size = ilk_display_fifo_size(dev_priv); 2633 unsigned int fifo_size = ilk_display_fifo_size(dev_priv);
2637 2634
2638 /* if sprites aren't enabled, sprites get nothing */ 2635 /* if sprites aren't enabled, sprites get nothing */
@@ -2668,7 +2665,7 @@ static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
2668} 2665}
2669 2666
2670/* Calculate the maximum cursor plane watermark */ 2667/* Calculate the maximum cursor plane watermark */
2671static unsigned int ilk_cursor_wm_max(const struct drm_device *dev, 2668static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv,
2672 int level, 2669 int level,
2673 const struct intel_wm_config *config) 2670 const struct intel_wm_config *config)
2674{ 2671{
@@ -2677,19 +2674,19 @@ static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
2677 return 64; 2674 return 64;
2678 2675
2679 /* otherwise just report max that registers can hold */ 2676 /* otherwise just report max that registers can hold */
2680 return ilk_cursor_wm_reg_max(to_i915(dev), level); 2677 return ilk_cursor_wm_reg_max(dev_priv, level);
2681} 2678}
2682 2679
2683static void ilk_compute_wm_maximums(const struct drm_device *dev, 2680static void ilk_compute_wm_maximums(const struct drm_i915_private *dev_priv,
2684 int level, 2681 int level,
2685 const struct intel_wm_config *config, 2682 const struct intel_wm_config *config,
2686 enum intel_ddb_partitioning ddb_partitioning, 2683 enum intel_ddb_partitioning ddb_partitioning,
2687 struct ilk_wm_maximums *max) 2684 struct ilk_wm_maximums *max)
2688{ 2685{
2689 max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false); 2686 max->pri = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, false);
2690 max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true); 2687 max->spr = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, true);
2691 max->cur = ilk_cursor_wm_max(dev, level, config); 2688 max->cur = ilk_cursor_wm_max(dev_priv, level, config);
2692 max->fbc = ilk_fbc_wm_reg_max(to_i915(dev)); 2689 max->fbc = ilk_fbc_wm_reg_max(dev_priv);
2693} 2690}
2694 2691
2695static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv, 2692static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
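
The watermark helpers in this file are being converted from struct drm_device parameters to struct drm_i915_private, dropping the repeated to_i915() calls. The idiom is symmetric: wrap once at the legacy boundary, then use the embedded &i915->drm wherever the DRM core still wants a drm_device. A sketch of the pattern:

	/* Sketch of the conversion idiom applied across intel_pm.c here. */
	static int count_intel_crtcs(struct drm_device *dev)
	{
		struct drm_i915_private *i915 = to_i915(dev);	/* wrap once */
		struct intel_crtc *crtc;
		int n = 0;

		for_each_intel_crtc(&i915->drm, crtc)	/* unwrap where needed */
			n++;

		return n;
	}
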
@@ -2926,7 +2923,7 @@ static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
2926 uint16_t wm[5]) 2923 uint16_t wm[5])
2927{ 2924{
2928 /* ILK sprite LP0 latency is 1300 ns */ 2925 /* ILK sprite LP0 latency is 1300 ns */
2929 if (IS_GEN5(dev_priv)) 2926 if (IS_GEN(dev_priv, 5))
2930 wm[0] = 13; 2927 wm[0] = 13;
2931} 2928}
2932 2929
@@ -2934,7 +2931,7 @@ static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
2934 uint16_t wm[5]) 2931 uint16_t wm[5])
2935{ 2932{
2936 /* ILK cursor LP0 latency is 1300 ns */ 2933 /* ILK cursor LP0 latency is 1300 ns */
2937 if (IS_GEN5(dev_priv)) 2934 if (IS_GEN(dev_priv, 5))
2938 wm[0] = 13; 2935 wm[0] = 13;
2939} 2936}
2940 2937
@@ -3061,7 +3058,7 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
3061 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency); 3058 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
3062 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); 3059 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
3063 3060
3064 if (IS_GEN6(dev_priv)) { 3061 if (IS_GEN(dev_priv, 6)) {
3065 snb_wm_latency_quirk(dev_priv); 3062 snb_wm_latency_quirk(dev_priv);
3066 snb_wm_lp3_irq_quirk(dev_priv); 3063 snb_wm_lp3_irq_quirk(dev_priv);
3067 } 3064 }
@@ -3073,7 +3070,7 @@ static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
3073 intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency); 3070 intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency);
3074} 3071}
3075 3072
3076static bool ilk_validate_pipe_wm(struct drm_device *dev, 3073static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
3077 struct intel_pipe_wm *pipe_wm) 3074 struct intel_pipe_wm *pipe_wm)
3078{ 3075{
3079 /* LP0 watermark maximums depend on this pipe alone */ 3076 /* LP0 watermark maximums depend on this pipe alone */
@@ -3085,7 +3082,7 @@ static bool ilk_validate_pipe_wm(struct drm_device *dev,
3085 struct ilk_wm_maximums max; 3082 struct ilk_wm_maximums max;
3086 3083
3087 /* LP0 watermarks always use 1/2 DDB partitioning */ 3084 /* LP0 watermarks always use 1/2 DDB partitioning */
3088 ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max); 3085 ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max);
3089 3086
3090 /* At least LP0 must be valid */ 3087 /* At least LP0 must be valid */
3091 if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) { 3088 if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
@@ -3150,7 +3147,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
3150 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 3147 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
3151 pipe_wm->linetime = hsw_compute_linetime_wm(cstate); 3148 pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
3152 3149
3153 if (!ilk_validate_pipe_wm(dev, pipe_wm)) 3150 if (!ilk_validate_pipe_wm(dev_priv, pipe_wm))
3154 return -EINVAL; 3151 return -EINVAL;
3155 3152
3156 ilk_compute_wm_reg_maximums(dev_priv, 1, &max); 3153 ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
@@ -3180,17 +3177,17 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
3180 * state and the new state. These can be programmed to the hardware 3177 * state and the new state. These can be programmed to the hardware
3181 * immediately. 3178 * immediately.
3182 */ 3179 */
3183static int ilk_compute_intermediate_wm(struct drm_device *dev, 3180static int ilk_compute_intermediate_wm(struct intel_crtc_state *newstate)
3184 struct intel_crtc *intel_crtc,
3185 struct intel_crtc_state *newstate)
3186{ 3181{
3182 struct intel_crtc *intel_crtc = to_intel_crtc(newstate->base.crtc);
3183 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
3187 struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate; 3184 struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
3188 struct intel_atomic_state *intel_state = 3185 struct intel_atomic_state *intel_state =
3189 to_intel_atomic_state(newstate->base.state); 3186 to_intel_atomic_state(newstate->base.state);
3190 const struct intel_crtc_state *oldstate = 3187 const struct intel_crtc_state *oldstate =
3191 intel_atomic_get_old_crtc_state(intel_state, intel_crtc); 3188 intel_atomic_get_old_crtc_state(intel_state, intel_crtc);
3192 const struct intel_pipe_wm *b = &oldstate->wm.ilk.optimal; 3189 const struct intel_pipe_wm *b = &oldstate->wm.ilk.optimal;
3193 int level, max_level = ilk_wm_max_level(to_i915(dev)); 3190 int level, max_level = ilk_wm_max_level(dev_priv);
3194 3191
3195 /* 3192 /*
3196 * Start with the final, target watermarks, then combine with the 3193 * Start with the final, target watermarks, then combine with the
@@ -3223,7 +3220,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
3223 * there's no safe way to transition from the old state to 3220 * there's no safe way to transition from the old state to
3224 * the new state, so we need to fail the atomic transaction. 3221 * the new state, so we need to fail the atomic transaction.
3225 */ 3222 */
3226 if (!ilk_validate_pipe_wm(dev, a)) 3223 if (!ilk_validate_pipe_wm(dev_priv, a))
3227 return -EINVAL; 3224 return -EINVAL;
3228 3225
3229 /* 3226 /*
@@ -3239,7 +3236,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
3239/* 3236/*
3240 * Merge the watermarks from all active pipes for a specific level. 3237 * Merge the watermarks from all active pipes for a specific level.
3241 */ 3238 */
3242static void ilk_merge_wm_level(struct drm_device *dev, 3239static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
3243 int level, 3240 int level,
3244 struct intel_wm_level *ret_wm) 3241 struct intel_wm_level *ret_wm)
3245{ 3242{
@@ -3247,7 +3244,7 @@ static void ilk_merge_wm_level(struct drm_device *dev,
3247 3244
3248 ret_wm->enable = true; 3245 ret_wm->enable = true;
3249 3246
3250 for_each_intel_crtc(dev, intel_crtc) { 3247 for_each_intel_crtc(&dev_priv->drm, intel_crtc) {
3251 const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk; 3248 const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk;
3252 const struct intel_wm_level *wm = &active->wm[level]; 3249 const struct intel_wm_level *wm = &active->wm[level];
3253 3250
@@ -3272,12 +3269,11 @@ static void ilk_merge_wm_level(struct drm_device *dev,
3272/* 3269/*
3273 * Merge all low power watermarks for all active pipes. 3270 * Merge all low power watermarks for all active pipes.
3274 */ 3271 */
3275static void ilk_wm_merge(struct drm_device *dev, 3272static void ilk_wm_merge(struct drm_i915_private *dev_priv,
3276 const struct intel_wm_config *config, 3273 const struct intel_wm_config *config,
3277 const struct ilk_wm_maximums *max, 3274 const struct ilk_wm_maximums *max,
3278 struct intel_pipe_wm *merged) 3275 struct intel_pipe_wm *merged)
3279{ 3276{
3280 struct drm_i915_private *dev_priv = to_i915(dev);
3281 int level, max_level = ilk_wm_max_level(dev_priv); 3277 int level, max_level = ilk_wm_max_level(dev_priv);
3282 int last_enabled_level = max_level; 3278 int last_enabled_level = max_level;
3283 3279
@@ -3293,7 +3289,7 @@ static void ilk_wm_merge(struct drm_device *dev,
3293 for (level = 1; level <= max_level; level++) { 3289 for (level = 1; level <= max_level; level++) {
3294 struct intel_wm_level *wm = &merged->wm[level]; 3290 struct intel_wm_level *wm = &merged->wm[level];
3295 3291
3296 ilk_merge_wm_level(dev, level, wm); 3292 ilk_merge_wm_level(dev_priv, level, wm);
3297 3293
3298 if (level > last_enabled_level) 3294 if (level > last_enabled_level)
3299 wm->enable = false; 3295 wm->enable = false;
@@ -3318,7 +3314,7 @@ static void ilk_wm_merge(struct drm_device *dev,
3318 * What we should check here is whether FBC can be 3314 * What we should check here is whether FBC can be
3319 * enabled sometime later. 3315 * enabled sometime later.
3320 */ 3316 */
3321 if (IS_GEN5(dev_priv) && !merged->fbc_wm_enabled && 3317 if (IS_GEN(dev_priv, 5) && !merged->fbc_wm_enabled &&
3322 intel_fbc_is_active(dev_priv)) { 3318 intel_fbc_is_active(dev_priv)) {
3323 for (level = 2; level <= max_level; level++) { 3319 for (level = 2; level <= max_level; level++) {
3324 struct intel_wm_level *wm = &merged->wm[level]; 3320 struct intel_wm_level *wm = &merged->wm[level];
@@ -3335,22 +3331,20 @@ static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
3335} 3331}
3336 3332
3337/* The value we need to program into the WM_LPx latency field */ 3333/* The value we need to program into the WM_LPx latency field */
3338static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level) 3334static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv,
3335 int level)
3339{ 3336{
3340 struct drm_i915_private *dev_priv = to_i915(dev);
3341
3342 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 3337 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
3343 return 2 * level; 3338 return 2 * level;
3344 else 3339 else
3345 return dev_priv->wm.pri_latency[level]; 3340 return dev_priv->wm.pri_latency[level];
3346} 3341}
3347 3342
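Reviewer note: the Haswell/Broadwell branch above encodes the WM_LPx latency field as twice the watermark level rather than a measured latency, so level 3 programs 6 while other platforms program the pri_latency[] entry. A worked check against the function (table values are illustrative only, not real hardware latencies):

    /* Hypothetical standalone check of the selection logic above. */
    static const unsigned int pri_latency[] = { 7, 40, 120, 200, 500 };
    /* HSW/BDW:   ilk_wm_lp_latency(dev_priv, 3) == 2 * 3 == 6          */
    /* ILK-class: ilk_wm_lp_latency(dev_priv, 3) == pri_latency[3]      */
    /*            == 200 with the illustrative table above              */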
3348static void ilk_compute_wm_results(struct drm_device *dev, 3343static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
3349 const struct intel_pipe_wm *merged, 3344 const struct intel_pipe_wm *merged,
3350 enum intel_ddb_partitioning partitioning, 3345 enum intel_ddb_partitioning partitioning,
3351 struct ilk_wm_values *results) 3346 struct ilk_wm_values *results)
3352{ 3347{
3353 struct drm_i915_private *dev_priv = to_i915(dev);
3354 struct intel_crtc *intel_crtc; 3348 struct intel_crtc *intel_crtc;
3355 int level, wm_lp; 3349 int level, wm_lp;
3356 3350
@@ -3370,7 +3364,7 @@ static void ilk_compute_wm_results(struct drm_device *dev,
3370 * disabled. Doing otherwise could cause underruns. 3364 * disabled. Doing otherwise could cause underruns.
3371 */ 3365 */
3372 results->wm_lp[wm_lp - 1] = 3366 results->wm_lp[wm_lp - 1] =
3373 (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) | 3367 (ilk_wm_lp_latency(dev_priv, level) << WM1_LP_LATENCY_SHIFT) |
3374 (r->pri_val << WM1_LP_SR_SHIFT) | 3368 (r->pri_val << WM1_LP_SR_SHIFT) |
3375 r->cur_val; 3369 r->cur_val;
3376 3370
@@ -3396,7 +3390,7 @@ static void ilk_compute_wm_results(struct drm_device *dev,
3396 } 3390 }
3397 3391
3398 /* LP0 register values */ 3392 /* LP0 register values */
3399 for_each_intel_crtc(dev, intel_crtc) { 3393 for_each_intel_crtc(&dev_priv->drm, intel_crtc) {
3400 enum pipe pipe = intel_crtc->pipe; 3394 enum pipe pipe = intel_crtc->pipe;
3401 const struct intel_wm_level *r = 3395 const struct intel_wm_level *r =
3402 &intel_crtc->wm.active.ilk.wm[0]; 3396 &intel_crtc->wm.active.ilk.wm[0];
@@ -3415,11 +3409,12 @@ static void ilk_compute_wm_results(struct drm_device *dev,
3415 3409
3416/* Find the result with the highest level enabled. Check for enable_fbc_wm in 3410/* Find the result with the highest level enabled. Check for enable_fbc_wm in
3417 * case both are at the same level. Prefer r1 in case they're the same. */ 3411 * case both are at the same level. Prefer r1 in case they're the same. */
3418static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev, 3412static struct intel_pipe_wm *
3419 struct intel_pipe_wm *r1, 3413ilk_find_best_result(struct drm_i915_private *dev_priv,
3420 struct intel_pipe_wm *r2) 3414 struct intel_pipe_wm *r1,
3415 struct intel_pipe_wm *r2)
3421{ 3416{
3422 int level, max_level = ilk_wm_max_level(to_i915(dev)); 3417 int level, max_level = ilk_wm_max_level(dev_priv);
3423 int level1 = 0, level2 = 0; 3418 int level1 = 0, level2 = 0;
3424 3419
3425 for (level = 1; level <= max_level; level++) { 3420 for (level = 1; level <= max_level; level++) {
@@ -3756,9 +3751,9 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
3756 if (!intel_has_sagv(dev_priv)) 3751 if (!intel_has_sagv(dev_priv))
3757 return false; 3752 return false;
3758 3753
3759 if (IS_GEN9(dev_priv)) 3754 if (IS_GEN(dev_priv, 9))
3760 sagv_block_time_us = 30; 3755 sagv_block_time_us = 30;
3761 else if (IS_GEN10(dev_priv)) 3756 else if (IS_GEN(dev_priv, 10))
3762 sagv_block_time_us = 20; 3757 sagv_block_time_us = 20;
3763 else 3758 else
3764 sagv_block_time_us = 10; 3759 sagv_block_time_us = 10;
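Reviewer note: the IS_GEN9()/IS_GEN10() conversions here are part of a tree-wide move to a single IS_GEN(dev_priv, n) macro, matching the existing range-check helpers instead of keeping one macro per generation. A rough reconstruction of the pattern, assuming the bitmask-based definition i915 used around this time (the authoritative macro lives in i915_drv.h and may differ in detail):

    /* Hypothetical sketch, not the verbatim i915 definition. */
    #define IS_GEN(dev_priv, n) \
            (!!(INTEL_INFO(dev_priv)->gen_mask & BIT(n)))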
@@ -4306,102 +4301,6 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
4306 return total_data_rate; 4301 return total_data_rate;
4307} 4302}
4308 4303
4309static uint16_t
4310skl_ddb_min_alloc(const struct drm_plane_state *pstate, const int plane)
4311{
4312 struct drm_framebuffer *fb = pstate->fb;
4313 struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
4314 uint32_t src_w, src_h;
4315 uint32_t min_scanlines = 8;
4316 uint8_t plane_bpp;
4317
4318 if (WARN_ON(!fb))
4319 return 0;
4320
4321 /* For packed formats, and uv-plane, return 0 */
4322 if (plane == 1 && fb->format->format != DRM_FORMAT_NV12)
4323 return 0;
4324
4325 /* For Non Y-tile return 8-blocks */
4326 if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
4327 fb->modifier != I915_FORMAT_MOD_Yf_TILED &&
4328 fb->modifier != I915_FORMAT_MOD_Y_TILED_CCS &&
4329 fb->modifier != I915_FORMAT_MOD_Yf_TILED_CCS)
4330 return 8;
4331
4332 /*
4333 * Src coordinates are already rotated by 270 degrees for
4334 * the 90/270 degree plane rotation cases (to match the
4335 * GTT mapping), hence no need to account for rotation here.
4336 */
4337 src_w = drm_rect_width(&intel_pstate->base.src) >> 16;
4338 src_h = drm_rect_height(&intel_pstate->base.src) >> 16;
4339
4340 /* Halve UV plane width and height for NV12 */
4341 if (plane == 1) {
4342 src_w /= 2;
4343 src_h /= 2;
4344 }
4345
4346 plane_bpp = fb->format->cpp[plane];
4347
4348 if (drm_rotation_90_or_270(pstate->rotation)) {
4349 switch (plane_bpp) {
4350 case 1:
4351 min_scanlines = 32;
4352 break;
4353 case 2:
4354 min_scanlines = 16;
4355 break;
4356 case 4:
4357 min_scanlines = 8;
4358 break;
4359 case 8:
4360 min_scanlines = 4;
4361 break;
4362 default:
4363 WARN(1, "Unsupported pixel depth %u for rotation",
4364 plane_bpp);
4365 min_scanlines = 32;
4366 }
4367 }
4368
4369 return DIV_ROUND_UP((4 * src_w * plane_bpp), 512) * min_scanlines/4 + 3;
4370}
4371
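Reviewer note: for reference, the deleted skl_ddb_min_alloc() minimum works out as follows for a 1920-wide Y-tiled 32bpp plane with no 90/270 rotation (src_w = 1920, plane_bpp = 4, min_scanlines stays at its default of 8):

    blocks = DIV_ROUND_UP(4 * 1920 * 4, 512) * 8 / 4 + 3
           = 60 * 8 / 4 + 3
           = 123

The rewritten allocator below no longer reserves such static minimums up front; it sizes each plane from the per-level plane_res_b requirements that skl_compute_plane_wm() has already calculated.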
4372static void
4373skl_ddb_calc_min(const struct intel_crtc_state *cstate, int num_active,
4374 uint16_t *minimum, uint16_t *uv_minimum)
4375{
4376 const struct drm_plane_state *pstate;
4377 struct drm_plane *plane;
4378
4379 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, &cstate->base) {
4380 enum plane_id plane_id = to_intel_plane(plane)->id;
4381 struct intel_plane_state *plane_state = to_intel_plane_state(pstate);
4382
4383 if (plane_id == PLANE_CURSOR)
4384 continue;
4385
4386 /* slave plane must be invisible and calculated from master */
4387 if (!pstate->visible || WARN_ON(plane_state->slave))
4388 continue;
4389
4390 if (!plane_state->linked_plane) {
4391 minimum[plane_id] = skl_ddb_min_alloc(pstate, 0);
4392 uv_minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
4393 } else {
4394 enum plane_id y_plane_id =
4395 plane_state->linked_plane->id;
4396
4397 minimum[y_plane_id] = skl_ddb_min_alloc(pstate, 0);
4398 minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
4399 }
4400 }
4401
4402 minimum[PLANE_CURSOR] = skl_cursor_allocation(num_active);
4403}
4404
4405static int 4304static int
4406skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, 4305skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
4407 struct skl_ddb_allocation *ddb /* out */) 4306 struct skl_ddb_allocation *ddb /* out */)
@@ -4411,15 +4310,17 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
4411 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 4310 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
4412 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4311 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4413 struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb; 4312 struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
4414 uint16_t alloc_size, start; 4313 struct skl_plane_wm *wm;
4415 uint16_t minimum[I915_MAX_PLANES] = {}; 4314 uint16_t alloc_size, start = 0;
4416 uint16_t uv_minimum[I915_MAX_PLANES] = {}; 4315 uint16_t total[I915_MAX_PLANES] = {};
4316 uint16_t uv_total[I915_MAX_PLANES] = {};
4417 u64 total_data_rate; 4317 u64 total_data_rate;
4418 enum plane_id plane_id; 4318 enum plane_id plane_id;
4419 int num_active; 4319 int num_active;
4420 u64 plane_data_rate[I915_MAX_PLANES] = {}; 4320 u64 plane_data_rate[I915_MAX_PLANES] = {};
4421 u64 uv_plane_data_rate[I915_MAX_PLANES] = {}; 4321 u64 uv_plane_data_rate[I915_MAX_PLANES] = {};
4422 uint16_t total_min_blocks = 0; 4322 uint16_t blocks = 0;
4323 int level;
4423 4324
4424 /* Clear the partitioning for disabled planes. */ 4325 /* Clear the partitioning for disabled planes. */
4425 memset(cstate->wm.skl.plane_ddb_y, 0, sizeof(cstate->wm.skl.plane_ddb_y)); 4326 memset(cstate->wm.skl.plane_ddb_y, 0, sizeof(cstate->wm.skl.plane_ddb_y));
@@ -4449,81 +4350,135 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
4449 if (alloc_size == 0) 4350 if (alloc_size == 0)
4450 return 0; 4351 return 0;
4451 4352
4452 skl_ddb_calc_min(cstate, num_active, minimum, uv_minimum); 4353 /* Allocate a fixed number of blocks for the cursor. */
4354 total[PLANE_CURSOR] = skl_cursor_allocation(num_active);
4355 alloc_size -= total[PLANE_CURSOR];
4356 cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].start =
4357 alloc->end - total[PLANE_CURSOR];
4358 cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;
4359
4360 if (total_data_rate == 0)
4361 return 0;
4453 4362
4454 /* 4363 /*
4455 * 1. Allocate the minimum required blocks for each active plane 4364 * Find the highest watermark level for which we can satisfy the block
4456 * and allocate the cursor, it doesn't require extra allocation 4365 * requirement of active planes.
4457 * proportional to the data rate.
4458 */ 4366 */
4367 for (level = ilk_wm_max_level(dev_priv); level >= 0; level--) {
4368 blocks = 0;
4369 for_each_plane_id_on_crtc(intel_crtc, plane_id) {
4370 if (plane_id == PLANE_CURSOR)
4371 continue;
4459 4372
4460 for_each_plane_id_on_crtc(intel_crtc, plane_id) { 4373 wm = &cstate->wm.skl.optimal.planes[plane_id];
4461 total_min_blocks += minimum[plane_id]; 4374 blocks += wm->wm[level].plane_res_b;
4462 total_min_blocks += uv_minimum[plane_id]; 4375 blocks += wm->uv_wm[level].plane_res_b;
4376 }
4377
4378 if (blocks < alloc_size) {
4379 alloc_size -= blocks;
4380 break;
4381 }
4463 } 4382 }
4464 4383
4465 if (total_min_blocks > alloc_size) { 4384 if (level < 0) {
4466 DRM_DEBUG_KMS("Requested display configuration exceeds system DDB limitations"); 4385 DRM_DEBUG_KMS("Requested display configuration exceeds system DDB limitations");
4467 DRM_DEBUG_KMS("minimum required %d/%d\n", total_min_blocks, 4386 DRM_DEBUG_KMS("minimum required %d/%d\n", blocks,
4468 alloc_size); 4387 alloc_size);
4469 return -EINVAL; 4388 return -EINVAL;
4470 } 4389 }
4471 4390
4472 alloc_size -= total_min_blocks;
4473 cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR];
4474 cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;
4475
4476 /* 4391 /*
4477 * 2. Distribute the remaining space in proportion to the amount of 4392 * Grant each plane the blocks it requires at the highest achievable
4478 * data each plane needs to fetch from memory. 4393 * watermark level, plus an extra share of the leftover blocks
4479 * 4394 * proportional to its relative data rate.
4480 * FIXME: we may not allocate every single block here.
4481 */ 4395 */
4482 if (total_data_rate == 0)
4483 return 0;
4484
4485 start = alloc->start;
4486 for_each_plane_id_on_crtc(intel_crtc, plane_id) { 4396 for_each_plane_id_on_crtc(intel_crtc, plane_id) {
4487 u64 data_rate, uv_data_rate; 4397 u64 rate;
4488 uint16_t plane_blocks, uv_plane_blocks; 4398 u16 extra;
4489 4399
4490 if (plane_id == PLANE_CURSOR) 4400 if (plane_id == PLANE_CURSOR)
4491 continue; 4401 continue;
4492 4402
4493 data_rate = plane_data_rate[plane_id];
4494
4495 /* 4403 /*
4496 * allocation for (packed formats) or (uv-plane part of planar format): 4404 * We've accounted for all active planes; remaining planes are
4497 * promote the expression to 64 bits to avoid overflowing, the 4405 * all disabled.
4498 * result is < available as data_rate / total_data_rate < 1
4499 */ 4406 */
4500 plane_blocks = minimum[plane_id]; 4407 if (total_data_rate == 0)
4501 plane_blocks += div64_u64(alloc_size * data_rate, total_data_rate); 4408 break;
4502 4409
4503 /* Leave disabled planes at (0,0) */ 4410 wm = &cstate->wm.skl.optimal.planes[plane_id];
4504 if (data_rate) {
4505 cstate->wm.skl.plane_ddb_y[plane_id].start = start;
4506 cstate->wm.skl.plane_ddb_y[plane_id].end = start + plane_blocks;
4507 }
4508 4411
4509 start += plane_blocks; 4412 rate = plane_data_rate[plane_id];
4413 extra = min_t(u16, alloc_size,
4414 DIV64_U64_ROUND_UP(alloc_size * rate,
4415 total_data_rate));
4416 total[plane_id] = wm->wm[level].plane_res_b + extra;
4417 alloc_size -= extra;
4418 total_data_rate -= rate;
4510 4419
4511 /* Allocate DDB for UV plane for planar format/NV12 */ 4420 if (total_data_rate == 0)
4512 uv_data_rate = uv_plane_data_rate[plane_id]; 4421 break;
4513 4422
4514 uv_plane_blocks = uv_minimum[plane_id]; 4423 rate = uv_plane_data_rate[plane_id];
4515 uv_plane_blocks += div64_u64(alloc_size * uv_data_rate, total_data_rate); 4424 extra = min_t(u16, alloc_size,
4425 DIV64_U64_ROUND_UP(alloc_size * rate,
4426 total_data_rate));
4427 uv_total[plane_id] = wm->uv_wm[level].plane_res_b + extra;
4428 alloc_size -= extra;
4429 total_data_rate -= rate;
4430 }
4431 WARN_ON(alloc_size != 0 || total_data_rate != 0);
4432
4433 /* Set the actual DDB start/end points for each plane */
4434 start = alloc->start;
4435 for_each_plane_id_on_crtc(intel_crtc, plane_id) {
4436 struct skl_ddb_entry *plane_alloc, *uv_plane_alloc;
4437
4438 if (plane_id == PLANE_CURSOR)
4439 continue;
4440
4441 plane_alloc = &cstate->wm.skl.plane_ddb_y[plane_id];
4442 uv_plane_alloc = &cstate->wm.skl.plane_ddb_uv[plane_id];
4516 4443
4517 /* Gen11+ uses a separate plane for UV watermarks */ 4444 /* Gen11+ uses a separate plane for UV watermarks */
4518 WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_plane_blocks); 4445 WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_total[plane_id]);
4446
4447 /* Leave disabled planes at (0,0) */
4448 if (total[plane_id]) {
4449 plane_alloc->start = start;
4450 start += total[plane_id];
4451 plane_alloc->end = start;
4452 }
4519 4453
4520 if (uv_data_rate) { 4454 if (uv_total[plane_id]) {
4521 cstate->wm.skl.plane_ddb_uv[plane_id].start = start; 4455 uv_plane_alloc->start = start;
4522 cstate->wm.skl.plane_ddb_uv[plane_id].end = 4456 start += uv_total[plane_id];
4523 start + uv_plane_blocks; 4457 uv_plane_alloc->end = start;
4524 } 4458 }
4459 }
4525 4460
4526 start += uv_plane_blocks; 4461 /*
4462 * When we calculated watermark values we didn't know how high
4463 * of a level we'd actually be able to hit, so we just marked
4464 * all levels as "enabled." Go back now and disable the ones
4465 * that aren't actually possible.
4466 */
4467 for (level++; level <= ilk_wm_max_level(dev_priv); level++) {
4468 for_each_plane_id_on_crtc(intel_crtc, plane_id) {
4469 wm = &cstate->wm.skl.optimal.planes[plane_id];
4470 memset(&wm->wm[level], 0, sizeof(wm->wm[level]));
4471 }
4472 }
4473
4474 /*
4475 * Go back and disable the transition watermark if it turns out we
4476 * don't have enough DDB blocks for it.
4477 */
4478 for_each_plane_id_on_crtc(intel_crtc, plane_id) {
4479 wm = &cstate->wm.skl.optimal.planes[plane_id];
4480 if (wm->trans_wm.plane_res_b > total[plane_id])
4481 memset(&wm->trans_wm, 0, sizeof(wm->trans_wm));
4527 } 4482 }
4528 4483
4529 return 0; 4484 return 0;
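Reviewer note: the rewritten skl_allocate_pipe_ddb() is a two-pass scheme: pass one walks watermark levels from highest to lowest until the summed plane_res_b requirements of all active planes fit in the pipe's DDB; pass two grants each plane its blocks at that level plus a share of the leftovers proportional to its data rate. A condensed, standalone sketch of the proportional pass (types simplified to stdint, names hypothetical; the driver loop above is the authoritative version):

    #include <stdint.h>

    /*
     * Distribute "spare" leftover DDB blocks across planes in proportion
     * to each plane's data rate. Rounding up and then clamping, while
     * shrinking total_rate as we go, means the last active plane absorbs
     * any rounding remainder, so spare ends at exactly zero (the driver
     * asserts this with WARN_ON above).
     */
    static void distribute_extra(uint16_t *total, const uint64_t *rate,
                                 const uint16_t *min_blocks, int nplanes,
                                 uint16_t spare, uint64_t total_rate)
    {
            for (int i = 0; i < nplanes; i++) {
                    uint64_t extra;

                    if (total_rate == 0)
                            break;
                    /* ceil(spare * rate / total_rate), clamped to spare */
                    extra = (spare * rate[i] + total_rate - 1) / total_rate;
                    if (extra > spare)
                            extra = spare;
                    total[i] = min_blocks[i] + (uint16_t)extra;
                    spare -= (uint16_t)extra;
                    total_rate -= rate[i];
            }
    }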
@@ -4702,7 +4657,7 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *cstate,
4702 4657
4703 wp->plane_blocks_per_line = div_fixed16(interm_pbpl, 4658 wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
4704 wp->y_min_scanlines); 4659 wp->y_min_scanlines);
4705 } else if (wp->x_tiled && IS_GEN9(dev_priv)) { 4660 } else if (wp->x_tiled && IS_GEN(dev_priv, 9)) {
4706 interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line, 4661 interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
4707 wp->dbuf_block_size); 4662 wp->dbuf_block_size);
4708 wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl); 4663 wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
@@ -4720,17 +4675,15 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *cstate,
4720 return 0; 4675 return 0;
4721} 4676}
4722 4677
4723static int skl_compute_plane_wm(const struct intel_crtc_state *cstate, 4678static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
4724 const struct intel_plane_state *intel_pstate, 4679 const struct intel_plane_state *intel_pstate,
4725 uint16_t ddb_allocation, 4680 int level,
4726 int level, 4681 const struct skl_wm_params *wp,
4727 const struct skl_wm_params *wp, 4682 const struct skl_wm_level *result_prev,
4728 const struct skl_wm_level *result_prev, 4683 struct skl_wm_level *result /* out */)
4729 struct skl_wm_level *result /* out */)
4730{ 4684{
4731 struct drm_i915_private *dev_priv = 4685 struct drm_i915_private *dev_priv =
4732 to_i915(intel_pstate->base.plane->dev); 4686 to_i915(intel_pstate->base.plane->dev);
4733 const struct drm_plane_state *pstate = &intel_pstate->base;
4734 uint32_t latency = dev_priv->wm.skl_latency[level]; 4687 uint32_t latency = dev_priv->wm.skl_latency[level];
4735 uint_fixed_16_16_t method1, method2; 4688 uint_fixed_16_16_t method1, method2;
4736 uint_fixed_16_16_t selected_result; 4689 uint_fixed_16_16_t selected_result;
@@ -4738,10 +4691,6 @@ static int skl_compute_plane_wm(const struct intel_crtc_state *cstate,
4738 struct intel_atomic_state *state = 4691 struct intel_atomic_state *state =
4739 to_intel_atomic_state(cstate->base.state); 4692 to_intel_atomic_state(cstate->base.state);
4740 bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state); 4693 bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
4741 uint32_t min_disp_buf_needed;
4742
4743 if (latency == 0)
4744 return level == 0 ? -EINVAL : 0;
4745 4694
4746 /* Display WA #1141: kbl,cfl */ 4695 /* Display WA #1141: kbl,cfl */
4747 if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || 4696 if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) ||
@@ -4766,15 +4715,8 @@ static int skl_compute_plane_wm(const struct intel_crtc_state *cstate,
4766 wp->dbuf_block_size < 1) && 4715 wp->dbuf_block_size < 1) &&
4767 (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) { 4716 (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
4768 selected_result = method2; 4717 selected_result = method2;
4769 } else if (ddb_allocation >=
4770 fixed16_to_u32_round_up(wp->plane_blocks_per_line)) {
4771 if (IS_GEN9(dev_priv) &&
4772 !IS_GEMINILAKE(dev_priv))
4773 selected_result = min_fixed16(method1, method2);
4774 else
4775 selected_result = method2;
4776 } else if (latency >= wp->linetime_us) { 4718 } else if (latency >= wp->linetime_us) {
4777 if (IS_GEN9(dev_priv) && 4719 if (IS_GEN(dev_priv, 9) &&
4778 !IS_GEMINILAKE(dev_priv)) 4720 !IS_GEMINILAKE(dev_priv))
4779 selected_result = min_fixed16(method1, method2); 4721 selected_result = min_fixed16(method1, method2);
4780 else 4722 else
@@ -4788,85 +4730,51 @@ static int skl_compute_plane_wm(const struct intel_crtc_state *cstate,
4788 res_lines = div_round_up_fixed16(selected_result, 4730 res_lines = div_round_up_fixed16(selected_result,
4789 wp->plane_blocks_per_line); 4731 wp->plane_blocks_per_line);
4790 4732
4791 /* Display WA #1125: skl,bxt,kbl,glk */ 4733 if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) {
4792 if (level == 0 && wp->rc_surface) 4734 /* Display WA #1125: skl,bxt,kbl */
4793 res_blocks += fixed16_to_u32_round_up(wp->y_tile_minimum); 4735 if (level == 0 && wp->rc_surface)
4794 4736 res_blocks +=
4795 /* Display WA #1126: skl,bxt,kbl,glk */ 4737 fixed16_to_u32_round_up(wp->y_tile_minimum);
4796 if (level >= 1 && level <= 7) { 4738
4797 if (wp->y_tiled) { 4739 /* Display WA #1126: skl,bxt,kbl */
4798 res_blocks += fixed16_to_u32_round_up( 4740 if (level >= 1 && level <= 7) {
4799 wp->y_tile_minimum); 4741 if (wp->y_tiled) {
4800 res_lines += wp->y_min_scanlines; 4742 res_blocks +=
4801 } else { 4743 fixed16_to_u32_round_up(wp->y_tile_minimum);
4802 res_blocks++; 4744 res_lines += wp->y_min_scanlines;
4803 } 4745 } else {
4804 4746 res_blocks++;
4805 /* 4747 }
4806 * Make sure result blocks for higher latency levels are at least
4807 * as high as the level below the current level.
4808 * Assumption in DDB algorithm optimization for special cases.
4809 * Also covers Display WA #1125 for RC.
4810 */
4811 if (result_prev->plane_res_b > res_blocks)
4812 res_blocks = result_prev->plane_res_b;
4813 }
4814
4815 if (INTEL_GEN(dev_priv) >= 11) {
4816 if (wp->y_tiled) {
4817 uint32_t extra_lines;
4818 uint_fixed_16_16_t fp_min_disp_buf_needed;
4819
4820 if (res_lines % wp->y_min_scanlines == 0)
4821 extra_lines = wp->y_min_scanlines;
4822 else
4823 extra_lines = wp->y_min_scanlines * 2 -
4824 res_lines % wp->y_min_scanlines;
4825
4826 fp_min_disp_buf_needed = mul_u32_fixed16(res_lines +
4827 extra_lines,
4828 wp->plane_blocks_per_line);
4829 min_disp_buf_needed = fixed16_to_u32_round_up(
4830 fp_min_disp_buf_needed);
4831 } else {
4832 min_disp_buf_needed = DIV_ROUND_UP(res_blocks * 11, 10);
4833 }
4834 } else {
4835 min_disp_buf_needed = res_blocks;
4836 }
4837
4838 if ((level > 0 && res_lines > 31) ||
4839 res_blocks >= ddb_allocation ||
4840 min_disp_buf_needed >= ddb_allocation) {
4841 /*
4842 * If there are no valid level 0 watermarks, then we can't
4843 * support this display configuration.
4844 */
4845 if (level) {
4846 return 0;
4847 } else {
4848 struct drm_plane *plane = pstate->plane;
4849 4748
4850 DRM_DEBUG_KMS("Requested display configuration exceeds system watermark limitations\n"); 4749 /*
4851 DRM_DEBUG_KMS("[PLANE:%d:%s] blocks required = %u/%u, lines required = %u/31\n", 4750 * Make sure result blocks for higher latency levels are
4852 plane->base.id, plane->name, 4751 * at least as high as the level below the current level.
4853 res_blocks, ddb_allocation, res_lines); 4752 * Assumption in DDB algorithm optimization for special
4854 return -EINVAL; 4753 * cases. Also covers Display WA #1125 for RC.
4754 */
4755 if (result_prev->plane_res_b > res_blocks)
4756 res_blocks = result_prev->plane_res_b;
4855 } 4757 }
4856 } 4758 }
4857 4759
4858 /* The number of lines is ignored for the level 0 watermark. */ 4760 /* The number of lines is ignored for the level 0 watermark. */
4761 if (level > 0 && res_lines > 31)
4762 return;
4763
4764 /*
4765 * If res_lines is valid, assume we can use this watermark level
4766 * for now. We'll come back and disable it after we calculate the
4767 * DDB allocation if it turns out we don't actually have enough
4768 * blocks to satisfy it.
4769 */
4859 result->plane_res_b = res_blocks; 4770 result->plane_res_b = res_blocks;
4860 result->plane_res_l = res_lines; 4771 result->plane_res_l = res_lines;
4861 result->plane_en = true; 4772 result->plane_en = true;
4862
4863 return 0;
4864} 4773}
4865 4774
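Reviewer note: two behavioural changes land in skl_compute_plane_wm() here. It now returns void instead of an error when a level cannot be satisfied (levels are optimistically marked enabled and pruned later by the DDB pass), and plane_res_b stays monotonically non-decreasing across levels. The latter invariant, as a standalone sketch over a hypothetical per-level array of skl_wm_level:

    /* Enforce monotonic block requirements across watermark levels:
     * a higher-latency level never demands fewer blocks than the one
     * below it (this is what carrying result_prev achieves above). */
    for (level = 1; level <= max_level; level++)
            if (wm[level].plane_res_b < wm[level - 1].plane_res_b)
                    wm[level].plane_res_b = wm[level - 1].plane_res_b;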
4866static int 4775static void
4867skl_compute_wm_levels(const struct intel_crtc_state *cstate, 4776skl_compute_wm_levels(const struct intel_crtc_state *cstate,
4868 const struct intel_plane_state *intel_pstate, 4777 const struct intel_plane_state *intel_pstate,
4869 uint16_t ddb_blocks,
4870 const struct skl_wm_params *wm_params, 4778 const struct skl_wm_params *wm_params,
4871 struct skl_wm_level *levels) 4779 struct skl_wm_level *levels)
4872{ 4780{
@@ -4874,25 +4782,15 @@ skl_compute_wm_levels(const struct intel_crtc_state *cstate,
4874 to_i915(intel_pstate->base.plane->dev); 4782 to_i915(intel_pstate->base.plane->dev);
4875 int level, max_level = ilk_wm_max_level(dev_priv); 4783 int level, max_level = ilk_wm_max_level(dev_priv);
4876 struct skl_wm_level *result_prev = &levels[0]; 4784 struct skl_wm_level *result_prev = &levels[0];
4877 int ret;
4878 4785
4879 for (level = 0; level <= max_level; level++) { 4786 for (level = 0; level <= max_level; level++) {
4880 struct skl_wm_level *result = &levels[level]; 4787 struct skl_wm_level *result = &levels[level];
4881 4788
4882 ret = skl_compute_plane_wm(cstate, 4789 skl_compute_plane_wm(cstate, intel_pstate, level, wm_params,
4883 intel_pstate, 4790 result_prev, result);
4884 ddb_blocks,
4885 level,
4886 wm_params,
4887 result_prev,
4888 result);
4889 if (ret)
4890 return ret;
4891 4791
4892 result_prev = result; 4792 result_prev = result;
4893 } 4793 }
4894
4895 return 0;
4896} 4794}
4897 4795
4898static uint32_t 4796static uint32_t
@@ -4920,8 +4818,7 @@ skl_compute_linetime_wm(const struct intel_crtc_state *cstate)
4920 4818
4921static void skl_compute_transition_wm(const struct intel_crtc_state *cstate, 4819static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
4922 const struct skl_wm_params *wp, 4820 const struct skl_wm_params *wp,
4923 struct skl_plane_wm *wm, 4821 struct skl_plane_wm *wm)
4924 uint16_t ddb_allocation)
4925{ 4822{
4926 struct drm_device *dev = cstate->base.crtc->dev; 4823 struct drm_device *dev = cstate->base.crtc->dev;
4927 const struct drm_i915_private *dev_priv = to_i915(dev); 4824 const struct drm_i915_private *dev_priv = to_i915(dev);
@@ -4969,12 +4866,13 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
4969 4866
4970 } 4867 }
4971 4868
4972 res_blocks += 1; 4869 /*
4973 4870 * Just assume we can enable the transition watermark. After
4974 if (res_blocks < ddb_allocation) { 4871 * computing the DDB we'll come back and disable it if that
4975 wm->trans_wm.plane_res_b = res_blocks; 4872 * assumption turns out to be false.
4976 wm->trans_wm.plane_en = true; 4873 */
4977 } 4874 wm->trans_wm.plane_res_b = res_blocks + 1;
4875 wm->trans_wm.plane_en = true;
4978} 4876}
4979 4877
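Reviewer note: skl_compute_transition_wm() now follows the same optimistic pattern as the regular levels: the transition watermark is always enabled here, and the allocator revokes it after the fact if it turns out not to fit. The resulting flow, roughly:

    skl_compute_transition_wm(cstate, &wm_params, wm);  /* assume it fits */
    ...
    /* later, in skl_allocate_pipe_ddb(): */
    if (wm->trans_wm.plane_res_b > total[plane_id])
            memset(&wm->trans_wm, 0, sizeof(wm->trans_wm)); /* it didn't */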
4980static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state, 4878static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
@@ -4982,7 +4880,6 @@ static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
4982 enum plane_id plane_id, int color_plane) 4880 enum plane_id plane_id, int color_plane)
4983{ 4881{
4984 struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id]; 4882 struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
4985 u16 ddb_blocks = skl_ddb_entry_size(&crtc_state->wm.skl.plane_ddb_y[plane_id]);
4986 struct skl_wm_params wm_params; 4883 struct skl_wm_params wm_params;
4987 int ret; 4884 int ret;
4988 4885
@@ -4991,12 +4888,8 @@ static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
4991 if (ret) 4888 if (ret)
4992 return ret; 4889 return ret;
4993 4890
4994 ret = skl_compute_wm_levels(crtc_state, plane_state, 4891 skl_compute_wm_levels(crtc_state, plane_state, &wm_params, wm->wm);
4995 ddb_blocks, &wm_params, wm->wm); 4892 skl_compute_transition_wm(crtc_state, &wm_params, wm);
4996 if (ret)
4997 return ret;
4998
4999 skl_compute_transition_wm(crtc_state, &wm_params, wm, ddb_blocks);
5000 4893
5001 return 0; 4894 return 0;
5002} 4895}
@@ -5006,7 +4899,6 @@ static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
5006 enum plane_id plane_id) 4899 enum plane_id plane_id)
5007{ 4900{
5008 struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id]; 4901 struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
5009 u16 ddb_blocks = skl_ddb_entry_size(&crtc_state->wm.skl.plane_ddb_uv[plane_id]);
5010 struct skl_wm_params wm_params; 4902 struct skl_wm_params wm_params;
5011 int ret; 4903 int ret;
5012 4904
@@ -5018,10 +4910,7 @@ static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
5018 if (ret) 4910 if (ret)
5019 return ret; 4911 return ret;
5020 4912
5021 ret = skl_compute_wm_levels(crtc_state, plane_state, 4913 skl_compute_wm_levels(crtc_state, plane_state, &wm_params, wm->uv_wm);
5022 ddb_blocks, &wm_params, wm->uv_wm);
5023 if (ret)
5024 return ret;
5025 4914
5026 return 0; 4915 return 0;
5027} 4916}
@@ -5251,15 +5140,14 @@ bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
5251 return false; 5140 return false;
5252} 5141}
5253 5142
5254static int skl_update_pipe_wm(struct drm_crtc_state *cstate, 5143static int skl_update_pipe_wm(struct intel_crtc_state *cstate,
5255 const struct skl_pipe_wm *old_pipe_wm, 5144 const struct skl_pipe_wm *old_pipe_wm,
5256 struct skl_pipe_wm *pipe_wm, /* out */ 5145 struct skl_pipe_wm *pipe_wm, /* out */
5257 bool *changed /* out */) 5146 bool *changed /* out */)
5258{ 5147{
5259 struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate);
5260 int ret; 5148 int ret;
5261 5149
5262 ret = skl_build_pipe_wm(intel_cstate, pipe_wm); 5150 ret = skl_build_pipe_wm(cstate, pipe_wm);
5263 if (ret) 5151 if (ret)
5264 return ret; 5152 return ret;
5265 5153
@@ -5272,14 +5160,14 @@ static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
5272} 5160}
5273 5161
5274static uint32_t 5162static uint32_t
5275pipes_modified(struct drm_atomic_state *state) 5163pipes_modified(struct intel_atomic_state *state)
5276{ 5164{
5277 struct drm_crtc *crtc; 5165 struct intel_crtc *crtc;
5278 struct drm_crtc_state *cstate; 5166 struct intel_crtc_state *cstate;
5279 uint32_t i, ret = 0; 5167 uint32_t i, ret = 0;
5280 5168
5281 for_each_new_crtc_in_state(state, crtc, cstate, i) 5169 for_each_new_intel_crtc_in_state(state, crtc, cstate, i)
5282 ret |= drm_crtc_mask(crtc); 5170 ret |= drm_crtc_mask(&crtc->base);
5283 5171
5284 return ret; 5172 return ret;
5285} 5173}
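Reviewer note: pipes_modified() simply ORs one bit per CRTC in the transaction into a mask. A caller-side sketch (hypothetical usage):

    uint32_t mask = pipes_modified(state);

    if (mask & drm_crtc_mask(&crtc->base))
            ;       /* this pipe is part of the commit */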
@@ -5314,11 +5202,10 @@ skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
5314} 5202}
5315 5203
5316static int 5204static int
5317skl_compute_ddb(struct drm_atomic_state *state) 5205skl_compute_ddb(struct intel_atomic_state *state)
5318{ 5206{
5319 const struct drm_i915_private *dev_priv = to_i915(state->dev); 5207 const struct drm_i915_private *dev_priv = to_i915(state->base.dev);
5320 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 5208 struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
5321 struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
5322 struct intel_crtc_state *old_crtc_state; 5209 struct intel_crtc_state *old_crtc_state;
5323 struct intel_crtc_state *new_crtc_state; 5210 struct intel_crtc_state *new_crtc_state;
5324 struct intel_crtc *crtc; 5211 struct intel_crtc *crtc;
@@ -5326,7 +5213,7 @@ skl_compute_ddb(struct drm_atomic_state *state)
5326 5213
5327 memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb)); 5214 memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
5328 5215
5329 for_each_oldnew_intel_crtc_in_state(intel_state, crtc, old_crtc_state, 5216 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
5330 new_crtc_state, i) { 5217 new_crtc_state, i) {
5331 ret = skl_allocate_pipe_ddb(new_crtc_state, ddb); 5218 ret = skl_allocate_pipe_ddb(new_crtc_state, ddb);
5332 if (ret) 5219 if (ret)
@@ -5372,14 +5259,12 @@ skl_print_wm_changes(struct intel_atomic_state *state)
5372} 5259}
5373 5260
5374static int 5261static int
5375skl_ddb_add_affected_pipes(struct drm_atomic_state *state, bool *changed) 5262skl_ddb_add_affected_pipes(struct intel_atomic_state *state, bool *changed)
5376{ 5263{
5377 struct drm_device *dev = state->dev; 5264 struct drm_device *dev = state->base.dev;
5378 const struct drm_i915_private *dev_priv = to_i915(dev); 5265 const struct drm_i915_private *dev_priv = to_i915(dev);
5379 const struct drm_crtc *crtc; 5266 struct intel_crtc *crtc;
5380 const struct drm_crtc_state *cstate; 5267 struct intel_crtc_state *crtc_state;
5381 struct intel_crtc *intel_crtc;
5382 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
5383 uint32_t realloc_pipes = pipes_modified(state); 5268 uint32_t realloc_pipes = pipes_modified(state);
5384 int ret, i; 5269 int ret, i;
5385 5270
@@ -5398,7 +5283,7 @@ skl_ddb_add_affected_pipes(struct drm_atomic_state *state, bool *changed)
5398 * since any racing commits that want to update them would need to 5283 * since any racing commits that want to update them would need to
5399 * hold _all_ CRTC state mutexes. 5284 * hold _all_ CRTC state mutexes.
5400 */ 5285 */
5401 for_each_new_crtc_in_state(state, crtc, cstate, i) 5286 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)
5402 (*changed) = true; 5287 (*changed) = true;
5403 5288
5404 if (!*changed) 5289 if (!*changed)
@@ -5412,20 +5297,20 @@ skl_ddb_add_affected_pipes(struct drm_atomic_state *state, bool *changed)
5412 */ 5297 */
5413 if (dev_priv->wm.distrust_bios_wm) { 5298 if (dev_priv->wm.distrust_bios_wm) {
5414 ret = drm_modeset_lock(&dev->mode_config.connection_mutex, 5299 ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
5415 state->acquire_ctx); 5300 state->base.acquire_ctx);
5416 if (ret) 5301 if (ret)
5417 return ret; 5302 return ret;
5418 5303
5419 intel_state->active_pipe_changes = ~0; 5304 state->active_pipe_changes = ~0;
5420 5305
5421 /* 5306 /*
5422 * We usually only initialize intel_state->active_crtcs if 5307 * We usually only initialize state->active_crtcs if
5423 * we're doing a modeset; make sure this field is always 5308 * we're doing a modeset; make sure this field is always
5424 * initialized during the sanitization process that happens 5309 * initialized during the sanitization process that happens
5425 * on the first commit too. 5310 * on the first commit too.
5426 */ 5311 */
5427 if (!intel_state->modeset) 5312 if (!state->modeset)
5428 intel_state->active_crtcs = dev_priv->active_crtcs; 5313 state->active_crtcs = dev_priv->active_crtcs;
5429 } 5314 }
5430 5315
5431 /* 5316 /*
@@ -5441,21 +5326,19 @@ skl_ddb_add_affected_pipes(struct drm_atomic_state *state, bool *changed)
5441 * any other display updates race with this transaction, so we need 5326 * any other display updates race with this transaction, so we need
5442 * to grab the lock on *all* CRTC's. 5327 * to grab the lock on *all* CRTC's.
5443 */ 5328 */
5444 if (intel_state->active_pipe_changes || intel_state->modeset) { 5329 if (state->active_pipe_changes || state->modeset) {
5445 realloc_pipes = ~0; 5330 realloc_pipes = ~0;
5446 intel_state->wm_results.dirty_pipes = ~0; 5331 state->wm_results.dirty_pipes = ~0;
5447 } 5332 }
5448 5333
5449 /* 5334 /*
5450 * We're not recomputing for the pipes not included in the commit, so 5335 * We're not recomputing for the pipes not included in the commit, so
5451 * make sure we start with the current state. 5336 * make sure we start with the current state.
5452 */ 5337 */
5453 for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) { 5338 for_each_intel_crtc_mask(dev, crtc, realloc_pipes) {
5454 struct intel_crtc_state *cstate; 5339 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
5455 5340 if (IS_ERR(crtc_state))
5456 cstate = intel_atomic_get_crtc_state(state, intel_crtc); 5341 return PTR_ERR(crtc_state);
5457 if (IS_ERR(cstate))
5458 return PTR_ERR(cstate);
5459 } 5342 }
5460 5343
5461 return 0; 5344 return 0;
@@ -5522,12 +5405,12 @@ static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
5522} 5405}
5523 5406
5524static int 5407static int
5525skl_compute_wm(struct drm_atomic_state *state) 5408skl_compute_wm(struct intel_atomic_state *state)
5526{ 5409{
5527 struct drm_crtc *crtc; 5410 struct intel_crtc *crtc;
5528 struct drm_crtc_state *cstate; 5411 struct intel_crtc_state *cstate;
5529 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 5412 struct intel_crtc_state *old_crtc_state;
5530 struct skl_ddb_values *results = &intel_state->wm_results; 5413 struct skl_ddb_values *results = &state->wm_results;
5531 struct skl_pipe_wm *pipe_wm; 5414 struct skl_pipe_wm *pipe_wm;
5532 bool changed = false; 5415 bool changed = false;
5533 int ret, i; 5416 int ret, i;
@@ -5539,47 +5422,35 @@ skl_compute_wm(struct drm_atomic_state *state)
5539 if (ret || !changed) 5422 if (ret || !changed)
5540 return ret; 5423 return ret;
5541 5424
5542 ret = skl_compute_ddb(state);
5543 if (ret)
5544 return ret;
5545
5546 /* 5425 /*
5547 * Calculate WM's for all pipes that are part of this transaction. 5426 * Calculate WM's for all pipes that are part of this transaction.
5548 * Note that the DDB allocation above may have added more CRTCs that 5427 * Note that skl_ddb_add_affected_pipes may have added more CRTCs that
5549 * weren't otherwise being modified (and set bits in dirty_pipes) if 5428 * weren't otherwise being modified (and set bits in dirty_pipes) if
5550 * pipe allocations had to change. 5429 * pipe allocations had to change.
5551 *
5552 * FIXME: Now that we're doing this in the atomic check phase, we
5553 * should allow skl_update_pipe_wm() to return failure in cases where
5554 * no suitable watermark values can be found.
5555 */ 5430 */
5556 for_each_new_crtc_in_state(state, crtc, cstate, i) { 5431 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
5557 struct intel_crtc_state *intel_cstate = 5432 cstate, i) {
5558 to_intel_crtc_state(cstate);
5559 const struct skl_pipe_wm *old_pipe_wm = 5433 const struct skl_pipe_wm *old_pipe_wm =
5560 &to_intel_crtc_state(crtc->state)->wm.skl.optimal; 5434 &old_crtc_state->wm.skl.optimal;
5561 5435
5562 pipe_wm = &intel_cstate->wm.skl.optimal; 5436 pipe_wm = &cstate->wm.skl.optimal;
5563 ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm, &changed); 5437 ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm, &changed);
5564 if (ret) 5438 if (ret)
5565 return ret; 5439 return ret;
5566 5440
5567 ret = skl_wm_add_affected_planes(intel_state, 5441 ret = skl_wm_add_affected_planes(state, crtc);
5568 to_intel_crtc(crtc));
5569 if (ret) 5442 if (ret)
5570 return ret; 5443 return ret;
5571 5444
5572 if (changed) 5445 if (changed)
5573 results->dirty_pipes |= drm_crtc_mask(crtc); 5446 results->dirty_pipes |= drm_crtc_mask(&crtc->base);
5574
5575 if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
5576 /* This pipe's WM's did not change */
5577 continue;
5578
5579 intel_cstate->update_wm_pre = true;
5580 } 5447 }
5581 5448
5582 skl_print_wm_changes(intel_state); 5449 ret = skl_compute_ddb(state);
5450 if (ret)
5451 return ret;
5452
5453 skl_print_wm_changes(state);
5583 5454
5584 return 0; 5455 return 0;
5585} 5456}
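Reviewer note: observe the inversion relative to the old code. Per-pipe watermark levels are computed first and skl_compute_ddb() runs last, because the new allocator sizes each plane's DDB share from the plane_res_b values computed here. Simplified call order (error handling elided):

    for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, cstate, i) {
            skl_update_pipe_wm(cstate, ...);        /* per-level block needs */
            skl_wm_add_affected_planes(state, crtc);
    }
    skl_compute_ddb(state);                         /* consumes those needs  */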
@@ -5617,13 +5488,13 @@ static void skl_initial_wm(struct intel_atomic_state *state,
5617 mutex_unlock(&dev_priv->wm.wm_mutex); 5488 mutex_unlock(&dev_priv->wm.wm_mutex);
5618} 5489}
5619 5490
5620static void ilk_compute_wm_config(struct drm_device *dev, 5491static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
5621 struct intel_wm_config *config) 5492 struct intel_wm_config *config)
5622{ 5493{
5623 struct intel_crtc *crtc; 5494 struct intel_crtc *crtc;
5624 5495
5625 /* Compute the currently _active_ config */ 5496 /* Compute the currently _active_ config */
5626 for_each_intel_crtc(dev, crtc) { 5497 for_each_intel_crtc(&dev_priv->drm, crtc) {
5627 const struct intel_pipe_wm *wm = &crtc->wm.active.ilk; 5498 const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
5628 5499
5629 if (!wm->pipe_enabled) 5500 if (!wm->pipe_enabled)
@@ -5637,25 +5508,24 @@ static void ilk_compute_wm_config(struct drm_device *dev,
5637 5508
5638static void ilk_program_watermarks(struct drm_i915_private *dev_priv) 5509static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
5639{ 5510{
5640 struct drm_device *dev = &dev_priv->drm;
5641 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm; 5511 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
5642 struct ilk_wm_maximums max; 5512 struct ilk_wm_maximums max;
5643 struct intel_wm_config config = {}; 5513 struct intel_wm_config config = {};
5644 struct ilk_wm_values results = {}; 5514 struct ilk_wm_values results = {};
5645 enum intel_ddb_partitioning partitioning; 5515 enum intel_ddb_partitioning partitioning;
5646 5516
5647 ilk_compute_wm_config(dev, &config); 5517 ilk_compute_wm_config(dev_priv, &config);
5648 5518
5649 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max); 5519 ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_1_2, &max);
5650 ilk_wm_merge(dev, &config, &max, &lp_wm_1_2); 5520 ilk_wm_merge(dev_priv, &config, &max, &lp_wm_1_2);
5651 5521
5652 /* 5/6 split only in single pipe config on IVB+ */ 5522 /* 5/6 split only in single pipe config on IVB+ */
5653 if (INTEL_GEN(dev_priv) >= 7 && 5523 if (INTEL_GEN(dev_priv) >= 7 &&
5654 config.num_pipes_active == 1 && config.sprites_enabled) { 5524 config.num_pipes_active == 1 && config.sprites_enabled) {
5655 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max); 5525 ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_5_6, &max);
5656 ilk_wm_merge(dev, &config, &max, &lp_wm_5_6); 5526 ilk_wm_merge(dev_priv, &config, &max, &lp_wm_5_6);
5657 5527
5658 best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6); 5528 best_lp_wm = ilk_find_best_result(dev_priv, &lp_wm_1_2, &lp_wm_5_6);
5659 } else { 5529 } else {
5660 best_lp_wm = &lp_wm_1_2; 5530 best_lp_wm = &lp_wm_1_2;
5661 } 5531 }
@@ -5663,7 +5533,7 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
5663 partitioning = (best_lp_wm == &lp_wm_1_2) ? 5533 partitioning = (best_lp_wm == &lp_wm_1_2) ?
5664 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6; 5534 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
5665 5535
5666 ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results); 5536 ilk_compute_wm_results(dev_priv, best_lp_wm, partitioning, &results);
5667 5537
5668 ilk_write_wm_values(dev_priv, &results); 5538 ilk_write_wm_values(dev_priv, &results);
5669} 5539}
@@ -5703,19 +5573,18 @@ static inline void skl_wm_level_from_reg_val(uint32_t val,
5703 PLANE_WM_LINES_MASK; 5573 PLANE_WM_LINES_MASK;
5704} 5574}
5705 5575
5706void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc, 5576void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
5707 struct skl_pipe_wm *out) 5577 struct skl_pipe_wm *out)
5708{ 5578{
5709 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 5579 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5710 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5580 enum pipe pipe = crtc->pipe;
5711 enum pipe pipe = intel_crtc->pipe;
5712 int level, max_level; 5581 int level, max_level;
5713 enum plane_id plane_id; 5582 enum plane_id plane_id;
5714 uint32_t val; 5583 uint32_t val;
5715 5584
5716 max_level = ilk_wm_max_level(dev_priv); 5585 max_level = ilk_wm_max_level(dev_priv);
5717 5586
5718 for_each_plane_id_on_crtc(intel_crtc, plane_id) { 5587 for_each_plane_id_on_crtc(crtc, plane_id) {
5719 struct skl_plane_wm *wm = &out->planes[plane_id]; 5588 struct skl_plane_wm *wm = &out->planes[plane_id];
5720 5589
5721 for (level = 0; level <= max_level; level++) { 5590 for (level = 0; level <= max_level; level++) {
@@ -5735,30 +5604,27 @@ void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
5735 skl_wm_level_from_reg_val(val, &wm->trans_wm); 5604 skl_wm_level_from_reg_val(val, &wm->trans_wm);
5736 } 5605 }
5737 5606
5738 if (!intel_crtc->active) 5607 if (!crtc->active)
5739 return; 5608 return;
5740 5609
5741 out->linetime = I915_READ(PIPE_WM_LINETIME(pipe)); 5610 out->linetime = I915_READ(PIPE_WM_LINETIME(pipe));
5742} 5611}
5743 5612
5744void skl_wm_get_hw_state(struct drm_device *dev) 5613void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
5745{ 5614{
5746 struct drm_i915_private *dev_priv = to_i915(dev);
5747 struct skl_ddb_values *hw = &dev_priv->wm.skl_hw; 5615 struct skl_ddb_values *hw = &dev_priv->wm.skl_hw;
5748 struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb; 5616 struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
5749 struct drm_crtc *crtc; 5617 struct intel_crtc *crtc;
5750 struct intel_crtc *intel_crtc;
5751 struct intel_crtc_state *cstate; 5618 struct intel_crtc_state *cstate;
5752 5619
5753 skl_ddb_get_hw_state(dev_priv, ddb); 5620 skl_ddb_get_hw_state(dev_priv, ddb);
5754 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 5621 for_each_intel_crtc(&dev_priv->drm, crtc) {
5755 intel_crtc = to_intel_crtc(crtc); 5622 cstate = to_intel_crtc_state(crtc->base.state);
5756 cstate = to_intel_crtc_state(crtc->state);
5757 5623
5758 skl_pipe_wm_get_hw_state(crtc, &cstate->wm.skl.optimal); 5624 skl_pipe_wm_get_hw_state(crtc, &cstate->wm.skl.optimal);
5759 5625
5760 if (intel_crtc->active) 5626 if (crtc->active)
5761 hw->dirty_pipes |= drm_crtc_mask(crtc); 5627 hw->dirty_pipes |= drm_crtc_mask(&crtc->base);
5762 } 5628 }
5763 5629
5764 if (dev_priv->active_crtcs) { 5630 if (dev_priv->active_crtcs) {
@@ -5767,15 +5633,14 @@ void skl_wm_get_hw_state(struct drm_device *dev)
5767 } 5633 }
5768} 5634}
5769 5635
5770static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc) 5636static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
5771{ 5637{
5772 struct drm_device *dev = crtc->dev; 5638 struct drm_device *dev = crtc->base.dev;
5773 struct drm_i915_private *dev_priv = to_i915(dev); 5639 struct drm_i915_private *dev_priv = to_i915(dev);
5774 struct ilk_wm_values *hw = &dev_priv->wm.hw; 5640 struct ilk_wm_values *hw = &dev_priv->wm.hw;
5775 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5641 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->base.state);
5776 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
5777 struct intel_pipe_wm *active = &cstate->wm.ilk.optimal; 5642 struct intel_pipe_wm *active = &cstate->wm.ilk.optimal;
5778 enum pipe pipe = intel_crtc->pipe; 5643 enum pipe pipe = crtc->pipe;
5779 static const i915_reg_t wm0_pipe_reg[] = { 5644 static const i915_reg_t wm0_pipe_reg[] = {
5780 [PIPE_A] = WM0_PIPEA_ILK, 5645 [PIPE_A] = WM0_PIPEA_ILK,
5781 [PIPE_B] = WM0_PIPEB_ILK, 5646 [PIPE_B] = WM0_PIPEB_ILK,
@@ -5788,7 +5653,7 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
5788 5653
5789 memset(active, 0, sizeof(*active)); 5654 memset(active, 0, sizeof(*active));
5790 5655
5791 active->pipe_enabled = intel_crtc->active; 5656 active->pipe_enabled = crtc->active;
5792 5657
5793 if (active->pipe_enabled) { 5658 if (active->pipe_enabled) {
5794 u32 tmp = hw->wm_pipe[pipe]; 5659 u32 tmp = hw->wm_pipe[pipe];
@@ -5816,7 +5681,7 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
5816 active->wm[level].enable = true; 5681 active->wm[level].enable = true;
5817 } 5682 }
5818 5683
5819 intel_crtc->wm.active.ilk = *active; 5684 crtc->wm.active.ilk = *active;
5820} 5685}
5821 5686
5822#define _FW_WM(value, plane) \ 5687#define _FW_WM(value, plane) \
@@ -5926,9 +5791,8 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
5926#undef _FW_WM 5791#undef _FW_WM
5927#undef _FW_WM_VLV 5792#undef _FW_WM_VLV
5928 5793
5929void g4x_wm_get_hw_state(struct drm_device *dev) 5794void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
5930{ 5795{
5931 struct drm_i915_private *dev_priv = to_i915(dev);
5932 struct g4x_wm_values *wm = &dev_priv->wm.g4x; 5796 struct g4x_wm_values *wm = &dev_priv->wm.g4x;
5933 struct intel_crtc *crtc; 5797 struct intel_crtc *crtc;
5934 5798
@@ -5936,7 +5800,7 @@ void g4x_wm_get_hw_state(struct drm_device *dev)
5936 5800
5937 wm->cxsr = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; 5801 wm->cxsr = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
5938 5802
5939 for_each_intel_crtc(dev, crtc) { 5803 for_each_intel_crtc(&dev_priv->drm, crtc) {
5940 struct intel_crtc_state *crtc_state = 5804 struct intel_crtc_state *crtc_state =
5941 to_intel_crtc_state(crtc->base.state); 5805 to_intel_crtc_state(crtc->base.state);
5942 struct g4x_wm_state *active = &crtc->wm.active.g4x; 5806 struct g4x_wm_state *active = &crtc->wm.active.g4x;
@@ -6067,9 +5931,8 @@ void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
6067 mutex_unlock(&dev_priv->wm.wm_mutex); 5931 mutex_unlock(&dev_priv->wm.wm_mutex);
6068} 5932}
6069 5933
6070void vlv_wm_get_hw_state(struct drm_device *dev) 5934void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
6071{ 5935{
6072 struct drm_i915_private *dev_priv = to_i915(dev);
6073 struct vlv_wm_values *wm = &dev_priv->wm.vlv; 5936 struct vlv_wm_values *wm = &dev_priv->wm.vlv;
6074 struct intel_crtc *crtc; 5937 struct intel_crtc *crtc;
6075 u32 val; 5938 u32 val;
@@ -6113,7 +5976,7 @@ void vlv_wm_get_hw_state(struct drm_device *dev)
6113 mutex_unlock(&dev_priv->pcu_lock); 5976 mutex_unlock(&dev_priv->pcu_lock);
6114 } 5977 }
6115 5978
6116 for_each_intel_crtc(dev, crtc) { 5979 for_each_intel_crtc(&dev_priv->drm, crtc) {
6117 struct intel_crtc_state *crtc_state = 5980 struct intel_crtc_state *crtc_state =
6118 to_intel_crtc_state(crtc->base.state); 5981 to_intel_crtc_state(crtc->base.state);
6119 struct vlv_wm_state *active = &crtc->wm.active.vlv; 5982 struct vlv_wm_state *active = &crtc->wm.active.vlv;
@@ -6230,15 +6093,14 @@ static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
6230 */ 6093 */
6231} 6094}
6232 6095
6233void ilk_wm_get_hw_state(struct drm_device *dev) 6096void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
6234{ 6097{
6235 struct drm_i915_private *dev_priv = to_i915(dev);
6236 struct ilk_wm_values *hw = &dev_priv->wm.hw; 6098 struct ilk_wm_values *hw = &dev_priv->wm.hw;
6237 struct drm_crtc *crtc; 6099 struct intel_crtc *crtc;
6238 6100
6239 ilk_init_lp_watermarks(dev_priv); 6101 ilk_init_lp_watermarks(dev_priv);
6240 6102
6241 for_each_crtc(dev, crtc) 6103 for_each_intel_crtc(&dev_priv->drm, crtc)
6242 ilk_pipe_wm_get_hw_state(crtc); 6104 ilk_pipe_wm_get_hw_state(crtc);
6243 6105
6244 hw->wm_lp[0] = I915_READ(WM1_LP_ILK); 6106 hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
@@ -7049,7 +6911,7 @@ static void gen9_enable_rps(struct drm_i915_private *dev_priv)
7049 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 6911 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
7050 6912
7051 /* Program defaults and thresholds for RPS */ 6913 /* Program defaults and thresholds for RPS */
7052 if (IS_GEN9(dev_priv)) 6914 if (IS_GEN(dev_priv, 9))
7053 I915_WRITE(GEN6_RC_VIDEO_FREQ, 6915 I915_WRITE(GEN6_RC_VIDEO_FREQ,
7054 GEN9_FREQUENCY(dev_priv->gt_pm.rps.rp1_freq)); 6916 GEN9_FREQUENCY(dev_priv->gt_pm.rps.rp1_freq));
7055 6917
@@ -7285,9 +7147,9 @@ static void gen6_enable_rc6(struct drm_i915_private *dev_priv)
7285 7147
7286 rc6vids = 0; 7148 rc6vids = 0;
7287 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); 7149 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
7288 if (IS_GEN6(dev_priv) && ret) { 7150 if (IS_GEN(dev_priv, 6) && ret) {
7289 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n"); 7151 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
7290 } else if (IS_GEN6(dev_priv) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) { 7152 } else if (IS_GEN(dev_priv, 6) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
7291 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n", 7153 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
7292 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450); 7154 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
7293 rc6vids &= 0xffff00; 7155 rc6vids &= 0xffff00;
@@ -7412,7 +7274,7 @@ static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
7412 7274
7413 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE); 7275 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
7414 7276
7415 switch (INTEL_INFO(dev_priv)->sseu.eu_total) { 7277 switch (RUNTIME_INFO(dev_priv)->sseu.eu_total) {
7416 case 8: 7278 case 8:
7417 /* (2 * 4) config */ 7279 /* (2 * 4) config */
7418 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT); 7280 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
@@ -7987,7 +7849,7 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
7987{ 7849{
7988 unsigned long val; 7850 unsigned long val;
7989 7851
7990 if (!IS_GEN5(dev_priv)) 7852 if (!IS_GEN(dev_priv, 5))
7991 return 0; 7853 return 0;
7992 7854
7993 spin_lock_irq(&mchdev_lock); 7855 spin_lock_irq(&mchdev_lock);
@@ -8071,7 +7933,7 @@ static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
8071 7933
8072void i915_update_gfx_val(struct drm_i915_private *dev_priv) 7934void i915_update_gfx_val(struct drm_i915_private *dev_priv)
8073{ 7935{
8074 if (!IS_GEN5(dev_priv)) 7936 if (!IS_GEN(dev_priv, 5))
8075 return; 7937 return;
8076 7938
8077 spin_lock_irq(&mchdev_lock); 7939 spin_lock_irq(&mchdev_lock);
@@ -8122,7 +7984,7 @@ unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
8122{ 7984{
8123 unsigned long val; 7985 unsigned long val;
8124 7986
8125 if (!IS_GEN5(dev_priv)) 7987 if (!IS_GEN(dev_priv, 5))
8126 return 0; 7988 return 0;
8127 7989
8128 spin_lock_irq(&mchdev_lock); 7990 spin_lock_irq(&mchdev_lock);
@@ -8410,7 +8272,7 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
8410 intel_freq_opcode(dev_priv, 450)); 8272 intel_freq_opcode(dev_priv, 450));
8411 8273
8412 /* After setting max-softlimit, find the overclock max freq */ 8274 /* After setting max-softlimit, find the overclock max freq */
8413 if (IS_GEN6(dev_priv) || 8275 if (IS_GEN(dev_priv, 6) ||
8414 IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) { 8276 IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
8415 u32 params = 0; 8277 u32 params = 0;
8416 8278
@@ -9480,9 +9342,9 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
9480 dev_priv->display.init_clock_gating = ivb_init_clock_gating; 9342 dev_priv->display.init_clock_gating = ivb_init_clock_gating;
9481 else if (IS_VALLEYVIEW(dev_priv)) 9343 else if (IS_VALLEYVIEW(dev_priv))
9482 dev_priv->display.init_clock_gating = vlv_init_clock_gating; 9344 dev_priv->display.init_clock_gating = vlv_init_clock_gating;
9483 else if (IS_GEN6(dev_priv)) 9345 else if (IS_GEN(dev_priv, 6))
9484 dev_priv->display.init_clock_gating = gen6_init_clock_gating; 9346 dev_priv->display.init_clock_gating = gen6_init_clock_gating;
9485 else if (IS_GEN5(dev_priv)) 9347 else if (IS_GEN(dev_priv, 5))
9486 dev_priv->display.init_clock_gating = ilk_init_clock_gating; 9348 dev_priv->display.init_clock_gating = ilk_init_clock_gating;
9487 else if (IS_G4X(dev_priv)) 9349 else if (IS_G4X(dev_priv))
9488 dev_priv->display.init_clock_gating = g4x_init_clock_gating; 9350 dev_priv->display.init_clock_gating = g4x_init_clock_gating;
@@ -9490,11 +9352,11 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
9490 dev_priv->display.init_clock_gating = i965gm_init_clock_gating; 9352 dev_priv->display.init_clock_gating = i965gm_init_clock_gating;
9491 else if (IS_I965G(dev_priv)) 9353 else if (IS_I965G(dev_priv))
9492 dev_priv->display.init_clock_gating = i965g_init_clock_gating; 9354 dev_priv->display.init_clock_gating = i965g_init_clock_gating;
9493 else if (IS_GEN3(dev_priv)) 9355 else if (IS_GEN(dev_priv, 3))
9494 dev_priv->display.init_clock_gating = gen3_init_clock_gating; 9356 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
9495 else if (IS_I85X(dev_priv) || IS_I865G(dev_priv)) 9357 else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
9496 dev_priv->display.init_clock_gating = i85x_init_clock_gating; 9358 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
9497 else if (IS_GEN2(dev_priv)) 9359 else if (IS_GEN(dev_priv, 2))
9498 dev_priv->display.init_clock_gating = i830_init_clock_gating; 9360 dev_priv->display.init_clock_gating = i830_init_clock_gating;
9499 else { 9361 else {
9500 MISSING_CASE(INTEL_DEVID(dev_priv)); 9362 MISSING_CASE(INTEL_DEVID(dev_priv));
@@ -9508,7 +9370,7 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
9508 /* For cxsr */ 9370 /* For cxsr */
9509 if (IS_PINEVIEW(dev_priv)) 9371 if (IS_PINEVIEW(dev_priv))
9510 i915_pineview_get_mem_freq(dev_priv); 9372 i915_pineview_get_mem_freq(dev_priv);
9511 else if (IS_GEN5(dev_priv)) 9373 else if (IS_GEN(dev_priv, 5))
9512 i915_ironlake_get_mem_freq(dev_priv); 9374 i915_ironlake_get_mem_freq(dev_priv);
9513 9375
9514 /* For FIFO watermark updates */ 9376 /* For FIFO watermark updates */
@@ -9520,9 +9382,9 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
9520 } else if (HAS_PCH_SPLIT(dev_priv)) { 9382 } else if (HAS_PCH_SPLIT(dev_priv)) {
9521 ilk_setup_wm_latency(dev_priv); 9383 ilk_setup_wm_latency(dev_priv);
9522 9384
9523 if ((IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[1] && 9385 if ((IS_GEN(dev_priv, 5) && dev_priv->wm.pri_latency[1] &&
9524 dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) || 9386 dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
9525 (!IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[0] && 9387 (!IS_GEN(dev_priv, 5) && dev_priv->wm.pri_latency[0] &&
9526 dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) { 9388 dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
9527 dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm; 9389 dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
9528 dev_priv->display.compute_intermediate_wm = 9390 dev_priv->display.compute_intermediate_wm =
@@ -9563,12 +9425,12 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
 			dev_priv->display.update_wm = NULL;
 		} else
 			dev_priv->display.update_wm = pineview_update_wm;
-	} else if (IS_GEN4(dev_priv)) {
+	} else if (IS_GEN(dev_priv, 4)) {
 		dev_priv->display.update_wm = i965_update_wm;
-	} else if (IS_GEN3(dev_priv)) {
+	} else if (IS_GEN(dev_priv, 3)) {
 		dev_priv->display.update_wm = i9xx_update_wm;
 		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
-	} else if (IS_GEN2(dev_priv)) {
+	} else if (IS_GEN(dev_priv, 2)) {
 		if (INTEL_INFO(dev_priv)->num_pipes == 1) {
 			dev_priv->display.update_wm = i845_update_wm;
 			dev_priv->display.get_fifo_size = i845_get_fifo_size;
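The recurring change in these hunks swaps the per-generation predicates (IS_GEN2() through IS_GEN10()) for a parameterized IS_GEN(dev_priv, n), and the old two-argument range form for IS_GEN_RANGE(). A minimal sketch of how such predicates can be modelled; the real i915 macros in i915_drv.h test a precomputed generation bitmask on the device, so the struct and definitions here are illustrative only:

#include <stdio.h>

struct dev_info { unsigned int gen; };	/* stand-in for drm_i915_private */

#define IS_GEN(p, n)		((p)->gen == (n))
#define IS_GEN_RANGE(p, s, e)	((p)->gen >= (s) && (p)->gen <= (e))

int main(void)
{
	struct dev_info snb = { .gen = 6 };

	printf("%d %d %d\n",
	       IS_GEN(&snb, 6),			/* 1: exact generation match */
	       IS_GEN(&snb, 7),			/* 0 */
	       IS_GEN_RANGE(&snb, 6, 7));	/* 1: inclusive range check */
	return 0;
}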
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 419e56342523..0f6b2b4702e3 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -51,7 +51,6 @@
  * must be correctly synchronized/cancelled when shutting down the pipe."
  */

-#include <drm/drmP.h>

 #include "intel_drv.h"
 #include "i915_drv.h"
@@ -261,6 +260,32 @@ static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
 	return val;
 }

+static u16 intel_dp_get_su_x_granularity(struct intel_dp *intel_dp)
+{
+	u16 val;
+	ssize_t r;
+
+	/*
+	 * Return the default X granularity if granularity is not required
+	 * or if the DPCD read fails.
+	 */
+	if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED))
+		return 4;
+
+	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &val, 2);
+	if (r != 2)
+		DRM_DEBUG_KMS("Unable to read DP_PSR2_SU_X_GRANULARITY\n");
+
+	/*
+	 * Spec says that if the value read is 0 the default granularity should
+	 * be used instead.
+	 */
+	if (r != 2 || val == 0)
+		val = 4;
+
+	return val;
+}
+
 void intel_psr_init_dpcd(struct intel_dp *intel_dp)
 {
 	struct drm_i915_private *dev_priv =
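The helper added above has three paths that all end in the spec default of 4: granularity not required, a short DPCD read, or a sink that reports 0. A self-contained sketch of just that decision logic; read_dpcd_u16() is a hypothetical stand-in for drm_dp_dpcd_read() and, like it, returns the number of bytes read:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for drm_dp_dpcd_read(); simulates a sink that
 * reports 0, which the spec says means "use the default". */
static long read_dpcd_u16(uint16_t *val)
{
	*val = 0;
	return 2;	/* two bytes read successfully */
}

static uint16_t su_x_granularity(int granularity_required)
{
	uint16_t val;
	long r;

	if (!granularity_required)
		return 4;		/* default when the sink does not require one */

	r = read_dpcd_u16(&val);
	if (r != 2 || val == 0)		/* short read or zero: fall back */
		val = 4;

	return val;
}

int main(void)
{
	printf("%u %u\n", su_x_granularity(0), su_x_granularity(1));	/* 4 4 */
	return 0;
}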
@@ -274,10 +299,16 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
 	DRM_DEBUG_KMS("eDP panel supports PSR version %x\n",
 		      intel_dp->psr_dpcd[0]);

+	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
+		DRM_DEBUG_KMS("PSR support not currently available for this panel\n");
+		return;
+	}
+
 	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
 		DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n");
 		return;
 	}
+
 	dev_priv->psr.sink_support = true;
 	dev_priv->psr.sink_sync_latency =
 		intel_dp_get_sink_sync_latency(intel_dp);
@@ -309,6 +340,8 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
 		if (dev_priv->psr.sink_psr2_support) {
 			dev_priv->psr.colorimetry_support =
 				intel_dp_get_colorimetry_status(intel_dp);
+			dev_priv->psr.su_x_granularity =
+				intel_dp_get_su_x_granularity(intel_dp);
 		}
 	}
 }
@@ -388,13 +421,15 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp)
 	if (dev_priv->psr.psr2_enabled) {
 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
 				   DP_ALPM_ENABLE);
-		dpcd_val |= DP_PSR_ENABLE_PSR2;
+		dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
+	} else {
+		if (dev_priv->psr.link_standby)
+			dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
+
+		if (INTEL_GEN(dev_priv) >= 8)
+			dpcd_val |= DP_PSR_CRC_VERIFICATION;
 	}

-	if (dev_priv->psr.link_standby)
-		dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
-	if (!dev_priv->psr.psr2_enabled && INTEL_GEN(dev_priv) >= 8)
-		dpcd_val |= DP_PSR_CRC_VERIFICATION;
 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);

 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
@@ -468,9 +503,6 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
 	idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
 	val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;

-	/* FIXME: selective update is probably totally broken because it doesn't
-	 * mesh at all with our frontbuffer tracking. And the hw alone isn't
-	 * good enough. */
 	val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
 	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
 		val |= EDP_Y_COORDINATE_ENABLE;
@@ -519,7 +551,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
 	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
 		psr_max_h = 4096;
 		psr_max_v = 2304;
-	} else if (IS_GEN9(dev_priv)) {
+	} else if (IS_GEN(dev_priv, 9)) {
 		psr_max_h = 3640;
 		psr_max_v = 2304;
 	}
@@ -531,6 +563,18 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
 		return false;
 	}

+	/*
+	 * HW sends SU blocks of size four scan lines, which means the starting
+	 * X coordinate and Y granularity requirements will always be met. We
+	 * only need to validate the SU block width is a multiple of
+	 * x granularity.
+	 */
+	if (crtc_hdisplay % dev_priv->psr.su_x_granularity) {
+		DRM_DEBUG_KMS("PSR2 not enabled, hdisplay(%d) not multiple of %d\n",
+			      crtc_hdisplay, dev_priv->psr.su_x_granularity);
+		return false;
+	}
+
 	return true;
 }

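The check above only has to divide: with four-scan-line SU blocks the Y and starting-X constraints hold by construction, so PSR2 eligibility reduces to hdisplay being a whole number of x-granularity units. For example, 1920 % 4 == 0 passes, while a 1366-wide panel fails for granularity 4 because 1366 = 4 * 341 + 2. A sketch:

#include <stdio.h>

/* Same divisibility test as in intel_psr2_config_valid() above. */
static int su_width_ok(int hdisplay, int granularity)
{
	return hdisplay % granularity == 0;
}

int main(void)
{
	printf("%d\n", su_width_ok(1920, 4));	/* 1: PSR2 allowed */
	printf("%d\n", su_width_ok(1366, 4));	/* 0: rejected */
	return 0;
}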
@@ -641,17 +685,14 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 		hsw_psr_setup_aux(intel_dp);

-	if (dev_priv->psr.psr2_enabled) {
+	if (dev_priv->psr.psr2_enabled && (IS_GEN(dev_priv, 9) &&
+	    !IS_GEMINILAKE(dev_priv))) {
 		i915_reg_t reg = gen9_chicken_trans_reg(dev_priv,
 							cpu_transcoder);
 		u32 chicken = I915_READ(reg);

-		if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv))
-			chicken |= (PSR2_VSC_ENABLE_PROG_HEADER
-				    | PSR2_ADD_VERTICAL_LINE_COUNT);
-
-		else
-			chicken &= ~VSC_DATA_SEL_SOFTWARE_CONTROL;
+		chicken |= PSR2_VSC_ENABLE_PROG_HEADER |
+			   PSR2_ADD_VERTICAL_LINE_COUNT;
 		I915_WRITE(reg, chicken);
 	}

diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index fbeaec3994e7..26b7274a2d43 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -29,7 +29,6 @@

 #include <linux/log2.h>

-#include <drm/drmP.h>
 #include <drm/i915_drm.h>

 #include "i915_drv.h"
@@ -133,7 +132,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
 	cmd = MI_FLUSH;
 	if (mode & EMIT_INVALIDATE) {
 		cmd |= MI_EXE_FLUSH;
-		if (IS_G4X(rq->i915) || IS_GEN5(rq->i915))
+		if (IS_G4X(rq->i915) || IS_GEN(rq->i915, 5))
 			cmd |= MI_INVALIDATE_ISP;
 	}

@@ -217,7 +216,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
  * really our business. That leaves only stall at scoreboard.
  */
 static int
-intel_emit_post_sync_nonzero_flush(struct i915_request *rq)
+gen6_emit_post_sync_nonzero_flush(struct i915_request *rq)
 {
 	u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
 	u32 *cs;
@@ -257,7 +256,7 @@ gen6_render_ring_flush(struct i915_request *rq, u32 mode)
 	int ret;

 	/* Force SNB workarounds for PIPE_CONTROL flushes */
-	ret = intel_emit_post_sync_nonzero_flush(rq);
+	ret = gen6_emit_post_sync_nonzero_flush(rq);
 	if (ret)
 		return ret;

@@ -300,6 +299,37 @@ gen6_render_ring_flush(struct i915_request *rq, u32 mode)
 	return 0;
 }

+static void gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
+{
+	/* First we do the gen6_emit_post_sync_nonzero_flush w/a */
+	*cs++ = GFX_OP_PIPE_CONTROL(4);
+	*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
+	*cs++ = 0;
+	*cs++ = 0;
+
+	*cs++ = GFX_OP_PIPE_CONTROL(4);
+	*cs++ = PIPE_CONTROL_QW_WRITE;
+	*cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
+	*cs++ = 0;
+
+	/* Finally we can flush and with it emit the breadcrumb */
+	*cs++ = GFX_OP_PIPE_CONTROL(4);
+	*cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+		 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+		 PIPE_CONTROL_DC_FLUSH_ENABLE |
+		 PIPE_CONTROL_QW_WRITE |
+		 PIPE_CONTROL_CS_STALL);
+	*cs++ = intel_hws_seqno_address(rq->engine) | PIPE_CONTROL_GLOBAL_GTT;
+	*cs++ = rq->global_seqno;
+
+	*cs++ = MI_USER_INTERRUPT;
+	*cs++ = MI_NOOP;
+
+	rq->tail = intel_ring_offset(rq, cs);
+	assert_ring_tail_valid(rq->ring, rq->tail);
+}
+static const int gen6_rcs_emit_breadcrumb_sz = 14;
+
 static int
 gen7_render_ring_cs_stall_wa(struct i915_request *rq)
 {
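gen6_rcs_emit_breadcrumb_sz must equal the number of dwords the function writes, since the driver pre-reserves exactly that much ring space. Counting the emission above: three 4-dword PIPE_CONTROL packets plus MI_USER_INTERRUPT and MI_NOOP. A sketch of the arithmetic:

#include <assert.h>

int main(void)
{
	int sz = 4	/* stall-at-scoreboard PIPE_CONTROL */
	       + 4	/* post-sync QW_WRITE PIPE_CONTROL */
	       + 4	/* flush + breadcrumb PIPE_CONTROL */
	       + 1	/* MI_USER_INTERRUPT */
	       + 1;	/* MI_NOOP */

	assert(sz == 14);	/* matches gen6_rcs_emit_breadcrumb_sz */
	return 0;
}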
@@ -379,11 +409,86 @@ gen7_render_ring_flush(struct i915_request *rq, u32 mode)
 	return 0;
 }

-static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
+static void gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
+{
+	*cs++ = GFX_OP_PIPE_CONTROL(4);
+	*cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+		 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+		 PIPE_CONTROL_DC_FLUSH_ENABLE |
+		 PIPE_CONTROL_FLUSH_ENABLE |
+		 PIPE_CONTROL_QW_WRITE |
+		 PIPE_CONTROL_GLOBAL_GTT_IVB |
+		 PIPE_CONTROL_CS_STALL);
+	*cs++ = intel_hws_seqno_address(rq->engine);
+	*cs++ = rq->global_seqno;
+
+	*cs++ = MI_USER_INTERRUPT;
+	*cs++ = MI_NOOP;
+
+	rq->tail = intel_ring_offset(rq, cs);
+	assert_ring_tail_valid(rq->ring, rq->tail);
+}
+static const int gen7_rcs_emit_breadcrumb_sz = 6;
+
+static void gen6_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
+{
+	*cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW;
+	*cs++ = intel_hws_seqno_address(rq->engine) | MI_FLUSH_DW_USE_GTT;
+	*cs++ = rq->global_seqno;
+	*cs++ = MI_USER_INTERRUPT;
+
+	rq->tail = intel_ring_offset(rq, cs);
+	assert_ring_tail_valid(rq->ring, rq->tail);
+}
+static const int gen6_xcs_emit_breadcrumb_sz = 4;
+
+#define GEN7_XCS_WA 32
+static void gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
+{
+	int i;
+
+	*cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW;
+	*cs++ = intel_hws_seqno_address(rq->engine) | MI_FLUSH_DW_USE_GTT;
+	*cs++ = rq->global_seqno;
+
+	for (i = 0; i < GEN7_XCS_WA; i++) {
+		*cs++ = MI_STORE_DWORD_INDEX;
+		*cs++ = I915_GEM_HWS_INDEX_ADDR;
+		*cs++ = rq->global_seqno;
+	}
+
+	*cs++ = MI_FLUSH_DW;
+	*cs++ = 0;
+	*cs++ = 0;
+
+	*cs++ = MI_USER_INTERRUPT;
+	*cs++ = MI_NOOP;
+
+	rq->tail = intel_ring_offset(rq, cs);
+	assert_ring_tail_valid(rq->ring, rq->tail);
+}
+static const int gen7_xcs_emit_breadcrumb_sz = 8 + GEN7_XCS_WA * 3;
+#undef GEN7_XCS_WA
+
+static void set_hwstam(struct intel_engine_cs *engine, u32 mask)
+{
+	/*
+	 * Keep the render interrupt unmasked as this papers over
+	 * lost interrupts following a reset.
+	 */
+	if (engine->class == RENDER_CLASS) {
+		if (INTEL_GEN(engine->i915) >= 6)
+			mask &= ~BIT(0);
+		else
+			mask &= ~I915_USER_INTERRUPT;
+	}
+
+	intel_engine_set_hwsp_writemask(engine, mask);
+}
+
+static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	struct page *page = virt_to_page(engine->status_page.page_addr);
-	phys_addr_t phys = PFN_PHYS(page_to_pfn(page));
 	u32 addr;

 	addr = lower_32_bits(phys);
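The same dword bookkeeping applies to the other emitters above; the interesting one is gen7_xcs, where the 32 MI_STORE_DWORD_INDEX workaround stores dominate the size. A sketch of the accounting:

#include <assert.h>

#define GEN7_XCS_WA 32	/* mirrors the #define in the hunk above */

int main(void)
{
	int flush = 3;			/* MI_FLUSH_DW + address + seqno */
	int stores = GEN7_XCS_WA * 3;	/* 32 three-dword index stores */
	int post = 3;			/* trailing MI_FLUSH_DW + 0 + 0 */
	int tail = 2;			/* MI_USER_INTERRUPT + MI_NOOP */
	int gen6_xcs = 3 + 1;		/* store triplet + MI_USER_INTERRUPT */

	assert(flush + stores + post + tail == 8 + GEN7_XCS_WA * 3);	/* 104 */
	assert(gen6_xcs == 4);		/* matches gen6_xcs_emit_breadcrumb_sz */
	return 0;
}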
@@ -393,15 +498,25 @@ static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
 	I915_WRITE(HWS_PGA, addr);
 }

-static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
+static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
+{
+	struct page *page = virt_to_page(engine->status_page.page_addr);
+	phys_addr_t phys = PFN_PHYS(page_to_pfn(page));
+
+	set_hws_pga(engine, phys);
+	set_hwstam(engine, ~0u);
+}
+
+static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	i915_reg_t mmio;
+	i915_reg_t hwsp;

-	/* The ring status page addresses are no longer next to the rest of
+	/*
+	 * The ring status page addresses are no longer next to the rest of
 	 * the ring registers as of gen7.
 	 */
-	if (IS_GEN7(dev_priv)) {
+	if (IS_GEN(dev_priv, 7)) {
 		switch (engine->id) {
 		/*
 		 * No more rings exist on Gen7. Default case is only to shut up
@@ -410,56 +525,55 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
 		default:
 			GEM_BUG_ON(engine->id);
 		case RCS:
-			mmio = RENDER_HWS_PGA_GEN7;
+			hwsp = RENDER_HWS_PGA_GEN7;
 			break;
 		case BCS:
-			mmio = BLT_HWS_PGA_GEN7;
+			hwsp = BLT_HWS_PGA_GEN7;
 			break;
 		case VCS:
-			mmio = BSD_HWS_PGA_GEN7;
+			hwsp = BSD_HWS_PGA_GEN7;
 			break;
 		case VECS:
-			mmio = VEBOX_HWS_PGA_GEN7;
+			hwsp = VEBOX_HWS_PGA_GEN7;
 			break;
 		}
-	} else if (IS_GEN6(dev_priv)) {
-		mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
+	} else if (IS_GEN(dev_priv, 6)) {
+		hwsp = RING_HWS_PGA_GEN6(engine->mmio_base);
 	} else {
-		mmio = RING_HWS_PGA(engine->mmio_base);
+		hwsp = RING_HWS_PGA(engine->mmio_base);
 	}

-	if (INTEL_GEN(dev_priv) >= 6) {
-		u32 mask = ~0u;
+	I915_WRITE(hwsp, offset);
+	POSTING_READ(hwsp);
+}

-		/*
-		 * Keep the render interrupt unmasked as this papers over
-		 * lost interrupts following a reset.
-		 */
-		if (engine->id == RCS)
-			mask &= ~BIT(0);
+static void flush_cs_tlb(struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *dev_priv = engine->i915;
+	i915_reg_t instpm = RING_INSTPM(engine->mmio_base);

-		I915_WRITE(RING_HWSTAM(engine->mmio_base), mask);
-	}
+	if (!IS_GEN_RANGE(dev_priv, 6, 7))
+		return;

-	I915_WRITE(mmio, engine->status_page.ggtt_offset);
-	POSTING_READ(mmio);
+	/* ring should be idle before issuing a sync flush */
+	WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);

-	/* Flush the TLB for this page */
-	if (IS_GEN(dev_priv, 6, 7)) {
-		i915_reg_t reg = RING_INSTPM(engine->mmio_base);
+	I915_WRITE(instpm,
+		   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
+				      INSTPM_SYNC_FLUSH));
+	if (intel_wait_for_register(dev_priv,
+				    instpm, INSTPM_SYNC_FLUSH, 0,
+				    1000))
+		DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
+			  engine->name);
+}

-		/* ring should be idle before issuing a sync flush*/
-		WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
+static void ring_setup_status_page(struct intel_engine_cs *engine)
+{
+	set_hwsp(engine, engine->status_page.ggtt_offset);
+	set_hwstam(engine, ~0u);

-		I915_WRITE(reg,
-			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
-					      INSTPM_SYNC_FLUSH));
-		if (intel_wait_for_register(dev_priv,
-					    reg, INSTPM_SYNC_FLUSH, 0,
-					    1000))
-			DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
-				  engine->name);
-	}
+	flush_cs_tlb(engine);
 }

 static bool stop_ring(struct intel_engine_cs *engine)
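flush_cs_tlb() above is the usual write-then-poll mmio idiom: latch INSTPM_SYNC_FLUSH, then spin until the hardware clears it or a timeout fires. A self-contained model of that loop; the fake register stands in for RING_INSTPM and acks after a few reads, and the bit position is illustrative:

#include <stdint.h>
#include <stdio.h>

#define SYNC_FLUSH (1u << 5)	/* illustrative bit position */

static uint32_t fake_instpm = SYNC_FLUSH;

static uint32_t read_reg(void)
{
	static int reads;

	if (++reads > 3)
		fake_instpm &= ~SYNC_FLUSH;	/* hardware acks the flush */
	return fake_instpm;
}

/* Shape of the intel_wait_for_register(..., INSTPM_SYNC_FLUSH, 0, 1000) call. */
static int wait_for_clear(uint32_t mask, int timeout)
{
	while (timeout-- > 0) {
		if (!(read_reg() & mask))
			return 0;		/* success */
	}
	return -1;				/* the DRM_ERROR path above */
}

int main(void)
{
	printf("%d\n", wait_for_clear(SYNC_FLUSH, 1000));	/* 0 */
	return 0;
}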
@@ -529,17 +643,10 @@ static int init_ring_common(struct intel_engine_cs *engine)
 	if (HWS_NEEDS_PHYSICAL(dev_priv))
 		ring_setup_phys_status_page(engine);
 	else
-		intel_ring_setup_status_page(engine);
+		ring_setup_status_page(engine);

 	intel_engine_reset_breadcrumbs(engine);

-	if (HAS_LEGACY_SEMAPHORES(engine->i915)) {
-		I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
-		I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
-		if (HAS_VEBOX(dev_priv))
-			I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
-	}
-
 	/* Enforce ordering by reading HEAD register back */
 	I915_READ_HEAD(engine);

@@ -603,10 +710,6 @@ out:
 static struct i915_request *reset_prepare(struct intel_engine_cs *engine)
 {
 	intel_engine_stop_cs(engine);
-
-	if (engine->irq_seqno_barrier)
-		engine->irq_seqno_barrier(engine);
-
 	return i915_gem_find_active_request(engine);
 }

@@ -679,7 +782,7 @@ static int init_render_ring(struct intel_engine_cs *engine)
 		return ret;

 	/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
-	if (IS_GEN(dev_priv, 4, 6))
+	if (IS_GEN_RANGE(dev_priv, 4, 6))
 		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));

 	/* We need to disable the AsyncFlip performance optimisations in order
@@ -688,22 +791,22 @@ static int init_render_ring(struct intel_engine_cs *engine)
 	 *
 	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
 	 */
-	if (IS_GEN(dev_priv, 6, 7))
+	if (IS_GEN_RANGE(dev_priv, 6, 7))
 		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));

 	/* Required for the hardware to program scanline values for waiting */
 	/* WaEnableFlushTlbInvalidationMode:snb */
-	if (IS_GEN6(dev_priv))
+	if (IS_GEN(dev_priv, 6))
 		I915_WRITE(GFX_MODE,
 			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));

 	/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
-	if (IS_GEN7(dev_priv))
+	if (IS_GEN(dev_priv, 7))
 		I915_WRITE(GFX_MODE_GEN7,
 			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
 			   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));

-	if (IS_GEN6(dev_priv)) {
+	if (IS_GEN(dev_priv, 6)) {
 		/* From the Sandybridge PRM, volume 1 part 3, page 24:
 		 * "If this bit is set, STCunit will have LRA as replacement
 		 * policy. [...] This bit must be reset. LRA replacement
@@ -713,7 +816,7 @@ static int init_render_ring(struct intel_engine_cs *engine)
 			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
 	}

-	if (IS_GEN(dev_priv, 6, 7))
+	if (IS_GEN_RANGE(dev_priv, 6, 7))
 		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

 	if (INTEL_GEN(dev_priv) >= 6)
@@ -722,33 +825,6 @@ static int init_render_ring(struct intel_engine_cs *engine)
 	return 0;
 }

-static u32 *gen6_signal(struct i915_request *rq, u32 *cs)
-{
-	struct drm_i915_private *dev_priv = rq->i915;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	int num_rings = 0;
-
-	for_each_engine(engine, dev_priv, id) {
-		i915_reg_t mbox_reg;
-
-		if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK))
-			continue;
-
-		mbox_reg = rq->engine->semaphore.mbox.signal[engine->hw_id];
-		if (i915_mmio_reg_valid(mbox_reg)) {
-			*cs++ = MI_LOAD_REGISTER_IMM(1);
-			*cs++ = i915_mmio_reg_offset(mbox_reg);
-			*cs++ = rq->global_seqno;
-			num_rings++;
-		}
-	}
-	if (num_rings & 1)
-		*cs++ = MI_NOOP;
-
-	return cs;
-}
-
 static void cancel_requests(struct intel_engine_cs *engine)
 {
 	struct i915_request *request;
@@ -788,92 +864,41 @@ static void i9xx_submit_request(struct i915_request *request)

 static void i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
 {
+	*cs++ = MI_FLUSH;
+
 	*cs++ = MI_STORE_DWORD_INDEX;
-	*cs++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT;
+	*cs++ = I915_GEM_HWS_INDEX_ADDR;
 	*cs++ = rq->global_seqno;
+
 	*cs++ = MI_USER_INTERRUPT;
+	*cs++ = MI_NOOP;

 	rq->tail = intel_ring_offset(rq, cs);
 	assert_ring_tail_valid(rq->ring, rq->tail);
 }
+static const int i9xx_emit_breadcrumb_sz = 6;

-static const int i9xx_emit_breadcrumb_sz = 4;
-
-static void gen6_sema_emit_breadcrumb(struct i915_request *rq, u32 *cs)
-{
-	return i9xx_emit_breadcrumb(rq, rq->engine->semaphore.signal(rq, cs));
-}
-
-static int
-gen6_ring_sync_to(struct i915_request *rq, struct i915_request *signal)
+#define GEN5_WA_STORES 8 /* must be at least 1! */
+static void gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
 {
-	u32 dw1 = MI_SEMAPHORE_MBOX |
-		  MI_SEMAPHORE_COMPARE |
-		  MI_SEMAPHORE_REGISTER;
-	u32 wait_mbox = signal->engine->semaphore.mbox.wait[rq->engine->hw_id];
-	u32 *cs;
-
-	WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
-
-	cs = intel_ring_begin(rq, 4);
-	if (IS_ERR(cs))
-		return PTR_ERR(cs);
-
-	*cs++ = dw1 | wait_mbox;
-	/* Throughout all of the GEM code, seqno passed implies our current
-	 * seqno is >= the last seqno executed. However for hardware the
-	 * comparison is strictly greater than.
-	 */
-	*cs++ = signal->global_seqno - 1;
-	*cs++ = 0;
-	*cs++ = MI_NOOP;
-	intel_ring_advance(rq, cs);
+	int i;

-	return 0;
-}
+	*cs++ = MI_FLUSH;

-static void
-gen5_seqno_barrier(struct intel_engine_cs *engine)
-{
-	/* MI_STORE are internally buffered by the GPU and not flushed
-	 * either by MI_FLUSH or SyncFlush or any other combination of
-	 * MI commands.
-	 *
-	 * "Only the submission of the store operation is guaranteed.
-	 * The write result will be complete (coherent) some time later
-	 * (this is practically a finite period but there is no guaranteed
-	 * latency)."
-	 *
-	 * Empirically, we observe that we need a delay of at least 75us to
-	 * be sure that the seqno write is visible by the CPU.
-	 */
-	usleep_range(125, 250);
-}
+	BUILD_BUG_ON(GEN5_WA_STORES < 1);
+	for (i = 0; i < GEN5_WA_STORES; i++) {
+		*cs++ = MI_STORE_DWORD_INDEX;
+		*cs++ = I915_GEM_HWS_INDEX_ADDR;
+		*cs++ = rq->global_seqno;
+	}

-static void
-gen6_seqno_barrier(struct intel_engine_cs *engine)
-{
-	struct drm_i915_private *dev_priv = engine->i915;
+	*cs++ = MI_USER_INTERRUPT;

-	/* Workaround to force correct ordering between irq and seqno writes on
-	 * ivb (and maybe also on snb) by reading from a CS register (like
-	 * ACTHD) before reading the status page.
-	 *
-	 * Note that this effectively stalls the read by the time it takes to
-	 * do a memory transaction, which more or less ensures that the write
-	 * from the GPU has sufficient time to invalidate the CPU cacheline.
-	 * Alternatively we could delay the interrupt from the CS ring to give
-	 * the write time to land, but that would incur a delay after every
-	 * batch i.e. much more frequent than a delay when waiting for the
-	 * interrupt (with the same net latency).
-	 *
-	 * Also note that to prevent whole machine hangs on gen7, we have to
-	 * take the spinlock to guard against concurrent cacheline access.
-	 */
-	spin_lock_irq(&dev_priv->uncore.lock);
-	POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
-	spin_unlock_irq(&dev_priv->uncore.lock);
+	rq->tail = intel_ring_offset(rq, cs);
+	assert_ring_tail_valid(rq->ring, rq->tail);
 }
+static const int gen5_emit_breadcrumb_sz = GEN5_WA_STORES * 3 + 2;
+#undef GEN5_WA_STORES

 static void
 gen5_irq_enable(struct intel_engine_cs *engine)
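As with the render-ring emitters, the constants here are straight dword counts of the code above: i9xx writes MI_FLUSH, a three-dword store, MI_USER_INTERRUPT and MI_NOOP (6 total), while gen5 repeats the store GEN5_WA_STORES times to paper over lost seqno writes. A sketch of the arithmetic:

#include <assert.h>

#define GEN5_WA_STORES 8	/* mirrors the #define above */

int main(void)
{
	int i9xx = 1 + 3 + 1 + 1;		/* MI_FLUSH, store, irq, noop */
	int gen5 = 1 + GEN5_WA_STORES * 3 + 1;	/* MI_FLUSH, N stores, irq */

	assert(i9xx == 6);			/* i9xx_emit_breadcrumb_sz */
	assert(gen5 == GEN5_WA_STORES * 3 + 2);	/* gen5_emit_breadcrumb_sz */
	return 0;
}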
@@ -948,6 +973,10 @@ gen6_irq_enable(struct intel_engine_cs *engine)
 	I915_WRITE_IMR(engine,
 		       ~(engine->irq_enable_mask |
 			 engine->irq_keep_mask));
+
+	/* Flush/delay to ensure the RING_IMR is active before the GT IMR */
+	POSTING_READ_FW(RING_IMR(engine->mmio_base));
+
 	gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
 }

@@ -966,6 +995,10 @@ hsw_vebox_irq_enable(struct intel_engine_cs *engine)
 	struct drm_i915_private *dev_priv = engine->i915;

 	I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
+
+	/* Flush/delay to ensure the RING_IMR is active before the GT IMR */
+	POSTING_READ_FW(RING_IMR(engine->mmio_base));
+
 	gen6_unmask_pm_irq(dev_priv, engine->irq_enable_mask);
 }

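Both hunks add the same idiom: an mmio write can sit in a write buffer, so reading the register back (a "posting read") forces it to reach the hardware before the GT-level unmask that follows. A sketch of the idea only, with plain volatile accesses standing in for the real mmio accessors:

#include <stdint.h>

static volatile uint32_t ring_imr, gt_imr;	/* stand-ins for the two registers */

static void ordered_irq_enable(uint32_t mask)
{
	ring_imr = ~mask;	/* I915_WRITE_IMR(engine, ...) */
	(void)ring_imr;		/* POSTING_READ_FW: flush before the next write */
	gt_imr = ~mask;		/* gen5_enable_gt_irq() / gen6_unmask_pm_irq() */
}

int main(void)
{
	ordered_irq_enable(1u << 0);
	return 0;
}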
@@ -1581,10 +1614,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
 	struct intel_engine_cs *engine = rq->engine;
 	enum intel_engine_id id;
 	const int num_rings =
-		/* Use an extended w/a on gen7 if signalling from other rings */
-		(HAS_LEGACY_SEMAPHORES(i915) && IS_GEN7(i915)) ?
-		INTEL_INFO(i915)->num_rings - 1 :
-		0;
+		IS_HSW_GT1(i915) ? RUNTIME_INFO(i915)->num_rings - 1 : 0;
 	bool force_restore = false;
 	int len;
 	u32 *cs;
@@ -1597,7 +1627,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
 		flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN;

 	len = 4;
-	if (IS_GEN7(i915))
+	if (IS_GEN(i915, 7))
 		len += 2 + (num_rings ? 4*num_rings + 6 : 0);
 	if (flags & MI_FORCE_RESTORE) {
 		GEM_BUG_ON(flags & MI_RESTORE_INHIBIT);
@@ -1611,7 +1641,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
 		return PTR_ERR(cs);

 	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
-	if (IS_GEN7(i915)) {
+	if (IS_GEN(i915, 7)) {
 		*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
 		if (num_rings) {
 			struct intel_engine_cs *signaller;
@@ -1658,7 +1688,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
 	 */
 	*cs++ = MI_NOOP;

-	if (IS_GEN7(i915)) {
+	if (IS_GEN(i915, 7)) {
 		if (num_rings) {
 			struct intel_engine_cs *signaller;
 			i915_reg_t last_reg = {}; /* keep gcc quiet */
@@ -1829,17 +1859,19 @@ static int ring_request_alloc(struct i915_request *request)

 	GEM_BUG_ON(!request->hw_context->pin_count);

-	/* Flush enough space to reduce the likelihood of waiting after
+	/*
+	 * Flush enough space to reduce the likelihood of waiting after
 	 * we start building the request - in which case we will just
 	 * have to repeat work.
 	 */
 	request->reserved_space += LEGACY_REQUEST_SIZE;

-	ret = intel_ring_wait_for_space(request->ring, request->reserved_space);
+	ret = switch_context(request);
 	if (ret)
 		return ret;

-	ret = switch_context(request);
+	/* Unconditionally invalidate GPU caches and TLBs. */
+	ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
 	if (ret)
 		return ret;

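After this hunk the request setup runs in a fixed order: reserve breadcrumb headroom, emit the context switch, then unconditionally invalidate caches; the explicit intel_ring_wait_for_space() call disappears because intel_ring_begin() already waits when the ring is full. A sketch of the resulting flow with placeholder helpers; the reserved-space value here is an assumption:

#include <stdio.h>

#define LEGACY_REQUEST_SIZE 200	/* headroom for the breadcrumb (assumed value) */

struct req { int reserved_space; };

static int do_switch_context(struct req *rq) { (void)rq; return 0; }
static int do_emit_invalidate(struct req *rq) { (void)rq; return 0; }

static int request_alloc_flow(struct req *rq)
{
	int ret;

	rq->reserved_space += LEGACY_REQUEST_SIZE;	/* 1: keep room for the tail */

	ret = do_switch_context(rq);			/* 2: may emit commands */
	if (ret)
		return ret;

	return do_emit_invalidate(rq);			/* 3: EMIT_INVALIDATE flush */
}

int main(void)
{
	struct req rq = { 0 };

	printf("%d %d\n", request_alloc_flow(&rq), rq.reserved_space);	/* 0 200 */
	return 0;
}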
@@ -1881,22 +1913,6 @@ static noinline int wait_for_space(struct intel_ring *ring, unsigned int bytes)
 	return 0;
 }

-int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes)
-{
-	GEM_BUG_ON(bytes > ring->effective_size);
-	if (unlikely(bytes > ring->effective_size - ring->emit))
-		bytes += ring->size - ring->emit;
-
-	if (unlikely(bytes > ring->space)) {
-		int ret = wait_for_space(ring, bytes);
-		if (unlikely(ret))
-			return ret;
-	}
-
-	GEM_BUG_ON(ring->space < bytes);
-	return 0;
-}
-
 u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
 {
 	struct intel_ring *ring = rq->ring;
@@ -2129,77 +2145,15 @@ static int gen6_ring_flush(struct i915_request *rq, u32 mode)
 	return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB);
 }

-static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
-				       struct intel_engine_cs *engine)
-{
-	int i;
-
-	if (!HAS_LEGACY_SEMAPHORES(dev_priv))
-		return;
-
-	GEM_BUG_ON(INTEL_GEN(dev_priv) < 6);
-	engine->semaphore.sync_to = gen6_ring_sync_to;
-	engine->semaphore.signal = gen6_signal;
-
-	/*
-	 * The current semaphore is only applied on pre-gen8
-	 * platform. And there is no VCS2 ring on the pre-gen8
-	 * platform. So the semaphore between RCS and VCS2 is
-	 * initialized as INVALID.
-	 */
-	for (i = 0; i < GEN6_NUM_SEMAPHORES; i++) {
-		static const struct {
-			u32 wait_mbox;
-			i915_reg_t mbox_reg;
-		} sem_data[GEN6_NUM_SEMAPHORES][GEN6_NUM_SEMAPHORES] = {
-			[RCS_HW] = {
-				[VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RV,  .mbox_reg = GEN6_VRSYNC },
-				[BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RB,  .mbox_reg = GEN6_BRSYNC },
-				[VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC },
-			},
-			[VCS_HW] = {
-				[RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VR,  .mbox_reg = GEN6_RVSYNC },
-				[BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VB,  .mbox_reg = GEN6_BVSYNC },
-				[VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC },
-			},
-			[BCS_HW] = {
-				[RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BR,  .mbox_reg = GEN6_RBSYNC },
-				[VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BV,  .mbox_reg = GEN6_VBSYNC },
-				[VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC },
-			},
-			[VECS_HW] = {
-				[RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC },
-				[VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC },
-				[BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC },
-			},
-		};
-		u32 wait_mbox;
-		i915_reg_t mbox_reg;
-
-		if (i == engine->hw_id) {
-			wait_mbox = MI_SEMAPHORE_SYNC_INVALID;
-			mbox_reg = GEN6_NOSYNC;
-		} else {
-			wait_mbox = sem_data[engine->hw_id][i].wait_mbox;
-			mbox_reg = sem_data[engine->hw_id][i].mbox_reg;
-		}
-
-		engine->semaphore.mbox.wait[i] = wait_mbox;
-		engine->semaphore.mbox.signal[i] = mbox_reg;
-	}
-}
-
 static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
 				struct intel_engine_cs *engine)
 {
 	if (INTEL_GEN(dev_priv) >= 6) {
 		engine->irq_enable = gen6_irq_enable;
 		engine->irq_disable = gen6_irq_disable;
-		engine->irq_seqno_barrier = gen6_seqno_barrier;
 	} else if (INTEL_GEN(dev_priv) >= 5) {
 		engine->irq_enable = gen5_irq_enable;
 		engine->irq_disable = gen5_irq_disable;
-		engine->irq_seqno_barrier = gen5_seqno_barrier;
 	} else if (INTEL_GEN(dev_priv) >= 3) {
 		engine->irq_enable = i9xx_irq_enable;
 		engine->irq_disable = i9xx_irq_disable;
@@ -2231,7 +2185,6 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
 	GEM_BUG_ON(INTEL_GEN(dev_priv) >= 8);

 	intel_ring_init_irq(dev_priv, engine);
-	intel_ring_init_semaphores(dev_priv, engine);

 	engine->init_hw = init_ring_common;
 	engine->reset.prepare = reset_prepare;
@@ -2243,15 +2196,9 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,

 	engine->emit_breadcrumb = i9xx_emit_breadcrumb;
 	engine->emit_breadcrumb_sz = i9xx_emit_breadcrumb_sz;
-	if (HAS_LEGACY_SEMAPHORES(dev_priv)) {
-		int num_rings;
-
-		engine->emit_breadcrumb = gen6_sema_emit_breadcrumb;
-
-		num_rings = INTEL_INFO(dev_priv)->num_rings - 1;
-		engine->emit_breadcrumb_sz += num_rings * 3;
-		if (num_rings & 1)
-			engine->emit_breadcrumb_sz++;
+	if (IS_GEN(dev_priv, 5)) {
+		engine->emit_breadcrumb = gen5_emit_breadcrumb;
+		engine->emit_breadcrumb_sz = gen5_emit_breadcrumb_sz;
 	}

 	engine->set_default_submission = i9xx_set_default_submission;
@@ -2278,12 +2225,17 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)

 	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;

-	if (INTEL_GEN(dev_priv) >= 6) {
+	if (INTEL_GEN(dev_priv) >= 7) {
 		engine->init_context = intel_rcs_ctx_init;
 		engine->emit_flush = gen7_render_ring_flush;
-		if (IS_GEN6(dev_priv))
-			engine->emit_flush = gen6_render_ring_flush;
-	} else if (IS_GEN5(dev_priv)) {
+		engine->emit_breadcrumb = gen7_rcs_emit_breadcrumb;
+		engine->emit_breadcrumb_sz = gen7_rcs_emit_breadcrumb_sz;
+	} else if (IS_GEN(dev_priv, 6)) {
+		engine->init_context = intel_rcs_ctx_init;
+		engine->emit_flush = gen6_render_ring_flush;
+		engine->emit_breadcrumb = gen6_rcs_emit_breadcrumb;
+		engine->emit_breadcrumb_sz = gen6_rcs_emit_breadcrumb_sz;
+	} else if (IS_GEN(dev_priv, 5)) {
 		engine->emit_flush = gen4_render_ring_flush;
 	} else {
 		if (INTEL_GEN(dev_priv) < 4)
@@ -2313,13 +2265,21 @@ int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)

 	if (INTEL_GEN(dev_priv) >= 6) {
 		/* gen6 bsd needs a special wa for tail updates */
-		if (IS_GEN6(dev_priv))
+		if (IS_GEN(dev_priv, 6))
 			engine->set_default_submission = gen6_bsd_set_default_submission;
 		engine->emit_flush = gen6_bsd_ring_flush;
 		engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
+
+		if (IS_GEN(dev_priv, 6)) {
+			engine->emit_breadcrumb = gen6_xcs_emit_breadcrumb;
+			engine->emit_breadcrumb_sz = gen6_xcs_emit_breadcrumb_sz;
+		} else {
+			engine->emit_breadcrumb = gen7_xcs_emit_breadcrumb;
+			engine->emit_breadcrumb_sz = gen7_xcs_emit_breadcrumb_sz;
+		}
 	} else {
 		engine->emit_flush = bsd_ring_flush;
-		if (IS_GEN5(dev_priv))
+		if (IS_GEN(dev_priv, 5))
 			engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
 		else
 			engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
@@ -2332,11 +2292,21 @@ int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;

+	GEM_BUG_ON(INTEL_GEN(dev_priv) < 6);
+
 	intel_ring_default_vfuncs(dev_priv, engine);

 	engine->emit_flush = gen6_ring_flush;
 	engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;

+	if (IS_GEN(dev_priv, 6)) {
+		engine->emit_breadcrumb = gen6_xcs_emit_breadcrumb;
+		engine->emit_breadcrumb_sz = gen6_xcs_emit_breadcrumb_sz;
+	} else {
+		engine->emit_breadcrumb = gen7_xcs_emit_breadcrumb;
+		engine->emit_breadcrumb_sz = gen7_xcs_emit_breadcrumb_sz;
+	}
+
 	return intel_init_ring_buffer(engine);
 }

@@ -2344,6 +2314,8 @@ int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;

+	GEM_BUG_ON(INTEL_GEN(dev_priv) < 7);
+
 	intel_ring_default_vfuncs(dev_priv, engine);

 	engine->emit_flush = gen6_ring_flush;
@@ -2351,5 +2323,8 @@ int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
 	engine->irq_enable = hsw_vebox_irq_enable;
 	engine->irq_disable = hsw_vebox_irq_disable;

+	engine->emit_breadcrumb = gen7_xcs_emit_breadcrumb;
+	engine->emit_breadcrumb_sz = gen7_xcs_emit_breadcrumb_sz;
+
 	return intel_init_ring_buffer(engine);
 }
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 72edaa7ff411..3c1366c58cf3 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -94,12 +94,12 @@ hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
 #define I915_MAX_SUBSLICES 8

 #define instdone_slice_mask(dev_priv__) \
-	(IS_GEN7(dev_priv__) ? \
-	 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)
+	(IS_GEN(dev_priv__, 7) ? \
+	 1 : RUNTIME_INFO(dev_priv__)->sseu.slice_mask)

 #define instdone_subslice_mask(dev_priv__) \
-	(IS_GEN7(dev_priv__) ? \
-	 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask[0])
+	(IS_GEN(dev_priv__, 7) ? \
+	 1 : RUNTIME_INFO(dev_priv__)->sseu.subslice_mask[0])

 #define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
 	for ((slice__) = 0, (subslice__) = 0; \
@@ -365,9 +365,6 @@ struct intel_engine_cs {
 	struct drm_i915_gem_object *default_state;
 	void *pinned_default_state;

-	unsigned long irq_posted;
-#define ENGINE_IRQ_BREADCRUMB 0
-
 	/* Rather than have every client wait upon all user interrupts,
 	 * with the herd waking after every interrupt and each doing the
 	 * heavyweight seqno dance, we delegate the task (of being the
@@ -501,69 +498,8 @@ struct intel_engine_cs {
 	 */
 	void		(*cancel_requests)(struct intel_engine_cs *engine);

-	/* Some chipsets are not quite as coherent as advertised and need
-	 * an expensive kick to force a true read of the up-to-date seqno.
-	 * However, the up-to-date seqno is not always required and the last
-	 * seen value is good enough. Note that the seqno will always be
-	 * monotonic, even if not coherent.
-	 */
-	void		(*irq_seqno_barrier)(struct intel_engine_cs *engine);
 	void		(*cleanup)(struct intel_engine_cs *engine);

-	/* GEN8 signal/wait table - never trust comments!
-	 *	  signal to	signal to	signal to	signal to	signal to
-	 *	    RCS		   VCS		   BCS		  VECS		 VCS2
-	 *	--------------------------------------------------------------------
-	 *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
-	 *	|-------------------------------------------------------------------
-	 *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
-	 *	|-------------------------------------------------------------------
-	 *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
-	 *	|-------------------------------------------------------------------
-	 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) | NOP (0x90) | VCS2 (0x98) |
-	 *	|-------------------------------------------------------------------
-	 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP (0xc0) |
-	 *	|-------------------------------------------------------------------
-	 *
-	 * Generalization:
-	 *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
-	 *  ie. transpose of g(x, y)
-	 *
-	 *	  sync from	sync from	sync from	sync from	sync from
-	 *	    RCS		   VCS		   BCS		  VECS		 VCS2
-	 *	--------------------------------------------------------------------
-	 *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
-	 *	|-------------------------------------------------------------------
-	 *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
-	 *	|-------------------------------------------------------------------
-	 *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
-	 *	|-------------------------------------------------------------------
-	 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) | NOP (0x90) | VCS2 (0xb8) |
-	 *	|-------------------------------------------------------------------
-	 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) | NOP (0xc0) |
-	 *	|-------------------------------------------------------------------
-	 *
-	 * Generalization:
-	 *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
-	 *  ie. transpose of f(x, y)
-	 */
-	struct {
-#define GEN6_SEMAPHORE_LAST	VECS_HW
-#define GEN6_NUM_SEMAPHORES	(GEN6_SEMAPHORE_LAST + 1)
-#define GEN6_SEMAPHORES_MASK	GENMASK(GEN6_SEMAPHORE_LAST, 0)
-		struct {
-			/* our mbox written by others */
-			u32		wait[GEN6_NUM_SEMAPHORES];
-			/* mboxes this ring signals to */
-			i915_reg_t	signal[GEN6_NUM_SEMAPHORES];
-		} mbox;
-
-		/* AKA wait() */
-		int	(*sync_to)(struct i915_request *rq,
-				   struct i915_request *signal);
-		u32	*(*signal)(struct i915_request *rq, u32 *cs);
-	} semaphore;
-
 	struct intel_engine_execlists execlists;

 	/* Contexts are pinned whilst they are active on the GPU. The last
@@ -808,7 +744,6 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);

 int __must_check intel_ring_cacheline_align(struct i915_request *rq);

-int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes);
 u32 __must_check *intel_ring_begin(struct i915_request *rq, unsigned int n);

 static inline void intel_ring_advance(struct i915_request *rq, u32 *cs)
@@ -889,7 +824,7 @@ intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
 	return tail;
 }

-void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);
+void intel_engine_write_global_seqno(struct intel_engine_cs *engine, u32 seqno);

 void intel_engine_setup_common(struct intel_engine_cs *engine);
 int intel_engine_init_common(struct intel_engine_cs *engine);
@@ -903,6 +838,8 @@ int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
 int intel_engine_stop_cs(struct intel_engine_cs *engine);
 void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine);

+void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask);
+
 u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
 u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);

@@ -947,15 +884,6 @@ static inline bool intel_engine_has_started(struct intel_engine_cs *engine,
 void intel_engine_get_instdone(struct intel_engine_cs *engine,
 			       struct intel_instdone *instdone);

-/*
- * Arbitrary size for largest possible 'add request' sequence. The code paths
- * are complex and variable. Empirical measurement shows that the worst case
- * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
- * we need to allocate double the largest single packet within that emission
- * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW).
- */
-#define MIN_SPACE_FOR_ADD_REQUEST 336
-
 static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
 {
 	return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
@@ -1055,7 +983,7 @@ static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
 }

 static inline u32 *
-gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset)
+gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
 {
 	/* We're using qword write, offset should be aligned to 8 bytes. */
 	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
@@ -1065,8 +993,7 @@ gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset)
 	 * following the batch.
 	 */
 	*cs++ = GFX_OP_PIPE_CONTROL(6);
-	*cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL |
-		PIPE_CONTROL_QW_WRITE;
+	*cs++ = flags | PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_GLOBAL_GTT_IVB;
 	*cs++ = gtt_offset;
 	*cs++ = 0;
 	*cs++ = value;
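With the extra parameter, callers choose the PIPE_CONTROL flush/stall bits while the helper keeps adding QW_WRITE and the GGTT bit itself. A toy re-implementation showing the six-dword packet layout; the opcode and flag encodings below are illustrative constants, not authoritative register definitions:

#include <stdint.h>
#include <stdio.h>

#define GFX_OP_PIPE_CONTROL(n)		(0x7a000000u | ((n) - 2))
#define PIPE_CONTROL_QW_WRITE		(1u << 14)
#define PIPE_CONTROL_GLOBAL_GTT_IVB	(1u << 24)
#define PIPE_CONTROL_CS_STALL		(1u << 20)

static uint32_t *emit_ggtt_write_rcs(uint32_t *cs, uint32_t value,
				     uint32_t gtt_offset, uint32_t flags)
{
	*cs++ = GFX_OP_PIPE_CONTROL(6);
	*cs++ = flags | PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_GLOBAL_GTT_IVB;
	*cs++ = gtt_offset;	/* qword-aligned GGTT address */
	*cs++ = 0;
	*cs++ = value;		/* e.g. rq->global_seqno */
	*cs++ = 0;
	return cs;
}

int main(void)
{
	uint32_t buf[6];

	emit_ggtt_write_rcs(buf, 42, 0x1000, PIPE_CONTROL_CS_STALL);
	printf("dw1 = 0x%08x\n", buf[1]);
	return 0;
}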
@@ -1092,7 +1019,7 @@ gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset)
 	return cs;
 }

-void intel_engines_sanitize(struct drm_i915_private *i915);
+void intel_engines_sanitize(struct drm_i915_private *i915, bool force);

 bool intel_engine_is_idle(struct intel_engine_cs *engine);
 bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 4350a5270423..9e9501f82f06 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -509,7 +509,7 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
 	 * BIOS's own request bits, which are forced-on for these power wells
 	 * when exiting DC5/6.
 	 */
-	if (IS_GEN9(dev_priv) && !IS_GEN9_LP(dev_priv) &&
+	if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
 	    (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
 		val |= I915_READ(regs->bios);

@@ -3058,7 +3058,7 @@ static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
 		 * suspend/resume, so allow it unconditionally.
 		 */
 		mask = DC_STATE_EN_DC9;
-	} else if (IS_GEN10(dev_priv) || IS_GEN9_BC(dev_priv)) {
+	} else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) {
 		max_dc = 2;
 		mask = 0;
 	} else if (IS_GEN9_LP(dev_priv)) {
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 4a03b7f67dd5..df2d830a7405 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -29,7 +29,6 @@
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/export.h>
-#include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_edid.h>
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index d2e003d8f3db..8f3982c03925 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -29,7 +29,6 @@
  * registers; newer ones are much simpler and we can use the new DRM plane
  * support.
  */
-#include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_fourcc.h>
@@ -1087,7 +1086,7 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,

 	dvscntr = DVS_ENABLE | DVS_GAMMA_ENABLE;

-	if (IS_GEN6(dev_priv))
+	if (IS_GEN(dev_priv, 6))
 		dvscntr |= DVS_TRICKLE_FEED_DISABLE;

 	switch (fb->format->format) {
@@ -1983,7 +1982,7 @@ static bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
 	if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
 		return false;

-	if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C)
+	if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C)
 		return false;

 	if (plane_id != PLANE_PRIMARY && plane_id != PLANE_SPRITE0)
@@ -2163,7 +2162,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
 		plane->check_plane = g4x_sprite_check;

 		modifiers = i9xx_plane_format_modifiers;
-		if (IS_GEN6(dev_priv)) {
+		if (IS_GEN(dev_priv, 6)) {
 			formats = snb_plane_formats;
 			num_formats = ARRAY_SIZE(snb_plane_formats);

diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 9bbe35a0f0f2..bd5536f0ec92 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -30,7 +30,6 @@
  * Integrated TV-out support for the 915GM and 945GM.
  */

-#include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_edid.h>
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index b34c318b238d..731b82afe636 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -71,7 +71,7 @@ static int __get_default_guc_log_level(struct drm_i915_private *i915)
 {
 	int guc_log_level;

-	if (!HAS_GUC(i915) || !intel_uc_is_using_guc())
+	if (!HAS_GUC(i915) || !intel_uc_is_using_guc(i915))
 		guc_log_level = GUC_LOG_LEVEL_DISABLED;
 	else if (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
 		 IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
@@ -112,11 +112,11 @@ static void sanitize_options_early(struct drm_i915_private *i915)
112 112
113 DRM_DEBUG_DRIVER("enable_guc=%d (submission:%s huc:%s)\n", 113 DRM_DEBUG_DRIVER("enable_guc=%d (submission:%s huc:%s)\n",
114 i915_modparams.enable_guc, 114 i915_modparams.enable_guc,
115 yesno(intel_uc_is_using_guc_submission()), 115 yesno(intel_uc_is_using_guc_submission(i915)),
116 yesno(intel_uc_is_using_huc())); 116 yesno(intel_uc_is_using_huc(i915)));
117 117
118 /* Verify GuC firmware availability */ 118 /* Verify GuC firmware availability */
119 if (intel_uc_is_using_guc() && !intel_uc_fw_is_selected(guc_fw)) { 119 if (intel_uc_is_using_guc(i915) && !intel_uc_fw_is_selected(guc_fw)) {
120 DRM_WARN("Incompatible option detected: %s=%d, %s!\n", 120 DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
121 "enable_guc", i915_modparams.enable_guc, 121 "enable_guc", i915_modparams.enable_guc,
122 !HAS_GUC(i915) ? "no GuC hardware" : 122 !HAS_GUC(i915) ? "no GuC hardware" :
@@ -124,7 +124,7 @@ static void sanitize_options_early(struct drm_i915_private *i915)
124 } 124 }
125 125
126 /* Verify HuC firmware availability */ 126 /* Verify HuC firmware availability */
127 if (intel_uc_is_using_huc() && !intel_uc_fw_is_selected(huc_fw)) { 127 if (intel_uc_is_using_huc(i915) && !intel_uc_fw_is_selected(huc_fw)) {
128 DRM_WARN("Incompatible option detected: %s=%d, %s!\n", 128 DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
129 "enable_guc", i915_modparams.enable_guc, 129 "enable_guc", i915_modparams.enable_guc,
130 !HAS_HUC(i915) ? "no HuC hardware" : 130 !HAS_HUC(i915) ? "no HuC hardware" :
@@ -136,7 +136,7 @@ static void sanitize_options_early(struct drm_i915_private *i915)
136 i915_modparams.guc_log_level = 136 i915_modparams.guc_log_level =
137 __get_default_guc_log_level(i915); 137 __get_default_guc_log_level(i915);
138 138
139 if (i915_modparams.guc_log_level > 0 && !intel_uc_is_using_guc()) { 139 if (i915_modparams.guc_log_level > 0 && !intel_uc_is_using_guc(i915)) {
140 DRM_WARN("Incompatible option detected: %s=%d, %s!\n", 140 DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
141 "guc_log_level", i915_modparams.guc_log_level, 141 "guc_log_level", i915_modparams.guc_log_level,
142 !HAS_GUC(i915) ? "no GuC hardware" : 142 !HAS_GUC(i915) ? "no GuC hardware" :
@@ -354,7 +354,7 @@ int intel_uc_init_hw(struct drm_i915_private *i915)
354 354
355 /* WaEnableuKernelHeaderValidFix:skl */ 355 /* WaEnableuKernelHeaderValidFix:skl */
356 /* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */ 356 /* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
357 if (IS_GEN9(i915)) 357 if (IS_GEN(i915, 9))
358 attempts = 3; 358 attempts = 3;
359 else 359 else
360 attempts = 1; 360 attempts = 1;
diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h
index 25d73ada74ae..870faf9011b9 100644
--- a/drivers/gpu/drm/i915/intel_uc.h
+++ b/drivers/gpu/drm/i915/intel_uc.h
@@ -41,19 +41,19 @@ void intel_uc_fini(struct drm_i915_private *dev_priv);
 int intel_uc_suspend(struct drm_i915_private *dev_priv);
 int intel_uc_resume(struct drm_i915_private *dev_priv);
 
-static inline bool intel_uc_is_using_guc(void)
+static inline bool intel_uc_is_using_guc(struct drm_i915_private *i915)
 {
 	GEM_BUG_ON(i915_modparams.enable_guc < 0);
 	return i915_modparams.enable_guc > 0;
 }
 
-static inline bool intel_uc_is_using_guc_submission(void)
+static inline bool intel_uc_is_using_guc_submission(struct drm_i915_private *i915)
 {
 	GEM_BUG_ON(i915_modparams.enable_guc < 0);
 	return i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION;
 }
 
-static inline bool intel_uc_is_using_huc(void)
+static inline bool intel_uc_is_using_huc(struct drm_i915_private *i915)
 {
 	GEM_BUG_ON(i915_modparams.enable_guc < 0);
 	return i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC;
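All three helpers above still read the global i915_modparams, so the new i915 argument is unused for now; threading it through every caller (see the intel_uc.c hunks above) looks like groundwork for making the GuC/HuC decisions per-device. A hypothetical end state, purely illustrative (i915->params is an assumed future field, not current code):

    static inline bool intel_uc_is_using_guc(struct drm_i915_private *i915)
    {
            /* hypothetical: a per-device parameter instead of a global */
            return i915->params.enable_guc > 0;
    }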
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.c b/drivers/gpu/drm/i915/intel_uc_fw.c
index fd496416087c..becf05ebae4d 100644
--- a/drivers/gpu/drm/i915/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/intel_uc_fw.c
@@ -46,12 +46,17 @@ void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
 	size_t size;
 	int err;
 
+	if (!uc_fw->path) {
+		dev_info(dev_priv->drm.dev,
+			 "%s: No firmware was defined for %s!\n",
+			 intel_uc_fw_type_repr(uc_fw->type),
+			 intel_platform_name(INTEL_INFO(dev_priv)->platform));
+		return;
+	}
+
 	DRM_DEBUG_DRIVER("%s fw fetch %s\n",
 			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
 
-	if (!uc_fw->path)
-		return;
-
 	uc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING;
 	DRM_DEBUG_DRIVER("%s fw fetch %s\n",
 			 intel_uc_fw_type_repr(uc_fw->type),
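This reorder also fixes a small logging quirk: previously the "fw fetch" debug message was emitted before the !uc_fw->path check, i.e. with a NULL path, and the function then returned silently. The new shape is a guard clause, in miniature (illustrative names, not the driver's exact strings):

    if (!path) {
            dev_info(dev, "no firmware defined\n"); /* say why we bail */
            return;                                 /* before any fetch logging */
    }
    pr_debug("fw fetch %s\n", path);                /* now never NULL */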
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 9289515108c3..fff468f17d2d 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -528,7 +528,7 @@ check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 		ret |= vlv_check_for_unclaimed_mmio(dev_priv);
 
-	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
+	if (IS_GEN_RANGE(dev_priv, 6, 7))
 		ret |= gen6_check_for_fifo_debug(dev_priv);
 
 	return ret;
@@ -556,7 +556,7 @@ static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
 		dev_priv->uncore.funcs.force_wake_get(dev_priv,
 						      restore_forcewake);
 
-	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
+	if (IS_GEN_RANGE(dev_priv, 6, 7))
 		dev_priv->uncore.fifo_count =
 			fifo_free_entries(dev_priv);
 	spin_unlock_irq(&dev_priv->uncore.lock);
@@ -1398,7 +1398,7 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
 	if (INTEL_GEN(dev_priv) <= 5 || intel_vgpu_active(dev_priv))
 		return;
 
-	if (IS_GEN6(dev_priv)) {
+	if (IS_GEN(dev_priv, 6)) {
 		dev_priv->uncore.fw_reset = 0;
 		dev_priv->uncore.fw_set = FORCEWAKE_KERNEL;
 		dev_priv->uncore.fw_clear = 0;
@@ -1437,7 +1437,7 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
 					       FORCEWAKE_MEDIA_VEBOX_GEN11(i),
 					       FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
 		}
-	} else if (IS_GEN10(dev_priv) || IS_GEN9(dev_priv)) {
+	} else if (IS_GEN_RANGE(dev_priv, 9, 10)) {
 		dev_priv->uncore.funcs.force_wake_get =
 			fw_domains_get_with_fallback;
 		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
@@ -1503,7 +1503,7 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
 			fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
 				       FORCEWAKE, FORCEWAKE_ACK);
 		}
-	} else if (IS_GEN6(dev_priv)) {
+	} else if (IS_GEN(dev_priv, 6)) {
 		dev_priv->uncore.funcs.force_wake_get =
 			fw_domains_get_with_thread_status;
 		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
@@ -1567,13 +1567,13 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
 	dev_priv->uncore.pmic_bus_access_nb.notifier_call =
 		i915_pmic_bus_access_notifier;
 
-	if (IS_GEN(dev_priv, 2, 4) || intel_vgpu_active(dev_priv)) {
+	if (IS_GEN_RANGE(dev_priv, 2, 4) || intel_vgpu_active(dev_priv)) {
 		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen2);
 		ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen2);
-	} else if (IS_GEN5(dev_priv)) {
+	} else if (IS_GEN(dev_priv, 5)) {
 		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen5);
 		ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen5);
-	} else if (IS_GEN(dev_priv, 6, 7)) {
+	} else if (IS_GEN_RANGE(dev_priv, 6, 7)) {
 		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen6);
 
 		if (IS_VALLEYVIEW(dev_priv)) {
@@ -1582,7 +1582,7 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
 		} else {
 			ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
 		}
-	} else if (IS_GEN8(dev_priv)) {
+	} else if (IS_GEN(dev_priv, 8)) {
 		if (IS_CHERRYVIEW(dev_priv)) {
 			ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges);
 			ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
@@ -1592,7 +1592,7 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
 			ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen8);
 			ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
 		}
-	} else if (IS_GEN(dev_priv, 9, 10)) {
+	} else if (IS_GEN_RANGE(dev_priv, 9, 10)) {
 		ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
 		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
 		ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
@@ -1931,6 +1931,103 @@ static int gen6_reset_engines(struct drm_i915_private *dev_priv,
 	return gen6_hw_domain_reset(dev_priv, hw_mask);
 }
 
+static u32 gen11_lock_sfc(struct drm_i915_private *dev_priv,
+			  struct intel_engine_cs *engine)
+{
+	u8 vdbox_sfc_access = RUNTIME_INFO(dev_priv)->vdbox_sfc_access;
+	i915_reg_t sfc_forced_lock, sfc_forced_lock_ack;
+	u32 sfc_forced_lock_bit, sfc_forced_lock_ack_bit;
+	i915_reg_t sfc_usage;
+	u32 sfc_usage_bit;
+	u32 sfc_reset_bit;
+
+	switch (engine->class) {
+	case VIDEO_DECODE_CLASS:
+		if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
+			return 0;
+
+		sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
+		sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
+
+		sfc_forced_lock_ack = GEN11_VCS_SFC_LOCK_STATUS(engine);
+		sfc_forced_lock_ack_bit = GEN11_VCS_SFC_LOCK_ACK_BIT;
+
+		sfc_usage = GEN11_VCS_SFC_LOCK_STATUS(engine);
+		sfc_usage_bit = GEN11_VCS_SFC_USAGE_BIT;
+		sfc_reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance);
+		break;
+
+	case VIDEO_ENHANCEMENT_CLASS:
+		sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
+		sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
+
+		sfc_forced_lock_ack = GEN11_VECS_SFC_LOCK_ACK(engine);
+		sfc_forced_lock_ack_bit = GEN11_VECS_SFC_LOCK_ACK_BIT;
+
+		sfc_usage = GEN11_VECS_SFC_USAGE(engine);
+		sfc_usage_bit = GEN11_VECS_SFC_USAGE_BIT;
+		sfc_reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance);
+		break;
+
+	default:
+		return 0;
+	}
+
+	/*
+	 * Tell the engine that a software reset is going to happen. The engine
+	 * will then try to force lock the SFC (if currently locked, it will
+	 * remain so until we tell the engine it is safe to unlock; if currently
+	 * unlocked, it will ignore this and all new lock requests). If SFC
+	 * ends up being locked to the engine we want to reset, we have to reset
+	 * it as well (we will unlock it once the reset sequence is completed).
+	 */
+	I915_WRITE_FW(sfc_forced_lock,
+		      I915_READ_FW(sfc_forced_lock) | sfc_forced_lock_bit);
+
+	if (__intel_wait_for_register_fw(dev_priv,
+					 sfc_forced_lock_ack,
+					 sfc_forced_lock_ack_bit,
+					 sfc_forced_lock_ack_bit,
+					 1000, 0, NULL)) {
+		DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n");
+		return 0;
+	}
+
+	if (I915_READ_FW(sfc_usage) & sfc_usage_bit)
+		return sfc_reset_bit;
+
+	return 0;
+}
+
+static void gen11_unlock_sfc(struct drm_i915_private *dev_priv,
+			     struct intel_engine_cs *engine)
+{
+	u8 vdbox_sfc_access = RUNTIME_INFO(dev_priv)->vdbox_sfc_access;
+	i915_reg_t sfc_forced_lock;
+	u32 sfc_forced_lock_bit;
+
+	switch (engine->class) {
+	case VIDEO_DECODE_CLASS:
+		if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
+			return;
+
+		sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
+		sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
+		break;
+
+	case VIDEO_ENHANCEMENT_CLASS:
+		sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
+		sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
+		break;
+
+	default:
+		return;
+	}
+
+	I915_WRITE_FW(sfc_forced_lock,
+		      I915_READ_FW(sfc_forced_lock) & ~sfc_forced_lock_bit);
+}
+
 /**
  * gen11_reset_engines - reset individual engines
  * @dev_priv: i915 device
@@ -1947,7 +2044,6 @@ static int gen6_reset_engines(struct drm_i915_private *dev_priv,
 static int gen11_reset_engines(struct drm_i915_private *dev_priv,
 			       unsigned int engine_mask)
 {
-	struct intel_engine_cs *engine;
 	const u32 hw_engine_mask[I915_NUM_ENGINES] = {
 		[RCS] = GEN11_GRDOM_RENDER,
 		[BCS] = GEN11_GRDOM_BLT,
@@ -1958,21 +2054,30 @@ static int gen11_reset_engines(struct drm_i915_private *dev_priv,
 		[VECS] = GEN11_GRDOM_VECS,
 		[VECS2] = GEN11_GRDOM_VECS2,
 	};
+	struct intel_engine_cs *engine;
+	unsigned int tmp;
 	u32 hw_mask;
+	int ret;
 
 	BUILD_BUG_ON(VECS2 + 1 != I915_NUM_ENGINES);
 
 	if (engine_mask == ALL_ENGINES) {
 		hw_mask = GEN11_GRDOM_FULL;
 	} else {
-		unsigned int tmp;
-
 		hw_mask = 0;
-		for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
+		for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
 			hw_mask |= hw_engine_mask[engine->id];
+			hw_mask |= gen11_lock_sfc(dev_priv, engine);
+		}
 	}
 
-	return gen6_hw_domain_reset(dev_priv, hw_mask);
+	ret = gen6_hw_domain_reset(dev_priv, hw_mask);
+
+	if (engine_mask != ALL_ENGINES)
+		for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
+			gen11_unlock_sfc(dev_priv, engine);
+
+	return ret;
 }
 
 /**
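Taken together, the three hunks above make gen11 engine resets SFC-aware: each engine selected for reset first force-locks its attached SFC, and if that unit is actually in use its reset bit is folded into the same gen6_hw_domain_reset() request; the unlock pass runs only for per-engine resets, since GEN11_GRDOM_FULL already covers everything. Condensed from the diff above (not new code):

    hw_mask = 0;
    for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
            hw_mask |= hw_engine_mask[engine->id];
            hw_mask |= gen11_lock_sfc(dev_priv, engine); /* 0 or the SFC reset bit */
    }
    ret = gen6_hw_domain_reset(dev_priv, hw_mask);
    for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
            gen11_unlock_sfc(dev_priv, engine);          /* clear the forced lock */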
@@ -2173,7 +2278,7 @@ static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
 		return gen8_reset_engines;
 	else if (INTEL_GEN(dev_priv) >= 6)
 		return gen6_reset_engines;
-	else if (IS_GEN5(dev_priv))
+	else if (IS_GEN(dev_priv, 5))
 		return ironlake_do_reset;
 	else if (IS_G4X(dev_priv))
 		return g4x_do_reset;
@@ -2256,7 +2361,7 @@ bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
 
 bool intel_has_reset_engine(struct drm_i915_private *dev_priv)
 {
-	return (dev_priv->info.has_reset_engine &&
+	return (INTEL_INFO(dev_priv)->has_reset_engine &&
 		i915_modparams.reset >= 2);
 }
 
@@ -2321,7 +2426,7 @@ intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
 	} else if (INTEL_GEN(dev_priv) >= 6) {
 		fw_domains = __gen6_reg_read_fw_domains(offset);
 	} else {
-		WARN_ON(!IS_GEN(dev_priv, 2, 5));
+		WARN_ON(!IS_GEN_RANGE(dev_priv, 2, 5));
 		fw_domains = 0;
 	}
 
@@ -2341,12 +2446,12 @@ intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
 		fw_domains = __gen11_fwtable_reg_write_fw_domains(offset);
 	} else if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) {
 		fw_domains = __fwtable_reg_write_fw_domains(offset);
-	} else if (IS_GEN8(dev_priv)) {
+	} else if (IS_GEN(dev_priv, 8)) {
 		fw_domains = __gen8_reg_write_fw_domains(offset);
-	} else if (IS_GEN(dev_priv, 6, 7)) {
+	} else if (IS_GEN_RANGE(dev_priv, 6, 7)) {
 		fw_domains = FORCEWAKE_RENDER;
 	} else {
-		WARN_ON(!IS_GEN(dev_priv, 2, 5));
+		WARN_ON(!IS_GEN_RANGE(dev_priv, 2, 5));
 		fw_domains = 0;
 	}
 
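The range form IS_GEN(dev_priv, a, b) becomes IS_GEN_RANGE(), so IS_GEN() is no longer overloaded between exact-match and range semantics depending on argument count. A companion to the IS_GEN sketch earlier, again illustrative rather than authoritative:

    /* Sketch, under the same gen_mask assumption as before. */
    #define IS_GEN_RANGE(dev_priv, s, e) \
            (!!(INTEL_INFO(dev_priv)->gen_mask & GENMASK((e), (s))))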
diff --git a/drivers/gpu/drm/i915/intel_vdsc.c b/drivers/gpu/drm/i915/intel_vdsc.c
index c56ba0e04044..48537827616f 100644
--- a/drivers/gpu/drm/i915/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/intel_vdsc.c
@@ -6,7 +6,6 @@
  *	Manasi Navare <manasi.d.navare@intel.com>
  */
 
-#include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 #include "intel_drv.h"
diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c
index 92cb82dd0c07..f82a415ea2ba 100644
--- a/drivers/gpu/drm/i915/intel_wopcm.c
+++ b/drivers/gpu/drm/i915/intel_wopcm.c
@@ -130,11 +130,11 @@ static inline int check_hw_restriction(struct drm_i915_private *i915,
 {
 	int err = 0;
 
-	if (IS_GEN9(i915))
+	if (IS_GEN(i915, 9))
 		err = gen9_check_dword_gap(guc_wopcm_base, guc_wopcm_size);
 
 	if (!err &&
-	    (IS_GEN9(i915) || IS_CNL_REVID(i915, CNL_REVID_A0, CNL_REVID_A0)))
+	    (IS_GEN(i915, 9) || IS_CNL_REVID(i915, CNL_REVID_A0, CNL_REVID_A0)))
 		err = gen9_check_huc_fw_fits(guc_wopcm_size, huc_fw_size);
 
 	return err;
@@ -163,7 +163,7 @@ int intel_wopcm_init(struct intel_wopcm *wopcm)
 	u32 guc_wopcm_rsvd;
 	int err;
 
-	if (!USES_GUC(dev_priv))
+	if (!USES_GUC(i915))
 		return 0;
 
 	GEM_BUG_ON(!wopcm->size);
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index 4f41e326f3f3..3210ad4e08f7 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -366,7 +366,7 @@ static void skl_tune_iz_hashing(struct intel_engine_cs *engine)
 		 * Only consider slices where one, and only one, subslice has 7
 		 * EUs
 		 */
-		if (!is_power_of_2(INTEL_INFO(i915)->sseu.subslice_7eu[i]))
+		if (!is_power_of_2(RUNTIME_INFO(i915)->sseu.subslice_7eu[i]))
 			continue;
 
 		/*
@@ -375,7 +375,7 @@ static void skl_tune_iz_hashing(struct intel_engine_cs *engine)
 		 *
 		 * -> 0 <= ss <= 3;
 		 */
-		ss = ffs(INTEL_INFO(i915)->sseu.subslice_7eu[i]) - 1;
+		ss = ffs(RUNTIME_INFO(i915)->sseu.subslice_7eu[i]) - 1;
 		vals[i] = 3 - ss;
 	}
 
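Here, and in several hunks below (num_rings, vdbox_sfc_access), fields move from INTEL_INFO() to RUNTIME_INFO(). The split, roughly: INTEL_INFO() is the const per-platform table selected by PCI ID, while RUNTIME_INFO() holds what is probed or fused off on the actual part. An invented field subset just to show the shape (the real structs live in the device-info headers):

    struct intel_device_info {        /* const, selected by PCI ID */
            u32 gen_mask;
            /* static platform flags */
    };

    struct intel_runtime_info {       /* filled in at driver probe */
            u8 num_rings;
            u8 vdbox_sfc_access;
            struct sseu_dev_info sseu; /* may be partly fused off */
    };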
@@ -639,10 +639,9 @@ wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
 	wa_write_masked_or(wal, reg, val, val);
 }
 
-static void gen9_gt_workarounds_init(struct drm_i915_private *i915)
+static void
+gen9_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 {
-	struct i915_wa_list *wal = &i915->gt_wa_list;
-
 	/* WaDisableKillLogic:bxt,skl,kbl */
 	if (!IS_COFFEELAKE(i915))
 		wa_write_or(wal,
@@ -666,11 +665,10 @@ static void gen9_gt_workarounds_init(struct drm_i915_private *i915)
 			    BDW_DISABLE_HDC_INVALIDATION);
 }
 
-static void skl_gt_workarounds_init(struct drm_i915_private *i915)
+static void
+skl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 {
-	struct i915_wa_list *wal = &i915->gt_wa_list;
-
-	gen9_gt_workarounds_init(i915);
+	gen9_gt_workarounds_init(i915, wal);
 
 	/* WaDisableGafsUnitClkGating:skl */
 	wa_write_or(wal,
@@ -684,11 +682,10 @@ static void skl_gt_workarounds_init(struct drm_i915_private *i915)
 		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
-static void bxt_gt_workarounds_init(struct drm_i915_private *i915)
+static void
+bxt_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 {
-	struct i915_wa_list *wal = &i915->gt_wa_list;
-
-	gen9_gt_workarounds_init(i915);
+	gen9_gt_workarounds_init(i915, wal);
 
 	/* WaInPlaceDecompressionHang:bxt */
 	wa_write_or(wal,
@@ -696,11 +693,10 @@ static void bxt_gt_workarounds_init(struct drm_i915_private *i915)
 		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
-static void kbl_gt_workarounds_init(struct drm_i915_private *i915)
+static void
+kbl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 {
-	struct i915_wa_list *wal = &i915->gt_wa_list;
-
-	gen9_gt_workarounds_init(i915);
+	gen9_gt_workarounds_init(i915, wal);
 
 	/* WaDisableDynamicCreditSharing:kbl */
 	if (IS_KBL_REVID(i915, 0, KBL_REVID_B0))
@@ -719,16 +715,16 @@ static void kbl_gt_workarounds_init(struct drm_i915_private *i915)
 		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
-static void glk_gt_workarounds_init(struct drm_i915_private *i915)
+static void
+glk_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 {
-	gen9_gt_workarounds_init(i915);
+	gen9_gt_workarounds_init(i915, wal);
 }
 
-static void cfl_gt_workarounds_init(struct drm_i915_private *i915)
+static void
+cfl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 {
-	struct i915_wa_list *wal = &i915->gt_wa_list;
-
-	gen9_gt_workarounds_init(i915);
+	gen9_gt_workarounds_init(i915, wal);
 
 	/* WaDisableGafsUnitClkGating:cfl */
 	wa_write_or(wal,
@@ -741,10 +737,10 @@ static void cfl_gt_workarounds_init(struct drm_i915_private *i915)
 		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
-static void wa_init_mcr(struct drm_i915_private *dev_priv)
+static void
+wa_init_mcr(struct drm_i915_private *dev_priv, struct i915_wa_list *wal)
 {
-	const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu);
-	struct i915_wa_list *wal = &dev_priv->gt_wa_list;
+	const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
 	u32 mcr_slice_subslice_mask;
 
 	/*
@@ -804,11 +800,10 @@ static void wa_init_mcr(struct drm_i915_private *dev_priv)
 			   intel_calculate_mcr_s_ss_select(dev_priv));
 }
 
-static void cnl_gt_workarounds_init(struct drm_i915_private *i915)
+static void
+cnl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 {
-	struct i915_wa_list *wal = &i915->gt_wa_list;
-
-	wa_init_mcr(i915);
+	wa_init_mcr(i915, wal);
 
 	/* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
 	if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
@@ -822,11 +817,10 @@ static void cnl_gt_workarounds_init(struct drm_i915_private *i915)
 		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
-static void icl_gt_workarounds_init(struct drm_i915_private *i915)
+static void
+icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 {
-	struct i915_wa_list *wal = &i915->gt_wa_list;
-
-	wa_init_mcr(i915);
+	wa_init_mcr(i915, wal);
 
 	/* WaInPlaceDecompressionHang:icl */
 	wa_write_or(wal,
@@ -879,12 +873,9 @@ static void icl_gt_workarounds_init(struct drm_i915_private *i915)
 		    GAMT_CHKN_DISABLE_L3_COH_PIPE);
 }
 
-void intel_gt_init_workarounds(struct drm_i915_private *i915)
+static void
+gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal)
 {
-	struct i915_wa_list *wal = &i915->gt_wa_list;
-
-	wa_init_start(wal, "GT");
-
 	if (INTEL_GEN(i915) < 8)
 		return;
 	else if (IS_BROADWELL(i915))
@@ -892,22 +883,29 @@ void intel_gt_init_workarounds(struct drm_i915_private *i915)
 	else if (IS_CHERRYVIEW(i915))
 		return;
 	else if (IS_SKYLAKE(i915))
-		skl_gt_workarounds_init(i915);
+		skl_gt_workarounds_init(i915, wal);
 	else if (IS_BROXTON(i915))
-		bxt_gt_workarounds_init(i915);
+		bxt_gt_workarounds_init(i915, wal);
 	else if (IS_KABYLAKE(i915))
-		kbl_gt_workarounds_init(i915);
+		kbl_gt_workarounds_init(i915, wal);
 	else if (IS_GEMINILAKE(i915))
-		glk_gt_workarounds_init(i915);
+		glk_gt_workarounds_init(i915, wal);
 	else if (IS_COFFEELAKE(i915))
-		cfl_gt_workarounds_init(i915);
+		cfl_gt_workarounds_init(i915, wal);
 	else if (IS_CANNONLAKE(i915))
-		cnl_gt_workarounds_init(i915);
+		cnl_gt_workarounds_init(i915, wal);
 	else if (IS_ICELAKE(i915))
-		icl_gt_workarounds_init(i915);
+		icl_gt_workarounds_init(i915, wal);
 	else
 		MISSING_CASE(INTEL_GEN(i915));
+}
 
+void intel_gt_init_workarounds(struct drm_i915_private *i915)
+{
+	struct i915_wa_list *wal = &i915->gt_wa_list;
+
+	wa_init_start(wal, "GT");
+	gt_init_workarounds(i915, wal);
 	wa_init_finish(wal);
 }
 
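Passing the struct i915_wa_list explicitly, instead of having each *_gt_workarounds_init() reach for &i915->gt_wa_list itself, separates building a list from owning it. That is what the selftest hunks at the end of this patch rely on to construct independent reference lists:

    /* What the new shape permits (mirrors reference_lists_init() below): */
    struct i915_wa_list ref = {};

    wa_init_start(&ref, "GT_REF");
    gt_init_workarounds(i915, &ref);  /* same builder, caller-owned list */
    wa_init_finish(&ref);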
@@ -955,8 +953,6 @@ wa_list_apply(struct drm_i915_private *dev_priv, const struct i915_wa_list *wal)
 
 	intel_uncore_forcewake_put__locked(dev_priv, fw);
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
-
-	DRM_DEBUG_DRIVER("Applied %u %s workarounds\n", wal->count, wal->name);
 }
 
 void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv)
@@ -1126,14 +1122,12 @@ void intel_engine_apply_whitelist(struct intel_engine_cs *engine)
 	for (; i < RING_MAX_NONPRIV_SLOTS; i++)
 		I915_WRITE(RING_FORCE_TO_NONPRIV(base, i),
 			   i915_mmio_reg_offset(RING_NOPID(base)));
-
-	DRM_DEBUG_DRIVER("Applied %u %s workarounds\n", wal->count, wal->name);
 }
 
-static void rcs_engine_wa_init(struct intel_engine_cs *engine)
+static void
+rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
 {
 	struct drm_i915_private *i915 = engine->i915;
-	struct i915_wa_list *wal = &engine->wa_list;
 
 	if (IS_ICELAKE(i915)) {
 		/* This is not an Wa. Enable for better image quality */
@@ -1190,7 +1184,7 @@ static void rcs_engine_wa_init(struct intel_engine_cs *engine)
 			     GEN7_DISABLE_SAMPLER_PREFETCH);
 	}
 
-	if (IS_GEN9(i915) || IS_CANNONLAKE(i915)) {
+	if (IS_GEN(i915, 9) || IS_CANNONLAKE(i915)) {
 		/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,cnl */
 		wa_masked_en(wal,
 			     GEN7_FF_SLICE_CS_CHICKEN1,
@@ -1211,7 +1205,7 @@ static void rcs_engine_wa_init(struct intel_engine_cs *engine)
 			     GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
 	}
 
-	if (IS_GEN9(i915)) {
+	if (IS_GEN(i915, 9)) {
 		/* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
 		wa_masked_en(wal,
 			     GEN9_CSFE_CHICKEN1_RCS,
@@ -1237,10 +1231,10 @@ static void rcs_engine_wa_init(struct intel_engine_cs *engine)
 	}
 }
 
-static void xcs_engine_wa_init(struct intel_engine_cs *engine)
+static void
+xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
 {
 	struct drm_i915_private *i915 = engine->i915;
-	struct i915_wa_list *wal = &engine->wa_list;
 
 	/* WaKBLVECSSemaphoreWaitPoll:kbl */
 	if (IS_KBL_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) {
@@ -1250,6 +1244,18 @@ static void xcs_engine_wa_init(struct intel_engine_cs *engine)
 	}
 }
 
+static void
+engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal)
+{
+	if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 8))
+		return;
+
+	if (engine->id == RCS)
+		rcs_engine_wa_init(engine, wal);
+	else
+		xcs_engine_wa_init(engine, wal);
+}
+
 void intel_engine_init_workarounds(struct intel_engine_cs *engine)
 {
 	struct i915_wa_list *wal = &engine->wa_list;
@@ -1258,12 +1264,7 @@ void intel_engine_init_workarounds(struct intel_engine_cs *engine)
 		return;
 
 	wa_init_start(wal, engine->name);
-
-	if (engine->id == RCS)
-		rcs_engine_wa_init(engine);
-	else
-		xcs_engine_wa_init(engine);
-
+	engine_init_workarounds(engine, wal);
 	wa_init_finish(wal);
 }
 
@@ -1273,11 +1274,5 @@ void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-static bool intel_engine_verify_workarounds(struct intel_engine_cs *engine,
-					    const char *from)
-{
-	return wa_list_verify(engine->i915, &engine->wa_list, from);
-}
-
 #include "selftests/intel_workarounds.c"
 #endif
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index 26c065c8d2c0..6c10734e948d 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -972,7 +972,6 @@ static int gpu_write(struct i915_vma *vma,
 {
 	struct i915_request *rq;
 	struct i915_vma *batch;
-	int flags = 0;
 	int err;
 
 	GEM_BUG_ON(!intel_engine_can_store_dword(engine));
@@ -981,14 +980,14 @@ static int gpu_write(struct i915_vma *vma,
 	if (err)
 		return err;
 
-	rq = i915_request_alloc(engine, ctx);
-	if (IS_ERR(rq))
-		return PTR_ERR(rq);
-
 	batch = gpu_write_dw(vma, dword * sizeof(u32), value);
-	if (IS_ERR(batch)) {
-		err = PTR_ERR(batch);
-		goto err_request;
+	if (IS_ERR(batch))
+		return PTR_ERR(batch);
+
+	rq = i915_request_alloc(engine, ctx);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto err_batch;
 	}
 
 	err = i915_vma_move_to_active(batch, rq, 0);
@@ -996,21 +995,21 @@ static int gpu_write(struct i915_vma *vma,
 		goto err_request;
 
 	i915_gem_object_set_active_reference(batch->obj);
-	i915_vma_unpin(batch);
-	i915_vma_close(batch);
 
-	err = engine->emit_bb_start(rq,
-				    batch->node.start, batch->node.size,
-				    flags);
+	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 	if (err)
 		goto err_request;
 
-	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	err = engine->emit_bb_start(rq,
+				    batch->node.start, batch->node.size,
+				    0);
+err_request:
 	if (err)
 		i915_request_skip(rq, err);
-
-err_request:
 	i915_request_add(rq);
+err_batch:
+	i915_vma_unpin(batch);
+	i915_vma_close(batch);
 
 	return err;
 }
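The gpu_write() reshuffle is an error-path fix: the batch is now created before the request, teardown runs in reverse order of setup, and a request that was successfully allocated is always submitted, poisoned first on failure. That last rule is the key idiom, as the diff above shows:

    err_request:
            if (err)
                    i915_request_skip(rq, err); /* poison the payload */
            i915_request_add(rq);               /* but always submit it */
    err_batch:
            i915_vma_unpin(batch);
            i915_vma_close(batch);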
@@ -1703,7 +1702,6 @@ int i915_gem_huge_page_mock_selftests(void)
 	};
 	struct drm_i915_private *dev_priv;
 	struct i915_hw_ppgtt *ppgtt;
-	struct pci_dev *pdev;
 	int err;
 
 	dev_priv = mock_gem_device();
@@ -1713,9 +1711,6 @@ int i915_gem_huge_page_mock_selftests(void)
 	/* Pretend to be a device which supports the 48b PPGTT */
 	mkwrite_device_info(dev_priv)->ppgtt = INTEL_PPGTT_FULL_4LVL;
 
-	pdev = dev_priv->drm.pdev;
-	dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(39));
-
 	mutex_lock(&dev_priv->drm.struct_mutex);
 	ppgtt = i915_ppgtt_create(dev_priv, ERR_PTR(-ENODEV));
 	if (IS_ERR(ppgtt)) {
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index d0aa19d17653..bdcc53e15e75 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -121,7 +121,7 @@ static void pm_resume(struct drm_i915_private *i915)
 	 */
 	intel_runtime_pm_get(i915);
 
-	intel_engines_sanitize(i915);
+	intel_engines_sanitize(i915, false);
 	i915_gem_sanitize(i915);
 	i915_gem_resume(i915);
 
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index 7d82043aff10..d00cdf3c2939 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -627,7 +627,7 @@ static int igt_ctx_exec(void *arg)
 		ncontexts++;
 	}
 	pr_info("Submitted %lu contexts (across %u engines), filling %lu dwords\n",
-		ncontexts, INTEL_INFO(i915)->num_rings, ndwords);
+		ncontexts, RUNTIME_INFO(i915)->num_rings, ndwords);
 
 	dw = 0;
 	list_for_each_entry(obj, &objects, st_link) {
@@ -732,7 +732,7 @@ static int igt_ctx_readonly(void *arg)
 		}
 	}
 	pr_info("Submitted %lu dwords (across %u engines)\n",
-		ndwords, INTEL_INFO(i915)->num_rings);
+		ndwords, RUNTIME_INFO(i915)->num_rings);
 
 	dw = 0;
 	list_for_each_entry(obj, &objects, st_link) {
@@ -1064,7 +1064,7 @@ static int igt_vm_isolation(void *arg)
 		count += this;
 	}
 	pr_info("Checked %lu scratch offsets across %d engines\n",
-		count, INTEL_INFO(i915)->num_rings);
+		count, RUNTIME_INFO(i915)->num_rings);
 
 out_rpm:
 	intel_runtime_pm_put(i915);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
index c3999dd2021e..be7ecb66ad11 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
@@ -238,6 +238,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
 	u32 *cpu;
 
 	GEM_BUG_ON(view.partial.size > nreal);
+	cond_resched();
 
 	err = i915_gem_object_set_to_gtt_domain(obj, true);
 	if (err) {
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index 8cd34f6e6859..0e70df0230b8 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -68,48 +68,65 @@ static u64 hws_address(const struct i915_vma *hws,
 	return hws->node.start + seqno_offset(rq->fence.context);
 }
 
-static int emit_recurse_batch(struct igt_spinner *spin,
-			      struct i915_request *rq,
-			      u32 arbitration_command)
+static int move_to_active(struct i915_vma *vma,
+			  struct i915_request *rq,
+			  unsigned int flags)
 {
-	struct i915_address_space *vm = &rq->gem_context->ppgtt->vm;
+	int err;
+
+	err = i915_vma_move_to_active(vma, rq, flags);
+	if (err)
+		return err;
+
+	if (!i915_gem_object_has_active_reference(vma->obj)) {
+		i915_gem_object_get(vma->obj);
+		i915_gem_object_set_active_reference(vma->obj);
+	}
+
+	return 0;
+}
+
+struct i915_request *
+igt_spinner_create_request(struct igt_spinner *spin,
+			   struct i915_gem_context *ctx,
+			   struct intel_engine_cs *engine,
+			   u32 arbitration_command)
+{
+	struct i915_address_space *vm = &ctx->ppgtt->vm;
+	struct i915_request *rq = NULL;
 	struct i915_vma *hws, *vma;
 	u32 *batch;
 	int err;
 
 	vma = i915_vma_instance(spin->obj, vm, NULL);
 	if (IS_ERR(vma))
-		return PTR_ERR(vma);
+		return ERR_CAST(vma);
 
 	hws = i915_vma_instance(spin->hws, vm, NULL);
 	if (IS_ERR(hws))
-		return PTR_ERR(hws);
+		return ERR_CAST(hws);
 
 	err = i915_vma_pin(vma, 0, 0, PIN_USER);
 	if (err)
-		return err;
+		return ERR_PTR(err);
 
 	err = i915_vma_pin(hws, 0, 0, PIN_USER);
 	if (err)
 		goto unpin_vma;
 
-	err = i915_vma_move_to_active(vma, rq, 0);
-	if (err)
+	rq = i915_request_alloc(engine, ctx);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
 		goto unpin_hws;
-
-	if (!i915_gem_object_has_active_reference(vma->obj)) {
-		i915_gem_object_get(vma->obj);
-		i915_gem_object_set_active_reference(vma->obj);
 	}
 
-	err = i915_vma_move_to_active(hws, rq, 0);
+	err = move_to_active(vma, rq, 0);
 	if (err)
-		goto unpin_hws;
+		goto cancel_rq;
 
-	if (!i915_gem_object_has_active_reference(hws->obj)) {
-		i915_gem_object_get(hws->obj);
-		i915_gem_object_set_active_reference(hws->obj);
-	}
+	err = move_to_active(hws, rq, 0);
+	if (err)
+		goto cancel_rq;
 
 	batch = spin->batch;
 
@@ -127,35 +144,18 @@ static int emit_recurse_batch(struct igt_spinner *spin,
 
 	i915_gem_chipset_flush(spin->i915);
 
-	err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
+	err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
 
+cancel_rq:
+	if (err) {
+		i915_request_skip(rq, err);
+		i915_request_add(rq);
+	}
 unpin_hws:
 	i915_vma_unpin(hws);
 unpin_vma:
 	i915_vma_unpin(vma);
-	return err;
-}
-
-struct i915_request *
-igt_spinner_create_request(struct igt_spinner *spin,
-			   struct i915_gem_context *ctx,
-			   struct intel_engine_cs *engine,
-			   u32 arbitration_command)
-{
-	struct i915_request *rq;
-	int err;
-
-	rq = i915_request_alloc(engine, ctx);
-	if (IS_ERR(rq))
-		return rq;
-
-	err = emit_recurse_batch(spin, rq, arbitration_command);
-	if (err) {
-		i915_request_add(rq);
-		return ERR_PTR(err);
-	}
-
-	return rq;
+	return err ? ERR_PTR(err) : rq;
 }
 
 static u32
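Folding emit_recurse_batch() into igt_spinner_create_request() lets one function own the whole request lifecycle, so an error after i915_request_alloc() can be cancelled on the spot (the cancel_rq label above) instead of being signalled back to a caller. A side effect visible throughout: returning struct i915_request * instead of int changes how errors are carried:

    vma = i915_vma_instance(spin->obj, vm, NULL);
    if (IS_ERR(vma))
            return ERR_CAST(vma);  /* forward an error held in another pointer type */

    err = i915_vma_pin(vma, 0, 0, PIN_USER);
    if (err)
            return ERR_PTR(err);   /* wrap a plain errno as an error pointer */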
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index 5910da3e7d79..0aadbd9c7d56 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -103,52 +103,87 @@ static u64 hws_address(const struct i915_vma *hws,
 	return hws->node.start + offset_in_page(sizeof(u32)*rq->fence.context);
 }
 
-static int emit_recurse_batch(struct hang *h,
-			      struct i915_request *rq)
+static int move_to_active(struct i915_vma *vma,
+			  struct i915_request *rq,
+			  unsigned int flags)
+{
+	int err;
+
+	err = i915_vma_move_to_active(vma, rq, flags);
+	if (err)
+		return err;
+
+	if (!i915_gem_object_has_active_reference(vma->obj)) {
+		i915_gem_object_get(vma->obj);
+		i915_gem_object_set_active_reference(vma->obj);
+	}
+
+	return 0;
+}
+
+static struct i915_request *
+hang_create_request(struct hang *h, struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *i915 = h->i915;
 	struct i915_address_space *vm =
-		rq->gem_context->ppgtt ?
-		&rq->gem_context->ppgtt->vm :
-		&i915->ggtt.vm;
+		h->ctx->ppgtt ? &h->ctx->ppgtt->vm : &i915->ggtt.vm;
+	struct i915_request *rq = NULL;
 	struct i915_vma *hws, *vma;
 	unsigned int flags;
 	u32 *batch;
 	int err;
 
+	if (i915_gem_object_is_active(h->obj)) {
+		struct drm_i915_gem_object *obj;
+		void *vaddr;
+
+		obj = i915_gem_object_create_internal(h->i915, PAGE_SIZE);
+		if (IS_ERR(obj))
+			return ERR_CAST(obj);
+
+		vaddr = i915_gem_object_pin_map(obj,
+						i915_coherent_map_type(h->i915));
+		if (IS_ERR(vaddr)) {
+			i915_gem_object_put(obj);
+			return ERR_CAST(vaddr);
+		}
+
+		i915_gem_object_unpin_map(h->obj);
+		i915_gem_object_put(h->obj);
+
+		h->obj = obj;
+		h->batch = vaddr;
+	}
+
 	vma = i915_vma_instance(h->obj, vm, NULL);
 	if (IS_ERR(vma))
-		return PTR_ERR(vma);
+		return ERR_CAST(vma);
 
 	hws = i915_vma_instance(h->hws, vm, NULL);
 	if (IS_ERR(hws))
-		return PTR_ERR(hws);
+		return ERR_CAST(hws);
 
 	err = i915_vma_pin(vma, 0, 0, PIN_USER);
 	if (err)
-		return err;
+		return ERR_PTR(err);
 
 	err = i915_vma_pin(hws, 0, 0, PIN_USER);
 	if (err)
 		goto unpin_vma;
 
-	err = i915_vma_move_to_active(vma, rq, 0);
-	if (err)
+	rq = i915_request_alloc(engine, h->ctx);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
 		goto unpin_hws;
-
-	if (!i915_gem_object_has_active_reference(vma->obj)) {
-		i915_gem_object_get(vma->obj);
-		i915_gem_object_set_active_reference(vma->obj);
 	}
 
-	err = i915_vma_move_to_active(hws, rq, 0);
+	err = move_to_active(vma, rq, 0);
 	if (err)
-		goto unpin_hws;
+		goto cancel_rq;
 
-	if (!i915_gem_object_has_active_reference(hws->obj)) {
-		i915_gem_object_get(hws->obj);
-		i915_gem_object_set_active_reference(hws->obj);
-	}
+	err = move_to_active(hws, rq, 0);
+	if (err)
+		goto cancel_rq;
 
 	batch = h->batch;
 	if (INTEL_GEN(i915) >= 8) {
@@ -213,52 +248,16 @@ static int emit_recurse_batch(struct hang *h,
 
 	err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
 
+cancel_rq:
+	if (err) {
+		i915_request_skip(rq, err);
+		i915_request_add(rq);
+	}
 unpin_hws:
 	i915_vma_unpin(hws);
 unpin_vma:
 	i915_vma_unpin(vma);
-	return err;
-}
-
-static struct i915_request *
-hang_create_request(struct hang *h, struct intel_engine_cs *engine)
-{
-	struct i915_request *rq;
-	int err;
-
-	if (i915_gem_object_is_active(h->obj)) {
-		struct drm_i915_gem_object *obj;
-		void *vaddr;
-
-		obj = i915_gem_object_create_internal(h->i915, PAGE_SIZE);
-		if (IS_ERR(obj))
-			return ERR_CAST(obj);
-
-		vaddr = i915_gem_object_pin_map(obj,
-						i915_coherent_map_type(h->i915));
-		if (IS_ERR(vaddr)) {
-			i915_gem_object_put(obj);
-			return ERR_CAST(vaddr);
-		}
-
-		i915_gem_object_unpin_map(h->obj);
-		i915_gem_object_put(h->obj);
-
-		h->obj = obj;
-		h->batch = vaddr;
-	}
-
-	rq = i915_request_alloc(engine, h->ctx);
-	if (IS_ERR(rq))
-		return rq;
-
-	err = emit_recurse_batch(h, rq);
-	if (err) {
-		i915_request_add(rq);
-		return ERR_PTR(err);
-	}
-
-	return rq;
+	return err ? ERR_PTR(err) : rq;
 }
 
 static u32 hws_seqno(const struct hang *h, const struct i915_request *rq)
@@ -386,6 +385,30 @@ static int igt_global_reset(void *arg)
 	return err;
 }
 
+static int igt_wedged_reset(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+
+	/* Check that we can recover a wedged device with a GPU reset */
+
+	igt_global_reset_lock(i915);
+	mutex_lock(&i915->drm.struct_mutex);
+	intel_runtime_pm_get(i915);
+
+	i915_gem_set_wedged(i915);
+	GEM_BUG_ON(!i915_terminally_wedged(&i915->gpu_error));
+
+	set_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags);
+	i915_reset(i915, ALL_ENGINES, NULL);
+	GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags));
+
+	intel_runtime_pm_put(i915);
+	mutex_unlock(&i915->drm.struct_mutex);
+	igt_global_reset_unlock(i915);
+
+	return i915_terminally_wedged(&i915->gpu_error) ? -EIO : 0;
+}
+
 static bool wait_for_idle(struct intel_engine_cs *engine)
 {
 	return wait_for(intel_engine_is_idle(engine), IGT_IDLE_TIMEOUT) == 0;
@@ -1449,10 +1472,182 @@ err_unlock:
 	return err;
 }
 
+static void __preempt_begin(void)
+{
+	preempt_disable();
+}
+
+static void __preempt_end(void)
+{
+	preempt_enable();
+}
+
+static void __softirq_begin(void)
+{
+	local_bh_disable();
+}
+
+static void __softirq_end(void)
+{
+	local_bh_enable();
+}
+
+static void __hardirq_begin(void)
+{
+	local_irq_disable();
+}
+
+static void __hardirq_end(void)
+{
+	local_irq_enable();
+}
+
+struct atomic_section {
+	const char *name;
+	void (*critical_section_begin)(void);
+	void (*critical_section_end)(void);
+};
+
+static int __igt_atomic_reset_engine(struct intel_engine_cs *engine,
+				     const struct atomic_section *p,
+				     const char *mode)
+{
+	struct tasklet_struct * const t = &engine->execlists.tasklet;
+	int err;
+
+	GEM_TRACE("i915_reset_engine(%s:%s) under %s\n",
+		  engine->name, mode, p->name);
+
+	tasklet_disable_nosync(t);
+	p->critical_section_begin();
+
+	err = i915_reset_engine(engine, NULL);
+
+	p->critical_section_end();
+	tasklet_enable(t);
+
+	if (err)
+		pr_err("i915_reset_engine(%s:%s) failed under %s\n",
+		       engine->name, mode, p->name);
+
+	return err;
+}
+
+static int igt_atomic_reset_engine(struct intel_engine_cs *engine,
+				   const struct atomic_section *p)
+{
+	struct drm_i915_private *i915 = engine->i915;
+	struct i915_request *rq;
+	struct hang h;
+	int err;
+
+	err = __igt_atomic_reset_engine(engine, p, "idle");
+	if (err)
+		return err;
+
+	err = hang_init(&h, i915);
+	if (err)
+		return err;
+
+	rq = hang_create_request(&h, engine);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto out;
+	}
+
+	i915_request_get(rq);
+	i915_request_add(rq);
+
+	if (wait_until_running(&h, rq)) {
+		err = __igt_atomic_reset_engine(engine, p, "active");
+	} else {
+		pr_err("%s(%s): Failed to start request %llx, at %x\n",
+		       __func__, engine->name,
+		       rq->fence.seqno, hws_seqno(&h, rq));
+		err = -EIO;
+	}
+
+	if (err == 0) {
+		struct igt_wedge_me w;
+
+		igt_wedge_on_timeout(&w, i915, HZ / 20 /* 50ms timeout*/)
+			i915_request_wait(rq,
+					  I915_WAIT_LOCKED,
+					  MAX_SCHEDULE_TIMEOUT);
+		if (i915_terminally_wedged(&i915->gpu_error))
+			err = -EIO;
+	}
+
+	i915_request_put(rq);
+out:
+	hang_fini(&h);
+	return err;
+}
+
+static void force_reset(struct drm_i915_private *i915)
+{
+	i915_gem_set_wedged(i915);
+	set_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags);
+	i915_reset(i915, 0, NULL);
+}
+
+static int igt_atomic_reset(void *arg)
+{
+	static const struct atomic_section phases[] = {
+		{ "preempt", __preempt_begin, __preempt_end },
+		{ "softirq", __softirq_begin, __softirq_end },
+		{ "hardirq", __hardirq_begin, __hardirq_end },
+		{ }
+	};
+	struct drm_i915_private *i915 = arg;
+	int err = 0;
+
+	/* Check that the resets are usable from atomic context */
+
+	if (USES_GUC_SUBMISSION(i915))
+		return 0; /* guc is dead; long live the guc */
+
+	igt_global_reset_lock(i915);
+	mutex_lock(&i915->drm.struct_mutex);
+	intel_runtime_pm_get(i915);
+
+	/* Flush any requests before we get started and check basics */
+	force_reset(i915);
+	if (i915_terminally_wedged(&i915->gpu_error))
+		goto unlock;
+
+	if (intel_has_reset_engine(i915)) {
+		struct intel_engine_cs *engine;
+		enum intel_engine_id id;
+
+		for_each_engine(engine, i915, id) {
+			const typeof(*phases) *p;
+
+			for (p = phases; p->name; p++) {
+				err = igt_atomic_reset_engine(engine, p);
+				if (err)
+					goto out;
+			}
+		}
+	}
+
+out:
+	/* As we poke around the guts, do a full reset before continuing. */
+	force_reset(i915);
+
+unlock:
+	intel_runtime_pm_put(i915);
+	mutex_unlock(&i915->drm.struct_mutex);
+	igt_global_reset_unlock(i915);
+
+	return err;
+}
+
 int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
 {
 	static const struct i915_subtest tests[] = {
 		SUBTEST(igt_global_reset), /* attempt to recover GPU first */
+		SUBTEST(igt_wedged_reset),
 		SUBTEST(igt_hang_sanitycheck),
 		SUBTEST(igt_reset_idle_engine),
 		SUBTEST(igt_reset_active_engine),
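igt_atomic_reset() drives the same engine-reset path with preemption, softirqs, and hardirqs disabled in turn, by bracketing i915_reset_engine() with a NULL-name-terminated table of begin/end pairs; covering another atomic context later is a one-row change:

    static const struct atomic_section phases[] = {
            { "preempt", __preempt_begin, __preempt_end },
            { "softirq", __softirq_begin, __softirq_end },
            { "hardirq", __hardirq_begin, __hardirq_end },
            { }     /* sentinel: the p->name walk stops here */
    };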
@@ -1463,6 +1658,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(igt_reset_evict_ppgtt),
 		SUBTEST(igt_reset_evict_fence),
 		SUBTEST(igt_handle_error),
+		SUBTEST(igt_atomic_reset),
 	};
 	bool saved_hangcheck;
 	int err;
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index ca461e3a5f27..00caaa00f02f 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -522,7 +522,7 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
522 522
523 pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n", 523 pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
524 count, flags, 524 count, flags,
525 INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext); 525 RUNTIME_INFO(smoke->i915)->num_rings, smoke->ncontext);
526 return 0; 526 return 0;
527} 527}
528 528
@@ -550,7 +550,7 @@ static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
550 550
551 pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n", 551 pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
552 count, flags, 552 count, flags,
553 INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext); 553 RUNTIME_INFO(smoke->i915)->num_rings, smoke->ncontext);
554 return 0; 554 return 0;
555} 555}
556 556
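The two hunks above follow the i915 split of device information into a static per-PCI-ID structure (INTEL_INFO()) and a runtime copy that probe may adjust (RUNTIME_INFO()); num_rings belongs in the latter since engines can be fused off per part. An illustrative sketch of that split, not the actual i915 layout:

/* Capabilities fixed for a PCI ID vs. values discovered or trimmed at probe. */
struct demo_static_info {
	unsigned int gen;		/* never changes at runtime */
};

struct demo_runtime_info {
	unsigned int num_rings;		/* may shrink for fused-off engines */
};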
diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
index 67017d5175b8..8b3f3200a3bd 100644
--- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
@@ -12,6 +12,51 @@
12#include "igt_wedge_me.h" 12#include "igt_wedge_me.h"
13#include "mock_context.h" 13#include "mock_context.h"
14 14
15#define REF_NAME_MAX (INTEL_ENGINE_CS_MAX_NAME + 4)
16struct wa_lists {
17 struct i915_wa_list gt_wa_list;
18 struct {
19 char name[REF_NAME_MAX];
20 struct i915_wa_list wa_list;
21 } engine[I915_NUM_ENGINES];
22};
23
24static void
25reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists)
26{
27 struct intel_engine_cs *engine;
28 enum intel_engine_id id;
29
30 memset(lists, 0, sizeof(*lists));
31
32 wa_init_start(&lists->gt_wa_list, "GT_REF");
33 gt_init_workarounds(i915, &lists->gt_wa_list);
34 wa_init_finish(&lists->gt_wa_list);
35
36 for_each_engine(engine, i915, id) {
37 struct i915_wa_list *wal = &lists->engine[id].wa_list;
38 char *name = lists->engine[id].name;
39
40 snprintf(name, REF_NAME_MAX, "%s_REF", engine->name);
41
42 wa_init_start(wal, name);
43 engine_init_workarounds(engine, wal);
44 wa_init_finish(wal);
45 }
46}
47
48static void
49reference_lists_fini(struct drm_i915_private *i915, struct wa_lists *lists)
50{
51 struct intel_engine_cs *engine;
52 enum intel_engine_id id;
53
54 for_each_engine(engine, i915, id)
55 intel_wa_list_free(&lists->engine[id].wa_list);
56
57 intel_wa_list_free(&lists->gt_wa_list);
58}
59
15static struct drm_i915_gem_object * 60static struct drm_i915_gem_object *
16read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine) 61read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
17{ 62{
@@ -326,16 +371,17 @@ out:
326 return err; 371 return err;
327} 372}
328 373
329static bool verify_gt_engine_wa(struct drm_i915_private *i915, const char *str) 374static bool verify_gt_engine_wa(struct drm_i915_private *i915,
375 struct wa_lists *lists, const char *str)
330{ 376{
331 struct intel_engine_cs *engine; 377 struct intel_engine_cs *engine;
332 enum intel_engine_id id; 378 enum intel_engine_id id;
333 bool ok = true; 379 bool ok = true;
334 380
335 ok &= intel_gt_verify_workarounds(i915, str); 381 ok &= wa_list_verify(i915, &lists->gt_wa_list, str);
336 382
337 for_each_engine(engine, i915, id) 383 for_each_engine(engine, i915, id)
338 ok &= intel_engine_verify_workarounds(engine, str); 384 ok &= wa_list_verify(i915, &lists->engine[id].wa_list, str);
339 385
340 return ok; 386 return ok;
341} 387}
@@ -345,6 +391,7 @@ live_gpu_reset_gt_engine_workarounds(void *arg)
345{ 391{
346 struct drm_i915_private *i915 = arg; 392 struct drm_i915_private *i915 = arg;
347 struct i915_gpu_error *error = &i915->gpu_error; 393 struct i915_gpu_error *error = &i915->gpu_error;
394 struct wa_lists lists;
348 bool ok; 395 bool ok;
349 396
350 if (!intel_has_gpu_reset(i915)) 397 if (!intel_has_gpu_reset(i915))
@@ -353,19 +400,21 @@ live_gpu_reset_gt_engine_workarounds(void *arg)
353 pr_info("Verifying after GPU reset...\n"); 400 pr_info("Verifying after GPU reset...\n");
354 401
355 igt_global_reset_lock(i915); 402 igt_global_reset_lock(i915);
403 intel_runtime_pm_get(i915);
404 reference_lists_init(i915, &lists);
356 405
357 ok = verify_gt_engine_wa(i915, "before reset"); 406 ok = verify_gt_engine_wa(i915, &lists, "before reset");
358 if (!ok) 407 if (!ok)
359 goto out; 408 goto out;
360 409
361 intel_runtime_pm_get(i915);
362 set_bit(I915_RESET_HANDOFF, &error->flags); 410 set_bit(I915_RESET_HANDOFF, &error->flags);
363 i915_reset(i915, ALL_ENGINES, "live_workarounds"); 411 i915_reset(i915, ALL_ENGINES, "live_workarounds");
364 intel_runtime_pm_put(i915);
365 412
366 ok = verify_gt_engine_wa(i915, "after reset"); 413 ok = verify_gt_engine_wa(i915, &lists, "after reset");
367 414
368out: 415out:
416 reference_lists_fini(i915, &lists);
417 intel_runtime_pm_put(i915);
369 igt_global_reset_unlock(i915); 418 igt_global_reset_unlock(i915);
370 419
371 return ok ? 0 : -ESRCH; 420 return ok ? 0 : -ESRCH;
@@ -380,6 +429,7 @@ live_engine_reset_gt_engine_workarounds(void *arg)
380 struct igt_spinner spin; 429 struct igt_spinner spin;
381 enum intel_engine_id id; 430 enum intel_engine_id id;
382 struct i915_request *rq; 431 struct i915_request *rq;
432 struct wa_lists lists;
383 int ret = 0; 433 int ret = 0;
384 434
385 if (!intel_has_reset_engine(i915)) 435 if (!intel_has_reset_engine(i915))
@@ -390,23 +440,23 @@ live_engine_reset_gt_engine_workarounds(void *arg)
390 return PTR_ERR(ctx); 440 return PTR_ERR(ctx);
391 441
392 igt_global_reset_lock(i915); 442 igt_global_reset_lock(i915);
443 intel_runtime_pm_get(i915);
444 reference_lists_init(i915, &lists);
393 445
394 for_each_engine(engine, i915, id) { 446 for_each_engine(engine, i915, id) {
395 bool ok; 447 bool ok;
396 448
397 pr_info("Verifying after %s reset...\n", engine->name); 449 pr_info("Verifying after %s reset...\n", engine->name);
398 450
399 ok = verify_gt_engine_wa(i915, "before reset"); 451 ok = verify_gt_engine_wa(i915, &lists, "before reset");
400 if (!ok) { 452 if (!ok) {
401 ret = -ESRCH; 453 ret = -ESRCH;
402 goto err; 454 goto err;
403 } 455 }
404 456
405 intel_runtime_pm_get(i915);
406 i915_reset_engine(engine, "live_workarounds"); 457 i915_reset_engine(engine, "live_workarounds");
407 intel_runtime_pm_put(i915);
408 458
409 ok = verify_gt_engine_wa(i915, "after idle reset"); 459 ok = verify_gt_engine_wa(i915, &lists, "after idle reset");
410 if (!ok) { 460 if (!ok) {
411 ret = -ESRCH; 461 ret = -ESRCH;
412 goto err; 462 goto err;
@@ -416,13 +466,10 @@ live_engine_reset_gt_engine_workarounds(void *arg)
416 if (ret) 466 if (ret)
417 goto err; 467 goto err;
418 468
419 intel_runtime_pm_get(i915);
420
421 rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP); 469 rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
422 if (IS_ERR(rq)) { 470 if (IS_ERR(rq)) {
423 ret = PTR_ERR(rq); 471 ret = PTR_ERR(rq);
424 igt_spinner_fini(&spin); 472 igt_spinner_fini(&spin);
425 intel_runtime_pm_put(i915);
426 goto err; 473 goto err;
427 } 474 }
428 475
@@ -431,19 +478,16 @@ live_engine_reset_gt_engine_workarounds(void *arg)
431 if (!igt_wait_for_spinner(&spin, rq)) { 478 if (!igt_wait_for_spinner(&spin, rq)) {
432 pr_err("Spinner failed to start\n"); 479 pr_err("Spinner failed to start\n");
433 igt_spinner_fini(&spin); 480 igt_spinner_fini(&spin);
434 intel_runtime_pm_put(i915);
435 ret = -ETIMEDOUT; 481 ret = -ETIMEDOUT;
436 goto err; 482 goto err;
437 } 483 }
438 484
439 i915_reset_engine(engine, "live_workarounds"); 485 i915_reset_engine(engine, "live_workarounds");
440 486
441 intel_runtime_pm_put(i915);
442
443 igt_spinner_end(&spin); 487 igt_spinner_end(&spin);
444 igt_spinner_fini(&spin); 488 igt_spinner_fini(&spin);
445 489
446 ok = verify_gt_engine_wa(i915, "after busy reset"); 490 ok = verify_gt_engine_wa(i915, &lists, "after busy reset");
447 if (!ok) { 491 if (!ok) {
448 ret = -ESRCH; 492 ret = -ESRCH;
449 goto err; 493 goto err;
@@ -451,6 +495,8 @@ live_engine_reset_gt_engine_workarounds(void *arg)
451 } 495 }
452 496
453err: 497err:
498 reference_lists_fini(i915, &lists);
499 intel_runtime_pm_put(i915);
454 igt_global_reset_unlock(i915); 500 igt_global_reset_unlock(i915);
455 kernel_context_close(ctx); 501 kernel_context_close(ctx);
456 502
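The reworked workarounds selftest above captures reference workaround lists once per run (reference_lists_init(), with runtime PM now held for the whole sequence), verifies the live registers against those snapshots around each reset, and frees them on exit. The capture-then-verify shape, reduced to a standalone C sketch with hypothetical names:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct snapshot { unsigned int regs[4]; };

static void snapshot_capture(struct snapshot *s, const unsigned int *hw)
{
	memcpy(s->regs, hw, sizeof(s->regs));	/* reference_lists_init() analogue */
}

static bool snapshot_verify(const struct snapshot *s, const unsigned int *hw,
			    const char *when)
{
	if (!memcmp(s->regs, hw, sizeof(s->regs)))
		return true;
	fprintf(stderr, "workaround mismatch %s\n", when);
	return false;
}

int main(void)
{
	unsigned int hw[4] = { 1, 2, 3, 4 };
	struct snapshot ref;

	snapshot_capture(&ref, hw);
	/* ... perform the reset under test here ... */
	return snapshot_verify(&ref, hw, "after reset") ? 0 : 1;
}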
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index d0c44c18db42..50e1a0b1af7e 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -148,8 +148,6 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
148 const unsigned long sz = PAGE_SIZE / 2; 148 const unsigned long sz = PAGE_SIZE / 2;
149 struct mock_ring *ring; 149 struct mock_ring *ring;
150 150
151 BUILD_BUG_ON(MIN_SPACE_FOR_ADD_REQUEST > sz);
152
153 ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL); 151 ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL);
154 if (!ring) 152 if (!ring)
155 return NULL; 153 return NULL;
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 43ed8b28aeaa..baa3c38919de 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -147,7 +147,7 @@ struct drm_i915_private *mock_gem_device(void)
147 pdev->class = PCI_BASE_CLASS_DISPLAY << 16; 147 pdev->class = PCI_BASE_CLASS_DISPLAY << 16;
148 pdev->dev.release = release_dev; 148 pdev->dev.release = release_dev;
149 dev_set_name(&pdev->dev, "mock"); 149 dev_set_name(&pdev->dev, "mock");
150 dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 150 dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
151 151
152#if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU) 152#if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
153 /* hack to disable iommu for the fake device; force identity mapping */ 153 /* hack to disable iommu for the fake device; force identity mapping */
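The mock device hunk above widens the fake PCI device's DMA mask from 32 to 64 bits. For reference, DMA_BIT_MASK() from <linux/dma-mapping.h> builds a mask with the low n bits set, special-casing n == 64 because shifting a 64-bit value by 64 is undefined in C:

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))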
diff --git a/drivers/gpu/drm/i915/vlv_dsi.c b/drivers/gpu/drm/i915/vlv_dsi.c
index 9fc8085f76dc..3225621f820c 100644
--- a/drivers/gpu/drm/i915/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/vlv_dsi.c
@@ -23,7 +23,6 @@
23 * Author: Jani Nikula <jani.nikula@intel.com> 23 * Author: Jani Nikula <jani.nikula@intel.com>
24 */ 24 */
25 25
26#include <drm/drmP.h>
27#include <drm/drm_atomic_helper.h> 26#include <drm/drm_atomic_helper.h>
28#include <drm/drm_crtc.h> 27#include <drm/drm_crtc.h>
29#include <drm/drm_edid.h> 28#include <drm/drm_edid.h>
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index f3ef7bf80563..12ad00d01063 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1371,8 +1371,8 @@ static void mtk_hdmi_bridge_post_disable(struct drm_bridge *bridge)
1371} 1371}
1372 1372
1373static void mtk_hdmi_bridge_mode_set(struct drm_bridge *bridge, 1373static void mtk_hdmi_bridge_mode_set(struct drm_bridge *bridge,
1374 struct drm_display_mode *mode, 1374 const struct drm_display_mode *mode,
1375 struct drm_display_mode *adjusted_mode) 1375 const struct drm_display_mode *adjusted_mode)
1376{ 1376{
1377 struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); 1377 struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
1378 1378
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 08f3fc6771b7..9c6b31c2d79f 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -168,7 +168,7 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host,
168 bool is_dual_dsi); 168 bool is_dual_dsi);
169int msm_dsi_host_power_off(struct mipi_dsi_host *host); 169int msm_dsi_host_power_off(struct mipi_dsi_host *host);
170int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host, 170int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
171 struct drm_display_mode *mode); 171 const struct drm_display_mode *mode);
172struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host, 172struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host,
173 unsigned long *panel_flags); 173 unsigned long *panel_flags);
174struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host); 174struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 38e481d2d606..610183db1daf 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -2424,7 +2424,7 @@ unlock_ret:
2424} 2424}
2425 2425
2426int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host, 2426int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
2427 struct drm_display_mode *mode) 2427 const struct drm_display_mode *mode)
2428{ 2428{
2429 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2429 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2430 2430
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 7c8352a8ea97..979a8e929341 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -527,8 +527,8 @@ disable_phy:
527} 527}
528 528
529static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge, 529static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge,
530 struct drm_display_mode *mode, 530 const struct drm_display_mode *mode,
531 struct drm_display_mode *adjusted_mode) 531 const struct drm_display_mode *adjusted_mode)
532{ 532{
533 int id = dsi_mgr_bridge_get_id(bridge); 533 int id = dsi_mgr_bridge_get_id(bridge);
534 struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); 534 struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
diff --git a/drivers/gpu/drm/msm/edp/edp_bridge.c b/drivers/gpu/drm/msm/edp/edp_bridge.c
index 153f350ce017..11166bf232ff 100644
--- a/drivers/gpu/drm/msm/edp/edp_bridge.c
+++ b/drivers/gpu/drm/msm/edp/edp_bridge.c
@@ -52,8 +52,8 @@ static void edp_bridge_post_disable(struct drm_bridge *bridge)
52} 52}
53 53
54static void edp_bridge_mode_set(struct drm_bridge *bridge, 54static void edp_bridge_mode_set(struct drm_bridge *bridge,
55 struct drm_display_mode *mode, 55 const struct drm_display_mode *mode,
56 struct drm_display_mode *adjusted_mode) 56 const struct drm_display_mode *adjusted_mode)
57{ 57{
58 struct drm_device *dev = bridge->dev; 58 struct drm_device *dev = bridge->dev;
59 struct drm_connector *connector; 59 struct drm_connector *connector;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index 7ba6f52ed72c..03197b8959ba 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -208,8 +208,8 @@ static void msm_hdmi_bridge_post_disable(struct drm_bridge *bridge)
208} 208}
209 209
210static void msm_hdmi_bridge_mode_set(struct drm_bridge *bridge, 210static void msm_hdmi_bridge_mode_set(struct drm_bridge *bridge,
211 struct drm_display_mode *mode, 211 const struct drm_display_mode *mode,
212 struct drm_display_mode *adjusted_mode) 212 const struct drm_display_mode *adjusted_mode)
213{ 213{
214 struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); 214 struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
215 struct hdmi *hdmi = hdmi_bridge->hdmi; 215 struct hdmi *hdmi = hdmi_bridge->hdmi;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 90dacab67be5..771b460c7216 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -11,7 +11,6 @@
11#include <linux/mutex.h> 11#include <linux/mutex.h>
12#include <linux/sys_soc.h> 12#include <linux/sys_soc.h>
13 13
14#include <drm/drmP.h>
15#include <drm/drm_atomic.h> 14#include <drm/drm_atomic.h>
16#include <drm/drm_atomic_helper.h> 15#include <drm/drm_atomic_helper.h>
17#include <drm/drm_crtc.h> 16#include <drm/drm_crtc.h>
@@ -22,6 +21,7 @@
22 21
23#include "rcar_du_crtc.h" 22#include "rcar_du_crtc.h"
24#include "rcar_du_drv.h" 23#include "rcar_du_drv.h"
24#include "rcar_du_encoder.h"
25#include "rcar_du_kms.h" 25#include "rcar_du_kms.h"
26#include "rcar_du_plane.h" 26#include "rcar_du_plane.h"
27#include "rcar_du_regs.h" 27#include "rcar_du_regs.h"
@@ -316,26 +316,6 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
316 rcar_du_crtc_write(rcrtc, DEWR, mode->hdisplay); 316 rcar_du_crtc_write(rcrtc, DEWR, mode->hdisplay);
317} 317}
318 318
319void rcar_du_crtc_route_output(struct drm_crtc *crtc,
320 enum rcar_du_output output)
321{
322 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
323 struct rcar_du_device *rcdu = rcrtc->group->dev;
324
325 /*
326 * Store the route from the CRTC output to the DU output. The DU will be
327 * configured when starting the CRTC.
328 */
329 rcrtc->outputs |= BIT(output);
330
331 /*
332 * Store RGB routing to DPAD0, the hardware will be configured when
333 * starting the CRTC.
334 */
335 if (output == RCAR_DU_OUTPUT_DPAD0)
336 rcdu->dpad0_source = rcrtc->index;
337}
338
339static unsigned int plane_zpos(struct rcar_du_plane *plane) 319static unsigned int plane_zpos(struct rcar_du_plane *plane)
340{ 320{
341 return plane->plane.state->normalized_zpos; 321 return plane->plane.state->normalized_zpos;
@@ -655,6 +635,24 @@ static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
655 * CRTC Functions 635 * CRTC Functions
656 */ 636 */
657 637
638static int rcar_du_crtc_atomic_check(struct drm_crtc *crtc,
639 struct drm_crtc_state *state)
640{
641 struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(state);
642 struct drm_encoder *encoder;
643
644 /* Store the routes from the CRTC output to the DU outputs. */
645 rstate->outputs = 0;
646
647 drm_for_each_encoder_mask(encoder, crtc->dev, state->encoder_mask) {
648 struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
649
650 rstate->outputs |= BIT(renc->output);
651 }
652
653 return 0;
654}
655
658static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc, 656static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc,
659 struct drm_crtc_state *old_state) 657 struct drm_crtc_state *old_state)
660{ 658{
@@ -678,8 +676,6 @@ static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc,
678 crtc->state->event = NULL; 676 crtc->state->event = NULL;
679 } 677 }
680 spin_unlock_irq(&crtc->dev->event_lock); 678 spin_unlock_irq(&crtc->dev->event_lock);
681
682 rcrtc->outputs = 0;
683} 679}
684 680
685static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc, 681static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc,
@@ -755,6 +751,7 @@ enum drm_mode_status rcar_du_crtc_mode_valid(struct drm_crtc *crtc,
755} 751}
756 752
757static const struct drm_crtc_helper_funcs crtc_helper_funcs = { 753static const struct drm_crtc_helper_funcs crtc_helper_funcs = {
754 .atomic_check = rcar_du_crtc_atomic_check,
758 .atomic_begin = rcar_du_crtc_atomic_begin, 755 .atomic_begin = rcar_du_crtc_atomic_begin,
759 .atomic_flush = rcar_du_crtc_atomic_flush, 756 .atomic_flush = rcar_du_crtc_atomic_flush,
760 .atomic_enable = rcar_du_crtc_atomic_enable, 757 .atomic_enable = rcar_du_crtc_atomic_enable,
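The new rcar_du_crtc_atomic_check() above derives the CRTC's output routing from atomic state rather than mutating the CRTC from the encoder's mode-set hook: it walks the encoders enabled on this CRTC via crtc_state->encoder_mask and ORs each encoder's DU output into the per-state bitmask. The same accumulation as a standalone sketch, with illustrative types:

#include <stdint.h>

/* encoder_output[i] names the DU output driven by encoder i (illustrative). */
static uint32_t compute_outputs(uint32_t encoder_mask,
				const uint8_t *encoder_output)
{
	uint32_t outputs = 0;
	unsigned int i;

	for (i = 0; i < 32; i++)
		if (encoder_mask & (UINT32_C(1) << i))
			outputs |= UINT32_C(1) << encoder_output[i];

	return outputs;
}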
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
index 59ac6e7d22c9..bcb35b0b7612 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
@@ -14,7 +14,6 @@
14#include <linux/spinlock.h> 14#include <linux/spinlock.h>
15#include <linux/wait.h> 15#include <linux/wait.h>
16 16
17#include <drm/drmP.h>
18#include <drm/drm_crtc.h> 17#include <drm/drm_crtc.h>
19 18
20#include <media/vsp1.h> 19#include <media/vsp1.h>
@@ -37,7 +36,6 @@ struct rcar_du_vsp;
37 * @vblank_lock: protects vblank_wait and vblank_count 36 * @vblank_lock: protects vblank_wait and vblank_count
38 * @vblank_wait: wait queue used to signal vertical blanking 37 * @vblank_wait: wait queue used to signal vertical blanking
39 * @vblank_count: number of vertical blanking interrupts to wait for 38 * @vblank_count: number of vertical blanking interrupts to wait for
40 * @outputs: bitmask of the outputs (enum rcar_du_output) driven by this CRTC
41 * @group: CRTC group this CRTC belongs to 39 * @group: CRTC group this CRTC belongs to
42 * @vsp: VSP feeding video to this CRTC 40 * @vsp: VSP feeding video to this CRTC
43 * @vsp_pipe: index of the VSP pipeline feeding video to this CRTC 41 * @vsp_pipe: index of the VSP pipeline feeding video to this CRTC
@@ -61,8 +59,6 @@ struct rcar_du_crtc {
61 wait_queue_head_t vblank_wait; 59 wait_queue_head_t vblank_wait;
62 unsigned int vblank_count; 60 unsigned int vblank_count;
63 61
64 unsigned int outputs;
65
66 struct rcar_du_group *group; 62 struct rcar_du_group *group;
67 struct rcar_du_vsp *vsp; 63 struct rcar_du_vsp *vsp;
68 unsigned int vsp_pipe; 64 unsigned int vsp_pipe;
@@ -77,11 +73,13 @@ struct rcar_du_crtc {
77 * struct rcar_du_crtc_state - Driver-specific CRTC state 73 * struct rcar_du_crtc_state - Driver-specific CRTC state
78 * @state: base DRM CRTC state 74 * @state: base DRM CRTC state
79 * @crc: CRC computation configuration 75 * @crc: CRC computation configuration
76 * @outputs: bitmask of the outputs (enum rcar_du_output) driven by this CRTC
80 */ 77 */
81struct rcar_du_crtc_state { 78struct rcar_du_crtc_state {
82 struct drm_crtc_state state; 79 struct drm_crtc_state state;
83 80
84 struct vsp1_du_crc_config crc; 81 struct vsp1_du_crc_config crc;
82 unsigned int outputs;
85}; 83};
86 84
87#define to_rcar_crtc_state(s) container_of(s, struct rcar_du_crtc_state, state) 85#define to_rcar_crtc_state(s) container_of(s, struct rcar_du_crtc_state, state)
@@ -102,8 +100,6 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex,
102void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc); 100void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc);
103void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc); 101void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc);
104 102
105void rcar_du_crtc_route_output(struct drm_crtc *crtc,
106 enum rcar_du_output output);
107void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc); 103void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc);
108 104
109void rcar_du_crtc_dsysr_clr_set(struct rcar_du_crtc *rcrtc, u32 clr, u32 set); 105void rcar_du_crtc_dsysr_clr_set(struct rcar_du_crtc *rcrtc, u32 clr, u32 set);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index f50a3b1864bb..d1f305694367 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -17,7 +17,6 @@
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/wait.h> 18#include <linux/wait.h>
19 19
20#include <drm/drmP.h>
21#include <drm/drm_atomic_helper.h> 20#include <drm/drm_atomic_helper.h>
22#include <drm/drm_crtc_helper.h> 21#include <drm/drm_crtc_helper.h>
23#include <drm/drm_fb_cma_helper.h> 22#include <drm/drm_fb_cma_helper.h>
@@ -36,7 +35,6 @@
36static const struct rcar_du_device_info rzg1_du_r8a7743_info = { 35static const struct rcar_du_device_info rzg1_du_r8a7743_info = {
37 .gen = 2, 36 .gen = 2,
38 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK 37 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
39 | RCAR_DU_FEATURE_EXT_CTRL_REGS
40 | RCAR_DU_FEATURE_INTERLACED 38 | RCAR_DU_FEATURE_INTERLACED
41 | RCAR_DU_FEATURE_TVM_SYNC, 39 | RCAR_DU_FEATURE_TVM_SYNC,
42 .channels_mask = BIT(1) | BIT(0), 40 .channels_mask = BIT(1) | BIT(0),
@@ -59,7 +57,6 @@ static const struct rcar_du_device_info rzg1_du_r8a7743_info = {
59static const struct rcar_du_device_info rzg1_du_r8a7745_info = { 57static const struct rcar_du_device_info rzg1_du_r8a7745_info = {
60 .gen = 2, 58 .gen = 2,
61 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK 59 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
62 | RCAR_DU_FEATURE_EXT_CTRL_REGS
63 | RCAR_DU_FEATURE_INTERLACED 60 | RCAR_DU_FEATURE_INTERLACED
64 | RCAR_DU_FEATURE_TVM_SYNC, 61 | RCAR_DU_FEATURE_TVM_SYNC,
65 .channels_mask = BIT(1) | BIT(0), 62 .channels_mask = BIT(1) | BIT(0),
@@ -81,7 +78,6 @@ static const struct rcar_du_device_info rzg1_du_r8a7745_info = {
81static const struct rcar_du_device_info rzg1_du_r8a77470_info = { 78static const struct rcar_du_device_info rzg1_du_r8a77470_info = {
82 .gen = 2, 79 .gen = 2,
83 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK 80 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
84 | RCAR_DU_FEATURE_EXT_CTRL_REGS
85 | RCAR_DU_FEATURE_INTERLACED 81 | RCAR_DU_FEATURE_INTERLACED
86 | RCAR_DU_FEATURE_TVM_SYNC, 82 | RCAR_DU_FEATURE_TVM_SYNC,
87 .channels_mask = BIT(1) | BIT(0), 83 .channels_mask = BIT(1) | BIT(0),
@@ -105,8 +101,34 @@ static const struct rcar_du_device_info rzg1_du_r8a77470_info = {
105 }, 101 },
106}; 102};
107 103
104static const struct rcar_du_device_info rcar_du_r8a774c0_info = {
105 .gen = 3,
106 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
107 | RCAR_DU_FEATURE_VSP1_SOURCE,
108 .channels_mask = BIT(1) | BIT(0),
109 .routes = {
110 /*
111 * R8A774C0 has one RGB output and two LVDS outputs
112 */
113 [RCAR_DU_OUTPUT_DPAD0] = {
114 .possible_crtcs = BIT(0) | BIT(1),
115 .port = 0,
116 },
117 [RCAR_DU_OUTPUT_LVDS0] = {
118 .possible_crtcs = BIT(0),
119 .port = 1,
120 },
121 [RCAR_DU_OUTPUT_LVDS1] = {
122 .possible_crtcs = BIT(1),
123 .port = 2,
124 },
125 },
126 .num_lvds = 2,
127 .lvds_clk_mask = BIT(1) | BIT(0),
128};
129
108static const struct rcar_du_device_info rcar_du_r8a7779_info = { 130static const struct rcar_du_device_info rcar_du_r8a7779_info = {
109 .gen = 2, 131 .gen = 1,
110 .features = RCAR_DU_FEATURE_INTERLACED 132 .features = RCAR_DU_FEATURE_INTERLACED
111 | RCAR_DU_FEATURE_TVM_SYNC, 133 | RCAR_DU_FEATURE_TVM_SYNC,
112 .channels_mask = BIT(1) | BIT(0), 134 .channels_mask = BIT(1) | BIT(0),
@@ -129,7 +151,6 @@ static const struct rcar_du_device_info rcar_du_r8a7779_info = {
129static const struct rcar_du_device_info rcar_du_r8a7790_info = { 151static const struct rcar_du_device_info rcar_du_r8a7790_info = {
130 .gen = 2, 152 .gen = 2,
131 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK 153 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
132 | RCAR_DU_FEATURE_EXT_CTRL_REGS
133 | RCAR_DU_FEATURE_INTERLACED 154 | RCAR_DU_FEATURE_INTERLACED
134 | RCAR_DU_FEATURE_TVM_SYNC, 155 | RCAR_DU_FEATURE_TVM_SYNC,
135 .quirks = RCAR_DU_QUIRK_ALIGN_128B, 156 .quirks = RCAR_DU_QUIRK_ALIGN_128B,
@@ -159,7 +180,6 @@ static const struct rcar_du_device_info rcar_du_r8a7790_info = {
159static const struct rcar_du_device_info rcar_du_r8a7791_info = { 180static const struct rcar_du_device_info rcar_du_r8a7791_info = {
160 .gen = 2, 181 .gen = 2,
161 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK 182 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
162 | RCAR_DU_FEATURE_EXT_CTRL_REGS
163 | RCAR_DU_FEATURE_INTERLACED 183 | RCAR_DU_FEATURE_INTERLACED
164 | RCAR_DU_FEATURE_TVM_SYNC, 184 | RCAR_DU_FEATURE_TVM_SYNC,
165 .channels_mask = BIT(1) | BIT(0), 185 .channels_mask = BIT(1) | BIT(0),
@@ -183,7 +203,6 @@ static const struct rcar_du_device_info rcar_du_r8a7791_info = {
183static const struct rcar_du_device_info rcar_du_r8a7792_info = { 203static const struct rcar_du_device_info rcar_du_r8a7792_info = {
184 .gen = 2, 204 .gen = 2,
185 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK 205 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
186 | RCAR_DU_FEATURE_EXT_CTRL_REGS
187 | RCAR_DU_FEATURE_INTERLACED 206 | RCAR_DU_FEATURE_INTERLACED
188 | RCAR_DU_FEATURE_TVM_SYNC, 207 | RCAR_DU_FEATURE_TVM_SYNC,
189 .channels_mask = BIT(1) | BIT(0), 208 .channels_mask = BIT(1) | BIT(0),
@@ -203,7 +222,6 @@ static const struct rcar_du_device_info rcar_du_r8a7792_info = {
203static const struct rcar_du_device_info rcar_du_r8a7794_info = { 222static const struct rcar_du_device_info rcar_du_r8a7794_info = {
204 .gen = 2, 223 .gen = 2,
205 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK 224 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
206 | RCAR_DU_FEATURE_EXT_CTRL_REGS
207 | RCAR_DU_FEATURE_INTERLACED 225 | RCAR_DU_FEATURE_INTERLACED
208 | RCAR_DU_FEATURE_TVM_SYNC, 226 | RCAR_DU_FEATURE_TVM_SYNC,
209 .channels_mask = BIT(1) | BIT(0), 227 .channels_mask = BIT(1) | BIT(0),
@@ -226,7 +244,6 @@ static const struct rcar_du_device_info rcar_du_r8a7794_info = {
226static const struct rcar_du_device_info rcar_du_r8a7795_info = { 244static const struct rcar_du_device_info rcar_du_r8a7795_info = {
227 .gen = 3, 245 .gen = 3,
228 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK 246 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
229 | RCAR_DU_FEATURE_EXT_CTRL_REGS
230 | RCAR_DU_FEATURE_VSP1_SOURCE 247 | RCAR_DU_FEATURE_VSP1_SOURCE
231 | RCAR_DU_FEATURE_INTERLACED 248 | RCAR_DU_FEATURE_INTERLACED
232 | RCAR_DU_FEATURE_TVM_SYNC, 249 | RCAR_DU_FEATURE_TVM_SYNC,
@@ -260,7 +277,6 @@ static const struct rcar_du_device_info rcar_du_r8a7795_info = {
260static const struct rcar_du_device_info rcar_du_r8a7796_info = { 277static const struct rcar_du_device_info rcar_du_r8a7796_info = {
261 .gen = 3, 278 .gen = 3,
262 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK 279 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
263 | RCAR_DU_FEATURE_EXT_CTRL_REGS
264 | RCAR_DU_FEATURE_VSP1_SOURCE 280 | RCAR_DU_FEATURE_VSP1_SOURCE
265 | RCAR_DU_FEATURE_INTERLACED 281 | RCAR_DU_FEATURE_INTERLACED
266 | RCAR_DU_FEATURE_TVM_SYNC, 282 | RCAR_DU_FEATURE_TVM_SYNC,
@@ -290,7 +306,6 @@ static const struct rcar_du_device_info rcar_du_r8a7796_info = {
290static const struct rcar_du_device_info rcar_du_r8a77965_info = { 306static const struct rcar_du_device_info rcar_du_r8a77965_info = {
291 .gen = 3, 307 .gen = 3,
292 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK 308 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
293 | RCAR_DU_FEATURE_EXT_CTRL_REGS
294 | RCAR_DU_FEATURE_VSP1_SOURCE 309 | RCAR_DU_FEATURE_VSP1_SOURCE
295 | RCAR_DU_FEATURE_INTERLACED 310 | RCAR_DU_FEATURE_INTERLACED
296 | RCAR_DU_FEATURE_TVM_SYNC, 311 | RCAR_DU_FEATURE_TVM_SYNC,
@@ -320,7 +335,6 @@ static const struct rcar_du_device_info rcar_du_r8a77965_info = {
320static const struct rcar_du_device_info rcar_du_r8a77970_info = { 335static const struct rcar_du_device_info rcar_du_r8a77970_info = {
321 .gen = 3, 336 .gen = 3,
322 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK 337 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
323 | RCAR_DU_FEATURE_EXT_CTRL_REGS
324 | RCAR_DU_FEATURE_VSP1_SOURCE 338 | RCAR_DU_FEATURE_VSP1_SOURCE
325 | RCAR_DU_FEATURE_INTERLACED 339 | RCAR_DU_FEATURE_INTERLACED
326 | RCAR_DU_FEATURE_TVM_SYNC, 340 | RCAR_DU_FEATURE_TVM_SYNC,
@@ -342,7 +356,6 @@ static const struct rcar_du_device_info rcar_du_r8a77970_info = {
342static const struct rcar_du_device_info rcar_du_r8a7799x_info = { 356static const struct rcar_du_device_info rcar_du_r8a7799x_info = {
343 .gen = 3, 357 .gen = 3,
344 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK 358 .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
345 | RCAR_DU_FEATURE_EXT_CTRL_REGS
346 | RCAR_DU_FEATURE_VSP1_SOURCE, 359 | RCAR_DU_FEATURE_VSP1_SOURCE,
347 .channels_mask = BIT(1) | BIT(0), 360 .channels_mask = BIT(1) | BIT(0),
348 .routes = { 361 .routes = {
@@ -372,6 +385,7 @@ static const struct of_device_id rcar_du_of_table[] = {
372 { .compatible = "renesas,du-r8a7744", .data = &rzg1_du_r8a7743_info }, 385 { .compatible = "renesas,du-r8a7744", .data = &rzg1_du_r8a7743_info },
373 { .compatible = "renesas,du-r8a7745", .data = &rzg1_du_r8a7745_info }, 386 { .compatible = "renesas,du-r8a7745", .data = &rzg1_du_r8a7745_info },
374 { .compatible = "renesas,du-r8a77470", .data = &rzg1_du_r8a77470_info }, 387 { .compatible = "renesas,du-r8a77470", .data = &rzg1_du_r8a77470_info },
388 { .compatible = "renesas,du-r8a774c0", .data = &rcar_du_r8a774c0_info },
375 { .compatible = "renesas,du-r8a7779", .data = &rcar_du_r8a7779_info }, 389 { .compatible = "renesas,du-r8a7779", .data = &rcar_du_r8a7779_info },
376 { .compatible = "renesas,du-r8a7790", .data = &rcar_du_r8a7790_info }, 390 { .compatible = "renesas,du-r8a7790", .data = &rcar_du_r8a7790_info },
377 { .compatible = "renesas,du-r8a7791", .data = &rcar_du_r8a7791_info }, 391 { .compatible = "renesas,du-r8a7791", .data = &rcar_du_r8a7791_info },
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index a68da79b424e..6c187d0bf7c2 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -20,13 +20,13 @@
20struct clk; 20struct clk;
21struct device; 21struct device;
22struct drm_device; 22struct drm_device;
23struct drm_property;
23struct rcar_du_device; 24struct rcar_du_device;
24 25
25#define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK BIT(0) /* Per-CRTC IRQ and clock */ 26#define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK BIT(0) /* Per-CRTC IRQ and clock */
26#define RCAR_DU_FEATURE_EXT_CTRL_REGS BIT(1) /* Has extended control registers */ 27#define RCAR_DU_FEATURE_VSP1_SOURCE BIT(1) /* Has inputs from VSP1 */
27#define RCAR_DU_FEATURE_VSP1_SOURCE BIT(2) /* Has inputs from VSP1 */ 28#define RCAR_DU_FEATURE_INTERLACED BIT(2) /* HW supports interlaced */
28#define RCAR_DU_FEATURE_INTERLACED BIT(3) /* HW supports interlaced */ 29#define RCAR_DU_FEATURE_TVM_SYNC BIT(3) /* Has TV switch/sync modes */
29#define RCAR_DU_FEATURE_TVM_SYNC BIT(4) /* Has TV switch/sync modes */
30 30
31#define RCAR_DU_QUIRK_ALIGN_128B BIT(0) /* Align pitches to 128 bytes */ 31#define RCAR_DU_QUIRK_ALIGN_128B BIT(0) /* Align pitches to 128 bytes */
32 32
@@ -89,6 +89,7 @@ struct rcar_du_device {
89 } props; 89 } props;
90 90
91 unsigned int dpad0_source; 91 unsigned int dpad0_source;
92 unsigned int dpad1_source;
92 unsigned int vspd1_sink; 93 unsigned int vspd1_sink;
93}; 94};
94 95
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index 1877764bd6d9..f16209499117 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -9,7 +9,6 @@
9 9
10#include <linux/export.h> 10#include <linux/export.h>
11 11
12#include <drm/drmP.h>
13#include <drm/drm_crtc.h> 12#include <drm/drm_crtc.h>
14#include <drm/drm_crtc_helper.h> 13#include <drm/drm_crtc_helper.h>
15#include <drm/drm_panel.h> 14#include <drm/drm_panel.h>
@@ -22,17 +21,7 @@
22 * Encoder 21 * Encoder
23 */ 22 */
24 23
25static void rcar_du_encoder_mode_set(struct drm_encoder *encoder,
26 struct drm_crtc_state *crtc_state,
27 struct drm_connector_state *conn_state)
28{
29 struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
30
31 rcar_du_crtc_route_output(crtc_state->crtc, renc->output);
32}
33
34static const struct drm_encoder_helper_funcs encoder_helper_funcs = { 24static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
35 .atomic_mode_set = rcar_du_encoder_mode_set,
36}; 25};
37 26
38static const struct drm_encoder_funcs encoder_funcs = { 27static const struct drm_encoder_funcs encoder_funcs = {
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
index ce3cbc85695e..552f2a02e5b5 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
@@ -10,10 +10,8 @@
10#ifndef __RCAR_DU_ENCODER_H__ 10#ifndef __RCAR_DU_ENCODER_H__
11#define __RCAR_DU_ENCODER_H__ 11#define __RCAR_DU_ENCODER_H__
12 12
13#include <drm/drm_crtc.h>
14#include <drm/drm_encoder.h> 13#include <drm/drm_encoder.h>
15 14
16struct drm_panel;
17struct rcar_du_device; 15struct rcar_du_device;
18 16
19struct rcar_du_encoder { 17struct rcar_du_encoder {
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c
index cebf313c6e1f..9eee47969e77 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_group.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c
@@ -147,7 +147,7 @@ static void rcar_du_group_setup(struct rcar_du_group *rgrp)
147 147
148 rcar_du_group_setup_pins(rgrp); 148 rcar_du_group_setup_pins(rgrp);
149 149
150 if (rcar_du_has(rgrp->dev, RCAR_DU_FEATURE_EXT_CTRL_REGS)) { 150 if (rcdu->info->gen >= 2) {
151 rcar_du_group_setup_defr8(rgrp); 151 rcar_du_group_setup_defr8(rgrp);
152 rcar_du_group_setup_didsr(rgrp); 152 rcar_du_group_setup_didsr(rgrp);
153 } 153 }
@@ -262,7 +262,7 @@ int rcar_du_set_dpad0_vsp1_routing(struct rcar_du_device *rcdu)
262 unsigned int index; 262 unsigned int index;
263 int ret; 263 int ret;
264 264
265 if (!rcar_du_has(rcdu, RCAR_DU_FEATURE_EXT_CTRL_REGS)) 265 if (rcdu->info->gen < 2)
266 return 0; 266 return 0;
267 267
268 /* 268 /*
@@ -287,9 +287,50 @@ int rcar_du_set_dpad0_vsp1_routing(struct rcar_du_device *rcdu)
287 return 0; 287 return 0;
288} 288}
289 289
290static void rcar_du_group_set_dpad_levels(struct rcar_du_group *rgrp)
291{
292 static const u32 doflr_values[2] = {
293 DOFLR_HSYCFL0 | DOFLR_VSYCFL0 | DOFLR_ODDFL0 |
294 DOFLR_DISPFL0 | DOFLR_CDEFL0 | DOFLR_RGBFL0,
295 DOFLR_HSYCFL1 | DOFLR_VSYCFL1 | DOFLR_ODDFL1 |
296 DOFLR_DISPFL1 | DOFLR_CDEFL1 | DOFLR_RGBFL1,
297 };
298 static const u32 dpad_mask = BIT(RCAR_DU_OUTPUT_DPAD1)
299 | BIT(RCAR_DU_OUTPUT_DPAD0);
300 struct rcar_du_device *rcdu = rgrp->dev;
301 u32 doflr = DOFLR_CODE;
302 unsigned int i;
303
304 if (rcdu->info->gen < 2)
305 return;
306
307 /*
308 * The DPAD outputs can't be controlled directly. However, the parallel
309 * output of the DU channels routed to DPAD can be set to fixed levels
310 * through the DOFLR group register. Use this to turn the DPAD on or off
311 * by driving fixed low-level signals at the output of any DU channel
312 * not routed to a DPAD output. This doesn't affect the DU output
313 * signals going to other outputs, such as the internal LVDS and HDMI
314 * encoders.
315 */
316
317 for (i = 0; i < rgrp->num_crtcs; ++i) {
318 struct rcar_du_crtc_state *rstate;
319 struct rcar_du_crtc *rcrtc;
320
321 rcrtc = &rcdu->crtcs[rgrp->index * 2 + i];
322 rstate = to_rcar_crtc_state(rcrtc->crtc.state);
323
324 if (!(rstate->outputs & dpad_mask))
325 doflr |= doflr_values[i];
326 }
327
328 rcar_du_group_write(rgrp, DOFLR, doflr);
329}
330
290int rcar_du_group_set_routing(struct rcar_du_group *rgrp) 331int rcar_du_group_set_routing(struct rcar_du_group *rgrp)
291{ 332{
292 struct rcar_du_crtc *crtc0 = &rgrp->dev->crtcs[rgrp->index * 2]; 333 struct rcar_du_device *rcdu = rgrp->dev;
293 u32 dorcr = rcar_du_group_read(rgrp, DORCR); 334 u32 dorcr = rcar_du_group_read(rgrp, DORCR);
294 335
295 dorcr &= ~(DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_MASK); 336 dorcr &= ~(DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_MASK);
@@ -299,12 +340,14 @@ int rcar_du_group_set_routing(struct rcar_du_group *rgrp)
299 * CRTC 1 in all other cases to avoid cloning CRTC 0 to DPAD0 and DPAD1 340 * CRTC 1 in all other cases to avoid cloning CRTC 0 to DPAD0 and DPAD1
300 * by default. 341 * by default.
301 */ 342 */
302 if (crtc0->outputs & BIT(RCAR_DU_OUTPUT_DPAD1)) 343 if (rcdu->dpad1_source == rgrp->index * 2)
303 dorcr |= DORCR_PG2D_DS1; 344 dorcr |= DORCR_PG2D_DS1;
304 else 345 else
305 dorcr |= DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_DS2; 346 dorcr |= DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_DS2;
306 347
307 rcar_du_group_write(rgrp, DORCR, dorcr); 348 rcar_du_group_write(rgrp, DORCR, dorcr);
308 349
350 rcar_du_group_set_dpad_levels(rgrp);
351
309 return rcar_du_set_dpad0_vsp1_routing(rgrp->dev); 352 return rcar_du_set_dpad0_vsp1_routing(rgrp->dev);
310} 353}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 9c7007d45408..e4b248e368d6 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -7,7 +7,6 @@
7 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) 7 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
8 */ 8 */
9 9
10#include <drm/drmP.h>
11#include <drm/drm_atomic.h> 10#include <drm/drm_atomic.h>
12#include <drm/drm_atomic_helper.h> 11#include <drm/drm_atomic_helper.h>
13#include <drm/drm_crtc.h> 12#include <drm/drm_crtc.h>
@@ -278,6 +277,28 @@ static int rcar_du_atomic_check(struct drm_device *dev,
278static void rcar_du_atomic_commit_tail(struct drm_atomic_state *old_state) 277static void rcar_du_atomic_commit_tail(struct drm_atomic_state *old_state)
279{ 278{
280 struct drm_device *dev = old_state->dev; 279 struct drm_device *dev = old_state->dev;
280 struct rcar_du_device *rcdu = dev->dev_private;
281 struct drm_crtc_state *crtc_state;
282 struct drm_crtc *crtc;
283 unsigned int i;
284
285 /*
286	 * Store RGB routing to DPAD0 and DPAD1; the hardware will be configured
287 * when starting the CRTCs.
288 */
289 rcdu->dpad1_source = -1;
290
291 for_each_new_crtc_in_state(old_state, crtc, crtc_state, i) {
292 struct rcar_du_crtc_state *rcrtc_state =
293 to_rcar_crtc_state(crtc_state);
294 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
295
296 if (rcrtc_state->outputs & BIT(RCAR_DU_OUTPUT_DPAD0))
297 rcdu->dpad0_source = rcrtc->index;
298
299 if (rcrtc_state->outputs & BIT(RCAR_DU_OUTPUT_DPAD1))
300 rcdu->dpad1_source = rcrtc->index;
301 }
281 302
282 /* Apply the atomic update. */ 303 /* Apply the atomic update. */
283 drm_atomic_helper_commit_modeset_disables(dev, old_state); 304 drm_atomic_helper_commit_modeset_disables(dev, old_state);
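One subtlety in the commit-tail hunk above: dpad1_source was added as an unsigned int, so the "-1" written before the scan wraps to UINT_MAX, a sentinel no CRTC index can equal. rcar_du_group_set_routing() compares it against "rgrp->index * 2", so when no CRTC drives DPAD1 the comparison fails and the DPAD1-disable branch is taken. A compact illustration:

#include <stdio.h>

int main(void)
{
	unsigned int dpad1_source = -1;	/* wraps to UINT_MAX: "no CRTC" */
	unsigned int group_index = 0;

	if (dpad1_source == group_index * 2)
		printf("route CRTC %u to DPAD1\n", dpad1_source);
	else
		printf("DPAD1 left unrouted (sentinel %u)\n", dpad1_source);

	return 0;
}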
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7790.dts b/drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7790.dts
index 579753e04f3b..8bee4e787a0a 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7790.dts
+++ b/drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7790.dts
@@ -7,70 +7,63 @@
7 7
8/dts-v1/; 8/dts-v1/;
9/plugin/; 9/plugin/;
10/ {
11 fragment@0 {
12 target-path = "/";
13 __overlay__ {
14 #address-cells = <2>;
15 #size-cells = <2>;
16 10
17 lvds@feb90000 { 11&{/} {
18 compatible = "renesas,r8a7790-lvds"; 12 #address-cells = <2>;
19 reg = <0 0xfeb90000 0 0x1c>; 13 #size-cells = <2>;
20 14
21 ports { 15 lvds@feb90000 {
22 #address-cells = <1>; 16 compatible = "renesas,r8a7790-lvds";
23 #size-cells = <0>; 17 reg = <0 0xfeb90000 0 0x1c>;
24 18
25 port@0 { 19 ports {
26 reg = <0>; 20 #address-cells = <1>;
27 lvds0_input: endpoint { 21 #size-cells = <0>;
28 }; 22
29 }; 23 port@0 {
30 port@1 { 24 reg = <0>;
31 reg = <1>; 25 lvds0_input: endpoint {
32 lvds0_out: endpoint {
33 };
34 };
35 }; 26 };
36 }; 27 };
37 28 port@1 {
38 lvds@feb94000 { 29 reg = <1>;
39 compatible = "renesas,r8a7790-lvds"; 30 lvds0_out: endpoint {
40 reg = <0 0xfeb94000 0 0x1c>;
41
42 ports {
43 #address-cells = <1>;
44 #size-cells = <0>;
45
46 port@0 {
47 reg = <0>;
48 lvds1_input: endpoint {
49 };
50 };
51 port@1 {
52 reg = <1>;
53 lvds1_out: endpoint {
54 };
55 };
56 }; 31 };
57 }; 32 };
58 }; 33 };
59 }; 34 };
60 35
61 fragment@1 { 36 lvds@feb94000 {
62 target-path = "/display@feb00000/ports"; 37 compatible = "renesas,r8a7790-lvds";
63 __overlay__ { 38 reg = <0 0xfeb94000 0 0x1c>;
64 port@1 { 39
65 endpoint { 40 ports {
66 remote-endpoint = <&lvds0_input>; 41 #address-cells = <1>;
42 #size-cells = <0>;
43
44 port@0 {
45 reg = <0>;
46 lvds1_input: endpoint {
67 }; 47 };
68 }; 48 };
69 port@2 { 49 port@1 {
70 endpoint { 50 reg = <1>;
71 remote-endpoint = <&lvds1_input>; 51 lvds1_out: endpoint {
72 }; 52 };
73 }; 53 };
74 }; 54 };
75 }; 55 };
76}; 56};
57
58&{/display@feb00000/ports} {
59 port@1 {
60 endpoint {
61 remote-endpoint = <&lvds0_input>;
62 };
63 };
64 port@2 {
65 endpoint {
66 remote-endpoint = <&lvds1_input>;
67 };
68 };
69};
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7791.dts b/drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7791.dts
index cb9da1f3942b..92c0509971ec 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7791.dts
+++ b/drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7791.dts
@@ -7,44 +7,37 @@
7 7
8/dts-v1/; 8/dts-v1/;
9/plugin/; 9/plugin/;
10/ {
11 fragment@0 {
12 target-path = "/";
13 __overlay__ {
14 #address-cells = <2>;
15 #size-cells = <2>;
16 10
17 lvds@feb90000 { 11&{/} {
18 compatible = "renesas,r8a7791-lvds"; 12 #address-cells = <2>;
19 reg = <0 0xfeb90000 0 0x1c>; 13 #size-cells = <2>;
20 14
21 ports { 15 lvds@feb90000 {
22 #address-cells = <1>; 16 compatible = "renesas,r8a7791-lvds";
23 #size-cells = <0>; 17 reg = <0 0xfeb90000 0 0x1c>;
24 18
25 port@0 { 19 ports {
26 reg = <0>; 20 #address-cells = <1>;
27 lvds0_input: endpoint { 21 #size-cells = <0>;
28 }; 22
29 }; 23 port@0 {
30 port@1 { 24 reg = <0>;
31 reg = <1>; 25 lvds0_input: endpoint {
32 lvds0_out: endpoint {
33 };
34 };
35 }; 26 };
36 }; 27 };
37 };
38 };
39
40 fragment@1 {
41 target-path = "/display@feb00000/ports";
42 __overlay__ {
43 port@1 { 28 port@1 {
44 endpoint { 29 reg = <1>;
45 remote-endpoint = <&lvds0_input>; 30 lvds0_out: endpoint {
46 }; 31 };
47 }; 32 };
48 }; 33 };
49 }; 34 };
50}; 35};
36
37&{/display@feb00000/ports} {
38 port@1 {
39 endpoint {
40 remote-endpoint = <&lvds0_input>;
41 };
42 };
43};
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7793.dts b/drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7793.dts
index e7b8804dc3c1..c8b93f21de0f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7793.dts
+++ b/drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7793.dts
@@ -7,44 +7,37 @@
7 7
8/dts-v1/; 8/dts-v1/;
9/plugin/; 9/plugin/;
10/ {
11 fragment@0 {
12 target-path = "/";
13 __overlay__ {
14 #address-cells = <2>;
15 #size-cells = <2>;
16 10
17 lvds@feb90000 { 11&{/} {
18 compatible = "renesas,r8a7793-lvds"; 12 #address-cells = <2>;
19 reg = <0 0xfeb90000 0 0x1c>; 13 #size-cells = <2>;
20 14
21 ports { 15 lvds@feb90000 {
22 #address-cells = <1>; 16 compatible = "renesas,r8a7793-lvds";
23 #size-cells = <0>; 17 reg = <0 0xfeb90000 0 0x1c>;
24 18
25 port@0 { 19 ports {
26 reg = <0>; 20 #address-cells = <1>;
27 lvds0_input: endpoint { 21 #size-cells = <0>;
28 }; 22
29 }; 23 port@0 {
30 port@1 { 24 reg = <0>;
31 reg = <1>; 25 lvds0_input: endpoint {
32 lvds0_out: endpoint {
33 };
34 };
35 }; 26 };
36 }; 27 };
37 };
38 };
39
40 fragment@1 {
41 target-path = "/display@feb00000/ports";
42 __overlay__ {
43 port@1 { 28 port@1 {
44 endpoint { 29 reg = <1>;
45 remote-endpoint = <&lvds0_input>; 30 lvds0_out: endpoint {
46 }; 31 };
47 }; 32 };
48 }; 33 };
49 }; 34 };
50}; 35};
36
37&{/display@feb00000/ports} {
38 port@1 {
39 endpoint {
40 remote-endpoint = <&lvds0_input>;
41 };
42 };
43};
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7795.dts b/drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7795.dts
index a1327443e6fa..16c2d03cb016 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7795.dts
+++ b/drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7795.dts
@@ -7,44 +7,37 @@
7 7
8/dts-v1/; 8/dts-v1/;
9/plugin/; 9/plugin/;
10/ {
11 fragment@0 {
12 target-path = "/soc";
13 __overlay__ {
14 #address-cells = <2>;
15 #size-cells = <2>;
16 10
17 lvds@feb90000 { 11&{/soc} {
18 compatible = "renesas,r8a7795-lvds"; 12 #address-cells = <2>;
19 reg = <0 0xfeb90000 0 0x14>; 13 #size-cells = <2>;
20 14
21 ports { 15 lvds@feb90000 {
22 #address-cells = <1>; 16 compatible = "renesas,r8a7795-lvds";
23 #size-cells = <0>; 17 reg = <0 0xfeb90000 0 0x14>;
24 18
25 port@0 { 19 ports {
26 reg = <0>; 20 #address-cells = <1>;
27 lvds0_input: endpoint { 21 #size-cells = <0>;
28 }; 22
29 }; 23 port@0 {
30 port@1 { 24 reg = <0>;
31 reg = <1>; 25 lvds0_input: endpoint {
32 lvds0_out: endpoint { 26 };
33 }; 27 };
34 }; 28 port@1 {
29 reg = <1>;
30 lvds0_out: endpoint {
35 }; 31 };
36 }; 32 };
37 }; 33 };
38 }; 34 };
35};
39 36
40 fragment@1 { 37&{/soc/display@feb00000/ports} {
41 target-path = "/soc/display@feb00000/ports"; 38 port@3 {
42 __overlay__ { 39 endpoint {
43 port@3 { 40 remote-endpoint = <&lvds0_input>;
44 endpoint {
45 remote-endpoint = <&lvds0_input>;
46 };
47 };
48 }; 41 };
49 }; 42 };
50}; 43};
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7796.dts b/drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7796.dts
index b23d6466c415..680e923ac036 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7796.dts
+++ b/drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7796.dts
@@ -7,44 +7,37 @@
7 7
8/dts-v1/; 8/dts-v1/;
9/plugin/; 9/plugin/;
10/ {
11 fragment@0 {
12 target-path = "/soc";
13 __overlay__ {
14 #address-cells = <2>;
15 #size-cells = <2>;
16 10
17 lvds@feb90000 { 11&{/soc} {
18 compatible = "renesas,r8a7796-lvds"; 12 #address-cells = <2>;
19 reg = <0 0xfeb90000 0 0x14>; 13 #size-cells = <2>;
20 14
21 ports { 15 lvds@feb90000 {
22 #address-cells = <1>; 16 compatible = "renesas,r8a7796-lvds";
23 #size-cells = <0>; 17 reg = <0 0xfeb90000 0 0x14>;
24 18
25 port@0 { 19 ports {
26 reg = <0>; 20 #address-cells = <1>;
27 lvds0_input: endpoint { 21 #size-cells = <0>;
28 }; 22
29 }; 23 port@0 {
30 port@1 { 24 reg = <0>;
31 reg = <1>; 25 lvds0_input: endpoint {
32 lvds0_out: endpoint { 26 };
33 }; 27 };
34 }; 28 port@1 {
29 reg = <1>;
30 lvds0_out: endpoint {
35 }; 31 };
36 }; 32 };
37 }; 33 };
38 }; 34 };
35};
39 36
40 fragment@1 { 37&{/soc/display@feb00000/ports} {
41 target-path = "/soc/display@feb00000/ports"; 38 port@3 {
42 __overlay__ { 39 endpoint {
43 port@3 { 40 remote-endpoint = <&lvds0_input>;
44 endpoint {
45 remote-endpoint = <&lvds0_input>;
46 };
47 };
48 }; 41 };
49 }; 42 };
50}; 43};
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index 39d5ae3fdf72..fa6b9aabc832 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -7,7 +7,6 @@
7 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) 7 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
8 */ 8 */
9 9
10#include <drm/drmP.h>
11#include <drm/drm_atomic.h> 10#include <drm/drm_atomic.h>
12#include <drm/drm_atomic_helper.h> 11#include <drm/drm_atomic_helper.h>
13#include <drm/drm_crtc.h> 12#include <drm/drm_crtc.h>
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.h b/drivers/gpu/drm/rcar-du/rcar_du_plane.h
index 2f223a4c1d33..81bbf207ad0e 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.h
@@ -10,8 +10,7 @@
10#ifndef __RCAR_DU_PLANE_H__ 10#ifndef __RCAR_DU_PLANE_H__
11#define __RCAR_DU_PLANE_H__ 11#define __RCAR_DU_PLANE_H__
12 12
13#include <drm/drmP.h> 13#include <drm/drm_plane.h>
14#include <drm/drm_crtc.h>
15 14
16struct rcar_du_format_info; 15struct rcar_du_format_info;
17struct rcar_du_group; 16struct rcar_du_group;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
index 4576119e7777..dec314a687e0 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
@@ -7,7 +7,6 @@
7 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) 7 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
8 */ 8 */
9 9
10#include <drm/drmP.h>
11#include <drm/drm_atomic_helper.h> 10#include <drm/drm_atomic_helper.h>
12#include <drm/drm_crtc.h> 11#include <drm/drm_crtc.h>
13#include <drm/drm_crtc_helper.h> 12#include <drm/drm_crtc_helper.h>
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.h b/drivers/gpu/drm/rcar-du/rcar_du_vsp.h
index e8c14dc5cb93..db232037f24a 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.h
@@ -10,8 +10,7 @@
10#ifndef __RCAR_DU_VSP_H__ 10#ifndef __RCAR_DU_VSP_H__
11#define __RCAR_DU_VSP_H__ 11#define __RCAR_DU_VSP_H__
12 12
13#include <drm/drmP.h> 13#include <drm/drm_plane.h>
14#include <drm/drm_crtc.h>
15 14
16struct rcar_du_format_info; 15struct rcar_du_format_info;
17struct rcar_du_vsp; 16struct rcar_du_vsp;
diff --git a/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c b/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c
index 790d499daa10..452461dc96f2 100644
--- a/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c
+++ b/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c
@@ -12,6 +12,7 @@
12#include <linux/platform_device.h> 12#include <linux/platform_device.h>
13 13
14#include <drm/bridge/dw_hdmi.h> 14#include <drm/bridge/dw_hdmi.h>
15#include <drm/drm_modes.h>
15 16
16#define RCAR_HDMI_PHY_OPMODE_PLLCFG 0x06 /* Mode of operation and PLL dividers */ 17#define RCAR_HDMI_PHY_OPMODE_PLLCFG 0x06 /* Mode of operation and PLL dividers */
17#define RCAR_HDMI_PHY_PLLCURRGMPCTRL 0x10 /* PLL current and Gmp (conductance) */ 18#define RCAR_HDMI_PHY_PLLCURRGMPCTRL 0x10 /* PLL current and Gmp (conductance) */
@@ -36,6 +37,20 @@ static const struct rcar_hdmi_phy_params rcar_hdmi_phy_params[] = {
36 { ~0UL, 0x0000, 0x0000, 0x0000 }, 37 { ~0UL, 0x0000, 0x0000, 0x0000 },
37}; 38};
38 39
40static enum drm_mode_status
41rcar_hdmi_mode_valid(struct drm_connector *connector,
42 const struct drm_display_mode *mode)
43{
44 /*
45 * The maximum supported clock frequency is 297 MHz, as shown in the PHY
46 * parameters table.
47 */
48 if (mode->clock > 297000)
49 return MODE_CLOCK_HIGH;
50
51 return MODE_OK;
52}
53
39static int rcar_hdmi_phy_configure(struct dw_hdmi *hdmi, 54static int rcar_hdmi_phy_configure(struct dw_hdmi *hdmi,
40 const struct dw_hdmi_plat_data *pdata, 55 const struct dw_hdmi_plat_data *pdata,
41 unsigned long mpixelclock) 56 unsigned long mpixelclock)
@@ -60,6 +75,7 @@ static int rcar_hdmi_phy_configure(struct dw_hdmi *hdmi,
60} 75}
61 76
62static const struct dw_hdmi_plat_data rcar_dw_hdmi_plat_data = { 77static const struct dw_hdmi_plat_data rcar_dw_hdmi_plat_data = {
78 .mode_valid = rcar_hdmi_mode_valid,
63 .configure_phy = rcar_hdmi_phy_configure, 79 .configure_phy = rcar_hdmi_phy_configure,
64}; 80};
65 81
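drm_display_mode::clock is expressed in kHz, so the "mode->clock > 297000" bound in rcar_hdmi_mode_valid() above is the 297 MHz ceiling of the PHY parameter table. The same check with the unit spelled out, as a sketch:

/* Sketch only; mirrors the mode_valid check above with the kHz unit named. */
static int mode_clock_ok(int clock_khz)
{
	const int max_clock_khz = 297 * 1000;	/* 297 MHz PHY limit */

	return clock_khz <= max_clock_khz;
}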
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index 534a128a869d..96d749a35b25 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -520,8 +520,8 @@ static void rcar_lvds_get_lvds_mode(struct rcar_lvds *lvds)
520} 520}
521 521
522static void rcar_lvds_mode_set(struct drm_bridge *bridge, 522static void rcar_lvds_mode_set(struct drm_bridge *bridge,
523 struct drm_display_mode *mode, 523 const struct drm_display_mode *mode,
524 struct drm_display_mode *adjusted_mode) 524 const struct drm_display_mode *adjusted_mode)
525{ 525{
526 struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge); 526 struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
527 527
@@ -785,6 +785,7 @@ static const struct rcar_lvds_device_info rcar_lvds_r8a77995_info = {
785 785
786static const struct of_device_id rcar_lvds_of_table[] = { 786static const struct of_device_id rcar_lvds_of_table[] = {
787 { .compatible = "renesas,r8a7743-lvds", .data = &rcar_lvds_gen2_info }, 787 { .compatible = "renesas,r8a7743-lvds", .data = &rcar_lvds_gen2_info },
788 { .compatible = "renesas,r8a774c0-lvds", .data = &rcar_lvds_r8a77990_info },
788 { .compatible = "renesas,r8a7790-lvds", .data = &rcar_lvds_r8a7790_info }, 789 { .compatible = "renesas,r8a7790-lvds", .data = &rcar_lvds_r8a7790_info },
789 { .compatible = "renesas,r8a7791-lvds", .data = &rcar_lvds_gen2_info }, 790 { .compatible = "renesas,r8a7791-lvds", .data = &rcar_lvds_gen2_info },
790 { .compatible = "renesas,r8a7793-lvds", .data = &rcar_lvds_gen2_info }, 791 { .compatible = "renesas,r8a7793-lvds", .data = &rcar_lvds_gen2_info },
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
index 7ee359bcee62..ef8486e5e2cd 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
@@ -467,7 +467,7 @@ static int dw_mipi_dsi_phy_init(void *priv_data)
467} 467}
468 468
469static int 469static int
470dw_mipi_dsi_get_lane_mbps(void *priv_data, struct drm_display_mode *mode, 470dw_mipi_dsi_get_lane_mbps(void *priv_data, const struct drm_display_mode *mode,
471 unsigned long mode_flags, u32 lanes, u32 format, 471 unsigned long mode_flags, u32 lanes, u32 format,
472 unsigned int *lane_mbps) 472 unsigned int *lane_mbps)
473{ 473{
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index 8554102a6ead..f2cfd1698b78 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -229,8 +229,8 @@ static int shmob_drm_probe(struct platform_device *pdev)
229 229
230 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 230 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
231 sdev->mmio = devm_ioremap_resource(&pdev->dev, res); 231 sdev->mmio = devm_ioremap_resource(&pdev->dev, res);
232 if (sdev->mmio == NULL) 232 if (IS_ERR(sdev->mmio))
233 return -ENOMEM; 233 return PTR_ERR(sdev->mmio);
234 234
235 ret = shmob_drm_setup_clocks(sdev, pdata->clk_source); 235 ret = shmob_drm_setup_clocks(sdev, pdata->clk_source);
236 if (ret < 0) 236 if (ret < 0)
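The shmobile hunk above fixes a classic error-handling bug: devm_ioremap_resource() never returns NULL on failure, it returns an ERR_PTR()-encoded errno, so the old NULL check let error pointers through and misreported failures as -ENOMEM. The canonical idiom, using IS_ERR()/PTR_ERR() from <linux/err.h>:

	void __iomem *mmio = devm_ioremap_resource(&pdev->dev, res);

	if (IS_ERR(mmio))
		return PTR_ERR(mmio);	/* propagate the real errno */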
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index b08376b7611b..6d33772f77eb 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -277,8 +277,8 @@ static void sti_dvo_pre_enable(struct drm_bridge *bridge)
277} 277}
278 278
279static void sti_dvo_set_mode(struct drm_bridge *bridge, 279static void sti_dvo_set_mode(struct drm_bridge *bridge,
280 struct drm_display_mode *mode, 280 const struct drm_display_mode *mode,
281 struct drm_display_mode *adjusted_mode) 281 const struct drm_display_mode *adjusted_mode)
282{ 282{
283 struct sti_dvo *dvo = bridge->driver_private; 283 struct sti_dvo *dvo = bridge->driver_private;
284 struct sti_mixer *mixer = to_sti_mixer(dvo->encoder->crtc); 284 struct sti_mixer *mixer = to_sti_mixer(dvo->encoder->crtc);
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index 19b9b5ed1297..a63dd5eb7081 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -508,8 +508,8 @@ static void sti_hda_pre_enable(struct drm_bridge *bridge)
 }
 
 static void sti_hda_set_mode(struct drm_bridge *bridge,
-			     struct drm_display_mode *mode,
-			     struct drm_display_mode *adjusted_mode)
+			     const struct drm_display_mode *mode,
+			     const struct drm_display_mode *adjusted_mode)
 {
 	struct sti_hda *hda = bridge->driver_private;
 	u32 mode_idx;
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 4b86878f8ddf..458fcb5a93f2 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -918,8 +918,8 @@ static void sti_hdmi_pre_enable(struct drm_bridge *bridge)
 }
 
 static void sti_hdmi_set_mode(struct drm_bridge *bridge,
-			      struct drm_display_mode *mode,
-			      struct drm_display_mode *adjusted_mode)
+			      const struct drm_display_mode *mode,
+			      const struct drm_display_mode *adjusted_mode)
 {
 	struct sti_hdmi *hdmi = bridge->driver_private;
 	int ret;
diff --git a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
index a514b593f37c..a672b59a2226 100644
--- a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
+++ b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
@@ -215,7 +215,7 @@ static int dw_mipi_dsi_phy_init(void *priv_data)
 }
 
 static int
-dw_mipi_dsi_get_lane_mbps(void *priv_data, struct drm_display_mode *mode,
+dw_mipi_dsi_get_lane_mbps(void *priv_data, const struct drm_display_mode *mode,
 			  unsigned long mode_flags, u32 lanes, u32 format,
 			  unsigned int *lane_mbps)
 {
diff --git a/include/drm/bridge/dw_mipi_dsi.h b/include/drm/bridge/dw_mipi_dsi.h
index 48a671e782ca..7d3dd69a5caa 100644
--- a/include/drm/bridge/dw_mipi_dsi.h
+++ b/include/drm/bridge/dw_mipi_dsi.h
@@ -14,7 +14,8 @@ struct dw_mipi_dsi;
 
 struct dw_mipi_dsi_phy_ops {
 	int (*init)(void *priv_data);
-	int (*get_lane_mbps)(void *priv_data, struct drm_display_mode *mode,
+	int (*get_lane_mbps)(void *priv_data,
+			     const struct drm_display_mode *mode,
 			     unsigned long mode_flags, u32 lanes, u32 format,
 			     unsigned int *lane_mbps);
 };
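To illustrate what the constified ->get_lane_mbps prototype looks like from a glue driver's side, here is a minimal sketch. The driver name, the 24 bpp assumption, and the lane-rate formula are illustrative only; real implementations derive the rate from the pixel format and PHY limits:

#include <linux/kernel.h>
#include <drm/bridge/dw_mipi_dsi.h>
#include <drm/drm_modes.h>

/*
 * Hypothetical callback matching the new, const-qualified prototype:
 * the core now passes the mode as read-only, so the implementation may
 * inspect fields such as mode->clock but must not modify the mode.
 */
static int example_get_lane_mbps(void *priv_data,
				 const struct drm_display_mode *mode,
				 unsigned long mode_flags, u32 lanes,
				 u32 format, unsigned int *lane_mbps)
{
	/* Rough estimate: pixel clock in kHz, 24 bpp, split across lanes. */
	*lane_mbps = DIV_ROUND_UP(mode->clock * 24, lanes * 1000);
	return 0;
}

static const struct dw_mipi_dsi_phy_ops example_phy_ops = {
	.get_lane_mbps = example_get_lane_mbps,
};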
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index bd850747ce54..9da8c93f7976 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -196,8 +196,8 @@ struct drm_bridge_funcs {
 	 * the DRM framework will have to be extended with DRM bridge states.
 	 */
 	void (*mode_set)(struct drm_bridge *bridge,
-			 struct drm_display_mode *mode,
-			 struct drm_display_mode *adjusted_mode);
+			 const struct drm_display_mode *mode,
+			 const struct drm_display_mode *adjusted_mode);
 	/**
 	 * @pre_enable:
 	 *
@@ -310,8 +310,8 @@ enum drm_mode_status drm_bridge_mode_valid(struct drm_bridge *bridge,
 void drm_bridge_disable(struct drm_bridge *bridge);
 void drm_bridge_post_disable(struct drm_bridge *bridge);
 void drm_bridge_mode_set(struct drm_bridge *bridge,
-			 struct drm_display_mode *mode,
-			 struct drm_display_mode *adjusted_mode);
+			 const struct drm_display_mode *mode,
+			 const struct drm_display_mode *adjusted_mode);
 void drm_bridge_pre_enable(struct drm_bridge *bridge);
 void drm_bridge_enable(struct drm_bridge *bridge);
 
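The ->mode_set constification above changes what bridge drivers are allowed to do in that hook: the mode can be latched but no longer edited, so any timing fixup belongs in ->mode_fixup() instead. A sketch of a conforming implementation, with a hypothetical driver struct:

#include <drm/drm_bridge.h>
#include <drm/drm_modes.h>

struct example_bridge {
	unsigned int pixclock_khz;
};

/*
 * Hypothetical bridge driver: with the const-qualified prototype the
 * callback can only read the adjusted mode (here, caching the pixel
 * clock for later use); writes to *mode would no longer compile.
 */
static void example_bridge_mode_set(struct drm_bridge *bridge,
				    const struct drm_display_mode *mode,
				    const struct drm_display_mode *adjusted_mode)
{
	struct example_bridge *ctx = bridge->driver_private;

	ctx->pixclock_khz = adjusted_mode->clock;
}

static const struct drm_bridge_funcs example_bridge_funcs = {
	.mode_set = example_bridge_mode_set,
};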
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 5736c942c85b..c223c87ef119 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -314,6 +314,10 @@
 # define DP_PSR_SETUP_TIME_SHIFT           1
 # define DP_PSR2_SU_Y_COORDINATE_REQUIRED  (1 << 4)  /* eDP 1.4a */
 # define DP_PSR2_SU_GRANULARITY_REQUIRED   (1 << 5)  /* eDP 1.4b */
+
+#define DP_PSR2_SU_X_GRANULARITY	    0x072 /* eDP 1.4b */
+#define DP_PSR2_SU_Y_GRANULARITY	    0x074 /* eDP 1.4b */
+
 /*
  * 0x80-0x8f describe downstream port capabilities, but there are two layouts
  * based on whether DP_DETAILED_CAP_INFO_AVAILABLE was set. If it was not,
@@ -1365,6 +1369,13 @@ enum drm_dp_quirk {
 	 * to 16 bits. So will give a constant value (0x8000) for compatability.
 	 */
 	DP_DPCD_QUIRK_CONSTANT_N,
+	/**
+	 * @DP_DPCD_QUIRK_NO_PSR:
+	 *
+	 * The device does not support PSR even though it reports PSR
+	 * capability, so drivers need to handle such devices explicitly.
+	 */
+	DP_DPCD_QUIRK_NO_PSR,
 };
 
 /**
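The two drm_dp_helper.h hunks above fit together: a driver deciding whether to enable PSR2 would first consult the quirk list, then read the new selective-update granularity registers like any other DPCD field. A minimal sketch, assuming a hypothetical helper that is not part of this diff:

#include <drm/drm_dp_helper.h>

/*
 * Hypothetical sink-capability check: read the sink's DPCD descriptor,
 * bail out when the quirk list marks the device as falsely advertising
 * PSR support, then fetch the SU X granularity field added above.
 */
static bool example_psr2_usable(struct drm_dp_aux *aux, bool is_branch)
{
	struct drm_dp_desc desc;
	u8 su_x[2];

	if (drm_dp_read_desc(aux, &desc, is_branch))
		return false;

	if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_NO_PSR))
		return false;

	/* The new DPCD offsets are read like any other capability field. */
	if (drm_dp_dpcd_read(aux, DP_PSR2_SU_X_GRANULARITY, su_x,
			     sizeof(su_x)) != sizeof(su_x))
		return false;

	return true;
}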
diff --git a/include/linux/mfd/intel_soc_pmic.h b/include/linux/mfd/intel_soc_pmic.h
index ed1dfba5e5f9..bfecd6bd4990 100644
--- a/include/linux/mfd/intel_soc_pmic.h
+++ b/include/linux/mfd/intel_soc_pmic.h
@@ -26,4 +26,7 @@ struct intel_soc_pmic {
 	struct device *dev;
 };
 
+int intel_soc_pmic_exec_mipi_pmic_seq_element(u16 i2c_address, u32 reg_address,
+					      u32 value, u32 mask);
+
 #endif /* __INTEL_SOC_PMIC_H__ */
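The new export above lets a MIPI sequence interpreter hand a PMIC sequence element to the PMIC driver, which performs a masked read-modify-write of the addressed register. A usage sketch; the caller and all register values are illustrative only (real ones come from the panel's sequence data, e.g. the VBT):

#include <linux/mfd/intel_soc_pmic.h>

/* Hypothetical caller forwarding one PMIC sequence element. */
static int example_exec_pmic_element(void)
{
	u16 i2c_address = 0x34;	/* illustrative PMIC I2C address */
	u32 reg_address = 0x51;	/* illustrative register offset */
	u32 value = 0x01, mask = 0x01;

	return intel_soc_pmic_exec_mipi_pmic_seq_element(i2c_address,
							 reg_address,
							 value, mask);
}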