-rw-r--r--  Documentation/vm/unevictable-lru.rst | 6
-rw-r--r--  drivers/gpu/drm/drm_atomic.c | 5
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 4
-rw-r--r--  drivers/gpu/drm/drm_atomic_uapi.c | 4
-rw-r--r--  drivers/gpu/drm/drm_connector.c | 41
-rw-r--r--  drivers/gpu/drm/drm_dp_helper.c | 90
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 6
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 28
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 235
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 98
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 240
-rw-r--r--  drivers/gpu/drm/i915/i915_fixed.h | 143
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 67
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.h | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 13
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.h | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 249
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 29
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.h | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 83
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_bdw.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_bdw.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_bxt.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_bxt.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_cflgt2.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_cflgt2.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_cflgt3.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_cflgt3.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_chv.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_chv.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_cnl.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_cnl.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_glk.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_glk.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_hsw.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_hsw.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_icl.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_icl.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_kblgt2.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_kblgt2.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_kblgt3.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_kblgt3.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_sklgt2.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_sklgt2.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_sklgt3.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_sklgt3.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_sklgt4.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_sklgt4.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c | 9
-rw-r--r--  drivers/gpu/drm/i915/i915_params.h | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c | 35
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c | 49
-rw-r--r--  drivers/gpu/drm/i915/i915_query.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 797
-rw-r--r--  drivers/gpu/drm/i915/i915_request.c | 118
-rw-r--r--  drivers/gpu/drm/i915/i915_request.h | 13
-rw-r--r--  drivers/gpu/drm/i915/i915_scheduler.c | 399
-rw-r--r--  drivers/gpu/drm/i915/i915_scheduler.h | 36
-rw-r--r--  drivers/gpu/drm/i915/i915_syncmap.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_timeline.h | 19
-rw-r--r--  drivers/gpu/drm/i915/i915_utils.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c | 8
-rw-r--r--  drivers/gpu/drm/i915/icl_dsi.c | 858
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic.c | 118
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic_plane.c | 118
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c | 37
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 89
-rw-r--r--  drivers/gpu/drm/i915/intel_cdclk.c | 39
-rw-r--r--  drivers/gpu/drm/i915/intel_color.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_combo_phy.c | 254
-rw-r--r--  drivers/gpu/drm/i915/intel_connector.c (renamed from drivers/gpu/drm/i915/intel_modes.c) | 129
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_csr.c | 160
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 445
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.c | 69
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.h | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 1893
-rw-r--r--  drivers/gpu/drm/i915/intel_display.h | 15
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 831
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c | 33
-rw-r--r--  drivers/gpu/drm/i915/intel_dpio_phy.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_dpll_mgr.c | 117
-rw-r--r--  drivers/gpu/drm/i915/intel_dpll_mgr.h | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 230
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.c | 128
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.h | 30
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_vbt.c | 284
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_engine_cs.c | 25
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_guc.c | 45
-rw-r--r--  drivers/gpu/drm/i915/intel_guc.h | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_fw.c | 113
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_fwif.h | 41
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_reg.h | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_submission.c | 216
-rw-r--r--  drivers/gpu/drm/i915/intel_hdcp.c | 214
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 213
-rw-r--r--  drivers/gpu/drm/i915/intel_hotplug.c | 67
-rw-r--r--  drivers/gpu/drm/i915/intel_huc.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 314
-rw-r--r--  drivers/gpu/drm/i915/intel_lspcon.c | 347
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 67
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 158
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.h | 15
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 13
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 522
-rw-r--r--  drivers/gpu/drm/i915/intel_psr.c | 168
-rw-r--r--  drivers/gpu/drm/i915/intel_quirks.c | 169
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 24
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 342
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 56
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 652
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_uc.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_uc_fw.h | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_vbt_defs.h | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_workarounds.c | 46
-rw-r--r--  drivers/gpu/drm/i915/selftests/huge_pages.c | 34
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_context.c | 428
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_evict.c | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 4
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_guc.c | 59
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_hangcheck.c | 4
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_lrc.c | 267
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_engine.c | 2
-rw-r--r--  drivers/gpu/drm/i915/vlv_dsi.c | 190
-rw-r--r--  include/drm/drm_connector.h | 20
-rw-r--r--  include/drm/drm_dp_helper.h | 95
-rw-r--r--  include/drm/drm_hdcp.h | 212
-rw-r--r--  include/drm/i915_pciids.h | 21
-rw-r--r--  include/linux/swap.h | 4
-rw-r--r--  include/uapi/drm/i915_drm.h | 8
-rw-r--r--  mm/shmem.c | 2
-rw-r--r--  mm/vmscan.c | 22
-rw-r--r--  sound/x86/intel_hdmi_audio.c | 26
142 files changed, 9473 insertions(+), 5387 deletions(-)
diff --git a/Documentation/vm/unevictable-lru.rst b/Documentation/vm/unevictable-lru.rst
index fdd84cb8d511..b8e29f977f2d 100644
--- a/Documentation/vm/unevictable-lru.rst
+++ b/Documentation/vm/unevictable-lru.rst
@@ -143,7 +143,7 @@ using a number of wrapper functions:
     Query the address space, and return true if it is completely
     unevictable.
 
-These are currently used in two places in the kernel:
+These are currently used in three places in the kernel:
 
  (1) By ramfs to mark the address spaces of its inodes when they are created,
      and this mark remains for the life of the inode.
@@ -154,6 +154,10 @@ These are currently used in two places in the kernel:
      swapped out; the application must touch the pages manually if it wants to
      ensure they're in memory.
 
+ (3) By the i915 driver to mark pinned address space until it's unpinned. The
+     amount of unevictable memory marked by i915 driver is roughly the bounded
+     object size in debugfs/dri/0/i915_gem_objects.
+
 
 Detecting Unevictable Pages
 ---------------------------
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 3dbfbddae7e6..1706ed1100d5 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -395,6 +395,11 @@ static int drm_atomic_connector_check(struct drm_connector *connector,
 {
 	struct drm_crtc_state *crtc_state;
 	struct drm_writeback_job *writeback_job = state->writeback_job;
+	const struct drm_display_info *info = &connector->display_info;
+
+	state->max_bpc = info->bpc ? info->bpc : 8;
+	if (connector->max_bpc_property)
+		state->max_bpc = min(state->max_bpc, state->max_requested_bpc);
 
 	if ((connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) || !writeback_job)
 		return 0;
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 474b503a73a1..fa95f9974f6d 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -669,6 +669,10 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
 		if (old_connector_state->link_status !=
 		    new_connector_state->link_status)
 			new_crtc_state->connectors_changed = true;
+
+		if (old_connector_state->max_requested_bpc !=
+		    new_connector_state->max_requested_bpc)
+			new_crtc_state->connectors_changed = true;
 	}
 
 	if (funcs->atomic_check)
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index d5b7f315098c..86ac33922b09 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -740,6 +740,8 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
 
 		return set_out_fence_for_connector(state->state, connector,
 						   fence_ptr);
+	} else if (property == connector->max_bpc_property) {
+		state->max_requested_bpc = val;
 	} else if (connector->funcs->atomic_set_property) {
 		return connector->funcs->atomic_set_property(connector,
 				state, property, val);
@@ -804,6 +806,8 @@ drm_atomic_connector_get_property(struct drm_connector *connector,
 		*val = 0;
 	} else if (property == config->writeback_out_fence_ptr_property) {
 		*val = 0;
+	} else if (property == connector->max_bpc_property) {
+		*val = state->max_requested_bpc;
 	} else if (connector->funcs->atomic_get_property) {
 		return connector->funcs->atomic_get_property(connector,
 				state, property, val);
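
With the property wired into the atomic get/set paths above, userspace can clamp a connector's bit depth through the standard atomic API. A minimal libdrm sketch, not part of this patch (conn_id and max_bpc_prop_id are hypothetical and must be discovered via drmModeObjectGetProperties() first):

	/* Request at most 8 bpc on a connector via the "max bpc" property. */
	drmModeAtomicReqPtr req = drmModeAtomicAlloc();
	drmModeAtomicAddProperty(req, conn_id, max_bpc_prop_id, 8);
	drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_ALLOW_MODESET, NULL);
	drmModeAtomicFree(req);
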
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index aa18b1d7d3e4..fa9baacc863b 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -932,6 +932,13 @@ DRM_ENUM_NAME_FN(drm_get_content_protection_name, drm_cp_enum_list)
  * is no longer protected and userspace should take appropriate action
  * (whatever that might be).
  *
+ * max bpc:
+ *	This range property is used by userspace to limit the bit depth. When
+ *	used the driver would limit the bpc in accordance with the valid range
+ *	supported by the hardware and sink. Drivers to use the function
+ *	drm_connector_attach_max_bpc_property() to create and attach the
+ *	property to the connector during initialization.
+ *
  * Connectors also have one standardized atomic property:
  *
  * CRTC_ID:
@@ -1600,6 +1607,40 @@ void drm_connector_set_link_status_property(struct drm_connector *connector,
 EXPORT_SYMBOL(drm_connector_set_link_status_property);
 
 /**
+ * drm_connector_attach_max_bpc_property - attach "max bpc" property
+ * @connector: connector to attach max bpc property on.
+ * @min: The minimum bit depth supported by the connector.
+ * @max: The maximum bit depth supported by the connector.
+ *
+ * This is used to add support for limiting the bit depth on a connector.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_connector_attach_max_bpc_property(struct drm_connector *connector,
+					  int min, int max)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_property *prop;
+
+	prop = connector->max_bpc_property;
+	if (!prop) {
+		prop = drm_property_create_range(dev, 0, "max bpc", min, max);
+		if (!prop)
+			return -ENOMEM;
+
+		connector->max_bpc_property = prop;
+	}
+
+	drm_object_attach_property(&connector->base, prop, max);
+	connector->state->max_requested_bpc = max;
+	connector->state->max_bpc = max;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_connector_attach_max_bpc_property);
+
+/**
  * drm_connector_init_panel_orientation_property -
  * initialize the connecters panel_orientation property
  * @connector: connector for which to init the panel-orientation property.
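
The helper is intended to be called once while the connector is being set up. A hedged sketch of a driver hooking it up (function name and the 8..12 range are illustrative, not from this patch):

	static int example_connector_init(struct drm_connector *connector)
	{
		/* Creates the "max bpc" range property if needed and attaches
		 * it to the connector with the maximum as the default. */
		return drm_connector_attach_max_bpc_property(connector, 8, 12);
	}
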
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 37c01b6076ec..6d483487f2b4 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -1352,3 +1352,93 @@ int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
 	return 0;
 }
 EXPORT_SYMBOL(drm_dp_read_desc);
+
+/**
+ * DRM DP Helpers for DSC
+ */
+u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
+				   bool is_edp)
+{
+	u8 slice_cap1 = dsc_dpcd[DP_DSC_SLICE_CAP_1 - DP_DSC_SUPPORT];
+
+	if (is_edp) {
+		/* For eDP, register DSC_SLICE_CAPABILITIES_1 gives slice count */
+		if (slice_cap1 & DP_DSC_4_PER_DP_DSC_SINK)
+			return 4;
+		if (slice_cap1 & DP_DSC_2_PER_DP_DSC_SINK)
+			return 2;
+		if (slice_cap1 & DP_DSC_1_PER_DP_DSC_SINK)
+			return 1;
+	} else {
+		/* For DP, use values from DSC_SLICE_CAP_1 and DSC_SLICE_CAP2 */
+		u8 slice_cap2 = dsc_dpcd[DP_DSC_SLICE_CAP_2 - DP_DSC_SUPPORT];
+
+		if (slice_cap2 & DP_DSC_24_PER_DP_DSC_SINK)
+			return 24;
+		if (slice_cap2 & DP_DSC_20_PER_DP_DSC_SINK)
+			return 20;
+		if (slice_cap2 & DP_DSC_16_PER_DP_DSC_SINK)
+			return 16;
+		if (slice_cap1 & DP_DSC_12_PER_DP_DSC_SINK)
+			return 12;
+		if (slice_cap1 & DP_DSC_10_PER_DP_DSC_SINK)
+			return 10;
+		if (slice_cap1 & DP_DSC_8_PER_DP_DSC_SINK)
+			return 8;
+		if (slice_cap1 & DP_DSC_6_PER_DP_DSC_SINK)
+			return 6;
+		if (slice_cap1 & DP_DSC_4_PER_DP_DSC_SINK)
+			return 4;
+		if (slice_cap1 & DP_DSC_2_PER_DP_DSC_SINK)
+			return 2;
+		if (slice_cap1 & DP_DSC_1_PER_DP_DSC_SINK)
+			return 1;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_dp_dsc_sink_max_slice_count);
+
+u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+{
+	u8 line_buf_depth = dsc_dpcd[DP_DSC_LINE_BUF_BIT_DEPTH - DP_DSC_SUPPORT];
+
+	switch (line_buf_depth & DP_DSC_LINE_BUF_BIT_DEPTH_MASK) {
+	case DP_DSC_LINE_BUF_BIT_DEPTH_9:
+		return 9;
+	case DP_DSC_LINE_BUF_BIT_DEPTH_10:
+		return 10;
+	case DP_DSC_LINE_BUF_BIT_DEPTH_11:
+		return 11;
+	case DP_DSC_LINE_BUF_BIT_DEPTH_12:
+		return 12;
+	case DP_DSC_LINE_BUF_BIT_DEPTH_13:
+		return 13;
+	case DP_DSC_LINE_BUF_BIT_DEPTH_14:
+		return 14;
+	case DP_DSC_LINE_BUF_BIT_DEPTH_15:
+		return 15;
+	case DP_DSC_LINE_BUF_BIT_DEPTH_16:
+		return 16;
+	case DP_DSC_LINE_BUF_BIT_DEPTH_8:
+		return 8;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_dp_dsc_sink_line_buf_depth);
+
+u8 drm_dp_dsc_sink_max_color_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+{
+	u8 color_depth = dsc_dpcd[DP_DSC_DEC_COLOR_DEPTH_CAP - DP_DSC_SUPPORT];
+
+	if (color_depth & DP_DSC_12_BPC)
+		return 12;
+	if (color_depth & DP_DSC_10_BPC)
+		return 10;
+	if (color_depth & DP_DSC_8_BPC)
+		return 8;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_dp_dsc_sink_max_color_depth);
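
All three helpers index into the DSC receiver capability block that starts at DPCD address DP_DSC_SUPPORT (hence the "- DP_DSC_SUPPORT" offsets), so a caller reads that range once and then decodes it. A hedged sketch of a consumer (function name hypothetical):

	static void example_probe_dsc(struct drm_dp_aux *aux)
	{
		u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE];

		/* One AUX read covers everything the decode helpers need. */
		if (drm_dp_dpcd_read(aux, DP_DSC_SUPPORT, dsc_dpcd,
				     sizeof(dsc_dpcd)) < 0)
			return;	/* AUX read failed */

		DRM_DEBUG_KMS("slices: %u, line buf: %u bits, max bpc: %u\n",
			      drm_dp_dsc_sink_max_slice_count(dsc_dpcd, false),
			      drm_dp_dsc_sink_line_buf_depth(dsc_dpcd),
			      drm_dp_dsc_sink_max_color_depth(dsc_dpcd));
	}
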
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 1c2857f13ad4..0ff878c994e2 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -75,6 +75,7 @@ i915-y += i915_cmd_parser.o \
 	  i915_gemfs.o \
 	  i915_query.o \
 	  i915_request.o \
+	  i915_scheduler.o \
 	  i915_timeline.o \
 	  i915_trace_points.o \
 	  i915_vma.o \
@@ -112,6 +113,8 @@ i915-y += intel_audio.o \
 	  intel_bios.o \
 	  intel_cdclk.o \
 	  intel_color.o \
+	  intel_combo_phy.o \
+	  intel_connector.o \
 	  intel_display.o \
 	  intel_dpio_phy.o \
 	  intel_dpll_mgr.o \
@@ -120,9 +123,9 @@ i915-y += intel_audio.o \
 	  intel_frontbuffer.o \
 	  intel_hdcp.o \
 	  intel_hotplug.o \
-	  intel_modes.o \
 	  intel_overlay.o \
 	  intel_psr.o \
+	  intel_quirks.o \
 	  intel_sideband.o \
 	  intel_sprite.o
 i915-$(CONFIG_ACPI) += intel_acpi.o intel_opregion.o
@@ -142,6 +145,7 @@ i915-y += dvo_ch7017.o \
 	  intel_dp_link_training.o \
 	  intel_dp_mst.o \
 	  intel_dp.o \
+	  intel_dsi.o \
 	  intel_dsi_dcs_backlight.o \
 	  intel_dsi_vbt.o \
 	  intel_dvo.o \
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index ea34003d6dd2..b8fbe3fabea3 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -334,6 +334,28 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	i915_gem_object_put(wa_ctx->indirect_ctx.obj);
 }
 
+static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
+					 struct i915_gem_context *ctx)
+{
+	struct intel_vgpu_mm *mm = workload->shadow_mm;
+	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
+	int i = 0;
+
+	if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed)
+		return -1;
+
+	if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
+		px_dma(&ppgtt->pml4) = mm->ppgtt_mm.shadow_pdps[0];
+	} else {
+		for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) {
+			px_dma(ppgtt->pdp.page_directory[i]) =
+				mm->ppgtt_mm.shadow_pdps[i];
+		}
+	}
+
+	return 0;
+}
+
 /**
  * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
  * shadow it as well, include ringbuffer,wa_ctx and ctx.
@@ -358,6 +380,12 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	if (workload->req)
 		return 0;
 
+	ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
+	if (ret < 0) {
+		gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
+		return ret;
+	}
+
 	/* pin shadow context by gvt even the shadow context will be pinned
 	 * when i915 alloc request. That is because gvt will update the guest
 	 * context from shadow context when workload is completed, and at that
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 4f3ac0a12889..670db5073d70 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2215,8 +2215,23 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	struct drm_device *dev = &dev_priv->drm;
 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	u32 act_freq = rps->cur_freq;
 	struct drm_file *file;
 
+	if (intel_runtime_pm_get_if_in_use(dev_priv)) {
+		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+			mutex_lock(&dev_priv->pcu_lock);
+			act_freq = vlv_punit_read(dev_priv,
+						  PUNIT_REG_GPU_FREQ_STS);
+			act_freq = (act_freq >> 8) & 0xff;
+			mutex_unlock(&dev_priv->pcu_lock);
+		} else {
+			act_freq = intel_get_cagf(dev_priv,
+						  I915_READ(GEN6_RPSTAT1));
+		}
+		intel_runtime_pm_put(dev_priv);
+	}
+
 	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
 	seq_printf(m, "GPU busy? %s [%d requests]\n",
 		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
@@ -2224,8 +2239,9 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
2224 seq_printf(m, "Boosts outstanding? %d\n", 2239 seq_printf(m, "Boosts outstanding? %d\n",
2225 atomic_read(&rps->num_waiters)); 2240 atomic_read(&rps->num_waiters));
2226 seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive)); 2241 seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
2227 seq_printf(m, "Frequency requested %d\n", 2242 seq_printf(m, "Frequency requested %d, actual %d\n",
2228 intel_gpu_freq(dev_priv, rps->cur_freq)); 2243 intel_gpu_freq(dev_priv, rps->cur_freq),
2244 intel_gpu_freq(dev_priv, act_freq));
2229 seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n", 2245 seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
2230 intel_gpu_freq(dev_priv, rps->min_freq), 2246 intel_gpu_freq(dev_priv, rps->min_freq),
2231 intel_gpu_freq(dev_priv, rps->min_freq_softlimit), 2247 intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
@@ -2900,16 +2916,15 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
2900 seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version), 2916 seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
2901 CSR_VERSION_MINOR(csr->version)); 2917 CSR_VERSION_MINOR(csr->version));
2902 2918
2903 if (IS_KABYLAKE(dev_priv) || 2919 if (WARN_ON(INTEL_GEN(dev_priv) > 11))
2904 (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6))) { 2920 goto out;
2905 seq_printf(m, "DC3 -> DC5 count: %d\n", 2921
2906 I915_READ(SKL_CSR_DC3_DC5_COUNT)); 2922 seq_printf(m, "DC3 -> DC5 count: %d\n",
2923 I915_READ(IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
2924 SKL_CSR_DC3_DC5_COUNT));
2925 if (!IS_GEN9_LP(dev_priv))
2907 seq_printf(m, "DC5 -> DC6 count: %d\n", 2926 seq_printf(m, "DC5 -> DC6 count: %d\n",
2908 I915_READ(SKL_CSR_DC5_DC6_COUNT)); 2927 I915_READ(SKL_CSR_DC5_DC6_COUNT));
2909 } else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) {
2910 seq_printf(m, "DC3 -> DC5 count: %d\n",
2911 I915_READ(BXT_CSR_DC3_DC5_COUNT));
2912 }
2913 2928
2914out: 2929out:
2915 seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0))); 2930 seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
@@ -3049,16 +3064,17 @@ static void intel_connector_info(struct seq_file *m,
3049 seq_printf(m, "connector %d: type %s, status: %s\n", 3064 seq_printf(m, "connector %d: type %s, status: %s\n",
3050 connector->base.id, connector->name, 3065 connector->base.id, connector->name,
3051 drm_get_connector_status_name(connector->status)); 3066 drm_get_connector_status_name(connector->status));
3052 if (connector->status == connector_status_connected) { 3067
3053 seq_printf(m, "\tname: %s\n", connector->display_info.name); 3068 if (connector->status == connector_status_disconnected)
3054 seq_printf(m, "\tphysical dimensions: %dx%dmm\n", 3069 return;
3055 connector->display_info.width_mm, 3070
3056 connector->display_info.height_mm); 3071 seq_printf(m, "\tname: %s\n", connector->display_info.name);
3057 seq_printf(m, "\tsubpixel order: %s\n", 3072 seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
3058 drm_get_subpixel_order_name(connector->display_info.subpixel_order)); 3073 connector->display_info.width_mm,
3059 seq_printf(m, "\tCEA rev: %d\n", 3074 connector->display_info.height_mm);
3060 connector->display_info.cea_rev); 3075 seq_printf(m, "\tsubpixel order: %s\n",
3061 } 3076 drm_get_subpixel_order_name(connector->display_info.subpixel_order));
3077 seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
3062 3078
3063 if (!intel_encoder) 3079 if (!intel_encoder)
3064 return; 3080 return;
@@ -4172,6 +4188,7 @@ i915_drop_caches_set(void *data, u64 val)
 
 	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
 		  val, val & DROP_ALL);
+	intel_runtime_pm_get(i915);
 
 	if (val & DROP_RESET_ACTIVE && !intel_engines_are_idle(i915))
 		i915_gem_set_wedged(i915);
@@ -4181,7 +4198,7 @@ i915_drop_caches_set(void *data, u64 val)
4181 if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) { 4198 if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) {
4182 ret = mutex_lock_interruptible(&i915->drm.struct_mutex); 4199 ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
4183 if (ret) 4200 if (ret)
4184 return ret; 4201 goto out;
4185 4202
4186 if (val & DROP_ACTIVE) 4203 if (val & DROP_ACTIVE)
4187 ret = i915_gem_wait_for_idle(i915, 4204 ret = i915_gem_wait_for_idle(i915,
@@ -4189,11 +4206,8 @@ i915_drop_caches_set(void *data, u64 val)
 						     I915_WAIT_LOCKED,
 						     MAX_SCHEDULE_TIMEOUT);
 
-		if (ret == 0 && val & DROP_RESET_SEQNO) {
-			intel_runtime_pm_get(i915);
+		if (ret == 0 && val & DROP_RESET_SEQNO)
 			ret = i915_gem_set_global_seqno(&i915->drm, 1);
-			intel_runtime_pm_put(i915);
-		}
 
 		if (val & DROP_RETIRE)
 			i915_retire_requests(i915);
@@ -4231,6 +4245,9 @@ i915_drop_caches_set(void *data, u64 val)
4231 if (val & DROP_FREED) 4245 if (val & DROP_FREED)
4232 i915_gem_drain_freed_objects(i915); 4246 i915_gem_drain_freed_objects(i915);
4233 4247
4248out:
4249 intel_runtime_pm_put(i915);
4250
4234 return ret; 4251 return ret;
4235} 4252}
4236 4253
@@ -4641,24 +4658,122 @@ static const struct file_operations i915_hpd_storm_ctl_fops = {
 	.write = i915_hpd_storm_ctl_write
 };
 
+static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
+{
+	struct drm_i915_private *dev_priv = m->private;
+
+	seq_printf(m, "Enabled: %s\n",
+		   yesno(dev_priv->hotplug.hpd_short_storm_enabled));
+
+	return 0;
+}
+
+static int
+i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, i915_hpd_short_storm_ctl_show,
+			   inode->i_private);
+}
+
+static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
+					      const char __user *ubuf,
+					      size_t len, loff_t *offp)
+{
+	struct seq_file *m = file->private_data;
+	struct drm_i915_private *dev_priv = m->private;
+	struct i915_hotplug *hotplug = &dev_priv->hotplug;
+	char *newline;
+	char tmp[16];
+	int i;
+	bool new_state;
+
+	if (len >= sizeof(tmp))
+		return -EINVAL;
+
+	if (copy_from_user(tmp, ubuf, len))
+		return -EFAULT;
+
+	tmp[len] = '\0';
+
+	/* Strip newline, if any */
+	newline = strchr(tmp, '\n');
+	if (newline)
+		*newline = '\0';
+
+	/* Reset to the "default" state for this system */
+	if (strcmp(tmp, "reset") == 0)
+		new_state = !HAS_DP_MST(dev_priv);
+	else if (kstrtobool(tmp, &new_state) != 0)
+		return -EINVAL;
+
+	DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
+		      new_state ? "En" : "Dis");
+
+	spin_lock_irq(&dev_priv->irq_lock);
+	hotplug->hpd_short_storm_enabled = new_state;
+	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
+	for_each_hpd_pin(i)
+		hotplug->stats[i].count = 0;
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	/* Re-enable hpd immediately if we were in an irq storm */
+	flush_delayed_work(&dev_priv->hotplug.reenable_work);
+
+	return len;
+}
+
+static const struct file_operations i915_hpd_short_storm_ctl_fops = {
+	.owner = THIS_MODULE,
+	.open = i915_hpd_short_storm_ctl_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.write = i915_hpd_short_storm_ctl_write,
+};
+
 static int i915_drrs_ctl_set(void *data, u64 val)
 {
 	struct drm_i915_private *dev_priv = data;
 	struct drm_device *dev = &dev_priv->drm;
-	struct intel_crtc *intel_crtc;
-	struct intel_encoder *encoder;
-	struct intel_dp *intel_dp;
+	struct intel_crtc *crtc;
 
 	if (INTEL_GEN(dev_priv) < 7)
 		return -ENODEV;
 
-	drm_modeset_lock_all(dev);
-	for_each_intel_crtc(dev, intel_crtc) {
-		if (!intel_crtc->base.state->active ||
-		    !intel_crtc->config->has_drrs)
-			continue;
+	for_each_intel_crtc(dev, crtc) {
+		struct drm_connector_list_iter conn_iter;
+		struct intel_crtc_state *crtc_state;
+		struct drm_connector *connector;
+		struct drm_crtc_commit *commit;
+		int ret;
+
+		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
+		if (ret)
+			return ret;
+
+		crtc_state = to_intel_crtc_state(crtc->base.state);
+
+		if (!crtc_state->base.active ||
+		    !crtc_state->has_drrs)
+			goto out;
+
+		commit = crtc_state->base.commit;
+		if (commit) {
+			ret = wait_for_completion_interruptible(&commit->hw_done);
+			if (ret)
+				goto out;
+		}
 
-		for_each_encoder_on_crtc(dev, &intel_crtc->base, encoder) {
+		drm_connector_list_iter_begin(dev, &conn_iter);
+		drm_for_each_connector_iter(connector, &conn_iter) {
+			struct intel_encoder *encoder;
+			struct intel_dp *intel_dp;
+
+			if (!(crtc_state->base.connector_mask &
+			      drm_connector_mask(connector)))
+				continue;
+
+			encoder = intel_attached_encoder(connector);
 			if (encoder->type != INTEL_OUTPUT_EDP)
 				continue;
 
@@ -4668,13 +4783,18 @@ static int i915_drrs_ctl_set(void *data, u64 val)
 			intel_dp = enc_to_intel_dp(&encoder->base);
 			if (val)
 				intel_edp_drrs_enable(intel_dp,
-						      intel_crtc->config);
+						      crtc_state);
 			else
 				intel_edp_drrs_disable(intel_dp,
-						       intel_crtc->config);
+						       crtc_state);
 		}
+		drm_connector_list_iter_end(&conn_iter);
+
+out:
+		drm_modeset_unlock(&crtc->base.mutex);
+		if (ret)
+			return ret;
 	}
-	drm_modeset_unlock_all(dev);
 
 	return 0;
 }
@@ -4818,6 +4938,7 @@ static const struct i915_debugfs_files {
4818 {"i915_guc_log_level", &i915_guc_log_level_fops}, 4938 {"i915_guc_log_level", &i915_guc_log_level_fops},
4819 {"i915_guc_log_relay", &i915_guc_log_relay_fops}, 4939 {"i915_guc_log_relay", &i915_guc_log_relay_fops},
4820 {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops}, 4940 {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
4941 {"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
4821 {"i915_ipc_status", &i915_ipc_status_fops}, 4942 {"i915_ipc_status", &i915_ipc_status_fops},
4822 {"i915_drrs_ctl", &i915_drrs_ctl_fops}, 4943 {"i915_drrs_ctl", &i915_drrs_ctl_fops},
4823 {"i915_edp_psr_debug", &i915_edp_psr_debug_fops} 4944 {"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
@@ -4899,13 +5020,10 @@ static int i915_dpcd_show(struct seq_file *m, void *data)
 			continue;
 
 		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
-		if (err <= 0) {
-			DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
-				  size, b->offset, err);
-			continue;
-		}
-
-		seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
+		if (err < 0)
+			seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
+		else
+			seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
 	}
 
 	return 0;
@@ -4934,6 +5052,28 @@ static int i915_panel_show(struct seq_file *m, void *data)
 }
 DEFINE_SHOW_ATTRIBUTE(i915_panel);
 
+static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
+{
+	struct drm_connector *connector = m->private;
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+
+	if (connector->status != connector_status_connected)
+		return -ENODEV;
+
+	/* HDCP is supported by connector */
+	if (!intel_connector->hdcp.shim)
+		return -EINVAL;
+
+	seq_printf(m, "%s:%d HDCP version: ", connector->name,
+		   connector->base.id);
+	seq_printf(m, "%s ", !intel_hdcp_capable(intel_connector) ?
+		   "None" : "HDCP1.4");
+	seq_puts(m, "\n");
+
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
+
 /**
  * i915_debugfs_connector_add - add i915 specific connector debugfs files
  * @connector: pointer to a registered drm_connector
@@ -4963,5 +5103,12 @@ int i915_debugfs_connector_add(struct drm_connector *connector)
4963 connector, &i915_psr_sink_status_fops); 5103 connector, &i915_psr_sink_status_fops);
4964 } 5104 }
4965 5105
5106 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5107 connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
5108 connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
5109 debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
5110 connector, &i915_hdcp_sink_capability_fops);
5111 }
5112
4966 return 0; 5113 return 0;
4967} 5114}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ffdbbac4400e..b1d23c73c147 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -345,7 +345,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
 		value = HAS_WT(dev_priv);
 		break;
 	case I915_PARAM_HAS_ALIASING_PPGTT:
-		value = USES_PPGTT(dev_priv);
+		value = min_t(int, INTEL_PPGTT(dev_priv), I915_GEM_PPGTT_FULL);
 		break;
 	case I915_PARAM_HAS_SEMAPHORES:
 		value = HAS_LEGACY_SEMAPHORES(dev_priv);
@@ -645,6 +645,13 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	if (i915_inject_load_failure())
 		return -ENODEV;
 
+	if (INTEL_INFO(dev_priv)->num_pipes) {
+		ret = drm_vblank_init(&dev_priv->drm,
+				      INTEL_INFO(dev_priv)->num_pipes);
+		if (ret)
+			goto out;
+	}
+
 	intel_bios_init(dev_priv);
 
 	/* If we have > 1 VGA cards, then we need to arbitrate access
@@ -687,7 +694,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	if (ret)
 		goto cleanup_modeset;
 
-	intel_setup_overlay(dev_priv);
+	intel_overlay_setup(dev_priv);
 
 	if (INTEL_INFO(dev_priv)->num_pipes == 0)
 		return 0;
@@ -699,6 +706,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	/* Only enable hotplug handling once the fbdev is fully set up. */
 	intel_hpd_init(dev_priv);
 
+	intel_init_ipc(dev_priv);
+
 	return 0;
 
 cleanup_gem:
@@ -1030,6 +1039,7 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
 
 err_uncore:
 	intel_uncore_fini(dev_priv);
+	i915_mmio_cleanup(dev_priv);
 err_bridge:
 	pci_dev_put(dev_priv->bridge_dev);
 
@@ -1049,17 +1059,6 @@ static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
 
 static void intel_sanitize_options(struct drm_i915_private *dev_priv)
 {
-	/*
-	 * i915.enable_ppgtt is read-only, so do an early pass to validate the
-	 * user's requested state against the hardware/driver capabilities. We
-	 * do this now so that we can print out any log messages once rather
-	 * than every time we check intel_enable_ppgtt().
-	 */
-	i915_modparams.enable_ppgtt =
-		intel_sanitize_enable_ppgtt(dev_priv,
-					    i915_modparams.enable_ppgtt);
-	DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915_modparams.enable_ppgtt);
-
 	intel_gvt_sanitize_options(dev_priv);
 }
 
@@ -1340,7 +1339,7 @@ intel_get_dram_info(struct drm_i915_private *dev_priv)
 	/* Need to calculate bandwidth only for Gen9 */
 	if (IS_BROXTON(dev_priv))
 		ret = bxt_get_dram_info(dev_priv);
-	else if (INTEL_GEN(dev_priv) == 9)
+	else if (IS_GEN9(dev_priv))
 		ret = skl_get_dram_info(dev_priv);
 	else
 		ret = skl_dram_get_channels_info(dev_priv);
@@ -1375,6 +1374,15 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 
 	intel_device_info_runtime_init(mkwrite_device_info(dev_priv));
 
+	if (HAS_PPGTT(dev_priv)) {
+		if (intel_vgpu_active(dev_priv) &&
+		    !intel_vgpu_has_full_48bit_ppgtt(dev_priv)) {
+			i915_report_error(dev_priv,
+					  "incompatible vGPU found, support for isolated ppGTT required\n");
+			return -ENXIO;
+		}
+	}
+
 	intel_sanitize_options(dev_priv);
 
 	i915_perf_init(dev_priv);
@@ -1630,14 +1638,16 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
 		(struct intel_device_info *)ent->driver_data;
 	struct intel_device_info *device_info;
 	struct drm_i915_private *i915;
+	int err;
 
 	i915 = kzalloc(sizeof(*i915), GFP_KERNEL);
 	if (!i915)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
-	if (drm_dev_init(&i915->drm, &driver, &pdev->dev)) {
+	err = drm_dev_init(&i915->drm, &driver, &pdev->dev);
+	if (err) {
 		kfree(i915);
-		return NULL;
+		return ERR_PTR(err);
 	}
 
 	i915->drm.pdev = pdev;
@@ -1650,8 +1660,8 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
 	device_info->device_id = pdev->device;
 
 	BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
-		     sizeof(device_info->platform_mask) * BITS_PER_BYTE);
-	BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
+		     BITS_PER_TYPE(device_info->platform_mask));
+	BUG_ON(device_info->gen > BITS_PER_TYPE(device_info->gen_mask));
 
 	return i915;
 }
@@ -1686,8 +1696,8 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
 	int ret;
 
 	dev_priv = i915_driver_create(pdev, ent);
-	if (!dev_priv)
-		return -ENOMEM;
+	if (IS_ERR(dev_priv))
+		return PTR_ERR(dev_priv);
 
 	/* Disable nuclear pageflip by default on pre-ILK */
 	if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
@@ -1711,26 +1721,12 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (ret < 0)
 		goto out_cleanup_mmio;
 
-	/*
-	 * TODO: move the vblank init and parts of modeset init steps into one
-	 * of the i915_driver_init_/i915_driver_register functions according
-	 * to the role/effect of the given init step.
-	 */
-	if (INTEL_INFO(dev_priv)->num_pipes) {
-		ret = drm_vblank_init(&dev_priv->drm,
-				      INTEL_INFO(dev_priv)->num_pipes);
-		if (ret)
-			goto out_cleanup_hw;
-	}
-
 	ret = i915_load_modeset_init(&dev_priv->drm);
 	if (ret < 0)
 		goto out_cleanup_hw;
 
 	i915_driver_register(dev_priv);
 
-	intel_init_ipc(dev_priv);
-
 	enable_rpm_wakeref_asserts(dev_priv);
 
 	i915_welcome_messages(dev_priv);
@@ -1782,7 +1778,6 @@ void i915_driver_unload(struct drm_device *dev)
 	i915_reset_error_state(dev_priv);
 
 	i915_gem_fini(dev_priv);
-	intel_fbc_cleanup_cfb(dev_priv);
 
 	intel_power_domains_fini_hw(dev_priv);
 
@@ -1920,9 +1915,7 @@ static int i915_drm_suspend(struct drm_device *dev)
 	i915_save_state(dev_priv);
 
 	opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
-	intel_opregion_notify_adapter(dev_priv, opregion_target_state);
-
-	intel_opregion_unregister(dev_priv);
+	intel_opregion_suspend(dev_priv, opregion_target_state);
 
 	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
 
@@ -1963,7 +1956,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
 			      get_suspend_mode(dev_priv, hibernation));
 
 	ret = 0;
-	if (IS_GEN9_LP(dev_priv))
+	if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv))
 		bxt_enable_dc9(dev_priv);
 	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 		hsw_enable_pc8(dev_priv);
@@ -2041,7 +2034,6 @@ static int i915_drm_resume(struct drm_device *dev)
 
 	i915_restore_state(dev_priv);
 	intel_pps_unlock_regs_wa(dev_priv);
-	intel_opregion_setup(dev_priv);
 
 	intel_init_pch_refclk(dev_priv);
 
@@ -2083,12 +2075,10 @@ static int i915_drm_resume(struct drm_device *dev)
 	 * */
 	intel_hpd_init(dev_priv);
 
-	intel_opregion_register(dev_priv);
+	intel_opregion_resume(dev_priv);
 
 	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
 
-	intel_opregion_notify_adapter(dev_priv, PCI_D0);
-
 	intel_power_domains_enable(dev_priv);
 
 	enable_rpm_wakeref_asserts(dev_priv);
@@ -2156,7 +2146,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
 
 	intel_uncore_resume_early(dev_priv);
 
-	if (IS_GEN9_LP(dev_priv)) {
+	if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv)) {
 		gen9_sanitize_dc_state(dev_priv);
 		bxt_disable_dc9(dev_priv);
 	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
@@ -2923,7 +2913,10 @@ static int intel_runtime_suspend(struct device *kdev)
 	intel_uncore_suspend(dev_priv);
 
 	ret = 0;
-	if (IS_GEN9_LP(dev_priv)) {
+	if (INTEL_GEN(dev_priv) >= 11) {
+		icl_display_core_uninit(dev_priv);
+		bxt_enable_dc9(dev_priv);
+	} else if (IS_GEN9_LP(dev_priv)) {
 		bxt_display_core_uninit(dev_priv);
 		bxt_enable_dc9(dev_priv);
 	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
@@ -3008,7 +3001,18 @@ static int intel_runtime_resume(struct device *kdev)
 	if (intel_uncore_unclaimed_mmio(dev_priv))
 		DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
 
-	if (IS_GEN9_LP(dev_priv)) {
+	if (INTEL_GEN(dev_priv) >= 11) {
+		bxt_disable_dc9(dev_priv);
+		icl_display_core_init(dev_priv, true);
+		if (dev_priv->csr.dmc_payload) {
+			if (dev_priv->csr.allowed_dc_mask &
+			    DC_STATE_EN_UPTO_DC6)
+				skl_enable_dc6(dev_priv);
+			else if (dev_priv->csr.allowed_dc_mask &
+				 DC_STATE_EN_UPTO_DC5)
+				gen9_enable_dc5(dev_priv);
+		}
+	} else if (IS_GEN9_LP(dev_priv)) {
 		bxt_disable_dc9(dev_priv);
 		bxt_display_core_init(dev_priv, true);
 		if (dev_priv->csr.dmc_payload &&
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9102571e9692..21e4405e2168 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -54,6 +54,7 @@
 #include <drm/drm_cache.h>
 #include <drm/drm_util.h>
 
+#include "i915_fixed.h"
 #include "i915_params.h"
 #include "i915_reg.h"
 #include "i915_utils.h"
@@ -87,8 +88,8 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20180921"
-#define DRIVER_TIMESTAMP	1537521997
+#define DRIVER_DATE		"20181102"
+#define DRIVER_TIMESTAMP	1541153051
 
 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
  * WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -127,144 +128,6 @@ bool i915_error_injected(void);
 	__i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
 		      fmt, ##__VA_ARGS__)
 
-typedef struct {
-	uint32_t val;
-} uint_fixed_16_16_t;
-
-#define FP_16_16_MAX ({ \
-	uint_fixed_16_16_t fp; \
-	fp.val = UINT_MAX; \
-	fp; \
-})
-
-static inline bool is_fixed16_zero(uint_fixed_16_16_t val)
-{
-	if (val.val == 0)
-		return true;
-	return false;
-}
-
-static inline uint_fixed_16_16_t u32_to_fixed16(uint32_t val)
-{
-	uint_fixed_16_16_t fp;
-
-	WARN_ON(val > U16_MAX);
-
-	fp.val = val << 16;
-	return fp;
-}
-
-static inline uint32_t fixed16_to_u32_round_up(uint_fixed_16_16_t fp)
-{
-	return DIV_ROUND_UP(fp.val, 1 << 16);
-}
-
-static inline uint32_t fixed16_to_u32(uint_fixed_16_16_t fp)
-{
-	return fp.val >> 16;
-}
-
-static inline uint_fixed_16_16_t min_fixed16(uint_fixed_16_16_t min1,
-					     uint_fixed_16_16_t min2)
-{
-	uint_fixed_16_16_t min;
-
-	min.val = min(min1.val, min2.val);
-	return min;
-}
-
-static inline uint_fixed_16_16_t max_fixed16(uint_fixed_16_16_t max1,
-					     uint_fixed_16_16_t max2)
-{
-	uint_fixed_16_16_t max;
-
-	max.val = max(max1.val, max2.val);
-	return max;
-}
-
-static inline uint_fixed_16_16_t clamp_u64_to_fixed16(uint64_t val)
-{
-	uint_fixed_16_16_t fp;
-	WARN_ON(val > U32_MAX);
-	fp.val = (uint32_t) val;
-	return fp;
-}
-
-static inline uint32_t div_round_up_fixed16(uint_fixed_16_16_t val,
-					    uint_fixed_16_16_t d)
-{
-	return DIV_ROUND_UP(val.val, d.val);
-}
-
-static inline uint32_t mul_round_up_u32_fixed16(uint32_t val,
-						uint_fixed_16_16_t mul)
-{
-	uint64_t intermediate_val;
-
-	intermediate_val = (uint64_t) val * mul.val;
-	intermediate_val = DIV_ROUND_UP_ULL(intermediate_val, 1 << 16);
-	WARN_ON(intermediate_val > U32_MAX);
-	return (uint32_t) intermediate_val;
-}
-
-static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val,
-					     uint_fixed_16_16_t mul)
-{
-	uint64_t intermediate_val;
-
-	intermediate_val = (uint64_t) val.val * mul.val;
-	intermediate_val = intermediate_val >> 16;
-	return clamp_u64_to_fixed16(intermediate_val);
-}
-
-static inline uint_fixed_16_16_t div_fixed16(uint32_t val, uint32_t d)
-{
-	uint64_t interm_val;
-
-	interm_val = (uint64_t)val << 16;
-	interm_val = DIV_ROUND_UP_ULL(interm_val, d);
-	return clamp_u64_to_fixed16(interm_val);
-}
-
-static inline uint32_t div_round_up_u32_fixed16(uint32_t val,
-						uint_fixed_16_16_t d)
-{
-	uint64_t interm_val;
-
-	interm_val = (uint64_t)val << 16;
-	interm_val = DIV_ROUND_UP_ULL(interm_val, d.val);
-	WARN_ON(interm_val > U32_MAX);
-	return (uint32_t) interm_val;
-}
-
-static inline uint_fixed_16_16_t mul_u32_fixed16(uint32_t val,
-						 uint_fixed_16_16_t mul)
-{
-	uint64_t intermediate_val;
-
-	intermediate_val = (uint64_t) val * mul.val;
-	return clamp_u64_to_fixed16(intermediate_val);
-}
-
-static inline uint_fixed_16_16_t add_fixed16(uint_fixed_16_16_t add1,
-					     uint_fixed_16_16_t add2)
-{
-	uint64_t interm_sum;
-
-	interm_sum = (uint64_t) add1.val + add2.val;
-	return clamp_u64_to_fixed16(interm_sum);
-}
-
-static inline uint_fixed_16_16_t add_fixed16_u32(uint_fixed_16_16_t add1,
-						 uint32_t add2)
-{
-	uint64_t interm_sum;
-	uint_fixed_16_16_t interm_add2 = u32_to_fixed16(add2);
-
-	interm_sum = (uint64_t) add1.val + interm_add2.val;
-	return clamp_u64_to_fixed16(interm_sum);
-}
-
 enum hpd_pin {
 	HPD_NONE = 0,
 	HPD_TV = HPD_NONE,	/* TV is known to be unreliable */
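
Per the diffstat, these helpers move into the new drivers/gpu/drm/i915/i915_fixed.h (pulled in by the #include added above). For reference, a standalone sketch of the 16.16 semantics the functions implement — illustrative only, not driver code:

	#include <stdint.h>

	/* 16.16 fixed point stores x as x * 2^16, so a product needs a 64-bit
	 * intermediate and a 16-bit shift back down (cf. mul_fixed16()). */
	static uint32_t fp16_mul(uint32_t a, uint32_t b)
	{
		return (uint32_t)(((uint64_t)a * b) >> 16);
	}

	/* Example: 1.5 (0x00018000) * 1.25 (0x00014000) == 1.875 (0x0001e000) */
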
@@ -283,7 +146,8 @@ enum hpd_pin {
 #define for_each_hpd_pin(__pin) \
 	for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
 
-#define HPD_STORM_DEFAULT_THRESHOLD 5
+/* Threshold == 5 for long IRQs, 50 for short */
+#define HPD_STORM_DEFAULT_THRESHOLD 50
 
 struct i915_hotplug {
 	struct work_struct hotplug_work;
@@ -308,6 +172,8 @@ struct i915_hotplug {
 	bool poll_enabled;
 
 	unsigned int hpd_storm_threshold;
+	/* Whether or not to count short HPD IRQs in HPD storms */
+	u8 hpd_short_storm_enabled;
 
 	/*
 	 * if we get a HPD irq from DP and a HPD irq from non-DP
@@ -465,8 +331,10 @@ struct drm_i915_display_funcs {
 struct intel_csr {
 	struct work_struct work;
 	const char *fw_path;
+	uint32_t required_version;
+	uint32_t max_fw_size; /* bytes */
 	uint32_t *dmc_payload;
-	uint32_t dmc_fw_size;
+	uint32_t dmc_fw_size; /* dwords */
 	uint32_t version;
 	uint32_t mmio_count;
 	i915_reg_t mmioaddr[8];
@@ -546,6 +414,8 @@ struct intel_fbc {
 		int adjusted_y;
 
 		int y;
+
+		uint16_t pixel_blend_mode;
 	} plane;
 
 	struct {
@@ -630,7 +500,6 @@ struct i915_psr {
 	bool sink_psr2_support;
 	bool link_standby;
 	bool colorimetry_support;
-	bool alpm;
 	bool psr2_enabled;
 	u8 sink_sync_latency;
 	ktime_t last_entry_attempt;
@@ -918,6 +787,11 @@ struct i915_power_well_desc {
 			/* The pw is backing the VGA functionality */
 			bool has_vga:1;
 			bool has_fuses:1;
+			/*
+			 * The pw is for an ICL+ TypeC PHY port in
+			 * Thunderbolt mode.
+			 */
+			bool is_tc_tbt:1;
 		} hsw;
 	};
 	const struct i915_power_well_ops *ops;
@@ -1042,17 +916,6 @@ struct i915_gem_mm {
 
 #define I915_ENGINE_WEDGED_TIMEOUT	(60 * HZ)  /* Reset but no recovery? */
 
-#define DP_AUX_A 0x40
-#define DP_AUX_B 0x10
-#define DP_AUX_C 0x20
-#define DP_AUX_D 0x30
-#define DP_AUX_E 0x50
-#define DP_AUX_F 0x60
-
-#define DDC_PIN_B 0x05
-#define DDC_PIN_C 0x04
-#define DDC_PIN_D 0x06
-
 struct ddi_vbt_port_info {
 	int max_tmds_clock;
 
@@ -1099,6 +962,7 @@ struct intel_vbt_data {
 	unsigned int panel_type:4;
 	int lvds_ssc_freq;
 	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
+	enum drm_panel_orientation orientation;
 
 	enum drrs_support_type drrs_type;
 
@@ -1144,6 +1008,7 @@ struct intel_vbt_data {
1144 u8 *data; 1008 u8 *data;
1145 const u8 *sequence[MIPI_SEQ_MAX]; 1009 const u8 *sequence[MIPI_SEQ_MAX];
1146 u8 *deassert_seq; /* Used by fixup_mipi_sequences() */ 1010 u8 *deassert_seq; /* Used by fixup_mipi_sequences() */
1011 enum drm_panel_orientation orientation;
1147 } dsi; 1012 } dsi;
1148 1013
1149 int crt_ddc_pin; 1014 int crt_ddc_pin;
@@ -1240,9 +1105,9 @@ struct skl_ddb_values {
1240}; 1105};
1241 1106
1242struct skl_wm_level { 1107struct skl_wm_level {
1243 bool plane_en;
1244 uint16_t plane_res_b; 1108 uint16_t plane_res_b;
1245 uint8_t plane_res_l; 1109 uint8_t plane_res_l;
1110 bool plane_en;
1246}; 1111};
1247 1112
1248/* Stores plane specific WM parameters */ 1113/* Stores plane specific WM parameters */
@@ -1520,30 +1385,12 @@ struct i915_oa_ops {
1520 bool (*is_valid_flex_reg)(struct drm_i915_private *dev_priv, u32 addr); 1385 bool (*is_valid_flex_reg)(struct drm_i915_private *dev_priv, u32 addr);
1521 1386
1522 /** 1387 /**
1523 * @init_oa_buffer: Resets the head and tail pointers of the
1524 * circular buffer for periodic OA reports.
1525 *
1526 * Called when first opening a stream for OA metrics, but also may be
1527 * called in response to an OA buffer overflow or other error
1528 * condition.
1529 *
1530 * Note it may be necessary to clear the full OA buffer here as part of
 1531 * maintaining the invariant that new reports must be written to
 1532 * zeroed memory for us to be able to reliably detect if an expected
1533 * report has not yet landed in memory. (At least on Haswell the OA
1534 * buffer tail pointer is not synchronized with reports being visible
1535 * to the CPU)
1536 */
1537 void (*init_oa_buffer)(struct drm_i915_private *dev_priv);
1538
1539 /**
1540 * @enable_metric_set: Selects and applies any MUX configuration to set 1388 * @enable_metric_set: Selects and applies any MUX configuration to set
1541 * up the Boolean and Custom (B/C) counters that are part of the 1389 * up the Boolean and Custom (B/C) counters that are part of the
1542 * counter reports being sampled. May apply system constraints such as 1390 * counter reports being sampled. May apply system constraints such as
1543 * disabling EU clock gating as required. 1391 * disabling EU clock gating as required.
1544 */ 1392 */
1545 int (*enable_metric_set)(struct drm_i915_private *dev_priv, 1393 int (*enable_metric_set)(struct i915_perf_stream *stream);
1546 const struct i915_oa_config *oa_config);
1547 1394
1548 /** 1395 /**
1549 * @disable_metric_set: Remove system constraints associated with using 1396 * @disable_metric_set: Remove system constraints associated with using
@@ -1554,12 +1401,12 @@ struct i915_oa_ops {
1554 /** 1401 /**
1555 * @oa_enable: Enable periodic sampling 1402 * @oa_enable: Enable periodic sampling
1556 */ 1403 */
1557 void (*oa_enable)(struct drm_i915_private *dev_priv); 1404 void (*oa_enable)(struct i915_perf_stream *stream);
1558 1405
1559 /** 1406 /**
1560 * @oa_disable: Disable periodic sampling 1407 * @oa_disable: Disable periodic sampling
1561 */ 1408 */
1562 void (*oa_disable)(struct drm_i915_private *dev_priv); 1409 void (*oa_disable)(struct i915_perf_stream *stream);
1563 1410
1564 /** 1411 /**
1565 * @read: Copy data from the circular OA buffer into a given userspace 1412 * @read: Copy data from the circular OA buffer into a given userspace
@@ -2322,6 +2169,8 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
2322 (((__iter).curr += PAGE_SIZE) >= (__iter).max) ? \ 2169 (((__iter).curr += PAGE_SIZE) >= (__iter).max) ? \
2323 (__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0) 2170 (__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0)
2324 2171
2172bool i915_sg_trim(struct sg_table *orig_st);
2173
2325static inline unsigned int i915_sg_page_sizes(struct scatterlist *sg) 2174static inline unsigned int i915_sg_page_sizes(struct scatterlist *sg)
2326{ 2175{
2327 unsigned int page_sizes; 2176 unsigned int page_sizes;
@@ -2367,20 +2216,12 @@ intel_info(const struct drm_i915_private *dev_priv)
2367#define REVID_FOREVER 0xff 2216#define REVID_FOREVER 0xff
2368#define INTEL_REVID(dev_priv) ((dev_priv)->drm.pdev->revision) 2217#define INTEL_REVID(dev_priv) ((dev_priv)->drm.pdev->revision)
2369 2218
2370#define GEN_FOREVER (0)
2371
2372#define INTEL_GEN_MASK(s, e) ( \ 2219#define INTEL_GEN_MASK(s, e) ( \
2373 BUILD_BUG_ON_ZERO(!__builtin_constant_p(s)) + \ 2220 BUILD_BUG_ON_ZERO(!__builtin_constant_p(s)) + \
2374 BUILD_BUG_ON_ZERO(!__builtin_constant_p(e)) + \ 2221 BUILD_BUG_ON_ZERO(!__builtin_constant_p(e)) + \
2375 GENMASK((e) != GEN_FOREVER ? (e) - 1 : BITS_PER_LONG - 1, \ 2222 GENMASK((e) - 1, (s) - 1))
2376 (s) != GEN_FOREVER ? (s) - 1 : 0) \
2377)
2378 2223
2379/* 2224/* Returns true if Gen is in inclusive range [Start, End] */
2380 * Returns true if Gen is in inclusive range [Start, End].
2381 *
2382 * Use GEN_FOREVER for unbound start and or end.
2383 */
2384#define IS_GEN(dev_priv, s, e) \ 2225#define IS_GEN(dev_priv, s, e) \
2385 (!!((dev_priv)->info.gen_mask & INTEL_GEN_MASK((s), (e)))) 2226 (!!((dev_priv)->info.gen_mask & INTEL_GEN_MASK((s), (e))))
2386 2227
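
With GEN_FOREVER removed, both bounds passed to IS_GEN() must now be explicit compile-time constants, and INTEL_GEN_MASK() collapses to a plain GENMASK() over the inclusive range. A sketch of how a call site reads after the change (setup_foo()/setup_bar() are hypothetical names):

	if (IS_GEN(dev_priv, 9, 10))		/* gen9 and gen10 only */
		setup_foo(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 11)	/* open-ended checks use INTEL_GEN() */
		setup_bar(dev_priv);
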
@@ -2461,6 +2302,8 @@ intel_info(const struct drm_i915_private *dev_priv)
2461#define IS_KBL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x590E || \ 2302#define IS_KBL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x590E || \
2462 INTEL_DEVID(dev_priv) == 0x5915 || \ 2303 INTEL_DEVID(dev_priv) == 0x5915 || \
2463 INTEL_DEVID(dev_priv) == 0x591E) 2304 INTEL_DEVID(dev_priv) == 0x591E)
2305#define IS_AML_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x591C || \
2306 INTEL_DEVID(dev_priv) == 0x87C0)
2464#define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \ 2307#define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \
2465 (dev_priv)->info.gt == 2) 2308 (dev_priv)->info.gt == 2)
2466#define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \ 2309#define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \
@@ -2592,9 +2435,14 @@ intel_info(const struct drm_i915_private *dev_priv)
2592 2435
2593#define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv) 2436#define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)
2594 2437
2595#define USES_PPGTT(dev_priv) (i915_modparams.enable_ppgtt) 2438#define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt)
2596#define USES_FULL_PPGTT(dev_priv) (i915_modparams.enable_ppgtt >= 2) 2439#define HAS_PPGTT(dev_priv) \
2597#define USES_FULL_48BIT_PPGTT(dev_priv) (i915_modparams.enable_ppgtt == 3) 2440 (INTEL_PPGTT(dev_priv) != INTEL_PPGTT_NONE)
2441#define HAS_FULL_PPGTT(dev_priv) \
2442 (INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL)
2443#define HAS_FULL_48BIT_PPGTT(dev_priv) \
2444 (INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL_4LVL)
2445
2598#define HAS_PAGE_SIZES(dev_priv, sizes) ({ \ 2446#define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
2599 GEM_BUG_ON((sizes) == 0); \ 2447 GEM_BUG_ON((sizes) == 0); \
2600 ((sizes) & ~(dev_priv)->info.page_sizes) == 0; \ 2448 ((sizes) & ~(dev_priv)->info.page_sizes) == 0; \
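
The HAS_*PPGTT() predicates replace the old enable_ppgtt module-parameter checks and depend on the INTEL_PPGTT_* enumerators being ordered by increasing capability, since the macros compare with >=. A sketch of the assumed ordering (the real definitions live in the device-info headers):

	enum intel_ppgtt {
		INTEL_PPGTT_NONE = 0,
		INTEL_PPGTT_ALIASING,
		INTEL_PPGTT_FULL,
		INTEL_PPGTT_FULL_4LVL,	/* 48-bit, 4-level page tables */
	};
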
@@ -2742,9 +2590,6 @@ intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
2742 return IS_BROXTON(dev_priv) && intel_vtd_active(); 2590 return IS_BROXTON(dev_priv) && intel_vtd_active();
2743} 2591}
2744 2592
2745int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
2746 int enable_ppgtt);
2747
2748/* i915_drv.c */ 2593/* i915_drv.c */
2749void __printf(3, 4) 2594void __printf(3, 4)
2750__i915_printk(struct drm_i915_private *dev_priv, const char *level, 2595__i915_printk(struct drm_i915_private *dev_priv, const char *level,
@@ -3229,7 +3074,7 @@ int i915_gem_object_wait(struct drm_i915_gem_object *obj,
3229int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, 3074int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
3230 unsigned int flags, 3075 unsigned int flags,
3231 const struct i915_sched_attr *attr); 3076 const struct i915_sched_attr *attr);
3232#define I915_PRIORITY_DISPLAY I915_PRIORITY_MAX 3077#define I915_PRIORITY_DISPLAY I915_USER_PRIORITY(I915_PRIORITY_MAX)
3233 3078
3234int __must_check 3079int __must_check
3235i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write); 3080i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
@@ -3461,6 +3306,7 @@ bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
3461 enum port port); 3306 enum port port);
3462bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv, 3307bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
3463 enum port port); 3308 enum port port);
3309enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, enum port port);
3464 3310
3465/* intel_acpi.c */ 3311/* intel_acpi.c */
3466#ifdef CONFIG_ACPI 3312#ifdef CONFIG_ACPI
@@ -3482,8 +3328,6 @@ mkwrite_device_info(struct drm_i915_private *dev_priv)
3482extern void intel_modeset_init_hw(struct drm_device *dev); 3328extern void intel_modeset_init_hw(struct drm_device *dev);
3483extern int intel_modeset_init(struct drm_device *dev); 3329extern int intel_modeset_init(struct drm_device *dev);
3484extern void intel_modeset_cleanup(struct drm_device *dev); 3330extern void intel_modeset_cleanup(struct drm_device *dev);
3485extern int intel_connector_register(struct drm_connector *);
3486extern void intel_connector_unregister(struct drm_connector *);
3487extern int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, 3331extern int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv,
3488 bool state); 3332 bool state);
3489extern void intel_display_resume(struct drm_device *dev); 3333extern void intel_display_resume(struct drm_device *dev);
@@ -3583,6 +3427,12 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
3583void vlv_phy_reset_lanes(struct intel_encoder *encoder, 3427void vlv_phy_reset_lanes(struct intel_encoder *encoder,
3584 const struct intel_crtc_state *old_crtc_state); 3428 const struct intel_crtc_state *old_crtc_state);
3585 3429
3430/* intel_combo_phy.c */
3431void icl_combo_phys_init(struct drm_i915_private *dev_priv);
3432void icl_combo_phys_uninit(struct drm_i915_private *dev_priv);
3433void cnl_combo_phys_init(struct drm_i915_private *dev_priv);
3434void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv);
3435
3586int intel_gpu_freq(struct drm_i915_private *dev_priv, int val); 3436int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
3587int intel_freq_opcode(struct drm_i915_private *dev_priv, int val); 3437int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
3588u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv, 3438u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/i915_fixed.h b/drivers/gpu/drm/i915/i915_fixed.h
new file mode 100644
index 000000000000..591dd89ba7af
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_fixed.h
@@ -0,0 +1,143 @@
1/* SPDX-License-Identifier: MIT */
2/*
3 * Copyright © 2018 Intel Corporation
4 */
5
6#ifndef _I915_FIXED_H_
7#define _I915_FIXED_H_
8
9typedef struct {
10 u32 val;
11} uint_fixed_16_16_t;
12
13#define FP_16_16_MAX ((uint_fixed_16_16_t){ .val = UINT_MAX })
14
15static inline bool is_fixed16_zero(uint_fixed_16_16_t val)
16{
17 return val.val == 0;
18}
19
20static inline uint_fixed_16_16_t u32_to_fixed16(u32 val)
21{
22 uint_fixed_16_16_t fp = { .val = val << 16 };
23
24 WARN_ON(val > U16_MAX);
25
26 return fp;
27}
28
29static inline u32 fixed16_to_u32_round_up(uint_fixed_16_16_t fp)
30{
31 return DIV_ROUND_UP(fp.val, 1 << 16);
32}
33
34static inline u32 fixed16_to_u32(uint_fixed_16_16_t fp)
35{
36 return fp.val >> 16;
37}
38
39static inline uint_fixed_16_16_t min_fixed16(uint_fixed_16_16_t min1,
40 uint_fixed_16_16_t min2)
41{
42 uint_fixed_16_16_t min = { .val = min(min1.val, min2.val) };
43
44 return min;
45}
46
47static inline uint_fixed_16_16_t max_fixed16(uint_fixed_16_16_t max1,
48 uint_fixed_16_16_t max2)
49{
50 uint_fixed_16_16_t max = { .val = max(max1.val, max2.val) };
51
52 return max;
53}
54
55static inline uint_fixed_16_16_t clamp_u64_to_fixed16(u64 val)
56{
57 uint_fixed_16_16_t fp = { .val = (u32)val };
58
59 WARN_ON(val > U32_MAX);
60
61 return fp;
62}
63
64static inline u32 div_round_up_fixed16(uint_fixed_16_16_t val,
65 uint_fixed_16_16_t d)
66{
67 return DIV_ROUND_UP(val.val, d.val);
68}
69
70static inline u32 mul_round_up_u32_fixed16(u32 val, uint_fixed_16_16_t mul)
71{
72 u64 tmp;
73
74 tmp = (u64)val * mul.val;
75 tmp = DIV_ROUND_UP_ULL(tmp, 1 << 16);
76 WARN_ON(tmp > U32_MAX);
77
78 return (u32)tmp;
79}
80
81static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val,
82 uint_fixed_16_16_t mul)
83{
84 u64 tmp;
85
86 tmp = (u64)val.val * mul.val;
87 tmp = tmp >> 16;
88
89 return clamp_u64_to_fixed16(tmp);
90}
91
92static inline uint_fixed_16_16_t div_fixed16(u32 val, u32 d)
93{
94 u64 tmp;
95
96 tmp = (u64)val << 16;
97 tmp = DIV_ROUND_UP_ULL(tmp, d);
98
99 return clamp_u64_to_fixed16(tmp);
100}
101
102static inline u32 div_round_up_u32_fixed16(u32 val, uint_fixed_16_16_t d)
103{
104 u64 tmp;
105
106 tmp = (u64)val << 16;
107 tmp = DIV_ROUND_UP_ULL(tmp, d.val);
108 WARN_ON(tmp > U32_MAX);
109
110 return (u32)tmp;
111}
112
113static inline uint_fixed_16_16_t mul_u32_fixed16(u32 val, uint_fixed_16_16_t mul)
114{
115 u64 tmp;
116
117 tmp = (u64)val * mul.val;
118
119 return clamp_u64_to_fixed16(tmp);
120}
121
122static inline uint_fixed_16_16_t add_fixed16(uint_fixed_16_16_t add1,
123 uint_fixed_16_16_t add2)
124{
125 u64 tmp;
126
127 tmp = (u64)add1.val + add2.val;
128
129 return clamp_u64_to_fixed16(tmp);
130}
131
132static inline uint_fixed_16_16_t add_fixed16_u32(uint_fixed_16_16_t add1,
133 u32 add2)
134{
135 uint_fixed_16_16_t tmp_add2 = u32_to_fixed16(add2);
136 u64 tmp;
137
138 tmp = (u64)add1.val + tmp_add2.val;
139
140 return clamp_u64_to_fixed16(tmp);
141}
142
143#endif /* _I915_FIXED_H_ */
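
For orientation, a hedged usage sketch of the new 16.16 helpers (the function is hypothetical and the values illustrative; the arithmetic follows the definitions above):

	static u32 example_blocks(void)	/* not part of the patch */
	{
		uint_fixed_16_16_t ratio = div_fixed16(1000, 3);	/* ~333.333 in 16.16 */
		uint_fixed_16_16_t total = add_fixed16_u32(ratio, 10);	/* ~343.333 */

		(void)fixed16_to_u32(total);			/* truncates to 343 */
		return mul_round_up_u32_fixed16(8, ratio);	/* ceil(8 * 333.333) == 2667 */
	}
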
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0c8aa57ce83b..c55b1f75c980 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1740,6 +1740,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1740 */ 1740 */
1741 err = i915_gem_object_wait(obj, 1741 err = i915_gem_object_wait(obj,
1742 I915_WAIT_INTERRUPTIBLE | 1742 I915_WAIT_INTERRUPTIBLE |
1743 I915_WAIT_PRIORITY |
1743 (write_domain ? I915_WAIT_ALL : 0), 1744 (write_domain ? I915_WAIT_ALL : 0),
1744 MAX_SCHEDULE_TIMEOUT, 1745 MAX_SCHEDULE_TIMEOUT,
1745 to_rps_client(file)); 1746 to_rps_client(file));
@@ -2381,11 +2382,23 @@ void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
2381 invalidate_mapping_pages(mapping, 0, (loff_t)-1); 2382 invalidate_mapping_pages(mapping, 0, (loff_t)-1);
2382} 2383}
2383 2384
2385/*
 2386 * Move pages to the appropriate LRU list and release the pagevec,
 2387 * dropping a reference on each of those pages.
2388 */
2389static void check_release_pagevec(struct pagevec *pvec)
2390{
2391 check_move_unevictable_pages(pvec);
2392 __pagevec_release(pvec);
2393 cond_resched();
2394}
2395
2384static void 2396static void
2385i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj, 2397i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
2386 struct sg_table *pages) 2398 struct sg_table *pages)
2387{ 2399{
2388 struct sgt_iter sgt_iter; 2400 struct sgt_iter sgt_iter;
2401 struct pagevec pvec;
2389 struct page *page; 2402 struct page *page;
2390 2403
2391 __i915_gem_object_release_shmem(obj, pages, true); 2404 __i915_gem_object_release_shmem(obj, pages, true);
@@ -2395,6 +2408,9 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
2395 if (i915_gem_object_needs_bit17_swizzle(obj)) 2408 if (i915_gem_object_needs_bit17_swizzle(obj))
2396 i915_gem_object_save_bit_17_swizzle(obj, pages); 2409 i915_gem_object_save_bit_17_swizzle(obj, pages);
2397 2410
2411 mapping_clear_unevictable(file_inode(obj->base.filp)->i_mapping);
2412
2413 pagevec_init(&pvec);
2398 for_each_sgt_page(page, sgt_iter, pages) { 2414 for_each_sgt_page(page, sgt_iter, pages) {
2399 if (obj->mm.dirty) 2415 if (obj->mm.dirty)
2400 set_page_dirty(page); 2416 set_page_dirty(page);
@@ -2402,8 +2418,11 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
2402 if (obj->mm.madv == I915_MADV_WILLNEED) 2418 if (obj->mm.madv == I915_MADV_WILLNEED)
2403 mark_page_accessed(page); 2419 mark_page_accessed(page);
2404 2420
2405 put_page(page); 2421 if (!pagevec_add(&pvec, page))
2422 check_release_pagevec(&pvec);
2406 } 2423 }
2424 if (pagevec_count(&pvec))
2425 check_release_pagevec(&pvec);
2407 obj->mm.dirty = false; 2426 obj->mm.dirty = false;
2408 2427
2409 sg_free_table(pages); 2428 sg_free_table(pages);
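
Taken together with the get-pages hunks further down, the shmem pin/unpin is now symmetric; a condensed sketch of the assumed pairing (identifiers as in the surrounding hunks):

	/* get_pages: keep pinned shmem pages off the reclaim LRUs */
	mapping_set_unevictable(mapping);

	/* put_pages: undo the pin, then batch the releases */
	mapping_clear_unevictable(mapping);
	pagevec_init(&pvec);
	for_each_sgt_page(page, sgt_iter, pages)
		if (!pagevec_add(&pvec, page))		/* batch is full */
			check_release_pagevec(&pvec);	/* re-LRU, release, resched */
	if (pagevec_count(&pvec))
		check_release_pagevec(&pvec);		/* flush the remainder */
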
@@ -2483,7 +2502,7 @@ unlock:
2483 mutex_unlock(&obj->mm.lock); 2502 mutex_unlock(&obj->mm.lock);
2484} 2503}
2485 2504
2486static bool i915_sg_trim(struct sg_table *orig_st) 2505bool i915_sg_trim(struct sg_table *orig_st)
2487{ 2506{
2488 struct sg_table new_st; 2507 struct sg_table new_st;
2489 struct scatterlist *sg, *new_sg; 2508 struct scatterlist *sg, *new_sg;
@@ -2524,6 +2543,7 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2524 unsigned long last_pfn = 0; /* suppress gcc warning */ 2543 unsigned long last_pfn = 0; /* suppress gcc warning */
2525 unsigned int max_segment = i915_sg_segment_size(); 2544 unsigned int max_segment = i915_sg_segment_size();
2526 unsigned int sg_page_sizes; 2545 unsigned int sg_page_sizes;
2546 struct pagevec pvec;
2527 gfp_t noreclaim; 2547 gfp_t noreclaim;
2528 int ret; 2548 int ret;
2529 2549
@@ -2559,6 +2579,7 @@ rebuild_st:
2559 * Fail silently without starting the shrinker 2579 * Fail silently without starting the shrinker
2560 */ 2580 */
2561 mapping = obj->base.filp->f_mapping; 2581 mapping = obj->base.filp->f_mapping;
2582 mapping_set_unevictable(mapping);
2562 noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM); 2583 noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
2563 noreclaim |= __GFP_NORETRY | __GFP_NOWARN; 2584 noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
2564 2585
@@ -2573,6 +2594,7 @@ rebuild_st:
2573 gfp_t gfp = noreclaim; 2594 gfp_t gfp = noreclaim;
2574 2595
2575 do { 2596 do {
2597 cond_resched();
2576 page = shmem_read_mapping_page_gfp(mapping, i, gfp); 2598 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2577 if (likely(!IS_ERR(page))) 2599 if (likely(!IS_ERR(page)))
2578 break; 2600 break;
@@ -2583,7 +2605,6 @@ rebuild_st:
2583 } 2605 }
2584 2606
2585 i915_gem_shrink(dev_priv, 2 * page_count, NULL, *s++); 2607 i915_gem_shrink(dev_priv, 2 * page_count, NULL, *s++);
2586 cond_resched();
2587 2608
2588 /* 2609 /*
2589 * We've tried hard to allocate the memory by reaping 2610 * We've tried hard to allocate the memory by reaping
@@ -2673,8 +2694,14 @@ rebuild_st:
2673err_sg: 2694err_sg:
2674 sg_mark_end(sg); 2695 sg_mark_end(sg);
2675err_pages: 2696err_pages:
2676 for_each_sgt_page(page, sgt_iter, st) 2697 mapping_clear_unevictable(mapping);
2677 put_page(page); 2698 pagevec_init(&pvec);
2699 for_each_sgt_page(page, sgt_iter, st) {
2700 if (!pagevec_add(&pvec, page))
2701 check_release_pagevec(&pvec);
2702 }
2703 if (pagevec_count(&pvec))
2704 check_release_pagevec(&pvec);
2678 sg_free_table(st); 2705 sg_free_table(st);
2679 kfree(st); 2706 kfree(st);
2680 2707
@@ -3530,6 +3557,8 @@ static void __sleep_rcu(struct rcu_head *rcu)
3530 struct sleep_rcu_work *s = container_of(rcu, typeof(*s), rcu); 3557 struct sleep_rcu_work *s = container_of(rcu, typeof(*s), rcu);
3531 struct drm_i915_private *i915 = s->i915; 3558 struct drm_i915_private *i915 = s->i915;
3532 3559
3560 destroy_rcu_head(&s->rcu);
3561
3533 if (same_epoch(i915, s->epoch)) { 3562 if (same_epoch(i915, s->epoch)) {
3534 INIT_WORK(&s->work, __sleep_work); 3563 INIT_WORK(&s->work, __sleep_work);
3535 queue_work(i915->wq, &s->work); 3564 queue_work(i915->wq, &s->work);
@@ -3646,6 +3675,7 @@ out_rearm:
3646 if (same_epoch(dev_priv, epoch)) { 3675 if (same_epoch(dev_priv, epoch)) {
3647 struct sleep_rcu_work *s = kmalloc(sizeof(*s), GFP_KERNEL); 3676 struct sleep_rcu_work *s = kmalloc(sizeof(*s), GFP_KERNEL);
3648 if (s) { 3677 if (s) {
3678 init_rcu_head(&s->rcu);
3649 s->i915 = dev_priv; 3679 s->i915 = dev_priv;
3650 s->epoch = epoch; 3680 s->epoch = epoch;
3651 call_rcu(&s->rcu, __sleep_rcu); 3681 call_rcu(&s->rcu, __sleep_rcu);
@@ -3743,7 +3773,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3743 start = ktime_get(); 3773 start = ktime_get();
3744 3774
3745 ret = i915_gem_object_wait(obj, 3775 ret = i915_gem_object_wait(obj,
3746 I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL, 3776 I915_WAIT_INTERRUPTIBLE |
3777 I915_WAIT_PRIORITY |
3778 I915_WAIT_ALL,
3747 to_wait_timeout(args->timeout_ns), 3779 to_wait_timeout(args->timeout_ns),
3748 to_rps_client(file)); 3780 to_rps_client(file));
3749 3781
@@ -4710,6 +4742,8 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
4710 INIT_LIST_HEAD(&obj->lut_list); 4742 INIT_LIST_HEAD(&obj->lut_list);
4711 INIT_LIST_HEAD(&obj->batch_pool_link); 4743 INIT_LIST_HEAD(&obj->batch_pool_link);
4712 4744
4745 init_rcu_head(&obj->rcu);
4746
4713 obj->ops = ops; 4747 obj->ops = ops;
4714 4748
4715 reservation_object_init(&obj->__builtin_resv); 4749 reservation_object_init(&obj->__builtin_resv);
@@ -4977,6 +5011,13 @@ static void __i915_gem_free_object_rcu(struct rcu_head *head)
4977 struct drm_i915_private *i915 = to_i915(obj->base.dev); 5011 struct drm_i915_private *i915 = to_i915(obj->base.dev);
4978 5012
4979 /* 5013 /*
5014 * We reuse obj->rcu for the freed list, so we had better not treat
 5015 * it like an rcu_head from this point onwards. And we expect all
5016 * objects to be freed via this path.
5017 */
5018 destroy_rcu_head(&obj->rcu);
5019
5020 /*
4980 * Since we require blocking on struct_mutex to unbind the freed 5021 * Since we require blocking on struct_mutex to unbind the freed
4981 * object from the GPU before releasing resources back to the 5022 * object from the GPU before releasing resources back to the
4982 * system, we can not do that directly from the RCU callback (which may 5023 * system, we can not do that directly from the RCU callback (which may
@@ -5293,18 +5334,6 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
5293 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ? 5334 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
5294 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED); 5335 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
5295 5336
5296 if (HAS_PCH_NOP(dev_priv)) {
5297 if (IS_IVYBRIDGE(dev_priv)) {
5298 u32 temp = I915_READ(GEN7_MSG_CTL);
5299 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
5300 I915_WRITE(GEN7_MSG_CTL, temp);
5301 } else if (INTEL_GEN(dev_priv) >= 7) {
5302 u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
5303 temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
5304 I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
5305 }
5306 }
5307
5308 intel_gt_workarounds_apply(dev_priv); 5337 intel_gt_workarounds_apply(dev_priv);
5309 5338
5310 i915_gem_init_swizzling(dev_priv); 5339 i915_gem_init_swizzling(dev_priv);
@@ -5951,7 +5980,7 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
5951 * the bits. 5980 * the bits.
5952 */ 5981 */
5953 BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > 5982 BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
5954 sizeof(atomic_t) * BITS_PER_BYTE); 5983 BITS_PER_TYPE(atomic_t));
5955 5984
5956 if (old) { 5985 if (old) {
5957 WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits)); 5986 WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
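
BITS_PER_TYPE() expresses the same quantity as the open-coded product it replaces; its definition in the kernel headers of this era is simply:

	#define BITS_PER_TYPE(type)	(sizeof(type) * BITS_PER_BYTE)
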
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index 599c4f6eb1ea..b0e4b976880c 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -47,17 +47,19 @@ struct drm_i915_private;
47#define GEM_DEBUG_DECL(var) var 47#define GEM_DEBUG_DECL(var) var
48#define GEM_DEBUG_EXEC(expr) expr 48#define GEM_DEBUG_EXEC(expr) expr
49#define GEM_DEBUG_BUG_ON(expr) GEM_BUG_ON(expr) 49#define GEM_DEBUG_BUG_ON(expr) GEM_BUG_ON(expr)
50#define GEM_DEBUG_WARN_ON(expr) GEM_WARN_ON(expr)
50 51
51#else 52#else
52 53
53#define GEM_SHOW_DEBUG() (0) 54#define GEM_SHOW_DEBUG() (0)
54 55
55#define GEM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr) 56#define GEM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
56#define GEM_WARN_ON(expr) (BUILD_BUG_ON_INVALID(expr), 0) 57#define GEM_WARN_ON(expr) ({ unlikely(!!(expr)); })
57 58
58#define GEM_DEBUG_DECL(var) 59#define GEM_DEBUG_DECL(var)
59#define GEM_DEBUG_EXEC(expr) do { } while (0) 60#define GEM_DEBUG_EXEC(expr) do { } while (0)
60#define GEM_DEBUG_BUG_ON(expr) 61#define GEM_DEBUG_BUG_ON(expr)
62#define GEM_DEBUG_WARN_ON(expr) ({ BUILD_BUG_ON_INVALID(expr); 0; })
61#endif 63#endif
62 64
63#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GEM) 65#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GEM)
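
The two macros now differ in release builds: GEM_WARN_ON() always evaluates its condition and returns it (warning only when debug is enabled), while GEM_DEBUG_WARN_ON() compiles to a constant 0, so its error path must be dead code in release builds. A hedged sketch of call sites under both definitions (the conditions are illustrative):

	if (GEM_WARN_ON(len > ring->size))	/* still evaluated in release builds */
		return -ENOSPC;

	if (GEM_DEBUG_WARN_ON(offset & 7))	/* constant 0 in release builds */
		return -EINVAL;
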
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index f772593b99ab..b97963db0287 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -337,7 +337,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
337 kref_init(&ctx->ref); 337 kref_init(&ctx->ref);
338 list_add_tail(&ctx->link, &dev_priv->contexts.list); 338 list_add_tail(&ctx->link, &dev_priv->contexts.list);
339 ctx->i915 = dev_priv; 339 ctx->i915 = dev_priv;
340 ctx->sched.priority = I915_PRIORITY_NORMAL; 340 ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
341 341
342 for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) { 342 for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
343 struct intel_context *ce = &ctx->__engine[n]; 343 struct intel_context *ce = &ctx->__engine[n];
@@ -414,7 +414,7 @@ i915_gem_create_context(struct drm_i915_private *dev_priv,
414 if (IS_ERR(ctx)) 414 if (IS_ERR(ctx))
415 return ctx; 415 return ctx;
416 416
417 if (USES_FULL_PPGTT(dev_priv)) { 417 if (HAS_FULL_PPGTT(dev_priv)) {
418 struct i915_hw_ppgtt *ppgtt; 418 struct i915_hw_ppgtt *ppgtt;
419 419
420 ppgtt = i915_ppgtt_create(dev_priv, file_priv); 420 ppgtt = i915_ppgtt_create(dev_priv, file_priv);
@@ -457,7 +457,7 @@ i915_gem_context_create_gvt(struct drm_device *dev)
457 if (ret) 457 if (ret)
458 return ERR_PTR(ret); 458 return ERR_PTR(ret);
459 459
460 ctx = __create_hw_context(to_i915(dev), NULL); 460 ctx = i915_gem_create_context(to_i915(dev), NULL);
461 if (IS_ERR(ctx)) 461 if (IS_ERR(ctx))
462 goto out; 462 goto out;
463 463
@@ -504,7 +504,7 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
504 } 504 }
505 505
506 i915_gem_context_clear_bannable(ctx); 506 i915_gem_context_clear_bannable(ctx);
507 ctx->sched.priority = prio; 507 ctx->sched.priority = I915_USER_PRIORITY(prio);
508 ctx->ring_size = PAGE_SIZE; 508 ctx->ring_size = PAGE_SIZE;
509 509
510 GEM_BUG_ON(!i915_gem_context_is_kernel(ctx)); 510 GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
@@ -879,7 +879,7 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
879 args->value = i915_gem_context_is_bannable(ctx); 879 args->value = i915_gem_context_is_bannable(ctx);
880 break; 880 break;
881 case I915_CONTEXT_PARAM_PRIORITY: 881 case I915_CONTEXT_PARAM_PRIORITY:
882 args->value = ctx->sched.priority; 882 args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
883 break; 883 break;
884 default: 884 default:
885 ret = -EINVAL; 885 ret = -EINVAL;
@@ -948,7 +948,8 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
948 !capable(CAP_SYS_NICE)) 948 !capable(CAP_SYS_NICE))
949 ret = -EPERM; 949 ret = -EPERM;
950 else 950 else
951 ctx->sched.priority = priority; 951 ctx->sched.priority =
952 I915_USER_PRIORITY(priority);
952 } 953 }
953 break; 954 break;
954 955
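
Context priorities are now stored pre-shifted so the low bits stay free for internal scheduling adjustments; assuming I915_USER_PRIORITY(x) is ((x) << I915_USER_PRIORITY_SHIFT), the uapi round-trip is:

	/* setparam: shift the user value into the high bits */
	ctx->sched.priority = I915_USER_PRIORITY(args->value);

	/* getparam: recover the user value, discarding internal bits */
	args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
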
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index 08165f6a0a84..f6d870b1f73e 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -163,6 +163,7 @@ struct i915_gem_context {
163 /** engine: per-engine logical HW state */ 163 /** engine: per-engine logical HW state */
164 struct intel_context { 164 struct intel_context {
165 struct i915_gem_context *gem_context; 165 struct i915_gem_context *gem_context;
166 struct intel_engine_cs *active;
166 struct i915_vma *state; 167 struct i915_vma *state;
167 struct intel_ring *ring; 168 struct intel_ring *ring;
168 u32 *lrc_reg_state; 169 u32 *lrc_reg_state;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 1a1c04db6c80..7b3ae2333dbf 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1268,7 +1268,7 @@ relocate_entry(struct i915_vma *vma,
1268 else if (gen >= 4) 1268 else if (gen >= 4)
1269 len = 4; 1269 len = 4;
1270 else 1270 else
1271 len = 3; 1271 len = 6;
1272 1272
1273 batch = reloc_gpu(eb, vma, len); 1273 batch = reloc_gpu(eb, vma, len);
1274 if (IS_ERR(batch)) 1274 if (IS_ERR(batch))
@@ -1309,6 +1309,11 @@ relocate_entry(struct i915_vma *vma,
1309 *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; 1309 *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
1310 *batch++ = addr; 1310 *batch++ = addr;
1311 *batch++ = target_offset; 1311 *batch++ = target_offset;
1312
1313 /* And again for good measure (blb/pnv) */
1314 *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
1315 *batch++ = addr;
1316 *batch++ = target_offset;
1312 } 1317 }
1313 1318
1314 goto out; 1319 goto out;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 47c302543799..add1fe7aeb93 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -133,55 +133,6 @@ static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
133 i915->ggtt.invalidate(i915); 133 i915->ggtt.invalidate(i915);
134} 134}
135 135
136int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
137 int enable_ppgtt)
138{
139 bool has_full_ppgtt;
140 bool has_full_48bit_ppgtt;
141
142 if (!dev_priv->info.has_aliasing_ppgtt)
143 return 0;
144
145 has_full_ppgtt = dev_priv->info.has_full_ppgtt;
146 has_full_48bit_ppgtt = dev_priv->info.has_full_48bit_ppgtt;
147
148 if (intel_vgpu_active(dev_priv)) {
149 /* GVT-g has no support for 32bit ppgtt */
150 has_full_ppgtt = false;
151 has_full_48bit_ppgtt = intel_vgpu_has_full_48bit_ppgtt(dev_priv);
152 }
153
154 /*
155 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
156 * execlists, the sole mechanism available to submit work.
157 */
158 if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
159 return 0;
160
161 if (enable_ppgtt == 1)
162 return 1;
163
164 if (enable_ppgtt == 2 && has_full_ppgtt)
165 return 2;
166
167 if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
168 return 3;
169
170 /* Disable ppgtt on SNB if VT-d is on. */
171 if (IS_GEN6(dev_priv) && intel_vtd_active()) {
172 DRM_INFO("Disabling PPGTT because VT-d is on\n");
173 return 0;
174 }
175
176 if (has_full_48bit_ppgtt)
177 return 3;
178
179 if (has_full_ppgtt)
180 return 2;
181
182 return 1;
183}
184
185static int ppgtt_bind_vma(struct i915_vma *vma, 136static int ppgtt_bind_vma(struct i915_vma *vma,
186 enum i915_cache_level cache_level, 137 enum i915_cache_level cache_level,
187 u32 unused) 138 u32 unused)
@@ -235,9 +186,9 @@ static void clear_pages(struct i915_vma *vma)
235 memset(&vma->page_sizes, 0, sizeof(vma->page_sizes)); 186 memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
236} 187}
237 188
238static gen8_pte_t gen8_pte_encode(dma_addr_t addr, 189static u64 gen8_pte_encode(dma_addr_t addr,
239 enum i915_cache_level level, 190 enum i915_cache_level level,
240 u32 flags) 191 u32 flags)
241{ 192{
242 gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW; 193 gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
243 194
@@ -274,9 +225,9 @@ static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
274#define gen8_pdpe_encode gen8_pde_encode 225#define gen8_pdpe_encode gen8_pde_encode
275#define gen8_pml4e_encode gen8_pde_encode 226#define gen8_pml4e_encode gen8_pde_encode
276 227
277static gen6_pte_t snb_pte_encode(dma_addr_t addr, 228static u64 snb_pte_encode(dma_addr_t addr,
278 enum i915_cache_level level, 229 enum i915_cache_level level,
279 u32 unused) 230 u32 flags)
280{ 231{
281 gen6_pte_t pte = GEN6_PTE_VALID; 232 gen6_pte_t pte = GEN6_PTE_VALID;
282 pte |= GEN6_PTE_ADDR_ENCODE(addr); 233 pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -296,9 +247,9 @@ static gen6_pte_t snb_pte_encode(dma_addr_t addr,
296 return pte; 247 return pte;
297} 248}
298 249
299static gen6_pte_t ivb_pte_encode(dma_addr_t addr, 250static u64 ivb_pte_encode(dma_addr_t addr,
300 enum i915_cache_level level, 251 enum i915_cache_level level,
301 u32 unused) 252 u32 flags)
302{ 253{
303 gen6_pte_t pte = GEN6_PTE_VALID; 254 gen6_pte_t pte = GEN6_PTE_VALID;
304 pte |= GEN6_PTE_ADDR_ENCODE(addr); 255 pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -320,9 +271,9 @@ static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
320 return pte; 271 return pte;
321} 272}
322 273
323static gen6_pte_t byt_pte_encode(dma_addr_t addr, 274static u64 byt_pte_encode(dma_addr_t addr,
324 enum i915_cache_level level, 275 enum i915_cache_level level,
325 u32 flags) 276 u32 flags)
326{ 277{
327 gen6_pte_t pte = GEN6_PTE_VALID; 278 gen6_pte_t pte = GEN6_PTE_VALID;
328 pte |= GEN6_PTE_ADDR_ENCODE(addr); 279 pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -336,9 +287,9 @@ static gen6_pte_t byt_pte_encode(dma_addr_t addr,
336 return pte; 287 return pte;
337} 288}
338 289
339static gen6_pte_t hsw_pte_encode(dma_addr_t addr, 290static u64 hsw_pte_encode(dma_addr_t addr,
340 enum i915_cache_level level, 291 enum i915_cache_level level,
341 u32 unused) 292 u32 flags)
342{ 293{
343 gen6_pte_t pte = GEN6_PTE_VALID; 294 gen6_pte_t pte = GEN6_PTE_VALID;
344 pte |= HSW_PTE_ADDR_ENCODE(addr); 295 pte |= HSW_PTE_ADDR_ENCODE(addr);
@@ -349,9 +300,9 @@ static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
349 return pte; 300 return pte;
350} 301}
351 302
352static gen6_pte_t iris_pte_encode(dma_addr_t addr, 303static u64 iris_pte_encode(dma_addr_t addr,
353 enum i915_cache_level level, 304 enum i915_cache_level level,
354 u32 unused) 305 u32 flags)
355{ 306{
356 gen6_pte_t pte = GEN6_PTE_VALID; 307 gen6_pte_t pte = GEN6_PTE_VALID;
357 pte |= HSW_PTE_ADDR_ENCODE(addr); 308 pte |= HSW_PTE_ADDR_ENCODE(addr);
@@ -629,10 +580,9 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
629 * region, including any PTEs which happen to point to scratch. 580 * region, including any PTEs which happen to point to scratch.
630 * 581 *
631 * This is only relevant for the 48b PPGTT where we support 582 * This is only relevant for the 48b PPGTT where we support
632 * huge-gtt-pages, see also i915_vma_insert(). 583 * huge-gtt-pages, see also i915_vma_insert(). However, as we share the
 633 * 584 * scratch (read-only) among all VMs, we create one 64K scratch page
634 * TODO: we should really consider write-protecting the scratch-page and 585 * for all.
635 * sharing between ppgtt
636 */ 586 */
637 size = I915_GTT_PAGE_SIZE_4K; 587 size = I915_GTT_PAGE_SIZE_4K;
638 if (i915_vm_is_48bit(vm) && 588 if (i915_vm_is_48bit(vm) &&
@@ -715,14 +665,13 @@ static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt)
715static void gen8_initialize_pt(struct i915_address_space *vm, 665static void gen8_initialize_pt(struct i915_address_space *vm,
716 struct i915_page_table *pt) 666 struct i915_page_table *pt)
717{ 667{
718 fill_px(vm, pt, 668 fill_px(vm, pt, vm->scratch_pte);
719 gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0));
720} 669}
721 670
722static void gen6_initialize_pt(struct gen6_hw_ppgtt *ppgtt, 671static void gen6_initialize_pt(struct i915_address_space *vm,
723 struct i915_page_table *pt) 672 struct i915_page_table *pt)
724{ 673{
725 fill32_px(&ppgtt->base.vm, pt, ppgtt->scratch_pte); 674 fill32_px(vm, pt, vm->scratch_pte);
726} 675}
727 676
728static struct i915_page_directory *alloc_pd(struct i915_address_space *vm) 677static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
@@ -856,15 +805,13 @@ static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
856/* Removes entries from a single page table, releasing it if it's empty. 805/* Removes entries from a single page table, releasing it if it's empty.
857 * Caller can use the return value to update higher-level entries. 806 * Caller can use the return value to update higher-level entries.
858 */ 807 */
859static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm, 808static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm,
860 struct i915_page_table *pt, 809 struct i915_page_table *pt,
861 u64 start, u64 length) 810 u64 start, u64 length)
862{ 811{
863 unsigned int num_entries = gen8_pte_count(start, length); 812 unsigned int num_entries = gen8_pte_count(start, length);
864 unsigned int pte = gen8_pte_index(start); 813 unsigned int pte = gen8_pte_index(start);
865 unsigned int pte_end = pte + num_entries; 814 unsigned int pte_end = pte + num_entries;
866 const gen8_pte_t scratch_pte =
867 gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
868 gen8_pte_t *vaddr; 815 gen8_pte_t *vaddr;
869 816
870 GEM_BUG_ON(num_entries > pt->used_ptes); 817 GEM_BUG_ON(num_entries > pt->used_ptes);
@@ -875,7 +822,7 @@ static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
875 822
876 vaddr = kmap_atomic_px(pt); 823 vaddr = kmap_atomic_px(pt);
877 while (pte < pte_end) 824 while (pte < pte_end)
878 vaddr[pte++] = scratch_pte; 825 vaddr[pte++] = vm->scratch_pte;
879 kunmap_atomic(vaddr); 826 kunmap_atomic(vaddr);
880 827
881 return false; 828 return false;
@@ -1208,7 +1155,7 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
1208 if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) { 1155 if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
1209 u16 i; 1156 u16 i;
1210 1157
1211 encode = pte_encode | vma->vm->scratch_page.daddr; 1158 encode = vma->vm->scratch_pte;
1212 vaddr = kmap_atomic_px(pd->page_table[idx.pde]); 1159 vaddr = kmap_atomic_px(pd->page_table[idx.pde]);
1213 1160
1214 for (i = 1; i < index; i += 16) 1161 for (i = 1; i < index; i += 16)
@@ -1261,10 +1208,35 @@ static int gen8_init_scratch(struct i915_address_space *vm)
1261{ 1208{
1262 int ret; 1209 int ret;
1263 1210
1211 /*
 1212 * If everybody agrees not to write into the scratch page,
1213 * we can reuse it for all vm, keeping contexts and processes separate.
1214 */
1215 if (vm->has_read_only &&
1216 vm->i915->kernel_context &&
1217 vm->i915->kernel_context->ppgtt) {
1218 struct i915_address_space *clone =
1219 &vm->i915->kernel_context->ppgtt->vm;
1220
1221 GEM_BUG_ON(!clone->has_read_only);
1222
1223 vm->scratch_page.order = clone->scratch_page.order;
1224 vm->scratch_pte = clone->scratch_pte;
1225 vm->scratch_pt = clone->scratch_pt;
1226 vm->scratch_pd = clone->scratch_pd;
1227 vm->scratch_pdp = clone->scratch_pdp;
1228 return 0;
1229 }
1230
1264 ret = setup_scratch_page(vm, __GFP_HIGHMEM); 1231 ret = setup_scratch_page(vm, __GFP_HIGHMEM);
1265 if (ret) 1232 if (ret)
1266 return ret; 1233 return ret;
1267 1234
1235 vm->scratch_pte =
1236 gen8_pte_encode(vm->scratch_page.daddr,
1237 I915_CACHE_LLC,
1238 PTE_READ_ONLY);
1239
1268 vm->scratch_pt = alloc_pt(vm); 1240 vm->scratch_pt = alloc_pt(vm);
1269 if (IS_ERR(vm->scratch_pt)) { 1241 if (IS_ERR(vm->scratch_pt)) {
1270 ret = PTR_ERR(vm->scratch_pt); 1242 ret = PTR_ERR(vm->scratch_pt);
@@ -1336,6 +1308,9 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
1336 1308
1337static void gen8_free_scratch(struct i915_address_space *vm) 1309static void gen8_free_scratch(struct i915_address_space *vm)
1338{ 1310{
1311 if (!vm->scratch_page.daddr)
1312 return;
1313
1339 if (use_4lvl(vm)) 1314 if (use_4lvl(vm))
1340 free_pdp(vm, vm->scratch_pdp); 1315 free_pdp(vm, vm->scratch_pdp);
1341 free_pd(vm, vm->scratch_pd); 1316 free_pd(vm, vm->scratch_pd);
@@ -1573,8 +1548,7 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
1573static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m) 1548static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
1574{ 1549{
1575 struct i915_address_space *vm = &ppgtt->vm; 1550 struct i915_address_space *vm = &ppgtt->vm;
1576 const gen8_pte_t scratch_pte = 1551 const gen8_pte_t scratch_pte = vm->scratch_pte;
1577 gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
1578 u64 start = 0, length = ppgtt->vm.total; 1552 u64 start = 0, length = ppgtt->vm.total;
1579 1553
1580 if (use_4lvl(vm)) { 1554 if (use_4lvl(vm)) {
@@ -1647,16 +1621,12 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
1647 ppgtt->vm.i915 = i915; 1621 ppgtt->vm.i915 = i915;
1648 ppgtt->vm.dma = &i915->drm.pdev->dev; 1622 ppgtt->vm.dma = &i915->drm.pdev->dev;
1649 1623
1650 ppgtt->vm.total = USES_FULL_48BIT_PPGTT(i915) ? 1624 ppgtt->vm.total = HAS_FULL_48BIT_PPGTT(i915) ?
1651 1ULL << 48 : 1625 1ULL << 48 :
1652 1ULL << 32; 1626 1ULL << 32;
1653 1627
1654 /* 1628 /* From bdw, there is support for read-only pages in the PPGTT. */
1655 * From bdw, there is support for read-only pages in the PPGTT. 1629 ppgtt->vm.has_read_only = true;
1656 *
1657 * XXX GVT is not honouring the lack of RW in the PTE bits.
1658 */
1659 ppgtt->vm.has_read_only = !intel_vgpu_active(i915);
1660 1630
1661 i915_address_space_init(&ppgtt->vm, i915); 1631 i915_address_space_init(&ppgtt->vm, i915);
1662 1632
@@ -1721,7 +1691,7 @@ err_free:
1721static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m) 1691static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m)
1722{ 1692{
1723 struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base); 1693 struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
1724 const gen6_pte_t scratch_pte = ppgtt->scratch_pte; 1694 const gen6_pte_t scratch_pte = base->vm.scratch_pte;
1725 struct i915_page_table *pt; 1695 struct i915_page_table *pt;
1726 u32 pte, pde; 1696 u32 pte, pde;
1727 1697
@@ -1782,19 +1752,6 @@ static inline void gen6_write_pde(const struct gen6_hw_ppgtt *ppgtt,
1782 ppgtt->pd_addr + pde); 1752 ppgtt->pd_addr + pde);
1783} 1753}
1784 1754
1785static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv)
1786{
1787 struct intel_engine_cs *engine;
1788 enum intel_engine_id id;
1789
1790 for_each_engine(engine, dev_priv, id) {
1791 u32 four_level = USES_FULL_48BIT_PPGTT(dev_priv) ?
1792 GEN8_GFX_PPGTT_48B : 0;
1793 I915_WRITE(RING_MODE_GEN7(engine),
1794 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
1795 }
1796}
1797
1798static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv) 1755static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
1799{ 1756{
1800 struct intel_engine_cs *engine; 1757 struct intel_engine_cs *engine;
@@ -1834,7 +1791,8 @@ static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
1834 ecochk = I915_READ(GAM_ECOCHK); 1791 ecochk = I915_READ(GAM_ECOCHK);
1835 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B); 1792 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
1836 1793
1837 I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); 1794 if (HAS_PPGTT(dev_priv)) /* may be disabled for VT-d */
1795 I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
1838} 1796}
1839 1797
 1840/* PPGTT support for Sandybridge/Gen6 and later */ 1798
@@ -1846,7 +1804,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
1846 unsigned int pde = first_entry / GEN6_PTES; 1804 unsigned int pde = first_entry / GEN6_PTES;
1847 unsigned int pte = first_entry % GEN6_PTES; 1805 unsigned int pte = first_entry % GEN6_PTES;
1848 unsigned int num_entries = length / I915_GTT_PAGE_SIZE; 1806 unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
1849 const gen6_pte_t scratch_pte = ppgtt->scratch_pte; 1807 const gen6_pte_t scratch_pte = vm->scratch_pte;
1850 1808
1851 while (num_entries) { 1809 while (num_entries) {
1852 struct i915_page_table *pt = ppgtt->base.pd.page_table[pde++]; 1810 struct i915_page_table *pt = ppgtt->base.pd.page_table[pde++];
@@ -1937,7 +1895,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
1937 if (IS_ERR(pt)) 1895 if (IS_ERR(pt))
1938 goto unwind_out; 1896 goto unwind_out;
1939 1897
1940 gen6_initialize_pt(ppgtt, pt); 1898 gen6_initialize_pt(vm, pt);
1941 ppgtt->base.pd.page_table[pde] = pt; 1899 ppgtt->base.pd.page_table[pde] = pt;
1942 1900
1943 if (i915_vma_is_bound(ppgtt->vma, 1901 if (i915_vma_is_bound(ppgtt->vma,
@@ -1975,9 +1933,9 @@ static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt)
1975 if (ret) 1933 if (ret)
1976 return ret; 1934 return ret;
1977 1935
1978 ppgtt->scratch_pte = 1936 vm->scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
1979 vm->pte_encode(vm->scratch_page.daddr, 1937 I915_CACHE_NONE,
1980 I915_CACHE_NONE, PTE_READ_ONLY); 1938 PTE_READ_ONLY);
1981 1939
1982 vm->scratch_pt = alloc_pt(vm); 1940 vm->scratch_pt = alloc_pt(vm);
1983 if (IS_ERR(vm->scratch_pt)) { 1941 if (IS_ERR(vm->scratch_pt)) {
@@ -1985,7 +1943,7 @@ static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt)
1985 return PTR_ERR(vm->scratch_pt); 1943 return PTR_ERR(vm->scratch_pt);
1986 } 1944 }
1987 1945
1988 gen6_initialize_pt(ppgtt, vm->scratch_pt); 1946 gen6_initialize_pt(vm, vm->scratch_pt);
1989 gen6_for_all_pdes(unused, &ppgtt->base.pd, pde) 1947 gen6_for_all_pdes(unused, &ppgtt->base.pd, pde)
1990 ppgtt->base.pd.page_table[pde] = vm->scratch_pt; 1948 ppgtt->base.pd.page_table[pde] = vm->scratch_pt;
1991 1949
@@ -2237,23 +2195,10 @@ int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
2237{ 2195{
2238 gtt_write_workarounds(dev_priv); 2196 gtt_write_workarounds(dev_priv);
2239 2197
2240 /* In the case of execlists, PPGTT is enabled by the context descriptor
2241 * and the PDPs are contained within the context itself. We don't
2242 * need to do anything here. */
2243 if (HAS_LOGICAL_RING_CONTEXTS(dev_priv))
2244 return 0;
2245
2246 if (!USES_PPGTT(dev_priv))
2247 return 0;
2248
2249 if (IS_GEN6(dev_priv)) 2198 if (IS_GEN6(dev_priv))
2250 gen6_ppgtt_enable(dev_priv); 2199 gen6_ppgtt_enable(dev_priv);
2251 else if (IS_GEN7(dev_priv)) 2200 else if (IS_GEN7(dev_priv))
2252 gen7_ppgtt_enable(dev_priv); 2201 gen7_ppgtt_enable(dev_priv);
2253 else if (INTEL_GEN(dev_priv) >= 8)
2254 gen8_ppgtt_enable(dev_priv);
2255 else
2256 MISSING_CASE(INTEL_GEN(dev_priv));
2257 2202
2258 return 0; 2203 return 0;
2259} 2204}
@@ -2543,8 +2488,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
2543 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); 2488 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2544 unsigned first_entry = start / I915_GTT_PAGE_SIZE; 2489 unsigned first_entry = start / I915_GTT_PAGE_SIZE;
2545 unsigned num_entries = length / I915_GTT_PAGE_SIZE; 2490 unsigned num_entries = length / I915_GTT_PAGE_SIZE;
2546 const gen8_pte_t scratch_pte = 2491 const gen8_pte_t scratch_pte = vm->scratch_pte;
2547 gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
2548 gen8_pte_t __iomem *gtt_base = 2492 gen8_pte_t __iomem *gtt_base =
2549 (gen8_pte_t __iomem *)ggtt->gsm + first_entry; 2493 (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
2550 const int max_entries = ggtt_total_entries(ggtt) - first_entry; 2494 const int max_entries = ggtt_total_entries(ggtt) - first_entry;
@@ -2669,8 +2613,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
2669 first_entry, num_entries, max_entries)) 2613 first_entry, num_entries, max_entries))
2670 num_entries = max_entries; 2614 num_entries = max_entries;
2671 2615
2672 scratch_pte = vm->pte_encode(vm->scratch_page.daddr, 2616 scratch_pte = vm->scratch_pte;
2673 I915_CACHE_LLC, 0);
2674 2617
2675 for (i = 0; i < num_entries; i++) 2618 for (i = 0; i < num_entries; i++)
2676 iowrite32(scratch_pte, &gtt_base[i]); 2619 iowrite32(scratch_pte, &gtt_base[i]);
@@ -2952,7 +2895,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
2952 /* And finally clear the reserved guard page */ 2895 /* And finally clear the reserved guard page */
2953 ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE); 2896 ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
2954 2897
2955 if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) { 2898 if (INTEL_PPGTT(dev_priv) == INTEL_PPGTT_ALIASING) {
2956 ret = i915_gem_init_aliasing_ppgtt(dev_priv); 2899 ret = i915_gem_init_aliasing_ppgtt(dev_priv);
2957 if (ret) 2900 if (ret)
2958 goto err; 2901 goto err;
@@ -3076,6 +3019,10 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
3076 return ret; 3019 return ret;
3077 } 3020 }
3078 3021
3022 ggtt->vm.scratch_pte =
3023 ggtt->vm.pte_encode(ggtt->vm.scratch_page.daddr,
3024 I915_CACHE_NONE, 0);
3025
3079 return 0; 3026 return 0;
3080} 3027}
3081 3028
@@ -3275,7 +3222,7 @@ static void bdw_setup_private_ppat(struct intel_ppat *ppat)
3275 ppat->match = bdw_private_pat_match; 3222 ppat->match = bdw_private_pat_match;
3276 ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3); 3223 ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
3277 3224
3278 if (!USES_PPGTT(ppat->i915)) { 3225 if (!HAS_PPGTT(ppat->i915)) {
3279 /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry, 3226 /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
3280 * so RTL will always use the value corresponding to 3227 * so RTL will always use the value corresponding to
3281 * pat_sel = 000". 3228 * pat_sel = 000".
@@ -3402,7 +3349,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
3402 ggtt->vm.cleanup = gen6_gmch_remove; 3349 ggtt->vm.cleanup = gen6_gmch_remove;
3403 ggtt->vm.insert_page = gen8_ggtt_insert_page; 3350 ggtt->vm.insert_page = gen8_ggtt_insert_page;
3404 ggtt->vm.clear_range = nop_clear_range; 3351 ggtt->vm.clear_range = nop_clear_range;
3405 if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv)) 3352 if (intel_scanout_needs_vtd_wa(dev_priv))
3406 ggtt->vm.clear_range = gen8_ggtt_clear_range; 3353 ggtt->vm.clear_range = gen8_ggtt_clear_range;
3407 3354
3408 ggtt->vm.insert_entries = gen8_ggtt_insert_entries; 3355 ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
@@ -3413,6 +3360,11 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
3413 ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL; 3360 ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
3414 if (ggtt->vm.clear_range != nop_clear_range) 3361 if (ggtt->vm.clear_range != nop_clear_range)
3415 ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL; 3362 ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
3363
3364 /* Prevent recursively calling stop_machine() and deadlocks. */
3365 dev_info(dev_priv->drm.dev,
3366 "Disabling error capture for VT-d workaround\n");
3367 i915_disable_error_state(dev_priv, -ENODEV);
3416 } 3368 }
3417 3369
3418 ggtt->invalidate = gen6_ggtt_invalidate; 3370 ggtt->invalidate = gen6_ggtt_invalidate;
@@ -3422,6 +3374,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
3422 ggtt->vm.vma_ops.set_pages = ggtt_set_pages; 3374 ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
3423 ggtt->vm.vma_ops.clear_pages = clear_pages; 3375 ggtt->vm.vma_ops.clear_pages = clear_pages;
3424 3376
3377 ggtt->vm.pte_encode = gen8_pte_encode;
3378
3425 setup_private_pat(dev_priv); 3379 setup_private_pat(dev_priv);
3426 3380
3427 return ggtt_probe_common(ggtt, size); 3381 return ggtt_probe_common(ggtt, size);
@@ -3609,7 +3563,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
3609 /* Only VLV supports read-only GGTT mappings */ 3563 /* Only VLV supports read-only GGTT mappings */
3610 ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv); 3564 ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv);
3611 3565
3612 if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv)) 3566 if (!HAS_LLC(dev_priv) && !HAS_PPGTT(dev_priv))
3613 ggtt->vm.mm.color_adjust = i915_gtt_color_adjust; 3567 ggtt->vm.mm.color_adjust = i915_gtt_color_adjust;
3614 mutex_unlock(&dev_priv->drm.struct_mutex); 3568 mutex_unlock(&dev_priv->drm.struct_mutex);
3615 3569
@@ -3711,7 +3665,7 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
3711} 3665}
3712 3666
3713static struct scatterlist * 3667static struct scatterlist *
3714rotate_pages(const dma_addr_t *in, unsigned int offset, 3668rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
3715 unsigned int width, unsigned int height, 3669 unsigned int width, unsigned int height,
3716 unsigned int stride, 3670 unsigned int stride,
3717 struct sg_table *st, struct scatterlist *sg) 3671 struct sg_table *st, struct scatterlist *sg)
@@ -3720,7 +3674,7 @@ rotate_pages(const dma_addr_t *in, unsigned int offset,
3720 unsigned int src_idx; 3674 unsigned int src_idx;
3721 3675
3722 for (column = 0; column < width; column++) { 3676 for (column = 0; column < width; column++) {
3723 src_idx = stride * (height - 1) + column; 3677 src_idx = stride * (height - 1) + column + offset;
3724 for (row = 0; row < height; row++) { 3678 for (row = 0; row < height; row++) {
3725 st->nents++; 3679 st->nents++;
3726 /* We don't need the pages, but need to initialize 3680 /* We don't need the pages, but need to initialize
@@ -3728,7 +3682,8 @@ rotate_pages(const dma_addr_t *in, unsigned int offset,
3728 * The only thing we need are DMA addresses. 3682 * The only thing we need are DMA addresses.
3729 */ 3683 */
3730 sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0); 3684 sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
3731 sg_dma_address(sg) = in[offset + src_idx]; 3685 sg_dma_address(sg) =
3686 i915_gem_object_get_dma_address(obj, src_idx);
3732 sg_dma_len(sg) = I915_GTT_PAGE_SIZE; 3687 sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
3733 sg = sg_next(sg); 3688 sg = sg_next(sg);
3734 src_idx -= stride; 3689 src_idx -= stride;
@@ -3742,22 +3697,11 @@ static noinline struct sg_table *
3742intel_rotate_pages(struct intel_rotation_info *rot_info, 3697intel_rotate_pages(struct intel_rotation_info *rot_info,
3743 struct drm_i915_gem_object *obj) 3698 struct drm_i915_gem_object *obj)
3744{ 3699{
3745 const unsigned long n_pages = obj->base.size / I915_GTT_PAGE_SIZE;
3746 unsigned int size = intel_rotation_info_size(rot_info); 3700 unsigned int size = intel_rotation_info_size(rot_info);
3747 struct sgt_iter sgt_iter;
3748 dma_addr_t dma_addr;
3749 unsigned long i;
3750 dma_addr_t *page_addr_list;
3751 struct sg_table *st; 3701 struct sg_table *st;
3752 struct scatterlist *sg; 3702 struct scatterlist *sg;
3753 int ret = -ENOMEM; 3703 int ret = -ENOMEM;
3754 3704 int i;
3755 /* Allocate a temporary list of source pages for random access. */
3756 page_addr_list = kvmalloc_array(n_pages,
3757 sizeof(dma_addr_t),
3758 GFP_KERNEL);
3759 if (!page_addr_list)
3760 return ERR_PTR(ret);
3761 3705
3762 /* Allocate target SG list. */ 3706 /* Allocate target SG list. */
3763 st = kmalloc(sizeof(*st), GFP_KERNEL); 3707 st = kmalloc(sizeof(*st), GFP_KERNEL);
@@ -3768,29 +3712,20 @@ intel_rotate_pages(struct intel_rotation_info *rot_info,
3768 if (ret) 3712 if (ret)
3769 goto err_sg_alloc; 3713 goto err_sg_alloc;
3770 3714
3771 /* Populate source page list from the object. */
3772 i = 0;
3773 for_each_sgt_dma(dma_addr, sgt_iter, obj->mm.pages)
3774 page_addr_list[i++] = dma_addr;
3775
3776 GEM_BUG_ON(i != n_pages);
3777 st->nents = 0; 3715 st->nents = 0;
3778 sg = st->sgl; 3716 sg = st->sgl;
3779 3717
3780 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) { 3718 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
3781 sg = rotate_pages(page_addr_list, rot_info->plane[i].offset, 3719 sg = rotate_pages(obj, rot_info->plane[i].offset,
3782 rot_info->plane[i].width, rot_info->plane[i].height, 3720 rot_info->plane[i].width, rot_info->plane[i].height,
3783 rot_info->plane[i].stride, st, sg); 3721 rot_info->plane[i].stride, st, sg);
3784 } 3722 }
3785 3723
3786 kvfree(page_addr_list);
3787
3788 return st; 3724 return st;
3789 3725
3790err_sg_alloc: 3726err_sg_alloc:
3791 kfree(st); 3727 kfree(st);
3792err_st_alloc: 3728err_st_alloc:
3793 kvfree(page_addr_list);
3794 3729
3795 DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n", 3730 DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
3796 obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size); 3731 obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
@@ -3835,6 +3770,8 @@ intel_partial_pages(const struct i915_ggtt_view *view,
3835 count -= len >> PAGE_SHIFT; 3770 count -= len >> PAGE_SHIFT;
3836 if (count == 0) { 3771 if (count == 0) {
3837 sg_mark_end(sg); 3772 sg_mark_end(sg);
3773 i915_sg_trim(st); /* Drop any unused tail entries. */
3774
3838 return st; 3775 return st;
3839 } 3776 }
3840 3777
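The early-return path of intel_partial_pages() now trims the scatterlist, since the table is sized for the worst case and may finish with unused tail entries. Roughly, a trim helper like i915_sg_trim() repacks the used entries into a right-sized table; the following is an assumed shape for illustration, not the exact driver implementation:

static bool i915_sg_trim_sketch(struct sg_table *orig_st)
{
	struct sg_table new_st;
	struct scatterlist *sg, *new_sg;
	unsigned int i;

	if (orig_st->nents == orig_st->orig_nents)
		return false; /* nothing over-allocated */

	if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))
		return false; /* keep the old table on allocation failure */

	new_sg = new_st.sgl;
	for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
		sg_set_page(new_sg, sg_page(sg), sg->length, 0);
		sg_dma_address(new_sg) = sg_dma_address(sg);
		sg_dma_len(new_sg) = sg_dma_len(sg);
		new_sg = sg_next(new_sg);
	}

	sg_free_table(orig_st);
	*orig_st = new_st;
	return true;
}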
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 28039290655c..4874da09a3c4 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -289,6 +289,7 @@ struct i915_address_space {
289 289
290 struct mutex mutex; /* protects vma and our lists */ 290 struct mutex mutex; /* protects vma and our lists */
291 291
292 u64 scratch_pte;
292 struct i915_page_dma scratch_page; 293 struct i915_page_dma scratch_page;
293 struct i915_page_table *scratch_pt; 294 struct i915_page_table *scratch_pt;
294 struct i915_page_directory *scratch_pd; 295 struct i915_page_directory *scratch_pd;
@@ -335,12 +336,11 @@ struct i915_address_space {
335 /* Some systems support read-only mappings for GGTT and/or PPGTT */ 336 /* Some systems support read-only mappings for GGTT and/or PPGTT */
336 bool has_read_only:1; 337 bool has_read_only:1;
337 338
338 /* FIXME: Need a more generic return type */ 339 u64 (*pte_encode)(dma_addr_t addr,
339 gen6_pte_t (*pte_encode)(dma_addr_t addr, 340 enum i915_cache_level level,
340 enum i915_cache_level level, 341 u32 flags); /* Create a valid PTE */
341 u32 flags); /* Create a valid PTE */
342 /* flags for pte_encode */
343#define PTE_READ_ONLY (1<<0) 342#define PTE_READ_ONLY (1<<0)
343
344 int (*allocate_va_range)(struct i915_address_space *vm, 344 int (*allocate_va_range)(struct i915_address_space *vm,
345 u64 start, u64 length); 345 u64 start, u64 length);
346 void (*clear_range)(struct i915_address_space *vm, 346 void (*clear_range)(struct i915_address_space *vm,
@@ -422,7 +422,6 @@ struct gen6_hw_ppgtt {
422 422
423 struct i915_vma *vma; 423 struct i915_vma *vma;
424 gen6_pte_t __iomem *pd_addr; 424 gen6_pte_t __iomem *pd_addr;
425 gen6_pte_t scratch_pte;
426 425
427 unsigned int pin_count; 426 unsigned int pin_count;
428 bool scan_for_unused_pt; 427 bool scan_for_unused_pt;
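With pte_encode() widened from the gen6-specific gen6_pte_t to a plain u64 (answering the FIXME it replaces), the cached scratch PTE can move out of gen6_hw_ppgtt into the common i915_address_space, as the two hunks above show. A hedged sketch of the init-time caching this enables; the call site and flag choice are assumptions, while the field names come from the hunks:

/* Computed once at address-space setup rather than re-encoded on
 * every clear_range() invocation.
 */
vm->scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
				 I915_CACHE_NONE,
				 vm->has_read_only ? PTE_READ_ONLY : 0);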
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 8762d17b6659..21b5c8765015 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -512,7 +512,7 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
512 err_printf(m, " SYNC_2: 0x%08x\n", 512 err_printf(m, " SYNC_2: 0x%08x\n",
513 ee->semaphore_mboxes[2]); 513 ee->semaphore_mboxes[2]);
514 } 514 }
515 if (USES_PPGTT(m->i915)) { 515 if (HAS_PPGTT(m->i915)) {
516 err_printf(m, " GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode); 516 err_printf(m, " GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);
517 517
518 if (INTEL_GEN(m->i915) >= 8) { 518 if (INTEL_GEN(m->i915) >= 8) {
@@ -648,6 +648,9 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
648 return 0; 648 return 0;
649 } 649 }
650 650
651 if (IS_ERR(error))
652 return PTR_ERR(error);
653
651 if (*error->error_msg) 654 if (*error->error_msg)
652 err_printf(m, "%s\n", error->error_msg); 655 err_printf(m, "%s\n", error->error_msg);
653 err_printf(m, "Kernel: " UTS_RELEASE "\n"); 656 err_printf(m, "Kernel: " UTS_RELEASE "\n");
@@ -999,7 +1002,6 @@ i915_error_object_create(struct drm_i915_private *i915,
999 } 1002 }
1000 1003
1001 compress_fini(&compress, dst); 1004 compress_fini(&compress, dst);
1002 ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
1003 return dst; 1005 return dst;
1004} 1006}
1005 1007
@@ -1268,7 +1270,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
1268 ee->reset_count = i915_reset_engine_count(&dev_priv->gpu_error, 1270 ee->reset_count = i915_reset_engine_count(&dev_priv->gpu_error,
1269 engine); 1271 engine);
1270 1272
1271 if (USES_PPGTT(dev_priv)) { 1273 if (HAS_PPGTT(dev_priv)) {
1272 int i; 1274 int i;
1273 1275
1274 ee->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine)); 1276 ee->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
@@ -1785,6 +1787,14 @@ static unsigned long capture_find_epoch(const struct i915_gpu_state *error)
1785 return epoch; 1787 return epoch;
1786} 1788}
1787 1789
1790static void capture_finish(struct i915_gpu_state *error)
1791{
1792 struct i915_ggtt *ggtt = &error->i915->ggtt;
1793 const u64 slot = ggtt->error_capture.start;
1794
1795 ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
1796}
1797
1788static int capture(void *data) 1798static int capture(void *data)
1789{ 1799{
1790 struct i915_gpu_state *error = data; 1800 struct i915_gpu_state *error = data;
@@ -1809,6 +1819,7 @@ static int capture(void *data)
1809 1819
1810 error->epoch = capture_find_epoch(error); 1820 error->epoch = capture_find_epoch(error);
1811 1821
1822 capture_finish(error);
1812 return 0; 1823 return 0;
1813} 1824}
1814 1825
@@ -1859,6 +1870,7 @@ void i915_capture_error_state(struct drm_i915_private *i915,
1859 error = i915_capture_gpu_state(i915); 1870 error = i915_capture_gpu_state(i915);
1860 if (!error) { 1871 if (!error) {
1861 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); 1872 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
1873 i915_disable_error_state(i915, -ENOMEM);
1862 return; 1874 return;
1863 } 1875 }
1864 1876
@@ -1914,5 +1926,14 @@ void i915_reset_error_state(struct drm_i915_private *i915)
1914 i915->gpu_error.first_error = NULL; 1926 i915->gpu_error.first_error = NULL;
1915 spin_unlock_irq(&i915->gpu_error.lock); 1927 spin_unlock_irq(&i915->gpu_error.lock);
1916 1928
1917 i915_gpu_state_put(error); 1929 if (!IS_ERR(error))
1930 i915_gpu_state_put(error);
1931}
1932
1933void i915_disable_error_state(struct drm_i915_private *i915, int err)
1934{
1935 spin_lock_irq(&i915->gpu_error.lock);
1936 if (!i915->gpu_error.first_error)
1937 i915->gpu_error.first_error = ERR_PTR(err);
1938 spin_unlock_irq(&i915->gpu_error.lock);
1918} 1939}
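Two behavioural changes ride along in this file: the clear of the error-capture GGTT slot moves out of i915_error_object_create() into a single capture_finish() at the end of the capture thread, and gpu_error.first_error may now hold an ERR_PTR() sentinel, set by i915_disable_error_state() when capture fails (the -ENOMEM path above) or is unsupported, so later reads return a meaningful errno instead of silently seeing nothing. A hypothetical consumer pattern under the new contract; the surrounding flow is an illustration, not driver code:

struct i915_gpu_state *gpu;

gpu = i915_first_error_state(i915);
if (IS_ERR(gpu))	/* capturing disabled, or a prior capture failed */
	return PTR_ERR(gpu);
if (!gpu)		/* nothing captured yet */
	return 0;

/* ... format it via i915_error_state_to_str(), then drop the reference ... */
i915_gpu_state_put(gpu);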
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 8710fb18ed74..3ec89a504de5 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -343,6 +343,7 @@ static inline void i915_gpu_state_put(struct i915_gpu_state *gpu)
343 343
344struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915); 344struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915);
345void i915_reset_error_state(struct drm_i915_private *i915); 345void i915_reset_error_state(struct drm_i915_private *i915);
346void i915_disable_error_state(struct drm_i915_private *i915, int err);
346 347
347#else 348#else
348 349
@@ -355,13 +356,18 @@ static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
355static inline struct i915_gpu_state * 356static inline struct i915_gpu_state *
356i915_first_error_state(struct drm_i915_private *i915) 357i915_first_error_state(struct drm_i915_private *i915)
357{ 358{
358 return NULL; 359 return ERR_PTR(-ENODEV);
359} 360}
360 361
361static inline void i915_reset_error_state(struct drm_i915_private *i915) 362static inline void i915_reset_error_state(struct drm_i915_private *i915)
362{ 363{
363} 364}
364 365
366static inline void i915_disable_error_state(struct drm_i915_private *i915,
367 int err)
368{
369}
370
365#endif /* IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) */ 371#endif /* IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) */
366 372
367#endif /* _I915_GPU_ERROR_H_ */ 373#endif /* _I915_GPU_ERROR_H_ */
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 2e242270e270..d447d7d508f4 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2887,21 +2887,39 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2887 return ret; 2887 return ret;
2888} 2888}
2889 2889
2890static inline u32 gen8_master_intr_disable(void __iomem * const regs)
2891{
2892 raw_reg_write(regs, GEN8_MASTER_IRQ, 0);
2893
2894 /*
2895 * Now with master disabled, get a sample of level indications
2896 * for this interrupt. Indications will be cleared on related acks.
2897 * New indications can and will light up during processing,
2898 * and will generate new interrupt after enabling master.
2899 */
2900 return raw_reg_read(regs, GEN8_MASTER_IRQ);
2901}
2902
2903static inline void gen8_master_intr_enable(void __iomem * const regs)
2904{
2905 raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2906}
2907
2890static irqreturn_t gen8_irq_handler(int irq, void *arg) 2908static irqreturn_t gen8_irq_handler(int irq, void *arg)
2891{ 2909{
2892 struct drm_i915_private *dev_priv = to_i915(arg); 2910 struct drm_i915_private *dev_priv = to_i915(arg);
2911 void __iomem * const regs = dev_priv->regs;
2893 u32 master_ctl; 2912 u32 master_ctl;
2894 u32 gt_iir[4]; 2913 u32 gt_iir[4];
2895 2914
2896 if (!intel_irqs_enabled(dev_priv)) 2915 if (!intel_irqs_enabled(dev_priv))
2897 return IRQ_NONE; 2916 return IRQ_NONE;
2898 2917
2899 master_ctl = I915_READ_FW(GEN8_MASTER_IRQ); 2918 master_ctl = gen8_master_intr_disable(regs);
2900 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; 2919 if (!master_ctl) {
2901 if (!master_ctl) 2920 gen8_master_intr_enable(regs);
2902 return IRQ_NONE; 2921 return IRQ_NONE;
2903 2922 }
2904 I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
2905 2923
2906 /* Find, clear, then process each source of interrupt */ 2924 /* Find, clear, then process each source of interrupt */
2907 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 2925 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
@@ -2913,7 +2931,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
2913 enable_rpm_wakeref_asserts(dev_priv); 2931 enable_rpm_wakeref_asserts(dev_priv);
2914 } 2932 }
2915 2933
2916 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2934 gen8_master_intr_enable(regs);
2917 2935
2918 gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir); 2936 gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
2919 2937
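The open-coded master-interrupt sequence becomes a pair of helpers: writing 0 to GEN8_MASTER_IRQ masks everything, and the read that follows both posts that write and samples the level indications to be processed. Condensed, the handler now follows this shape (the gen11 variant below mirrors it with GEN11_GFX_MSTR_IRQ):

master_ctl = gen8_master_intr_disable(regs);	/* mask, then sample */
if (!master_ctl) {
	gen8_master_intr_enable(regs);		/* spurious: re-arm */
	return IRQ_NONE;
}

/* find, clear and process each source while the master stays masked */

gen8_master_intr_enable(regs);			/* re-arm the master */

The reset and postinstall paths reuse the same helpers; in the reset path the disable helper's trailing read also stands in for the old explicit POSTING_READ.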
@@ -3111,6 +3129,24 @@ gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir)
3111 intel_opregion_asle_intr(dev_priv); 3129 intel_opregion_asle_intr(dev_priv);
3112} 3130}
3113 3131
3132static inline u32 gen11_master_intr_disable(void __iomem * const regs)
3133{
3134 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
3135
3136 /*
3137 * Now with master disabled, get a sample of level indications
3138 * for this interrupt. Indications will be cleared on related acks.
3139 * New indications can and will light up during processing,
3140 * and will generate new interrupt after enabling master.
3141 */
3142 return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
3143}
3144
3145static inline void gen11_master_intr_enable(void __iomem * const regs)
3146{
3147 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
3148}
3149
3114static irqreturn_t gen11_irq_handler(int irq, void *arg) 3150static irqreturn_t gen11_irq_handler(int irq, void *arg)
3115{ 3151{
3116 struct drm_i915_private * const i915 = to_i915(arg); 3152 struct drm_i915_private * const i915 = to_i915(arg);
@@ -3121,13 +3157,11 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
3121 if (!intel_irqs_enabled(i915)) 3157 if (!intel_irqs_enabled(i915))
3122 return IRQ_NONE; 3158 return IRQ_NONE;
3123 3159
3124 master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ); 3160 master_ctl = gen11_master_intr_disable(regs);
3125 master_ctl &= ~GEN11_MASTER_IRQ; 3161 if (!master_ctl) {
3126 if (!master_ctl) 3162 gen11_master_intr_enable(regs);
3127 return IRQ_NONE; 3163 return IRQ_NONE;
3128 3164 }
3129 /* Disable interrupts. */
3130 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
3131 3165
3132 /* Find, clear, then process each source of interrupt. */ 3166 /* Find, clear, then process each source of interrupt. */
3133 gen11_gt_irq_handler(i915, master_ctl); 3167 gen11_gt_irq_handler(i915, master_ctl);
@@ -3147,8 +3181,7 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
3147 3181
3148 gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl); 3182 gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
3149 3183
3150 /* Acknowledge and enable interrupts. */ 3184 gen11_master_intr_enable(regs);
3151 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl);
3152 3185
3153 gen11_gu_misc_irq_handler(i915, gu_misc_iir); 3186 gen11_gu_misc_irq_handler(i915, gu_misc_iir);
3154 3187
@@ -3598,8 +3631,7 @@ static void gen8_irq_reset(struct drm_device *dev)
3598 struct drm_i915_private *dev_priv = to_i915(dev); 3631 struct drm_i915_private *dev_priv = to_i915(dev);
3599 int pipe; 3632 int pipe;
3600 3633
3601 I915_WRITE(GEN8_MASTER_IRQ, 0); 3634 gen8_master_intr_disable(dev_priv->regs);
3602 POSTING_READ(GEN8_MASTER_IRQ);
3603 3635
3604 gen8_gt_irq_reset(dev_priv); 3636 gen8_gt_irq_reset(dev_priv);
3605 3637
@@ -3641,13 +3673,15 @@ static void gen11_irq_reset(struct drm_device *dev)
3641 struct drm_i915_private *dev_priv = dev->dev_private; 3673 struct drm_i915_private *dev_priv = dev->dev_private;
3642 int pipe; 3674 int pipe;
3643 3675
3644 I915_WRITE(GEN11_GFX_MSTR_IRQ, 0); 3676 gen11_master_intr_disable(dev_priv->regs);
3645 POSTING_READ(GEN11_GFX_MSTR_IRQ);
3646 3677
3647 gen11_gt_irq_reset(dev_priv); 3678 gen11_gt_irq_reset(dev_priv);
3648 3679
3649 I915_WRITE(GEN11_DISPLAY_INT_CTL, 0); 3680 I915_WRITE(GEN11_DISPLAY_INT_CTL, 0);
3650 3681
3682 I915_WRITE(EDP_PSR_IMR, 0xffffffff);
3683 I915_WRITE(EDP_PSR_IIR, 0xffffffff);
3684
3651 for_each_pipe(dev_priv, pipe) 3685 for_each_pipe(dev_priv, pipe)
3652 if (intel_display_power_is_enabled(dev_priv, 3686 if (intel_display_power_is_enabled(dev_priv,
3653 POWER_DOMAIN_PIPE(pipe))) 3687 POWER_DOMAIN_PIPE(pipe)))
@@ -4244,8 +4278,7 @@ static int gen8_irq_postinstall(struct drm_device *dev)
4244 if (HAS_PCH_SPLIT(dev_priv)) 4278 if (HAS_PCH_SPLIT(dev_priv))
4245 ibx_irq_postinstall(dev); 4279 ibx_irq_postinstall(dev);
4246 4280
4247 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 4281 gen8_master_intr_enable(dev_priv->regs);
4248 POSTING_READ(GEN8_MASTER_IRQ);
4249 4282
4250 return 0; 4283 return 0;
4251} 4284}
@@ -4307,8 +4340,7 @@ static int gen11_irq_postinstall(struct drm_device *dev)
4307 4340
4308 I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE); 4341 I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
4309 4342
4310 I915_WRITE(GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ); 4343 gen11_master_intr_enable(dev_priv->regs);
4311 POSTING_READ(GEN11_GFX_MSTR_IRQ);
4312 4344
4313 return 0; 4345 return 0;
4314} 4346}
@@ -4834,6 +4866,13 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
4834 dev_priv->display_irqs_enabled = false; 4866 dev_priv->display_irqs_enabled = false;
4835 4867
4836 dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD; 4868 dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4869 /* If we have MST support, we want to avoid doing short HPD IRQ storm
4870 * detection, as short HPD storms will occur as a natural part of
4871 * sideband messaging with MST.
4872 * On older platforms however, IRQ storms can occur with both long and
4873 * short pulses, as seen on some G4x systems.
4874 */
4875 dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
4837 4876
4838 dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos; 4877 dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
4839 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 4878 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
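The new hpd_short_storm_enabled default turns short-pulse storm accounting off wherever DP MST is available, since MST sideband messaging naturally produces bursts of short HPDs; older platforms such as G4x can storm on both pulse widths, so they keep it on. A hypothetical consumer in the storm detector, purely for illustration (the flag comes from the hunk above, the function shape is assumed):

static bool hpd_pulse_counts_toward_storm(struct drm_i915_private *i915,
					  bool long_hpd)
{
	/* Short pulses are expected alongside MST sideband traffic, so
	 * only count them where short-storm detection is enabled.
	 */
	return long_hpd || i915->hotplug.hpd_short_storm_enabled;
}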
diff --git a/drivers/gpu/drm/i915/i915_oa_bdw.c b/drivers/gpu/drm/i915/i915_oa_bdw.c
index 4abd2e8b5083..4acdb94555b7 100644
--- a/drivers/gpu/drm/i915/i915_oa_bdw.c
+++ b/drivers/gpu/drm/i915/i915_oa_bdw.c
@@ -1,29 +1,10 @@
1/* 1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop 2 * SPDX-License-Identifier: MIT
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 * 3 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Copyright © 2018 Intel Corporation
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 * 5 *
6 * Autogenerated file by GPU Top : https://github.com/rib/gputop
7 * DO NOT EDIT manually!
27 */ 8 */
28 9
29#include <linux/sysfs.h> 10#include <linux/sysfs.h>
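Every autogenerated i915_oa_* metrics file below gets the same treatment: the full MIT license text collapses to an SPDX identifier, leaving the header as (taken verbatim from the new side of the diff):

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 *
 * Autogenerated file by GPU Top : https://github.com/rib/gputop
 * DO NOT EDIT manually!
 */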
diff --git a/drivers/gpu/drm/i915/i915_oa_bdw.h b/drivers/gpu/drm/i915/i915_oa_bdw.h
index b812d16162ac..0e667f1a8aa1 100644
--- a/drivers/gpu/drm/i915/i915_oa_bdw.h
+++ b/drivers/gpu/drm/i915/i915_oa_bdw.h
@@ -1,29 +1,10 @@
1/* 1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop 2 * SPDX-License-Identifier: MIT
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 * 3 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Copyright © 2018 Intel Corporation
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 * 5 *
6 * Autogenerated file by GPU Top : https://github.com/rib/gputop
7 * DO NOT EDIT manually!
27 */ 8 */
28 9
29#ifndef __I915_OA_BDW_H__ 10#ifndef __I915_OA_BDW_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_bxt.c b/drivers/gpu/drm/i915/i915_oa_bxt.c
index cb6f304ec16a..a44195c39923 100644
--- a/drivers/gpu/drm/i915/i915_oa_bxt.c
+++ b/drivers/gpu/drm/i915/i915_oa_bxt.c
@@ -1,29 +1,10 @@
1/* 1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop 2 * SPDX-License-Identifier: MIT
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 * 3 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Copyright © 2018 Intel Corporation
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 * 5 *
6 * Autogenerated file by GPU Top : https://github.com/rib/gputop
7 * DO NOT EDIT manually!
27 */ 8 */
28 9
29#include <linux/sysfs.h> 10#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_bxt.h b/drivers/gpu/drm/i915/i915_oa_bxt.h
index 690b963a2383..679e92cf4f1d 100644
--- a/drivers/gpu/drm/i915/i915_oa_bxt.h
+++ b/drivers/gpu/drm/i915/i915_oa_bxt.h
@@ -1,29 +1,10 @@
1/* 1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop 2 * SPDX-License-Identifier: MIT
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 * 3 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Copyright © 2018 Intel Corporation
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 * 5 *
6 * Autogenerated file by GPU Top : https://github.com/rib/gputop
7 * DO NOT EDIT manually!
27 */ 8 */
28 9
29#ifndef __I915_OA_BXT_H__ 10#ifndef __I915_OA_BXT_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt2.c b/drivers/gpu/drm/i915/i915_oa_cflgt2.c
index 8641ae30e343..7f60d51b8761 100644
--- a/drivers/gpu/drm/i915/i915_oa_cflgt2.c
+++ b/drivers/gpu/drm/i915/i915_oa_cflgt2.c
@@ -1,29 +1,10 @@
1/* 1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop 2 * SPDX-License-Identifier: MIT
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 * 3 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Copyright © 2018 Intel Corporation
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 * 5 *
6 * Autogenerated file by GPU Top : https://github.com/rib/gputop
7 * DO NOT EDIT manually!
27 */ 8 */
28 9
29#include <linux/sysfs.h> 10#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt2.h b/drivers/gpu/drm/i915/i915_oa_cflgt2.h
index 1f3268ef2ea2..4d6025559bbe 100644
--- a/drivers/gpu/drm/i915/i915_oa_cflgt2.h
+++ b/drivers/gpu/drm/i915/i915_oa_cflgt2.h
@@ -1,29 +1,10 @@
1/* 1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop 2 * SPDX-License-Identifier: MIT
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 * 3 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Copyright © 2018 Intel Corporation
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 * 5 *
6 * Autogenerated file by GPU Top : https://github.com/rib/gputop
7 * DO NOT EDIT manually!
27 */ 8 */
28 9
29#ifndef __I915_OA_CFLGT2_H__ 10#ifndef __I915_OA_CFLGT2_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt3.c b/drivers/gpu/drm/i915/i915_oa_cflgt3.c
index 792facdb6702..a92c38e3a0ce 100644
--- a/drivers/gpu/drm/i915/i915_oa_cflgt3.c
+++ b/drivers/gpu/drm/i915/i915_oa_cflgt3.c
@@ -1,29 +1,10 @@
1/* 1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop 2 * SPDX-License-Identifier: MIT
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 * 3 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Copyright © 2018 Intel Corporation
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 * 5 *
6 * Autogenerated file by GPU Top : https://github.com/rib/gputop
7 * DO NOT EDIT manually!
27 */ 8 */
28 9
29#include <linux/sysfs.h> 10#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt3.h b/drivers/gpu/drm/i915/i915_oa_cflgt3.h
index c13b5aac01b9..0697f4077402 100644
--- a/drivers/gpu/drm/i915/i915_oa_cflgt3.h
+++ b/drivers/gpu/drm/i915/i915_oa_cflgt3.h
@@ -1,29 +1,10 @@
1/* 1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop 2 * SPDX-License-Identifier: MIT
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 * 3 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Copyright © 2018 Intel Corporation
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 * 5 *
6 * Autogenerated file by GPU Top : https://github.com/rib/gputop
7 * DO NOT EDIT manually!
27 */ 8 */
28 9
29#ifndef __I915_OA_CFLGT3_H__ 10#ifndef __I915_OA_CFLGT3_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_chv.c b/drivers/gpu/drm/i915/i915_oa_chv.c
index 556febb2c3c8..71ec889a0114 100644
--- a/drivers/gpu/drm/i915/i915_oa_chv.c
+++ b/drivers/gpu/drm/i915/i915_oa_chv.c
@@ -1,29 +1,10 @@
1/* 1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop 2 * SPDX-License-Identifier: MIT
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 * 3 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Copyright © 2018 Intel Corporation
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 * 5 *
6 * Autogenerated file by GPU Top : https://github.com/rib/gputop
7 * DO NOT EDIT manually!
27 */ 8 */
28 9
29#include <linux/sysfs.h> 10#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_chv.h b/drivers/gpu/drm/i915/i915_oa_chv.h
index b9622496979e..0986eae3135f 100644
--- a/drivers/gpu/drm/i915/i915_oa_chv.h
+++ b/drivers/gpu/drm/i915/i915_oa_chv.h
@@ -1,29 +1,10 @@
1/* 1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop 2 * SPDX-License-Identifier: MIT
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 * 3 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Copyright © 2018 Intel Corporation
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 * 5 *
6 * Autogenerated file by GPU Top : https://github.com/rib/gputop
7 * DO NOT EDIT manually!
27 */ 8 */
28 9
29#ifndef __I915_OA_CHV_H__ 10#ifndef __I915_OA_CHV_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_cnl.c b/drivers/gpu/drm/i915/i915_oa_cnl.c
index ba9140c87cc0..5c23d883d6c9 100644
--- a/drivers/gpu/drm/i915/i915_oa_cnl.c
+++ b/drivers/gpu/drm/i915/i915_oa_cnl.c
@@ -1,29 +1,10 @@
1/* 1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop 2 * SPDX-License-Identifier: MIT
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 * 3 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Copyright © 2018 Intel Corporation
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 * 5 *
6 * Autogenerated file by GPU Top : https://github.com/rib/gputop
7 * DO NOT EDIT manually!
27 */ 8 */
28 9
29#include <linux/sysfs.h> 10#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_cnl.h b/drivers/gpu/drm/i915/i915_oa_cnl.h
index fb918b131105..e830a406aff2 100644
--- a/drivers/gpu/drm/i915/i915_oa_cnl.h
+++ b/drivers/gpu/drm/i915/i915_oa_cnl.h
@@ -1,29 +1,10 @@
1/* 1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop 2 * SPDX-License-Identifier: MIT
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 * 3 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Copyright © 2018 Intel Corporation
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 * 5 *
6 * Autogenerated file by GPU Top : https://github.com/rib/gputop
7 * DO NOT EDIT manually!
27 */ 8 */
28 9
29#ifndef __I915_OA_CNL_H__ 10#ifndef __I915_OA_CNL_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_glk.c b/drivers/gpu/drm/i915/i915_oa_glk.c
index 971db587957c..4bdda66df7d2 100644
--- a/drivers/gpu/drm/i915/i915_oa_glk.c
+++ b/drivers/gpu/drm/i915/i915_oa_glk.c
@@ -1,29 +1,10 @@
1/* 1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop 2 * SPDX-License-Identifier: MIT
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 * 3 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Copyright © 2018 Intel Corporation
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 * 5 *
6 * Autogenerated file by GPU Top : https://github.com/rib/gputop
7 * DO NOT EDIT manually!
27 */ 8 */
28 9
29#include <linux/sysfs.h> 10#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_glk.h b/drivers/gpu/drm/i915/i915_oa_glk.h
index 63bd113f4bc9..06dedf991edb 100644
--- a/drivers/gpu/drm/i915/i915_oa_glk.h
+++ b/drivers/gpu/drm/i915/i915_oa_glk.h
@@ -1,29 +1,10 @@
1/* 1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop 2 * SPDX-License-Identifier: MIT
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 * 3 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Copyright © 2018 Intel Corporation
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 * 5 *
6 * Autogenerated file by GPU Top : https://github.com/rib/gputop
7 * DO NOT EDIT manually!
27 */ 8 */
28 9
29#ifndef __I915_OA_GLK_H__ 10#ifndef __I915_OA_GLK_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.c b/drivers/gpu/drm/i915/i915_oa_hsw.c
index 434a9b96d7ab..cc6526fdd2bd 100644
--- a/drivers/gpu/drm/i915/i915_oa_hsw.c
+++ b/drivers/gpu/drm/i915/i915_oa_hsw.c
@@ -1,29 +1,10 @@
1/* 1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop 2 * SPDX-License-Identifier: MIT
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 * 3 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Copyright © 2018 Intel Corporation
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 * 5 *
6 * Autogenerated file by GPU Top : https://github.com/rib/gputop
7 * DO NOT EDIT manually!
27 */ 8 */
28 9
29#include <linux/sysfs.h> 10#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.h b/drivers/gpu/drm/i915/i915_oa_hsw.h
index 74d03439c157..3d0c870cd0bd 100644
--- a/drivers/gpu/drm/i915/i915_oa_hsw.h
+++ b/drivers/gpu/drm/i915/i915_oa_hsw.h
@@ -1,29 +1,10 @@
1/* 1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop 2 * SPDX-License-Identifier: MIT
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 * 3 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Copyright © 2018 Intel Corporation
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 * 5 *
6 * Autogenerated file by GPU Top : https://github.com/rib/gputop
7 * DO NOT EDIT manually!
27 */ 8 */
28 9
29#ifndef __I915_OA_HSW_H__ 10#ifndef __I915_OA_HSW_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_icl.c b/drivers/gpu/drm/i915/i915_oa_icl.c
index a5667926e3de..baa51427a543 100644
--- a/drivers/gpu/drm/i915/i915_oa_icl.c
+++ b/drivers/gpu/drm/i915/i915_oa_icl.c
@@ -1,29 +1,10 @@
1/* 1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop 2 * SPDX-License-Identifier: MIT
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 * 3 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Copyright © 2018 Intel Corporation
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 * 5 *
6 * Autogenerated file by GPU Top : https://github.com/rib/gputop
7 * DO NOT EDIT manually!
27 */ 8 */
28 9
29#include <linux/sysfs.h> 10#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_icl.h b/drivers/gpu/drm/i915/i915_oa_icl.h
index ae1c24aafe4f..24eaa97d61ba 100644
--- a/drivers/gpu/drm/i915/i915_oa_icl.h
+++ b/drivers/gpu/drm/i915/i915_oa_icl.h
@@ -1,29 +1,10 @@
1/* 1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop 2 * SPDX-License-Identifier: MIT
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 * 3 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Copyright © 2018 Intel Corporation
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 * 5 *
6 * Autogenerated file by GPU Top : https://github.com/rib/gputop
7 * DO NOT EDIT manually!
27 */ 8 */
28 9
29#ifndef __I915_OA_ICL_H__ 10#ifndef __I915_OA_ICL_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt2.c b/drivers/gpu/drm/i915/i915_oa_kblgt2.c
index 2fa98a40bbc8..168e49ab0d4d 100644
--- a/drivers/gpu/drm/i915/i915_oa_kblgt2.c
+++ b/drivers/gpu/drm/i915/i915_oa_kblgt2.c
@@ -1,29 +1,10 @@
1/* 1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop 2 * SPDX-License-Identifier: MIT
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 * 3 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Copyright © 2018 Intel Corporation
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 * 5 *
6 * Autogenerated file by GPU Top : https://github.com/rib/gputop
7 * DO NOT EDIT manually!
27 */ 8 */
28 9
29#include <linux/sysfs.h> 10#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt2.h b/drivers/gpu/drm/i915/i915_oa_kblgt2.h
index 25b803546dc1..a55398a904de 100644
--- a/drivers/gpu/drm/i915/i915_oa_kblgt2.h
+++ b/drivers/gpu/drm/i915/i915_oa_kblgt2.h
@@ -1,29 +1,10 @@
1/* 1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop 2 * SPDX-License-Identifier: MIT
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 * 3 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Copyright © 2018 Intel Corporation
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 * 5 *
6 * Autogenerated file by GPU Top : https://github.com/rib/gputop
7 * DO NOT EDIT manually!
27 */ 8 */
28 9
29#ifndef __I915_OA_KBLGT2_H__ 10#ifndef __I915_OA_KBLGT2_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt3.c b/drivers/gpu/drm/i915/i915_oa_kblgt3.c
index f3cb6679a1bc..6ffa553c388e 100644
--- a/drivers/gpu/drm/i915/i915_oa_kblgt3.c
+++ b/drivers/gpu/drm/i915/i915_oa_kblgt3.c
@@ -1,29 +1,10 @@
1/* 1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop 2 * SPDX-License-Identifier: MIT
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 * 3 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Copyright © 2018 Intel Corporation
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 * 5 *
6 * Autogenerated file by GPU Top : https://github.com/rib/gputop
7 * DO NOT EDIT manually!
27 */ 8 */
28 9
29#include <linux/sysfs.h> 10#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt3.h b/drivers/gpu/drm/i915/i915_oa_kblgt3.h
index d5b5b5c1923e..3ddd3483b7cc 100644
--- a/drivers/gpu/drm/i915/i915_oa_kblgt3.h
+++ b/drivers/gpu/drm/i915/i915_oa_kblgt3.h
@@ -1,29 +1,10 @@
1/* 1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop 2 * SPDX-License-Identifier: MIT
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 * 3 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Copyright © 2018 Intel Corporation
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 * 5 *
6 * Autogenerated file by GPU Top : https://github.com/rib/gputop
7 * DO NOT EDIT manually!
27 */ 8 */
28 9
29#ifndef __I915_OA_KBLGT3_H__ 10#ifndef __I915_OA_KBLGT3_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt2.c b/drivers/gpu/drm/i915/i915_oa_sklgt2.c
index bf8b8cd8a50d..7ce6ee851d43 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt2.c
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt2.c
@@ -1,29 +1,10 @@
1/* 1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop 2 * SPDX-License-Identifier: MIT
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 * 3 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Copyright © 2018 Intel Corporation
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 * 5 *
6 * Autogenerated file by GPU Top : https://github.com/rib/gputop
7 * DO NOT EDIT manually!
27 */ 8 */
28 9
29#include <linux/sysfs.h> 10#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt2.h b/drivers/gpu/drm/i915/i915_oa_sklgt2.h
index fe1aa2c03958..be6256037239 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt2.h
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt2.h
@@ -1,29 +1,10 @@
 /*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
  *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
  *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
  */
 
 #ifndef __I915_OA_SKLGT2_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt3.c b/drivers/gpu/drm/i915/i915_oa_sklgt3.c
index ae534c7c8135..086ca2631e1c 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt3.c
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt3.c
@@ -1,29 +1,10 @@
 /*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
  *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
  *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
  */
 
 #include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt3.h b/drivers/gpu/drm/i915/i915_oa_sklgt3.h
index 06746b2616c8..650beb068e56 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt3.h
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt3.h
@@ -1,29 +1,10 @@
 /*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
  *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
  *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
  */
 
 #ifndef __I915_OA_SKLGT3_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt4.c b/drivers/gpu/drm/i915/i915_oa_sklgt4.c
index 817fba2d82df..b291a6eb8a87 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt4.c
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt4.c
@@ -1,29 +1,10 @@
 /*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
  *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
  *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
  */
 
 #include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt4.h b/drivers/gpu/drm/i915/i915_oa_sklgt4.h
index 944fd525c8b1..8dcf849d131e 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt4.h
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt4.h
@@ -1,29 +1,10 @@
 /*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
  *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
  *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
  */
 
 #ifndef __I915_OA_SKLGT4_H__
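Every i915_oa_*.[ch] hunk in this series is the same two-part fix: the twenty-line MIT boilerplate collapses into an SPDX tag and the copyright line is refreshed. The resulting header, read off the added side of the hunks above, is:

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 *
 * Autogenerated file by GPU Top : https://github.com/rib/gputop
 * DO NOT EDIT manually!
 */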
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 295e981e4a39..2e0356561839 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -82,10 +82,6 @@ i915_param_named_unsafe(enable_hangcheck, bool, 0644,
82 "WARNING: Disabling this can cause system wide hangs. " 82 "WARNING: Disabling this can cause system wide hangs. "
83 "(default: true)"); 83 "(default: true)");
84 84
85i915_param_named_unsafe(enable_ppgtt, int, 0400,
86 "Override PPGTT usage. "
87 "(-1=auto [default], 0=disabled, 1=aliasing, 2=full, 3=full with extended address space)");
88
89i915_param_named_unsafe(enable_psr, int, 0600, 85i915_param_named_unsafe(enable_psr, int, 0600,
90 "Enable PSR " 86 "Enable PSR "
91 "(0=disabled, 1=enabled) " 87 "(0=disabled, 1=enabled) "
@@ -171,8 +167,10 @@ i915_param_named_unsafe(inject_load_failure, uint, 0400,
 i915_param_named(enable_dpcd_backlight, bool, 0600,
 	"Enable support for DPCD backlight control (default:false)");
 
+#if IS_ENABLED(CONFIG_DRM_I915_GVT)
 i915_param_named(enable_gvt, bool, 0400,
 	"Enable support for Intel GVT-g graphics virtualization host support(default:false)");
+#endif
 
 static __always_inline void _print_param(struct drm_printer *p,
 					 const char *name,
@@ -188,7 +186,8 @@ static __always_inline void _print_param(struct drm_printer *p,
 	else if (!__builtin_strcmp(type, "char *"))
 		drm_printf(p, "i915.%s=%s\n", name, *(const char **)x);
 	else
-		BUILD_BUG();
+		WARN_ONCE(1, "no printer defined for param type %s (i915.%s)\n",
+			  type, name);
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 6c4d4a21474b..7e56c516c815 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -41,7 +41,6 @@ struct drm_printer;
 	param(int, vbt_sdvo_panel_type, -1) \
 	param(int, enable_dc, -1) \
 	param(int, enable_fbc, -1) \
-	param(int, enable_ppgtt, -1) \
 	param(int, enable_psr, -1) \
 	param(int, disable_power_well, -1) \
 	param(int, enable_ips, 1) \
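Dropping the single `param(int, enable_ppgtt, -1)` line retires the module parameter end to end, because i915_params.h is an X-macro list: each `param(type, name, default)` entry is expanded once into a struct member, once into its default, and once into the dump path shown in the i915_params.c hunks above. A generic, compilable userspace sketch of the pattern (made-up names; not the driver's exact expansion):

#include <stdbool.h>
#include <stdio.h>

/* One line per parameter; deleting a line removes the field, the
 * default and the printer in a single edit. */
#define MY_PARAMS_FOR_EACH(param) \
	param(int,  enable_fbc, -1) \
	param(int,  enable_psr, -1) \
	param(bool, enable_hangcheck, true)

struct my_params {
#define MEMBER(T, name, def) T name;
	MY_PARAMS_FOR_EACH(MEMBER)
#undef MEMBER
};

static const struct my_params defaults = {
#define INIT(T, name, def) .name = (def),
	MY_PARAMS_FOR_EACH(INIT)
#undef INIT
};

int main(void)
{
#define PRINT(T, name, def) printf("i915." #name "=%d\n", (int)defaults.name);
	MY_PARAMS_FOR_EACH(PRINT)
#undef PRINT
	return 0;
}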
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index d6f7b9fe1d26..983ae7fd8217 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -36,16 +36,13 @@
 	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
 			  PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
 	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
-			   TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
-	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
+			   TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }
 
 #define GEN_CHV_PIPEOFFSETS \
 	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
 			  CHV_PIPE_C_OFFSET }, \
 	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
-			   CHV_TRANSCODER_C_OFFSET, }, \
-	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
-			     CHV_PALETTE_C_OFFSET }
+			   CHV_TRANSCODER_C_OFFSET }
 
 #define CURSOR_OFFSETS \
 	.cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }
@@ -252,7 +249,7 @@ static const struct intel_device_info intel_ironlake_m_info = {
 	.has_llc = 1, \
 	.has_rc6 = 1, \
 	.has_rc6p = 1, \
-	.has_aliasing_ppgtt = 1, \
+	.ppgtt = INTEL_PPGTT_ALIASING, \
 	GEN_DEFAULT_PIPEOFFSETS, \
 	GEN_DEFAULT_PAGE_SIZES, \
 	CURSOR_OFFSETS
@@ -297,8 +294,7 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = {
 	.has_llc = 1, \
 	.has_rc6 = 1, \
 	.has_rc6p = 1, \
-	.has_aliasing_ppgtt = 1, \
-	.has_full_ppgtt = 1, \
+	.ppgtt = INTEL_PPGTT_FULL, \
 	GEN_DEFAULT_PIPEOFFSETS, \
 	GEN_DEFAULT_PAGE_SIZES, \
 	IVB_CURSOR_OFFSETS
@@ -351,8 +347,7 @@ static const struct intel_device_info intel_valleyview_info = {
 	.has_rc6 = 1,
 	.has_gmch_display = 1,
 	.has_hotplug = 1,
-	.has_aliasing_ppgtt = 1,
-	.has_full_ppgtt = 1,
+	.ppgtt = INTEL_PPGTT_FULL,
 	.has_snoop = true,
 	.has_coherent_ggtt = false,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
@@ -399,7 +394,7 @@ static const struct intel_device_info intel_haswell_gt3_info = {
 	.page_sizes = I915_GTT_PAGE_SIZE_4K | \
 		      I915_GTT_PAGE_SIZE_2M, \
 	.has_logical_ring_contexts = 1, \
-	.has_full_48bit_ppgtt = 1, \
+	.ppgtt = INTEL_PPGTT_FULL_4LVL, \
 	.has_64bit_reloc = 1, \
 	.has_reset_engine = 1
 
@@ -443,8 +438,7 @@ static const struct intel_device_info intel_cherryview_info = {
 	.has_rc6 = 1,
 	.has_logical_ring_contexts = 1,
 	.has_gmch_display = 1,
-	.has_aliasing_ppgtt = 1,
-	.has_full_ppgtt = 1,
+	.ppgtt = INTEL_PPGTT_FULL,
 	.has_reset_engine = 1,
 	.has_snoop = true,
 	.has_coherent_ggtt = false,
@@ -472,6 +466,8 @@ static const struct intel_device_info intel_cherryview_info = {
 
 #define SKL_PLATFORM \
 	GEN9_FEATURES, \
+	/* Display WA #0477 WaDisableIPC: skl */ \
+	.has_ipc = 0, \
 	PLATFORM(INTEL_SKYLAKE)
 
 static const struct intel_device_info intel_skylake_gt1_info = {
@@ -518,9 +514,7 @@ static const struct intel_device_info intel_skylake_gt4_info = {
 	.has_logical_ring_contexts = 1, \
 	.has_logical_ring_preemption = 1, \
 	.has_guc = 1, \
-	.has_aliasing_ppgtt = 1, \
-	.has_full_ppgtt = 1, \
-	.has_full_48bit_ppgtt = 1, \
+	.ppgtt = INTEL_PPGTT_FULL_4LVL, \
 	.has_reset_engine = 1, \
 	.has_snoop = true, \
 	.has_coherent_ggtt = false, \
@@ -598,6 +592,12 @@ static const struct intel_device_info intel_cannonlake_info = {
 
 #define GEN11_FEATURES \
 	GEN10_FEATURES, \
+	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
+			  PIPE_C_OFFSET, PIPE_EDP_OFFSET, \
+			  PIPE_DSI0_OFFSET, PIPE_DSI1_OFFSET }, \
+	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
+			   TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET, \
+			   TRANSCODER_DSI0_OFFSET, TRANSCODER_DSI1_OFFSET}, \
 	GEN(11), \
 	.ddb_size = 2048, \
 	.has_logical_ring_elsq = 1
@@ -663,7 +663,7 @@ static const struct pci_device_id pciidlist[] = {
 	INTEL_KBL_GT2_IDS(&intel_kabylake_gt2_info),
 	INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
 	INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
-	INTEL_AML_GT2_IDS(&intel_kabylake_gt2_info),
+	INTEL_AML_KBL_GT2_IDS(&intel_kabylake_gt2_info),
 	INTEL_CFL_S_GT1_IDS(&intel_coffeelake_gt1_info),
 	INTEL_CFL_S_GT2_IDS(&intel_coffeelake_gt2_info),
 	INTEL_CFL_H_GT2_IDS(&intel_coffeelake_gt2_info),
@@ -671,6 +671,7 @@ static const struct pci_device_id pciidlist[] = {
 	INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info),
 	INTEL_WHL_U_GT1_IDS(&intel_coffeelake_gt1_info),
 	INTEL_WHL_U_GT2_IDS(&intel_coffeelake_gt2_info),
+	INTEL_AML_CFL_GT2_IDS(&intel_coffeelake_gt2_info),
 	INTEL_WHL_U_GT3_IDS(&intel_coffeelake_gt3_info),
 	INTEL_CNL_IDS(&intel_cannonlake_info),
 	INTEL_ICL_11_IDS(&intel_icelake_11_info),
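The recurring i915_pci.c change folds three overlapping feature bits (has_aliasing_ppgtt, has_full_ppgtt, has_full_48bit_ppgtt) into one ordered `.ppgtt` level. A compilable sketch of the idea; only the names INTEL_PPGTT_ALIASING/FULL/FULL_4LVL come from the hunks above, the ordering and the NONE value are assumptions here:

#include <stdio.h>

enum intel_ppgtt {	/* assumed ordering; illustrative only */
	INTEL_PPGTT_NONE = 0,
	INTEL_PPGTT_ALIASING,
	INTEL_PPGTT_FULL,
	INTEL_PPGTT_FULL_4LVL,
};

/* The old trio of booleans becomes ordered comparisons on one field: */
static int has_full_ppgtt(enum intel_ppgtt level)
{
	return level >= INTEL_PPGTT_FULL;
}

static int has_full_48bit_ppgtt(enum intel_ppgtt level)
{
	return level >= INTEL_PPGTT_FULL_4LVL;
}

int main(void)
{
	enum intel_ppgtt level = INTEL_PPGTT_FULL_4LVL;	/* e.g. a gen8+ part */

	printf("%d %d\n", has_full_ppgtt(level), has_full_48bit_ppgtt(level)); /* 1 1 */
	return 0;
}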
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 664b96bb65a3..4529edfdcfc8 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -890,8 +890,8 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
 		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
 			  dev_priv->perf.oa.period_exponent);
 
-		dev_priv->perf.oa.ops.oa_disable(dev_priv);
-		dev_priv->perf.oa.ops.oa_enable(dev_priv);
+		dev_priv->perf.oa.ops.oa_disable(stream);
+		dev_priv->perf.oa.ops.oa_enable(stream);
 
 		/*
 		 * Note: .oa_enable() is expected to re-init the oabuffer and
@@ -1114,8 +1114,8 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
 		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
 			  dev_priv->perf.oa.period_exponent);
 
-		dev_priv->perf.oa.ops.oa_disable(dev_priv);
-		dev_priv->perf.oa.ops.oa_enable(dev_priv);
+		dev_priv->perf.oa.ops.oa_disable(stream);
+		dev_priv->perf.oa.ops.oa_enable(stream);
 
 		oastatus1 = I915_READ(GEN7_OASTATUS1);
 	}
@@ -1528,8 +1528,6 @@ static int alloc_oa_buffer(struct drm_i915_private *dev_priv)
 		goto err_unpin;
 	}
 
-	dev_priv->perf.oa.ops.init_oa_buffer(dev_priv);
-
 	DRM_DEBUG_DRIVER("OA Buffer initialized, gtt offset = 0x%x, vaddr = %p\n",
 			 i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma),
 			 dev_priv->perf.oa.oa_buffer.vaddr);
@@ -1563,9 +1561,11 @@ static void config_oa_regs(struct drm_i915_private *dev_priv,
 	}
 }
 
-static int hsw_enable_metric_set(struct drm_i915_private *dev_priv,
-				 const struct i915_oa_config *oa_config)
+static int hsw_enable_metric_set(struct i915_perf_stream *stream)
 {
+	struct drm_i915_private *dev_priv = stream->dev_priv;
+	const struct i915_oa_config *oa_config = stream->oa_config;
+
 	/* PRM:
 	 *
 	 * OA unit is using “crclk” for its functionality. When trunk
@@ -1767,9 +1767,10 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
 	return 0;
 }
 
-static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
-				  const struct i915_oa_config *oa_config)
+static int gen8_enable_metric_set(struct i915_perf_stream *stream)
 {
+	struct drm_i915_private *dev_priv = stream->dev_priv;
+	const struct i915_oa_config *oa_config = stream->oa_config;
 	int ret;
 
 	/*
@@ -1837,10 +1838,10 @@ static void gen10_disable_metric_set(struct drm_i915_private *dev_priv)
 			   I915_READ(RPM_CONFIG1) & ~GEN10_GT_NOA_ENABLE);
 }
 
-static void gen7_oa_enable(struct drm_i915_private *dev_priv)
+static void gen7_oa_enable(struct i915_perf_stream *stream)
 {
-	struct i915_gem_context *ctx =
-			dev_priv->perf.oa.exclusive_stream->ctx;
+	struct drm_i915_private *dev_priv = stream->dev_priv;
+	struct i915_gem_context *ctx = stream->ctx;
 	u32 ctx_id = dev_priv->perf.oa.specific_ctx_id;
 	bool periodic = dev_priv->perf.oa.periodic;
 	u32 period_exponent = dev_priv->perf.oa.period_exponent;
@@ -1867,8 +1868,9 @@ static void gen7_oa_enable(struct drm_i915_private *dev_priv)
 		   GEN7_OACONTROL_ENABLE);
 }
 
-static void gen8_oa_enable(struct drm_i915_private *dev_priv)
+static void gen8_oa_enable(struct i915_perf_stream *stream)
 {
+	struct drm_i915_private *dev_priv = stream->dev_priv;
 	u32 report_format = dev_priv->perf.oa.oa_buffer.format;
 
 	/*
@@ -1905,7 +1907,7 @@ static void i915_oa_stream_enable(struct i915_perf_stream *stream)
 {
 	struct drm_i915_private *dev_priv = stream->dev_priv;
 
-	dev_priv->perf.oa.ops.oa_enable(dev_priv);
+	dev_priv->perf.oa.ops.oa_enable(stream);
 
 	if (dev_priv->perf.oa.periodic)
 		hrtimer_start(&dev_priv->perf.oa.poll_check_timer,
@@ -1913,8 +1915,10 @@ static void i915_oa_stream_enable(struct i915_perf_stream *stream)
 			      HRTIMER_MODE_REL_PINNED);
 }
 
-static void gen7_oa_disable(struct drm_i915_private *dev_priv)
+static void gen7_oa_disable(struct i915_perf_stream *stream)
 {
+	struct drm_i915_private *dev_priv = stream->dev_priv;
+
 	I915_WRITE(GEN7_OACONTROL, 0);
 	if (intel_wait_for_register(dev_priv,
 				    GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0,
@@ -1922,8 +1926,10 @@ static void gen7_oa_disable(struct drm_i915_private *dev_priv)
 		DRM_ERROR("wait for OA to be disabled timed out\n");
 }
 
-static void gen8_oa_disable(struct drm_i915_private *dev_priv)
+static void gen8_oa_disable(struct i915_perf_stream *stream)
 {
+	struct drm_i915_private *dev_priv = stream->dev_priv;
+
 	I915_WRITE(GEN8_OACONTROL, 0);
 	if (intel_wait_for_register(dev_priv,
 				    GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0,
@@ -1943,7 +1949,7 @@ static void i915_oa_stream_disable(struct i915_perf_stream *stream)
 {
 	struct drm_i915_private *dev_priv = stream->dev_priv;
 
-	dev_priv->perf.oa.ops.oa_disable(dev_priv);
+	dev_priv->perf.oa.ops.oa_disable(stream);
 
 	if (dev_priv->perf.oa.periodic)
 		hrtimer_cancel(&dev_priv->perf.oa.poll_check_timer);
@@ -1998,7 +2004,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
 		return -EINVAL;
 	}
 
-	if (!dev_priv->perf.oa.ops.init_oa_buffer) {
+	if (!dev_priv->perf.oa.ops.enable_metric_set) {
 		DRM_DEBUG("OA unit not supported\n");
 		return -ENODEV;
 	}
@@ -2092,8 +2098,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
 	if (ret)
 		goto err_lock;
 
-	ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv,
-						      stream->oa_config);
+	ret = dev_priv->perf.oa.ops.enable_metric_set(stream);
 	if (ret) {
 		DRM_DEBUG("Unable to enable metric set\n");
 		goto err_enable;
@@ -3387,7 +3392,6 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
 		dev_priv->perf.oa.ops.is_valid_mux_reg =
 			hsw_is_valid_mux_addr;
 		dev_priv->perf.oa.ops.is_valid_flex_reg = NULL;
-		dev_priv->perf.oa.ops.init_oa_buffer = gen7_init_oa_buffer;
 		dev_priv->perf.oa.ops.enable_metric_set = hsw_enable_metric_set;
 		dev_priv->perf.oa.ops.disable_metric_set = hsw_disable_metric_set;
 		dev_priv->perf.oa.ops.oa_enable = gen7_oa_enable;
@@ -3406,7 +3410,6 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
 		 */
 		dev_priv->perf.oa.oa_formats = gen8_plus_oa_formats;
 
-		dev_priv->perf.oa.ops.init_oa_buffer = gen8_init_oa_buffer;
 		dev_priv->perf.oa.ops.oa_enable = gen8_oa_enable;
 		dev_priv->perf.oa.ops.oa_disable = gen8_oa_disable;
 		dev_priv->perf.oa.ops.read = gen8_oa_read;
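The thread running through the i915_perf.c hunks: every OA hook now receives the i915_perf_stream and derives dev_priv, ctx and oa_config from it, instead of each callee reaching back through dev_priv->perf.oa.exclusive_stream. With .oa_enable() re-initializing the OA buffer (per the comment in the first hunk), the separate init_oa_buffer hook disappears, and "OA unit supported" is keyed off enable_metric_set instead. A compilable userspace sketch of the new calling convention (stub types; the real ops table has more members):

#include <stdio.h>

/* Userspace stand-ins, just to show the shape of the change. */
struct drm_i915_private { int dummy; };
struct i915_oa_config   { int dummy; };

struct i915_perf_stream {
	struct drm_i915_private *dev_priv;
	const struct i915_oa_config *oa_config;
};

/* All OA hooks take the stream; callees look sideways from it. */
struct i915_oa_ops {
	int  (*enable_metric_set)(struct i915_perf_stream *stream);
	void (*oa_enable)(struct i915_perf_stream *stream);
	void (*oa_disable)(struct i915_perf_stream *stream);
};

static int fake_enable_metric_set(struct i915_perf_stream *stream)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	const struct i915_oa_config *oa_config = stream->oa_config;

	(void)dev_priv;
	(void)oa_config;
	printf("metric set enabled\n");
	return 0;
}

int main(void)
{
	struct drm_i915_private i915 = { 0 };
	struct i915_oa_config cfg = { 0 };
	struct i915_perf_stream stream = { .dev_priv = &i915, .oa_config = &cfg };
	struct i915_oa_ops ops = { .enable_metric_set = fake_enable_metric_set };

	return ops.enable_metric_set(&stream);
}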
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index 3f502eef2431..6fc4b8eeab42 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -27,8 +27,7 @@ static int query_topology_info(struct drm_i915_private *dev_priv,
 
 	slice_length = sizeof(sseu->slice_mask);
 	subslice_length = sseu->max_slices *
-		DIV_ROUND_UP(sseu->max_subslices,
-			     sizeof(sseu->subslice_mask[0]) * BITS_PER_BYTE);
+		DIV_ROUND_UP(sseu->max_subslices, BITS_PER_BYTE);
 	eu_length = sseu->max_slices * sseu->max_subslices *
 		DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE);
 
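Both expressions compute one mask byte per started group of eight subslices, assuming subslice_mask is an array of u8 (so sizeof(subslice_mask[0]) * BITS_PER_BYTE == 8); the rewrite simply says BITS_PER_BYTE directly and matches the eu_length line below it. A worked check with hypothetical topology numbers:

#include <stdio.h>

#define BITS_PER_BYTE 8
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Hypothetical topology: 3 slices, up to 8 subslices each. */
	int max_slices = 3, max_subslices = 8;

	/* One bitmask byte per started group of 8 subslices, per slice. */
	int subslice_length = max_slices *
		DIV_ROUND_UP(max_subslices, BITS_PER_BYTE);

	printf("%d\n", subslice_length);	/* 3 */
	return 0;
}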
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index e31c27e45734..edb58af1e903 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -157,20 +157,37 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 /*
  * Named helper wrappers around _PICK_EVEN() and _PICK().
  */
 #define _PIPE(pipe, a, b)		_PICK_EVEN(pipe, a, b)
-#define _MMIO_PIPE(pipe, a, b)		_MMIO(_PIPE(pipe, a, b))
-#define _PLANE(plane, a, b)		_PICK_EVEN(plane, a, b)
-#define _MMIO_PLANE(plane, a, b)	_MMIO_PIPE(plane, a, b)
-#define _TRANS(tran, a, b)		_PICK_EVEN(tran, a, b)
-#define _MMIO_TRANS(tran, a, b)		_MMIO(_TRANS(tran, a, b))
-#define _PORT(port, a, b)		_PICK_EVEN(port, a, b)
-#define _MMIO_PORT(port, a, b)		_MMIO(_PORT(port, a, b))
-#define _MMIO_PIPE3(pipe, a, b, c)	_MMIO(_PICK(pipe, a, b, c))
-#define _MMIO_PORT3(pipe, a, b, c)	_MMIO(_PICK(pipe, a, b, c))
-#define _PLL(pll, a, b)			_PICK_EVEN(pll, a, b)
-#define _MMIO_PLL(pll, a, b)		_MMIO(_PLL(pll, a, b))
-#define _PHY3(phy, ...)			_PICK(phy, __VA_ARGS__)
-#define _MMIO_PHY3(phy, a, b, c)	_MMIO(_PHY3(phy, a, b, c))
+#define _PLANE(plane, a, b)		_PICK_EVEN(plane, a, b)
+#define _TRANS(tran, a, b)		_PICK_EVEN(tran, a, b)
+#define _PORT(port, a, b)		_PICK_EVEN(port, a, b)
+#define _PLL(pll, a, b)			_PICK_EVEN(pll, a, b)
+
+#define _MMIO_PIPE(pipe, a, b)		_MMIO(_PIPE(pipe, a, b))
+#define _MMIO_PLANE(plane, a, b)	_MMIO(_PLANE(plane, a, b))
+#define _MMIO_TRANS(tran, a, b)		_MMIO(_TRANS(tran, a, b))
+#define _MMIO_PORT(port, a, b)		_MMIO(_PORT(port, a, b))
+#define _MMIO_PLL(pll, a, b)		_MMIO(_PLL(pll, a, b))
+
+#define _PHY3(phy, ...)			_PICK(phy, __VA_ARGS__)
+
+#define _MMIO_PIPE3(pipe, a, b, c)	_MMIO(_PICK(pipe, a, b, c))
+#define _MMIO_PORT3(pipe, a, b, c)	_MMIO(_PICK(pipe, a, b, c))
+#define _MMIO_PHY3(phy, a, b, c)	_MMIO(_PHY3(phy, a, b, c))
+
+/*
+ * Device info offset array based helpers for groups of registers with unevenly
+ * spaced base offsets.
+ */
+#define _MMIO_PIPE2(pipe, reg)		_MMIO(dev_priv->info.pipe_offsets[pipe] - \
+					      dev_priv->info.pipe_offsets[PIPE_A] + (reg) + \
+					      dev_priv->info.display_mmio_offset)
+#define _MMIO_TRANS2(pipe, reg)		_MMIO(dev_priv->info.trans_offsets[(pipe)] - \
+					      dev_priv->info.trans_offsets[TRANSCODER_A] + (reg) + \
+					      dev_priv->info.display_mmio_offset)
+#define _CURSOR2(pipe, reg)		_MMIO(dev_priv->info.cursor_offsets[(pipe)] - \
+					      dev_priv->info.cursor_offsets[PIPE_A] + (reg) + \
+					      dev_priv->info.display_mmio_offset)
 
 #define __MASKED_FIELD(mask, value)	((mask) << 16 | (value))
 #define _MASKED_FIELD(mask, value)	({ \
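For reference, the two primitives these wrappers build on (paraphrased from earlier in this header): _PICK_EVEN() linearly extrapolates from two evenly spaced instances, _PICK() indexes a compound-literal table. The relocated *2 helpers instead rebase a pipe-A/transcoder-A register through the per-device offset arrays, which is what lets ICL's DSI transcoders (added further down) reuse every _MMIO_TRANS2()-based register. A userspace check of the arithmetic, with offsets taken from this patch:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;

/* Paraphrased from i915_reg.h: */
#define _PICK_EVEN(__index, __a, __b) ((__a) + (__index) * ((__b) - (__a)))
#define _PICK(__index, ...) (((const u32 []){ __VA_ARGS__ })[__index])

enum { TRANSCODER_A, TRANSCODER_B, TRANSCODER_C, TRANSCODER_EDP,
       TRANSCODER_DSI0, TRANSCODER_DSI1 };

static const u32 trans_offsets[] = {
	[TRANSCODER_A]    = 0x60000,
	[TRANSCODER_B]    = 0x61000,
	[TRANSCODER_C]    = 0x62000,
	[TRANSCODER_EDP]  = 0x6f000,
	[TRANSCODER_DSI0] = 0x6b000,
	[TRANSCODER_DSI1] = 0x6b800,
};

/* Userspace stand-in for _MMIO_TRANS2(pipe, reg): rebase a
 * transcoder-A register onto any transcoder via the offset table
 * (display_mmio_offset omitted). */
static u32 mmio_trans2(int trans, u32 reg)
{
	return trans_offsets[trans] - trans_offsets[TRANSCODER_A] + reg;
}

int main(void)
{
	u32 htotal_a = 0x60000;	/* _HTOTAL_A, at the transcoder-A base */

	printf("%#x\n", mmio_trans2(TRANSCODER_EDP, htotal_a));  /* 0x6f000 */
	printf("%#x\n", mmio_trans2(TRANSCODER_DSI0, htotal_a)); /* 0x6b000 = _HTOTAL_DSI0 */
	printf("%#x\n", _PICK_EVEN(2, 0x60000, 0x61000));        /* 0x62000 */
	printf("%#x\n", _PICK(3, 0x60000, 0x61000, 0x62000, 0x63000)); /* 0x63000 */
	return 0;
}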
@@ -1631,35 +1648,6 @@ enum i915_power_well_id {
 #define   PHY_RESERVED			(1 << 7)
 #define BXT_PORT_CL1CM_DW0(phy)		_BXT_PHY((phy), _PORT_CL1CM_DW0_BC)
 
-#define CNL_PORT_CL1CM_DW5		_MMIO(0x162014)
-#define   CL_POWER_DOWN_ENABLE		(1 << 4)
-#define   SUS_CLOCK_CONFIG		(3 << 0)
-
-#define _ICL_PORT_CL_DW5_A	0x162014
-#define _ICL_PORT_CL_DW5_B	0x6C014
-#define ICL_PORT_CL_DW5(port)	_MMIO_PORT(port, _ICL_PORT_CL_DW5_A, \
-						 _ICL_PORT_CL_DW5_B)
-
-#define _CNL_PORT_CL_DW10_A	0x162028
-#define _ICL_PORT_CL_DW10_B	0x6c028
-#define ICL_PORT_CL_DW10(port)	_MMIO_PORT(port, \
-					   _CNL_PORT_CL_DW10_A, \
-					   _ICL_PORT_CL_DW10_B)
-#define   PG_SEQ_DELAY_OVERRIDE_MASK	(3 << 25)
-#define   PG_SEQ_DELAY_OVERRIDE_SHIFT	25
-#define   PG_SEQ_DELAY_OVERRIDE_ENABLE	(1 << 24)
-#define   PWR_UP_ALL_LANES		(0x0 << 4)
-#define   PWR_DOWN_LN_3_2_1		(0xe << 4)
-#define   PWR_DOWN_LN_3_2		(0xc << 4)
-#define   PWR_DOWN_LN_3			(0x8 << 4)
-#define   PWR_DOWN_LN_2_1_0		(0x7 << 4)
-#define   PWR_DOWN_LN_1_0		(0x3 << 4)
-#define   PWR_DOWN_LN_1			(0x2 << 4)
-#define   PWR_DOWN_LN_3_1		(0xa << 4)
-#define   PWR_DOWN_LN_3_1_0		(0xb << 4)
-#define   PWR_DOWN_LN_MASK		(0xf << 4)
-#define   PWR_DOWN_LN_SHIFT		4
-
 #define _PORT_CL1CM_DW9_A		0x162024
 #define _PORT_CL1CM_DW9_BC		0x6C024
 #define   IREF0RC_OFFSET_SHIFT		8
@@ -1672,13 +1660,6 @@ enum i915_power_well_id {
 #define   IREF1RC_OFFSET_MASK		(0xFF << IREF1RC_OFFSET_SHIFT)
 #define BXT_PORT_CL1CM_DW10(phy)	_BXT_PHY((phy), _PORT_CL1CM_DW10_BC)
 
-#define _ICL_PORT_CL_DW12_A	0x162030
-#define _ICL_PORT_CL_DW12_B	0x6C030
-#define   ICL_LANE_ENABLE_AUX	(1 << 0)
-#define ICL_PORT_CL_DW12(port)	_MMIO_PORT((port), \
-					   _ICL_PORT_CL_DW12_A, \
-					   _ICL_PORT_CL_DW12_B)
-
 #define _PORT_CL1CM_DW28_A		0x162070
 #define _PORT_CL1CM_DW28_BC		0x6C070
 #define   OCL1_POWER_DOWN_EN		(1 << 23)
@@ -1691,6 +1672,74 @@ enum i915_power_well_id {
 #define   OCL2_LDOFUSE_PWR_DIS		(1 << 6)
 #define BXT_PORT_CL1CM_DW30(phy)	_BXT_PHY((phy), _PORT_CL1CM_DW30_BC)
 
+/*
+ * CNL/ICL Port/COMBO-PHY Registers
+ */
+#define _ICL_COMBOPHY_A			0x162000
+#define _ICL_COMBOPHY_B			0x6C000
+#define _ICL_COMBOPHY(port)		_PICK(port, _ICL_COMBOPHY_A, \
+					      _ICL_COMBOPHY_B)
+
+/* CNL/ICL Port CL_DW registers */
+#define _ICL_PORT_CL_DW(dw, port)	(_ICL_COMBOPHY(port) + \
+					 4 * (dw))
+
+#define CNL_PORT_CL1CM_DW5		_MMIO(0x162014)
+#define ICL_PORT_CL_DW5(port)		_MMIO(_ICL_PORT_CL_DW(5, port))
+#define   CL_POWER_DOWN_ENABLE		(1 << 4)
+#define   SUS_CLOCK_CONFIG		(3 << 0)
+
+#define ICL_PORT_CL_DW10(port)		_MMIO(_ICL_PORT_CL_DW(10, port))
+#define   PG_SEQ_DELAY_OVERRIDE_MASK	(3 << 25)
+#define   PG_SEQ_DELAY_OVERRIDE_SHIFT	25
+#define   PG_SEQ_DELAY_OVERRIDE_ENABLE	(1 << 24)
+#define   PWR_UP_ALL_LANES		(0x0 << 4)
+#define   PWR_DOWN_LN_3_2_1		(0xe << 4)
+#define   PWR_DOWN_LN_3_2		(0xc << 4)
+#define   PWR_DOWN_LN_3			(0x8 << 4)
+#define   PWR_DOWN_LN_2_1_0		(0x7 << 4)
+#define   PWR_DOWN_LN_1_0		(0x3 << 4)
+#define   PWR_DOWN_LN_1			(0x2 << 4)
+#define   PWR_DOWN_LN_3_1		(0xa << 4)
+#define   PWR_DOWN_LN_3_1_0		(0xb << 4)
+#define   PWR_DOWN_LN_MASK		(0xf << 4)
+#define   PWR_DOWN_LN_SHIFT		4
+
+#define ICL_PORT_CL_DW12(port)		_MMIO(_ICL_PORT_CL_DW(12, port))
+#define   ICL_LANE_ENABLE_AUX		(1 << 0)
+
+/* CNL/ICL Port COMP_DW registers */
+#define _ICL_PORT_COMP			0x100
+#define _ICL_PORT_COMP_DW(dw, port)	(_ICL_COMBOPHY(port) + \
+					 _ICL_PORT_COMP + 4 * (dw))
+
+#define CNL_PORT_COMP_DW0		_MMIO(0x162100)
+#define ICL_PORT_COMP_DW0(port)		_MMIO(_ICL_PORT_COMP_DW(0, port))
+#define   COMP_INIT			(1 << 31)
+
+#define CNL_PORT_COMP_DW1		_MMIO(0x162104)
+#define ICL_PORT_COMP_DW1(port)		_MMIO(_ICL_PORT_COMP_DW(1, port))
+
+#define CNL_PORT_COMP_DW3		_MMIO(0x16210c)
+#define ICL_PORT_COMP_DW3(port)		_MMIO(_ICL_PORT_COMP_DW(3, port))
+#define   PROCESS_INFO_DOT_0		(0 << 26)
+#define   PROCESS_INFO_DOT_1		(1 << 26)
+#define   PROCESS_INFO_DOT_4		(2 << 26)
+#define   PROCESS_INFO_MASK		(7 << 26)
+#define   PROCESS_INFO_SHIFT		26
+#define   VOLTAGE_INFO_0_85V		(0 << 24)
+#define   VOLTAGE_INFO_0_95V		(1 << 24)
+#define   VOLTAGE_INFO_1_05V		(2 << 24)
+#define   VOLTAGE_INFO_MASK		(3 << 24)
+#define   VOLTAGE_INFO_SHIFT		24
+
+#define CNL_PORT_COMP_DW9		_MMIO(0x162124)
+#define ICL_PORT_COMP_DW9(port)		_MMIO(_ICL_PORT_COMP_DW(9, port))
+
+#define CNL_PORT_COMP_DW10		_MMIO(0x162128)
+#define ICL_PORT_COMP_DW10(port)	_MMIO(_ICL_PORT_COMP_DW(10, port))
+
+/* CNL/ICL Port PCS registers */
 #define _CNL_PORT_PCS_DW1_GRP_AE	0x162304
 #define _CNL_PORT_PCS_DW1_GRP_B		0x162384
 #define _CNL_PORT_PCS_DW1_GRP_C		0x162B04
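The rewrite derives every combo-PHY dword from one per-port base plus a block offset instead of keeping an A/B literal per register, so adding a dword costs one line. The literals deleted above fall out of the formula, which makes a convenient cross-check (a small userspace sketch using constants from the hunks):

#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;

/* Per-port combo-PHY bases, as added above. */
#define ICL_COMBOPHY_A	0x162000
#define ICL_COMBOPHY_B	0x6C000

/* Dword "dw" inside the CL and COMP blocks of a combo PHY. */
static u32 port_cl_dw(u32 base, int dw)   { return base + 4 * dw; }
static u32 port_comp_dw(u32 base, int dw) { return base + 0x100 + 4 * dw; }

int main(void)
{
	/* Matches the literals the patch deletes: */
	printf("%#x\n", port_cl_dw(ICL_COMBOPHY_B, 5));   /* 0x6c014,  old _ICL_PORT_CL_DW5_B */
	printf("%#x\n", port_cl_dw(ICL_COMBOPHY_A, 10));  /* 0x162028, old _CNL_PORT_CL_DW10_A */
	printf("%#x\n", port_comp_dw(ICL_COMBOPHY_B, 3)); /* 0x6c10c,  old _ICL_PORT_COMP_DW3_B */
	return 0;
}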
@@ -1708,7 +1757,6 @@ enum i915_power_well_id {
 					    _CNL_PORT_PCS_DW1_GRP_D, \
 					    _CNL_PORT_PCS_DW1_GRP_AE, \
 					    _CNL_PORT_PCS_DW1_GRP_F))
-
 #define CNL_PORT_PCS_DW1_LN0(port)	_MMIO(_PICK(port, \
 					      _CNL_PORT_PCS_DW1_LN0_AE, \
 					      _CNL_PORT_PCS_DW1_LN0_B, \
@@ -1717,24 +1765,21 @@ enum i915_power_well_id {
 					      _CNL_PORT_PCS_DW1_LN0_AE, \
 					      _CNL_PORT_PCS_DW1_LN0_F))
 
-#define _ICL_PORT_PCS_DW1_GRP_A		0x162604
-#define _ICL_PORT_PCS_DW1_GRP_B		0x6C604
-#define _ICL_PORT_PCS_DW1_LN0_A		0x162804
-#define _ICL_PORT_PCS_DW1_LN0_B		0x6C804
-#define _ICL_PORT_PCS_DW1_AUX_A		0x162304
-#define _ICL_PORT_PCS_DW1_AUX_B		0x6c304
-#define ICL_PORT_PCS_DW1_GRP(port)	_MMIO_PORT(port,\
-						   _ICL_PORT_PCS_DW1_GRP_A, \
-						   _ICL_PORT_PCS_DW1_GRP_B)
-#define ICL_PORT_PCS_DW1_LN0(port)	_MMIO_PORT(port, \
-						   _ICL_PORT_PCS_DW1_LN0_A, \
-						   _ICL_PORT_PCS_DW1_LN0_B)
-#define ICL_PORT_PCS_DW1_AUX(port)	_MMIO_PORT(port, \
-						   _ICL_PORT_PCS_DW1_AUX_A, \
-						   _ICL_PORT_PCS_DW1_AUX_B)
+#define _ICL_PORT_PCS_AUX		0x300
+#define _ICL_PORT_PCS_GRP		0x600
+#define _ICL_PORT_PCS_LN(ln)		(0x800 + (ln) * 0x100)
+#define _ICL_PORT_PCS_DW_AUX(dw, port)	(_ICL_COMBOPHY(port) + \
+					 _ICL_PORT_PCS_AUX + 4 * (dw))
+#define _ICL_PORT_PCS_DW_GRP(dw, port)	(_ICL_COMBOPHY(port) + \
+					 _ICL_PORT_PCS_GRP + 4 * (dw))
+#define _ICL_PORT_PCS_DW_LN(dw, ln, port) (_ICL_COMBOPHY(port) + \
+					  _ICL_PORT_PCS_LN(ln) + 4 * (dw))
+#define ICL_PORT_PCS_DW1_AUX(port)	_MMIO(_ICL_PORT_PCS_DW_AUX(1, port))
+#define ICL_PORT_PCS_DW1_GRP(port)	_MMIO(_ICL_PORT_PCS_DW_GRP(1, port))
+#define ICL_PORT_PCS_DW1_LN0(port)	_MMIO(_ICL_PORT_PCS_DW_LN(1, 0, port))
 #define   COMMON_KEEPER_EN		(1 << 26)
 
-/* CNL Port TX registers */
+/* CNL/ICL Port TX registers */
 #define _CNL_PORT_TX_AE_GRP_OFFSET		0x162340
 #define _CNL_PORT_TX_B_GRP_OFFSET		0x1623C0
 #define _CNL_PORT_TX_C_GRP_OFFSET		0x162B40
@@ -1762,23 +1807,22 @@ enum i915_power_well_id {
 					       _CNL_PORT_TX_F_LN0_OFFSET) + \
 					       4 * (dw))
 
-#define CNL_PORT_TX_DW2_GRP(port)	_MMIO(_CNL_PORT_TX_DW_GRP((port), 2))
-#define CNL_PORT_TX_DW2_LN0(port)	_MMIO(_CNL_PORT_TX_DW_LN0((port), 2))
-#define _ICL_PORT_TX_DW2_GRP_A		0x162688
-#define _ICL_PORT_TX_DW2_GRP_B		0x6C688
-#define _ICL_PORT_TX_DW2_LN0_A		0x162888
-#define _ICL_PORT_TX_DW2_LN0_B		0x6C888
-#define _ICL_PORT_TX_DW2_AUX_A		0x162388
-#define _ICL_PORT_TX_DW2_AUX_B		0x6c388
-#define ICL_PORT_TX_DW2_GRP(port)	_MMIO_PORT(port, \
-						   _ICL_PORT_TX_DW2_GRP_A, \
-						   _ICL_PORT_TX_DW2_GRP_B)
-#define ICL_PORT_TX_DW2_LN0(port)	_MMIO_PORT(port, \
-						   _ICL_PORT_TX_DW2_LN0_A, \
-						   _ICL_PORT_TX_DW2_LN0_B)
-#define ICL_PORT_TX_DW2_AUX(port)	_MMIO_PORT(port, \
-						   _ICL_PORT_TX_DW2_AUX_A, \
-						   _ICL_PORT_TX_DW2_AUX_B)
+#define _ICL_PORT_TX_AUX		0x380
+#define _ICL_PORT_TX_GRP		0x680
+#define _ICL_PORT_TX_LN(ln)		(0x880 + (ln) * 0x100)
+
+#define _ICL_PORT_TX_DW_AUX(dw, port)	(_ICL_COMBOPHY(port) + \
+					 _ICL_PORT_TX_AUX + 4 * (dw))
+#define _ICL_PORT_TX_DW_GRP(dw, port)	(_ICL_COMBOPHY(port) + \
+					 _ICL_PORT_TX_GRP + 4 * (dw))
+#define _ICL_PORT_TX_DW_LN(dw, ln, port) (_ICL_COMBOPHY(port) + \
+					  _ICL_PORT_TX_LN(ln) + 4 * (dw))
+
+#define CNL_PORT_TX_DW2_GRP(port)	_MMIO(_CNL_PORT_TX_DW_GRP(2, port))
+#define CNL_PORT_TX_DW2_LN0(port)	_MMIO(_CNL_PORT_TX_DW_LN0(2, port))
+#define ICL_PORT_TX_DW2_AUX(port)	_MMIO(_ICL_PORT_TX_DW_AUX(2, port))
+#define ICL_PORT_TX_DW2_GRP(port)	_MMIO(_ICL_PORT_TX_DW_GRP(2, port))
+#define ICL_PORT_TX_DW2_LN0(port)	_MMIO(_ICL_PORT_TX_DW_LN(2, 0, port))
 #define   SWING_SEL_UPPER(x)		(((x) >> 3) << 15)
 #define   SWING_SEL_UPPER_MASK		(1 << 15)
 #define   SWING_SEL_LOWER(x)		(((x) & 0x7) << 11)
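Same scheme for the PCS and TX blocks: the AUX, group-broadcast and per-lane copies live at fixed sub-offsets inside the same combo PHY (0x300/0x600/0x800 + ln*0x100 for PCS, 0x380/0x680/0x880 + ln*0x100 for TX), so one _ICL_PORT_*_DW_*() macro replaces a literal per register per copy. Cross-checking against the removed literals:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;

#define ICL_COMBOPHY_A	0x162000
#define ICL_COMBOPHY_B	0x6C000

/* AUX / GRP / per-lane sub-blocks of the TX group, as defined above. */
static u32 tx_dw_aux(u32 phy, int dw) { return phy + 0x380 + 4 * dw; }
static u32 tx_dw_grp(u32 phy, int dw) { return phy + 0x680 + 4 * dw; }
static u32 tx_dw_ln(u32 phy, int ln, int dw)
{
	return phy + 0x880 + ln * 0x100 + 4 * dw;
}

int main(void)
{
	/* Reproduces the per-register literals the patch removes: */
	printf("%#x\n", tx_dw_grp(ICL_COMBOPHY_B, 2));   /* 0x6c688,  old _ICL_PORT_TX_DW2_GRP_B */
	printf("%#x\n", tx_dw_ln(ICL_COMBOPHY_A, 0, 2)); /* 0x162888, old _ICL_PORT_TX_DW2_LN0_A */
	printf("%#x\n", tx_dw_ln(ICL_COMBOPHY_A, 1, 4)); /* 0x162990, old _ICL_PORT_TX_DW4_LN1_A */
	printf("%#x\n", tx_dw_aux(ICL_COMBOPHY_A, 4));   /* 0x162390, old _ICL_PORT_TX_DW4_AUX_A */
	return 0;
}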
@@ -1795,24 +1839,10 @@ enum i915_power_well_id {
 #define CNL_PORT_TX_DW4_LN(port, ln)	_MMIO(_CNL_PORT_TX_DW_LN0((port), 4) + \
 					      ((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \
 						       _CNL_PORT_TX_DW4_LN0_AE)))
-#define _ICL_PORT_TX_DW4_GRP_A		0x162690
-#define _ICL_PORT_TX_DW4_GRP_B		0x6C690
-#define _ICL_PORT_TX_DW4_LN0_A		0x162890
-#define _ICL_PORT_TX_DW4_LN1_A		0x162990
-#define _ICL_PORT_TX_DW4_LN0_B		0x6C890
-#define _ICL_PORT_TX_DW4_AUX_A		0x162390
-#define _ICL_PORT_TX_DW4_AUX_B		0x6c390
-#define ICL_PORT_TX_DW4_GRP(port)	_MMIO_PORT(port, \
-						   _ICL_PORT_TX_DW4_GRP_A, \
-						   _ICL_PORT_TX_DW4_GRP_B)
-#define ICL_PORT_TX_DW4_LN(port, ln)	_MMIO(_PORT(port, \
-						   _ICL_PORT_TX_DW4_LN0_A, \
-						   _ICL_PORT_TX_DW4_LN0_B) + \
-					      ((ln) * (_ICL_PORT_TX_DW4_LN1_A - \
-							_ICL_PORT_TX_DW4_LN0_A)))
-#define ICL_PORT_TX_DW4_AUX(port)	_MMIO_PORT(port, \
-						   _ICL_PORT_TX_DW4_AUX_A, \
-						   _ICL_PORT_TX_DW4_AUX_B)
+#define ICL_PORT_TX_DW4_AUX(port)	_MMIO(_ICL_PORT_TX_DW_AUX(4, port))
+#define ICL_PORT_TX_DW4_GRP(port)	_MMIO(_ICL_PORT_TX_DW_GRP(4, port))
+#define ICL_PORT_TX_DW4_LN0(port)	_MMIO(_ICL_PORT_TX_DW_LN(4, 0, port))
+#define ICL_PORT_TX_DW4_LN(port, ln)	_MMIO(_ICL_PORT_TX_DW_LN(4, ln, port))
 #define   LOADGEN_SELECT		(1 << 31)
 #define   POST_CURSOR_1(x)		((x) << 12)
 #define   POST_CURSOR_1_MASK		(0x3F << 12)
@@ -1821,23 +1851,11 @@ enum i915_power_well_id {
 #define   CURSOR_COEFF(x)		((x) << 0)
 #define   CURSOR_COEFF_MASK		(0x3F << 0)
 
-#define CNL_PORT_TX_DW5_GRP(port)	_MMIO(_CNL_PORT_TX_DW_GRP((port), 5))
-#define CNL_PORT_TX_DW5_LN0(port)	_MMIO(_CNL_PORT_TX_DW_LN0((port), 5))
-#define _ICL_PORT_TX_DW5_GRP_A		0x162694
-#define _ICL_PORT_TX_DW5_GRP_B		0x6C694
-#define _ICL_PORT_TX_DW5_LN0_A		0x162894
-#define _ICL_PORT_TX_DW5_LN0_B		0x6C894
-#define _ICL_PORT_TX_DW5_AUX_A		0x162394
-#define _ICL_PORT_TX_DW5_AUX_B		0x6c394
-#define ICL_PORT_TX_DW5_GRP(port)	_MMIO_PORT(port, \
-						   _ICL_PORT_TX_DW5_GRP_A, \
-						   _ICL_PORT_TX_DW5_GRP_B)
-#define ICL_PORT_TX_DW5_LN0(port)	_MMIO_PORT(port, \
-						   _ICL_PORT_TX_DW5_LN0_A, \
-						   _ICL_PORT_TX_DW5_LN0_B)
-#define ICL_PORT_TX_DW5_AUX(port)	_MMIO_PORT(port, \
-						   _ICL_PORT_TX_DW5_AUX_A, \
-						   _ICL_PORT_TX_DW5_AUX_B)
+#define CNL_PORT_TX_DW5_GRP(port)	_MMIO(_CNL_PORT_TX_DW_GRP(5, port))
+#define CNL_PORT_TX_DW5_LN0(port)	_MMIO(_CNL_PORT_TX_DW_LN0(5, port))
+#define ICL_PORT_TX_DW5_AUX(port)	_MMIO(_ICL_PORT_TX_DW_AUX(5, port))
+#define ICL_PORT_TX_DW5_GRP(port)	_MMIO(_ICL_PORT_TX_DW_GRP(5, port))
+#define ICL_PORT_TX_DW5_LN0(port)	_MMIO(_ICL_PORT_TX_DW_LN(5, 0, port))
 #define   TX_TRAINING_EN		(1 << 31)
 #define   TAP2_DISABLE			(1 << 30)
 #define   TAP3_DISABLE			(1 << 29)
@@ -2054,47 +2072,10 @@ enum i915_power_well_id {
 #define BXT_PORT_CL2CM_DW6(phy)		_BXT_PHY((phy), _PORT_CL2CM_DW6_BC)
 #define   DW6_OLDO_DYN_PWR_DOWN_EN	(1 << 28)
 
-#define CNL_PORT_COMP_DW0		_MMIO(0x162100)
-#define   COMP_INIT			(1 << 31)
-#define CNL_PORT_COMP_DW1		_MMIO(0x162104)
-#define CNL_PORT_COMP_DW3		_MMIO(0x16210c)
-#define   PROCESS_INFO_DOT_0		(0 << 26)
-#define   PROCESS_INFO_DOT_1		(1 << 26)
-#define   PROCESS_INFO_DOT_4		(2 << 26)
-#define   PROCESS_INFO_MASK		(7 << 26)
-#define   PROCESS_INFO_SHIFT		26
-#define   VOLTAGE_INFO_0_85V		(0 << 24)
-#define   VOLTAGE_INFO_0_95V		(1 << 24)
-#define   VOLTAGE_INFO_1_05V		(2 << 24)
-#define   VOLTAGE_INFO_MASK		(3 << 24)
-#define   VOLTAGE_INFO_SHIFT		24
-#define CNL_PORT_COMP_DW9		_MMIO(0x162124)
-#define CNL_PORT_COMP_DW10		_MMIO(0x162128)
-
-#define _ICL_PORT_COMP_DW0_A		0x162100
-#define _ICL_PORT_COMP_DW0_B		0x6C100
-#define ICL_PORT_COMP_DW0(port)		_MMIO_PORT(port, _ICL_PORT_COMP_DW0_A, \
-							 _ICL_PORT_COMP_DW0_B)
-#define _ICL_PORT_COMP_DW1_A		0x162104
-#define _ICL_PORT_COMP_DW1_B		0x6C104
-#define ICL_PORT_COMP_DW1(port)		_MMIO_PORT(port, _ICL_PORT_COMP_DW1_A, \
-							 _ICL_PORT_COMP_DW1_B)
-#define _ICL_PORT_COMP_DW3_A		0x16210C
-#define _ICL_PORT_COMP_DW3_B		0x6C10C
-#define ICL_PORT_COMP_DW3(port)		_MMIO_PORT(port, _ICL_PORT_COMP_DW3_A, \
-							 _ICL_PORT_COMP_DW3_B)
-#define _ICL_PORT_COMP_DW9_A		0x162124
-#define _ICL_PORT_COMP_DW9_B		0x6C124
-#define ICL_PORT_COMP_DW9(port)		_MMIO_PORT(port, _ICL_PORT_COMP_DW9_A, \
-							 _ICL_PORT_COMP_DW9_B)
-#define _ICL_PORT_COMP_DW10_A		0x162128
-#define _ICL_PORT_COMP_DW10_B		0x6C128
-#define ICL_PORT_COMP_DW10(port)	_MMIO_PORT(port, \
-						   _ICL_PORT_COMP_DW10_A, \
-						   _ICL_PORT_COMP_DW10_B)
+#define FIA1_BASE			0x163000
 
 /* ICL PHY DFLEX registers */
-#define PORT_TX_DFLEXDPMLE1		_MMIO(0x1638C0)
+#define PORT_TX_DFLEXDPMLE1		_MMIO(FIA1_BASE + 0x008C0)
 #define   DFLEXDPMLE1_DPMLETC_MASK(tc_port)	(0xf << (4 * (tc_port)))
 #define   DFLEXDPMLE1_DPMLETC_ML0(tc_port)	(1 << (4 * (tc_port)))
 #define   DFLEXDPMLE1_DPMLETC_ML1_0(tc_port)	(3 << (4 * (tc_port)))
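Rebasing the DFLEX register on FIA1_BASE is value-preserving; it only names the Type-C FIA block so related registers can be expressed as base plus offset. A one-line sanity check:

#include <assert.h>

#define FIA1_BASE 0x163000

int main(void)
{
	/* Same address as the old literal: */
	assert(FIA1_BASE + 0x008C0 == 0x1638C0);
	return 0;
}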
@@ -2417,6 +2398,7 @@ enum i915_power_well_id {
 
 #define GEN8_GAMW_ECO_DEV_RW_IA _MMIO(0x4080)
 #define   GAMW_ECO_ENABLE_64K_IPS_FIELD 0xF
+#define   GAMW_ECO_DEV_CTX_RELOAD_DISABLE	(1 << 7)
 
 #define GAMT_CHKN_BIT_REG	_MMIO(0x4ab8)
 #define   GAMT_CHKN_DISABLE_L3_COH_PIPE	(1 << 31)
@@ -2577,6 +2559,7 @@ enum i915_power_well_id {
 /* chicken reg for WaConextSwitchWithConcurrentTLBInvalidate */
 #define GEN9_CSFE_CHICKEN1_RCS	_MMIO(0x20D4)
 #define   GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE (1 << 2)
+#define   GEN11_ENABLE_32_PLANE_MODE (1 << 7)
 
 /* WaClearTdlStateAckDirtyBits */
 #define GEN8_STATE_ACK		_MMIO(0x20F0)
@@ -3479,11 +3462,13 @@ enum i915_power_well_id {
 /*
  * Palette regs
  */
-#define PALETTE_A_OFFSET 0xa000
-#define PALETTE_B_OFFSET 0xa800
-#define CHV_PALETTE_C_OFFSET 0xc000
-#define PALETTE(pipe, i) _MMIO(dev_priv->info.palette_offsets[pipe] + \
-			       dev_priv->info.display_mmio_offset + (i) * 4)
+#define _PALETTE_A		0xa000
+#define _PALETTE_B		0xa800
+#define _CHV_PALETTE_C		0xc000
+#define PALETTE(pipe, i)	_MMIO(dev_priv->info.display_mmio_offset + \
+				      _PICK((pipe), _PALETTE_A,		\
+					    _PALETTE_B, _CHV_PALETTE_C) + \
+				      (i) * 4)
 
 /* MCH MMIO space */
 
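With palette_offsets[] gone from the device info (see the first i915_pci.c hunk above), PALETTE() picks among the three possible bases directly. A userspace stand-in; display_mmio_offset is omitted here (it would be the 0x180000 VLV/CHV display base in the real driver):

#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;

#define _PICK(__index, ...) (((const u32 []){ __VA_ARGS__ })[__index])

enum { PIPE_A, PIPE_B, PIPE_C };

/* Stand-in for the new PALETTE(pipe, i). */
static u32 palette(int pipe, int i)
{
	return _PICK(pipe, 0xa000, 0xa800, 0xc000) + i * 4;
}

int main(void)
{
	printf("%#x\n", palette(PIPE_C, 0));   /* 0xc000, the CHV pipe C base */
	printf("%#x\n", palette(PIPE_B, 255)); /* 0xabfc, last gamma entry */
	return 0;
}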
@@ -4065,15 +4050,27 @@ enum {
 #define _VSYNCSHIFT_B	0x61028
 #define _PIPE_MULT_B	0x6102c
 
+/* DSI 0 timing regs */
+#define _HTOTAL_DSI0		0x6b000
+#define _HSYNC_DSI0		0x6b008
+#define _VTOTAL_DSI0		0x6b00c
+#define _VSYNC_DSI0		0x6b014
+#define _VSYNCSHIFT_DSI0	0x6b028
+
+/* DSI 1 timing regs */
+#define _HTOTAL_DSI1		0x6b800
+#define _HSYNC_DSI1		0x6b808
+#define _VTOTAL_DSI1		0x6b80c
+#define _VSYNC_DSI1		0x6b814
+#define _VSYNCSHIFT_DSI1	0x6b828
+
 #define TRANSCODER_A_OFFSET 0x60000
 #define TRANSCODER_B_OFFSET 0x61000
 #define TRANSCODER_C_OFFSET 0x62000
 #define CHV_TRANSCODER_C_OFFSET 0x63000
 #define TRANSCODER_EDP_OFFSET 0x6f000
-
-#define _MMIO_TRANS2(pipe, reg) _MMIO(dev_priv->info.trans_offsets[(pipe)] - \
-	dev_priv->info.trans_offsets[TRANSCODER_A] + (reg) + \
-	dev_priv->info.display_mmio_offset)
+#define TRANSCODER_DSI0_OFFSET	0x6b000
+#define TRANSCODER_DSI1_OFFSET	0x6b800
 
 #define HTOTAL(trans)	_MMIO_TRANS2(trans, _HTOTAL_A)
 #define HBLANK(trans)	_MMIO_TRANS2(trans, _HBLANK_A)
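The DSI timing literals document the addresses, but the heavy lifting is done by the transcoder offset table: once TRANSCODER_DSI0/1 land in trans_offsets[] (see the GEN11_FEATURES hunk in i915_pci.c above), plain HTOTAL(trans) and friends resolve to the same registers through _MMIO_TRANS2(). Assuming _HTOTAL_A and _VSYNCSHIFT_A sit at the transcoder-A base plus 0x0 and 0x28, as the _B variants above imply:

#include <assert.h>

#define TRANSCODER_A_OFFSET	0x60000
#define TRANSCODER_DSI0_OFFSET	0x6b000
#define TRANSCODER_DSI1_OFFSET	0x6b800

#define _HTOTAL_A	0x60000	/* assumed: transcoder-A base + 0x0 */
#define _VSYNCSHIFT_A	0x60028	/* assumed: transcoder-A base + 0x28 */

int main(void)
{
	/* _MMIO_TRANS2()-style rebasing reproduces the DSI literals: */
	assert(TRANSCODER_DSI0_OFFSET - TRANSCODER_A_OFFSET + _HTOTAL_A == 0x6b000);
	assert(TRANSCODER_DSI1_OFFSET - TRANSCODER_A_OFFSET + _VSYNCSHIFT_A == 0x6b828);
	return 0;
}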
@@ -4199,7 +4196,7 @@ enum {
 #define   EDP_PSR_DEBUG_MASK_LPSP		(1 << 27)
 #define   EDP_PSR_DEBUG_MASK_MEMUP		(1 << 26)
 #define   EDP_PSR_DEBUG_MASK_HPD		(1 << 25)
-#define   EDP_PSR_DEBUG_MASK_DISP_REG_WRITE	(1 << 16)
+#define   EDP_PSR_DEBUG_MASK_DISP_REG_WRITE	(1 << 16) /* Reserved in ICL+ */
 #define   EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN	(1 << 15) /* SKL+ */
 
 #define EDP_PSR2_CTL			_MMIO(0x6f900)
@@ -4236,7 +4233,7 @@ enum {
 #define  PSR_EVENT_FRONT_BUFFER_MODIFY		(1 << 9)
 #define  PSR_EVENT_WD_TIMER_EXPIRE		(1 << 8)
 #define  PSR_EVENT_PIPE_REGISTERS_UPDATE	(1 << 6)
-#define  PSR_EVENT_REGISTER_UPDATE		(1 << 5)
+#define  PSR_EVENT_REGISTER_UPDATE		(1 << 5) /* Reserved in ICL+ */
 #define  PSR_EVENT_HDCP_ENABLE			(1 << 4)
 #define  PSR_EVENT_KVMR_SESSION_ENABLE		(1 << 3)
 #define  PSR_EVENT_VBI_ENABLE			(1 << 2)
@@ -4588,6 +4585,15 @@ enum {
 #define   VIDEO_DIP_FREQ_2VSYNC		(2 << 16)
 #define   VIDEO_DIP_FREQ_MASK		(3 << 16)
 /* HSW and later: */
+#define   DRM_DIP_ENABLE		(1 << 28)
+#define   PSR_VSC_BIT_7_SET		(1 << 27)
+#define   VSC_SELECT_MASK		(0x3 << 25)
+#define   VSC_SELECT_SHIFT		25
+#define   VSC_DIP_HW_HEA_DATA		(0 << 25)
+#define   VSC_DIP_HW_HEA_SW_DATA	(1 << 25)
+#define   VSC_DIP_HW_DATA_SW_HEA	(2 << 25)
+#define   VSC_DIP_SW_HEA_DATA		(3 << 25)
+#define   VDIP_ENABLE_PPS		(1 << 24)
 #define   VIDEO_DIP_ENABLE_VSC_HSW	(1 << 20)
 #define   VIDEO_DIP_ENABLE_GCP_HSW	(1 << 16)
 #define   VIDEO_DIP_ENABLE_AVI_HSW	(1 << 12)
@@ -4595,16 +4601,6 @@ enum {
 #define   VIDEO_DIP_ENABLE_GMP_HSW	(1 << 4)
 #define   VIDEO_DIP_ENABLE_SPD_HSW	(1 << 0)
 
-#define   DRM_DIP_ENABLE		(1 << 28)
-#define   PSR_VSC_BIT_7_SET		(1 << 27)
-#define   VSC_SELECT_MASK		(0x3 << 25)
-#define   VSC_SELECT_SHIFT		25
-#define   VSC_DIP_HW_HEA_DATA		(0 << 25)
-#define   VSC_DIP_HW_HEA_SW_DATA	(1 << 25)
-#define   VSC_DIP_HW_DATA_SW_HEA	(2 << 25)
-#define   VSC_DIP_SW_HEA_DATA		(3 << 25)
-#define   VDIP_ENABLE_PPS		(1 << 24)
-
 /* Panel power sequencing */
 #define PPS_BASE			0x61200
 #define VLV_PPS_BASE			(VLV_DISPLAY_BASE + PPS_BASE)
@@ -5640,9 +5636,9 @@ enum {
  */
 #define PIPE_EDP_OFFSET	0x7f000
 
-#define _MMIO_PIPE2(pipe, reg) _MMIO(dev_priv->info.pipe_offsets[pipe] - \
-	dev_priv->info.pipe_offsets[PIPE_A] + (reg) + \
-	dev_priv->info.display_mmio_offset)
+/* ICL DSI 0 and 1 */
+#define PIPE_DSI0_OFFSET	0x7b000
+#define PIPE_DSI1_OFFSET	0x7b800
 
 #define PIPECONF(pipe)		_MMIO_PIPE2(pipe, _PIPEACONF)
 #define PIPEDSL(pipe)		_MMIO_PIPE2(pipe, _PIPEADSL)
@@ -6091,10 +6087,6 @@ enum {
 #define _CURBBASE_IVB		0x71084
 #define _CURBPOS_IVB		0x71088
 
-#define _CURSOR2(pipe, reg) _MMIO(dev_priv->info.cursor_offsets[(pipe)] - \
-	dev_priv->info.cursor_offsets[PIPE_A] + (reg) + \
-	dev_priv->info.display_mmio_offset)
-
 #define CURCNTR(pipe) _CURSOR2(pipe, _CURACNTR)
 #define CURBASE(pipe) _CURSOR2(pipe, _CURABASE)
 #define CURPOS(pipe) _CURSOR2(pipe, _CURAPOS)
@@ -6228,6 +6220,10 @@ enum {
 #define _DSPBOFFSET		(dev_priv->info.display_mmio_offset + 0x711A4)
 #define _DSPBSURFLIVE		(dev_priv->info.display_mmio_offset + 0x711AC)
 
+/* ICL DSI 0 and 1 */
+#define _PIPEDSI0CONF		0x7b008
+#define _PIPEDSI1CONF		0x7b808
+
 /* Sprite A control */
 #define _DVSACNTR		0x72180
 #define   DVS_ENABLE		(1 << 31)
@@ -6515,6 +6511,7 @@ enum {
 #define   PLANE_CTL_KEY_ENABLE_DESTINATION	(2 << 21)
 #define   PLANE_CTL_ORDER_BGRX			(0 << 20)
 #define   PLANE_CTL_ORDER_RGBX			(1 << 20)
+#define   PLANE_CTL_YUV420_Y_PLANE		(1 << 19)
 #define   PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709	(1 << 18)
 #define   PLANE_CTL_YUV422_ORDER_MASK		(0x3 << 16)
 #define   PLANE_CTL_YUV422_YUYV			(0 << 16)
@@ -6558,17 +6555,33 @@ enum {
6558#define _PLANE_KEYVAL_2_A 0x70294 6555#define _PLANE_KEYVAL_2_A 0x70294
6559#define _PLANE_KEYMSK_1_A 0x70198 6556#define _PLANE_KEYMSK_1_A 0x70198
6560#define _PLANE_KEYMSK_2_A 0x70298 6557#define _PLANE_KEYMSK_2_A 0x70298
6558#define PLANE_KEYMSK_ALPHA_ENABLE (1 << 31)
6561#define _PLANE_KEYMAX_1_A 0x701a0 6559#define _PLANE_KEYMAX_1_A 0x701a0
6562#define _PLANE_KEYMAX_2_A 0x702a0 6560#define _PLANE_KEYMAX_2_A 0x702a0
6561#define PLANE_KEYMAX_ALPHA(a) ((a) << 24)
6563#define _PLANE_AUX_DIST_1_A 0x701c0 6562#define _PLANE_AUX_DIST_1_A 0x701c0
6564#define _PLANE_AUX_DIST_2_A 0x702c0 6563#define _PLANE_AUX_DIST_2_A 0x702c0
6565#define _PLANE_AUX_OFFSET_1_A 0x701c4 6564#define _PLANE_AUX_OFFSET_1_A 0x701c4
6566#define _PLANE_AUX_OFFSET_2_A 0x702c4 6565#define _PLANE_AUX_OFFSET_2_A 0x702c4
6566#define _PLANE_CUS_CTL_1_A 0x701c8
6567#define _PLANE_CUS_CTL_2_A 0x702c8
6568#define PLANE_CUS_ENABLE (1 << 31)
6569#define PLANE_CUS_PLANE_6 (0 << 30)
6570#define PLANE_CUS_PLANE_7 (1 << 30)
6571#define PLANE_CUS_HPHASE_SIGN_NEGATIVE (1 << 19)
6572#define PLANE_CUS_HPHASE_0 (0 << 16)
6573#define PLANE_CUS_HPHASE_0_25 (1 << 16)
6574#define PLANE_CUS_HPHASE_0_5 (2 << 16)
6575#define PLANE_CUS_VPHASE_SIGN_NEGATIVE (1 << 15)
6576#define PLANE_CUS_VPHASE_0 (0 << 12)
6577#define PLANE_CUS_VPHASE_0_25 (1 << 12)
6578#define PLANE_CUS_VPHASE_0_5 (2 << 12)
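
The new chroma-upsampler (CUS) control packs an enable, the source Y plane, and
signed quarter-pixel phase fields into one register. A hedged sketch composing a
value for NV12; the -0.25 phases are an illustrative choice, not a mandated one:

#include <stdint.h>
#include <stdio.h>

/* Bit definitions copied from the diff; the composition is illustrative. */
#define PLANE_CUS_ENABLE		(1u << 31)
#define PLANE_CUS_PLANE_7		(1u << 30)
#define PLANE_CUS_HPHASE_SIGN_NEGATIVE	(1u << 19)
#define PLANE_CUS_HPHASE_0_25		(1u << 16)
#define PLANE_CUS_VPHASE_SIGN_NEGATIVE	(1u << 15)
#define PLANE_CUS_VPHASE_0_25		(1u << 12)

int main(void)
{
	/* NV12 chroma is offset from luma, so program -0.25 phases here. */
	uint32_t cus_ctl = PLANE_CUS_ENABLE | PLANE_CUS_PLANE_7 |
			   PLANE_CUS_HPHASE_SIGN_NEGATIVE | PLANE_CUS_HPHASE_0_25 |
			   PLANE_CUS_VPHASE_SIGN_NEGATIVE | PLANE_CUS_VPHASE_0_25;

	printf("PLANE_CUS_CTL value: %#x\n", cus_ctl);
	return 0;
}
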
6567#define _PLANE_COLOR_CTL_1_A 0x701CC /* GLK+ */ 6579#define _PLANE_COLOR_CTL_1_A 0x701CC /* GLK+ */
6568#define _PLANE_COLOR_CTL_2_A 0x702CC /* GLK+ */ 6580#define _PLANE_COLOR_CTL_2_A 0x702CC /* GLK+ */
6569#define _PLANE_COLOR_CTL_3_A 0x703CC /* GLK+ */ 6581#define _PLANE_COLOR_CTL_3_A 0x703CC /* GLK+ */
6570#define PLANE_COLOR_PIPE_GAMMA_ENABLE (1 << 30) /* Pre-ICL */ 6582#define PLANE_COLOR_PIPE_GAMMA_ENABLE (1 << 30) /* Pre-ICL */
6571#define PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE (1 << 28) 6583#define PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE (1 << 28)
6584#define PLANE_COLOR_INPUT_CSC_ENABLE (1 << 20) /* ICL+ */
6572#define PLANE_COLOR_PIPE_CSC_ENABLE (1 << 23) /* Pre-ICL */ 6585#define PLANE_COLOR_PIPE_CSC_ENABLE (1 << 23) /* Pre-ICL */
6573#define PLANE_COLOR_CSC_MODE_BYPASS (0 << 17) 6586#define PLANE_COLOR_CSC_MODE_BYPASS (0 << 17)
6574#define PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709 (1 << 17) 6587#define PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709 (1 << 17)
@@ -6585,6 +6598,55 @@ enum {
6585#define _PLANE_NV12_BUF_CFG_1_A 0x70278 6598#define _PLANE_NV12_BUF_CFG_1_A 0x70278
6586#define _PLANE_NV12_BUF_CFG_2_A 0x70378 6599#define _PLANE_NV12_BUF_CFG_2_A 0x70378
6587 6600
6601/* Input CSC Register Definitions */
6602#define _PLANE_INPUT_CSC_RY_GY_1_A 0x701E0
6603#define _PLANE_INPUT_CSC_RY_GY_2_A 0x702E0
6604
6605#define _PLANE_INPUT_CSC_RY_GY_1_B 0x711E0
6606#define _PLANE_INPUT_CSC_RY_GY_2_B 0x712E0
6607
6608#define _PLANE_INPUT_CSC_RY_GY_1(pipe) \
6609 _PIPE(pipe, _PLANE_INPUT_CSC_RY_GY_1_A, \
6610 _PLANE_INPUT_CSC_RY_GY_1_B)
6611#define _PLANE_INPUT_CSC_RY_GY_2(pipe) \
6612 _PIPE(pipe, _PLANE_INPUT_CSC_RY_GY_2_A, \
6613 _PLANE_INPUT_CSC_RY_GY_2_B)
6614
6615#define PLANE_INPUT_CSC_COEFF(pipe, plane, index) \
6616 _MMIO_PLANE(plane, _PLANE_INPUT_CSC_RY_GY_1(pipe) + (index) * 4, \
6617 _PLANE_INPUT_CSC_RY_GY_2(pipe) + (index) * 4)
6618
6619#define _PLANE_INPUT_CSC_PREOFF_HI_1_A 0x701F8
6620#define _PLANE_INPUT_CSC_PREOFF_HI_2_A 0x702F8
6621
6622#define _PLANE_INPUT_CSC_PREOFF_HI_1_B 0x711F8
6623#define _PLANE_INPUT_CSC_PREOFF_HI_2_B 0x712F8
6624
6625#define _PLANE_INPUT_CSC_PREOFF_HI_1(pipe) \
6626 _PIPE(pipe, _PLANE_INPUT_CSC_PREOFF_HI_1_A, \
6627 _PLANE_INPUT_CSC_PREOFF_HI_1_B)
6628#define _PLANE_INPUT_CSC_PREOFF_HI_2(pipe) \
6629 _PIPE(pipe, _PLANE_INPUT_CSC_PREOFF_HI_2_A, \
6630 _PLANE_INPUT_CSC_PREOFF_HI_2_B)
6631#define PLANE_INPUT_CSC_PREOFF(pipe, plane, index) \
6632 _MMIO_PLANE(plane, _PLANE_INPUT_CSC_PREOFF_HI_1(pipe) + (index) * 4, \
6633 _PLANE_INPUT_CSC_PREOFF_HI_2(pipe) + (index) * 4)
6634
6635#define _PLANE_INPUT_CSC_POSTOFF_HI_1_A 0x70204
6636#define _PLANE_INPUT_CSC_POSTOFF_HI_2_A 0x70304
6637
6638#define _PLANE_INPUT_CSC_POSTOFF_HI_1_B 0x71204
6639#define _PLANE_INPUT_CSC_POSTOFF_HI_2_B 0x71304
6640
6641#define _PLANE_INPUT_CSC_POSTOFF_HI_1(pipe) \
6642 _PIPE(pipe, _PLANE_INPUT_CSC_POSTOFF_HI_1_A, \
6643 _PLANE_INPUT_CSC_POSTOFF_HI_1_B)
6644#define _PLANE_INPUT_CSC_POSTOFF_HI_2(pipe) \
6645 _PIPE(pipe, _PLANE_INPUT_CSC_POSTOFF_HI_2_A, \
6646 _PLANE_INPUT_CSC_POSTOFF_HI_2_B)
6647#define PLANE_INPUT_CSC_POSTOFF(pipe, plane, index) \
6648 _MMIO_PLANE(plane, _PLANE_INPUT_CSC_POSTOFF_HI_1(pipe) + (index) * 4, \
6649 _PLANE_INPUT_CSC_POSTOFF_HI_2(pipe) + (index) * 4)
6588 6650
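
The input CSC block exposes six coefficient dwords plus three pre- and three
post-offset dwords per plane, all addressed as base + index * 4. A sketch of the
programming loop, with icl_write() as a hypothetical stand-in for I915_WRITE and
zeroed placeholder tables:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for I915_WRITE; the register math mirrors the macros above. */
static void icl_write(uint32_t reg, uint32_t val)
{
	printf("write %#x <- %#x\n", reg, val);
}

#define _PLANE_INPUT_CSC_RY_GY_1_A	0x701E0
#define _PLANE_INPUT_CSC_PREOFF_HI_1_A	0x701F8
#define _PLANE_INPUT_CSC_POSTOFF_HI_1_A	0x70204

int main(void)
{
	/* Placeholder tables: 3x3 matrix packed two coefficients per dword. */
	const uint32_t coeff[6] = { 0 };
	const uint32_t preoff[3] = { 0 }, postoff[3] = { 0 };
	int i;

	for (i = 0; i < 6; i++)	/* PLANE_INPUT_CSC_COEFF(pipe, plane, i) */
		icl_write(_PLANE_INPUT_CSC_RY_GY_1_A + i * 4, coeff[i]);
	for (i = 0; i < 3; i++) {	/* pre- and post-CSC offsets */
		icl_write(_PLANE_INPUT_CSC_PREOFF_HI_1_A + i * 4, preoff[i]);
		icl_write(_PLANE_INPUT_CSC_POSTOFF_HI_1_A + i * 4, postoff[i]);
	}
	return 0;
}
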
6589#define _PLANE_CTL_1_B 0x71180 6651#define _PLANE_CTL_1_B 0x71180
6590#define _PLANE_CTL_2_B 0x71280 6652#define _PLANE_CTL_2_B 0x71280
@@ -6701,6 +6763,15 @@ enum {
6701#define PLANE_AUX_OFFSET(pipe, plane) \ 6763#define PLANE_AUX_OFFSET(pipe, plane) \
6702 _MMIO_PLANE(plane, _PLANE_AUX_OFFSET_1(pipe), _PLANE_AUX_OFFSET_2(pipe)) 6764 _MMIO_PLANE(plane, _PLANE_AUX_OFFSET_1(pipe), _PLANE_AUX_OFFSET_2(pipe))
6703 6765
6766#define _PLANE_CUS_CTL_1_B 0x711c8
6767#define _PLANE_CUS_CTL_2_B 0x712c8
6768#define _PLANE_CUS_CTL_1(pipe) \
6769 _PIPE(pipe, _PLANE_CUS_CTL_1_A, _PLANE_CUS_CTL_1_B)
6770#define _PLANE_CUS_CTL_2(pipe) \
6771 _PIPE(pipe, _PLANE_CUS_CTL_2_A, _PLANE_CUS_CTL_2_B)
6772#define PLANE_CUS_CTL(pipe, plane) \
6773 _MMIO_PLANE(plane, _PLANE_CUS_CTL_1(pipe), _PLANE_CUS_CTL_2(pipe))
6774
6704#define _PLANE_COLOR_CTL_1_B 0x711CC 6775#define _PLANE_COLOR_CTL_1_B 0x711CC
6705#define _PLANE_COLOR_CTL_2_B 0x712CC 6776#define _PLANE_COLOR_CTL_2_B 0x712CC
6706#define _PLANE_COLOR_CTL_3_B 0x713CC 6777#define _PLANE_COLOR_CTL_3_B 0x713CC
@@ -6854,11 +6925,12 @@ enum {
6854#define _PS_2B_CTRL 0x68A80 6925#define _PS_2B_CTRL 0x68A80
6855#define _PS_1C_CTRL 0x69180 6926#define _PS_1C_CTRL 0x69180
6856#define PS_SCALER_EN (1 << 31) 6927#define PS_SCALER_EN (1 << 31)
6857#define PS_SCALER_MODE_MASK (3 << 28) 6928#define SKL_PS_SCALER_MODE_MASK (3 << 28)
6858#define PS_SCALER_MODE_DYN (0 << 28) 6929#define SKL_PS_SCALER_MODE_DYN (0 << 28)
6859#define PS_SCALER_MODE_HQ (1 << 28) 6930#define SKL_PS_SCALER_MODE_HQ (1 << 28)
6860#define SKL_PS_SCALER_MODE_NV12 (2 << 28) 6931#define SKL_PS_SCALER_MODE_NV12 (2 << 28)
6861#define PS_SCALER_MODE_PLANAR (1 << 29) 6932#define PS_SCALER_MODE_PLANAR (1 << 29)
6933#define PS_SCALER_MODE_NORMAL (0 << 29)
6862#define PS_PLANE_SEL_MASK (7 << 25) 6934#define PS_PLANE_SEL_MASK (7 << 25)
6863#define PS_PLANE_SEL(plane) (((plane) + 1) << 25) 6935#define PS_PLANE_SEL(plane) (((plane) + 1) << 25)
6864#define PS_FILTER_MASK (3 << 23) 6936#define PS_FILTER_MASK (3 << 23)
@@ -6875,6 +6947,8 @@ enum {
6875#define PS_VADAPT_MODE_LEAST_ADAPT (0 << 5) 6947#define PS_VADAPT_MODE_LEAST_ADAPT (0 << 5)
6876#define PS_VADAPT_MODE_MOD_ADAPT (1 << 5) 6948#define PS_VADAPT_MODE_MOD_ADAPT (1 << 5)
6877#define PS_VADAPT_MODE_MOST_ADAPT (3 << 5) 6949#define PS_VADAPT_MODE_MOST_ADAPT (3 << 5)
6950#define PS_PLANE_Y_SEL_MASK (7 << 5)
6951#define PS_PLANE_Y_SEL(plane) (((plane) + 1) << 5)
6878 6952
6879#define _PS_PWR_GATE_1A 0x68160 6953#define _PS_PWR_GATE_1A 0x68160
6880#define _PS_PWR_GATE_2A 0x68260 6954#define _PS_PWR_GATE_2A 0x68260
@@ -7413,6 +7487,10 @@ enum {
7413#define GEN9_SLICE_COMMON_ECO_CHICKEN1 _MMIO(0x731c) 7487#define GEN9_SLICE_COMMON_ECO_CHICKEN1 _MMIO(0x731c)
7414#define GEN11_STATE_CACHE_REDIRECT_TO_CS (1 << 11) 7488#define GEN11_STATE_CACHE_REDIRECT_TO_CS (1 << 11)
7415 7489
7490#define GEN7_SARCHKMD _MMIO(0xB000)
7491#define GEN7_DISABLE_DEMAND_PREFETCH (1 << 31)
7492#define GEN7_DISABLE_SAMPLER_PREFETCH (1 << 30)
7493
7416#define GEN7_L3SQCREG1 _MMIO(0xB010) 7494#define GEN7_L3SQCREG1 _MMIO(0xB010)
7417#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000 7495#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
7418 7496
@@ -7828,8 +7906,7 @@ enum {
7828#define CNP_RAWCLK_DIV_MASK (0x3ff << 16) 7906#define CNP_RAWCLK_DIV_MASK (0x3ff << 16)
7829#define CNP_RAWCLK_DIV(div) ((div) << 16) 7907#define CNP_RAWCLK_DIV(div) ((div) << 16)
7830#define CNP_RAWCLK_FRAC_MASK (0xf << 26) 7908#define CNP_RAWCLK_FRAC_MASK (0xf << 26)
7831#define CNP_RAWCLK_FRAC(frac) ((frac) << 26) 7909#define CNP_RAWCLK_DEN(den) ((den) << 26)
7832#define ICP_RAWCLK_DEN(den) ((den) << 26)
7833#define ICP_RAWCLK_NUM(num) ((num) << 11) 7910#define ICP_RAWCLK_NUM(num) ((num) << 11)
7834 7911
7835#define PCH_DPLL_TMR_CFG _MMIO(0xc6208) 7912#define PCH_DPLL_TMR_CFG _MMIO(0xc6208)
@@ -8629,8 +8706,7 @@ enum {
8629#define GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC (1 << 9) 8706#define GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC (1 << 9)
8630#define GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC (1 << 7) 8707#define GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC (1 << 7)
8631 8708
8632#define GAMW_ECO_DEV_RW_IA_REG _MMIO(0x4080) 8709#define GEN10_SAMPLER_MODE _MMIO(0xE18C)
8633#define GAMW_ECO_DEV_CTX_RELOAD_DISABLE (1 << 7)
8634 8710
8635/* IVYBRIDGE DPF */ 8711/* IVYBRIDGE DPF */
8636#define GEN7_L3CDERRST1(slice) _MMIO(0xB008 + (slice) * 0x200) /* L3CD Error Status 1 */ 8712#define GEN7_L3CDERRST1(slice) _MMIO(0xB008 + (slice) * 0x200) /* L3CD Error Status 1 */
@@ -8931,6 +9007,15 @@ enum skl_power_gate {
8931#define CNL_AUX_ANAOVRD1_ENABLE (1 << 16) 9007#define CNL_AUX_ANAOVRD1_ENABLE (1 << 16)
8932#define CNL_AUX_ANAOVRD1_LDO_BYPASS (1 << 23) 9008#define CNL_AUX_ANAOVRD1_LDO_BYPASS (1 << 23)
8933 9009
9010#define _ICL_AUX_REG_IDX(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
9011#define _ICL_AUX_ANAOVRD1_A 0x162398
9012#define _ICL_AUX_ANAOVRD1_B 0x6C398
9013#define ICL_AUX_ANAOVRD1(pw_idx) _MMIO(_PICK(_ICL_AUX_REG_IDX(pw_idx), \
9014 _ICL_AUX_ANAOVRD1_A, \
9015 _ICL_AUX_ANAOVRD1_B))
9016#define ICL_AUX_ANAOVRD1_LDO_BYPASS (1 << 7)
9017#define ICL_AUX_ANAOVRD1_ENABLE (1 << 0)
9018
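
These AUX overrides are indexed by power well rather than by port, with
_ICL_AUX_REG_IDX() rebasing the index before _PICK() selects an address. A sketch
of that mapping; the ICL_PW_CTL_IDX_AUX_A value used here is a placeholder, not
the driver's:

#include <stdio.h>
#include <stdint.h>

#define ICL_PW_CTL_IDX_AUX_A 1	/* placeholder value for the demo */
/* Addresses from the diff: AUX A, then AUX B. */
static const uint32_t icl_aux_anaovrd1[] = { 0x162398, 0x6C398 };

int main(void)
{
	int pw_idx = ICL_PW_CTL_IDX_AUX_A + 1;	/* AUX B */

	/* Mirrors _ICL_AUX_REG_IDX(pw_idx) feeding _PICK(). */
	printf("ICL_AUX_ANAOVRD1 = %#x\n",
	       icl_aux_anaovrd1[pw_idx - ICL_PW_CTL_IDX_AUX_A]);
	return 0;
}
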
8934/* HDCP Key Registers */ 9019/* HDCP Key Registers */
8935#define HDCP_KEY_CONF _MMIO(0x66c00) 9020#define HDCP_KEY_CONF _MMIO(0x66c00)
8936#define HDCP_AKSV_SEND_TRIGGER BIT(31) 9021#define HDCP_AKSV_SEND_TRIGGER BIT(31)
@@ -9013,11 +9098,45 @@ enum skl_power_gate {
9013#define HDCP_STATUS_CIPHER BIT(16) 9098#define HDCP_STATUS_CIPHER BIT(16)
9014#define HDCP_STATUS_FRAME_CNT(x) (((x) >> 8) & 0xff) 9099#define HDCP_STATUS_FRAME_CNT(x) (((x) >> 8) & 0xff)
9015 9100
9101/* HDCP2.2 Registers */
9102#define _PORTA_HDCP2_BASE 0x66800
9103#define _PORTB_HDCP2_BASE 0x66500
9104#define _PORTC_HDCP2_BASE 0x66600
9105#define _PORTD_HDCP2_BASE 0x66700
9106#define _PORTE_HDCP2_BASE 0x66A00
9107#define _PORTF_HDCP2_BASE 0x66900
9108#define _PORT_HDCP2_BASE(port, x) _MMIO(_PICK((port), \
9109 _PORTA_HDCP2_BASE, \
9110 _PORTB_HDCP2_BASE, \
9111 _PORTC_HDCP2_BASE, \
9112 _PORTD_HDCP2_BASE, \
9113 _PORTE_HDCP2_BASE, \
9114 _PORTF_HDCP2_BASE) + (x))
9115
9116#define HDCP2_AUTH_DDI(port) _PORT_HDCP2_BASE(port, 0x98)
9117#define AUTH_LINK_AUTHENTICATED BIT(31)
9118#define AUTH_LINK_TYPE BIT(30)
9119#define AUTH_FORCE_CLR_INPUTCTR BIT(19)
9120#define AUTH_CLR_KEYS BIT(18)
9121
9122#define HDCP2_CTL_DDI(port) _PORT_HDCP2_BASE(port, 0xB0)
9123#define CTL_LINK_ENCRYPTION_REQ BIT(31)
9124
9125#define HDCP2_STATUS_DDI(port) _PORT_HDCP2_BASE(port, 0xB4)
9126#define STREAM_ENCRYPTION_STATUS_A BIT(31)
9127#define STREAM_ENCRYPTION_STATUS_B BIT(30)
9128#define STREAM_ENCRYPTION_STATUS_C BIT(29)
9129#define LINK_TYPE_STATUS BIT(22)
9130#define LINK_AUTH_STATUS BIT(21)
9131#define LINK_ENCRYPTION_STATUS BIT(20)
9132
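
Every HDCP2.2 register is an offset from a per-port base chosen by _PICK(). A
small sketch computing HDCP2_STATUS_DDI for port B and testing the encryption
bit, with read_reg() as a hypothetical MMIO read:

#include <stdint.h>
#include <stdio.h>

/* Port bases from the diff; read_reg() is a hypothetical MMIO read. */
static const uint32_t hdcp2_base[] = {
	0x66800, 0x66500, 0x66600, 0x66700, 0x66A00, 0x66900,
};
#define LINK_ENCRYPTION_STATUS (1u << 20)

static uint32_t read_reg(uint32_t reg) { (void)reg; return 0; }

int main(void)
{
	int port = 1;					/* port B */
	uint32_t status_reg = hdcp2_base[port] + 0xB4;	/* HDCP2_STATUS_DDI */
	int encrypted = !!(read_reg(status_reg) & LINK_ENCRYPTION_STATUS);

	printf("HDCP2 status reg %#x, encrypted=%d\n", status_reg, encrypted);
	return 0;
}
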
9016/* Per-pipe DDI Function Control */ 9133/* Per-pipe DDI Function Control */
9017#define _TRANS_DDI_FUNC_CTL_A 0x60400 9134#define _TRANS_DDI_FUNC_CTL_A 0x60400
9018#define _TRANS_DDI_FUNC_CTL_B 0x61400 9135#define _TRANS_DDI_FUNC_CTL_B 0x61400
9019#define _TRANS_DDI_FUNC_CTL_C 0x62400 9136#define _TRANS_DDI_FUNC_CTL_C 0x62400
9020#define _TRANS_DDI_FUNC_CTL_EDP 0x6F400 9137#define _TRANS_DDI_FUNC_CTL_EDP 0x6F400
9138#define _TRANS_DDI_FUNC_CTL_DSI0 0x6b400
9139#define _TRANS_DDI_FUNC_CTL_DSI1 0x6bc00
9021#define TRANS_DDI_FUNC_CTL(tran) _MMIO_TRANS2(tran, _TRANS_DDI_FUNC_CTL_A) 9140#define TRANS_DDI_FUNC_CTL(tran) _MMIO_TRANS2(tran, _TRANS_DDI_FUNC_CTL_A)
9022 9141
9023#define TRANS_DDI_FUNC_ENABLE (1 << 31) 9142#define TRANS_DDI_FUNC_ENABLE (1 << 31)
@@ -9055,6 +9174,19 @@ enum skl_power_gate {
9055 | TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ \ 9174 | TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ \
9056 | TRANS_DDI_HDMI_SCRAMBLING) 9175 | TRANS_DDI_HDMI_SCRAMBLING)
9057 9176
9177#define _TRANS_DDI_FUNC_CTL2_A 0x60404
9178#define _TRANS_DDI_FUNC_CTL2_B 0x61404
9179#define _TRANS_DDI_FUNC_CTL2_C 0x62404
9180#define _TRANS_DDI_FUNC_CTL2_EDP 0x6f404
9181#define _TRANS_DDI_FUNC_CTL2_DSI0 0x6b404
9182#define _TRANS_DDI_FUNC_CTL2_DSI1 0x6bc04
9183#define TRANS_DDI_FUNC_CTL2(tran) _MMIO_TRANS2(tran, \
9184 _TRANS_DDI_FUNC_CTL2_A)
9185#define PORT_SYNC_MODE_ENABLE (1 << 4)
 9186#define PORT_SYNC_MODE_MASTER_SELECT(x) ((x) << 0)
9187#define PORT_SYNC_MODE_MASTER_SELECT_MASK (0x7 << 0)
9188#define PORT_SYNC_MODE_MASTER_SELECT_SHIFT 0
9189
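
A sketch of how a slave transcoder might select its port-sync master with these
fields; the update sequence and the chosen master index are illustrative:

#include <stdint.h>
#include <stdio.h>

/* Field definitions from the diff; the update shown is hypothetical. */
#define PORT_SYNC_MODE_ENABLE			(1u << 4)
#define PORT_SYNC_MODE_MASTER_SELECT(x)		((uint32_t)(x) << 0)
#define PORT_SYNC_MODE_MASTER_SELECT_MASK	(0x7u << 0)

int main(void)
{
	uint32_t ctl2 = 0;

	/* Slave transcoder: enable port sync and point at master index 1. */
	ctl2 &= ~PORT_SYNC_MODE_MASTER_SELECT_MASK;
	ctl2 |= PORT_SYNC_MODE_ENABLE | PORT_SYNC_MODE_MASTER_SELECT(1);
	printf("TRANS_DDI_FUNC_CTL2 = %#x\n", ctl2);
	return 0;
}
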
9058/* DisplayPort Transport Control */ 9190/* DisplayPort Transport Control */
9059#define _DP_TP_CTL_A 0x64040 9191#define _DP_TP_CTL_A 0x64040
9060#define _DP_TP_CTL_B 0x64140 9192#define _DP_TP_CTL_B 0x64140
@@ -9226,6 +9358,8 @@ enum skl_power_gate {
9226#define TRANS_MSA_MISC(tran) _MMIO_TRANS2(tran, _TRANSA_MSA_MISC) 9358#define TRANS_MSA_MISC(tran) _MMIO_TRANS2(tran, _TRANSA_MSA_MISC)
9227 9359
9228#define TRANS_MSA_SYNC_CLK (1 << 0) 9360#define TRANS_MSA_SYNC_CLK (1 << 0)
9361#define TRANS_MSA_SAMPLING_444 (2 << 1)
9362#define TRANS_MSA_CLRSP_YCBCR (2 << 3)
9229#define TRANS_MSA_6_BPC (0 << 5) 9363#define TRANS_MSA_6_BPC (0 << 5)
9230#define TRANS_MSA_8_BPC (1 << 5) 9364#define TRANS_MSA_8_BPC (1 << 5)
9231#define TRANS_MSA_10_BPC (2 << 5) 9365#define TRANS_MSA_10_BPC (2 << 5)
@@ -9793,6 +9927,10 @@ enum skl_power_gate {
9793#define _MIPI_PORT(port, a, c) (((port) == PORT_A) ? a : c) /* ports A and C only */ 9927#define _MIPI_PORT(port, a, c) (((port) == PORT_A) ? a : c) /* ports A and C only */
9794#define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c)) 9928#define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c))
9795 9929
9930/* Gen11 DSI */
9931#define _MMIO_DSI(tc, dsi0, dsi1) _MMIO_TRANS((tc) - TRANSCODER_DSI_0, \
9932 dsi0, dsi1)
9933
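
_MMIO_DSI() reuses the pairwise selection of _MMIO_TRANS() after rebasing the
transcoder enum to TRANSCODER_DSI_0. A sketch of the index math; the enum values
here are placeholders:

#include <stdint.h>
#include <stdio.h>

/* Placeholder enum values; only the index arithmetic mirrors _MMIO_DSI. */
enum { TRANSCODER_DSI_0 = 4, TRANSCODER_DSI_1 = 5 };

static uint32_t mmio_dsi(int tc, uint32_t dsi0, uint32_t dsi1)
{
	/* Select by index: first + idx * (second - first). */
	return dsi0 + (uint32_t)(tc - TRANSCODER_DSI_0) * (dsi1 - dsi0);
}

int main(void)
{
	/* DSI_TRANS_FUNC_CONF instances from the definitions below. */
	printf("%#x\n", mmio_dsi(TRANSCODER_DSI_1, 0x6b030, 0x6b830));
	return 0;
}
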
9796#define MIPIO_TXESC_CLK_DIV1 _MMIO(0x160004) 9934#define MIPIO_TXESC_CLK_DIV1 _MMIO(0x160004)
9797#define GLK_TX_ESC_CLK_DIV1_MASK 0x3FF 9935#define GLK_TX_ESC_CLK_DIV1_MASK 0x3FF
9798#define MIPIO_TXESC_CLK_DIV2 _MMIO(0x160008) 9936#define MIPIO_TXESC_CLK_DIV2 _MMIO(0x160008)
@@ -9956,6 +10094,39 @@ enum skl_power_gate {
9956 _ICL_DSI_IO_MODECTL_1) 10094 _ICL_DSI_IO_MODECTL_1)
9957#define COMBO_PHY_MODE_DSI (1 << 0) 10095#define COMBO_PHY_MODE_DSI (1 << 0)
9958 10096
10097/* Display Stream Splitter Control */
10098#define DSS_CTL1 _MMIO(0x67400)
10099#define SPLITTER_ENABLE (1 << 31)
10100#define JOINER_ENABLE (1 << 30)
10101#define DUAL_LINK_MODE_INTERLEAVE (1 << 24)
10102#define DUAL_LINK_MODE_FRONTBACK (0 << 24)
10103#define OVERLAP_PIXELS_MASK (0xf << 16)
10104#define OVERLAP_PIXELS(pixels) ((pixels) << 16)
10105#define LEFT_DL_BUF_TARGET_DEPTH_MASK (0xfff << 0)
10106#define LEFT_DL_BUF_TARGET_DEPTH(pixels) ((pixels) << 0)
10107#define MAX_DL_BUFFER_TARGET_DEPTH 0x5a0
10108
10109#define DSS_CTL2 _MMIO(0x67404)
10110#define LEFT_BRANCH_VDSC_ENABLE (1 << 31)
10111#define RIGHT_BRANCH_VDSC_ENABLE (1 << 15)
10112#define RIGHT_DL_BUF_TARGET_DEPTH_MASK (0xfff << 0)
10113#define RIGHT_DL_BUF_TARGET_DEPTH(pixels) ((pixels) << 0)
10114
10115#define _ICL_PIPE_DSS_CTL1_PB 0x78200
10116#define _ICL_PIPE_DSS_CTL1_PC 0x78400
10117#define ICL_PIPE_DSS_CTL1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
10118 _ICL_PIPE_DSS_CTL1_PB, \
10119 _ICL_PIPE_DSS_CTL1_PC)
10120#define BIG_JOINER_ENABLE (1 << 29)
10121#define MASTER_BIG_JOINER_ENABLE (1 << 28)
10122#define VGA_CENTERING_ENABLE (1 << 27)
10123
10124#define _ICL_PIPE_DSS_CTL2_PB 0x78204
10125#define _ICL_PIPE_DSS_CTL2_PC 0x78404
10126#define ICL_PIPE_DSS_CTL2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
10127 _ICL_PIPE_DSS_CTL2_PB, \
10128 _ICL_PIPE_DSS_CTL2_PC)
10129
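
A hedged sketch of composing DSS_CTL1 for dual-link interleave mode; the overlap
and buffer-depth choices are illustrative:

#include <stdint.h>
#include <stdio.h>

/* Bits from the diff; the target depth chosen here is an example. */
#define SPLITTER_ENABLE			(1u << 31)
#define DUAL_LINK_MODE_INTERLEAVE	(1u << 24)
#define OVERLAP_PIXELS(pixels)		((uint32_t)(pixels) << 16)
#define LEFT_DL_BUF_TARGET_DEPTH(p)	((uint32_t)(p) << 0)
#define MAX_DL_BUFFER_TARGET_DEPTH	0x5a0

int main(void)
{
	uint32_t dss_ctl1 = SPLITTER_ENABLE | DUAL_LINK_MODE_INTERLEAVE |
			    OVERLAP_PIXELS(0) |
			    LEFT_DL_BUF_TARGET_DEPTH(MAX_DL_BUFFER_TARGET_DEPTH);

	printf("DSS_CTL1 = %#x\n", dss_ctl1);
	return 0;
}
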
9959#define BXT_P_DSI_REGULATOR_CFG _MMIO(0x160020) 10130#define BXT_P_DSI_REGULATOR_CFG _MMIO(0x160020)
9960#define STAP_SELECT (1 << 0) 10131#define STAP_SELECT (1 << 0)
9961 10132
@@ -10292,6 +10463,235 @@ enum skl_power_gate {
10292 _ICL_DSI_T_INIT_MASTER_0,\ 10463 _ICL_DSI_T_INIT_MASTER_0,\
10293 _ICL_DSI_T_INIT_MASTER_1) 10464 _ICL_DSI_T_INIT_MASTER_1)
10294 10465
10466#define _DPHY_CLK_TIMING_PARAM_0 0x162180
10467#define _DPHY_CLK_TIMING_PARAM_1 0x6c180
10468#define DPHY_CLK_TIMING_PARAM(port) _MMIO_PORT(port, \
10469 _DPHY_CLK_TIMING_PARAM_0,\
10470 _DPHY_CLK_TIMING_PARAM_1)
10471#define _DSI_CLK_TIMING_PARAM_0 0x6b080
10472#define _DSI_CLK_TIMING_PARAM_1 0x6b880
10473#define DSI_CLK_TIMING_PARAM(port) _MMIO_PORT(port, \
10474 _DSI_CLK_TIMING_PARAM_0,\
10475 _DSI_CLK_TIMING_PARAM_1)
10476#define CLK_PREPARE_OVERRIDE (1 << 31)
10477#define CLK_PREPARE(x) ((x) << 28)
10478#define CLK_PREPARE_MASK (0x7 << 28)
10479#define CLK_PREPARE_SHIFT 28
10480#define CLK_ZERO_OVERRIDE (1 << 27)
10481#define CLK_ZERO(x) ((x) << 20)
10482#define CLK_ZERO_MASK (0xf << 20)
10483#define CLK_ZERO_SHIFT 20
10484#define CLK_PRE_OVERRIDE (1 << 19)
10485#define CLK_PRE(x) ((x) << 16)
10486#define CLK_PRE_MASK (0x3 << 16)
10487#define CLK_PRE_SHIFT 16
10488#define CLK_POST_OVERRIDE (1 << 15)
10489#define CLK_POST(x) ((x) << 8)
10490#define CLK_POST_MASK (0x7 << 8)
10491#define CLK_POST_SHIFT 8
10492#define CLK_TRAIL_OVERRIDE (1 << 7)
10493#define CLK_TRAIL(x) ((x) << 0)
10494#define CLK_TRAIL_MASK (0xf << 0)
10495#define CLK_TRAIL_SHIFT 0
10496
10497#define _DPHY_DATA_TIMING_PARAM_0 0x162184
10498#define _DPHY_DATA_TIMING_PARAM_1 0x6c184
10499#define DPHY_DATA_TIMING_PARAM(port) _MMIO_PORT(port, \
10500 _DPHY_DATA_TIMING_PARAM_0,\
10501 _DPHY_DATA_TIMING_PARAM_1)
 10502#define _DSI_DATA_TIMING_PARAM_0 0x6b084
 10503#define _DSI_DATA_TIMING_PARAM_1 0x6b884
10504#define DSI_DATA_TIMING_PARAM(port) _MMIO_PORT(port, \
10505 _DSI_DATA_TIMING_PARAM_0,\
10506 _DSI_DATA_TIMING_PARAM_1)
10507#define HS_PREPARE_OVERRIDE (1 << 31)
10508#define HS_PREPARE(x) ((x) << 24)
10509#define HS_PREPARE_MASK (0x7 << 24)
10510#define HS_PREPARE_SHIFT 24
10511#define HS_ZERO_OVERRIDE (1 << 23)
10512#define HS_ZERO(x) ((x) << 16)
10513#define HS_ZERO_MASK (0xf << 16)
10514#define HS_ZERO_SHIFT 16
10515#define HS_TRAIL_OVERRIDE (1 << 15)
10516#define HS_TRAIL(x) ((x) << 8)
10517#define HS_TRAIL_MASK (0x7 << 8)
10518#define HS_TRAIL_SHIFT 8
10519#define HS_EXIT_OVERRIDE (1 << 7)
10520#define HS_EXIT(x) ((x) << 0)
10521#define HS_EXIT_MASK (0x7 << 0)
10522#define HS_EXIT_SHIFT 0
10523
10524#define _DPHY_TA_TIMING_PARAM_0 0x162188
10525#define _DPHY_TA_TIMING_PARAM_1 0x6c188
10526#define DPHY_TA_TIMING_PARAM(port) _MMIO_PORT(port, \
10527 _DPHY_TA_TIMING_PARAM_0,\
10528 _DPHY_TA_TIMING_PARAM_1)
10529#define _DSI_TA_TIMING_PARAM_0 0x6b098
10530#define _DSI_TA_TIMING_PARAM_1 0x6b898
10531#define DSI_TA_TIMING_PARAM(port) _MMIO_PORT(port, \
10532 _DSI_TA_TIMING_PARAM_0,\
10533 _DSI_TA_TIMING_PARAM_1)
10534#define TA_SURE_OVERRIDE (1 << 31)
10535#define TA_SURE(x) ((x) << 16)
10536#define TA_SURE_MASK (0x1f << 16)
10537#define TA_SURE_SHIFT 16
10538#define TA_GO_OVERRIDE (1 << 15)
10539#define TA_GO(x) ((x) << 8)
10540#define TA_GO_MASK (0xf << 8)
10541#define TA_GO_SHIFT 8
10542#define TA_GET_OVERRIDE (1 << 7)
10543#define TA_GET(x) ((x) << 0)
10544#define TA_GET_MASK (0xf << 0)
10545#define TA_GET_SHIFT 0
10546
10547/* DSI transcoder configuration */
10548#define _DSI_TRANS_FUNC_CONF_0 0x6b030
10549#define _DSI_TRANS_FUNC_CONF_1 0x6b830
10550#define DSI_TRANS_FUNC_CONF(tc) _MMIO_DSI(tc, \
10551 _DSI_TRANS_FUNC_CONF_0,\
10552 _DSI_TRANS_FUNC_CONF_1)
10553#define OP_MODE_MASK (0x3 << 28)
10554#define OP_MODE_SHIFT 28
10555#define CMD_MODE_NO_GATE (0x0 << 28)
10556#define CMD_MODE_TE_GATE (0x1 << 28)
10557#define VIDEO_MODE_SYNC_EVENT (0x2 << 28)
10558#define VIDEO_MODE_SYNC_PULSE (0x3 << 28)
10559#define LINK_READY (1 << 20)
 10560#define PIX_FMT_MASK (0x7 << 16)
10561#define PIX_FMT_SHIFT 16
10562#define PIX_FMT_RGB565 (0x0 << 16)
10563#define PIX_FMT_RGB666_PACKED (0x1 << 16)
10564#define PIX_FMT_RGB666_LOOSE (0x2 << 16)
10565#define PIX_FMT_RGB888 (0x3 << 16)
10566#define PIX_FMT_RGB101010 (0x4 << 16)
10567#define PIX_FMT_RGB121212 (0x5 << 16)
10568#define PIX_FMT_COMPRESSED (0x6 << 16)
10569#define BGR_TRANSMISSION (1 << 15)
10570#define PIX_VIRT_CHAN(x) ((x) << 12)
10571#define PIX_VIRT_CHAN_MASK (0x3 << 12)
10572#define PIX_VIRT_CHAN_SHIFT 12
10573#define PIX_BUF_THRESHOLD_MASK (0x3 << 10)
10574#define PIX_BUF_THRESHOLD_SHIFT 10
10575#define PIX_BUF_THRESHOLD_1_4 (0x0 << 10)
10576#define PIX_BUF_THRESHOLD_1_2 (0x1 << 10)
10577#define PIX_BUF_THRESHOLD_3_4 (0x2 << 10)
10578#define PIX_BUF_THRESHOLD_FULL (0x3 << 10)
10579#define CONTINUOUS_CLK_MASK (0x3 << 8)
10580#define CONTINUOUS_CLK_SHIFT 8
10581#define CLK_ENTER_LP_AFTER_DATA (0x0 << 8)
10582#define CLK_HS_OR_LP (0x2 << 8)
10583#define CLK_HS_CONTINUOUS (0x3 << 8)
10584#define LINK_CALIBRATION_MASK (0x3 << 4)
10585#define LINK_CALIBRATION_SHIFT 4
10586#define CALIBRATION_DISABLED (0x0 << 4)
10587#define CALIBRATION_ENABLED_INITIAL_ONLY (0x2 << 4)
10588#define CALIBRATION_ENABLED_INITIAL_PERIODIC (0x3 << 4)
10589#define S3D_ORIENTATION_LANDSCAPE (1 << 1)
10590#define EOTP_DISABLED (1 << 0)
10591
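
A sketch combining the transcoder-configuration fields above into one value; the
particular mode/format mix is illustrative, not a required configuration:

#include <stdint.h>
#include <stdio.h>

/* Field values from the diff; the combination shown is illustrative. */
#define VIDEO_MODE_SYNC_PULSE	(0x3u << 28)
#define PIX_FMT_RGB888		(0x3u << 16)
#define PIX_VIRT_CHAN(x)	((uint32_t)(x) << 12)
#define PIX_BUF_THRESHOLD_1_2	(0x1u << 10)
#define CLK_HS_CONTINUOUS	(0x3u << 8)

int main(void)
{
	uint32_t conf = VIDEO_MODE_SYNC_PULSE | PIX_FMT_RGB888 |
			PIX_VIRT_CHAN(0) | PIX_BUF_THRESHOLD_1_2 |
			CLK_HS_CONTINUOUS;

	printf("DSI_TRANS_FUNC_CONF = %#x\n", conf);
	return 0;
}
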
10592#define _DSI_CMD_RXCTL_0 0x6b0d4
10593#define _DSI_CMD_RXCTL_1 0x6b8d4
10594#define DSI_CMD_RXCTL(tc) _MMIO_DSI(tc, \
10595 _DSI_CMD_RXCTL_0,\
10596 _DSI_CMD_RXCTL_1)
10597#define READ_UNLOADS_DW (1 << 16)
10598#define RECEIVED_UNASSIGNED_TRIGGER (1 << 15)
10599#define RECEIVED_ACKNOWLEDGE_TRIGGER (1 << 14)
10600#define RECEIVED_TEAR_EFFECT_TRIGGER (1 << 13)
10601#define RECEIVED_RESET_TRIGGER (1 << 12)
10602#define RECEIVED_PAYLOAD_WAS_LOST (1 << 11)
10603#define RECEIVED_CRC_WAS_LOST (1 << 10)
10604#define NUMBER_RX_PLOAD_DW_MASK (0xff << 0)
10605#define NUMBER_RX_PLOAD_DW_SHIFT 0
10606
10607#define _DSI_CMD_TXCTL_0 0x6b0d0
10608#define _DSI_CMD_TXCTL_1 0x6b8d0
10609#define DSI_CMD_TXCTL(tc) _MMIO_DSI(tc, \
10610 _DSI_CMD_TXCTL_0,\
10611 _DSI_CMD_TXCTL_1)
10612#define KEEP_LINK_IN_HS (1 << 24)
10613#define FREE_HEADER_CREDIT_MASK (0x1f << 8)
 10614#define FREE_HEADER_CREDIT_SHIFT 8
10615#define FREE_PLOAD_CREDIT_MASK (0xff << 0)
10616#define FREE_PLOAD_CREDIT_SHIFT 0
10617#define MAX_HEADER_CREDIT 0x10
10618#define MAX_PLOAD_CREDIT 0x40
10619
10620#define _DSI_CMD_TXHDR_0 0x6b100
10621#define _DSI_CMD_TXHDR_1 0x6b900
10622#define DSI_CMD_TXHDR(tc) _MMIO_DSI(tc, \
10623 _DSI_CMD_TXHDR_0,\
10624 _DSI_CMD_TXHDR_1)
10625#define PAYLOAD_PRESENT (1 << 31)
10626#define LP_DATA_TRANSFER (1 << 30)
10627#define VBLANK_FENCE (1 << 29)
10628#define PARAM_WC_MASK (0xffff << 8)
10629#define PARAM_WC_LOWER_SHIFT 8
10630#define PARAM_WC_UPPER_SHIFT 16
10631#define VC_MASK (0x3 << 6)
10632#define VC_SHIFT 6
10633#define DT_MASK (0x3f << 0)
10634#define DT_SHIFT 0
10635
10636#define _DSI_CMD_TXPYLD_0 0x6b104
10637#define _DSI_CMD_TXPYLD_1 0x6b904
10638#define DSI_CMD_TXPYLD(tc) _MMIO_DSI(tc, \
10639 _DSI_CMD_TXPYLD_0,\
10640 _DSI_CMD_TXPYLD_1)
10641
10642#define _DSI_LP_MSG_0 0x6b0d8
10643#define _DSI_LP_MSG_1 0x6b8d8
10644#define DSI_LP_MSG(tc) _MMIO_DSI(tc, \
10645 _DSI_LP_MSG_0,\
10646 _DSI_LP_MSG_1)
10647#define LPTX_IN_PROGRESS (1 << 17)
10648#define LINK_IN_ULPS (1 << 16)
10649#define LINK_ULPS_TYPE_LP11 (1 << 8)
10650#define LINK_ENTER_ULPS (1 << 0)
10651
10652/* DSI timeout registers */
10653#define _DSI_HSTX_TO_0 0x6b044
10654#define _DSI_HSTX_TO_1 0x6b844
10655#define DSI_HSTX_TO(tc) _MMIO_DSI(tc, \
10656 _DSI_HSTX_TO_0,\
10657 _DSI_HSTX_TO_1)
10658#define HSTX_TIMEOUT_VALUE_MASK (0xffff << 16)
10659#define HSTX_TIMEOUT_VALUE_SHIFT 16
10660#define HSTX_TIMEOUT_VALUE(x) ((x) << 16)
10661#define HSTX_TIMED_OUT (1 << 0)
10662
10663#define _DSI_LPRX_HOST_TO_0 0x6b048
10664#define _DSI_LPRX_HOST_TO_1 0x6b848
10665#define DSI_LPRX_HOST_TO(tc) _MMIO_DSI(tc, \
10666 _DSI_LPRX_HOST_TO_0,\
10667 _DSI_LPRX_HOST_TO_1)
10668#define LPRX_TIMED_OUT (1 << 16)
10669#define LPRX_TIMEOUT_VALUE_MASK (0xffff << 0)
10670#define LPRX_TIMEOUT_VALUE_SHIFT 0
10671#define LPRX_TIMEOUT_VALUE(x) ((x) << 0)
10672
10673#define _DSI_PWAIT_TO_0 0x6b040
10674#define _DSI_PWAIT_TO_1 0x6b840
10675#define DSI_PWAIT_TO(tc) _MMIO_DSI(tc, \
10676 _DSI_PWAIT_TO_0,\
10677 _DSI_PWAIT_TO_1)
10678#define PRESET_TIMEOUT_VALUE_MASK (0xffff << 16)
10679#define PRESET_TIMEOUT_VALUE_SHIFT 16
10680#define PRESET_TIMEOUT_VALUE(x) ((x) << 16)
10681#define PRESPONSE_TIMEOUT_VALUE_MASK (0xffff << 0)
10682#define PRESPONSE_TIMEOUT_VALUE_SHIFT 0
10683#define PRESPONSE_TIMEOUT_VALUE(x) ((x) << 0)
10684
10685#define _DSI_TA_TO_0 0x6b04c
10686#define _DSI_TA_TO_1 0x6b84c
10687#define DSI_TA_TO(tc) _MMIO_DSI(tc, \
10688 _DSI_TA_TO_0,\
10689 _DSI_TA_TO_1)
10690#define TA_TIMED_OUT (1 << 16)
10691#define TA_TIMEOUT_VALUE_MASK (0xffff << 0)
10692#define TA_TIMEOUT_VALUE_SHIFT 0
10693#define TA_TIMEOUT_VALUE(x) ((x) << 0)
10694
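
The timeout registers above share the same value()/mask/shift pattern. A sketch
with illustrative timeout counts:

#include <stdint.h>
#include <stdio.h>

/* Field helpers from the diff; the counts below are illustrative. */
#define HSTX_TIMEOUT_VALUE(x)		((uint32_t)(x) << 16)
#define LPRX_TIMEOUT_VALUE(x)		((uint32_t)(x) << 0)
#define PRESET_TIMEOUT_VALUE(x)		((uint32_t)(x) << 16)
#define PRESPONSE_TIMEOUT_VALUE(x)	((uint32_t)(x) << 0)

int main(void)
{
	printf("DSI_HSTX_TO      = %#x\n", HSTX_TIMEOUT_VALUE(0xffff));
	printf("DSI_LPRX_HOST_TO = %#x\n", LPRX_TIMEOUT_VALUE(0xffff));
	printf("DSI_PWAIT_TO     = %#x\n",
	       PRESET_TIMEOUT_VALUE(0x3ff) | PRESPONSE_TIMEOUT_VALUE(0x1fff));
	return 0;
}
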
10295/* bits 31:0 */ 10695/* bits 31:0 */
10296#define _MIPIA_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb084) 10696#define _MIPIA_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb084)
10297#define _MIPIC_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884) 10697#define _MIPIC_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884)
@@ -10404,10 +10804,6 @@ enum skl_power_gate {
10404#define MIPI_READ_DATA_VALID(port) _MMIO_MIPI(port, _MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID) 10804#define MIPI_READ_DATA_VALID(port) _MMIO_MIPI(port, _MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID)
10405#define READ_DATA_VALID(n) (1 << (n)) 10805#define READ_DATA_VALID(n) (1 << (n))
10406 10806
10407/* For UMS only (deprecated): */
10408#define _PALETTE_A (dev_priv->info.display_mmio_offset + 0xa000)
10409#define _PALETTE_B (dev_priv->info.display_mmio_offset + 0xa800)
10410
10411/* MOCS (Memory Object Control State) registers */ 10807/* MOCS (Memory Object Control State) registers */
10412#define GEN9_LNCFCMOCS(i) _MMIO(0xb020 + (i) * 4) /* L3 Cache Control */ 10808#define GEN9_LNCFCMOCS(i) _MMIO(0xb020 + (i) * 4) /* L3 Cache Control */
10413 10809
@@ -10693,6 +11089,7 @@ enum skl_power_gate {
10693#define ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 11089#define ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
10694 _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB, \ 11090 _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB, \
10695 _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC) 11091 _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC)
11092#define DSC_SLICE_ROW_PER_FRAME(slice_row_per_frame) ((slice_row_per_frame) << 20)
10696#define DSC_SLICE_PER_LINE(slice_per_line) ((slice_per_line) << 16) 11093#define DSC_SLICE_PER_LINE(slice_per_line) ((slice_per_line) << 16)
10697#define DSC_SLICE_CHUNK_SIZE(slice_chunk_size) ((slice_chunk_size) << 0) 11094#define DSC_SLICE_CHUNK_SIZE(slice_chunk_size) ((slice_chunk_size) << 0)
10698 11095
@@ -10747,17 +11144,17 @@ enum skl_power_gate {
10747 _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB, \ 11144 _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB, \
10748 _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC) 11145 _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC)
10749 11146
10750#define PORT_TX_DFLEXDPSP _MMIO(0x1638A0) 11147#define PORT_TX_DFLEXDPSP _MMIO(FIA1_BASE + 0x008A0)
10751#define TC_LIVE_STATE_TBT(tc_port) (1 << ((tc_port) * 8 + 6)) 11148#define TC_LIVE_STATE_TBT(tc_port) (1 << ((tc_port) * 8 + 6))
10752#define TC_LIVE_STATE_TC(tc_port) (1 << ((tc_port) * 8 + 5)) 11149#define TC_LIVE_STATE_TC(tc_port) (1 << ((tc_port) * 8 + 5))
10753#define DP_LANE_ASSIGNMENT_SHIFT(tc_port) ((tc_port) * 8) 11150#define DP_LANE_ASSIGNMENT_SHIFT(tc_port) ((tc_port) * 8)
10754#define DP_LANE_ASSIGNMENT_MASK(tc_port) (0xf << ((tc_port) * 8)) 11151#define DP_LANE_ASSIGNMENT_MASK(tc_port) (0xf << ((tc_port) * 8))
10755#define DP_LANE_ASSIGNMENT(tc_port, x) ((x) << ((tc_port) * 8)) 11152#define DP_LANE_ASSIGNMENT(tc_port, x) ((x) << ((tc_port) * 8))
10756 11153
10757#define PORT_TX_DFLEXDPPMS _MMIO(0x163890) 11154#define PORT_TX_DFLEXDPPMS _MMIO(FIA1_BASE + 0x00890)
10758#define DP_PHY_MODE_STATUS_COMPLETED(tc_port) (1 << (tc_port)) 11155#define DP_PHY_MODE_STATUS_COMPLETED(tc_port) (1 << (tc_port))
10759 11156
10760#define PORT_TX_DFLEXDPCSSS _MMIO(0x163894) 11157#define PORT_TX_DFLEXDPCSSS _MMIO(FIA1_BASE + 0x00894)
10761#define DP_PHY_MODE_STATUS_NOT_SAFE(tc_port) (1 << (tc_port)) 11158#define DP_PHY_MODE_STATUS_NOT_SAFE(tc_port) (1 << (tc_port))
10762 11159
10763#endif /* _I915_REG_H_ */ 11160#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index a492385b2089..71107540581d 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -111,91 +111,6 @@ i915_request_remove_from_client(struct i915_request *request)
111 spin_unlock(&file_priv->mm.lock); 111 spin_unlock(&file_priv->mm.lock);
112} 112}
113 113
114static struct i915_dependency *
115i915_dependency_alloc(struct drm_i915_private *i915)
116{
117 return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
118}
119
120static void
121i915_dependency_free(struct drm_i915_private *i915,
122 struct i915_dependency *dep)
123{
124 kmem_cache_free(i915->dependencies, dep);
125}
126
127static void
128__i915_sched_node_add_dependency(struct i915_sched_node *node,
129 struct i915_sched_node *signal,
130 struct i915_dependency *dep,
131 unsigned long flags)
132{
133 INIT_LIST_HEAD(&dep->dfs_link);
134 list_add(&dep->wait_link, &signal->waiters_list);
135 list_add(&dep->signal_link, &node->signalers_list);
136 dep->signaler = signal;
137 dep->flags = flags;
138}
139
140static int
141i915_sched_node_add_dependency(struct drm_i915_private *i915,
142 struct i915_sched_node *node,
143 struct i915_sched_node *signal)
144{
145 struct i915_dependency *dep;
146
147 dep = i915_dependency_alloc(i915);
148 if (!dep)
149 return -ENOMEM;
150
151 __i915_sched_node_add_dependency(node, signal, dep,
152 I915_DEPENDENCY_ALLOC);
153 return 0;
154}
155
156static void
157i915_sched_node_fini(struct drm_i915_private *i915,
158 struct i915_sched_node *node)
159{
160 struct i915_dependency *dep, *tmp;
161
162 GEM_BUG_ON(!list_empty(&node->link));
163
164 /*
165 * Everyone we depended upon (the fences we wait to be signaled)
166 * should retire before us and remove themselves from our list.
167 * However, retirement is run independently on each timeline and
168 * so we may be called out-of-order.
169 */
170 list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
171 GEM_BUG_ON(!i915_sched_node_signaled(dep->signaler));
172 GEM_BUG_ON(!list_empty(&dep->dfs_link));
173
174 list_del(&dep->wait_link);
175 if (dep->flags & I915_DEPENDENCY_ALLOC)
176 i915_dependency_free(i915, dep);
177 }
178
179 /* Remove ourselves from everyone who depends upon us */
180 list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
181 GEM_BUG_ON(dep->signaler != node);
182 GEM_BUG_ON(!list_empty(&dep->dfs_link));
183
184 list_del(&dep->signal_link);
185 if (dep->flags & I915_DEPENDENCY_ALLOC)
186 i915_dependency_free(i915, dep);
187 }
188}
189
190static void
191i915_sched_node_init(struct i915_sched_node *node)
192{
193 INIT_LIST_HEAD(&node->signalers_list);
194 INIT_LIST_HEAD(&node->waiters_list);
195 INIT_LIST_HEAD(&node->link);
196 node->attr.priority = I915_PRIORITY_INVALID;
197}
198
199static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno) 114static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
200{ 115{
201 struct intel_engine_cs *engine; 116 struct intel_engine_cs *engine;
@@ -221,6 +136,8 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
221 intel_engine_get_seqno(engine), 136 intel_engine_get_seqno(engine),
222 seqno); 137 seqno);
223 138
139 kthread_park(engine->breadcrumbs.signaler);
140
224 if (!i915_seqno_passed(seqno, engine->timeline.seqno)) { 141 if (!i915_seqno_passed(seqno, engine->timeline.seqno)) {
225 /* Flush any waiters before we reuse the seqno */ 142 /* Flush any waiters before we reuse the seqno */
226 intel_engine_disarm_breadcrumbs(engine); 143 intel_engine_disarm_breadcrumbs(engine);
@@ -235,6 +152,8 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
235 /* Finally reset hw state */ 152 /* Finally reset hw state */
236 intel_engine_init_global_seqno(engine, seqno); 153 intel_engine_init_global_seqno(engine, seqno);
237 engine->timeline.seqno = seqno; 154 engine->timeline.seqno = seqno;
155
156 kthread_unpark(engine->breadcrumbs.signaler);
238 } 157 }
239 158
240 list_for_each_entry(timeline, &i915->gt.timelines, link) 159 list_for_each_entry(timeline, &i915->gt.timelines, link)
@@ -740,17 +659,6 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
740 if (rq) 659 if (rq)
741 cond_synchronize_rcu(rq->rcustate); 660 cond_synchronize_rcu(rq->rcustate);
742 661
743 /*
744 * We've forced the client to stall and catch up with whatever
745 * backlog there might have been. As we are assuming that we
746 * caused the mempressure, now is an opportune time to
747 * recover as much memory from the request pool as is possible.
748 * Having already penalized the client to stall, we spend
749 * a little extra time to re-optimise page allocation.
750 */
751 kmem_cache_shrink(i915->requests);
752 rcu_barrier(); /* Recover the TYPESAFE_BY_RCU pages */
753
754 rq = kmem_cache_alloc(i915->requests, GFP_KERNEL); 662 rq = kmem_cache_alloc(i915->requests, GFP_KERNEL);
755 if (!rq) { 663 if (!rq) {
756 ret = -ENOMEM; 664 ret = -ENOMEM;
@@ -1127,8 +1035,20 @@ void i915_request_add(struct i915_request *request)
1127 */ 1035 */
1128 local_bh_disable(); 1036 local_bh_disable();
1129 rcu_read_lock(); /* RCU serialisation for set-wedged protection */ 1037 rcu_read_lock(); /* RCU serialisation for set-wedged protection */
1130 if (engine->schedule) 1038 if (engine->schedule) {
1131 engine->schedule(request, &request->gem_context->sched); 1039 struct i915_sched_attr attr = request->gem_context->sched;
1040
1041 /*
1042 * Boost priorities to new clients (new request flows).
1043 *
1044 * Allow interactive/synchronous clients to jump ahead of
1045 * the bulk clients. (FQ_CODEL)
1046 */
1047 if (!prev || i915_request_completed(prev))
1048 attr.priority |= I915_PRIORITY_NEWCLIENT;
1049
1050 engine->schedule(request, &attr);
1051 }
1132 rcu_read_unlock(); 1052 rcu_read_unlock();
1133 i915_sw_fence_commit(&request->submit); 1053 i915_sw_fence_commit(&request->submit);
1134 local_bh_enable(); /* Kick the execlists tasklet if just scheduled */ 1054 local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
@@ -1310,6 +1230,8 @@ long i915_request_wait(struct i915_request *rq,
1310 add_wait_queue(errq, &reset); 1230 add_wait_queue(errq, &reset);
1311 1231
1312 intel_wait_init(&wait); 1232 intel_wait_init(&wait);
1233 if (flags & I915_WAIT_PRIORITY)
1234 i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
1313 1235
1314restart: 1236restart:
1315 do { 1237 do {
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 7fa94b024968..90e9d170a0cd 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -277,8 +277,9 @@ long i915_request_wait(struct i915_request *rq,
277 __attribute__((nonnull(1))); 277 __attribute__((nonnull(1)));
278#define I915_WAIT_INTERRUPTIBLE BIT(0) 278#define I915_WAIT_INTERRUPTIBLE BIT(0)
279#define I915_WAIT_LOCKED BIT(1) /* struct_mutex held, handle GPU reset */ 279#define I915_WAIT_LOCKED BIT(1) /* struct_mutex held, handle GPU reset */
280#define I915_WAIT_ALL BIT(2) /* used by i915_gem_object_wait() */ 280#define I915_WAIT_PRIORITY BIT(2) /* small priority bump for the request */
281#define I915_WAIT_FOR_IDLE_BOOST BIT(3) 281#define I915_WAIT_ALL BIT(3) /* used by i915_gem_object_wait() */
282#define I915_WAIT_FOR_IDLE_BOOST BIT(4)
282 283
283static inline bool intel_engine_has_started(struct intel_engine_cs *engine, 284static inline bool intel_engine_has_started(struct intel_engine_cs *engine,
284 u32 seqno); 285 u32 seqno);
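
With the renumbering above, a waiter opts into the small bump by OR-ing
I915_WAIT_PRIORITY into its flags. A standalone rendering of the new layout,
with BIT() expanded so it builds outside the kernel:

#include <stdio.h>

#define BIT(n) (1u << (n))
#define I915_WAIT_INTERRUPTIBLE		BIT(0)
#define I915_WAIT_LOCKED		BIT(1)
#define I915_WAIT_PRIORITY		BIT(2)
#define I915_WAIT_ALL			BIT(3)
#define I915_WAIT_FOR_IDLE_BOOST	BIT(4)

int main(void)
{
	/*
	 * A waiter that wants its request nudged up the queue combines the
	 * new PRIORITY bit with the usual interruptible wait, e.g. as flags
	 * for i915_request_wait(rq, flags, timeout) in the driver.
	 */
	unsigned int flags = I915_WAIT_INTERRUPTIBLE | I915_WAIT_PRIORITY;

	printf("wait flags = %#x\n", flags);
	return 0;
}
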
@@ -332,14 +333,6 @@ static inline bool i915_request_completed(const struct i915_request *rq)
332 return __i915_request_completed(rq, seqno); 333 return __i915_request_completed(rq, seqno);
333} 334}
334 335
335static inline bool i915_sched_node_signaled(const struct i915_sched_node *node)
336{
337 const struct i915_request *rq =
338 container_of(node, const struct i915_request, sched);
339
340 return i915_request_completed(rq);
341}
342
343void i915_retire_requests(struct drm_i915_private *i915); 336void i915_retire_requests(struct drm_i915_private *i915);
344 337
345/* 338/*
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
new file mode 100644
index 000000000000..340faea6c08a
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -0,0 +1,399 @@
1/*
2 * SPDX-License-Identifier: MIT
3 *
4 * Copyright © 2018 Intel Corporation
5 */
6
7#include <linux/mutex.h>
8
9#include "i915_drv.h"
10#include "i915_request.h"
11#include "i915_scheduler.h"
12
13static DEFINE_SPINLOCK(schedule_lock);
14
15static const struct i915_request *
16node_to_request(const struct i915_sched_node *node)
17{
18 return container_of(node, const struct i915_request, sched);
19}
20
21static inline bool node_signaled(const struct i915_sched_node *node)
22{
23 return i915_request_completed(node_to_request(node));
24}
25
26void i915_sched_node_init(struct i915_sched_node *node)
27{
28 INIT_LIST_HEAD(&node->signalers_list);
29 INIT_LIST_HEAD(&node->waiters_list);
30 INIT_LIST_HEAD(&node->link);
31 node->attr.priority = I915_PRIORITY_INVALID;
32}
33
34static struct i915_dependency *
35i915_dependency_alloc(struct drm_i915_private *i915)
36{
37 return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
38}
39
40static void
41i915_dependency_free(struct drm_i915_private *i915,
42 struct i915_dependency *dep)
43{
44 kmem_cache_free(i915->dependencies, dep);
45}
46
47bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
48 struct i915_sched_node *signal,
49 struct i915_dependency *dep,
50 unsigned long flags)
51{
52 bool ret = false;
53
54 spin_lock(&schedule_lock);
55
56 if (!node_signaled(signal)) {
57 INIT_LIST_HEAD(&dep->dfs_link);
58 list_add(&dep->wait_link, &signal->waiters_list);
59 list_add(&dep->signal_link, &node->signalers_list);
60 dep->signaler = signal;
61 dep->flags = flags;
62
63 ret = true;
64 }
65
66 spin_unlock(&schedule_lock);
67
68 return ret;
69}
70
71int i915_sched_node_add_dependency(struct drm_i915_private *i915,
72 struct i915_sched_node *node,
73 struct i915_sched_node *signal)
74{
75 struct i915_dependency *dep;
76
77 dep = i915_dependency_alloc(i915);
78 if (!dep)
79 return -ENOMEM;
80
81 if (!__i915_sched_node_add_dependency(node, signal, dep,
82 I915_DEPENDENCY_ALLOC))
83 i915_dependency_free(i915, dep);
84
85 return 0;
86}
87
88void i915_sched_node_fini(struct drm_i915_private *i915,
89 struct i915_sched_node *node)
90{
91 struct i915_dependency *dep, *tmp;
92
93 GEM_BUG_ON(!list_empty(&node->link));
94
95 spin_lock(&schedule_lock);
96
97 /*
98 * Everyone we depended upon (the fences we wait to be signaled)
99 * should retire before us and remove themselves from our list.
100 * However, retirement is run independently on each timeline and
101 * so we may be called out-of-order.
102 */
103 list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
104 GEM_BUG_ON(!node_signaled(dep->signaler));
105 GEM_BUG_ON(!list_empty(&dep->dfs_link));
106
107 list_del(&dep->wait_link);
108 if (dep->flags & I915_DEPENDENCY_ALLOC)
109 i915_dependency_free(i915, dep);
110 }
111
112 /* Remove ourselves from everyone who depends upon us */
113 list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
114 GEM_BUG_ON(dep->signaler != node);
115 GEM_BUG_ON(!list_empty(&dep->dfs_link));
116
117 list_del(&dep->signal_link);
118 if (dep->flags & I915_DEPENDENCY_ALLOC)
119 i915_dependency_free(i915, dep);
120 }
121
122 spin_unlock(&schedule_lock);
123}
124
125static inline struct i915_priolist *to_priolist(struct rb_node *rb)
126{
127 return rb_entry(rb, struct i915_priolist, node);
128}
129
130static void assert_priolists(struct intel_engine_execlists * const execlists,
131 long queue_priority)
132{
133 struct rb_node *rb;
134 long last_prio, i;
135
136 if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
137 return;
138
139 GEM_BUG_ON(rb_first_cached(&execlists->queue) !=
140 rb_first(&execlists->queue.rb_root));
141
142 last_prio = (queue_priority >> I915_USER_PRIORITY_SHIFT) + 1;
143 for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
144 const struct i915_priolist *p = to_priolist(rb);
145
146 GEM_BUG_ON(p->priority >= last_prio);
147 last_prio = p->priority;
148
149 GEM_BUG_ON(!p->used);
150 for (i = 0; i < ARRAY_SIZE(p->requests); i++) {
151 if (list_empty(&p->requests[i]))
152 continue;
153
154 GEM_BUG_ON(!(p->used & BIT(i)));
155 }
156 }
157}
158
159struct list_head *
160i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
161{
162 struct intel_engine_execlists * const execlists = &engine->execlists;
163 struct i915_priolist *p;
164 struct rb_node **parent, *rb;
165 bool first = true;
166 int idx, i;
167
168 lockdep_assert_held(&engine->timeline.lock);
169 assert_priolists(execlists, INT_MAX);
170
171 /* buckets sorted from highest [in slot 0] to lowest priority */
172 idx = I915_PRIORITY_COUNT - (prio & I915_PRIORITY_MASK) - 1;
173 prio >>= I915_USER_PRIORITY_SHIFT;
174 if (unlikely(execlists->no_priolist))
175 prio = I915_PRIORITY_NORMAL;
176
177find_priolist:
178 /* most positive priority is scheduled first, equal priorities fifo */
179 rb = NULL;
180 parent = &execlists->queue.rb_root.rb_node;
181 while (*parent) {
182 rb = *parent;
183 p = to_priolist(rb);
184 if (prio > p->priority) {
185 parent = &rb->rb_left;
186 } else if (prio < p->priority) {
187 parent = &rb->rb_right;
188 first = false;
189 } else {
190 goto out;
191 }
192 }
193
194 if (prio == I915_PRIORITY_NORMAL) {
195 p = &execlists->default_priolist;
196 } else {
197 p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
198 /* Convert an allocation failure to a priority bump */
199 if (unlikely(!p)) {
200 prio = I915_PRIORITY_NORMAL; /* recurses just once */
201
202 /* To maintain ordering with all rendering, after an
203 * allocation failure we have to disable all scheduling.
204 * Requests will then be executed in fifo, and schedule
205 * will ensure that dependencies are emitted in fifo.
206 * There will be still some reordering with existing
207 * requests, so if userspace lied about their
208 * dependencies that reordering may be visible.
209 */
210 execlists->no_priolist = true;
211 goto find_priolist;
212 }
213 }
214
215 p->priority = prio;
216 for (i = 0; i < ARRAY_SIZE(p->requests); i++)
217 INIT_LIST_HEAD(&p->requests[i]);
218 rb_link_node(&p->node, rb, parent);
219 rb_insert_color_cached(&p->node, &execlists->queue, first);
220 p->used = 0;
221
222out:
223 p->used |= BIT(idx);
224 return &p->requests[idx];
225}
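
i915_sched_lookup_priolist() splits the combined priority into a user part (the
rb-tree key) and low internal bonus bits that select a request bucket within the
node. A standalone sketch of that split, using the encoding from
i915_scheduler.h below:

#include <stdio.h>

/* Encoding from i915_scheduler.h; the demo harness is hypothetical. */
#define I915_USER_PRIORITY_SHIFT 2
#define I915_USER_PRIORITY(x)	((x) << I915_USER_PRIORITY_SHIFT)
#define I915_PRIORITY_COUNT	(1 << I915_USER_PRIORITY_SHIFT)
#define I915_PRIORITY_MASK	(I915_PRIORITY_COUNT - 1)
#define I915_PRIORITY_NEWCLIENT	(1 << 1)

int main(void)
{
	int prio = I915_USER_PRIORITY(0) | I915_PRIORITY_NEWCLIENT;

	/* Bucket 0 holds the highest bonus, bucket COUNT-1 the lowest. */
	int idx = I915_PRIORITY_COUNT - (prio & I915_PRIORITY_MASK) - 1;
	int user = prio >> I915_USER_PRIORITY_SHIFT;

	printf("user prio %d, bucket idx %d\n", user, idx);	/* 0, 1 */
	return 0;
}
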
226
227static struct intel_engine_cs *
228sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
229{
230 struct intel_engine_cs *engine = node_to_request(node)->engine;
231
232 GEM_BUG_ON(!locked);
233
234 if (engine != locked) {
235 spin_unlock(&locked->timeline.lock);
236 spin_lock(&engine->timeline.lock);
237 }
238
239 return engine;
240}
241
242static void __i915_schedule(struct i915_request *rq,
243 const struct i915_sched_attr *attr)
244{
245 struct list_head *uninitialized_var(pl);
246 struct intel_engine_cs *engine, *last;
247 struct i915_dependency *dep, *p;
248 struct i915_dependency stack;
249 const int prio = attr->priority;
250 LIST_HEAD(dfs);
251
252 /* Needed in order to use the temporary link inside i915_dependency */
253 lockdep_assert_held(&schedule_lock);
254 GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
255
256 if (i915_request_completed(rq))
257 return;
258
259 if (prio <= READ_ONCE(rq->sched.attr.priority))
260 return;
261
262 stack.signaler = &rq->sched;
263 list_add(&stack.dfs_link, &dfs);
264
265 /*
266 * Recursively bump all dependent priorities to match the new request.
267 *
268 * A naive approach would be to use recursion:
269 * static void update_priorities(struct i915_sched_node *node, prio) {
270 * list_for_each_entry(dep, &node->signalers_list, signal_link)
271 * update_priorities(dep->signal, prio)
272 * queue_request(node);
273 * }
274 * but that may have unlimited recursion depth and so runs a very
 275 * real risk of overrunning the kernel stack. Instead, we build
276 * a flat list of all dependencies starting with the current request.
277 * As we walk the list of dependencies, we add all of its dependencies
278 * to the end of the list (this may include an already visited
279 * request) and continue to walk onwards onto the new dependencies. The
280 * end result is a topological list of requests in reverse order, the
281 * last element in the list is the request we must execute first.
282 */
283 list_for_each_entry(dep, &dfs, dfs_link) {
284 struct i915_sched_node *node = dep->signaler;
285
286 /*
287 * Within an engine, there can be no cycle, but we may
288 * refer to the same dependency chain multiple times
289 * (redundant dependencies are not eliminated) and across
290 * engines.
291 */
292 list_for_each_entry(p, &node->signalers_list, signal_link) {
293 GEM_BUG_ON(p == dep); /* no cycles! */
294
295 if (node_signaled(p->signaler))
296 continue;
297
298 GEM_BUG_ON(p->signaler->attr.priority < node->attr.priority);
299 if (prio > READ_ONCE(p->signaler->attr.priority))
300 list_move_tail(&p->dfs_link, &dfs);
301 }
302 }
303
304 /*
305 * If we didn't need to bump any existing priorities, and we haven't
306 * yet submitted this request (i.e. there is no potential race with
307 * execlists_submit_request()), we can set our own priority and skip
308 * acquiring the engine locks.
309 */
310 if (rq->sched.attr.priority == I915_PRIORITY_INVALID) {
311 GEM_BUG_ON(!list_empty(&rq->sched.link));
312 rq->sched.attr = *attr;
313
314 if (stack.dfs_link.next == stack.dfs_link.prev)
315 return;
316
317 __list_del_entry(&stack.dfs_link);
318 }
319
320 last = NULL;
321 engine = rq->engine;
322 spin_lock_irq(&engine->timeline.lock);
323
324 /* Fifo and depth-first replacement ensure our deps execute before us */
325 list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
326 struct i915_sched_node *node = dep->signaler;
327
328 INIT_LIST_HEAD(&dep->dfs_link);
329
330 engine = sched_lock_engine(node, engine);
331
332 /* Recheck after acquiring the engine->timeline.lock */
333 if (prio <= node->attr.priority || node_signaled(node))
334 continue;
335
336 node->attr.priority = prio;
337 if (!list_empty(&node->link)) {
338 if (last != engine) {
339 pl = i915_sched_lookup_priolist(engine, prio);
340 last = engine;
341 }
342 list_move_tail(&node->link, pl);
343 } else {
344 /*
345 * If the request is not in the priolist queue because
346 * it is not yet runnable, then it doesn't contribute
347 * to our preemption decisions. On the other hand,
348 * if the request is on the HW, it too is not in the
349 * queue; but in that case we may still need to reorder
350 * the inflight requests.
351 */
352 if (!i915_sw_fence_done(&node_to_request(node)->submit))
353 continue;
354 }
355
356 if (prio <= engine->execlists.queue_priority)
357 continue;
358
359 /*
360 * If we are already the currently executing context, don't
361 * bother evaluating if we should preempt ourselves.
362 */
363 if (node_to_request(node)->global_seqno &&
364 i915_seqno_passed(port_request(engine->execlists.port)->global_seqno,
365 node_to_request(node)->global_seqno))
366 continue;
367
368 /* Defer (tasklet) submission until after all of our updates. */
369 engine->execlists.queue_priority = prio;
370 tasklet_hi_schedule(&engine->execlists.tasklet);
371 }
372
373 spin_unlock_irq(&engine->timeline.lock);
374}
375
376void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
377{
378 spin_lock(&schedule_lock);
379 __i915_schedule(rq, attr);
380 spin_unlock(&schedule_lock);
381}
382
383void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
384{
385 struct i915_sched_attr attr;
386
387 GEM_BUG_ON(bump & ~I915_PRIORITY_MASK);
388
389 if (READ_ONCE(rq->sched.attr.priority) == I915_PRIORITY_INVALID)
390 return;
391
392 spin_lock_bh(&schedule_lock);
393
394 attr = rq->sched.attr;
395 attr.priority |= bump;
396 __i915_schedule(rq, &attr);
397
398 spin_unlock_bh(&schedule_lock);
399}
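
The comment block inside __i915_schedule() describes replacing recursion with a
flattened dependency list walked in reverse. A toy standalone rendering of that
pattern; it skips the revisit handling (list_move_tail) and all locking of the
real code:

#include <stdio.h>

/* Minimal stand-ins; only the traversal pattern mirrors __i915_schedule(). */
#define MAX_NODES 8

struct node {
	int prio;
	int nsignalers;
	struct node *signalers[MAX_NODES];
};

int main(void)
{
	struct node a = { .prio = 0 }, b = { .prio = 0 };
	struct node rq = { .prio = 0, .nsignalers = 2,
			   .signalers = { &a, &b } };
	struct node *dfs[MAX_NODES];
	int head = 0, tail = 0, new_prio = 5, i;

	/* Flatten the dependency graph instead of recursing. */
	dfs[tail++] = &rq;
	while (head < tail) {
		struct node *n = dfs[head++];

		for (i = 0; i < n->nsignalers; i++)
			if (new_prio > n->signalers[i]->prio)
				dfs[tail++] = n->signalers[i];
	}

	/* Walk in reverse so dependencies are bumped before their waiters. */
	while (tail--)
		dfs[tail]->prio = new_prio;

	printf("a=%d b=%d rq=%d\n", a.prio, b.prio, rq.prio);
	return 0;
}
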
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 70a42220358d..dbe9cb7ecd82 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -8,9 +8,14 @@
8#define _I915_SCHEDULER_H_ 8#define _I915_SCHEDULER_H_
9 9
10#include <linux/bitops.h> 10#include <linux/bitops.h>
11#include <linux/kernel.h>
11 12
12#include <uapi/drm/i915_drm.h> 13#include <uapi/drm/i915_drm.h>
13 14
15struct drm_i915_private;
16struct i915_request;
17struct intel_engine_cs;
18
14enum { 19enum {
15 I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1, 20 I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
16 I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY, 21 I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
@@ -19,6 +24,15 @@ enum {
19 I915_PRIORITY_INVALID = INT_MIN 24 I915_PRIORITY_INVALID = INT_MIN
20}; 25};
21 26
27#define I915_USER_PRIORITY_SHIFT 2
28#define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT)
29
30#define I915_PRIORITY_COUNT BIT(I915_USER_PRIORITY_SHIFT)
31#define I915_PRIORITY_MASK (I915_PRIORITY_COUNT - 1)
32
33#define I915_PRIORITY_WAIT ((u8)BIT(0))
34#define I915_PRIORITY_NEWCLIENT ((u8)BIT(1))
35
22struct i915_sched_attr { 36struct i915_sched_attr {
23 /** 37 /**
24 * @priority: execution and service priority 38 * @priority: execution and service priority
@@ -69,4 +83,26 @@ struct i915_dependency {
69#define I915_DEPENDENCY_ALLOC BIT(0) 83#define I915_DEPENDENCY_ALLOC BIT(0)
70}; 84};
71 85
86void i915_sched_node_init(struct i915_sched_node *node);
87
88bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
89 struct i915_sched_node *signal,
90 struct i915_dependency *dep,
91 unsigned long flags);
92
93int i915_sched_node_add_dependency(struct drm_i915_private *i915,
94 struct i915_sched_node *node,
95 struct i915_sched_node *signal);
96
97void i915_sched_node_fini(struct drm_i915_private *i915,
98 struct i915_sched_node *node);
99
100void i915_schedule(struct i915_request *request,
101 const struct i915_sched_attr *attr);
102
103void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump);
104
105struct list_head *
106i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio);
107
72#endif /* _I915_SCHEDULER_H_ */ 108#endif /* _I915_SCHEDULER_H_ */
diff --git a/drivers/gpu/drm/i915/i915_syncmap.c b/drivers/gpu/drm/i915/i915_syncmap.c
index 58f8d0cc125c..60404dbb2e9f 100644
--- a/drivers/gpu/drm/i915/i915_syncmap.c
+++ b/drivers/gpu/drm/i915/i915_syncmap.c
@@ -92,7 +92,7 @@ void i915_syncmap_init(struct i915_syncmap **root)
92{ 92{
93 BUILD_BUG_ON_NOT_POWER_OF_2(KSYNCMAP); 93 BUILD_BUG_ON_NOT_POWER_OF_2(KSYNCMAP);
94 BUILD_BUG_ON_NOT_POWER_OF_2(SHIFT); 94 BUILD_BUG_ON_NOT_POWER_OF_2(SHIFT);
95 BUILD_BUG_ON(KSYNCMAP > BITS_PER_BYTE * sizeof((*root)->bitmap)); 95 BUILD_BUG_ON(KSYNCMAP > BITS_PER_TYPE((*root)->bitmap));
96 *root = NULL; 96 *root = NULL;
97} 97}
98 98
diff --git a/drivers/gpu/drm/i915/i915_timeline.h b/drivers/gpu/drm/i915/i915_timeline.h
index a2c2c3ab5fb0..ebd71b487220 100644
--- a/drivers/gpu/drm/i915/i915_timeline.h
+++ b/drivers/gpu/drm/i915/i915_timeline.h
@@ -83,6 +83,25 @@ void i915_timeline_init(struct drm_i915_private *i915,
83 const char *name); 83 const char *name);
84void i915_timeline_fini(struct i915_timeline *tl); 84void i915_timeline_fini(struct i915_timeline *tl);
85 85
86static inline void
87i915_timeline_set_subclass(struct i915_timeline *timeline,
88 unsigned int subclass)
89{
90 lockdep_set_subclass(&timeline->lock, subclass);
91
92 /*
93 * Due to an interesting quirk in lockdep's internal debug tracking,
94 * after setting a subclass we must ensure the lock is used. Otherwise,
95 * nr_unused_locks is incremented once too often.
96 */
97#ifdef CONFIG_DEBUG_LOCK_ALLOC
98 local_irq_disable();
99 lock_map_acquire(&timeline->lock.dep_map);
100 lock_map_release(&timeline->lock.dep_map);
101 local_irq_enable();
102#endif
103}
104
86struct i915_timeline * 105struct i915_timeline *
87i915_timeline_create(struct drm_i915_private *i915, const char *name); 106i915_timeline_create(struct drm_i915_private *i915, const char *name);
88 107
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 395dd2511568..5858a43e19da 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -68,7 +68,7 @@
68 68
69/* Note we don't consider signbits :| */ 69/* Note we don't consider signbits :| */
70#define overflows_type(x, T) \ 70#define overflows_type(x, T) \
71 (sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE)) 71 (sizeof(x) > sizeof(T) && (x) >> BITS_PER_TYPE(T))
72 72
73#define ptr_mask_bits(ptr, n) ({ \ 73#define ptr_mask_bits(ptr, n) ({ \
74 unsigned long __v = (unsigned long)(ptr); \ 74 unsigned long __v = (unsigned long)(ptr); \
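
BITS_PER_TYPE(T) expands to sizeof(T) * BITS_PER_BYTE, so the change above is a
pure simplification. A user-space sketch of the macro pair:

#include <stdio.h>
#include <stdint.h>

/* Userspace rendering of the kernel macros touched above. */
#define BITS_PER_BYTE 8
#define BITS_PER_TYPE(T) (sizeof(T) * BITS_PER_BYTE)
#define overflows_type(x, T) \
	(sizeof(x) > sizeof(T) && (x) >> BITS_PER_TYPE(T))

int main(void)
{
	uint64_t big = 1ull << 40;

	/* A 64-bit value with bits above 32 set overflows a u32. */
	printf("overflows u32: %d\n", !!overflows_type(big, uint32_t));
	return 0;
}
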
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 35fce4c88629..5b4d78cdb4ca 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -305,12 +305,12 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
305 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); 305 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
306 GEM_BUG_ON(vma->size > vma->node.size); 306 GEM_BUG_ON(vma->size > vma->node.size);
307 307
308 if (GEM_WARN_ON(range_overflows(vma->node.start, 308 if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
309 vma->node.size, 309 vma->node.size,
310 vma->vm->total))) 310 vma->vm->total)))
311 return -ENODEV; 311 return -ENODEV;
312 312
313 if (GEM_WARN_ON(!flags)) 313 if (GEM_DEBUG_WARN_ON(!flags))
314 return -EINVAL; 314 return -EINVAL;
315 315
316 bind_flags = 0; 316 bind_flags = 0;
diff --git a/drivers/gpu/drm/i915/icl_dsi.c b/drivers/gpu/drm/i915/icl_dsi.c
index 13830e43a4d1..01f422df8c23 100644
--- a/drivers/gpu/drm/i915/icl_dsi.c
+++ b/drivers/gpu/drm/i915/icl_dsi.c
@@ -25,8 +25,153 @@
25 * Jani Nikula <jani.nikula@intel.com> 25 * Jani Nikula <jani.nikula@intel.com>
26 */ 26 */
27 27
28#include <drm/drm_mipi_dsi.h>
28#include "intel_dsi.h" 29#include "intel_dsi.h"
29 30
31static inline int header_credits_available(struct drm_i915_private *dev_priv,
32 enum transcoder dsi_trans)
33{
34 return (I915_READ(DSI_CMD_TXCTL(dsi_trans)) & FREE_HEADER_CREDIT_MASK)
35 >> FREE_HEADER_CREDIT_SHIFT;
36}
37
38static inline int payload_credits_available(struct drm_i915_private *dev_priv,
39 enum transcoder dsi_trans)
40{
41 return (I915_READ(DSI_CMD_TXCTL(dsi_trans)) & FREE_PLOAD_CREDIT_MASK)
42 >> FREE_PLOAD_CREDIT_SHIFT;
43}
44
45static void wait_for_header_credits(struct drm_i915_private *dev_priv,
46 enum transcoder dsi_trans)
47{
48 if (wait_for_us(header_credits_available(dev_priv, dsi_trans) >=
49 MAX_HEADER_CREDIT, 100))
50 DRM_ERROR("DSI header credits not released\n");
51}
52
53static void wait_for_payload_credits(struct drm_i915_private *dev_priv,
54 enum transcoder dsi_trans)
55{
56 if (wait_for_us(payload_credits_available(dev_priv, dsi_trans) >=
57 MAX_PLOAD_CREDIT, 100))
58 DRM_ERROR("DSI payload credits not released\n");
59}
60
61static enum transcoder dsi_port_to_transcoder(enum port port)
62{
63 if (port == PORT_A)
64 return TRANSCODER_DSI_0;
65 else
66 return TRANSCODER_DSI_1;
67}
68
69static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder)
70{
71 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
72 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
73 struct mipi_dsi_device *dsi;
74 enum port port;
75 enum transcoder dsi_trans;
76 int ret;
77
78 /* wait for header/payload credits to be released */
79 for_each_dsi_port(port, intel_dsi->ports) {
80 dsi_trans = dsi_port_to_transcoder(port);
81 wait_for_header_credits(dev_priv, dsi_trans);
82 wait_for_payload_credits(dev_priv, dsi_trans);
83 }
84
85 /* send nop DCS command */
86 for_each_dsi_port(port, intel_dsi->ports) {
87 dsi = intel_dsi->dsi_hosts[port]->device;
88 dsi->mode_flags |= MIPI_DSI_MODE_LPM;
89 dsi->channel = 0;
90 ret = mipi_dsi_dcs_nop(dsi);
91 if (ret < 0)
92 DRM_ERROR("error sending DCS NOP command\n");
93 }
94
95 /* wait for header credits to be released */
96 for_each_dsi_port(port, intel_dsi->ports) {
97 dsi_trans = dsi_port_to_transcoder(port);
98 wait_for_header_credits(dev_priv, dsi_trans);
99 }
100
101 /* wait for LP TX in progress bit to be cleared */
102 for_each_dsi_port(port, intel_dsi->ports) {
103 dsi_trans = dsi_port_to_transcoder(port);
104 if (wait_for_us(!(I915_READ(DSI_LP_MSG(dsi_trans)) &
105 LPTX_IN_PROGRESS), 20))
106 DRM_ERROR("LPTX bit not cleared\n");
107 }
108}
109
110static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
111{
112 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
113 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
114 enum port port;
115 u32 tmp;
116 int lane;
117
118 for_each_dsi_port(port, intel_dsi->ports) {
119
120 /*
121 * Program voltage swing and pre-emphasis level values as per
122 * table in BSPEC under DDI buffer programing
123 */
124 tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port));
125 tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK);
126 tmp |= SCALING_MODE_SEL(0x2);
127 tmp |= TAP2_DISABLE | TAP3_DISABLE;
128 tmp |= RTERM_SELECT(0x6);
129 I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp);
130
131 tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port));
132 tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK);
133 tmp |= SCALING_MODE_SEL(0x2);
134 tmp |= TAP2_DISABLE | TAP3_DISABLE;
135 tmp |= RTERM_SELECT(0x6);
136 I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp);
137
138 tmp = I915_READ(ICL_PORT_TX_DW2_LN0(port));
139 tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
140 RCOMP_SCALAR_MASK);
141 tmp |= SWING_SEL_UPPER(0x2);
142 tmp |= SWING_SEL_LOWER(0x2);
143 tmp |= RCOMP_SCALAR(0x98);
144 I915_WRITE(ICL_PORT_TX_DW2_GRP(port), tmp);
145
146 tmp = I915_READ(ICL_PORT_TX_DW2_AUX(port));
147 tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
148 RCOMP_SCALAR_MASK);
149 tmp |= SWING_SEL_UPPER(0x2);
150 tmp |= SWING_SEL_LOWER(0x2);
151 tmp |= RCOMP_SCALAR(0x98);
152 I915_WRITE(ICL_PORT_TX_DW2_AUX(port), tmp);
153
154 tmp = I915_READ(ICL_PORT_TX_DW4_AUX(port));
155 tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
156 CURSOR_COEFF_MASK);
157 tmp |= POST_CURSOR_1(0x0);
158 tmp |= POST_CURSOR_2(0x0);
159 tmp |= CURSOR_COEFF(0x3f);
160 I915_WRITE(ICL_PORT_TX_DW4_AUX(port), tmp);
161
162 for (lane = 0; lane <= 3; lane++) {
163 /* Bspec: must not use GRP register for write */
164 tmp = I915_READ(ICL_PORT_TX_DW4_LN(port, lane));
165 tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
166 CURSOR_COEFF_MASK);
167 tmp |= POST_CURSOR_1(0x0);
168 tmp |= POST_CURSOR_2(0x0);
169 tmp |= CURSOR_COEFF(0x3f);
170 I915_WRITE(ICL_PORT_TX_DW4_LN(port, lane), tmp);
171 }
172 }
173}
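/*
 * Every register update in dsi_program_swing_and_deemphasis() follows the
 * same read-modify-write shape: clear the field with its mask, OR in the
 * new value, write back. A standalone sketch, with an assumed 3-bit field
 * at bits 6:4:
 */
#include <stdint.h>

#define SCALING_FIELD_MASK	(0x7u << 4)	/* assumed placement */
#define SCALING_FIELD(x)	(((x) & 0x7u) << 4)

static uint32_t rmw_field(uint32_t reg, uint32_t mask, uint32_t val)
{
	reg &= ~mask;	/* clear the old field contents */
	reg |= val;	/* install the new value */
	return reg;
}

/* e.g. rmw_field(tmp, SCALING_FIELD_MASK, SCALING_FIELD(0x2)) */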
174
30static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder)
31{
32 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -105,10 +250,553 @@ static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder)
105 }
106}
107
108static void gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder)
253static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
254{
255 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
256 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
257 enum port port;
258 u32 tmp;
259 int lane;
260
261 /* Step 4b(i) set loadgen select for transmit and aux lanes */
262 for_each_dsi_port(port, intel_dsi->ports) {
263 tmp = I915_READ(ICL_PORT_TX_DW4_AUX(port));
264 tmp &= ~LOADGEN_SELECT;
265 I915_WRITE(ICL_PORT_TX_DW4_AUX(port), tmp);
266 for (lane = 0; lane <= 3; lane++) {
267 tmp = I915_READ(ICL_PORT_TX_DW4_LN(port, lane));
268 tmp &= ~LOADGEN_SELECT;
269 if (lane != 2)
270 tmp |= LOADGEN_SELECT;
271 I915_WRITE(ICL_PORT_TX_DW4_LN(port, lane), tmp);
272 }
273 }
274
275 /* Step 4b(ii) set latency optimization for transmit and aux lanes */
276 for_each_dsi_port(port, intel_dsi->ports) {
277 tmp = I915_READ(ICL_PORT_TX_DW2_AUX(port));
278 tmp &= ~FRC_LATENCY_OPTIM_MASK;
279 tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
280 I915_WRITE(ICL_PORT_TX_DW2_AUX(port), tmp);
281 tmp = I915_READ(ICL_PORT_TX_DW2_LN0(port));
282 tmp &= ~FRC_LATENCY_OPTIM_MASK;
283 tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
284 I915_WRITE(ICL_PORT_TX_DW2_GRP(port), tmp);
285 }
286
287}
288
289static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
290{
291 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
292 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
293 u32 tmp;
294 enum port port;
295
296 /* clear common keeper enable bit */
297 for_each_dsi_port(port, intel_dsi->ports) {
298 tmp = I915_READ(ICL_PORT_PCS_DW1_LN0(port));
299 tmp &= ~COMMON_KEEPER_EN;
300 I915_WRITE(ICL_PORT_PCS_DW1_GRP(port), tmp);
301 tmp = I915_READ(ICL_PORT_PCS_DW1_AUX(port));
302 tmp &= ~COMMON_KEEPER_EN;
303 I915_WRITE(ICL_PORT_PCS_DW1_AUX(port), tmp);
304 }
305
306 /*
307 * Set SUS Clock Config bitfield to 11b
308 * Note: loadgen select programming is done
309 * as part of lane phy sequence configuration
310 */
311 for_each_dsi_port(port, intel_dsi->ports) {
312 tmp = I915_READ(ICL_PORT_CL_DW5(port));
313 tmp |= SUS_CLOCK_CONFIG;
314 I915_WRITE(ICL_PORT_CL_DW5(port), tmp);
315 }
316
317 /* Clear training enable to change swing values */
318 for_each_dsi_port(port, intel_dsi->ports) {
319 tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port));
320 tmp &= ~TX_TRAINING_EN;
321 I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp);
322 tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port));
323 tmp &= ~TX_TRAINING_EN;
324 I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp);
325 }
326
327 /* Program swing and de-emphasis */
328 dsi_program_swing_and_deemphasis(encoder);
329
330 /* Set training enable to trigger update */
331 for_each_dsi_port(port, intel_dsi->ports) {
332 tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port));
333 tmp |= TX_TRAINING_EN;
334 I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp);
335 tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port));
336 tmp |= TX_TRAINING_EN;
337 I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp);
338 }
339}
340
341static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder)
342{
343 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
344 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
345 u32 tmp;
346 enum port port;
347
348 for_each_dsi_port(port, intel_dsi->ports) {
349 tmp = I915_READ(DDI_BUF_CTL(port));
350 tmp |= DDI_BUF_CTL_ENABLE;
351 I915_WRITE(DDI_BUF_CTL(port), tmp);
352
353 if (wait_for_us(!(I915_READ(DDI_BUF_CTL(port)) &
354 DDI_BUF_IS_IDLE),
355 500))
356 DRM_ERROR("DDI port:%c buffer idle\n", port_name(port));
357 }
358}
359
360static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder)
361{
362 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
363 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
364 u32 tmp;
365 enum port port;
366
367 /* Program T-INIT master registers */
368 for_each_dsi_port(port, intel_dsi->ports) {
369 tmp = I915_READ(ICL_DSI_T_INIT_MASTER(port));
370 tmp &= ~MASTER_INIT_TIMER_MASK;
371 tmp |= intel_dsi->init_count;
372 I915_WRITE(ICL_DSI_T_INIT_MASTER(port), tmp);
373 }
374
375 /* Program DPHY clock lanes timings */
376 for_each_dsi_port(port, intel_dsi->ports) {
377 I915_WRITE(DPHY_CLK_TIMING_PARAM(port), intel_dsi->dphy_reg);
378
379 /* shadow register inside display core */
380 I915_WRITE(DSI_CLK_TIMING_PARAM(port), intel_dsi->dphy_reg);
381 }
382
383 /* Program DPHY data lanes timings */
384 for_each_dsi_port(port, intel_dsi->ports) {
385 I915_WRITE(DPHY_DATA_TIMING_PARAM(port),
386 intel_dsi->dphy_data_lane_reg);
387
388 /* shadow register inside display core */
389 I915_WRITE(DSI_DATA_TIMING_PARAM(port),
390 intel_dsi->dphy_data_lane_reg);
391 }
392
393 /*
394 * If the DSI link operates at or below 800 MHz,
395 * TA_SURE should be overridden and programmed to
396 * '0' inside TA_PARAM_REGISTERS; otherwise
397 * leave all fields at HW default values.
398 */
399 if (intel_dsi_bitrate(intel_dsi) <= 800000) {
400 for_each_dsi_port(port, intel_dsi->ports) {
401 tmp = I915_READ(DPHY_TA_TIMING_PARAM(port));
402 tmp &= ~TA_SURE_MASK;
403 tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
404 I915_WRITE(DPHY_TA_TIMING_PARAM(port), tmp);
405
406 /* shadow register inside display core */
407 tmp = I915_READ(DSI_TA_TIMING_PARAM(port));
408 tmp &= ~TA_SURE_MASK;
409 tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
410 I915_WRITE(DSI_TA_TIMING_PARAM(port), tmp);
411 }
412 }
413}
414
415static void
416gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
417 const struct intel_crtc_state *pipe_config)
418{
419 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
420 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
421 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
422 enum pipe pipe = intel_crtc->pipe;
423 u32 tmp;
424 enum port port;
425 enum transcoder dsi_trans;
426
427 for_each_dsi_port(port, intel_dsi->ports) {
428 dsi_trans = dsi_port_to_transcoder(port);
429 tmp = I915_READ(DSI_TRANS_FUNC_CONF(dsi_trans));
430
431 if (intel_dsi->eotp_pkt)
432 tmp &= ~EOTP_DISABLED;
433 else
434 tmp |= EOTP_DISABLED;
435
436 /* enable link calibration if freq >= 1.5 Gbps */
437 if (intel_dsi_bitrate(intel_dsi) >= 1500 * 1000) {
438 tmp &= ~LINK_CALIBRATION_MASK;
439 tmp |= CALIBRATION_ENABLED_INITIAL_ONLY;
440 }
441
442 /* configure continuous clock */
443 tmp &= ~CONTINUOUS_CLK_MASK;
444 if (intel_dsi->clock_stop)
445 tmp |= CLK_ENTER_LP_AFTER_DATA;
446 else
447 tmp |= CLK_HS_CONTINUOUS;
448
449 /* configure buffer threshold limit to minimum */
450 tmp &= ~PIX_BUF_THRESHOLD_MASK;
451 tmp |= PIX_BUF_THRESHOLD_1_4;
452
453 /* set virtual channel to '0' */
454 tmp &= ~PIX_VIRT_CHAN_MASK;
455 tmp |= PIX_VIRT_CHAN(0);
456
457 /* program BGR transmission */
458 if (intel_dsi->bgr_enabled)
459 tmp |= BGR_TRANSMISSION;
460
461 /* select pixel format */
462 tmp &= ~PIX_FMT_MASK;
463 switch (intel_dsi->pixel_format) {
464 default:
465 MISSING_CASE(intel_dsi->pixel_format);
466 /* fallthrough */
467 case MIPI_DSI_FMT_RGB565:
468 tmp |= PIX_FMT_RGB565;
469 break;
470 case MIPI_DSI_FMT_RGB666_PACKED:
471 tmp |= PIX_FMT_RGB666_PACKED;
472 break;
473 case MIPI_DSI_FMT_RGB666:
474 tmp |= PIX_FMT_RGB666_LOOSE;
475 break;
476 case MIPI_DSI_FMT_RGB888:
477 tmp |= PIX_FMT_RGB888;
478 break;
479 }
480
481 /* program DSI operation mode */
482 if (is_vid_mode(intel_dsi)) {
483 tmp &= ~OP_MODE_MASK;
484 switch (intel_dsi->video_mode_format) {
485 default:
486 MISSING_CASE(intel_dsi->video_mode_format);
487 /* fallthrough */
488 case VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS:
489 tmp |= VIDEO_MODE_SYNC_EVENT;
490 break;
491 case VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE:
492 tmp |= VIDEO_MODE_SYNC_PULSE;
493 break;
494 }
495 }
496
497 I915_WRITE(DSI_TRANS_FUNC_CONF(dsi_trans), tmp);
498 }
499
500 /* enable port sync mode if dual link */
501 if (intel_dsi->dual_link) {
502 for_each_dsi_port(port, intel_dsi->ports) {
503 dsi_trans = dsi_port_to_transcoder(port);
504 tmp = I915_READ(TRANS_DDI_FUNC_CTL2(dsi_trans));
505 tmp |= PORT_SYNC_MODE_ENABLE;
506 I915_WRITE(TRANS_DDI_FUNC_CTL2(dsi_trans), tmp);
507 }
508
509 //TODO: configure DSS_CTL1
510 }
511
512 for_each_dsi_port(port, intel_dsi->ports) {
513 dsi_trans = dsi_port_to_transcoder(port);
514
515 /* select data lane width */
516 tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans));
517 tmp &= ~DDI_PORT_WIDTH_MASK;
518 tmp |= DDI_PORT_WIDTH(intel_dsi->lane_count);
519
520 /* select input pipe */
521 tmp &= ~TRANS_DDI_EDP_INPUT_MASK;
522 switch (pipe) {
523 default:
524 MISSING_CASE(pipe);
525 /* fallthrough */
526 case PIPE_A:
527 tmp |= TRANS_DDI_EDP_INPUT_A_ON;
528 break;
529 case PIPE_B:
530 tmp |= TRANS_DDI_EDP_INPUT_B_ONOFF;
531 break;
532 case PIPE_C:
533 tmp |= TRANS_DDI_EDP_INPUT_C_ONOFF;
534 break;
535 }
536
537 /* enable DDI buffer */
538 tmp |= TRANS_DDI_FUNC_ENABLE;
539 I915_WRITE(TRANS_DDI_FUNC_CTL(dsi_trans), tmp);
540 }
541
542 /* wait for link ready */
543 for_each_dsi_port(port, intel_dsi->ports) {
544 dsi_trans = dsi_port_to_transcoder(port);
545 if (wait_for_us((I915_READ(DSI_TRANS_FUNC_CONF(dsi_trans)) &
546 LINK_READY), 2500))
547 DRM_ERROR("DSI link not ready\n");
548 }
549}
550
551static void
552gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
553 const struct intel_crtc_state *pipe_config)
554{
555 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
556 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
557 const struct drm_display_mode *adjusted_mode =
558 &pipe_config->base.adjusted_mode;
559 enum port port;
560 enum transcoder dsi_trans;
561 /* horizontal timings */
562 u16 htotal, hactive, hsync_start, hsync_end, hsync_size;
563 u16 hfront_porch, hback_porch;
564 /* vertical timings */
565 u16 vtotal, vactive, vsync_start, vsync_end, vsync_shift;
566
567 hactive = adjusted_mode->crtc_hdisplay;
568 htotal = adjusted_mode->crtc_htotal;
569 hsync_start = adjusted_mode->crtc_hsync_start;
570 hsync_end = adjusted_mode->crtc_hsync_end;
571 hsync_size = hsync_end - hsync_start;
572 hfront_porch = (adjusted_mode->crtc_hsync_start -
573 adjusted_mode->crtc_hdisplay);
574 hback_porch = (adjusted_mode->crtc_htotal -
575 adjusted_mode->crtc_hsync_end);
576 vactive = adjusted_mode->crtc_vdisplay;
577 vtotal = adjusted_mode->crtc_vtotal;
578 vsync_start = adjusted_mode->crtc_vsync_start;
579 vsync_end = adjusted_mode->crtc_vsync_end;
580 vsync_shift = hsync_start - htotal / 2;
581
582 if (intel_dsi->dual_link) {
583 hactive /= 2;
584 if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
585 hactive += intel_dsi->pixel_overlap;
586 htotal /= 2;
587 }
588
589 /* minimum hactive as per bspec: 256 pixels */
590 if (adjusted_mode->crtc_hdisplay < 256)
591 DRM_ERROR("hactive is less then 256 pixels\n");
592
593 /* if RGB666 format, then hactive must be a multiple of 4 pixels */
594 if (intel_dsi->pixel_format == MIPI_DSI_FMT_RGB666 && hactive % 4 != 0)
595 DRM_ERROR("hactive is not a multiple of 4 pixels\n");
596
597 /* program TRANS_HTOTAL register */
598 for_each_dsi_port(port, intel_dsi->ports) {
599 dsi_trans = dsi_port_to_transcoder(port);
600 I915_WRITE(HTOTAL(dsi_trans),
601 (hactive - 1) | ((htotal - 1) << 16));
602 }
603
604 /* TRANS_HSYNC register to be programmed only for video mode */
605 if (intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE) {
606 if (intel_dsi->video_mode_format ==
607 VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE) {
608 /* BSPEC: hsync size should be at least 16 pixels */
609 if (hsync_size < 16)
610 DRM_ERROR("hsync size < 16 pixels\n");
611 }
612
613 if (hback_porch < 16)
614 DRM_ERROR("hback porch < 16 pixels\n");
615
616 if (intel_dsi->dual_link) {
617 hsync_start /= 2;
618 hsync_end /= 2;
619 }
620
621 for_each_dsi_port(port, intel_dsi->ports) {
622 dsi_trans = dsi_port_to_transcoder(port);
623 I915_WRITE(HSYNC(dsi_trans),
624 (hsync_start - 1) | ((hsync_end - 1) << 16));
625 }
626 }
627
628 /* program TRANS_VTOTAL register */
629 for_each_dsi_port(port, intel_dsi->ports) {
630 dsi_trans = dsi_port_to_transcoder(port);
631 /*
632 * FIXME: Programming this by assuming progressive mode, since
633 * non-interlaced info from VBT is not saved inside
634 * struct drm_display_mode.
635 * For interlaced mode: program required pixels minus 2
636 */
637 I915_WRITE(VTOTAL(dsi_trans),
638 (vactive - 1) | ((vtotal - 1) << 16));
639 }
640
641 if (vsync_end < vsync_start || vsync_end > vtotal)
642 DRM_ERROR("Invalid vsync_end value\n");
643
644 if (vsync_start < vactive)
645 DRM_ERROR("vsync_start less than vactive\n");
646
647 /* program TRANS_VSYNC register */
648 for_each_dsi_port(port, intel_dsi->ports) {
649 dsi_trans = dsi_port_to_transcoder(port);
650 I915_WRITE(VSYNC(dsi_trans),
651 (vsync_start - 1) | ((vsync_end - 1) << 16));
652 }
653
654 /*
655 * FIXME: VSYNCSHIFT has to be programmed only for interlaced
656 * modes. Add the check condition here once interlaced
657 * info is available as described above.
658 * Program the TRANS_VSYNCSHIFT register.
659 */
660 for_each_dsi_port(port, intel_dsi->ports) {
661 dsi_trans = dsi_port_to_transcoder(port);
662 I915_WRITE(VSYNCSHIFT(dsi_trans), vsync_shift);
663 }
664}
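/*
 * Worked example of the horizontal timing arithmetic above, using an
 * assumed CEA-style 1920x1080 mode; the numbers are illustrative, not
 * from any real panel.
 */
#include <stdio.h>

int main(void)
{
	unsigned int hdisplay = 1920, hsync_start = 2008;
	unsigned int hsync_end = 2052, htotal = 2200;

	unsigned int hsync_size = hsync_end - hsync_start;	/* 44 */
	unsigned int hfront_porch = hsync_start - hdisplay;	/* 88 */
	unsigned int hback_porch = htotal - hsync_end;		/* 148 */

	/* the registers are then programmed with value - 1, as above */
	printf("HTOTAL field: 0x%08x\n",
	       (hdisplay - 1) | ((htotal - 1) << 16));
	printf("sync %u, front porch %u, back porch %u\n",
	       hsync_size, hfront_porch, hback_porch);
	return 0;
}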
665
666static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
667{
668 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
669 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
670 enum port port;
671 enum transcoder dsi_trans;
672 u32 tmp;
673
674 for_each_dsi_port(port, intel_dsi->ports) {
675 dsi_trans = dsi_port_to_transcoder(port);
676 tmp = I915_READ(PIPECONF(dsi_trans));
677 tmp |= PIPECONF_ENABLE;
678 I915_WRITE(PIPECONF(dsi_trans), tmp);
679
680 /* wait for transcoder to be enabled */
681 if (intel_wait_for_register(dev_priv, PIPECONF(dsi_trans),
682 I965_PIPECONF_ACTIVE,
683 I965_PIPECONF_ACTIVE, 10))
684 DRM_ERROR("DSI transcoder not enabled\n");
685 }
686}
687
688static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder)
689{
690 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
691 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
692 enum port port;
693 enum transcoder dsi_trans;
694 u32 tmp, hs_tx_timeout, lp_rx_timeout, ta_timeout, divisor, mul;
695
696 /*
697 * escape clock count calculation:
698 * BYTE_CLK_COUNT = TIME_NS/(8 * UI)
699 * UI (nsec) = (10^6)/Bitrate
700 * TIME_NS = (BYTE_CLK_COUNT * 8 * 10^6)/ Bitrate
701 * ESCAPE_CLK_COUNT = TIME_NS/ESC_CLK_NS
702 */
703 divisor = intel_dsi_tlpx_ns(intel_dsi) * intel_dsi_bitrate(intel_dsi) * 1000;
704 mul = 8 * 1000000;
705 hs_tx_timeout = DIV_ROUND_UP(intel_dsi->hs_tx_timeout * mul,
706 divisor);
707 lp_rx_timeout = DIV_ROUND_UP(intel_dsi->lp_rx_timeout * mul, divisor);
708 ta_timeout = DIV_ROUND_UP(intel_dsi->turn_arnd_val * mul, divisor);
709
710 for_each_dsi_port(port, intel_dsi->ports) {
711 dsi_trans = dsi_port_to_transcoder(port);
712
713 /* program hst_tx_timeout */
714 tmp = I915_READ(DSI_HSTX_TO(dsi_trans));
715 tmp &= ~HSTX_TIMEOUT_VALUE_MASK;
716 tmp |= HSTX_TIMEOUT_VALUE(hs_tx_timeout);
717 I915_WRITE(DSI_HSTX_TO(dsi_trans), tmp);
718
719 /* FIXME: DSI_CALIB_TO */
720
721 /* program lp_rx_host timeout */
722 tmp = I915_READ(DSI_LPRX_HOST_TO(dsi_trans));
723 tmp &= ~LPRX_TIMEOUT_VALUE_MASK;
724 tmp |= LPRX_TIMEOUT_VALUE(lp_rx_timeout);
725 I915_WRITE(DSI_LPRX_HOST_TO(dsi_trans), tmp);
726
727 /* FIXME: DSI_PWAIT_TO */
728
729 /* program turn around timeout */
730 tmp = I915_READ(DSI_TA_TO(dsi_trans));
731 tmp &= ~TA_TIMEOUT_VALUE_MASK;
732 tmp |= TA_TIMEOUT_VALUE(ta_timeout);
733 I915_WRITE(DSI_TA_TO(dsi_trans), tmp);
734 }
735}
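/*
 * Worked example of the escape-clock conversion above, replicating the
 * driver's arithmetic with assumed inputs: a byte-clock timeout of 65535,
 * a 1 Gbps link and tLPX = 50 ns. All three values are illustrative
 * assumptions.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long long byte_clk_count = 65535;	/* timeout in byte clocks */
	unsigned long long bitrate = 1000000;		/* link rate, kbps */
	unsigned long long tlpx_ns = 50;		/* escape clock period */

	unsigned long long mul = 8 * 1000000ULL;
	unsigned long long divisor = tlpx_ns * bitrate * 1000;

	/* 65535 * 8e6 / 5e10 = 10.49, rounded up to 11 escape clocks */
	printf("escape clock count: %llu\n",
	       DIV_ROUND_UP(byte_clk_count * mul, divisor));
	return 0;
}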
736
737static void
738gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
739 const struct intel_crtc_state *pipe_config)
109{
110 /* step 4a: power up all lanes of the DDI used by DSI */
111 gen11_dsi_power_up_lanes(encoder);
743
744 /* step 4b: configure lane sequencing of the Combo-PHY transmitters */
745 gen11_dsi_config_phy_lanes_sequence(encoder);
746
747 /* step 4c: configure voltage swing and skew */
748 gen11_dsi_voltage_swing_program_seq(encoder);
749
750 /* enable DDI buffer */
751 gen11_dsi_enable_ddi_buffer(encoder);
752
753 /* setup D-PHY timings */
754 gen11_dsi_setup_dphy_timings(encoder);
755
756 /* step 4h: setup DSI protocol timeouts */
757 gen11_dsi_setup_timeouts(encoder);
758
759 /* Step (4h, 4i, 4j, 4k): Configure transcoder */
760 gen11_dsi_configure_transcoder(encoder, pipe_config);
761}
762
763static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
764{
765 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
766 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
767 struct mipi_dsi_device *dsi;
768 enum port port;
769 enum transcoder dsi_trans;
770 u32 tmp;
771 int ret;
772
773 /* set maximum return packet size */
774 for_each_dsi_port(port, intel_dsi->ports) {
775 dsi_trans = dsi_port_to_transcoder(port);
776
777 /*
778 * FIXME: This uses the number of DW's currently in the payload
779 * receive queue. This is probably not what we want here.
780 */
781 tmp = I915_READ(DSI_CMD_RXCTL(dsi_trans));
782 tmp &= NUMBER_RX_PLOAD_DW_MASK;
783 /* multiply "Number Rx Payload DW" by 4 to get max value */
784 tmp = tmp * 4;
785 dsi = intel_dsi->dsi_hosts[port]->device;
786 ret = mipi_dsi_set_maximum_return_packet_size(dsi, tmp);
787 if (ret < 0)
788 DRM_ERROR("error setting max return pkt size%d\n", tmp);
789 }
790
791 /* panel power on related mipi dsi vbt sequences */
792 intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
793 intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
794 intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
795 intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP);
796 intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
797
798 /* ensure all panel commands dispatched before enabling transcoder */
799 wait_for_cmds_dispatched_to_panel(encoder);
112}
113
114static void __attribute__((unused))
@@ -116,6 +804,8 @@ gen11_dsi_pre_enable(struct intel_encoder *encoder,
116 const struct intel_crtc_state *pipe_config,
117 const struct drm_connector_state *conn_state)
118{
807 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
808
119 /* step2: enable IO power */
120 gen11_dsi_enable_io_power(encoder);
121
@@ -123,5 +813,169 @@ gen11_dsi_pre_enable(struct intel_encoder *encoder,
123 gen11_dsi_program_esc_clk_div(encoder);
124
125 /* step4: enable DSI port and DPHY */
126 gen11_dsi_enable_port_and_phy(encoder);
816 gen11_dsi_enable_port_and_phy(encoder, pipe_config);
817
818 /* step5: program and powerup panel */
819 gen11_dsi_powerup_panel(encoder);
820
821 /* step6c: configure transcoder timings */
822 gen11_dsi_set_transcoder_timings(encoder, pipe_config);
823
824 /* step6d: enable dsi transcoder */
825 gen11_dsi_enable_transcoder(encoder);
826
827 /* step7: enable backlight */
828 intel_panel_enable_backlight(pipe_config, conn_state);
829 intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
830}
831
832static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
833{
834 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
835 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
836 enum port port;
837 enum transcoder dsi_trans;
838 u32 tmp;
839
840 for_each_dsi_port(port, intel_dsi->ports) {
841 dsi_trans = dsi_port_to_transcoder(port);
842
843 /* disable transcoder */
844 tmp = I915_READ(PIPECONF(dsi_trans));
845 tmp &= ~PIPECONF_ENABLE;
846 I915_WRITE(PIPECONF(dsi_trans), tmp);
847
848 /* wait for transcoder to be disabled */
849 if (intel_wait_for_register(dev_priv, PIPECONF(dsi_trans),
850 I965_PIPECONF_ACTIVE, 0, 50))
851 DRM_ERROR("DSI trancoder not disabled\n");
852 }
853}
854
855static void gen11_dsi_powerdown_panel(struct intel_encoder *encoder)
856{
857 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
858
859 intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_OFF);
860 intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_ASSERT_RESET);
861 intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_OFF);
862
863 /* ensure cmds dispatched to panel */
864 wait_for_cmds_dispatched_to_panel(encoder);
865}
866
867static void gen11_dsi_deconfigure_transcoder(struct intel_encoder *encoder)
868{
869 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
870 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
871 enum port port;
872 enum transcoder dsi_trans;
873 u32 tmp;
874
875 /* put dsi link in ULPS */
876 for_each_dsi_port(port, intel_dsi->ports) {
877 dsi_trans = dsi_port_to_transcoder(port);
878 tmp = I915_READ(DSI_LP_MSG(dsi_trans));
879 tmp |= LINK_ENTER_ULPS;
880 tmp &= ~LINK_ULPS_TYPE_LP11;
881 I915_WRITE(DSI_LP_MSG(dsi_trans), tmp);
882
883 if (wait_for_us((I915_READ(DSI_LP_MSG(dsi_trans)) &
884 LINK_IN_ULPS),
885 10))
886 DRM_ERROR("DSI link not in ULPS\n");
887 }
888
889 /* disable ddi function */
890 for_each_dsi_port(port, intel_dsi->ports) {
891 dsi_trans = dsi_port_to_transcoder(port);
892 tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans));
893 tmp &= ~TRANS_DDI_FUNC_ENABLE;
894 I915_WRITE(TRANS_DDI_FUNC_CTL(dsi_trans), tmp);
895 }
896
897 /* disable port sync mode if dual link */
898 if (intel_dsi->dual_link) {
899 for_each_dsi_port(port, intel_dsi->ports) {
900 dsi_trans = dsi_port_to_transcoder(port);
901 tmp = I915_READ(TRANS_DDI_FUNC_CTL2(dsi_trans));
902 tmp &= ~PORT_SYNC_MODE_ENABLE;
903 I915_WRITE(TRANS_DDI_FUNC_CTL2(dsi_trans), tmp);
904 }
905 }
906}
907
908static void gen11_dsi_disable_port(struct intel_encoder *encoder)
909{
910 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
911 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
912 u32 tmp;
913 enum port port;
914
915 for_each_dsi_port(port, intel_dsi->ports) {
916 tmp = I915_READ(DDI_BUF_CTL(port));
917 tmp &= ~DDI_BUF_CTL_ENABLE;
918 I915_WRITE(DDI_BUF_CTL(port), tmp);
919
920 if (wait_for_us((I915_READ(DDI_BUF_CTL(port)) &
921 DDI_BUF_IS_IDLE),
922 8))
923 DRM_ERROR("DDI port:%c buffer not idle\n",
924 port_name(port));
925 }
926}
927
928static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
929{
930 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
931 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
932 enum port port;
933 u32 tmp;
934
935 intel_display_power_put(dev_priv, POWER_DOMAIN_PORT_DDI_A_IO);
936
937 if (intel_dsi->dual_link)
938 intel_display_power_put(dev_priv, POWER_DOMAIN_PORT_DDI_B_IO);
939
940 /* set mode to DDI */
941 for_each_dsi_port(port, intel_dsi->ports) {
942 tmp = I915_READ(ICL_DSI_IO_MODECTL(port));
943 tmp &= ~COMBO_PHY_MODE_DSI;
944 I915_WRITE(ICL_DSI_IO_MODECTL(port), tmp);
945 }
946}
947
948static void __attribute__((unused)) gen11_dsi_disable(
949 struct intel_encoder *encoder,
950 const struct intel_crtc_state *old_crtc_state,
951 const struct drm_connector_state *old_conn_state)
952{
953 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
954
955 /* step1: turn off backlight */
956 intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
957 intel_panel_disable_backlight(old_conn_state);
958
959 /* step2d,e: disable transcoder and wait */
960 gen11_dsi_disable_transcoder(encoder);
961
962 /* step2f,g: powerdown panel */
963 gen11_dsi_powerdown_panel(encoder);
964
965 /* step2h,i,j: deconfigure transcoder */
966 gen11_dsi_deconfigure_transcoder(encoder);
967
968 /* step3: disable port */
969 gen11_dsi_disable_port(encoder);
970
971 /* step4: disable IO power */
972 gen11_dsi_disable_io_power(encoder);
973}
974
975void icl_dsi_init(struct drm_i915_private *dev_priv)
976{
977 enum port port;
978
979 if (!intel_bios_is_dsi_present(dev_priv, &port))
980 return;
127}
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index b04952bacf77..a5a2c8fe58a7 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -203,6 +203,72 @@ intel_crtc_destroy_state(struct drm_crtc *crtc,
203 drm_atomic_helper_crtc_destroy_state(crtc, state);
204}
205
206static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state,
207 int num_scalers_need, struct intel_crtc *intel_crtc,
208 const char *name, int idx,
209 struct intel_plane_state *plane_state,
210 int *scaler_id)
211{
212 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
213 int j;
214 u32 mode;
215
216 if (*scaler_id < 0) {
217 /* find a free scaler */
218 for (j = 0; j < intel_crtc->num_scalers; j++) {
219 if (scaler_state->scalers[j].in_use)
220 continue;
221
222 *scaler_id = j;
223 scaler_state->scalers[*scaler_id].in_use = 1;
224 break;
225 }
226 }
227
228 if (WARN(*scaler_id < 0, "Cannot find scaler for %s:%d\n", name, idx))
229 return;
230
231 /* set scaler mode */
232 if (plane_state && plane_state->base.fb &&
233 plane_state->base.fb->format->is_yuv &&
234 plane_state->base.fb->format->num_planes > 1) {
235 if (IS_GEN9(dev_priv) &&
236 !IS_GEMINILAKE(dev_priv)) {
237 mode = SKL_PS_SCALER_MODE_NV12;
238 } else if (icl_is_hdr_plane(to_intel_plane(plane_state->base.plane))) {
239 /*
240 * On gen11+'s HDR planes we only use the scaler for
241 * scaling. They have a dedicated chroma upsampler, so
242 * we don't need the scaler to upsample the UV plane.
243 */
244 mode = PS_SCALER_MODE_NORMAL;
245 } else {
246 mode = PS_SCALER_MODE_PLANAR;
247
248 if (plane_state->linked_plane)
249 mode |= PS_PLANE_Y_SEL(plane_state->linked_plane->id);
250 }
251 } else if (INTEL_GEN(dev_priv) > 9 || IS_GEMINILAKE(dev_priv)) {
252 mode = PS_SCALER_MODE_NORMAL;
253 } else if (num_scalers_need == 1 && intel_crtc->num_scalers > 1) {
254 /*
255 * when only 1 scaler is in use on a pipe with 2 scalers
256 * scaler 0 operates in high quality (HQ) mode.
257 * In this case use scaler 0 to take advantage of HQ mode
258 */
259 scaler_state->scalers[*scaler_id].in_use = 0;
260 *scaler_id = 0;
261 scaler_state->scalers[0].in_use = 1;
262 mode = SKL_PS_SCALER_MODE_HQ;
263 } else {
264 mode = SKL_PS_SCALER_MODE_DYN;
265 }
266
267 DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
268 intel_crtc->pipe, *scaler_id, name, idx);
269 scaler_state->scalers[*scaler_id].mode = mode;
270}
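/*
 * The scaler assignment above starts with a linear scan for a free slot.
 * A minimal standalone sketch of that allocation step, over an assumed
 * fixed-size table:
 */
struct scaler_slot { int in_use; };

static int find_free_scaler(struct scaler_slot *slots, int num)
{
	int j;

	for (j = 0; j < num; j++) {
		if (slots[j].in_use)
			continue;
		slots[j].in_use = 1;	/* claim the slot */
		return j;
	}
	return -1;			/* no scaler available */
}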
271
206/**
207 * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
208 * @dev_priv: i915 device
@@ -232,7 +298,7 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
232 struct drm_atomic_state *drm_state = crtc_state->base.state;
233 struct intel_atomic_state *intel_state = to_intel_atomic_state(drm_state);
234 int num_scalers_need;
235 int i, j;
301 int i;
236
237 num_scalers_need = hweight32(scaler_state->scaler_users);
238
@@ -304,59 +370,17 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
304 idx = plane->base.id;
305
306 /* plane on different crtc cannot be a scaler user of this crtc */
307 if (WARN_ON(intel_plane->pipe != intel_crtc->pipe)) {
373 if (WARN_ON(intel_plane->pipe != intel_crtc->pipe))
308 continue;
309 }
310
311 plane_state = intel_atomic_get_new_plane_state(intel_state,
312 intel_plane);
313 scaler_id = &plane_state->scaler_id;
314 }
315
381 intel_atomic_setup_scaler(scaler_state, num_scalers_need,
382 intel_crtc, name, idx,
383 plane_state, scaler_id);
316 if (*scaler_id < 0) {
317 /* find a free scaler */
318 for (j = 0; j < intel_crtc->num_scalers; j++) {
319 if (!scaler_state->scalers[j].in_use) {
320 scaler_state->scalers[j].in_use = 1;
321 *scaler_id = j;
322 DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
323 intel_crtc->pipe, *scaler_id, name, idx);
324 break;
325 }
326 }
327 }
328
329 if (WARN_ON(*scaler_id < 0)) {
330 DRM_DEBUG_KMS("Cannot find scaler for %s:%d\n", name, idx);
331 continue;
332 }
333
334 /* set scaler mode */
335 if ((INTEL_GEN(dev_priv) >= 9) &&
336 plane_state && plane_state->base.fb &&
337 plane_state->base.fb->format->format ==
338 DRM_FORMAT_NV12) {
339 if (INTEL_GEN(dev_priv) == 9 &&
340 !IS_GEMINILAKE(dev_priv) &&
341 !IS_SKYLAKE(dev_priv))
342 scaler_state->scalers[*scaler_id].mode =
343 SKL_PS_SCALER_MODE_NV12;
344 else
345 scaler_state->scalers[*scaler_id].mode =
346 PS_SCALER_MODE_PLANAR;
347 } else if (num_scalers_need == 1 && intel_crtc->pipe != PIPE_C) {
348 /*
349 * when only 1 scaler is in use on either pipe A or B,
350 * scaler 0 operates in high quality (HQ) mode.
351 * In this case use scaler 0 to take advantage of HQ mode
352 */
353 *scaler_id = 0;
354 scaler_state->scalers[0].in_use = 1;
355 scaler_state->scalers[0].mode = PS_SCALER_MODE_HQ;
356 scaler_state->scalers[1].in_use = 0;
357 } else {
358 scaler_state->scalers[*scaler_id].mode = PS_SCALER_MODE_DYN;
359 }
360 }
361
362 return 0;
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index aabebe0d2e9b..905f8ef3ba4f 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -36,28 +36,31 @@
36#include <drm/drm_plane_helper.h>
37#include "intel_drv.h"
38
39/**
40 * intel_create_plane_state - create plane state object
41 * @plane: drm plane
42 *
43 * Allocates a fresh plane state for the given plane and sets some of
44 * the state values to sensible initial values.
45 *
46 * Returns: A newly allocated plane state, or NULL on failure
47 */
48struct intel_plane_state *
49intel_create_plane_state(struct drm_plane *plane)
50{
51 struct intel_plane_state *state;
52
53 state = kzalloc(sizeof(*state), GFP_KERNEL);
54 if (!state)
55 return NULL;
56
57 state->base.plane = plane;
58 state->base.rotation = DRM_MODE_ROTATE_0;
59
60 return state;
39struct intel_plane *intel_plane_alloc(void)
40{
41 struct intel_plane_state *plane_state;
42 struct intel_plane *plane;
43
44 plane = kzalloc(sizeof(*plane), GFP_KERNEL);
45 if (!plane)
46 return ERR_PTR(-ENOMEM);
47
48 plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
49 if (!plane_state) {
50 kfree(plane);
51 return ERR_PTR(-ENOMEM);
52 }
53
54 __drm_atomic_helper_plane_reset(&plane->base, &plane_state->base);
55 plane_state->scaler_id = -1;
56
57 return plane;
58}
59
60void intel_plane_free(struct intel_plane *plane)
61{
62 intel_plane_destroy_state(&plane->base, plane->base.state);
63 kfree(plane);
61}
62
63/**
@@ -117,10 +120,14 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
117 struct intel_plane *intel_plane = to_intel_plane(plane);
118 int ret;
119
123 crtc_state->active_planes &= ~BIT(intel_plane->id);
124 crtc_state->nv12_planes &= ~BIT(intel_plane->id);
125 intel_state->base.visible = false;
126
127 /* If this is a cursor plane, no further checks are needed. */
120 if (!intel_state->base.crtc && !old_plane_state->base.crtc)
121 return 0;
122
123 intel_state->base.visible = false;
124 ret = intel_plane->check_plane(crtc_state, intel_state);
125 if (ret)
126 return ret;
@@ -128,13 +135,9 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
128 /* FIXME pre-g4x don't work like this */
129 if (state->visible)
130 crtc_state->active_planes |= BIT(intel_plane->id);
131 else
132 crtc_state->active_planes &= ~BIT(intel_plane->id);
133
134 if (state->visible && state->fb->format->format == DRM_FORMAT_NV12)
135 crtc_state->nv12_planes |= BIT(intel_plane->id);
136 else
137 crtc_state->nv12_planes &= ~BIT(intel_plane->id);
138
139 return intel_plane_atomic_calc_changes(old_crtc_state,
140 &crtc_state->base,
@@ -152,6 +155,7 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
152 const struct drm_crtc_state *old_crtc_state;
153 struct drm_crtc_state *new_crtc_state;
154
158 new_plane_state->visible = false;
155 if (!crtc)
156 return 0;
157
@@ -164,29 +168,52 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
164 to_intel_plane_state(new_plane_state));
165}
166
167static void intel_plane_atomic_update(struct drm_plane *plane,
168 struct drm_plane_state *old_state)
169{
170 struct intel_atomic_state *state = to_intel_atomic_state(old_state->state);
171 struct intel_plane *intel_plane = to_intel_plane(plane);
172 const struct intel_plane_state *new_plane_state =
173 intel_atomic_get_new_plane_state(state, intel_plane);
174 struct drm_crtc *crtc = new_plane_state->base.crtc ?: old_state->crtc;
175
176 if (new_plane_state->base.visible) {
177 const struct intel_crtc_state *new_crtc_state =
178 intel_atomic_get_new_crtc_state(state, to_intel_crtc(crtc));
179
180 trace_intel_update_plane(plane,
181 to_intel_crtc(crtc));
182
183 intel_plane->update_plane(intel_plane,
184 new_crtc_state, new_plane_state);
185 } else {
186 trace_intel_disable_plane(plane,
187 to_intel_crtc(crtc));
188
189 intel_plane->disable_plane(intel_plane, to_intel_crtc(crtc));
171void intel_update_planes_on_crtc(struct intel_atomic_state *old_state,
172 struct intel_crtc *crtc,
173 struct intel_crtc_state *old_crtc_state,
174 struct intel_crtc_state *new_crtc_state)
175{
176 struct intel_plane_state *new_plane_state;
177 struct intel_plane *plane;
178 u32 update_mask;
179 int i;
180
181 update_mask = old_crtc_state->active_planes;
182 update_mask |= new_crtc_state->active_planes;
183
184 for_each_new_intel_plane_in_state(old_state, plane, new_plane_state, i) {
185 if (crtc->pipe != plane->pipe ||
186 !(update_mask & BIT(plane->id)))
187 continue;
188
189 if (new_plane_state->base.visible) {
190 trace_intel_update_plane(&plane->base, crtc);
191
192 plane->update_plane(plane, new_crtc_state, new_plane_state);
193 } else if (new_plane_state->slave) {
194 struct intel_plane *master =
195 new_plane_state->linked_plane;
196
197 /*
198 * We update the slave plane from this function because
199 * programming it from the master plane's update_plane
200 * callback runs into issues when the Y plane is
201 * reassigned, disabled or used by a different plane.
202 *
203 * The slave plane is updated with the master plane's
204 * plane_state.
205 */
206 new_plane_state =
207 intel_atomic_get_new_plane_state(old_state, master);
208
209 trace_intel_update_plane(&plane->base, crtc);
210
211 plane->update_slave(plane, new_crtc_state, new_plane_state);
212 } else {
213 trace_intel_disable_plane(&plane->base, crtc);
214
215 plane->disable_plane(plane, crtc);
216 }
190 }
191}
192 219
@@ -194,7 +221,6 @@ const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
194 .prepare_fb = intel_prepare_plane_fb,
195 .cleanup_fb = intel_cleanup_plane_fb,
196 .atomic_check = intel_plane_atomic_check,
197 .atomic_update = intel_plane_atomic_update,
198};
199
200/**
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index ee3ca2de983b..ae55a6865d5c 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -153,32 +153,32 @@ static const struct {
153 int n;
154 int cts;
155} hdmi_aud_ncts[] = {
156 { 32000, TMDS_296M, 5824, 421875 },
157 { 32000, TMDS_297M, 3072, 222750 },
158 { 32000, TMDS_593M, 5824, 843750 },
159 { 32000, TMDS_594M, 3072, 445500 },
160 { 44100, TMDS_296M, 4459, 234375 },
161 { 44100, TMDS_297M, 4704, 247500 },
162 { 44100, TMDS_593M, 8918, 937500 },
163 { 44100, TMDS_594M, 9408, 990000 },
164 { 88200, TMDS_296M, 8918, 234375 },
165 { 88200, TMDS_297M, 9408, 247500 },
166 { 88200, TMDS_593M, 17836, 937500 },
167 { 88200, TMDS_594M, 18816, 990000 },
168 { 176400, TMDS_296M, 17836, 234375 },
169 { 176400, TMDS_297M, 18816, 247500 },
170 { 176400, TMDS_593M, 35672, 937500 },
171 { 176400, TMDS_594M, 37632, 990000 },
172 { 48000, TMDS_296M, 5824, 281250 },
173 { 48000, TMDS_297M, 5120, 247500 },
174 { 48000, TMDS_593M, 5824, 562500 },
175 { 48000, TMDS_594M, 6144, 594000 },
176 { 96000, TMDS_296M, 11648, 281250 },
177 { 96000, TMDS_297M, 10240, 247500 },
178 { 96000, TMDS_593M, 11648, 562500 },
179 { 96000, TMDS_594M, 12288, 594000 },
180 { 192000, TMDS_296M, 23296, 281250 },
181 { 192000, TMDS_297M, 20480, 247500 },
182 { 192000, TMDS_593M, 23296, 562500 },
183 { 192000, TMDS_594M, 24576, 594000 },
184};
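/*
 * The table above encodes the HDMI audio clock regeneration relation
 * 128 * sample_rate == tmds_clock * N / CTS. A quick check of the
 * 32 kHz @ 297 MHz row:
 */
#include <stdio.h>

int main(void)
{
	unsigned long long tmds = 297000000ULL;	/* TMDS_297M, in Hz */
	unsigned long long n = 3072, cts = 222750, rate = 32000;

	/* 297000000 * 3072 / 222750 == 4096000 == 128 * 32000 */
	printf("%llu == %llu\n", tmds * n / cts, 128ULL * rate);
	return 0;
}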
@@ -929,6 +929,9 @@ static int i915_audio_component_bind(struct device *i915_kdev,
929 if (WARN_ON(acomp->base.ops || acomp->base.dev))
930 return -EEXIST;
931
932 if (WARN_ON(!device_link_add(hda_kdev, i915_kdev, DL_FLAG_STATELESS)))
933 return -ENOMEM;
934
932 drm_modeset_lock_all(&dev_priv->drm);
933 acomp->base.ops = &i915_audio_component_ops;
934 acomp->base.dev = i915_kdev;
@@ -952,6 +955,8 @@ static void i915_audio_component_unbind(struct device *i915_kdev,
952 acomp->base.dev = NULL;
953 dev_priv->audio_component = NULL;
954 drm_modeset_unlock_all(&dev_priv->drm);
958
959 device_link_remove(hda_kdev, i915_kdev);
955}
956
957static const struct component_ops i915_audio_component_bind_ops = {
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 1faa494e2bc9..0694aa8bb9bc 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -420,6 +420,13 @@ parse_general_features(struct drm_i915_private *dev_priv,
420 intel_bios_ssc_frequency(dev_priv, general->ssc_freq); 420 intel_bios_ssc_frequency(dev_priv, general->ssc_freq);
421 dev_priv->vbt.display_clock_mode = general->display_clock_mode; 421 dev_priv->vbt.display_clock_mode = general->display_clock_mode;
422 dev_priv->vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted; 422 dev_priv->vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
423 if (bdb->version >= 181) {
424 dev_priv->vbt.orientation = general->rotate_180 ?
425 DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP :
426 DRM_MODE_PANEL_ORIENTATION_NORMAL;
427 } else {
428 dev_priv->vbt.orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
429 }
423 DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n", 430 DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
424 dev_priv->vbt.int_tv_support, 431 dev_priv->vbt.int_tv_support,
425 dev_priv->vbt.int_crt_support, 432 dev_priv->vbt.int_crt_support,
@@ -852,6 +859,30 @@ parse_mipi_config(struct drm_i915_private *dev_priv,
852
853 parse_dsi_backlight_ports(dev_priv, bdb->version, port);
854
862 /* FIXME is the 90 vs. 270 correct? */
863 switch (config->rotation) {
864 case ENABLE_ROTATION_0:
865 /*
866 * Most (all?) VBTs claim 0 degrees despite having
867 * an upside down panel, thus we do not trust this.
868 */
869 dev_priv->vbt.dsi.orientation =
870 DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
871 break;
872 case ENABLE_ROTATION_90:
873 dev_priv->vbt.dsi.orientation =
874 DRM_MODE_PANEL_ORIENTATION_RIGHT_UP;
875 break;
876 case ENABLE_ROTATION_180:
877 dev_priv->vbt.dsi.orientation =
878 DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
879 break;
880 case ENABLE_ROTATION_270:
881 dev_priv->vbt.dsi.orientation =
882 DRM_MODE_PANEL_ORIENTATION_LEFT_UP;
883 break;
884 }
885
855 /* We have mandatory mipi config blocks. Initialize as generic panel */
856 dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
857}
@@ -2039,17 +2070,17 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv,
2039
2040 dvo_port = child->dvo_port;
2041
2042 switch (dvo_port) {
2043 case DVO_PORT_MIPIA:
2044 case DVO_PORT_MIPIC:
2073 if (dvo_port == DVO_PORT_MIPIA ||
2074 (dvo_port == DVO_PORT_MIPIB && IS_ICELAKE(dev_priv)) ||
2075 (dvo_port == DVO_PORT_MIPIC && !IS_ICELAKE(dev_priv))) {
2045 if (port)
2046 *port = dvo_port - DVO_PORT_MIPIA;
2047 return true;
2048 case DVO_PORT_MIPIB:
2049 case DVO_PORT_MIPID:
2079 } else if (dvo_port == DVO_PORT_MIPIB ||
2080 dvo_port == DVO_PORT_MIPIC ||
2081 dvo_port == DVO_PORT_MIPID) {
2050 DRM_DEBUG_KMS("VBT has unsupported DSI port %c\n",
2051 port_name(dvo_port - DVO_PORT_MIPIA));
2052 break;
2053 }
2054 }
2055
@@ -2159,3 +2190,49 @@ intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
2159
2160 return false;
2161}
2193
2194enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv,
2195 enum port port)
2196{
2197 const struct ddi_vbt_port_info *info =
2198 &dev_priv->vbt.ddi_port_info[port];
2199 enum aux_ch aux_ch;
2200
2201 if (!info->alternate_aux_channel) {
2202 aux_ch = (enum aux_ch)port;
2203
2204 DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n",
2205 aux_ch_name(aux_ch), port_name(port));
2206 return aux_ch;
2207 }
2208
2209 switch (info->alternate_aux_channel) {
2210 case DP_AUX_A:
2211 aux_ch = AUX_CH_A;
2212 break;
2213 case DP_AUX_B:
2214 aux_ch = AUX_CH_B;
2215 break;
2216 case DP_AUX_C:
2217 aux_ch = AUX_CH_C;
2218 break;
2219 case DP_AUX_D:
2220 aux_ch = AUX_CH_D;
2221 break;
2222 case DP_AUX_E:
2223 aux_ch = AUX_CH_E;
2224 break;
2225 case DP_AUX_F:
2226 aux_ch = AUX_CH_F;
2227 break;
2228 default:
2229 MISSING_CASE(info->alternate_aux_channel);
2230 aux_ch = AUX_CH_A;
2231 break;
2232 }
2233
2234 DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n",
2235 aux_ch_name(aux_ch), port_name(port));
2236
2237 return aux_ch;
2238}
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index 8d74276029e6..25e3aba9cded 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -2660,37 +2660,18 @@ static int cnp_rawclk(struct drm_i915_private *dev_priv)
2660 fraction = 200;
2661 }
2662
2663 rawclk = CNP_RAWCLK_DIV((divider / 1000) - 1);
2664 if (fraction)
2665 rawclk |= CNP_RAWCLK_FRAC(DIV_ROUND_CLOSEST(1000,
2666 fraction) - 1);
2667
2668 I915_WRITE(PCH_RAWCLK_FREQ, rawclk);
2669 return divider + fraction;
2670}
2671
2663 rawclk = CNP_RAWCLK_DIV(divider / 1000);
2664 if (fraction) {
2665 int numerator = 1;
2666
2667 rawclk |= CNP_RAWCLK_DEN(DIV_ROUND_CLOSEST(numerator * 1000,
2668 fraction) - 1);
2669 if (HAS_PCH_ICP(dev_priv))
2670 rawclk |= ICP_RAWCLK_NUM(numerator);
2672static int icp_rawclk(struct drm_i915_private *dev_priv)
2673{
2674 u32 rawclk;
2675 int divider, numerator, denominator, frequency;
2676
2677 if (I915_READ(SFUSE_STRAP) & SFUSE_STRAP_RAW_FREQUENCY) {
2678 frequency = 24000;
2679 divider = 23;
2680 numerator = 0;
2681 denominator = 0;
2682 } else {
2683 frequency = 19200;
2684 divider = 18;
2685 numerator = 1;
2686 denominator = 4;
2687 }
2688
2689 rawclk = CNP_RAWCLK_DIV(divider) | ICP_RAWCLK_NUM(numerator) |
2690 ICP_RAWCLK_DEN(denominator);
2691
2692 I915_WRITE(PCH_RAWCLK_FREQ, rawclk);
2693 return frequency;
2674 return divider + fraction;
2694}
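/*
 * Worked example of the fractional rawclk encoding above for the
 * 19.2 MHz case (fraction = 200 as in the surrounding code; divider is
 * assumed to be 19000 so that divider + fraction == 19200):
 */
#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	unsigned int divider = 19000, fraction = 200, numerator = 1;

	/* DIV_ROUND_CLOSEST(1 * 1000, 200) - 1 == 5 - 1 == 4 */
	printf("DEN field: %u\n",
	       DIV_ROUND_CLOSEST(numerator * 1000, fraction) - 1);
	printf("reported rawclk: %u kHz\n", divider + fraction);
	return 0;
}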
2695 2676
2696static int pch_rawclk(struct drm_i915_private *dev_priv) 2677static int pch_rawclk(struct drm_i915_private *dev_priv)
@@ -2740,9 +2721,7 @@ static int g4x_hrawclk(struct drm_i915_private *dev_priv)
2740 */
2741void intel_update_rawclk(struct drm_i915_private *dev_priv)
2742{
2743 if (HAS_PCH_ICP(dev_priv))
2744 dev_priv->rawclk_freq = icp_rawclk(dev_priv);
2745 else if (HAS_PCH_CNP(dev_priv))
2724 if (HAS_PCH_CNP(dev_priv) || HAS_PCH_ICP(dev_priv))
2746 dev_priv->rawclk_freq = cnp_rawclk(dev_priv);
2747 else if (HAS_PCH_SPLIT(dev_priv))
2748 dev_priv->rawclk_freq = pch_rawclk(dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index c6a7beabd58d..5127da286a2b 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -149,7 +149,8 @@ static void ilk_load_csc_matrix(struct drm_crtc_state *crtc_state)
149 if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv))
150 limited_color_range = intel_crtc_state->limited_color_range;
151
152 if (intel_crtc_state->ycbcr420) {
152 if (intel_crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
153 intel_crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) {
153 ilk_load_ycbcr_conversion_matrix(intel_crtc);
154 return;
155 } else if (crtc_state->ctm) {
diff --git a/drivers/gpu/drm/i915/intel_combo_phy.c b/drivers/gpu/drm/i915/intel_combo_phy.c
new file mode 100644
index 000000000000..3d0271cebf99
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_combo_phy.c
@@ -0,0 +1,254 @@
1// SPDX-License-Identifier: MIT
2/*
3 * Copyright © 2018 Intel Corporation
4 */
5
6#include "intel_drv.h"
7
8#define for_each_combo_port(__dev_priv, __port) \
9 for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
10 for_each_if(intel_port_is_combophy(__dev_priv, __port))
11
12#define for_each_combo_port_reverse(__dev_priv, __port) \
13 for ((__port) = I915_MAX_PORTS; (__port)-- > PORT_A;) \
14 for_each_if(intel_port_is_combophy(__dev_priv, __port))
15
16enum {
17 PROCMON_0_85V_DOT_0,
18 PROCMON_0_95V_DOT_0,
19 PROCMON_0_95V_DOT_1,
20 PROCMON_1_05V_DOT_0,
21 PROCMON_1_05V_DOT_1,
22};
23
24static const struct cnl_procmon {
25 u32 dw1, dw9, dw10;
26} cnl_procmon_values[] = {
27 [PROCMON_0_85V_DOT_0] =
28 { .dw1 = 0x00000000, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96, },
29 [PROCMON_0_95V_DOT_0] =
30 { .dw1 = 0x00000000, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB, },
31 [PROCMON_0_95V_DOT_1] =
32 { .dw1 = 0x00000000, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5, },
33 [PROCMON_1_05V_DOT_0] =
34 { .dw1 = 0x00000000, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1, },
35 [PROCMON_1_05V_DOT_1] =
36 { .dw1 = 0x00440000, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, },
37};
38
39/*
40 * CNL has just one set of registers, while ICL has two sets: one for port A and
41 * the other for port B. The CNL registers are equivalent to the ICL port A
42 * registers, that's why we call the ICL macros even though the function has CNL
43 * in its name.
44 */
45static const struct cnl_procmon *
46cnl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum port port)
47{
48 const struct cnl_procmon *procmon;
49 u32 val;
50
51 val = I915_READ(ICL_PORT_COMP_DW3(port));
52 switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
53 default:
54 MISSING_CASE(val);
55 /* fall through */
56 case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0:
57 procmon = &cnl_procmon_values[PROCMON_0_85V_DOT_0];
58 break;
59 case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0:
60 procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_0];
61 break;
62 case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1:
63 procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_1];
64 break;
65 case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0:
66 procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_0];
67 break;
68 case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1:
69 procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_1];
70 break;
71 }
72
73 return procmon;
74}
75
76static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
77 enum port port)
78{
79 const struct cnl_procmon *procmon;
80 u32 val;
81
82 procmon = cnl_get_procmon_ref_values(dev_priv, port);
83
84 val = I915_READ(ICL_PORT_COMP_DW1(port));
85 val &= ~((0xff << 16) | 0xff);
86 val |= procmon->dw1;
87 I915_WRITE(ICL_PORT_COMP_DW1(port), val);
88
89 I915_WRITE(ICL_PORT_COMP_DW9(port), procmon->dw9);
90 I915_WRITE(ICL_PORT_COMP_DW10(port), procmon->dw10);
91}
92
93static bool check_phy_reg(struct drm_i915_private *dev_priv,
94 enum port port, i915_reg_t reg, u32 mask,
95 u32 expected_val)
96{
97 u32 val = I915_READ(reg);
98
99 if ((val & mask) != expected_val) {
100 DRM_DEBUG_DRIVER("Port %c combo PHY reg %08x state mismatch: "
101 "current %08x mask %08x expected %08x\n",
102 port_name(port),
103 reg.reg, val, mask, expected_val);
104 return false;
105 }
106
107 return true;
108}
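/*
 * check_phy_reg() above reduces state verification to a single masked
 * compare. The same idiom, standalone:
 */
#include <stdbool.h>
#include <stdint.h>

static bool field_matches(uint32_t val, uint32_t mask, uint32_t expected)
{
	/* only the bits selected by mask take part in the comparison */
	return (val & mask) == expected;
}

/* e.g. field_matches(readout, (0xffu << 16) | 0xffu, procmon_dw1) */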
109
110static bool cnl_verify_procmon_ref_values(struct drm_i915_private *dev_priv,
111 enum port port)
112{
113 const struct cnl_procmon *procmon;
114 bool ret;
115
116 procmon = cnl_get_procmon_ref_values(dev_priv, port);
117
118 ret = check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW1(port),
119 (0xff << 16) | 0xff, procmon->dw1);
120 ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW9(port),
121 -1U, procmon->dw9);
122 ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW10(port),
123 -1U, procmon->dw10);
124
125 return ret;
126}
127
128static bool cnl_combo_phy_enabled(struct drm_i915_private *dev_priv)
129{
130 return !(I915_READ(CHICKEN_MISC_2) & CNL_COMP_PWR_DOWN) &&
131 (I915_READ(CNL_PORT_COMP_DW0) & COMP_INIT);
132}
133
134static bool cnl_combo_phy_verify_state(struct drm_i915_private *dev_priv)
135{
136 enum port port = PORT_A;
137 bool ret;
138
139 if (!cnl_combo_phy_enabled(dev_priv))
140 return false;
141
142 ret = cnl_verify_procmon_ref_values(dev_priv, port);
143
144 ret &= check_phy_reg(dev_priv, port, CNL_PORT_CL1CM_DW5,
145 CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE);
146
147 return ret;
148}
149
150void cnl_combo_phys_init(struct drm_i915_private *dev_priv)
151{
152 u32 val;
153
154 val = I915_READ(CHICKEN_MISC_2);
155 val &= ~CNL_COMP_PWR_DOWN;
156 I915_WRITE(CHICKEN_MISC_2, val);
157
158 /* Dummy PORT_A to get the correct CNL register from the ICL macro */
159 cnl_set_procmon_ref_values(dev_priv, PORT_A);
160
161 val = I915_READ(CNL_PORT_COMP_DW0);
162 val |= COMP_INIT;
163 I915_WRITE(CNL_PORT_COMP_DW0, val);
164
165 val = I915_READ(CNL_PORT_CL1CM_DW5);
166 val |= CL_POWER_DOWN_ENABLE;
167 I915_WRITE(CNL_PORT_CL1CM_DW5, val);
168}
169
170void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv)
171{
172 u32 val;
173
174 if (!cnl_combo_phy_verify_state(dev_priv))
175 DRM_WARN("Combo PHY HW state changed unexpectedly.\n");
176
177 val = I915_READ(CHICKEN_MISC_2);
178 val |= CNL_COMP_PWR_DOWN;
179 I915_WRITE(CHICKEN_MISC_2, val);
180}
181
182static bool icl_combo_phy_enabled(struct drm_i915_private *dev_priv,
183 enum port port)
184{
185 return !(I915_READ(ICL_PHY_MISC(port)) &
186 ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN) &&
187 (I915_READ(ICL_PORT_COMP_DW0(port)) & COMP_INIT);
188}
189
190static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
191 enum port port)
192{
193 bool ret;
194
195 if (!icl_combo_phy_enabled(dev_priv, port))
196 return false;
197
198 ret = cnl_verify_procmon_ref_values(dev_priv, port);
199
200 ret &= check_phy_reg(dev_priv, port, ICL_PORT_CL_DW5(port),
201 CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE);
202
203 return ret;
204}
205
206void icl_combo_phys_init(struct drm_i915_private *dev_priv)
207{
208 enum port port;
209
210 for_each_combo_port(dev_priv, port) {
211 u32 val;
212
213 if (icl_combo_phy_verify_state(dev_priv, port)) {
214 DRM_DEBUG_DRIVER("Port %c combo PHY already enabled, won't reprogram it.\n",
215 port_name(port));
216 continue;
217 }
218
219 val = I915_READ(ICL_PHY_MISC(port));
220 val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
221 I915_WRITE(ICL_PHY_MISC(port), val);
222
223 cnl_set_procmon_ref_values(dev_priv, port);
224
225 val = I915_READ(ICL_PORT_COMP_DW0(port));
226 val |= COMP_INIT;
227 I915_WRITE(ICL_PORT_COMP_DW0(port), val);
228
229 val = I915_READ(ICL_PORT_CL_DW5(port));
230 val |= CL_POWER_DOWN_ENABLE;
231 I915_WRITE(ICL_PORT_CL_DW5(port), val);
232 }
233}
234
235void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
236{
237 enum port port;
238
239 for_each_combo_port_reverse(dev_priv, port) {
240 u32 val;
241
242 if (!icl_combo_phy_verify_state(dev_priv, port))
243 DRM_WARN("Port %c combo PHY HW state changed unexpectedly\n",
244 port_name(port));
245
246 val = I915_READ(ICL_PHY_MISC(port));
247 val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
248 I915_WRITE(ICL_PHY_MISC(port), val);
249
250 val = I915_READ(ICL_PORT_COMP_DW0(port));
251 val &= ~COMP_INIT;
252 I915_WRITE(ICL_PORT_COMP_DW0(port), val);
253 }
254}
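Every verify helper above funnels through the same masked-compare idiom: read the register, mask off the don't-care bits, and compare what is left against the expected value. A standalone sketch of that idiom, with hypothetical register values in place of the real COMP_DW reads:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace sketch of the check_phy_reg() idiom above: only the bits
 * under `mask` participate in the comparison, everything else is
 * ignored. */
static bool reg_matches(uint32_t val, uint32_t mask, uint32_t expected)
{
	if ((val & mask) != expected) {
		printf("mismatch: current %08x mask %08x expected %08x\n",
		       val, mask, expected);
		return false;
	}
	return true;
}

int main(void)
{
	/* Hypothetical readback: compare bits 23:16 and 7:0 only, the
	 * same (0xff << 16) | 0xff mask used for COMP_DW1 above. */
	uint32_t mask = (0xff << 16) | 0xff;

	return reg_matches(0x00ab12cd, mask, 0x00ab00cd) ? 0 : 1;
}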
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_connector.c
index ca44bf368e24..18e370f607bc 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_connector.c
@@ -25,11 +25,140 @@
 
 #include <linux/slab.h>
 #include <linux/i2c.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_edid.h>
 #include <drm/drmP.h>
 #include "intel_drv.h"
 #include "i915_drv.h"
 
+int intel_connector_init(struct intel_connector *connector)
+{
+	struct intel_digital_connector_state *conn_state;
+
+	/*
+	 * Allocate enough memory to hold intel_digital_connector_state.
+	 * This might be a few bytes too many, but for connectors that don't
+	 * need it we'll free the state and allocate a smaller one on the first
+	 * successful commit anyway.
+	 */
+	conn_state = kzalloc(sizeof(*conn_state), GFP_KERNEL);
+	if (!conn_state)
+		return -ENOMEM;
+
+	__drm_atomic_helper_connector_reset(&connector->base,
+					    &conn_state->base);
+
+	return 0;
+}
+
+struct intel_connector *intel_connector_alloc(void)
+{
+	struct intel_connector *connector;
+
+	connector = kzalloc(sizeof(*connector), GFP_KERNEL);
+	if (!connector)
+		return NULL;
+
+	if (intel_connector_init(connector) < 0) {
+		kfree(connector);
+		return NULL;
+	}
+
+	return connector;
+}
+
+/*
+ * Free the bits allocated by intel_connector_alloc.
+ * This should only be used after intel_connector_alloc has returned
+ * successfully, and before drm_connector_init returns successfully.
+ * Otherwise the destroy callbacks for the connector and the state should
+ * take care of proper cleanup/free (see intel_connector_destroy).
+ */
+void intel_connector_free(struct intel_connector *connector)
+{
+	kfree(to_intel_digital_connector_state(connector->base.state));
+	kfree(connector);
+}
+
+/*
+ * Connector type independent destroy hook for drm_connector_funcs.
+ */
+void intel_connector_destroy(struct drm_connector *connector)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+
+	kfree(intel_connector->detect_edid);
+
+	if (!IS_ERR_OR_NULL(intel_connector->edid))
+		kfree(intel_connector->edid);
+
+	intel_panel_fini(&intel_connector->panel);
+
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+int intel_connector_register(struct drm_connector *connector)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	int ret;
+
+	ret = intel_backlight_device_register(intel_connector);
+	if (ret)
+		goto err;
+
+	if (i915_inject_load_failure()) {
+		ret = -EFAULT;
+		goto err_backlight;
+	}
+
+	return 0;
+
+err_backlight:
+	intel_backlight_device_unregister(intel_connector);
+err:
+	return ret;
+}
+
+void intel_connector_unregister(struct drm_connector *connector)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+
+	intel_backlight_device_unregister(intel_connector);
+}
+
+void intel_connector_attach_encoder(struct intel_connector *connector,
+				    struct intel_encoder *encoder)
+{
+	connector->encoder = encoder;
+	drm_connector_attach_encoder(&connector->base, &encoder->base);
+}
+
+/*
+ * Simple connector->get_hw_state implementation for encoders that support only
+ * one connector and no cloning and hence the encoder state determines the state
+ * of the connector.
+ */
+bool intel_connector_get_hw_state(struct intel_connector *connector)
+{
+	enum pipe pipe = 0;
+	struct intel_encoder *encoder = connector->encoder;
+
+	return encoder->get_hw_state(encoder, &pipe);
+}
+
+enum pipe intel_connector_get_pipe(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+
+	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+
+	if (!connector->base.state->crtc)
+		return INVALID_PIPE;
+
+	return to_intel_crtc(connector->base.state->crtc)->pipe;
+}
+
 /**
  * intel_connector_update_modes - update connector from edid
  * @connector: DRM connector device to use
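The comment on intel_connector_free() above pins down a narrow window in which it is valid. A minimal sketch of that lifecycle under stated assumptions: my_encoder_init(), the funcs table, and the connector type are placeholders; only the alloc/init/free calls come from the code above.

/* Hypothetical encoder init path showing when each cleanup helper
 * applies; not part of this patch. */
static int my_encoder_init(struct drm_device *dev,
			   const struct drm_connector_funcs *funcs)
{
	struct intel_connector *connector = intel_connector_alloc();

	if (!connector)
		return -ENOMEM;

	if (drm_connector_init(dev, &connector->base, funcs,
			       DRM_MODE_CONNECTOR_Unknown) < 0) {
		/* drm_connector_init() never succeeded, so the destroy
		 * callback will not run: undo the alloc by hand. */
		intel_connector_free(connector);
		return -EINVAL;
	}

	/* From here on, .destroy = intel_connector_destroy owns cleanup. */
	return 0;
}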
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 0c6bf82bb059..68f2fb89ece3 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -354,6 +354,7 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
 	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
 		return false;
 
+	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
 	return true;
 }
 
@@ -368,6 +369,7 @@ static bool pch_crt_compute_config(struct intel_encoder *encoder,
 		return false;
 
 	pipe_config->has_pch_encoder = true;
+	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
 
 	return true;
 }
@@ -389,6 +391,7 @@ static bool hsw_crt_compute_config(struct intel_encoder *encoder,
 		return false;
 
 	pipe_config->has_pch_encoder = true;
+	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
 
 	/* LPT FDI RX only supports 8bpc. */
 	if (HAS_PCH_LPT(dev_priv)) {
@@ -849,12 +852,6 @@ out:
 	return status;
 }
 
-static void intel_crt_destroy(struct drm_connector *connector)
-{
-	drm_connector_cleanup(connector);
-	kfree(connector);
-}
-
 static int intel_crt_get_modes(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
@@ -909,7 +906,7 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.late_register = intel_connector_register,
 	.early_unregister = intel_connector_unregister,
-	.destroy = intel_crt_destroy,
+	.destroy = intel_connector_destroy,
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
 };
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index d48186e9ddad..c1ca6596ff5c 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -34,34 +34,36 @@
  * low-power state and comes back to normal.
  */
 
-#define I915_CSR_ICL "i915/icl_dmc_ver1_07.bin"
-MODULE_FIRMWARE(I915_CSR_ICL);
-#define ICL_CSR_VERSION_REQUIRED	CSR_VERSION(1, 7)
-
-#define I915_CSR_GLK "i915/glk_dmc_ver1_04.bin"
-MODULE_FIRMWARE(I915_CSR_GLK);
-#define GLK_CSR_VERSION_REQUIRED	CSR_VERSION(1, 4)
-
-#define I915_CSR_CNL "i915/cnl_dmc_ver1_07.bin"
-MODULE_FIRMWARE(I915_CSR_CNL);
-#define CNL_CSR_VERSION_REQUIRED	CSR_VERSION(1, 7)
-
-#define I915_CSR_KBL "i915/kbl_dmc_ver1_04.bin"
-MODULE_FIRMWARE(I915_CSR_KBL);
-#define KBL_CSR_VERSION_REQUIRED	CSR_VERSION(1, 4)
-
-#define I915_CSR_SKL "i915/skl_dmc_ver1_27.bin"
-MODULE_FIRMWARE(I915_CSR_SKL);
-#define SKL_CSR_VERSION_REQUIRED	CSR_VERSION(1, 27)
-
-#define I915_CSR_BXT "i915/bxt_dmc_ver1_07.bin"
-MODULE_FIRMWARE(I915_CSR_BXT);
-#define BXT_CSR_VERSION_REQUIRED	CSR_VERSION(1, 7)
-
-
-#define BXT_CSR_MAX_FW_SIZE	0x3000
-#define GLK_CSR_MAX_FW_SIZE	0x4000
-#define ICL_CSR_MAX_FW_SIZE	0x6000
+#define ICL_CSR_PATH			"i915/icl_dmc_ver1_07.bin"
+#define ICL_CSR_VERSION_REQUIRED	CSR_VERSION(1, 7)
+#define ICL_CSR_MAX_FW_SIZE		0x6000
+MODULE_FIRMWARE(ICL_CSR_PATH);
+
+#define CNL_CSR_PATH			"i915/cnl_dmc_ver1_07.bin"
+#define CNL_CSR_VERSION_REQUIRED	CSR_VERSION(1, 7)
+#define CNL_CSR_MAX_FW_SIZE		GLK_CSR_MAX_FW_SIZE
+MODULE_FIRMWARE(CNL_CSR_PATH);
+
+#define GLK_CSR_PATH			"i915/glk_dmc_ver1_04.bin"
+#define GLK_CSR_VERSION_REQUIRED	CSR_VERSION(1, 4)
+#define GLK_CSR_MAX_FW_SIZE		0x4000
+MODULE_FIRMWARE(GLK_CSR_PATH);
+
+#define KBL_CSR_PATH			"i915/kbl_dmc_ver1_04.bin"
+#define KBL_CSR_VERSION_REQUIRED	CSR_VERSION(1, 4)
+#define KBL_CSR_MAX_FW_SIZE		BXT_CSR_MAX_FW_SIZE
+MODULE_FIRMWARE(KBL_CSR_PATH);
+
+#define SKL_CSR_PATH			"i915/skl_dmc_ver1_27.bin"
+#define SKL_CSR_VERSION_REQUIRED	CSR_VERSION(1, 27)
+#define SKL_CSR_MAX_FW_SIZE		BXT_CSR_MAX_FW_SIZE
+MODULE_FIRMWARE(SKL_CSR_PATH);
+
+#define BXT_CSR_PATH			"i915/bxt_dmc_ver1_07.bin"
+#define BXT_CSR_VERSION_REQUIRED	CSR_VERSION(1, 7)
+#define BXT_CSR_MAX_FW_SIZE		0x3000
+MODULE_FIRMWARE(BXT_CSR_PATH);
+
 #define CSR_DEFAULT_FW_OFFSET	0xFFFFFFFF
 
 struct intel_css_header {
@@ -190,6 +192,12 @@ static const struct stepping_info bxt_stepping_info[] = {
 	{'B', '0'}, {'B', '1'}, {'B', '2'}
 };
 
+static const struct stepping_info icl_stepping_info[] = {
+	{'A', '0'}, {'A', '1'}, {'A', '2'},
+	{'B', '0'}, {'B', '2'},
+	{'C', '0'}
+};
+
 static const struct stepping_info no_stepping_info = { '*', '*' };
 
 static const struct stepping_info *
@@ -198,7 +206,10 @@ intel_get_stepping_info(struct drm_i915_private *dev_priv)
 	const struct stepping_info *si;
 	unsigned int size;
 
-	if (IS_SKYLAKE(dev_priv)) {
+	if (IS_ICELAKE(dev_priv)) {
+		size = ARRAY_SIZE(icl_stepping_info);
+		si = icl_stepping_info;
+	} else if (IS_SKYLAKE(dev_priv)) {
 		size = ARRAY_SIZE(skl_stepping_info);
 		si = skl_stepping_info;
 	} else if (IS_BROXTON(dev_priv)) {
@@ -285,10 +296,8 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
 	struct intel_csr *csr = &dev_priv->csr;
 	const struct stepping_info *si = intel_get_stepping_info(dev_priv);
 	uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
-	uint32_t max_fw_size = 0;
 	uint32_t i;
 	uint32_t *dmc_payload;
-	uint32_t required_version;
 
 	if (!fw)
 		return NULL;
@@ -303,38 +312,19 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
 		return NULL;
 	}
 
-	csr->version = css_header->version;
-
-	if (csr->fw_path == i915_modparams.dmc_firmware_path) {
-		/* Bypass version check for firmware override. */
-		required_version = csr->version;
-	} else if (IS_ICELAKE(dev_priv)) {
-		required_version = ICL_CSR_VERSION_REQUIRED;
-	} else if (IS_CANNONLAKE(dev_priv)) {
-		required_version = CNL_CSR_VERSION_REQUIRED;
-	} else if (IS_GEMINILAKE(dev_priv)) {
-		required_version = GLK_CSR_VERSION_REQUIRED;
-	} else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
-		required_version = KBL_CSR_VERSION_REQUIRED;
-	} else if (IS_SKYLAKE(dev_priv)) {
-		required_version = SKL_CSR_VERSION_REQUIRED;
-	} else if (IS_BROXTON(dev_priv)) {
-		required_version = BXT_CSR_VERSION_REQUIRED;
-	} else {
-		MISSING_CASE(INTEL_REVID(dev_priv));
-		required_version = 0;
-	}
-
-	if (csr->version != required_version) {
+	if (csr->required_version &&
+	    css_header->version != csr->required_version) {
 		DRM_INFO("Refusing to load DMC firmware v%u.%u,"
 			 " please use v%u.%u\n",
-			 CSR_VERSION_MAJOR(csr->version),
-			 CSR_VERSION_MINOR(csr->version),
-			 CSR_VERSION_MAJOR(required_version),
-			 CSR_VERSION_MINOR(required_version));
+			 CSR_VERSION_MAJOR(css_header->version),
+			 CSR_VERSION_MINOR(css_header->version),
+			 CSR_VERSION_MAJOR(csr->required_version),
+			 CSR_VERSION_MINOR(csr->required_version));
 		return NULL;
 	}
 
+	csr->version = css_header->version;
+
 	readcount += sizeof(struct intel_css_header);
 
 	/* Extract Package Header information*/
@@ -402,15 +392,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
 
 	/* fw_size is in dwords, so multiplied by 4 to convert into bytes. */
 	nbytes = dmc_header->fw_size * 4;
-	if (INTEL_GEN(dev_priv) >= 11)
-		max_fw_size = ICL_CSR_MAX_FW_SIZE;
-	else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
-		max_fw_size = GLK_CSR_MAX_FW_SIZE;
-	else if (IS_GEN9(dev_priv))
-		max_fw_size = BXT_CSR_MAX_FW_SIZE;
-	else
-		MISSING_CASE(INTEL_REVID(dev_priv));
-	if (nbytes > max_fw_size) {
+	if (nbytes > csr->max_fw_size) {
 		DRM_ERROR("DMC FW too big (%u bytes)\n", nbytes);
 		return NULL;
 	}
@@ -475,27 +457,57 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
 	if (!HAS_CSR(dev_priv))
 		return;
 
-	if (i915_modparams.dmc_firmware_path)
-		csr->fw_path = i915_modparams.dmc_firmware_path;
-	else if (IS_ICELAKE(dev_priv))
-		csr->fw_path = I915_CSR_ICL;
-	else if (IS_CANNONLAKE(dev_priv))
-		csr->fw_path = I915_CSR_CNL;
-	else if (IS_GEMINILAKE(dev_priv))
-		csr->fw_path = I915_CSR_GLK;
-	else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
-		csr->fw_path = I915_CSR_KBL;
-	else if (IS_SKYLAKE(dev_priv))
-		csr->fw_path = I915_CSR_SKL;
-	else if (IS_BROXTON(dev_priv))
-		csr->fw_path = I915_CSR_BXT;
-
 	/*
-	 * Obtain a runtime pm reference, until CSR is loaded,
-	 * to avoid entering runtime-suspend.
+	 * Obtain a runtime pm reference, until CSR is loaded, to avoid entering
+	 * runtime-suspend.
+	 *
+	 * On error, we return with the rpm wakeref held to prevent runtime
+	 * suspend as runtime suspend *requires* a working CSR for whatever
+	 * reason.
 	 */
 	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
 
+	if (IS_ICELAKE(dev_priv)) {
+		csr->fw_path = ICL_CSR_PATH;
+		csr->required_version = ICL_CSR_VERSION_REQUIRED;
+		csr->max_fw_size = ICL_CSR_MAX_FW_SIZE;
+	} else if (IS_CANNONLAKE(dev_priv)) {
+		csr->fw_path = CNL_CSR_PATH;
+		csr->required_version = CNL_CSR_VERSION_REQUIRED;
+		csr->max_fw_size = CNL_CSR_MAX_FW_SIZE;
+	} else if (IS_GEMINILAKE(dev_priv)) {
+		csr->fw_path = GLK_CSR_PATH;
+		csr->required_version = GLK_CSR_VERSION_REQUIRED;
+		csr->max_fw_size = GLK_CSR_MAX_FW_SIZE;
+	} else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
+		csr->fw_path = KBL_CSR_PATH;
+		csr->required_version = KBL_CSR_VERSION_REQUIRED;
+		csr->max_fw_size = KBL_CSR_MAX_FW_SIZE;
+	} else if (IS_SKYLAKE(dev_priv)) {
+		csr->fw_path = SKL_CSR_PATH;
+		csr->required_version = SKL_CSR_VERSION_REQUIRED;
+		csr->max_fw_size = SKL_CSR_MAX_FW_SIZE;
+	} else if (IS_BROXTON(dev_priv)) {
+		csr->fw_path = BXT_CSR_PATH;
+		csr->required_version = BXT_CSR_VERSION_REQUIRED;
+		csr->max_fw_size = BXT_CSR_MAX_FW_SIZE;
+	} else {
+		MISSING_CASE(INTEL_REVID(dev_priv));
+		return;
+	}
+
+	if (i915_modparams.dmc_firmware_path) {
+		if (strlen(i915_modparams.dmc_firmware_path) == 0) {
+			csr->fw_path = NULL;
+			DRM_INFO("Disabling CSR firmware and runtime PM\n");
+			return;
+		}
+
+		csr->fw_path = i915_modparams.dmc_firmware_path;
+		/* Bypass version check for firmware override. */
+		csr->required_version = 0;
+	}
+
 	if (csr->fw_path == NULL) {
 		DRM_DEBUG_KMS("No known CSR firmware for platform, disabling runtime PM\n");
 		WARN_ON(!IS_ALPHA_SUPPORT(INTEL_INFO(dev_priv)));
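The net effect of the rework above is that the parser no longer re-derives the platform; it validates against three fields (fw_path, required_version, max_fw_size) chosen once at init. A standalone sketch of that descriptor pattern, with illustrative names and the major-in-high-half version packing used by CSR_VERSION():

#include <stdint.h>

/* Illustrative stand-ins for csr->fw_path/required_version/max_fw_size. */
struct fw_desc {
	const char *path;
	uint32_t required_version;	/* 0 = accept any (override case) */
	uint32_t max_size;		/* bytes */
};

static int fw_acceptable(const struct fw_desc *d, uint32_t version,
			 uint32_t size)
{
	if (d->required_version && version != d->required_version)
		return 0;	/* wrong version, refuse to load */
	if (size > d->max_size)
		return 0;	/* firmware too big */
	return 1;
}

int main(void)
{
	const struct fw_desc icl = {
		.path = "i915/icl_dmc_ver1_07.bin",
		.required_version = (1u << 16) | 7,	/* CSR_VERSION(1, 7) */
		.max_size = 0x6000,
	};

	return fw_acceptable(&icl, (1u << 16) | 7, 0x5000) ? 0 : 1;
}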
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 5186cd7075f9..040483c96029 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -642,7 +642,7 @@ skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
 static const struct ddi_buf_trans *
 kbl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
 {
-	if (IS_KBL_ULX(dev_priv)) {
+	if (IS_KBL_ULX(dev_priv) || IS_AML_ULX(dev_priv)) {
 		*n_entries = ARRAY_SIZE(kbl_y_ddi_translations_dp);
 		return kbl_y_ddi_translations_dp;
 	} else if (IS_KBL_ULT(dev_priv) || IS_CFL_ULT(dev_priv)) {
@@ -658,7 +658,7 @@ static const struct ddi_buf_trans *
 skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
 {
 	if (dev_priv->vbt.edp.low_vswing) {
-		if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
+		if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv) || IS_AML_ULX(dev_priv)) {
 			*n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
 			return skl_y_ddi_translations_edp;
 		} else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv) ||
@@ -680,7 +680,7 @@ skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
 static const struct ddi_buf_trans *
 skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
 {
-	if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
+	if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv) || IS_AML_ULX(dev_priv)) {
 		*n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi);
 		return skl_y_ddi_translations_hdmi;
 	} else {
@@ -1060,10 +1060,10 @@ static uint32_t hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
 }
 
 static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder,
-				       const struct intel_shared_dpll *pll)
+				       const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
-	int clock = crtc->config->port_clock;
+	const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+	int clock = crtc_state->port_clock;
 	const enum intel_dpll_id id = pll->info->id;
 
 	switch (id) {
@@ -1517,7 +1517,7 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
 	else
 		dotclock = pipe_config->port_clock;
 
-	if (pipe_config->ycbcr420)
+	if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
 		dotclock *= 2;
 
 	if (pipe_config->pixel_multiplier)
@@ -1737,16 +1737,16 @@ static void intel_ddi_clock_get(struct intel_encoder *encoder,
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
-	if (INTEL_GEN(dev_priv) <= 8)
-		hsw_ddi_clock_get(encoder, pipe_config);
-	else if (IS_GEN9_BC(dev_priv))
-		skl_ddi_clock_get(encoder, pipe_config);
-	else if (IS_GEN9_LP(dev_priv))
-		bxt_ddi_clock_get(encoder, pipe_config);
+	if (IS_ICELAKE(dev_priv))
+		icl_ddi_clock_get(encoder, pipe_config);
 	else if (IS_CANNONLAKE(dev_priv))
 		cnl_ddi_clock_get(encoder, pipe_config);
-	else if (IS_ICELAKE(dev_priv))
-		icl_ddi_clock_get(encoder, pipe_config);
+	else if (IS_GEN9_LP(dev_priv))
+		bxt_ddi_clock_get(encoder, pipe_config);
+	else if (IS_GEN9_BC(dev_priv))
+		skl_ddi_clock_get(encoder, pipe_config);
+	else if (INTEL_GEN(dev_priv) <= 8)
+		hsw_ddi_clock_get(encoder, pipe_config);
 }
 
 void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
@@ -1784,6 +1784,13 @@ void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
 		break;
 	}
 
+	/*
+	 * As per DP 1.2 spec section 2.3.4.3 while sending
+	 * YCBCR 444 signals we should program MSA MISC1/0 fields with
+	 * colorspace information. The output colorspace encoding is BT601.
+	 */
+	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
+		temp |= TRANS_MSA_SAMPLING_444 | TRANS_MSA_CLRSP_YCBCR;
 	I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
 }
 
@@ -1998,24 +2005,24 @@ out:
 	return ret;
 }
 
-bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
-			    enum pipe *pipe)
+static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
+					u8 *pipe_mask, bool *is_dp_mst)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum port port = encoder->port;
 	enum pipe p;
 	u32 tmp;
-	bool ret;
+	u8 mst_pipe_mask;
+
+	*pipe_mask = 0;
+	*is_dp_mst = false;
 
 	if (!intel_display_power_get_if_enabled(dev_priv,
 						encoder->power_domain))
-		return false;
-
-	ret = false;
+		return;
 
 	tmp = I915_READ(DDI_BUF_CTL(port));
-
 	if (!(tmp & DDI_BUF_CTL_ENABLE))
 		goto out;
 
@@ -2023,44 +2030,58 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
 		tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
 
 		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
+		default:
+			MISSING_CASE(tmp & TRANS_DDI_EDP_INPUT_MASK);
+			/* fallthrough */
 		case TRANS_DDI_EDP_INPUT_A_ON:
 		case TRANS_DDI_EDP_INPUT_A_ONOFF:
-			*pipe = PIPE_A;
+			*pipe_mask = BIT(PIPE_A);
 			break;
 		case TRANS_DDI_EDP_INPUT_B_ONOFF:
-			*pipe = PIPE_B;
+			*pipe_mask = BIT(PIPE_B);
 			break;
 		case TRANS_DDI_EDP_INPUT_C_ONOFF:
-			*pipe = PIPE_C;
+			*pipe_mask = BIT(PIPE_C);
 			break;
 		}
 
-		ret = true;
-
 		goto out;
 	}
 
+	mst_pipe_mask = 0;
 	for_each_pipe(dev_priv, p) {
-		enum transcoder cpu_transcoder = (enum transcoder) p;
+		enum transcoder cpu_transcoder = (enum transcoder)p;
 
 		tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
 
-		if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(port)) {
-			if ((tmp & TRANS_DDI_MODE_SELECT_MASK) ==
-			    TRANS_DDI_MODE_SELECT_DP_MST)
-				goto out;
-
-			*pipe = p;
-			ret = true;
-
-			goto out;
-		}
+		if ((tmp & TRANS_DDI_PORT_MASK) != TRANS_DDI_SELECT_PORT(port))
+			continue;
+
+		if ((tmp & TRANS_DDI_MODE_SELECT_MASK) ==
+		    TRANS_DDI_MODE_SELECT_DP_MST)
+			mst_pipe_mask |= BIT(p);
+
+		*pipe_mask |= BIT(p);
 	}
 
-	DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port));
+	if (!*pipe_mask)
+		DRM_DEBUG_KMS("No pipe for ddi port %c found\n",
+			      port_name(port));
+
+	if (!mst_pipe_mask && hweight8(*pipe_mask) > 1) {
+		DRM_DEBUG_KMS("Multiple pipes for non DP-MST port %c (pipe_mask %02x)\n",
+			      port_name(port), *pipe_mask);
+		*pipe_mask = BIT(ffs(*pipe_mask) - 1);
+	}
+
+	if (mst_pipe_mask && mst_pipe_mask != *pipe_mask)
+		DRM_DEBUG_KMS("Conflicting MST and non-MST encoders for port %c (pipe_mask %02x mst_pipe_mask %02x)\n",
+			      port_name(port), *pipe_mask, mst_pipe_mask);
+	else
+		*is_dp_mst = mst_pipe_mask;
 
 out:
-	if (ret && IS_GEN9_LP(dev_priv)) {
+	if (*pipe_mask && IS_GEN9_LP(dev_priv)) {
 		tmp = I915_READ(BXT_PHY_CTL(port));
 		if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK |
 			    BXT_PHY_LANE_POWERDOWN_ACK |
@@ -2070,12 +2091,26 @@ out:
 	}
 
 	intel_display_power_put(dev_priv, encoder->power_domain);
+}
 
-	return ret;
+bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
+			    enum pipe *pipe)
+{
+	u8 pipe_mask;
+	bool is_mst;
+
+	intel_ddi_get_encoder_pipes(encoder, &pipe_mask, &is_mst);
+
+	if (is_mst || !pipe_mask)
+		return false;
+
+	*pipe = ffs(pipe_mask) - 1;
+
+	return true;
 }
 
 static inline enum intel_display_power_domain
-intel_ddi_main_link_aux_domain(struct intel_dp *intel_dp)
+intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port)
 {
 	/* CNL+ HW requires corresponding AUX IOs to be powered up for PSR with
 	 * DC states enabled at the same time, while for driver initiated AUX
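The readout rework above replaces a single enum pipe out-parameter with a pipe bitmask, so one register walk can serve both the MST and non-MST paths. A standalone sketch of the bitmask conventions it relies on (BIT(pipe) per pipe, ffs() for the lowest pipe, population count standing in for hweight8()):

#include <stdint.h>
#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	uint8_t pipe_mask = 0x06;	/* hypothetical: pipes B and C */

	if (!pipe_mask)
		return 1;	/* port not driving any pipe */

	/* __builtin_popcount() plays the role of hweight8() here. */
	if (__builtin_popcount(pipe_mask) > 1)
		printf("multiple pipes: mask %02x\n", pipe_mask);

	/* The single-pipe consumer picks the lowest set bit. */
	printf("first pipe: %d\n", ffs(pipe_mask) - 1);	/* prints 1 */

	return 0;
}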
@@ -2089,13 +2124,14 @@ intel_ddi_main_link_aux_domain(struct intel_dp *intel_dp)
 	 * Note that PSR is enabled only on Port A even though this function
 	 * returns the correct domain for other ports too.
 	 */
-	return intel_dp->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A :
-					      intel_dp->aux_power_domain;
+	return dig_port->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A :
+					      intel_aux_power_domain(dig_port);
 }
 
 static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
 				       struct intel_crtc_state *crtc_state)
 {
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_digital_port *dig_port;
 	u64 domains;
 
@@ -2110,12 +2146,13 @@ static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
 	dig_port = enc_to_dig_port(&encoder->base);
 	domains = BIT_ULL(dig_port->ddi_io_power_domain);
 
-	/* AUX power is only needed for (e)DP mode, not for HDMI. */
-	if (intel_crtc_has_dp_encoder(crtc_state)) {
-		struct intel_dp *intel_dp = &dig_port->dp;
-
-		domains |= BIT_ULL(intel_ddi_main_link_aux_domain(intel_dp));
-	}
+	/*
+	 * AUX power is only needed for (e)DP mode, and for HDMI mode on TC
+	 * ports.
+	 */
+	if (intel_crtc_has_dp_encoder(crtc_state) ||
+	    intel_port_is_tc(dev_priv, encoder->port))
+		domains |= BIT_ULL(intel_ddi_main_link_aux_domain(dig_port));
 
 	return domains;
 }
@@ -2813,12 +2850,59 @@ void icl_unmap_plls_to_ports(struct drm_crtc *crtc,
 	}
 }
 
+void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	u32 val;
+	enum port port = encoder->port;
+	bool clk_enabled;
+
+	/*
+	 * In case of DP MST, we sanitize the primary encoder only, not the
+	 * virtual ones.
+	 */
+	if (encoder->type == INTEL_OUTPUT_DP_MST)
+		return;
+
+	val = I915_READ(DPCLKA_CFGCR0_ICL);
+	clk_enabled = !(val & icl_dpclka_cfgcr0_clk_off(dev_priv, port));
+
+	if (!encoder->base.crtc && intel_encoder_is_dp(encoder)) {
+		u8 pipe_mask;
+		bool is_mst;
+
+		intel_ddi_get_encoder_pipes(encoder, &pipe_mask, &is_mst);
+		/*
+		 * In the unlikely case that BIOS enables DP in MST mode, just
+		 * warn since our MST HW readout is incomplete.
+		 */
+		if (WARN_ON(is_mst))
+			return;
+	}
+
+	if (clk_enabled == !!encoder->base.crtc)
+		return;
+
+	/*
+	 * Punt on the case now where clock is disabled, but the encoder is
+	 * enabled, something else is really broken then.
+	 */
+	if (WARN_ON(!clk_enabled))
+		return;
+
+	DRM_NOTE("Port %c is disabled but it has a mapped PLL, unmap it\n",
+		 port_name(port));
+	val |= icl_dpclka_cfgcr0_clk_off(dev_priv, port);
+	I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+}
+
 static void intel_ddi_clk_select(struct intel_encoder *encoder,
-				 const struct intel_shared_dpll *pll)
+				 const struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	enum port port = encoder->port;
 	uint32_t val;
+	const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
 
 	if (WARN_ON(!pll))
 		return;
@@ -2828,7 +2912,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
 	if (IS_ICELAKE(dev_priv)) {
 		if (!intel_port_is_combophy(dev_priv, port))
 			I915_WRITE(DDI_CLK_SEL(port),
-				   icl_pll_to_ddi_pll_sel(encoder, pll));
+				   icl_pll_to_ddi_pll_sel(encoder, crtc_state));
 	} else if (IS_CANNONLAKE(dev_priv)) {
 		/* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */
 		val = I915_READ(DPCLKA_CFGCR0);
@@ -2881,6 +2965,137 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder)
 	}
 }
 
+static void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port)
+{
+	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+	enum port port = dig_port->base.port;
+	enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+	i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) };
+	u32 val;
+	int i;
+
+	if (tc_port == PORT_TC_NONE)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(mg_regs); i++) {
+		val = I915_READ(mg_regs[i]);
+		val |= MG_DP_MODE_CFG_TR2PWR_GATING |
+		       MG_DP_MODE_CFG_TRPWR_GATING |
+		       MG_DP_MODE_CFG_CLNPWR_GATING |
+		       MG_DP_MODE_CFG_DIGPWR_GATING |
+		       MG_DP_MODE_CFG_GAONPWR_GATING;
+		I915_WRITE(mg_regs[i], val);
+	}
+
+	val = I915_READ(MG_MISC_SUS0(tc_port));
+	val |= MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(3) |
+	       MG_MISC_SUS0_CFG_TR2PWR_GATING |
+	       MG_MISC_SUS0_CFG_CL2PWR_GATING |
+	       MG_MISC_SUS0_CFG_GAONPWR_GATING |
+	       MG_MISC_SUS0_CFG_TRPWR_GATING |
+	       MG_MISC_SUS0_CFG_CL1PWR_GATING |
+	       MG_MISC_SUS0_CFG_DGPWR_GATING;
+	I915_WRITE(MG_MISC_SUS0(tc_port), val);
+}
+
+static void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port)
+{
+	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+	enum port port = dig_port->base.port;
+	enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+	i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) };
+	u32 val;
+	int i;
+
+	if (tc_port == PORT_TC_NONE)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(mg_regs); i++) {
+		val = I915_READ(mg_regs[i]);
+		val &= ~(MG_DP_MODE_CFG_TR2PWR_GATING |
+			 MG_DP_MODE_CFG_TRPWR_GATING |
+			 MG_DP_MODE_CFG_CLNPWR_GATING |
+			 MG_DP_MODE_CFG_DIGPWR_GATING |
+			 MG_DP_MODE_CFG_GAONPWR_GATING);
+		I915_WRITE(mg_regs[i], val);
+	}
+
+	val = I915_READ(MG_MISC_SUS0(tc_port));
+	val &= ~(MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK |
+		 MG_MISC_SUS0_CFG_TR2PWR_GATING |
+		 MG_MISC_SUS0_CFG_CL2PWR_GATING |
+		 MG_MISC_SUS0_CFG_GAONPWR_GATING |
+		 MG_MISC_SUS0_CFG_TRPWR_GATING |
+		 MG_MISC_SUS0_CFG_CL1PWR_GATING |
+		 MG_MISC_SUS0_CFG_DGPWR_GATING);
+	I915_WRITE(MG_MISC_SUS0(tc_port), val);
+}
+
+static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port)
+{
+	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+	enum port port = intel_dig_port->base.port;
+	enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+	u32 ln0, ln1, lane_info;
+
+	if (tc_port == PORT_TC_NONE || intel_dig_port->tc_type == TC_PORT_TBT)
+		return;
+
+	ln0 = I915_READ(MG_DP_MODE(port, 0));
+	ln1 = I915_READ(MG_DP_MODE(port, 1));
+
+	switch (intel_dig_port->tc_type) {
+	case TC_PORT_TYPEC:
+		ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
+		ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
+
+		lane_info = (I915_READ(PORT_TX_DFLEXDPSP) &
+			     DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
+			    DP_LANE_ASSIGNMENT_SHIFT(tc_port);
+
+		switch (lane_info) {
+		case 0x1:
+		case 0x4:
+			break;
+		case 0x2:
+			ln0 |= MG_DP_MODE_CFG_DP_X1_MODE;
+			break;
+		case 0x3:
+			ln0 |= MG_DP_MODE_CFG_DP_X1_MODE |
+			       MG_DP_MODE_CFG_DP_X2_MODE;
+			break;
+		case 0x8:
+			ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
+			break;
+		case 0xC:
+			ln1 |= MG_DP_MODE_CFG_DP_X1_MODE |
+			       MG_DP_MODE_CFG_DP_X2_MODE;
+			break;
+		case 0xF:
+			ln0 |= MG_DP_MODE_CFG_DP_X1_MODE |
+			       MG_DP_MODE_CFG_DP_X2_MODE;
+			ln1 |= MG_DP_MODE_CFG_DP_X1_MODE |
+			       MG_DP_MODE_CFG_DP_X2_MODE;
+			break;
+		default:
+			MISSING_CASE(lane_info);
+		}
+		break;
+
+	case TC_PORT_LEGACY:
+		ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE;
+		ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE;
+		break;
+
+	default:
+		MISSING_CASE(intel_dig_port->tc_type);
+		return;
+	}
+
+	I915_WRITE(MG_DP_MODE(port, 0), ln0);
+	I915_WRITE(MG_DP_MODE(port, 1), ln1);
+}
+
 static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
 				    const struct intel_crtc_state *crtc_state,
 				    const struct drm_connector_state *conn_state)
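The inner switch in icl_program_mg_dp_mode() above decodes a 4-bit Type-C lane-assignment nibble: one bit per physical lane, with lanes 0-1 served by the first MG PHY instance and lanes 2-3 by the second. A standalone sketch of just that decode, with illustrative mode flags in place of the MG_DP_MODE_CFG_* bits:

#include <stdint.h>
#include <stdio.h>

#define X1_MODE	(1u << 0)	/* stand-in for MG_DP_MODE_CFG_DP_X1_MODE */
#define X2_MODE	(1u << 1)	/* stand-in for MG_DP_MODE_CFG_DP_X2_MODE */

int main(void)
{
	uint8_t lane_info = 0x3;	/* hypothetical: lanes 0 and 1 */
	uint32_t ln0 = 0, ln1 = 0;

	switch (lane_info) {
	case 0x1: case 0x4: break;	/* one lane, defaults apply */
	case 0x2: ln0 |= X1_MODE; break;
	case 0x3: ln0 |= X1_MODE | X2_MODE; break;
	case 0x8: ln1 |= X1_MODE; break;
	case 0xC: ln1 |= X1_MODE | X2_MODE; break;
	case 0xF: ln0 = ln1 = X1_MODE | X2_MODE; break;
	default: return 1;		/* invalid assignment */
	}

	printf("ln0=%x ln1=%x\n", ln0, ln1);	/* prints ln0=3 ln1=0 */
	return 0;
}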
@@ -2894,19 +3109,16 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
 
 	WARN_ON(is_mst && (port == PORT_A || port == PORT_E));
 
-	intel_display_power_get(dev_priv,
-				intel_ddi_main_link_aux_domain(intel_dp));
-
 	intel_dp_set_link_params(intel_dp, crtc_state->port_clock,
 				 crtc_state->lane_count, is_mst);
 
 	intel_edp_panel_on(intel_dp);
 
-	intel_ddi_clk_select(encoder, crtc_state->shared_dpll);
+	intel_ddi_clk_select(encoder, crtc_state);
 
 	intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
 
-	icl_program_mg_dp_mode(intel_dp);
+	icl_program_mg_dp_mode(dig_port);
 	icl_disable_phy_clock_gating(dig_port);
 
 	if (IS_ICELAKE(dev_priv))
@@ -2944,10 +3156,13 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
 	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
 
 	intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
-	intel_ddi_clk_select(encoder, crtc_state->shared_dpll);
+	intel_ddi_clk_select(encoder, crtc_state);
 
 	intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
 
+	icl_program_mg_dp_mode(dig_port);
+	icl_disable_phy_clock_gating(dig_port);
+
 	if (IS_ICELAKE(dev_priv))
 		icl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
 					level, INTEL_OUTPUT_HDMI);
@@ -2958,12 +3173,14 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
 	else
 		intel_prepare_hdmi_ddi_buffers(encoder, level);
 
+	icl_enable_phy_clock_gating(dig_port);
+
 	if (IS_GEN9_BC(dev_priv))
 		skl_ddi_set_iboost(encoder, level, INTEL_OUTPUT_HDMI);
 
 	intel_ddi_enable_pipe_clock(crtc_state);
 
-	intel_dig_port->set_infoframes(&encoder->base,
+	intel_dig_port->set_infoframes(encoder,
 				       crtc_state->has_infoframe,
 				       crtc_state, conn_state);
 }
@@ -2993,10 +3210,22 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder,
 
 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 
-	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
 		intel_ddi_pre_enable_hdmi(encoder, crtc_state, conn_state);
-	else
+	} else {
+		struct intel_lspcon *lspcon =
+			enc_to_intel_lspcon(&encoder->base);
+
 		intel_ddi_pre_enable_dp(encoder, crtc_state, conn_state);
+		if (lspcon->active) {
+			struct intel_digital_port *dig_port =
+				enc_to_dig_port(&encoder->base);
+
+			dig_port->set_infoframes(encoder,
+						 crtc_state->has_infoframe,
+						 crtc_state, conn_state);
+		}
+	}
 }
 
 static void intel_disable_ddi_buf(struct intel_encoder *encoder)
@@ -3049,9 +3278,6 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
 	intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain);
 
 	intel_ddi_clk_disable(encoder);
-
-	intel_display_power_put(dev_priv,
-				intel_ddi_main_link_aux_domain(intel_dp));
 }
 
 static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
@@ -3062,7 +3288,7 @@ static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
 	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
 	struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
 
-	dig_port->set_infoframes(&encoder->base, false,
+	dig_port->set_infoframes(encoder, false,
 				 old_crtc_state, old_conn_state);
 
 	intel_ddi_disable_pipe_clock(old_crtc_state);
@@ -3282,13 +3508,76 @@ static void intel_disable_ddi(struct intel_encoder *encoder,
 		intel_disable_ddi_dp(encoder, old_crtc_state, old_conn_state);
 }
 
-static void bxt_ddi_pre_pll_enable(struct intel_encoder *encoder,
-				   const struct intel_crtc_state *pipe_config,
-				   const struct drm_connector_state *conn_state)
+static void intel_ddi_set_fia_lane_count(struct intel_encoder *encoder,
+					 const struct intel_crtc_state *pipe_config,
+					 enum port port)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+	enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+	u32 val = I915_READ(PORT_TX_DFLEXDPMLE1);
+	bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
+
+	val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc_port);
+	switch (pipe_config->lane_count) {
+	case 1:
+		val |= (lane_reversal) ? DFLEXDPMLE1_DPMLETC_ML3(tc_port) :
+		DFLEXDPMLE1_DPMLETC_ML0(tc_port);
+		break;
+	case 2:
+		val |= (lane_reversal) ? DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) :
+		DFLEXDPMLE1_DPMLETC_ML1_0(tc_port);
+		break;
+	case 4:
+		val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc_port);
+		break;
+	default:
+		MISSING_CASE(pipe_config->lane_count);
+	}
+	I915_WRITE(PORT_TX_DFLEXDPMLE1, val);
+}
+
+static void
+intel_ddi_pre_pll_enable(struct intel_encoder *encoder,
+			 const struct intel_crtc_state *crtc_state,
+			 const struct drm_connector_state *conn_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+	enum port port = encoder->port;
+
+	if (intel_crtc_has_dp_encoder(crtc_state) ||
+	    intel_port_is_tc(dev_priv, encoder->port))
+		intel_display_power_get(dev_priv,
+					intel_ddi_main_link_aux_domain(dig_port));
+
+	if (IS_GEN9_LP(dev_priv))
+		bxt_ddi_phy_set_lane_optim_mask(encoder,
+						crtc_state->lane_lat_optim_mask);
+
+	/*
+	 * Program the lane count for static/dynamic connections on Type-C ports.
+	 * Skip this step for TBT.
+	 */
+	if (dig_port->tc_type == TC_PORT_UNKNOWN ||
+	    dig_port->tc_type == TC_PORT_TBT)
+		return;
+
+	intel_ddi_set_fia_lane_count(encoder, crtc_state, port);
+}
+
+static void
+intel_ddi_post_pll_disable(struct intel_encoder *encoder,
+			   const struct intel_crtc_state *crtc_state,
+			   const struct drm_connector_state *conn_state)
 {
-	uint8_t mask = pipe_config->lane_lat_optim_mask;
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
 
-	bxt_ddi_phy_set_lane_optim_mask(encoder, mask);
+	if (intel_crtc_has_dp_encoder(crtc_state) ||
+	    intel_port_is_tc(dev_priv, encoder->port))
+		intel_display_power_put(dev_priv,
+					intel_ddi_main_link_aux_domain(dig_port));
 }
 
 void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
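intel_ddi_set_fia_lane_count() above maps the configured lane count onto the DFLEXDPMLE1 lane bits, with lane reversal flipping the assignment from the bottom of the 4-bit field to the top. A standalone sketch of that mapping, using illustrative values for the ML encodings:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the DFLEXDPMLE1_DPMLETC_* encodings. */
#define ML0	0x1	/* lane 0 */
#define ML1_0	0x3	/* lanes 1:0 */
#define ML3	0x8	/* lane 3 */
#define ML3_2	0xc	/* lanes 3:2 */
#define ML3_0	0xf	/* all four lanes */

static uint8_t fia_lane_bits(int lane_count, int lane_reversal)
{
	switch (lane_count) {
	case 1: return lane_reversal ? ML3 : ML0;
	case 2: return lane_reversal ? ML3_2 : ML1_0;
	case 4: return ML3_0;	/* x4 uses the whole field either way */
	default: return 0;	/* the driver flags this as MISSING_CASE */
	}
}

int main(void)
{
	printf("x2 reversed -> %x\n", fia_lane_bits(2, 1));	/* prints c */
	return 0;
}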
@@ -3353,10 +3642,10 @@ static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
 void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
 					 struct intel_crtc_state *crtc_state)
 {
-	if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000)
-		crtc_state->min_voltage_level = 2;
-	else if (IS_ICELAKE(dev_priv) && crtc_state->port_clock > 594000)
+	if (IS_ICELAKE(dev_priv) && crtc_state->port_clock > 594000)
 		crtc_state->min_voltage_level = 1;
+	else if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000)
+		crtc_state->min_voltage_level = 2;
 }
 
 void intel_ddi_get_config(struct intel_encoder *encoder,
@@ -3406,7 +3695,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
 		pipe_config->has_hdmi_sink = true;
 		intel_dig_port = enc_to_dig_port(&encoder->base);
 
-		if (intel_dig_port->infoframe_enabled(&encoder->base, pipe_config))
+		if (intel_dig_port->infoframe_enabled(encoder, pipe_config))
 			pipe_config->has_infoframe = true;
 
 		if ((temp & TRANS_DDI_HDMI_SCRAMBLING_MASK) ==
@@ -3767,6 +4056,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 	struct intel_encoder *intel_encoder;
 	struct drm_encoder *encoder;
 	bool init_hdmi, init_dp, init_lspcon = false;
+	enum pipe pipe;
 
 
 	init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
@@ -3805,8 +4095,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 	intel_encoder->compute_output_type = intel_ddi_compute_output_type;
 	intel_encoder->compute_config = intel_ddi_compute_config;
 	intel_encoder->enable = intel_enable_ddi;
-	if (IS_GEN9_LP(dev_priv))
-		intel_encoder->pre_pll_enable = bxt_ddi_pre_pll_enable;
+	intel_encoder->pre_pll_enable = intel_ddi_pre_pll_enable;
+	intel_encoder->post_pll_disable = intel_ddi_post_pll_disable;
 	intel_encoder->pre_enable = intel_ddi_pre_enable;
 	intel_encoder->disable = intel_disable_ddi;
 	intel_encoder->post_disable = intel_ddi_post_disable;
@@ -3817,8 +4107,9 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 	intel_encoder->type = INTEL_OUTPUT_DDI;
 	intel_encoder->power_domain = intel_port_to_power_domain(port);
 	intel_encoder->port = port;
-	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 	intel_encoder->cloneable = 0;
+	for_each_pipe(dev_priv, pipe)
+		intel_encoder->crtc_mask |= BIT(pipe);
 
 	if (INTEL_GEN(dev_priv) >= 11)
 		intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
@@ -3828,6 +4119,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 			(DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES);
 	intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
 	intel_dig_port->max_lanes = intel_ddi_max_lanes(intel_dig_port);
+	intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
 
 	switch (port) {
 	case PORT_A:
@@ -3858,8 +4150,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 		MISSING_CASE(port);
 	}
 
-	intel_infoframe_init(intel_dig_port);
-
 	if (init_dp) {
 		if (!intel_ddi_init_dp_connector(intel_dig_port))
 			goto err;
@@ -3888,6 +4178,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 			  port_name(port));
 	}
 
+	intel_infoframe_init(intel_dig_port);
 	return;
 
 err:
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 01fa98299bae..ceecb5bd5226 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -744,27 +744,30 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
744 if (INTEL_GEN(dev_priv) >= 10) { 744 if (INTEL_GEN(dev_priv) >= 10) {
745 for_each_pipe(dev_priv, pipe) 745 for_each_pipe(dev_priv, pipe)
746 info->num_scalers[pipe] = 2; 746 info->num_scalers[pipe] = 2;
747 } else if (INTEL_GEN(dev_priv) == 9) { 747 } else if (IS_GEN9(dev_priv)) {
748 info->num_scalers[PIPE_A] = 2; 748 info->num_scalers[PIPE_A] = 2;
749 info->num_scalers[PIPE_B] = 2; 749 info->num_scalers[PIPE_B] = 2;
750 info->num_scalers[PIPE_C] = 1; 750 info->num_scalers[PIPE_C] = 1;
751 } 751 }
752 752
753 BUILD_BUG_ON(I915_NUM_ENGINES > 753 BUILD_BUG_ON(I915_NUM_ENGINES > BITS_PER_TYPE(intel_ring_mask_t));
754 sizeof(intel_ring_mask_t) * BITS_PER_BYTE);
755 754
756 /* 755 if (IS_GEN11(dev_priv))
757 * Skylake and Broxton currently don't expose the topmost plane as its 756 for_each_pipe(dev_priv, pipe)
758 * use is exclusive with the legacy cursor and we only want to expose 757 info->num_sprites[pipe] = 6;
759 * one of those, not both. Until we can safely expose the topmost plane 758 else if (IS_GEN10(dev_priv) || IS_GEMINILAKE(dev_priv))
760 * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
761 * we don't expose the topmost plane at all to prevent ABI breakage
762 * down the line.
763 */
764 if (IS_GEN10(dev_priv) || IS_GEMINILAKE(dev_priv))
765 for_each_pipe(dev_priv, pipe) 759 for_each_pipe(dev_priv, pipe)
766 info->num_sprites[pipe] = 3; 760 info->num_sprites[pipe] = 3;
767 else if (IS_BROXTON(dev_priv)) { 761 else if (IS_BROXTON(dev_priv)) {
762 /*
763 * Skylake and Broxton currently don't expose the topmost plane as its
764 * use is exclusive with the legacy cursor and we only want to expose
765 * one of those, not both. Until we can safely expose the topmost plane
766 * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
767 * we don't expose the topmost plane at all to prevent ABI breakage
768 * down the line.
769 */
770
768 info->num_sprites[PIPE_A] = 2; 771 info->num_sprites[PIPE_A] = 2;
769 info->num_sprites[PIPE_B] = 2; 772 info->num_sprites[PIPE_B] = 2;
770 info->num_sprites[PIPE_C] = 1; 773 info->num_sprites[PIPE_C] = 1;
@@ -844,13 +847,18 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
844 cherryview_sseu_info_init(dev_priv); 847 cherryview_sseu_info_init(dev_priv);
845 else if (IS_BROADWELL(dev_priv)) 848 else if (IS_BROADWELL(dev_priv))
846 broadwell_sseu_info_init(dev_priv); 849 broadwell_sseu_info_init(dev_priv);
847 else if (INTEL_GEN(dev_priv) == 9) 850 else if (IS_GEN9(dev_priv))
848 gen9_sseu_info_init(dev_priv); 851 gen9_sseu_info_init(dev_priv);
849 else if (INTEL_GEN(dev_priv) == 10) 852 else if (IS_GEN10(dev_priv))
850 gen10_sseu_info_init(dev_priv); 853 gen10_sseu_info_init(dev_priv);
851 else if (INTEL_GEN(dev_priv) >= 11) 854 else if (INTEL_GEN(dev_priv) >= 11)
852 gen11_sseu_info_init(dev_priv); 855 gen11_sseu_info_init(dev_priv);
853 856
857 if (IS_GEN6(dev_priv) && intel_vtd_active()) {
858 DRM_INFO("Disabling ppGTT for VT-d support\n");
859 info->ppgtt = INTEL_PPGTT_NONE;
860 }
861
854 /* Initialize command stream timestamp frequency */ 862 /* Initialize command stream timestamp frequency */
855 info->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv); 863 info->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);
856} 864}
@@ -872,40 +880,37 @@ void intel_driver_caps_print(const struct intel_driver_caps *caps,
872void intel_device_info_init_mmio(struct drm_i915_private *dev_priv) 880void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
873{ 881{
874 struct intel_device_info *info = mkwrite_device_info(dev_priv); 882 struct intel_device_info *info = mkwrite_device_info(dev_priv);
875 u8 vdbox_disable, vebox_disable;
876 u32 media_fuse; 883 u32 media_fuse;
877 int i; 884 unsigned int i;
878 885
879 if (INTEL_GEN(dev_priv) < 11) 886 if (INTEL_GEN(dev_priv) < 11)
880 return; 887 return;
881 888
882 media_fuse = I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE); 889 media_fuse = ~I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);
883 890
884 vdbox_disable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK; 891 info->vdbox_enable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
885 vebox_disable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >> 892 info->vebox_enable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
886 GEN11_GT_VEBOX_DISABLE_SHIFT; 893 GEN11_GT_VEBOX_DISABLE_SHIFT;
887 894
888 DRM_DEBUG_DRIVER("vdbox disable: %04x\n", vdbox_disable); 895 DRM_DEBUG_DRIVER("vdbox enable: %04x\n", info->vdbox_enable);
889 for (i = 0; i < I915_MAX_VCS; i++) { 896 for (i = 0; i < I915_MAX_VCS; i++) {
890 if (!HAS_ENGINE(dev_priv, _VCS(i))) 897 if (!HAS_ENGINE(dev_priv, _VCS(i)))
891 continue; 898 continue;
892 899
893 if (!(BIT(i) & vdbox_disable)) 900 if (!(BIT(i) & info->vdbox_enable)) {
894 continue; 901 info->ring_mask &= ~ENGINE_MASK(_VCS(i));
895 902 DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
896 info->ring_mask &= ~ENGINE_MASK(_VCS(i)); 903 }
897 DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
898 } 904 }
899 905
900 DRM_DEBUG_DRIVER("vebox disable: %04x\n", vebox_disable); 906 DRM_DEBUG_DRIVER("vebox enable: %04x\n", info->vebox_enable);
901 for (i = 0; i < I915_MAX_VECS; i++) { 907 for (i = 0; i < I915_MAX_VECS; i++) {
902 if (!HAS_ENGINE(dev_priv, _VECS(i))) 908 if (!HAS_ENGINE(dev_priv, _VECS(i)))
903 continue; 909 continue;
904 910
905 if (!(BIT(i) & vebox_disable)) 911 if (!(BIT(i) & info->vebox_enable)) {
906 continue; 912 info->ring_mask &= ~ENGINE_MASK(_VECS(i));
907 913 DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
908 info->ring_mask &= ~ENGINE_MASK(_VECS(i)); 914 }
909 DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
910 } 915 }
911} 916}
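
GEN11_GT_VEBOX_VDBOX_DISABLE reports which engines are fused off, so the rewrite above inverts the register once, stores positive vdbox/vebox enable masks in the device info, and prunes any engine whose enable bit is clear from ring_mask. A self-contained model of the vdbox decode (the fuse value and four-engine count are illustrative; the vebox path is identical):

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_VCS 4

    int main(void)
    {
            /* Hypothetical fuse: disable bits set for engines 1 and 3. */
            uint32_t fuse_disable = (1u << 1) | (1u << 3);
            uint8_t vdbox_enable = (uint8_t)~fuse_disable & 0xf;
            uint32_t ring_mask = 0xf; /* assume four VCS engines present */
            unsigned int i;

            printf("vdbox enable: %02x\n", vdbox_enable);
            for (i = 0; i < MAX_VCS; i++) {
                    if (!(vdbox_enable & (1u << i))) {
                            ring_mask &= ~(1u << i);
                            printf("vcs%u fused off\n", i);
                    }
            }
            printf("ring_mask: %#x\n", ring_mask); /* 0x5 */
            return 0;
    }
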
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 6eecd64734d5..88f97210dc49 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -25,6 +25,8 @@
25#ifndef _INTEL_DEVICE_INFO_H_ 25#ifndef _INTEL_DEVICE_INFO_H_
26#define _INTEL_DEVICE_INFO_H_ 26#define _INTEL_DEVICE_INFO_H_
27 27
28#include <uapi/drm/i915_drm.h>
29
28#include "intel_display.h" 30#include "intel_display.h"
29 31
30struct drm_printer; 32struct drm_printer;
@@ -74,21 +76,25 @@ enum intel_platform {
74 INTEL_MAX_PLATFORMS 76 INTEL_MAX_PLATFORMS
75}; 77};
76 78
79enum intel_ppgtt {
80 INTEL_PPGTT_NONE = I915_GEM_PPGTT_NONE,
81 INTEL_PPGTT_ALIASING = I915_GEM_PPGTT_ALIASING,
82 INTEL_PPGTT_FULL = I915_GEM_PPGTT_FULL,
83 INTEL_PPGTT_FULL_4LVL,
84};
85
77#define DEV_INFO_FOR_EACH_FLAG(func) \ 86#define DEV_INFO_FOR_EACH_FLAG(func) \
78 func(is_mobile); \ 87 func(is_mobile); \
79 func(is_lp); \ 88 func(is_lp); \
80 func(is_alpha_support); \ 89 func(is_alpha_support); \
81 /* Keep has_* in alphabetical order */ \ 90 /* Keep has_* in alphabetical order */ \
82 func(has_64bit_reloc); \ 91 func(has_64bit_reloc); \
83 func(has_aliasing_ppgtt); \
84 func(has_csr); \ 92 func(has_csr); \
85 func(has_ddi); \ 93 func(has_ddi); \
86 func(has_dp_mst); \ 94 func(has_dp_mst); \
87 func(has_reset_engine); \ 95 func(has_reset_engine); \
88 func(has_fbc); \ 96 func(has_fbc); \
89 func(has_fpga_dbg); \ 97 func(has_fpga_dbg); \
90 func(has_full_ppgtt); \
91 func(has_full_48bit_ppgtt); \
92 func(has_gmch_display); \ 98 func(has_gmch_display); \
93 func(has_guc); \ 99 func(has_guc); \
94 func(has_guc_ct); \ 100 func(has_guc_ct); \
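
Replacing has_aliasing_ppgtt/has_full_ppgtt/has_full_48bit_ppgtt with one ordered enum means nonsensical combinations (full ppGTT without aliasing, say) can no longer be expressed, and capability checks reduce to comparisons. A sketch of how the removed boolean flags map onto the new enum, under the assumption that the values stay in ascending capability order as declared above:

    #include <stdbool.h>

    enum intel_ppgtt {
            INTEL_PPGTT_NONE,
            INTEL_PPGTT_ALIASING,
            INTEL_PPGTT_FULL,
            INTEL_PPGTT_FULL_4LVL,
    };

    /* The removed boolean flags become ordered comparisons. */
    static inline bool has_aliasing_ppgtt(enum intel_ppgtt p)
    {
            return p >= INTEL_PPGTT_ALIASING;
    }

    static inline bool has_full_ppgtt(enum intel_ppgtt p)
    {
            return p >= INTEL_PPGTT_FULL;
    }

    static inline bool has_full_48bit_ppgtt(enum intel_ppgtt p)
    {
            return p >= INTEL_PPGTT_FULL_4LVL;
    }
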
@@ -118,7 +124,7 @@ enum intel_platform {
118 124
119struct sseu_dev_info { 125struct sseu_dev_info {
120 u8 slice_mask; 126 u8 slice_mask;
121 u8 subslice_mask[GEN_MAX_SUBSLICES]; 127 u8 subslice_mask[GEN_MAX_SLICES];
122 u16 eu_total; 128 u16 eu_total;
123 u8 eu_per_subslice; 129 u8 eu_per_subslice;
124 u8 min_eu_in_pool; 130 u8 min_eu_in_pool;
@@ -154,6 +160,7 @@ struct intel_device_info {
154 enum intel_platform platform; 160 enum intel_platform platform;
155 u32 platform_mask; 161 u32 platform_mask;
156 162
163 enum intel_ppgtt ppgtt;
157 unsigned int page_sizes; /* page sizes supported by the HW */ 164 unsigned int page_sizes; /* page sizes supported by the HW */
158 165
159 u32 display_mmio_offset; 166 u32 display_mmio_offset;
@@ -170,7 +177,6 @@ struct intel_device_info {
170 /* Register offsets for the various display pipes and transcoders */ 177 /* Register offsets for the various display pipes and transcoders */
171 int pipe_offsets[I915_MAX_TRANSCODERS]; 178 int pipe_offsets[I915_MAX_TRANSCODERS];
172 int trans_offsets[I915_MAX_TRANSCODERS]; 179 int trans_offsets[I915_MAX_TRANSCODERS];
173 int palette_offsets[I915_MAX_PIPES];
174 int cursor_offsets[I915_MAX_PIPES]; 180 int cursor_offsets[I915_MAX_PIPES];
175 181
176 /* Slice/subslice/EU info */ 182 /* Slice/subslice/EU info */
@@ -178,6 +184,10 @@ struct intel_device_info {
178 184
179 u32 cs_timestamp_frequency_khz; 185 u32 cs_timestamp_frequency_khz;
180 186
187 /* Enabled (not fused off) media engine bitmasks. */
188 u8 vdbox_enable;
189 u8 vebox_enable;
190
181 struct color_luts { 191 struct color_luts {
182 u16 degamma_lut_size; 192 u16 degamma_lut_size;
183 u16 gamma_lut_size; 193 u16 gamma_lut_size;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index a54843fdeb2f..132e978227fb 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -24,7 +24,6 @@
24 * Eric Anholt <eric@anholt.net> 24 * Eric Anholt <eric@anholt.net>
25 */ 25 */
26 26
27#include <linux/dmi.h>
28#include <linux/module.h> 27#include <linux/module.h>
29#include <linux/input.h> 28#include <linux/input.h>
30#include <linux/i2c.h> 29#include <linux/i2c.h>
@@ -74,55 +73,6 @@ static const uint64_t i9xx_format_modifiers[] = {
74 DRM_FORMAT_MOD_INVALID 73 DRM_FORMAT_MOD_INVALID
75}; 74};
76 75
77static const uint32_t skl_primary_formats[] = {
78 DRM_FORMAT_C8,
79 DRM_FORMAT_RGB565,
80 DRM_FORMAT_XRGB8888,
81 DRM_FORMAT_XBGR8888,
82 DRM_FORMAT_ARGB8888,
83 DRM_FORMAT_ABGR8888,
84 DRM_FORMAT_XRGB2101010,
85 DRM_FORMAT_XBGR2101010,
86 DRM_FORMAT_YUYV,
87 DRM_FORMAT_YVYU,
88 DRM_FORMAT_UYVY,
89 DRM_FORMAT_VYUY,
90};
91
92static const uint32_t skl_pri_planar_formats[] = {
93 DRM_FORMAT_C8,
94 DRM_FORMAT_RGB565,
95 DRM_FORMAT_XRGB8888,
96 DRM_FORMAT_XBGR8888,
97 DRM_FORMAT_ARGB8888,
98 DRM_FORMAT_ABGR8888,
99 DRM_FORMAT_XRGB2101010,
100 DRM_FORMAT_XBGR2101010,
101 DRM_FORMAT_YUYV,
102 DRM_FORMAT_YVYU,
103 DRM_FORMAT_UYVY,
104 DRM_FORMAT_VYUY,
105 DRM_FORMAT_NV12,
106};
107
108static const uint64_t skl_format_modifiers_noccs[] = {
109 I915_FORMAT_MOD_Yf_TILED,
110 I915_FORMAT_MOD_Y_TILED,
111 I915_FORMAT_MOD_X_TILED,
112 DRM_FORMAT_MOD_LINEAR,
113 DRM_FORMAT_MOD_INVALID
114};
115
116static const uint64_t skl_format_modifiers_ccs[] = {
117 I915_FORMAT_MOD_Yf_TILED_CCS,
118 I915_FORMAT_MOD_Y_TILED_CCS,
119 I915_FORMAT_MOD_Yf_TILED,
120 I915_FORMAT_MOD_Y_TILED,
121 I915_FORMAT_MOD_X_TILED,
122 DRM_FORMAT_MOD_LINEAR,
123 DRM_FORMAT_MOD_INVALID
124};
125
126/* Cursor formats */ 76/* Cursor formats */
127static const uint32_t intel_cursor_formats[] = { 77static const uint32_t intel_cursor_formats[] = {
128 DRM_FORMAT_ARGB8888, 78 DRM_FORMAT_ARGB8888,
@@ -141,15 +91,15 @@ static void ironlake_pch_clock_get(struct intel_crtc *crtc,
141static int intel_framebuffer_init(struct intel_framebuffer *ifb, 91static int intel_framebuffer_init(struct intel_framebuffer *ifb,
142 struct drm_i915_gem_object *obj, 92 struct drm_i915_gem_object *obj,
143 struct drm_mode_fb_cmd2 *mode_cmd); 93 struct drm_mode_fb_cmd2 *mode_cmd);
144static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc); 94static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
145static void intel_set_pipe_timings(struct intel_crtc *intel_crtc); 95static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
146static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc); 96static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
147static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc, 97 const struct intel_link_m_n *m_n,
148 struct intel_link_m_n *m_n, 98 const struct intel_link_m_n *m2_n2);
149 struct intel_link_m_n *m2_n2); 99static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
150static void ironlake_set_pipeconf(struct drm_crtc *crtc); 100static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
151static void haswell_set_pipeconf(struct drm_crtc *crtc); 101static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
152static void haswell_set_pipemisc(struct drm_crtc *crtc); 102static void haswell_set_pipemisc(const struct intel_crtc_state *crtc_state);
153static void vlv_prepare_pll(struct intel_crtc *crtc, 103static void vlv_prepare_pll(struct intel_crtc *crtc,
154 const struct intel_crtc_state *pipe_config); 104 const struct intel_crtc_state *pipe_config);
155static void chv_prepare_pll(struct intel_crtc *crtc, 105static void chv_prepare_pll(struct intel_crtc *crtc,
@@ -158,9 +108,9 @@ static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
158static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *); 108static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
159static void intel_crtc_init_scalers(struct intel_crtc *crtc, 109static void intel_crtc_init_scalers(struct intel_crtc *crtc,
160 struct intel_crtc_state *crtc_state); 110 struct intel_crtc_state *crtc_state);
161static void skylake_pfit_enable(struct intel_crtc *crtc); 111static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
162static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force); 112static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
163static void ironlake_pfit_enable(struct intel_crtc *crtc); 113static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
164static void intel_modeset_setup_hw_state(struct drm_device *dev, 114static void intel_modeset_setup_hw_state(struct drm_device *dev,
165 struct drm_modeset_acquire_ctx *ctx); 115 struct drm_modeset_acquire_ctx *ctx);
166static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc); 116static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
@@ -506,23 +456,8 @@ static const struct intel_limit intel_limits_bxt = {
506}; 456};
507 457
508static void 458static void
509skl_wa_528(struct drm_i915_private *dev_priv, int pipe, bool enable)
510{
511 if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
512 return;
513
514 if (enable)
515 I915_WRITE(CHICKEN_PIPESL_1(pipe), HSW_FBCQ_DIS);
516 else
517 I915_WRITE(CHICKEN_PIPESL_1(pipe), 0);
518}
519
520static void
521skl_wa_clkgate(struct drm_i915_private *dev_priv, int pipe, bool enable) 459skl_wa_clkgate(struct drm_i915_private *dev_priv, int pipe, bool enable)
522{ 460{
523 if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
524 return;
525
526 if (enable) 461 if (enable)
527 I915_WRITE(CLKGATE_DIS_PSL(pipe), 462 I915_WRITE(CLKGATE_DIS_PSL(pipe),
528 DUPS1_GATING_DIS | DUPS2_GATING_DIS); 463 DUPS1_GATING_DIS | DUPS2_GATING_DIS);
@@ -1381,6 +1316,7 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1381 "PCH LVDS enabled on transcoder %c, should be disabled\n", 1316 "PCH LVDS enabled on transcoder %c, should be disabled\n",
1382 pipe_name(pipe)); 1317 pipe_name(pipe));
1383 1318
1319 /* PCH SDVOB is multiplexed with HDMIB */
1384 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB); 1320 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
1385 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC); 1321 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
1386 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID); 1322 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
@@ -1565,14 +1501,15 @@ static void i9xx_enable_pll(struct intel_crtc *crtc,
1565 } 1501 }
1566} 1502}
1567 1503
1568static void i9xx_disable_pll(struct intel_crtc *crtc) 1504static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
1569{ 1505{
1506 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1570 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1507 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1571 enum pipe pipe = crtc->pipe; 1508 enum pipe pipe = crtc->pipe;
1572 1509
1573 /* Disable DVO 2x clock on both PLLs if necessary */ 1510 /* Disable DVO 2x clock on both PLLs if necessary */
1574 if (IS_I830(dev_priv) && 1511 if (IS_I830(dev_priv) &&
1575 intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) && 1512 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO) &&
1576 !intel_num_dvo_pipes(dev_priv)) { 1513 !intel_num_dvo_pipes(dev_priv)) {
1577 I915_WRITE(DPLL(PIPE_B), 1514 I915_WRITE(DPLL(PIPE_B),
1578 I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE); 1515 I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
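
i9xx_disable_pll() is the first of many helpers in this series converted from taking a struct intel_crtc to taking a const crtc state: crtc and dev_priv are re-derived from the state's back-pointer, and every hardware decision reads the explicitly passed state instead of the mutable crtc->config. A stripped-down model of the pattern, with stand-in types rather than the driver's:

    struct crtc {
            int pipe;
    };

    struct crtc_state {
            struct crtc *crtc;      /* back-pointer, like base.crtc */
            int has_dvo;
    };

    static void disable_pll(const struct crtc_state *state)
    {
            struct crtc *crtc = state->crtc;
            int pipe = crtc->pipe;

            /* Program hardware from 'state' only, never crtc->config. */
            (void)pipe;
            (void)state->has_dvo;
    }

    int main(void)
    {
            struct crtc c = { .pipe = 0 };
            struct crtc_state s = { .crtc = &c, .has_dvo = 0 };

            disable_pll(&s);
            return 0;
    }

This also lets callers pass either the old or the new state explicitly, which matters for disable paths that must consult the outgoing configuration.
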
@@ -1666,16 +1603,16 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1666 I915_READ(dpll_reg) & port_mask, expected_mask); 1603 I915_READ(dpll_reg) & port_mask, expected_mask);
1667} 1604}
1668 1605
1669static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, 1606static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
1670 enum pipe pipe)
1671{ 1607{
1672 struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv, 1608 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1673 pipe); 1609 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1610 enum pipe pipe = crtc->pipe;
1674 i915_reg_t reg; 1611 i915_reg_t reg;
1675 uint32_t val, pipeconf_val; 1612 uint32_t val, pipeconf_val;
1676 1613
1677 /* Make sure PCH DPLL is enabled */ 1614 /* Make sure PCH DPLL is enabled */
1678 assert_shared_dpll_enabled(dev_priv, intel_crtc->config->shared_dpll); 1615 assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);
1679 1616
1680 /* FDI must be feeding us bits for PCH ports */ 1617 /* FDI must be feeding us bits for PCH ports */
1681 assert_fdi_tx_enabled(dev_priv, pipe); 1618 assert_fdi_tx_enabled(dev_priv, pipe);
@@ -1701,7 +1638,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1701 * here for both 8bpc and 12bpc. 1638 * here for both 8bpc and 12bpc.
1702 */ 1639 */
1703 val &= ~PIPECONF_BPC_MASK; 1640 val &= ~PIPECONF_BPC_MASK;
1704 if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_HDMI)) 1641 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1705 val |= PIPECONF_8BPC; 1642 val |= PIPECONF_8BPC;
1706 else 1643 else
1707 val |= pipeconf_val & PIPECONF_BPC_MASK; 1644 val |= pipeconf_val & PIPECONF_BPC_MASK;
@@ -1710,7 +1647,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1710 val &= ~TRANS_INTERLACE_MASK; 1647 val &= ~TRANS_INTERLACE_MASK;
1711 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) 1648 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
1712 if (HAS_PCH_IBX(dev_priv) && 1649 if (HAS_PCH_IBX(dev_priv) &&
1713 intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO)) 1650 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
1714 val |= TRANS_LEGACY_INTERLACED_ILK; 1651 val |= TRANS_LEGACY_INTERLACED_ILK;
1715 else 1652 else
1716 val |= TRANS_INTERLACED; 1653 val |= TRANS_INTERLACED;
@@ -2254,6 +2191,11 @@ static u32 intel_adjust_tile_offset(int *x, int *y,
2254 return new_offset; 2191 return new_offset;
2255} 2192}
2256 2193
2194static bool is_surface_linear(u64 modifier, int color_plane)
2195{
2196 return modifier == DRM_FORMAT_MOD_LINEAR;
2197}
2198
2257static u32 intel_adjust_aligned_offset(int *x, int *y, 2199static u32 intel_adjust_aligned_offset(int *x, int *y,
2258 const struct drm_framebuffer *fb, 2200 const struct drm_framebuffer *fb,
2259 int color_plane, 2201 int color_plane,
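
The new is_surface_linear() helper replaces three open-coded fb->modifier != DRM_FORMAT_MOD_LINEAR tests below. Its color_plane parameter is unused for now, but it leaves room for format modifiers where only some color planes are tiled; a hedged sketch of that eventual shape (the linear-CCS case is a hypothetical illustration, not current behaviour):

    #include <stdbool.h>
    #include <stdint.h>

    #define MOD_LINEAR 0ULL /* stands in for DRM_FORMAT_MOD_LINEAR */

    static bool is_surface_linear(uint64_t modifier, int color_plane)
    {
            (void)color_plane; /* unused until such a modifier exists */

            if (modifier == MOD_LINEAR)
                    return true;

            /*
             * Hypothetical future case: a modifier whose auxiliary
             * (CCS) plane is linear while the main surface stays
             * tiled would return 'color_plane == 1' here.
             */
            return false;
    }

    int main(void)
    {
            return is_surface_linear(MOD_LINEAR, 0) ? 0 : 1;
    }
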
@@ -2266,7 +2208,7 @@ static u32 intel_adjust_aligned_offset(int *x, int *y,
2266 2208
2267 WARN_ON(new_offset > old_offset); 2209 WARN_ON(new_offset > old_offset);
2268 2210
2269 if (fb->modifier != DRM_FORMAT_MOD_LINEAR) { 2211 if (!is_surface_linear(fb->modifier, color_plane)) {
2270 unsigned int tile_size, tile_width, tile_height; 2212 unsigned int tile_size, tile_width, tile_height;
2271 unsigned int pitch_tiles; 2213 unsigned int pitch_tiles;
2272 2214
@@ -2330,14 +2272,13 @@ static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
2330 unsigned int rotation, 2272 unsigned int rotation,
2331 u32 alignment) 2273 u32 alignment)
2332{ 2274{
2333 uint64_t fb_modifier = fb->modifier;
2334 unsigned int cpp = fb->format->cpp[color_plane]; 2275 unsigned int cpp = fb->format->cpp[color_plane];
2335 u32 offset, offset_aligned; 2276 u32 offset, offset_aligned;
2336 2277
2337 if (alignment) 2278 if (alignment)
2338 alignment--; 2279 alignment--;
2339 2280
2340 if (fb_modifier != DRM_FORMAT_MOD_LINEAR) { 2281 if (!is_surface_linear(fb->modifier, color_plane)) {
2341 unsigned int tile_size, tile_width, tile_height; 2282 unsigned int tile_size, tile_width, tile_height;
2342 unsigned int tile_rows, tiles, pitch_tiles; 2283 unsigned int tile_rows, tiles, pitch_tiles;
2343 2284
@@ -2574,7 +2515,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
2574 tile_size); 2515 tile_size);
2575 offset /= tile_size; 2516 offset /= tile_size;
2576 2517
2577 if (fb->modifier != DRM_FORMAT_MOD_LINEAR) { 2518 if (!is_surface_linear(fb->modifier, i)) {
2578 unsigned int tile_width, tile_height; 2519 unsigned int tile_width, tile_height;
2579 unsigned int pitch_tiles; 2520 unsigned int pitch_tiles;
2580 struct drm_rect r; 2521 struct drm_rect r;
@@ -2788,10 +2729,6 @@ intel_set_plane_visible(struct intel_crtc_state *crtc_state,
2788 crtc_state->base.plane_mask |= drm_plane_mask(&plane->base); 2729 crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
2789 else 2730 else
2790 crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base); 2731 crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
2791
2792 DRM_DEBUG_KMS("%s active planes 0x%x\n",
2793 crtc_state->base.crtc->name,
2794 crtc_state->active_planes);
2795} 2732}
2796 2733
2797static void fixup_active_planes(struct intel_crtc_state *crtc_state) 2734static void fixup_active_planes(struct intel_crtc_state *crtc_state)
@@ -2819,6 +2756,10 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
2819 struct intel_plane_state *plane_state = 2756 struct intel_plane_state *plane_state =
2820 to_intel_plane_state(plane->base.state); 2757 to_intel_plane_state(plane->base.state);
2821 2758
2759 DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
2760 plane->base.base.id, plane->base.name,
2761 crtc->base.base.id, crtc->base.name);
2762
2822 intel_set_plane_visible(crtc_state, plane_state, false); 2763 intel_set_plane_visible(crtc_state, plane_state, false);
2823 fixup_active_planes(crtc_state); 2764 fixup_active_planes(crtc_state);
2824 2765
@@ -3098,28 +3039,6 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
3098 return 0; 3039 return 0;
3099} 3040}
3100 3041
3101static int
3102skl_check_nv12_surface(struct intel_plane_state *plane_state)
3103{
3104 /* Display WA #1106 */
3105 if (plane_state->base.rotation !=
3106 (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90) &&
3107 plane_state->base.rotation != DRM_MODE_ROTATE_270)
3108 return 0;
3109
3110 /*
3111 * src coordinates are rotated here.
3112 * We check height but report it as width
3113 */
3114 if (((drm_rect_height(&plane_state->base.src) >> 16) % 4) != 0) {
3115 DRM_DEBUG_KMS("src width must be multiple "
3116 "of 4 for rotated NV12\n");
3117 return -EINVAL;
3118 }
3119
3120 return 0;
3121}
3122
3123static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state) 3042static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
3124{ 3043{
3125 const struct drm_framebuffer *fb = plane_state->base.fb; 3044 const struct drm_framebuffer *fb = plane_state->base.fb;
@@ -3198,9 +3117,6 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
3198 * the main surface setup depends on it. 3117 * the main surface setup depends on it.
3199 */ 3118 */
3200 if (fb->format->format == DRM_FORMAT_NV12) { 3119 if (fb->format->format == DRM_FORMAT_NV12) {
3201 ret = skl_check_nv12_surface(plane_state);
3202 if (ret)
3203 return ret;
3204 ret = skl_check_nv12_aux_surface(plane_state); 3120 ret = skl_check_nv12_aux_surface(plane_state);
3205 if (ret) 3121 if (ret)
3206 return ret; 3122 return ret;
@@ -3448,7 +3364,6 @@ static void i9xx_update_plane(struct intel_plane *plane,
3448 intel_plane_ggtt_offset(plane_state) + 3364 intel_plane_ggtt_offset(plane_state) +
3449 dspaddr_offset); 3365 dspaddr_offset);
3450 } 3366 }
3451 POSTING_READ_FW(reg);
3452 3367
3453 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 3368 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
3454} 3369}
@@ -3467,7 +3382,6 @@ static void i9xx_disable_plane(struct intel_plane *plane,
3467 I915_WRITE_FW(DSPSURF(i9xx_plane), 0); 3382 I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
3468 else 3383 else
3469 I915_WRITE_FW(DSPADDR(i9xx_plane), 0); 3384 I915_WRITE_FW(DSPADDR(i9xx_plane), 0);
3470 POSTING_READ_FW(DSPCNTR(i9xx_plane));
3471 3385
3472 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 3386 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
3473} 3387}
@@ -3527,13 +3441,13 @@ static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
3527/* 3441/*
3528 * This function detaches (aka. unbinds) unused scalers in hardware 3442 * This function detaches (aka. unbinds) unused scalers in hardware
3529 */ 3443 */
3530static void skl_detach_scalers(struct intel_crtc *intel_crtc) 3444static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
3531{ 3445{
3532 struct intel_crtc_scaler_state *scaler_state; 3446 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
3447 const struct intel_crtc_scaler_state *scaler_state =
3448 &crtc_state->scaler_state;
3533 int i; 3449 int i;
3534 3450
3535 scaler_state = &intel_crtc->config->scaler_state;
3536
3537 /* loop through and disable scalers that aren't in use */ 3451 /* loop through and disable scalers that aren't in use */
3538 for (i = 0; i < intel_crtc->num_scalers; i++) { 3452 for (i = 0; i < intel_crtc->num_scalers; i++) {
3539 if (!scaler_state->scalers[i].in_use) 3453 if (!scaler_state->scalers[i].in_use)
@@ -3597,29 +3511,38 @@ static u32 skl_plane_ctl_format(uint32_t pixel_format)
3597 return 0; 3511 return 0;
3598} 3512}
3599 3513
3600/* 3514static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
3601 * XXX: For ARBG/ABGR formats we default to expecting scanout buffers
3602 * to be already pre-multiplied. We need to add a knob (or a different
3603 * DRM_FORMAT) for user-space to configure that.
3604 */
3605static u32 skl_plane_ctl_alpha(uint32_t pixel_format)
3606{ 3515{
3607 switch (pixel_format) { 3516 if (!plane_state->base.fb->format->has_alpha)
3608 case DRM_FORMAT_ABGR8888: 3517 return PLANE_CTL_ALPHA_DISABLE;
3609 case DRM_FORMAT_ARGB8888: 3518
3519 switch (plane_state->base.pixel_blend_mode) {
3520 case DRM_MODE_BLEND_PIXEL_NONE:
3521 return PLANE_CTL_ALPHA_DISABLE;
3522 case DRM_MODE_BLEND_PREMULTI:
3610 return PLANE_CTL_ALPHA_SW_PREMULTIPLY; 3523 return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
3524 case DRM_MODE_BLEND_COVERAGE:
3525 return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
3611 default: 3526 default:
3527 MISSING_CASE(plane_state->base.pixel_blend_mode);
3612 return PLANE_CTL_ALPHA_DISABLE; 3528 return PLANE_CTL_ALPHA_DISABLE;
3613 } 3529 }
3614} 3530}
3615 3531
3616static u32 glk_plane_color_ctl_alpha(uint32_t pixel_format) 3532static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
3617{ 3533{
3618 switch (pixel_format) { 3534 if (!plane_state->base.fb->format->has_alpha)
3619 case DRM_FORMAT_ABGR8888: 3535 return PLANE_COLOR_ALPHA_DISABLE;
3620 case DRM_FORMAT_ARGB8888: 3536
3537 switch (plane_state->base.pixel_blend_mode) {
3538 case DRM_MODE_BLEND_PIXEL_NONE:
3539 return PLANE_COLOR_ALPHA_DISABLE;
3540 case DRM_MODE_BLEND_PREMULTI:
3621 return PLANE_COLOR_ALPHA_SW_PREMULTIPLY; 3541 return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
3542 case DRM_MODE_BLEND_COVERAGE:
3543 return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
3622 default: 3544 default:
3545 MISSING_CASE(plane_state->base.pixel_blend_mode);
3623 return PLANE_COLOR_ALPHA_DISABLE; 3546 return PLANE_COLOR_ALPHA_DISABLE;
3624 } 3547 }
3625} 3548}
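
Both alpha helpers now key off the plane's pixel_blend_mode rather than the pixel format, and formats without an alpha channel force blending off. Userspace opts into pre-multiplied or coverage blending through the standard per-plane "pixel blend mode" property; a minimal libdrm sketch (the property id and enum value must be discovered at runtime, and the helper name here is ours):

    #include <stdint.h>
    #include <xf86drm.h>
    #include <xf86drmMode.h>

    /*
     * Set a plane's "pixel blend mode" property. 'prop_id' and
     * 'blend_enum_value' are assumed to have been looked up via
     * drmModeObjectGetProperties()/drmModeGetProperty().
     */
    static int set_pixel_blend_mode(int fd, uint32_t plane_id,
                                    uint32_t prop_id,
                                    uint64_t blend_enum_value)
    {
            return drmModeObjectSetProperty(fd, plane_id,
                                            DRM_MODE_OBJECT_PLANE,
                                            prop_id, blend_enum_value);
    }
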
@@ -3696,7 +3619,7 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
3696 plane_ctl = PLANE_CTL_ENABLE; 3619 plane_ctl = PLANE_CTL_ENABLE;
3697 3620
3698 if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) { 3621 if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
3699 plane_ctl |= skl_plane_ctl_alpha(fb->format->format); 3622 plane_ctl |= skl_plane_ctl_alpha(plane_state);
3700 plane_ctl |= 3623 plane_ctl |=
3701 PLANE_CTL_PIPE_GAMMA_ENABLE | 3624 PLANE_CTL_PIPE_GAMMA_ENABLE |
3702 PLANE_CTL_PIPE_CSC_ENABLE | 3625 PLANE_CTL_PIPE_CSC_ENABLE |
@@ -3731,6 +3654,7 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
3731 struct drm_i915_private *dev_priv = 3654 struct drm_i915_private *dev_priv =
3732 to_i915(plane_state->base.plane->dev); 3655 to_i915(plane_state->base.plane->dev);
3733 const struct drm_framebuffer *fb = plane_state->base.fb; 3656 const struct drm_framebuffer *fb = plane_state->base.fb;
3657 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
3734 u32 plane_color_ctl = 0; 3658 u32 plane_color_ctl = 0;
3735 3659
3736 if (INTEL_GEN(dev_priv) < 11) { 3660 if (INTEL_GEN(dev_priv) < 11) {
@@ -3738,9 +3662,9 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
3738 plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE; 3662 plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
3739 } 3663 }
3740 plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE; 3664 plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
3741 plane_color_ctl |= glk_plane_color_ctl_alpha(fb->format->format); 3665 plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);
3742 3666
3743 if (fb->format->is_yuv) { 3667 if (fb->format->is_yuv && !icl_is_hdr_plane(plane)) {
3744 if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709) 3668 if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
3745 plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709; 3669 plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
3746 else 3670 else
@@ -3748,6 +3672,8 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
3748 3672
3749 if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE) 3673 if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
3750 plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE; 3674 plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
3675 } else if (fb->format->is_yuv) {
3676 plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
3751 } 3677 }
3752 3678
3753 return plane_color_ctl; 3679 return plane_color_ctl;
@@ -3932,15 +3858,15 @@ static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_sta
3932 3858
3933 /* on skylake this is done by detaching scalers */ 3859 /* on skylake this is done by detaching scalers */
3934 if (INTEL_GEN(dev_priv) >= 9) { 3860 if (INTEL_GEN(dev_priv) >= 9) {
3935 skl_detach_scalers(crtc); 3861 skl_detach_scalers(new_crtc_state);
3936 3862
3937 if (new_crtc_state->pch_pfit.enabled) 3863 if (new_crtc_state->pch_pfit.enabled)
3938 skylake_pfit_enable(crtc); 3864 skylake_pfit_enable(new_crtc_state);
3939 } else if (HAS_PCH_SPLIT(dev_priv)) { 3865 } else if (HAS_PCH_SPLIT(dev_priv)) {
3940 if (new_crtc_state->pch_pfit.enabled) 3866 if (new_crtc_state->pch_pfit.enabled)
3941 ironlake_pfit_enable(crtc); 3867 ironlake_pfit_enable(new_crtc_state);
3942 else if (old_crtc_state->pch_pfit.enabled) 3868 else if (old_crtc_state->pch_pfit.enabled)
3943 ironlake_pfit_disable(crtc, true); 3869 ironlake_pfit_disable(old_crtc_state);
3944 } 3870 }
3945} 3871}
3946 3872
@@ -4339,10 +4265,10 @@ train_done:
4339 DRM_DEBUG_KMS("FDI train done.\n"); 4265 DRM_DEBUG_KMS("FDI train done.\n");
4340} 4266}
4341 4267
4342static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc) 4268static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
4343{ 4269{
4344 struct drm_device *dev = intel_crtc->base.dev; 4270 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
4345 struct drm_i915_private *dev_priv = to_i915(dev); 4271 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
4346 int pipe = intel_crtc->pipe; 4272 int pipe = intel_crtc->pipe;
4347 i915_reg_t reg; 4273 i915_reg_t reg;
4348 u32 temp; 4274 u32 temp;
@@ -4351,7 +4277,7 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
4351 reg = FDI_RX_CTL(pipe); 4277 reg = FDI_RX_CTL(pipe);
4352 temp = I915_READ(reg); 4278 temp = I915_READ(reg);
4353 temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16)); 4279 temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
4354 temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes); 4280 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
4355 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; 4281 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
4356 I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE); 4282 I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
4357 4283
@@ -4500,10 +4426,11 @@ void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
4500} 4426}
4501 4427
4502/* Program iCLKIP clock to the desired frequency */ 4428/* Program iCLKIP clock to the desired frequency */
4503static void lpt_program_iclkip(struct intel_crtc *crtc) 4429static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
4504{ 4430{
4431 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4505 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4432 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4506 int clock = crtc->config->base.adjusted_mode.crtc_clock; 4433 int clock = crtc_state->base.adjusted_mode.crtc_clock;
4507 u32 divsel, phaseinc, auxdiv, phasedir = 0; 4434 u32 divsel, phaseinc, auxdiv, phasedir = 0;
4508 u32 temp; 4435 u32 temp;
4509 4436
@@ -4614,12 +4541,12 @@ int lpt_get_iclkip(struct drm_i915_private *dev_priv)
4614 desired_divisor << auxdiv); 4541 desired_divisor << auxdiv);
4615} 4542}
4616 4543
4617static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc, 4544static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
4618 enum pipe pch_transcoder) 4545 enum pipe pch_transcoder)
4619{ 4546{
4620 struct drm_device *dev = crtc->base.dev; 4547 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4621 struct drm_i915_private *dev_priv = to_i915(dev); 4548 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4622 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; 4549 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
4623 4550
4624 I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder), 4551 I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
4625 I915_READ(HTOTAL(cpu_transcoder))); 4552 I915_READ(HTOTAL(cpu_transcoder)));
@@ -4638,9 +4565,8 @@ static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
4638 I915_READ(VSYNCSHIFT(cpu_transcoder))); 4565 I915_READ(VSYNCSHIFT(cpu_transcoder)));
4639} 4566}
4640 4567
4641static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable) 4568static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
4642{ 4569{
4643 struct drm_i915_private *dev_priv = to_i915(dev);
4644 uint32_t temp; 4570 uint32_t temp;
4645 4571
4646 temp = I915_READ(SOUTH_CHICKEN1); 4572 temp = I915_READ(SOUTH_CHICKEN1);
@@ -4659,22 +4585,23 @@ static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
4659 POSTING_READ(SOUTH_CHICKEN1); 4585 POSTING_READ(SOUTH_CHICKEN1);
4660} 4586}
4661 4587
4662static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc) 4588static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
4663{ 4589{
4664 struct drm_device *dev = intel_crtc->base.dev; 4590 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4591 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4665 4592
4666 switch (intel_crtc->pipe) { 4593 switch (crtc->pipe) {
4667 case PIPE_A: 4594 case PIPE_A:
4668 break; 4595 break;
4669 case PIPE_B: 4596 case PIPE_B:
4670 if (intel_crtc->config->fdi_lanes > 2) 4597 if (crtc_state->fdi_lanes > 2)
4671 cpt_set_fdi_bc_bifurcation(dev, false); 4598 cpt_set_fdi_bc_bifurcation(dev_priv, false);
4672 else 4599 else
4673 cpt_set_fdi_bc_bifurcation(dev, true); 4600 cpt_set_fdi_bc_bifurcation(dev_priv, true);
4674 4601
4675 break; 4602 break;
4676 case PIPE_C: 4603 case PIPE_C:
4677 cpt_set_fdi_bc_bifurcation(dev, true); 4604 cpt_set_fdi_bc_bifurcation(dev_priv, true);
4678 4605
4679 break; 4606 break;
4680 default: 4607 default:
@@ -4731,7 +4658,7 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state,
4731 assert_pch_transcoder_disabled(dev_priv, pipe); 4658 assert_pch_transcoder_disabled(dev_priv, pipe);
4732 4659
4733 if (IS_IVYBRIDGE(dev_priv)) 4660 if (IS_IVYBRIDGE(dev_priv))
4734 ivybridge_update_fdi_bc_bifurcation(crtc); 4661 ivybridge_update_fdi_bc_bifurcation(crtc_state);
4735 4662
4736 /* Write the TU size bits before fdi link training, so that error 4663 /* Write the TU size bits before fdi link training, so that error
4737 * detection works. */ 4664 * detection works. */
@@ -4764,11 +4691,11 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state,
4764 * Note that enable_shared_dpll tries to do the right thing, but 4691 * Note that enable_shared_dpll tries to do the right thing, but
4765 * get_shared_dpll unconditionally resets the pll - we need that to have 4692 * get_shared_dpll unconditionally resets the pll - we need that to have
4766 * the right LVDS enable sequence. */ 4693 * the right LVDS enable sequence. */
4767 intel_enable_shared_dpll(crtc); 4694 intel_enable_shared_dpll(crtc_state);
4768 4695
4769 /* set transcoder timing, panel must allow it */ 4696 /* set transcoder timing, panel must allow it */
4770 assert_panel_unlocked(dev_priv, pipe); 4697 assert_panel_unlocked(dev_priv, pipe);
4771 ironlake_pch_transcoder_set_timings(crtc, pipe); 4698 ironlake_pch_transcoder_set_timings(crtc_state, pipe);
4772 4699
4773 intel_fdi_normal_train(crtc); 4700 intel_fdi_normal_train(crtc);
4774 4701
@@ -4800,7 +4727,7 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state,
4800 I915_WRITE(reg, temp); 4727 I915_WRITE(reg, temp);
4801 } 4728 }
4802 4729
4803 ironlake_enable_pch_transcoder(dev_priv, pipe); 4730 ironlake_enable_pch_transcoder(crtc_state);
4804} 4731}
4805 4732
4806static void lpt_pch_enable(const struct intel_atomic_state *state, 4733static void lpt_pch_enable(const struct intel_atomic_state *state,
@@ -4812,10 +4739,10 @@ static void lpt_pch_enable(const struct intel_atomic_state *state,
4812 4739
4813 assert_pch_transcoder_disabled(dev_priv, PIPE_A); 4740 assert_pch_transcoder_disabled(dev_priv, PIPE_A);
4814 4741
4815 lpt_program_iclkip(crtc); 4742 lpt_program_iclkip(crtc_state);
4816 4743
4817 /* Set transcoder timing. */ 4744 /* Set transcoder timing. */
4818 ironlake_pch_transcoder_set_timings(crtc, PIPE_A); 4745 ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A);
4819 4746
4820 lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); 4747 lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
4821} 4748}
@@ -4903,8 +4830,7 @@ static int
4903skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, 4830skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
4904 unsigned int scaler_user, int *scaler_id, 4831 unsigned int scaler_user, int *scaler_id,
4905 int src_w, int src_h, int dst_w, int dst_h, 4832 int src_w, int src_h, int dst_w, int dst_h,
4906 bool plane_scaler_check, 4833 const struct drm_format_info *format, bool need_scaler)
4907 uint32_t pixel_format)
4908{ 4834{
4909 struct intel_crtc_scaler_state *scaler_state = 4835 struct intel_crtc_scaler_state *scaler_state =
4910 &crtc_state->scaler_state; 4836 &crtc_state->scaler_state;
@@ -4913,21 +4839,14 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
4913 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); 4839 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
4914 const struct drm_display_mode *adjusted_mode = 4840 const struct drm_display_mode *adjusted_mode =
4915 &crtc_state->base.adjusted_mode; 4841 &crtc_state->base.adjusted_mode;
4916 int need_scaling;
4917 4842
4918 /* 4843 /*
4919 * Src coordinates are already rotated by 270 degrees for 4844 * Src coordinates are already rotated by 270 degrees for
4920 * the 90/270 degree plane rotation cases (to match the 4845 * the 90/270 degree plane rotation cases (to match the
4921 * GTT mapping), hence no need to account for rotation here. 4846 * GTT mapping), hence no need to account for rotation here.
4922 */ 4847 */
4923 need_scaling = src_w != dst_w || src_h != dst_h; 4848 if (src_w != dst_w || src_h != dst_h)
4924 4849 need_scaler = true;
4925 if (plane_scaler_check)
4926 if (pixel_format == DRM_FORMAT_NV12)
4927 need_scaling = true;
4928
4929 if (crtc_state->ycbcr420 && scaler_user == SKL_CRTC_INDEX)
4930 need_scaling = true;
4931 4850
4932 /* 4851 /*
4933 * Scaling/fitting not supported in IF-ID mode in GEN9+ 4852 * Scaling/fitting not supported in IF-ID mode in GEN9+
@@ -4936,7 +4855,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
4936 * for NV12. 4855 * for NV12.
4937 */ 4856 */
4938 if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable && 4857 if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
4939 need_scaling && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 4858 need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
4940 DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n"); 4859 DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
4941 return -EINVAL; 4860 return -EINVAL;
4942 } 4861 }
@@ -4951,7 +4870,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
4951 * update to free the scaler is done in plane/panel-fit programming. 4870 * update to free the scaler is done in plane/panel-fit programming.
4952 * For this purpose crtc/plane_state->scaler_id isn't reset here. 4871 * For this purpose crtc/plane_state->scaler_id isn't reset here.
4953 */ 4872 */
4954 if (force_detach || !need_scaling) { 4873 if (force_detach || !need_scaler) {
4955 if (*scaler_id >= 0) { 4874 if (*scaler_id >= 0) {
4956 scaler_state->scaler_users &= ~(1 << scaler_user); 4875 scaler_state->scaler_users &= ~(1 << scaler_user);
4957 scaler_state->scalers[*scaler_id].in_use = 0; 4876 scaler_state->scalers[*scaler_id].in_use = 0;
@@ -4965,7 +4884,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
4965 return 0; 4884 return 0;
4966 } 4885 }
4967 4886
4968 if (plane_scaler_check && pixel_format == DRM_FORMAT_NV12 && 4887 if (format && format->format == DRM_FORMAT_NV12 &&
4969 (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) { 4888 (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
4970 DRM_DEBUG_KMS("NV12: src dimensions not met\n"); 4889 DRM_DEBUG_KMS("NV12: src dimensions not met\n");
4971 return -EINVAL; 4890 return -EINVAL;
@@ -5008,12 +4927,16 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
5008int skl_update_scaler_crtc(struct intel_crtc_state *state) 4927int skl_update_scaler_crtc(struct intel_crtc_state *state)
5009{ 4928{
5010 const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode; 4929 const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
4930 bool need_scaler = false;
4931
4932 if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
4933 need_scaler = true;
5011 4934
5012 return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX, 4935 return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
5013 &state->scaler_state.scaler_id, 4936 &state->scaler_state.scaler_id,
5014 state->pipe_src_w, state->pipe_src_h, 4937 state->pipe_src_w, state->pipe_src_h,
5015 adjusted_mode->crtc_hdisplay, 4938 adjusted_mode->crtc_hdisplay,
5016 adjusted_mode->crtc_vdisplay, false, 0); 4939 adjusted_mode->crtc_vdisplay, NULL, need_scaler);
5017} 4940}
5018 4941
5019/** 4942/**
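
skl_update_scaler() no longer infers scaler need from a pixel-format flag; each caller computes need_scaler up front. On the crtc side the rule is simple: YCbCr 4:2:0 output always takes the pipe scaler, otherwise only an actual size mismatch does. A standalone model of that decision (not driver code):

    #include <stdbool.h>
    #include <stdio.h>

    static bool crtc_needs_scaler(bool ycbcr420_output,
                                  int src_w, int src_h,
                                  int dst_w, int dst_h)
    {
            if (ycbcr420_output)
                    return true; /* 4:2:0 output always scales */

            return src_w != dst_w || src_h != dst_h;
    }

    int main(void)
    {
            printf("%d\n", crtc_needs_scaler(false, 1920, 1080,
                                             1920, 1080)); /* 0 */
            printf("%d\n", crtc_needs_scaler(true, 1920, 1080,
                                             1920, 1080)); /* 1 */
            return 0;
    }
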
@@ -5028,13 +4951,17 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
5028static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, 4951static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
5029 struct intel_plane_state *plane_state) 4952 struct intel_plane_state *plane_state)
5030{ 4953{
5031
5032 struct intel_plane *intel_plane = 4954 struct intel_plane *intel_plane =
5033 to_intel_plane(plane_state->base.plane); 4955 to_intel_plane(plane_state->base.plane);
5034 struct drm_framebuffer *fb = plane_state->base.fb; 4956 struct drm_framebuffer *fb = plane_state->base.fb;
5035 int ret; 4957 int ret;
5036
5037 bool force_detach = !fb || !plane_state->base.visible; 4958 bool force_detach = !fb || !plane_state->base.visible;
4959 bool need_scaler = false;
4960
4961 /* Pre-gen11 planes and gen11 SDR planes always need a scaler for planar formats. */
4962 if (!icl_is_hdr_plane(intel_plane) &&
4963 fb && fb->format->format == DRM_FORMAT_NV12)
4964 need_scaler = true;
5038 4965
5039 ret = skl_update_scaler(crtc_state, force_detach, 4966 ret = skl_update_scaler(crtc_state, force_detach,
5040 drm_plane_index(&intel_plane->base), 4967 drm_plane_index(&intel_plane->base),
@@ -5043,7 +4970,7 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
5043 drm_rect_height(&plane_state->base.src) >> 16, 4970 drm_rect_height(&plane_state->base.src) >> 16,
5044 drm_rect_width(&plane_state->base.dst), 4971 drm_rect_width(&plane_state->base.dst),
5045 drm_rect_height(&plane_state->base.dst), 4972 drm_rect_height(&plane_state->base.dst),
5046 fb ? true : false, fb ? fb->format->format : 0); 4973 fb ? fb->format : NULL, need_scaler);
5047 4974
5048 if (ret || plane_state->scaler_id < 0) 4975 if (ret || plane_state->scaler_id < 0)
5049 return ret; 4976 return ret;
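
The plane-side rule is platform aware: planar NV12 always needs a scaler for chroma upsampling, except on Icelake HDR planes, which have a chroma upsampler of their own (the icl_is_hdr_plane() test above). A one-line model of the predicate, under those assumptions:

    #include <stdbool.h>

    /* Planar NV12 needs the scaler unless the plane upsamples chroma itself. */
    static bool plane_needs_scaler_for_nv12(bool fb_is_nv12, bool icl_hdr_plane)
    {
            return fb_is_nv12 && !icl_hdr_plane;
    }

    int main(void)
    {
            return plane_needs_scaler_for_nv12(true, true); /* 0: HDR plane */
    }
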
@@ -5089,27 +5016,27 @@ static void skylake_scaler_disable(struct intel_crtc *crtc)
5089 skl_detach_scaler(crtc, i); 5016 skl_detach_scaler(crtc, i);
5090} 5017}
5091 5018
5092static void skylake_pfit_enable(struct intel_crtc *crtc) 5019static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
5093{ 5020{
5094 struct drm_device *dev = crtc->base.dev; 5021 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5095 struct drm_i915_private *dev_priv = to_i915(dev); 5022 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5096 int pipe = crtc->pipe; 5023 enum pipe pipe = crtc->pipe;
5097 struct intel_crtc_scaler_state *scaler_state = 5024 const struct intel_crtc_scaler_state *scaler_state =
5098 &crtc->config->scaler_state; 5025 &crtc_state->scaler_state;
5099 5026
5100 if (crtc->config->pch_pfit.enabled) { 5027 if (crtc_state->pch_pfit.enabled) {
5101 u16 uv_rgb_hphase, uv_rgb_vphase; 5028 u16 uv_rgb_hphase, uv_rgb_vphase;
5102 int pfit_w, pfit_h, hscale, vscale; 5029 int pfit_w, pfit_h, hscale, vscale;
5103 int id; 5030 int id;
5104 5031
5105 if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) 5032 if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
5106 return; 5033 return;
5107 5034
5108 pfit_w = (crtc->config->pch_pfit.size >> 16) & 0xFFFF; 5035 pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
5109 pfit_h = crtc->config->pch_pfit.size & 0xFFFF; 5036 pfit_h = crtc_state->pch_pfit.size & 0xFFFF;
5110 5037
5111 hscale = (crtc->config->pipe_src_w << 16) / pfit_w; 5038 hscale = (crtc_state->pipe_src_w << 16) / pfit_w;
5112 vscale = (crtc->config->pipe_src_h << 16) / pfit_h; 5039 vscale = (crtc_state->pipe_src_h << 16) / pfit_h;
5113 5040
5114 uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false); 5041 uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
5115 uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false); 5042 uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
@@ -5121,18 +5048,18 @@ static void skylake_pfit_enable(struct intel_crtc *crtc)
5121 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase)); 5048 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
5122 I915_WRITE_FW(SKL_PS_HPHASE(pipe, id), 5049 I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
5123 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase)); 5050 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
5124 I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos); 5051 I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos);
5125 I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size); 5052 I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size);
5126 } 5053 }
5127} 5054}
5128 5055
5129static void ironlake_pfit_enable(struct intel_crtc *crtc) 5056static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
5130{ 5057{
5131 struct drm_device *dev = crtc->base.dev; 5058 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5132 struct drm_i915_private *dev_priv = to_i915(dev); 5059 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5133 int pipe = crtc->pipe; 5060 int pipe = crtc->pipe;
5134 5061
5135 if (crtc->config->pch_pfit.enabled) { 5062 if (crtc_state->pch_pfit.enabled) {
5136 /* Force use of hard-coded filter coefficients 5063 /* Force use of hard-coded filter coefficients
5137 * as some pre-programmed values are broken, 5064 * as some pre-programmed values are broken,
5138 * e.g. x201. 5065 * e.g. x201.
@@ -5142,8 +5069,8 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc)
5142 PF_PIPE_SEL_IVB(pipe)); 5069 PF_PIPE_SEL_IVB(pipe));
5143 else 5070 else
5144 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); 5071 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
5145 I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos); 5072 I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos);
5146 I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size); 5073 I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size);
5147 } 5074 }
5148} 5075}
5149 5076
@@ -5338,11 +5265,8 @@ static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
5338 if (!crtc_state->nv12_planes) 5265 if (!crtc_state->nv12_planes)
5339 return false; 5266 return false;
5340 5267
5341 if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv)) 5268 /* WA Display #0827: Gen9:all */
5342 return false; 5269 if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv))
5343
5344 if ((INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv)) ||
5345 IS_CANNONLAKE(dev_priv))
5346 return true; 5270 return true;
5347 5271
5348 return false; 5272 return false;
@@ -5385,7 +5309,6 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
5385 if (needs_nv12_wa(dev_priv, old_crtc_state) && 5309 if (needs_nv12_wa(dev_priv, old_crtc_state) &&
5386 !needs_nv12_wa(dev_priv, pipe_config)) { 5310 !needs_nv12_wa(dev_priv, pipe_config)) {
5387 skl_wa_clkgate(dev_priv, crtc->pipe, false); 5311 skl_wa_clkgate(dev_priv, crtc->pipe, false);
5388 skl_wa_528(dev_priv, crtc->pipe, false);
5389 } 5312 }
5390} 5313}
5391 5314
@@ -5425,7 +5348,6 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
5425 if (!needs_nv12_wa(dev_priv, old_crtc_state) && 5348 if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
5426 needs_nv12_wa(dev_priv, pipe_config)) { 5349 needs_nv12_wa(dev_priv, pipe_config)) {
5427 skl_wa_clkgate(dev_priv, crtc->pipe, true); 5350 skl_wa_clkgate(dev_priv, crtc->pipe, true);
5428 skl_wa_528(dev_priv, crtc->pipe, true);
5429 } 5351 }
5430 5352
5431 /* 5353 /*
@@ -5448,7 +5370,8 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
5448 * 5370 *
5449 * WaCxSRDisabledForSpriteScaling:ivb 5371 * WaCxSRDisabledForSpriteScaling:ivb
5450 */ 5372 */
5451 if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev)) 5373 if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev) &&
5374 old_crtc_state->base.active)
5452 intel_wait_for_vblank(dev_priv, crtc->pipe); 5375 intel_wait_for_vblank(dev_priv, crtc->pipe);
5453 5376
5454 /* 5377 /*
@@ -5479,24 +5402,23 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
5479 intel_update_watermarks(crtc); 5402 intel_update_watermarks(crtc);
5480} 5403}
5481 5404
5482static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask) 5405static void intel_crtc_disable_planes(struct intel_crtc *crtc, unsigned plane_mask)
5483{ 5406{
5484 struct drm_device *dev = crtc->dev; 5407 struct drm_device *dev = crtc->base.dev;
5485 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5408 struct intel_plane *plane;
5486 struct drm_plane *p; 5409 unsigned fb_bits = 0;
5487 int pipe = intel_crtc->pipe;
5488 5410
5489 intel_crtc_dpms_overlay_disable(intel_crtc); 5411 intel_crtc_dpms_overlay_disable(crtc);
5490 5412
5491 drm_for_each_plane_mask(p, dev, plane_mask) 5413 for_each_intel_plane_on_crtc(dev, crtc, plane) {
5492 to_intel_plane(p)->disable_plane(to_intel_plane(p), intel_crtc); 5414 if (plane_mask & BIT(plane->id)) {
5415 plane->disable_plane(plane, crtc);
5493 5416
5494 /* 5417 fb_bits |= plane->frontbuffer_bit;
5495 * FIXME: Once we grow proper nuclear flip support out of this we need 5418 }
5496 * to compute the mask of flip planes precisely. For the time being 5419 }
5497 * consider this a flip to a NULL plane. 5420
5498 */ 5421 intel_frontbuffer_flip(to_i915(dev), fb_bits);
5499 intel_frontbuffer_flip(to_i915(dev), INTEL_FRONTBUFFER_ALL_MASK(pipe));
5500} 5422}
5501 5423
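
intel_crtc_disable_planes() now flips only the frontbuffer bits of the planes it actually disabled, instead of INTEL_FRONTBUFFER_ALL_MASK for the whole pipe. A self-contained model of the bit accumulation (three planes and their bit assignments are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical per-plane frontbuffer bits for one pipe. */
            const uint32_t frontbuffer_bit[3] = { 1u << 0, 1u << 1, 1u << 2 };
            uint32_t plane_mask = (1u << 0) | (1u << 2); /* planes to disable */
            uint32_t fb_bits = 0;
            unsigned int id;

            for (id = 0; id < 3; id++)
                    if (plane_mask & (1u << id))
                            fb_bits |= frontbuffer_bit[id];

            printf("flip bits: %#x\n", fb_bits); /* 0x5, not the whole pipe */
            return 0;
    }
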
5502static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc, 5424static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
@@ -5554,7 +5476,8 @@ static void intel_encoders_enable(struct drm_crtc *crtc,
5554 if (conn_state->crtc != crtc) 5476 if (conn_state->crtc != crtc)
5555 continue; 5477 continue;
5556 5478
5557 encoder->enable(encoder, crtc_state, conn_state); 5479 if (encoder->enable)
5480 encoder->enable(encoder, crtc_state, conn_state);
5558 intel_opregion_notify_encoder(encoder, true); 5481 intel_opregion_notify_encoder(encoder, true);
5559 } 5482 }
5560} 5483}
@@ -5575,7 +5498,8 @@ static void intel_encoders_disable(struct drm_crtc *crtc,
5575 continue; 5498 continue;
5576 5499
5577 intel_opregion_notify_encoder(encoder, false); 5500 intel_opregion_notify_encoder(encoder, false);
5578 encoder->disable(encoder, old_crtc_state, old_conn_state); 5501 if (encoder->disable)
5502 encoder->disable(encoder, old_crtc_state, old_conn_state);
5579 } 5503 }
5580} 5504}
5581 5505
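
The encoder ->enable and ->disable hooks become optional here; encoders that do all their work in pre_enable/post_disable (DP MST, for instance) can leave them NULL, so the iterators must guard the call. The NULL-check-before-call pattern in isolation, with simplified stand-in types:

    #include <stdio.h>

    struct encoder {
            const char *name;
            void (*enable)(struct encoder *); /* may be NULL */
    };

    static void dp_enable(struct encoder *e)
    {
            printf("%s: enable\n", e->name);
    }

    static void encoders_enable(struct encoder **encoders, int n)
    {
            int i;

            for (i = 0; i < n; i++)
                    if (encoders[i]->enable) /* hook is optional now */
                            encoders[i]->enable(encoders[i]);
    }

    int main(void)
    {
            struct encoder dp = { "DP", dp_enable };
            struct encoder mst = { "MST", NULL }; /* no ->enable hook */
            struct encoder *all[] = { &dp, &mst };

            encoders_enable(all, 2);
            return 0;
    }
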
@@ -5646,37 +5570,37 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
5646 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 5570 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
5647 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); 5571 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
5648 5572
5649 if (intel_crtc->config->has_pch_encoder) 5573 if (pipe_config->has_pch_encoder)
5650 intel_prepare_shared_dpll(intel_crtc); 5574 intel_prepare_shared_dpll(pipe_config);
5651 5575
5652 if (intel_crtc_has_dp_encoder(intel_crtc->config)) 5576 if (intel_crtc_has_dp_encoder(pipe_config))
5653 intel_dp_set_m_n(intel_crtc, M1_N1); 5577 intel_dp_set_m_n(pipe_config, M1_N1);
5654 5578
5655 intel_set_pipe_timings(intel_crtc); 5579 intel_set_pipe_timings(pipe_config);
5656 intel_set_pipe_src_size(intel_crtc); 5580 intel_set_pipe_src_size(pipe_config);
5657 5581
5658 if (intel_crtc->config->has_pch_encoder) { 5582 if (pipe_config->has_pch_encoder) {
5659 intel_cpu_transcoder_set_m_n(intel_crtc, 5583 intel_cpu_transcoder_set_m_n(pipe_config,
5660 &intel_crtc->config->fdi_m_n, NULL); 5584 &pipe_config->fdi_m_n, NULL);
5661 } 5585 }
5662 5586
5663 ironlake_set_pipeconf(crtc); 5587 ironlake_set_pipeconf(pipe_config);
5664 5588
5665 intel_crtc->active = true; 5589 intel_crtc->active = true;
5666 5590
5667 intel_encoders_pre_enable(crtc, pipe_config, old_state); 5591 intel_encoders_pre_enable(crtc, pipe_config, old_state);
5668 5592
5669 if (intel_crtc->config->has_pch_encoder) { 5593 if (pipe_config->has_pch_encoder) {
5670 /* Note: FDI PLL enabling _must_ be done before we enable the 5594 /* Note: FDI PLL enabling _must_ be done before we enable the
5671 * cpu pipes, hence this is separate from all the other fdi/pch 5595 * cpu pipes, hence this is separate from all the other fdi/pch
5672 * enabling. */ 5596 * enabling. */
5673 ironlake_fdi_pll_enable(intel_crtc); 5597 ironlake_fdi_pll_enable(pipe_config);
5674 } else { 5598 } else {
5675 assert_fdi_tx_disabled(dev_priv, pipe); 5599 assert_fdi_tx_disabled(dev_priv, pipe);
5676 assert_fdi_rx_disabled(dev_priv, pipe); 5600 assert_fdi_rx_disabled(dev_priv, pipe);
5677 } 5601 }
5678 5602
5679 ironlake_pfit_enable(intel_crtc); 5603 ironlake_pfit_enable(pipe_config);
5680 5604
5681 /* 5605 /*
5682 * On ILK+ LUT must be loaded before the pipe is running but with 5606 * On ILK+ LUT must be loaded before the pipe is running but with
@@ -5685,10 +5609,10 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
5685 intel_color_load_luts(&pipe_config->base); 5609 intel_color_load_luts(&pipe_config->base);
5686 5610
5687 if (dev_priv->display.initial_watermarks != NULL) 5611 if (dev_priv->display.initial_watermarks != NULL)
5688 dev_priv->display.initial_watermarks(old_intel_state, intel_crtc->config); 5612 dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
5689 intel_enable_pipe(pipe_config); 5613 intel_enable_pipe(pipe_config);
5690 5614
5691 if (intel_crtc->config->has_pch_encoder) 5615 if (pipe_config->has_pch_encoder)
5692 ironlake_pch_enable(old_intel_state, pipe_config); 5616 ironlake_pch_enable(old_intel_state, pipe_config);
5693 5617
5694 assert_vblank_disabled(crtc); 5618 assert_vblank_disabled(crtc);
@@ -5705,7 +5629,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
5705 * some interlaced HDMI modes. Let's do the double wait always 5629 * some interlaced HDMI modes. Let's do the double wait always
5706 * in case there are more corner cases we don't know about. 5630 * in case there are more corner cases we don't know about.
5707 */ 5631 */
5708 if (intel_crtc->config->has_pch_encoder) { 5632 if (pipe_config->has_pch_encoder) {
5709 intel_wait_for_vblank(dev_priv, pipe); 5633 intel_wait_for_vblank(dev_priv, pipe);
5710 intel_wait_for_vblank(dev_priv, pipe); 5634 intel_wait_for_vblank(dev_priv, pipe);
5711 } 5635 }
@@ -5739,10 +5663,9 @@ static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
5739 enum pipe pipe = crtc->pipe; 5663 enum pipe pipe = crtc->pipe;
5740 uint32_t val; 5664 uint32_t val;
5741 5665
5742 val = MBUS_DBOX_BW_CREDIT(1) | MBUS_DBOX_A_CREDIT(2); 5666 val = MBUS_DBOX_A_CREDIT(2);
5743 5667 val |= MBUS_DBOX_BW_CREDIT(1);
5744 /* Program B credit equally to all pipes */ 5668 val |= MBUS_DBOX_B_CREDIT(8);
5745 val |= MBUS_DBOX_B_CREDIT(24 / INTEL_INFO(dev_priv)->num_pipes);
5746 5669
5747 I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val); 5670 I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
5748} 5671}
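
The MBus DBOX credit programming above stops deriving the B credit from the pipe count (24 divided by 3 pipes happened to give 8) and programs the fixed 8 credits directly, assembling the register value field by field. A model of the assembly (the shift positions are placeholders, not the real register layout from i915_reg.h):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative field encodings; the real ones live in i915_reg.h. */
    #define DBOX_A_CREDIT(x)  ((uint32_t)(x) << 0)
    #define DBOX_BW_CREDIT(x) ((uint32_t)(x) << 8)
    #define DBOX_B_CREDIT(x)  ((uint32_t)(x) << 16)

    int main(void)
    {
            uint32_t val = DBOX_A_CREDIT(2);

            val |= DBOX_BW_CREDIT(1);
            val |= DBOX_B_CREDIT(8); /* fixed, no longer 24 / num_pipes */
            printf("PIPE_MBUS_DBOX_CTL = %#010x\n", val);
            return 0;
    }
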
@@ -5754,7 +5677,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
5754 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 5677 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
5755 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5678 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5756 int pipe = intel_crtc->pipe, hsw_workaround_pipe; 5679 int pipe = intel_crtc->pipe, hsw_workaround_pipe;
5757 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 5680 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
5758 struct intel_atomic_state *old_intel_state = 5681 struct intel_atomic_state *old_intel_state =
5759 to_intel_atomic_state(old_state); 5682 to_intel_atomic_state(old_state);
5760 bool psl_clkgate_wa; 5683 bool psl_clkgate_wa;
@@ -5765,37 +5688,37 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
5765 5688
5766 intel_encoders_pre_pll_enable(crtc, pipe_config, old_state); 5689 intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
5767 5690
5768 if (intel_crtc->config->shared_dpll) 5691 if (pipe_config->shared_dpll)
5769 intel_enable_shared_dpll(intel_crtc); 5692 intel_enable_shared_dpll(pipe_config);
5770 5693
5771 if (INTEL_GEN(dev_priv) >= 11) 5694 if (INTEL_GEN(dev_priv) >= 11)
5772 icl_map_plls_to_ports(crtc, pipe_config, old_state); 5695 icl_map_plls_to_ports(crtc, pipe_config, old_state);
5773 5696
5774 intel_encoders_pre_enable(crtc, pipe_config, old_state); 5697 intel_encoders_pre_enable(crtc, pipe_config, old_state);
5775 5698
5776 if (intel_crtc_has_dp_encoder(intel_crtc->config)) 5699 if (intel_crtc_has_dp_encoder(pipe_config))
5777 intel_dp_set_m_n(intel_crtc, M1_N1); 5700 intel_dp_set_m_n(pipe_config, M1_N1);
5778 5701
5779 if (!transcoder_is_dsi(cpu_transcoder)) 5702 if (!transcoder_is_dsi(cpu_transcoder))
5780 intel_set_pipe_timings(intel_crtc); 5703 intel_set_pipe_timings(pipe_config);
5781 5704
5782 intel_set_pipe_src_size(intel_crtc); 5705 intel_set_pipe_src_size(pipe_config);
5783 5706
5784 if (cpu_transcoder != TRANSCODER_EDP && 5707 if (cpu_transcoder != TRANSCODER_EDP &&
5785 !transcoder_is_dsi(cpu_transcoder)) { 5708 !transcoder_is_dsi(cpu_transcoder)) {
5786 I915_WRITE(PIPE_MULT(cpu_transcoder), 5709 I915_WRITE(PIPE_MULT(cpu_transcoder),
5787 intel_crtc->config->pixel_multiplier - 1); 5710 pipe_config->pixel_multiplier - 1);
5788 } 5711 }
5789 5712
5790 if (intel_crtc->config->has_pch_encoder) { 5713 if (pipe_config->has_pch_encoder) {
5791 intel_cpu_transcoder_set_m_n(intel_crtc, 5714 intel_cpu_transcoder_set_m_n(pipe_config,
5792 &intel_crtc->config->fdi_m_n, NULL); 5715 &pipe_config->fdi_m_n, NULL);
5793 } 5716 }
5794 5717
5795 if (!transcoder_is_dsi(cpu_transcoder)) 5718 if (!transcoder_is_dsi(cpu_transcoder))
5796 haswell_set_pipeconf(crtc); 5719 haswell_set_pipeconf(pipe_config);
5797 5720
5798 haswell_set_pipemisc(crtc); 5721 haswell_set_pipemisc(pipe_config);
5799 5722
5800 intel_color_set_csc(&pipe_config->base); 5723 intel_color_set_csc(&pipe_config->base);
5801 5724
@@ -5803,14 +5726,14 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
5803 5726
5804 /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */ 5727 /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
5805 psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) && 5728 psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
5806 intel_crtc->config->pch_pfit.enabled; 5729 pipe_config->pch_pfit.enabled;
5807 if (psl_clkgate_wa) 5730 if (psl_clkgate_wa)
5808 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true); 5731 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
5809 5732
5810 if (INTEL_GEN(dev_priv) >= 9) 5733 if (INTEL_GEN(dev_priv) >= 9)
5811 skylake_pfit_enable(intel_crtc); 5734 skylake_pfit_enable(pipe_config);
5812 else 5735 else
5813 ironlake_pfit_enable(intel_crtc); 5736 ironlake_pfit_enable(pipe_config);
5814 5737
5815 /* 5738 /*
5816 * On ILK+ LUT must be loaded before the pipe is running but with 5739 * On ILK+ LUT must be loaded before the pipe is running but with
@@ -5843,10 +5766,10 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
5843 if (!transcoder_is_dsi(cpu_transcoder)) 5766 if (!transcoder_is_dsi(cpu_transcoder))
5844 intel_enable_pipe(pipe_config); 5767 intel_enable_pipe(pipe_config);
5845 5768
5846 if (intel_crtc->config->has_pch_encoder) 5769 if (pipe_config->has_pch_encoder)
5847 lpt_pch_enable(old_intel_state, pipe_config); 5770 lpt_pch_enable(old_intel_state, pipe_config);
5848 5771
5849 if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST)) 5772 if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
5850 intel_ddi_set_vc_payload_alloc(pipe_config, true); 5773 intel_ddi_set_vc_payload_alloc(pipe_config, true);
5851 5774
5852 assert_vblank_disabled(crtc); 5775 assert_vblank_disabled(crtc);
@@ -5868,15 +5791,15 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
5868 } 5791 }
5869} 5792}
5870 5793
5871static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force) 5794static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
5872{ 5795{
5873 struct drm_device *dev = crtc->base.dev; 5796 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
5874 struct drm_i915_private *dev_priv = to_i915(dev); 5797 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5875 int pipe = crtc->pipe; 5798 enum pipe pipe = crtc->pipe;
5876 5799
5877 /* To avoid upsetting the power well on haswell only disable the pfit if 5800 /* To avoid upsetting the power well on haswell only disable the pfit if
5878 * it's in use. The hw state code will make sure we get this right. */ 5801 * it's in use. The hw state code will make sure we get this right. */
5879 if (force || crtc->config->pch_pfit.enabled) { 5802 if (old_crtc_state->pch_pfit.enabled) {
5880 I915_WRITE(PF_CTL(pipe), 0); 5803 I915_WRITE(PF_CTL(pipe), 0);
5881 I915_WRITE(PF_WIN_POS(pipe), 0); 5804 I915_WRITE(PF_WIN_POS(pipe), 0);
5882 I915_WRITE(PF_WIN_SZ(pipe), 0); 5805 I915_WRITE(PF_WIN_SZ(pipe), 0);
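
[editor's note] Most hunks in this diff follow the same mechanical conversion seen here: a function stops taking struct intel_crtc * and reading the mutable crtc->config pointer, and instead takes the relevant crtc state and derives the crtc from it. A reduced before/after sketch with trimmed-down stand-in types (not the driver's real structs):

#include <stdbool.h>
#include <stdio.h>

struct intel_crtc;
struct intel_crtc_state {
	struct intel_crtc *crtc;         /* models crtc_state->base.crtc */
	bool pch_pfit_enabled;
};
struct intel_crtc {
	int pipe;
	struct intel_crtc_state *config; /* mutable pointer the refactor retires */
};

/* Before: reads through crtc->config, which may already point at the
 * *new* state during a modeset, hence the "force" escape hatch. */
static void pfit_disable_old(struct intel_crtc *crtc, bool force)
{
	if (force || crtc->config->pch_pfit_enabled)
		printf("disable pfit on pipe %d\n", crtc->pipe);
}

/* After: the caller hands in the old state explicitly, so the check is
 * always against the state being torn down and "force" disappears. */
static void pfit_disable_new(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = old_crtc_state->crtc;

	if (old_crtc_state->pch_pfit_enabled)
		printf("disable pfit on pipe %d\n", crtc->pipe);
}

int main(void)
{
	struct intel_crtc crtc = { .pipe = 0 };
	struct intel_crtc_state old = { .crtc = &crtc, .pch_pfit_enabled = true };

	crtc.config = &old;
	pfit_disable_old(&crtc, false);
	pfit_disable_new(&old);
	return 0;
}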
@@ -5907,14 +5830,14 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
5907 5830
5908 intel_disable_pipe(old_crtc_state); 5831 intel_disable_pipe(old_crtc_state);
5909 5832
5910 ironlake_pfit_disable(intel_crtc, false); 5833 ironlake_pfit_disable(old_crtc_state);
5911 5834
5912 if (intel_crtc->config->has_pch_encoder) 5835 if (old_crtc_state->has_pch_encoder)
5913 ironlake_fdi_disable(crtc); 5836 ironlake_fdi_disable(crtc);
5914 5837
5915 intel_encoders_post_disable(crtc, old_crtc_state, old_state); 5838 intel_encoders_post_disable(crtc, old_crtc_state, old_state);
5916 5839
5917 if (intel_crtc->config->has_pch_encoder) { 5840 if (old_crtc_state->has_pch_encoder) {
5918 ironlake_disable_pch_transcoder(dev_priv, pipe); 5841 ironlake_disable_pch_transcoder(dev_priv, pipe);
5919 5842
5920 if (HAS_PCH_CPT(dev_priv)) { 5843 if (HAS_PCH_CPT(dev_priv)) {
@@ -5968,21 +5891,22 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
5968 if (INTEL_GEN(dev_priv) >= 9) 5891 if (INTEL_GEN(dev_priv) >= 9)
5969 skylake_scaler_disable(intel_crtc); 5892 skylake_scaler_disable(intel_crtc);
5970 else 5893 else
5971 ironlake_pfit_disable(intel_crtc, false); 5894 ironlake_pfit_disable(old_crtc_state);
5972 5895
5973 intel_encoders_post_disable(crtc, old_crtc_state, old_state); 5896 intel_encoders_post_disable(crtc, old_crtc_state, old_state);
5974 5897
5975 if (INTEL_GEN(dev_priv) >= 11) 5898 if (INTEL_GEN(dev_priv) >= 11)
5976 icl_unmap_plls_to_ports(crtc, old_crtc_state, old_state); 5899 icl_unmap_plls_to_ports(crtc, old_crtc_state, old_state);
5900
5901 intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
5977} 5902}
5978 5903
5979static void i9xx_pfit_enable(struct intel_crtc *crtc) 5904static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
5980{ 5905{
5981 struct drm_device *dev = crtc->base.dev; 5906 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5982 struct drm_i915_private *dev_priv = to_i915(dev); 5907 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5983 struct intel_crtc_state *pipe_config = crtc->config;
5984 5908
5985 if (!pipe_config->gmch_pfit.control) 5909 if (!crtc_state->gmch_pfit.control)
5986 return; 5910 return;
5987 5911
5988 /* 5912 /*
@@ -5992,8 +5916,8 @@ static void i9xx_pfit_enable(struct intel_crtc *crtc)
5992 WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE); 5916 WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
5993 assert_pipe_disabled(dev_priv, crtc->pipe); 5917 assert_pipe_disabled(dev_priv, crtc->pipe);
5994 5918
5995 I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios); 5919 I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
5996 I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control); 5920 I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);
5997 5921
5998 /* Border color in case we don't scale up to the full screen. Black by 5922 /* Border color in case we don't scale up to the full screen. Black by
5999 * default, change to something else for debugging. */ 5923 * default, change to something else for debugging. */
@@ -6048,6 +5972,28 @@ enum intel_display_power_domain intel_port_to_power_domain(enum port port)
6048 } 5972 }
6049} 5973}
6050 5974
5975enum intel_display_power_domain
5976intel_aux_power_domain(struct intel_digital_port *dig_port)
5977{
5978 switch (dig_port->aux_ch) {
5979 case AUX_CH_A:
5980 return POWER_DOMAIN_AUX_A;
5981 case AUX_CH_B:
5982 return POWER_DOMAIN_AUX_B;
5983 case AUX_CH_C:
5984 return POWER_DOMAIN_AUX_C;
5985 case AUX_CH_D:
5986 return POWER_DOMAIN_AUX_D;
5987 case AUX_CH_E:
5988 return POWER_DOMAIN_AUX_E;
5989 case AUX_CH_F:
5990 return POWER_DOMAIN_AUX_F;
5991 default:
5992 MISSING_CASE(dig_port->aux_ch);
5993 return POWER_DOMAIN_AUX_A;
5994 }
5995}
5996
6051static u64 get_crtc_power_domains(struct drm_crtc *crtc, 5997static u64 get_crtc_power_domains(struct drm_crtc *crtc,
6052 struct intel_crtc_state *crtc_state) 5998 struct intel_crtc_state *crtc_state)
6053{ 5999{
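
[editor's note] The new intel_aux_power_domain() helper above centralizes the AUX-channel-to-power-domain mapping behind an exhaustive switch with a logged fallback. A self-contained sketch of the same shape:

#include <stdio.h>

enum aux_ch { AUX_CH_A, AUX_CH_B, AUX_CH_C, AUX_CH_D, AUX_CH_E, AUX_CH_F };
enum power_domain {
	POWER_DOMAIN_AUX_A, POWER_DOMAIN_AUX_B, POWER_DOMAIN_AUX_C,
	POWER_DOMAIN_AUX_D, POWER_DOMAIN_AUX_E, POWER_DOMAIN_AUX_F,
};

/* Exhaustive mapping; a bogus channel is logged (the driver's
 * MISSING_CASE) and folded to a safe default instead of UB. */
static enum power_domain aux_power_domain(enum aux_ch ch)
{
	switch (ch) {
	case AUX_CH_A: return POWER_DOMAIN_AUX_A;
	case AUX_CH_B: return POWER_DOMAIN_AUX_B;
	case AUX_CH_C: return POWER_DOMAIN_AUX_C;
	case AUX_CH_D: return POWER_DOMAIN_AUX_D;
	case AUX_CH_E: return POWER_DOMAIN_AUX_E;
	case AUX_CH_F: return POWER_DOMAIN_AUX_F;
	default:
		fprintf(stderr, "missing case: aux ch %d\n", (int)ch);
		return POWER_DOMAIN_AUX_A;
	}
}

int main(void)
{
	printf("AUX_CH_C -> domain %d\n", (int)aux_power_domain(AUX_CH_C));
	return 0;
}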
@@ -6127,20 +6073,18 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
6127 if (WARN_ON(intel_crtc->active)) 6073 if (WARN_ON(intel_crtc->active))
6128 return; 6074 return;
6129 6075
6130 if (intel_crtc_has_dp_encoder(intel_crtc->config)) 6076 if (intel_crtc_has_dp_encoder(pipe_config))
6131 intel_dp_set_m_n(intel_crtc, M1_N1); 6077 intel_dp_set_m_n(pipe_config, M1_N1);
6132 6078
6133 intel_set_pipe_timings(intel_crtc); 6079 intel_set_pipe_timings(pipe_config);
6134 intel_set_pipe_src_size(intel_crtc); 6080 intel_set_pipe_src_size(pipe_config);
6135 6081
6136 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { 6082 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
6137 struct drm_i915_private *dev_priv = to_i915(dev);
6138
6139 I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY); 6083 I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
6140 I915_WRITE(CHV_CANVAS(pipe), 0); 6084 I915_WRITE(CHV_CANVAS(pipe), 0);
6141 } 6085 }
6142 6086
6143 i9xx_set_pipeconf(intel_crtc); 6087 i9xx_set_pipeconf(pipe_config);
6144 6088
6145 intel_color_set_csc(&pipe_config->base); 6089 intel_color_set_csc(&pipe_config->base);
6146 6090
@@ -6151,16 +6095,16 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
6151 intel_encoders_pre_pll_enable(crtc, pipe_config, old_state); 6095 intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
6152 6096
6153 if (IS_CHERRYVIEW(dev_priv)) { 6097 if (IS_CHERRYVIEW(dev_priv)) {
6154 chv_prepare_pll(intel_crtc, intel_crtc->config); 6098 chv_prepare_pll(intel_crtc, pipe_config);
6155 chv_enable_pll(intel_crtc, intel_crtc->config); 6099 chv_enable_pll(intel_crtc, pipe_config);
6156 } else { 6100 } else {
6157 vlv_prepare_pll(intel_crtc, intel_crtc->config); 6101 vlv_prepare_pll(intel_crtc, pipe_config);
6158 vlv_enable_pll(intel_crtc, intel_crtc->config); 6102 vlv_enable_pll(intel_crtc, pipe_config);
6159 } 6103 }
6160 6104
6161 intel_encoders_pre_enable(crtc, pipe_config, old_state); 6105 intel_encoders_pre_enable(crtc, pipe_config, old_state);
6162 6106
6163 i9xx_pfit_enable(intel_crtc); 6107 i9xx_pfit_enable(pipe_config);
6164 6108
6165 intel_color_load_luts(&pipe_config->base); 6109 intel_color_load_luts(&pipe_config->base);
6166 6110
@@ -6174,13 +6118,13 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
6174 intel_encoders_enable(crtc, pipe_config, old_state); 6118 intel_encoders_enable(crtc, pipe_config, old_state);
6175} 6119}
6176 6120
6177static void i9xx_set_pll_dividers(struct intel_crtc *crtc) 6121static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
6178{ 6122{
6179 struct drm_device *dev = crtc->base.dev; 6123 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6180 struct drm_i915_private *dev_priv = to_i915(dev); 6124 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6181 6125
6182 I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0); 6126 I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
6183 I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1); 6127 I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
6184} 6128}
6185 6129
6186static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config, 6130static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
@@ -6197,15 +6141,15 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
6197 if (WARN_ON(intel_crtc->active)) 6141 if (WARN_ON(intel_crtc->active))
6198 return; 6142 return;
6199 6143
6200 i9xx_set_pll_dividers(intel_crtc); 6144 i9xx_set_pll_dividers(pipe_config);
6201 6145
6202 if (intel_crtc_has_dp_encoder(intel_crtc->config)) 6146 if (intel_crtc_has_dp_encoder(pipe_config))
6203 intel_dp_set_m_n(intel_crtc, M1_N1); 6147 intel_dp_set_m_n(pipe_config, M1_N1);
6204 6148
6205 intel_set_pipe_timings(intel_crtc); 6149 intel_set_pipe_timings(pipe_config);
6206 intel_set_pipe_src_size(intel_crtc); 6150 intel_set_pipe_src_size(pipe_config);
6207 6151
6208 i9xx_set_pipeconf(intel_crtc); 6152 i9xx_set_pipeconf(pipe_config);
6209 6153
6210 intel_crtc->active = true; 6154 intel_crtc->active = true;
6211 6155
@@ -6216,13 +6160,13 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
6216 6160
6217 i9xx_enable_pll(intel_crtc, pipe_config); 6161 i9xx_enable_pll(intel_crtc, pipe_config);
6218 6162
6219 i9xx_pfit_enable(intel_crtc); 6163 i9xx_pfit_enable(pipe_config);
6220 6164
6221 intel_color_load_luts(&pipe_config->base); 6165 intel_color_load_luts(&pipe_config->base);
6222 6166
6223 if (dev_priv->display.initial_watermarks != NULL) 6167 if (dev_priv->display.initial_watermarks != NULL)
6224 dev_priv->display.initial_watermarks(old_intel_state, 6168 dev_priv->display.initial_watermarks(old_intel_state,
6225 intel_crtc->config); 6169 pipe_config);
6226 else 6170 else
6227 intel_update_watermarks(intel_crtc); 6171 intel_update_watermarks(intel_crtc);
6228 intel_enable_pipe(pipe_config); 6172 intel_enable_pipe(pipe_config);
@@ -6233,12 +6177,12 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
6233 intel_encoders_enable(crtc, pipe_config, old_state); 6177 intel_encoders_enable(crtc, pipe_config, old_state);
6234} 6178}
6235 6179
6236static void i9xx_pfit_disable(struct intel_crtc *crtc) 6180static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
6237{ 6181{
6238 struct drm_device *dev = crtc->base.dev; 6182 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
6239 struct drm_i915_private *dev_priv = to_i915(dev); 6183 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6240 6184
6241 if (!crtc->config->gmch_pfit.control) 6185 if (!old_crtc_state->gmch_pfit.control)
6242 return; 6186 return;
6243 6187
6244 assert_pipe_disabled(dev_priv, crtc->pipe); 6188 assert_pipe_disabled(dev_priv, crtc->pipe);
@@ -6271,17 +6215,17 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
6271 6215
6272 intel_disable_pipe(old_crtc_state); 6216 intel_disable_pipe(old_crtc_state);
6273 6217
6274 i9xx_pfit_disable(intel_crtc); 6218 i9xx_pfit_disable(old_crtc_state);
6275 6219
6276 intel_encoders_post_disable(crtc, old_crtc_state, old_state); 6220 intel_encoders_post_disable(crtc, old_crtc_state, old_state);
6277 6221
6278 if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) { 6222 if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
6279 if (IS_CHERRYVIEW(dev_priv)) 6223 if (IS_CHERRYVIEW(dev_priv))
6280 chv_disable_pll(dev_priv, pipe); 6224 chv_disable_pll(dev_priv, pipe);
6281 else if (IS_VALLEYVIEW(dev_priv)) 6225 else if (IS_VALLEYVIEW(dev_priv))
6282 vlv_disable_pll(dev_priv, pipe); 6226 vlv_disable_pll(dev_priv, pipe);
6283 else 6227 else
6284 i9xx_disable_pll(intel_crtc); 6228 i9xx_disable_pll(old_crtc_state);
6285 } 6229 }
6286 6230
6287 intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state); 6231 intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
@@ -6355,7 +6299,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
6355 6299
6356 intel_fbc_disable(intel_crtc); 6300 intel_fbc_disable(intel_crtc);
6357 intel_update_watermarks(intel_crtc); 6301 intel_update_watermarks(intel_crtc);
6358 intel_disable_shared_dpll(intel_crtc); 6302 intel_disable_shared_dpll(to_intel_crtc_state(crtc->state));
6359 6303
6360 domains = intel_crtc->enabled_power_domains; 6304 domains = intel_crtc->enabled_power_domains;
6361 for_each_power_domain(domain, domains) 6305 for_each_power_domain(domain, domains)
@@ -6433,66 +6377,6 @@ static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
6433 } 6377 }
6434} 6378}
6435 6379
6436int intel_connector_init(struct intel_connector *connector)
6437{
6438 struct intel_digital_connector_state *conn_state;
6439
6440 /*
6441 * Allocate enough memory to hold intel_digital_connector_state,
6442 * This might be a few bytes too many, but for connectors that don't
6443 * need it we'll free the state and allocate a smaller one on the first
6444	 * successful commit anyway.
6445 */
6446 conn_state = kzalloc(sizeof(*conn_state), GFP_KERNEL);
6447 if (!conn_state)
6448 return -ENOMEM;
6449
6450 __drm_atomic_helper_connector_reset(&connector->base,
6451 &conn_state->base);
6452
6453 return 0;
6454}
6455
6456struct intel_connector *intel_connector_alloc(void)
6457{
6458 struct intel_connector *connector;
6459
6460 connector = kzalloc(sizeof *connector, GFP_KERNEL);
6461 if (!connector)
6462 return NULL;
6463
6464 if (intel_connector_init(connector) < 0) {
6465 kfree(connector);
6466 return NULL;
6467 }
6468
6469 return connector;
6470}
6471
6472/*
6473 * Free the bits allocated by intel_connector_alloc.
6474 * This should only be used after intel_connector_alloc has returned
6475 * successfully, and before drm_connector_init returns successfully.
6476 * Otherwise the destroy callbacks for the connector and the state should
6477 * take care of proper cleanup/free
6478 */
6479void intel_connector_free(struct intel_connector *connector)
6480{
6481 kfree(to_intel_digital_connector_state(connector->base.state));
6482 kfree(connector);
6483}
6484
6485/* Simple connector->get_hw_state implementation for encoders that support only
6486 * one connector and no cloning and hence the encoder state determines the state
6487 * of the connector. */
6488bool intel_connector_get_hw_state(struct intel_connector *connector)
6489{
6490 enum pipe pipe = 0;
6491 struct intel_encoder *encoder = connector->encoder;
6492
6493 return encoder->get_hw_state(encoder, &pipe);
6494}
6495
6496static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state) 6380static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
6497{ 6381{
6498 if (crtc_state->base.enable && crtc_state->has_pch_encoder) 6382 if (crtc_state->base.enable && crtc_state->has_pch_encoder)
@@ -6603,6 +6487,9 @@ retry:
6603 link_bw, &pipe_config->fdi_m_n, false); 6487 link_bw, &pipe_config->fdi_m_n, false);
6604 6488
6605 ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config); 6489 ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
6490 if (ret == -EDEADLK)
6491 return ret;
6492
6606 if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) { 6493 if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
6607 pipe_config->pipe_bpp -= 2*3; 6494 pipe_config->pipe_bpp -= 2*3;
6608 DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n", 6495 DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
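
[editor's note] The added -EDEADLK check matters because the surrounding retry loop otherwise only distinguishes -EINVAL (reduce pipe_bpp and try again); a deadlock from lock acquisition must bubble up so the atomic core can back off and restart the whole commit. A sketch of that control flow, with check_fdi() as a hypothetical stand-in for ironlake_check_fdi_lanes():

#include <errno.h>
#include <stdio.h>

/* Pretend the check deadlocks on the first attempt, then needs one
 * bpp reduction to succeed. */
static int check_fdi(int bpp, int attempt)
{
	if (attempt == 0)
		return -EDEADLK;       /* lock contention: caller must back off */
	return bpp > 18 ? -EINVAL : 0; /* too much bandwidth: reduce bpp */
}

static int compute_fdi_config(int *bpp, int attempt)
{
	for (;;) {
		int ret = check_fdi(*bpp, attempt);

		if (ret == -EDEADLK)
			return ret;    /* the fix: don't swallow the deadlock */
		if (ret == -EINVAL && *bpp > 6 * 3) {
			*bpp -= 2 * 3; /* retry with a lower pipe bpp */
			continue;
		}
		return ret;
	}
}

int main(void)
{
	int bpp = 24;
	int ret;

	if (compute_fdi_config(&bpp, 0) == -EDEADLK)
		printf("backing off, retrying commit\n");
	ret = compute_fdi_config(&bpp, 1);
	printf("attempt 1: ret=%d bpp=%d\n", ret, bpp);
	return 0;
}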
@@ -6759,7 +6646,9 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
6759 return -EINVAL; 6646 return -EINVAL;
6760 } 6647 }
6761 6648
6762 if (pipe_config->ycbcr420 && pipe_config->base.ctm) { 6649 if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
6650 pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
6651 pipe_config->base.ctm) {
6763 /* 6652 /*
6764 * There is only one pipe CSC unit per pipe, and we need that 6653 * There is only one pipe CSC unit per pipe, and we need that
6765 * for output conversion from RGB->YCBCR. So if CTM is already 6654 * for output conversion from RGB->YCBCR. So if CTM is already
@@ -6925,12 +6814,12 @@ static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
6925 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); 6814 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
6926} 6815}
6927 6816
6928static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc, 6817static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
6929 struct intel_link_m_n *m_n) 6818 const struct intel_link_m_n *m_n)
6930{ 6819{
6931 struct drm_device *dev = crtc->base.dev; 6820 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6932 struct drm_i915_private *dev_priv = to_i915(dev); 6821 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6933 int pipe = crtc->pipe; 6822 enum pipe pipe = crtc->pipe;
6934 6823
6935 I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m); 6824 I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
6936 I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n); 6825 I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
@@ -6938,25 +6827,39 @@ static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
6938 I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n); 6827 I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
6939} 6828}
6940 6829
6941static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc, 6830static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
6942 struct intel_link_m_n *m_n, 6831 enum transcoder transcoder)
6943 struct intel_link_m_n *m2_n2) 6832{
6833 if (IS_HASWELL(dev_priv))
6834 return transcoder == TRANSCODER_EDP;
6835
6836 /*
6837 * Strictly speaking some registers are available before
6838 * gen7, but we only support DRRS on gen7+
6839 */
6840 return IS_GEN7(dev_priv) || IS_CHERRYVIEW(dev_priv);
6841}
6842
6843static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
6844 const struct intel_link_m_n *m_n,
6845 const struct intel_link_m_n *m2_n2)
6944{ 6846{
6847 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6945 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6848 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6946 int pipe = crtc->pipe; 6849 enum pipe pipe = crtc->pipe;
6947 enum transcoder transcoder = crtc->config->cpu_transcoder; 6850 enum transcoder transcoder = crtc_state->cpu_transcoder;
6948 6851
6949 if (INTEL_GEN(dev_priv) >= 5) { 6852 if (INTEL_GEN(dev_priv) >= 5) {
6950 I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m); 6853 I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
6951 I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n); 6854 I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
6952 I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m); 6855 I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
6953 I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n); 6856 I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
6954 /* M2_N2 registers to be set only for gen < 8 (M2_N2 available 6857 /*
6955 * for gen < 8) and if DRRS is supported (to make sure the 6858 * M2_N2 registers are set only if DRRS is supported
6956 * registers are not unnecessarily accessed). 6859 * (to make sure the registers are not unnecessarily accessed).
6957 */ 6860 */
6958 if (m2_n2 && (IS_CHERRYVIEW(dev_priv) || 6861 if (m2_n2 && crtc_state->has_drrs &&
6959 INTEL_GEN(dev_priv) < 8) && crtc->config->has_drrs) { 6862 transcoder_has_m2_n2(dev_priv, transcoder)) {
6960 I915_WRITE(PIPE_DATA_M2(transcoder), 6863 I915_WRITE(PIPE_DATA_M2(transcoder),
6961 TU_SIZE(m2_n2->tu) | m2_n2->gmch_m); 6864 TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
6962 I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n); 6865 I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
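
[editor's note] The new transcoder_has_m2_n2() predicate replaces the open-coded "gen < 8 || CHV" test, so the M2/N2 program and readout paths can no longer drift apart. A reduced sketch of the gate (the platform model is illustrative only):

#include <stdbool.h>
#include <stdio.h>

struct platform { bool is_haswell, is_gen7, is_cherryview; };
enum transcoder { TRANSCODER_A, TRANSCODER_EDP };

static bool transcoder_has_m2_n2(const struct platform *p, enum transcoder t)
{
	if (p->is_haswell)
		return t == TRANSCODER_EDP;
	/* some registers exist earlier, but DRRS is only supported gen7+ */
	return p->is_gen7 || p->is_cherryview;
}

/* The write path additionally gates on DRRS being enabled. */
static void set_m2_n2(const struct platform *p, enum transcoder t,
		      bool has_drrs)
{
	if (has_drrs && transcoder_has_m2_n2(p, t))
		printf("program PIPE_DATA_M2/N2\n");
}

int main(void)
{
	struct platform hsw = { .is_haswell = true };

	set_m2_n2(&hsw, TRANSCODER_EDP, true); /* programs */
	set_m2_n2(&hsw, TRANSCODER_A, true);   /* skipped */
	return 0;
}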
@@ -6971,29 +6874,29 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
6971 } 6874 }
6972} 6875}
6973 6876
6974void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n) 6877void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
6975{ 6878{
6976 struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL; 6879 const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
6977 6880
6978 if (m_n == M1_N1) { 6881 if (m_n == M1_N1) {
6979 dp_m_n = &crtc->config->dp_m_n; 6882 dp_m_n = &crtc_state->dp_m_n;
6980 dp_m2_n2 = &crtc->config->dp_m2_n2; 6883 dp_m2_n2 = &crtc_state->dp_m2_n2;
6981 } else if (m_n == M2_N2) { 6884 } else if (m_n == M2_N2) {
6982 6885
6983 /* 6886 /*
6984 * M2_N2 registers are not supported. Hence m2_n2 divider value 6887 * M2_N2 registers are not supported. Hence m2_n2 divider value
6985 * needs to be programmed into M1_N1. 6888 * needs to be programmed into M1_N1.
6986 */ 6889 */
6987 dp_m_n = &crtc->config->dp_m2_n2; 6890 dp_m_n = &crtc_state->dp_m2_n2;
6988 } else { 6891 } else {
6989 DRM_ERROR("Unsupported divider value\n"); 6892 DRM_ERROR("Unsupported divider value\n");
6990 return; 6893 return;
6991 } 6894 }
6992 6895
6993 if (crtc->config->has_pch_encoder) 6896 if (crtc_state->has_pch_encoder)
6994 intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n); 6897 intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
6995 else 6898 else
6996 intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2); 6899 intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
6997} 6900}
6998 6901
6999static void vlv_compute_dpll(struct intel_crtc *crtc, 6902static void vlv_compute_dpll(struct intel_crtc *crtc,
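
[editor's note] After the conversion, intel_dp_set_m_n() selects which divider set to program purely from the passed-in state. A sketch of that selection, with the M/N value pairs reduced to plain ints:

#include <stdio.h>

enum link_m_n_set { M1_N1, M2_N2 };

struct crtc_state {
	int dp_m_n, dp_m2_n2;   /* stand-ins for struct intel_link_m_n */
	int has_pch_encoder;
};

static void dp_set_m_n(const struct crtc_state *s, enum link_m_n_set which)
{
	const int *m_n, *m2_n2 = NULL;

	if (which == M1_N1) {
		m_n = &s->dp_m_n;
		m2_n2 = &s->dp_m2_n2;
	} else if (which == M2_N2) {
		/* no M2_N2 registers here: program m2_n2 into M1_N1 instead */
		m_n = &s->dp_m2_n2;
	} else {
		fprintf(stderr, "unsupported divider\n");
		return;
	}

	if (s->has_pch_encoder)
		printf("PCH transcoder m_n=%d\n", s->dp_m_n);
	else
		printf("CPU transcoder m_n=%d m2_n2=%d\n",
		       *m_n, m2_n2 ? *m2_n2 : -1);
}

int main(void)
{
	struct crtc_state s = { .dp_m_n = 100, .dp_m2_n2 = 50 };

	dp_set_m_n(&s, M1_N1);
	dp_set_m_n(&s, M2_N2);
	return 0;
}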
@@ -7092,8 +6995,8 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
7092 6995
7093 /* Set HBR and RBR LPF coefficients */ 6996 /* Set HBR and RBR LPF coefficients */
7094 if (pipe_config->port_clock == 162000 || 6997 if (pipe_config->port_clock == 162000 ||
7095 intel_crtc_has_type(crtc->config, INTEL_OUTPUT_ANALOG) || 6998 intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
7096 intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) 6999 intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
7097 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 7000 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7098 0x009f0003); 7001 0x009f0003);
7099 else 7002 else
@@ -7120,7 +7023,7 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
7120 7023
7121 coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe)); 7024 coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
7122 coreclk = (coreclk & 0x0000ff00) | 0x01c00000; 7025 coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
7123 if (intel_crtc_has_dp_encoder(crtc->config)) 7026 if (intel_crtc_has_dp_encoder(pipe_config))
7124 coreclk |= 0x01000000; 7027 coreclk |= 0x01000000;
7125 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk); 7028 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
7126 7029
@@ -7399,12 +7302,13 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
7399 crtc_state->dpll_hw_state.dpll = dpll; 7302 crtc_state->dpll_hw_state.dpll = dpll;
7400} 7303}
7401 7304
7402static void intel_set_pipe_timings(struct intel_crtc *intel_crtc) 7305static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
7403{ 7306{
7404 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); 7307 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7405 enum pipe pipe = intel_crtc->pipe; 7308 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7406 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 7309 enum pipe pipe = crtc->pipe;
7407 const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; 7310 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
7311 const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
7408 uint32_t crtc_vtotal, crtc_vblank_end; 7312 uint32_t crtc_vtotal, crtc_vblank_end;
7409 int vsyncshift = 0; 7313 int vsyncshift = 0;
7410 7314
@@ -7418,7 +7322,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
7418 crtc_vtotal -= 1; 7322 crtc_vtotal -= 1;
7419 crtc_vblank_end -= 1; 7323 crtc_vblank_end -= 1;
7420 7324
7421 if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO)) 7325 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
7422 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2; 7326 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
7423 else 7327 else
7424 vsyncshift = adjusted_mode->crtc_hsync_start - 7328 vsyncshift = adjusted_mode->crtc_hsync_start -
@@ -7460,18 +7364,18 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
7460 7364
7461} 7365}
7462 7366
7463static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc) 7367static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
7464{ 7368{
7465 struct drm_device *dev = intel_crtc->base.dev; 7369 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7466 struct drm_i915_private *dev_priv = to_i915(dev); 7370 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7467 enum pipe pipe = intel_crtc->pipe; 7371 enum pipe pipe = crtc->pipe;
7468 7372
7469 /* pipesrc controls the size that is scaled from, which should 7373 /* pipesrc controls the size that is scaled from, which should
7470 * always be the user's requested size. 7374 * always be the user's requested size.
7471 */ 7375 */
7472 I915_WRITE(PIPESRC(pipe), 7376 I915_WRITE(PIPESRC(pipe),
7473 ((intel_crtc->config->pipe_src_w - 1) << 16) | 7377 ((crtc_state->pipe_src_w - 1) << 16) |
7474 (intel_crtc->config->pipe_src_h - 1)); 7378 (crtc_state->pipe_src_h - 1));
7475} 7379}
7476 7380
7477static void intel_get_pipe_timings(struct intel_crtc *crtc, 7381static void intel_get_pipe_timings(struct intel_crtc *crtc,
@@ -7547,29 +7451,30 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
7547 drm_mode_set_name(mode); 7451 drm_mode_set_name(mode);
7548} 7452}
7549 7453
7550static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) 7454static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
7551{ 7455{
7552 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); 7456 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7457 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7553 uint32_t pipeconf; 7458 uint32_t pipeconf;
7554 7459
7555 pipeconf = 0; 7460 pipeconf = 0;
7556 7461
7557 /* we keep both pipes enabled on 830 */ 7462 /* we keep both pipes enabled on 830 */
7558 if (IS_I830(dev_priv)) 7463 if (IS_I830(dev_priv))
7559 pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE; 7464 pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
7560 7465
7561 if (intel_crtc->config->double_wide) 7466 if (crtc_state->double_wide)
7562 pipeconf |= PIPECONF_DOUBLE_WIDE; 7467 pipeconf |= PIPECONF_DOUBLE_WIDE;
7563 7468
7564 /* only g4x and later have fancy bpc/dither controls */ 7469 /* only g4x and later have fancy bpc/dither controls */
7565 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 7470 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
7566 IS_CHERRYVIEW(dev_priv)) { 7471 IS_CHERRYVIEW(dev_priv)) {
7567 /* Bspec claims that we can't use dithering for 30bpp pipes. */ 7472 /* Bspec claims that we can't use dithering for 30bpp pipes. */
7568 if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30) 7473 if (crtc_state->dither && crtc_state->pipe_bpp != 30)
7569 pipeconf |= PIPECONF_DITHER_EN | 7474 pipeconf |= PIPECONF_DITHER_EN |
7570 PIPECONF_DITHER_TYPE_SP; 7475 PIPECONF_DITHER_TYPE_SP;
7571 7476
7572 switch (intel_crtc->config->pipe_bpp) { 7477 switch (crtc_state->pipe_bpp) {
7573 case 18: 7478 case 18:
7574 pipeconf |= PIPECONF_6BPC; 7479 pipeconf |= PIPECONF_6BPC;
7575 break; 7480 break;
@@ -7585,9 +7490,9 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
7585 } 7490 }
7586 } 7491 }
7587 7492
7588 if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { 7493 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
7589 if (INTEL_GEN(dev_priv) < 4 || 7494 if (INTEL_GEN(dev_priv) < 4 ||
7590 intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO)) 7495 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
7591 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; 7496 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
7592 else 7497 else
7593 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT; 7498 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
@@ -7595,11 +7500,11 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
7595 pipeconf |= PIPECONF_PROGRESSIVE; 7500 pipeconf |= PIPECONF_PROGRESSIVE;
7596 7501
7597 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 7502 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
7598 intel_crtc->config->limited_color_range) 7503 crtc_state->limited_color_range)
7599 pipeconf |= PIPECONF_COLOR_RANGE_SELECT; 7504 pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
7600 7505
7601 I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf); 7506 I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
7602 POSTING_READ(PIPECONF(intel_crtc->pipe)); 7507 POSTING_READ(PIPECONF(crtc->pipe));
7603} 7508}
7604 7509
7605static int i8xx_crtc_compute_clock(struct intel_crtc *crtc, 7510static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
@@ -7955,6 +7860,49 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
7955 pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock); 7860 pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
7956} 7861}
7957 7862
7863static void intel_get_crtc_ycbcr_config(struct intel_crtc *crtc,
7864 struct intel_crtc_state *pipe_config)
7865{
7866 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7867 enum intel_output_format output = INTEL_OUTPUT_FORMAT_RGB;
7868
7869 pipe_config->lspcon_downsampling = false;
7870
7871 if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
7872 u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
7873
7874 if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
7875 bool ycbcr420_enabled = tmp & PIPEMISC_YUV420_ENABLE;
7876 bool blend = tmp & PIPEMISC_YUV420_MODE_FULL_BLEND;
7877
7878 if (ycbcr420_enabled) {
7879 /* We support 4:2:0 in full blend mode only */
7880 if (!blend)
7881 output = INTEL_OUTPUT_FORMAT_INVALID;
7882 else if (!(IS_GEMINILAKE(dev_priv) ||
7883 INTEL_GEN(dev_priv) >= 10))
7884 output = INTEL_OUTPUT_FORMAT_INVALID;
7885 else
7886 output = INTEL_OUTPUT_FORMAT_YCBCR420;
7887 } else {
7888 /*
7889 * Currently there is no interface defined to
7890 * check user preference between RGB/YCBCR444
7891 * or YCBCR420. So the only possible case for
7892 * YCBCR444 usage is driving YCBCR420 output
7893 * with LSPCON, when pipe is configured for
7894 * YCBCR444 output and LSPCON takes care of
7895 * downsampling it.
7896 */
7897 pipe_config->lspcon_downsampling = true;
7898 output = INTEL_OUTPUT_FORMAT_YCBCR444;
7899 }
7900 }
7901 }
7902
7903 pipe_config->output_format = output;
7904}
7905
7958static bool i9xx_get_pipe_config(struct intel_crtc *crtc, 7906static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
7959 struct intel_crtc_state *pipe_config) 7907 struct intel_crtc_state *pipe_config)
7960{ 7908{
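
[editor's note] intel_get_crtc_ycbcr_config() above replaces the old ycbcr420 bool plus ad-hoc PIPEMISC checks with a single readout into the output_format enum. A sketch of the decode; the bit positions are illustrative constants, not the real PIPEMISC layout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define COLORSPACE_YUV   (1u << 0)
#define YUV420_ENABLE    (1u << 1)
#define YUV420_FULLBLEND (1u << 2)

enum output_format { FMT_INVALID, FMT_RGB, FMT_YCBCR420, FMT_YCBCR444 };

static enum output_format decode_pipemisc(uint32_t tmp, bool gen10_or_glk)
{
	if (!(tmp & COLORSPACE_YUV))
		return FMT_RGB;
	if (tmp & YUV420_ENABLE) {
		/* 4:2:0 is only valid in full blend mode on GLK/gen10+ */
		if (!(tmp & YUV420_FULLBLEND) || !gen10_or_glk)
			return FMT_INVALID;
		return FMT_YCBCR420;
	}
	/* YUV colorspace without 4:2:0 means 4:4:4 (LSPCON downsampling) */
	return FMT_YCBCR444;
}

int main(void)
{
	printf("%d\n", decode_pipemisc(COLORSPACE_YUV | YUV420_ENABLE |
				       YUV420_FULLBLEND, true)); /* 4:2:0 */
	printf("%d\n", decode_pipemisc(COLORSPACE_YUV, true));    /* 4:4:4 */
	return 0;
}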
@@ -7967,6 +7915,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
7967 if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) 7915 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
7968 return false; 7916 return false;
7969 7917
7918 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
7970 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 7919 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
7971 pipe_config->shared_dpll = NULL; 7920 pipe_config->shared_dpll = NULL;
7972 7921
@@ -8498,16 +8447,16 @@ void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
8498 lpt_init_pch_refclk(dev_priv); 8447 lpt_init_pch_refclk(dev_priv);
8499} 8448}
8500 8449
8501static void ironlake_set_pipeconf(struct drm_crtc *crtc) 8450static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
8502{ 8451{
8503 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 8452 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8504 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 8453 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8505 int pipe = intel_crtc->pipe; 8454 enum pipe pipe = crtc->pipe;
8506 uint32_t val; 8455 uint32_t val;
8507 8456
8508 val = 0; 8457 val = 0;
8509 8458
8510 switch (intel_crtc->config->pipe_bpp) { 8459 switch (crtc_state->pipe_bpp) {
8511 case 18: 8460 case 18:
8512 val |= PIPECONF_6BPC; 8461 val |= PIPECONF_6BPC;
8513 break; 8462 break;
@@ -8525,32 +8474,32 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc)
8525 BUG(); 8474 BUG();
8526 } 8475 }
8527 8476
8528 if (intel_crtc->config->dither) 8477 if (crtc_state->dither)
8529 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); 8478 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8530 8479
8531 if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 8480 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
8532 val |= PIPECONF_INTERLACED_ILK; 8481 val |= PIPECONF_INTERLACED_ILK;
8533 else 8482 else
8534 val |= PIPECONF_PROGRESSIVE; 8483 val |= PIPECONF_PROGRESSIVE;
8535 8484
8536 if (intel_crtc->config->limited_color_range) 8485 if (crtc_state->limited_color_range)
8537 val |= PIPECONF_COLOR_RANGE_SELECT; 8486 val |= PIPECONF_COLOR_RANGE_SELECT;
8538 8487
8539 I915_WRITE(PIPECONF(pipe), val); 8488 I915_WRITE(PIPECONF(pipe), val);
8540 POSTING_READ(PIPECONF(pipe)); 8489 POSTING_READ(PIPECONF(pipe));
8541} 8490}
8542 8491
8543static void haswell_set_pipeconf(struct drm_crtc *crtc) 8492static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
8544{ 8493{
8545 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 8494 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8546 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 8495 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8547 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 8496 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
8548 u32 val = 0; 8497 u32 val = 0;
8549 8498
8550 if (IS_HASWELL(dev_priv) && intel_crtc->config->dither) 8499 if (IS_HASWELL(dev_priv) && crtc_state->dither)
8551 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); 8500 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8552 8501
8553 if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 8502 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
8554 val |= PIPECONF_INTERLACED_ILK; 8503 val |= PIPECONF_INTERLACED_ILK;
8555 else 8504 else
8556 val |= PIPECONF_PROGRESSIVE; 8505 val |= PIPECONF_PROGRESSIVE;
@@ -8559,16 +8508,15 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc)
8559 POSTING_READ(PIPECONF(cpu_transcoder)); 8508 POSTING_READ(PIPECONF(cpu_transcoder));
8560} 8509}
8561 8510
8562static void haswell_set_pipemisc(struct drm_crtc *crtc) 8511static void haswell_set_pipemisc(const struct intel_crtc_state *crtc_state)
8563{ 8512{
8564 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 8513 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
8565 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 8514 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
8566 struct intel_crtc_state *config = intel_crtc->config;
8567 8515
8568 if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) { 8516 if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
8569 u32 val = 0; 8517 u32 val = 0;
8570 8518
8571 switch (intel_crtc->config->pipe_bpp) { 8519 switch (crtc_state->pipe_bpp) {
8572 case 18: 8520 case 18:
8573 val |= PIPEMISC_DITHER_6_BPC; 8521 val |= PIPEMISC_DITHER_6_BPC;
8574 break; 8522 break;
@@ -8586,14 +8534,16 @@ static void haswell_set_pipemisc(struct drm_crtc *crtc)
8586 BUG(); 8534 BUG();
8587 } 8535 }
8588 8536
8589 if (intel_crtc->config->dither) 8537 if (crtc_state->dither)
8590 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP; 8538 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
8591 8539
8592 if (config->ycbcr420) { 8540 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
8593 val |= PIPEMISC_OUTPUT_COLORSPACE_YUV | 8541 crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
8594 PIPEMISC_YUV420_ENABLE | 8542 val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
8543
8544 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
8545 val |= PIPEMISC_YUV420_ENABLE |
8595 PIPEMISC_YUV420_MODE_FULL_BLEND; 8546 PIPEMISC_YUV420_MODE_FULL_BLEND;
8596 }
8597 8547
8598 I915_WRITE(PIPEMISC(intel_crtc->pipe), val); 8548 I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
8599 } 8549 }
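
[editor's note] The write side in haswell_set_pipemisc() is the mirror image of the readout: both YCbCr formats set the colorspace bit, and only 4:2:0 additionally enables the 420/full-blend bits. A short sketch with the same illustrative constants as above:

#include <stdint.h>
#include <stdio.h>

#define COLORSPACE_YUV   (1u << 0)  /* illustrative bits, as above */
#define YUV420_ENABLE    (1u << 1)
#define YUV420_FULLBLEND (1u << 2)

enum output_format { FMT_RGB, FMT_YCBCR420, FMT_YCBCR444 };

static uint32_t encode_pipemisc(enum output_format fmt)
{
	uint32_t val = 0;

	if (fmt == FMT_YCBCR420 || fmt == FMT_YCBCR444)
		val |= COLORSPACE_YUV;
	if (fmt == FMT_YCBCR420)
		val |= YUV420_ENABLE | YUV420_FULLBLEND;
	return val;
}

int main(void)
{
	printf("420: %#x, 444: %#x\n",
	       (unsigned)encode_pipemisc(FMT_YCBCR420),
	       (unsigned)encode_pipemisc(FMT_YCBCR444));
	return 0;
}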
@@ -8804,12 +8754,8 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
8804 m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder)); 8754 m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
8805 m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder)) 8755 m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
8806 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 8756 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
8807 /* Read M2_N2 registers only for gen < 8 (M2_N2 available for 8757
8808 * gen < 8) and if DRRS is supported (to make sure the 8758 if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
8809 * registers are not unnecessarily read).
8810 */
8811 if (m2_n2 && INTEL_GEN(dev_priv) < 8 &&
8812 crtc->config->has_drrs) {
8813 m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder)); 8759 m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
8814 m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder)); 8760 m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
8815 m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder)) 8761 m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
@@ -9018,6 +8964,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
9018 if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) 8964 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9019 return false; 8965 return false;
9020 8966
8967 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
9021 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 8968 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9022 pipe_config->shared_dpll = NULL; 8969 pipe_config->shared_dpll = NULL;
9023 8970
@@ -9366,30 +9313,17 @@ static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
9366 u32 temp; 9313 u32 temp;
9367 9314
9368 /* TODO: TBT pll not implemented. */ 9315 /* TODO: TBT pll not implemented. */
9369 switch (port) { 9316 if (intel_port_is_combophy(dev_priv, port)) {
9370 case PORT_A:
9371 case PORT_B:
9372 temp = I915_READ(DPCLKA_CFGCR0_ICL) & 9317 temp = I915_READ(DPCLKA_CFGCR0_ICL) &
9373 DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port); 9318 DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
9374 id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port); 9319 id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
9375 9320
9376 if (WARN_ON(id != DPLL_ID_ICL_DPLL0 && id != DPLL_ID_ICL_DPLL1)) 9321 if (WARN_ON(!intel_dpll_is_combophy(id)))
9377 return; 9322 return;
9378 break; 9323 } else if (intel_port_is_tc(dev_priv, port)) {
9379 case PORT_C: 9324 id = icl_port_to_mg_pll_id(port);
9380 id = DPLL_ID_ICL_MGPLL1; 9325 } else {
9381 break; 9326 WARN(1, "Invalid port %x\n", port);
9382 case PORT_D:
9383 id = DPLL_ID_ICL_MGPLL2;
9384 break;
9385 case PORT_E:
9386 id = DPLL_ID_ICL_MGPLL3;
9387 break;
9388 case PORT_F:
9389 id = DPLL_ID_ICL_MGPLL4;
9390 break;
9391 default:
9392 MISSING_CASE(port);
9393 return; 9327 return;
9394 } 9328 }
9395 9329
@@ -9652,27 +9586,11 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
9652 } 9586 }
9653 9587
9654 intel_get_pipe_src_size(crtc, pipe_config); 9588 intel_get_pipe_src_size(crtc, pipe_config);
9589 intel_get_crtc_ycbcr_config(crtc, pipe_config);
9655 9590
9656 pipe_config->gamma_mode = 9591 pipe_config->gamma_mode =
9657 I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK; 9592 I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
9658 9593
9659 if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
9660 u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
9661 bool clrspace_yuv = tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV;
9662
9663 if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
9664 bool blend_mode_420 = tmp &
9665 PIPEMISC_YUV420_MODE_FULL_BLEND;
9666
9667 pipe_config->ycbcr420 = tmp & PIPEMISC_YUV420_ENABLE;
9668 if (pipe_config->ycbcr420 != clrspace_yuv ||
9669 pipe_config->ycbcr420 != blend_mode_420)
9670 DRM_DEBUG_KMS("Bad 4:2:0 mode (%08x)\n", tmp);
9671 } else if (clrspace_yuv) {
9672 DRM_DEBUG_KMS("YCbCr 4:2:0 Unsupported\n");
9673 }
9674 }
9675
9676 power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe); 9594 power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
9677 if (intel_display_power_get_if_enabled(dev_priv, power_domain)) { 9595 if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
9678 power_domain_mask |= BIT_ULL(power_domain); 9596 power_domain_mask |= BIT_ULL(power_domain);
@@ -9941,8 +9859,6 @@ static void i845_update_cursor(struct intel_plane *plane,
9941 I915_WRITE_FW(CURPOS(PIPE_A), pos); 9859 I915_WRITE_FW(CURPOS(PIPE_A), pos);
9942 } 9860 }
9943 9861
9944 POSTING_READ_FW(CURCNTR(PIPE_A));
9945
9946 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 9862 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
9947} 9863}
9948 9864
@@ -10171,8 +10087,6 @@ static void i9xx_update_cursor(struct intel_plane *plane,
10171 I915_WRITE_FW(CURBASE(pipe), base); 10087 I915_WRITE_FW(CURBASE(pipe), base);
10172 } 10088 }
10173 10089
10174 POSTING_READ_FW(CURBASE(pipe));
10175
10176 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 10090 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
10177} 10091}
10178 10092
@@ -10777,14 +10691,40 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
10777 pipe_config->fb_bits |= plane->frontbuffer_bit; 10691 pipe_config->fb_bits |= plane->frontbuffer_bit;
10778 10692
10779 /* 10693 /*
10694 * ILK/SNB DVSACNTR/Sprite Enable
10695 * IVB SPR_CTL/Sprite Enable
10696 * "When in Self Refresh Big FIFO mode, a write to enable the
10697 * plane will be internally buffered and delayed while Big FIFO
10698 * mode is exiting."
10699 *
10700 * Which means that enabling the sprite can take an extra frame
10701 * when we start in big FIFO mode (LP1+). Thus we need to drop
10702 * down to LP0 and wait for vblank in order to make sure the
10703 * sprite gets enabled on the next vblank after the register write.
10704 * Doing otherwise would risk enabling the sprite one frame after
10705 * we've already signalled flip completion. We can resume LP1+
10706 * once the sprite has been enabled.
10707 *
10708 *
10780 * WaCxSRDisabledForSpriteScaling:ivb 10709 * WaCxSRDisabledForSpriteScaling:ivb
10710 * IVB SPR_SCALE/Scaling Enable
10711 * "Low Power watermarks must be disabled for at least one
10712 * frame before enabling sprite scaling, and kept disabled
10713 * until sprite scaling is disabled."
10714 *
10715 * ILK/SNB DVSASCALE/Scaling Enable
10716 * "When in Self Refresh Big FIFO mode, scaling enable will be
10717 * masked off while Big FIFO mode is exiting."
10781 * 10718 *
10782 * cstate->update_wm was already set above, so this flag will 10719 * Despite the w/a only being listed for IVB we assume that
10783 * take effect when we commit and program watermarks. 10720 * the ILK/SNB note has similar ramifications, hence we apply
10721 * the w/a on all three platforms.
10784 */ 10722 */
10785 if (plane->id == PLANE_SPRITE0 && IS_IVYBRIDGE(dev_priv) && 10723 if (plane->id == PLANE_SPRITE0 &&
10786 needs_scaling(to_intel_plane_state(plane_state)) && 10724 (IS_GEN5(dev_priv) || IS_GEN6(dev_priv) ||
10787 !needs_scaling(old_plane_state)) 10725 IS_IVYBRIDGE(dev_priv)) &&
10726 (turn_on || (!needs_scaling(old_plane_state) &&
10727 needs_scaling(to_intel_plane_state(plane_state)))))
10788 pipe_config->disable_lp_wm = true; 10728 pipe_config->disable_lp_wm = true;
10789 10729
10790 return 0; 10730 return 0;
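
[editor's note] Per the quoted bspec notes, the widened condition above now fires on any sprite turn-on (not just a scaling change) and on ILK/SNB as well as IVB. The decision, reduced to a pure predicate:

#include <stdbool.h>
#include <stdio.h>

struct plane_state { bool scaled; };

static bool needs_scaling(const struct plane_state *s) { return s->scaled; }

/* Drop to LP0 watermarks around sprite enable / scaling enable on
 * ILK/SNB/IVB so the plane comes up on the expected vblank. */
static bool must_disable_lp_wm(bool is_sprite0, bool is_gen5_6_or_ivb,
			       const struct plane_state *old_state,
			       const struct plane_state *new_state,
			       bool turn_on)
{
	return is_sprite0 && is_gen5_6_or_ivb &&
	       (turn_on ||
		(!needs_scaling(old_state) && needs_scaling(new_state)));
}

int main(void)
{
	struct plane_state off = { 0 }, scaled = { .scaled = true };

	printf("turn-on: %d\n",
	       must_disable_lp_wm(true, true, &off, &scaled, true));
	printf("no-op:   %d\n",
	       must_disable_lp_wm(true, true, &scaled, &scaled, false));
	return 0;
}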
@@ -10820,6 +10760,98 @@ static bool check_single_encoder_cloning(struct drm_atomic_state *state,
10820 return true; 10760 return true;
10821} 10761}
10822 10762
10763static int icl_add_linked_planes(struct intel_atomic_state *state)
10764{
10765 struct intel_plane *plane, *linked;
10766 struct intel_plane_state *plane_state, *linked_plane_state;
10767 int i;
10768
10769 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
10770 linked = plane_state->linked_plane;
10771
10772 if (!linked)
10773 continue;
10774
10775 linked_plane_state = intel_atomic_get_plane_state(state, linked);
10776 if (IS_ERR(linked_plane_state))
10777 return PTR_ERR(linked_plane_state);
10778
10779 WARN_ON(linked_plane_state->linked_plane != plane);
10780 WARN_ON(linked_plane_state->slave == plane_state->slave);
10781 }
10782
10783 return 0;
10784}
10785
10786static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
10787{
10788 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
10789 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10790 struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state);
10791 struct intel_plane *plane, *linked;
10792 struct intel_plane_state *plane_state;
10793 int i;
10794
10795 if (INTEL_GEN(dev_priv) < 11)
10796 return 0;
10797
10798 /*
10799 * Destroy all old plane links and make the slave plane invisible
10800 * in the crtc_state->active_planes mask.
10801 */
10802 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
10803 if (plane->pipe != crtc->pipe || !plane_state->linked_plane)
10804 continue;
10805
10806 plane_state->linked_plane = NULL;
10807 if (plane_state->slave && !plane_state->base.visible)
10808 crtc_state->active_planes &= ~BIT(plane->id);
10809
10810 plane_state->slave = false;
10811 }
10812
10813 if (!crtc_state->nv12_planes)
10814 return 0;
10815
10816 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
10817 struct intel_plane_state *linked_state = NULL;
10818
10819 if (plane->pipe != crtc->pipe ||
10820 !(crtc_state->nv12_planes & BIT(plane->id)))
10821 continue;
10822
10823 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
10824 if (!icl_is_nv12_y_plane(linked->id))
10825 continue;
10826
10827 if (crtc_state->active_planes & BIT(linked->id))
10828 continue;
10829
10830 linked_state = intel_atomic_get_plane_state(state, linked);
10831 if (IS_ERR(linked_state))
10832 return PTR_ERR(linked_state);
10833
10834 break;
10835 }
10836
10837 if (!linked_state) {
10838 DRM_DEBUG_KMS("Need %d free Y planes for NV12\n",
10839 hweight8(crtc_state->nv12_planes));
10840
10841 return -EINVAL;
10842 }
10843
10844 plane_state->linked_plane = linked;
10845
10846 linked_state->slave = true;
10847 linked_state->linked_plane = plane;
10848 crtc_state->active_planes |= BIT(linked->id);
10849 DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);
10850 }
10851
10852 return 0;
10853}
10854
10823static int intel_crtc_atomic_check(struct drm_crtc *crtc, 10855static int intel_crtc_atomic_check(struct drm_crtc *crtc,
10824 struct drm_crtc_state *crtc_state) 10856 struct drm_crtc_state *crtc_state)
10825{ 10857{
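
[editor's note] icl_check_nv12_planes() above first severs stale links, then pairs every NV12 plane with a free Y-capable plane, failing the commit when none is available. A bitmask-level sketch of the pairing pass (the masks are hypothetical, modelled on crtc_state->nv12_planes and active_planes):

#include <stdint.h>
#include <stdio.h>

/* Pair each plane in nv12_planes with a free plane from y_candidates.
 * Returns the updated active mask, or 0 when no Y plane is free
 * (the driver returns -EINVAL in that case). */
static uint8_t link_nv12_planes(uint8_t active, uint8_t nv12_planes,
				uint8_t y_candidates)
{
	for (int id = 0; id < 8; id++) {
		uint8_t avail;
		int y;

		if (!(nv12_planes & (1u << id)))
			continue;
		avail = y_candidates & ~active;
		if (!avail)
			return 0;            /* need more free Y planes */
		y = __builtin_ctz(avail);    /* lowest free candidate */
		active |= 1u << y;           /* slave becomes active */
		printf("plane %d -> Y plane %d\n", id, y);
	}
	return active;
}

int main(void)
{
	/* planes 0-1 are NV12 and active, planes 5-7 are Y-capable */
	printf("active mask: %#x\n",
	       (unsigned)link_nv12_planes(0x03, 0x03, 0xe0));
	return 0;
}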
@@ -10828,7 +10860,6 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
10828 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10860 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10829 struct intel_crtc_state *pipe_config = 10861 struct intel_crtc_state *pipe_config =
10830 to_intel_crtc_state(crtc_state); 10862 to_intel_crtc_state(crtc_state);
10831 struct drm_atomic_state *state = crtc_state->state;
10832 int ret; 10863 int ret;
10833 bool mode_changed = needs_modeset(crtc_state); 10864 bool mode_changed = needs_modeset(crtc_state);
10834 10865
@@ -10865,8 +10896,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
10865 } 10896 }
10866 } 10897 }
10867 10898
10868 if (dev_priv->display.compute_intermediate_wm && 10899 if (dev_priv->display.compute_intermediate_wm) {
10869 !to_intel_atomic_state(state)->skip_intermediate_wm) {
10870 if (WARN_ON(!dev_priv->display.compute_pipe_wm)) 10900 if (WARN_ON(!dev_priv->display.compute_pipe_wm))
10871 return 0; 10901 return 0;
10872 10902
@@ -10882,9 +10912,6 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
10882 DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n"); 10912 DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
10883 return ret; 10913 return ret;
10884 } 10914 }
10885 } else if (dev_priv->display.compute_intermediate_wm) {
10886 if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
10887 pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal;
10888 } 10915 }
10889 10916
10890 if (INTEL_GEN(dev_priv) >= 9) { 10917 if (INTEL_GEN(dev_priv) >= 9) {
@@ -10892,6 +10919,8 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
10892 ret = skl_update_scaler_crtc(pipe_config); 10919 ret = skl_update_scaler_crtc(pipe_config);
10893 10920
10894 if (!ret) 10921 if (!ret)
10922 ret = icl_check_nv12_planes(pipe_config);
10923 if (!ret)
10895 ret = skl_check_pipe_max_pixel_rate(intel_crtc, 10924 ret = skl_check_pipe_max_pixel_rate(intel_crtc,
10896 pipe_config); 10925 pipe_config);
10897 if (!ret) 10926 if (!ret)
@@ -10906,8 +10935,6 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
10906} 10935}
10907 10936
10908static const struct drm_crtc_helper_funcs intel_helper_funcs = { 10937static const struct drm_crtc_helper_funcs intel_helper_funcs = {
10909 .atomic_begin = intel_begin_crtc_commit,
10910 .atomic_flush = intel_finish_crtc_commit,
10911 .atomic_check = intel_crtc_atomic_check, 10938 .atomic_check = intel_crtc_atomic_check,
10912}; 10939};
10913 10940
@@ -10936,30 +10963,42 @@ static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
10936 drm_connector_list_iter_end(&conn_iter); 10963 drm_connector_list_iter_end(&conn_iter);
10937} 10964}
10938 10965
10939static void 10966static int
10940connected_sink_compute_bpp(struct intel_connector *connector, 10967compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
10941 struct intel_crtc_state *pipe_config) 10968 struct intel_crtc_state *pipe_config)
10942{ 10969{
10943 const struct drm_display_info *info = &connector->base.display_info; 10970 struct drm_connector *connector = conn_state->connector;
10944 int bpp = pipe_config->pipe_bpp; 10971 const struct drm_display_info *info = &connector->display_info;
10972 int bpp;
10945 10973
10946	 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constraints\n", 10974	 case 6 ... 7:
10947 connector->base.base.id, 10975 case 6 ... 7:
10948 connector->base.name); 10976 bpp = 6 * 3;
10949 10977 break;
10950 /* Don't use an invalid EDID bpc value */ 10978 case 8 ... 9:
10951 if (info->bpc != 0 && info->bpc * 3 < bpp) { 10979 bpp = 8 * 3;
10952 DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n", 10980 break;
10953 bpp, info->bpc * 3); 10981 case 10 ... 11:
10954 pipe_config->pipe_bpp = info->bpc * 3; 10982 bpp = 10 * 3;
10983 break;
10984 case 12:
10985 bpp = 12 * 3;
10986 break;
10987 default:
10988 return -EINVAL;
10955 } 10989 }
10956 10990
10957 /* Clamp bpp to 8 on screens without EDID 1.4 */ 10991 if (bpp < pipe_config->pipe_bpp) {
10958 if (info->bpc == 0 && bpp > 24) { 10992 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
10959 DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n", 10993 "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
10960 bpp); 10994 connector->base.id, connector->name,
10961 pipe_config->pipe_bpp = 24; 10995 bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc,
10996 pipe_config->pipe_bpp);
10997
10998 pipe_config->pipe_bpp = bpp;
10962 } 10999 }
11000
11001 return 0;
10963} 11002}
10964 11003
10965static int 11004static int
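
[editor's note] Sink bpp clamping now keys off the connector's computed max_bpc property instead of raw EDID fields, and unsupported values reject the state. A sketch of the mapping, using GNU case ranges as the hunk does (compile with gcc/clang):

#include <stdio.h>

/* max_bpc -> pipe bpp, mirroring the switch in the hunk above. */
static int max_bpc_to_bpp(int max_bpc)
{
	switch (max_bpc) {
	case 6 ... 7:   return 6 * 3;
	case 8 ... 9:   return 8 * 3;
	case 10 ... 11: return 10 * 3;
	case 12:        return 12 * 3;
	default:        return -1;  /* -EINVAL in the driver */
	}
}

int main(void)
{
	int pipe_bpp = 30;          /* platform default, e.g. 10bpc */
	int bpp = max_bpc_to_bpp(8);

	if (bpp < 0)
		return 1;
	if (bpp < pipe_bpp)
		pipe_bpp = bpp;     /* clamp to what the sink allows */
	printf("pipe_bpp = %d\n", pipe_bpp);
	return 0;
}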
@@ -10967,7 +11006,7 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
10967 struct intel_crtc_state *pipe_config) 11006 struct intel_crtc_state *pipe_config)
10968{ 11007{
10969 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 11008 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10970 struct drm_atomic_state *state; 11009 struct drm_atomic_state *state = pipe_config->base.state;
10971 struct drm_connector *connector; 11010 struct drm_connector *connector;
10972 struct drm_connector_state *connector_state; 11011 struct drm_connector_state *connector_state;
10973 int bpp, i; 11012 int bpp, i;
@@ -10980,21 +11019,21 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
10980 else 11019 else
10981 bpp = 8*3; 11020 bpp = 8*3;
10982 11021
10983
10984 pipe_config->pipe_bpp = bpp; 11022 pipe_config->pipe_bpp = bpp;
10985 11023
10986 state = pipe_config->base.state; 11024 /* Clamp display bpp to connector max bpp */
10987
10988 /* Clamp display bpp to EDID value */
10989 for_each_new_connector_in_state(state, connector, connector_state, i) { 11025 for_each_new_connector_in_state(state, connector, connector_state, i) {
11026 int ret;
11027
10990 if (connector_state->crtc != &crtc->base) 11028 if (connector_state->crtc != &crtc->base)
10991 continue; 11029 continue;
10992 11030
10993 connected_sink_compute_bpp(to_intel_connector(connector), 11031 ret = compute_sink_pipe_bpp(connector_state, pipe_config);
10994 pipe_config); 11032 if (ret)
11033 return ret;
10995 } 11034 }
10996 11035
10997 return bpp; 11036 return 0;
10998} 11037}
10999 11038
11000static void intel_dump_crtc_timings(const struct drm_display_mode *mode) 11039static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
@@ -11064,6 +11103,20 @@ static void snprintf_output_types(char *buf, size_t len,
11064 WARN_ON_ONCE(output_types != 0); 11103 WARN_ON_ONCE(output_types != 0);
11065} 11104}
11066 11105
11106static const char * const output_format_str[] = {
11107 [INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
11108 [INTEL_OUTPUT_FORMAT_RGB] = "RGB",
11109 [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
11110 [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
11111};
11112
11113static const char *output_formats(enum intel_output_format format)
11114{
11115 if (format >= ARRAY_SIZE(output_format_str))
11116 format = INTEL_OUTPUT_FORMAT_INVALID;
11117 return output_format_str[format];
11118}
11119
11067static void intel_dump_pipe_config(struct intel_crtc *crtc, 11120static void intel_dump_pipe_config(struct intel_crtc *crtc,
11068 struct intel_crtc_state *pipe_config, 11121 struct intel_crtc_state *pipe_config,
11069 const char *context) 11122 const char *context)
@@ -11083,6 +11136,9 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
11083 DRM_DEBUG_KMS("output_types: %s (0x%x)\n", 11136 DRM_DEBUG_KMS("output_types: %s (0x%x)\n",
11084 buf, pipe_config->output_types); 11137 buf, pipe_config->output_types);
11085 11138
11139 DRM_DEBUG_KMS("output format: %s\n",
11140 output_formats(pipe_config->output_format));
11141
11086 DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n", 11142 DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
11087 transcoder_name(pipe_config->cpu_transcoder), 11143 transcoder_name(pipe_config->cpu_transcoder),
11088 pipe_config->pipe_bpp, pipe_config->dither); 11144 pipe_config->pipe_bpp, pipe_config->dither);
@@ -11092,9 +11148,6 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
11092 pipe_config->fdi_lanes, 11148 pipe_config->fdi_lanes,
11093 &pipe_config->fdi_m_n); 11149 &pipe_config->fdi_m_n);
11094 11150
11095 if (pipe_config->ycbcr420)
11096 DRM_DEBUG_KMS("YCbCr 4:2:0 output enabled\n");
11097
11098 if (intel_crtc_has_dp_encoder(pipe_config)) { 11151 if (intel_crtc_has_dp_encoder(pipe_config)) {
11099 intel_dump_m_n_config(pipe_config, "dp m_n", 11152 intel_dump_m_n_config(pipe_config, "dp m_n",
11100 pipe_config->lane_count, &pipe_config->dp_m_n); 11153 pipe_config->lane_count, &pipe_config->dp_m_n);
@@ -11283,7 +11336,7 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
11283 struct intel_encoder *encoder; 11336 struct intel_encoder *encoder;
11284 struct drm_connector *connector; 11337 struct drm_connector *connector;
11285 struct drm_connector_state *connector_state; 11338 struct drm_connector_state *connector_state;
-	int base_bpp, ret = -EINVAL;
+	int base_bpp, ret;
11287 int i; 11340 int i;
11288 bool retry = true; 11341 bool retry = true;
11289 11342
@@ -11305,10 +11358,12 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
11305 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))) 11358 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
11306 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC; 11359 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
11307 11360
-	base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
-					     pipe_config);
-	if (base_bpp < 0)
-		goto fail;
+	ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
+					pipe_config);
+	if (ret)
+		return ret;
+
+	base_bpp = pipe_config->pipe_bpp;
11312 11367
11313 /* 11368 /*
11314 * Determine the real pipe dimensions. Note that stereo modes can 11369 * Determine the real pipe dimensions. Note that stereo modes can
@@ -11330,7 +11385,7 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
11330 11385
11331 if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) { 11386 if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
11332 DRM_DEBUG_KMS("rejecting invalid cloning configuration\n"); 11387 DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
-		goto fail;
+		return -EINVAL;
11334 } 11389 }
11335 11390
11336 /* 11391 /*
@@ -11366,7 +11421,7 @@ encoder_retry:
11366 11421
11367 if (!(encoder->compute_config(encoder, pipe_config, connector_state))) { 11422 if (!(encoder->compute_config(encoder, pipe_config, connector_state))) {
11368 DRM_DEBUG_KMS("Encoder config failure\n"); 11423 DRM_DEBUG_KMS("Encoder config failure\n");
-			goto fail;
+			return -EINVAL;
11370 } 11425 }
11371 } 11426 }
11372 11427
@@ -11377,16 +11432,16 @@ encoder_retry:
11377 * pipe_config->pixel_multiplier; 11432 * pipe_config->pixel_multiplier;
11378 11433
11379 ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config); 11434 ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
+	if (ret == -EDEADLK)
+		return ret;
 	if (ret < 0) {
 		DRM_DEBUG_KMS("CRTC fixup failed\n");
-		goto fail;
+		return ret;
 	}
 
 	if (ret == RETRY) {
-		if (WARN(!retry, "loop in pipe configuration computation\n")) {
-			ret = -EINVAL;
-			goto fail;
-		}
+		if (WARN(!retry, "loop in pipe configuration computation\n"))
+			return -EINVAL;
11390 11445
11391 DRM_DEBUG_KMS("CRTC bw constrained, retrying\n"); 11446 DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
11392 retry = false; 11447 retry = false;
@@ -11402,8 +11457,7 @@ encoder_retry:
11402 DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n", 11457 DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
11403 base_bpp, pipe_config->pipe_bpp, pipe_config->dither); 11458 base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
11404 11459
-fail:
-	return ret;
+	return 0;
11407} 11461}
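
Dropping the fail label above is not just cleanup: intel_crtc_compute_config() can now return -EDEADLK, and that value has to reach the atomic core unmodified so its lock backoff machinery can engage. A sketch of the caller-side contract, assuming the standard DRM helpers (this loop is not part of the patch):

/* Assumes <drm/drm_atomic.h> and <drm/drm_modeset_lock.h>. */
static int check_with_backoff(struct drm_atomic_state *state,
			      struct drm_modeset_acquire_ctx *ctx)
{
	int ret;

retry:
	ret = drm_atomic_check_only(state);	/* ends up in intel_atomic_check() */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(ctx);	/* drop locks, wait, re-acquire */
		goto retry;
	}

	return ret;
}
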
11408 11462
11409static bool intel_fuzzy_clock_check(int clock1, int clock2) 11463static bool intel_fuzzy_clock_check(int clock1, int clock2)
@@ -11672,6 +11726,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
11672 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end); 11726 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
11673 11727
11674 PIPE_CONF_CHECK_I(pixel_multiplier); 11728 PIPE_CONF_CHECK_I(pixel_multiplier);
11729 PIPE_CONF_CHECK_I(output_format);
11675 PIPE_CONF_CHECK_BOOL(has_hdmi_sink); 11730 PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
11676 if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) || 11731 if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
11677 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 11732 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
@@ -11680,7 +11735,6 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
11680 PIPE_CONF_CHECK_BOOL(hdmi_scrambling); 11735 PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
11681 PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio); 11736 PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
11682 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_infoframe); 11737 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_infoframe);
11683 PIPE_CONF_CHECK_BOOL(ycbcr420);
11684 11738
11685 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio); 11739 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
11686 11740
@@ -12189,8 +12243,9 @@ intel_modeset_verify_disabled(struct drm_device *dev,
12189 verify_disabled_dpll_state(dev); 12243 verify_disabled_dpll_state(dev);
12190} 12244}
12191 12245
-static void update_scanline_offset(struct intel_crtc *crtc)
+static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
 {
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
12194 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 12249 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12195 12250
12196 /* 12251 /*
@@ -12221,7 +12276,7 @@ static void update_scanline_offset(struct intel_crtc *crtc)
12221 * answer that's slightly in the future. 12276 * answer that's slightly in the future.
12222 */ 12277 */
12223 if (IS_GEN2(dev_priv)) { 12278 if (IS_GEN2(dev_priv)) {
-		const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+		const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
12225 int vtotal; 12280 int vtotal;
12226 12281
12227 vtotal = adjusted_mode->crtc_vtotal; 12282 vtotal = adjusted_mode->crtc_vtotal;
@@ -12230,7 +12285,7 @@ static void update_scanline_offset(struct intel_crtc *crtc)
12230 12285
12231 crtc->scanline_offset = vtotal - 1; 12286 crtc->scanline_offset = vtotal - 1;
12232 } else if (HAS_DDI(dev_priv) && 12287 } else if (HAS_DDI(dev_priv) &&
-		   intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) {
+		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
12234 crtc->scanline_offset = 2; 12289 crtc->scanline_offset = 2;
12235 } else 12290 } else
12236 crtc->scanline_offset = 1; 12291 crtc->scanline_offset = 1;
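
Condensed, the rule update_scanline_offset() implements (parameters stand in for the crtc_state fields the patched version now reads; the interlace halving sits in the context elided between the two hunks above):

#include <stdbool.h>

static int scanline_offset(bool gen2, bool ddi_hdmi,
			   int crtc_vtotal, bool interlaced)
{
	if (gen2) {
		int vtotal = crtc_vtotal;

		if (interlaced)
			vtotal /= 2;

		return vtotal - 1;	/* hw reports vtotal-1 on the last active line */
	}

	return ddi_hdmi ? 2 : 1;	/* DDI HDMI reads two lines ahead */
}
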
@@ -12513,6 +12568,8 @@ static int intel_atomic_check(struct drm_device *dev,
12513 } 12568 }
12514 12569
12515 ret = intel_modeset_pipe_config(crtc, pipe_config); 12570 ret = intel_modeset_pipe_config(crtc, pipe_config);
12571 if (ret == -EDEADLK)
12572 return ret;
12516 if (ret) { 12573 if (ret) {
12517 intel_dump_pipe_config(to_intel_crtc(crtc), 12574 intel_dump_pipe_config(to_intel_crtc(crtc),
12518 pipe_config, "[failed]"); 12575 pipe_config, "[failed]");
@@ -12544,6 +12601,10 @@ static int intel_atomic_check(struct drm_device *dev,
12544 intel_state->cdclk.logical = dev_priv->cdclk.logical; 12601 intel_state->cdclk.logical = dev_priv->cdclk.logical;
12545 } 12602 }
12546 12603
12604 ret = icl_add_linked_planes(intel_state);
12605 if (ret)
12606 return ret;
12607
12547 ret = drm_atomic_helper_check_planes(dev, state); 12608 ret = drm_atomic_helper_check_planes(dev, state);
12548 if (ret) 12609 if (ret)
12549 return ret; 12610 return ret;
@@ -12576,6 +12637,7 @@ static void intel_update_crtc(struct drm_crtc *crtc,
12576 struct drm_device *dev = crtc->dev; 12637 struct drm_device *dev = crtc->dev;
12577 struct drm_i915_private *dev_priv = to_i915(dev); 12638 struct drm_i915_private *dev_priv = to_i915(dev);
12578 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 12639 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12640 struct intel_crtc_state *old_intel_cstate = to_intel_crtc_state(old_crtc_state);
12579 struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state); 12641 struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state);
12580 bool modeset = needs_modeset(new_crtc_state); 12642 bool modeset = needs_modeset(new_crtc_state);
12581 struct intel_plane_state *new_plane_state = 12643 struct intel_plane_state *new_plane_state =
@@ -12583,7 +12645,7 @@ static void intel_update_crtc(struct drm_crtc *crtc,
12583 to_intel_plane(crtc->primary)); 12645 to_intel_plane(crtc->primary));
12584 12646
12585 if (modeset) { 12647 if (modeset) {
-		update_scanline_offset(intel_crtc);
+		update_scanline_offset(pipe_config);
12587 dev_priv->display.crtc_enable(pipe_config, state); 12649 dev_priv->display.crtc_enable(pipe_config, state);
12588 12650
12589 /* vblanks work again, re-enable pipe CRC. */ 12651 /* vblanks work again, re-enable pipe CRC. */
@@ -12596,7 +12658,12 @@ static void intel_update_crtc(struct drm_crtc *crtc,
12596 if (new_plane_state) 12658 if (new_plane_state)
12597 intel_fbc_enable(intel_crtc, pipe_config, new_plane_state); 12659 intel_fbc_enable(intel_crtc, pipe_config, new_plane_state);
12598 12660
-	drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
+	intel_begin_crtc_commit(crtc, old_crtc_state);
12662
12663 intel_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc,
12664 old_intel_cstate, pipe_config);
12665
12666 intel_finish_crtc_commit(crtc, old_crtc_state);
12600} 12667}
12601 12668
12602static void intel_update_crtcs(struct drm_atomic_state *state) 12669static void intel_update_crtcs(struct drm_atomic_state *state)
@@ -12628,13 +12695,12 @@ static void skl_update_crtcs(struct drm_atomic_state *state)
12628 int i; 12695 int i;
12629 u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices; 12696 u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
12630 u8 required_slices = intel_state->wm_results.ddb.enabled_slices; 12697 u8 required_slices = intel_state->wm_results.ddb.enabled_slices;
-
-	const struct skl_ddb_entry *entries[I915_MAX_PIPES] = {};
+	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
12633 12699
12634 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) 12700 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
12635 /* ignore allocations for crtc's that have been turned off. */ 12701 /* ignore allocations for crtc's that have been turned off. */
12636 if (new_crtc_state->active) 12702 if (new_crtc_state->active)
-			entries[i] = &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
+			entries[i] = to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
12638 12704
12639 /* If 2nd DBuf slice required, enable it here */ 12705 /* If 2nd DBuf slice required, enable it here */
12640 if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices) 12706 if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
@@ -12660,14 +12726,13 @@ static void skl_update_crtcs(struct drm_atomic_state *state)
12660 if (updated & cmask || !cstate->base.active) 12726 if (updated & cmask || !cstate->base.active)
12661 continue; 12727 continue;
12662 12728
-		if (skl_ddb_allocation_overlaps(dev_priv,
-						entries,
-						&cstate->wm.skl.ddb,
-						i))
+		if (skl_ddb_allocation_overlaps(&cstate->wm.skl.ddb,
+						entries,
+						INTEL_INFO(dev_priv)->num_pipes, i))
12667 continue; 12732 continue;
12668 12733
12669 updated |= cmask; 12734 updated |= cmask;
-		entries[i] = &cstate->wm.skl.ddb;
+		entries[i] = cstate->wm.skl.ddb;
12671 12736
12672 /* 12737 /*
 	 * If this is an already active pipe, its DDB changed,
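
With entries[] now holding skl_ddb_entry values, the reworked skl_ddb_allocation_overlaps() takes the candidate entry, the array, the pipe count, and the index to skip. A sketch of the overlap test under those assumptions (simplified types, not the driver's helpers):

#include <stdbool.h>
#include <stdint.h>

struct ddb_entry { uint16_t start, end; };	/* stand-in for skl_ddb_entry */

/* Two half-open DDB ranges collide unless one ends before the other starts. */
static bool ddb_overlaps(const struct ddb_entry *a, const struct ddb_entry *b)
{
	return a->start < b->end && b->start < a->end;
}

/* A pipe whose new allocation overlaps another pipe's old allocation must
 * wait for that pipe to be updated and flushed first. */
static bool overlaps_any_other(const struct ddb_entry *mine,
			       const struct ddb_entry *others,
			       int num_pipes, int my_pipe)
{
	int i;

	for (i = 0; i < num_pipes; i++)
		if (i != my_pipe && ddb_overlaps(mine, &others[i]))
			return true;

	return false;
}
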
@@ -12757,8 +12822,9 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
12757 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 12822 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
12758 struct drm_i915_private *dev_priv = to_i915(dev); 12823 struct drm_i915_private *dev_priv = to_i915(dev);
12759 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 12824 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+	struct intel_crtc_state *new_intel_crtc_state, *old_intel_crtc_state;
 	struct drm_crtc *crtc;
-	struct intel_crtc_state *intel_cstate;
+	struct intel_crtc *intel_crtc;
12762 u64 put_domains[I915_MAX_PIPES] = {}; 12828 u64 put_domains[I915_MAX_PIPES] = {};
12763 int i; 12829 int i;
12764 12830
@@ -12770,24 +12836,25 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
12770 intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET); 12836 intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
12771 12837
12772 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 12838 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
-		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+		old_intel_crtc_state = to_intel_crtc_state(old_crtc_state);
+		new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
+		intel_crtc = to_intel_crtc(crtc);
12774 12842
12775 if (needs_modeset(new_crtc_state) || 12843 if (needs_modeset(new_crtc_state) ||
12776 to_intel_crtc_state(new_crtc_state)->update_pipe) { 12844 to_intel_crtc_state(new_crtc_state)->update_pipe) {
12777 12845
-			put_domains[to_intel_crtc(crtc)->pipe] =
+			put_domains[intel_crtc->pipe] =
 				modeset_get_crtc_power_domains(crtc,
-					to_intel_crtc_state(new_crtc_state));
+					new_intel_crtc_state);
12781 } 12849 }
12782 12850
12783 if (!needs_modeset(new_crtc_state)) 12851 if (!needs_modeset(new_crtc_state))
12784 continue; 12852 continue;
12785 12853
-		intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
-				       to_intel_crtc_state(new_crtc_state));
+		intel_pre_plane_update(old_intel_crtc_state, new_intel_crtc_state);
12788 12855
12789 if (old_crtc_state->active) { 12856 if (old_crtc_state->active) {
-			intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask);
+			intel_crtc_disable_planes(intel_crtc, old_intel_crtc_state->active_planes);
12791 12858
12792 /* 12859 /*
12793 * We need to disable pipe CRC before disabling the pipe, 12860 * We need to disable pipe CRC before disabling the pipe,
@@ -12795,10 +12862,10 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
12795 */ 12862 */
12796 intel_crtc_disable_pipe_crc(intel_crtc); 12863 intel_crtc_disable_pipe_crc(intel_crtc);
12797 12864
-			dev_priv->display.crtc_disable(to_intel_crtc_state(old_crtc_state), state);
+			dev_priv->display.crtc_disable(old_intel_crtc_state, state);
12799 intel_crtc->active = false; 12866 intel_crtc->active = false;
12800 intel_fbc_disable(intel_crtc); 12867 intel_fbc_disable(intel_crtc);
-			intel_disable_shared_dpll(intel_crtc);
+			intel_disable_shared_dpll(old_intel_crtc_state);
12802 12869
12803 /* 12870 /*
12804 * Underruns don't always raise 12871 * Underruns don't always raise
@@ -12812,7 +12879,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
12812 !HAS_GMCH_DISPLAY(dev_priv) && 12879 !HAS_GMCH_DISPLAY(dev_priv) &&
12813 dev_priv->display.initial_watermarks) 12880 dev_priv->display.initial_watermarks)
12814 dev_priv->display.initial_watermarks(intel_state, 12881 dev_priv->display.initial_watermarks(intel_state,
-							     to_intel_crtc_state(new_crtc_state));
+							     new_intel_crtc_state);
12816 } 12883 }
12817 } 12884 }
12818 12885
@@ -12871,11 +12938,11 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
12871 * TODO: Move this (and other cleanup) to an async worker eventually. 12938 * TODO: Move this (and other cleanup) to an async worker eventually.
12872 */ 12939 */
12873 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 12940 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
-		intel_cstate = to_intel_crtc_state(new_crtc_state);
+		new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
12875 12942
12876 if (dev_priv->display.optimize_watermarks) 12943 if (dev_priv->display.optimize_watermarks)
12877 dev_priv->display.optimize_watermarks(intel_state, 12944 dev_priv->display.optimize_watermarks(intel_state,
-							      intel_cstate);
+							      new_intel_crtc_state);
12879 } 12946 }
12880 12947
12881 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 12948 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
@@ -13258,13 +13325,12 @@ intel_prepare_plane_fb(struct drm_plane *plane,
13258 13325
13259 ret = intel_plane_pin_fb(to_intel_plane_state(new_state)); 13326 ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
13260 13327
13261 fb_obj_bump_render_priority(obj);
13262
13263 mutex_unlock(&dev_priv->drm.struct_mutex); 13328 mutex_unlock(&dev_priv->drm.struct_mutex);
13264 i915_gem_object_unpin_pages(obj); 13329 i915_gem_object_unpin_pages(obj);
13265 if (ret) 13330 if (ret)
13266 return ret; 13331 return ret;
13267 13332
13333 fb_obj_bump_render_priority(obj);
13268 intel_fb_obj_flush(obj, ORIGIN_DIRTYFB); 13334 intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
13269 13335
13270 if (!new_state->fence) { /* implicit fencing */ 13336 if (!new_state->fence) { /* implicit fencing */
@@ -13395,7 +13461,7 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
13395 if (intel_cstate->update_pipe) 13461 if (intel_cstate->update_pipe)
13396 intel_update_pipe_config(old_intel_cstate, intel_cstate); 13462 intel_update_pipe_config(old_intel_cstate, intel_cstate);
13397 else if (INTEL_GEN(dev_priv) >= 9) 13463 else if (INTEL_GEN(dev_priv) >= 9)
-		skl_detach_scalers(intel_crtc);
+		skl_detach_scalers(intel_cstate);
13399 13465
13400out: 13466out:
13401 if (dev_priv->display.atomic_update_watermarks) 13467 if (dev_priv->display.atomic_update_watermarks)
@@ -13497,56 +13563,6 @@ static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
13497 } 13563 }
13498} 13564}
13499 13565
13500static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
13501 u32 format, u64 modifier)
13502{
13503 struct intel_plane *plane = to_intel_plane(_plane);
13504
13505 switch (modifier) {
13506 case DRM_FORMAT_MOD_LINEAR:
13507 case I915_FORMAT_MOD_X_TILED:
13508 case I915_FORMAT_MOD_Y_TILED:
13509 case I915_FORMAT_MOD_Yf_TILED:
13510 break;
13511 case I915_FORMAT_MOD_Y_TILED_CCS:
13512 case I915_FORMAT_MOD_Yf_TILED_CCS:
13513 if (!plane->has_ccs)
13514 return false;
13515 break;
13516 default:
13517 return false;
13518 }
13519
13520 switch (format) {
13521 case DRM_FORMAT_XRGB8888:
13522 case DRM_FORMAT_XBGR8888:
13523 case DRM_FORMAT_ARGB8888:
13524 case DRM_FORMAT_ABGR8888:
13525 if (is_ccs_modifier(modifier))
13526 return true;
13527 /* fall through */
13528 case DRM_FORMAT_RGB565:
13529 case DRM_FORMAT_XRGB2101010:
13530 case DRM_FORMAT_XBGR2101010:
13531 case DRM_FORMAT_YUYV:
13532 case DRM_FORMAT_YVYU:
13533 case DRM_FORMAT_UYVY:
13534 case DRM_FORMAT_VYUY:
13535 case DRM_FORMAT_NV12:
13536 if (modifier == I915_FORMAT_MOD_Yf_TILED)
13537 return true;
13538 /* fall through */
13539 case DRM_FORMAT_C8:
13540 if (modifier == DRM_FORMAT_MOD_LINEAR ||
13541 modifier == I915_FORMAT_MOD_X_TILED ||
13542 modifier == I915_FORMAT_MOD_Y_TILED)
13543 return true;
13544 /* fall through */
13545 default:
13546 return false;
13547 }
13548}
13549
13550static bool intel_cursor_format_mod_supported(struct drm_plane *_plane, 13566static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
13551 u32 format, u64 modifier) 13567 u32 format, u64 modifier)
13552{ 13568{
@@ -13554,18 +13570,7 @@ static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
13554 format == DRM_FORMAT_ARGB8888; 13570 format == DRM_FORMAT_ARGB8888;
13555} 13571}
13556 13572
-static struct drm_plane_funcs skl_plane_funcs = {
-	.update_plane = drm_atomic_helper_update_plane,
-	.disable_plane = drm_atomic_helper_disable_plane,
-	.destroy = intel_plane_destroy,
-	.atomic_get_property = intel_plane_atomic_get_property,
-	.atomic_set_property = intel_plane_atomic_set_property,
-	.atomic_duplicate_state = intel_plane_duplicate_state,
-	.atomic_destroy_state = intel_plane_destroy_state,
-	.format_mod_supported = skl_plane_format_mod_supported,
-};
-
-static struct drm_plane_funcs i965_plane_funcs = {
+static const struct drm_plane_funcs i965_plane_funcs = {
13569 .update_plane = drm_atomic_helper_update_plane, 13574 .update_plane = drm_atomic_helper_update_plane,
13570 .disable_plane = drm_atomic_helper_disable_plane, 13575 .disable_plane = drm_atomic_helper_disable_plane,
13571 .destroy = intel_plane_destroy, 13576 .destroy = intel_plane_destroy,
@@ -13576,7 +13581,7 @@ static struct drm_plane_funcs i965_plane_funcs = {
13576 .format_mod_supported = i965_plane_format_mod_supported, 13581 .format_mod_supported = i965_plane_format_mod_supported,
13577}; 13582};
13578 13583
-static struct drm_plane_funcs i8xx_plane_funcs = {
+static const struct drm_plane_funcs i8xx_plane_funcs = {
13580 .update_plane = drm_atomic_helper_update_plane, 13585 .update_plane = drm_atomic_helper_update_plane,
13581 .disable_plane = drm_atomic_helper_disable_plane, 13586 .disable_plane = drm_atomic_helper_disable_plane,
13582 .destroy = intel_plane_destroy, 13587 .destroy = intel_plane_destroy,
@@ -13602,14 +13607,16 @@ intel_legacy_cursor_update(struct drm_plane *plane,
13602 struct drm_plane_state *old_plane_state, *new_plane_state; 13607 struct drm_plane_state *old_plane_state, *new_plane_state;
13603 struct intel_plane *intel_plane = to_intel_plane(plane); 13608 struct intel_plane *intel_plane = to_intel_plane(plane);
13604 struct drm_framebuffer *old_fb; 13609 struct drm_framebuffer *old_fb;
-	struct drm_crtc_state *crtc_state = crtc->state;
+	struct intel_crtc_state *crtc_state =
+		to_intel_crtc_state(crtc->state);
+	struct intel_crtc_state *new_crtc_state;
13606 13613
13607 /* 13614 /*
13608 * When crtc is inactive or there is a modeset pending, 13615 * When crtc is inactive or there is a modeset pending,
13609 * wait for it to complete in the slowpath 13616 * wait for it to complete in the slowpath
13610 */ 13617 */
-	if (!crtc_state->active || needs_modeset(crtc_state) ||
-	    to_intel_crtc_state(crtc_state)->update_pipe)
+	if (!crtc_state->base.active || needs_modeset(&crtc_state->base) ||
+	    crtc_state->update_pipe)
13613 goto slow; 13620 goto slow;
13614 13621
13615 old_plane_state = plane->state; 13622 old_plane_state = plane->state;
@@ -13639,6 +13646,12 @@ intel_legacy_cursor_update(struct drm_plane *plane,
13639 if (!new_plane_state) 13646 if (!new_plane_state)
13640 return -ENOMEM; 13647 return -ENOMEM;
13641 13648
13649 new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(crtc));
13650 if (!new_crtc_state) {
13651 ret = -ENOMEM;
13652 goto out_free;
13653 }
13654
13642 drm_atomic_set_fb_for_plane(new_plane_state, fb); 13655 drm_atomic_set_fb_for_plane(new_plane_state, fb);
13643 13656
13644 new_plane_state->src_x = src_x; 13657 new_plane_state->src_x = src_x;
@@ -13650,9 +13663,8 @@ intel_legacy_cursor_update(struct drm_plane *plane,
13650 new_plane_state->crtc_w = crtc_w; 13663 new_plane_state->crtc_w = crtc_w;
13651 new_plane_state->crtc_h = crtc_h; 13664 new_plane_state->crtc_h = crtc_h;
13652 13665
-	ret = intel_plane_atomic_check_with_state(to_intel_crtc_state(crtc->state),
-						  to_intel_crtc_state(crtc->state), /* FIXME need a new crtc state? */
-						  to_intel_plane_state(plane->state),
+	ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
+						  to_intel_plane_state(old_plane_state),
13656 to_intel_plane_state(new_plane_state)); 13668 to_intel_plane_state(new_plane_state));
13657 if (ret) 13669 if (ret)
13658 goto out_free; 13670 goto out_free;
@@ -13674,10 +13686,21 @@ intel_legacy_cursor_update(struct drm_plane *plane,
13674 /* Swap plane state */ 13686 /* Swap plane state */
13675 plane->state = new_plane_state; 13687 plane->state = new_plane_state;
13676 13688
13689 /*
13690 * We cannot swap crtc_state as it may be in use by an atomic commit or
13691 * page flip that's running simultaneously. If we swap crtc_state and
13692 * destroy the old state, we will cause a use-after-free there.
13693 *
13694 * Only update active_planes, which is needed for our internal
13695 * bookkeeping. Either value will do the right thing when updating
13696 * planes atomically. If the cursor was part of the atomic update then
13697 * we would have taken the slowpath.
13698 */
13699 crtc_state->active_planes = new_crtc_state->active_planes;
13700
13677 if (plane->state->visible) { 13701 if (plane->state->visible) {
13678 trace_intel_update_plane(plane, to_intel_crtc(crtc)); 13702 trace_intel_update_plane(plane, to_intel_crtc(crtc));
-		intel_plane->update_plane(intel_plane,
-					  to_intel_crtc_state(crtc->state),
-					  to_intel_plane_state(plane->state));
+		intel_plane->update_plane(intel_plane, crtc_state,
+					  to_intel_plane_state(plane->state));
13682 } else { 13705 } else {
13683 trace_intel_disable_plane(plane, to_intel_crtc(crtc)); 13706 trace_intel_disable_plane(plane, to_intel_crtc(crtc));
@@ -13689,6 +13712,8 @@ intel_legacy_cursor_update(struct drm_plane *plane,
13689out_unlock: 13712out_unlock:
13690 mutex_unlock(&dev_priv->drm.struct_mutex); 13713 mutex_unlock(&dev_priv->drm.struct_mutex);
13691out_free: 13714out_free:
13715 if (new_crtc_state)
13716 intel_crtc_destroy_state(crtc, &new_crtc_state->base);
13692 if (ret) 13717 if (ret)
13693 intel_plane_destroy_state(plane, new_plane_state); 13718 intel_plane_destroy_state(plane, new_plane_state);
13694 else 13719 else
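
To make the long comment in this hunk concrete: the live crtc_state may still be read by a concurrent commit, so the cursor fastpath copies one scalar out of the checked state instead of swapping state pointers. A simplified illustration (illustrative types, not the driver structs):

struct crtc_state_sketch {
	unsigned int active_planes;
	/* ... many more fields a concurrent commit may be reading ... */
};

static void cursor_fastpath_commit(struct crtc_state_sketch *live,
				   const struct crtc_state_sketch *checked)
{
	/* Swapping 'live' for 'checked' and freeing the old state could
	 * use-after-free under a concurrent commit; copying the one scalar
	 * the internal bookkeeping needs is always safe. */
	live->active_planes = checked->active_planes;
}
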
@@ -13729,176 +13754,90 @@ static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
13729 return i9xx_plane == PLANE_A; 13754 return i9xx_plane == PLANE_A;
13730} 13755}
13731 13756
13732static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
13733 enum pipe pipe, enum plane_id plane_id)
13734{
13735 if (!HAS_FBC(dev_priv))
13736 return false;
13737
13738 return pipe == PIPE_A && plane_id == PLANE_PRIMARY;
13739}
13740
13741bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
13742 enum pipe pipe, enum plane_id plane_id)
13743{
13744 /*
13745 * FIXME: ICL requires two hardware planes for scanning out NV12
13746 * framebuffers. Do not advertize support until this is implemented.
13747 */
13748 if (INTEL_GEN(dev_priv) >= 11)
13749 return false;
13750
13751 if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
13752 return false;
13753
13754 if (INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C)
13755 return false;
13756
13757 if (plane_id != PLANE_PRIMARY && plane_id != PLANE_SPRITE0)
13758 return false;
13759
13760 return true;
13761}
13762
13763static struct intel_plane * 13757static struct intel_plane *
13764intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) 13758intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
13765{ 13759{
-	struct intel_plane *primary = NULL;
-	struct intel_plane_state *state = NULL;
+	struct intel_plane *plane;
 	const struct drm_plane_funcs *plane_funcs;
-	const uint32_t *intel_primary_formats;
 	unsigned int supported_rotations;
-	unsigned int num_formats;
-	const uint64_t *modifiers;
+	unsigned int possible_crtcs;
+	const u64 *modifiers;
+	const u32 *formats;
+	int num_formats;
 	int ret;
 
-	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
-	if (!primary) {
-		ret = -ENOMEM;
-		goto fail;
-	}
-
-	state = intel_create_plane_state(&primary->base);
-	if (!state) {
-		ret = -ENOMEM;
-		goto fail;
-	}
+	if (INTEL_GEN(dev_priv) >= 9)
+		return skl_universal_plane_create(dev_priv, pipe,
+						  PLANE_PRIMARY);
 
-	primary->base.state = &state->base;
+	plane = intel_plane_alloc();
+	if (IS_ERR(plane))
+		return plane;
 
-	if (INTEL_GEN(dev_priv) >= 9)
-		state->scaler_id = -1;
-	primary->pipe = pipe;
+	plane->pipe = pipe;
 	/*
 	 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
 	 * port is hooked to pipe B. Hence we want plane A feeding pipe B.
 	 */
 	if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
-		primary->i9xx_plane = (enum i9xx_plane_id) !pipe;
-	else
-		primary->i9xx_plane = (enum i9xx_plane_id) pipe;
-	primary->id = PLANE_PRIMARY;
-	primary->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, primary->id);
-
-	if (INTEL_GEN(dev_priv) >= 9)
-		primary->has_fbc = skl_plane_has_fbc(dev_priv,
-						     primary->pipe,
-						     primary->id);
+		plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
 	else
-		primary->has_fbc = i9xx_plane_has_fbc(dev_priv,
-						      primary->i9xx_plane);
+		plane->i9xx_plane = (enum i9xx_plane_id) pipe;
+	plane->id = PLANE_PRIMARY;
+	plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
 
-	if (primary->has_fbc) {
+	plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
+	if (plane->has_fbc) {
 		struct intel_fbc *fbc = &dev_priv->fbc;
 
-		fbc->possible_framebuffer_bits |= primary->frontbuffer_bit;
+		fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
 	}
 
-	if (INTEL_GEN(dev_priv) >= 9) {
-		primary->has_ccs = skl_plane_has_ccs(dev_priv, pipe,
-						     PLANE_PRIMARY);
-
-		if (skl_plane_has_planar(dev_priv, pipe, PLANE_PRIMARY)) {
-			intel_primary_formats = skl_pri_planar_formats;
-			num_formats = ARRAY_SIZE(skl_pri_planar_formats);
-		} else {
-			intel_primary_formats = skl_primary_formats;
-			num_formats = ARRAY_SIZE(skl_primary_formats);
-		}
-
-		if (primary->has_ccs)
-			modifiers = skl_format_modifiers_ccs;
-		else
-			modifiers = skl_format_modifiers_noccs;
-
-		primary->max_stride = skl_plane_max_stride;
-		primary->update_plane = skl_update_plane;
-		primary->disable_plane = skl_disable_plane;
-		primary->get_hw_state = skl_plane_get_hw_state;
-		primary->check_plane = skl_plane_check;
-
-		plane_funcs = &skl_plane_funcs;
-	} else if (INTEL_GEN(dev_priv) >= 4) {
-		intel_primary_formats = i965_primary_formats;
+	if (INTEL_GEN(dev_priv) >= 4) {
+		formats = i965_primary_formats;
 		num_formats = ARRAY_SIZE(i965_primary_formats);
 		modifiers = i9xx_format_modifiers;
 
-		primary->max_stride = i9xx_plane_max_stride;
-		primary->update_plane = i9xx_update_plane;
-		primary->disable_plane = i9xx_disable_plane;
-		primary->get_hw_state = i9xx_plane_get_hw_state;
-		primary->check_plane = i9xx_plane_check;
+		plane->max_stride = i9xx_plane_max_stride;
+		plane->update_plane = i9xx_update_plane;
+		plane->disable_plane = i9xx_disable_plane;
+		plane->get_hw_state = i9xx_plane_get_hw_state;
+		plane->check_plane = i9xx_plane_check;
 
 		plane_funcs = &i965_plane_funcs;
 	} else {
-		intel_primary_formats = i8xx_primary_formats;
+		formats = i8xx_primary_formats;
 		num_formats = ARRAY_SIZE(i8xx_primary_formats);
 		modifiers = i9xx_format_modifiers;
 
-		primary->max_stride = i9xx_plane_max_stride;
-		primary->update_plane = i9xx_update_plane;
-		primary->disable_plane = i9xx_disable_plane;
-		primary->get_hw_state = i9xx_plane_get_hw_state;
-		primary->check_plane = i9xx_plane_check;
+		plane->max_stride = i9xx_plane_max_stride;
+		plane->update_plane = i9xx_update_plane;
+		plane->disable_plane = i9xx_disable_plane;
+		plane->get_hw_state = i9xx_plane_get_hw_state;
+		plane->check_plane = i9xx_plane_check;
 
 		plane_funcs = &i8xx_plane_funcs;
 	}
 
-	if (INTEL_GEN(dev_priv) >= 9)
-		ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
-					       0, plane_funcs,
-					       intel_primary_formats, num_formats,
-					       modifiers,
-					       DRM_PLANE_TYPE_PRIMARY,
-					       "plane 1%c", pipe_name(pipe));
-	else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
-		ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
-					       0, plane_funcs,
-					       intel_primary_formats, num_formats,
-					       modifiers,
+	possible_crtcs = BIT(pipe);
+
+	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+					       possible_crtcs, plane_funcs,
+					       formats, num_formats, modifiers,
 					       DRM_PLANE_TYPE_PRIMARY,
 					       "primary %c", pipe_name(pipe));
 	else
-		ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
-					       0, plane_funcs,
-					       intel_primary_formats, num_formats,
-					       modifiers,
+		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+					       possible_crtcs, plane_funcs,
+					       formats, num_formats, modifiers,
 					       DRM_PLANE_TYPE_PRIMARY,
 					       "plane %c",
-					       plane_name(primary->i9xx_plane));
+					       plane_name(plane->i9xx_plane));
 	if (ret)
 		goto fail;
 
-	if (INTEL_GEN(dev_priv) >= 10) {
-		supported_rotations =
-			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
-			DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270 |
-			DRM_MODE_REFLECT_X;
-	} else if (INTEL_GEN(dev_priv) >= 9) {
-		supported_rotations =
-			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
-			DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
-	} else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
+	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
13902 supported_rotations = 13841 supported_rotations =
13903 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 | 13842 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
13904 DRM_MODE_REFLECT_X; 13843 DRM_MODE_REFLECT_X;
@@ -13910,26 +13849,16 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
13910 } 13849 }
13911 13850
13912 if (INTEL_GEN(dev_priv) >= 4) 13851 if (INTEL_GEN(dev_priv) >= 4)
-		drm_plane_create_rotation_property(&primary->base,
+		drm_plane_create_rotation_property(&plane->base,
13914 DRM_MODE_ROTATE_0, 13853 DRM_MODE_ROTATE_0,
13915 supported_rotations); 13854 supported_rotations);
13916 13855
-	if (INTEL_GEN(dev_priv) >= 9)
-		drm_plane_create_color_properties(&primary->base,
-						  BIT(DRM_COLOR_YCBCR_BT601) |
-						  BIT(DRM_COLOR_YCBCR_BT709),
-						  BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
-						  BIT(DRM_COLOR_YCBCR_FULL_RANGE),
-						  DRM_COLOR_YCBCR_BT709,
-						  DRM_COLOR_YCBCR_LIMITED_RANGE);
-
-	drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
+	drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
 
-	return primary;
+	return plane;
13929 13859
13930fail: 13860fail:
-	kfree(state);
-	kfree(primary);
+	intel_plane_free(plane);
13933 13862
13934 return ERR_PTR(ret); 13863 return ERR_PTR(ret);
13935} 13864}
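
The rewritten constructor leans on the new intel_plane_alloc()/intel_plane_free() helpers, which follow the kernel's ERR_PTR convention. A kernel-style sketch of the calling pattern (create_plane_sketch() is hypothetical; assumes <linux/err.h>):

static struct intel_plane *create_plane_sketch(void)
{
	struct intel_plane *plane = intel_plane_alloc();

	if (IS_ERR(plane))
		return plane;	/* an encoded errno, e.g. ERR_PTR(-ENOMEM) */

	/* ... per-platform initialization as in the function above ... */

	return plane;
}
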
@@ -13938,23 +13867,13 @@ static struct intel_plane *
13938intel_cursor_plane_create(struct drm_i915_private *dev_priv, 13867intel_cursor_plane_create(struct drm_i915_private *dev_priv,
13939 enum pipe pipe) 13868 enum pipe pipe)
13940{ 13869{
-	struct intel_plane *cursor = NULL;
-	struct intel_plane_state *state = NULL;
+	unsigned int possible_crtcs;
+	struct intel_plane *cursor;
 	int ret;
 
-	cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
-	if (!cursor) {
-		ret = -ENOMEM;
-		goto fail;
-	}
-
-	state = intel_create_plane_state(&cursor->base);
-	if (!state) {
-		ret = -ENOMEM;
-		goto fail;
-	}
-
-	cursor->base.state = &state->base;
+	cursor = intel_plane_alloc();
+	if (IS_ERR(cursor))
+		return cursor;
13958 13877
13959 cursor->pipe = pipe; 13878 cursor->pipe = pipe;
13960 cursor->i9xx_plane = (enum i9xx_plane_id) pipe; 13879 cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
@@ -13981,8 +13900,10 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
13981 if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv)) 13900 if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
13982 cursor->cursor.size = ~0; 13901 cursor->cursor.size = ~0;
13983 13902
13903 possible_crtcs = BIT(pipe);
13904
 	ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
-				       0, &intel_cursor_plane_funcs,
+				       possible_crtcs, &intel_cursor_plane_funcs,
13986 intel_cursor_formats, 13907 intel_cursor_formats,
13987 ARRAY_SIZE(intel_cursor_formats), 13908 ARRAY_SIZE(intel_cursor_formats),
13988 cursor_format_modifiers, 13909 cursor_format_modifiers,
@@ -13997,16 +13918,12 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
13997 DRM_MODE_ROTATE_0 | 13918 DRM_MODE_ROTATE_0 |
13998 DRM_MODE_ROTATE_180); 13919 DRM_MODE_ROTATE_180);
13999 13920
14000 if (INTEL_GEN(dev_priv) >= 9)
14001 state->scaler_id = -1;
14002
14003 drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs); 13921 drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
14004 13922
14005 return cursor; 13923 return cursor;
14006 13924
14007fail: 13925fail:
-	kfree(state);
-	kfree(cursor);
+	intel_plane_free(cursor);
14010 13927
14011 return ERR_PTR(ret); 13928 return ERR_PTR(ret);
14012} 13929}
@@ -14027,7 +13944,7 @@ static void intel_crtc_init_scalers(struct intel_crtc *crtc,
14027 struct intel_scaler *scaler = &scaler_state->scalers[i]; 13944 struct intel_scaler *scaler = &scaler_state->scalers[i];
14028 13945
14029 scaler->in_use = 0; 13946 scaler->in_use = 0;
-		scaler->mode = PS_SCALER_MODE_DYN;
+		scaler->mode = 0;
14031 } 13948 }
14032 13949
14033 scaler_state->scaler_id = -1; 13950 scaler_state->scaler_id = -1;
@@ -14122,18 +14039,6 @@ fail:
14122 return ret; 14039 return ret;
14123} 14040}
14124 14041
14125enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
14126{
14127 struct drm_device *dev = connector->base.dev;
14128
14129 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
14130
14131 if (!connector->base.state->crtc)
14132 return INVALID_PIPE;
14133
14134 return to_intel_crtc(connector->base.state->crtc)->pipe;
14135}
14136
14137int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data, 14042int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
14138 struct drm_file *file) 14043 struct drm_file *file)
14139{ 14044{
@@ -14270,6 +14175,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
14270 intel_ddi_init(dev_priv, PORT_D); 14175 intel_ddi_init(dev_priv, PORT_D);
14271 intel_ddi_init(dev_priv, PORT_E); 14176 intel_ddi_init(dev_priv, PORT_E);
14272 intel_ddi_init(dev_priv, PORT_F); 14177 intel_ddi_init(dev_priv, PORT_F);
14178 icl_dsi_init(dev_priv);
14273 } else if (IS_GEN9_LP(dev_priv)) { 14179 } else if (IS_GEN9_LP(dev_priv)) {
14274 /* 14180 /*
14275 * FIXME: Broxton doesn't support port detection via the 14181 * FIXME: Broxton doesn't support port detection via the
@@ -14492,7 +14398,7 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
14492 14398
14493static 14399static
14494u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv, 14400u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv,
-			 uint64_t fb_modifier, uint32_t pixel_format)
+			 u32 pixel_format, u64 fb_modifier)
14496{ 14402{
14497 struct intel_crtc *crtc; 14403 struct intel_crtc *crtc;
14498 struct intel_plane *plane; 14404 struct intel_plane *plane;
@@ -14560,13 +14466,19 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
14560 goto err; 14466 goto err;
14561 } 14467 }
14562 /* fall through */ 14468 /* fall through */
14563 case I915_FORMAT_MOD_Y_TILED:
14564 case I915_FORMAT_MOD_Yf_TILED: 14469 case I915_FORMAT_MOD_Yf_TILED:
14470 if (mode_cmd->pixel_format == DRM_FORMAT_C8) {
14471 DRM_DEBUG_KMS("Indexed format does not support Yf tiling\n");
14472 goto err;
14473 }
14474 /* fall through */
14475 case I915_FORMAT_MOD_Y_TILED:
14565 if (INTEL_GEN(dev_priv) < 9) { 14476 if (INTEL_GEN(dev_priv) < 9) {
14566 DRM_DEBUG_KMS("Unsupported tiling 0x%llx!\n", 14477 DRM_DEBUG_KMS("Unsupported tiling 0x%llx!\n",
14567 mode_cmd->modifier[0]); 14478 mode_cmd->modifier[0]);
14568 goto err; 14479 goto err;
14569 } 14480 }
14481 break;
14570 case DRM_FORMAT_MOD_LINEAR: 14482 case DRM_FORMAT_MOD_LINEAR:
14571 case I915_FORMAT_MOD_X_TILED: 14483 case I915_FORMAT_MOD_X_TILED:
14572 break; 14484 break;
@@ -14586,8 +14498,8 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
14586 goto err; 14498 goto err;
14587 } 14499 }
14588 14500
-	pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->modifier[0],
-					   mode_cmd->pixel_format);
+	pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->pixel_format,
+					   mode_cmd->modifier[0]);
14591 if (mode_cmd->pitches[0] > pitch_limit) { 14503 if (mode_cmd->pitches[0] > pitch_limit) {
14592 DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n", 14504 DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
14593 mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ? 14505 mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
@@ -14656,7 +14568,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
14656 break; 14568 break;
14657 case DRM_FORMAT_NV12: 14569 case DRM_FORMAT_NV12:
14658 if (INTEL_GEN(dev_priv) < 9 || IS_SKYLAKE(dev_priv) || 14570 if (INTEL_GEN(dev_priv) < 9 || IS_SKYLAKE(dev_priv) ||
-		    IS_BROXTON(dev_priv) || INTEL_GEN(dev_priv) >= 11) {
+		    IS_BROXTON(dev_priv)) {
14660 DRM_DEBUG_KMS("unsupported pixel format: %s\n", 14572 DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14661 drm_get_format_name(mode_cmd->pixel_format, 14573 drm_get_format_name(mode_cmd->pixel_format,
14662 &format_name)); 14574 &format_name));
@@ -14940,174 +14852,6 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
14940 dev_priv->display.update_crtcs = intel_update_crtcs; 14852 dev_priv->display.update_crtcs = intel_update_crtcs;
14941} 14853}
14942 14854
14943/*
14944 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
14945 */
14946static void quirk_ssc_force_disable(struct drm_device *dev)
14947{
14948 struct drm_i915_private *dev_priv = to_i915(dev);
14949 dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
14950 DRM_INFO("applying lvds SSC disable quirk\n");
14951}
14952
14953/*
14954 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
14955 * brightness value
14956 */
14957static void quirk_invert_brightness(struct drm_device *dev)
14958{
14959 struct drm_i915_private *dev_priv = to_i915(dev);
14960 dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
14961 DRM_INFO("applying inverted panel brightness quirk\n");
14962}
14963
14964/* Some VBT's incorrectly indicate no backlight is present */
14965static void quirk_backlight_present(struct drm_device *dev)
14966{
14967 struct drm_i915_private *dev_priv = to_i915(dev);
14968 dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
14969 DRM_INFO("applying backlight present quirk\n");
14970}
14971
14972/* Toshiba Satellite P50-C-18C requires T12 delay to be min 800ms
14973 * which is 300 ms greater than eDP spec T12 min.
14974 */
14975static void quirk_increase_t12_delay(struct drm_device *dev)
14976{
14977 struct drm_i915_private *dev_priv = to_i915(dev);
14978
14979 dev_priv->quirks |= QUIRK_INCREASE_T12_DELAY;
14980 DRM_INFO("Applying T12 delay quirk\n");
14981}
14982
14983/*
14984 * GeminiLake NUC HDMI outputs require additional off time
14985 * this allows the onboard retimer to correctly sync to signal
14986 */
14987static void quirk_increase_ddi_disabled_time(struct drm_device *dev)
14988{
14989 struct drm_i915_private *dev_priv = to_i915(dev);
14990
14991 dev_priv->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME;
14992 DRM_INFO("Applying Increase DDI Disabled quirk\n");
14993}
14994
14995struct intel_quirk {
14996 int device;
14997 int subsystem_vendor;
14998 int subsystem_device;
14999 void (*hook)(struct drm_device *dev);
15000};
15001
15002/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
15003struct intel_dmi_quirk {
15004 void (*hook)(struct drm_device *dev);
15005 const struct dmi_system_id (*dmi_id_list)[];
15006};
15007
15008static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
15009{
15010 DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
15011 return 1;
15012}
15013
15014static const struct intel_dmi_quirk intel_dmi_quirks[] = {
15015 {
15016 .dmi_id_list = &(const struct dmi_system_id[]) {
15017 {
15018 .callback = intel_dmi_reverse_brightness,
15019 .ident = "NCR Corporation",
15020 .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
15021 DMI_MATCH(DMI_PRODUCT_NAME, ""),
15022 },
15023 },
15024 { } /* terminating entry */
15025 },
15026 .hook = quirk_invert_brightness,
15027 },
15028};
15029
15030static struct intel_quirk intel_quirks[] = {
15031 /* Lenovo U160 cannot use SSC on LVDS */
15032 { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
15033
15034 /* Sony Vaio Y cannot use SSC on LVDS */
15035 { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
15036
15037 /* Acer Aspire 5734Z must invert backlight brightness */
15038 { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
15039
15040 /* Acer/eMachines G725 */
15041 { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
15042
15043 /* Acer/eMachines e725 */
15044 { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
15045
15046 /* Acer/Packard Bell NCL20 */
15047 { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
15048
15049 /* Acer Aspire 4736Z */
15050 { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
15051
15052 /* Acer Aspire 5336 */
15053 { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
15054
15055 /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
15056 { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
15057
15058 /* Acer C720 Chromebook (Core i3 4005U) */
15059 { 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
15060
15061 /* Apple Macbook 2,1 (Core 2 T7400) */
15062 { 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
15063
15064 /* Apple Macbook 4,1 */
15065 { 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },
15066
15067 /* Toshiba CB35 Chromebook (Celeron 2955U) */
15068 { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
15069
15070 /* HP Chromebook 14 (Celeron 2955U) */
15071 { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
15072
15073 /* Dell Chromebook 11 */
15074 { 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
15075
15076 /* Dell Chromebook 11 (2015 version) */
15077 { 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
15078
15079 /* Toshiba Satellite P50-C-18C */
15080 { 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },
15081
15082 /* GeminiLake NUC */
15083 { 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
15084 { 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
15085 /* ASRock ITX*/
15086 { 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
15087 { 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
15088};
15089
15090static void intel_init_quirks(struct drm_device *dev)
15091{
15092 struct pci_dev *d = dev->pdev;
15093 int i;
15094
15095 for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
15096 struct intel_quirk *q = &intel_quirks[i];
15097
15098 if (d->device == q->device &&
15099 (d->subsystem_vendor == q->subsystem_vendor ||
15100 q->subsystem_vendor == PCI_ANY_ID) &&
15101 (d->subsystem_device == q->subsystem_device ||
15102 q->subsystem_device == PCI_ANY_ID))
15103 q->hook(dev);
15104 }
15105 for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
15106 if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
15107 intel_dmi_quirks[i].hook(dev);
15108 }
15109}
15110
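
The quirk machinery deleted above is removed from intel_display.c, while the intel_init_quirks() call below survives (now taking dev_priv), so the table presumably lives on elsewhere in this series. For reference, a standalone sketch of its match rule, where PCI_ANY_ID acts as a wildcard (simplified struct, not the kernel's pci_dev):

#include <stdbool.h>

#define PCI_ANY_ID (~0u)

struct quirk_id { unsigned int device, sub_vendor, sub_device; };

static bool quirk_matches(const struct quirk_id *q, const struct quirk_id *dev)
{
	return dev->device == q->device &&
	       (q->sub_vendor == PCI_ANY_ID ||
		dev->sub_vendor == q->sub_vendor) &&
	       (q->sub_device == PCI_ANY_ID ||
		dev->sub_device == q->sub_device);
}
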
15111/* Disable the VGA plane that we never use */ 14855/* Disable the VGA plane that we never use */
15112static void i915_disable_vga(struct drm_i915_private *dev_priv) 14856static void i915_disable_vga(struct drm_i915_private *dev_priv)
15113{ 14857{
@@ -15313,7 +15057,9 @@ int intel_modeset_init(struct drm_device *dev)
15313 INIT_WORK(&dev_priv->atomic_helper.free_work, 15057 INIT_WORK(&dev_priv->atomic_helper.free_work,
15314 intel_atomic_helper_free_state_worker); 15058 intel_atomic_helper_free_state_worker);
15315 15059
-	intel_init_quirks(dev);
+	intel_init_quirks(dev_priv);
15061
15062 intel_fbc_init(dev_priv);
15317 15063
15318 intel_init_pm(dev_priv); 15064 intel_init_pm(dev_priv);
15319 15065
@@ -15545,8 +15291,8 @@ intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
15545 if (pipe == crtc->pipe) 15291 if (pipe == crtc->pipe)
15546 continue; 15292 continue;
15547 15293
-		DRM_DEBUG_KMS("%s attached to the wrong pipe, disabling plane\n",
-			      plane->base.name);
+		DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
+			      plane->base.base.id, plane->base.name);
15550 15296
15551 plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 15297 plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
15552 intel_plane_disable_noatomic(plane_crtc, plane); 15298 intel_plane_disable_noatomic(plane_crtc, plane);
@@ -15587,7 +15333,8 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
15587{ 15333{
15588 struct drm_device *dev = crtc->base.dev; 15334 struct drm_device *dev = crtc->base.dev;
15589 struct drm_i915_private *dev_priv = to_i915(dev); 15335 struct drm_i915_private *dev_priv = to_i915(dev);
-	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
+	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
+	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
15591 15338
15592 /* Clear any frame start delays used for debugging left by the BIOS */ 15339 /* Clear any frame start delays used for debugging left by the BIOS */
15593 if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) { 15340 if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
@@ -15597,7 +15344,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
15597 I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); 15344 I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
15598 } 15345 }
15599 15346
-	if (crtc->active) {
+	if (crtc_state->base.active) {
15601 struct intel_plane *plane; 15348 struct intel_plane *plane;
15602 15349
15603 /* Disable everything but the primary plane */ 15350 /* Disable everything but the primary plane */
@@ -15613,10 +15360,10 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
15613 15360
15614 /* Adjust the state of the output pipe according to whether we 15361 /* Adjust the state of the output pipe according to whether we
15615 * have active connectors/encoders. */ 15362 * have active connectors/encoders. */
-	if (crtc->active && !intel_crtc_has_encoders(crtc))
+	if (crtc_state->base.active && !intel_crtc_has_encoders(crtc))
15617 intel_crtc_disable_noatomic(&crtc->base, ctx); 15364 intel_crtc_disable_noatomic(&crtc->base, ctx);
15618 15365
-	if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) {
+	if (crtc_state->base.active || HAS_GMCH_DISPLAY(dev_priv)) {
15620 /* 15367 /*
15621 * We start out with underrun reporting disabled to avoid races. 15368 * We start out with underrun reporting disabled to avoid races.
15622 * For correct bookkeeping mark this on active crtcs. 15369 * For correct bookkeeping mark this on active crtcs.
@@ -15647,6 +15394,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
15647 15394
15648static void intel_sanitize_encoder(struct intel_encoder *encoder) 15395static void intel_sanitize_encoder(struct intel_encoder *encoder)
15649{ 15396{
15397 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
15650 struct intel_connector *connector; 15398 struct intel_connector *connector;
15651 15399
15652 /* We need to check both for a crtc link (meaning that the 15400 /* We need to check both for a crtc link (meaning that the
@@ -15670,7 +15418,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
15670 DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n", 15418 DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
15671 encoder->base.base.id, 15419 encoder->base.base.id,
15672 encoder->base.name); 15420 encoder->base.name);
-			encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
+			if (encoder->disable)
+				encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
15674 if (encoder->post_disable) 15423 if (encoder->post_disable)
15675 encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state); 15424 encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
15676 } 15425 }
@@ -15687,6 +15436,9 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
15687 15436
15688 /* notify opregion of the sanitized encoder state */ 15437 /* notify opregion of the sanitized encoder state */
15689 intel_opregion_notify_encoder(encoder, connector && has_active_crtc); 15438 intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
15439
15440 if (INTEL_GEN(dev_priv) >= 11)
15441 icl_sanitize_encoder_pll_mapping(encoder);
15690} 15442}
15691 15443
15692void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv) 15444void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
@@ -15735,6 +15487,10 @@ static void readout_plane_state(struct drm_i915_private *dev_priv)
15735 crtc_state = to_intel_crtc_state(crtc->base.state); 15487 crtc_state = to_intel_crtc_state(crtc->base.state);
15736 15488
15737 intel_set_plane_visible(crtc_state, plane_state, visible); 15489 intel_set_plane_visible(crtc_state, plane_state, visible);
15490
15491 DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
15492 plane->base.base.id, plane->base.name,
15493 enableddisabled(visible), pipe_name(pipe));
15738 } 15494 }
15739 15495
15740 for_each_intel_crtc(&dev_priv->drm, crtc) { 15496 for_each_intel_crtc(&dev_priv->drm, crtc) {
@@ -15887,7 +15643,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
15887 15643
15888 drm_calc_timestamping_constants(&crtc->base, 15644 drm_calc_timestamping_constants(&crtc->base,
15889 &crtc_state->base.adjusted_mode); 15645 &crtc_state->base.adjusted_mode);
-		update_scanline_offset(crtc);
+		update_scanline_offset(crtc_state);
15891 } 15647 }
15892 15648
15893 dev_priv->min_cdclk[crtc->pipe] = min_cdclk; 15649 dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
@@ -15942,6 +15698,65 @@ static void intel_early_display_was(struct drm_i915_private *dev_priv)
15942 } 15698 }
15943} 15699}
15944 15700
15701static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
15702 enum port port, i915_reg_t hdmi_reg)
15703{
15704 u32 val = I915_READ(hdmi_reg);
15705
15706 if (val & SDVO_ENABLE ||
15707 (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
15708 return;
15709
15710 DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
15711 port_name(port));
15712
15713 val &= ~SDVO_PIPE_SEL_MASK;
15714 val |= SDVO_PIPE_SEL(PIPE_A);
15715
15716 I915_WRITE(hdmi_reg, val);
15717}
15718
15719static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
15720 enum port port, i915_reg_t dp_reg)
15721{
15722 u32 val = I915_READ(dp_reg);
15723
15724 if (val & DP_PORT_EN ||
15725 (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
15726 return;
15727
15728 DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
15729 port_name(port));
15730
15731 val &= ~DP_PIPE_SEL_MASK;
15732 val |= DP_PIPE_SEL(PIPE_A);
15733
15734 I915_WRITE(dp_reg, val);
15735}
15736
15737static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
15738{
15739 /*
15740 * The BIOS may select transcoder B on some of the PCH
15741 * ports even if it doesn't enable the port. This would trip
15742 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
15743 * Sanitize the transcoder select bits to prevent that. We
15744 * assume that the BIOS never actually enabled the port,
15745 * because if it did we'd actually have to toggle the port
15746 * on and back off to make the transcoder A select stick
15747 * (see intel_dp_link_down(), intel_disable_hdmi(),
15748 * intel_disable_sdvo()).
15749 */
15750 ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
15751 ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
15752 ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
15753
15754 /* PCH SDVOB multiplex with HDMIB */
15755 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
15756 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
15757 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
15758}
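
Both sanitize helpers above are instances of one read-check-rewrite pattern: leave the register alone if the port is enabled or already selects pipe A, otherwise force the pipe-select field back to pipe A. A minimal standalone sketch of that pattern (the ENABLE/PIPE_SEL masks below are illustrative stand-ins, not the real i915 register bit definitions):

#include <stdint.h>
#include <stdio.h>

#define PORT_ENABLE	(1u << 31)			/* illustrative stand-in */
#define PIPE_SEL_MASK	(3u << 29)			/* illustrative stand-in */
#define PIPE_SEL(pipe)	((uint32_t)(pipe) << 29)	/* pipe A == 0 */

/* Same early-return logic as ibx_sanitize_pch_{dp,hdmi}_port() above. */
static uint32_t sanitize_pipe_select(uint32_t val)
{
	if (val & PORT_ENABLE || (val & PIPE_SEL_MASK) == PIPE_SEL(0))
		return val;	/* enabled, or already on pipe A: nothing to do */

	val &= ~PIPE_SEL_MASK;
	val |= PIPE_SEL(0);
	return val;
}

int main(void)
{
	/* disabled port left pointing at pipe B by the BIOS */
	uint32_t bios_val = PIPE_SEL(1);

	printf("before %#x, after %#x\n", bios_val, sanitize_pipe_select(bios_val));
	return 0;
}
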
15759
15945/* Scan out the current hw modeset state, 15760/* Scan out the current hw modeset state,
15946 * and sanitizes it to the current state 15761 * and sanitizes it to the current state
15947 */ 15762 */
@@ -15951,6 +15766,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
15951{ 15766{
15952 struct drm_i915_private *dev_priv = to_i915(dev); 15767 struct drm_i915_private *dev_priv = to_i915(dev);
15953 struct intel_crtc *crtc; 15768 struct intel_crtc *crtc;
15769 struct intel_crtc_state *crtc_state;
15954 struct intel_encoder *encoder; 15770 struct intel_encoder *encoder;
15955 int i; 15771 int i;
15956 15772
@@ -15962,6 +15778,9 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
15962 /* HW state is read out, now we need to sanitize this mess. */ 15778 /* HW state is read out, now we need to sanitize this mess. */
15963 get_encoder_power_domains(dev_priv); 15779 get_encoder_power_domains(dev_priv);
15964 15780
15781 if (HAS_PCH_IBX(dev_priv))
15782 ibx_sanitize_pch_ports(dev_priv);
15783
15965 /* 15784 /*
15966 * intel_sanitize_plane_mapping() may need to do vblank 15785 * intel_sanitize_plane_mapping() may need to do vblank
15967 * waits, so we need vblank interrupts restored beforehand. 15786 * waits, so we need vblank interrupts restored beforehand.
@@ -15969,7 +15788,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
15969 for_each_intel_crtc(&dev_priv->drm, crtc) { 15788 for_each_intel_crtc(&dev_priv->drm, crtc) {
15970 drm_crtc_vblank_reset(&crtc->base); 15789 drm_crtc_vblank_reset(&crtc->base);
15971 15790
15972 if (crtc->active) 15791 if (crtc->base.state->active)
15973 drm_crtc_vblank_on(&crtc->base); 15792 drm_crtc_vblank_on(&crtc->base);
15974 } 15793 }
15975 15794
@@ -15979,8 +15798,9 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
15979 intel_sanitize_encoder(encoder); 15798 intel_sanitize_encoder(encoder);
15980 15799
15981 for_each_intel_crtc(&dev_priv->drm, crtc) { 15800 for_each_intel_crtc(&dev_priv->drm, crtc) {
15801 crtc_state = to_intel_crtc_state(crtc->base.state);
15982 intel_sanitize_crtc(crtc, ctx); 15802 intel_sanitize_crtc(crtc, ctx);
15983 intel_dump_pipe_config(crtc, crtc->config, 15803 intel_dump_pipe_config(crtc, crtc_state,
15984 "[setup_hw_state]"); 15804 "[setup_hw_state]");
15985 } 15805 }
15986 15806
@@ -16014,7 +15834,8 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
16014 for_each_intel_crtc(dev, crtc) { 15834 for_each_intel_crtc(dev, crtc) {
16015 u64 put_domains; 15835 u64 put_domains;
16016 15836
16017 put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config); 15837 crtc_state = to_intel_crtc_state(crtc->base.state);
15838 put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc_state);
16018 if (WARN_ON(put_domains)) 15839 if (WARN_ON(put_domains))
16019 modeset_put_power_domains(dev_priv, put_domains); 15840 modeset_put_power_domains(dev_priv, put_domains);
16020 } 15841 }
@@ -16058,29 +15879,6 @@ void intel_display_resume(struct drm_device *dev)
16058 drm_atomic_state_put(state); 15879 drm_atomic_state_put(state);
16059} 15880}
16060 15881
16061int intel_connector_register(struct drm_connector *connector)
16062{
16063 struct intel_connector *intel_connector = to_intel_connector(connector);
16064 int ret;
16065
16066 ret = intel_backlight_device_register(intel_connector);
16067 if (ret)
16068 goto err;
16069
16070 return 0;
16071
16072err:
16073 return ret;
16074}
16075
16076void intel_connector_unregister(struct drm_connector *connector)
16077{
16078 struct intel_connector *intel_connector = to_intel_connector(connector);
16079
16080 intel_backlight_device_unregister(intel_connector);
16081 intel_panel_destroy_backlight(connector);
16082}
16083
16084static void intel_hpd_poll_fini(struct drm_device *dev) 15882static void intel_hpd_poll_fini(struct drm_device *dev)
16085{ 15883{
16086 struct intel_connector *connector; 15884 struct intel_connector *connector;
@@ -16091,9 +15889,9 @@ static void intel_hpd_poll_fini(struct drm_device *dev)
16091 for_each_intel_connector_iter(connector, &conn_iter) { 15889 for_each_intel_connector_iter(connector, &conn_iter) {
16092 if (connector->modeset_retry_work.func) 15890 if (connector->modeset_retry_work.func)
16093 cancel_work_sync(&connector->modeset_retry_work); 15891 cancel_work_sync(&connector->modeset_retry_work);
16094 if (connector->hdcp_shim) { 15892 if (connector->hdcp.shim) {
16095 cancel_delayed_work_sync(&connector->hdcp_check_work); 15893 cancel_delayed_work_sync(&connector->hdcp.check_work);
16096 cancel_work_sync(&connector->hdcp_prop_work); 15894 cancel_work_sync(&connector->hdcp.prop_work);
16097 } 15895 }
16098 } 15896 }
16099 drm_connector_list_iter_end(&conn_iter); 15897 drm_connector_list_iter_end(&conn_iter);
@@ -16133,18 +15931,13 @@ void intel_modeset_cleanup(struct drm_device *dev)
16133 15931
16134 drm_mode_config_cleanup(dev); 15932 drm_mode_config_cleanup(dev);
16135 15933
16136 intel_cleanup_overlay(dev_priv); 15934 intel_overlay_cleanup(dev_priv);
16137 15935
16138 intel_teardown_gmbus(dev_priv); 15936 intel_teardown_gmbus(dev_priv);
16139 15937
16140 destroy_workqueue(dev_priv->modeset_wq); 15938 destroy_workqueue(dev_priv->modeset_wq);
16141}
16142 15939
16143void intel_connector_attach_encoder(struct intel_connector *connector, 15940 intel_fbc_cleanup_cfb(dev_priv);
16144 struct intel_encoder *encoder)
16145{
16146 connector->encoder = encoder;
16147 drm_connector_attach_encoder(&connector->base, &encoder->base);
16148} 15941}
16149 15942
16150/* 15943/*
diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h
index 9fac67e31205..43eb4ebbcc35 100644
--- a/drivers/gpu/drm/i915/intel_display.h
+++ b/drivers/gpu/drm/i915/intel_display.h
@@ -61,8 +61,10 @@ enum transcoder {
61 TRANSCODER_B, 61 TRANSCODER_B,
62 TRANSCODER_C, 62 TRANSCODER_C,
63 TRANSCODER_EDP, 63 TRANSCODER_EDP,
64 TRANSCODER_DSI_A, 64 TRANSCODER_DSI_0,
65 TRANSCODER_DSI_C, 65 TRANSCODER_DSI_1,
66 TRANSCODER_DSI_A = TRANSCODER_DSI_0, /* legacy DSI */
67 TRANSCODER_DSI_C = TRANSCODER_DSI_1, /* legacy DSI */
66 68
67 I915_MAX_TRANSCODERS 69 I915_MAX_TRANSCODERS
68}; 70};
@@ -120,6 +122,9 @@ enum plane_id {
120 PLANE_SPRITE0, 122 PLANE_SPRITE0,
121 PLANE_SPRITE1, 123 PLANE_SPRITE1,
122 PLANE_SPRITE2, 124 PLANE_SPRITE2,
125 PLANE_SPRITE3,
126 PLANE_SPRITE4,
127 PLANE_SPRITE5,
123 PLANE_CURSOR, 128 PLANE_CURSOR,
124 129
125 I915_MAX_PLANES, 130 I915_MAX_PLANES,
@@ -363,7 +368,7 @@ struct intel_link_m_n {
363 (__dev_priv)->power_domains.power_well_count; \ 368 (__dev_priv)->power_domains.power_well_count; \
364 (__power_well)++) 369 (__power_well)++)
365 370
366#define for_each_power_well_rev(__dev_priv, __power_well) \ 371#define for_each_power_well_reverse(__dev_priv, __power_well) \
367 for ((__power_well) = (__dev_priv)->power_domains.power_wells + \ 372 for ((__power_well) = (__dev_priv)->power_domains.power_wells + \
368 (__dev_priv)->power_domains.power_well_count - 1; \ 373 (__dev_priv)->power_domains.power_well_count - 1; \
369 (__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \ 374 (__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \
@@ -373,8 +378,8 @@ struct intel_link_m_n {
373 for_each_power_well(__dev_priv, __power_well) \ 378 for_each_power_well(__dev_priv, __power_well) \
374 for_each_if((__power_well)->desc->domains & (__domain_mask)) 379 for_each_if((__power_well)->desc->domains & (__domain_mask))
375 380
376#define for_each_power_domain_well_rev(__dev_priv, __power_well, __domain_mask) \ 381#define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain_mask) \
377 for_each_power_well_rev(__dev_priv, __power_well) \ 382 for_each_power_well_reverse(__dev_priv, __power_well) \
378 for_each_if((__power_well)->desc->domains & (__domain_mask)) 383 for_each_if((__power_well)->desc->domains & (__domain_mask))
379 384
380#define for_each_new_intel_plane_in_state(__state, plane, new_plane_state, __i) \ 385#define for_each_new_intel_plane_in_state(__state, plane, new_plane_state, __i) \
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 13f9b56a9ce7..7699f9b7b2d2 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -45,6 +45,17 @@
45 45
46#define DP_DPRX_ESI_LEN 14 46#define DP_DPRX_ESI_LEN 14
47 47
48/* DP DSC small joiner has 2 FIFOs each of 640 x 6 bytes */
49#define DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER 61440
50
51/* DP DSC throughput values used for slice count calculations KPixels/s */
52#define DP_DSC_PEAK_PIXEL_RATE 2720000
53#define DP_DSC_MAX_ENC_THROUGHPUT_0 340000
54#define DP_DSC_MAX_ENC_THROUGHPUT_1 400000
55
56/* DP DSC FEC Overhead factor = (100 - 2.4)/100 */
57#define DP_DSC_FEC_OVERHEAD_FACTOR 976
58
48/* Compliance test status bits */ 59/* Compliance test status bits */
49#define INTEL_DP_RESOLUTION_SHIFT_MASK 0 60#define INTEL_DP_RESOLUTION_SHIFT_MASK 0
50#define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK) 61#define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
@@ -93,6 +104,14 @@ static const struct dp_link_dpll chv_dpll[] = {
93 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }, 104 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
94}; 105};
95 106
107/* Constants for DP DSC configurations */
108static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};
109
110/* With a single pipe configuration, HW is capable of supporting a maximum
111 * of 4 slices per line.
112 */
113static const u8 valid_dsc_slicecount[] = {1, 2, 4};
114
96/** 115/**
97 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH) 116 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
98 * @intel_dp: DP struct 117 * @intel_dp: DP struct
@@ -222,138 +241,6 @@ intel_dp_link_required(int pixel_clock, int bpp)
222 return DIV_ROUND_UP(pixel_clock * bpp, 8); 241 return DIV_ROUND_UP(pixel_clock * bpp, 8);
223} 242}
224 243
225void icl_program_mg_dp_mode(struct intel_dp *intel_dp)
226{
227 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
228 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
229 enum port port = intel_dig_port->base.port;
230 enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
231 u32 ln0, ln1, lane_info;
232
233 if (tc_port == PORT_TC_NONE || intel_dig_port->tc_type == TC_PORT_TBT)
234 return;
235
236 ln0 = I915_READ(MG_DP_MODE(port, 0));
237 ln1 = I915_READ(MG_DP_MODE(port, 1));
238
239 switch (intel_dig_port->tc_type) {
240 case TC_PORT_TYPEC:
241 ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
242 ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
243
244 lane_info = (I915_READ(PORT_TX_DFLEXDPSP) &
245 DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
246 DP_LANE_ASSIGNMENT_SHIFT(tc_port);
247
248 switch (lane_info) {
249 case 0x1:
250 case 0x4:
251 break;
252 case 0x2:
253 ln0 |= MG_DP_MODE_CFG_DP_X1_MODE;
254 break;
255 case 0x3:
256 ln0 |= MG_DP_MODE_CFG_DP_X1_MODE |
257 MG_DP_MODE_CFG_DP_X2_MODE;
258 break;
259 case 0x8:
260 ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
261 break;
262 case 0xC:
263 ln1 |= MG_DP_MODE_CFG_DP_X1_MODE |
264 MG_DP_MODE_CFG_DP_X2_MODE;
265 break;
266 case 0xF:
267 ln0 |= MG_DP_MODE_CFG_DP_X1_MODE |
268 MG_DP_MODE_CFG_DP_X2_MODE;
269 ln1 |= MG_DP_MODE_CFG_DP_X1_MODE |
270 MG_DP_MODE_CFG_DP_X2_MODE;
271 break;
272 default:
273 MISSING_CASE(lane_info);
274 }
275 break;
276
277 case TC_PORT_LEGACY:
278 ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE;
279 ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE;
280 break;
281
282 default:
283 MISSING_CASE(intel_dig_port->tc_type);
284 return;
285 }
286
287 I915_WRITE(MG_DP_MODE(port, 0), ln0);
288 I915_WRITE(MG_DP_MODE(port, 1), ln1);
289}
290
291void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port)
292{
293 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
294 enum port port = dig_port->base.port;
295 enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
296 i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) };
297 u32 val;
298 int i;
299
300 if (tc_port == PORT_TC_NONE)
301 return;
302
303 for (i = 0; i < ARRAY_SIZE(mg_regs); i++) {
304 val = I915_READ(mg_regs[i]);
305 val |= MG_DP_MODE_CFG_TR2PWR_GATING |
306 MG_DP_MODE_CFG_TRPWR_GATING |
307 MG_DP_MODE_CFG_CLNPWR_GATING |
308 MG_DP_MODE_CFG_DIGPWR_GATING |
309 MG_DP_MODE_CFG_GAONPWR_GATING;
310 I915_WRITE(mg_regs[i], val);
311 }
312
313 val = I915_READ(MG_MISC_SUS0(tc_port));
314 val |= MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(3) |
315 MG_MISC_SUS0_CFG_TR2PWR_GATING |
316 MG_MISC_SUS0_CFG_CL2PWR_GATING |
317 MG_MISC_SUS0_CFG_GAONPWR_GATING |
318 MG_MISC_SUS0_CFG_TRPWR_GATING |
319 MG_MISC_SUS0_CFG_CL1PWR_GATING |
320 MG_MISC_SUS0_CFG_DGPWR_GATING;
321 I915_WRITE(MG_MISC_SUS0(tc_port), val);
322}
323
324void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port)
325{
326 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
327 enum port port = dig_port->base.port;
328 enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
329 i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) };
330 u32 val;
331 int i;
332
333 if (tc_port == PORT_TC_NONE)
334 return;
335
336 for (i = 0; i < ARRAY_SIZE(mg_regs); i++) {
337 val = I915_READ(mg_regs[i]);
338 val &= ~(MG_DP_MODE_CFG_TR2PWR_GATING |
339 MG_DP_MODE_CFG_TRPWR_GATING |
340 MG_DP_MODE_CFG_CLNPWR_GATING |
341 MG_DP_MODE_CFG_DIGPWR_GATING |
342 MG_DP_MODE_CFG_GAONPWR_GATING);
343 I915_WRITE(mg_regs[i], val);
344 }
345
346 val = I915_READ(MG_MISC_SUS0(tc_port));
347 val &= ~(MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK |
348 MG_MISC_SUS0_CFG_TR2PWR_GATING |
349 MG_MISC_SUS0_CFG_CL2PWR_GATING |
350 MG_MISC_SUS0_CFG_GAONPWR_GATING |
351 MG_MISC_SUS0_CFG_TRPWR_GATING |
352 MG_MISC_SUS0_CFG_CL1PWR_GATING |
353 MG_MISC_SUS0_CFG_DGPWR_GATING);
354 I915_WRITE(MG_MISC_SUS0(tc_port), val);
355}
356
357int 244int
358intel_dp_max_data_rate(int max_link_clock, int max_lanes) 245intel_dp_max_data_rate(int max_link_clock, int max_lanes)
359{ 246{
@@ -455,7 +342,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
455 if (INTEL_GEN(dev_priv) >= 10) { 342 if (INTEL_GEN(dev_priv) >= 10) {
456 source_rates = cnl_rates; 343 source_rates = cnl_rates;
457 size = ARRAY_SIZE(cnl_rates); 344 size = ARRAY_SIZE(cnl_rates);
458 if (INTEL_GEN(dev_priv) == 10) 345 if (IS_GEN10(dev_priv))
459 max_rate = cnl_max_source_rate(intel_dp); 346 max_rate = cnl_max_source_rate(intel_dp);
460 else 347 else
461 max_rate = icl_max_source_rate(intel_dp); 348 max_rate = icl_max_source_rate(intel_dp);
@@ -616,9 +503,12 @@ intel_dp_mode_valid(struct drm_connector *connector,
616 struct intel_dp *intel_dp = intel_attached_dp(connector); 503 struct intel_dp *intel_dp = intel_attached_dp(connector);
617 struct intel_connector *intel_connector = to_intel_connector(connector); 504 struct intel_connector *intel_connector = to_intel_connector(connector);
618 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; 505 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
506 struct drm_i915_private *dev_priv = to_i915(connector->dev);
619 int target_clock = mode->clock; 507 int target_clock = mode->clock;
620 int max_rate, mode_rate, max_lanes, max_link_clock; 508 int max_rate, mode_rate, max_lanes, max_link_clock;
621 int max_dotclk; 509 int max_dotclk;
510 u16 dsc_max_output_bpp = 0;
511 u8 dsc_slice_count = 0;
622 512
623 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 513 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
624 return MODE_NO_DBLESCAN; 514 return MODE_NO_DBLESCAN;
@@ -641,7 +531,33 @@ intel_dp_mode_valid(struct drm_connector *connector,
641 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); 531 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
642 mode_rate = intel_dp_link_required(target_clock, 18); 532 mode_rate = intel_dp_link_required(target_clock, 18);
643 533
644 if (mode_rate > max_rate || target_clock > max_dotclk) 534 /*
535 * Output bpp is stored in 6.4 format so right shift by 4 to get the
536 * integer value since we support only integer values of bpp.
537 */
538 if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
539 drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
540 if (intel_dp_is_edp(intel_dp)) {
541 dsc_max_output_bpp =
542 drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
543 dsc_slice_count =
544 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
545 true);
546 } else {
547 dsc_max_output_bpp =
548 intel_dp_dsc_get_output_bpp(max_link_clock,
549 max_lanes,
550 target_clock,
551 mode->hdisplay) >> 4;
552 dsc_slice_count =
553 intel_dp_dsc_get_slice_count(intel_dp,
554 target_clock,
555 mode->hdisplay);
556 }
557 }
558
559 if ((mode_rate > max_rate && !(dsc_max_output_bpp && dsc_slice_count)) ||
560 target_clock > max_dotclk)
645 return MODE_CLOCK_HIGH; 561 return MODE_CLOCK_HIGH;
646 562
647 if (mode->clock < 10000) 563 if (mode->clock < 10000)
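
The >> 4 in the DSC branch above simply drops the fractional part of the U6.4 fixed-point bpp values. A standalone illustration of the encoding (the raw value 488 is hypothetical, not read from any DPCD):

#include <stdio.h>

int main(void)
{
	unsigned int raw = 488;	/* hypothetical U6.4 value */

	/* integer part in the top bits, fraction in the low 4 bits (1/16ths) */
	printf("%u.%04u bpp -> integer bpp %u\n",
	       raw >> 4, (raw & 0xf) * 10000 / 16, raw >> 4);	/* 30.5000 -> 30 */
	return 0;
}
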
@@ -690,7 +606,8 @@ static void pps_lock(struct intel_dp *intel_dp)
690 * See intel_power_sequencer_reset() for why we need 606
691 * a power domain reference here. 607 * a power domain reference here.
692 */ 608 */
693 intel_display_power_get(dev_priv, intel_dp->aux_power_domain); 609 intel_display_power_get(dev_priv,
610 intel_aux_power_domain(dp_to_dig_port(intel_dp)));
694 611
695 mutex_lock(&dev_priv->pps_mutex); 612 mutex_lock(&dev_priv->pps_mutex);
696} 613}
@@ -701,7 +618,8 @@ static void pps_unlock(struct intel_dp *intel_dp)
701 618
702 mutex_unlock(&dev_priv->pps_mutex); 619 mutex_unlock(&dev_priv->pps_mutex);
703 620
704 intel_display_power_put(dev_priv, intel_dp->aux_power_domain); 621 intel_display_power_put(dev_priv,
622 intel_aux_power_domain(dp_to_dig_port(intel_dp)));
705} 623}
706 624
707static void 625static void
@@ -1156,6 +1074,7 @@ static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
1156static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index) 1074static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
1157{ 1075{
1158 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1076 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1077 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1159 1078
1160 if (index) 1079 if (index)
1161 return 0; 1080 return 0;
@@ -1165,7 +1084,7 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
1165 * like to run at 2MHz. So, take the cdclk or PCH rawclk value and 1084 * like to run at 2MHz. So, take the cdclk or PCH rawclk value and
1166 * divide by 2000 and use that 1085 * divide by 2000 and use that
1167 */ 1086 */
1168 if (intel_dp->aux_ch == AUX_CH_A) 1087 if (dig_port->aux_ch == AUX_CH_A)
1169 return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000); 1088 return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000);
1170 else 1089 else
1171 return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000); 1090 return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
@@ -1174,8 +1093,9 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
1174static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index) 1093static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
1175{ 1094{
1176 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1095 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1096 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1177 1097
1178 if (intel_dp->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) { 1098 if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
1179 /* Workaround for non-ULT HSW */ 1099 /* Workaround for non-ULT HSW */
1180 switch (index) { 1100 switch (index) {
1181 case 0: return 63; 1101 case 0: return 63;
@@ -1503,80 +1423,12 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
1503 return ret; 1423 return ret;
1504} 1424}
1505 1425
1506static enum aux_ch intel_aux_ch(struct intel_dp *intel_dp)
1507{
1508 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
1509 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1510 enum port port = encoder->port;
1511 const struct ddi_vbt_port_info *info =
1512 &dev_priv->vbt.ddi_port_info[port];
1513 enum aux_ch aux_ch;
1514
1515 if (!info->alternate_aux_channel) {
1516 aux_ch = (enum aux_ch) port;
1517
1518 DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n",
1519 aux_ch_name(aux_ch), port_name(port));
1520 return aux_ch;
1521 }
1522
1523 switch (info->alternate_aux_channel) {
1524 case DP_AUX_A:
1525 aux_ch = AUX_CH_A;
1526 break;
1527 case DP_AUX_B:
1528 aux_ch = AUX_CH_B;
1529 break;
1530 case DP_AUX_C:
1531 aux_ch = AUX_CH_C;
1532 break;
1533 case DP_AUX_D:
1534 aux_ch = AUX_CH_D;
1535 break;
1536 case DP_AUX_E:
1537 aux_ch = AUX_CH_E;
1538 break;
1539 case DP_AUX_F:
1540 aux_ch = AUX_CH_F;
1541 break;
1542 default:
1543 MISSING_CASE(info->alternate_aux_channel);
1544 aux_ch = AUX_CH_A;
1545 break;
1546 }
1547
1548 DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n",
1549 aux_ch_name(aux_ch), port_name(port));
1550
1551 return aux_ch;
1552}
1553
1554static enum intel_display_power_domain
1555intel_aux_power_domain(struct intel_dp *intel_dp)
1556{
1557 switch (intel_dp->aux_ch) {
1558 case AUX_CH_A:
1559 return POWER_DOMAIN_AUX_A;
1560 case AUX_CH_B:
1561 return POWER_DOMAIN_AUX_B;
1562 case AUX_CH_C:
1563 return POWER_DOMAIN_AUX_C;
1564 case AUX_CH_D:
1565 return POWER_DOMAIN_AUX_D;
1566 case AUX_CH_E:
1567 return POWER_DOMAIN_AUX_E;
1568 case AUX_CH_F:
1569 return POWER_DOMAIN_AUX_F;
1570 default:
1571 MISSING_CASE(intel_dp->aux_ch);
1572 return POWER_DOMAIN_AUX_A;
1573 }
1574}
1575 1426
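
Call sites elsewhere in this patch switch from the cached intel_dp->aux_power_domain to intel_aux_power_domain(dp_to_dig_port(intel_dp)), so the mapping above now lives in a helper keyed off the digital port. A standalone sketch of that mapping, assuming it is carried over unchanged from the removed switch (the enums here are simplified stand-ins):

#include <stdio.h>

enum aux_ch { AUX_CH_A, AUX_CH_B, AUX_CH_C, AUX_CH_D, AUX_CH_E, AUX_CH_F };

/* One-to-one aux channel -> AUX power domain, with the same
 * default-to-A fallback as the removed intel_aux_power_domain(). */
static enum aux_ch aux_power_domain(enum aux_ch ch)
{
	switch (ch) {
	case AUX_CH_A: case AUX_CH_B: case AUX_CH_C:
	case AUX_CH_D: case AUX_CH_E: case AUX_CH_F:
		return ch;		/* POWER_DOMAIN_AUX_A + (ch - AUX_CH_A) */
	default:
		return AUX_CH_A;	/* MISSING_CASE() fallback in the driver */
	}
}

int main(void)
{
	printf("AUX_CH_C -> POWER_DOMAIN_AUX_%c\n", 'A' + aux_power_domain(AUX_CH_C));
	return 0;
}
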
1576static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp) 1427static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
1577{ 1428{
1578 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1429 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1579 enum aux_ch aux_ch = intel_dp->aux_ch; 1430 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1431 enum aux_ch aux_ch = dig_port->aux_ch;
1580 1432
1581 switch (aux_ch) { 1433 switch (aux_ch) {
1582 case AUX_CH_B: 1434 case AUX_CH_B:
@@ -1592,7 +1444,8 @@ static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
1592static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index) 1444static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
1593{ 1445{
1594 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1446 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1595 enum aux_ch aux_ch = intel_dp->aux_ch; 1447 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1448 enum aux_ch aux_ch = dig_port->aux_ch;
1596 1449
1597 switch (aux_ch) { 1450 switch (aux_ch) {
1598 case AUX_CH_B: 1451 case AUX_CH_B:
@@ -1608,7 +1461,8 @@ static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
1608static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp) 1461static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
1609{ 1462{
1610 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1463 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1611 enum aux_ch aux_ch = intel_dp->aux_ch; 1464 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1465 enum aux_ch aux_ch = dig_port->aux_ch;
1612 1466
1613 switch (aux_ch) { 1467 switch (aux_ch) {
1614 case AUX_CH_A: 1468 case AUX_CH_A:
@@ -1626,7 +1480,8 @@ static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
1626static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index) 1480static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
1627{ 1481{
1628 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1482 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1629 enum aux_ch aux_ch = intel_dp->aux_ch; 1483 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1484 enum aux_ch aux_ch = dig_port->aux_ch;
1630 1485
1631 switch (aux_ch) { 1486 switch (aux_ch) {
1632 case AUX_CH_A: 1487 case AUX_CH_A:
@@ -1644,7 +1499,8 @@ static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
1644static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp) 1499static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
1645{ 1500{
1646 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1501 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1647 enum aux_ch aux_ch = intel_dp->aux_ch; 1502 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1503 enum aux_ch aux_ch = dig_port->aux_ch;
1648 1504
1649 switch (aux_ch) { 1505 switch (aux_ch) {
1650 case AUX_CH_A: 1506 case AUX_CH_A:
@@ -1663,7 +1519,8 @@ static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
1663static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index) 1519static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
1664{ 1520{
1665 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1521 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1666 enum aux_ch aux_ch = intel_dp->aux_ch; 1522 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1523 enum aux_ch aux_ch = dig_port->aux_ch;
1667 1524
1668 switch (aux_ch) { 1525 switch (aux_ch) {
1669 case AUX_CH_A: 1526 case AUX_CH_A:
@@ -1689,10 +1546,8 @@ static void
1689intel_dp_aux_init(struct intel_dp *intel_dp) 1546intel_dp_aux_init(struct intel_dp *intel_dp)
1690{ 1547{
1691 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1548 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1692 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 1549 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1693 1550 struct intel_encoder *encoder = &dig_port->base;
1694 intel_dp->aux_ch = intel_aux_ch(intel_dp);
1695 intel_dp->aux_power_domain = intel_aux_power_domain(intel_dp);
1696 1551
1697 if (INTEL_GEN(dev_priv) >= 9) { 1552 if (INTEL_GEN(dev_priv) >= 9) {
1698 intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg; 1553 intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
@@ -1951,6 +1806,42 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
1951 return false; 1806 return false;
1952} 1807}
1953 1808
1809/* Optimize link config in order: max bpp, min lanes, min clock */
1810static bool
1811intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
1812 struct intel_crtc_state *pipe_config,
1813 const struct link_config_limits *limits)
1814{
1815 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1816 int bpp, clock, lane_count;
1817 int mode_rate, link_clock, link_avail;
1818
1819 for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
1820 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1821 bpp);
1822
1823 for (lane_count = limits->min_lane_count;
1824 lane_count <= limits->max_lane_count;
1825 lane_count <<= 1) {
1826 for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
1827 link_clock = intel_dp->common_rates[clock];
1828 link_avail = intel_dp_max_data_rate(link_clock,
1829 lane_count);
1830
1831 if (mode_rate <= link_avail) {
1832 pipe_config->lane_count = lane_count;
1833 pipe_config->pipe_bpp = bpp;
1834 pipe_config->port_clock = link_clock;
1835
1836 return true;
1837 }
1838 }
1839 }
1840 }
1841
1842 return false;
1843}
1844
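
The two policies differ only in loop order: the existing "wide" search walks clock before lane count (the lowest clock that works wins, with lanes growing as needed), while the "fast" search above walks lane count before clock (the fewest lanes win). A standalone comparison for a hypothetical 1920x1080@60 mode (148500 kHz at 24 bpp), holding bpp fixed and reusing the mode-rate and data-rate formulas from this file; the three-rate table and the 4-lane maximum are assumptions for illustration:

#include <stdio.h>

static const int rates[] = { 162000, 270000, 540000 };	/* kHz: RBR, HBR, HBR2 */
static const int lanes[] = { 1, 2, 4 };

/* intel_dp_link_required(): DIV_ROUND_UP(pixel_clock * bpp, 8) */
static int mode_rate(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 7) / 8;
}

/* intel_dp_max_data_rate(): (link_clock * lanes * 8) / 10, i.e. 8b/10b */
static int link_avail(int link_clock, int lane_count)
{
	return link_clock * lane_count * 8 / 10;
}

int main(void)
{
	int need = mode_rate(148500, 24);	/* 445500 */
	int r, l;

	/* slow and wide: clock in the outer loop */
	for (r = 0; r < 3; r++)
		for (l = 0; l < 3; l++)
			if (link_avail(rates[r], lanes[l]) >= need) {
				printf("wide: %d kHz x%d\n", rates[r], lanes[l]);
				goto fast;
			}
fast:
	/* fast and narrow: lane count in the outer loop */
	for (l = 0; l < 3; l++)
		for (r = 0; r < 3; r++)
			if (link_avail(rates[r], lanes[l]) >= need) {
				printf("fast: %d kHz x%d\n", rates[r], lanes[l]);
				return 0;
			}
	return 0;
}

This prints "wide: 162000 kHz x4" but "fast: 540000 kHz x2" for the same mode, matching the "minimum number of lanes" recommendation quoted in the eDP comment below.
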
1954static bool 1845static bool
1955intel_dp_compute_link_config(struct intel_encoder *encoder, 1846intel_dp_compute_link_config(struct intel_encoder *encoder,
1956 struct intel_crtc_state *pipe_config) 1847 struct intel_crtc_state *pipe_config)
@@ -1975,13 +1866,15 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
1975 limits.min_bpp = 6 * 3; 1866 limits.min_bpp = 6 * 3;
1976 limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config); 1867 limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
1977 1868
1978 if (intel_dp_is_edp(intel_dp)) { 1869 if (intel_dp_is_edp(intel_dp) && intel_dp->edp_dpcd[0] < DP_EDP_14) {
1979 /* 1870 /*
1980 * Use the maximum clock and number of lanes the eDP panel 1871 * Use the maximum clock and number of lanes the eDP panel
1981 * advertises being capable of. The panels are generally 1872 * advertises being capable of. The eDP 1.3 and earlier panels
1982 * designed to support only a single clock and lane 1873 * are generally designed to support only a single clock and
1983 * configuration, and typically these values correspond to the 1874 * lane configuration, and typically these values correspond to
1984 * native resolution of the panel. 1875 * the native resolution of the panel. With eDP 1.4 rate select
1876 * and DSC, this is decreasingly the case, and we need to be
1877 * able to select less than maximum link config.
1985 */ 1878 */
1986 limits.min_lane_count = limits.max_lane_count; 1879 limits.min_lane_count = limits.max_lane_count;
1987 limits.min_clock = limits.max_clock; 1880 limits.min_clock = limits.max_clock;
@@ -1995,12 +1888,25 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
1995 intel_dp->common_rates[limits.max_clock], 1888 intel_dp->common_rates[limits.max_clock],
1996 limits.max_bpp, adjusted_mode->crtc_clock); 1889 limits.max_bpp, adjusted_mode->crtc_clock);
1997 1890
1998 /* 1891 if (intel_dp_is_edp(intel_dp)) {
1999 * Optimize for slow and wide. This is the place to add alternative 1892 /*
2000 * optimization policy. 1893 * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
2001 */ 1894 * section A.1: "It is recommended that the minimum number of
2002 if (!intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits)) 1895 * lanes be used, using the minimum link rate allowed for that
2003 return false; 1896 * lane configuration."
1897 *
1898 * Note that we use the max clock and lane count for eDP 1.3 and
1899 * earlier, and fast vs. wide is irrelevant.
1900 */
1901 if (!intel_dp_compute_link_config_fast(intel_dp, pipe_config,
1902 &limits))
1903 return false;
1904 } else {
1905 /* Optimize for slow and wide. */
1906 if (!intel_dp_compute_link_config_wide(intel_dp, pipe_config,
1907 &limits))
1908 return false;
1909 }
2004 1910
2005 DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n", 1911 DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n",
2006 pipe_config->lane_count, pipe_config->port_clock, 1912 pipe_config->lane_count, pipe_config->port_clock,
@@ -2023,6 +1929,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
2023 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 1929 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2024 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 1930 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
2025 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1931 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1932 struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base);
2026 enum port port = encoder->port; 1933 enum port port = encoder->port;
2027 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc); 1934 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
2028 struct intel_connector *intel_connector = intel_dp->attached_connector; 1935 struct intel_connector *intel_connector = intel_dp->attached_connector;
@@ -2034,6 +1941,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
2034 if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A) 1941 if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
2035 pipe_config->has_pch_encoder = true; 1942 pipe_config->has_pch_encoder = true;
2036 1943
1944 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
1945 if (lspcon->active)
1946 lspcon_ycbcr420_config(&intel_connector->base, pipe_config);
1947
2037 pipe_config->has_drrs = false; 1948 pipe_config->has_drrs = false;
2038 if (IS_G4X(dev_priv) || port == PORT_A) 1949 if (IS_G4X(dev_priv) || port == PORT_A)
2039 pipe_config->has_audio = false; 1950 pipe_config->has_audio = false;
@@ -2338,7 +2249,8 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
2338 if (edp_have_panel_vdd(intel_dp)) 2249 if (edp_have_panel_vdd(intel_dp))
2339 return need_to_disable; 2250 return need_to_disable;
2340 2251
2341 intel_display_power_get(dev_priv, intel_dp->aux_power_domain); 2252 intel_display_power_get(dev_priv,
2253 intel_aux_power_domain(intel_dig_port));
2342 2254
2343 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n", 2255 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
2344 port_name(intel_dig_port->base.port)); 2256 port_name(intel_dig_port->base.port));
@@ -2424,7 +2336,8 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
2424 if ((pp & PANEL_POWER_ON) == 0) 2336 if ((pp & PANEL_POWER_ON) == 0)
2425 intel_dp->panel_power_off_time = ktime_get_boottime(); 2337 intel_dp->panel_power_off_time = ktime_get_boottime();
2426 2338
2427 intel_display_power_put(dev_priv, intel_dp->aux_power_domain); 2339 intel_display_power_put(dev_priv,
2340 intel_aux_power_domain(intel_dig_port));
2428} 2341}
2429 2342
2430static void edp_panel_vdd_work(struct work_struct *__work) 2343static void edp_panel_vdd_work(struct work_struct *__work)
@@ -2537,6 +2450,7 @@ void intel_edp_panel_on(struct intel_dp *intel_dp)
2537static void edp_panel_off(struct intel_dp *intel_dp) 2450static void edp_panel_off(struct intel_dp *intel_dp)
2538{ 2451{
2539 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2452 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2453 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2540 u32 pp; 2454 u32 pp;
2541 i915_reg_t pp_ctrl_reg; 2455 i915_reg_t pp_ctrl_reg;
2542 2456
@@ -2546,10 +2460,10 @@ static void edp_panel_off(struct intel_dp *intel_dp)
2546 return; 2460 return;
2547 2461
2548 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n", 2462 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2549 port_name(dp_to_dig_port(intel_dp)->base.port)); 2463 port_name(dig_port->base.port));
2550 2464
2551 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n", 2465 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2552 port_name(dp_to_dig_port(intel_dp)->base.port)); 2466 port_name(dig_port->base.port));
2553 2467
2554 pp = ironlake_get_pp_control(intel_dp); 2468 pp = ironlake_get_pp_control(intel_dp);
2555 /* We need to switch off panel power _and_ force vdd, for otherwise some 2469 /* We need to switch off panel power _and_ force vdd, for otherwise some
@@ -2568,7 +2482,7 @@ static void edp_panel_off(struct intel_dp *intel_dp)
2568 intel_dp->panel_power_off_time = ktime_get_boottime(); 2482 intel_dp->panel_power_off_time = ktime_get_boottime();
2569 2483
2570 /* We got a reference when we enabled the VDD. */ 2484 /* We got a reference when we enabled the VDD. */
2571 intel_display_power_put(dev_priv, intel_dp->aux_power_domain); 2485 intel_display_power_put(dev_priv, intel_aux_power_domain(dig_port));
2572} 2486}
2573 2487
2574void intel_edp_panel_off(struct intel_dp *intel_dp) 2488void intel_edp_panel_off(struct intel_dp *intel_dp)
@@ -3900,6 +3814,41 @@ intel_dp_read_dpcd(struct intel_dp *intel_dp)
3900 return intel_dp->dpcd[DP_DPCD_REV] != 0; 3814 return intel_dp->dpcd[DP_DPCD_REV] != 0;
3901} 3815}
3902 3816
3817static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
3818{
3819 /*
3820 * Clear the cached register set to avoid using stale values
3821 * for the sinks that do not support DSC.
3822 */
3823 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
3824
3825 /* Clear fec_capable to avoid using stale values */
3826 intel_dp->fec_capable = 0;
3827
3828 /* Cache the DSC DPCD if eDP or DP rev >= 1.4 */
3829 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 ||
3830 intel_dp->edp_dpcd[0] >= DP_EDP_14) {
3831 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
3832 intel_dp->dsc_dpcd,
3833 sizeof(intel_dp->dsc_dpcd)) < 0)
3834 DRM_ERROR("Failed to read DPCD register 0x%x\n",
3835 DP_DSC_SUPPORT);
3836
3837 DRM_DEBUG_KMS("DSC DPCD: %*ph\n",
3838 (int)sizeof(intel_dp->dsc_dpcd),
3839 intel_dp->dsc_dpcd);
3840 /* FEC is supported only on DP 1.4 */
3841 if (!intel_dp_is_edp(intel_dp)) {
3842 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
3843 &intel_dp->fec_capable) < 0)
3844 DRM_ERROR("Failed to read FEC DPCD register\n");
3845
3846 DRM_DEBUG_KMS("FEC CAPABILITY: %x\n",
3847 intel_dp->fec_capable);
3848 }
3849 }
3850}
3851
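
The rev >= 0x14 gate in the helper above works because the DPCD revision byte packs major.minor into two nibbles (0x12 = DP 1.2, 0x14 = DP 1.4), so a plain numeric compare orders revisions correctly. A small standalone decode (the readback value is hypothetical):

#include <stdio.h>

int main(void)
{
	unsigned int rev = 0x14;	/* hypothetical DP_DPCD_REV readback */

	printf("DPCD rev %u.%u -> DSC caps cached: %s\n",
	       rev >> 4, rev & 0xf, rev >= 0x14 ? "yes" : "no");
	return 0;
}
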
3903static bool 3852static bool
3904intel_edp_init_dpcd(struct intel_dp *intel_dp) 3853intel_edp_init_dpcd(struct intel_dp *intel_dp)
3905{ 3854{
@@ -3976,6 +3925,10 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
3976 3925
3977 intel_dp_set_common_rates(intel_dp); 3926 intel_dp_set_common_rates(intel_dp);
3978 3927
3928 /* Read the eDP DSC DPCD registers */
3929 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
3930 intel_dp_get_dsc_sink_cap(intel_dp);
3931
3979 return true; 3932 return true;
3980} 3933}
3981 3934
@@ -4029,16 +3982,10 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
4029} 3982}
4030 3983
4031static bool 3984static bool
4032intel_dp_can_mst(struct intel_dp *intel_dp) 3985intel_dp_sink_can_mst(struct intel_dp *intel_dp)
4033{ 3986{
4034 u8 mstm_cap; 3987 u8 mstm_cap;
4035 3988
4036 if (!i915_modparams.enable_dp_mst)
4037 return false;
4038
4039 if (!intel_dp->can_mst)
4040 return false;
4041
4042 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12) 3989 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4043 return false; 3990 return false;
4044 3991
@@ -4048,34 +3995,36 @@ intel_dp_can_mst(struct intel_dp *intel_dp)
4048 return mstm_cap & DP_MST_CAP; 3995 return mstm_cap & DP_MST_CAP;
4049} 3996}
4050 3997
3998static bool
3999intel_dp_can_mst(struct intel_dp *intel_dp)
4000{
4001 return i915_modparams.enable_dp_mst &&
4002 intel_dp->can_mst &&
4003 intel_dp_sink_can_mst(intel_dp);
4004}
4005
4051static void 4006static void
4052intel_dp_configure_mst(struct intel_dp *intel_dp) 4007intel_dp_configure_mst(struct intel_dp *intel_dp)
4053{ 4008{
4054 if (!i915_modparams.enable_dp_mst) 4009 struct intel_encoder *encoder =
4055 return; 4010 &dp_to_dig_port(intel_dp)->base;
4011 bool sink_can_mst = intel_dp_sink_can_mst(intel_dp);
4012
4013 DRM_DEBUG_KMS("MST support? port %c: %s, sink: %s, modparam: %s\n",
4014 port_name(encoder->port), yesno(intel_dp->can_mst),
4015 yesno(sink_can_mst), yesno(i915_modparams.enable_dp_mst));
4056 4016
4057 if (!intel_dp->can_mst) 4017 if (!intel_dp->can_mst)
4058 return; 4018 return;
4059 4019
4060 intel_dp->is_mst = intel_dp_can_mst(intel_dp); 4020 intel_dp->is_mst = sink_can_mst &&
4061 4021 i915_modparams.enable_dp_mst;
4062 if (intel_dp->is_mst)
4063 DRM_DEBUG_KMS("Sink is MST capable\n");
4064 else
4065 DRM_DEBUG_KMS("Sink is not MST capable\n");
4066 4022
4067 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 4023 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
4068 intel_dp->is_mst); 4024 intel_dp->is_mst);
4069} 4025}
4070 4026
4071static bool 4027static bool
4072intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4073{
4074 return drm_dp_dpcd_readb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
4075 sink_irq_vector) == 1;
4076}
4077
4078static bool
4079intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector) 4028intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4080{ 4029{
4081 return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, 4030 return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
@@ -4083,6 +4032,91 @@ intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4083 DP_DPRX_ESI_LEN; 4032 DP_DPRX_ESI_LEN;
4084} 4033}
4085 4034
4035u16 intel_dp_dsc_get_output_bpp(int link_clock, uint8_t lane_count,
4036 int mode_clock, int mode_hdisplay)
4037{
4038 u16 bits_per_pixel, max_bpp_small_joiner_ram;
4039 int i;
4040
4041 /*
4042 * Available Link Bandwidth (Kbits/sec) = (NumberOfLanes) *
4043 * (LinkSymbolClock) * 8 * ((100 - FECOverhead)/100) * (TimeSlotsPerMTP)
4044 * FECOverhead = 2.4%, for SST -> TimeSlotsPerMTP is 1,
4045 * for MST -> TimeSlotsPerMTP has to be calculated
4046 */
4047 bits_per_pixel = (link_clock * lane_count * 8 *
4048 DP_DSC_FEC_OVERHEAD_FACTOR) /
4049 mode_clock;
4050
4051 /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
4052 max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER /
4053 mode_hdisplay;
4054
4055 /*
4056 * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
4057 * check, output bpp from small joiner RAM check)
4058 */
4059 bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);
4060
4061 /* Error out if the max bpp is less than smallest allowed valid bpp */
4062 if (bits_per_pixel < valid_dsc_bpp[0]) {
4063 DRM_DEBUG_KMS("Unsupported BPP %d\n", bits_per_pixel);
4064 return 0;
4065 }
4066
4067 /* Find the nearest match in the array of known BPPs from VESA */
4068 for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
4069 if (bits_per_pixel < valid_dsc_bpp[i + 1])
4070 break;
4071 }
4072 bits_per_pixel = valid_dsc_bpp[i];
4073
4074 /*
4075 * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
4076 * fractional part is 0
4077 */
4078 return bits_per_pixel << 4;
4079}
4080
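
A worked trace of intel_dp_dsc_get_output_bpp() for a hypothetical 4-lane HBR2 link (540000 kHz) driving a 3840-wide mode at 533250 kHz: the small-joiner term is 61440 / 3840 = 16 bpp, which undercuts the link-bandwidth term (roughly 31628 here, since the 976 factor appears to carry the 0.976 ratio scaled by 1000); 16 is then snapped down to the nearest valid VESA bpp, 15, and returned as 15 << 4 = 240 in U6.4. The standalone sketch widens the intermediate product to 64 bits, since link_clock * lane_count * 8 * 976 exceeds 32-bit range at these rates:

#include <stdio.h>
#include <stdint.h>

static const unsigned int valid_dsc_bpp[] = { 6, 8, 10, 12, 15 };

int main(void)
{
	int link_clock = 540000, lane_count = 4;	/* HBR2 x4 */
	int mode_clock = 533250, hdisplay = 3840;	/* hypothetical mode */
	uint64_t bw_bpp;
	unsigned int bpp, i;

	bw_bpp = (uint64_t)link_clock * lane_count * 8 * 976 / mode_clock;
	bpp = 61440 / hdisplay;			/* small-joiner RAM limit: 16 */
	if (bw_bpp < bpp)
		bpp = (unsigned int)bw_bpp;

	/* snap down to the nearest valid VESA bpp (error path omitted) */
	for (i = 0; i < 4 && bpp >= valid_dsc_bpp[i + 1]; i++)
		;
	bpp = valid_dsc_bpp[i];

	printf("bpp %u -> U6.4 %u\n", bpp, bpp << 4);	/* prints: bpp 15 -> U6.4 240 */
	return 0;
}
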
4081u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
4082 int mode_clock,
4083 int mode_hdisplay)
4084{
4085 u8 min_slice_count, i;
4086 int max_slice_width;
4087
4088 if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
4089 min_slice_count = DIV_ROUND_UP(mode_clock,
4090 DP_DSC_MAX_ENC_THROUGHPUT_0);
4091 else
4092 min_slice_count = DIV_ROUND_UP(mode_clock,
4093 DP_DSC_MAX_ENC_THROUGHPUT_1);
4094
4095 max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
4096 if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
4097 DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
4098 max_slice_width);
4099 return 0;
4100 }
4101 /* Also take into account max slice width */
4102 min_slice_count = min_t(uint8_t, min_slice_count,
4103 DIV_ROUND_UP(mode_hdisplay,
4104 max_slice_width));
4105
4106 /* Find the closest match to the valid slice count values */
4107 for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
4108 if (valid_dsc_slicecount[i] >
4109 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
4110 false))
4111 break;
4112 if (min_slice_count <= valid_dsc_slicecount[i])
4113 return valid_dsc_slicecount[i];
4114 }
4115
4116 DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
4117 return 0;
4118}
4119
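
A worked trace of the slice-count selection for a hypothetical 3840-wide mode at 594000 kHz: the clock sits under the 2720000 KPixels/s peak-rate threshold, so the throughput bound is DIV_ROUND_UP(594000, 340000) = 2; with an assumed sink max slice width of 2560, the width term DIV_ROUND_UP(3840, 2560) is also 2, so combining the two as above leaves 2; the first entry of {1, 2, 4} that an (assumed) 4-slice sink supports and that satisfies the bound is 2:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static const int valid_dsc_slicecount[] = { 1, 2, 4 };

int main(void)
{
	int mode_clock = 594000, hdisplay = 3840;	/* hypothetical mode */
	int sink_max_slice_width = 2560;		/* assumed DPCD cap */
	int sink_max_slices = 4;			/* assumed DPCD cap */
	int min_slices, width_slices, i;

	min_slices = mode_clock <= 2720000 ?
		DIV_ROUND_UP(mode_clock, 340000) :	/* enc. throughput 0 */
		DIV_ROUND_UP(mode_clock, 400000);	/* enc. throughput 1 */

	/* slice-width bound, combined via min_t() as in the patch */
	width_slices = DIV_ROUND_UP(hdisplay, sink_max_slice_width);
	if (width_slices < min_slices)
		min_slices = width_slices;

	for (i = 0; i < 3; i++) {
		if (valid_dsc_slicecount[i] > sink_max_slices)
			break;
		if (min_slices <= valid_dsc_slicecount[i]) {
			printf("slice count %d\n", valid_dsc_slicecount[i]);
			return 0;
		}
	}
	printf("unsupported slice count %d\n", min_slices);
	return 0;
}
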
4086static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp) 4120static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4087{ 4121{
4088 int status = 0; 4122 int status = 0;
@@ -4403,7 +4437,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
4403 4437
4404 /* Suppress underruns caused by re-training */ 4438 /* Suppress underruns caused by re-training */
4405 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); 4439 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
4406 if (crtc->config->has_pch_encoder) 4440 if (crtc_state->has_pch_encoder)
4407 intel_set_pch_fifo_underrun_reporting(dev_priv, 4441 intel_set_pch_fifo_underrun_reporting(dev_priv,
4408 intel_crtc_pch_transcoder(crtc), false); 4442 intel_crtc_pch_transcoder(crtc), false);
4409 4443
@@ -4414,7 +4448,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
4414 intel_wait_for_vblank(dev_priv, crtc->pipe); 4448 intel_wait_for_vblank(dev_priv, crtc->pipe);
4415 4449
4416 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); 4450 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
4417 if (crtc->config->has_pch_encoder) 4451 if (crtc_state->has_pch_encoder)
4418 intel_set_pch_fifo_underrun_reporting(dev_priv, 4452 intel_set_pch_fifo_underrun_reporting(dev_priv,
4419 intel_crtc_pch_transcoder(crtc), true); 4453 intel_crtc_pch_transcoder(crtc), true);
4420 4454
@@ -4462,6 +4496,29 @@ static bool intel_dp_hotplug(struct intel_encoder *encoder,
4462 return changed; 4496 return changed;
4463} 4497}
4464 4498
4499static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
4500{
4501 u8 val;
4502
4503 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
4504 return;
4505
4506 if (drm_dp_dpcd_readb(&intel_dp->aux,
4507 DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
4508 return;
4509
4510 drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);
4511
4512 if (val & DP_AUTOMATED_TEST_REQUEST)
4513 intel_dp_handle_test_request(intel_dp);
4514
4515 if (val & DP_CP_IRQ)
4516 intel_hdcp_check_link(intel_dp->attached_connector);
4517
4518 if (val & DP_SINK_SPECIFIC_IRQ)
4519 DRM_DEBUG_DRIVER("Sink specific irq unhandled\n");
4520}
4521
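
The helper above folds two near-identical blocks (previously in the short-pulse and long-pulse paths) into one routine: read DP_DEVICE_SERVICE_IRQ_VECTOR, write the value straight back to ack it (the bits are write-1-to-clear), then dispatch on the individual bits. A standalone sketch of that shape with stubbed DPCD accessors (the accessor names are stand-ins, not drm helpers; bit values per the DP spec IRQ vector layout):

#include <stdio.h>
#include <stdint.h>

#define DP_AUTOMATED_TEST_REQUEST	(1 << 1)
#define DP_CP_IRQ			(1 << 2)
#define DP_SINK_SPECIFIC_IRQ		(1 << 6)

static uint8_t fake_dpcd = DP_AUTOMATED_TEST_REQUEST | DP_SINK_SPECIFIC_IRQ;

/* stand-ins for drm_dp_dpcd_readb()/drm_dp_dpcd_writeb() */
static int dpcd_readb(uint8_t *val) { *val = fake_dpcd; return 1; }
static void dpcd_writeb(uint8_t val) { fake_dpcd &= ~val; }	/* write-1-to-clear */

int main(void)
{
	uint8_t val;

	if (dpcd_readb(&val) != 1 || !val)
		return 0;
	dpcd_writeb(val);	/* ack first, then dispatch */

	if (val & DP_AUTOMATED_TEST_REQUEST)
		printf("handle test request\n");
	if (val & DP_CP_IRQ)
		printf("check HDCP link\n");
	if (val & DP_SINK_SPECIFIC_IRQ)
		printf("sink specific irq unhandled\n");
	return 0;
}
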
4465/* 4522/*
4466 * According to DP spec 4523 * According to DP spec
4467 * 5.1.2: 4524 * 5.1.2:
@@ -4479,7 +4536,6 @@ static bool
4479intel_dp_short_pulse(struct intel_dp *intel_dp) 4536intel_dp_short_pulse(struct intel_dp *intel_dp)
4480{ 4537{
4481 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4538 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4482 u8 sink_irq_vector = 0;
4483 u8 old_sink_count = intel_dp->sink_count; 4539 u8 old_sink_count = intel_dp->sink_count;
4484 bool ret; 4540 bool ret;
4485 4541
@@ -4502,20 +4558,7 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
4502 return false; 4558 return false;
4503 } 4559 }
4504 4560
4505 /* Try to read the source of the interrupt */ 4561 intel_dp_check_service_irq(intel_dp);
4506 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4507 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) &&
4508 sink_irq_vector != 0) {
4509 /* Clear interrupt source */
4510 drm_dp_dpcd_writeb(&intel_dp->aux,
4511 DP_DEVICE_SERVICE_IRQ_VECTOR,
4512 sink_irq_vector);
4513
4514 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4515 intel_dp_handle_test_request(intel_dp);
4516 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4517 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4518 }
4519 4562
4520 /* Handle CEC interrupts, if any */ 4563 /* Handle CEC interrupts, if any */
4521 drm_dp_cec_irq(&intel_dp->aux); 4564 drm_dp_cec_irq(&intel_dp->aux);
@@ -4810,6 +4853,9 @@ static void icl_update_tc_port_type(struct drm_i915_private *dev_priv,
4810 type_str); 4853 type_str);
4811} 4854}
4812 4855
4856static void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
4857 struct intel_digital_port *dig_port);
4858
4813/* 4859/*
4814 * This function implements the first part of the Connect Flow described by our 4860 * This function implements the first part of the Connect Flow described by our
4815 * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading 4861 * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
@@ -4864,9 +4910,7 @@ static bool icl_tc_phy_connect(struct drm_i915_private *dev_priv,
4864 if (dig_port->tc_type == TC_PORT_TYPEC && 4910 if (dig_port->tc_type == TC_PORT_TYPEC &&
4865 !(I915_READ(PORT_TX_DFLEXDPSP) & TC_LIVE_STATE_TC(tc_port))) { 4911 !(I915_READ(PORT_TX_DFLEXDPSP) & TC_LIVE_STATE_TC(tc_port))) {
4866 DRM_DEBUG_KMS("TC PHY %d sudden disconnect.\n", tc_port); 4912 DRM_DEBUG_KMS("TC PHY %d sudden disconnect.\n", tc_port);
4867 val = I915_READ(PORT_TX_DFLEXDPCSSS); 4913 icl_tc_phy_disconnect(dev_priv, dig_port);
4868 val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
4869 I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
4870 return false; 4914 return false;
4871 } 4915 }
4872 4916
@@ -4881,21 +4925,24 @@ static void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
4881 struct intel_digital_port *dig_port) 4925 struct intel_digital_port *dig_port)
4882{ 4926{
4883 enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port); 4927 enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
4884 u32 val;
4885 4928
4886 if (dig_port->tc_type != TC_PORT_LEGACY && 4929 if (dig_port->tc_type == TC_PORT_UNKNOWN)
4887 dig_port->tc_type != TC_PORT_TYPEC)
4888 return; 4930 return;
4889 4931
4890 /* 4932 /*
4891 * This function may be called many times in a row without an HPD event 4933 * The TBT disconnection flow is to read the live status, which was
4892 * in between, so try to avoid the write when we can. 4934 * done in the caller.
4893 */ 4935 */
4894 val = I915_READ(PORT_TX_DFLEXDPCSSS); 4936 if (dig_port->tc_type == TC_PORT_TYPEC ||
4895 if (val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port)) { 4937 dig_port->tc_type == TC_PORT_LEGACY) {
4938 u32 val;
4939
4940 val = I915_READ(PORT_TX_DFLEXDPCSSS);
4896 val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port); 4941 val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
4897 I915_WRITE(PORT_TX_DFLEXDPCSSS, val); 4942 I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
4898 } 4943 }
4944
4945 dig_port->tc_type = TC_PORT_UNKNOWN;
4899} 4946}
4900 4947
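
After this change the disconnect path is driven purely by the cached tc_type: UNKNOWN means there is nothing to undo, TYPEC and LEGACY drop PHY ownership by clearing the not-safe bit, TBT needs no register write (the caller already read the live status), and every path ends by resetting the type to UNKNOWN so the next connect re-runs the full flow. A standalone sketch of that state handling (the register bit is a simplified stand-in):

#include <stdio.h>
#include <stdint.h>

enum tc_port_type { TC_PORT_UNKNOWN, TC_PORT_LEGACY, TC_PORT_TYPEC, TC_PORT_TBT };

static uint32_t dflexdpcsss = 1;	/* stand-in for DP_PHY_MODE_STATUS_NOT_SAFE */

static void tc_phy_disconnect(enum tc_port_type *type)
{
	if (*type == TC_PORT_UNKNOWN)
		return;				/* never connected */

	if (*type == TC_PORT_TYPEC || *type == TC_PORT_LEGACY)
		dflexdpcsss &= ~1u;		/* drop PHY ownership */

	/* TBT: nothing to write here */
	*type = TC_PORT_UNKNOWN;
}

int main(void)
{
	enum tc_port_type t = TC_PORT_TYPEC;

	tc_phy_disconnect(&t);
	printf("type %d, not-safe bit %u\n", (int)t, (unsigned)dflexdpcsss);
	return 0;
}
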
4901/* 4948/*
@@ -4945,19 +4992,14 @@ static bool icl_digital_port_connected(struct intel_encoder *encoder)
4945 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 4992 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4946 struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); 4993 struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
4947 4994
4948 switch (encoder->hpd_pin) { 4995 if (intel_port_is_combophy(dev_priv, encoder->port))
4949 case HPD_PORT_A:
4950 case HPD_PORT_B:
4951 return icl_combo_port_connected(dev_priv, dig_port); 4996 return icl_combo_port_connected(dev_priv, dig_port);
4952 case HPD_PORT_C: 4997 else if (intel_port_is_tc(dev_priv, encoder->port))
4953 case HPD_PORT_D:
4954 case HPD_PORT_E:
4955 case HPD_PORT_F:
4956 return icl_tc_port_connected(dev_priv, dig_port); 4998 return icl_tc_port_connected(dev_priv, dig_port);
4957 default: 4999 else
4958 MISSING_CASE(encoder->hpd_pin); 5000 MISSING_CASE(encoder->hpd_pin);
4959 return false; 5001
4960 } 5002 return false;
4961} 5003}
4962 5004
4963/* 5005/*
@@ -4982,20 +5024,23 @@ bool intel_digital_port_connected(struct intel_encoder *encoder)
4982 return g4x_digital_port_connected(encoder); 5024 return g4x_digital_port_connected(encoder);
4983 } 5025 }
4984 5026
4985 if (IS_GEN5(dev_priv)) 5027 if (INTEL_GEN(dev_priv) >= 11)
4986 return ilk_digital_port_connected(encoder); 5028 return icl_digital_port_connected(encoder);
4987 else if (IS_GEN6(dev_priv)) 5029 else if (IS_GEN10(dev_priv) || IS_GEN9_BC(dev_priv))
4988 return snb_digital_port_connected(encoder); 5030 return spt_digital_port_connected(encoder);
4989 else if (IS_GEN7(dev_priv))
4990 return ivb_digital_port_connected(encoder);
4991 else if (IS_GEN8(dev_priv))
4992 return bdw_digital_port_connected(encoder);
4993 else if (IS_GEN9_LP(dev_priv)) 5031 else if (IS_GEN9_LP(dev_priv))
4994 return bxt_digital_port_connected(encoder); 5032 return bxt_digital_port_connected(encoder);
4995 else if (IS_GEN9_BC(dev_priv) || IS_GEN10(dev_priv)) 5033 else if (IS_GEN8(dev_priv))
4996 return spt_digital_port_connected(encoder); 5034 return bdw_digital_port_connected(encoder);
4997 else 5035 else if (IS_GEN7(dev_priv))
4998 return icl_digital_port_connected(encoder); 5036 return ivb_digital_port_connected(encoder);
5037 else if (IS_GEN6(dev_priv))
5038 return snb_digital_port_connected(encoder);
5039 else if (IS_GEN5(dev_priv))
5040 return ilk_digital_port_connected(encoder);
5041
5042 MISSING_CASE(INTEL_GEN(dev_priv));
5043 return false;
4999} 5044}
5000 5045
5001static struct edid * 5046static struct edid *
@@ -5042,28 +5087,35 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
5042} 5087}
5043 5088
5044static int 5089static int
5045intel_dp_long_pulse(struct intel_connector *connector, 5090intel_dp_detect(struct drm_connector *connector,
5046 struct drm_modeset_acquire_ctx *ctx) 5091 struct drm_modeset_acquire_ctx *ctx,
5092 bool force)
5047{ 5093{
5048 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 5094 struct drm_i915_private *dev_priv = to_i915(connector->dev);
5049 struct intel_dp *intel_dp = intel_attached_dp(&connector->base); 5095 struct intel_dp *intel_dp = intel_attached_dp(connector);
5096 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5097 struct intel_encoder *encoder = &dig_port->base;
5050 enum drm_connector_status status; 5098 enum drm_connector_status status;
5051 u8 sink_irq_vector = 0; 5099 enum intel_display_power_domain aux_domain =
5100 intel_aux_power_domain(dig_port);
5052 5101
5102 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
5103 connector->base.id, connector->name);
5053 WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); 5104 WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
5054 5105
5055 intel_display_power_get(dev_priv, intel_dp->aux_power_domain); 5106 intel_display_power_get(dev_priv, aux_domain);
5056 5107
5057 /* Can't disconnect eDP */ 5108 /* Can't disconnect eDP */
5058 if (intel_dp_is_edp(intel_dp)) 5109 if (intel_dp_is_edp(intel_dp))
5059 status = edp_detect(intel_dp); 5110 status = edp_detect(intel_dp);
5060 else if (intel_digital_port_connected(&dp_to_dig_port(intel_dp)->base)) 5111 else if (intel_digital_port_connected(encoder))
5061 status = intel_dp_detect_dpcd(intel_dp); 5112 status = intel_dp_detect_dpcd(intel_dp);
5062 else 5113 else
5063 status = connector_status_disconnected; 5114 status = connector_status_disconnected;
5064 5115
5065 if (status == connector_status_disconnected) { 5116 if (status == connector_status_disconnected) {
5066 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); 5117 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
5118 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
5067 5119
5068 if (intel_dp->is_mst) { 5120 if (intel_dp->is_mst) {
5069 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", 5121 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
@@ -5089,6 +5141,10 @@ intel_dp_long_pulse(struct intel_connector *connector,
5089 5141
5090 intel_dp_print_rates(intel_dp); 5142 intel_dp_print_rates(intel_dp);
5091 5143
5144 /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
5145 if (INTEL_GEN(dev_priv) >= 11)
5146 intel_dp_get_dsc_sink_cap(intel_dp);
5147
5092 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, 5148 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
5093 drm_dp_is_branch(intel_dp->dpcd)); 5149 drm_dp_is_branch(intel_dp->dpcd));
5094 5150
@@ -5109,9 +5165,13 @@ intel_dp_long_pulse(struct intel_connector *connector,
5109 * with an IRQ_HPD, so force a link status check. 5165 * with an IRQ_HPD, so force a link status check.
5110 */ 5166 */
5111 if (!intel_dp_is_edp(intel_dp)) { 5167 if (!intel_dp_is_edp(intel_dp)) {
5112 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 5168 int ret;
5113 5169
5114 intel_dp_retrain_link(encoder, ctx); 5170 ret = intel_dp_retrain_link(encoder, ctx);
5171 if (ret) {
5172 intel_display_power_put(dev_priv, aux_domain);
5173 return ret;
5174 }
5115 } 5175 }
5116 5176
5117 /* 5177 /*
@@ -5123,61 +5183,17 @@ intel_dp_long_pulse(struct intel_connector *connector,
5123 intel_dp->aux.i2c_defer_count = 0; 5183 intel_dp->aux.i2c_defer_count = 0;
5124 5184
5125 intel_dp_set_edid(intel_dp); 5185 intel_dp_set_edid(intel_dp);
5126 if (intel_dp_is_edp(intel_dp) || connector->detect_edid) 5186 if (intel_dp_is_edp(intel_dp) ||
5187 to_intel_connector(connector)->detect_edid)
5127 status = connector_status_connected; 5188 status = connector_status_connected;
5128 intel_dp->detect_done = true;
5129 5189
5130 /* Try to read the source of the interrupt */ 5190 intel_dp_check_service_irq(intel_dp);
5131 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
5132 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) &&
5133 sink_irq_vector != 0) {
5134 /* Clear interrupt source */
5135 drm_dp_dpcd_writeb(&intel_dp->aux,
5136 DP_DEVICE_SERVICE_IRQ_VECTOR,
5137 sink_irq_vector);
5138
5139 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
5140 intel_dp_handle_test_request(intel_dp);
5141 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
5142 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
5143 }
5144 5191
5145out: 5192out:
5146 if (status != connector_status_connected && !intel_dp->is_mst) 5193 if (status != connector_status_connected && !intel_dp->is_mst)
5147 intel_dp_unset_edid(intel_dp); 5194 intel_dp_unset_edid(intel_dp);
5148 5195
5149 intel_display_power_put(dev_priv, intel_dp->aux_power_domain); 5196 intel_display_power_put(dev_priv, aux_domain);
5150 return status;
5151}
5152
5153static int
5154intel_dp_detect(struct drm_connector *connector,
5155 struct drm_modeset_acquire_ctx *ctx,
5156 bool force)
5157{
5158 struct intel_dp *intel_dp = intel_attached_dp(connector);
5159 int status = connector->status;
5160
5161 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
5162 connector->base.id, connector->name);
5163
5164 /* If full detect is not performed yet, do a full detect */
5165 if (!intel_dp->detect_done) {
5166 struct drm_crtc *crtc;
5167 int ret;
5168
5169 crtc = connector->state->crtc;
5170 if (crtc) {
5171 ret = drm_modeset_lock(&crtc->mutex, ctx);
5172 if (ret)
5173 return ret;
5174 }
5175
5176 status = intel_dp_long_pulse(intel_dp->attached_connector, ctx);
5177 }
5178
5179 intel_dp->detect_done = false;
5180
5181 return status; 5197 return status;
5182} 5198}
5183 5199
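
The hunk above folds intel_dp_long_pulse() into intel_dp_detect() and lets a failing intel_dp_retrain_link() propagate its error instead of swallowing it. A minimal sketch of the resulting control flow, with the DPCD/EDID probing elided (names are from the hunk; the elisions are assumptions):

        static int detect_flow_sketch(struct drm_connector *connector,
                                      struct drm_modeset_acquire_ctx *ctx)
        {
                struct drm_i915_private *dev_priv = to_i915(connector->dev);
                struct intel_dp *intel_dp = intel_attached_dp(connector);
                struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
                enum intel_display_power_domain aux_domain =
                        intel_aux_power_domain(dig_port);
                int status = connector_status_disconnected;
                int ret;

                intel_display_power_get(dev_priv, aux_domain);

                /* DPCD probe, DSC caps, EDID reading elided. */

                /* Retraining may need extra modeset locks through ctx;
                 * -EDEADLK has to reach the probe helper so it can back
                 * off and retry, and the power reference must be dropped
                 * on that path as well. */
                ret = intel_dp_retrain_link(&dig_port->base, ctx);
                if (ret) {
                        intel_display_power_put(dev_priv, aux_domain);
                        return ret;
                }

                intel_display_power_put(dev_priv, aux_domain);
                return status;
        }
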
@@ -5185,8 +5201,11 @@ static void
5185intel_dp_force(struct drm_connector *connector) 5201intel_dp_force(struct drm_connector *connector)
5186{ 5202{
5187 struct intel_dp *intel_dp = intel_attached_dp(connector); 5203 struct intel_dp *intel_dp = intel_attached_dp(connector);
5188 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base; 5204 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5205 struct intel_encoder *intel_encoder = &dig_port->base;
5189 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev); 5206 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
5207 enum intel_display_power_domain aux_domain =
5208 intel_aux_power_domain(dig_port);
5190 5209
5191 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", 5210 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
5192 connector->base.id, connector->name); 5211 connector->base.id, connector->name);
@@ -5195,11 +5214,11 @@ intel_dp_force(struct drm_connector *connector)
5195 if (connector->status != connector_status_connected) 5214 if (connector->status != connector_status_connected)
5196 return; 5215 return;
5197 5216
5198 intel_display_power_get(dev_priv, intel_dp->aux_power_domain); 5217 intel_display_power_get(dev_priv, aux_domain);
5199 5218
5200 intel_dp_set_edid(intel_dp); 5219 intel_dp_set_edid(intel_dp);
5201 5220
5202 intel_display_power_put(dev_priv, intel_dp->aux_power_domain); 5221 intel_display_power_put(dev_priv, aux_domain);
5203} 5222}
5204 5223
5205static int intel_dp_get_modes(struct drm_connector *connector) 5224static int intel_dp_get_modes(struct drm_connector *connector)
@@ -5264,27 +5283,6 @@ intel_dp_connector_unregister(struct drm_connector *connector)
5264 intel_connector_unregister(connector); 5283 intel_connector_unregister(connector);
5265} 5284}
5266 5285
5267static void
5268intel_dp_connector_destroy(struct drm_connector *connector)
5269{
5270 struct intel_connector *intel_connector = to_intel_connector(connector);
5271
5272 kfree(intel_connector->detect_edid);
5273
5274 if (!IS_ERR_OR_NULL(intel_connector->edid))
5275 kfree(intel_connector->edid);
5276
5277 /*
5278 * Can't call intel_dp_is_edp() since the encoder may have been
5279 * destroyed already.
5280 */
5281 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5282 intel_panel_fini(&intel_connector->panel);
5283
5284 drm_connector_cleanup(connector);
5285 kfree(connector);
5286}
5287
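
The DP-specific destructor removed above gives way to the shared intel_connector_destroy() in the new intel_connector.c. Its body is not part of this diff; a plausible reconstruction from the removed code, assuming intel_panel_fini() is safe on connectors without a panel:

        void intel_connector_destroy(struct drm_connector *connector)
        {
                struct intel_connector *intel_connector = to_intel_connector(connector);

                kfree(intel_connector->detect_edid);

                if (!IS_ERR_OR_NULL(intel_connector->edid))
                        kfree(intel_connector->edid);

                intel_panel_fini(&intel_connector->panel);

                drm_connector_cleanup(connector);
                kfree(connector);
        }
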
5288void intel_dp_encoder_destroy(struct drm_encoder *encoder) 5286void intel_dp_encoder_destroy(struct drm_encoder *encoder)
5289{ 5287{
5290 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 5288 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
@@ -5348,7 +5346,8 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
5348 dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN, 5346 dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN,
5349 an, DRM_HDCP_AN_LEN); 5347 an, DRM_HDCP_AN_LEN);
5350 if (dpcd_ret != DRM_HDCP_AN_LEN) { 5348 if (dpcd_ret != DRM_HDCP_AN_LEN) {
5351 DRM_ERROR("Failed to write An over DP/AUX (%zd)\n", dpcd_ret); 5349 DRM_DEBUG_KMS("Failed to write An over DP/AUX (%zd)\n",
5350 dpcd_ret);
5352 return dpcd_ret >= 0 ? -EIO : dpcd_ret; 5351 return dpcd_ret >= 0 ? -EIO : dpcd_ret;
5353 } 5352 }
5354 5353
@@ -5364,10 +5363,10 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
5364 rxbuf, sizeof(rxbuf), 5363 rxbuf, sizeof(rxbuf),
5365 DP_AUX_CH_CTL_AUX_AKSV_SELECT); 5364 DP_AUX_CH_CTL_AUX_AKSV_SELECT);
5366 if (ret < 0) { 5365 if (ret < 0) {
5367 DRM_ERROR("Write Aksv over DP/AUX failed (%d)\n", ret); 5366 DRM_DEBUG_KMS("Write Aksv over DP/AUX failed (%d)\n", ret);
5368 return ret; 5367 return ret;
5369 } else if (ret == 0) { 5368 } else if (ret == 0) {
5370 DRM_ERROR("Aksv write over DP/AUX was empty\n"); 5369 DRM_DEBUG_KMS("Aksv write over DP/AUX was empty\n");
5371 return -EIO; 5370 return -EIO;
5372 } 5371 }
5373 5372
@@ -5382,7 +5381,7 @@ static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
5382 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv, 5381 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
5383 DRM_HDCP_KSV_LEN); 5382 DRM_HDCP_KSV_LEN);
5384 if (ret != DRM_HDCP_KSV_LEN) { 5383 if (ret != DRM_HDCP_KSV_LEN) {
5385 DRM_ERROR("Read Bksv from DP/AUX failed (%zd)\n", ret); 5384 DRM_DEBUG_KMS("Read Bksv from DP/AUX failed (%zd)\n", ret);
5386 return ret >= 0 ? -EIO : ret; 5385 return ret >= 0 ? -EIO : ret;
5387 } 5386 }
5388 return 0; 5387 return 0;
@@ -5400,7 +5399,7 @@ static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
5400 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO, 5399 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO,
5401 bstatus, DRM_HDCP_BSTATUS_LEN); 5400 bstatus, DRM_HDCP_BSTATUS_LEN);
5402 if (ret != DRM_HDCP_BSTATUS_LEN) { 5401 if (ret != DRM_HDCP_BSTATUS_LEN) {
5403 DRM_ERROR("Read bstatus from DP/AUX failed (%zd)\n", ret); 5402 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
5404 return ret >= 0 ? -EIO : ret; 5403 return ret >= 0 ? -EIO : ret;
5405 } 5404 }
5406 return 0; 5405 return 0;
@@ -5415,7 +5414,7 @@ int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port,
5415 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS, 5414 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
5416 bcaps, 1); 5415 bcaps, 1);
5417 if (ret != 1) { 5416 if (ret != 1) {
5418 DRM_ERROR("Read bcaps from DP/AUX failed (%zd)\n", ret); 5417 DRM_DEBUG_KMS("Read bcaps from DP/AUX failed (%zd)\n", ret);
5419 return ret >= 0 ? -EIO : ret; 5418 return ret >= 0 ? -EIO : ret;
5420 } 5419 }
5421 5420
@@ -5445,7 +5444,7 @@ int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
5445 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME, 5444 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
5446 ri_prime, DRM_HDCP_RI_LEN); 5445 ri_prime, DRM_HDCP_RI_LEN);
5447 if (ret != DRM_HDCP_RI_LEN) { 5446 if (ret != DRM_HDCP_RI_LEN) {
5448 DRM_ERROR("Read Ri' from DP/AUX failed (%zd)\n", ret); 5447 DRM_DEBUG_KMS("Read Ri' from DP/AUX failed (%zd)\n", ret);
5449 return ret >= 0 ? -EIO : ret; 5448 return ret >= 0 ? -EIO : ret;
5450 } 5449 }
5451 return 0; 5450 return 0;
@@ -5460,7 +5459,7 @@ int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
5460 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, 5459 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
5461 &bstatus, 1); 5460 &bstatus, 1);
5462 if (ret != 1) { 5461 if (ret != 1) {
5463 DRM_ERROR("Read bstatus from DP/AUX failed (%zd)\n", ret); 5462 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
5464 return ret >= 0 ? -EIO : ret; 5463 return ret >= 0 ? -EIO : ret;
5465 } 5464 }
5466 *ksv_ready = bstatus & DP_BSTATUS_READY; 5465 *ksv_ready = bstatus & DP_BSTATUS_READY;
@@ -5482,8 +5481,8 @@ int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
5482 ksv_fifo + i * DRM_HDCP_KSV_LEN, 5481 ksv_fifo + i * DRM_HDCP_KSV_LEN,
5483 len); 5482 len);
5484 if (ret != len) { 5483 if (ret != len) {
5485 DRM_ERROR("Read ksv[%d] from DP/AUX failed (%zd)\n", i, 5484 DRM_DEBUG_KMS("Read ksv[%d] from DP/AUX failed (%zd)\n",
5486 ret); 5485 i, ret);
5487 return ret >= 0 ? -EIO : ret; 5486 return ret >= 0 ? -EIO : ret;
5488 } 5487 }
5489 } 5488 }
@@ -5503,7 +5502,7 @@ int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
5503 DP_AUX_HDCP_V_PRIME(i), part, 5502 DP_AUX_HDCP_V_PRIME(i), part,
5504 DRM_HDCP_V_PRIME_PART_LEN); 5503 DRM_HDCP_V_PRIME_PART_LEN);
5505 if (ret != DRM_HDCP_V_PRIME_PART_LEN) { 5504 if (ret != DRM_HDCP_V_PRIME_PART_LEN) {
5506 DRM_ERROR("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret); 5505 DRM_DEBUG_KMS("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
5507 return ret >= 0 ? -EIO : ret; 5506 return ret >= 0 ? -EIO : ret;
5508 } 5507 }
5509 return 0; 5508 return 0;
@@ -5526,7 +5525,7 @@ bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port)
5526 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, 5525 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
5527 &bstatus, 1); 5526 &bstatus, 1);
5528 if (ret != 1) { 5527 if (ret != 1) {
5529 DRM_ERROR("Read bstatus from DP/AUX failed (%zd)\n", ret); 5528 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
5530 return false; 5529 return false;
5531 } 5530 }
5532 5531
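
The HDCP hunks above all make the same two changes: the log level drops from DRM_ERROR to DRM_DEBUG_KMS (failures against misbehaving sinks are routine and should not read as driver bugs), and the errno convention stays intact. drm_dp_dpcd_read() returns a byte count or a negative errno, so a short transfer maps to -EIO while real errnos pass through. The recurring pattern in isolation (hypothetical helper name):

        static int read_hdcp_bstatus(struct drm_dp_aux *aux, u8 *bstatus)
        {
                ssize_t ret = drm_dp_dpcd_read(aux, DP_AUX_HDCP_BSTATUS, bstatus, 1);

                if (ret != 1) {
                        DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
                        return ret >= 0 ? -EIO : ret;   /* short read -> -EIO */
                }

                return 0;
        }
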
@@ -5565,6 +5564,7 @@ static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
5565static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) 5564static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
5566{ 5565{
5567 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 5566 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
5567 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5568 5568
5569 lockdep_assert_held(&dev_priv->pps_mutex); 5569 lockdep_assert_held(&dev_priv->pps_mutex);
5570 5570
@@ -5578,7 +5578,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
5578 * indefinitely. 5578 * indefinitely.
5579 */ 5579 */
5580 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n"); 5580 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
5581 intel_display_power_get(dev_priv, intel_dp->aux_power_domain); 5581 intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port));
5582 5582
5583 edp_panel_vdd_schedule_off(intel_dp); 5583 edp_panel_vdd_schedule_off(intel_dp);
5584} 5584}
@@ -5631,7 +5631,7 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
5631 .atomic_set_property = intel_digital_connector_atomic_set_property, 5631 .atomic_set_property = intel_digital_connector_atomic_set_property,
5632 .late_register = intel_dp_connector_register, 5632 .late_register = intel_dp_connector_register,
5633 .early_unregister = intel_dp_connector_unregister, 5633 .early_unregister = intel_dp_connector_unregister,
5634 .destroy = intel_dp_connector_destroy, 5634 .destroy = intel_connector_destroy,
5635 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 5635 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5636 .atomic_duplicate_state = intel_digital_connector_duplicate_state, 5636 .atomic_duplicate_state = intel_digital_connector_duplicate_state,
5637}; 5637};
@@ -5673,11 +5673,11 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
5673 5673
5674 if (long_hpd) { 5674 if (long_hpd) {
5675 intel_dp->reset_link_params = true; 5675 intel_dp->reset_link_params = true;
5676 intel_dp->detect_done = false;
5677 return IRQ_NONE; 5676 return IRQ_NONE;
5678 } 5677 }
5679 5678
5680 intel_display_power_get(dev_priv, intel_dp->aux_power_domain); 5679 intel_display_power_get(dev_priv,
5680 intel_aux_power_domain(intel_dig_port));
5681 5681
5682 if (intel_dp->is_mst) { 5682 if (intel_dp->is_mst) {
5683 if (intel_dp_check_mst_status(intel_dp) == -EINVAL) { 5683 if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
@@ -5690,7 +5690,6 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
5690 intel_dp->is_mst = false; 5690 intel_dp->is_mst = false;
5691 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 5691 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
5692 intel_dp->is_mst); 5692 intel_dp->is_mst);
5693 intel_dp->detect_done = false;
5694 goto put_power; 5693 goto put_power;
5695 } 5694 }
5696 } 5695 }
@@ -5700,19 +5699,15 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
5700 5699
5701 handled = intel_dp_short_pulse(intel_dp); 5700 handled = intel_dp_short_pulse(intel_dp);
5702 5701
5703 /* Short pulse can signify loss of hdcp authentication */ 5702 if (!handled)
5704 intel_hdcp_check_link(intel_dp->attached_connector);
5705
5706 if (!handled) {
5707 intel_dp->detect_done = false;
5708 goto put_power; 5703 goto put_power;
5709 }
5710 } 5704 }
5711 5705
5712 ret = IRQ_HANDLED; 5706 ret = IRQ_HANDLED;
5713 5707
5714put_power: 5708put_power:
5715 intel_display_power_put(dev_priv, intel_dp->aux_power_domain); 5709 intel_display_power_put(dev_priv,
5710 intel_aux_power_domain(intel_dig_port));
5716 5711
5717 return ret; 5712 return ret;
5718} 5713}
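
Throughout this file the cached intel_dp->aux_power_domain is replaced by intel_aux_power_domain(dig_port), computed on demand; the field itself is deleted from struct intel_dp further down, and the helper's prototype is added to intel_drv.h. Deriving the domain from the port's AUX channel each time keeps ports whose channel assignment comes from the VBT correct without re-caching. A sketch of the mapping such a helper performs, shown for two channels (the real body lives elsewhere in the series and covers every AUX channel):

        enum intel_display_power_domain
        intel_aux_power_domain(struct intel_digital_port *dig_port)
        {
                switch (dig_port->aux_ch) {
                case AUX_CH_A:
                        return POWER_DOMAIN_AUX_A;
                case AUX_CH_B:
                        return POWER_DOMAIN_AUX_B;
                /* ...one case per AUX channel... */
                default:
                        MISSING_CASE(dig_port->aux_ch);
                        return POWER_DOMAIN_AUX_A;
                }
        }
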
@@ -5743,6 +5738,10 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
5743 intel_attach_force_audio_property(connector); 5738 intel_attach_force_audio_property(connector);
5744 5739
5745 intel_attach_broadcast_rgb_property(connector); 5740 intel_attach_broadcast_rgb_property(connector);
5741 if (HAS_GMCH_DISPLAY(dev_priv))
5742 drm_connector_attach_max_bpc_property(connector, 6, 10);
5743 else if (INTEL_GEN(dev_priv) >= 5)
5744 drm_connector_attach_max_bpc_property(connector, 6, 12);
5746 5745
5747 if (intel_dp_is_edp(intel_dp)) { 5746 if (intel_dp_is_edp(intel_dp)) {
5748 u32 allowed_scalers; 5747 u32 allowed_scalers;
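
The property attached above is the DRM core's "max bpc" knob: floor 6, ceiling 10 on GMCH platforms and 12 on gen5+. Userspace's request lands in drm_connector_state as max_requested_bpc, so the consumer presumably clamps pipe bpp along these lines (the actual clamp site in i915 is outside this hunk):

        /* Illustrative clamp during compute_config. */
        static int clamp_pipe_bpp(const struct drm_connector_state *conn_state,
                                  int bpp)
        {
                if (conn_state->max_requested_bpc)
                        bpp = min(bpp, 3 * (int)conn_state->max_requested_bpc);

                return bpp;
        }
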
@@ -6099,10 +6098,10 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
6099 if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) { 6098 if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
6100 switch (index) { 6099 switch (index) {
6101 case DRRS_HIGH_RR: 6100 case DRRS_HIGH_RR:
6102 intel_dp_set_m_n(intel_crtc, M1_N1); 6101 intel_dp_set_m_n(crtc_state, M1_N1);
6103 break; 6102 break;
6104 case DRRS_LOW_RR: 6103 case DRRS_LOW_RR:
6105 intel_dp_set_m_n(intel_crtc, M2_N2); 6104 intel_dp_set_m_n(crtc_state, M2_N2);
6106 break; 6105 break;
6107 case DRRS_MAX_RR: 6106 case DRRS_MAX_RR:
6108 default: 6107 default:
@@ -6422,6 +6421,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
6422 if (!intel_dp_is_edp(intel_dp)) 6421 if (!intel_dp_is_edp(intel_dp))
6423 return true; 6422 return true;
6424 6423
6424 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work);
6425
6425 /* 6426 /*
6426 * On IBX/CPT we may get here with LVDS already registered. Since the 6427 * On IBX/CPT we may get here with LVDS already registered. Since the
6427 * driver uses the only internal power sequencer available for both 6428 * driver uses the only internal power sequencer available for both
@@ -6514,6 +6515,10 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
6514 intel_connector->panel.backlight.power = intel_edp_backlight_power; 6515 intel_connector->panel.backlight.power = intel_edp_backlight_power;
6515 intel_panel_setup_backlight(connector, pipe); 6516 intel_panel_setup_backlight(connector, pipe);
6516 6517
6518 if (fixed_mode)
6519 drm_connector_init_panel_orientation_property(
6520 connector, fixed_mode->hdisplay, fixed_mode->vdisplay);
6521
6517 return true; 6522 return true;
6518 6523
6519out_vdd_off: 6524out_vdd_off:
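
Two eDP-only initialisations now live in this function: the panel VDD work item, moved here from intel_dp_init_connector() (the matching removal is in the next hunk), and the panel-orientation property, which needs the panel's native size and is therefore guarded on fixed_mode. A condensed sketch of the ordering, with the power-sequencer, EDID and backlight setup elided:

        static bool edp_init_sketch(struct intel_dp *intel_dp,
                                    struct intel_connector *intel_connector,
                                    const struct drm_display_mode *fixed_mode)
        {
                struct drm_connector *connector = &intel_connector->base;

                if (!intel_dp_is_edp(intel_dp))
                        return true;    /* only eDP needs any of this */

                /* Only eDP ever queues this work, so initialise it here
                 * rather than for every DP port. */
                INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work);

                /* PPS, EDID and backlight setup elided. */

                if (fixed_mode)
                        drm_connector_init_panel_orientation_property(
                                connector, fixed_mode->hdisplay, fixed_mode->vdisplay);

                return true;
        }
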
@@ -6624,9 +6629,6 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
6624 6629
6625 intel_dp_aux_init(intel_dp); 6630 intel_dp_aux_init(intel_dp);
6626 6631
6627 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
6628 edp_panel_vdd_work);
6629
6630 intel_connector_attach_encoder(intel_connector, intel_encoder); 6632 intel_connector_attach_encoder(intel_connector, intel_encoder);
6631 6633
6632 if (HAS_DDI(dev_priv)) 6634 if (HAS_DDI(dev_priv))
@@ -6743,6 +6745,7 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
6743 if (port != PORT_A) 6745 if (port != PORT_A)
6744 intel_infoframe_init(intel_dig_port); 6746 intel_infoframe_init(intel_dig_port);
6745 6747
6748 intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
6746 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) 6749 if (!intel_dp_init_connector(intel_dig_port, intel_connector))
6747 goto err_init_connector; 6750 goto err_init_connector;
6748 6751
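
Note the ordering above: the AUX channel is read from the VBT and stored on the digital port before intel_dp_init_connector() runs, so everything downstream (AUX register selection, intel_aux_power_domain()) can trust dig_port->aux_ch. In isolation:

        /* From the hunk; intel_bios_port_aux_ch() is introduced elsewhere
         * in this series and maps a port to its VBT-assigned AUX channel. */
        intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);

        if (!intel_dp_init_connector(intel_dig_port, intel_connector))
                goto err_init_connector;
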
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index a911691dbd0f..4de247ddf05f 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -51,6 +51,7 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
51 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) 51 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
52 return false; 52 return false;
53 53
54 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
54 pipe_config->has_pch_encoder = false; 55 pipe_config->has_pch_encoder = false;
55 bpp = 24; 56 bpp = 24;
56 if (intel_dp->compliance.test_data.bpc) { 57 if (intel_dp->compliance.test_data.bpc) {
@@ -208,12 +209,25 @@ static void intel_mst_pre_pll_enable_dp(struct intel_encoder *encoder,
208 struct intel_digital_port *intel_dig_port = intel_mst->primary; 209 struct intel_digital_port *intel_dig_port = intel_mst->primary;
209 struct intel_dp *intel_dp = &intel_dig_port->dp; 210 struct intel_dp *intel_dp = &intel_dig_port->dp;
210 211
211 if (intel_dp->active_mst_links == 0 && 212 if (intel_dp->active_mst_links == 0)
212 intel_dig_port->base.pre_pll_enable)
213 intel_dig_port->base.pre_pll_enable(&intel_dig_port->base, 213 intel_dig_port->base.pre_pll_enable(&intel_dig_port->base,
214 pipe_config, NULL); 214 pipe_config, NULL);
215} 215}
216 216
217static void intel_mst_post_pll_disable_dp(struct intel_encoder *encoder,
218 const struct intel_crtc_state *old_crtc_state,
219 const struct drm_connector_state *old_conn_state)
220{
221 struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
222 struct intel_digital_port *intel_dig_port = intel_mst->primary;
223 struct intel_dp *intel_dp = &intel_dig_port->dp;
224
225 if (intel_dp->active_mst_links == 0)
226 intel_dig_port->base.post_pll_disable(&intel_dig_port->base,
227 old_crtc_state,
228 old_conn_state);
229}
230
217static void intel_mst_pre_enable_dp(struct intel_encoder *encoder, 231static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
218 const struct intel_crtc_state *pipe_config, 232 const struct intel_crtc_state *pipe_config,
219 const struct drm_connector_state *conn_state) 233 const struct drm_connector_state *conn_state)
@@ -335,24 +349,12 @@ intel_dp_mst_detect(struct drm_connector *connector, bool force)
335 intel_connector->port); 349 intel_connector->port);
336} 350}
337 351
338static void
339intel_dp_mst_connector_destroy(struct drm_connector *connector)
340{
341 struct intel_connector *intel_connector = to_intel_connector(connector);
342
343 if (!IS_ERR_OR_NULL(intel_connector->edid))
344 kfree(intel_connector->edid);
345
346 drm_connector_cleanup(connector);
347 kfree(connector);
348}
349
350static const struct drm_connector_funcs intel_dp_mst_connector_funcs = { 352static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
351 .detect = intel_dp_mst_detect, 353 .detect = intel_dp_mst_detect,
352 .fill_modes = drm_helper_probe_single_connector_modes, 354 .fill_modes = drm_helper_probe_single_connector_modes,
353 .late_register = intel_connector_register, 355 .late_register = intel_connector_register,
354 .early_unregister = intel_connector_unregister, 356 .early_unregister = intel_connector_unregister,
355 .destroy = intel_dp_mst_connector_destroy, 357 .destroy = intel_connector_destroy,
356 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 358 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
357 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, 359 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
358}; 360};
@@ -560,6 +562,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
560 intel_encoder->disable = intel_mst_disable_dp; 562 intel_encoder->disable = intel_mst_disable_dp;
561 intel_encoder->post_disable = intel_mst_post_disable_dp; 563 intel_encoder->post_disable = intel_mst_post_disable_dp;
562 intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp; 564 intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp;
565 intel_encoder->post_pll_disable = intel_mst_post_pll_disable_dp;
563 intel_encoder->pre_enable = intel_mst_pre_enable_dp; 566 intel_encoder->pre_enable = intel_mst_pre_enable_dp;
564 intel_encoder->enable = intel_mst_enable_dp; 567 intel_encoder->enable = intel_mst_enable_dp;
565 intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state; 568 intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state;
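
With the new hook the MST encoder's PLL handling is symmetric: the primary encoder's pre_pll_enable() runs only when the first MST link comes up, and post_pll_disable() only after the last link goes down. The hunk also drops the NULL check before calling pre_pll_enable(), which assumes every platform that creates MST encoders populates both hooks. The resulting wiring, condensed from this file's hunks:

        /* Hook registration after this patch (new line marked). */
        intel_encoder->pre_pll_enable   = intel_mst_pre_pll_enable_dp;
        intel_encoder->pre_enable       = intel_mst_pre_enable_dp;
        intel_encoder->enable           = intel_mst_enable_dp;
        intel_encoder->disable          = intel_mst_disable_dp;
        intel_encoder->post_disable     = intel_mst_post_disable_dp;
        intel_encoder->post_pll_disable = intel_mst_post_pll_disable_dp;  /* new */
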
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c
index 00b3ab656b06..3c7f10d17658 100644
--- a/drivers/gpu/drm/i915/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/intel_dpio_phy.c
@@ -748,7 +748,7 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
748 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET; 748 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
749 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val); 749 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
750 750
751 if (crtc->config->lane_count > 2) { 751 if (crtc_state->lane_count > 2) {
752 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch)); 752 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
753 if (reset) 753 if (reset)
754 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); 754 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
@@ -765,7 +765,7 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
765 val |= DPIO_PCS_CLK_SOFT_RESET; 765 val |= DPIO_PCS_CLK_SOFT_RESET;
766 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val); 766 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
767 767
768 if (crtc->config->lane_count > 2) { 768 if (crtc_state->lane_count > 2) {
769 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch)); 769 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
770 val |= CHV_PCS_REQ_SOFTRESET_EN; 770 val |= CHV_PCS_REQ_SOFTRESET_EN;
771 if (reset) 771 if (reset)
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index e6cac9225536..901e15063b24 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -126,16 +126,16 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
126 126
127/** 127/**
128 * intel_prepare_shared_dpll - call a dpll's prepare hook 128 * intel_prepare_shared_dpll - call a dpll's prepare hook
129 * @crtc: CRTC which has a shared dpll 129 * @crtc_state: CRTC, and its state, which has a shared DPLL
130 * 130 *
131 * This calls the PLL's prepare hook if it has one and if the PLL is not 131 * This calls the PLL's prepare hook if it has one and if the PLL is not
132 * already enabled. The prepare hook is platform specific. 132 * already enabled. The prepare hook is platform specific.
133 */ 133 */
134void intel_prepare_shared_dpll(struct intel_crtc *crtc) 134void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
135{ 135{
136 struct drm_device *dev = crtc->base.dev; 136 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
137 struct drm_i915_private *dev_priv = to_i915(dev); 137 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
138 struct intel_shared_dpll *pll = crtc->config->shared_dpll; 138 struct intel_shared_dpll *pll = crtc_state->shared_dpll;
139 139
140 if (WARN_ON(pll == NULL)) 140 if (WARN_ON(pll == NULL))
141 return; 141 return;
@@ -154,15 +154,15 @@ void intel_prepare_shared_dpll(struct intel_crtc *crtc)
154 154
155/** 155/**
156 * intel_enable_shared_dpll - enable a CRTC's shared DPLL 156 * intel_enable_shared_dpll - enable a CRTC's shared DPLL
157 * @crtc: CRTC which has a shared DPLL 157 * @crtc_state: CRTC, and its state, which has a shared DPLL
158 * 158 *
159 * Enable the shared DPLL used by @crtc. 159 * Enable the shared DPLL used by @crtc.
160 */ 160 */
161void intel_enable_shared_dpll(struct intel_crtc *crtc) 161void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
162{ 162{
163 struct drm_device *dev = crtc->base.dev; 163 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
164 struct drm_i915_private *dev_priv = to_i915(dev); 164 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
165 struct intel_shared_dpll *pll = crtc->config->shared_dpll; 165 struct intel_shared_dpll *pll = crtc_state->shared_dpll;
166 unsigned int crtc_mask = drm_crtc_mask(&crtc->base); 166 unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
167 unsigned int old_mask; 167 unsigned int old_mask;
168 168
@@ -199,14 +199,15 @@ out:
199 199
200/** 200/**
201 * intel_disable_shared_dpll - disable a CRTC's shared DPLL 201 * intel_disable_shared_dpll - disable a CRTC's shared DPLL
202 * @crtc: CRTC which has a shared DPLL 202 * @crtc_state: CRTC, and its state, which has a shared DPLL
203 * 203 *
204 * Disable the shared DPLL used by @crtc. 204 * Disable the shared DPLL used by @crtc.
205 */ 205 */
206void intel_disable_shared_dpll(struct intel_crtc *crtc) 206void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
207{ 207{
208 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
208 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 209 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
209 struct intel_shared_dpll *pll = crtc->config->shared_dpll; 210 struct intel_shared_dpll *pll = crtc_state->shared_dpll;
210 unsigned int crtc_mask = drm_crtc_mask(&crtc->base); 211 unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
211 212
212 /* PCH only available on ILK+ */ 213 /* PCH only available on ILK+ */
@@ -409,14 +410,6 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
409 struct intel_shared_dpll *pll) 410 struct intel_shared_dpll *pll)
410{ 411{
411 const enum intel_dpll_id id = pll->info->id; 412 const enum intel_dpll_id id = pll->info->id;
412 struct drm_device *dev = &dev_priv->drm;
413 struct intel_crtc *crtc;
414
415 /* Make sure no transcoder is still depending on us. */
416 for_each_intel_crtc(dev, crtc) {
417 if (crtc->config->shared_dpll == pll)
418 assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
419 }
420 413
421 I915_WRITE(PCH_DPLL(id), 0); 414 I915_WRITE(PCH_DPLL(id), 0);
422 POSTING_READ(PCH_DPLL(id)); 415 POSTING_READ(PCH_DPLL(id));
@@ -2628,11 +2621,16 @@ static enum port icl_mg_pll_id_to_port(enum intel_dpll_id id)
2628 return id - DPLL_ID_ICL_MGPLL1 + PORT_C; 2621 return id - DPLL_ID_ICL_MGPLL1 + PORT_C;
2629} 2622}
2630 2623
2631static enum intel_dpll_id icl_port_to_mg_pll_id(enum port port) 2624enum intel_dpll_id icl_port_to_mg_pll_id(enum port port)
2632{ 2625{
2633 return port - PORT_C + DPLL_ID_ICL_MGPLL1; 2626 return port - PORT_C + DPLL_ID_ICL_MGPLL1;
2634} 2627}
2635 2628
2629bool intel_dpll_is_combophy(enum intel_dpll_id id)
2630{
2631 return id == DPLL_ID_ICL_DPLL0 || id == DPLL_ID_ICL_DPLL1;
2632}
2633
2636static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc, 2634static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
2637 uint32_t *target_dco_khz, 2635 uint32_t *target_dco_khz,
2638 struct intel_dpll_hw_state *state) 2636 struct intel_dpll_hw_state *state)
@@ -2874,8 +2872,8 @@ static struct intel_shared_dpll *
2874icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, 2872icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
2875 struct intel_encoder *encoder) 2873 struct intel_encoder *encoder)
2876{ 2874{
2877 struct intel_digital_port *intel_dig_port = 2875 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2878 enc_to_dig_port(&encoder->base); 2876 struct intel_digital_port *intel_dig_port;
2879 struct intel_shared_dpll *pll; 2877 struct intel_shared_dpll *pll;
2880 struct intel_dpll_hw_state pll_state = {}; 2878 struct intel_dpll_hw_state pll_state = {};
2881 enum port port = encoder->port; 2879 enum port port = encoder->port;
@@ -2883,18 +2881,21 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
2883 int clock = crtc_state->port_clock; 2881 int clock = crtc_state->port_clock;
2884 bool ret; 2882 bool ret;
2885 2883
2886 switch (port) { 2884 if (intel_port_is_combophy(dev_priv, port)) {
2887 case PORT_A:
2888 case PORT_B:
2889 min = DPLL_ID_ICL_DPLL0; 2885 min = DPLL_ID_ICL_DPLL0;
2890 max = DPLL_ID_ICL_DPLL1; 2886 max = DPLL_ID_ICL_DPLL1;
2891 ret = icl_calc_dpll_state(crtc_state, encoder, clock, 2887 ret = icl_calc_dpll_state(crtc_state, encoder, clock,
2892 &pll_state); 2888 &pll_state);
2893 break; 2889 } else if (intel_port_is_tc(dev_priv, port)) {
2894 case PORT_C: 2890 if (encoder->type == INTEL_OUTPUT_DP_MST) {
2895 case PORT_D: 2891 struct intel_dp_mst_encoder *mst_encoder;
2896 case PORT_E: 2892
2897 case PORT_F: 2893 mst_encoder = enc_to_mst(&encoder->base);
2894 intel_dig_port = mst_encoder->primary;
2895 } else {
2896 intel_dig_port = enc_to_dig_port(&encoder->base);
2897 }
2898
2898 if (intel_dig_port->tc_type == TC_PORT_TBT) { 2899 if (intel_dig_port->tc_type == TC_PORT_TBT) {
2899 min = DPLL_ID_ICL_TBTPLL; 2900 min = DPLL_ID_ICL_TBTPLL;
2900 max = min; 2901 max = min;
@@ -2906,8 +2907,7 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
2906 ret = icl_calc_mg_pll_state(crtc_state, encoder, clock, 2907 ret = icl_calc_mg_pll_state(crtc_state, encoder, clock,
2907 &pll_state); 2908 &pll_state);
2908 } 2909 }
2909 break; 2910 } else {
2910 default:
2911 MISSING_CASE(port); 2911 MISSING_CASE(port);
2912 return NULL; 2912 return NULL;
2913 } 2913 }
@@ -2932,21 +2932,16 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
2932 2932
2933static i915_reg_t icl_pll_id_to_enable_reg(enum intel_dpll_id id) 2933static i915_reg_t icl_pll_id_to_enable_reg(enum intel_dpll_id id)
2934{ 2934{
2935 switch (id) { 2935 if (intel_dpll_is_combophy(id))
2936 default:
2937 MISSING_CASE(id);
2938 /* fall through */
2939 case DPLL_ID_ICL_DPLL0:
2940 case DPLL_ID_ICL_DPLL1:
2941 return CNL_DPLL_ENABLE(id); 2936 return CNL_DPLL_ENABLE(id);
2942 case DPLL_ID_ICL_TBTPLL: 2937 else if (id == DPLL_ID_ICL_TBTPLL)
2943 return TBT_PLL_ENABLE; 2938 return TBT_PLL_ENABLE;
2944 case DPLL_ID_ICL_MGPLL1: 2939 else
2945 case DPLL_ID_ICL_MGPLL2: 2940 /*
2946 case DPLL_ID_ICL_MGPLL3: 2941 * TODO: Make MG_PLL macros use
2947 case DPLL_ID_ICL_MGPLL4: 2942 * tc port id instead of port id
2943 */
2948 return MG_PLL_ENABLE(icl_mg_pll_id_to_port(id)); 2944 return MG_PLL_ENABLE(icl_mg_pll_id_to_port(id));
2949 }
2950} 2945}
2951 2946
2952static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv, 2947static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
@@ -2965,17 +2960,11 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
2965 if (!(val & PLL_ENABLE)) 2960 if (!(val & PLL_ENABLE))
2966 goto out; 2961 goto out;
2967 2962
2968 switch (id) { 2963 if (intel_dpll_is_combophy(id) ||
2969 case DPLL_ID_ICL_DPLL0: 2964 id == DPLL_ID_ICL_TBTPLL) {
2970 case DPLL_ID_ICL_DPLL1:
2971 case DPLL_ID_ICL_TBTPLL:
2972 hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id)); 2965 hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
2973 hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id)); 2966 hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
2974 break; 2967 } else {
2975 case DPLL_ID_ICL_MGPLL1:
2976 case DPLL_ID_ICL_MGPLL2:
2977 case DPLL_ID_ICL_MGPLL3:
2978 case DPLL_ID_ICL_MGPLL4:
2979 port = icl_mg_pll_id_to_port(id); 2968 port = icl_mg_pll_id_to_port(id);
2980 hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(port)); 2969 hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(port));
2981 hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK; 2970 hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
@@ -3013,9 +3002,6 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
3013 3002
3014 hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask; 3003 hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
3015 hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask; 3004 hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
3016 break;
3017 default:
3018 MISSING_CASE(id);
3019 } 3005 }
3020 3006
3021 ret = true; 3007 ret = true;
@@ -3104,21 +3090,10 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv,
3104 PLL_POWER_STATE, 1)) 3090 PLL_POWER_STATE, 1))
3105 DRM_ERROR("PLL %d Power not enabled\n", id); 3091 DRM_ERROR("PLL %d Power not enabled\n", id);
3106 3092
3107 switch (id) { 3093 if (intel_dpll_is_combophy(id) || id == DPLL_ID_ICL_TBTPLL)
3108 case DPLL_ID_ICL_DPLL0:
3109 case DPLL_ID_ICL_DPLL1:
3110 case DPLL_ID_ICL_TBTPLL:
3111 icl_dpll_write(dev_priv, pll); 3094 icl_dpll_write(dev_priv, pll);
3112 break; 3095 else
3113 case DPLL_ID_ICL_MGPLL1:
3114 case DPLL_ID_ICL_MGPLL2:
3115 case DPLL_ID_ICL_MGPLL3:
3116 case DPLL_ID_ICL_MGPLL4:
3117 icl_mg_pll_write(dev_priv, pll); 3096 icl_mg_pll_write(dev_priv, pll);
3118 break;
3119 default:
3120 MISSING_CASE(id);
3121 }
3122 3097
3123 /* 3098 /*
3124 * DVFS pre sequence would be here, but in our driver the cdclk code 3099 * DVFS pre sequence would be here, but in our driver the cdclk code
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h
index bf0de8a4dc63..a033d8f06d4a 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h
@@ -334,9 +334,9 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
334void intel_release_shared_dpll(struct intel_shared_dpll *dpll, 334void intel_release_shared_dpll(struct intel_shared_dpll *dpll,
335 struct intel_crtc *crtc, 335 struct intel_crtc *crtc,
336 struct drm_atomic_state *state); 336 struct drm_atomic_state *state);
337void intel_prepare_shared_dpll(struct intel_crtc *crtc); 337void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state);
338void intel_enable_shared_dpll(struct intel_crtc *crtc); 338void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state);
339void intel_disable_shared_dpll(struct intel_crtc *crtc); 339void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state);
340void intel_shared_dpll_swap_state(struct drm_atomic_state *state); 340void intel_shared_dpll_swap_state(struct drm_atomic_state *state);
341void intel_shared_dpll_init(struct drm_device *dev); 341void intel_shared_dpll_init(struct drm_device *dev);
342 342
@@ -345,5 +345,7 @@ void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
345int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv, 345int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv,
346 uint32_t pll_id); 346 uint32_t pll_id);
347int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv); 347int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv);
348enum intel_dpll_id icl_port_to_mg_pll_id(enum port port);
349bool intel_dpll_is_combophy(enum intel_dpll_id id);
348 350
349#endif /* _INTEL_DPLL_MGR_H_ */ 351#endif /* _INTEL_DPLL_MGR_H_ */
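
The two prototypes added above let callers classify an ICL PLL by id instead of open-coding switch statements; the icl_pll_id_to_enable_reg() hunk earlier in this file is the canonical user, reduced here to its new shape:

        static i915_reg_t icl_pll_id_to_enable_reg(enum intel_dpll_id id)
        {
                if (intel_dpll_is_combophy(id))
                        return CNL_DPLL_ENABLE(id);
                else if (id == DPLL_ID_ICL_TBTPLL)
                        return TBT_PLL_ENABLE;

                /* Everything else is an MG PLL on a TypeC port. */
                return MG_PLL_ENABLE(icl_mg_pll_id_to_port(id));
        }
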
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 8b298e5f012d..f575ba2a59da 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -381,6 +381,15 @@ struct intel_hdcp_shim {
381 bool *hdcp_capable); 381 bool *hdcp_capable);
382}; 382};
383 383
384struct intel_hdcp {
385 const struct intel_hdcp_shim *shim;
386 /* Mutex for hdcp state of the connector */
387 struct mutex mutex;
388 u64 value;
389 struct delayed_work check_work;
390 struct work_struct prop_work;
391};
392
384struct intel_connector { 393struct intel_connector {
385 struct drm_connector base; 394 struct drm_connector base;
386 /* 395 /*
@@ -413,11 +422,7 @@ struct intel_connector {
413 /* Work struct to schedule a uevent on link train failure */ 422 /* Work struct to schedule a uevent on link train failure */
414 struct work_struct modeset_retry_work; 423 struct work_struct modeset_retry_work;
415 424
416 const struct intel_hdcp_shim *hdcp_shim; 425 struct intel_hdcp hdcp;
417 struct mutex hdcp_mutex;
418 uint64_t hdcp_value; /* protected by hdcp_mutex */
419 struct delayed_work hdcp_check_work;
420 struct work_struct hdcp_prop_work;
421}; 426};
422 427
423struct intel_digital_connector_state { 428struct intel_digital_connector_state {
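
With the five loose fields folded into struct intel_hdcp, callers reach HDCP state through connector->hdcp. An illustrative accessor under the new layout (the matching intel_hdcp.c conversion is outside this section; DRM_MODE_CONTENT_PROTECTION_ENABLED is the core uapi value kept in hdcp->value):

        static bool hdcp_is_enabled(struct intel_connector *connector)
        {
                struct intel_hdcp *hdcp = &connector->hdcp;
                bool enabled;

                mutex_lock(&hdcp->mutex);       /* hdcp->value is lock-protected */
                enabled = hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED;
                mutex_unlock(&hdcp->mutex);

                return enabled;
        }
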
@@ -539,6 +544,26 @@ struct intel_plane_state {
539 */ 544 */
540 int scaler_id; 545 int scaler_id;
541 546
547 /*
548 * linked_plane:
549 *
550 * ICL planar formats require 2 planes that are updated as pairs.
551 * This member is used to make sure the other plane is also updated
552 * when required, and for update_slave() to find the correct
553 * plane_state to pass as argument.
554 */
555 struct intel_plane *linked_plane;
556
557 /*
558 * slave:
559 * If set, don't update this plane directly; program it during atomic
560 * commit from the linked plane's state, via the update_slave() callback.
561 *
562 * It's also used by the watermark code to ignore wm calculations on
563 * this plane. They're calculated by the linked plane's wm code.
564 */
565 u32 slave;
566
542 struct drm_intel_sprite_colorkey ckey; 567 struct drm_intel_sprite_colorkey ckey;
543}; 568};
544 569
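
A sketch of how the commit path is expected to consume the two new fields, consistent with the comments above and with the update_slave() hook added a little further down (the real consumer lands in the atomic plane code, outside this section): a slave plane is never visible in its own right, so it is programmed from its master's state:

        static void update_one_plane_sketch(struct intel_atomic_state *state,
                                            struct intel_crtc_state *crtc_state,
                                            struct intel_plane *plane,
                                            struct intel_plane_state *plane_state)
        {
                if (plane_state->base.visible) {
                        plane->update_plane(plane, crtc_state, plane_state);
                } else if (plane_state->slave) {
                        /* linked_plane on a slave points back at its master. */
                        struct intel_plane *master = plane_state->linked_plane;
                        struct intel_plane_state *master_state =
                                intel_atomic_get_new_plane_state(state, master);

                        plane->update_slave(plane, crtc_state, master_state);
                }
        }
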
@@ -712,6 +737,13 @@ struct intel_crtc_wm_state {
712 bool need_postvbl_update; 737 bool need_postvbl_update;
713}; 738};
714 739
740enum intel_output_format {
741 INTEL_OUTPUT_FORMAT_INVALID,
742 INTEL_OUTPUT_FORMAT_RGB,
743 INTEL_OUTPUT_FORMAT_YCBCR420,
744 INTEL_OUTPUT_FORMAT_YCBCR444,
745};
746
715struct intel_crtc_state { 747struct intel_crtc_state {
716 struct drm_crtc_state base; 748 struct drm_crtc_state base;
717 749
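
The enum above replaces the single ycbcr420 bool (see the next hunk), letting state computation and readout distinguish RGB, 4:2:0 and 4:4:4 output. An illustrative consumer:

        static const char *output_format_str(enum intel_output_format format)
        {
                switch (format) {
                case INTEL_OUTPUT_FORMAT_RGB:
                        return "RGB";
                case INTEL_OUTPUT_FORMAT_YCBCR420:
                        return "YCBCR4:2:0";
                case INTEL_OUTPUT_FORMAT_YCBCR444:
                        return "YCBCR4:4:4";
                default:
                        return "invalid";
                }
        }
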
@@ -899,8 +931,11 @@ struct intel_crtc_state {
899 /* HDMI High TMDS char rate ratio */ 931 /* HDMI High TMDS char rate ratio */
900 bool hdmi_high_tmds_clock_ratio; 932 bool hdmi_high_tmds_clock_ratio;
901 933
902 /* output format is YCBCR 4:2:0 */ 934 /* Output format RGB/YCBCR etc */
903 bool ycbcr420; 935 enum intel_output_format output_format;
936
937 /* Output down scaling is done in LSPCON device */
938 bool lspcon_downsampling;
904}; 939};
905 940
906struct intel_crtc { 941struct intel_crtc {
@@ -973,6 +1008,9 @@ struct intel_plane {
973 void (*update_plane)(struct intel_plane *plane, 1008 void (*update_plane)(struct intel_plane *plane,
974 const struct intel_crtc_state *crtc_state, 1009 const struct intel_crtc_state *crtc_state,
975 const struct intel_plane_state *plane_state); 1010 const struct intel_plane_state *plane_state);
1011 void (*update_slave)(struct intel_plane *plane,
1012 const struct intel_crtc_state *crtc_state,
1013 const struct intel_plane_state *plane_state);
976 void (*disable_plane)(struct intel_plane *plane, 1014 void (*disable_plane)(struct intel_plane *plane,
977 struct intel_crtc *crtc); 1015 struct intel_crtc *crtc);
978 bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe); 1016 bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe);
@@ -1070,13 +1108,13 @@ struct intel_dp {
1070 bool link_mst; 1108 bool link_mst;
1071 bool link_trained; 1109 bool link_trained;
1072 bool has_audio; 1110 bool has_audio;
1073 bool detect_done;
1074 bool reset_link_params; 1111 bool reset_link_params;
1075 enum aux_ch aux_ch;
1076 uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; 1112 uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
1077 uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE]; 1113 uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
1078 uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; 1114 uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
1079 uint8_t edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE]; 1115 uint8_t edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
1116 u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE];
1117 u8 fec_capable;
1080 /* source rates */ 1118 /* source rates */
1081 int num_source_rates; 1119 int num_source_rates;
1082 const int *source_rates; 1120 const int *source_rates;
@@ -1094,7 +1132,6 @@ struct intel_dp {
1094 /* sink or branch descriptor */ 1132 /* sink or branch descriptor */
1095 struct drm_dp_desc desc; 1133 struct drm_dp_desc desc;
1096 struct drm_dp_aux aux; 1134 struct drm_dp_aux aux;
1097 enum intel_display_power_domain aux_power_domain;
1098 uint8_t train_set[4]; 1135 uint8_t train_set[4];
1099 int panel_power_up_delay; 1136 int panel_power_up_delay;
1100 int panel_power_down_delay; 1137 int panel_power_down_delay;
@@ -1156,9 +1193,15 @@ struct intel_dp {
1156 struct intel_dp_compliance compliance; 1193 struct intel_dp_compliance compliance;
1157}; 1194};
1158 1195
1196enum lspcon_vendor {
1197 LSPCON_VENDOR_MCA,
1198 LSPCON_VENDOR_PARADE
1199};
1200
1159struct intel_lspcon { 1201struct intel_lspcon {
1160 bool active; 1202 bool active;
1161 enum drm_lspcon_mode mode; 1203 enum drm_lspcon_mode mode;
1204 enum lspcon_vendor vendor;
1162}; 1205};
1163 1206
1164struct intel_digital_port { 1207struct intel_digital_port {
@@ -1170,18 +1213,20 @@ struct intel_digital_port {
1170 enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool); 1213 enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool);
1171 bool release_cl2_override; 1214 bool release_cl2_override;
1172 uint8_t max_lanes; 1215 uint8_t max_lanes;
1216 /* Used for DP ports, and for ICL+ TypeC/DP and TypeC/HDMI ports. */
1217 enum aux_ch aux_ch;
1173 enum intel_display_power_domain ddi_io_power_domain; 1218 enum intel_display_power_domain ddi_io_power_domain;
1174 enum tc_port_type tc_type; 1219 enum tc_port_type tc_type;
1175 1220
1176 void (*write_infoframe)(struct drm_encoder *encoder, 1221 void (*write_infoframe)(struct intel_encoder *encoder,
1177 const struct intel_crtc_state *crtc_state, 1222 const struct intel_crtc_state *crtc_state,
1178 unsigned int type, 1223 unsigned int type,
1179 const void *frame, ssize_t len); 1224 const void *frame, ssize_t len);
1180 void (*set_infoframes)(struct drm_encoder *encoder, 1225 void (*set_infoframes)(struct intel_encoder *encoder,
1181 bool enable, 1226 bool enable,
1182 const struct intel_crtc_state *crtc_state, 1227 const struct intel_crtc_state *crtc_state,
1183 const struct drm_connector_state *conn_state); 1228 const struct drm_connector_state *conn_state);
1184 bool (*infoframe_enabled)(struct drm_encoder *encoder, 1229 bool (*infoframe_enabled)(struct intel_encoder *encoder,
1185 const struct intel_crtc_state *pipe_config); 1230 const struct intel_crtc_state *pipe_config);
1186}; 1231};
1187 1232
@@ -1281,6 +1326,12 @@ enc_to_dig_port(struct drm_encoder *encoder)
1281 return NULL; 1326 return NULL;
1282} 1327}
1283 1328
1329static inline struct intel_digital_port *
1330conn_to_dig_port(struct intel_connector *connector)
1331{
1332 return enc_to_dig_port(&intel_attached_encoder(&connector->base)->base);
1333}
1334
1284static inline struct intel_dp_mst_encoder * 1335static inline struct intel_dp_mst_encoder *
1285enc_to_mst(struct drm_encoder *encoder) 1336enc_to_mst(struct drm_encoder *encoder)
1286{ 1337{
@@ -1306,6 +1357,12 @@ static inline bool intel_encoder_is_dp(struct intel_encoder *encoder)
1306 } 1357 }
1307} 1358}
1308 1359
1360static inline struct intel_lspcon *
1361enc_to_intel_lspcon(struct drm_encoder *encoder)
1362{
1363 return &enc_to_dig_port(encoder)->lspcon;
1364}
1365
1309static inline struct intel_digital_port * 1366static inline struct intel_digital_port *
1310dp_to_dig_port(struct intel_dp *intel_dp) 1367dp_to_dig_port(struct intel_dp *intel_dp)
1311{ 1368{
@@ -1331,6 +1388,27 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
1331} 1388}
1332 1389
1333static inline struct intel_plane_state * 1390static inline struct intel_plane_state *
1391intel_atomic_get_plane_state(struct intel_atomic_state *state,
1392 struct intel_plane *plane)
1393{
1394 struct drm_plane_state *ret =
1395 drm_atomic_get_plane_state(&state->base, &plane->base);
1396
1397 if (IS_ERR(ret))
1398 return ERR_CAST(ret);
1399
1400 return to_intel_plane_state(ret);
1401}
1402
1403static inline struct intel_plane_state *
1404intel_atomic_get_old_plane_state(struct intel_atomic_state *state,
1405 struct intel_plane *plane)
1406{
1407 return to_intel_plane_state(drm_atomic_get_old_plane_state(&state->base,
1408 &plane->base));
1409}
1410
1411static inline struct intel_plane_state *
1334intel_atomic_get_new_plane_state(struct intel_atomic_state *state, 1412intel_atomic_get_new_plane_state(struct intel_atomic_state *state,
1335 struct intel_plane *plane) 1413 struct intel_plane *plane)
1336{ 1414{
@@ -1444,6 +1522,7 @@ void icl_map_plls_to_ports(struct drm_crtc *crtc,
1444void icl_unmap_plls_to_ports(struct drm_crtc *crtc, 1522void icl_unmap_plls_to_ports(struct drm_crtc *crtc,
1445 struct intel_crtc_state *crtc_state, 1523 struct intel_crtc_state *crtc_state,
1446 struct drm_atomic_state *old_state); 1524 struct drm_atomic_state *old_state);
1525void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
1447 1526
1448unsigned int intel_fb_align_height(const struct drm_framebuffer *fb, 1527unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
1449 int color_plane, unsigned int height); 1528 int color_plane, unsigned int height);
@@ -1488,7 +1567,6 @@ void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
1488void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe); 1567void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
1489void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe); 1568void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
1490enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc); 1569enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc);
1491void intel_update_rawclk(struct drm_i915_private *dev_priv);
1492int vlv_get_hpll_vco(struct drm_i915_private *dev_priv); 1570int vlv_get_hpll_vco(struct drm_i915_private *dev_priv);
1493int vlv_get_cck_clock(struct drm_i915_private *dev_priv, 1571int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
1494 const char *name, u32 reg, int ref_freq); 1572 const char *name, u32 reg, int ref_freq);
@@ -1509,20 +1587,12 @@ void intel_mark_idle(struct drm_i915_private *dev_priv);
1509int intel_display_suspend(struct drm_device *dev); 1587int intel_display_suspend(struct drm_device *dev);
1510void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv); 1588void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv);
1511void intel_encoder_destroy(struct drm_encoder *encoder); 1589void intel_encoder_destroy(struct drm_encoder *encoder);
1512int intel_connector_init(struct intel_connector *);
1513struct intel_connector *intel_connector_alloc(void);
1514void intel_connector_free(struct intel_connector *connector);
1515bool intel_connector_get_hw_state(struct intel_connector *connector);
1516void intel_connector_attach_encoder(struct intel_connector *connector,
1517 struct intel_encoder *encoder);
1518struct drm_display_mode * 1590struct drm_display_mode *
1519intel_encoder_current_mode(struct intel_encoder *encoder); 1591intel_encoder_current_mode(struct intel_encoder *encoder);
1520bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port); 1592bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port);
1521bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port); 1593bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port);
1522enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, 1594enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv,
1523 enum port port); 1595 enum port port);
1524
1525enum pipe intel_get_pipe_from_connector(struct intel_connector *connector);
1526int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data, 1596int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
1527 struct drm_file *file_priv); 1597 struct drm_file *file_priv);
1528enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, 1598enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
@@ -1628,9 +1698,11 @@ void bxt_enable_dc9(struct drm_i915_private *dev_priv);
1628void bxt_disable_dc9(struct drm_i915_private *dev_priv); 1698void bxt_disable_dc9(struct drm_i915_private *dev_priv);
1629void gen9_enable_dc5(struct drm_i915_private *dev_priv); 1699void gen9_enable_dc5(struct drm_i915_private *dev_priv);
1630unsigned int skl_cdclk_get_vco(unsigned int freq); 1700unsigned int skl_cdclk_get_vco(unsigned int freq);
1701void skl_enable_dc6(struct drm_i915_private *dev_priv);
1631void intel_dp_get_m_n(struct intel_crtc *crtc, 1702void intel_dp_get_m_n(struct intel_crtc *crtc,
1632 struct intel_crtc_state *pipe_config); 1703 struct intel_crtc_state *pipe_config);
1633void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n); 1704void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state,
1705 enum link_m_n_set m_n);
1634int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n); 1706int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
1635bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock, 1707bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
1636 struct dpll *best_clock); 1708 struct dpll *best_clock);
@@ -1641,6 +1713,8 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state);
1641void hsw_enable_ips(const struct intel_crtc_state *crtc_state); 1713void hsw_enable_ips(const struct intel_crtc_state *crtc_state);
1642void hsw_disable_ips(const struct intel_crtc_state *crtc_state); 1714void hsw_disable_ips(const struct intel_crtc_state *crtc_state);
1643enum intel_display_power_domain intel_port_to_power_domain(enum port port); 1715enum intel_display_power_domain intel_port_to_power_domain(enum port port);
1716enum intel_display_power_domain
1717intel_aux_power_domain(struct intel_digital_port *dig_port);
1644void intel_mode_from_pipe_config(struct drm_display_mode *mode, 1718void intel_mode_from_pipe_config(struct drm_display_mode *mode,
1645 struct intel_crtc_state *pipe_config); 1719 struct intel_crtc_state *pipe_config);
1646void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc, 1720void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
@@ -1670,6 +1744,24 @@ unsigned int i9xx_plane_max_stride(struct intel_plane *plane,
1670 u32 pixel_format, u64 modifier, 1744 u32 pixel_format, u64 modifier,
1671 unsigned int rotation); 1745 unsigned int rotation);
1672 1746
1747/* intel_connector.c */
1748int intel_connector_init(struct intel_connector *connector);
1749struct intel_connector *intel_connector_alloc(void);
1750void intel_connector_free(struct intel_connector *connector);
1751void intel_connector_destroy(struct drm_connector *connector);
1752int intel_connector_register(struct drm_connector *connector);
1753void intel_connector_unregister(struct drm_connector *connector);
1754void intel_connector_attach_encoder(struct intel_connector *connector,
1755 struct intel_encoder *encoder);
1756bool intel_connector_get_hw_state(struct intel_connector *connector);
1757enum pipe intel_connector_get_pipe(struct intel_connector *connector);
1758int intel_connector_update_modes(struct drm_connector *connector,
1759 struct edid *edid);
1760int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
1761void intel_attach_force_audio_property(struct drm_connector *connector);
1762void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
1763void intel_attach_aspect_ratio_property(struct drm_connector *connector);
1764
1673/* intel_csr.c */ 1765/* intel_csr.c */
1674void intel_csr_ucode_init(struct drm_i915_private *); 1766void intel_csr_ucode_init(struct drm_i915_private *);
1675void intel_csr_load_program(struct drm_i915_private *); 1767void intel_csr_load_program(struct drm_i915_private *);
@@ -1728,9 +1820,6 @@ void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
1728 unsigned int frontbuffer_bits); 1820 unsigned int frontbuffer_bits);
1729void intel_edp_drrs_flush(struct drm_i915_private *dev_priv, 1821void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
1730 unsigned int frontbuffer_bits); 1822 unsigned int frontbuffer_bits);
1731void icl_program_mg_dp_mode(struct intel_dp *intel_dp);
1732void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port);
1733void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port);
1734 1823
1735void 1824void
1736intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, 1825intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
@@ -1748,6 +1837,10 @@ bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
1748bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp); 1837bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp);
1749bool 1838bool
1750intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]); 1839intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]);
1840uint16_t intel_dp_dsc_get_output_bpp(int link_clock, uint8_t lane_count,
1841 int mode_clock, int mode_hdisplay);
1842uint8_t intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock,
1843 int mode_hdisplay);
1751 1844
1752static inline unsigned int intel_dp_unused_lane_mask(int lane_count) 1845static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
1753{ 1846{
@@ -1768,6 +1861,9 @@ void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
1768/* vlv_dsi.c */ 1861/* vlv_dsi.c */
1769void vlv_dsi_init(struct drm_i915_private *dev_priv); 1862void vlv_dsi_init(struct drm_i915_private *dev_priv);
1770 1863
1864/* icl_dsi.c */
1865void icl_dsi_init(struct drm_i915_private *dev_priv);
1866
1771/* intel_dsi_dcs_backlight.c */ 1867/* intel_dsi_dcs_backlight.c */
1772int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector); 1868int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector);
1773 1869
@@ -1858,7 +1954,6 @@ bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
1858void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable); 1954void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
1859void intel_infoframe_init(struct intel_digital_port *intel_dig_port); 1955void intel_infoframe_init(struct intel_digital_port *intel_dig_port);
1860 1956
1861
1862/* intel_lvds.c */ 1957/* intel_lvds.c */
1863bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv, 1958bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
1864 i915_reg_t lvds_reg, enum pipe *pipe); 1959 i915_reg_t lvds_reg, enum pipe *pipe);
@@ -1866,19 +1961,9 @@ void intel_lvds_init(struct drm_i915_private *dev_priv);
1866struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev); 1961struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev);
1867bool intel_is_dual_link_lvds(struct drm_device *dev); 1962bool intel_is_dual_link_lvds(struct drm_device *dev);
1868 1963
1869
1870/* intel_modes.c */
1871int intel_connector_update_modes(struct drm_connector *connector,
1872 struct edid *edid);
1873int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
1874void intel_attach_force_audio_property(struct drm_connector *connector);
1875void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
1876void intel_attach_aspect_ratio_property(struct drm_connector *connector);
1877
1878
1879/* intel_overlay.c */ 1964/* intel_overlay.c */
1880void intel_setup_overlay(struct drm_i915_private *dev_priv); 1965void intel_overlay_setup(struct drm_i915_private *dev_priv);
1881void intel_cleanup_overlay(struct drm_i915_private *dev_priv); 1966void intel_overlay_cleanup(struct drm_i915_private *dev_priv);
1882int intel_overlay_switch_off(struct intel_overlay *overlay); 1967int intel_overlay_switch_off(struct intel_overlay *overlay);
1883int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data, 1968int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
1884 struct drm_file *file_priv); 1969 struct drm_file *file_priv);
@@ -1907,7 +1992,6 @@ int intel_panel_setup_backlight(struct drm_connector *connector,
1907void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state, 1992void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
1908 const struct drm_connector_state *conn_state); 1993 const struct drm_connector_state *conn_state);
1909void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state); 1994void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state);
1910void intel_panel_destroy_backlight(struct drm_connector *connector);
1911extern struct drm_display_mode *intel_find_panel_downclock( 1995extern struct drm_display_mode *intel_find_panel_downclock(
1912 struct drm_i915_private *dev_priv, 1996 struct drm_i915_private *dev_priv,
1913 struct drm_display_mode *fixed_mode, 1997 struct drm_display_mode *fixed_mode,
@@ -1936,6 +2020,7 @@ int intel_hdcp_enable(struct intel_connector *connector);
1936int intel_hdcp_disable(struct intel_connector *connector); 2020int intel_hdcp_disable(struct intel_connector *connector);
1937int intel_hdcp_check_link(struct intel_connector *connector); 2021int intel_hdcp_check_link(struct intel_connector *connector);
1938bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port); 2022bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);
2023bool intel_hdcp_capable(struct intel_connector *connector);
1939 2024
1940/* intel_psr.c */ 2025/* intel_psr.c */
1941#define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support) 2026#define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support)
@@ -1962,11 +2047,16 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp);
1962int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state, 2047int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
1963 u32 *out_value); 2048 u32 *out_value);
1964 2049
2050/* intel_quirks.c */
2051void intel_init_quirks(struct drm_i915_private *dev_priv);
2052
1965/* intel_runtime_pm.c */ 2053/* intel_runtime_pm.c */
1966int intel_power_domains_init(struct drm_i915_private *); 2054int intel_power_domains_init(struct drm_i915_private *);
1967void intel_power_domains_cleanup(struct drm_i915_private *dev_priv); 2055void intel_power_domains_cleanup(struct drm_i915_private *dev_priv);
1968void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume); 2056void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
1969void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv); 2057void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv);
2058void icl_display_core_init(struct drm_i915_private *dev_priv, bool resume);
2059void icl_display_core_uninit(struct drm_i915_private *dev_priv);
1970void intel_power_domains_enable(struct drm_i915_private *dev_priv); 2060void intel_power_domains_enable(struct drm_i915_private *dev_priv);
1971void intel_power_domains_disable(struct drm_i915_private *dev_priv); 2061void intel_power_domains_disable(struct drm_i915_private *dev_priv);
1972 2062
@@ -2101,10 +2191,9 @@ int intel_enable_sagv(struct drm_i915_private *dev_priv);
2101int intel_disable_sagv(struct drm_i915_private *dev_priv); 2191int intel_disable_sagv(struct drm_i915_private *dev_priv);
2102bool skl_wm_level_equals(const struct skl_wm_level *l1, 2192bool skl_wm_level_equals(const struct skl_wm_level *l1,
2103 const struct skl_wm_level *l2); 2193 const struct skl_wm_level *l2);
2104bool skl_ddb_allocation_overlaps(struct drm_i915_private *dev_priv, 2194bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
2105 const struct skl_ddb_entry **entries, 2195 const struct skl_ddb_entry entries[],
2106 const struct skl_ddb_entry *ddb, 2196 int num_entries, int ignore_idx);
2107 int ignore);
2108bool ilk_disable_lp_wm(struct drm_device *dev); 2197bool ilk_disable_lp_wm(struct drm_device *dev);
2109int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc, 2198int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
2110 struct intel_crtc_state *cstate); 2199 struct intel_crtc_state *cstate);
@@ -2127,23 +2216,29 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
2127 struct drm_file *file_priv); 2216 struct drm_file *file_priv);
2128void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state); 2217void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state);
2129void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state); 2218void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state);
2130void skl_update_plane(struct intel_plane *plane,
2131 const struct intel_crtc_state *crtc_state,
2132 const struct intel_plane_state *plane_state);
2133void skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc);
2134bool skl_plane_get_hw_state(struct intel_plane *plane, enum pipe *pipe);
2135bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
2136 enum pipe pipe, enum plane_id plane_id);
2137bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
2138 enum pipe pipe, enum plane_id plane_id);
2139unsigned int skl_plane_max_stride(struct intel_plane *plane,
2140 u32 pixel_format, u64 modifier,
2141 unsigned int rotation);
2142int skl_plane_check(struct intel_crtc_state *crtc_state,
2143 struct intel_plane_state *plane_state);
2144int intel_plane_check_stride(const struct intel_plane_state *plane_state); 2219int intel_plane_check_stride(const struct intel_plane_state *plane_state);
2145int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state); 2220int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state);
2146int chv_plane_check_rotation(const struct intel_plane_state *plane_state); 2221int chv_plane_check_rotation(const struct intel_plane_state *plane_state);
2222struct intel_plane *
2223skl_universal_plane_create(struct drm_i915_private *dev_priv,
2224 enum pipe pipe, enum plane_id plane_id);
2225
2226static inline bool icl_is_nv12_y_plane(enum plane_id id)
2227{
2228	/* No gen check needed, these planes are only available on gen11 */
2229 if (id == PLANE_SPRITE4 || id == PLANE_SPRITE5)
2230 return true;
2231
2232 return false;
2233}
2234
2235static inline bool icl_is_hdr_plane(struct intel_plane *plane)
2236{
2237 if (INTEL_GEN(to_i915(plane->base.dev)) < 11)
2238 return false;
2239
2240 return plane->id < PLANE_SPRITE2;
2241}
2147 2242
2148/* intel_tv.c */ 2243/* intel_tv.c */
2149void intel_tv_init(struct drm_i915_private *dev_priv); 2244void intel_tv_init(struct drm_i915_private *dev_priv);
@@ -2185,11 +2280,16 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
2185 struct intel_crtc_state *crtc_state); 2280 struct intel_crtc_state *crtc_state);
2186 2281
2187/* intel_atomic_plane.c */ 2282/* intel_atomic_plane.c */
2188struct intel_plane_state *intel_create_plane_state(struct drm_plane *plane); 2283struct intel_plane *intel_plane_alloc(void);
2284void intel_plane_free(struct intel_plane *plane);
2189struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane); 2285struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane);
2190void intel_plane_destroy_state(struct drm_plane *plane, 2286void intel_plane_destroy_state(struct drm_plane *plane,
2191 struct drm_plane_state *state); 2287 struct drm_plane_state *state);
2192extern const struct drm_plane_helper_funcs intel_plane_helper_funcs; 2288extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
2289void intel_update_planes_on_crtc(struct intel_atomic_state *old_state,
2290 struct intel_crtc *crtc,
2291 struct intel_crtc_state *old_crtc_state,
2292 struct intel_crtc_state *new_crtc_state);
2193int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state, 2293int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
2194 struct intel_crtc_state *crtc_state, 2294 struct intel_crtc_state *crtc_state,
2195 const struct intel_plane_state *old_plane_state, 2295 const struct intel_plane_state *old_plane_state,
@@ -2205,6 +2305,18 @@ void intel_color_load_luts(struct drm_crtc_state *crtc_state);
2205bool lspcon_init(struct intel_digital_port *intel_dig_port); 2305bool lspcon_init(struct intel_digital_port *intel_dig_port);
2206void lspcon_resume(struct intel_lspcon *lspcon); 2306void lspcon_resume(struct intel_lspcon *lspcon);
2207void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon); 2307void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon);
2308void lspcon_write_infoframe(struct intel_encoder *encoder,
2309 const struct intel_crtc_state *crtc_state,
2310 unsigned int type,
2311 const void *buf, ssize_t len);
2312void lspcon_set_infoframes(struct intel_encoder *encoder,
2313 bool enable,
2314 const struct intel_crtc_state *crtc_state,
2315 const struct drm_connector_state *conn_state);
2316bool lspcon_infoframe_enabled(struct intel_encoder *encoder,
2317 const struct intel_crtc_state *pipe_config);
2318void lspcon_ycbcr420_config(struct drm_connector *connector,
2319 struct intel_crtc_state *crtc_state);
2208 2320
2209/* intel_pipe_crc.c */ 2321/* intel_pipe_crc.c */
2210#ifdef CONFIG_DEBUG_FS 2322#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
new file mode 100644
index 000000000000..5fec02aceaed
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -0,0 +1,128 @@
1// SPDX-License-Identifier: MIT
2/*
3 * Copyright © 2018 Intel Corporation
4 */
5
6#include <drm/drm_mipi_dsi.h>
7#include "intel_dsi.h"
8
9int intel_dsi_bitrate(const struct intel_dsi *intel_dsi)
10{
11 int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
12
13 if (WARN_ON(bpp < 0))
14 bpp = 16;
15
16 return intel_dsi->pclk * bpp / intel_dsi->lane_count;
17}
18
19int intel_dsi_tlpx_ns(const struct intel_dsi *intel_dsi)
20{
21 switch (intel_dsi->escape_clk_div) {
22 default:
23 case 0:
24 return 50;
25 case 1:
26 return 100;
27 case 2:
28 return 200;
29 }
30}
31
32int intel_dsi_get_modes(struct drm_connector *connector)
33{
34 struct intel_connector *intel_connector = to_intel_connector(connector);
35 struct drm_display_mode *mode;
36
37 DRM_DEBUG_KMS("\n");
38
39 if (!intel_connector->panel.fixed_mode) {
40 DRM_DEBUG_KMS("no fixed mode\n");
41 return 0;
42 }
43
44 mode = drm_mode_duplicate(connector->dev,
45 intel_connector->panel.fixed_mode);
46 if (!mode) {
47 DRM_DEBUG_KMS("drm_mode_duplicate failed\n");
48 return 0;
49 }
50
51 drm_mode_probed_add(connector, mode);
52 return 1;
53}
54
55enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
56 struct drm_display_mode *mode)
57{
58 struct intel_connector *intel_connector = to_intel_connector(connector);
59 const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
60 int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
61
62 DRM_DEBUG_KMS("\n");
63
64 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
65 return MODE_NO_DBLESCAN;
66
67 if (fixed_mode) {
68 if (mode->hdisplay > fixed_mode->hdisplay)
69 return MODE_PANEL;
70 if (mode->vdisplay > fixed_mode->vdisplay)
71 return MODE_PANEL;
72 if (fixed_mode->clock > max_dotclk)
73 return MODE_CLOCK_HIGH;
74 }
75
76 return MODE_OK;
77}
78
79struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
80 const struct mipi_dsi_host_ops *funcs,
81 enum port port)
82{
83 struct intel_dsi_host *host;
84 struct mipi_dsi_device *device;
85
86 host = kzalloc(sizeof(*host), GFP_KERNEL);
87 if (!host)
88 return NULL;
89
90 host->base.ops = funcs;
91 host->intel_dsi = intel_dsi;
92 host->port = port;
93
94 /*
95 * We should call mipi_dsi_host_register(&host->base) here, but we don't
96 * have a host->dev, and we don't have OF stuff either. So just use the
97	 * dsi framework as a library and hope for the best. Create the dsi
98	 * devices ourselves here too, but be careful: we don't initialize
99	 * any of the driver model devices here.
100 */
101 device = kzalloc(sizeof(*device), GFP_KERNEL);
102 if (!device) {
103 kfree(host);
104 return NULL;
105 }
106
107 device->host = &host->base;
108 host->device = device;
109
110 return host;
111}
112
113enum drm_panel_orientation
114intel_dsi_get_panel_orientation(struct intel_connector *connector)
115{
116 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
117 enum drm_panel_orientation orientation;
118
119 orientation = dev_priv->vbt.dsi.orientation;
120 if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
121 return orientation;
122
123 orientation = dev_priv->vbt.orientation;
124 if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
125 return orientation;
126
127 return DRM_MODE_PANEL_ORIENTATION_NORMAL;
128}
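
[Editor's note: as a sanity check on the arithmetic in intel_dsi_bitrate() above -- not part of the patch -- here is a minimal standalone sketch for a hypothetical 1080p60 panel (pclk 148500 kHz, RGB888, 4 lanes); all values are invented for illustration.]

#include <stdio.h>

/* Hypothetical values; the real driver derives them from the VBT. */
#define PCLK_KHZ 148500	/* 1920x1080@60 */
#define BPP      24	/* MIPI_DSI_FMT_RGB888 */
#define LANES    4

int main(void)
{
	/* Mirrors intel_dsi_bitrate(): pclk * bpp / lane_count, in kbps. */
	int bitrate = PCLK_KHZ * BPP / LANES;

	printf("per-lane bitrate: %d kbps (%.3f Gbps)\n",
	       bitrate, bitrate / 1000000.0);
	return 0;
}
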
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index ad7c1cb32983..ee93137f4433 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -81,14 +81,21 @@ struct intel_dsi {
81 u16 dcs_backlight_ports; 81 u16 dcs_backlight_ports;
82 u16 dcs_cabc_ports; 82 u16 dcs_cabc_ports;
83 83
84 /* RGB or BGR */
85 bool bgr_enabled;
86
84 u8 pixel_overlap; 87 u8 pixel_overlap;
85 u32 port_bits; 88 u32 port_bits;
86 u32 bw_timer; 89 u32 bw_timer;
87 u32 dphy_reg; 90 u32 dphy_reg;
91
92 /* data lanes dphy timing */
93 u32 dphy_data_lane_reg;
88 u32 video_frmt_cfg_bits; 94 u32 video_frmt_cfg_bits;
89 u16 lp_byte_clk; 95 u16 lp_byte_clk;
90 96
91 /* timeouts in byte clocks */ 97 /* timeouts in byte clocks */
98 u16 hs_tx_timeout;
92 u16 lp_rx_timeout; 99 u16 lp_rx_timeout;
93 u16 turn_arnd_val; 100 u16 turn_arnd_val;
94 u16 rst_timer_val; 101 u16 rst_timer_val;
@@ -129,9 +136,31 @@ static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
129 return container_of(encoder, struct intel_dsi, base.base); 136 return container_of(encoder, struct intel_dsi, base.base);
130} 137}
131 138
139static inline bool is_vid_mode(struct intel_dsi *intel_dsi)
140{
141 return intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE;
142}
143
144static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
145{
146 return intel_dsi->operation_mode == INTEL_DSI_COMMAND_MODE;
147}
148
149/* intel_dsi.c */
150int intel_dsi_bitrate(const struct intel_dsi *intel_dsi);
151int intel_dsi_tlpx_ns(const struct intel_dsi *intel_dsi);
152enum drm_panel_orientation
153intel_dsi_get_panel_orientation(struct intel_connector *connector);
154
132/* vlv_dsi.c */ 155/* vlv_dsi.c */
133void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port); 156void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port);
134enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt); 157enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt);
158int intel_dsi_get_modes(struct drm_connector *connector);
159enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
160 struct drm_display_mode *mode);
161struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
162 const struct mipi_dsi_host_ops *funcs,
163 enum port port);
135 164
136/* vlv_dsi_pll.c */ 165/* vlv_dsi_pll.c */
137int vlv_dsi_pll_compute(struct intel_encoder *encoder, 166int vlv_dsi_pll_compute(struct intel_encoder *encoder,
@@ -158,5 +187,6 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id);
158int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi); 187int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi);
159void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi, 188void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
160 enum mipi_seq seq_id); 189 enum mipi_seq seq_id);
190void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec);
161 191
162#endif /* _INTEL_DSI_H */ 192#endif /* _INTEL_DSI_H */
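
[Editor's note: for context, a distilled model of the intel_dsi_msleep() rule declared above and implemented below in intel_dsi_vbt.c -- the struct and caller here are stand-ins invented for the sketch, not the real intel_dsi.]

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the relevant intel_dsi/VBT state. */
struct dsi_model {
	bool vid_mode;		/* models is_vid_mode() */
	int seq_version;	/* models dev_priv->vbt.dsi.seq_version */
};

static void dsi_msleep(const struct dsi_model *dsi, int msec)
{
	/* For v3 VBTs in video mode the delays are already part of the
	 * VBT sequences, so sleeping here would double them. */
	if (dsi->vid_mode && dsi->seq_version >= 3)
		return;
	printf("msleep(%d)\n", msec);
}

int main(void)
{
	struct dsi_model v3_vid = { .vid_mode = true, .seq_version = 3 };
	struct dsi_model v2_cmd = { .vid_mode = false, .seq_version = 2 };

	dsi_msleep(&v3_vid, 100);	/* skipped */
	dsi_msleep(&v2_cmd, 100);	/* sleeps */
	return 0;
}
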
diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c
index ac83d6b89ae0..a72de81f4832 100644
--- a/drivers/gpu/drm/i915/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c
@@ -111,6 +111,7 @@ static inline enum port intel_dsi_seq_port_to_port(u8 port)
111static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, 111static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
112 const u8 *data) 112 const u8 *data)
113{ 113{
114 struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
114 struct mipi_dsi_device *dsi_device; 115 struct mipi_dsi_device *dsi_device;
115 u8 type, flags, seq_port; 116 u8 type, flags, seq_port;
116 u16 len; 117 u16 len;
@@ -181,7 +182,8 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
181 break; 182 break;
182 } 183 }
183 184
184 vlv_dsi_wait_for_fifo_empty(intel_dsi, port); 185 if (!IS_ICELAKE(dev_priv))
186 vlv_dsi_wait_for_fifo_empty(intel_dsi, port);
185 187
186out: 188out:
187 data += len; 189 data += len;
@@ -481,6 +483,17 @@ void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
481 } 483 }
482} 484}
483 485
486void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
487{
488 struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
489
490 /* For v3 VBTs in vid-mode the delays are part of the VBT sequences */
491 if (is_vid_mode(intel_dsi) && dev_priv->vbt.dsi.seq_version >= 3)
492 return;
493
494 msleep(msec);
495}
496
484int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi) 497int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi)
485{ 498{
486 struct intel_connector *connector = intel_dsi->attached_connector; 499 struct intel_connector *connector = intel_dsi->attached_connector;
@@ -499,110 +512,125 @@ int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi)
499 return 1; 512 return 1;
500} 513}
501 514
502bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id) 515#define ICL_PREPARE_CNT_MAX 0x7
516#define ICL_CLK_ZERO_CNT_MAX 0xf
517#define ICL_TRAIL_CNT_MAX 0x7
518#define ICL_TCLK_PRE_CNT_MAX 0x3
519#define ICL_TCLK_POST_CNT_MAX 0x7
520#define ICL_HS_ZERO_CNT_MAX 0xf
521#define ICL_EXIT_ZERO_CNT_MAX 0x7
522
523static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
503{ 524{
504 struct drm_device *dev = intel_dsi->base.base.dev; 525 struct drm_device *dev = intel_dsi->base.base.dev;
505 struct drm_i915_private *dev_priv = to_i915(dev); 526 struct drm_i915_private *dev_priv = to_i915(dev);
506 struct mipi_config *mipi_config = dev_priv->vbt.dsi.config; 527 struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
507 struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps; 528 u32 tlpx_ns;
508 struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode;
509 u32 bpp;
510 u32 tlpx_ns, extra_byte_count, bitrate, tlpx_ui;
511 u32 ui_num, ui_den;
512 u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt; 529 u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
513 u32 ths_prepare_ns, tclk_trail_ns; 530 u32 ths_prepare_ns, tclk_trail_ns;
514 u32 tclk_prepare_clkzero, ths_prepare_hszero; 531 u32 hs_zero_cnt;
515 u32 lp_to_hs_switch, hs_to_lp_switch; 532 u32 tclk_pre_cnt, tclk_post_cnt;
516 u32 pclk, computed_ddr;
517 u32 mul;
518 u16 burst_mode_ratio;
519 enum port port;
520 533
521 DRM_DEBUG_KMS("\n"); 534 tlpx_ns = intel_dsi_tlpx_ns(intel_dsi);
522 535
523 intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1; 536 tclk_trail_ns = max(mipi_config->tclk_trail, mipi_config->ths_trail);
524 intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0; 537 ths_prepare_ns = max(mipi_config->ths_prepare,
525 intel_dsi->lane_count = mipi_config->lane_cnt + 1; 538 mipi_config->tclk_prepare);
526 intel_dsi->pixel_format =
527 pixel_format_from_register_bits(
528 mipi_config->videomode_color_format << 7);
529 bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
530
531 intel_dsi->dual_link = mipi_config->dual_link;
532 intel_dsi->pixel_overlap = mipi_config->pixel_overlap;
533 intel_dsi->operation_mode = mipi_config->is_cmd_mode;
534 intel_dsi->video_mode_format = mipi_config->video_transfer_mode;
535 intel_dsi->escape_clk_div = mipi_config->byte_clk_sel;
536 intel_dsi->lp_rx_timeout = mipi_config->lp_rx_timeout;
537 intel_dsi->turn_arnd_val = mipi_config->turn_around_timeout;
538 intel_dsi->rst_timer_val = mipi_config->device_reset_timer;
539 intel_dsi->init_count = mipi_config->master_init_timer;
540 intel_dsi->bw_timer = mipi_config->dbi_bw_timer;
541 intel_dsi->video_frmt_cfg_bits =
542 mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0;
543
544 pclk = mode->clock;
545 539
546 /* In dual link mode each port needs half of pixel clock */ 540 /*
547 if (intel_dsi->dual_link) { 541 * prepare cnt in escape clocks
548 pclk = pclk / 2; 542 * this field represents a hexadecimal value with a precision
 543 * of 1.2 - i.e. the most significant bit is the integer part
 544 * and the least significant 2 bits are fraction bits,
 545 * so the field can represent a range of 0.25 to 1.75
546 */
547 prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * 4, tlpx_ns);
548 if (prepare_cnt > ICL_PREPARE_CNT_MAX) {
549 DRM_DEBUG_KMS("prepare_cnt out of range (%d)\n", prepare_cnt);
550 prepare_cnt = ICL_PREPARE_CNT_MAX;
551 }
549 552
550 /* we can enable pixel_overlap if needed by panel. In this 553 /* clk zero count in escape clocks */
551 * case we need to increase the pixelclock for extra pixels 554 clk_zero_cnt = DIV_ROUND_UP(mipi_config->tclk_prepare_clkzero -
552 */ 555 ths_prepare_ns, tlpx_ns);
553 if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) { 556 if (clk_zero_cnt > ICL_CLK_ZERO_CNT_MAX) {
554 pclk += DIV_ROUND_UP(mode->vtotal * 557 DRM_DEBUG_KMS("clk_zero_cnt out of range (%d)\n", clk_zero_cnt);
555 intel_dsi->pixel_overlap * 558 clk_zero_cnt = ICL_CLK_ZERO_CNT_MAX;
556 60, 1000);
557 }
558 } 559 }
559 560
560 /* Burst Mode Ratio 561 /* trail cnt in escape clocks */
561 * Target ddr frequency from VBT / non burst ddr freq 562 trail_cnt = DIV_ROUND_UP(tclk_trail_ns, tlpx_ns);
562 * multiply by 100 to preserve remainder 563 if (trail_cnt > ICL_TRAIL_CNT_MAX) {
563 */ 564 DRM_DEBUG_KMS("trail_cnt out of range (%d)\n", trail_cnt);
564 if (intel_dsi->video_mode_format == VIDEO_MODE_BURST) { 565 trail_cnt = ICL_TRAIL_CNT_MAX;
565 if (mipi_config->target_burst_mode_freq) { 566 }
566 computed_ddr = (pclk * bpp) / intel_dsi->lane_count;
567 567
568 if (mipi_config->target_burst_mode_freq < 568 /* tclk pre count in escape clocks */
569 computed_ddr) { 569 tclk_pre_cnt = DIV_ROUND_UP(mipi_config->tclk_pre, tlpx_ns);
570 DRM_ERROR("Burst mode freq is less than computed\n"); 570 if (tclk_pre_cnt > ICL_TCLK_PRE_CNT_MAX) {
571 return false; 571 DRM_DEBUG_KMS("tclk_pre_cnt out of range (%d)\n", tclk_pre_cnt);
572 } 572 tclk_pre_cnt = ICL_TCLK_PRE_CNT_MAX;
573 }
573 574
574 burst_mode_ratio = DIV_ROUND_UP( 575 /* tclk post count in escape clocks */
575 mipi_config->target_burst_mode_freq * 100, 576 tclk_post_cnt = DIV_ROUND_UP(mipi_config->tclk_post, tlpx_ns);
576 computed_ddr); 577 if (tclk_post_cnt > ICL_TCLK_POST_CNT_MAX) {
578 DRM_DEBUG_KMS("tclk_post_cnt out of range (%d)\n", tclk_post_cnt);
579 tclk_post_cnt = ICL_TCLK_POST_CNT_MAX;
580 }
577 581
578 pclk = DIV_ROUND_UP(pclk * burst_mode_ratio, 100); 582 /* hs zero cnt in escape clocks */
579 } else { 583 hs_zero_cnt = DIV_ROUND_UP(mipi_config->ths_prepare_hszero -
580 DRM_ERROR("Burst mode target is not set\n"); 584 ths_prepare_ns, tlpx_ns);
581 return false; 585 if (hs_zero_cnt > ICL_HS_ZERO_CNT_MAX) {
582 } 586 DRM_DEBUG_KMS("hs_zero_cnt out of range (%d)\n", hs_zero_cnt);
583 } else 587 hs_zero_cnt = ICL_HS_ZERO_CNT_MAX;
584 burst_mode_ratio = 100; 588 }
585 589
586 intel_dsi->burst_mode_ratio = burst_mode_ratio; 590 /* hs exit zero cnt in escape clocks */
587 intel_dsi->pclk = pclk; 591 exit_zero_cnt = DIV_ROUND_UP(mipi_config->ths_exit, tlpx_ns);
592 if (exit_zero_cnt > ICL_EXIT_ZERO_CNT_MAX) {
593 DRM_DEBUG_KMS("exit_zero_cnt out of range (%d)\n", exit_zero_cnt);
594 exit_zero_cnt = ICL_EXIT_ZERO_CNT_MAX;
595 }
588 596
589 bitrate = (pclk * bpp) / intel_dsi->lane_count; 597 /* clock lane dphy timings */
598 intel_dsi->dphy_reg = (CLK_PREPARE_OVERRIDE |
599 CLK_PREPARE(prepare_cnt) |
600 CLK_ZERO_OVERRIDE |
601 CLK_ZERO(clk_zero_cnt) |
602 CLK_PRE_OVERRIDE |
603 CLK_PRE(tclk_pre_cnt) |
604 CLK_POST_OVERRIDE |
605 CLK_POST(tclk_post_cnt) |
606 CLK_TRAIL_OVERRIDE |
607 CLK_TRAIL(trail_cnt));
608
609 /* data lanes dphy timings */
610 intel_dsi->dphy_data_lane_reg = (HS_PREPARE_OVERRIDE |
611 HS_PREPARE(prepare_cnt) |
612 HS_ZERO_OVERRIDE |
613 HS_ZERO(hs_zero_cnt) |
614 HS_TRAIL_OVERRIDE |
615 HS_TRAIL(trail_cnt) |
616 HS_EXIT_OVERRIDE |
617 HS_EXIT(exit_zero_cnt));
618}
590 619
591 switch (intel_dsi->escape_clk_div) { 620static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
592 case 0: 621{
593 tlpx_ns = 50; 622 struct drm_device *dev = intel_dsi->base.base.dev;
594 break; 623 struct drm_i915_private *dev_priv = to_i915(dev);
595 case 1: 624 struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
596 tlpx_ns = 100; 625 u32 tlpx_ns, extra_byte_count, tlpx_ui;
597 break; 626 u32 ui_num, ui_den;
627 u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
628 u32 ths_prepare_ns, tclk_trail_ns;
629 u32 tclk_prepare_clkzero, ths_prepare_hszero;
630 u32 lp_to_hs_switch, hs_to_lp_switch;
631 u32 mul;
598 632
599 case 2: 633 tlpx_ns = intel_dsi_tlpx_ns(intel_dsi);
600 tlpx_ns = 200;
601 break;
602 default:
603 tlpx_ns = 50;
604 break;
605 }
606 634
607 switch (intel_dsi->lane_count) { 635 switch (intel_dsi->lane_count) {
608 case 1: 636 case 1:
@@ -620,7 +648,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
620 648
621 /* in Kbps */ 649 /* in Kbps */
622 ui_num = NS_KHZ_RATIO; 650 ui_num = NS_KHZ_RATIO;
623 ui_den = bitrate; 651 ui_den = intel_dsi_bitrate(intel_dsi);
624 652
625 tclk_prepare_clkzero = mipi_config->tclk_prepare_clkzero; 653 tclk_prepare_clkzero = mipi_config->tclk_prepare_clkzero;
626 ths_prepare_hszero = mipi_config->ths_prepare_hszero; 654 ths_prepare_hszero = mipi_config->ths_prepare_hszero;
@@ -746,6 +774,88 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
746 DIV_ROUND_UP(2 * tlpx_ui + trail_cnt * 2 + 8, 774 DIV_ROUND_UP(2 * tlpx_ui + trail_cnt * 2 + 8,
747 8); 775 8);
748 intel_dsi->clk_hs_to_lp_count += extra_byte_count; 776 intel_dsi->clk_hs_to_lp_count += extra_byte_count;
777}
778
779bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
780{
781 struct drm_device *dev = intel_dsi->base.base.dev;
782 struct drm_i915_private *dev_priv = to_i915(dev);
783 struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
784 struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps;
785 struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode;
786 u16 burst_mode_ratio;
787 enum port port;
788
789 DRM_DEBUG_KMS("\n");
790
791 intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1;
792 intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
793 intel_dsi->lane_count = mipi_config->lane_cnt + 1;
794 intel_dsi->pixel_format =
795 pixel_format_from_register_bits(
796 mipi_config->videomode_color_format << 7);
797
798 intel_dsi->dual_link = mipi_config->dual_link;
799 intel_dsi->pixel_overlap = mipi_config->pixel_overlap;
800 intel_dsi->operation_mode = mipi_config->is_cmd_mode;
801 intel_dsi->video_mode_format = mipi_config->video_transfer_mode;
802 intel_dsi->escape_clk_div = mipi_config->byte_clk_sel;
803 intel_dsi->lp_rx_timeout = mipi_config->lp_rx_timeout;
804 intel_dsi->hs_tx_timeout = mipi_config->hs_tx_timeout;
805 intel_dsi->turn_arnd_val = mipi_config->turn_around_timeout;
806 intel_dsi->rst_timer_val = mipi_config->device_reset_timer;
807 intel_dsi->init_count = mipi_config->master_init_timer;
808 intel_dsi->bw_timer = mipi_config->dbi_bw_timer;
809 intel_dsi->video_frmt_cfg_bits =
810 mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0;
811 intel_dsi->bgr_enabled = mipi_config->rgb_flip;
812
813 /* Starting point, adjusted depending on dual link and burst mode */
814 intel_dsi->pclk = mode->clock;
815
816 /* In dual link mode each port needs half of pixel clock */
817 if (intel_dsi->dual_link) {
818 intel_dsi->pclk /= 2;
819
820	/* pixel_overlap can be enabled if the panel needs it; in that
821	 * case we need to increase the pixel clock for the extra pixels
822 */
823 if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
824 intel_dsi->pclk += DIV_ROUND_UP(mode->vtotal * intel_dsi->pixel_overlap * 60, 1000);
825 }
826 }
827
828	/* Burst mode ratio:
829	 * target DDR frequency from VBT / non-burst DDR frequency,
830	 * multiplied by 100 to preserve the remainder
831 */
832 if (intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
833 if (mipi_config->target_burst_mode_freq) {
834 u32 bitrate = intel_dsi_bitrate(intel_dsi);
835
836 if (mipi_config->target_burst_mode_freq < bitrate) {
837 DRM_ERROR("Burst mode freq is less than computed\n");
838 return false;
839 }
840
841 burst_mode_ratio = DIV_ROUND_UP(
842 mipi_config->target_burst_mode_freq * 100,
843 bitrate);
844
845 intel_dsi->pclk = DIV_ROUND_UP(intel_dsi->pclk * burst_mode_ratio, 100);
846 } else {
847 DRM_ERROR("Burst mode target is not set\n");
848 return false;
849 }
850 } else
851 burst_mode_ratio = 100;
852
853 intel_dsi->burst_mode_ratio = burst_mode_ratio;
854
855 if (IS_ICELAKE(dev_priv))
856 icl_dphy_param_init(intel_dsi);
857 else
858 vlv_dphy_param_init(intel_dsi);
749 859
750 DRM_DEBUG_KMS("Pclk %d\n", intel_dsi->pclk); 860 DRM_DEBUG_KMS("Pclk %d\n", intel_dsi->pclk);
751 DRM_DEBUG_KMS("Pixel overlap %d\n", intel_dsi->pixel_overlap); 861 DRM_DEBUG_KMS("Pixel overlap %d\n", intel_dsi->pixel_overlap);
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 4e142ff49708..0042a7f69387 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -256,6 +256,7 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
256 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) 256 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
257 return false; 257 return false;
258 258
259 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
259 return true; 260 return true;
260} 261}
261 262
@@ -333,18 +334,11 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
333 return 0; 334 return 0;
334} 335}
335 336
336static void intel_dvo_destroy(struct drm_connector *connector)
337{
338 drm_connector_cleanup(connector);
339 intel_panel_fini(&to_intel_connector(connector)->panel);
340 kfree(connector);
341}
342
343static const struct drm_connector_funcs intel_dvo_connector_funcs = { 337static const struct drm_connector_funcs intel_dvo_connector_funcs = {
344 .detect = intel_dvo_detect, 338 .detect = intel_dvo_detect,
345 .late_register = intel_connector_register, 339 .late_register = intel_connector_register,
346 .early_unregister = intel_connector_unregister, 340 .early_unregister = intel_connector_unregister,
347 .destroy = intel_dvo_destroy, 341 .destroy = intel_connector_destroy,
348 .fill_modes = drm_helper_probe_single_connector_modes, 342 .fill_modes = drm_helper_probe_single_connector_modes,
349 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 343 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
350 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, 344 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 217ed3ee1cab..885a901b6e13 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -273,13 +273,13 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
273 BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH)); 273 BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
274 BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH)); 274 BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));
275 275
276 if (GEM_WARN_ON(info->class > MAX_ENGINE_CLASS)) 276 if (GEM_DEBUG_WARN_ON(info->class > MAX_ENGINE_CLASS))
277 return -EINVAL; 277 return -EINVAL;
278 278
279 if (GEM_WARN_ON(info->instance > MAX_ENGINE_INSTANCE)) 279 if (GEM_DEBUG_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
280 return -EINVAL; 280 return -EINVAL;
281 281
282 if (GEM_WARN_ON(dev_priv->engine_class[info->class][info->instance])) 282 if (GEM_DEBUG_WARN_ON(dev_priv->engine_class[info->class][info->instance]))
283 return -EINVAL; 283 return -EINVAL;
284 284
285 GEM_BUG_ON(dev_priv->engine[id]); 285 GEM_BUG_ON(dev_priv->engine[id]);
@@ -335,7 +335,10 @@ int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
335 335
336 WARN_ON(ring_mask == 0); 336 WARN_ON(ring_mask == 0);
337 WARN_ON(ring_mask & 337 WARN_ON(ring_mask &
338 GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES)); 338 GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));
339
340 if (i915_inject_load_failure())
341 return -ENODEV;
339 342
340 for (i = 0; i < ARRAY_SIZE(intel_engines); i++) { 343 for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
341 if (!HAS_ENGINE(dev_priv, i)) 344 if (!HAS_ENGINE(dev_priv, i))
@@ -399,7 +402,7 @@ int intel_engines_init(struct drm_i915_private *dev_priv)
399 err = -EINVAL; 402 err = -EINVAL;
400 err_id = id; 403 err_id = id;
401 404
402 if (GEM_WARN_ON(!init)) 405 if (GEM_DEBUG_WARN_ON(!init))
403 goto cleanup; 406 goto cleanup;
404 407
405 err = init(engine); 408 err = init(engine);
@@ -463,7 +466,7 @@ static void intel_engine_init_execlist(struct intel_engine_cs *engine)
463 struct intel_engine_execlists * const execlists = &engine->execlists; 466 struct intel_engine_execlists * const execlists = &engine->execlists;
464 467
465 execlists->port_mask = 1; 468 execlists->port_mask = 1;
466 BUILD_BUG_ON_NOT_POWER_OF_2(execlists_num_ports(execlists)); 469 GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists)));
467 GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS); 470 GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);
468 471
469 execlists->queue_priority = INT_MIN; 472 execlists->queue_priority = INT_MIN;
@@ -482,7 +485,7 @@ static void intel_engine_init_execlist(struct intel_engine_cs *engine)
482void intel_engine_setup_common(struct intel_engine_cs *engine) 485void intel_engine_setup_common(struct intel_engine_cs *engine)
483{ 486{
484 i915_timeline_init(engine->i915, &engine->timeline, engine->name); 487 i915_timeline_init(engine->i915, &engine->timeline, engine->name);
485 lockdep_set_subclass(&engine->timeline.lock, TIMELINE_ENGINE); 488 i915_timeline_set_subclass(&engine->timeline, TIMELINE_ENGINE);
486 489
487 intel_engine_init_execlist(engine); 490 intel_engine_init_execlist(engine);
488 intel_engine_init_hangcheck(engine); 491 intel_engine_init_hangcheck(engine);
@@ -809,7 +812,7 @@ u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv)
809 u32 slice = fls(sseu->slice_mask); 812 u32 slice = fls(sseu->slice_mask);
810 u32 subslice = fls(sseu->subslice_mask[slice]); 813 u32 subslice = fls(sseu->subslice_mask[slice]);
811 814
812 if (INTEL_GEN(dev_priv) == 10) 815 if (IS_GEN10(dev_priv))
813 mcr_s_ss_select = GEN8_MCR_SLICE(slice) | 816 mcr_s_ss_select = GEN8_MCR_SLICE(slice) |
814 GEN8_MCR_SUBSLICE(subslice); 817 GEN8_MCR_SUBSLICE(subslice);
815 else if (INTEL_GEN(dev_priv) >= 11) 818 else if (INTEL_GEN(dev_priv) >= 11)
@@ -1534,10 +1537,10 @@ void intel_engine_dump(struct intel_engine_cs *engine,
1534 count = 0; 1537 count = 0;
1535 drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority); 1538 drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority);
1536 for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) { 1539 for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
1537 struct i915_priolist *p = 1540 struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
1538 rb_entry(rb, typeof(*p), node); 1541 int i;
1539 1542
1540 list_for_each_entry(rq, &p->requests, sched.link) { 1543 priolist_for_each_request(rq, p, i) {
1541 if (count++ < MAX_REQUESTS_TO_SHOW - 1) 1544 if (count++ < MAX_REQUESTS_TO_SHOW - 1)
1542 print_request(m, rq, "\t\tQ "); 1545 print_request(m, rq, "\t\tQ ");
1543 else 1546 else
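
[Editor's note: the GENMASK tweak above is purely cosmetic -- BITS_PER_TYPE expands to exactly the old expression. A quick standalone check, with the kernel macro definitions reproduced for the sketch:]

#include <stdio.h>

#define BITS_PER_BYTE		8
#define BITS_PER_TYPE(t)	(sizeof(t) * BITS_PER_BYTE)

int main(void)
{
	unsigned int mask = 0;

	/* Both spell "number of bits in mask's type": 32 here. */
	printf("%zu == %zu\n",
	       sizeof(mask) * BITS_PER_BYTE, BITS_PER_TYPE(mask));
	return 0;
}
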
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 74d425c700ef..14cbaf4a0e93 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -84,7 +84,7 @@ static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
84 int lines; 84 int lines;
85 85
86 intel_fbc_get_plane_source_size(cache, NULL, &lines); 86 intel_fbc_get_plane_source_size(cache, NULL, &lines);
87 if (INTEL_GEN(dev_priv) == 7) 87 if (IS_GEN7(dev_priv))
88 lines = min(lines, 2048); 88 lines = min(lines, 2048);
89 else if (INTEL_GEN(dev_priv) >= 8) 89 else if (INTEL_GEN(dev_priv) >= 8)
90 lines = min(lines, 2560); 90 lines = min(lines, 2560);
@@ -674,6 +674,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
674 cache->plane.adjusted_y = plane_state->color_plane[0].y; 674 cache->plane.adjusted_y = plane_state->color_plane[0].y;
675 cache->plane.y = plane_state->base.src.y1 >> 16; 675 cache->plane.y = plane_state->base.src.y1 >> 16;
676 676
677 cache->plane.pixel_blend_mode = plane_state->base.pixel_blend_mode;
678
677 if (!cache->plane.visible) 679 if (!cache->plane.visible)
678 return; 680 return;
679 681
@@ -748,6 +750,12 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
748 return false; 750 return false;
749 } 751 }
750 752
753 if (cache->plane.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE &&
754 cache->fb.format->has_alpha) {
755 fbc->no_fbc_reason = "per-pixel alpha blending is incompatible with FBC";
756 return false;
757 }
758
751 /* WaFbcExceedCdClockThreshold:hsw,bdw */ 759 /* WaFbcExceedCdClockThreshold:hsw,bdw */
752 if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) && 760 if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
753 cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk.hw.cdclk * 95 / 100) { 761 cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk.hw.cdclk * 95 / 100) {
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index f99332972b7a..2480c7d6edee 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -593,7 +593,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
593 * pipe. Note we need to use the selected fb's pitch and bpp 593 * pipe. Note we need to use the selected fb's pitch and bpp
594 * rather than the current pipe's, since they differ. 594 * rather than the current pipe's, since they differ.
595 */ 595 */
596 cur_size = intel_crtc->config->base.adjusted_mode.crtc_hdisplay; 596 cur_size = crtc->state->adjusted_mode.crtc_hdisplay;
597 cur_size = cur_size * fb->base.format->cpp[0]; 597 cur_size = cur_size * fb->base.format->cpp[0];
598 if (fb->base.pitches[0] < cur_size) { 598 if (fb->base.pitches[0] < cur_size) {
599 DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n", 599 DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n",
@@ -603,13 +603,13 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
603 break; 603 break;
604 } 604 }
605 605
606 cur_size = intel_crtc->config->base.adjusted_mode.crtc_vdisplay; 606 cur_size = crtc->state->adjusted_mode.crtc_vdisplay;
607 cur_size = intel_fb_align_height(&fb->base, 0, cur_size); 607 cur_size = intel_fb_align_height(&fb->base, 0, cur_size);
608 cur_size *= fb->base.pitches[0]; 608 cur_size *= fb->base.pitches[0];
609 DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n", 609 DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n",
610 pipe_name(intel_crtc->pipe), 610 pipe_name(intel_crtc->pipe),
611 intel_crtc->config->base.adjusted_mode.crtc_hdisplay, 611 crtc->state->adjusted_mode.crtc_hdisplay,
612 intel_crtc->config->base.adjusted_mode.crtc_vdisplay, 612 crtc->state->adjusted_mode.crtc_vdisplay,
613 fb->base.format->cpp[0] * 8, 613 fb->base.format->cpp[0] * 8,
614 cur_size); 614 cur_size);
615 615
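
[Editor's note: the size check the hunk above rewrites boils down to simple arithmetic; a standalone sketch with invented plane geometry. The real code also rounds the height for tiling via intel_fb_align_height(), which is ignored here.]

#include <stdio.h>

int main(void)
{
	/* Invented BIOS framebuffer and pipe geometry. */
	int hdisplay = 1920, vdisplay = 1080;
	int cpp = 4;		/* bytes per pixel */
	int fb_pitch = 7680;	/* fb->base.pitches[0] */
	int fb_size = 8294400;	/* allocated fb size in bytes */

	int min_pitch = hdisplay * cpp;
	if (fb_pitch < min_pitch) {
		printf("fb not wide enough (%d vs %d)\n", fb_pitch, min_pitch);
		return 1;
	}

	/* The real code tile-aligns vdisplay before this multiply. */
	int cur_size = vdisplay * fb_pitch;
	printf("need %d bytes, have %d: %s\n", cur_size, fb_size,
	       cur_size <= fb_size ? "ok" : "too small");
	return 0;
}
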
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index 230aea69385d..8660af3fd755 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -50,7 +50,8 @@ void intel_guc_init_send_regs(struct intel_guc *guc)
50 unsigned int i; 50 unsigned int i;
51 51
52 guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0)); 52 guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
53 guc->send_regs.count = SOFT_SCRATCH_COUNT - 1; 53 guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
54 BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
54 55
55 for (i = 0; i < guc->send_regs.count; i++) { 56 for (i = 0; i < guc->send_regs.count; i++) {
56 fw_domains |= intel_uncore_forcewake_for_reg(dev_priv, 57 fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
@@ -521,6 +522,44 @@ int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
521 return intel_guc_send(guc, action, ARRAY_SIZE(action)); 522 return intel_guc_send(guc, action, ARRAY_SIZE(action));
522} 523}
523 524
525/*
526 * The ENTER/EXIT_S_STATE actions queue the save/restore operation in GuC FW and
527 * then return, so waiting on the H2G is not enough to guarantee GuC is done.
528 * When all the processing is done, GuC writes INTEL_GUC_SLEEP_STATE_SUCCESS to
529 * scratch register 14, so we can poll on that. Note that GuC does not ensure
530 * that the value in the register is different from
 531 * INTEL_GUC_SLEEP_STATE_SUCCESS while the action is in progress, so we need to
532 * take care of that ourselves as well.
533 */
534static int guc_sleep_state_action(struct intel_guc *guc,
535 const u32 *action, u32 len)
536{
537 struct drm_i915_private *dev_priv = guc_to_i915(guc);
538 int ret;
539 u32 status;
540
541 I915_WRITE(SOFT_SCRATCH(14), INTEL_GUC_SLEEP_STATE_INVALID_MASK);
542
543 ret = intel_guc_send(guc, action, len);
544 if (ret)
545 return ret;
546
547 ret = __intel_wait_for_register(dev_priv, SOFT_SCRATCH(14),
548 INTEL_GUC_SLEEP_STATE_INVALID_MASK,
549 0, 0, 10, &status);
550 if (ret)
551 return ret;
552
553 if (status != INTEL_GUC_SLEEP_STATE_SUCCESS) {
554 DRM_ERROR("GuC failed to change sleep state. "
555 "action=0x%x, err=%u\n",
556 action[0], status);
557 return -EIO;
558 }
559
560 return 0;
561}
562
524/** 563/**
525 * intel_guc_suspend() - notify GuC entering suspend state 564 * intel_guc_suspend() - notify GuC entering suspend state
526 * @guc: the guc 565 * @guc: the guc
@@ -533,7 +572,7 @@ int intel_guc_suspend(struct intel_guc *guc)
533 intel_guc_ggtt_offset(guc, guc->shared_data) 572 intel_guc_ggtt_offset(guc, guc->shared_data)
534 }; 573 };
535 574
536 return intel_guc_send(guc, data, ARRAY_SIZE(data)); 575 return guc_sleep_state_action(guc, data, ARRAY_SIZE(data));
537} 576}
538 577
539/** 578/**
@@ -571,7 +610,7 @@ int intel_guc_resume(struct intel_guc *guc)
571 intel_guc_ggtt_offset(guc, guc->shared_data) 610 intel_guc_ggtt_offset(guc, guc->shared_data)
572 }; 611 };
573 612
574 return intel_guc_send(guc, data, ARRAY_SIZE(data)); 613 return guc_sleep_state_action(guc, data, ARRAY_SIZE(data));
575} 614}
576 615
577/** 616/**
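
[Editor's note: the comment above guc_sleep_state_action() describes a classic sentinel handshake. A distilled user-space model of why scratch register 14 must be pre-loaded with INTEL_GUC_SLEEP_STATE_INVALID_MASK before the H2G is sent -- the globals and helper here are invented for illustration.]

#include <stdint.h>
#include <stdio.h>

#define SLEEP_STATE_SUCCESS		0x0
#define SLEEP_STATE_INVALID_MASK	0x80000000u

static uint32_t scratch14;	/* stand-in for SOFT_SCRATCH(14) */
static void fw_completes(void) { scratch14 = SLEEP_STATE_SUCCESS; }

int main(void)
{
	/*
	 * Pre-load a value the firmware will never write, so a stale
	 * SUCCESS from a previous cycle cannot satisfy the poll below.
	 */
	scratch14 = SLEEP_STATE_INVALID_MASK;

	/* send the H2G action here; it only *queues* the work ... */
	fw_completes();	/* ... firmware finishes some time later */

	while (scratch14 & SLEEP_STATE_INVALID_MASK)
		;	/* real code bounds this with __intel_wait_for_register() */

	printf("sleep state: %s\n",
	       scratch14 == SLEEP_STATE_SUCCESS ? "success" : "error");
	return 0;
}
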
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index ad42faf48c46..0f1c4f9ebfd8 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -95,6 +95,11 @@ struct intel_guc {
95 void (*notify)(struct intel_guc *guc); 95 void (*notify)(struct intel_guc *guc);
96}; 96};
97 97
98static inline bool intel_guc_is_alive(struct intel_guc *guc)
99{
100 return intel_uc_fw_is_loaded(&guc->fw);
101}
102
98static 103static
99inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len) 104inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
100{ 105{
diff --git a/drivers/gpu/drm/i915/intel_guc_fw.c b/drivers/gpu/drm/i915/intel_guc_fw.c
index a9e6fcce467c..a67144ee5ceb 100644
--- a/drivers/gpu/drm/i915/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/intel_guc_fw.c
@@ -78,7 +78,8 @@ static void guc_fw_select(struct intel_uc_fw *guc_fw)
78 guc_fw->major_ver_wanted = KBL_FW_MAJOR; 78 guc_fw->major_ver_wanted = KBL_FW_MAJOR;
79 guc_fw->minor_ver_wanted = KBL_FW_MINOR; 79 guc_fw->minor_ver_wanted = KBL_FW_MINOR;
80 } else { 80 } else {
81 DRM_WARN("%s: No firmware known for this platform!\n", 81 dev_info(dev_priv->drm.dev,
82 "%s: No firmware known for this platform!\n",
82 intel_uc_fw_type_repr(guc_fw->type)); 83 intel_uc_fw_type_repr(guc_fw->type));
83 } 84 }
84} 85}
@@ -125,66 +126,26 @@ static void guc_prepare_xfer(struct intel_guc *guc)
125} 126}
126 127
127/* Copy RSA signature from the fw image to HW for verification */ 128/* Copy RSA signature from the fw image to HW for verification */
128static int guc_xfer_rsa(struct intel_guc *guc, struct i915_vma *vma) 129static void guc_xfer_rsa(struct intel_guc *guc, struct i915_vma *vma)
129{ 130{
130 struct drm_i915_private *dev_priv = guc_to_i915(guc); 131 struct drm_i915_private *dev_priv = guc_to_i915(guc);
131 struct intel_uc_fw *guc_fw = &guc->fw;
132 struct sg_table *sg = vma->pages;
133 u32 rsa[UOS_RSA_SCRATCH_COUNT]; 132 u32 rsa[UOS_RSA_SCRATCH_COUNT];
134 int i; 133 int i;
135 134
136 if (sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, sizeof(rsa), 135 sg_pcopy_to_buffer(vma->pages->sgl, vma->pages->nents,
137 guc_fw->rsa_offset) != sizeof(rsa)) 136 rsa, sizeof(rsa), guc->fw.rsa_offset);
138 return -EINVAL;
139 137
140 for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++) 138 for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
141 I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]); 139 I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);
142
143 return 0;
144} 140}
145 141
146/* 142static bool guc_xfer_completed(struct intel_guc *guc, u32 *status)
147 * Transfer the firmware image to RAM for execution by the microcontroller.
148 *
149 * Architecturally, the DMA engine is bidirectional, and can potentially even
150 * transfer between GTT locations. This functionality is left out of the API
151 * for now as there is no need for it.
152 */
153static int guc_xfer_ucode(struct intel_guc *guc, struct i915_vma *vma)
154{ 143{
155 struct drm_i915_private *dev_priv = guc_to_i915(guc); 144 struct drm_i915_private *dev_priv = guc_to_i915(guc);
156 struct intel_uc_fw *guc_fw = &guc->fw;
157 unsigned long offset;
158 u32 status;
159 int ret;
160
161 /*
162 * The header plus uCode will be copied to WOPCM via DMA, excluding any
163 * other components
164 */
165 I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
166
167 /* Set the source address for the new blob */
168 offset = intel_guc_ggtt_offset(guc, vma) + guc_fw->header_offset;
169 I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
170 I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
171 145
172 /* 146 /* Did we complete the xfer? */
173 * Set the DMA destination. Current uCode expects the code to be 147 *status = I915_READ(DMA_CTRL);
174 * loaded at 8k; locations below this are used for the stack. 148 return !(*status & START_DMA);
175 */
176 I915_WRITE(DMA_ADDR_1_LOW, 0x2000);
177 I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
178
179 /* Finally start the DMA */
180 I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));
181
182 /* Wait for DMA to finish */
183 ret = __intel_wait_for_register_fw(dev_priv, DMA_CTRL, START_DMA, 0,
184 2, 100, &status);
185 DRM_DEBUG_DRIVER("GuC DMA status %#x\n", status);
186
187 return ret;
188} 149}
189 150
190/* 151/*
@@ -217,8 +178,8 @@ static int guc_wait_ucode(struct intel_guc *guc)
217 * NB: Docs recommend not using the interrupt for completion. 178 * NB: Docs recommend not using the interrupt for completion.
218 * Measurements indicate this should take no more than 20ms, so a 179 * Measurements indicate this should take no more than 20ms, so a
219 * timeout here indicates that the GuC has failed and is unusable. 180 * timeout here indicates that the GuC has failed and is unusable.
220 * (Higher levels of the driver will attempt to fall back to 181 * (Higher levels of the driver may decide to reset the GuC and
221 * execlist mode if this happens.) 182 * attempt the ucode load again if this happens.)
222 */ 183 */
223 ret = wait_for(guc_ready(guc, &status), 100); 184 ret = wait_for(guc_ready(guc, &status), 100);
224 DRM_DEBUG_DRIVER("GuC status %#x\n", status); 185 DRM_DEBUG_DRIVER("GuC status %#x\n", status);
@@ -228,10 +189,52 @@ static int guc_wait_ucode(struct intel_guc *guc)
228 ret = -ENOEXEC; 189 ret = -ENOEXEC;
229 } 190 }
230 191
192 if (ret == 0 && !guc_xfer_completed(guc, &status)) {
193 DRM_ERROR("GuC is ready, but the xfer %08x is incomplete\n",
194 status);
195 ret = -ENXIO;
196 }
197
231 return ret; 198 return ret;
232} 199}
233 200
234/* 201/*
202 * Transfer the firmware image to RAM for execution by the microcontroller.
203 *
204 * Architecturally, the DMA engine is bidirectional, and can potentially even
205 * transfer between GTT locations. This functionality is left out of the API
206 * for now as there is no need for it.
207 */
208static int guc_xfer_ucode(struct intel_guc *guc, struct i915_vma *vma)
209{
210 struct drm_i915_private *dev_priv = guc_to_i915(guc);
211 struct intel_uc_fw *guc_fw = &guc->fw;
212 unsigned long offset;
213
214 /*
215 * The header plus uCode will be copied to WOPCM via DMA, excluding any
216 * other components
217 */
218 I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
219
220 /* Set the source address for the new blob */
221 offset = intel_guc_ggtt_offset(guc, vma) + guc_fw->header_offset;
222 I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
223 I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
224
225 /*
226 * Set the DMA destination. Current uCode expects the code to be
227 * loaded at 8k; locations below this are used for the stack.
228 */
229 I915_WRITE(DMA_ADDR_1_LOW, 0x2000);
230 I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
231
232 /* Finally start the DMA */
233 I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));
234
235 return guc_wait_ucode(guc);
236}
237/*
235 * Load the GuC firmware blob into the MinuteIA. 238 * Load the GuC firmware blob into the MinuteIA.
236 */ 239 */
237static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma) 240static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
@@ -251,17 +254,9 @@ static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
251 * by the DMA engine in one operation, whereas the RSA signature is 254 * by the DMA engine in one operation, whereas the RSA signature is
252 * loaded via MMIO. 255 * loaded via MMIO.
253 */ 256 */
254 ret = guc_xfer_rsa(guc, vma); 257 guc_xfer_rsa(guc, vma);
255 if (ret)
256 DRM_WARN("GuC firmware signature xfer error %d\n", ret);
257 258
258 ret = guc_xfer_ucode(guc, vma); 259 ret = guc_xfer_ucode(guc, vma);
259 if (ret)
260 DRM_WARN("GuC firmware code xfer error %d\n", ret);
261
262 ret = guc_wait_ucode(guc);
263 if (ret)
264 DRM_ERROR("GuC firmware xfer error %d\n", ret);
265 260
266 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 261 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
267 262
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index 8382d591c784..b2f5148f4f17 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -39,6 +39,11 @@
39#define GUC_VIDEO_ENGINE2 4 39#define GUC_VIDEO_ENGINE2 4
40#define GUC_MAX_ENGINES_NUM (GUC_VIDEO_ENGINE2 + 1) 40#define GUC_MAX_ENGINES_NUM (GUC_VIDEO_ENGINE2 + 1)
41 41
42#define GUC_DOORBELL_INVALID 256
43
44#define GUC_DB_SIZE (PAGE_SIZE)
45#define GUC_WQ_SIZE (PAGE_SIZE * 2)
46
42/* Work queue item header definitions */ 47/* Work queue item header definitions */
43#define WQ_STATUS_ACTIVE 1 48#define WQ_STATUS_ACTIVE 1
44#define WQ_STATUS_SUSPENDED 2 49#define WQ_STATUS_SUSPENDED 2
@@ -59,9 +64,6 @@
59#define WQ_RING_TAIL_MAX 0x7FF /* 2^11 QWords */ 64#define WQ_RING_TAIL_MAX 0x7FF /* 2^11 QWords */
60#define WQ_RING_TAIL_MASK (WQ_RING_TAIL_MAX << WQ_RING_TAIL_SHIFT) 65#define WQ_RING_TAIL_MASK (WQ_RING_TAIL_MAX << WQ_RING_TAIL_SHIFT)
61 66
62#define GUC_DOORBELL_ENABLED 1
63#define GUC_DOORBELL_DISABLED 0
64
65#define GUC_STAGE_DESC_ATTR_ACTIVE BIT(0) 67#define GUC_STAGE_DESC_ATTR_ACTIVE BIT(0)
66#define GUC_STAGE_DESC_ATTR_PENDING_DB BIT(1) 68#define GUC_STAGE_DESC_ATTR_PENDING_DB BIT(1)
67#define GUC_STAGE_DESC_ATTR_KERNEL BIT(2) 69#define GUC_STAGE_DESC_ATTR_KERNEL BIT(2)
@@ -219,26 +221,6 @@ struct uc_css_header {
219 u32 header_info; 221 u32 header_info;
220} __packed; 222} __packed;
221 223
222struct guc_doorbell_info {
223 u32 db_status;
224 u32 cookie;
225 u32 reserved[14];
226} __packed;
227
228union guc_doorbell_qw {
229 struct {
230 u32 db_status;
231 u32 cookie;
232 };
233 u64 value_qw;
234} __packed;
235
236#define GUC_NUM_DOORBELLS 256
237#define GUC_DOORBELL_INVALID (GUC_NUM_DOORBELLS)
238
239#define GUC_DB_SIZE (PAGE_SIZE)
240#define GUC_WQ_SIZE (PAGE_SIZE * 2)
241
242/* Work item for submitting workloads into work queue of GuC. */ 224/* Work item for submitting workloads into work queue of GuC. */
243struct guc_wq_item { 225struct guc_wq_item {
244 u32 header; 226 u32 header;
@@ -601,7 +583,9 @@ struct guc_shared_ctx_data {
601 * registers, where first register holds data treated as message header, 583 * registers, where first register holds data treated as message header,
602 * and other registers are used to hold message payload. 584 * and other registers are used to hold message payload.
603 * 585 *
604 * For Gen9+, GuC uses software scratch registers 0xC180-0xC1B8 586 * For Gen9+, GuC uses software scratch registers 0xC180-0xC1B8,
587 * but no H2G command takes more than 8 parameters and the GuC FW
588 * itself uses an 8-element array to store the H2G message.
605 * 589 *
606 * +-----------+---------+---------+---------+ 590 * +-----------+---------+---------+---------+
607 * | MMIO[0] | MMIO[1] | ... | MMIO[n] | 591 * | MMIO[0] | MMIO[1] | ... | MMIO[n] |
@@ -633,6 +617,8 @@ struct guc_shared_ctx_data {
633 * field. 617 * field.
634 */ 618 */
635 619
620#define GUC_MAX_MMIO_MSG_LEN 8
621
636#define INTEL_GUC_MSG_TYPE_SHIFT 28 622#define INTEL_GUC_MSG_TYPE_SHIFT 28
637#define INTEL_GUC_MSG_TYPE_MASK (0xF << INTEL_GUC_MSG_TYPE_SHIFT) 623#define INTEL_GUC_MSG_TYPE_MASK (0xF << INTEL_GUC_MSG_TYPE_SHIFT)
638#define INTEL_GUC_MSG_DATA_SHIFT 16 624#define INTEL_GUC_MSG_DATA_SHIFT 16
@@ -687,6 +673,13 @@ enum intel_guc_report_status {
687 INTEL_GUC_REPORT_STATUS_COMPLETE = 0x4, 673 INTEL_GUC_REPORT_STATUS_COMPLETE = 0x4,
688}; 674};
689 675
676enum intel_guc_sleep_state_status {
677 INTEL_GUC_SLEEP_STATE_SUCCESS = 0x0,
678 INTEL_GUC_SLEEP_STATE_PREEMPT_TO_IDLE_FAILED = 0x1,
679 INTEL_GUC_SLEEP_STATE_ENGINE_RESET_FAILED = 0x2
680#define INTEL_GUC_SLEEP_STATE_INVALID_MASK 0x80000000
681};
682
690#define GUC_LOG_CONTROL_LOGGING_ENABLED (1 << 0) 683#define GUC_LOG_CONTROL_LOGGING_ENABLED (1 << 0)
691#define GUC_LOG_CONTROL_VERBOSITY_SHIFT 4 684#define GUC_LOG_CONTROL_VERBOSITY_SHIFT 4
692#define GUC_LOG_CONTROL_VERBOSITY_MASK (0xF << GUC_LOG_CONTROL_VERBOSITY_SHIFT) 685#define GUC_LOG_CONTROL_VERBOSITY_MASK (0xF << GUC_LOG_CONTROL_VERBOSITY_SHIFT)
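
[Editor's note: given the bit layout documented in the hunk above, packing an H2G message header looks roughly like this. Only the shifts are taken from the hunk (INTEL_GUC_MSG_TYPE_SHIFT 28, INTEL_GUC_MSG_DATA_SHIFT 16); the field widths are assumptions for this sketch.]

#include <stdint.h>
#include <stdio.h>

#define GUC_MSG_TYPE_SHIFT	28	/* INTEL_GUC_MSG_TYPE_SHIFT */
#define GUC_MSG_DATA_SHIFT	16	/* INTEL_GUC_MSG_DATA_SHIFT */

/* Field widths below are assumptions, not taken from the hunk. */
static uint32_t guc_msg_header(uint32_t type, uint32_t data, uint32_t code)
{
	return (type & 0xF) << GUC_MSG_TYPE_SHIFT |
	       (data & 0xFFF) << GUC_MSG_DATA_SHIFT |
	       (code & 0xFFFF);
}

int main(void)
{
	/* MMIO[0] carries the header; MMIO[1..n] carry the payload,
	 * where n may not exceed GUC_MAX_MMIO_MSG_LEN - 1 = 7. */
	printf("header = 0x%08x\n", guc_msg_header(0x0, 0x0, 0x30));
	return 0;
}
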
diff --git a/drivers/gpu/drm/i915/intel_guc_reg.h b/drivers/gpu/drm/i915/intel_guc_reg.h
index d86084742a4a..57e7ad522c2f 100644
--- a/drivers/gpu/drm/i915/intel_guc_reg.h
+++ b/drivers/gpu/drm/i915/intel_guc_reg.h
@@ -104,6 +104,18 @@
104#define GUC_SEND_INTERRUPT _MMIO(0xc4c8) 104#define GUC_SEND_INTERRUPT _MMIO(0xc4c8)
105#define GUC_SEND_TRIGGER (1<<0) 105#define GUC_SEND_TRIGGER (1<<0)
106 106
107#define GUC_NUM_DOORBELLS 256
108
109/* format of the HW-monitored doorbell cacheline */
110struct guc_doorbell_info {
111 u32 db_status;
112#define GUC_DOORBELL_DISABLED 0
113#define GUC_DOORBELL_ENABLED 1
114
115 u32 cookie;
116 u32 reserved[14];
117} __packed;
118
107#define GEN8_DRBREGL(x) _MMIO(0x1000 + (x) * 8) 119#define GEN8_DRBREGL(x) _MMIO(0x1000 + (x) * 8)
108#define GEN8_DRB_VALID (1<<0) 120#define GEN8_DRB_VALID (1<<0)
109#define GEN8_DRBREGU(x) _MMIO(0x1000 + (x) * 8 + 4) 121#define GEN8_DRBREGU(x) _MMIO(0x1000 + (x) * 8 + 4)
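The struct moved into this header is sized to exactly one 64-byte cacheline (2 + 14 u32 fields), which is the unit the hardware monitors. A compile-time check capturing that assumption, for illustration only:

    static inline void guc_doorbell_layout_check(void)
    {
            /* db_status + cookie + reserved[14] == 16 u32 == 64 bytes */
            BUILD_BUG_ON(sizeof(struct guc_doorbell_info) != 64);
    }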
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index a81f04d46e87..1570dcbe249c 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -192,7 +192,15 @@ static struct guc_doorbell_info *__get_doorbell(struct intel_guc_client *client)
192 return client->vaddr + client->doorbell_offset; 192 return client->vaddr + client->doorbell_offset;
193} 193}
194 194
195static void __create_doorbell(struct intel_guc_client *client) 195static bool __doorbell_valid(struct intel_guc *guc, u16 db_id)
196{
197 struct drm_i915_private *dev_priv = guc_to_i915(guc);
198
199 GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS);
200 return I915_READ(GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID;
201}
202
203static void __init_doorbell(struct intel_guc_client *client)
196{ 204{
197 struct guc_doorbell_info *doorbell; 205 struct guc_doorbell_info *doorbell;
198 206
@@ -201,21 +209,19 @@ static void __create_doorbell(struct intel_guc_client *client)
201 doorbell->cookie = 0; 209 doorbell->cookie = 0;
202} 210}
203 211
204static void __destroy_doorbell(struct intel_guc_client *client) 212static void __fini_doorbell(struct intel_guc_client *client)
205{ 213{
206 struct drm_i915_private *dev_priv = guc_to_i915(client->guc);
207 struct guc_doorbell_info *doorbell; 214 struct guc_doorbell_info *doorbell;
208 u16 db_id = client->doorbell_id; 215 u16 db_id = client->doorbell_id;
209 216
210 doorbell = __get_doorbell(client); 217 doorbell = __get_doorbell(client);
211 doorbell->db_status = GUC_DOORBELL_DISABLED; 218 doorbell->db_status = GUC_DOORBELL_DISABLED;
212 doorbell->cookie = 0;
213 219
214 /* Doorbell release flow requires that we wait for GEN8_DRB_VALID bit 220 /* Doorbell release flow requires that we wait for GEN8_DRB_VALID bit
215 * to go to zero after updating db_status before we call the GuC to 221 * to go to zero after updating db_status before we call the GuC to
216 * release the doorbell 222 * release the doorbell
217 */ 223 */
218 if (wait_for_us(!(I915_READ(GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID), 10)) 224 if (wait_for_us(!__doorbell_valid(client->guc, db_id), 10))
219 WARN_ONCE(true, "Doorbell never became invalid after disable\n"); 225 WARN_ONCE(true, "Doorbell never became invalid after disable\n");
220} 226}
221 227
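The 10 passed to wait_for_us() is a microsecond budget: the helper polls the condition until it holds or the budget expires. A rough open-coded equivalent of the wait above, for illustration only (the real i915 macro is ktime-based and uses cpu_relax()):

    static bool wait_doorbell_invalid(struct intel_guc *guc, u16 db_id)
    {
            unsigned int timeout_us = 10;

            while (__doorbell_valid(guc, db_id)) {
                    if (!timeout_us--)
                            return false;   /* GEN8_DRB_VALID never dropped */
                    udelay(1);
            }

            return true;
    }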
@@ -227,11 +233,11 @@ static int create_doorbell(struct intel_guc_client *client)
227 return -ENODEV; /* internal setup error, should never happen */ 233 return -ENODEV; /* internal setup error, should never happen */
228 234
229 __update_doorbell_desc(client, client->doorbell_id); 235 __update_doorbell_desc(client, client->doorbell_id);
230 __create_doorbell(client); 236 __init_doorbell(client);
231 237
232 ret = __guc_allocate_doorbell(client->guc, client->stage_id); 238 ret = __guc_allocate_doorbell(client->guc, client->stage_id);
233 if (ret) { 239 if (ret) {
234 __destroy_doorbell(client); 240 __fini_doorbell(client);
235 __update_doorbell_desc(client, GUC_DOORBELL_INVALID); 241 __update_doorbell_desc(client, GUC_DOORBELL_INVALID);
236 DRM_DEBUG_DRIVER("Couldn't create client %u doorbell: %d\n", 242 DRM_DEBUG_DRIVER("Couldn't create client %u doorbell: %d\n",
237 client->stage_id, ret); 243 client->stage_id, ret);
@@ -247,7 +253,7 @@ static int destroy_doorbell(struct intel_guc_client *client)
247 253
248 GEM_BUG_ON(!has_doorbell(client)); 254 GEM_BUG_ON(!has_doorbell(client));
249 255
250 __destroy_doorbell(client); 256 __fini_doorbell(client);
251 ret = __guc_deallocate_doorbell(client->guc, client->stage_id); 257 ret = __guc_deallocate_doorbell(client->guc, client->stage_id);
252 if (ret) 258 if (ret)
253 DRM_ERROR("Couldn't destroy client %u doorbell: %d\n", 259 DRM_ERROR("Couldn't destroy client %u doorbell: %d\n",
@@ -282,8 +288,7 @@ __get_process_desc(struct intel_guc_client *client)
282/* 288/*
283 * Initialise the process descriptor shared with the GuC firmware. 289 * Initialise the process descriptor shared with the GuC firmware.
284 */ 290 */
285static void guc_proc_desc_init(struct intel_guc *guc, 291static void guc_proc_desc_init(struct intel_guc_client *client)
286 struct intel_guc_client *client)
287{ 292{
288 struct guc_process_desc *desc; 293 struct guc_process_desc *desc;
289 294
@@ -304,6 +309,14 @@ static void guc_proc_desc_init(struct intel_guc *guc,
304 desc->priority = client->priority; 309 desc->priority = client->priority;
305} 310}
306 311
312static void guc_proc_desc_fini(struct intel_guc_client *client)
313{
314 struct guc_process_desc *desc;
315
316 desc = __get_process_desc(client);
317 memset(desc, 0, sizeof(*desc));
318}
319
307static int guc_stage_desc_pool_create(struct intel_guc *guc) 320static int guc_stage_desc_pool_create(struct intel_guc *guc)
308{ 321{
309 struct i915_vma *vma; 322 struct i915_vma *vma;
@@ -341,9 +354,9 @@ static void guc_stage_desc_pool_destroy(struct intel_guc *guc)
341 * data structures relating to this client (doorbell, process descriptor, 354 * data structures relating to this client (doorbell, process descriptor,
342 * write queue, etc). 355 * write queue, etc).
343 */ 356 */
344static void guc_stage_desc_init(struct intel_guc *guc, 357static void guc_stage_desc_init(struct intel_guc_client *client)
345 struct intel_guc_client *client)
346{ 358{
359 struct intel_guc *guc = client->guc;
347 struct drm_i915_private *dev_priv = guc_to_i915(guc); 360 struct drm_i915_private *dev_priv = guc_to_i915(guc);
348 struct intel_engine_cs *engine; 361 struct intel_engine_cs *engine;
349 struct i915_gem_context *ctx = client->owner; 362 struct i915_gem_context *ctx = client->owner;
@@ -424,8 +437,7 @@ static void guc_stage_desc_init(struct intel_guc *guc,
424 desc->desc_private = ptr_to_u64(client); 437 desc->desc_private = ptr_to_u64(client);
425} 438}
426 439
427static void guc_stage_desc_fini(struct intel_guc *guc, 440static void guc_stage_desc_fini(struct intel_guc_client *client)
428 struct intel_guc_client *client)
429{ 441{
430 struct guc_stage_desc *desc; 442 struct guc_stage_desc *desc;
431 443
@@ -486,14 +498,6 @@ static void guc_wq_item_append(struct intel_guc_client *client,
486 WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1)); 498 WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1));
487} 499}
488 500
489static void guc_reset_wq(struct intel_guc_client *client)
490{
491 struct guc_process_desc *desc = __get_process_desc(client);
492
493 desc->head = 0;
494 desc->tail = 0;
495}
496
497static void guc_ring_doorbell(struct intel_guc_client *client) 501static void guc_ring_doorbell(struct intel_guc_client *client)
498{ 502{
499 struct guc_doorbell_info *db; 503 struct guc_doorbell_info *db;
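The tail update a few lines up relies on GUC_WQ_SIZE being a power of two (PAGE_SIZE * 2), so masking with GUC_WQ_SIZE - 1 wraps the offset without a division. The same idiom in isolation (wq_advance is a hypothetical helper):

    static inline u32 wq_advance(u32 off, u32 len)
    {
            /* power-of-two ring: the mask performs the modulo */
            return (off + len) & (GUC_WQ_SIZE - 1);
    }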
@@ -746,30 +750,28 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
746 while ((rb = rb_first_cached(&execlists->queue))) { 750 while ((rb = rb_first_cached(&execlists->queue))) {
747 struct i915_priolist *p = to_priolist(rb); 751 struct i915_priolist *p = to_priolist(rb);
748 struct i915_request *rq, *rn; 752 struct i915_request *rq, *rn;
753 int i;
749 754
750 list_for_each_entry_safe(rq, rn, &p->requests, sched.link) { 755 priolist_for_each_request_consume(rq, rn, p, i) {
751 if (last && rq->hw_context != last->hw_context) { 756 if (last && rq->hw_context != last->hw_context) {
752 if (port == last_port) { 757 if (port == last_port)
753 __list_del_many(&p->requests,
754 &rq->sched.link);
755 goto done; 758 goto done;
756 }
757 759
758 if (submit) 760 if (submit)
759 port_assign(port, last); 761 port_assign(port, last);
760 port++; 762 port++;
761 } 763 }
762 764
763 INIT_LIST_HEAD(&rq->sched.link); 765 list_del_init(&rq->sched.link);
764 766
765 __i915_request_submit(rq); 767 __i915_request_submit(rq);
766 trace_i915_request_in(rq, port_index(port, execlists)); 768 trace_i915_request_in(rq, port_index(port, execlists));
769
767 last = rq; 770 last = rq;
768 submit = true; 771 submit = true;
769 } 772 }
770 773
771 rb_erase_cached(&p->node, &execlists->queue); 774 rb_erase_cached(&p->node, &execlists->queue);
772 INIT_LIST_HEAD(&p->requests);
773 if (p->priority != I915_PRIORITY_NORMAL) 775 if (p->priority != I915_PRIORITY_NORMAL)
774 kmem_cache_free(engine->i915->priorities, p); 776 kmem_cache_free(engine->i915->priorities, p);
775 } 777 }
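Switching from INIT_LIST_HEAD() to list_del_init() matters because the request is still linked on the priolist at this point: list_del_init() both unlinks the node and reinitialises it, leaving it safe to test, delete again, or re-add. In isolation (detach_request is a hypothetical wrapper):

    static inline void detach_request(struct i915_request *rq)
    {
            /*
             * list_del() + INIT_LIST_HEAD() in one step: rq is removed from
             * its current list and list_empty(&rq->sched.link) holds after.
             */
            list_del_init(&rq->sched.link);
    }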
@@ -791,19 +793,8 @@ done:
791 793
792static void guc_dequeue(struct intel_engine_cs *engine) 794static void guc_dequeue(struct intel_engine_cs *engine)
793{ 795{
794 unsigned long flags; 796 if (__guc_dequeue(engine))
795 bool submit;
796
797 local_irq_save(flags);
798
799 spin_lock(&engine->timeline.lock);
800 submit = __guc_dequeue(engine);
801 spin_unlock(&engine->timeline.lock);
802
803 if (submit)
804 guc_submit(engine); 797 guc_submit(engine);
805
806 local_irq_restore(flags);
807} 798}
808 799
809static void guc_submission_tasklet(unsigned long data) 800static void guc_submission_tasklet(unsigned long data)
@@ -812,6 +803,9 @@ static void guc_submission_tasklet(unsigned long data)
812 struct intel_engine_execlists * const execlists = &engine->execlists; 803 struct intel_engine_execlists * const execlists = &engine->execlists;
813 struct execlist_port *port = execlists->port; 804 struct execlist_port *port = execlists->port;
814 struct i915_request *rq; 805 struct i915_request *rq;
806 unsigned long flags;
807
808 spin_lock_irqsave(&engine->timeline.lock, flags);
815 809
816 rq = port_request(port); 810 rq = port_request(port);
817 while (rq && i915_request_completed(rq)) { 811 while (rq && i915_request_completed(rq)) {
@@ -835,6 +829,8 @@ static void guc_submission_tasklet(unsigned long data)
835 829
836 if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT)) 830 if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT))
837 guc_dequeue(engine); 831 guc_dequeue(engine);
832
833 spin_unlock_irqrestore(&engine->timeline.lock, flags);
838} 834}
839 835
840static struct i915_request * 836static struct i915_request *
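With the irq-save lock hoisted out of guc_dequeue() into the tasklet, __guc_dequeue() now runs with engine->timeline.lock already held by its caller. One common way to document such a contract, sketched here rather than taken from the patch, is a lockdep assertion in the callee:

    /* hypothetical helper: assert the caller's locking under lockdep */
    static void assert_dequeue_locked(struct intel_engine_cs *engine)
    {
            lockdep_assert_held(&engine->timeline.lock);
    }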
@@ -877,72 +873,31 @@ guc_reset_prepare(struct intel_engine_cs *engine)
877/* Check that a doorbell register is in the expected state */ 873/* Check that a doorbell register is in the expected state */
878static bool doorbell_ok(struct intel_guc *guc, u16 db_id) 874static bool doorbell_ok(struct intel_guc *guc, u16 db_id)
879{ 875{
880 struct drm_i915_private *dev_priv = guc_to_i915(guc);
881 u32 drbregl;
882 bool valid; 876 bool valid;
883 877
884 GEM_BUG_ON(db_id >= GUC_DOORBELL_INVALID); 878 GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS);
885 879
886 drbregl = I915_READ(GEN8_DRBREGL(db_id)); 880 valid = __doorbell_valid(guc, db_id);
887 valid = drbregl & GEN8_DRB_VALID;
888 881
889 if (test_bit(db_id, guc->doorbell_bitmap) == valid) 882 if (test_bit(db_id, guc->doorbell_bitmap) == valid)
890 return true; 883 return true;
891 884
892 DRM_DEBUG_DRIVER("Doorbell %d has unexpected state (0x%x): valid=%s\n", 885 DRM_DEBUG_DRIVER("Doorbell %u has unexpected state: valid=%s\n",
893 db_id, drbregl, yesno(valid)); 886 db_id, yesno(valid));
894 887
895 return false; 888 return false;
896} 889}
897 890
898static bool guc_verify_doorbells(struct intel_guc *guc) 891static bool guc_verify_doorbells(struct intel_guc *guc)
899{ 892{
893 bool doorbells_ok = true;
900 u16 db_id; 894 u16 db_id;
901 895
902 for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id) 896 for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id)
903 if (!doorbell_ok(guc, db_id)) 897 if (!doorbell_ok(guc, db_id))
904 return false; 898 doorbells_ok = false;
905
906 return true;
907}
908
909static int guc_clients_doorbell_init(struct intel_guc *guc)
910{
911 int ret;
912
913 ret = create_doorbell(guc->execbuf_client);
914 if (ret)
915 return ret;
916
917 if (guc->preempt_client) {
918 ret = create_doorbell(guc->preempt_client);
919 if (ret) {
920 destroy_doorbell(guc->execbuf_client);
921 return ret;
922 }
923 }
924
925 return 0;
926}
927
928static void guc_clients_doorbell_fini(struct intel_guc *guc)
929{
930 /*
931 * By the time we're here, GuC has already been reset.
932 * Instead of trying (in vain) to communicate with it, let's just
933 * cleanup the doorbell HW and our internal state.
934 */
935 if (guc->preempt_client) {
936 __destroy_doorbell(guc->preempt_client);
937 __update_doorbell_desc(guc->preempt_client,
938 GUC_DOORBELL_INVALID);
939 }
940 899
941 if (guc->execbuf_client) { 900 return doorbells_ok;
942 __destroy_doorbell(guc->execbuf_client);
943 __update_doorbell_desc(guc->execbuf_client,
944 GUC_DOORBELL_INVALID);
945 }
946} 901}
947 902
948/** 903/**
@@ -1005,6 +960,10 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
1005 } 960 }
1006 client->vaddr = vaddr; 961 client->vaddr = vaddr;
1007 962
963 ret = reserve_doorbell(client);
964 if (ret)
965 goto err_vaddr;
966
1008 client->doorbell_offset = __select_cacheline(guc); 967 client->doorbell_offset = __select_cacheline(guc);
1009 968
1010 /* 969 /*
@@ -1017,13 +976,6 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
1017 else 976 else
1018 client->proc_desc_offset = (GUC_DB_SIZE / 2); 977 client->proc_desc_offset = (GUC_DB_SIZE / 2);
1019 978
1020 guc_proc_desc_init(guc, client);
1021 guc_stage_desc_init(guc, client);
1022
1023 ret = reserve_doorbell(client);
1024 if (ret)
1025 goto err_vaddr;
1026
1027 DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: stage_id %u\n", 979 DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: stage_id %u\n",
1028 priority, client, client->engines, client->stage_id); 980 priority, client, client->engines, client->stage_id);
1029 DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%lx\n", 981 DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%lx\n",
@@ -1045,7 +997,6 @@ err_client:
1045static void guc_client_free(struct intel_guc_client *client) 997static void guc_client_free(struct intel_guc_client *client)
1046{ 998{
1047 unreserve_doorbell(client); 999 unreserve_doorbell(client);
1048 guc_stage_desc_fini(client->guc, client);
1049 i915_vma_unpin_and_release(&client->vma, I915_VMA_RELEASE_MAP); 1000 i915_vma_unpin_and_release(&client->vma, I915_VMA_RELEASE_MAP);
1050 ida_simple_remove(&client->guc->stage_ids, client->stage_id); 1001 ida_simple_remove(&client->guc->stage_ids, client->stage_id);
1051 kfree(client); 1002 kfree(client);
@@ -1112,6 +1063,69 @@ static void guc_clients_destroy(struct intel_guc *guc)
1112 guc_client_free(client); 1063 guc_client_free(client);
1113} 1064}
1114 1065
1066static int __guc_client_enable(struct intel_guc_client *client)
1067{
1068 int ret;
1069
1070 guc_proc_desc_init(client);
1071 guc_stage_desc_init(client);
1072
1073 ret = create_doorbell(client);
1074 if (ret)
1075 goto fail;
1076
1077 return 0;
1078
1079fail:
1080 guc_stage_desc_fini(client);
1081 guc_proc_desc_fini(client);
1082 return ret;
1083}
1084
1085static void __guc_client_disable(struct intel_guc_client *client)
1086{
1087 /*
1088 * By the time we're here, GuC may have already been reset. if that is
1089 * the case, instead of trying (in vain) to communicate with it, let's
1090 * just cleanup the doorbell HW and our internal state.
1091 */
1092 if (intel_guc_is_alive(client->guc))
1093 destroy_doorbell(client);
1094 else
1095 __fini_doorbell(client);
1096
1097 guc_stage_desc_fini(client);
1098 guc_proc_desc_fini(client);
1099}
1100
1101static int guc_clients_enable(struct intel_guc *guc)
1102{
1103 int ret;
1104
1105 ret = __guc_client_enable(guc->execbuf_client);
1106 if (ret)
1107 return ret;
1108
1109 if (guc->preempt_client) {
1110 ret = __guc_client_enable(guc->preempt_client);
1111 if (ret) {
1112 __guc_client_disable(guc->execbuf_client);
1113 return ret;
1114 }
1115 }
1116
1117 return 0;
1118}
1119
1120static void guc_clients_disable(struct intel_guc *guc)
1121{
1122 if (guc->preempt_client)
1123 __guc_client_disable(guc->preempt_client);
1124
1125 if (guc->execbuf_client)
1126 __guc_client_disable(guc->execbuf_client);
1127}
1128
1115/* 1129/*
1116 * Set up the memory resources to be shared with the GuC (via the GGTT) 1130 * Set up the memory resources to be shared with the GuC (via the GGTT)
1117 * at firmware loading time. 1131 * at firmware loading time.
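Note the ordering above: guc_clients_enable() brings up the execbuf client first and the preempt client second, while guc_clients_disable() tears them down in reverse, the usual LIFO unwind. The same shape with hypothetical step names, as a sketch of the idiom:

    static int bring_up(struct intel_guc *guc)
    {
            int err;

            err = enable_a(guc);            /* hypothetical step */
            if (err)
                    return err;

            err = enable_b(guc);            /* hypothetical step */
            if (err) {
                    disable_a(guc);         /* unwind in reverse order */
                    return err;
            }

            return 0;
    }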
@@ -1295,15 +1309,11 @@ int intel_guc_submission_enable(struct intel_guc *guc)
1295 1309
1296 GEM_BUG_ON(!guc->execbuf_client); 1310 GEM_BUG_ON(!guc->execbuf_client);
1297 1311
1298 guc_reset_wq(guc->execbuf_client);
1299 if (guc->preempt_client)
1300 guc_reset_wq(guc->preempt_client);
1301
1302 err = intel_guc_sample_forcewake(guc); 1312 err = intel_guc_sample_forcewake(guc);
1303 if (err) 1313 if (err)
1304 return err; 1314 return err;
1305 1315
1306 err = guc_clients_doorbell_init(guc); 1316 err = guc_clients_enable(guc);
1307 if (err) 1317 if (err)
1308 return err; 1318 return err;
1309 1319
@@ -1325,7 +1335,7 @@ void intel_guc_submission_disable(struct intel_guc *guc)
1325 GEM_BUG_ON(dev_priv->gt.awake); /* GT should be parked first */ 1335 GEM_BUG_ON(dev_priv->gt.awake); /* GT should be parked first */
1326 1336
1327 guc_interrupts_release(dev_priv); 1337 guc_interrupts_release(dev_priv);
1328 guc_clients_doorbell_fini(guc); 1338 guc_clients_disable(guc);
1329} 1339}
1330 1340
1331#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) 1341#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/intel_hdcp.c
index 26e48fc95543..1bf487f94254 100644
--- a/drivers/gpu/drm/i915/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/intel_hdcp.c
@@ -16,6 +16,62 @@
16 16
17#define KEY_LOAD_TRIES 5 17#define KEY_LOAD_TRIES 5
18 18
19static
20bool intel_hdcp_is_ksv_valid(u8 *ksv)
21{
22 int i, ones = 0;
23 /* KSV has 20 1's and 20 0's */
24 for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
25 ones += hweight8(ksv[i]);
26 if (ones != 20)
27 return false;
28
29 return true;
30}
31
32static
33int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port,
34 const struct intel_hdcp_shim *shim, u8 *bksv)
35{
36 int ret, i, tries = 2;
37
38 /* HDCP spec states that we must retry the bksv if it is invalid */
39 for (i = 0; i < tries; i++) {
40 ret = shim->read_bksv(intel_dig_port, bksv);
41 if (ret)
42 return ret;
43 if (intel_hdcp_is_ksv_valid(bksv))
44 break;
45 }
46 if (i == tries) {
47 DRM_DEBUG_KMS("Bksv is invalid\n");
48 return -ENODEV;
49 }
50
51 return 0;
52}
53
54/* Is HDCP1.4 capable on Platform and Sink */
55bool intel_hdcp_capable(struct intel_connector *connector)
56{
57 struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
58 const struct intel_hdcp_shim *shim = connector->hdcp.shim;
59 bool capable = false;
60 u8 bksv[5];
61
62 if (!shim)
63 return capable;
64
65 if (shim->hdcp_capable) {
66 shim->hdcp_capable(intel_dig_port, &capable);
67 } else {
68 if (!intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv))
69 capable = true;
70 }
71
72 return capable;
73}
74
19static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port, 75static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port,
20 const struct intel_hdcp_shim *shim) 76 const struct intel_hdcp_shim *shim)
21{ 77{
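A KSV is 40 bits (DRM_HDCP_KSV_LEN is 5 bytes) and must contain exactly twenty set bits; hweight8() counts the ones per byte. A toy exercise of the helper with a value holding 8 + 8 + 4 = 20 set bits (the test function is illustrative only):

    static bool __maybe_unused ksv_sketch(void)
    {
            u8 ksv[DRM_HDCP_KSV_LEN] = { 0xff, 0xff, 0x0f, 0x00, 0x00 };

            return intel_hdcp_is_ksv_valid(ksv);   /* 20 ones -> true */
    }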
@@ -168,18 +224,6 @@ u32 intel_hdcp_get_repeater_ctl(struct intel_digital_port *intel_dig_port)
168} 224}
169 225
170static 226static
171bool intel_hdcp_is_ksv_valid(u8 *ksv)
172{
173 int i, ones = 0;
174 /* KSV has 20 1's and 20 0's */
175 for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
176 ones += hweight8(ksv[i]);
177 if (ones != 20)
178 return false;
179 return true;
180}
181
182static
183int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port, 227int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
184 const struct intel_hdcp_shim *shim, 228 const struct intel_hdcp_shim *shim,
185 u8 *ksv_fifo, u8 num_downstream, u8 *bstatus) 229 u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
@@ -383,7 +427,7 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
383 if (intel_wait_for_register(dev_priv, HDCP_REP_CTL, 427 if (intel_wait_for_register(dev_priv, HDCP_REP_CTL,
384 HDCP_SHA1_COMPLETE, 428 HDCP_SHA1_COMPLETE,
385 HDCP_SHA1_COMPLETE, 1)) { 429 HDCP_SHA1_COMPLETE, 1)) {
386 DRM_DEBUG_KMS("Timed out waiting for SHA1 complete\n"); 430 DRM_ERROR("Timed out waiting for SHA1 complete\n");
387 return -ETIMEDOUT; 431 return -ETIMEDOUT;
388 } 432 }
389 if (!(I915_READ(HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) { 433 if (!(I915_READ(HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
@@ -404,7 +448,7 @@ int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
404 448
405 ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim); 449 ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim);
406 if (ret) { 450 if (ret) {
407 DRM_ERROR("KSV list failed to become ready (%d)\n", ret); 451 DRM_DEBUG_KMS("KSV list failed to become ready (%d)\n", ret);
408 return ret; 452 return ret;
409 } 453 }
410 454
@@ -414,7 +458,7 @@ int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
414 458
415 if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) || 459 if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
416 DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) { 460 DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
417 DRM_ERROR("Max Topology Limit Exceeded\n"); 461 DRM_DEBUG_KMS("Max Topology Limit Exceeded\n");
418 return -EPERM; 462 return -EPERM;
419 } 463 }
420 464
@@ -450,7 +494,7 @@ int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
450 } 494 }
451 495
452 if (i == tries) { 496 if (i == tries) {
453 DRM_ERROR("V Prime validation failed.(%d)\n", ret); 497 DRM_DEBUG_KMS("V Prime validation failed.(%d)\n", ret);
454 goto err; 498 goto err;
455 } 499 }
456 500
@@ -499,7 +543,7 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
499 if (ret) 543 if (ret)
500 return ret; 544 return ret;
501 if (!hdcp_capable) { 545 if (!hdcp_capable) {
502 DRM_ERROR("Panel is not HDCP capable\n"); 546 DRM_DEBUG_KMS("Panel is not HDCP capable\n");
503 return -EINVAL; 547 return -EINVAL;
504 } 548 }
505 } 549 }
@@ -527,18 +571,9 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
527 571
528 memset(&bksv, 0, sizeof(bksv)); 572 memset(&bksv, 0, sizeof(bksv));
529 573
530 /* HDCP spec states that we must retry the bksv if it is invalid */ 574 ret = intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv.shim);
531 for (i = 0; i < tries; i++) { 575 if (ret < 0)
532 ret = shim->read_bksv(intel_dig_port, bksv.shim); 576 return ret;
533 if (ret)
534 return ret;
535 if (intel_hdcp_is_ksv_valid(bksv.shim))
536 break;
537 }
538 if (i == tries) {
539 DRM_ERROR("HDCP failed, Bksv is invalid\n");
540 return -ENODEV;
541 }
542 577
543 I915_WRITE(PORT_HDCP_BKSVLO(port), bksv.reg[0]); 578 I915_WRITE(PORT_HDCP_BKSVLO(port), bksv.reg[0]);
544 I915_WRITE(PORT_HDCP_BKSVHI(port), bksv.reg[1]); 579 I915_WRITE(PORT_HDCP_BKSVHI(port), bksv.reg[1]);
@@ -594,8 +629,8 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
594 } 629 }
595 630
596 if (i == tries) { 631 if (i == tries) {
597 DRM_ERROR("Timed out waiting for Ri prime match (%x)\n", 632 DRM_DEBUG_KMS("Timed out waiting for Ri prime match (%x)\n",
598 I915_READ(PORT_HDCP_STATUS(port))); 633 I915_READ(PORT_HDCP_STATUS(port)));
599 return -ETIMEDOUT; 634 return -ETIMEDOUT;
600 } 635 }
601 636
@@ -618,14 +653,9 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
618 return 0; 653 return 0;
619} 654}
620 655
621static
622struct intel_digital_port *conn_to_dig_port(struct intel_connector *connector)
623{
624 return enc_to_dig_port(&intel_attached_encoder(&connector->base)->base);
625}
626
627static int _intel_hdcp_disable(struct intel_connector *connector) 656static int _intel_hdcp_disable(struct intel_connector *connector)
628{ 657{
658 struct intel_hdcp *hdcp = &connector->hdcp;
629 struct drm_i915_private *dev_priv = connector->base.dev->dev_private; 659 struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
630 struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); 660 struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
631 enum port port = intel_dig_port->base.port; 661 enum port port = intel_dig_port->base.port;
@@ -641,7 +671,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
641 return -ETIMEDOUT; 671 return -ETIMEDOUT;
642 } 672 }
643 673
644 ret = connector->hdcp_shim->toggle_signalling(intel_dig_port, false); 674 ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
645 if (ret) { 675 if (ret) {
646 DRM_ERROR("Failed to disable HDCP signalling\n"); 676 DRM_ERROR("Failed to disable HDCP signalling\n");
647 return ret; 677 return ret;
@@ -653,6 +683,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
653 683
654static int _intel_hdcp_enable(struct intel_connector *connector) 684static int _intel_hdcp_enable(struct intel_connector *connector)
655{ 685{
686 struct intel_hdcp *hdcp = &connector->hdcp;
656 struct drm_i915_private *dev_priv = connector->base.dev->dev_private; 687 struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
657 int i, ret, tries = 3; 688 int i, ret, tries = 3;
658 689
@@ -677,8 +708,7 @@ static int _intel_hdcp_enable(struct intel_connector *connector)
677 708
678 /* In case of authentication failures, HDCP spec expects reauth. */ 709
679 for (i = 0; i < tries; i++) { 710 for (i = 0; i < tries; i++) {
680 ret = intel_hdcp_auth(conn_to_dig_port(connector), 711 ret = intel_hdcp_auth(conn_to_dig_port(connector), hdcp->shim);
681 connector->hdcp_shim);
682 if (!ret) 712 if (!ret)
683 return 0; 713 return 0;
684 714
@@ -688,42 +718,50 @@ static int _intel_hdcp_enable(struct intel_connector *connector)
688 _intel_hdcp_disable(connector); 718 _intel_hdcp_disable(connector);
689 } 719 }
690 720
691 DRM_ERROR("HDCP authentication failed (%d tries/%d)\n", tries, ret); 721 DRM_DEBUG_KMS("HDCP authentication failed (%d tries/%d)\n", tries, ret);
692 return ret; 722 return ret;
693} 723}
694 724
725static inline
726struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
727{
728 return container_of(hdcp, struct intel_connector, hdcp);
729}
730
695static void intel_hdcp_check_work(struct work_struct *work) 731static void intel_hdcp_check_work(struct work_struct *work)
696{ 732{
697 struct intel_connector *connector = container_of(to_delayed_work(work), 733 struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
698 struct intel_connector, 734 struct intel_hdcp,
699 hdcp_check_work); 735 check_work);
736 struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
737
700 if (!intel_hdcp_check_link(connector)) 738 if (!intel_hdcp_check_link(connector))
701 schedule_delayed_work(&connector->hdcp_check_work, 739 schedule_delayed_work(&hdcp->check_work,
702 DRM_HDCP_CHECK_PERIOD_MS); 740 DRM_HDCP_CHECK_PERIOD_MS);
703} 741}
704 742
705static void intel_hdcp_prop_work(struct work_struct *work) 743static void intel_hdcp_prop_work(struct work_struct *work)
706{ 744{
707 struct intel_connector *connector = container_of(work, 745 struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
708 struct intel_connector, 746 prop_work);
709 hdcp_prop_work); 747 struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
710 struct drm_device *dev = connector->base.dev; 748 struct drm_device *dev = connector->base.dev;
711 struct drm_connector_state *state; 749 struct drm_connector_state *state;
712 750
713 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); 751 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
714 mutex_lock(&connector->hdcp_mutex); 752 mutex_lock(&hdcp->mutex);
715 753
716 /* 754 /*
717 * This worker is only used to flip between ENABLED/DESIRED. Either of 755 * This worker is only used to flip between ENABLED/DESIRED. Either of
718 * those to UNDESIRED is handled by core. If hdcp_value == UNDESIRED, 756 * those to UNDESIRED is handled by core. If value == UNDESIRED,
719 * we're running just after hdcp has been disabled, so just exit 757 * we're running just after hdcp has been disabled, so just exit
720 */ 758 */
721 if (connector->hdcp_value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { 759 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
722 state = connector->base.state; 760 state = connector->base.state;
723 state->content_protection = connector->hdcp_value; 761 state->content_protection = hdcp->value;
724 } 762 }
725 763
726 mutex_unlock(&connector->hdcp_mutex); 764 mutex_unlock(&hdcp->mutex);
727 drm_modeset_unlock(&dev->mode_config.connection_mutex); 765 drm_modeset_unlock(&dev->mode_config.connection_mutex);
728} 766}
729 767
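intel_hdcp_to_connector() is only legal because struct intel_hdcp is embedded by value in struct intel_connector (the &connector->hdcp expressions elsewhere in this patch confirm that); container_of() merely subtracts the field offset. Schematically, with the surrounding layout simplified for illustration:

    struct intel_connector {
            /* ... other members ... */
            struct intel_hdcp hdcp;         /* embedded, not a pointer */
    };

    /* so: container_of(&c->hdcp, struct intel_connector, hdcp) == c */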
@@ -735,8 +773,9 @@ bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
735} 773}
736 774
737int intel_hdcp_init(struct intel_connector *connector, 775int intel_hdcp_init(struct intel_connector *connector,
738 const struct intel_hdcp_shim *hdcp_shim) 776 const struct intel_hdcp_shim *shim)
739{ 777{
778 struct intel_hdcp *hdcp = &connector->hdcp;
740 int ret; 779 int ret;
741 780
742 ret = drm_connector_attach_content_protection_property( 781 ret = drm_connector_attach_content_protection_property(
@@ -744,51 +783,53 @@ int intel_hdcp_init(struct intel_connector *connector,
744 if (ret) 783 if (ret)
745 return ret; 784 return ret;
746 785
747 connector->hdcp_shim = hdcp_shim; 786 hdcp->shim = shim;
748 mutex_init(&connector->hdcp_mutex); 787 mutex_init(&hdcp->mutex);
749 INIT_DELAYED_WORK(&connector->hdcp_check_work, intel_hdcp_check_work); 788 INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
750 INIT_WORK(&connector->hdcp_prop_work, intel_hdcp_prop_work); 789 INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
751 return 0; 790 return 0;
752} 791}
753 792
754int intel_hdcp_enable(struct intel_connector *connector) 793int intel_hdcp_enable(struct intel_connector *connector)
755{ 794{
795 struct intel_hdcp *hdcp = &connector->hdcp;
756 int ret; 796 int ret;
757 797
758 if (!connector->hdcp_shim) 798 if (!hdcp->shim)
759 return -ENOENT; 799 return -ENOENT;
760 800
761 mutex_lock(&connector->hdcp_mutex); 801 mutex_lock(&hdcp->mutex);
762 802
763 ret = _intel_hdcp_enable(connector); 803 ret = _intel_hdcp_enable(connector);
764 if (ret) 804 if (ret)
765 goto out; 805 goto out;
766 806
767 connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_ENABLED; 807 hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
768 schedule_work(&connector->hdcp_prop_work); 808 schedule_work(&hdcp->prop_work);
769 schedule_delayed_work(&connector->hdcp_check_work, 809 schedule_delayed_work(&hdcp->check_work,
770 DRM_HDCP_CHECK_PERIOD_MS); 810 DRM_HDCP_CHECK_PERIOD_MS);
771out: 811out:
772 mutex_unlock(&connector->hdcp_mutex); 812 mutex_unlock(&hdcp->mutex);
773 return ret; 813 return ret;
774} 814}
775 815
776int intel_hdcp_disable(struct intel_connector *connector) 816int intel_hdcp_disable(struct intel_connector *connector)
777{ 817{
818 struct intel_hdcp *hdcp = &connector->hdcp;
778 int ret = 0; 819 int ret = 0;
779 820
780 if (!connector->hdcp_shim) 821 if (!hdcp->shim)
781 return -ENOENT; 822 return -ENOENT;
782 823
783 mutex_lock(&connector->hdcp_mutex); 824 mutex_lock(&hdcp->mutex);
784 825
785 if (connector->hdcp_value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { 826 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
786 connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED; 827 hdcp->value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
787 ret = _intel_hdcp_disable(connector); 828 ret = _intel_hdcp_disable(connector);
788 } 829 }
789 830
790 mutex_unlock(&connector->hdcp_mutex); 831 mutex_unlock(&hdcp->mutex);
791 cancel_delayed_work_sync(&connector->hdcp_check_work); 832 cancel_delayed_work_sync(&hdcp->check_work);
792 return ret; 833 return ret;
793} 834}
794 835
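Worth noting in intel_hdcp_disable() above: cancel_delayed_work_sync() runs only after hdcp->mutex is dropped. The check worker itself ends up taking that same mutex (via intel_hdcp_check_link()), so cancelling synchronously while still holding it could deadlock against an in-flight worker. The safe ordering, in outline:

    mutex_lock(&hdcp->mutex);
    /* ... flip state, disable HDCP ... */
    mutex_unlock(&hdcp->mutex);

    /* only after dropping the mutex the worker also takes: */
    cancel_delayed_work_sync(&hdcp->check_work);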
@@ -828,17 +869,18 @@ void intel_hdcp_atomic_check(struct drm_connector *connector,
828/* Implements Part 3 of the HDCP authorization procedure */ 869/* Implements Part 3 of the HDCP authorization procedure */
829int intel_hdcp_check_link(struct intel_connector *connector) 870int intel_hdcp_check_link(struct intel_connector *connector)
830{ 871{
872 struct intel_hdcp *hdcp = &connector->hdcp;
831 struct drm_i915_private *dev_priv = connector->base.dev->dev_private; 873 struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
832 struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); 874 struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
833 enum port port = intel_dig_port->base.port; 875 enum port port = intel_dig_port->base.port;
834 int ret = 0; 876 int ret = 0;
835 877
836 if (!connector->hdcp_shim) 878 if (!hdcp->shim)
837 return -ENOENT; 879 return -ENOENT;
838 880
839 mutex_lock(&connector->hdcp_mutex); 881 mutex_lock(&hdcp->mutex);
840 882
841 if (connector->hdcp_value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) 883 if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
842 goto out; 884 goto out;
843 885
844 if (!(I915_READ(PORT_HDCP_STATUS(port)) & HDCP_STATUS_ENC)) { 886 if (!(I915_READ(PORT_HDCP_STATUS(port)) & HDCP_STATUS_ENC)) {
@@ -846,17 +888,15 @@ int intel_hdcp_check_link(struct intel_connector *connector)
846 connector->base.name, connector->base.base.id, 888 connector->base.name, connector->base.base.id,
847 I915_READ(PORT_HDCP_STATUS(port))); 889 I915_READ(PORT_HDCP_STATUS(port)));
848 ret = -ENXIO; 890 ret = -ENXIO;
849 connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED; 891 hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
850 schedule_work(&connector->hdcp_prop_work); 892 schedule_work(&hdcp->prop_work);
851 goto out; 893 goto out;
852 } 894 }
853 895
854 if (connector->hdcp_shim->check_link(intel_dig_port)) { 896 if (hdcp->shim->check_link(intel_dig_port)) {
855 if (connector->hdcp_value != 897 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
856 DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { 898 hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
857 connector->hdcp_value = 899 schedule_work(&hdcp->prop_work);
858 DRM_MODE_CONTENT_PROTECTION_ENABLED;
859 schedule_work(&connector->hdcp_prop_work);
860 } 900 }
861 goto out; 901 goto out;
862 } 902 }
@@ -867,20 +907,20 @@ int intel_hdcp_check_link(struct intel_connector *connector)
867 ret = _intel_hdcp_disable(connector); 907 ret = _intel_hdcp_disable(connector);
868 if (ret) { 908 if (ret) {
869 DRM_ERROR("Failed to disable hdcp (%d)\n", ret); 909 DRM_ERROR("Failed to disable hdcp (%d)\n", ret);
870 connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED; 910 hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
871 schedule_work(&connector->hdcp_prop_work); 911 schedule_work(&hdcp->prop_work);
872 goto out; 912 goto out;
873 } 913 }
874 914
875 ret = _intel_hdcp_enable(connector); 915 ret = _intel_hdcp_enable(connector);
876 if (ret) { 916 if (ret) {
877 DRM_ERROR("Failed to enable hdcp (%d)\n", ret); 917 DRM_DEBUG_KMS("Failed to enable hdcp (%d)\n", ret);
878 connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED; 918 hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
879 schedule_work(&connector->hdcp_prop_work); 919 schedule_work(&hdcp->prop_work);
880 goto out; 920 goto out;
881 } 921 }
882 922
883out: 923out:
884 mutex_unlock(&connector->hdcp_mutex); 924 mutex_unlock(&hdcp->mutex);
885 return ret; 925 return ret;
886} 926}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index d7234e03fdb0..e2c6a2b3e8f2 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -148,14 +148,13 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv,
148 } 148 }
149} 149}
150 150
151static void g4x_write_infoframe(struct drm_encoder *encoder, 151static void g4x_write_infoframe(struct intel_encoder *encoder,
152 const struct intel_crtc_state *crtc_state, 152 const struct intel_crtc_state *crtc_state,
153 unsigned int type, 153 unsigned int type,
154 const void *frame, ssize_t len) 154 const void *frame, ssize_t len)
155{ 155{
156 const u32 *data = frame; 156 const u32 *data = frame;
157 struct drm_device *dev = encoder->dev; 157 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
158 struct drm_i915_private *dev_priv = to_i915(dev);
159 u32 val = I915_READ(VIDEO_DIP_CTL); 158 u32 val = I915_READ(VIDEO_DIP_CTL);
160 int i; 159 int i;
161 160
@@ -186,31 +185,29 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
186 POSTING_READ(VIDEO_DIP_CTL); 185 POSTING_READ(VIDEO_DIP_CTL);
187} 186}
188 187
189static bool g4x_infoframe_enabled(struct drm_encoder *encoder, 188static bool g4x_infoframe_enabled(struct intel_encoder *encoder,
190 const struct intel_crtc_state *pipe_config) 189 const struct intel_crtc_state *pipe_config)
191{ 190{
192 struct drm_i915_private *dev_priv = to_i915(encoder->dev); 191 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
193 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
194 u32 val = I915_READ(VIDEO_DIP_CTL); 192 u32 val = I915_READ(VIDEO_DIP_CTL);
195 193
196 if ((val & VIDEO_DIP_ENABLE) == 0) 194 if ((val & VIDEO_DIP_ENABLE) == 0)
197 return false; 195 return false;
198 196
199 if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->base.port)) 197 if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port))
200 return false; 198 return false;
201 199
202 return val & (VIDEO_DIP_ENABLE_AVI | 200 return val & (VIDEO_DIP_ENABLE_AVI |
203 VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD); 201 VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
204} 202}
205 203
206static void ibx_write_infoframe(struct drm_encoder *encoder, 204static void ibx_write_infoframe(struct intel_encoder *encoder,
207 const struct intel_crtc_state *crtc_state, 205 const struct intel_crtc_state *crtc_state,
208 unsigned int type, 206 unsigned int type,
209 const void *frame, ssize_t len) 207 const void *frame, ssize_t len)
210{ 208{
211 const u32 *data = frame; 209 const u32 *data = frame;
212 struct drm_device *dev = encoder->dev; 210 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
213 struct drm_i915_private *dev_priv = to_i915(dev);
214 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); 211 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
215 i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); 212 i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
216 u32 val = I915_READ(reg); 213 u32 val = I915_READ(reg);
@@ -243,11 +240,10 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
243 POSTING_READ(reg); 240 POSTING_READ(reg);
244} 241}
245 242
246static bool ibx_infoframe_enabled(struct drm_encoder *encoder, 243static bool ibx_infoframe_enabled(struct intel_encoder *encoder,
247 const struct intel_crtc_state *pipe_config) 244 const struct intel_crtc_state *pipe_config)
248{ 245{
249 struct drm_i915_private *dev_priv = to_i915(encoder->dev); 246 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
250 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
251 enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe; 247 enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
252 i915_reg_t reg = TVIDEO_DIP_CTL(pipe); 248 i915_reg_t reg = TVIDEO_DIP_CTL(pipe);
253 u32 val = I915_READ(reg); 249 u32 val = I915_READ(reg);
@@ -255,7 +251,7 @@ static bool ibx_infoframe_enabled(struct drm_encoder *encoder,
255 if ((val & VIDEO_DIP_ENABLE) == 0) 251 if ((val & VIDEO_DIP_ENABLE) == 0)
256 return false; 252 return false;
257 253
258 if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->base.port)) 254 if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port))
259 return false; 255 return false;
260 256
261 return val & (VIDEO_DIP_ENABLE_AVI | 257 return val & (VIDEO_DIP_ENABLE_AVI |
@@ -263,14 +259,13 @@ static bool ibx_infoframe_enabled(struct drm_encoder *encoder,
263 VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); 259 VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
264} 260}
265 261
266static void cpt_write_infoframe(struct drm_encoder *encoder, 262static void cpt_write_infoframe(struct intel_encoder *encoder,
267 const struct intel_crtc_state *crtc_state, 263 const struct intel_crtc_state *crtc_state,
268 unsigned int type, 264 unsigned int type,
269 const void *frame, ssize_t len) 265 const void *frame, ssize_t len)
270{ 266{
271 const u32 *data = frame; 267 const u32 *data = frame;
272 struct drm_device *dev = encoder->dev; 268 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
273 struct drm_i915_private *dev_priv = to_i915(dev);
274 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); 269 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
275 i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); 270 i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
276 u32 val = I915_READ(reg); 271 u32 val = I915_READ(reg);
@@ -306,10 +301,10 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
306 POSTING_READ(reg); 301 POSTING_READ(reg);
307} 302}
308 303
309static bool cpt_infoframe_enabled(struct drm_encoder *encoder, 304static bool cpt_infoframe_enabled(struct intel_encoder *encoder,
310 const struct intel_crtc_state *pipe_config) 305 const struct intel_crtc_state *pipe_config)
311{ 306{
312 struct drm_i915_private *dev_priv = to_i915(encoder->dev); 307 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
313 enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe; 308 enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
314 u32 val = I915_READ(TVIDEO_DIP_CTL(pipe)); 309 u32 val = I915_READ(TVIDEO_DIP_CTL(pipe));
315 310
@@ -321,14 +316,13 @@ static bool cpt_infoframe_enabled(struct drm_encoder *encoder,
321 VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); 316 VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
322} 317}
323 318
324static void vlv_write_infoframe(struct drm_encoder *encoder, 319static void vlv_write_infoframe(struct intel_encoder *encoder,
325 const struct intel_crtc_state *crtc_state, 320 const struct intel_crtc_state *crtc_state,
326 unsigned int type, 321 unsigned int type,
327 const void *frame, ssize_t len) 322 const void *frame, ssize_t len)
328{ 323{
329 const u32 *data = frame; 324 const u32 *data = frame;
330 struct drm_device *dev = encoder->dev; 325 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
331 struct drm_i915_private *dev_priv = to_i915(dev);
332 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); 326 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
333 i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); 327 i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
334 u32 val = I915_READ(reg); 328 u32 val = I915_READ(reg);
@@ -361,18 +355,17 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
361 POSTING_READ(reg); 355 POSTING_READ(reg);
362} 356}
363 357
364static bool vlv_infoframe_enabled(struct drm_encoder *encoder, 358static bool vlv_infoframe_enabled(struct intel_encoder *encoder,
365 const struct intel_crtc_state *pipe_config) 359 const struct intel_crtc_state *pipe_config)
366{ 360{
367 struct drm_i915_private *dev_priv = to_i915(encoder->dev); 361 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
368 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
369 enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe; 362 enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
370 u32 val = I915_READ(VLV_TVIDEO_DIP_CTL(pipe)); 363 u32 val = I915_READ(VLV_TVIDEO_DIP_CTL(pipe));
371 364
372 if ((val & VIDEO_DIP_ENABLE) == 0) 365 if ((val & VIDEO_DIP_ENABLE) == 0)
373 return false; 366 return false;
374 367
375 if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->base.port)) 368 if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port))
376 return false; 369 return false;
377 370
378 return val & (VIDEO_DIP_ENABLE_AVI | 371 return val & (VIDEO_DIP_ENABLE_AVI |
@@ -380,14 +373,13 @@ static bool vlv_infoframe_enabled(struct drm_encoder *encoder,
380 VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); 373 VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
381} 374}
382 375
383static void hsw_write_infoframe(struct drm_encoder *encoder, 376static void hsw_write_infoframe(struct intel_encoder *encoder,
384 const struct intel_crtc_state *crtc_state, 377 const struct intel_crtc_state *crtc_state,
385 unsigned int type, 378 unsigned int type,
386 const void *frame, ssize_t len) 379 const void *frame, ssize_t len)
387{ 380{
388 const u32 *data = frame; 381 const u32 *data = frame;
389 struct drm_device *dev = encoder->dev; 382 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
390 struct drm_i915_private *dev_priv = to_i915(dev);
391 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 383 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
392 i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder); 384 i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
393 int data_size = type == DP_SDP_VSC ? 385 int data_size = type == DP_SDP_VSC ?
@@ -415,10 +407,10 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
415 POSTING_READ(ctl_reg); 407 POSTING_READ(ctl_reg);
416} 408}
417 409
418static bool hsw_infoframe_enabled(struct drm_encoder *encoder, 410static bool hsw_infoframe_enabled(struct intel_encoder *encoder,
419 const struct intel_crtc_state *pipe_config) 411 const struct intel_crtc_state *pipe_config)
420{ 412{
421 struct drm_i915_private *dev_priv = to_i915(encoder->dev); 413 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
422 u32 val = I915_READ(HSW_TVIDEO_DIP_CTL(pipe_config->cpu_transcoder)); 414 u32 val = I915_READ(HSW_TVIDEO_DIP_CTL(pipe_config->cpu_transcoder));
423 415
424 return val & (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW | 416 return val & (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
@@ -443,11 +435,11 @@ static bool hsw_infoframe_enabled(struct drm_encoder *encoder,
443 * trick them by giving an offset into the buffer and moving back the header 435 * trick them by giving an offset into the buffer and moving back the header
444 * bytes by one. 436 * bytes by one.
445 */ 437 */
446static void intel_write_infoframe(struct drm_encoder *encoder, 438static void intel_write_infoframe(struct intel_encoder *encoder,
447 const struct intel_crtc_state *crtc_state, 439 const struct intel_crtc_state *crtc_state,
448 union hdmi_infoframe *frame) 440 union hdmi_infoframe *frame)
449{ 441{
450 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 442 struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
451 u8 buffer[VIDEO_DIP_DATA_SIZE]; 443 u8 buffer[VIDEO_DIP_DATA_SIZE];
452 ssize_t len; 444 ssize_t len;
453 445
@@ -457,20 +449,20 @@ static void intel_write_infoframe(struct drm_encoder *encoder,
457 return; 449 return;
458 450
459 /* Insert the 'hole' (see big comment above) at position 3 */ 451 /* Insert the 'hole' (see big comment above) at position 3 */
460 buffer[0] = buffer[1]; 452 memmove(&buffer[0], &buffer[1], 3);
461 buffer[1] = buffer[2];
462 buffer[2] = buffer[3];
463 buffer[3] = 0; 453 buffer[3] = 0;
464 len++; 454 len++;
465 455
466 intel_dig_port->write_infoframe(encoder, crtc_state, frame->any.type, buffer, len); 456 intel_dig_port->write_infoframe(encoder,
457 crtc_state,
458 frame->any.type, buffer, len);
467} 459}
468 460
469static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, 461static void intel_hdmi_set_avi_infoframe(struct intel_encoder *encoder,
470 const struct intel_crtc_state *crtc_state, 462 const struct intel_crtc_state *crtc_state,
471 const struct drm_connector_state *conn_state) 463 const struct drm_connector_state *conn_state)
472{ 464{
473 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 465 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
474 const struct drm_display_mode *adjusted_mode = 466 const struct drm_display_mode *adjusted_mode =
475 &crtc_state->base.adjusted_mode; 467 &crtc_state->base.adjusted_mode;
476 struct drm_connector *connector = &intel_hdmi->attached_connector->base; 468 struct drm_connector *connector = &intel_hdmi->attached_connector->base;
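The memmove() in intel_write_infoframe() above is byte-for-byte the three assignments it replaced: it shifts buffer[1..3] down to buffer[0..2], freeing buffer[3] for the inserted 'hole'. A standalone demonstration with toy values:

    u8 buf[5] = { 0xa0, 0xa1, 0xa2, 0xa3, 0xa4 };

    memmove(&buf[0], &buf[1], 3);   /* buf = a1 a2 a3 a3 a4 */
    buf[3] = 0;                     /* buf = a1 a2 a3 00 a4 */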
@@ -487,8 +479,10 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
487 return; 479 return;
488 } 480 }
489 481
490 if (crtc_state->ycbcr420) 482 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
491 frame.avi.colorspace = HDMI_COLORSPACE_YUV420; 483 frame.avi.colorspace = HDMI_COLORSPACE_YUV420;
484 else if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
485 frame.avi.colorspace = HDMI_COLORSPACE_YUV444;
492 else 486 else
493 frame.avi.colorspace = HDMI_COLORSPACE_RGB; 487 frame.avi.colorspace = HDMI_COLORSPACE_RGB;
494 488
@@ -503,10 +497,11 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
503 conn_state); 497 conn_state);
504 498
505 /* TODO: handle pixel repetition for YCBCR420 outputs */ 499 /* TODO: handle pixel repetition for YCBCR420 outputs */
506 intel_write_infoframe(encoder, crtc_state, &frame); 500 intel_write_infoframe(encoder, crtc_state,
501 &frame);
507} 502}
508 503
509static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder, 504static void intel_hdmi_set_spd_infoframe(struct intel_encoder *encoder,
510 const struct intel_crtc_state *crtc_state) 505 const struct intel_crtc_state *crtc_state)
511{ 506{
512 union hdmi_infoframe frame; 507 union hdmi_infoframe frame;
@@ -520,11 +515,12 @@ static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder,
520 515
521 frame.spd.sdi = HDMI_SPD_SDI_PC; 516 frame.spd.sdi = HDMI_SPD_SDI_PC;
522 517
523 intel_write_infoframe(encoder, crtc_state, &frame); 518 intel_write_infoframe(encoder, crtc_state,
519 &frame);
524} 520}
525 521
526static void 522static void
527intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder, 523intel_hdmi_set_hdmi_infoframe(struct intel_encoder *encoder,
528 const struct intel_crtc_state *crtc_state, 524 const struct intel_crtc_state *crtc_state,
529 const struct drm_connector_state *conn_state) 525 const struct drm_connector_state *conn_state)
530{ 526{
@@ -537,20 +533,21 @@ intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder,
537 if (ret < 0) 533 if (ret < 0)
538 return; 534 return;
539 535
540 intel_write_infoframe(encoder, crtc_state, &frame); 536 intel_write_infoframe(encoder, crtc_state,
537 &frame);
541} 538}
542 539
543static void g4x_set_infoframes(struct drm_encoder *encoder, 540static void g4x_set_infoframes(struct intel_encoder *encoder,
544 bool enable, 541 bool enable,
545 const struct intel_crtc_state *crtc_state, 542 const struct intel_crtc_state *crtc_state,
546 const struct drm_connector_state *conn_state) 543 const struct drm_connector_state *conn_state)
547{ 544{
548 struct drm_i915_private *dev_priv = to_i915(encoder->dev); 545 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
549 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 546 struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
550 struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; 547 struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
551 i915_reg_t reg = VIDEO_DIP_CTL; 548 i915_reg_t reg = VIDEO_DIP_CTL;
552 u32 val = I915_READ(reg); 549 u32 val = I915_READ(reg);
553 u32 port = VIDEO_DIP_PORT(intel_dig_port->base.port); 550 u32 port = VIDEO_DIP_PORT(encoder->port);
554 551
555 assert_hdmi_port_disabled(intel_hdmi); 552 assert_hdmi_port_disabled(intel_hdmi);
556 553
@@ -658,11 +655,11 @@ static bool gcp_default_phase_possible(int pipe_bpp,
658 mode->crtc_htotal/2 % pixels_per_group == 0); 655 mode->crtc_htotal/2 % pixels_per_group == 0);
659} 656}
660 657
661static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder, 658static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
662 const struct intel_crtc_state *crtc_state, 659 const struct intel_crtc_state *crtc_state,
663 const struct drm_connector_state *conn_state) 660 const struct drm_connector_state *conn_state)
664{ 661{
665 struct drm_i915_private *dev_priv = to_i915(encoder->dev); 662 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
666 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 663 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
667 i915_reg_t reg; 664 i915_reg_t reg;
668 u32 val = 0; 665 u32 val = 0;
@@ -690,18 +687,18 @@ static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder,
690 return val != 0; 687 return val != 0;
691} 688}
692 689
693static void ibx_set_infoframes(struct drm_encoder *encoder, 690static void ibx_set_infoframes(struct intel_encoder *encoder,
694 bool enable, 691 bool enable,
695 const struct intel_crtc_state *crtc_state, 692 const struct intel_crtc_state *crtc_state,
696 const struct drm_connector_state *conn_state) 693 const struct drm_connector_state *conn_state)
697{ 694{
698 struct drm_i915_private *dev_priv = to_i915(encoder->dev); 695 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
699 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); 696 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
700 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 697 struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
701 struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; 698 struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
702 i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); 699 i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
703 u32 val = I915_READ(reg); 700 u32 val = I915_READ(reg);
704 u32 port = VIDEO_DIP_PORT(intel_dig_port->base.port); 701 u32 port = VIDEO_DIP_PORT(encoder->port);
705 702
706 assert_hdmi_port_disabled(intel_hdmi); 703 assert_hdmi_port_disabled(intel_hdmi);
707 704
@@ -743,14 +740,14 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
743 intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state); 740 intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
744} 741}
745 742
746static void cpt_set_infoframes(struct drm_encoder *encoder, 743static void cpt_set_infoframes(struct intel_encoder *encoder,
747 bool enable, 744 bool enable,
748 const struct intel_crtc_state *crtc_state, 745 const struct intel_crtc_state *crtc_state,
749 const struct drm_connector_state *conn_state) 746 const struct drm_connector_state *conn_state)
750{ 747{
751 struct drm_i915_private *dev_priv = to_i915(encoder->dev); 748 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
752 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); 749 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
753 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 750 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
754 i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); 751 i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
755 u32 val = I915_READ(reg); 752 u32 val = I915_READ(reg);
756 753
@@ -786,18 +783,17 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
786 intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state); 783 intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
787} 784}
788 785
789static void vlv_set_infoframes(struct drm_encoder *encoder, 786static void vlv_set_infoframes(struct intel_encoder *encoder,
790 bool enable, 787 bool enable,
791 const struct intel_crtc_state *crtc_state, 788 const struct intel_crtc_state *crtc_state,
792 const struct drm_connector_state *conn_state) 789 const struct drm_connector_state *conn_state)
793{ 790{
794 struct drm_i915_private *dev_priv = to_i915(encoder->dev); 791 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
795 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
796 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); 792 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
797 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 793 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
798 i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); 794 i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
799 u32 val = I915_READ(reg); 795 u32 val = I915_READ(reg);
800 u32 port = VIDEO_DIP_PORT(intel_dig_port->base.port); 796 u32 port = VIDEO_DIP_PORT(encoder->port);
801 797
802 assert_hdmi_port_disabled(intel_hdmi); 798 assert_hdmi_port_disabled(intel_hdmi);
803 799
@@ -839,12 +835,12 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
839 intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state); 835 intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
840} 836}
841 837
842static void hsw_set_infoframes(struct drm_encoder *encoder, 838static void hsw_set_infoframes(struct intel_encoder *encoder,
843 bool enable, 839 bool enable,
844 const struct intel_crtc_state *crtc_state, 840 const struct intel_crtc_state *crtc_state,
845 const struct drm_connector_state *conn_state) 841 const struct drm_connector_state *conn_state)
846{ 842{
847 struct drm_i915_private *dev_priv = to_i915(encoder->dev); 843 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
848 i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder); 844 i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
849 u32 val = I915_READ(reg); 845 u32 val = I915_READ(reg);
850 846
@@ -966,13 +962,13 @@ int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
966 ret = intel_hdmi_hdcp_write(intel_dig_port, DRM_HDCP_DDC_AN, an, 962 ret = intel_hdmi_hdcp_write(intel_dig_port, DRM_HDCP_DDC_AN, an,
967 DRM_HDCP_AN_LEN); 963 DRM_HDCP_AN_LEN);
968 if (ret) { 964 if (ret) {
969 DRM_ERROR("Write An over DDC failed (%d)\n", ret); 965 DRM_DEBUG_KMS("Write An over DDC failed (%d)\n", ret);
970 return ret; 966 return ret;
971 } 967 }
972 968
973 ret = intel_gmbus_output_aksv(adapter); 969 ret = intel_gmbus_output_aksv(adapter);
974 if (ret < 0) { 970 if (ret < 0) {
975 DRM_ERROR("Failed to output aksv (%d)\n", ret); 971 DRM_DEBUG_KMS("Failed to output aksv (%d)\n", ret);
976 return ret; 972 return ret;
977 } 973 }
978 return 0; 974 return 0;
@@ -985,7 +981,7 @@ static int intel_hdmi_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
985 ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BKSV, bksv, 981 ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BKSV, bksv,
986 DRM_HDCP_KSV_LEN); 982 DRM_HDCP_KSV_LEN);
987 if (ret) 983 if (ret)
988 DRM_ERROR("Read Bksv over DDC failed (%d)\n", ret); 984 DRM_DEBUG_KMS("Read Bksv over DDC failed (%d)\n", ret);
989 return ret; 985 return ret;
990} 986}
991 987
@@ -997,7 +993,7 @@ int intel_hdmi_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
997 ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BSTATUS, 993 ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BSTATUS,
998 bstatus, DRM_HDCP_BSTATUS_LEN); 994 bstatus, DRM_HDCP_BSTATUS_LEN);
999 if (ret) 995 if (ret)
1000 DRM_ERROR("Read bstatus over DDC failed (%d)\n", ret); 996 DRM_DEBUG_KMS("Read bstatus over DDC failed (%d)\n", ret);
1001 return ret; 997 return ret;
1002} 998}
1003 999
@@ -1010,7 +1006,7 @@ int intel_hdmi_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
1010 1006
1011 ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1); 1007 ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
1012 if (ret) { 1008 if (ret) {
1013 DRM_ERROR("Read bcaps over DDC failed (%d)\n", ret); 1009 DRM_DEBUG_KMS("Read bcaps over DDC failed (%d)\n", ret);
1014 return ret; 1010 return ret;
1015 } 1011 }
1016 *repeater_present = val & DRM_HDCP_DDC_BCAPS_REPEATER_PRESENT; 1012 *repeater_present = val & DRM_HDCP_DDC_BCAPS_REPEATER_PRESENT;
@@ -1025,7 +1021,7 @@ int intel_hdmi_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
1025 ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_RI_PRIME, 1021 ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_RI_PRIME,
1026 ri_prime, DRM_HDCP_RI_LEN); 1022 ri_prime, DRM_HDCP_RI_LEN);
1027 if (ret) 1023 if (ret)
1028 DRM_ERROR("Read Ri' over DDC failed (%d)\n", ret); 1024 DRM_DEBUG_KMS("Read Ri' over DDC failed (%d)\n", ret);
1029 return ret; 1025 return ret;
1030} 1026}
1031 1027
@@ -1038,7 +1034,7 @@ int intel_hdmi_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
1038 1034
1039 ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1); 1035 ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
1040 if (ret) { 1036 if (ret) {
1041 DRM_ERROR("Read bcaps over DDC failed (%d)\n", ret); 1037 DRM_DEBUG_KMS("Read bcaps over DDC failed (%d)\n", ret);
1042 return ret; 1038 return ret;
1043 } 1039 }
1044 *ksv_ready = val & DRM_HDCP_DDC_BCAPS_KSV_FIFO_READY; 1040 *ksv_ready = val & DRM_HDCP_DDC_BCAPS_KSV_FIFO_READY;
@@ -1053,7 +1049,7 @@ int intel_hdmi_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
1053 ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_KSV_FIFO, 1049 ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_KSV_FIFO,
1054 ksv_fifo, num_downstream * DRM_HDCP_KSV_LEN); 1050 ksv_fifo, num_downstream * DRM_HDCP_KSV_LEN);
1055 if (ret) { 1051 if (ret) {
1056 DRM_ERROR("Read ksv fifo over DDC failed (%d)\n", ret); 1052 DRM_DEBUG_KMS("Read ksv fifo over DDC failed (%d)\n", ret);
1057 return ret; 1053 return ret;
1058 } 1054 }
1059 return 0; 1055 return 0;
@@ -1071,7 +1067,7 @@ int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
1071 ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_V_PRIME(i), 1067 ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_V_PRIME(i),
1072 part, DRM_HDCP_V_PRIME_PART_LEN); 1068 part, DRM_HDCP_V_PRIME_PART_LEN);
1073 if (ret) 1069 if (ret)
1074 DRM_ERROR("Read V'[%d] over DDC failed (%d)\n", i, ret); 1070 DRM_DEBUG_KMS("Read V'[%d] over DDC failed (%d)\n", i, ret);
1075 return ret; 1071 return ret;
1076} 1072}
1077 1073
@@ -1218,7 +1214,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
1218 if (tmp & HDMI_MODE_SELECT_HDMI) 1214 if (tmp & HDMI_MODE_SELECT_HDMI)
1219 pipe_config->has_hdmi_sink = true; 1215 pipe_config->has_hdmi_sink = true;
1220 1216
1221 if (intel_dig_port->infoframe_enabled(&encoder->base, pipe_config)) 1217 if (intel_dig_port->infoframe_enabled(encoder, pipe_config))
1222 pipe_config->has_infoframe = true; 1218 pipe_config->has_infoframe = true;
1223 1219
1224 if (tmp & SDVO_AUDIO_ENABLE) 1220 if (tmp & SDVO_AUDIO_ENABLE)
@@ -1439,7 +1435,8 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
1439 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); 1435 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
1440 } 1436 }
1441 1437
1442 intel_dig_port->set_infoframes(&encoder->base, false, 1438 intel_dig_port->set_infoframes(encoder,
1439 false,
1443 old_crtc_state, old_conn_state); 1440 old_crtc_state, old_conn_state);
1444 1441
1445 intel_dp_dual_mode_set_tmds_output(intel_hdmi, false); 1442 intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
@@ -1598,6 +1595,8 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
1598 struct drm_atomic_state *state = crtc_state->base.state; 1595 struct drm_atomic_state *state = crtc_state->base.state;
1599 struct drm_connector_state *connector_state; 1596 struct drm_connector_state *connector_state;
1600 struct drm_connector *connector; 1597 struct drm_connector *connector;
1598 const struct drm_display_mode *adjusted_mode =
1599 &crtc_state->base.adjusted_mode;
1601 int i; 1600 int i;
1602 1601
1603 if (HAS_GMCH_DISPLAY(dev_priv)) 1602 if (HAS_GMCH_DISPLAY(dev_priv))
@@ -1625,7 +1624,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
1625 if (connector_state->crtc != crtc_state->base.crtc) 1624 if (connector_state->crtc != crtc_state->base.crtc)
1626 continue; 1625 continue;
1627 1626
1628 if (crtc_state->ycbcr420) { 1627 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
1629 const struct drm_hdmi_info *hdmi = &info->hdmi; 1628 const struct drm_hdmi_info *hdmi = &info->hdmi;
1630 1629
1631 if (bpc == 12 && !(hdmi->y420_dc_modes & 1630 if (bpc == 12 && !(hdmi->y420_dc_modes &
@@ -1646,7 +1645,14 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
1646 1645
1647 /* Display WA #1139: glk */ 1646 /* Display WA #1139: glk */
1648 if (bpc == 12 && IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1) && 1647 if (bpc == 12 && IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1) &&
1649 crtc_state->base.adjusted_mode.htotal > 5460) 1648 adjusted_mode->htotal > 5460)
1649 return false;
1650
1651 /* Display Wa_1405510057:icl */
1652 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
1653 bpc == 10 && IS_ICELAKE(dev_priv) &&
1654 (adjusted_mode->crtc_hblank_end -
1655 adjusted_mode->crtc_hblank_start) % 8 == 2)
1650 return false; 1656 return false;
1651 1657
1652 return true; 1658 return true;
@@ -1670,7 +1676,7 @@ intel_hdmi_ycbcr420_config(struct drm_connector *connector,
1670 *clock_12bpc /= 2; 1676 *clock_12bpc /= 2;
1671 *clock_10bpc /= 2; 1677 *clock_10bpc /= 2;
1672 *clock_8bpc /= 2; 1678 *clock_8bpc /= 2;
1673 config->ycbcr420 = true; 1679 config->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
1674 1680
1675 /* YCBCR 420 output conversion needs a scaler */ 1681 /* YCBCR 420 output conversion needs a scaler */
1676 if (skl_update_scaler_crtc(config)) { 1682 if (skl_update_scaler_crtc(config)) {
@@ -1704,6 +1710,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
1704 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) 1710 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
1705 return false; 1711 return false;
1706 1712
1713 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
1707 pipe_config->has_hdmi_sink = !force_dvi && intel_hdmi->has_hdmi_sink; 1714 pipe_config->has_hdmi_sink = !force_dvi && intel_hdmi->has_hdmi_sink;
1708 1715
1709 if (pipe_config->has_hdmi_sink) 1716 if (pipe_config->has_hdmi_sink)
@@ -1974,7 +1981,7 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder,
1974 1981
1975 intel_hdmi_prepare(encoder, pipe_config); 1982 intel_hdmi_prepare(encoder, pipe_config);
1976 1983
1977 intel_dig_port->set_infoframes(&encoder->base, 1984 intel_dig_port->set_infoframes(encoder,
1978 pipe_config->has_infoframe, 1985 pipe_config->has_infoframe,
1979 pipe_config, conn_state); 1986 pipe_config, conn_state);
1980} 1987}
@@ -1992,7 +1999,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
1992 vlv_set_phy_signal_level(encoder, 0x2b245f5f, 0x00002000, 0x5578b83a, 1999 vlv_set_phy_signal_level(encoder, 0x2b245f5f, 0x00002000, 0x5578b83a,
1993 0x2b247878); 2000 0x2b247878);
1994 2001
1995 dport->set_infoframes(&encoder->base, 2002 dport->set_infoframes(encoder,
1996 pipe_config->has_infoframe, 2003 pipe_config->has_infoframe,
1997 pipe_config, conn_state); 2004 pipe_config, conn_state);
1998 2005
@@ -2063,7 +2070,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
2063 /* Use 800mV-0dB */ 2070 /* Use 800mV-0dB */
2064 chv_set_phy_signal_level(encoder, 128, 102, false); 2071 chv_set_phy_signal_level(encoder, 128, 102, false);
2065 2072
2066 dport->set_infoframes(&encoder->base, 2073 dport->set_infoframes(encoder,
2067 pipe_config->has_infoframe, 2074 pipe_config->has_infoframe,
2068 pipe_config, conn_state); 2075 pipe_config, conn_state);
2069 2076
@@ -2075,13 +2082,26 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
2075 chv_phy_release_cl2_override(encoder); 2082 chv_phy_release_cl2_override(encoder);
2076} 2083}
2077 2084
2085static int
2086intel_hdmi_connector_register(struct drm_connector *connector)
2087{
2088 int ret;
2089
2090 ret = intel_connector_register(connector);
2091 if (ret)
2092 return ret;
2093
2094 i915_debugfs_connector_add(connector);
2095
2096 return ret;
2097}
2098
2078static void intel_hdmi_destroy(struct drm_connector *connector) 2099static void intel_hdmi_destroy(struct drm_connector *connector)
2079{ 2100{
2080 if (intel_attached_hdmi(connector)->cec_notifier) 2101 if (intel_attached_hdmi(connector)->cec_notifier)
2081 cec_notifier_put(intel_attached_hdmi(connector)->cec_notifier); 2102 cec_notifier_put(intel_attached_hdmi(connector)->cec_notifier);
2082 kfree(to_intel_connector(connector)->detect_edid); 2103
2083 drm_connector_cleanup(connector); 2104 intel_connector_destroy(connector);
2084 kfree(connector);
2085} 2105}
2086 2106
2087static const struct drm_connector_funcs intel_hdmi_connector_funcs = { 2107static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
@@ -2090,7 +2110,7 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
2090 .fill_modes = drm_helper_probe_single_connector_modes, 2110 .fill_modes = drm_helper_probe_single_connector_modes,
2091 .atomic_get_property = intel_digital_connector_atomic_get_property, 2111 .atomic_get_property = intel_digital_connector_atomic_get_property,
2092 .atomic_set_property = intel_digital_connector_atomic_set_property, 2112 .atomic_set_property = intel_digital_connector_atomic_set_property,
2093 .late_register = intel_connector_register, 2113 .late_register = intel_hdmi_connector_register,
2094 .early_unregister = intel_connector_unregister, 2114 .early_unregister = intel_connector_unregister,
2095 .destroy = intel_hdmi_destroy, 2115 .destroy = intel_hdmi_destroy,
2096 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 2116 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
@@ -2110,11 +2130,16 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
2110static void 2130static void
2111intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector) 2131intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
2112{ 2132{
2133 struct drm_i915_private *dev_priv = to_i915(connector->dev);
2134
2113 intel_attach_force_audio_property(connector); 2135 intel_attach_force_audio_property(connector);
2114 intel_attach_broadcast_rgb_property(connector); 2136 intel_attach_broadcast_rgb_property(connector);
2115 intel_attach_aspect_ratio_property(connector); 2137 intel_attach_aspect_ratio_property(connector);
2116 drm_connector_attach_content_type_property(connector); 2138 drm_connector_attach_content_type_property(connector);
2117 connector->state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE; 2139 connector->state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
2140
2141 if (!HAS_GMCH_DISPLAY(dev_priv))
2142 drm_connector_attach_max_bpc_property(connector, 8, 12);
2118} 2143}
2119 2144
2120/* 2145/*
@@ -2325,9 +2350,18 @@ void intel_infoframe_init(struct intel_digital_port *intel_dig_port)
2325 intel_dig_port->set_infoframes = g4x_set_infoframes; 2350 intel_dig_port->set_infoframes = g4x_set_infoframes;
2326 intel_dig_port->infoframe_enabled = g4x_infoframe_enabled; 2351 intel_dig_port->infoframe_enabled = g4x_infoframe_enabled;
2327 } else if (HAS_DDI(dev_priv)) { 2352 } else if (HAS_DDI(dev_priv)) {
2328 intel_dig_port->write_infoframe = hsw_write_infoframe; 2353 if (intel_dig_port->lspcon.active) {
2329 intel_dig_port->set_infoframes = hsw_set_infoframes; 2354 intel_dig_port->write_infoframe =
2330 intel_dig_port->infoframe_enabled = hsw_infoframe_enabled; 2355 lspcon_write_infoframe;
2356 intel_dig_port->set_infoframes = lspcon_set_infoframes;
2357 intel_dig_port->infoframe_enabled =
2358 lspcon_infoframe_enabled;
2359 } else {
2360 intel_dig_port->set_infoframes = hsw_set_infoframes;
2361 intel_dig_port->infoframe_enabled =
2362 hsw_infoframe_enabled;
2363 intel_dig_port->write_infoframe = hsw_write_infoframe;
2364 }
2331 } else if (HAS_PCH_IBX(dev_priv)) { 2365 } else if (HAS_PCH_IBX(dev_priv)) {
2332 intel_dig_port->write_infoframe = ibx_write_infoframe; 2366 intel_dig_port->write_infoframe = ibx_write_infoframe;
2333 intel_dig_port->set_infoframes = ibx_set_infoframes; 2367 intel_dig_port->set_infoframes = ibx_set_infoframes;
@@ -2486,5 +2520,6 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv,
2486 2520
2487 intel_infoframe_init(intel_dig_port); 2521 intel_infoframe_init(intel_dig_port);
2488 2522
2523 intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
2489 intel_hdmi_init_connector(intel_dig_port, intel_connector); 2524 intel_hdmi_init_connector(intel_dig_port, intel_connector);
2490} 2525}
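The Wa_1405510057 hunk added to hdmi_deep_color_possible() rejects 10 bpc
YCbCr 4:2:0 on Icelake whenever the hblank length is congruent to 2 modulo 8.
A minimal sketch of just that predicate, assuming a hypothetical standalone
helper (only the arithmetic mirrors the hunk):

	/* Sketch; the helper name and standalone form are hypothetical. */
	static bool icl_ycbcr420_10bpc_hblank_wa(const struct drm_display_mode *mode)
	{
		/* Wa_1405510057: hblank length must not be == 2 (mod 8) */
		int hblank = mode->crtc_hblank_end - mode->crtc_hblank_start;

		return hblank % 8 == 2;
	}

For instance, crtc_hblank_start = 3840 and crtc_hblank_end = 4122 give
hblank = 282 = 35 * 8 + 2, so such a mode is refused at 10 bpc.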
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index 9a8018130237..e24174d08fed 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -114,51 +114,68 @@ enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
114#define HPD_STORM_REENABLE_DELAY (2 * 60 * 1000) 114#define HPD_STORM_REENABLE_DELAY (2 * 60 * 1000)
115 115
116/** 116/**
117 * intel_hpd_irq_storm_detect - gather stats and detect HPD irq storm on a pin 117 * intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin
118 * @dev_priv: private driver data pointer 118 * @dev_priv: private driver data pointer
119 * @pin: the pin to gather stats on 119 * @pin: the pin to gather stats on
120 * @long_hpd: whether the HPD IRQ was long or short
120 * 121 *
121 * Gather stats about HPD irqs from the specified @pin, and detect irq 122 * Gather stats about HPD IRQs from the specified @pin, and detect IRQ
122 * storms. Only the pin specific stats and state are changed, the caller is 123 * storms. Only the pin specific stats and state are changed, the caller is
123 * responsible for further action. 124 * responsible for further action.
124 * 125 *
125 * The number of irqs that are allowed within @HPD_STORM_DETECT_PERIOD is 126 * The number of IRQs that are allowed within @HPD_STORM_DETECT_PERIOD is
126 * stored in @dev_priv->hotplug.hpd_storm_threshold which defaults to 127 * stored in @dev_priv->hotplug.hpd_storm_threshold which defaults to
127 * @HPD_STORM_DEFAULT_THRESHOLD. If this threshold is exceeded, it's 128 * @HPD_STORM_DEFAULT_THRESHOLD. Long IRQs count as +10 to this threshold, and
128 * considered an irq storm and the irq state is set to @HPD_MARK_DISABLED. 129 * short IRQs count as +1. If this threshold is exceeded, it's considered an
130 * IRQ storm and the IRQ state is set to @HPD_MARK_DISABLED.
131 *
132 * By default, most systems will only count long IRQs towards
133 * &dev_priv->hotplug.hpd_storm_threshold. However, some older systems also
134 * suffer from short IRQ storms and must also track these. Because short IRQ
135 * storms are naturally caused by sideband interactions with DP MST devices,
136 * short IRQ detection is only enabled for systems without DP MST support.
137 * Systems which are new enough to support DP MST are far less likely to
138 * suffer from IRQ storms at all, so this is fine.
129 * 139 *
130 * The HPD threshold can be controlled through i915_hpd_storm_ctl in debugfs, 140 * The HPD threshold can be controlled through i915_hpd_storm_ctl in debugfs,
131 * and should only be adjusted for automated hotplug testing. 141 * and should only be adjusted for automated hotplug testing.
132 * 142 *
133 * Return true if an irq storm was detected on @pin. 143 * Return true if an IRQ storm was detected on @pin.
134 */ 144 */
135static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv, 145static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
136 enum hpd_pin pin) 146 enum hpd_pin pin, bool long_hpd)
137{ 147{
138 unsigned long start = dev_priv->hotplug.stats[pin].last_jiffies; 148 struct i915_hotplug *hpd = &dev_priv->hotplug;
149 unsigned long start = hpd->stats[pin].last_jiffies;
139 unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD); 150 unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
140 const int threshold = dev_priv->hotplug.hpd_storm_threshold; 151 const int increment = long_hpd ? 10 : 1;
152 const int threshold = hpd->hpd_storm_threshold;
141 bool storm = false; 153 bool storm = false;
142 154
155 if (!threshold ||
156 (!long_hpd && !dev_priv->hotplug.hpd_short_storm_enabled))
157 return false;
158
143 if (!time_in_range(jiffies, start, end)) { 159 if (!time_in_range(jiffies, start, end)) {
144 dev_priv->hotplug.stats[pin].last_jiffies = jiffies; 160 hpd->stats[pin].last_jiffies = jiffies;
145 dev_priv->hotplug.stats[pin].count = 0; 161 hpd->stats[pin].count = 0;
146 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", pin); 162 }
147 } else if (dev_priv->hotplug.stats[pin].count > threshold && 163
148 threshold) { 164 hpd->stats[pin].count += increment;
149 dev_priv->hotplug.stats[pin].state = HPD_MARK_DISABLED; 165 if (hpd->stats[pin].count > threshold) {
166 hpd->stats[pin].state = HPD_MARK_DISABLED;
150 DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", pin); 167 DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", pin);
151 storm = true; 168 storm = true;
152 } else { 169 } else {
153 dev_priv->hotplug.stats[pin].count++;
154 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", pin, 170 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", pin,
155 dev_priv->hotplug.stats[pin].count); 171 hpd->stats[pin].count);
156 } 172 }
157 173
158 return storm; 174 return storm;
159} 175}
160 176
161static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv) 177static void
178intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
162{ 179{
163 struct drm_device *dev = &dev_priv->drm; 180 struct drm_device *dev = &dev_priv->drm;
164 struct intel_connector *intel_connector; 181 struct intel_connector *intel_connector;
@@ -348,8 +365,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
348 hpd_event_bits = dev_priv->hotplug.event_bits; 365 hpd_event_bits = dev_priv->hotplug.event_bits;
349 dev_priv->hotplug.event_bits = 0; 366 dev_priv->hotplug.event_bits = 0;
350 367
351 /* Disable hotplug on connectors that hit an irq storm. */ 368 /* Enable polling for connectors which had HPD IRQ storms */
352 intel_hpd_irq_storm_disable(dev_priv); 369 intel_hpd_irq_storm_switch_to_polling(dev_priv);
353 370
354 spin_unlock_irq(&dev_priv->irq_lock); 371 spin_unlock_irq(&dev_priv->irq_lock);
355 372
@@ -474,15 +491,17 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
474 queue_hp = true; 491 queue_hp = true;
475 } 492 }
476 493
477 if (!long_hpd) 494 if (intel_hpd_irq_storm_detect(dev_priv, pin, long_hpd)) {
478 continue;
479
480 if (intel_hpd_irq_storm_detect(dev_priv, pin)) {
481 dev_priv->hotplug.event_bits &= ~BIT(pin); 495 dev_priv->hotplug.event_bits &= ~BIT(pin);
482 storm_detected = true; 496 storm_detected = true;
497 queue_hp = true;
483 } 498 }
484 } 499 }
485 500
501 /*
502 * Disable any IRQs that storms were detected on. Polling enablement
503 * happens later in our hotplug work.
504 */
486 if (storm_detected && dev_priv->display_irqs_enabled) 505 if (storm_detected && dev_priv->display_irqs_enabled)
487 dev_priv->display.hpd_irq_setup(dev_priv); 506 dev_priv->display.hpd_irq_setup(dev_priv);
488 spin_unlock(&dev_priv->irq_lock); 507 spin_unlock(&dev_priv->irq_lock);
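With short-pulse detection folded in above, every HPD IRQ inside
HPD_STORM_DETECT_PERIOD now adds a weighted increment to the per-pin count:
+10 for a long pulse, +1 for a short one, and a storm is declared once the
count exceeds hpd_storm_threshold. A sketch of just that accounting (the
helper name and the threshold value below are illustrative):

	static bool hpd_storm_hit(unsigned int *count, bool long_hpd,
				  unsigned int threshold)
	{
		*count += long_hpd ? 10 : 1;	/* a long IRQ weighs ten short ones */

		return threshold && *count > threshold;
	}

With an illustrative threshold of 50, the sixth long pulse within one detect
period (count 60 > 50) marks the pin disabled, while the +1 short-pulse path
only contributes on systems where hpd_short_storm_enabled is set.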
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
index 37ef540dd280..bc27b691d824 100644
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ b/drivers/gpu/drm/i915/intel_huc.c
@@ -108,13 +108,14 @@ fail:
108 * This function reads status register to verify if HuC 108 * This function reads status register to verify if HuC
109 * firmware was successfully loaded. 109 * firmware was successfully loaded.
110 * 110 *
111 * Returns positive value if HuC firmware is loaded and verified 111 * Returns: 1 if HuC firmware is loaded and verified,
112 * and -ENODEV if HuC is not present. 112 * 0 if HuC firmware is not loaded, and -ENODEV if HuC
113 * is not present on this platform.
113 */ 114 */
114int intel_huc_check_status(struct intel_huc *huc) 115int intel_huc_check_status(struct intel_huc *huc)
115{ 116{
116 struct drm_i915_private *dev_priv = huc_to_i915(huc); 117 struct drm_i915_private *dev_priv = huc_to_i915(huc);
117 u32 status; 118 bool status;
118 119
119 if (!HAS_HUC(dev_priv)) 120 if (!HAS_HUC(dev_priv))
120 return -ENODEV; 121 return -ENODEV;
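The reworked kernel-doc makes intel_huc_check_status() tri-state: 1 (firmware
loaded and verified), 0 (not loaded), -ENODEV (no HuC on this platform). A
hedged caller sketch honouring that contract (the surrounding function is
hypothetical):

	int ret = intel_huc_check_status(huc);

	if (ret < 0)
		return ret;	/* -ENODEV: platform has no HuC */
	if (ret == 0)
		DRM_DEBUG_DRIVER("HuC firmware not loaded\n");
	else
		DRM_DEBUG_DRIVER("HuC firmware loaded and verified\n");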
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 37c94a54efcb..08fd9b12e4d7 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -259,63 +259,6 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
259 ce->lrc_desc = desc; 259 ce->lrc_desc = desc;
260} 260}
261 261
262static struct i915_priolist *
263lookup_priolist(struct intel_engine_cs *engine, int prio)
264{
265 struct intel_engine_execlists * const execlists = &engine->execlists;
266 struct i915_priolist *p;
267 struct rb_node **parent, *rb;
268 bool first = true;
269
270 if (unlikely(execlists->no_priolist))
271 prio = I915_PRIORITY_NORMAL;
272
273find_priolist:
274 /* most positive priority is scheduled first, equal priorities fifo */
275 rb = NULL;
276 parent = &execlists->queue.rb_root.rb_node;
277 while (*parent) {
278 rb = *parent;
279 p = to_priolist(rb);
280 if (prio > p->priority) {
281 parent = &rb->rb_left;
282 } else if (prio < p->priority) {
283 parent = &rb->rb_right;
284 first = false;
285 } else {
286 return p;
287 }
288 }
289
290 if (prio == I915_PRIORITY_NORMAL) {
291 p = &execlists->default_priolist;
292 } else {
293 p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
294 /* Convert an allocation failure to a priority bump */
295 if (unlikely(!p)) {
296 prio = I915_PRIORITY_NORMAL; /* recurses just once */
297
298 /* To maintain ordering with all rendering, after an
299 * allocation failure we have to disable all scheduling.
300 * Requests will then be executed in fifo, and schedule
301 * will ensure that dependencies are emitted in fifo.
302 * There will be still some reordering with existing
303 * requests, so if userspace lied about their
304 * dependencies that reordering may be visible.
305 */
306 execlists->no_priolist = true;
307 goto find_priolist;
308 }
309 }
310
311 p->priority = prio;
312 INIT_LIST_HEAD(&p->requests);
313 rb_link_node(&p->node, rb, parent);
314 rb_insert_color_cached(&p->node, &execlists->queue, first);
315
316 return p;
317}
318
319static void unwind_wa_tail(struct i915_request *rq) 262static void unwind_wa_tail(struct i915_request *rq)
320{ 263{
321 rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES); 264 rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES);
@@ -324,9 +267,9 @@ static void unwind_wa_tail(struct i915_request *rq)
324 267
325static void __unwind_incomplete_requests(struct intel_engine_cs *engine) 268static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
326{ 269{
327 struct i915_request *rq, *rn; 270 struct i915_request *rq, *rn, *active = NULL;
328 struct i915_priolist *uninitialized_var(p); 271 struct list_head *uninitialized_var(pl);
329 int last_prio = I915_PRIORITY_INVALID; 272 int prio = I915_PRIORITY_INVALID | I915_PRIORITY_NEWCLIENT;
330 273
331 lockdep_assert_held(&engine->timeline.lock); 274 lockdep_assert_held(&engine->timeline.lock);
332 275
@@ -334,19 +277,34 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
334 &engine->timeline.requests, 277 &engine->timeline.requests,
335 link) { 278 link) {
336 if (i915_request_completed(rq)) 279 if (i915_request_completed(rq))
337 return; 280 break;
338 281
339 __i915_request_unsubmit(rq); 282 __i915_request_unsubmit(rq);
340 unwind_wa_tail(rq); 283 unwind_wa_tail(rq);
341 284
285 GEM_BUG_ON(rq->hw_context->active);
286
342 GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID); 287 GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
343 if (rq_prio(rq) != last_prio) { 288 if (rq_prio(rq) != prio) {
344 last_prio = rq_prio(rq); 289 prio = rq_prio(rq);
345 p = lookup_priolist(engine, last_prio); 290 pl = i915_sched_lookup_priolist(engine, prio);
346 } 291 }
292 GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
347 293
348 GEM_BUG_ON(p->priority != rq_prio(rq)); 294 list_add(&rq->sched.link, pl);
349 list_add(&rq->sched.link, &p->requests); 295
296 active = rq;
297 }
298
299 /*
300 * The active request is now effectively the start of a new client
301 * stream, so give it the equivalent small priority bump to prevent
302 * it being gazumped a second time by another peer.
303 */
304 if (!(prio & I915_PRIORITY_NEWCLIENT)) {
305 prio |= I915_PRIORITY_NEWCLIENT;
306 list_move_tail(&active->sched.link,
307 i915_sched_lookup_priolist(engine, prio));
350 } 308 }
351} 309}
352 310
@@ -355,13 +313,8 @@ execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
355{ 313{
356 struct intel_engine_cs *engine = 314 struct intel_engine_cs *engine =
357 container_of(execlists, typeof(*engine), execlists); 315 container_of(execlists, typeof(*engine), execlists);
358 unsigned long flags;
359
360 spin_lock_irqsave(&engine->timeline.lock, flags);
361 316
362 __unwind_incomplete_requests(engine); 317 __unwind_incomplete_requests(engine);
363
364 spin_unlock_irqrestore(&engine->timeline.lock, flags);
365} 318}
366 319
367static inline void 320static inline void
@@ -394,13 +347,17 @@ execlists_user_end(struct intel_engine_execlists *execlists)
394static inline void 347static inline void
395execlists_context_schedule_in(struct i915_request *rq) 348execlists_context_schedule_in(struct i915_request *rq)
396{ 349{
350 GEM_BUG_ON(rq->hw_context->active);
351
397 execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN); 352 execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
398 intel_engine_context_in(rq->engine); 353 intel_engine_context_in(rq->engine);
354 rq->hw_context->active = rq->engine;
399} 355}
400 356
401static inline void 357static inline void
402execlists_context_schedule_out(struct i915_request *rq, unsigned long status) 358execlists_context_schedule_out(struct i915_request *rq, unsigned long status)
403{ 359{
360 rq->hw_context->active = NULL;
404 intel_engine_context_out(rq->engine); 361 intel_engine_context_out(rq->engine);
405 execlists_context_status_change(rq, status); 362 execlists_context_status_change(rq, status);
406 trace_i915_request_out(rq); 363 trace_i915_request_out(rq);
@@ -417,9 +374,8 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
417 374
418static u64 execlists_update_context(struct i915_request *rq) 375static u64 execlists_update_context(struct i915_request *rq)
419{ 376{
377 struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt;
420 struct intel_context *ce = rq->hw_context; 378 struct intel_context *ce = rq->hw_context;
421 struct i915_hw_ppgtt *ppgtt =
422 rq->gem_context->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
423 u32 *reg_state = ce->lrc_reg_state; 379 u32 *reg_state = ce->lrc_reg_state;
424 380
425 reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail); 381 reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail);
@@ -430,7 +386,7 @@ static u64 execlists_update_context(struct i915_request *rq)
430 * PML4 is allocated during ppgtt init, so this is not needed 386 * PML4 is allocated during ppgtt init, so this is not needed
431 * in 48-bit mode. 387 * in 48-bit mode.
432 */ 388 */
433 if (ppgtt && !i915_vm_is_48bit(&ppgtt->vm)) 389 if (!i915_vm_is_48bit(&ppgtt->vm))
434 execlists_update_context_pdps(ppgtt, reg_state); 390 execlists_update_context_pdps(ppgtt, reg_state);
435 391
436 /* 392 /*
@@ -681,8 +637,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
681 while ((rb = rb_first_cached(&execlists->queue))) { 637 while ((rb = rb_first_cached(&execlists->queue))) {
682 struct i915_priolist *p = to_priolist(rb); 638 struct i915_priolist *p = to_priolist(rb);
683 struct i915_request *rq, *rn; 639 struct i915_request *rq, *rn;
640 int i;
684 641
685 list_for_each_entry_safe(rq, rn, &p->requests, sched.link) { 642 priolist_for_each_request_consume(rq, rn, p, i) {
686 /* 643 /*
687 * Can we combine this request with the current port? 644 * Can we combine this request with the current port?
688 * It has to be the same context/ringbuffer and not 645 * It has to be the same context/ringbuffer and not
@@ -701,11 +658,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
701 * combine this request with the last, then we 658 * combine this request with the last, then we
702 * are done. 659 * are done.
703 */ 660 */
704 if (port == last_port) { 661 if (port == last_port)
705 __list_del_many(&p->requests,
706 &rq->sched.link);
707 goto done; 662 goto done;
708 }
709 663
710 /* 664 /*
711 * If GVT overrides us we only ever submit 665 * If GVT overrides us we only ever submit
@@ -715,11 +669,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
715 * request) to the second port. 669 * request) to the second port.
716 */ 670 */
717 if (ctx_single_port_submission(last->hw_context) || 671 if (ctx_single_port_submission(last->hw_context) ||
718 ctx_single_port_submission(rq->hw_context)) { 672 ctx_single_port_submission(rq->hw_context))
719 __list_del_many(&p->requests,
720 &rq->sched.link);
721 goto done; 673 goto done;
722 }
723 674
724 GEM_BUG_ON(last->hw_context == rq->hw_context); 675 GEM_BUG_ON(last->hw_context == rq->hw_context);
725 676
@@ -730,15 +681,16 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
730 GEM_BUG_ON(port_isset(port)); 681 GEM_BUG_ON(port_isset(port));
731 } 682 }
732 683
733 INIT_LIST_HEAD(&rq->sched.link); 684 list_del_init(&rq->sched.link);
685
734 __i915_request_submit(rq); 686 __i915_request_submit(rq);
735 trace_i915_request_in(rq, port_index(port, execlists)); 687 trace_i915_request_in(rq, port_index(port, execlists));
688
736 last = rq; 689 last = rq;
737 submit = true; 690 submit = true;
738 } 691 }
739 692
740 rb_erase_cached(&p->node, &execlists->queue); 693 rb_erase_cached(&p->node, &execlists->queue);
741 INIT_LIST_HEAD(&p->requests);
742 if (p->priority != I915_PRIORITY_NORMAL) 694 if (p->priority != I915_PRIORITY_NORMAL)
743 kmem_cache_free(engine->i915->priorities, p); 695 kmem_cache_free(engine->i915->priorities, p);
744 } 696 }
@@ -873,16 +825,16 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
873 /* Flush the queued requests to the timeline list (for retiring). */ 825 /* Flush the queued requests to the timeline list (for retiring). */
874 while ((rb = rb_first_cached(&execlists->queue))) { 826 while ((rb = rb_first_cached(&execlists->queue))) {
875 struct i915_priolist *p = to_priolist(rb); 827 struct i915_priolist *p = to_priolist(rb);
828 int i;
876 829
877 list_for_each_entry_safe(rq, rn, &p->requests, sched.link) { 830 priolist_for_each_request_consume(rq, rn, p, i) {
878 INIT_LIST_HEAD(&rq->sched.link); 831 list_del_init(&rq->sched.link);
879 832
880 dma_fence_set_error(&rq->fence, -EIO); 833 dma_fence_set_error(&rq->fence, -EIO);
881 __i915_request_submit(rq); 834 __i915_request_submit(rq);
882 } 835 }
883 836
884 rb_erase_cached(&p->node, &execlists->queue); 837 rb_erase_cached(&p->node, &execlists->queue);
885 INIT_LIST_HEAD(&p->requests);
886 if (p->priority != I915_PRIORITY_NORMAL) 838 if (p->priority != I915_PRIORITY_NORMAL)
887 kmem_cache_free(engine->i915->priorities, p); 839 kmem_cache_free(engine->i915->priorities, p);
888 } 840 }
@@ -1088,13 +1040,7 @@ static void queue_request(struct intel_engine_cs *engine,
1088 struct i915_sched_node *node, 1040 struct i915_sched_node *node,
1089 int prio) 1041 int prio)
1090{ 1042{
1091 list_add_tail(&node->link, 1043 list_add_tail(&node->link, i915_sched_lookup_priolist(engine, prio));
1092 &lookup_priolist(engine, prio)->requests);
1093}
1094
1095static void __update_queue(struct intel_engine_cs *engine, int prio)
1096{
1097 engine->execlists.queue_priority = prio;
1098} 1044}
1099 1045
1100static void __submit_queue_imm(struct intel_engine_cs *engine) 1046static void __submit_queue_imm(struct intel_engine_cs *engine)
@@ -1113,7 +1059,7 @@ static void __submit_queue_imm(struct intel_engine_cs *engine)
1113static void submit_queue(struct intel_engine_cs *engine, int prio) 1059static void submit_queue(struct intel_engine_cs *engine, int prio)
1114{ 1060{
1115 if (prio > engine->execlists.queue_priority) { 1061 if (prio > engine->execlists.queue_priority) {
1116 __update_queue(engine, prio); 1062 engine->execlists.queue_priority = prio;
1117 __submit_queue_imm(engine); 1063 __submit_queue_imm(engine);
1118 } 1064 }
1119} 1065}
@@ -1136,139 +1082,6 @@ static void execlists_submit_request(struct i915_request *request)
1136 spin_unlock_irqrestore(&engine->timeline.lock, flags); 1082 spin_unlock_irqrestore(&engine->timeline.lock, flags);
1137} 1083}
1138 1084
1139static struct i915_request *sched_to_request(struct i915_sched_node *node)
1140{
1141 return container_of(node, struct i915_request, sched);
1142}
1143
1144static struct intel_engine_cs *
1145sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
1146{
1147 struct intel_engine_cs *engine = sched_to_request(node)->engine;
1148
1149 GEM_BUG_ON(!locked);
1150
1151 if (engine != locked) {
1152 spin_unlock(&locked->timeline.lock);
1153 spin_lock(&engine->timeline.lock);
1154 }
1155
1156 return engine;
1157}
1158
1159static void execlists_schedule(struct i915_request *request,
1160 const struct i915_sched_attr *attr)
1161{
1162 struct i915_priolist *uninitialized_var(pl);
1163 struct intel_engine_cs *engine, *last;
1164 struct i915_dependency *dep, *p;
1165 struct i915_dependency stack;
1166 const int prio = attr->priority;
1167 LIST_HEAD(dfs);
1168
1169 GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
1170
1171 if (i915_request_completed(request))
1172 return;
1173
1174 if (prio <= READ_ONCE(request->sched.attr.priority))
1175 return;
1176
1177 /* Need BKL in order to use the temporary link inside i915_dependency */
1178 lockdep_assert_held(&request->i915->drm.struct_mutex);
1179
1180 stack.signaler = &request->sched;
1181 list_add(&stack.dfs_link, &dfs);
1182
1183 /*
1184 * Recursively bump all dependent priorities to match the new request.
1185 *
1186 * A naive approach would be to use recursion:
1187 * static void update_priorities(struct i915_sched_node *node, prio) {
1188 * list_for_each_entry(dep, &node->signalers_list, signal_link)
1189 * update_priorities(dep->signal, prio)
1190 * queue_request(node);
1191 * }
1192 * but that may have unlimited recursion depth and so runs a very
1193 * real risk of overunning the kernel stack. Instead, we build
1194 * a flat list of all dependencies starting with the current request.
1195 * As we walk the list of dependencies, we add all of its dependencies
1196 * to the end of the list (this may include an already visited
1197 * request) and continue to walk onwards onto the new dependencies. The
1198 * end result is a topological list of requests in reverse order, the
1199 * last element in the list is the request we must execute first.
1200 */
1201 list_for_each_entry(dep, &dfs, dfs_link) {
1202 struct i915_sched_node *node = dep->signaler;
1203
1204 /*
1205 * Within an engine, there can be no cycle, but we may
1206 * refer to the same dependency chain multiple times
1207 * (redundant dependencies are not eliminated) and across
1208 * engines.
1209 */
1210 list_for_each_entry(p, &node->signalers_list, signal_link) {
1211 GEM_BUG_ON(p == dep); /* no cycles! */
1212
1213 if (i915_sched_node_signaled(p->signaler))
1214 continue;
1215
1216 GEM_BUG_ON(p->signaler->attr.priority < node->attr.priority);
1217 if (prio > READ_ONCE(p->signaler->attr.priority))
1218 list_move_tail(&p->dfs_link, &dfs);
1219 }
1220 }
1221
1222 /*
1223 * If we didn't need to bump any existing priorities, and we haven't
1224 * yet submitted this request (i.e. there is no potential race with
1225 * execlists_submit_request()), we can set our own priority and skip
1226 * acquiring the engine locks.
1227 */
1228 if (request->sched.attr.priority == I915_PRIORITY_INVALID) {
1229 GEM_BUG_ON(!list_empty(&request->sched.link));
1230 request->sched.attr = *attr;
1231 if (stack.dfs_link.next == stack.dfs_link.prev)
1232 return;
1233 __list_del_entry(&stack.dfs_link);
1234 }
1235
1236 last = NULL;
1237 engine = request->engine;
1238 spin_lock_irq(&engine->timeline.lock);
1239
1240 /* Fifo and depth-first replacement ensure our deps execute before us */
1241 list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
1242 struct i915_sched_node *node = dep->signaler;
1243
1244 INIT_LIST_HEAD(&dep->dfs_link);
1245
1246 engine = sched_lock_engine(node, engine);
1247
1248 if (prio <= node->attr.priority)
1249 continue;
1250
1251 node->attr.priority = prio;
1252 if (!list_empty(&node->link)) {
1253 if (last != engine) {
1254 pl = lookup_priolist(engine, prio);
1255 last = engine;
1256 }
1257 GEM_BUG_ON(pl->priority != prio);
1258 list_move_tail(&node->link, &pl->requests);
1259 }
1260
1261 if (prio > engine->execlists.queue_priority &&
1262 i915_sw_fence_done(&sched_to_request(node)->submit)) {
1263 /* defer submission until after all of our updates */
1264 __update_queue(engine, prio);
1265 tasklet_hi_schedule(&engine->execlists.tasklet);
1266 }
1267 }
1268
1269 spin_unlock_irq(&engine->timeline.lock);
1270}
1271
1272static void execlists_context_destroy(struct intel_context *ce) 1085static void execlists_context_destroy(struct intel_context *ce)
1273{ 1086{
1274 GEM_BUG_ON(ce->pin_count); 1087 GEM_BUG_ON(ce->pin_count);
@@ -1284,6 +1097,28 @@ static void execlists_context_destroy(struct intel_context *ce)
1284 1097
1285static void execlists_context_unpin(struct intel_context *ce) 1098static void execlists_context_unpin(struct intel_context *ce)
1286{ 1099{
1100 struct intel_engine_cs *engine;
1101
1102 /*
1103 * The tasklet may still be using a pointer to our state, via an
1104 * old request. However, since we know we only unpin the context
1105 * on retirement of the following request, we know that the last
1106 * request referencing us will have had a completion CS interrupt.
1107 * If we see that it is still active, it means that the tasklet hasn't
1108 * had the chance to run yet; let it run before we tear down the
1109 * reference it may use.
1110 */
1111 engine = READ_ONCE(ce->active);
1112 if (unlikely(engine)) {
1113 unsigned long flags;
1114
1115 spin_lock_irqsave(&engine->timeline.lock, flags);
1116 process_csb(engine);
1117 spin_unlock_irqrestore(&engine->timeline.lock, flags);
1118
1119 GEM_BUG_ON(READ_ONCE(ce->active));
1120 }
1121
1287 i915_gem_context_unpin_hw_id(ce->gem_context); 1122 i915_gem_context_unpin_hw_id(ce->gem_context);
1288 1123
1289 intel_ring_unpin(ce->ring); 1124 intel_ring_unpin(ce->ring);
@@ -1387,6 +1222,7 @@ execlists_context_pin(struct intel_engine_cs *engine,
1387 struct intel_context *ce = to_intel_context(ctx, engine); 1222 struct intel_context *ce = to_intel_context(ctx, engine);
1388 1223
1389 lockdep_assert_held(&ctx->i915->drm.struct_mutex); 1224 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
1225 GEM_BUG_ON(!ctx->ppgtt);
1390 1226
1391 if (likely(ce->pin_count++)) 1227 if (likely(ce->pin_count++))
1392 return ce; 1228 return ce;
@@ -1691,7 +1527,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
1691 unsigned int i; 1527 unsigned int i;
1692 int ret; 1528 int ret;
1693 1529
1694 if (GEM_WARN_ON(engine->id != RCS)) 1530 if (GEM_DEBUG_WARN_ON(engine->id != RCS))
1695 return -EINVAL; 1531 return -EINVAL;
1696 1532
1697 switch (INTEL_GEN(engine->i915)) { 1533 switch (INTEL_GEN(engine->i915)) {
@@ -1730,8 +1566,8 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
1730 */ 1566 */
1731 for (i = 0; i < ARRAY_SIZE(wa_bb_fn); i++) { 1567 for (i = 0; i < ARRAY_SIZE(wa_bb_fn); i++) {
1732 wa_bb[i]->offset = batch_ptr - batch; 1568 wa_bb[i]->offset = batch_ptr - batch;
1733 if (GEM_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset, 1569 if (GEM_DEBUG_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset,
1734 CACHELINE_BYTES))) { 1570 CACHELINE_BYTES))) {
1735 ret = -EINVAL; 1571 ret = -EINVAL;
1736 break; 1572 break;
1737 } 1573 }
@@ -1914,7 +1750,7 @@ static void execlists_reset(struct intel_engine_cs *engine,
1914 unsigned long flags; 1750 unsigned long flags;
1915 u32 *regs; 1751 u32 *regs;
1916 1752
1917 GEM_TRACE("%s request global=%x, current=%d\n", 1753 GEM_TRACE("%s request global=%d, current=%d\n",
1918 engine->name, request ? request->global_seqno : 0, 1754 engine->name, request ? request->global_seqno : 0,
1919 intel_engine_get_seqno(engine)); 1755 intel_engine_get_seqno(engine));
1920 1756
@@ -2041,8 +1877,7 @@ static int gen8_emit_bb_start(struct i915_request *rq,
2041 * it is unsafe in case of lite-restore (because the ctx is 1877 * it is unsafe in case of lite-restore (because the ctx is
2042 * not idle). PML4 is allocated during ppgtt init so this is 1878 * not idle). PML4 is allocated during ppgtt init so this is
2043 * not needed in 48-bit.*/ 1879 * not needed in 48-bit.*/
2044 if (rq->gem_context->ppgtt && 1880 if ((intel_engine_flag(rq->engine) & rq->gem_context->ppgtt->pd_dirty_rings) &&
2045 (intel_engine_flag(rq->engine) & rq->gem_context->ppgtt->pd_dirty_rings) &&
2046 !i915_vm_is_48bit(&rq->gem_context->ppgtt->vm) && 1881 !i915_vm_is_48bit(&rq->gem_context->ppgtt->vm) &&
2047 !intel_vgpu_active(rq->i915)) { 1882 !intel_vgpu_active(rq->i915)) {
2048 ret = intel_logical_ring_emit_pdps(rq); 1883 ret = intel_logical_ring_emit_pdps(rq);
@@ -2121,7 +1956,7 @@ static int gen8_emit_flush(struct i915_request *request, u32 mode)
2121 1956
2122 if (mode & EMIT_INVALIDATE) { 1957 if (mode & EMIT_INVALIDATE) {
2123 cmd |= MI_INVALIDATE_TLB; 1958 cmd |= MI_INVALIDATE_TLB;
2124 if (request->engine->id == VCS) 1959 if (request->engine->class == VIDEO_DECODE_CLASS)
2125 cmd |= MI_INVALIDATE_BSD; 1960 cmd |= MI_INVALIDATE_BSD;
2126 } 1961 }
2127 1962
@@ -2306,7 +2141,7 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
2306{ 2141{
2307 engine->submit_request = execlists_submit_request; 2142 engine->submit_request = execlists_submit_request;
2308 engine->cancel_requests = execlists_cancel_requests; 2143 engine->cancel_requests = execlists_cancel_requests;
2309 engine->schedule = execlists_schedule; 2144 engine->schedule = i915_schedule;
2310 engine->execlists.tasklet.func = execlists_submission_tasklet; 2145 engine->execlists.tasklet.func = execlists_submission_tasklet;
2311 2146
2312 engine->reset.prepare = execlists_reset_prepare; 2147 engine->reset.prepare = execlists_reset_prepare;
@@ -2644,7 +2479,6 @@ static void execlists_init_reg_state(u32 *regs,
2644 struct intel_ring *ring) 2479 struct intel_ring *ring)
2645{ 2480{
2646 struct drm_i915_private *dev_priv = engine->i915; 2481 struct drm_i915_private *dev_priv = engine->i915;
2647 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: dev_priv->mm.aliasing_ppgtt;
2648 u32 base = engine->mmio_base; 2482 u32 base = engine->mmio_base;
2649 bool rcs = engine->class == RENDER_CLASS; 2483 bool rcs = engine->class == RENDER_CLASS;
2650 2484
@@ -2716,12 +2550,12 @@ static void execlists_init_reg_state(u32 *regs,
2716 CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0), 0); 2550 CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0), 0);
2717 CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0), 0); 2551 CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0), 0);
2718 2552
2719 if (ppgtt && i915_vm_is_48bit(&ppgtt->vm)) { 2553 if (i915_vm_is_48bit(&ctx->ppgtt->vm)) {
2720 /* 64b PPGTT (48bit canonical) 2554 /* 64b PPGTT (48bit canonical)
2721 * PDP0_DESCRIPTOR contains the base address to PML4 and 2555 * PDP0_DESCRIPTOR contains the base address to PML4 and
2722 * other PDP Descriptors are ignored. 2556 * other PDP Descriptors are ignored.
2723 */ 2557 */
2724 ASSIGN_CTX_PML4(ppgtt, regs); 2558 ASSIGN_CTX_PML4(ctx->ppgtt, regs);
2725 } 2559 }
2726 2560
2727 if (rcs) { 2561 if (rcs) {
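The scheduling code deleted above (lookup_priolist(), execlists_schedule()
and the lock-juggling helpers) moves into i915_scheduler.c as
i915_sched_lookup_priolist() and i915_schedule(). The removed comment's
argument for avoiding recursion still applies there: the dependency graph is
walked with a worklist instead. A compact sketch of that flattened
depth-first walk (types and field names are stand-ins; only the
list-splicing idea mirrors the code):

	LIST_HEAD(dfs);

	list_add(&stack.dfs_link, &dfs);

	/* appending while iterating turns recursion into a worklist */
	list_for_each_entry(dep, &dfs, dfs_link) {
		list_for_each_entry(p, &dep->signaler->signalers_list, signal_link)
			if (prio > READ_ONCE(p->signaler->attr.priority))
				list_move_tail(&p->dfs_link, &dfs);
	}

	/* the tail of @dfs is now the request that must execute first */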
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c
index 3e085c5f2b81..96a8d9524b0c 100644
--- a/drivers/gpu/drm/i915/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/intel_lspcon.c
@@ -27,6 +27,22 @@
27#include <drm/drm_dp_dual_mode_helper.h> 27#include <drm/drm_dp_dual_mode_helper.h>
28#include "intel_drv.h" 28#include "intel_drv.h"
29 29
30/* LSPCON OUI Vendor ID (signatures) */
31#define LSPCON_VENDOR_PARADE_OUI 0x001CF8
32#define LSPCON_VENDOR_MCA_OUI 0x0060AD
33
34/* AUX addresses to write MCA AVI IF */
35#define LSPCON_MCA_AVI_IF_WRITE_OFFSET 0x5C0
36#define LSPCON_MCA_AVI_IF_CTRL 0x5DF
37#define LSPCON_MCA_AVI_IF_KICKOFF (1 << 0)
38#define LSPCON_MCA_AVI_IF_HANDLED (1 << 1)
39
40/* AUX addresses to write Parade AVI IF */
41#define LSPCON_PARADE_AVI_IF_WRITE_OFFSET 0x516
42#define LSPCON_PARADE_AVI_IF_CTRL 0x51E
43#define LSPCON_PARADE_AVI_IF_KICKOFF (1 << 7)
44#define LSPCON_PARADE_AVI_IF_DATA_SIZE 32
45
30static struct intel_dp *lspcon_to_intel_dp(struct intel_lspcon *lspcon) 46static struct intel_dp *lspcon_to_intel_dp(struct intel_lspcon *lspcon)
31{ 47{
32 struct intel_digital_port *dig_port = 48 struct intel_digital_port *dig_port =
@@ -50,6 +66,40 @@ static const char *lspcon_mode_name(enum drm_lspcon_mode mode)
50 } 66 }
51} 67}
52 68
69static bool lspcon_detect_vendor(struct intel_lspcon *lspcon)
70{
71 struct intel_dp *dp = lspcon_to_intel_dp(lspcon);
72 struct drm_dp_dpcd_ident *ident;
73 u32 vendor_oui;
74
75 if (drm_dp_read_desc(&dp->aux, &dp->desc, drm_dp_is_branch(dp->dpcd))) {
76 DRM_ERROR("Can't read description\n");
77 return false;
78 }
79
80 ident = &dp->desc.ident;
81 vendor_oui = (ident->oui[0] << 16) | (ident->oui[1] << 8) |
82 ident->oui[2];
83
84 switch (vendor_oui) {
85 case LSPCON_VENDOR_MCA_OUI:
86 lspcon->vendor = LSPCON_VENDOR_MCA;
87 DRM_DEBUG_KMS("Vendor: Mega Chips\n");
88 break;
89
90 case LSPCON_VENDOR_PARADE_OUI:
91 lspcon->vendor = LSPCON_VENDOR_PARADE;
92 DRM_DEBUG_KMS("Vendor: Parade Tech\n");
93 break;
94
95 default:
96 DRM_ERROR("Invalid/Unknown vendor OUI\n");
97 return false;
98 }
99
100 return true;
101}
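The OUI is reassembled big-endian from the three ident bytes; e.g. a Parade
branch device reports ident->oui = { 0x00, 0x1c, 0xf8 }, so:

	vendor_oui = (0x00 << 16) | (0x1c << 8) | 0xf8;	/* == LSPCON_VENDOR_PARADE_OUI */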
102
53static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon) 103static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon)
54{ 104{
55 enum drm_lspcon_mode current_mode; 105 enum drm_lspcon_mode current_mode;
@@ -130,6 +180,21 @@ static bool lspcon_wake_native_aux_ch(struct intel_lspcon *lspcon)
130 return true; 180 return true;
131} 181}
132 182
183void lspcon_ycbcr420_config(struct drm_connector *connector,
184 struct intel_crtc_state *crtc_state)
185{
186 const struct drm_display_info *info = &connector->display_info;
187 const struct drm_display_mode *adjusted_mode =
188 &crtc_state->base.adjusted_mode;
189
190 if (drm_mode_is_420_only(info, adjusted_mode) &&
191 connector->ycbcr_420_allowed) {
192 crtc_state->port_clock /= 2;
193 crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
194 crtc_state->lspcon_downsampling = true;
195 }
196}
197
133static bool lspcon_probe(struct intel_lspcon *lspcon) 198static bool lspcon_probe(struct intel_lspcon *lspcon)
134{ 199{
135 int retry; 200 int retry;
@@ -159,7 +224,18 @@ static bool lspcon_probe(struct intel_lspcon *lspcon)
159 /* Yay ... got a LSPCON device */ 224 /* Yay ... got a LSPCON device */
160 DRM_DEBUG_KMS("LSPCON detected\n"); 225 DRM_DEBUG_KMS("LSPCON detected\n");
161 lspcon->mode = lspcon_wait_mode(lspcon, expected_mode); 226 lspcon->mode = lspcon_wait_mode(lspcon, expected_mode);
162 lspcon->active = true; 227
228 /*
229 * In the SW state machine, let's put LSPCON in PCON mode only.
230 * In this way, it will work with HDMI 1.4 sinks as well as HDMI
231 * 2.0 sinks.
232 */
233 if (lspcon->mode != DRM_LSPCON_MODE_PCON) {
234 if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON) < 0) {
235 DRM_ERROR("LSPCON mode change to PCON failed\n");
236 return false;
237 }
238 }
163 return true; 239 return true;
164} 240}
165 241
@@ -185,6 +261,255 @@ static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon)
185 DRM_DEBUG_KMS("LSPCON DP descriptor mismatch after resume\n"); 261 DRM_DEBUG_KMS("LSPCON DP descriptor mismatch after resume\n");
186} 262}
187 263
264static bool lspcon_parade_fw_ready(struct drm_dp_aux *aux)
265{
266 u8 avi_if_ctrl;
267 u8 retry;
268 ssize_t ret;
269
270 /* Check if LSPCON FW is ready for data */
271 for (retry = 0; retry < 5; retry++) {
272 if (retry)
273 usleep_range(200, 300);
274
275 ret = drm_dp_dpcd_read(aux, LSPCON_PARADE_AVI_IF_CTRL,
276 &avi_if_ctrl, 1);
277 if (ret < 0) {
278 DRM_ERROR("Failed to read AVI IF control\n");
279 return false;
280 }
281
282 if ((avi_if_ctrl & LSPCON_PARADE_AVI_IF_KICKOFF) == 0)
283 return true;
284 }
285
286 DRM_ERROR("Parade FW not ready to accept AVI IF\n");
287 return false;
288}
289
290static bool _lspcon_parade_write_infoframe_blocks(struct drm_dp_aux *aux,
291 uint8_t *avi_buf)
292{
293 u8 avi_if_ctrl;
294 u8 block_count = 0;
295 u8 *data;
296 uint16_t reg;
297 ssize_t ret;
298
299 while (block_count < 4) {
300 if (!lspcon_parade_fw_ready(aux)) {
301 DRM_DEBUG_KMS("LSPCON FW not ready, block %d\n",
302 block_count);
303 return false;
304 }
305
306 reg = LSPCON_PARADE_AVI_IF_WRITE_OFFSET;
307 data = avi_buf + block_count * 8;
308 ret = drm_dp_dpcd_write(aux, reg, data, 8);
309 if (ret < 0) {
310 DRM_ERROR("Failed to write AVI IF block %d\n",
311 block_count);
312 return false;
313 }
314
315 /*
316 * Once a block of data is written, we have to inform the FW
317 * about this by writing into the AVI infoframe control register:
318 * - set the kickoff bit[7] to 1
319 * - write the block no. to bits[1:0]
320 */
321 reg = LSPCON_PARADE_AVI_IF_CTRL;
322 avi_if_ctrl = LSPCON_PARADE_AVI_IF_KICKOFF | block_count;
323 ret = drm_dp_dpcd_write(aux, reg, &avi_if_ctrl, 1);
324 if (ret < 0) {
325 DRM_ERROR("Failed to update (0x%x), block %d\n",
326 reg, block_count);
327 return false;
328 }
329
330 block_count++;
331 }
332
333 DRM_DEBUG_KMS("Wrote AVI IF blocks successfully\n");
334 return true;
335}
336
337static bool _lspcon_write_avi_infoframe_parade(struct drm_dp_aux *aux,
338 const uint8_t *frame,
339 ssize_t len)
340{
341 uint8_t avi_if[LSPCON_PARADE_AVI_IF_DATA_SIZE] = {1, };
342
343 /*
344 * A Parade frame contains 32 bytes of data, divided
345 * into 4 blocks of 8 bytes:
346 * Token byte (first byte of first block, must be non-zero)
347 * HB0 to HB2 from AVI IF (3 bytes header)
348 * PB0 to PB27 from AVI IF (28 bytes data)
349 * So it should look like this:
350 * first block: | <token> <HB0-HB2> <DB0-DB3> |
351 * next 3 blocks: |<DB4-DB11>|<DB12-DB19>|<DB20-DB27>|
352 */
353
354 if (len > LSPCON_PARADE_AVI_IF_DATA_SIZE - 1) {
355 DRM_ERROR("Invalid length of infoframes\n");
356 return false;
357 }
358
359 memcpy(&avi_if[1], frame, len);
360
361 if (!_lspcon_parade_write_infoframe_blocks(aux, avi_if)) {
362 DRM_DEBUG_KMS("Failed to write infoframe blocks\n");
363 return false;
364 }
365
366 return true;
367}
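Laid out against the loop in _lspcon_parade_write_infoframe_blocks(), the
32-byte buffer built here decomposes into four 8-byte DPCD writes, each
chased by a kickoff write (offsets taken from the #defines above; the table
itself is illustrative):

	/*
	 *   8-byte write @ 0x516              kickoff @ 0x51E
	 *   block 0: token, HB0-HB2, DB0-DB3  0x80 | 0
	 *   block 1: DB4-DB11                 0x80 | 1
	 *   block 2: DB12-DB19                0x80 | 2
	 *   block 3: DB20-DB27                0x80 | 3
	 */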
368
369static bool _lspcon_write_avi_infoframe_mca(struct drm_dp_aux *aux,
370 const uint8_t *buffer, ssize_t len)
371{
372 int ret;
373 uint32_t val = 0;
374 uint32_t retry;
375 uint16_t reg;
376 const uint8_t *data = buffer;
377
378 reg = LSPCON_MCA_AVI_IF_WRITE_OFFSET;
379 while (val < len) {
380 /* DPCD write for AVI IF can fail on a slow FW day, so retry */
381 for (retry = 0; retry < 5; retry++) {
382 ret = drm_dp_dpcd_write(aux, reg, (void *)data, 1);
383 if (ret == 1) {
384 break;
385 } else if (retry < 4) {
386 mdelay(50);
387 continue;
388 } else {
389 DRM_ERROR("DPCD write failed at:0x%x\n", reg);
390 return false;
391 }
392 }
393 val++; reg++; data++;
394 }
395
396 val = 0;
397 reg = LSPCON_MCA_AVI_IF_CTRL;
398 ret = drm_dp_dpcd_read(aux, reg, &val, 1);
399 if (ret < 0) {
400 DRM_ERROR("DPCD read failed, address 0x%x\n", reg);
401 return false;
402 }
403
404 /* Tell the LSPCON chip about the infoframe: clear bit 1 and set bit 0 */
405 val &= ~LSPCON_MCA_AVI_IF_HANDLED;
406 val |= LSPCON_MCA_AVI_IF_KICKOFF;
407
408 ret = drm_dp_dpcd_write(aux, reg, &val, 1);
409 if (ret < 0) {
410 DRM_ERROR("DPCD write failed, address 0x%x\n", reg);
411 return false;
412 }
413
414 val = 0;
415 ret = drm_dp_dpcd_read(aux, reg, &val, 1);
416 if (ret < 0) {
417 DRM_ERROR("DPCD read failed, address 0x%x\n", reg);
418 return false;
419 }
420
421 if (val == LSPCON_MCA_AVI_IF_HANDLED)
422 DRM_DEBUG_KMS("AVI IF handled by FW\n");
423
424 return true;
425}
426
427void lspcon_write_infoframe(struct intel_encoder *encoder,
428 const struct intel_crtc_state *crtc_state,
429 unsigned int type,
430 const void *frame, ssize_t len)
431{
432 bool ret;
433 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
434 struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base);
435
436 /* LSPCON only needs AVI IF */
437 if (type != HDMI_INFOFRAME_TYPE_AVI)
438 return;
439
440 if (lspcon->vendor == LSPCON_VENDOR_MCA)
441 ret = _lspcon_write_avi_infoframe_mca(&intel_dp->aux,
442 frame, len);
443 else
444 ret = _lspcon_write_avi_infoframe_parade(&intel_dp->aux,
445 frame, len);
446
447 if (!ret) {
448 DRM_ERROR("Failed to write AVI infoframes\n");
449 return;
450 }
451
452 DRM_DEBUG_DRIVER("AVI infoframes updated successfully\n");
453}
454
455void lspcon_set_infoframes(struct intel_encoder *encoder,
456 bool enable,
457 const struct intel_crtc_state *crtc_state,
458 const struct drm_connector_state *conn_state)
459{
460 ssize_t ret;
461 union hdmi_infoframe frame;
462 uint8_t buf[VIDEO_DIP_DATA_SIZE];
463 struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
464 struct intel_lspcon *lspcon = &dig_port->lspcon;
465 struct intel_dp *intel_dp = &dig_port->dp;
466 struct drm_connector *connector = &intel_dp->attached_connector->base;
467 const struct drm_display_mode *mode = &crtc_state->base.adjusted_mode;
468 bool is_hdmi2_sink = connector->display_info.hdmi.scdc.supported;
469
470 if (!lspcon->active) {
471 DRM_ERROR("Writing infoframes while LSPCON is disabled?\n");
472 return;
473 }
474
475 ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
476 mode, is_hdmi2_sink);
477 if (ret < 0) {
478 DRM_ERROR("couldn't fill AVI infoframe\n");
479 return;
480 }
481
482 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) {
483 if (crtc_state->lspcon_downsampling)
484 frame.avi.colorspace = HDMI_COLORSPACE_YUV420;
485 else
486 frame.avi.colorspace = HDMI_COLORSPACE_YUV444;
487 } else {
488 frame.avi.colorspace = HDMI_COLORSPACE_RGB;
489 }
490
491 drm_hdmi_avi_infoframe_quant_range(&frame.avi, mode,
492 crtc_state->limited_color_range ?
493 HDMI_QUANTIZATION_RANGE_LIMITED :
494 HDMI_QUANTIZATION_RANGE_FULL,
495 false, is_hdmi2_sink);
496
497 ret = hdmi_infoframe_pack(&frame, buf, sizeof(buf));
498 if (ret < 0) {
499 DRM_ERROR("Failed to pack AVI IF\n");
500 return;
501 }
502
503 dig_port->write_infoframe(encoder, crtc_state, HDMI_INFOFRAME_TYPE_AVI,
504 buf, ret);
505}
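
The subtle step above is the colorspace choice: the pipe outputs YCbCr 4:4:4, but when the LSPCON itself performs the 4:4:4 -> 4:2:0 downsampling, the AVI infoframe must advertise 4:2:0 to the sink. A condensed sketch of that decision (hypothetical helper; assumes the kernel's enum hdmi_colorspace from <linux/hdmi.h>):

#include <linux/hdmi.h>	/* enum hdmi_colorspace */

static enum hdmi_colorspace lspcon_avi_colorspace(bool ycbcr444_output,
						  bool lspcon_downsampling)
{
	if (!ycbcr444_output)
		return HDMI_COLORSPACE_RGB;

	/* LSPCON converts 4:4:4 -> 4:2:0 downstream; tell the sink 4:2:0 */
	return lspcon_downsampling ? HDMI_COLORSPACE_YUV420
				   : HDMI_COLORSPACE_YUV444;
}
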
506
507bool lspcon_infoframe_enabled(struct intel_encoder *encoder,
508 const struct intel_crtc_state *pipe_config)
509{
510 return enc_to_intel_lspcon(&encoder->base)->active;
511}
512
513void lspcon_resume(struct intel_lspcon *lspcon)
514{
515 enum drm_lspcon_mode expected_mode;
@@ -216,6 +541,7 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port)
541 struct intel_lspcon *lspcon = &intel_dig_port->lspcon;
542 struct drm_device *dev = intel_dig_port->base.base.dev;
543 struct drm_i915_private *dev_priv = to_i915(dev);
544 struct drm_connector *connector = &dp->attached_connector->base;
545
546 if (!HAS_LSPCON(dev_priv)) {
547 DRM_ERROR("LSPCON is not supported on this platform\n");
@@ -230,25 +556,18 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port)
556 return false;
557 }
558
233 /*
234 * In the SW state machine, lets Put LSPCON in PCON mode only.
235 * In this way, it will work with both HDMI 1.4 sinks as well as HDMI
236 * 2.0 sinks.
237 */
238 if (lspcon->active && lspcon->mode != DRM_LSPCON_MODE_PCON) {
239 if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON) < 0) {
240 DRM_ERROR("LSPCON mode change to PCON failed\n");
241 return false;
242 }
243 }
244
559 if (!intel_dp_read_dpcd(dp)) {
560 DRM_ERROR("LSPCON DPCD read failed\n");
561 return false;
562 }
563
250 drm_dp_read_desc(&dp->aux, &dp->desc, drm_dp_is_branch(dp->dpcd));
564 if (!lspcon_detect_vendor(lspcon)) {
565 DRM_ERROR("LSPCON vendor detection failed\n");
566 return false;
567 }
568
569 connector->ycbcr_420_allowed = true;
570 lspcon->active = true;
571 DRM_DEBUG_KMS("Success: LSPCON init\n");
572 return true;
573}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index f9f3b0885ba5..e6c5d985ea0a 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -42,10 +42,6 @@
42#include <linux/acpi.h>
43
44/* Private structure for the integrated LVDS support */
45struct intel_lvds_connector {
46 struct intel_connector base;
47};
48
45struct intel_lvds_pps {
46 /* 100us units */
47 int t1_t2;
@@ -70,7 +66,7 @@ struct intel_lvds_encoder {
66 struct intel_lvds_pps init_pps;
67 u32 init_lvds_val;
68
73 struct intel_lvds_connector *attached_connector;
69 struct intel_connector *attached_connector;
70};
71
72static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder)
@@ -78,11 +74,6 @@ static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder)
74 return container_of(encoder, struct intel_lvds_encoder, base.base);
75}
76
81static struct intel_lvds_connector *to_lvds_connector(struct drm_connector *connector)
82{
83 return container_of(connector, struct intel_lvds_connector, base.base);
84}
85
77bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
78 i915_reg_t lvds_reg, enum pipe *pipe)
79{
@@ -396,7 +387,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
387 struct intel_lvds_encoder *lvds_encoder =
388 to_lvds_encoder(&intel_encoder->base);
389 struct intel_connector *intel_connector =
399 &lvds_encoder->attached_connector->base;
390 lvds_encoder->attached_connector;
391 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
392 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
393 unsigned int lvds_bpp;
@@ -418,6 +409,8 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
409 pipe_config->pipe_bpp = lvds_bpp;
410 }
411
412 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
413
414 /*
415 * We have timings from the BIOS for the panel, put them in
416 * to the adjusted mode. The CRTC will be set up for this mode,
@@ -461,15 +454,15 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
454 */
455static int intel_lvds_get_modes(struct drm_connector *connector)
456{
464 struct intel_lvds_connector *lvds_connector = to_lvds_connector(connector);
457 struct intel_connector *intel_connector = to_intel_connector(connector);
458 struct drm_device *dev = connector->dev;
459 struct drm_display_mode *mode;
460
461 /* use cached edid if we have one */
469 if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
470 return drm_add_edid_modes(connector, lvds_connector->base.edid);
462 if (!IS_ERR_OR_NULL(intel_connector->edid))
463 return drm_add_edid_modes(connector, intel_connector->edid);
464
472 mode = drm_mode_duplicate(dev, lvds_connector->base.panel.fixed_mode);
465 mode = drm_mode_duplicate(dev, intel_connector->panel.fixed_mode);
466 if (mode == NULL)
467 return 0;
468
@@ -477,27 +470,6 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
470 return 1;
471}
472
480/**
481 * intel_lvds_destroy - unregister and free LVDS structures
482 * @connector: connector to free
483 *
484 * Unregister the DDC bus for this connector then free the driver private
485 * structure.
486 */
487static void intel_lvds_destroy(struct drm_connector *connector)
488{
489 struct intel_lvds_connector *lvds_connector =
490 to_lvds_connector(connector);
491
492 if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
493 kfree(lvds_connector->base.edid);
494
495 intel_panel_fini(&lvds_connector->base.panel);
496
497 drm_connector_cleanup(connector);
498 kfree(connector);
499}
500
473static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
474 .get_modes = intel_lvds_get_modes,
475 .mode_valid = intel_lvds_mode_valid,
@@ -511,7 +483,7 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
483 .atomic_set_property = intel_digital_connector_atomic_set_property,
484 .late_register = intel_connector_register,
485 .early_unregister = intel_connector_unregister,
514 .destroy = intel_lvds_destroy,
486 .destroy = intel_connector_destroy,
487 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
488 .atomic_duplicate_state = intel_digital_connector_duplicate_state,
489};
@@ -802,8 +774,7 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
774 return i915_modparams.lvds_channel_mode == 2;
775
776 /* single channel LVDS is limited to 112 MHz */
805 if (lvds_encoder->attached_connector->base.panel.fixed_mode->clock
806 > 112999
777 if (lvds_encoder->attached_connector->panel.fixed_mode->clock > 112999)
778 return true;
779
780 if (dmi_check_system(intel_dual_link_lvds))
@@ -858,7 +829,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
829 struct drm_device *dev = &dev_priv->drm;
830 struct intel_lvds_encoder *lvds_encoder;
831 struct intel_encoder *intel_encoder;
861 struct intel_lvds_connector *lvds_connector;
832 struct intel_connector *intel_connector;
833 struct drm_connector *connector;
834 struct drm_encoder *encoder;
@@ -911,23 +881,16 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
881 if (!lvds_encoder)
882 return;
883
914 lvds_connector = kzalloc(sizeof(*lvds_connector), GFP_KERNEL);
915 if (!lvds_connector) {
884 intel_connector = intel_connector_alloc();
885 if (!intel_connector) {
916 kfree(lvds_encoder);
917 return;
918 }
919
920 if (intel_connector_init(&lvds_connector->base) < 0) {
921 kfree(lvds_connector);
886 kfree(lvds_encoder);
887 return;
888 }
889
926 lvds_encoder->attached_connector = lvds_connector;
890 lvds_encoder->attached_connector = intel_connector;
891
892 intel_encoder = &lvds_encoder->base;
893 encoder = &intel_encoder->base;
930 intel_connector = &lvds_connector->base;
894 connector = &intel_connector->base;
895 drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
896 DRM_MODE_CONNECTOR_LVDS);
@@ -1008,7 +971,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
971 } else {
972 edid = ERR_PTR(-ENOENT);
973 }
1011 lvds_connector->base.edid = edid;
974 intel_connector->edid = edid;
975
976 list_for_each_entry(scan, &connector->probed_modes, head) {
977 if (scan->type & DRM_MODE_TYPE_PREFERRED) {
@@ -1072,6 +1035,6 @@ failed:
1035 drm_connector_cleanup(connector);
1036 drm_encoder_cleanup(encoder);
1037 kfree(lvds_encoder);
1075 kfree(lvds_connector);
1038 intel_connector_free(intel_connector);
1039 return;
1040}
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index e034b4166d32..b8f106d9ecf8 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -773,70 +773,6 @@ static void intel_setup_cadls(struct drm_i915_private *dev_priv)
773 opregion->acpi->cadl[i] = 0;
774}
775
776void intel_opregion_register(struct drm_i915_private *dev_priv)
777{
778 struct intel_opregion *opregion = &dev_priv->opregion;
779
780 if (!opregion->header)
781 return;
782
783 if (opregion->acpi) {
784 intel_didl_outputs(dev_priv);
785 intel_setup_cadls(dev_priv);
786
787 /* Notify BIOS we are ready to handle ACPI video ext notifs.
788 * Right now, all the events are handled by the ACPI video module.
789 * We don't actually need to do anything with them. */
790 opregion->acpi->csts = 0;
791 opregion->acpi->drdy = 1;
792
793 opregion->acpi_notifier.notifier_call = intel_opregion_video_event;
794 register_acpi_notifier(&opregion->acpi_notifier);
795 }
796
797 if (opregion->asle) {
798 opregion->asle->tche = ASLE_TCHE_BLC_EN;
799 opregion->asle->ardy = ASLE_ARDY_READY;
800 }
801}
802
803void intel_opregion_unregister(struct drm_i915_private *dev_priv)
804{
805 struct intel_opregion *opregion = &dev_priv->opregion;
806
807 if (!opregion->header)
808 return;
809
810 if (opregion->asle)
811 opregion->asle->ardy = ASLE_ARDY_NOT_READY;
812
813 cancel_work_sync(&dev_priv->opregion.asle_work);
814
815 if (opregion->acpi) {
816 opregion->acpi->drdy = 0;
817
818 unregister_acpi_notifier(&opregion->acpi_notifier);
819 opregion->acpi_notifier.notifier_call = NULL;
820 }
821
822 /* just clear all opregion memory pointers now */
823 memunmap(opregion->header);
824 if (opregion->rvda) {
825 memunmap(opregion->rvda);
826 opregion->rvda = NULL;
827 }
828 if (opregion->vbt_firmware) {
829 kfree(opregion->vbt_firmware);
830 opregion->vbt_firmware = NULL;
831 }
832 opregion->header = NULL;
833 opregion->acpi = NULL;
834 opregion->swsci = NULL;
835 opregion->asle = NULL;
836 opregion->vbt = NULL;
837 opregion->lid_state = NULL;
838}
839
776static void swsci_setup(struct drm_i915_private *dev_priv)
777{
778 struct intel_opregion *opregion = &dev_priv->opregion;
@@ -1115,3 +1051,97 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
1051
1052 return ret - 1;
1053}
1054
1055void intel_opregion_register(struct drm_i915_private *i915)
1056{
1057 struct intel_opregion *opregion = &i915->opregion;
1058
1059 if (!opregion->header)
1060 return;
1061
1062 if (opregion->acpi) {
1063 opregion->acpi_notifier.notifier_call =
1064 intel_opregion_video_event;
1065 register_acpi_notifier(&opregion->acpi_notifier);
1066 }
1067
1068 intel_opregion_resume(i915);
1069}
1070
1071void intel_opregion_resume(struct drm_i915_private *i915)
1072{
1073 struct intel_opregion *opregion = &i915->opregion;
1074
1075 if (!opregion->header)
1076 return;
1077
1078 if (opregion->acpi) {
1079 intel_didl_outputs(i915);
1080 intel_setup_cadls(i915);
1081
1082 /*
1083 * Notify BIOS we are ready to handle ACPI video ext notifs.
1084 * Right now, all the events are handled by the ACPI video
1085 * module. We don't actually need to do anything with them.
1086 */
1087 opregion->acpi->csts = 0;
1088 opregion->acpi->drdy = 1;
1089 }
1090
1091 if (opregion->asle) {
1092 opregion->asle->tche = ASLE_TCHE_BLC_EN;
1093 opregion->asle->ardy = ASLE_ARDY_READY;
1094 }
1095
1096 intel_opregion_notify_adapter(i915, PCI_D0);
1097}
1098
1099void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state)
1100{
1101 struct intel_opregion *opregion = &i915->opregion;
1102
1103 if (!opregion->header)
1104 return;
1105
1106 intel_opregion_notify_adapter(i915, state);
1107
1108 if (opregion->asle)
1109 opregion->asle->ardy = ASLE_ARDY_NOT_READY;
1110
1111 cancel_work_sync(&i915->opregion.asle_work);
1112
1113 if (opregion->acpi)
1114 opregion->acpi->drdy = 0;
1115}
1116
1117void intel_opregion_unregister(struct drm_i915_private *i915)
1118{
1119 struct intel_opregion *opregion = &i915->opregion;
1120
1121 intel_opregion_suspend(i915, PCI_D1);
1122
1123 if (!opregion->header)
1124 return;
1125
1126 if (opregion->acpi_notifier.notifier_call) {
1127 unregister_acpi_notifier(&opregion->acpi_notifier);
1128 opregion->acpi_notifier.notifier_call = NULL;
1129 }
1130
1131 /* just clear all opregion memory pointers now */
1132 memunmap(opregion->header);
1133 if (opregion->rvda) {
1134 memunmap(opregion->rvda);
1135 opregion->rvda = NULL;
1136 }
1137 if (opregion->vbt_firmware) {
1138 kfree(opregion->vbt_firmware);
1139 opregion->vbt_firmware = NULL;
1140 }
1141 opregion->header = NULL;
1142 opregion->acpi = NULL;
1143 opregion->swsci = NULL;
1144 opregion->asle = NULL;
1145 opregion->vbt = NULL;
1146 opregion->lid_state = NULL;
1147}
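
The split gives four entry points with a clear pairing: register/unregister around driver load and unload, suspend/resume around power transitions (unregister also funnels through suspend). A hedged sketch of the intended call order, read off the functions above (assumed call sites; the actual hook-up lives elsewhere in this patch):

/* driver load */
intel_opregion_setup(i915);
intel_opregion_register(i915);	/* installs ACPI notifier, calls _resume() */

/* system/runtime suspend */
intel_opregion_suspend(i915, PCI_D3hot); /* notify state, ardy = NOT_READY */

/* resume */
intel_opregion_resume(i915);	/* didl/cadls, drdy = 1, notify PCI_D0 */

/* driver unload */
intel_opregion_unregister(i915); /* suspend(PCI_D1) + unmap everything */
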
diff --git a/drivers/gpu/drm/i915/intel_opregion.h b/drivers/gpu/drm/i915/intel_opregion.h
index e8498a8cda3d..d84b6d2d2fae 100644
--- a/drivers/gpu/drm/i915/intel_opregion.h
+++ b/drivers/gpu/drm/i915/intel_opregion.h
@@ -57,8 +57,14 @@ struct intel_opregion {
57#ifdef CONFIG_ACPI
58
59int intel_opregion_setup(struct drm_i915_private *dev_priv);
60
61void intel_opregion_register(struct drm_i915_private *dev_priv);
62void intel_opregion_unregister(struct drm_i915_private *dev_priv);
63
64void intel_opregion_resume(struct drm_i915_private *dev_priv);
65void intel_opregion_suspend(struct drm_i915_private *dev_priv,
66 pci_power_t state);
67
68void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
69int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
70 bool enable);
@@ -81,6 +87,15 @@ static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv)
87{
88}
89
90static inline void intel_opregion_resume(struct drm_i915_private *dev_priv)
91{
92}
93
94static inline void intel_opregion_suspend(struct drm_i915_private *dev_priv,
95 pci_power_t state)
96{
97}
98
99static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
100{
101}
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 72eb7e48e8bc..20ea7c99d13a 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -1338,7 +1338,7 @@ err_put_bo:
1338 return err;
1339}
1340
1341void intel_setup_overlay(struct drm_i915_private *dev_priv)
1341void intel_overlay_setup(struct drm_i915_private *dev_priv)
1342{
1343 struct intel_overlay *overlay;
1344 int ret;
@@ -1387,7 +1387,7 @@ out_free:
1387 kfree(overlay);
1388}
1389
1390void intel_cleanup_overlay(struct drm_i915_private *dev_priv)
1390void intel_overlay_cleanup(struct drm_i915_private *dev_priv)
1391{
1392 struct intel_overlay *overlay;
1393
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 4a9f139e7b73..e6cd7b55c018 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -111,7 +111,7 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
111 /* Native modes don't need fitting */
112 if (adjusted_mode->crtc_hdisplay == pipe_config->pipe_src_w &&
113 adjusted_mode->crtc_vdisplay == pipe_config->pipe_src_h &&
114 !pipe_config->ycbcr420)
114 pipe_config->output_format != INTEL_OUTPUT_FORMAT_YCBCR420)
115 goto done;
116
117 switch (fitting_mode) {
@@ -505,7 +505,7 @@ static u32 _vlv_get_backlight(struct drm_i915_private *dev_priv, enum pipe pipe)
505static u32 vlv_get_backlight(struct intel_connector *connector)
506{
507 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
508 enum pipe pipe = intel_get_pipe_from_connector(connector);
508 enum pipe pipe = intel_connector_get_pipe(connector);
509
510 return _vlv_get_backlight(dev_priv, pipe);
511}
@@ -763,7 +763,7 @@ static void pwm_disable_backlight(const struct drm_connector_state *old_conn_sta
763 struct intel_panel *panel = &connector->panel;
764
765 /* Disable the backlight */
766 pwm_config(panel->backlight.pwm, 0, CRC_PMIC_PWM_PERIOD_NS);
766 intel_panel_actually_set_backlight(old_conn_state, 0);
767 usleep_range(2000, 3000);
768 pwm_disable(panel->backlight.pwm);
769}
@@ -1814,11 +1814,8 @@ int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
1814 return 0;
1815}
1816
1817void intel_panel_destroy_backlight(struct drm_connector *connector)
1817static void intel_panel_destroy_backlight(struct intel_panel *panel)
1818{
1819 struct intel_connector *intel_connector = to_intel_connector(connector);
1820 struct intel_panel *panel = &intel_connector->panel;
1821
1819 /* dispose of the pwm */
1820 if (panel->backlight.pwm)
1821 pwm_put(panel->backlight.pwm);
@@ -1923,6 +1920,8 @@ void intel_panel_fini(struct intel_panel *panel)
1920 struct intel_connector *intel_connector =
1921 container_of(panel, struct intel_connector, panel);
1922
1923 intel_panel_destroy_backlight(panel);
1924
1925 if (panel->fixed_mode)
1926 drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode);
1927
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 245f0022bcfd..897a791662c5 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2493,6 +2493,9 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
2493 uint32_t method1, method2;
2494 int cpp;
2495
2496 if (mem_value == 0)
2497 return U32_MAX;
2498
2499 if (!intel_wm_plane_visible(cstate, pstate))
2500 return 0;
2501
@@ -2522,6 +2525,9 @@ static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
2525 uint32_t method1, method2;
2526 int cpp;
2527
2528 if (mem_value == 0)
2529 return U32_MAX;
2530
2531 if (!intel_wm_plane_visible(cstate, pstate))
2532 return 0;
2533
@@ -2545,6 +2551,9 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
2551{
2552 int cpp;
2553
2554 if (mem_value == 0)
2555 return U32_MAX;
2556
2557 if (!intel_wm_plane_visible(cstate, pstate))
2558 return 0;
2559
@@ -3008,6 +3017,34 @@ static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
3017 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
3018}
3019
3020static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
3021{
3022 /*
3023 * On some SNB machines (Thinkpad X220 Tablet at least)
3024 * LP3 usage can cause vblank interrupts to be lost.
3025 * The DEIIR bit will go high but it looks like the CPU
3026 * never gets interrupted.
3027 *
3028 * It's not clear whether other interrupt sources could
3029 * be affected or if this is somehow limited to vblank
3030 * interrupts only. To play it safe we disable LP3
3031 * watermarks entirely.
3032 */
3033 if (dev_priv->wm.pri_latency[3] == 0 &&
3034 dev_priv->wm.spr_latency[3] == 0 &&
3035 dev_priv->wm.cur_latency[3] == 0)
3036 return;
3037
3038 dev_priv->wm.pri_latency[3] = 0;
3039 dev_priv->wm.spr_latency[3] = 0;
3040 dev_priv->wm.cur_latency[3] = 0;
3041
3042 DRM_DEBUG_KMS("LP3 watermarks disabled due to potential for lost interrupts\n");
3043 intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
3044 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
3045 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
3046}
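
Zeroing the LP3 latencies works because of the guards added earlier in this patch: a level whose latency is 0 now computes U32_MAX watermarks, which can never fit and so the level is rejected. A reduced, illustrative-only sketch of that interaction (toy functions, not driver code):

#include <stdint.h>
#include <stdbool.h>

/* A zeroed (quirked-off) latency yields a U32_MAX watermark... */
static uint32_t toy_compute_wm(uint16_t latency_us)
{
	if (latency_us == 0)
		return UINT32_MAX;
	return latency_us * 8;	/* stand-in for the real method1/method2 */
}

/* ...and a U32_MAX watermark can never pass validation, so the level
 * stays disabled. */
static bool toy_level_valid(uint32_t wm, uint32_t max_blocks)
{
	return wm <= max_blocks;
}
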
3047
3048static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
3049{
3050 intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
@@ -3024,8 +3061,10 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
3061 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
3062 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
3063
3027 if (IS_GEN6(dev_priv))
3064 if (IS_GEN6(dev_priv)) {
3065 snb_wm_latency_quirk(dev_priv);
3066 snb_wm_lp3_irq_quirk(dev_priv);
3067 }
3068}
3069
3070static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
@@ -3159,7 +3198,8 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
3198 * and after the vblank.
3199 */
3200 *a = newstate->wm.ilk.optimal;
3162 if (!newstate->base.active || drm_atomic_crtc_needs_modeset(&newstate->base))
3201 if (!newstate->base.active || drm_atomic_crtc_needs_modeset(&newstate->base) ||
3202 intel_state->skip_intermediate_wm)
3203 return 0;
3204
3205 a->pipe_enabled |= b->pipe_enabled;
@@ -3611,15 +3651,8 @@ static bool skl_needs_memory_bw_wa(struct intel_atomic_state *state)
3651static bool
3652intel_has_sagv(struct drm_i915_private *dev_priv)
3653{
3614 if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) ||
3615 IS_CANNONLAKE(dev_priv))
3616 return true;
3617
3618 if (IS_SKYLAKE(dev_priv) &&
3619 dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED)
3620 return true;
3621
3622 return false;
3654 return (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) &&
3655 dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED;
3656}
3657
3658/*
3625/* 3658/*
@@ -3783,7 +3816,7 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
3783 3816
3784static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv, 3817static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
3785 const struct intel_crtc_state *cstate, 3818 const struct intel_crtc_state *cstate,
3786 const unsigned int total_data_rate, 3819 const u64 total_data_rate,
3787 const int num_active, 3820 const int num_active,
3788 struct skl_ddb_allocation *ddb) 3821 struct skl_ddb_allocation *ddb)
3789{ 3822{
@@ -3797,12 +3830,12 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
3797 return ddb_size - 4; /* 4 blocks for bypass path allocation */ 3830 return ddb_size - 4; /* 4 blocks for bypass path allocation */
3798 3831
3799 adjusted_mode = &cstate->base.adjusted_mode; 3832 adjusted_mode = &cstate->base.adjusted_mode;
3800 total_data_bw = (u64)total_data_rate * drm_mode_vrefresh(adjusted_mode); 3833 total_data_bw = total_data_rate * drm_mode_vrefresh(adjusted_mode);
3801 3834
3802 /* 3835 /*
3803 * 12GB/s is maximum BW supported by single DBuf slice. 3836 * 12GB/s is maximum BW supported by single DBuf slice.
3804 */ 3837 */
3805 if (total_data_bw >= GBps(12) || num_active > 1) { 3838 if (num_active > 1 || total_data_bw >= GBps(12)) {
3806 ddb->enabled_slices = 2; 3839 ddb->enabled_slices = 2;
3807 } else { 3840 } else {
3808 ddb->enabled_slices = 1; 3841 ddb->enabled_slices = 1;
@@ -3813,16 +3846,15 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
3846}
3847
3848static void
3816skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
3849skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
3850 const struct intel_crtc_state *cstate,
3818 const unsigned int total_data_rate,
3851 const u64 total_data_rate,
3852 struct skl_ddb_allocation *ddb,
3853 struct skl_ddb_entry *alloc, /* out */
3854 int *num_active /* out */)
3855{
3856 struct drm_atomic_state *state = cstate->base.state;
3857 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
3825 struct drm_i915_private *dev_priv = to_i915(dev);
3858 struct drm_crtc *for_crtc = cstate->base.crtc;
3859 const struct drm_crtc_state *crtc_state;
3860 const struct drm_crtc *crtc;
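
A worked number for the DBuf slice heuristic above (illustrative values only; this assumes GBps() expands to decimal gigabytes per second):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* one 3840x2160 ARGB8888 plane: bytes fetched per frame */
	uint64_t total_data_rate = 3840ULL * 2160 * 4;	/* ~33.2 MB */
	uint64_t total_data_bw = total_data_rate * 60;	/* ~2.0 GB/s at 60 Hz */
	uint64_t single_slice_bw = 12ULL * 1000 * 1000 * 1000;
	int num_active = 1;

	/* mirrors: num_active > 1 || total_data_bw >= GBps(12) */
	printf("enabled_slices = %d\n",
	       (num_active > 1 || total_data_bw >= single_slice_bw) ? 2 : 1);
	return 0;
}
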
@@ -3944,14 +3976,9 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
3976 val & PLANE_CTL_ALPHA_MASK);
3977
3978 val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
3947 /*
3948 * FIXME: add proper NV12 support for ICL. Avoid reading unclaimed
3949 * registers for now.
3950 */
3951 if (INTEL_GEN(dev_priv) < 11)
3954 if (fourcc == DRM_FORMAT_NV12) {
3979 if (fourcc == DRM_FORMAT_NV12 && INTEL_GEN(dev_priv) < 11) {
3980 val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));
3981
3982 skl_ddb_entry_init_from_hw(dev_priv,
3983 &ddb->plane[pipe][plane_id], val2);
3984 skl_ddb_entry_init_from_hw(dev_priv,
@@ -4138,23 +4165,24 @@ int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
4165 return 0;
4166}
4167
4141static unsigned int
4168static u64
4169skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
4143 const struct drm_plane_state *pstate,
4170 const struct intel_plane_state *intel_pstate,
4171 const int plane)
4172{
4146 struct intel_plane *intel_plane = to_intel_plane(pstate->plane);
4147 struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
4173 struct intel_plane *intel_plane =
4174 to_intel_plane(intel_pstate->base.plane);
4175 uint32_t data_rate;
4176 uint32_t width = 0, height = 0;
4177 struct drm_framebuffer *fb;
4178 u32 format;
4179 uint_fixed_16_16_t down_scale_amount;
4180 u64 rate;
4181
4182 if (!intel_pstate->base.visible)
4183 return 0;
4184
4157 fb = pstate->fb;
4185 fb = intel_pstate->base.fb;
4186 format = fb->format->format;
4187
4188 if (intel_plane->id == PLANE_CURSOR)
@@ -4176,28 +4204,26 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
4204 height /= 2;
4205 }
4206
4179 data_rate = width * height * fb->format->cpp[plane];
4207 data_rate = width * height;
4208
4209 down_scale_amount = skl_plane_downscale_amount(cstate, intel_pstate);
4210
4183 return mul_round_up_u32_fixed16(data_rate, down_scale_amount);
4211 rate = mul_round_up_u32_fixed16(data_rate, down_scale_amount);
4212
4213 rate *= fb->format->cpp[plane];
4214 return rate;
4215}
4216
4186/*
4187 * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
4188 * a 8192x4096@32bpp framebuffer:
4189 * 3 * 4096 * 8192 * 4 < 2^32
4190 */
4191static unsigned int
4217static u64
4218skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
4193 unsigned int *plane_data_rate,
4194 unsigned int *uv_plane_data_rate)
4219 u64 *plane_data_rate,
4220 u64 *uv_plane_data_rate)
4221{
4222 struct drm_crtc_state *cstate = &intel_cstate->base;
4223 struct drm_atomic_state *state = cstate->state;
4224 struct drm_plane *plane;
4225 const struct drm_plane_state *pstate;
4200 unsigned int total_data_rate = 0;
4226 u64 total_data_rate = 0;
4227
4228 if (WARN_ON(!state))
4229 return 0;
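
Why the switch to u64: the removed comment's 32-bit bound (3 * 4096 * 8192 * 4 < 2^32) holds per frame, but intel_get_ddb_size() now multiplies the total by the refresh rate, which does overflow 32 bits. A quick illustrative check:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t per_frame = 3ULL * 4096 * 8192 * 4;	/* ~4.0e8, fits u32 */
	uint64_t per_sec = per_frame * 60;		/* ~2.4e10, > 2^32 */

	printf("per_frame fits u32: %d\n", per_frame <= UINT32_MAX);
	printf("per_sec fits u32:   %d\n", per_sec <= UINT32_MAX);
	return 0;
}
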
@@ -4205,26 +4231,81 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
4231 /* Calculate and cache data rate for each plane */
4232 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
4233 enum plane_id plane_id = to_intel_plane(plane)->id;
4208 unsigned int rate;
4234 u64 rate;
4235 const struct intel_plane_state *intel_pstate =
4236 to_intel_plane_state(pstate);
4237
4238 /* packed/y */
4239 rate = skl_plane_relative_data_rate(intel_cstate,
4212 pstate, 0);
4240 intel_pstate, 0);
4241 plane_data_rate[plane_id] = rate;
4214
4242 total_data_rate += rate;
4243
4244 /* uv-plane */
4245 rate = skl_plane_relative_data_rate(intel_cstate,
4219 pstate, 1);
4246 intel_pstate, 1);
4247 uv_plane_data_rate[plane_id] = rate;
4221
4248 total_data_rate += rate;
4249 }
4250
4251 return total_data_rate;
4252}
4253
4254static u64
4255icl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
4256 u64 *plane_data_rate)
4257{
4258 struct drm_crtc_state *cstate = &intel_cstate->base;
4259 struct drm_atomic_state *state = cstate->state;
4260 struct drm_plane *plane;
4261 const struct drm_plane_state *pstate;
4262 u64 total_data_rate = 0;
4263
4264 if (WARN_ON(!state))
4265 return 0;
4266
4267 /* Calculate and cache data rate for each plane */
4268 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
4269 const struct intel_plane_state *intel_pstate =
4270 to_intel_plane_state(pstate);
4271 enum plane_id plane_id = to_intel_plane(plane)->id;
4272 u64 rate;
4273
4274 if (!intel_pstate->linked_plane) {
4275 rate = skl_plane_relative_data_rate(intel_cstate,
4276 intel_pstate, 0);
4277 plane_data_rate[plane_id] = rate;
4278 total_data_rate += rate;
4279 } else {
4280 enum plane_id y_plane_id;
4281
4282 /*
4283 * The slave plane might not iterate in
4284 * drm_atomic_crtc_state_for_each_plane_state(),
4285 * and needs the master plane state which may be
4286 * NULL if we try get_new_plane_state(), so we
4287 * always calculate from the master.
4288 */
4289 if (intel_pstate->slave)
4290 continue;
4291
4292 /* Y plane rate is calculated on the slave */
4293 rate = skl_plane_relative_data_rate(intel_cstate,
4294 intel_pstate, 0);
4295 y_plane_id = intel_pstate->linked_plane->id;
4296 plane_data_rate[y_plane_id] = rate;
4297 total_data_rate += rate;
4298
4299 rate = skl_plane_relative_data_rate(intel_cstate,
4300 intel_pstate, 1);
4301 plane_data_rate[plane_id] = rate;
4302 total_data_rate += rate;
4303 }
4304 }
4305
4306 return total_data_rate;
4307}
4308
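
The master/slave rule above in miniature: slave planes are skipped, and the master attributes the Y (luma) rate to the slave's plane id via linked_plane. A toy model with hypothetical types, preserving the same control flow:

#include <stdint.h>
#include <stdbool.h>

struct toy_plane_state {
	int id;
	const struct toy_plane_state *linked_plane; /* master <-> slave link */
	bool slave;
	uint64_t rate_y, rate_uv;
};

static uint64_t toy_total_rate(const struct toy_plane_state *ps, int n,
			       uint64_t *plane_data_rate)
{
	uint64_t total = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (ps[i].slave)	/* accounted for from the master */
			continue;
		if (!ps[i].linked_plane) {
			plane_data_rate[ps[i].id] = ps[i].rate_y;
			total += ps[i].rate_y;
		} else {
			/* Y rate lands on the linked (slave) plane's id */
			plane_data_rate[ps[i].linked_plane->id] = ps[i].rate_y;
			plane_data_rate[ps[i].id] = ps[i].rate_uv;
			total += ps[i].rate_y + ps[i].rate_uv;
		}
	}
	return total;
}
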
4309static uint16_t
4310skl_ddb_min_alloc(const struct drm_plane_state *pstate, const int plane)
4311{
@@ -4297,15 +4378,25 @@ skl_ddb_calc_min(const struct intel_crtc_state *cstate, int num_active,
4378
4379 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, &cstate->base) {
4380 enum plane_id plane_id = to_intel_plane(plane)->id;
4381 struct intel_plane_state *plane_state = to_intel_plane_state(pstate);
4382
4383 if (plane_id == PLANE_CURSOR)
4384 continue;
4385
4304 if (!pstate->visible)
4386 /* slave plane must be invisible and calculated from master */
4387 if (!pstate->visible || WARN_ON(plane_state->slave))
4388 continue;
4389
4307 minimum[plane_id] = skl_ddb_min_alloc(pstate, 0);
4308 uv_minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
4390 if (!plane_state->linked_plane) {
4391 minimum[plane_id] = skl_ddb_min_alloc(pstate, 0);
4392 uv_minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
4393 } else {
4394 enum plane_id y_plane_id =
4395 plane_state->linked_plane->id;
4396
4397 minimum[y_plane_id] = skl_ddb_min_alloc(pstate, 0);
4398 minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
4399 }
4400 }
4401
4402 minimum[PLANE_CURSOR] = skl_cursor_allocation(num_active);
@@ -4317,18 +4408,18 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
4408{
4409 struct drm_atomic_state *state = cstate->base.state;
4410 struct drm_crtc *crtc = cstate->base.crtc;
4320 struct drm_device *dev = crtc->dev;
4411 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
4412 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4413 enum pipe pipe = intel_crtc->pipe;
4414 struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
4415 uint16_t alloc_size, start;
4416 uint16_t minimum[I915_MAX_PLANES] = {};
4417 uint16_t uv_minimum[I915_MAX_PLANES] = {};
4327 unsigned int total_data_rate;
4418 u64 total_data_rate;
4419 enum plane_id plane_id;
4420 int num_active;
4330 unsigned int plane_data_rate[I915_MAX_PLANES] = {};
4331 unsigned int uv_plane_data_rate[I915_MAX_PLANES] = {};
4421 u64 plane_data_rate[I915_MAX_PLANES] = {};
4422 u64 uv_plane_data_rate[I915_MAX_PLANES] = {};
4423 uint16_t total_min_blocks = 0;
4424
4425 /* Clear the partitioning for disabled planes. */
@@ -4343,11 +4434,18 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
4434 return 0;
4435 }
4436
4346 total_data_rate = skl_get_total_relative_data_rate(cstate,
4347 plane_data_rate,
4348 uv_plane_data_rate);
4349 skl_ddb_get_pipe_allocation_limits(dev, cstate, total_data_rate, ddb,
4350 alloc, &num_active);
4437 if (INTEL_GEN(dev_priv) < 11)
4438 total_data_rate =
4439 skl_get_total_relative_data_rate(cstate,
4440 plane_data_rate,
4441 uv_plane_data_rate);
4442 else
4443 total_data_rate =
4444 icl_get_total_relative_data_rate(cstate,
4445 plane_data_rate);
4446
4447 skl_ddb_get_pipe_allocation_limits(dev_priv, cstate, total_data_rate,
4448 ddb, alloc, &num_active);
4449 alloc_size = skl_ddb_entry_size(alloc);
4450 if (alloc_size == 0)
4451 return 0;
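
The per-plane DDB split that consumes these totals is proportional: each plane gets its minimum plus a share of the available blocks weighted by its data rate, i.e. blocks = minimum + alloc_size * rate / total_rate, now done in 64-bit math via div64_u64. A worked example with illustrative numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t alloc_size = 512;	/* DDB blocks available to the pipe */
	uint64_t rate = 2000000, total = 5000000;
	uint64_t minimum = 8;

	/* mirrors: plane_blocks = minimum +
	 *          div64_u64(alloc_size * data_rate, total_data_rate); */
	uint64_t plane_blocks = minimum + (alloc_size * rate) / total;

	printf("plane gets %llu blocks\n",
	       (unsigned long long)plane_blocks);	/* 8 + 204 = 212 */
	return 0;
}
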
@@ -4387,7 +4485,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
4485
4486 start = alloc->start;
4487 for_each_plane_id_on_crtc(intel_crtc, plane_id) {
4390 unsigned int data_rate, uv_data_rate;
4488 u64 data_rate, uv_data_rate;
4489 uint16_t plane_blocks, uv_plane_blocks;
4490
4491 if (plane_id == PLANE_CURSOR)
@@ -4401,8 +4499,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
4499 * result is < available as data_rate / total_data_rate < 1
4500 */
4501 plane_blocks = minimum[plane_id];
4404 plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
4405 total_data_rate);
4502 plane_blocks += div64_u64(alloc_size * data_rate, total_data_rate);
4503
4504 /* Leave disabled planes at (0,0) */
4505 if (data_rate) {
@@ -4416,8 +4513,10 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
4513 uv_data_rate = uv_plane_data_rate[plane_id];
4514
4515 uv_plane_blocks = uv_minimum[plane_id];
4419 uv_plane_blocks += div_u64((uint64_t)alloc_size * uv_data_rate,
4420 total_data_rate);
4516 uv_plane_blocks += div64_u64(alloc_size * uv_data_rate, total_data_rate);
4517
4518 /* Gen11+ uses a separate plane for UV watermarks */
4519 WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_plane_blocks);
4520
4521 if (uv_data_rate) {
4522 ddb->uv_plane[pipe][plane_id].start = start;
@@ -4475,7 +4574,7 @@ static uint_fixed_16_16_t skl_wm_method2(uint32_t pixel_rate,
4574}
4575
4576static uint_fixed_16_16_t
4478intel_get_linetime_us(struct intel_crtc_state *cstate)
4577intel_get_linetime_us(const struct intel_crtc_state *cstate)
4578{
4579 uint32_t pixel_rate;
4580 uint32_t crtc_htotal;
@@ -4519,7 +4618,7 @@ skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
4618
4619static int
4620skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
4522 struct intel_crtc_state *cstate,
4621 const struct intel_crtc_state *cstate,
4622 const struct intel_plane_state *intel_pstate,
4623 struct skl_wm_params *wp, int plane_id)
4624{
@@ -4626,7 +4725,7 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
4725}
4726
4727static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
4629 struct intel_crtc_state *cstate,
4728 const struct intel_crtc_state *cstate,
4729 const struct intel_plane_state *intel_pstate,
4730 uint16_t ddb_allocation,
4731 int level,
@@ -4671,15 +4770,24 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
4770 } else {
4771 if ((wp->cpp * cstate->base.adjusted_mode.crtc_htotal /
4772 wp->dbuf_block_size < 1) &&
4674 (wp->plane_bytes_per_line / wp->dbuf_block_size < 1))
4773 (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
4774 selected_result = method2;
4676 else if (ddb_allocation >=
4677 fixed16_to_u32_round_up(wp->plane_blocks_per_line))
4678 selected_result = min_fixed16(method1, method2);
4679 else if (latency >= wp->linetime_us)
4680 selected_result = min_fixed16(method1, method2);
4681 else
4775 } else if (ddb_allocation >=
4776 fixed16_to_u32_round_up(wp->plane_blocks_per_line)) {
4777 if (IS_GEN9(dev_priv) &&
4778 !IS_GEMINILAKE(dev_priv))
4779 selected_result = min_fixed16(method1, method2);
4780 else
4781 selected_result = method2;
4782 } else if (latency >= wp->linetime_us) {
4783 if (IS_GEN9(dev_priv) &&
4784 !IS_GEMINILAKE(dev_priv))
4785 selected_result = min_fixed16(method1, method2);
4786 else
4787 selected_result = method2;
4788 } else {
4789 selected_result = method1;
4790 }
4791 }
4792
4793 res_blocks = fixed16_to_u32_round_up(selected_result) + 1;
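
The restructured chain above reads as: gen9 (except Geminilake) may still take min(method1, method2) in the two middle cases, while everything newer must use method2. A condensed decision function (a sketch with assumed boolean predicates mirroring the if/else conditions, fixed-point math elided):

#include <stdint.h>
#include <stdbool.h>

static uint32_t select_result(uint32_t m1, uint32_t m2, bool gen9_not_glk,
			      bool small_line, bool fits_ddb, bool high_latency)
{
	uint32_t min12 = m1 < m2 ? m1 : m2;

	if (small_line)			/* tiny lines: always method2 */
		return m2;
	if (fits_ddb || high_latency)	/* only old gen9 may take the min */
		return gen9_not_glk ? min12 : m2;
	return m1;
}
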
@@ -4755,17 +4863,6 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
4863 }
4864 }
4865
4758 /*
4759 * Display WA #826 (SKL:ALL, BXT:ALL) & #1059 (CNL:A)
4760 * disable wm level 1-7 on NV12 planes
4761 */
4762 if (wp->is_planar && level >= 1 &&
4763 (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) ||
4764 IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))) {
4765 result->plane_en = false;
4766 return 0;
4767 }
4768
4866 /* The number of lines is ignored for the level 0 watermark. */
4867 result->plane_res_b = res_blocks;
4868 result->plane_res_l = res_lines;
@@ -4777,38 +4874,22 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
4874static int
4875skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
4876 struct skl_ddb_allocation *ddb,
4780 struct intel_crtc_state *cstate,
4877 const struct intel_crtc_state *cstate,
4878 const struct intel_plane_state *intel_pstate,
4879 uint16_t ddb_blocks,
4880 const struct skl_wm_params *wm_params,
4881 struct skl_plane_wm *wm,
4784 int plane_id)
4882 struct skl_wm_level *levels)
4883{
4786 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
4787 struct drm_plane *plane = intel_pstate->base.plane;
4788 struct intel_plane *intel_plane = to_intel_plane(plane);
4789 uint16_t ddb_blocks;
4790 enum pipe pipe = intel_crtc->pipe;
4884 int level, max_level = ilk_wm_max_level(dev_priv);
4792 enum plane_id intel_plane_id = intel_plane->id;
4885 struct skl_wm_level *result_prev = &levels[0];
4886 int ret;
4887
4888 if (WARN_ON(!intel_pstate->base.fb))
4889 return -EINVAL;
4890
4798 ddb_blocks = plane_id ?
4799 skl_ddb_entry_size(&ddb->uv_plane[pipe][intel_plane_id]) :
4800 skl_ddb_entry_size(&ddb->plane[pipe][intel_plane_id]);
4801
4891 for (level = 0; level <= max_level; level++) {
4803 struct skl_wm_level *result = plane_id ? &wm->uv_wm[level] :
4804 &wm->wm[level];
4805 struct skl_wm_level *result_prev;
4806
4807 if (level)
4808 result_prev = plane_id ? &wm->uv_wm[level - 1] :
4809 &wm->wm[level - 1];
4810 else
4811 result_prev = plane_id ? &wm->uv_wm[0] : &wm->wm[0];
4892 struct skl_wm_level *result = &levels[level];
4893
4894 ret = skl_compute_plane_wm(dev_priv,
4895 cstate,
@@ -4820,6 +4901,8 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
4901 result);
4902 if (ret)
4903 return ret;
4904
4905 result_prev = result;
4906 }
4907
4908 if (intel_pstate->base.fb->format->format == DRM_FORMAT_NV12)
@@ -4829,7 +4912,7 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
4912}
4913
4914static uint32_t
4832skl_compute_linetime_wm(struct intel_crtc_state *cstate)
4915skl_compute_linetime_wm(const struct intel_crtc_state *cstate)
4916{
4917 struct drm_atomic_state *state = cstate->base.state;
4918 struct drm_i915_private *dev_priv = to_i915(state->dev);
4835 struct drm_i915_private *dev_priv = to_i915(state->dev); 4918 struct drm_i915_private *dev_priv = to_i915(state->dev);
@@ -4851,7 +4934,7 @@ skl_compute_linetime_wm(struct intel_crtc_state *cstate)
4934 return linetime_wm;
4935}
4936
4854static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
4937static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
4938 struct skl_wm_params *wp,
4939 struct skl_wm_level *wm_l0,
4940 uint16_t ddb_allocation,
@@ -4861,7 +4944,7 @@ static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
4944 const struct drm_i915_private *dev_priv = to_i915(dev);
4945 uint16_t trans_min, trans_y_tile_min;
4946 const uint16_t trans_amount = 10; /* This is configurable amount */
4864 uint16_t trans_offset_b, res_blocks;
4947 uint16_t wm0_sel_res_b, trans_offset_b, res_blocks;
4948
4949 if (!cstate->base.active)
4950 goto exit;
@@ -4874,19 +4957,31 @@ static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
4957 if (!dev_priv->ipc_enabled)
4958 goto exit;
4959
4877 trans_min = 0;
4878 if (INTEL_GEN(dev_priv) >= 10)
4960 trans_min = 14;
4961 if (INTEL_GEN(dev_priv) >= 11)
4962 trans_min = 4;
4963
4964 trans_offset_b = trans_min + trans_amount;
4965
4966 /*
4967 * The spec asks for Selected Result Blocks for wm0 (the real value),
4968 * not Result Blocks (the integer value). Pay attention to the capital
4969 * letters. The value wm_l0->plane_res_b is actually Result Blocks, but
4970 * since Result Blocks is the ceiling of Selected Result Blocks plus 1,
4971 * and since we later will have to get the ceiling of the sum in the
4972 * transition watermarks calculation, we can just pretend Selected
4973 * Result Blocks is Result Blocks minus 1 and it should work for the
4974 * current platforms.
4975 */
4976 wm0_sel_res_b = wm_l0->plane_res_b - 1;
4977
4978 if (wp->y_tiled) {
4979 trans_y_tile_min = (uint16_t) mul_round_up_u32_fixed16(2,
4980 wp->y_tile_minimum);
4886 res_blocks = max(wm_l0->plane_res_b, trans_y_tile_min) +
4981 res_blocks = max(wm0_sel_res_b, trans_y_tile_min) +
4982 trans_offset_b;
4983 } else {
4889 res_blocks = wm_l0->plane_res_b + trans_offset_b;
4984 res_blocks = wm0_sel_res_b + trans_offset_b;
4985
4986 /* WA BUG:1938466 add one block for non y-tile planes */
4987 if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))
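
Putting the pieces above together for a non-Y-tiled plane on a gen11 part (illustrative numbers only, before the later ceiling/+1 handling):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t trans_min = 4, trans_amount = 10;	/* gen11 values above */
	uint16_t trans_offset_b = trans_min + trans_amount;	/* 14 */
	uint16_t wm0_res_b = 31;		/* level-0 Result Blocks */
	uint16_t wm0_sel_res_b = wm0_res_b - 1;	/* Selected Result Blocks */

	printf("trans res_blocks = %u\n",
	       wm0_sel_res_b + trans_offset_b);	/* 30 + 14 = 44 */
	return 0;
}
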
@@ -4906,16 +5001,101 @@ exit:
5001 trans_wm->plane_en = false;
5002}
5003
5004static int __skl_build_plane_wm_single(struct skl_ddb_allocation *ddb,
5005 struct skl_pipe_wm *pipe_wm,
5006 enum plane_id plane_id,
5007 const struct intel_crtc_state *cstate,
5008 const struct intel_plane_state *pstate,
5009 int color_plane)
5010{
5011 struct drm_i915_private *dev_priv = to_i915(pstate->base.plane->dev);
5012 struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
5013 enum pipe pipe = to_intel_plane(pstate->base.plane)->pipe;
5014 struct skl_wm_params wm_params;
5015 uint16_t ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]);
5016 int ret;
5017
5018 ret = skl_compute_plane_wm_params(dev_priv, cstate, pstate,
5019 &wm_params, color_plane);
5020 if (ret)
5021 return ret;
5022
5023 ret = skl_compute_wm_levels(dev_priv, ddb, cstate, pstate,
5024 ddb_blocks, &wm_params, wm, wm->wm);
5025
5026 if (ret)
5027 return ret;
5028
5029 skl_compute_transition_wm(cstate, &wm_params, &wm->wm[0],
5030 ddb_blocks, &wm->trans_wm);
5031
5032 return 0;
5033}
5034
5035static int skl_build_plane_wm_single(struct skl_ddb_allocation *ddb,
5036 struct skl_pipe_wm *pipe_wm,
5037 const struct intel_crtc_state *cstate,
5038 const struct intel_plane_state *pstate)
5039{
5040 enum plane_id plane_id = to_intel_plane(pstate->base.plane)->id;
5041
5042 return __skl_build_plane_wm_single(ddb, pipe_wm, plane_id, cstate, pstate, 0);
5043}
5044
5045static int skl_build_plane_wm_planar(struct skl_ddb_allocation *ddb,
5046 struct skl_pipe_wm *pipe_wm,
5047 const struct intel_crtc_state *cstate,
5048 const struct intel_plane_state *pstate)
5049{
5050 struct intel_plane *plane = to_intel_plane(pstate->base.plane);
5051 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
5052 enum plane_id plane_id = plane->id;
5053 struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
5054 struct skl_wm_params wm_params;
5055 enum pipe pipe = plane->pipe;
5056 uint16_t ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]);
5057 int ret;
5058
5059 ret = __skl_build_plane_wm_single(ddb, pipe_wm, plane_id, cstate, pstate, 0);
5060 if (ret)
5061 return ret;
5062
5063 /* uv plane watermarks must also be validated for NV12/Planar */
5064 ddb_blocks = skl_ddb_entry_size(&ddb->uv_plane[pipe][plane_id]);
5065
5066 ret = skl_compute_plane_wm_params(dev_priv, cstate, pstate, &wm_params, 1);
5067 if (ret)
5068 return ret;
5069
5070 return skl_compute_wm_levels(dev_priv, ddb, cstate, pstate,
5071 ddb_blocks, &wm_params, wm, wm->uv_wm);
5072}
5073
5074static int icl_build_plane_wm_planar(struct skl_ddb_allocation *ddb,
5075 struct skl_pipe_wm *pipe_wm,
5076 const struct intel_crtc_state *cstate,
5077 const struct intel_plane_state *pstate)
5078{
5079 int ret;
5080 enum plane_id y_plane_id = pstate->linked_plane->id;
5081 enum plane_id uv_plane_id = to_intel_plane(pstate->base.plane)->id;
5082
5083 ret = __skl_build_plane_wm_single(ddb, pipe_wm, y_plane_id,
5084 cstate, pstate, 0);
5085 if (ret)
5086 return ret;
5087
5088 return __skl_build_plane_wm_single(ddb, pipe_wm, uv_plane_id,
5089 cstate, pstate, 1);
5090}
5091
4909static int skl_build_pipe_wm(struct intel_crtc_state *cstate, 5092static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
4910 struct skl_ddb_allocation *ddb, 5093 struct skl_ddb_allocation *ddb,
4911 struct skl_pipe_wm *pipe_wm) 5094 struct skl_pipe_wm *pipe_wm)
4912{ 5095{
4913 struct drm_device *dev = cstate->base.crtc->dev;
4914 struct drm_crtc_state *crtc_state = &cstate->base; 5096 struct drm_crtc_state *crtc_state = &cstate->base;
4915 const struct drm_i915_private *dev_priv = to_i915(dev);
4916 struct drm_plane *plane; 5097 struct drm_plane *plane;
4917 const struct drm_plane_state *pstate; 5098 const struct drm_plane_state *pstate;
4918 struct skl_plane_wm *wm;
4919 int ret; 5099 int ret;
4920 5100
4921 /* 5101 /*
@@ -4927,44 +5107,21 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
4927 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) { 5107 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
4928 const struct intel_plane_state *intel_pstate = 5108 const struct intel_plane_state *intel_pstate =
4929 to_intel_plane_state(pstate); 5109 to_intel_plane_state(pstate);
4930 enum plane_id plane_id = to_intel_plane(plane)->id;
4931 struct skl_wm_params wm_params;
4932 enum pipe pipe = to_intel_crtc(cstate->base.crtc)->pipe;
4933 uint16_t ddb_blocks;
4934 5110
4935 wm = &pipe_wm->planes[plane_id]; 5111 /* Watermarks calculated in master */
4936 ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]); 5112 if (intel_pstate->slave)
5113 continue;
4937 5114
4938 ret = skl_compute_plane_wm_params(dev_priv, cstate, 5115 if (intel_pstate->linked_plane)
4939 intel_pstate, &wm_params, 0); 5116 ret = icl_build_plane_wm_planar(ddb, pipe_wm, cstate, intel_pstate);
4940 if (ret) 5117 else if (intel_pstate->base.fb &&
4941 return ret; 5118 intel_pstate->base.fb->format->format == DRM_FORMAT_NV12)
5119 ret = skl_build_plane_wm_planar(ddb, pipe_wm, cstate, intel_pstate);
5120 else
5121 ret = skl_build_plane_wm_single(ddb, pipe_wm, cstate, intel_pstate);
4942 5122
4943 ret = skl_compute_wm_levels(dev_priv, ddb, cstate,
4944 intel_pstate, &wm_params, wm, 0);
4945 if (ret) 5123 if (ret)
4946 return ret; 5124 return ret;
4947
4948 skl_compute_transition_wm(cstate, &wm_params, &wm->wm[0],
4949 ddb_blocks, &wm->trans_wm);
4950
4951 /* uv plane watermarks must also be validated for NV12/Planar */
4952 if (wm_params.is_planar) {
4953 memset(&wm_params, 0, sizeof(struct skl_wm_params));
4954 wm->is_planar = true;
4955
4956 ret = skl_compute_plane_wm_params(dev_priv, cstate,
4957 intel_pstate,
4958 &wm_params, 1);
4959 if (ret)
4960 return ret;
4961
4962 ret = skl_compute_wm_levels(dev_priv, ddb, cstate,
4963 intel_pstate, &wm_params,
4964 wm, 1);
4965 if (ret)
4966 return ret;
4967 }
4968 } 5125 }
4969 5126
4970 pipe_wm->linetime = skl_compute_linetime_wm(cstate); 5127 pipe_wm->linetime = skl_compute_linetime_wm(cstate);
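The hunk above replaces one monolithic per-plane loop with a three-way dispatch: ICL master planes with a linked Y/UV companion go through icl_build_plane_wm_planar(), pre-ICL NV12 framebuffers through skl_build_plane_wm_planar(), and everything else through skl_build_plane_wm_single(). A minimal standalone sketch of that selection order, using simplified stand-in fields rather than the real intel_plane_state:

	#include <stdbool.h>
	#include <stdio.h>

	/* Illustrative model only: stand-in fields, not driver code. */
	struct plane_state {
		bool slave;		/* ICL: watermarks handled by the master */
		bool linked_plane;	/* ICL: master with a linked companion */
		bool nv12_fb;		/* pre-ICL: framebuffer is DRM_FORMAT_NV12 */
	};

	static const char *pick_wm_builder(const struct plane_state *st)
	{
		if (st->slave)
			return "skip (computed by master)";
		if (st->linked_plane)
			return "icl_build_plane_wm_planar";
		if (st->nv12_fb)
			return "skl_build_plane_wm_planar";
		return "skl_build_plane_wm_single";
	}

	int main(void)
	{
		struct plane_state icl_master = { .linked_plane = true };
		struct plane_state skl_nv12 = { .nv12_fb = true };

		printf("%s\n", pick_wm_builder(&icl_master)); /* icl planar */
		printf("%s\n", pick_wm_builder(&skl_nv12));   /* skl planar */
		return 0;
	}

The ordering matters: a slave plane must be skipped before the linked-plane test, since its watermarks were already produced while visiting its master.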
@@ -5015,14 +5172,7 @@ static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
 	skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
 			   &wm->trans_wm);
 
-	skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
-			    &ddb->plane[pipe][plane_id]);
-	/* FIXME: add proper NV12 support for ICL. */
-	if (INTEL_GEN(dev_priv) >= 11)
-		return skl_ddb_entry_write(dev_priv,
-					   PLANE_BUF_CFG(pipe, plane_id),
-					   &ddb->plane[pipe][plane_id]);
-	if (wm->is_planar) {
+	if (wm->is_planar && INTEL_GEN(dev_priv) < 11) {
 		skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
 				    &ddb->uv_plane[pipe][plane_id]);
 		skl_ddb_entry_write(dev_priv,
@@ -5031,7 +5181,8 @@ static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
 	} else {
 		skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
 				    &ddb->plane[pipe][plane_id]);
-		I915_WRITE(PLANE_NV12_BUF_CFG(pipe, plane_id), 0x0);
+		if (INTEL_GEN(dev_priv) < 11)
+			I915_WRITE(PLANE_NV12_BUF_CFG(pipe, plane_id), 0x0);
 	}
 }
 
@@ -5075,16 +5226,15 @@ static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
 	return a->start < b->end && b->start < a->end;
 }
 
-bool skl_ddb_allocation_overlaps(struct drm_i915_private *dev_priv,
-				 const struct skl_ddb_entry **entries,
-				 const struct skl_ddb_entry *ddb,
-				 int ignore)
+bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
+				 const struct skl_ddb_entry entries[],
+				 int num_entries, int ignore_idx)
 {
-	enum pipe pipe;
+	int i;
 
-	for_each_pipe(dev_priv, pipe) {
-		if (pipe != ignore && entries[pipe] &&
-		    skl_ddb_entries_overlap(ddb, entries[pipe]))
+	for (i = 0; i < num_entries; i++) {
+		if (i != ignore_idx &&
+		    skl_ddb_entries_overlap(ddb, &entries[i]))
 			return true;
 	}
 
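skl_ddb_allocation_overlaps() no longer walks pipes itself; callers now hand it a plain array of entries plus the index to skip. A runnable model of the same half-open interval check, with stand-in types:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct ddb_entry { uint16_t start, end; };

	static bool entries_overlap(const struct ddb_entry *a,
				    const struct ddb_entry *b)
	{
		return a->start < b->end && b->start < a->end; /* half-open */
	}

	static bool allocation_overlaps(const struct ddb_entry *ddb,
					const struct ddb_entry entries[],
					int num_entries, int ignore_idx)
	{
		for (int i = 0; i < num_entries; i++)
			if (i != ignore_idx && entries_overlap(ddb, &entries[i]))
				return true;
		return false;
	}

	int main(void)
	{
		struct ddb_entry others[] = { { 0, 128 }, { 128, 256 }, { 256, 512 } };
		struct ddb_entry mine = { 100, 200 };

		/* Index 1 is "our" old slot, so only 0 and 2 are checked. */
		printf("%d\n", allocation_overlaps(&mine, others, 3, 1)); /* 1 */
		return 0;
	}

Passing ignore_idx lets a CRTC skip its own stale allocation while still testing against every other pipe's entry.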
@@ -5136,11 +5286,12 @@ skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
 	struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
 	struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
-	struct drm_plane_state *plane_state;
 	struct drm_plane *plane;
 	enum pipe pipe = intel_crtc->pipe;
 
 	drm_for_each_plane_mask(plane, dev, cstate->base.plane_mask) {
+		struct drm_plane_state *plane_state;
+		struct intel_plane *linked;
 		enum plane_id plane_id = to_intel_plane(plane)->id;
 
 		if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][plane_id],
@@ -5152,6 +5303,15 @@ skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
 		plane_state = drm_atomic_get_plane_state(state, plane);
 		if (IS_ERR(plane_state))
 			return PTR_ERR(plane_state);
+
+		/* Make sure linked plane is updated too */
+		linked = to_intel_plane_state(plane_state)->linked_plane;
+		if (!linked)
+			continue;
+
+		plane_state = drm_atomic_get_plane_state(state, &linked->base);
+		if (IS_ERR(plane_state))
+			return PTR_ERR(plane_state);
 	}
 
 	return 0;
@@ -5210,11 +5370,11 @@ skl_print_wm_changes(const struct drm_atomic_state *state)
 			if (skl_ddb_entry_equal(old, new))
 				continue;
 
-			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n",
-					 intel_plane->base.base.id,
-					 intel_plane->base.name,
-					 old->start, old->end,
-					 new->start, new->end);
+			DRM_DEBUG_KMS("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n",
+				      intel_plane->base.base.id,
+				      intel_plane->base.name,
+				      old->start, old->end,
+				      new->start, new->end);
 		}
 	}
 }
@@ -6116,14 +6276,8 @@ void intel_enable_ipc(struct drm_i915_private *dev_priv)
 {
 	u32 val;
 
-	/* Display WA #0477 WaDisableIPC: skl */
-	if (IS_SKYLAKE(dev_priv))
-		dev_priv->ipc_enabled = false;
-
-	/* Display WA #1141: SKL:all KBL:all CFL */
-	if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) &&
-	    !dev_priv->dram_info.symmetric_memory)
-		dev_priv->ipc_enabled = false;
+	if (!HAS_IPC(dev_priv))
+		return;
 
 	val = I915_READ(DISP_ARB_CTL2);
 
@@ -6137,11 +6291,15 @@ void intel_enable_ipc(struct drm_i915_private *dev_priv)
 
 void intel_init_ipc(struct drm_i915_private *dev_priv)
 {
-	dev_priv->ipc_enabled = false;
 	if (!HAS_IPC(dev_priv))
 		return;
 
-	dev_priv->ipc_enabled = true;
+	/* Display WA #1141: SKL:all KBL:all CFL */
+	if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
+		dev_priv->ipc_enabled = dev_priv->dram_info.symmetric_memory;
+	else
+		dev_priv->ipc_enabled = true;
+
 	intel_enable_ipc(dev_priv);
 }
 
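The enable decision now lives entirely in intel_init_ipc(): on KBL/CFL, Display WA #1141 ties IPC to symmetric memory, while other IPC-capable platforms enable it unconditionally. A compact standalone model of the resulting policy, with stand-in fields in place of HAS_IPC()/IS_KABYLAKE()/IS_COFFEELAKE() and the DRAM info:

	#include <stdbool.h>
	#include <stdio.h>

	/* Field names are stand-ins, not the real drm_i915_private layout. */
	struct i915 {
		bool has_ipc;
		bool is_kbl_or_cfl;
		bool symmetric_memory;
	};

	static bool ipc_enabled(const struct i915 *i915)
	{
		if (!i915->has_ipc)
			return false;
		if (i915->is_kbl_or_cfl)		/* Display WA #1141 */
			return i915->symmetric_memory;
		return true;
	}

	int main(void)
	{
		struct i915 kbl_asym = { .has_ipc = true, .is_kbl_or_cfl = true };
		struct i915 icl = { .has_ipc = true };

		printf("%d %d\n", ipc_enabled(&kbl_asym), ipc_enabled(&icl)); /* 0 1 */
		return 0;
	}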
@@ -8735,6 +8893,10 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
 	/* This is not a Wa. Enable to reduce Sampler power */
 	I915_WRITE(GEN10_DFR_RATIO_EN_AND_CHICKEN,
 		   I915_READ(GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE);
+
+	/* WaEnable32PlaneMode:icl */
+	I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
+		   _MASKED_BIT_ENABLE(GEN11_ENABLE_32_PLANE_MODE));
 }
 
 static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -9312,8 +9474,6 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
 /* Set up chip specific power management-related functions */
 void intel_init_pm(struct drm_i915_private *dev_priv)
 {
-	intel_fbc_init(dev_priv);
-
 	/* For cxsr */
 	if (IS_PINEVIEW(dev_priv))
 		i915_pineview_get_mem_freq(dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index b6838b525502..48df16a02fac 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -71,6 +71,10 @@ static bool psr_global_enabled(u32 debug)
 static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
 			       const struct intel_crtc_state *crtc_state)
 {
+	/* Disable PSR2 by default for all platforms */
+	if (i915_modparams.enable_psr == -1)
+		return false;
+
 	switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
 	case I915_PSR_DEBUG_FORCE_PSR1:
 		return false;
@@ -294,7 +298,8 @@ static void intel_psr_setup_vsc(struct intel_dp *intel_dp,
 		psr_vsc.sdp_header.HB3 = 0x8;
 	}
 
-	intel_dig_port->write_infoframe(&intel_dig_port->base.base, crtc_state,
+	intel_dig_port->write_infoframe(&intel_dig_port->base,
+					crtc_state,
 					DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
 }
 
@@ -558,6 +563,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
 {
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+	u32 mask;
 
 	/* Only HSW and BDW have PSR AUX registers that need to be setup. SKL+
 	 * use hardcoded values PSR AUX transactions
@@ -568,35 +574,30 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
 	if (dev_priv->psr.psr2_enabled) {
 		u32 chicken = I915_READ(CHICKEN_TRANS(cpu_transcoder));
 
-		if (INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv))
+		if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv))
 			chicken |= (PSR2_VSC_ENABLE_PROG_HEADER
 				   | PSR2_ADD_VERTICAL_LINE_COUNT);
 
 		else
 			chicken &= ~VSC_DATA_SEL_SOFTWARE_CONTROL;
 		I915_WRITE(CHICKEN_TRANS(cpu_transcoder), chicken);
-
-		I915_WRITE(EDP_PSR_DEBUG,
-			   EDP_PSR_DEBUG_MASK_MEMUP |
-			   EDP_PSR_DEBUG_MASK_HPD |
-			   EDP_PSR_DEBUG_MASK_LPSP |
-			   EDP_PSR_DEBUG_MASK_MAX_SLEEP |
-			   EDP_PSR_DEBUG_MASK_DISP_REG_WRITE);
-	} else {
-		/*
-		 * Per Spec: Avoid continuous PSR exit by masking MEMUP
-		 * and HPD. also mask LPSP to avoid dependency on other
-		 * drivers that might block runtime_pm besides
-		 * preventing other hw tracking issues now we can rely
-		 * on frontbuffer tracking.
-		 */
-		I915_WRITE(EDP_PSR_DEBUG,
-			   EDP_PSR_DEBUG_MASK_MEMUP |
-			   EDP_PSR_DEBUG_MASK_HPD |
-			   EDP_PSR_DEBUG_MASK_LPSP |
-			   EDP_PSR_DEBUG_MASK_DISP_REG_WRITE |
-			   EDP_PSR_DEBUG_MASK_MAX_SLEEP);
 	}
+
+	/*
+	 * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD. Also
+	 * mask LPSP to avoid dependency on other drivers that might block
+	 * runtime_pm, besides preventing other hw tracking issues; now we
+	 * can rely on frontbuffer tracking.
+	 */
+	mask = EDP_PSR_DEBUG_MASK_MEMUP |
+	       EDP_PSR_DEBUG_MASK_HPD |
+	       EDP_PSR_DEBUG_MASK_LPSP |
+	       EDP_PSR_DEBUG_MASK_MAX_SLEEP;
+
+	if (INTEL_GEN(dev_priv) < 11)
+		mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
+
+	I915_WRITE(EDP_PSR_DEBUG, mask);
 }
 
 static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
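Both PSR1 and PSR2 now build the EDP_PSR_DEBUG value from a single mask, with DISP_REG_WRITE only masked before gen11 (the hunk implies the bit is no longer applicable there). A sketch of the composition with stand-in bit values; the real EDP_PSR_DEBUG_MASK_* macros live in i915_reg.h:

	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in bit positions, for illustration only. */
	#define MASK_MEMUP          (1u << 0)
	#define MASK_HPD            (1u << 1)
	#define MASK_LPSP           (1u << 2)
	#define MASK_MAX_SLEEP      (1u << 3)
	#define MASK_DISP_REG_WRITE (1u << 4)

	static uint32_t psr_debug_mask(int gen)
	{
		uint32_t mask = MASK_MEMUP | MASK_HPD | MASK_LPSP | MASK_MAX_SLEEP;

		if (gen < 11)	/* gen11+ drops the DISP_REG_WRITE mask */
			mask |= MASK_DISP_REG_WRITE;
		return mask;
	}

	int main(void)
	{
		printf("gen9:  0x%x\n", psr_debug_mask(9));  /* 0x1f */
		printf("gen11: 0x%x\n", psr_debug_mask(11)); /* 0xf */
		return 0;
	}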
@@ -656,49 +657,34 @@ unlock:
 	mutex_unlock(&dev_priv->psr.lock);
 }
 
-static void
-intel_psr_disable_source(struct intel_dp *intel_dp)
+static void intel_psr_exit(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
-	if (dev_priv->psr.active) {
-		i915_reg_t psr_status;
-		u32 psr_status_mask;
-
-		if (dev_priv->psr.psr2_enabled) {
-			psr_status = EDP_PSR2_STATUS;
-			psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
-
-			I915_WRITE(EDP_PSR2_CTL,
-				   I915_READ(EDP_PSR2_CTL) &
-				   ~(EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE));
-
-		} else {
-			psr_status = EDP_PSR_STATUS;
-			psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
-
-			I915_WRITE(EDP_PSR_CTL,
-				   I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
-		}
+	u32 val;
 
-		/* Wait till PSR is idle */
-		if (intel_wait_for_register(dev_priv,
-					    psr_status, psr_status_mask, 0,
-					    2000))
-			DRM_ERROR("Timed out waiting for PSR Idle State\n");
+	if (!dev_priv->psr.active) {
+		if (INTEL_GEN(dev_priv) >= 9)
+			WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
+		WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
+		return;
+	}
 
-		dev_priv->psr.active = false;
+	if (dev_priv->psr.psr2_enabled) {
+		val = I915_READ(EDP_PSR2_CTL);
+		WARN_ON(!(val & EDP_PSR2_ENABLE));
+		I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
 	} else {
-		if (dev_priv->psr.psr2_enabled)
-			WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
-		else
-			WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
+		val = I915_READ(EDP_PSR_CTL);
+		WARN_ON(!(val & EDP_PSR_ENABLE));
+		I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
 	}
+	dev_priv->psr.active = false;
 }
 
 static void intel_psr_disable_locked(struct intel_dp *intel_dp)
 {
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	i915_reg_t psr_status;
+	u32 psr_status_mask;
 
 	lockdep_assert_held(&dev_priv->psr.lock);
 
@@ -707,7 +693,21 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
 
 	DRM_DEBUG_KMS("Disabling PSR%s\n",
 		      dev_priv->psr.psr2_enabled ? "2" : "1");
-	intel_psr_disable_source(intel_dp);
+
+	intel_psr_exit(dev_priv);
+
+	if (dev_priv->psr.psr2_enabled) {
+		psr_status = EDP_PSR2_STATUS;
+		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
+	} else {
+		psr_status = EDP_PSR_STATUS;
+		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
+	}
+
+	/* Wait till PSR is idle */
+	if (intel_wait_for_register(dev_priv, psr_status, psr_status_mask, 0,
+				    2000))
+		DRM_ERROR("Timed out waiting for PSR idle state\n");
 
 	/* Disable PSR on Sink */
 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
@@ -925,25 +925,6 @@ unlock:
 	mutex_unlock(&dev_priv->psr.lock);
 }
 
-static void intel_psr_exit(struct drm_i915_private *dev_priv)
-{
-	u32 val;
-
-	if (!dev_priv->psr.active)
-		return;
-
-	if (dev_priv->psr.psr2_enabled) {
-		val = I915_READ(EDP_PSR2_CTL);
-		WARN_ON(!(val & EDP_PSR2_ENABLE));
-		I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
-	} else {
-		val = I915_READ(EDP_PSR_CTL);
-		WARN_ON(!(val & EDP_PSR_ENABLE));
-		I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
-	}
-	dev_priv->psr.active = false;
-}
-
 /**
  * intel_psr_invalidate - Invalidate PSR
  * @dev_priv: i915 device
@@ -1026,20 +1007,16 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
 
 	/* By definition flush = invalidate + flush */
 	if (frontbuffer_bits) {
-		if (dev_priv->psr.psr2_enabled) {
-			intel_psr_exit(dev_priv);
-		} else {
-			/*
-			 * Display WA #0884: all
-			 * This documented WA for bxt can be safely applied
-			 * broadly so we can force HW tracking to exit PSR
-			 * instead of disabling and re-enabling.
-			 * Workaround tells us to write 0 to CUR_SURFLIVE_A,
-			 * but it makes more sense write to the current active
-			 * pipe.
-			 */
-			I915_WRITE(CURSURFLIVE(pipe), 0);
-		}
+		/*
+		 * Display WA #0884: all
+		 * This documented WA for bxt can be safely applied
+		 * broadly so we can force HW tracking to exit PSR
+		 * instead of disabling and re-enabling.
+		 * Workaround tells us to write 0 to CUR_SURFLIVE_A,
+		 * but it makes more sense to write to the current active
+		 * pipe.
+		 */
+		I915_WRITE(CURSURFLIVE(pipe), 0);
 	}
 
 	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
@@ -1065,12 +1042,9 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
 	if (!dev_priv->psr.sink_support)
 		return;
 
-	if (i915_modparams.enable_psr == -1) {
-		i915_modparams.enable_psr = dev_priv->vbt.psr.enable;
-
-		/* Per platform default: all disabled. */
-		i915_modparams.enable_psr = 0;
-	}
+	if (i915_modparams.enable_psr == -1)
+		if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
+			i915_modparams.enable_psr = 0;
 
 	/* Set link_standby x link_off defaults */
 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
@@ -1130,8 +1104,6 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
 		intel_psr_disable_locked(intel_dp);
 	/* clear status register */
 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, val);
-
-	/* TODO: handle PSR2 errors */
 exit:
 	mutex_unlock(&psr->lock);
 }
diff --git a/drivers/gpu/drm/i915/intel_quirks.c b/drivers/gpu/drm/i915/intel_quirks.c
new file mode 100644
index 000000000000..ec2b0fc92b8b
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_quirks.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <linux/dmi.h>
+
+#include "intel_drv.h"
+
+/*
+ * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
+ */
+static void quirk_ssc_force_disable(struct drm_i915_private *i915)
+{
+	i915->quirks |= QUIRK_LVDS_SSC_DISABLE;
+	DRM_INFO("applying lvds SSC disable quirk\n");
+}
+
+/*
+ * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
+ * brightness value
+ */
+static void quirk_invert_brightness(struct drm_i915_private *i915)
+{
+	i915->quirks |= QUIRK_INVERT_BRIGHTNESS;
+	DRM_INFO("applying inverted panel brightness quirk\n");
+}
+
+/* Some VBTs incorrectly indicate no backlight is present */
+static void quirk_backlight_present(struct drm_i915_private *i915)
+{
+	i915->quirks |= QUIRK_BACKLIGHT_PRESENT;
+	DRM_INFO("applying backlight present quirk\n");
+}
+
+/*
+ * Toshiba Satellite P50-C-18C requires T12 delay to be min 800ms,
+ * which is 300 ms greater than the eDP spec T12 min.
+ */
+static void quirk_increase_t12_delay(struct drm_i915_private *i915)
+{
+	i915->quirks |= QUIRK_INCREASE_T12_DELAY;
+	DRM_INFO("Applying T12 delay quirk\n");
+}
+
+/*
+ * GeminiLake NUC HDMI outputs require additional off time;
+ * this allows the onboard retimer to correctly sync to the signal.
+ */
+static void quirk_increase_ddi_disabled_time(struct drm_i915_private *i915)
+{
+	i915->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME;
+	DRM_INFO("Applying Increase DDI Disabled quirk\n");
+}
+
+struct intel_quirk {
+	int device;
+	int subsystem_vendor;
+	int subsystem_device;
+	void (*hook)(struct drm_i915_private *i915);
+};
+
+/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
+struct intel_dmi_quirk {
+	void (*hook)(struct drm_i915_private *i915);
+	const struct dmi_system_id (*dmi_id_list)[];
+};
+
+static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
+{
+	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
+	return 1;
+}
+
+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
+	{
+		.dmi_id_list = &(const struct dmi_system_id[]) {
+			{
+				.callback = intel_dmi_reverse_brightness,
+				.ident = "NCR Corporation",
+				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
+					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
+				},
+			},
+			{ }  /* terminating entry */
+		},
+		.hook = quirk_invert_brightness,
+	},
+};
+
+static struct intel_quirk intel_quirks[] = {
+	/* Lenovo U160 cannot use SSC on LVDS */
+	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
+
+	/* Sony Vaio Y cannot use SSC on LVDS */
+	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
+
+	/* Acer Aspire 5734Z must invert backlight brightness */
+	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
+
+	/* Acer/eMachines G725 */
+	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
+
+	/* Acer/eMachines e725 */
+	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
+
+	/* Acer/Packard Bell NCL20 */
+	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
+
+	/* Acer Aspire 4736Z */
+	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
+
+	/* Acer Aspire 5336 */
+	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
+
+	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
+	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
+
+	/* Acer C720 Chromebook (Core i3 4005U) */
+	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
+
+	/* Apple Macbook 2,1 (Core 2 T7400) */
+	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
+
+	/* Apple Macbook 4,1 */
+	{ 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },
+
+	/* Toshiba CB35 Chromebook (Celeron 2955U) */
+	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
+
+	/* HP Chromebook 14 (Celeron 2955U) */
+	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
+
+	/* Dell Chromebook 11 */
+	{ 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
+
+	/* Dell Chromebook 11 (2015 version) */
+	{ 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
+
+	/* Toshiba Satellite P50-C-18C */
+	{ 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },
+
+	/* GeminiLake NUC */
+	{ 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
+	{ 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
+	/* ASRock ITX */
+	{ 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
+	{ 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
+};
+
+void intel_init_quirks(struct drm_i915_private *i915)
+{
+	struct pci_dev *d = i915->drm.pdev;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
+		struct intel_quirk *q = &intel_quirks[i];
+
+		if (d->device == q->device &&
+		    (d->subsystem_vendor == q->subsystem_vendor ||
+		     q->subsystem_vendor == PCI_ANY_ID) &&
+		    (d->subsystem_device == q->subsystem_device ||
+		     q->subsystem_device == PCI_ANY_ID))
+			q->hook(i915);
+	}
+	for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
+		if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
+			intel_dmi_quirks[i].hook(i915);
+	}
+}
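intel_init_quirks() matches on PCI device ID plus subsystem vendor/device, with PCI_ANY_ID acting as a wildcard on either subsystem field. A self-contained model of that match loop; the IDs and the wildcard value below are stand-ins for illustration:

	#include <stdio.h>

	#define ANY_ID 0xffff	/* stand-in for PCI_ANY_ID */

	struct quirk {
		unsigned short device, sub_vendor, sub_device;
		const char *name;
	};

	static const struct quirk quirks[] = {
		{ 0x0046, 0x17aa, 0x3920, "lvds SSC disable" },	/* Lenovo U160 */
		{ 0x3185, ANY_ID, ANY_ID, "hypothetical wildcard entry" },
	};

	static void apply_quirks(unsigned short dev, unsigned short sv,
				 unsigned short sd)
	{
		for (unsigned int i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++) {
			const struct quirk *q = &quirks[i];

			if (dev == q->device &&
			    (sv == q->sub_vendor || q->sub_vendor == ANY_ID) &&
			    (sd == q->sub_device || q->sub_device == ANY_ID))
				printf("applying %s quirk\n", q->name);
		}
	}

	int main(void)
	{
		apply_quirks(0x0046, 0x17aa, 0x3920); /* matches the Lenovo entry */
		return 0;
	}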
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 187bb0ceb4ac..87eebc13c0d8 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -608,7 +608,9 @@ static void skip_request(struct i915_request *rq)
 
 static void reset_ring(struct intel_engine_cs *engine, struct i915_request *rq)
 {
-	GEM_TRACE("%s seqno=%x\n", engine->name, rq ? rq->global_seqno : 0);
+	GEM_TRACE("%s request global=%d, current=%d\n",
+		  engine->name, rq ? rq->global_seqno : 0,
+		  intel_engine_get_seqno(engine));
 
 	/*
 	 * Try to restore the logical GPU state to match the continuation
@@ -1055,8 +1057,7 @@ i915_emit_bb_start(struct i915_request *rq,
 int intel_ring_pin(struct intel_ring *ring)
 {
 	struct i915_vma *vma = ring->vma;
-	enum i915_map_type map =
-		HAS_LLC(vma->vm->i915) ? I915_MAP_WB : I915_MAP_WC;
+	enum i915_map_type map = i915_coherent_map_type(vma->vm->i915);
 	unsigned int flags;
 	void *addr;
 	int ret;
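The open-coded HAS_LLC() ternary moves behind i915_coherent_map_type(). Judging only from the deleted lines, the helper presumably reduces to the following; this is an inferred sketch, not the helper's actual definition:

	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-in enum; the real one is i915_map_type in the driver. */
	enum map_type { MAP_WB, MAP_WC };

	/* Assumed body: write-back caching when the CPU snoops (LLC),
	 * write-combining otherwise. */
	static enum map_type coherent_map_type(bool has_llc)
	{
		return has_llc ? MAP_WB : MAP_WC;
	}

	int main(void)
	{
		printf("%s\n", coherent_map_type(true) == MAP_WB ? "WB" : "WC");
		return 0;
	}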
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 2dfa585712c2..8a2270b209b0 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
 #ifndef _INTEL_RINGBUFFER_H_
 #define _INTEL_RINGBUFFER_H_
 
@@ -93,11 +93,11 @@ hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
 #define I915_MAX_SUBSLICES 8
 
 #define instdone_slice_mask(dev_priv__) \
-	(INTEL_GEN(dev_priv__) == 7 ? \
+	(IS_GEN7(dev_priv__) ? \
 	 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)
 
 #define instdone_subslice_mask(dev_priv__) \
-	(INTEL_GEN(dev_priv__) == 7 ? \
+	(IS_GEN7(dev_priv__) ? \
 	 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask[0])
 
 #define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
@@ -190,11 +190,22 @@ enum intel_engine_id {
 };
 
 struct i915_priolist {
+	struct list_head requests[I915_PRIORITY_COUNT];
 	struct rb_node node;
-	struct list_head requests;
+	unsigned long used;
 	int priority;
 };
 
+#define priolist_for_each_request(it, plist, idx) \
+	for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \
+		list_for_each_entry(it, &(plist)->requests[idx], sched.link)
+
+#define priolist_for_each_request_consume(it, n, plist, idx) \
+	for (; (idx = ffs((plist)->used)); (plist)->used &= ~BIT(idx - 1)) \
+		list_for_each_entry_safe(it, n, \
+					 &(plist)->requests[idx - 1], \
+					 sched.link)
+
 struct st_preempt_hang {
 	struct completion completion;
 	bool inject_hang;
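The reworked i915_priolist keeps one request list per priority level plus a "used" bitmask marking which buckets are non-empty; the consume iterator pops the lowest set bit with ffs() and clears it once that bucket is drained. A runnable model of that bookkeeping (bucket count is a stand-in for I915_PRIORITY_COUNT):

	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	#define NBUCKETS 4	/* stand-in for I915_PRIORITY_COUNT */

	int main(void)
	{
		int counts[NBUCKETS] = { 0, 2, 0, 1 }; /* queued requests per bucket */
		unsigned int used = 0;
		int idx;

		/* Producer side: mark a bucket non-empty when a request lands. */
		for (idx = 0; idx < NBUCKETS; idx++)
			if (counts[idx])
				used |= 1u << idx;

		/*
		 * Consumer side, as in priolist_for_each_request_consume():
		 * ffs() returns the lowest set bit (1-based); drain that
		 * bucket, clear its bit, and look again.
		 */
		while ((idx = ffs(used))) {
			used &= ~(1u << (idx - 1));
			printf("draining bucket %d (%d requests)\n",
			       idx - 1, counts[idx - 1]);
		}
		return 0;
	}

This avoids walking empty lists: each iteration of the consume loop costs one bit scan instead of a pass over all priority levels.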
@@ -487,11 +498,10 @@ struct intel_engine_cs {
 	 */
 	void		(*submit_request)(struct i915_request *rq);
 
-	/* Call when the priority on a request has changed and it and its
+	/*
+	 * Call when the priority on a request has changed and it and its
 	 * dependencies may need rescheduling. Note the request itself may
 	 * not be ready to run!
-	 *
-	 * Called under the struct_mutex.
 	 */
 	void		(*schedule)(struct i915_request *request,
 				    const struct i915_sched_attr *attr);
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 44e4491a4918..1c2de9b69a19 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -208,7 +208,7 @@ bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
 
 	is_enabled = true;
 
-	for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain)) {
+	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
 		if (power_well->desc->always_on)
 			continue;
 
@@ -436,6 +436,15 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
 	I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX);
 
 	hsw_wait_for_power_well_enable(dev_priv, power_well);
+
+	/* Display WA #1178: icl */
+	if (IS_ICELAKE(dev_priv) &&
+	    pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
+	    !intel_bios_is_port_edp(dev_priv, port)) {
+		val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx));
+		val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
+		I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val);
+	}
 }
 
 static void
@@ -456,6 +465,25 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
 	hsw_wait_for_power_well_disable(dev_priv, power_well);
 }
 
+#define ICL_AUX_PW_TO_CH(pw_idx)	\
+	((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
+
+static void
+icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
+				 struct i915_power_well *power_well)
+{
+	enum aux_ch aux_ch = ICL_AUX_PW_TO_CH(power_well->desc->hsw.idx);
+	u32 val;
+
+	val = I915_READ(DP_AUX_CH_CTL(aux_ch));
+	val &= ~DP_AUX_CH_CTL_TBT_IO;
+	if (power_well->desc->hsw.is_tc_tbt)
+		val |= DP_AUX_CH_CTL_TBT_IO;
+	I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);
+
+	hsw_power_well_enable(dev_priv, power_well);
+}
+
 /*
  * We should only use the power well if we explicitly asked the hardware to
  * enable it, so check if it's enabled and also check if we've requested it to
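ICL_AUX_PW_TO_CH() relies on the AUX power-well index range and the aux_ch enum both being contiguous, so one subtraction plus an addition re-bases one range onto the other. A tiny model with made-up numeric bases (the real values come from i915_reg.h and the driver's enum aux_ch):

	#include <stdio.h>

	enum { ICL_PW_CTL_IDX_AUX_A = 8 };	/* hypothetical base */
	enum { AUX_CH_A = 0, AUX_CH_B, AUX_CH_C, AUX_CH_D, AUX_CH_E, AUX_CH_F };

	#define ICL_AUX_PW_TO_CH(pw_idx) \
		((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)

	int main(void)
	{
		/* AUX C's power-well index is base + 2, so it maps to channel C. */
		printf("%d\n", ICL_AUX_PW_TO_CH(ICL_PW_CTL_IDX_AUX_A + 2)); /* 2 */
		return 0;
	}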
@@ -465,11 +493,25 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
 			     struct i915_power_well *power_well)
 {
 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+	enum i915_power_well_id id = power_well->desc->id;
 	int pw_idx = power_well->desc->hsw.idx;
 	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
 		   HSW_PWR_WELL_CTL_STATE(pw_idx);
+	u32 val;
+
+	val = I915_READ(regs->driver);
 
-	return (I915_READ(regs->driver) & mask) == mask;
+	/*
+	 * On GEN9 big core due to a DMC bug the driver's request bits for PW1
+	 * and the MISC_IO PW will not be restored, so check instead for the
+	 * BIOS's own request bits, which are forced-on for these power wells
+	 * when exiting DC5/6.
+	 */
+	if (IS_GEN9(dev_priv) && !IS_GEN9_LP(dev_priv) &&
+	    (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
+		val |= I915_READ(regs->bios);
+
+	return (val & mask) == mask;
 }
 
 static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
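A compact model of the workaround in hsw_power_well_enabled(): for GEN9 big core PW1/MISC_IO, the BIOS request register is OR'd into the driver value before the REQ|STATE test, covering the request bit the DMC drops across DC5/6. Bit positions below are stand-ins:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define REQ   (1u << 1)	/* stand-in request bit */
	#define STATE (1u << 0)	/* stand-in status bit */

	static bool power_well_enabled(uint32_t driver_reg, uint32_t bios_reg,
				       bool gen9_bc_pw1_or_misc_io)
	{
		uint32_t val = driver_reg;

		if (gen9_bc_pw1_or_misc_io)	/* DMC bug workaround */
			val |= bios_reg;
		return (val & (REQ | STATE)) == (REQ | STATE);
	}

	int main(void)
	{
		/* DMC lost the driver REQ bit; BIOS forced it on DC5/6 exit. */
		printf("%d\n", power_well_enabled(STATE, REQ, true));  /* 1 */
		printf("%d\n", power_well_enabled(STATE, REQ, false)); /* 0 */
		return 0;
	}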
@@ -551,7 +593,9 @@ static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
 	u32 mask;
 
 	mask = DC_STATE_EN_UPTO_DC5;
-	if (IS_GEN9_LP(dev_priv))
+	if (INTEL_GEN(dev_priv) >= 11)
+		mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
+	else if (IS_GEN9_LP(dev_priv))
 		mask |= DC_STATE_EN_DC9;
 	else
 		mask |= DC_STATE_EN_UPTO_DC6;
@@ -624,8 +668,13 @@ void bxt_enable_dc9(struct drm_i915_private *dev_priv)
 	assert_can_enable_dc9(dev_priv);
 
 	DRM_DEBUG_KMS("Enabling DC9\n");
-
-	intel_power_sequencer_reset(dev_priv);
+	/*
+	 * Power sequencer reset is not needed on
+	 * platforms with South Display Engine on PCH,
+	 * because PPS registers are always on.
+	 */
+	if (!HAS_PCH_SPLIT(dev_priv))
+		intel_power_sequencer_reset(dev_priv);
 	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
 }
 
@@ -707,7 +756,7 @@ static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
 	assert_csr_loaded(dev_priv);
 }
 
-static void skl_enable_dc6(struct drm_i915_private *dev_priv)
+void skl_enable_dc6(struct drm_i915_private *dev_priv)
 {
 	assert_can_enable_dc6(dev_priv);
 
@@ -808,6 +857,14 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
 
 	if (IS_GEN9_LP(dev_priv))
 		bxt_verify_ddi_phy_power_wells(dev_priv);
+
+	if (INTEL_GEN(dev_priv) >= 11)
+		/*
+		 * DMC retains HW context only for port A, the other combo
+		 * PHY's HW context for port B is lost after DC transitions,
+		 * so we need to restore it manually.
+		 */
+		icl_combo_phys_init(dev_priv);
 }
 
 static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
@@ -1608,7 +1665,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
 		     intel_display_power_domain_str(domain));
 	power_domains->domain_use_count[domain]--;
 
-	for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain))
+	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
 		intel_power_well_put(dev_priv, power_well);
 
 	mutex_unlock(&power_domains->lock);
@@ -2041,7 +2098,7 @@ static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
 static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
 	{
 		.name = "always-on",
-		.always_on = 1,
+		.always_on = true,
 		.domains = POWER_DOMAIN_MASK,
 		.ops = &i9xx_always_on_power_well_ops,
 		.id = DISP_PW_ID_NONE,
@@ -2058,7 +2115,7 @@ static const struct i915_power_well_ops i830_pipes_power_well_ops = {
 static const struct i915_power_well_desc i830_power_wells[] = {
 	{
 		.name = "always-on",
-		.always_on = 1,
+		.always_on = true,
 		.domains = POWER_DOMAIN_MASK,
 		.ops = &i9xx_always_on_power_well_ops,
 		.id = DISP_PW_ID_NONE,
@@ -2102,7 +2159,7 @@ static const struct i915_power_well_regs hsw_power_well_regs = {
 static const struct i915_power_well_desc hsw_power_wells[] = {
 	{
 		.name = "always-on",
-		.always_on = 1,
+		.always_on = true,
 		.domains = POWER_DOMAIN_MASK,
 		.ops = &i9xx_always_on_power_well_ops,
 		.id = DISP_PW_ID_NONE,
@@ -2123,7 +2180,7 @@ static const struct i915_power_well_desc hsw_power_wells[] = {
 static const struct i915_power_well_desc bdw_power_wells[] = {
 	{
 		.name = "always-on",
-		.always_on = 1,
+		.always_on = true,
 		.domains = POWER_DOMAIN_MASK,
 		.ops = &i9xx_always_on_power_well_ops,
 		.id = DISP_PW_ID_NONE,
@@ -2166,7 +2223,7 @@ static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
 static const struct i915_power_well_desc vlv_power_wells[] = {
 	{
 		.name = "always-on",
-		.always_on = 1,
+		.always_on = true,
 		.domains = POWER_DOMAIN_MASK,
 		.ops = &i9xx_always_on_power_well_ops,
 		.id = DISP_PW_ID_NONE,
@@ -2242,7 +2299,7 @@ static const struct i915_power_well_desc vlv_power_wells[] = {
 static const struct i915_power_well_desc chv_power_wells[] = {
 	{
 		.name = "always-on",
-		.always_on = 1,
+		.always_on = true,
 		.domains = POWER_DOMAIN_MASK,
 		.ops = &i9xx_always_on_power_well_ops,
 		.id = DISP_PW_ID_NONE,
@@ -2293,7 +2350,7 @@ bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
 static const struct i915_power_well_desc skl_power_wells[] = {
 	{
 		.name = "always-on",
-		.always_on = 1,
+		.always_on = true,
 		.domains = POWER_DOMAIN_MASK,
 		.ops = &i9xx_always_on_power_well_ops,
 		.id = DISP_PW_ID_NONE,
@@ -2301,6 +2358,7 @@ static const struct i915_power_well_desc skl_power_wells[] = {
 	{
 		.name = "power well 1",
 		/* Handled by the DMC firmware */
+		.always_on = true,
 		.domains = 0,
 		.ops = &hsw_power_well_ops,
 		.id = SKL_DISP_PW_1,
@@ -2313,6 +2371,7 @@ static const struct i915_power_well_desc skl_power_wells[] = {
 	{
 		.name = "MISC IO power well",
 		/* Handled by the DMC firmware */
+		.always_on = true,
 		.domains = 0,
 		.ops = &hsw_power_well_ops,
 		.id = SKL_DISP_PW_MISC_IO,
@@ -2385,13 +2444,15 @@ static const struct i915_power_well_desc skl_power_wells[] = {
 static const struct i915_power_well_desc bxt_power_wells[] = {
 	{
 		.name = "always-on",
-		.always_on = 1,
+		.always_on = true,
 		.domains = POWER_DOMAIN_MASK,
 		.ops = &i9xx_always_on_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 	},
 	{
 		.name = "power well 1",
+		/* Handled by the DMC firmware */
+		.always_on = true,
 		.domains = 0,
 		.ops = &hsw_power_well_ops,
 		.id = SKL_DISP_PW_1,
@@ -2443,7 +2504,7 @@ static const struct i915_power_well_desc bxt_power_wells[] = {
 static const struct i915_power_well_desc glk_power_wells[] = {
 	{
 		.name = "always-on",
-		.always_on = 1,
+		.always_on = true,
 		.domains = POWER_DOMAIN_MASK,
 		.ops = &i9xx_always_on_power_well_ops,
 		.id = DISP_PW_ID_NONE,
@@ -2451,6 +2512,7 @@ static const struct i915_power_well_desc glk_power_wells[] = {
 	{
 		.name = "power well 1",
 		/* Handled by the DMC firmware */
+		.always_on = true,
 		.domains = 0,
 		.ops = &hsw_power_well_ops,
 		.id = SKL_DISP_PW_1,
@@ -2571,7 +2633,7 @@ static const struct i915_power_well_desc glk_power_wells[] = {
 static const struct i915_power_well_desc cnl_power_wells[] = {
 	{
 		.name = "always-on",
-		.always_on = 1,
+		.always_on = true,
 		.domains = POWER_DOMAIN_MASK,
 		.ops = &i9xx_always_on_power_well_ops,
 		.id = DISP_PW_ID_NONE,
@@ -2579,6 +2641,7 @@ static const struct i915_power_well_desc cnl_power_wells[] = {
 	{
 		.name = "power well 1",
 		/* Handled by the DMC firmware */
+		.always_on = true,
 		.domains = 0,
 		.ops = &hsw_power_well_ops,
 		.id = SKL_DISP_PW_1,
@@ -2716,6 +2779,13 @@ static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
 	.is_enabled = hsw_power_well_enabled,
 };
 
+static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = {
+	.sync_hw = hsw_power_well_sync_hw,
+	.enable = icl_tc_phy_aux_power_well_enable,
+	.disable = hsw_power_well_disable,
+	.is_enabled = hsw_power_well_enabled,
+};
+
 static const struct i915_power_well_regs icl_aux_power_well_regs = {
 	.bios	= ICL_PWR_WELL_CTL_AUX1,
 	.driver	= ICL_PWR_WELL_CTL_AUX2,
@@ -2731,7 +2801,7 @@ static const struct i915_power_well_regs icl_ddi_power_well_regs = {
 static const struct i915_power_well_desc icl_power_wells[] = {
 	{
 		.name = "always-on",
-		.always_on = 1,
+		.always_on = true,
 		.domains = POWER_DOMAIN_MASK,
 		.ops = &i9xx_always_on_power_well_ops,
 		.id = DISP_PW_ID_NONE,
@@ -2739,6 +2809,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
 	{
 		.name = "power well 1",
 		/* Handled by the DMC firmware */
+		.always_on = true,
 		.domains = 0,
 		.ops = &hsw_power_well_ops,
 		.id = SKL_DISP_PW_1,
@@ -2861,81 +2932,89 @@ static const struct i915_power_well_desc icl_power_wells[] = {
 	{
 		.name = "AUX C",
 		.domains = ICL_AUX_C_IO_POWER_DOMAINS,
-		.ops = &hsw_power_well_ops,
+		.ops = &icl_tc_phy_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
 			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
+			.hsw.is_tc_tbt = false,
 		},
 	},
 	{
 		.name = "AUX D",
 		.domains = ICL_AUX_D_IO_POWER_DOMAINS,
-		.ops = &hsw_power_well_ops,
+		.ops = &icl_tc_phy_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
 			.hsw.idx = ICL_PW_CTL_IDX_AUX_D,
+			.hsw.is_tc_tbt = false,
 		},
 	},
 	{
 		.name = "AUX E",
 		.domains = ICL_AUX_E_IO_POWER_DOMAINS,
-		.ops = &hsw_power_well_ops,
+		.ops = &icl_tc_phy_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
 			.hsw.idx = ICL_PW_CTL_IDX_AUX_E,
+			.hsw.is_tc_tbt = false,
 		},
 	},
 	{
 		.name = "AUX F",
 		.domains = ICL_AUX_F_IO_POWER_DOMAINS,
-		.ops = &hsw_power_well_ops,
+		.ops = &icl_tc_phy_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
 			.hsw.idx = ICL_PW_CTL_IDX_AUX_F,
+			.hsw.is_tc_tbt = false,
 		},
 	},
 	{
 		.name = "AUX TBT1",
 		.domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
-		.ops = &hsw_power_well_ops,
+		.ops = &icl_tc_phy_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
 			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
+			.hsw.is_tc_tbt = true,
 		},
 	},
 	{
 		.name = "AUX TBT2",
 		.domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
-		.ops = &hsw_power_well_ops,
+		.ops = &icl_tc_phy_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
 			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
+			.hsw.is_tc_tbt = true,
 		},
 	},
 	{
 		.name = "AUX TBT3",
 		.domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
-		.ops = &hsw_power_well_ops,
+		.ops = &icl_tc_phy_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
 			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
+			.hsw.is_tc_tbt = true,
 		},
 	},
 	{
 		.name = "AUX TBT4",
 		.domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
-		.ops = &hsw_power_well_ops,
+		.ops = &icl_tc_phy_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
 			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
+			.hsw.is_tc_tbt = true,
 		},
 	},
 	{
@@ -2969,17 +3048,20 @@ static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
 	int requested_dc;
 	int max_dc;
 
-	if (IS_GEN9_BC(dev_priv) || INTEL_INFO(dev_priv)->gen >= 10) {
+	if (INTEL_GEN(dev_priv) >= 11) {
 		max_dc = 2;
-		mask = 0;
-	} else if (IS_GEN9_LP(dev_priv)) {
-		max_dc = 1;
 		/*
 		 * DC9 has a separate HW flow from the rest of the DC states,
 		 * not depending on the DMC firmware. It's needed by system
 		 * suspend/resume, so allow it unconditionally.
 		 */
 		mask = DC_STATE_EN_DC9;
+	} else if (IS_GEN10(dev_priv) || IS_GEN9_BC(dev_priv)) {
+		max_dc = 2;
+		mask = 0;
+	} else if (IS_GEN9_LP(dev_priv)) {
+		max_dc = 1;
+		mask = DC_STATE_EN_DC9;
 	} else {
 		max_dc = 0;
 		mask = 0;
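A model of the reordered ladder in get_allowed_dc_mask(): gen11 is tested first so it can keep max_dc = 2 while also allowing DC9 (per the comment above, DC9 does not depend on DMC firmware and is needed for suspend/resume), which GEN10/GEN9_BC do not get. The DC-state bits below are stand-ins:

	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in DC state bits; the real ones live in i915_reg.h. */
	#define DC_UPTO_DC5 (1u << 0)
	#define DC_UPTO_DC6 (1u << 1)
	#define DC_DC9      (1u << 3)

	enum tier { GEN11, GEN10_OR_GEN9_BC, GEN9_LP, OTHER };

	static void allowed_dc(enum tier t, int *max_dc, uint32_t *mask)
	{
		switch (t) {
		case GEN11:            *max_dc = 2; *mask = DC_DC9; break;
		case GEN10_OR_GEN9_BC: *max_dc = 2; *mask = 0;      break;
		case GEN9_LP:          *max_dc = 1; *mask = DC_DC9; break;
		default:               *max_dc = 0; *mask = 0;      break;
		}
	}

	int main(void)
	{
		int max_dc;
		uint32_t mask;

		allowed_dc(GEN11, &max_dc, &mask);
		printf("gen11: max_dc=%d mask=0x%x\n", max_dc, mask); /* 2, 0x8 */
		return 0;
	}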
@@ -3075,12 +3157,6 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
 	 */
 	if (IS_ICELAKE(dev_priv)) {
 		err = set_power_wells(power_domains, icl_power_wells);
-	} else if (IS_HASWELL(dev_priv)) {
-		err = set_power_wells(power_domains, hsw_power_wells);
-	} else if (IS_BROADWELL(dev_priv)) {
-		err = set_power_wells(power_domains, bdw_power_wells);
-	} else if (IS_GEN9_BC(dev_priv)) {
-		err = set_power_wells(power_domains, skl_power_wells);
 	} else if (IS_CANNONLAKE(dev_priv)) {
 		err = set_power_wells(power_domains, cnl_power_wells);
 
@@ -3092,13 +3168,18 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
 		 */
 		if (!IS_CNL_WITH_PORT_F(dev_priv))
 			power_domains->power_well_count -= 2;
-
-	} else if (IS_BROXTON(dev_priv)) {
-		err = set_power_wells(power_domains, bxt_power_wells);
 	} else if (IS_GEMINILAKE(dev_priv)) {
 		err = set_power_wells(power_domains, glk_power_wells);
+	} else if (IS_BROXTON(dev_priv)) {
+		err = set_power_wells(power_domains, bxt_power_wells);
+	} else if (IS_GEN9_BC(dev_priv)) {
+		err = set_power_wells(power_domains, skl_power_wells);
 	} else if (IS_CHERRYVIEW(dev_priv)) {
 		err = set_power_wells(power_domains, chv_power_wells);
+	} else if (IS_BROADWELL(dev_priv)) {
+		err = set_power_wells(power_domains, bdw_power_wells);
+	} else if (IS_HASWELL(dev_priv)) {
+		err = set_power_wells(power_domains, hsw_power_wells);
 	} else if (IS_VALLEYVIEW(dev_priv)) {
 		err = set_power_wells(power_domains, vlv_power_wells);
 	} else if (IS_I830(dev_priv)) {
@@ -3238,18 +3319,40 @@ static void icl_mbus_init(struct drm_i915_private *dev_priv)
 	I915_WRITE(MBUS_ABOX_CTL, val);
 }
 
+static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
+				      bool enable)
+{
+	i915_reg_t reg;
+	u32 reset_bits, val;
+
+	if (IS_IVYBRIDGE(dev_priv)) {
+		reg = GEN7_MSG_CTL;
+		reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
+	} else {
+		reg = HSW_NDE_RSTWRN_OPT;
+		reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
+	}
+
+	val = I915_READ(reg);
+
+	if (enable)
+		val |= reset_bits;
+	else
+		val &= ~reset_bits;
+
+	I915_WRITE(reg, val);
+}
+
 static void skl_display_core_init(struct drm_i915_private *dev_priv,
 				  bool resume)
 {
 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
 	struct i915_power_well *well;
-	uint32_t val;
 
 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 
 	/* enable PCH reset handshake */
-	val = I915_READ(HSW_NDE_RSTWRN_OPT);
-	I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);
+	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
 
 	/* enable PG1 and Misc I/O */
 	mutex_lock(&power_domains->lock);
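intel_pch_reset_handshake() folds the IVB and HSW+ variants of the same read-modify-write into one helper, and callers pick enable or disable. A standalone model of the helper's shape, with stand-in register storage and bit values:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in bit values; GEN7_MSG_CTL/HSW_NDE_RSTWRN_OPT are modeled
	 * as plain variables rather than MMIO. */
	#define IVB_RESET_BITS 0x3u
	#define HSW_RESET_BIT  (1u << 4)

	static uint32_t msg_ctl, rstwrn_opt;

	static void pch_reset_handshake(bool is_ivb, bool enable)
	{
		uint32_t *reg = is_ivb ? &msg_ctl : &rstwrn_opt;
		uint32_t bits = is_ivb ? IVB_RESET_BITS : HSW_RESET_BIT;

		if (enable)
			*reg |= bits;
		else
			*reg &= ~bits;
	}

	int main(void)
	{
		/* skl_display_core_init(): enable unless the PCH is a NOP. */
		pch_reset_handshake(false, true);
		/* bxt_display_core_init(): BXT explicitly disables it. */
		pch_reset_handshake(false, false);
		printf("rstwrn=0x%x\n", rstwrn_opt); /* 0x0 after the BXT path */
		return 0;
	}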
@@ -3305,7 +3408,6 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv,
 {
 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
 	struct i915_power_well *well;
-	uint32_t val;
 
 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 
@@ -3315,9 +3417,7 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv,
 	 * Move the handshake programming to initialization sequence.
 	 * Previously was left up to BIOS.
 	 */
-	val = I915_READ(HSW_NDE_RSTWRN_OPT);
-	val &= ~RESET_PCH_HANDSHAKE_ENABLE;
-	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
+	intel_pch_reset_handshake(dev_priv, false);
 
 	/* Enable PG1 */
 	mutex_lock(&power_domains->lock);
@@ -3363,101 +3463,18 @@ void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
 	usleep_range(10, 30); /* 10 us delay per Bspec */
 }
 
-enum {
-	PROCMON_0_85V_DOT_0,
-	PROCMON_0_95V_DOT_0,
-	PROCMON_0_95V_DOT_1,
-	PROCMON_1_05V_DOT_0,
-	PROCMON_1_05V_DOT_1,
-};
-
-static const struct cnl_procmon {
-	u32 dw1, dw9, dw10;
-} cnl_procmon_values[] = {
-	[PROCMON_0_85V_DOT_0] =
-		{ .dw1 = 0x00000000, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96, },
-	[PROCMON_0_95V_DOT_0] =
-		{ .dw1 = 0x00000000, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB, },
-	[PROCMON_0_95V_DOT_1] =
-		{ .dw1 = 0x00000000, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5, },
-	[PROCMON_1_05V_DOT_0] =
-		{ .dw1 = 0x00000000, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1, },
-	[PROCMON_1_05V_DOT_1] =
-		{ .dw1 = 0x00440000, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, },
-};
-
-/*
- * CNL has just one set of registers, while ICL has two sets: one for port A and
- * the other for port B. The CNL registers are equivalent to the ICL port A
- * registers, that's why we call the ICL macros even though the function has CNL
- * on its name.
- */
-static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
-				       enum port port)
-{
-	const struct cnl_procmon *procmon;
-	u32 val;
-
-	val = I915_READ(ICL_PORT_COMP_DW3(port));
-	switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
-	default:
-		MISSING_CASE(val);
-		/* fall through */
-	case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0:
-		procmon = &cnl_procmon_values[PROCMON_0_85V_DOT_0];
-		break;
-	case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0:
-		procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_0];
-		break;
-	case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1:
-		procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_1];
-		break;
-	case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0:
-		procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_0];
-		break;
-	case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1:
-		procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_1];
-		break;
-	}
-
-	val = I915_READ(ICL_PORT_COMP_DW1(port));
-	val &= ~((0xff << 16) | 0xff);
-	val |= procmon->dw1;
-	I915_WRITE(ICL_PORT_COMP_DW1(port), val);
-
-	I915_WRITE(ICL_PORT_COMP_DW9(port), procmon->dw9);
-	I915_WRITE(ICL_PORT_COMP_DW10(port), procmon->dw10);
-}
-
 static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
 {
 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
 	struct i915_power_well *well;
-	u32 val;
 
 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 
 	/* 1. Enable PCH Reset Handshake */
-	val = I915_READ(HSW_NDE_RSTWRN_OPT);
-	val |= RESET_PCH_HANDSHAKE_ENABLE;
-	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
-
-	/* 2. Enable Comp */
-	val = I915_READ(CHICKEN_MISC_2);
-	val &= ~CNL_COMP_PWR_DOWN;
-	I915_WRITE(CHICKEN_MISC_2, val);
-
-	/* Dummy PORT_A to get the correct CNL register from the ICL macro */
-	cnl_set_procmon_ref_values(dev_priv, PORT_A);
+	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
 
-	val = I915_READ(CNL_PORT_COMP_DW0);
-	val |= COMP_INIT;
-	I915_WRITE(CNL_PORT_COMP_DW0, val);
-
-	/* 3. */
-	val = I915_READ(CNL_PORT_CL1CM_DW5);
-	val |= CL_POWER_DOWN_ENABLE;
-	I915_WRITE(CNL_PORT_CL1CM_DW5, val);
+	/* 2-3. */
+	cnl_combo_phys_init(dev_priv);
 
 	/*
 	 * 4. Enable Power Well 1 (PG1).
@@ -3482,7 +3499,6 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
3482{ 3499{
3483 struct i915_power_domains *power_domains = &dev_priv->power_domains; 3500 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3484 struct i915_power_well *well; 3501 struct i915_power_well *well;
3485 u32 val;
3486 3502
3487 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 3503 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3488 3504
@@ -3506,44 +3522,23 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
 
 	usleep_range(10, 30); /* 10 us delay per Bspec */
 
-	/* 5. Disable Comp */
-	val = I915_READ(CHICKEN_MISC_2);
-	val |= CNL_COMP_PWR_DOWN;
-	I915_WRITE(CHICKEN_MISC_2, val);
+	/* 5. */
+	cnl_combo_phys_uninit(dev_priv);
 }
 
-static void icl_display_core_init(struct drm_i915_private *dev_priv,
-				  bool resume)
+void icl_display_core_init(struct drm_i915_private *dev_priv,
+			   bool resume)
 {
 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
 	struct i915_power_well *well;
-	enum port port;
-	u32 val;
 
 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 
 	/* 1. Enable PCH reset handshake. */
-	val = I915_READ(HSW_NDE_RSTWRN_OPT);
-	val |= RESET_PCH_HANDSHAKE_ENABLE;
-	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
-
-	for (port = PORT_A; port <= PORT_B; port++) {
-		/* 2. Enable DDI combo PHY comp. */
-		val = I915_READ(ICL_PHY_MISC(port));
-		val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
-		I915_WRITE(ICL_PHY_MISC(port), val);
-
-		cnl_set_procmon_ref_values(dev_priv, port);
-
-		val = I915_READ(ICL_PORT_COMP_DW0(port));
-		val |= COMP_INIT;
-		I915_WRITE(ICL_PORT_COMP_DW0(port), val);
-
-		/* 3. Set power down enable. */
-		val = I915_READ(ICL_PORT_CL_DW5(port));
-		val |= CL_POWER_DOWN_ENABLE;
-		I915_WRITE(ICL_PORT_CL_DW5(port), val);
-	}
+	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
+
+	/* 2-3. */
+	icl_combo_phys_init(dev_priv);
 
 	/*
 	 * 4. Enable Power Well 1 (PG1).
@@ -3567,12 +3562,10 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
 	intel_csr_load_program(dev_priv);
 }
 
-static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
+void icl_display_core_uninit(struct drm_i915_private *dev_priv)
 {
 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
 	struct i915_power_well *well;
-	enum port port;
-	u32 val;
 
 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 
@@ -3594,12 +3587,8 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
 	intel_power_well_disable(dev_priv, well);
 	mutex_unlock(&power_domains->lock);
 
-	/* 5. Disable Comp */
-	for (port = PORT_A; port <= PORT_B; port++) {
-		val = I915_READ(ICL_PHY_MISC(port));
-		val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
-		I915_WRITE(ICL_PHY_MISC(port), val);
-	}
+	/* 5. */
+	icl_combo_phys_uninit(dev_priv);
 }
 
 static void chv_phy_control_init(struct drm_i915_private *dev_priv)
@@ -3757,7 +3746,8 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
 		mutex_lock(&power_domains->lock);
 		vlv_cmnlane_wa(dev_priv);
 		mutex_unlock(&power_domains->lock);
-	}
+	} else if (IS_IVYBRIDGE(dev_priv) || INTEL_GEN(dev_priv) >= 7)
+		intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
 
 	/*
 	 * Keep all power wells enabled for any dependent HW access during
@@ -3951,14 +3941,6 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
 		int domains_count;
 		bool enabled;
 
-		/*
-		 * Power wells not belonging to any domain (like the MISC_IO
-		 * and PW1 power wells) are under FW control, so ignore them,
-		 * since their state can change asynchronously.
-		 */
-		if (!power_well->desc->domains)
-			continue;
-
 		enabled = power_well->desc->ops->is_enabled(dev_priv,
 							    power_well);
 		if ((power_well->count || power_well->desc->always_on) !=
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 701372e512a8..5805ec1aba12 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -105,11 +105,6 @@ struct intel_sdvo {
 	bool has_hdmi_audio;
 	bool rgb_quant_range_selectable;
 
-	/**
-	 * This is sdvo fixed pannel mode pointer
-	 */
-	struct drm_display_mode *sdvo_lvds_fixed_mode;
-
 	/* DDC bus used by this SDVO encoder */
 	uint8_t ddc_bus;
 
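[Note: every later hunk in this file follows from the removal above — the LVDS fixed mode now lives in the generic intel_panel state rather than in an SDVO-private pointer. A hypothetical accessor, not part of the patch, makes the substitution pattern explicit:]

	/* Hypothetical helper (not in the patch): all the call sites below
	 * now read the LVDS fixed mode from the shared panel state. */
	static const struct drm_display_mode *
	sdvo_lvds_fixed_mode(const struct intel_sdvo_connector *connector)
	{
		return connector->base.panel.fixed_mode;
	}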
@@ -765,10 +760,14 @@ intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
 	args.height = height;
 	args.interlace = 0;
 
-	if (IS_LVDS(intel_sdvo_connector) &&
-	    (intel_sdvo->sdvo_lvds_fixed_mode->hdisplay != width ||
-	     intel_sdvo->sdvo_lvds_fixed_mode->vdisplay != height))
-		args.scaled = 1;
+	if (IS_LVDS(intel_sdvo_connector)) {
+		const struct drm_display_mode *fixed_mode =
+			intel_sdvo_connector->base.panel.fixed_mode;
+
+		if (fixed_mode->hdisplay != width ||
+		    fixed_mode->vdisplay != height)
+			args.scaled = 1;
+	}
 
 	return intel_sdvo_set_value(intel_sdvo,
 				    SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
@@ -1123,6 +1122,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
 
 	DRM_DEBUG_KMS("forcing bpc to 8 for SDVO\n");
 	pipe_config->pipe_bpp = 8*3;
+	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
 
 	if (HAS_PCH_SPLIT(to_i915(encoder->base.dev)))
 		pipe_config->has_pch_encoder = true;
@@ -1144,7 +1144,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
 		pipe_config->sdvo_tv_clock = true;
 	} else if (IS_LVDS(intel_sdvo_connector)) {
 		if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
-							     intel_sdvo->sdvo_lvds_fixed_mode))
+							     intel_sdvo_connector->base.panel.fixed_mode))
 			return false;
 
 		(void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
@@ -1301,7 +1301,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
 	/* lvds has a special fixed output timing. */
 	if (IS_LVDS(intel_sdvo_connector))
 		intel_sdvo_get_dtd_from_mode(&output_dtd,
-					     intel_sdvo->sdvo_lvds_fixed_mode);
+					     intel_sdvo_connector->base.panel.fixed_mode);
 	else
 		intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
 	if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd))
@@ -1642,10 +1642,13 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
 		return MODE_CLOCK_HIGH;
 
 	if (IS_LVDS(intel_sdvo_connector)) {
-		if (mode->hdisplay > intel_sdvo->sdvo_lvds_fixed_mode->hdisplay)
+		const struct drm_display_mode *fixed_mode =
+			intel_sdvo_connector->base.panel.fixed_mode;
+
+		if (mode->hdisplay > fixed_mode->hdisplay)
 			return MODE_PANEL;
 
-		if (mode->vdisplay > intel_sdvo->sdvo_lvds_fixed_mode->vdisplay)
+		if (mode->vdisplay > fixed_mode->vdisplay)
 			return MODE_PANEL;
 	}
 
@@ -2058,14 +2061,6 @@ static int intel_sdvo_get_modes(struct drm_connector *connector)
 	return !list_empty(&connector->probed_modes);
 }
 
-static void intel_sdvo_destroy(struct drm_connector *connector)
-{
-	struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
-
-	drm_connector_cleanup(connector);
-	kfree(intel_sdvo_connector);
-}
-
 static int
 intel_sdvo_connector_atomic_get_property(struct drm_connector *connector,
 					 const struct drm_connector_state *state,
@@ -2228,7 +2223,7 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
 	.atomic_set_property = intel_sdvo_connector_atomic_set_property,
 	.late_register = intel_sdvo_connector_register,
 	.early_unregister = intel_sdvo_connector_unregister,
-	.destroy = intel_sdvo_destroy,
+	.destroy = intel_connector_destroy,
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 	.atomic_duplicate_state = intel_sdvo_connector_duplicate_state,
 };
@@ -2267,10 +2262,6 @@ static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
 {
 	struct intel_sdvo *intel_sdvo = to_sdvo(to_intel_encoder(encoder));
 
-	if (intel_sdvo->sdvo_lvds_fixed_mode != NULL)
-		drm_mode_destroy(encoder->dev,
-				 intel_sdvo->sdvo_lvds_fixed_mode);
-
 	i2c_del_adapter(&intel_sdvo->ddc);
 	intel_encoder_destroy(encoder);
 }
@@ -2583,7 +2574,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
 	return true;
 
 err:
-	intel_sdvo_destroy(connector);
+	intel_connector_destroy(connector);
 	return false;
 }
 
@@ -2663,19 +2654,22 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
 
 	list_for_each_entry(mode, &connector->probed_modes, head) {
 		if (mode->type & DRM_MODE_TYPE_PREFERRED) {
-			intel_sdvo->sdvo_lvds_fixed_mode =
+			struct drm_display_mode *fixed_mode =
 				drm_mode_duplicate(connector->dev, mode);
+
+			intel_panel_init(&intel_connector->panel,
+					 fixed_mode, NULL);
 			break;
 		}
 	}
 
-	if (!intel_sdvo->sdvo_lvds_fixed_mode)
+	if (!intel_connector->panel.fixed_mode)
 		goto err;
 
 	return true;
 
 err:
-	intel_sdvo_destroy(connector);
+	intel_connector_destroy(connector);
 	return false;
 }
 
@@ -2745,7 +2739,7 @@ static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo)
 				 &dev->mode_config.connector_list, head) {
 		if (intel_attached_encoder(connector) == &intel_sdvo->base) {
 			drm_connector_unregister(connector);
-			intel_sdvo_destroy(connector);
+			intel_connector_destroy(connector);
 		}
 	}
 }
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index d3090a7537bb..abe193815ccc 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -40,6 +40,7 @@
 #include "intel_frontbuffer.h"
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
+#include <drm/drm_color_mgmt.h>
 
 int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
 			     int usecs)
@@ -275,17 +276,24 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
 	src->y2 = (src_y + src_h) << 16;
 
 	if (fb->format->is_yuv &&
-	    fb->format->format != DRM_FORMAT_NV12 &&
 	    (src_x & 1 || src_w & 1)) {
 		DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of 2 for YUV planes\n",
 			      src_x, src_w);
 		return -EINVAL;
 	}
 
+	if (fb->format->is_yuv &&
+	    fb->format->num_planes > 1 &&
+	    (src_y & 1 || src_h & 1)) {
+		DRM_DEBUG_KMS("src y/h (%u, %u) must be a multiple of 2 for planar YUV planes\n",
+			      src_y, src_h);
+		return -EINVAL;
+	}
+
 	return 0;
 }
 
-unsigned int
+static unsigned int
 skl_plane_max_stride(struct intel_plane *plane,
 		     u32 pixel_format, u64 modifier,
 		     unsigned int rotation)
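[Note: the new src y/h check exists because 4:2:0 planar formats such as NV12 carry chroma at half the luma resolution, so an odd luma offset or size would put the chroma plane at a fractional position. A small standalone sketch of the arithmetic, assuming an 8-bit 4:2:0 layout — names are illustrative, not from the patch:]

	#include <stdbool.h>

	/* Illustrative only: for 4:2:0 planar YUV the chroma rect is the
	 * luma rect divided by two, so src_y/src_h (like src_x/src_w)
	 * must be even for the division to be exact. */
	static bool planar_src_ok(unsigned int src_y, unsigned int src_h)
	{
		unsigned int chroma_y = src_y / 2;	/* must not truncate */
		unsigned int chroma_h = src_h / 2;

		return (chroma_y * 2 == src_y) && (chroma_h * 2 == src_h);
	}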
@@ -328,7 +336,8 @@ skl_program_scaler(struct intel_plane *plane,
 			      0, INT_MAX);
 
 	/* TODO: handle sub-pixel coordinates */
-	if (plane_state->base.fb->format->format == DRM_FORMAT_NV12) {
+	if (plane_state->base.fb->format->format == DRM_FORMAT_NV12 &&
+	    !icl_is_hdr_plane(plane)) {
 		y_hphase = skl_scaler_calc_phase(1, hscale, false);
 		y_vphase = skl_scaler_calc_phase(1, vscale, false);
 
@@ -346,7 +355,6 @@ skl_program_scaler(struct intel_plane *plane,
 
 	I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id),
 		      PS_SCALER_EN | PS_PLANE_SEL(plane->id) | scaler->mode);
-	I915_WRITE_FW(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
 	I915_WRITE_FW(SKL_PS_VPHASE(pipe, scaler_id),
 		      PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
 	I915_WRITE_FW(SKL_PS_HPHASE(pipe, scaler_id),
@@ -355,69 +363,232 @@
 	I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id), (crtc_w << 16) | crtc_h);
 }
 
-void
-skl_update_plane(struct intel_plane *plane,
-		 const struct intel_crtc_state *crtc_state,
-		 const struct intel_plane_state *plane_state)
+/* Preoffset values for YUV to RGB Conversion */
+#define PREOFF_YUV_TO_RGB_HI 0x1800
+#define PREOFF_YUV_TO_RGB_ME 0x1F00
+#define PREOFF_YUV_TO_RGB_LO 0x1800
+
+#define ROFF(x) (((x) & 0xffff) << 16)
+#define GOFF(x) (((x) & 0xffff) << 0)
+#define BOFF(x) (((x) & 0xffff) << 16)
+
+static void
+icl_program_input_csc_coeff(const struct intel_crtc_state *crtc_state,
+			    const struct intel_plane_state *plane_state)
+{
+	struct drm_i915_private *dev_priv =
+		to_i915(plane_state->base.plane->dev);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	enum pipe pipe = crtc->pipe;
+	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+	enum plane_id plane_id = plane->id;
+
+	static const u16 input_csc_matrix[][9] = {
+		/*
+		 * BT.601 full range YCbCr -> full range RGB
+		 * The matrix required is :
+		 * [1.000, 0.000, 1.371,
+		 *  1.000, -0.336, -0.698,
+		 *  1.000, 1.732, 0.0000]
+		 */
+		[DRM_COLOR_YCBCR_BT601] = {
+			0x7AF8, 0x7800, 0x0,
+			0x8B28, 0x7800, 0x9AC0,
+			0x0, 0x7800, 0x7DD8,
+		},
+		/*
+		 * BT.709 full range YCbCr -> full range RGB
+		 * The matrix required is :
+		 * [1.000, 0.000, 1.574,
+		 *  1.000, -0.187, -0.468,
+		 *  1.000, 1.855, 0.0000]
+		 */
+		[DRM_COLOR_YCBCR_BT709] = {
+			0x7C98, 0x7800, 0x0,
+			0x9EF8, 0x7800, 0xABF8,
+			0x0, 0x7800, 0x7ED8,
+		},
+	};
+
+	/* Matrix for Limited Range to Full Range Conversion */
+	static const u16 input_csc_matrix_lr[][9] = {
+		/*
+		 * BT.601 Limited range YCbCr -> full range RGB
+		 * The matrix required is :
+		 * [1.164384, 0.000, 1.596370,
+		 *  1.138393, -0.382500, -0.794598,
+		 *  1.138393, 1.971696, 0.0000]
+		 */
+		[DRM_COLOR_YCBCR_BT601] = {
+			0x7CC8, 0x7950, 0x0,
+			0x8CB8, 0x7918, 0x9C40,
+			0x0, 0x7918, 0x7FC8,
+		},
+		/*
+		 * BT.709 Limited range YCbCr -> full range RGB
+		 * The matrix required is :
+		 * [1.164, 0.000, 1.833671,
+		 *  1.138393, -0.213249, -0.532909,
+		 *  1.138393, 2.112402, 0.0000]
+		 */
+		[DRM_COLOR_YCBCR_BT709] = {
+			0x7EA8, 0x7950, 0x0,
+			0x8888, 0x7918, 0xADA8,
+			0x0, 0x7918, 0x6870,
+		},
+	};
+	const u16 *csc;
+
+	if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+		csc = input_csc_matrix[plane_state->base.color_encoding];
+	else
+		csc = input_csc_matrix_lr[plane_state->base.color_encoding];
+
+	I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0), ROFF(csc[0]) |
+		      GOFF(csc[1]));
+	I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 1), BOFF(csc[2]));
+	I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 2), ROFF(csc[3]) |
+		      GOFF(csc[4]));
+	I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 3), BOFF(csc[5]));
+	I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 4), ROFF(csc[6]) |
+		      GOFF(csc[7]));
+	I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 5), BOFF(csc[8]));
+
+	I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0),
+		      PREOFF_YUV_TO_RGB_HI);
+	I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
+		      PREOFF_YUV_TO_RGB_ME);
+	I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2),
+		      PREOFF_YUV_TO_RGB_LO);
+	I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 0), 0x0);
+	I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 1), 0x0);
+	I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 2), 0x0);
+}
+
+static void
+skl_program_plane(struct intel_plane *plane,
+		  const struct intel_crtc_state *crtc_state,
+		  const struct intel_plane_state *plane_state,
+		  int color_plane, bool slave, u32 plane_ctl)
 {
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	enum plane_id plane_id = plane->id;
 	enum pipe pipe = plane->pipe;
-	u32 plane_ctl = plane_state->ctl;
 	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
-	u32 surf_addr = plane_state->color_plane[0].offset;
-	u32 stride = skl_plane_stride(plane_state, 0);
+	u32 surf_addr = plane_state->color_plane[color_plane].offset;
+	u32 stride = skl_plane_stride(plane_state, color_plane);
 	u32 aux_stride = skl_plane_stride(plane_state, 1);
 	int crtc_x = plane_state->base.dst.x1;
 	int crtc_y = plane_state->base.dst.y1;
-	uint32_t x = plane_state->color_plane[0].x;
-	uint32_t y = plane_state->color_plane[0].y;
+	uint32_t x = plane_state->color_plane[color_plane].x;
+	uint32_t y = plane_state->color_plane[color_plane].y;
 	uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
 	uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
+	struct intel_plane *linked = plane_state->linked_plane;
+	const struct drm_framebuffer *fb = plane_state->base.fb;
+	u8 alpha = plane_state->base.alpha >> 8;
 	unsigned long irqflags;
+	u32 keymsk, keymax;
 
 	/* Sizes are 0 based */
 	src_w--;
 	src_h--;
 
+	keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha);
+
+	keymsk = key->channel_mask & 0x3ffffff;
+	if (alpha < 0xff)
+		keymsk |= PLANE_KEYMSK_ALPHA_ENABLE;
+
+	/* The scaler will handle the output position */
+	if (plane_state->scaler_id >= 0) {
+		crtc_x = 0;
+		crtc_y = 0;
+	}
+
 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
 	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
 		I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id),
 			      plane_state->color_ctl);
 
-	if (key->flags) {
-		I915_WRITE_FW(PLANE_KEYVAL(pipe, plane_id), key->min_value);
-		I915_WRITE_FW(PLANE_KEYMAX(pipe, plane_id), key->max_value);
-		I915_WRITE_FW(PLANE_KEYMSK(pipe, plane_id), key->channel_mask);
-	}
+	if (fb->format->is_yuv && icl_is_hdr_plane(plane))
+		icl_program_input_csc_coeff(crtc_state, plane_state);
+
+	I915_WRITE_FW(PLANE_KEYVAL(pipe, plane_id), key->min_value);
+	I915_WRITE_FW(PLANE_KEYMAX(pipe, plane_id), keymax);
+	I915_WRITE_FW(PLANE_KEYMSK(pipe, plane_id), keymsk);
 
 	I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (y << 16) | x);
 	I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride);
 	I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);
 	I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id),
 		      (plane_state->color_plane[1].offset - surf_addr) | aux_stride);
-	I915_WRITE_FW(PLANE_AUX_OFFSET(pipe, plane_id),
-		      (plane_state->color_plane[1].y << 16) |
-		      plane_state->color_plane[1].x);
 
-	if (plane_state->scaler_id >= 0) {
-		skl_program_scaler(plane, crtc_state, plane_state);
+	if (INTEL_GEN(dev_priv) < 11)
+		I915_WRITE_FW(PLANE_AUX_OFFSET(pipe, plane_id),
+			      (plane_state->color_plane[1].y << 16) |
+			      plane_state->color_plane[1].x);
+
+	if (icl_is_hdr_plane(plane)) {
+		u32 cus_ctl = 0;
+
+		if (linked) {
+			/* Enable and use MPEG-2 chroma siting */
+			cus_ctl = PLANE_CUS_ENABLE |
+				PLANE_CUS_HPHASE_0 |
+				PLANE_CUS_VPHASE_SIGN_NEGATIVE |
+				PLANE_CUS_VPHASE_0_25;
+
+			if (linked->id == PLANE_SPRITE5)
+				cus_ctl |= PLANE_CUS_PLANE_7;
+			else if (linked->id == PLANE_SPRITE4)
+				cus_ctl |= PLANE_CUS_PLANE_6;
+			else
+				MISSING_CASE(linked->id);
+		}
 
-		I915_WRITE_FW(PLANE_POS(pipe, plane_id), 0);
-	} else {
-		I915_WRITE_FW(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x);
+		I915_WRITE_FW(PLANE_CUS_CTL(pipe, plane_id), cus_ctl);
 	}
 
+	if (!slave && plane_state->scaler_id >= 0)
+		skl_program_scaler(plane, crtc_state, plane_state);
+
+	I915_WRITE_FW(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x);
+
 	I915_WRITE_FW(PLANE_CTL(pipe, plane_id), plane_ctl);
 	I915_WRITE_FW(PLANE_SURF(pipe, plane_id),
 		      intel_plane_ggtt_offset(plane_state) + surf_addr);
-	POSTING_READ_FW(PLANE_SURF(pipe, plane_id));
 
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
-void
+static void
+skl_update_plane(struct intel_plane *plane,
+		 const struct intel_crtc_state *crtc_state,
+		 const struct intel_plane_state *plane_state)
+{
+	int color_plane = 0;
+
+	if (plane_state->linked_plane) {
+		/* Program the UV plane */
+		color_plane = 1;
+	}
+
+	skl_program_plane(plane, crtc_state, plane_state,
+			  color_plane, false, plane_state->ctl);
+}
+
+static void
+icl_update_slave(struct intel_plane *plane,
+		 const struct intel_crtc_state *crtc_state,
+		 const struct intel_plane_state *plane_state)
+{
+	skl_program_plane(plane, crtc_state, plane_state, 0, true,
+			  plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE);
+}
+
+static void
 skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
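[Note: the PLANE_INPUT_CSC_COEFF values added above are fixed-point encodings of the matrices quoted in the code comments. For reference, this is what the BT.601 full-range matrix does in plain floating point; the sketch is illustrative, uses none of the hardware's encodings, and assumes 8-bit-per-component input with chroma centered on 128:]

	/* Illustrative only: apply the BT.601 full-range YCbCr -> RGB
	 * matrix quoted in the comment above, in floating point. */
	static void bt601_full_to_rgb(double y, double cb, double cr,
				      double *r, double *g, double *b)
	{
		cb -= 128.0;
		cr -= 128.0;

		*r = y + 1.371 * cr;
		*g = y - 0.336 * cb - 0.698 * cr;
		*b = y + 1.732 * cb;
	}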
@@ -428,14 +599,12 @@ skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
 	I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0);
-
 	I915_WRITE_FW(PLANE_SURF(pipe, plane_id), 0);
-	POSTING_READ_FW(PLANE_SURF(pipe, plane_id));
 
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
-bool
+static bool
 skl_plane_get_hw_state(struct intel_plane *plane,
 		       enum pipe *pipe)
 {
@@ -628,7 +797,6 @@ vlv_update_plane(struct intel_plane *plane,
 		 const struct intel_plane_state *plane_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-	const struct drm_framebuffer *fb = plane_state->base.fb;
 	enum pipe pipe = plane->pipe;
 	enum plane_id plane_id = plane->id;
 	u32 sprctl = plane_state->ctl;
@@ -665,10 +833,8 @@ vlv_update_plane(struct intel_plane *plane,
 		      plane_state->color_plane[0].stride);
 	I915_WRITE_FW(SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x);
 
-	if (fb->modifier == I915_FORMAT_MOD_X_TILED)
-		I915_WRITE_FW(SPTILEOFF(pipe, plane_id), (y << 16) | x);
-	else
-		I915_WRITE_FW(SPLINOFF(pipe, plane_id), linear_offset);
+	I915_WRITE_FW(SPTILEOFF(pipe, plane_id), (y << 16) | x);
+	I915_WRITE_FW(SPLINOFF(pipe, plane_id), linear_offset);
 
 	I915_WRITE_FW(SPCONSTALPHA(pipe, plane_id), 0);
 
@@ -676,7 +842,6 @@ vlv_update_plane(struct intel_plane *plane,
 	I915_WRITE_FW(SPCNTR(pipe, plane_id), sprctl);
 	I915_WRITE_FW(SPSURF(pipe, plane_id),
 		      intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
-	POSTING_READ_FW(SPSURF(pipe, plane_id));
 
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
@@ -692,9 +857,7 @@ vlv_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
 	I915_WRITE_FW(SPCNTR(pipe, plane_id), 0);
-
 	I915_WRITE_FW(SPSURF(pipe, plane_id), 0);
-	POSTING_READ_FW(SPSURF(pipe, plane_id));
 
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
@@ -789,7 +952,6 @@ ivb_update_plane(struct intel_plane *plane,
 		 const struct intel_plane_state *plane_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-	const struct drm_framebuffer *fb = plane_state->base.fb;
 	enum pipe pipe = plane->pipe;
 	u32 sprctl = plane_state->ctl, sprscale = 0;
 	u32 sprsurf_offset = plane_state->color_plane[0].offset;
@@ -829,12 +991,12 @@ ivb_update_plane(struct intel_plane *plane,
 
 	/* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
 	 * register */
-	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
 		I915_WRITE_FW(SPROFFSET(pipe), (y << 16) | x);
-	else if (fb->modifier == I915_FORMAT_MOD_X_TILED)
+	} else {
 		I915_WRITE_FW(SPRTILEOFF(pipe), (y << 16) | x);
-	else
 		I915_WRITE_FW(SPRLINOFF(pipe), linear_offset);
+	}
 
 	I915_WRITE_FW(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
 	if (IS_IVYBRIDGE(dev_priv))
@@ -842,7 +1004,6 @@ ivb_update_plane(struct intel_plane *plane,
 	I915_WRITE_FW(SPRCTL(pipe), sprctl);
 	I915_WRITE_FW(SPRSURF(pipe),
 		      intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
-	POSTING_READ_FW(SPRSURF(pipe));
 
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
@@ -860,9 +1021,7 @@ ivb_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
 	/* Can't leave the scaler enabled... */
 	if (IS_IVYBRIDGE(dev_priv))
 		I915_WRITE_FW(SPRSCALE(pipe), 0);
-
 	I915_WRITE_FW(SPRSURF(pipe), 0);
-	POSTING_READ_FW(SPRSURF(pipe));
 
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
@@ -961,7 +1120,6 @@ g4x_update_plane(struct intel_plane *plane,
 		 const struct intel_plane_state *plane_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-	const struct drm_framebuffer *fb = plane_state->base.fb;
 	enum pipe pipe = plane->pipe;
 	u32 dvscntr = plane_state->ctl, dvsscale = 0;
 	u32 dvssurf_offset = plane_state->color_plane[0].offset;
@@ -999,17 +1157,14 @@ g4x_update_plane(struct intel_plane *plane,
 	I915_WRITE_FW(DVSSTRIDE(pipe), plane_state->color_plane[0].stride);
 	I915_WRITE_FW(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
 
-	if (fb->modifier == I915_FORMAT_MOD_X_TILED)
-		I915_WRITE_FW(DVSTILEOFF(pipe), (y << 16) | x);
-	else
-		I915_WRITE_FW(DVSLINOFF(pipe), linear_offset);
+	I915_WRITE_FW(DVSTILEOFF(pipe), (y << 16) | x);
+	I915_WRITE_FW(DVSLINOFF(pipe), linear_offset);
 
 	I915_WRITE_FW(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
 	I915_WRITE_FW(DVSSCALE(pipe), dvsscale);
 	I915_WRITE_FW(DVSCNTR(pipe), dvscntr);
 	I915_WRITE_FW(DVSSURF(pipe),
 		      intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
-	POSTING_READ_FW(DVSSURF(pipe));
 
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
@@ -1026,9 +1181,7 @@ g4x_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
 	I915_WRITE_FW(DVSCNTR(pipe), 0);
 	/* Disable the scaler */
 	I915_WRITE_FW(DVSSCALE(pipe), 0);
-
 	I915_WRITE_FW(DVSSURF(pipe), 0);
-	POSTING_READ_FW(DVSSURF(pipe));
 
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
@@ -1054,6 +1207,19 @@ g4x_plane_get_hw_state(struct intel_plane *plane,
 	return ret;
 }
 
+static bool intel_fb_scalable(const struct drm_framebuffer *fb)
+{
+	if (!fb)
+		return false;
+
+	switch (fb->format->format) {
+	case DRM_FORMAT_C8:
+		return false;
+	default:
+		return true;
+	}
+}
+
 static int
 g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state,
 			 struct intel_plane_state *plane_state)
@@ -1121,18 +1287,18 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state,
 {
 	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-	int max_scale, min_scale;
+	int min_scale = DRM_PLANE_HELPER_NO_SCALING;
+	int max_scale = DRM_PLANE_HELPER_NO_SCALING;
 	int ret;
 
-	if (INTEL_GEN(dev_priv) < 7) {
-		min_scale = 1;
-		max_scale = 16 << 16;
-	} else if (IS_IVYBRIDGE(dev_priv)) {
-		min_scale = 1;
-		max_scale = 2 << 16;
-	} else {
-		min_scale = DRM_PLANE_HELPER_NO_SCALING;
-		max_scale = DRM_PLANE_HELPER_NO_SCALING;
+	if (intel_fb_scalable(plane_state->base.fb)) {
+		if (INTEL_GEN(dev_priv) < 7) {
+			min_scale = 1;
+			max_scale = 16 << 16;
+		} else if (IS_IVYBRIDGE(dev_priv)) {
+			min_scale = 1;
+			max_scale = 2 << 16;
+		}
 	}
 
 	ret = drm_atomic_helper_check_plane_state(&plane_state->base,
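[Note: the scale limits passed to drm_atomic_helper_check_plane_state() are src/dst ratios in 16.16 fixed point: DRM_PLANE_HELPER_NO_SCALING is 1 << 16 (a ratio of exactly 1.0), "16 << 16" permits the source to be up to 16x the destination (downscaling), and a min_scale of 1 effectively allows unbounded upscaling. A quick illustrative sketch of the ratio computation, not from the patch:]

	#include <stdint.h>

	/* Illustrative only: a plane scaling factor in the 16.16
	 * fixed-point convention used by the DRM plane helpers
	 * (source size over destination size). */
	static inline uint32_t scale_16_16(uint32_t src, uint32_t dst)
	{
		return (uint32_t)(((uint64_t)src << 16) / dst);
	}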
@@ -1219,6 +1385,8 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state,
 static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
 			      const struct intel_plane_state *plane_state)
 {
+	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	const struct drm_framebuffer *fb = plane_state->base.fb;
 	unsigned int rotation = plane_state->base.rotation;
 	struct drm_format_name_buf format_name;
@@ -1247,13 +1415,17 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
 	}
 
 	/*
-	 * 90/270 is not allowed with RGB64 16:16:16:16,
-	 * RGB 16-bit 5:6:5, and Indexed 8-bit.
-	 * TBD: Add RGB64 case once its added in supported format list.
+	 * 90/270 is not allowed with RGB64 16:16:16:16 and
+	 * Indexed 8-bit. RGB 16-bit 5:6:5 is allowed gen11 onwards.
+	 * TBD: Add RGB64 case once it's added in the supported format
+	 * list.
 	 */
 	switch (fb->format->format) {
-	case DRM_FORMAT_C8:
 	case DRM_FORMAT_RGB565:
+		if (INTEL_GEN(dev_priv) >= 11)
+			break;
+		/* fall through */
+	case DRM_FORMAT_C8:
 		DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n",
 			      drm_get_format_name(fb->format->format,
 						  &format_name));
@@ -1307,12 +1479,31 @@ static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_s
 	return 0;
 }
 
-int skl_plane_check(struct intel_crtc_state *crtc_state,
-		    struct intel_plane_state *plane_state)
+static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_state)
+{
+	const struct drm_framebuffer *fb = plane_state->base.fb;
+	unsigned int rotation = plane_state->base.rotation;
+	int src_w = drm_rect_width(&plane_state->base.src) >> 16;
+
+	/* Display WA #1106 */
+	if (fb->format->format == DRM_FORMAT_NV12 && src_w & 3 &&
+	    (rotation == DRM_MODE_ROTATE_270 ||
+	     rotation == (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90))) {
+		DRM_DEBUG_KMS("src width must be multiple of 4 for rotated NV12\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int skl_plane_check(struct intel_crtc_state *crtc_state,
+			   struct intel_plane_state *plane_state)
 {
 	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-	int max_scale, min_scale;
+	const struct drm_framebuffer *fb = plane_state->base.fb;
+	int min_scale = DRM_PLANE_HELPER_NO_SCALING;
+	int max_scale = DRM_PLANE_HELPER_NO_SCALING;
 	int ret;
 
 	ret = skl_plane_check_fb(crtc_state, plane_state);
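[Note: the new check implements Display WA #1106: a 90/270-rotated NV12 source must have a width that is a multiple of 4, which is what the "src_w & 3" test expresses. The equivalent standalone form, for reference only:]

	#include <stdbool.h>

	/* Illustrative only: src_w & 3 is a branch-free way of writing
	 * src_w % 4 != 0, i.e. "width is not a multiple of 4". */
	static inline bool nv12_rotated_width_ok(int src_w)
	{
		return (src_w & 3) == 0;
	}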
@@ -1320,15 +1511,9 @@ int skl_plane_check(struct intel_crtc_state *crtc_state,
 		return ret;
 
 	/* use scaler when colorkey is not required */
-	if (!plane_state->ckey.flags) {
-		const struct drm_framebuffer *fb = plane_state->base.fb;
-
+	if (!plane_state->ckey.flags && intel_fb_scalable(fb)) {
 		min_scale = 1;
-		max_scale = skl_max_scale(crtc_state,
-					  fb ? fb->format->format : 0);
-	} else {
-		min_scale = DRM_PLANE_HELPER_NO_SCALING;
-		max_scale = DRM_PLANE_HELPER_NO_SCALING;
+		max_scale = skl_max_scale(crtc_state, fb->format->format);
 	}
 
 	ret = drm_atomic_helper_check_plane_state(&plane_state->base,
@@ -1349,10 +1534,18 @@ int skl_plane_check(struct intel_crtc_state *crtc_state,
 	if (ret)
 		return ret;
 
+	ret = skl_plane_check_nv12_rotation(plane_state);
+	if (ret)
+		return ret;
+
 	ret = skl_check_plane_surface(plane_state);
 	if (ret)
 		return ret;
 
+	/* HW only has 8 bits pixel precision, disable plane if invisible */
+	if (!(plane_state->base.alpha >> 8))
+		plane_state->base.visible = false;
+
 	plane_state->ctl = skl_plane_ctl(crtc_state, plane_state);
 
 	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
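[Note: the invisibility test above works because the DRM per-plane alpha property is 16 bits wide (0x0000-0xffff) while the hardware honours only 8 bits; plane_state->base.alpha >> 8 is the value the patch programs via PLANE_KEYMAX_ALPHA(), and a result of zero (any alpha below 0x100) means the plane cannot be visible at all. In isolation, as an illustrative sketch:]

	#include <stdint.h>

	/* Illustrative only: reduce the 16-bit DRM alpha property to the
	 * 8 bits of precision the display hardware consumes. */
	static inline uint8_t hw_plane_alpha(uint16_t drm_alpha)
	{
		return drm_alpha >> 8;	/* 0 => plane is invisible */
	}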
@@ -1517,24 +1710,30 @@ static const uint32_t vlv_plane_formats[] = {
 	DRM_FORMAT_VYUY,
 };
 
-static uint32_t skl_plane_formats[] = {
+static const uint32_t skl_plane_formats[] = {
+	DRM_FORMAT_C8,
 	DRM_FORMAT_RGB565,
-	DRM_FORMAT_ABGR8888,
-	DRM_FORMAT_ARGB8888,
-	DRM_FORMAT_XBGR8888,
 	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_XBGR8888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_ABGR8888,
+	DRM_FORMAT_XRGB2101010,
+	DRM_FORMAT_XBGR2101010,
 	DRM_FORMAT_YUYV,
 	DRM_FORMAT_YVYU,
 	DRM_FORMAT_UYVY,
 	DRM_FORMAT_VYUY,
 };
 
-static uint32_t skl_planar_formats[] = {
+static const uint32_t skl_planar_formats[] = {
+	DRM_FORMAT_C8,
 	DRM_FORMAT_RGB565,
-	DRM_FORMAT_ABGR8888,
-	DRM_FORMAT_ARGB8888,
-	DRM_FORMAT_XBGR8888,
 	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_XBGR8888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_ABGR8888,
+	DRM_FORMAT_XRGB2101010,
+	DRM_FORMAT_XBGR2101010,
 	DRM_FORMAT_YUYV,
 	DRM_FORMAT_YVYU,
 	DRM_FORMAT_UYVY,
@@ -1739,8 +1938,36 @@ static const struct drm_plane_funcs skl_plane_funcs = {
 	.format_mod_supported = skl_plane_format_mod_supported,
 };
 
-bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
-		       enum pipe pipe, enum plane_id plane_id)
+static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
+			      enum pipe pipe, enum plane_id plane_id)
+{
+	if (!HAS_FBC(dev_priv))
+		return false;
+
+	return pipe == PIPE_A && plane_id == PLANE_PRIMARY;
+}
+
+static bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
+				 enum pipe pipe, enum plane_id plane_id)
+{
+	if (INTEL_GEN(dev_priv) >= 11)
+		return plane_id <= PLANE_SPRITE3;
+
+	/* Display WA #0870: skl, bxt */
+	if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
+		return false;
+
+	if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C)
+		return false;
+
+	if (plane_id != PLANE_PRIMARY && plane_id != PLANE_SPRITE0)
+		return false;
+
+	return true;
+}
+
+static bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
+			      enum pipe pipe, enum plane_id plane_id)
 {
 	if (plane_id == PLANE_CURSOR)
 		return false;
@@ -1757,109 +1984,173 @@ bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
 }
 
 struct intel_plane *
-intel_sprite_plane_create(struct drm_i915_private *dev_priv,
-			  enum pipe pipe, int plane)
+skl_universal_plane_create(struct drm_i915_private *dev_priv,
+			   enum pipe pipe, enum plane_id plane_id)
 {
-	struct intel_plane *intel_plane = NULL;
-	struct intel_plane_state *state = NULL;
-	const struct drm_plane_funcs *plane_funcs;
-	unsigned long possible_crtcs;
-	const uint32_t *plane_formats;
-	const uint64_t *modifiers;
+	struct intel_plane *plane;
+	enum drm_plane_type plane_type;
 	unsigned int supported_rotations;
-	int num_plane_formats;
+	unsigned int possible_crtcs;
+	const u64 *modifiers;
+	const u32 *formats;
+	int num_formats;
 	int ret;
 
-	intel_plane = kzalloc(sizeof(*intel_plane), GFP_KERNEL);
-	if (!intel_plane) {
-		ret = -ENOMEM;
-		goto fail;
+	plane = intel_plane_alloc();
+	if (IS_ERR(plane))
+		return plane;
+
+	plane->pipe = pipe;
+	plane->id = plane_id;
+	plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane_id);
+
+	plane->has_fbc = skl_plane_has_fbc(dev_priv, pipe, plane_id);
+	if (plane->has_fbc) {
+		struct intel_fbc *fbc = &dev_priv->fbc;
+
+		fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
 	}
 
-	state = intel_create_plane_state(&intel_plane->base);
-	if (!state) {
-		ret = -ENOMEM;
-		goto fail;
+	plane->max_stride = skl_plane_max_stride;
+	plane->update_plane = skl_update_plane;
+	plane->disable_plane = skl_disable_plane;
+	plane->get_hw_state = skl_plane_get_hw_state;
+	plane->check_plane = skl_plane_check;
+	if (icl_is_nv12_y_plane(plane_id))
+		plane->update_slave = icl_update_slave;
+
+	if (skl_plane_has_planar(dev_priv, pipe, plane_id)) {
+		formats = skl_planar_formats;
+		num_formats = ARRAY_SIZE(skl_planar_formats);
+	} else {
+		formats = skl_plane_formats;
+		num_formats = ARRAY_SIZE(skl_plane_formats);
 	}
-	intel_plane->base.state = &state->base;
 
-	if (INTEL_GEN(dev_priv) >= 9) {
-		state->scaler_id = -1;
+	plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id);
+	if (plane->has_ccs)
+		modifiers = skl_plane_format_modifiers_ccs;
+	else
+		modifiers = skl_plane_format_modifiers_noccs;
 
-		intel_plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe,
-							 PLANE_SPRITE0 + plane);
+	if (plane_id == PLANE_PRIMARY)
+		plane_type = DRM_PLANE_TYPE_PRIMARY;
+	else
+		plane_type = DRM_PLANE_TYPE_OVERLAY;
 
-		intel_plane->max_stride = skl_plane_max_stride;
-		intel_plane->update_plane = skl_update_plane;
-		intel_plane->disable_plane = skl_disable_plane;
-		intel_plane->get_hw_state = skl_plane_get_hw_state;
-		intel_plane->check_plane = skl_plane_check;
+	possible_crtcs = BIT(pipe);
 
-		if (skl_plane_has_planar(dev_priv, pipe,
-					 PLANE_SPRITE0 + plane)) {
-			plane_formats = skl_planar_formats;
-			num_plane_formats = ARRAY_SIZE(skl_planar_formats);
-		} else {
-			plane_formats = skl_plane_formats;
-			num_plane_formats = ARRAY_SIZE(skl_plane_formats);
-		}
+	ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+				       possible_crtcs, &skl_plane_funcs,
+				       formats, num_formats, modifiers,
+				       plane_type,
+				       "plane %d%c", plane_id + 1,
+				       pipe_name(pipe));
+	if (ret)
+		goto fail;
+
+	supported_rotations =
+		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
+		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
+
+	if (INTEL_GEN(dev_priv) >= 10)
+		supported_rotations |= DRM_MODE_REFLECT_X;
+
+	drm_plane_create_rotation_property(&plane->base,
+					   DRM_MODE_ROTATE_0,
+					   supported_rotations);
+
+	drm_plane_create_color_properties(&plane->base,
+					  BIT(DRM_COLOR_YCBCR_BT601) |
+					  BIT(DRM_COLOR_YCBCR_BT709),
+					  BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
+					  BIT(DRM_COLOR_YCBCR_FULL_RANGE),
+					  DRM_COLOR_YCBCR_BT709,
+					  DRM_COLOR_YCBCR_LIMITED_RANGE);
+
+	drm_plane_create_alpha_property(&plane->base);
+	drm_plane_create_blend_mode_property(&plane->base,
+					     BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+					     BIT(DRM_MODE_BLEND_PREMULTI) |
+					     BIT(DRM_MODE_BLEND_COVERAGE));
+
+	drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
+
+	return plane;
+
+fail:
+	intel_plane_free(plane);
+
+	return ERR_PTR(ret);
+}
 
-		if (intel_plane->has_ccs)
-			modifiers = skl_plane_format_modifiers_ccs;
-		else
-			modifiers = skl_plane_format_modifiers_noccs;
-
-		plane_funcs = &skl_plane_funcs;
-	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-		intel_plane->max_stride = i9xx_plane_max_stride;
-		intel_plane->update_plane = vlv_update_plane;
-		intel_plane->disable_plane = vlv_disable_plane;
-		intel_plane->get_hw_state = vlv_plane_get_hw_state;
-		intel_plane->check_plane = vlv_sprite_check;
-
-		plane_formats = vlv_plane_formats;
-		num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
+struct intel_plane *
+intel_sprite_plane_create(struct drm_i915_private *dev_priv,
+			  enum pipe pipe, int sprite)
+{
+	struct intel_plane *plane;
+	const struct drm_plane_funcs *plane_funcs;
+	unsigned long possible_crtcs;
+	unsigned int supported_rotations;
+	const u64 *modifiers;
+	const u32 *formats;
+	int num_formats;
+	int ret;
+
+	if (INTEL_GEN(dev_priv) >= 9)
+		return skl_universal_plane_create(dev_priv, pipe,
+						  PLANE_SPRITE0 + sprite);
+
+	plane = intel_plane_alloc();
+	if (IS_ERR(plane))
+		return plane;
+
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+		plane->max_stride = i9xx_plane_max_stride;
+		plane->update_plane = vlv_update_plane;
+		plane->disable_plane = vlv_disable_plane;
+		plane->get_hw_state = vlv_plane_get_hw_state;
+		plane->check_plane = vlv_sprite_check;
+
+		formats = vlv_plane_formats;
+		num_formats = ARRAY_SIZE(vlv_plane_formats);
 		modifiers = i9xx_plane_format_modifiers;
 
 		plane_funcs = &vlv_sprite_funcs;
 	} else if (INTEL_GEN(dev_priv) >= 7) {
-		intel_plane->max_stride = g4x_sprite_max_stride;
-		intel_plane->update_plane = ivb_update_plane;
-		intel_plane->disable_plane = ivb_disable_plane;
-		intel_plane->get_hw_state = ivb_plane_get_hw_state;
-		intel_plane->check_plane = g4x_sprite_check;
+		plane->max_stride = g4x_sprite_max_stride;
+		plane->update_plane = ivb_update_plane;
+		plane->disable_plane = ivb_disable_plane;
+		plane->get_hw_state = ivb_plane_get_hw_state;
+		plane->check_plane = g4x_sprite_check;
 
-		plane_formats = snb_plane_formats;
-		num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+		formats = snb_plane_formats;
+		num_formats = ARRAY_SIZE(snb_plane_formats);
 		modifiers = i9xx_plane_format_modifiers;
 
 		plane_funcs = &snb_sprite_funcs;
 	} else {
-		intel_plane->max_stride = g4x_sprite_max_stride;
-		intel_plane->update_plane = g4x_update_plane;
-		intel_plane->disable_plane = g4x_disable_plane;
-		intel_plane->get_hw_state = g4x_plane_get_hw_state;
-		intel_plane->check_plane = g4x_sprite_check;
+		plane->max_stride = g4x_sprite_max_stride;
+		plane->update_plane = g4x_update_plane;
+		plane->disable_plane = g4x_disable_plane;
+		plane->get_hw_state = g4x_plane_get_hw_state;
+		plane->check_plane = g4x_sprite_check;
 
 		modifiers = i9xx_plane_format_modifiers;
 		if (IS_GEN6(dev_priv)) {
-			plane_formats = snb_plane_formats;
-			num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+			formats = snb_plane_formats;
+			num_formats = ARRAY_SIZE(snb_plane_formats);
 
 			plane_funcs = &snb_sprite_funcs;
 		} else {
-			plane_formats = g4x_plane_formats;
-			num_plane_formats = ARRAY_SIZE(g4x_plane_formats);
+			formats = g4x_plane_formats;
+			num_formats = ARRAY_SIZE(g4x_plane_formats);
 
 			plane_funcs = &g4x_sprite_funcs;
 		}
 	}
 
-	if (INTEL_GEN(dev_priv) >= 9) {
-		supported_rotations =
-			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
-			DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
-	} else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
+	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
 		supported_rotations =
 			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
 			DRM_MODE_REFLECT_X;
@@ -1868,35 +2159,25 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
1868 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180; 2159 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
1869 } 2160 }
1870 2161
1871 intel_plane->pipe = pipe; 2162 plane->pipe = pipe;
1872 intel_plane->i9xx_plane = plane; 2163 plane->id = PLANE_SPRITE0 + sprite;
1873 intel_plane->id = PLANE_SPRITE0 + plane; 2164 plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
1874 intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, intel_plane->id);
1875 2165
1876 possible_crtcs = (1 << pipe); 2166 possible_crtcs = BIT(pipe);
1877 2167
1878 if (INTEL_GEN(dev_priv) >= 9) 2168 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
1879 ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base, 2169 possible_crtcs, plane_funcs,
1880 possible_crtcs, plane_funcs, 2170 formats, num_formats, modifiers,
1881 plane_formats, num_plane_formats, 2171 DRM_PLANE_TYPE_OVERLAY,
1882 modifiers, 2172 "sprite %c", sprite_name(pipe, sprite));
1883 DRM_PLANE_TYPE_OVERLAY,
1884 "plane %d%c", plane + 2, pipe_name(pipe));
1885 else
1886 ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base,
1887 possible_crtcs, plane_funcs,
1888 plane_formats, num_plane_formats,
1889 modifiers,
1890 DRM_PLANE_TYPE_OVERLAY,
1891 "sprite %c", sprite_name(pipe, plane));
1892 if (ret) 2173 if (ret)
1893 goto fail; 2174 goto fail;
1894 2175
1895 drm_plane_create_rotation_property(&intel_plane->base, 2176 drm_plane_create_rotation_property(&plane->base,
1896 DRM_MODE_ROTATE_0, 2177 DRM_MODE_ROTATE_0,
1897 supported_rotations); 2178 supported_rotations);
1898 2179
1899 drm_plane_create_color_properties(&intel_plane->base, 2180 drm_plane_create_color_properties(&plane->base,
1900 BIT(DRM_COLOR_YCBCR_BT601) | 2181 BIT(DRM_COLOR_YCBCR_BT601) |
1901 BIT(DRM_COLOR_YCBCR_BT709), 2182 BIT(DRM_COLOR_YCBCR_BT709),
1902 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) | 2183 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
@@ -1904,13 +2185,12 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
1904 DRM_COLOR_YCBCR_BT709, 2185 DRM_COLOR_YCBCR_BT709,
1905 DRM_COLOR_YCBCR_LIMITED_RANGE); 2186 DRM_COLOR_YCBCR_LIMITED_RANGE);
1906 2187
1907 drm_plane_helper_add(&intel_plane->base, &intel_plane_helper_funcs); 2188 drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
1908 2189
1909 return intel_plane; 2190 return plane;
1910 2191
1911fail: 2192fail:
1912 kfree(state); 2193 intel_plane_free(plane);
1913 kfree(intel_plane);
1914 2194
1915 return ERR_PTR(ret); 2195 return ERR_PTR(ret);
1916} 2196}
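The converted error paths above rely on intel_plane_alloc()/intel_plane_free(), whose bodies fall outside this hunk. A minimal sketch of the pair, assuming the usual kzalloc-based embedding of the initial plane state (reconstructed, not quoted from the patch):

struct intel_plane *intel_plane_alloc(void)
{
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;

	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
	if (!plane)
		return ERR_PTR(-ENOMEM);

	plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
	if (!plane_state) {
		kfree(plane);
		return ERR_PTR(-ENOMEM);
	}

	/* bind the preallocated state so drm_plane_funcs.reset can use it */
	__drm_atomic_helper_plane_reset(&plane->base, &plane_state->base);

	return plane;
}

void intel_plane_free(struct intel_plane *plane)
{
	intel_plane_destroy_state(&plane->base, plane->base.state);
	kfree(plane);
}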
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index b5b04cb892e9..860f306a23ba 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -885,6 +885,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
885 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) 885 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
886 return false; 886 return false;
887 887
888 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
888 adjusted_mode->crtc_clock = tv_mode->clock; 889 adjusted_mode->crtc_clock = tv_mode->clock;
889 DRM_DEBUG_KMS("forcing bpc to 8 for TV\n"); 890 DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
890 pipe_config->pipe_bpp = 8*3; 891 pipe_config->pipe_bpp = 8*3;
@@ -1377,17 +1378,10 @@ intel_tv_get_modes(struct drm_connector *connector)
1377 return count; 1378 return count;
1378} 1379}
1379 1380
1380static void
1381intel_tv_destroy(struct drm_connector *connector)
1382{
1383 drm_connector_cleanup(connector);
1384 kfree(connector);
1385}
1386
1387static const struct drm_connector_funcs intel_tv_connector_funcs = { 1381static const struct drm_connector_funcs intel_tv_connector_funcs = {
1388 .late_register = intel_connector_register, 1382 .late_register = intel_connector_register,
1389 .early_unregister = intel_connector_unregister, 1383 .early_unregister = intel_connector_unregister,
1390 .destroy = intel_tv_destroy, 1384 .destroy = intel_connector_destroy,
1391 .fill_modes = drm_helper_probe_single_connector_modes, 1385 .fill_modes = drm_helper_probe_single_connector_modes,
1392 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 1386 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
1393 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, 1387 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index b1b3e81b6e24..b34c318b238d 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -376,7 +376,7 @@ int intel_uc_init_hw(struct drm_i915_private *i915)
376 376
377 intel_guc_init_params(guc); 377 intel_guc_init_params(guc);
378 ret = intel_guc_fw_upload(guc); 378 ret = intel_guc_fw_upload(guc);
379 if (ret == 0 || ret != -EAGAIN) 379 if (ret == 0 || ret != -ETIMEDOUT)
380 break; 380 break;
381 381
382 DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and " 382 DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and "
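With this change only a DMA timeout triggers another upload attempt; any other failure aborts immediately. The surrounding retry loop is elided from the hunk; a sketch of its shape, with the attempt count and reset step assumed:

	int attempts = 3;	/* assumed; gen9 retries the DMA a few times */

	while (attempts--) {
		/* reset the GuC first so load timing stays predictable */

		intel_guc_init_params(guc);
		ret = intel_guc_fw_upload(guc);
		if (ret == 0 || ret != -ETIMEDOUT)
			break;	/* success, or an error a reset cannot cure */
	}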
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.h b/drivers/gpu/drm/i915/intel_uc_fw.h
index 87910aa83267..0e3bd580e267 100644
--- a/drivers/gpu/drm/i915/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/intel_uc_fw.h
@@ -115,9 +115,14 @@ static inline bool intel_uc_fw_is_selected(struct intel_uc_fw *uc_fw)
115 return uc_fw->path != NULL; 115 return uc_fw->path != NULL;
116} 116}
117 117
118static inline bool intel_uc_fw_is_loaded(struct intel_uc_fw *uc_fw)
119{
120 return uc_fw->load_status == INTEL_UC_FIRMWARE_SUCCESS;
121}
122
118static inline void intel_uc_fw_sanitize(struct intel_uc_fw *uc_fw) 123static inline void intel_uc_fw_sanitize(struct intel_uc_fw *uc_fw)
119{ 124{
120 if (uc_fw->load_status == INTEL_UC_FIRMWARE_SUCCESS) 125 if (intel_uc_fw_is_loaded(uc_fw))
121 uc_fw->load_status = INTEL_UC_FIRMWARE_PENDING; 126 uc_fw->load_status = INTEL_UC_FIRMWARE_PENDING;
122} 127}
123 128
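The new intel_uc_fw_is_loaded() predicate gives call sites a single place to test the load state instead of comparing load_status directly; a hypothetical caller:

	if (!intel_uc_fw_is_loaded(&guc->fw))
		return -ENODEV;	/* illustrative guard, not from the patch */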
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 3ad302c66254..9289515108c3 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1437,7 +1437,7 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
1437 FORCEWAKE_MEDIA_VEBOX_GEN11(i), 1437 FORCEWAKE_MEDIA_VEBOX_GEN11(i),
1438 FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i)); 1438 FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
1439 } 1439 }
1440 } else if (IS_GEN9(dev_priv) || IS_GEN10(dev_priv)) { 1440 } else if (IS_GEN10(dev_priv) || IS_GEN9(dev_priv)) {
1441 dev_priv->uncore.funcs.force_wake_get = 1441 dev_priv->uncore.funcs.force_wake_get =
1442 fw_domains_get_with_fallback; 1442 fw_domains_get_with_fallback;
1443 dev_priv->uncore.funcs.force_wake_put = fw_domains_put; 1443 dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
index bba98cf83cbd..bf3662ad5fed 100644
--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
@@ -326,6 +326,13 @@ enum vbt_gmbus_ddi {
326 ICL_DDC_BUS_PORT_4, 326 ICL_DDC_BUS_PORT_4,
327}; 327};
328 328
329#define DP_AUX_A 0x40
330#define DP_AUX_B 0x10
331#define DP_AUX_C 0x20
332#define DP_AUX_D 0x30
333#define DP_AUX_E 0x50
334#define DP_AUX_F 0x60
335
329#define VBT_DP_MAX_LINK_RATE_HBR3 0 336#define VBT_DP_MAX_LINK_RATE_HBR3 0
330#define VBT_DP_MAX_LINK_RATE_HBR2 1 337#define VBT_DP_MAX_LINK_RATE_HBR2 1
331#define VBT_DP_MAX_LINK_RATE_HBR 2 338#define VBT_DP_MAX_LINK_RATE_HBR 2
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index 4bcdeaf8d98f..ca1f78a42b17 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -823,18 +823,21 @@ static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
823 _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE)); 823 _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));
824 824
825 /* WaInPlaceDecompressionHang:icl */ 825 /* WaInPlaceDecompressionHang:icl */
826 I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | 826 I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
827 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); 827 I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
828 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
828 829
829 /* WaPipelineFlushCoherentLines:icl */ 830 /* WaPipelineFlushCoherentLines:icl */
830 I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | 831 I915_WRITE(GEN8_L3SQCREG4,
831 GEN8_LQSC_FLUSH_COHERENT_LINES); 832 I915_READ(GEN8_L3SQCREG4) |
833 GEN8_LQSC_FLUSH_COHERENT_LINES);
832 834
833 /* Wa_1405543622:icl 835 /* Wa_1405543622:icl
834 * Formerly known as WaGAPZPriorityScheme 836 * Formerly known as WaGAPZPriorityScheme
835 */ 837 */
836 I915_WRITE(GEN8_GARBCNTL, I915_READ(GEN8_GARBCNTL) | 838 I915_WRITE(GEN8_GARBCNTL,
837 GEN11_ARBITRATION_PRIO_ORDER_MASK); 839 I915_READ(GEN8_GARBCNTL) |
840 GEN11_ARBITRATION_PRIO_ORDER_MASK);
838 841
839 /* Wa_1604223664:icl 842 /* Wa_1604223664:icl
840 * Formerly known as WaL3BankAddressHashing 843 * Formerly known as WaL3BankAddressHashing
@@ -854,21 +857,24 @@ static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
854 /* Wa_1405733216:icl 857 /* Wa_1405733216:icl
855 * Formerly known as WaDisableCleanEvicts 858 * Formerly known as WaDisableCleanEvicts
856 */ 859 */
857 I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | 860 I915_WRITE(GEN8_L3SQCREG4,
858 GEN11_LQSC_CLEAN_EVICT_DISABLE); 861 I915_READ(GEN8_L3SQCREG4) |
862 GEN11_LQSC_CLEAN_EVICT_DISABLE);
859 863
860 /* Wa_1405766107:icl 864 /* Wa_1405766107:icl
861 * Formerly known as WaCL2SFHalfMaxAlloc 865 * Formerly known as WaCL2SFHalfMaxAlloc
862 */ 866 */
863 I915_WRITE(GEN11_LSN_UNSLCVC, I915_READ(GEN11_LSN_UNSLCVC) | 867 I915_WRITE(GEN11_LSN_UNSLCVC,
864 GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC | 868 I915_READ(GEN11_LSN_UNSLCVC) |
865 GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC); 869 GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
870 GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
866 871
867 /* Wa_220166154:icl 872 /* Wa_220166154:icl
868 * Formerly known as WaDisCtxReload 873 * Formerly known as WaDisCtxReload
869 */ 874 */
870 I915_WRITE(GAMW_ECO_DEV_RW_IA_REG, I915_READ(GAMW_ECO_DEV_RW_IA_REG) | 875 I915_WRITE(GEN8_GAMW_ECO_DEV_RW_IA,
871 GAMW_ECO_DEV_CTX_RELOAD_DISABLE); 876 I915_READ(GEN8_GAMW_ECO_DEV_RW_IA) |
877 GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
872 878
873 /* Wa_1405779004:icl (pre-prod) */ 879 /* Wa_1405779004:icl (pre-prod) */
874 if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0)) 880 if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
@@ -905,6 +911,13 @@ static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
905 I915_WRITE(GAMT_CHKN_BIT_REG, 911 I915_WRITE(GAMT_CHKN_BIT_REG,
906 I915_READ(GAMT_CHKN_BIT_REG) | 912 I915_READ(GAMT_CHKN_BIT_REG) |
907 GAMT_CHKN_DISABLE_L3_COH_PIPE); 913 GAMT_CHKN_DISABLE_L3_COH_PIPE);
914
915 /* Wa_1406609255:icl (pre-prod) */
916 if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_B0))
917 I915_WRITE(GEN7_SARCHKMD,
918 I915_READ(GEN7_SARCHKMD) |
919 GEN7_DISABLE_DEMAND_PREFETCH |
920 GEN7_DISABLE_SAMPLER_PREFETCH);
908} 921}
909 922
910void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv) 923void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)
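Every write in icl_gt_workarounds_apply() is the same read-modify-write shape, now rewrapped at one operand per line. For illustration only, the pattern could be factored as a helper (the patch keeps it open-coded):

static void wa_write_or(struct drm_i915_private *dev_priv,
			i915_reg_t reg, u32 set)
{
	I915_WRITE(reg, I915_READ(reg) | set);
}

/* e.g. WaPipelineFlushCoherentLines:icl collapses to */
wa_write_or(dev_priv, GEN8_L3SQCREG4, GEN8_LQSC_FLUSH_COHERENT_LINES);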
@@ -941,7 +954,7 @@ struct whitelist {
941 954
942static void whitelist_reg(struct whitelist *w, i915_reg_t reg) 955static void whitelist_reg(struct whitelist *w, i915_reg_t reg)
943{ 956{
944 if (GEM_WARN_ON(w->count >= RING_MAX_NONPRIV_SLOTS)) 957 if (GEM_DEBUG_WARN_ON(w->count >= RING_MAX_NONPRIV_SLOTS))
945 return; 958 return;
946 959
947 w->reg[w->count++] = reg; 960 w->reg[w->count++] = reg;
@@ -1009,6 +1022,11 @@ static void cnl_whitelist_build(struct whitelist *w)
1009 1022
1010static void icl_whitelist_build(struct whitelist *w) 1023static void icl_whitelist_build(struct whitelist *w)
1011{ 1024{
1025 /* WaAllowUMDToModifyHalfSliceChicken7:icl */
1026 whitelist_reg(w, GEN9_HALF_SLICE_CHICKEN7);
1027
1028 /* WaAllowUMDToModifySamplerMode:icl */
1029 whitelist_reg(w, GEN10_SAMPLER_MODE);
1012} 1030}
1013 1031
1014static struct whitelist *whitelist_build(struct intel_engine_cs *engine, 1032static struct whitelist *whitelist_build(struct intel_engine_cs *engine,
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index 5c22f2c8d4cf..26c065c8d2c0 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -1135,7 +1135,8 @@ static int igt_write_huge(struct i915_gem_context *ctx,
1135 n = 0; 1135 n = 0;
1136 for_each_engine(engine, i915, id) { 1136 for_each_engine(engine, i915, id) {
1137 if (!intel_engine_can_store_dword(engine)) { 1137 if (!intel_engine_can_store_dword(engine)) {
1138 pr_info("store-dword-imm not supported on engine=%u\n", id); 1138 pr_info("store-dword-imm not supported on engine=%u\n",
1139 id);
1139 continue; 1140 continue;
1140 } 1141 }
1141 engines[n++] = engine; 1142 engines[n++] = engine;
@@ -1167,17 +1168,30 @@ static int igt_write_huge(struct i915_gem_context *ctx,
1167 engine = engines[order[i] % n]; 1168 engine = engines[order[i] % n];
1168 i = (i + 1) % (n * I915_NUM_ENGINES); 1169 i = (i + 1) % (n * I915_NUM_ENGINES);
1169 1170
1170 err = __igt_write_huge(ctx, engine, obj, size, offset_low, dword, num + 1); 1171 /*
1172 * In order to utilize 64K pages we need to both pad the vma
1173 * size and ensure the vma offset is at the start of the pt
1174 * boundary; however, to improve coverage we opt for testing both
1175 * aligned and unaligned offsets.
1176 */
1177 if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
1178 offset_low = round_down(offset_low,
1179 I915_GTT_PAGE_SIZE_2M);
1180
1181 err = __igt_write_huge(ctx, engine, obj, size, offset_low,
1182 dword, num + 1);
1171 if (err) 1183 if (err)
1172 break; 1184 break;
1173 1185
1174 err = __igt_write_huge(ctx, engine, obj, size, offset_high, dword, num + 1); 1186 err = __igt_write_huge(ctx, engine, obj, size, offset_high,
1187 dword, num + 1);
1175 if (err) 1188 if (err)
1176 break; 1189 break;
1177 1190
1178 if (igt_timeout(end_time, 1191 if (igt_timeout(end_time,
1179 "%s timed out on engine=%u, offset_low=%llx offset_high=%llx, max_page_size=%x\n", 1192 "%s timed out on engine=%u, offset_low=%llx offset_high=%llx, max_page_size=%x\n",
1180 __func__, engine->id, offset_low, offset_high, max_page_size)) 1193 __func__, engine->id, offset_low, offset_high,
1194 max_page_size))
1181 break; 1195 break;
1182 } 1196 }
1183 1197
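round_down() clamps offset_low to the 2M page-table boundary so a 64K-capable object can actually be bound with 64K PTEs. With a power-of-two alignment it is just a mask; an illustrative expansion:

/* round_down(x, y) == x & ~(y - 1) for power-of-two y */
round_down(0x213000ull, I915_GTT_PAGE_SIZE_2M) == 0x200000ull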
@@ -1436,7 +1450,7 @@ static int igt_ppgtt_pin_update(void *arg)
1436 * huge-gtt-pages. 1450 * huge-gtt-pages.
1437 */ 1451 */
1438 1452
1439 if (!USES_FULL_48BIT_PPGTT(dev_priv)) { 1453 if (!HAS_FULL_48BIT_PPGTT(dev_priv)) {
1440 pr_info("48b PPGTT not supported, skipping\n"); 1454 pr_info("48b PPGTT not supported, skipping\n");
1441 return 0; 1455 return 0;
1442 } 1456 }
@@ -1687,10 +1701,9 @@ int i915_gem_huge_page_mock_selftests(void)
1687 SUBTEST(igt_mock_ppgtt_huge_fill), 1701 SUBTEST(igt_mock_ppgtt_huge_fill),
1688 SUBTEST(igt_mock_ppgtt_64K), 1702 SUBTEST(igt_mock_ppgtt_64K),
1689 }; 1703 };
1690 int saved_ppgtt = i915_modparams.enable_ppgtt;
1691 struct drm_i915_private *dev_priv; 1704 struct drm_i915_private *dev_priv;
1692 struct pci_dev *pdev;
1693 struct i915_hw_ppgtt *ppgtt; 1705 struct i915_hw_ppgtt *ppgtt;
1706 struct pci_dev *pdev;
1694 int err; 1707 int err;
1695 1708
1696 dev_priv = mock_gem_device(); 1709 dev_priv = mock_gem_device();
@@ -1698,7 +1711,7 @@ int i915_gem_huge_page_mock_selftests(void)
1698 return -ENOMEM; 1711 return -ENOMEM;
1699 1712
1700 /* Pretend to be a device which supports the 48b PPGTT */ 1713 /* Pretend to be a device which supports the 48b PPGTT */
1701 i915_modparams.enable_ppgtt = 3; 1714 mkwrite_device_info(dev_priv)->ppgtt = INTEL_PPGTT_FULL_4LVL;
1702 1715
1703 pdev = dev_priv->drm.pdev; 1716 pdev = dev_priv->drm.pdev;
1704 dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(39)); 1717 dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(39));
@@ -1731,9 +1744,6 @@ out_close:
1731 1744
1732out_unlock: 1745out_unlock:
1733 mutex_unlock(&dev_priv->drm.struct_mutex); 1746 mutex_unlock(&dev_priv->drm.struct_mutex);
1734
1735 i915_modparams.enable_ppgtt = saved_ppgtt;
1736
1737 drm_dev_put(&dev_priv->drm); 1747 drm_dev_put(&dev_priv->drm);
1738 1748
1739 return err; 1749 return err;
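Instead of saving and restoring i915_modparams.enable_ppgtt, the mock test now writes the capability straight into the (normally const) device info. mkwrite_device_info() is essentially a const-cast accessor; a sketch:

/* sketch; the real helper lives in i915_drv.h */
static inline struct intel_device_info *
mkwrite_device_info(struct drm_i915_private *dev_priv)
{
	return (struct intel_device_info *)INTEL_INFO(dev_priv);
}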
@@ -1753,7 +1763,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
1753 struct i915_gem_context *ctx; 1763 struct i915_gem_context *ctx;
1754 int err; 1764 int err;
1755 1765
1756 if (!USES_PPGTT(dev_priv)) { 1766 if (!HAS_PPGTT(dev_priv)) {
1757 pr_info("PPGTT not supported, skipping live-selftests\n"); 1767 pr_info("PPGTT not supported, skipping live-selftests\n");
1758 return 0; 1768 return 0;
1759 } 1769 }
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index 76df25aa90c9..7d82043aff10 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -39,7 +39,8 @@ struct live_test {
39 const char *func; 39 const char *func;
40 const char *name; 40 const char *name;
41 41
42 unsigned int reset_count; 42 unsigned int reset_global;
43 unsigned int reset_engine[I915_NUM_ENGINES];
43}; 44};
44 45
45static int begin_live_test(struct live_test *t, 46static int begin_live_test(struct live_test *t,
@@ -47,6 +48,8 @@ static int begin_live_test(struct live_test *t,
47 const char *func, 48 const char *func,
48 const char *name) 49 const char *name)
49{ 50{
51 struct intel_engine_cs *engine;
52 enum intel_engine_id id;
50 int err; 53 int err;
51 54
52 t->i915 = i915; 55 t->i915 = i915;
@@ -63,7 +66,11 @@ static int begin_live_test(struct live_test *t,
63 } 66 }
64 67
65 i915->gpu_error.missed_irq_rings = 0; 68 i915->gpu_error.missed_irq_rings = 0;
66 t->reset_count = i915_reset_count(&i915->gpu_error); 69 t->reset_global = i915_reset_count(&i915->gpu_error);
70
71 for_each_engine(engine, i915, id)
72 t->reset_engine[id] =
73 i915_reset_engine_count(&i915->gpu_error, engine);
67 74
68 return 0; 75 return 0;
69} 76}
@@ -71,14 +78,28 @@ static int begin_live_test(struct live_test *t,
71static int end_live_test(struct live_test *t) 78static int end_live_test(struct live_test *t)
72{ 79{
73 struct drm_i915_private *i915 = t->i915; 80 struct drm_i915_private *i915 = t->i915;
81 struct intel_engine_cs *engine;
82 enum intel_engine_id id;
74 83
75 if (igt_flush_test(i915, I915_WAIT_LOCKED)) 84 if (igt_flush_test(i915, I915_WAIT_LOCKED))
76 return -EIO; 85 return -EIO;
77 86
78 if (t->reset_count != i915_reset_count(&i915->gpu_error)) { 87 if (t->reset_global != i915_reset_count(&i915->gpu_error)) {
79 pr_err("%s(%s): GPU was reset %d times!\n", 88 pr_err("%s(%s): GPU was reset %d times!\n",
80 t->func, t->name, 89 t->func, t->name,
81 i915_reset_count(&i915->gpu_error) - t->reset_count); 90 i915_reset_count(&i915->gpu_error) - t->reset_global);
91 return -EIO;
92 }
93
94 for_each_engine(engine, i915, id) {
95 if (t->reset_engine[id] ==
96 i915_reset_engine_count(&i915->gpu_error, engine))
97 continue;
98
99 pr_err("%s(%s): engine '%s' was reset %d times!\n",
100 t->func, t->name, engine->name,
101 i915_reset_engine_count(&i915->gpu_error, engine) -
102 t->reset_engine[id]);
82 return -EIO; 103 return -EIO;
83 } 104 }
84 105
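end_live_test() now catches per-engine resets as well as full-GPU ones. i915_reset_engine_count() is assumed here to be a simple read of the per-engine tally kept in i915_gpu_error, along the lines of:

static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
					  struct intel_engine_cs *engine)
{
	return READ_ONCE(error->reset_engine_count[engine->id]);
}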
@@ -531,11 +552,11 @@ static int igt_ctx_exec(void *arg)
531{ 552{
532 struct drm_i915_private *i915 = arg; 553 struct drm_i915_private *i915 = arg;
533 struct drm_i915_gem_object *obj = NULL; 554 struct drm_i915_gem_object *obj = NULL;
555 unsigned long ncontexts, ndwords, dw;
534 struct drm_file *file; 556 struct drm_file *file;
535 IGT_TIMEOUT(end_time); 557 IGT_TIMEOUT(end_time);
536 LIST_HEAD(objects); 558 LIST_HEAD(objects);
537 unsigned long ncontexts, ndwords, dw; 559 struct live_test t;
538 bool first_shared_gtt = true;
539 int err = -ENODEV; 560 int err = -ENODEV;
540 561
541 /* 562 /*
@@ -553,6 +574,10 @@ static int igt_ctx_exec(void *arg)
553 574
554 mutex_lock(&i915->drm.struct_mutex); 575 mutex_lock(&i915->drm.struct_mutex);
555 576
577 err = begin_live_test(&t, i915, __func__, "");
578 if (err)
579 goto out_unlock;
580
556 ncontexts = 0; 581 ncontexts = 0;
557 ndwords = 0; 582 ndwords = 0;
558 dw = 0; 583 dw = 0;
@@ -561,12 +586,7 @@ static int igt_ctx_exec(void *arg)
561 struct i915_gem_context *ctx; 586 struct i915_gem_context *ctx;
562 unsigned int id; 587 unsigned int id;
563 588
564 if (first_shared_gtt) { 589 ctx = i915_gem_create_context(i915, file->driver_priv);
565 ctx = __create_hw_context(i915, file->driver_priv);
566 first_shared_gtt = false;
567 } else {
568 ctx = i915_gem_create_context(i915, file->driver_priv);
569 }
570 if (IS_ERR(ctx)) { 590 if (IS_ERR(ctx)) {
571 err = PTR_ERR(ctx); 591 err = PTR_ERR(ctx);
572 goto out_unlock; 592 goto out_unlock;
@@ -622,7 +642,7 @@ static int igt_ctx_exec(void *arg)
622 } 642 }
623 643
624out_unlock: 644out_unlock:
625 if (igt_flush_test(i915, I915_WAIT_LOCKED)) 645 if (end_live_test(&t))
626 err = -EIO; 646 err = -EIO;
627 mutex_unlock(&i915->drm.struct_mutex); 647 mutex_unlock(&i915->drm.struct_mutex);
628 648
@@ -634,13 +654,14 @@ static int igt_ctx_readonly(void *arg)
634{ 654{
635 struct drm_i915_private *i915 = arg; 655 struct drm_i915_private *i915 = arg;
636 struct drm_i915_gem_object *obj = NULL; 656 struct drm_i915_gem_object *obj = NULL;
657 struct i915_gem_context *ctx;
658 struct i915_hw_ppgtt *ppgtt;
659 unsigned long ndwords, dw;
637 struct drm_file *file; 660 struct drm_file *file;
638 I915_RND_STATE(prng); 661 I915_RND_STATE(prng);
639 IGT_TIMEOUT(end_time); 662 IGT_TIMEOUT(end_time);
640 LIST_HEAD(objects); 663 LIST_HEAD(objects);
641 struct i915_gem_context *ctx; 664 struct live_test t;
642 struct i915_hw_ppgtt *ppgtt;
643 unsigned long ndwords, dw;
644 int err = -ENODEV; 665 int err = -ENODEV;
645 666
646 /* 667 /*
@@ -655,6 +676,10 @@ static int igt_ctx_readonly(void *arg)
655 676
656 mutex_lock(&i915->drm.struct_mutex); 677 mutex_lock(&i915->drm.struct_mutex);
657 678
679 err = begin_live_test(&t, i915, __func__, "");
680 if (err)
681 goto out_unlock;
682
658 ctx = i915_gem_create_context(i915, file->driver_priv); 683 ctx = i915_gem_create_context(i915, file->driver_priv);
659 if (IS_ERR(ctx)) { 684 if (IS_ERR(ctx)) {
660 err = PTR_ERR(ctx); 685 err = PTR_ERR(ctx);
@@ -727,7 +752,324 @@ static int igt_ctx_readonly(void *arg)
727 } 752 }
728 753
729out_unlock: 754out_unlock:
730 if (igt_flush_test(i915, I915_WAIT_LOCKED)) 755 if (end_live_test(&t))
756 err = -EIO;
757 mutex_unlock(&i915->drm.struct_mutex);
758
759 mock_file_free(i915, file);
760 return err;
761}
762
763static int check_scratch(struct i915_gem_context *ctx, u64 offset)
764{
765 struct drm_mm_node *node =
766 __drm_mm_interval_first(&ctx->ppgtt->vm.mm,
767 offset, offset + sizeof(u32) - 1);
768 if (!node || node->start > offset)
769 return 0;
770
771 GEM_BUG_ON(offset >= node->start + node->size);
772
773 pr_err("Target offset 0x%08x_%08x overlaps with a node in the mm!\n",
774 upper_32_bits(offset), lower_32_bits(offset));
775 return -EINVAL;
776}
777
778static int write_to_scratch(struct i915_gem_context *ctx,
779 struct intel_engine_cs *engine,
780 u64 offset, u32 value)
781{
782 struct drm_i915_private *i915 = ctx->i915;
783 struct drm_i915_gem_object *obj;
784 struct i915_request *rq;
785 struct i915_vma *vma;
786 u32 *cmd;
787 int err;
788
789 GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);
790
791 obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
792 if (IS_ERR(obj))
793 return PTR_ERR(obj);
794
795 cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
796 if (IS_ERR(cmd)) {
797 err = PTR_ERR(cmd);
798 goto err;
799 }
800
801 *cmd++ = MI_STORE_DWORD_IMM_GEN4;
802 if (INTEL_GEN(i915) >= 8) {
803 *cmd++ = lower_32_bits(offset);
804 *cmd++ = upper_32_bits(offset);
805 } else {
806 *cmd++ = 0;
807 *cmd++ = offset;
808 }
809 *cmd++ = value;
810 *cmd = MI_BATCH_BUFFER_END;
811 i915_gem_object_unpin_map(obj);
812
813 err = i915_gem_object_set_to_gtt_domain(obj, false);
814 if (err)
815 goto err;
816
817 vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
818 if (IS_ERR(vma)) {
819 err = PTR_ERR(vma);
820 goto err;
821 }
822
823 err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
824 if (err)
825 goto err;
826
827 err = check_scratch(ctx, offset);
828 if (err)
829 goto err_unpin;
830
831 rq = i915_request_alloc(engine, ctx);
832 if (IS_ERR(rq)) {
833 err = PTR_ERR(rq);
834 goto err_unpin;
835 }
836
837 err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
838 if (err)
839 goto err_request;
840
841 err = i915_vma_move_to_active(vma, rq, 0);
842 if (err)
843 goto skip_request;
844
845 i915_gem_object_set_active_reference(obj);
846 i915_vma_unpin(vma);
847 i915_vma_close(vma);
848
849 i915_request_add(rq);
850
851 return 0;
852
853skip_request:
854 i915_request_skip(rq, err);
855err_request:
856 i915_request_add(rq);
857err_unpin:
858 i915_vma_unpin(vma);
859err:
860 i915_gem_object_put(obj);
861 return err;
862}
863
864static int read_from_scratch(struct i915_gem_context *ctx,
865 struct intel_engine_cs *engine,
866 u64 offset, u32 *value)
867{
868 struct drm_i915_private *i915 = ctx->i915;
869 struct drm_i915_gem_object *obj;
870 const u32 RCS_GPR0 = 0x2600; /* not all engines have their own GPR! */
871 const u32 result = 0x100;
872 struct i915_request *rq;
873 struct i915_vma *vma;
874 u32 *cmd;
875 int err;
876
877 GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);
878
879 obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
880 if (IS_ERR(obj))
881 return PTR_ERR(obj);
882
883 cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
884 if (IS_ERR(cmd)) {
885 err = PTR_ERR(cmd);
886 goto err;
887 }
888
889 memset(cmd, POISON_INUSE, PAGE_SIZE);
890 if (INTEL_GEN(i915) >= 8) {
891 *cmd++ = MI_LOAD_REGISTER_MEM_GEN8;
892 *cmd++ = RCS_GPR0;
893 *cmd++ = lower_32_bits(offset);
894 *cmd++ = upper_32_bits(offset);
895 *cmd++ = MI_STORE_REGISTER_MEM_GEN8;
896 *cmd++ = RCS_GPR0;
897 *cmd++ = result;
898 *cmd++ = 0;
899 } else {
900 *cmd++ = MI_LOAD_REGISTER_MEM;
901 *cmd++ = RCS_GPR0;
902 *cmd++ = offset;
903 *cmd++ = MI_STORE_REGISTER_MEM;
904 *cmd++ = RCS_GPR0;
905 *cmd++ = result;
906 }
907 *cmd = MI_BATCH_BUFFER_END;
908 i915_gem_object_unpin_map(obj);
909
910 err = i915_gem_object_set_to_gtt_domain(obj, false);
911 if (err)
912 goto err;
913
914 vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
915 if (IS_ERR(vma)) {
916 err = PTR_ERR(vma);
917 goto err;
918 }
919
920 err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
921 if (err)
922 goto err;
923
924 err = check_scratch(ctx, offset);
925 if (err)
926 goto err_unpin;
927
928 rq = i915_request_alloc(engine, ctx);
929 if (IS_ERR(rq)) {
930 err = PTR_ERR(rq);
931 goto err_unpin;
932 }
933
934 err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
935 if (err)
936 goto err_request;
937
938 err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
939 if (err)
940 goto skip_request;
941
942 i915_vma_unpin(vma);
943 i915_vma_close(vma);
944
945 i915_request_add(rq);
946
947 err = i915_gem_object_set_to_cpu_domain(obj, false);
948 if (err)
949 goto err;
950
951 cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
952 if (IS_ERR(cmd)) {
953 err = PTR_ERR(cmd);
954 goto err;
955 }
956
957 *value = cmd[result / sizeof(*cmd)];
958 i915_gem_object_unpin_map(obj);
959 i915_gem_object_put(obj);
960
961 return 0;
962
963skip_request:
964 i915_request_skip(rq, err);
965err_request:
966 i915_request_add(rq);
967err_unpin:
968 i915_vma_unpin(vma);
969err:
970 i915_gem_object_put(obj);
971 return err;
972}
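For reference, the batches emitted by the two helpers above boil down to a few dwords each (gen8+ layout, paraphrased from the code rather than quoted):

/* write_to_scratch(): store one dword at 'offset' in the context's VM */
MI_STORE_DWORD_IMM_GEN4, lower_32_bits(offset), upper_32_bits(offset), value,
MI_BATCH_BUFFER_END

/* read_from_scratch(): bounce the dword through RCS_GPR0 into the batch page */
MI_LOAD_REGISTER_MEM_GEN8, RCS_GPR0, lower_32_bits(offset), upper_32_bits(offset),
MI_STORE_REGISTER_MEM_GEN8, RCS_GPR0, result, 0,
MI_BATCH_BUFFER_END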
973
974static int igt_vm_isolation(void *arg)
975{
976 struct drm_i915_private *i915 = arg;
977 struct i915_gem_context *ctx_a, *ctx_b;
978 struct intel_engine_cs *engine;
979 struct drm_file *file;
980 I915_RND_STATE(prng);
981 unsigned long count;
982 struct live_test t;
983 unsigned int id;
984 u64 vm_total;
985 int err;
986
987 if (INTEL_GEN(i915) < 7)
988 return 0;
989
990 /*
991 * The simple goal here is that a write into one context is not
992 * observed in a second (separate page tables and scratch).
993 */
994
995 file = mock_file(i915);
996 if (IS_ERR(file))
997 return PTR_ERR(file);
998
999 mutex_lock(&i915->drm.struct_mutex);
1000
1001 err = begin_live_test(&t, i915, __func__, "");
1002 if (err)
1003 goto out_unlock;
1004
1005 ctx_a = i915_gem_create_context(i915, file->driver_priv);
1006 if (IS_ERR(ctx_a)) {
1007 err = PTR_ERR(ctx_a);
1008 goto out_unlock;
1009 }
1010
1011 ctx_b = i915_gem_create_context(i915, file->driver_priv);
1012 if (IS_ERR(ctx_b)) {
1013 err = PTR_ERR(ctx_b);
1014 goto out_unlock;
1015 }
1016
1017 /* We can only test vm isolation if the vms are distinct */
1018 if (ctx_a->ppgtt == ctx_b->ppgtt)
1019 goto out_unlock;
1020
1021 vm_total = ctx_a->ppgtt->vm.total;
1022 GEM_BUG_ON(ctx_b->ppgtt->vm.total != vm_total);
1023 vm_total -= I915_GTT_PAGE_SIZE;
1024
1025 intel_runtime_pm_get(i915);
1026
1027 count = 0;
1028 for_each_engine(engine, i915, id) {
1029 IGT_TIMEOUT(end_time);
1030 unsigned long this = 0;
1031
1032 if (!intel_engine_can_store_dword(engine))
1033 continue;
1034
1035 while (!__igt_timeout(end_time, NULL)) {
1036 u32 value = 0xc5c5c5c5;
1037 u64 offset;
1038
1039 div64_u64_rem(i915_prandom_u64_state(&prng),
1040 vm_total, &offset);
1041 offset &= -sizeof(u32);
1042 offset += I915_GTT_PAGE_SIZE;
1043
1044 err = write_to_scratch(ctx_a, engine,
1045 offset, 0xdeadbeef);
1046 if (err == 0)
1047 err = read_from_scratch(ctx_b, engine,
1048 offset, &value);
1049 if (err)
1050 goto out_rpm;
1051
1052 if (value) {
1053 pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n",
1054 engine->name, value,
1055 upper_32_bits(offset),
1056 lower_32_bits(offset),
1057 this);
1058 err = -EINVAL;
1059 goto out_rpm;
1060 }
1061
1062 this++;
1063 }
1064 count += this;
1065 }
1066 pr_info("Checked %lu scratch offsets across %d engines\n",
1067 count, INTEL_INFO(i915)->num_rings);
1068
1069out_rpm:
1070 intel_runtime_pm_put(i915);
1071out_unlock:
1072 if (end_live_test(&t))
731 err = -EIO; 1073 err = -EIO;
732 mutex_unlock(&i915->drm.struct_mutex); 1074 mutex_unlock(&i915->drm.struct_mutex);
733 1075
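The offset generation in the loop above draws a uniform 64-bit value, reduces it modulo vm_total, aligns it down to a u32 and then skips past the first page (which check_scratch() would otherwise reject). Note that offset &= -sizeof(u32) is the standard align-down idiom, since -4 in two's complement is all-ones except the low two bits. A worked example with illustrative values:

offset  = 0x1234567bull;	/* PRNG remainder */
offset &= -sizeof(u32);		/* 0x12345678: aligned down to 4 bytes */
offset += I915_GTT_PAGE_SIZE;	/* keep clear of the scratch page at 0 */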
@@ -865,33 +1207,6 @@ out_unlock:
865 return err; 1207 return err;
866} 1208}
867 1209
868static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915)
869{
870 struct drm_i915_gem_object *obj;
871 int err;
872
873 err = i915_gem_init_aliasing_ppgtt(i915);
874 if (err)
875 return err;
876
877 list_for_each_entry(obj, &i915->mm.bound_list, mm.link) {
878 struct i915_vma *vma;
879
880 vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
881 if (IS_ERR(vma))
882 continue;
883
884 vma->flags &= ~I915_VMA_LOCAL_BIND;
885 }
886
887 return 0;
888}
889
890static void fake_aliasing_ppgtt_disable(struct drm_i915_private *i915)
891{
892 i915_gem_fini_aliasing_ppgtt(i915);
893}
894
895int i915_gem_context_mock_selftests(void) 1210int i915_gem_context_mock_selftests(void)
896{ 1211{
897 static const struct i915_subtest tests[] = { 1212 static const struct i915_subtest tests[] = {
@@ -917,32 +1232,11 @@ int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
917 SUBTEST(live_nop_switch), 1232 SUBTEST(live_nop_switch),
918 SUBTEST(igt_ctx_exec), 1233 SUBTEST(igt_ctx_exec),
919 SUBTEST(igt_ctx_readonly), 1234 SUBTEST(igt_ctx_readonly),
1235 SUBTEST(igt_vm_isolation),
920 }; 1236 };
921 bool fake_alias = false;
922 int err;
923 1237
924 if (i915_terminally_wedged(&dev_priv->gpu_error)) 1238 if (i915_terminally_wedged(&dev_priv->gpu_error))
925 return 0; 1239 return 0;
926 1240
927 /* Install a fake aliasing gtt for exercise */ 1241 return i915_subtests(tests, dev_priv);
928 if (USES_PPGTT(dev_priv) && !dev_priv->mm.aliasing_ppgtt) {
929 mutex_lock(&dev_priv->drm.struct_mutex);
930 err = fake_aliasing_ppgtt_enable(dev_priv);
931 mutex_unlock(&dev_priv->drm.struct_mutex);
932 if (err)
933 return err;
934
935 GEM_BUG_ON(!dev_priv->mm.aliasing_ppgtt);
936 fake_alias = true;
937 }
938
939 err = i915_subtests(tests, dev_priv);
940
941 if (fake_alias) {
942 mutex_lock(&dev_priv->drm.struct_mutex);
943 fake_aliasing_ppgtt_disable(dev_priv);
944 mutex_unlock(&dev_priv->drm.struct_mutex);
945 }
946
947 return err;
948} 1242}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 128ad1cf0647..4365979d8222 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -351,7 +351,7 @@ static int igt_evict_contexts(void *arg)
351 * where the GTT space of the request is separate from the GGTT 351 * where the GTT space of the request is separate from the GGTT
352 * allocation required to build the request. 352 * allocation required to build the request.
353 */ 353 */
354 if (!USES_FULL_PPGTT(i915)) 354 if (!HAS_FULL_PPGTT(i915))
355 return 0; 355 return 0;
356 356
357 mutex_lock(&i915->drm.struct_mutex); 357 mutex_lock(&i915->drm.struct_mutex);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 127d81513671..69fe86b30fbb 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -153,7 +153,7 @@ static int igt_ppgtt_alloc(void *arg)
153 153
154 /* Allocate a ppgtt and try to fill the entire range */ 154 /* Allocate a ppgtt and try to fill the entire range */
155 155
156 if (!USES_PPGTT(dev_priv)) 156 if (!HAS_PPGTT(dev_priv))
157 return 0; 157 return 0;
158 158
159 ppgtt = __hw_ppgtt_create(dev_priv); 159 ppgtt = __hw_ppgtt_create(dev_priv);
@@ -1001,7 +1001,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
1001 IGT_TIMEOUT(end_time); 1001 IGT_TIMEOUT(end_time);
1002 int err; 1002 int err;
1003 1003
1004 if (!USES_FULL_PPGTT(dev_priv)) 1004 if (!HAS_FULL_PPGTT(dev_priv))
1005 return 0; 1005 return 0;
1006 1006
1007 file = mock_file(dev_priv); 1007 file = mock_file(dev_priv);
diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c
index 0c0ab82b6228..32cba4cae31a 100644
--- a/drivers/gpu/drm/i915/selftests/intel_guc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_guc.c
@@ -159,6 +159,7 @@ static int igt_guc_clients(void *args)
159 * Get rid of clients created during driver load because the test will 159 * Get rid of clients created during driver load because the test will
160 * recreate them. 160 * recreate them.
161 */ 161 */
162 guc_clients_disable(guc);
162 guc_clients_destroy(guc); 163 guc_clients_destroy(guc);
163 if (guc->execbuf_client || guc->preempt_client) { 164 if (guc->execbuf_client || guc->preempt_client) {
164 pr_err("guc_clients_destroy lied!\n"); 165 pr_err("guc_clients_destroy lied!\n");
@@ -197,8 +198,8 @@ static int igt_guc_clients(void *args)
197 goto out; 198 goto out;
198 } 199 }
199 200
200 /* Now create the doorbells */ 201 /* Now enable the clients */
201 guc_clients_doorbell_init(guc); 202 guc_clients_enable(guc);
202 203
203 /* each client should now have received a doorbell */ 204 /* each client should now have received a doorbell */
204 if (!client_doorbell_in_sync(guc->execbuf_client) || 205 if (!client_doorbell_in_sync(guc->execbuf_client) ||
@@ -212,63 +213,17 @@ static int igt_guc_clients(void *args)
212 * Basic test - an attempt to reallocate a valid doorbell to the 213 * Basic test - an attempt to reallocate a valid doorbell to the
213 * client it is currently assigned should not cause a failure. 214 * client it is currently assigned should not cause a failure.
214 */ 215 */
215 err = guc_clients_doorbell_init(guc);
216 if (err)
217 goto out;
218
219 /*
220 * Negative test - a client with no doorbell (invalid db id).
221 * After destroying the doorbell, the db id is changed to
222 * GUC_DOORBELL_INVALID and the firmware will reject any attempt to
223 * allocate a doorbell with an invalid id (db has to be reserved before
224 * allocation).
225 */
226 destroy_doorbell(guc->execbuf_client);
227 if (client_doorbell_in_sync(guc->execbuf_client)) {
228 pr_err("destroy db did not work\n");
229 err = -EINVAL;
230 goto out;
231 }
232
233 unreserve_doorbell(guc->execbuf_client);
234
235 __create_doorbell(guc->execbuf_client);
236 err = __guc_allocate_doorbell(guc, guc->execbuf_client->stage_id);
237 if (err != -EIO) {
238 pr_err("unexpected (err = %d)", err);
239 goto out_db;
240 }
241
242 if (!available_dbs(guc, guc->execbuf_client->priority)) {
243 pr_err("doorbell not available when it should\n");
244 err = -EIO;
245 goto out_db;
246 }
247
248out_db:
249 /* clean after test */
250 __destroy_doorbell(guc->execbuf_client);
251 err = reserve_doorbell(guc->execbuf_client);
252 if (err) {
253 pr_err("failed to reserve the doorbell back\n");
254 }
255 err = create_doorbell(guc->execbuf_client); 216 err = create_doorbell(guc->execbuf_client);
256 if (err) {
257 pr_err("recreate doorbell failed\n");
258 goto out;
259 }
260 217
261out: 218out:
262 /* 219 /*
263 * Leave clean state for other tests, plus the driver always destroys the 220 * Leave clean state for other tests, plus the driver always destroys the
264 * clients during unload. 221 * clients during unload.
265 */ 222 */
266 destroy_doorbell(guc->execbuf_client); 223 guc_clients_disable(guc);
267 if (guc->preempt_client)
268 destroy_doorbell(guc->preempt_client);
269 guc_clients_destroy(guc); 224 guc_clients_destroy(guc);
270 guc_clients_create(guc); 225 guc_clients_create(guc);
271 guc_clients_doorbell_init(guc); 226 guc_clients_enable(guc);
272unlock: 227unlock:
273 intel_runtime_pm_put(dev_priv); 228 intel_runtime_pm_put(dev_priv);
274 mutex_unlock(&dev_priv->drm.struct_mutex); 229 mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -352,7 +307,7 @@ static int igt_guc_doorbells(void *arg)
352 307
353 db_id = clients[i]->doorbell_id; 308 db_id = clients[i]->doorbell_id;
354 309
355 err = create_doorbell(clients[i]); 310 err = __guc_client_enable(clients[i]);
356 if (err) { 311 if (err) {
357 pr_err("[%d] Failed to create a doorbell\n", i); 312 pr_err("[%d] Failed to create a doorbell\n", i);
358 goto out; 313 goto out;
@@ -378,7 +333,7 @@ static int igt_guc_doorbells(void *arg)
378out: 333out:
379 for (i = 0; i < ATTEMPTS; i++) 334 for (i = 0; i < ATTEMPTS; i++)
380 if (!IS_ERR_OR_NULL(clients[i])) { 335 if (!IS_ERR_OR_NULL(clients[i])) {
381 destroy_doorbell(clients[i]); 336 __guc_client_disable(clients[i]);
382 guc_client_free(clients[i]); 337 guc_client_free(clients[i]);
383 } 338 }
384unlock: 339unlock:
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index db378226ac10..51d0e2bed9e1 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -76,7 +76,7 @@ static int hang_init(struct hang *h, struct drm_i915_private *i915)
76 h->seqno = memset(vaddr, 0xff, PAGE_SIZE); 76 h->seqno = memset(vaddr, 0xff, PAGE_SIZE);
77 77
78 vaddr = i915_gem_object_pin_map(h->obj, 78 vaddr = i915_gem_object_pin_map(h->obj,
79 HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC); 79 i915_coherent_map_type(i915));
80 if (IS_ERR(vaddr)) { 80 if (IS_ERR(vaddr)) {
81 err = PTR_ERR(vaddr); 81 err = PTR_ERR(vaddr);
82 goto err_unpin_hws; 82 goto err_unpin_hws;
@@ -234,7 +234,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
234 return ERR_CAST(obj); 234 return ERR_CAST(obj);
235 235
236 vaddr = i915_gem_object_pin_map(obj, 236 vaddr = i915_gem_object_pin_map(obj,
237 HAS_LLC(h->i915) ? I915_MAP_WB : I915_MAP_WC); 237 i915_coherent_map_type(h->i915));
238 if (IS_ERR(vaddr)) { 238 if (IS_ERR(vaddr)) {
239 i915_gem_object_put(obj); 239 i915_gem_object_put(obj);
240 return ERR_CAST(vaddr); 240 return ERR_CAST(vaddr);
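Both call sites now defer to i915_coherent_map_type() rather than open-coding the LLC test. Judging by the expression it replaces, the helper is simply:

static inline enum i915_map_type
i915_coherent_map_type(struct drm_i915_private *i915)
{
	return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
}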
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index 1aea7a8f2224..94fc0e5c8766 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -6,6 +6,7 @@
6 6
7#include "../i915_selftest.h" 7#include "../i915_selftest.h"
8#include "igt_flush_test.h" 8#include "igt_flush_test.h"
9#include "i915_random.h"
9 10
10#include "mock_context.h" 11#include "mock_context.h"
11 12
@@ -48,7 +49,7 @@ static int spinner_init(struct spinner *spin, struct drm_i915_private *i915)
48 } 49 }
49 spin->seqno = memset(vaddr, 0xff, PAGE_SIZE); 50 spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
50 51
51 mode = HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC; 52 mode = i915_coherent_map_type(i915);
52 vaddr = i915_gem_object_pin_map(spin->obj, mode); 53 vaddr = i915_gem_object_pin_map(spin->obj, mode);
53 if (IS_ERR(vaddr)) { 54 if (IS_ERR(vaddr)) {
54 err = PTR_ERR(vaddr); 55 err = PTR_ERR(vaddr);
@@ -291,12 +292,14 @@ static int live_preempt(void *arg)
291 ctx_hi = kernel_context(i915); 292 ctx_hi = kernel_context(i915);
292 if (!ctx_hi) 293 if (!ctx_hi)
293 goto err_spin_lo; 294 goto err_spin_lo;
294 ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY; 295 ctx_hi->sched.priority =
296 I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
295 297
296 ctx_lo = kernel_context(i915); 298 ctx_lo = kernel_context(i915);
297 if (!ctx_lo) 299 if (!ctx_lo)
298 goto err_ctx_hi; 300 goto err_ctx_hi;
299 ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY; 301 ctx_lo->sched.priority =
302 I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
300 303
301 for_each_engine(engine, i915, id) { 304 for_each_engine(engine, i915, id) {
302 struct i915_request *rq; 305 struct i915_request *rq;
@@ -417,7 +420,7 @@ static int live_late_preempt(void *arg)
417 goto err_wedged; 420 goto err_wedged;
418 } 421 }
419 422
420 attr.priority = I915_PRIORITY_MAX; 423 attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
421 engine->schedule(rq, &attr); 424 engine->schedule(rq, &attr);
422 425
423 if (!wait_for_spinner(&spin_hi, rq)) { 426 if (!wait_for_spinner(&spin_hi, rq)) {
@@ -573,6 +576,261 @@ err_unlock:
573 return err; 576 return err;
574} 577}
575 578
579static int random_range(struct rnd_state *rnd, int min, int max)
580{
581 return i915_prandom_u32_max_state(max - min, rnd) + min;
582}
583
584static int random_priority(struct rnd_state *rnd)
585{
586 return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX);
587}
588
589struct preempt_smoke {
590 struct drm_i915_private *i915;
591 struct i915_gem_context **contexts;
592 struct intel_engine_cs *engine;
593 struct drm_i915_gem_object *batch;
594 unsigned int ncontext;
595 struct rnd_state prng;
596 unsigned long count;
597};
598
599static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
600{
601 return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext,
602 &smoke->prng)];
603}
604
605static int smoke_submit(struct preempt_smoke *smoke,
606 struct i915_gem_context *ctx, int prio,
607 struct drm_i915_gem_object *batch)
608{
609 struct i915_request *rq;
610 struct i915_vma *vma = NULL;
611 int err = 0;
612
613 if (batch) {
614 vma = i915_vma_instance(batch, &ctx->ppgtt->vm, NULL);
615 if (IS_ERR(vma))
616 return PTR_ERR(vma);
617
618 err = i915_vma_pin(vma, 0, 0, PIN_USER);
619 if (err)
620 return err;
621 }
622
623 ctx->sched.priority = prio;
624
625 rq = i915_request_alloc(smoke->engine, ctx);
626 if (IS_ERR(rq)) {
627 err = PTR_ERR(rq);
628 goto unpin;
629 }
630
631 if (vma) {
632 err = rq->engine->emit_bb_start(rq,
633 vma->node.start,
634 PAGE_SIZE, 0);
635 if (!err)
636 err = i915_vma_move_to_active(vma, rq, 0);
637 }
638
639 i915_request_add(rq);
640
641unpin:
642 if (vma)
643 i915_vma_unpin(vma);
644
645 return err;
646}
647
648static int smoke_crescendo_thread(void *arg)
649{
650 struct preempt_smoke *smoke = arg;
651 IGT_TIMEOUT(end_time);
652 unsigned long count;
653
654 count = 0;
655 do {
656 struct i915_gem_context *ctx = smoke_context(smoke);
657 int err;
658
659 mutex_lock(&smoke->i915->drm.struct_mutex);
660 err = smoke_submit(smoke,
661 ctx, count % I915_PRIORITY_MAX,
662 smoke->batch);
663 mutex_unlock(&smoke->i915->drm.struct_mutex);
664 if (err)
665 return err;
666
667 count++;
668 } while (!__igt_timeout(end_time, NULL));
669
670 smoke->count = count;
671 return 0;
672}
673
674static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
675#define BATCH BIT(0)
676{
677 struct task_struct *tsk[I915_NUM_ENGINES] = {};
678 struct preempt_smoke arg[I915_NUM_ENGINES];
679 struct intel_engine_cs *engine;
680 enum intel_engine_id id;
681 unsigned long count;
682 int err = 0;
683
684 mutex_unlock(&smoke->i915->drm.struct_mutex);
685
686 for_each_engine(engine, smoke->i915, id) {
687 arg[id] = *smoke;
688 arg[id].engine = engine;
689 if (!(flags & BATCH))
690 arg[id].batch = NULL;
691 arg[id].count = 0;
692
693 tsk[id] = kthread_run(smoke_crescendo_thread, &arg[id],
694 "igt/smoke:%d", id);
695 if (IS_ERR(tsk[id])) {
696 err = PTR_ERR(tsk[id]);
697 break;
698 }
699 get_task_struct(tsk[id]);
700 }
701
702 count = 0;
703 for_each_engine(engine, smoke->i915, id) {
704 int status;
705
706 if (IS_ERR_OR_NULL(tsk[id]))
707 continue;
708
709 status = kthread_stop(tsk[id]);
710 if (status && !err)
711 err = status;
712
713 count += arg[id].count;
714
715 put_task_struct(tsk[id]);
716 }
717
718 mutex_lock(&smoke->i915->drm.struct_mutex);
719
720 pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
721 count, flags,
722 INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
723 return err;
724}
725
726static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
727{
728 enum intel_engine_id id;
729 IGT_TIMEOUT(end_time);
730 unsigned long count;
731
732 count = 0;
733 do {
734 for_each_engine(smoke->engine, smoke->i915, id) {
735 struct i915_gem_context *ctx = smoke_context(smoke);
736 int err;
737
738 err = smoke_submit(smoke,
739 ctx, random_priority(&smoke->prng),
740 flags & BATCH ? smoke->batch : NULL);
741 if (err)
742 return err;
743
744 count++;
745 }
746 } while (!__igt_timeout(end_time, NULL));
747
748 pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
749 count, flags,
750 INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
751 return 0;
752}
753
754static int live_preempt_smoke(void *arg)
755{
756 struct preempt_smoke smoke = {
757 .i915 = arg,
758 .prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
759 .ncontext = 1024,
760 };
761 const unsigned int phase[] = { 0, BATCH };
762 int err = -ENOMEM;
763 u32 *cs;
764 int n;
765
766 if (!HAS_LOGICAL_RING_PREEMPTION(smoke.i915))
767 return 0;
768
769 smoke.contexts = kmalloc_array(smoke.ncontext,
770 sizeof(*smoke.contexts),
771 GFP_KERNEL);
772 if (!smoke.contexts)
773 return -ENOMEM;
774
775 mutex_lock(&smoke.i915->drm.struct_mutex);
776 intel_runtime_pm_get(smoke.i915);
777
778 smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
779 if (IS_ERR(smoke.batch)) {
780 err = PTR_ERR(smoke.batch);
781 goto err_unlock;
782 }
783
784 cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
785 if (IS_ERR(cs)) {
786 err = PTR_ERR(cs);
787 goto err_batch;
788 }
789 for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
790 cs[n] = MI_ARB_CHECK;
791 cs[n] = MI_BATCH_BUFFER_END;
792 i915_gem_object_unpin_map(smoke.batch);
793
794 err = i915_gem_object_set_to_gtt_domain(smoke.batch, false);
795 if (err)
796 goto err_batch;
797
798 for (n = 0; n < smoke.ncontext; n++) {
799 smoke.contexts[n] = kernel_context(smoke.i915);
800 if (!smoke.contexts[n])
801 goto err_ctx;
802 }
803
804 for (n = 0; n < ARRAY_SIZE(phase); n++) {
805 err = smoke_crescendo(&smoke, phase[n]);
806 if (err)
807 goto err_ctx;
808
809 err = smoke_random(&smoke, phase[n]);
810 if (err)
811 goto err_ctx;
812 }
813
814err_ctx:
815 if (igt_flush_test(smoke.i915, I915_WAIT_LOCKED))
816 err = -EIO;
817
818 for (n = 0; n < smoke.ncontext; n++) {
819 if (!smoke.contexts[n])
820 break;
821 kernel_context_close(smoke.contexts[n]);
822 }
823
824err_batch:
825 i915_gem_object_put(smoke.batch);
826err_unlock:
827 intel_runtime_pm_put(smoke.i915);
828 mutex_unlock(&smoke.i915->drm.struct_mutex);
829 kfree(smoke.contexts);
830
831 return err;
832}
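The smoke batch built above is a full page of MI_ARB_CHECK commands terminated by MI_BATCH_BUFFER_END, i.e. an arbitration point at every dword, so the scheduler may preempt the request almost anywhere in the batch; the no-BATCH phase submits empty requests and therefore exercises preemption only at request boundaries. Scaled down to a handful of dwords, the batch looks like:

/* illustrative 4-dword version of the page-sized batch */
cs[0] = MI_ARB_CHECK;
cs[1] = MI_ARB_CHECK;
cs[2] = MI_ARB_CHECK;
cs[3] = MI_BATCH_BUFFER_END;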
833
576int intel_execlists_live_selftests(struct drm_i915_private *i915) 834int intel_execlists_live_selftests(struct drm_i915_private *i915)
577{ 835{
578 static const struct i915_subtest tests[] = { 836 static const struct i915_subtest tests[] = {
@@ -580,6 +838,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
580 SUBTEST(live_preempt), 838 SUBTEST(live_preempt),
581 SUBTEST(live_late_preempt), 839 SUBTEST(live_late_preempt),
582 SUBTEST(live_preempt_hang), 840 SUBTEST(live_preempt_hang),
841 SUBTEST(live_preempt_smoke),
583 }; 842 };
584 843
585 if (!HAS_EXECLISTS(i915)) 844 if (!HAS_EXECLISTS(i915))
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index 22a73da45ad5..d0c44c18db42 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -200,7 +200,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
200 engine->base.submit_request = mock_submit_request; 200 engine->base.submit_request = mock_submit_request;
201 201
202 i915_timeline_init(i915, &engine->base.timeline, engine->base.name); 202 i915_timeline_init(i915, &engine->base.timeline, engine->base.name);
203 lockdep_set_subclass(&engine->base.timeline.lock, TIMELINE_ENGINE); 203 i915_timeline_set_subclass(&engine->base.timeline, TIMELINE_ENGINE);
204 204
205 intel_engine_init_breadcrumbs(&engine->base); 205 intel_engine_init_breadcrumbs(&engine->base);
206 engine->base.breadcrumbs.mock = true; /* prevent touching HW for irqs */ 206 engine->base.breadcrumbs.mock = true; /* prevent touching HW for irqs */
diff --git a/drivers/gpu/drm/i915/vlv_dsi.c b/drivers/gpu/drm/i915/vlv_dsi.c
index 435a2c35ee8c..361e962a7969 100644
--- a/drivers/gpu/drm/i915/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/vlv_dsi.c
@@ -206,39 +206,6 @@ static const struct mipi_dsi_host_ops intel_dsi_host_ops = {
206 .transfer = intel_dsi_host_transfer, 206 .transfer = intel_dsi_host_transfer,
207}; 207};
208 208
209static struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
210 enum port port)
211{
212 struct intel_dsi_host *host;
213 struct mipi_dsi_device *device;
214
215 host = kzalloc(sizeof(*host), GFP_KERNEL);
216 if (!host)
217 return NULL;
218
219 host->base.ops = &intel_dsi_host_ops;
220 host->intel_dsi = intel_dsi;
221 host->port = port;
222
223 /*
224 * We should call mipi_dsi_host_register(&host->base) here, but we don't
225 * have a host->dev, and we don't have OF stuff either. So just use the
226 * dsi framework as a library and hope for the best. Create the dsi
227 * devices by ourselves here too. Need to be careful though, because we
228 * don't initialize any of the driver model devices here.
229 */
230 device = kzalloc(sizeof(*device), GFP_KERNEL);
231 if (!device) {
232 kfree(host);
233 return NULL;
234 }
235
236 device->host = &host->base;
237 host->device = device;
238
239 return host;
240}
241
242/* 209/*
243 * send a video mode command 210 * send a video mode command
244 * 211 *
@@ -290,16 +257,6 @@ static void band_gap_reset(struct drm_i915_private *dev_priv)
290 mutex_unlock(&dev_priv->sb_lock); 257 mutex_unlock(&dev_priv->sb_lock);
291} 258}
292 259
293static inline bool is_vid_mode(struct intel_dsi *intel_dsi)
294{
295 return intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE;
296}
297
298static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
299{
300 return intel_dsi->operation_mode == INTEL_DSI_COMMAND_MODE;
301}
302
303static bool intel_dsi_compute_config(struct intel_encoder *encoder, 260static bool intel_dsi_compute_config(struct intel_encoder *encoder,
304 struct intel_crtc_state *pipe_config, 261 struct intel_crtc_state *pipe_config,
 					  struct drm_connector_state *conn_state)
@@ -314,6 +271,7 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
 	int ret;
 
 	DRM_DEBUG_KMS("\n");
+	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
 
 	if (fixed_mode) {
 		intel_fixed_panel_mode(fixed_mode, adjusted_mode);
@@ -745,17 +703,6 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
 			      const struct intel_crtc_state *pipe_config);
 static void intel_dsi_unprepare(struct intel_encoder *encoder);
 
-static void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
-{
-	struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
-
-	/* For v3 VBTs in vid-mode the delays are part of the VBT sequences */
-	if (is_vid_mode(intel_dsi) && dev_priv->vbt.dsi.seq_version >= 3)
-		return;
-
-	msleep(msec);
-}
-
 /*
  * Panel enable/disable sequences from the VBT spec.
  *
@@ -793,6 +740,10 @@ static void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
  * - wait t4				- wait t4
  */
 
+/*
+ * DSI port enable has to be done before pipe and plane enable, so we do it in
+ * the pre_enable hook instead of the enable hook.
+ */
 static void intel_dsi_pre_enable(struct intel_encoder *encoder,
 				 const struct intel_crtc_state *pipe_config,
 				 const struct drm_connector_state *conn_state)
@@ -895,17 +846,6 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
 }
 
 /*
- * DSI port enable has to be done before pipe and plane enable, so we do it in
- * the pre_enable hook.
- */
-static void intel_dsi_enable_nop(struct intel_encoder *encoder,
-				 const struct intel_crtc_state *pipe_config,
-				 const struct drm_connector_state *conn_state)
-{
-	DRM_DEBUG_KMS("\n");
-}
-
-/*
  * DSI port disable has to be done after pipe and plane disable, so we do it in
  * the post_disable hook.
  */
@@ -1272,31 +1212,6 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
 	}
 }
 
-static enum drm_mode_status
-intel_dsi_mode_valid(struct drm_connector *connector,
-		     struct drm_display_mode *mode)
-{
-	struct intel_connector *intel_connector = to_intel_connector(connector);
-	const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
-	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
-
-	DRM_DEBUG_KMS("\n");
-
-	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
-		return MODE_NO_DBLESCAN;
-
-	if (fixed_mode) {
-		if (mode->hdisplay > fixed_mode->hdisplay)
-			return MODE_PANEL;
-		if (mode->vdisplay > fixed_mode->vdisplay)
-			return MODE_PANEL;
-		if (fixed_mode->clock > max_dotclk)
-			return MODE_CLOCK_HIGH;
-	}
-
-	return MODE_OK;
-}
-
 /* return txclkesc cycles in terms of divider and duration in us */
 static u16 txclkesc(u32 divider, unsigned int us)
 {
@@ -1619,39 +1534,6 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder)
 	}
 }
 
-static int intel_dsi_get_modes(struct drm_connector *connector)
-{
-	struct intel_connector *intel_connector = to_intel_connector(connector);
-	struct drm_display_mode *mode;
-
-	DRM_DEBUG_KMS("\n");
-
-	if (!intel_connector->panel.fixed_mode) {
-		DRM_DEBUG_KMS("no fixed mode\n");
-		return 0;
-	}
-
-	mode = drm_mode_duplicate(connector->dev,
-				  intel_connector->panel.fixed_mode);
-	if (!mode) {
-		DRM_DEBUG_KMS("drm_mode_duplicate failed\n");
-		return 0;
-	}
-
-	drm_mode_probed_add(connector, mode);
-	return 1;
-}
-
-static void intel_dsi_connector_destroy(struct drm_connector *connector)
-{
-	struct intel_connector *intel_connector = to_intel_connector(connector);
-
-	DRM_DEBUG_KMS("\n");
-	intel_panel_fini(&intel_connector->panel);
-	drm_connector_cleanup(connector);
-	kfree(connector);
-}
-
 static void intel_dsi_encoder_destroy(struct drm_encoder *encoder)
 {
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
@@ -1676,7 +1558,7 @@ static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs
 static const struct drm_connector_funcs intel_dsi_connector_funcs = {
 	.late_register = intel_connector_register,
 	.early_unregister = intel_connector_unregister,
-	.destroy = intel_dsi_connector_destroy,
+	.destroy = intel_connector_destroy,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.atomic_get_property = intel_digital_connector_atomic_get_property,
 	.atomic_set_property = intel_digital_connector_atomic_set_property,
@@ -1684,27 +1566,57 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = {
 	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
 };
 
-static int intel_dsi_get_panel_orientation(struct intel_connector *connector)
+static enum drm_panel_orientation
+vlv_dsi_get_hw_panel_orientation(struct intel_connector *connector)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-	int orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL;
-	enum i9xx_plane_id i9xx_plane;
+	struct intel_encoder *encoder = connector->encoder;
+	enum intel_display_power_domain power_domain;
+	enum drm_panel_orientation orientation;
+	struct intel_plane *plane;
+	struct intel_crtc *crtc;
+	enum pipe pipe;
 	u32 val;
 
-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-		if (connector->encoder->crtc_mask == BIT(PIPE_B))
-			i9xx_plane = PLANE_B;
-		else
-			i9xx_plane = PLANE_A;
+	if (!encoder->get_hw_state(encoder, &pipe))
+		return DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
 
-		val = I915_READ(DSPCNTR(i9xx_plane));
-		if (val & DISPPLANE_ROTATE_180)
-			orientation = DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
-	}
+	crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+	plane = to_intel_plane(crtc->base.primary);
+
+	power_domain = POWER_DOMAIN_PIPE(pipe);
+	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+		return DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
+
+	val = I915_READ(DSPCNTR(plane->i9xx_plane));
+
+	if (!(val & DISPLAY_PLANE_ENABLE))
+		orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
+	else if (val & DISPPLANE_ROTATE_180)
+		orientation = DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
+	else
+		orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL;
+
+	intel_display_power_put(dev_priv, power_domain);
 
 	return orientation;
 }
 
+static enum drm_panel_orientation
+vlv_dsi_get_panel_orientation(struct intel_connector *connector)
+{
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	enum drm_panel_orientation orientation;
+
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+		orientation = vlv_dsi_get_hw_panel_orientation(connector);
+		if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
+			return orientation;
+	}
+
+	return intel_dsi_get_panel_orientation(connector);
+}
+
 static void intel_dsi_add_properties(struct intel_connector *connector)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -1722,7 +1634,7 @@ static void intel_dsi_add_properties(struct intel_connector *connector)
 		connector->base.state->scaling_mode = DRM_MODE_SCALE_ASPECT;
 
 		connector->base.display_info.panel_orientation =
-			intel_dsi_get_panel_orientation(connector);
+			vlv_dsi_get_panel_orientation(connector);
 		drm_connector_init_panel_orientation_property(
 				&connector->base,
 				connector->panel.fixed_mode->hdisplay,
@@ -1773,7 +1685,6 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
 
 	intel_encoder->compute_config = intel_dsi_compute_config;
 	intel_encoder->pre_enable = intel_dsi_pre_enable;
-	intel_encoder->enable = intel_dsi_enable_nop;
 	intel_encoder->disable = intel_dsi_disable;
 	intel_encoder->post_disable = intel_dsi_post_disable;
 	intel_encoder->get_hw_state = intel_dsi_get_hw_state;
@@ -1806,7 +1717,8 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
 	for_each_dsi_port(port, intel_dsi->ports) {
 		struct intel_dsi_host *host;
 
-		host = intel_dsi_host_init(intel_dsi, port);
+		host = intel_dsi_host_init(intel_dsi, &intel_dsi_host_ops,
+					   port);
 		if (!host)
 			goto err;
 
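The orientation rework above splits readout in two: vlv_dsi_get_hw_panel_orientation() consults the live plane state (taking the pipe power domain reference first), and vlv_dsi_get_panel_orientation() falls back to the VBT-based intel_dsi_get_panel_orientation() only when the hardware readout is inconclusive. For context, a minimal sketch of how such a result feeds the core property; probe_orientation() is a hypothetical stand-in for a driver helper like the one above:

	/*
	 * Sketch only: publish a probed orientation through the core
	 * panel-orientation property. probe_orientation() is hypothetical.
	 */
	static void example_publish_orientation(struct drm_connector *connector,
						const struct drm_display_mode *fixed_mode)
	{
		connector->display_info.panel_orientation =
			probe_orientation(connector);

		/* exposes the "panel orientation" property to userspace */
		drm_connector_init_panel_orientation_property(connector,
							      fixed_mode->hdisplay,
							      fixed_mode->vdisplay);
	}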
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index dd0552cb7472..af0a761f52f0 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -508,6 +508,18 @@ struct drm_connector_state {
 	 * drm_writeback_signal_completion()
 	 */
 	struct drm_writeback_job *writeback_job;
+
+	/**
+	 * @max_requested_bpc: Connector property to limit the maximum bit
+	 * depth of the pixels.
+	 */
+	u8 max_requested_bpc;
+
+	/**
+	 * @max_bpc: Connector max_bpc based on the requested max_bpc property
+	 * and the connector bpc limitations obtained from edid.
+	 */
+	u8 max_bpc;
 };
 
 /**
@@ -973,6 +985,12 @@ struct drm_connector {
 	 */
 	struct drm_property_blob *path_blob_ptr;
 
+	/**
+	 * @max_bpc_property: Default connector property for the max bpc to be
+	 * driven out of the connector.
+	 */
+	struct drm_property *max_bpc_property;
+
 #define DRM_CONNECTOR_POLL_HPD (1 << 0)
 #define DRM_CONNECTOR_POLL_CONNECT (1 << 1)
 #define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2)
@@ -1269,6 +1287,8 @@ void drm_connector_set_link_status_property(struct drm_connector *connector,
 					    uint64_t link_status);
 int drm_connector_init_panel_orientation_property(
 	struct drm_connector *connector, int width, int height);
+int drm_connector_attach_max_bpc_property(struct drm_connector *connector,
+					  int min, int max);
 
 /**
  * struct drm_tile_group - Tile group metadata
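The new property is opt-in per connector: a driver attaches it with the range it can actually drive, userspace sets max_requested_bpc, and the driver clamps that against the EDID-derived limit into max_bpc during atomic check. A minimal sketch of the attach step, assuming a connector capable of 8 to 12 bpc (the range here is only an assumed example):

	/* Sketch: expose the "max bpc" property during connector init. */
	static int example_attach_max_bpc(struct drm_connector *connector)
	{
		/* min/max are driver policy; 8..12 is an assumed range */
		return drm_connector_attach_max_bpc_property(connector, 8, 12);
	}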
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 9ad98e8d9ede..3314e91f6eb3 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -231,6 +231,8 @@
 #define DP_DSC_MAX_BITS_PER_PIXEL_LOW	    0x067   /* eDP 1.4 */
 
 #define DP_DSC_MAX_BITS_PER_PIXEL_HI	    0x068   /* eDP 1.4 */
+# define DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK  (0x3 << 0)
+# define DP_DSC_MAX_BITS_PER_PIXEL_HI_SHIFT 8
 
 #define DP_DSC_DEC_COLOR_FORMAT_CAP	    0x069
 # define DP_DSC_RGB			    (1 << 0)
@@ -279,6 +281,8 @@
 # define DP_DSC_THROUGHPUT_MODE_1_1000	    (14 << 4)
 
 #define DP_DSC_MAX_SLICE_WIDTH		    0x06C
+#define DP_DSC_MIN_SLICE_WIDTH_VALUE	    2560
+#define DP_DSC_SLICE_WIDTH_MULTIPLIER	    320
 
 #define DP_DSC_SLICE_CAP_2		    0x06D
 # define DP_DSC_16_PER_DP_DSC_SINK	    (1 << 0)
@@ -477,6 +481,7 @@
 # define DP_AUX_FRAME_SYNC_VALID	    (1 << 0)
 
 #define DP_DSC_ENABLE			    0x160   /* DP 1.4 */
+# define DP_DECOMPRESSION_EN		    (1 << 0)
 
 #define DP_PSR_EN_CFG			    0x170   /* XXX 1.2? */
 # define DP_PSR_ENABLE			    (1 << 0)
@@ -907,6 +912,57 @@
 #define DP_AUX_HDCP_KSV_FIFO		0x6802C
 #define DP_AUX_HDCP_AINFO		0x6803B
 
+/* DP HDCP2.2 parameter offsets in DPCD address space */
+#define DP_HDCP_2_2_REG_RTX_OFFSET		0x69000
+#define DP_HDCP_2_2_REG_TXCAPS_OFFSET		0x69008
+#define DP_HDCP_2_2_REG_CERT_RX_OFFSET		0x6900B
+#define DP_HDCP_2_2_REG_RRX_OFFSET		0x69215
+#define DP_HDCP_2_2_REG_RX_CAPS_OFFSET		0x6921D
+#define DP_HDCP_2_2_REG_EKPUB_KM_OFFSET		0x69220
+#define DP_HDCP_2_2_REG_EKH_KM_WR_OFFSET	0x692A0
+#define DP_HDCP_2_2_REG_M_OFFSET		0x692B0
+#define DP_HDCP_2_2_REG_HPRIME_OFFSET		0x692C0
+#define DP_HDCP_2_2_REG_EKH_KM_RD_OFFSET	0x692E0
+#define DP_HDCP_2_2_REG_RN_OFFSET		0x692F0
+#define DP_HDCP_2_2_REG_LPRIME_OFFSET		0x692F8
+#define DP_HDCP_2_2_REG_EDKEY_KS_OFFSET		0x69318
+#define DP_HDCP_2_2_REG_RIV_OFFSET		0x69328
+#define DP_HDCP_2_2_REG_RXINFO_OFFSET		0x69330
+#define DP_HDCP_2_2_REG_SEQ_NUM_V_OFFSET	0x69332
+#define DP_HDCP_2_2_REG_VPRIME_OFFSET		0x69335
+#define DP_HDCP_2_2_REG_RECV_ID_LIST_OFFSET	0x69345
+#define DP_HDCP_2_2_REG_V_OFFSET		0x693E0
+#define DP_HDCP_2_2_REG_SEQ_NUM_M_OFFSET	0x693F0
+#define DP_HDCP_2_2_REG_K_OFFSET		0x693F3
+#define DP_HDCP_2_2_REG_STREAM_ID_TYPE_OFFSET	0x693F5
+#define DP_HDCP_2_2_REG_MPRIME_OFFSET		0x69473
+#define DP_HDCP_2_2_REG_RXSTATUS_OFFSET		0x69493
+#define DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET	0x69494
+#define DP_HDCP_2_2_REG_DBG_OFFSET		0x69518
+
+/* DP HDCP message start offsets in DPCD address space */
+#define DP_HDCP_2_2_AKE_INIT_OFFSET		DP_HDCP_2_2_REG_RTX_OFFSET
+#define DP_HDCP_2_2_AKE_SEND_CERT_OFFSET	DP_HDCP_2_2_REG_CERT_RX_OFFSET
+#define DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET	DP_HDCP_2_2_REG_EKPUB_KM_OFFSET
+#define DP_HDCP_2_2_AKE_STORED_KM_OFFSET	DP_HDCP_2_2_REG_EKH_KM_WR_OFFSET
+#define DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET	DP_HDCP_2_2_REG_HPRIME_OFFSET
+#define DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET \
+						DP_HDCP_2_2_REG_EKH_KM_RD_OFFSET
+#define DP_HDCP_2_2_LC_INIT_OFFSET		DP_HDCP_2_2_REG_RN_OFFSET
+#define DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET	DP_HDCP_2_2_REG_LPRIME_OFFSET
+#define DP_HDCP_2_2_SKE_SEND_EKS_OFFSET		DP_HDCP_2_2_REG_EDKEY_KS_OFFSET
+#define DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET	DP_HDCP_2_2_REG_RXINFO_OFFSET
+#define DP_HDCP_2_2_REP_SEND_ACK_OFFSET		DP_HDCP_2_2_REG_V_OFFSET
+#define DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET	DP_HDCP_2_2_REG_SEQ_NUM_M_OFFSET
+#define DP_HDCP_2_2_REP_STREAM_READY_OFFSET	DP_HDCP_2_2_REG_MPRIME_OFFSET
+
+#define HDCP_2_2_DP_RXSTATUS_LEN		1
+#define HDCP_2_2_DP_RXSTATUS_READY(x)		((x) & BIT(0))
+#define HDCP_2_2_DP_RXSTATUS_H_PRIME(x)		((x) & BIT(1))
+#define HDCP_2_2_DP_RXSTATUS_PAIRING(x)		((x) & BIT(2))
+#define HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(x)	((x) & BIT(3))
+#define HDCP_2_2_DP_RXSTATUS_LINK_FAILED(x)	((x) & BIT(4))
+
 /* DP 1.2 Sideband message defines */
 /* peer device type - DP 1.2a Table 2-92 */
 #define DP_PEER_DEVICE_NONE		0x0
@@ -965,6 +1021,7 @@ u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
 
 #define DP_BRANCH_OUI_HEADER_SIZE	0xc
 #define DP_RECEIVER_CAP_SIZE		0xf
+#define DP_DSC_RECEIVER_CAP_SIZE	0xf
 #define EDP_PSR_RECEIVER_CAP_SIZE	2
 #define EDP_DISPLAY_CTL_CAP_SIZE	3
 
@@ -995,6 +1052,7 @@ struct dp_sdp_header {
 
 #define EDP_SDP_HEADER_REVISION_MASK		0x1F
 #define EDP_SDP_HEADER_VALID_PAYLOAD_BYTES	0x1F
+#define DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1 0x7F
 
 struct edp_vsc_psr {
 	struct dp_sdp_header sdp_header;
@@ -1061,6 +1119,43 @@ drm_dp_is_branch(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
 	return dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT;
 }
 
+/* DP/eDP DSC support */
+u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
+				   bool is_edp);
+u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]);
+u8 drm_dp_dsc_sink_max_color_depth(const u8 dsc_dpc[DP_DSC_RECEIVER_CAP_SIZE]);
+
+static inline bool
+drm_dp_sink_supports_dsc(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+{
+	return dsc_dpcd[DP_DSC_SUPPORT - DP_DSC_SUPPORT] &
+		DP_DSC_DECOMPRESSION_IS_SUPPORTED;
+}
+
+static inline u16
+drm_edp_dsc_sink_output_bpp(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+{
+	return dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_LOW - DP_DSC_SUPPORT] |
+		(dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_HI - DP_DSC_SUPPORT] &
+		 DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK <<
+		 DP_DSC_MAX_BITS_PER_PIXEL_HI_SHIFT);
+}
+
+static inline u32
+drm_dp_dsc_sink_max_slice_width(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+{
+	/* Max Slicewidth = Number of Pixels * 320 */
+	return dsc_dpcd[DP_DSC_MAX_SLICE_WIDTH - DP_DSC_SUPPORT] *
+		DP_DSC_SLICE_WIDTH_MULTIPLIER;
+}
+
+/* Forward Error Correction Support on DP 1.4 */
+static inline bool
+drm_dp_sink_supports_fec(const u8 fec_capable)
+{
+	return fec_capable & DP_FEC_CAPABLE;
+}
+
 /*
  * DisplayPort AUX channel
  */
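All of the DSC helpers index one cached capability block that begins at DP_DSC_SUPPORT, which is why each access subtracts DP_DSC_SUPPORT from the register offset. One caveat in drm_edp_dsc_sink_output_bpp() as written: since << binds tighter than &, the HI byte is ANDed with (0x3 << 8), which is always zero for a u8; the & almost certainly wants its own parentheses before the shift. A hedged sketch of the intended usage, with illustrative (not i915) error handling:

	#include <drm/drm_dp_helper.h>

	/* Sketch: cache the DSC capability block once, then query it. */
	static int example_max_slice_width(struct drm_dp_aux *aux)
	{
		u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE];

		if (drm_dp_dpcd_read(aux, DP_DSC_SUPPORT, dsc_dpcd,
				     sizeof(dsc_dpcd)) < 0)
			return -EIO;

		if (!drm_dp_sink_supports_dsc(dsc_dpcd))
			return 0;

		/* pixels, in units of DP_DSC_SLICE_WIDTH_MULTIPLIER (320) */
		return drm_dp_dsc_sink_max_slice_width(dsc_dpcd);
	}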
diff --git a/include/drm/drm_hdcp.h b/include/drm/drm_hdcp.h
index 98e63d870139..a6de09c5e47f 100644
--- a/include/drm/drm_hdcp.h
+++ b/include/drm/drm_hdcp.h
@@ -38,4 +38,216 @@
 #define DRM_HDCP_DDC_BSTATUS		0x41
 #define DRM_HDCP_DDC_KSV_FIFO		0x43
 
+#define DRM_HDCP_1_4_SRM_ID		0x8
+#define DRM_HDCP_1_4_VRL_LENGTH_SIZE	3
+#define DRM_HDCP_1_4_DCP_SIG_SIZE	40
+
+/* Protocol message definition for HDCP2.2 specification */
+/*
+ * Protected content streams are classified into 2 types:
+ * - Type0: Can be transmitted with HDCP 1.4+
+ * - Type1: Can be transmitted with HDCP 2.2+
+ */
+#define HDCP_STREAM_TYPE0		0x00
+#define HDCP_STREAM_TYPE1		0x01
+
+/* HDCP2.2 Msg IDs */
+#define HDCP_2_2_NULL_MSG		1
+#define HDCP_2_2_AKE_INIT		2
+#define HDCP_2_2_AKE_SEND_CERT		3
+#define HDCP_2_2_AKE_NO_STORED_KM	4
+#define HDCP_2_2_AKE_STORED_KM		5
+#define HDCP_2_2_AKE_SEND_HPRIME	7
+#define HDCP_2_2_AKE_SEND_PAIRING_INFO	8
+#define HDCP_2_2_LC_INIT		9
+#define HDCP_2_2_LC_SEND_LPRIME		10
+#define HDCP_2_2_SKE_SEND_EKS		11
+#define HDCP_2_2_REP_SEND_RECVID_LIST	12
+#define HDCP_2_2_REP_SEND_ACK		15
+#define HDCP_2_2_REP_STREAM_MANAGE	16
+#define HDCP_2_2_REP_STREAM_READY	17
+#define HDCP_2_2_ERRATA_DP_STREAM_TYPE	50
+
+#define HDCP_2_2_RTX_LEN		8
+#define HDCP_2_2_RRX_LEN		8
+
+#define HDCP_2_2_K_PUB_RX_MOD_N_LEN	128
+#define HDCP_2_2_K_PUB_RX_EXP_E_LEN	3
+#define HDCP_2_2_K_PUB_RX_LEN		(HDCP_2_2_K_PUB_RX_MOD_N_LEN + \
+					 HDCP_2_2_K_PUB_RX_EXP_E_LEN)
+
+#define HDCP_2_2_DCP_LLC_SIG_LEN	384
+
+#define HDCP_2_2_E_KPUB_KM_LEN		128
+#define HDCP_2_2_E_KH_KM_M_LEN		(16 + 16)
+#define HDCP_2_2_H_PRIME_LEN		32
+#define HDCP_2_2_E_KH_KM_LEN		16
+#define HDCP_2_2_RN_LEN			8
+#define HDCP_2_2_L_PRIME_LEN		32
+#define HDCP_2_2_E_DKEY_KS_LEN		16
+#define HDCP_2_2_RIV_LEN		8
+#define HDCP_2_2_SEQ_NUM_LEN		3
+#define HDCP_2_2_V_PRIME_HALF_LEN	(HDCP_2_2_L_PRIME_LEN / 2)
+#define HDCP_2_2_RECEIVER_ID_LEN	DRM_HDCP_KSV_LEN
+#define HDCP_2_2_MAX_DEVICE_COUNT	31
+#define HDCP_2_2_RECEIVER_IDS_MAX_LEN	(HDCP_2_2_RECEIVER_ID_LEN * \
+					 HDCP_2_2_MAX_DEVICE_COUNT)
+#define HDCP_2_2_MPRIME_LEN		32
+
+/* Following Macros take a byte at a time for bit(s) masking */
+/*
+ * TODO: This has to be changed for DP MST, as multiple stream on
+ * same port is possible.
+ * For HDCP2.2 on HDMI and DP SST this value is always 1.
+ */
+#define HDCP_2_2_MAX_CONTENT_STREAMS_CNT	1
+#define HDCP_2_2_TXCAP_MASK_LEN			2
+#define HDCP_2_2_RXCAPS_LEN			3
+#define HDCP_2_2_RX_REPEATER(x)			((x) & BIT(0))
+#define HDCP_2_2_DP_HDCP_CAPABLE(x)		((x) & BIT(1))
+#define HDCP_2_2_RXINFO_LEN			2
+
+/* HDCP1.x compliant device in downstream */
+#define HDCP_2_2_HDCP1_DEVICE_CONNECTED(x)	((x) & BIT(0))
+
+/* HDCP2.0 Compliant repeater in downstream */
+#define HDCP_2_2_HDCP_2_0_REP_CONNECTED(x)	((x) & BIT(1))
+#define HDCP_2_2_MAX_CASCADE_EXCEEDED(x)	((x) & BIT(2))
+#define HDCP_2_2_MAX_DEVS_EXCEEDED(x)		((x) & BIT(3))
+#define HDCP_2_2_DEV_COUNT_LO(x)		(((x) & (0xF << 4)) >> 4)
+#define HDCP_2_2_DEV_COUNT_HI(x)		((x) & BIT(0))
+#define HDCP_2_2_DEPTH(x)			(((x) & (0x7 << 1)) >> 1)
+
+struct hdcp2_cert_rx {
+	u8	receiver_id[HDCP_2_2_RECEIVER_ID_LEN];
+	u8	kpub_rx[HDCP_2_2_K_PUB_RX_LEN];
+	u8	reserved[2];
+	u8	dcp_signature[HDCP_2_2_DCP_LLC_SIG_LEN];
+} __packed;
+
+struct hdcp2_streamid_type {
+	u8	stream_id;
+	u8	stream_type;
+} __packed;
+
+/*
+ * The TxCaps field specified in the HDCP HDMI, DP specs
+ * This field is big endian as specified in the errata.
+ */
+struct hdcp2_tx_caps {
+	/* Transmitter must set this to 0x2 */
+	u8	version;
+
+	/* Reserved for HDCP and DP Spec. Read as Zero */
+	u8	tx_cap_mask[HDCP_2_2_TXCAP_MASK_LEN];
+} __packed;
+
+/* Main structures for HDCP2.2 protocol communication */
+struct hdcp2_ake_init {
+	u8			msg_id;
+	u8			r_tx[HDCP_2_2_RTX_LEN];
+	struct hdcp2_tx_caps	tx_caps;
+} __packed;
+
+struct hdcp2_ake_send_cert {
+	u8			msg_id;
+	struct hdcp2_cert_rx	cert_rx;
+	u8			r_rx[HDCP_2_2_RRX_LEN];
+	u8			rx_caps[HDCP_2_2_RXCAPS_LEN];
+} __packed;
+
+struct hdcp2_ake_no_stored_km {
+	u8	msg_id;
+	u8	e_kpub_km[HDCP_2_2_E_KPUB_KM_LEN];
+} __packed;
+
+struct hdcp2_ake_stored_km {
+	u8	msg_id;
+	u8	e_kh_km_m[HDCP_2_2_E_KH_KM_M_LEN];
+} __packed;
+
+struct hdcp2_ake_send_hprime {
+	u8	msg_id;
+	u8	h_prime[HDCP_2_2_H_PRIME_LEN];
+} __packed;
+
+struct hdcp2_ake_send_pairing_info {
+	u8	msg_id;
+	u8	e_kh_km[HDCP_2_2_E_KH_KM_LEN];
+} __packed;
+
+struct hdcp2_lc_init {
+	u8	msg_id;
+	u8	r_n[HDCP_2_2_RN_LEN];
+} __packed;
+
+struct hdcp2_lc_send_lprime {
+	u8	msg_id;
+	u8	l_prime[HDCP_2_2_L_PRIME_LEN];
+} __packed;
+
+struct hdcp2_ske_send_eks {
+	u8	msg_id;
+	u8	e_dkey_ks[HDCP_2_2_E_DKEY_KS_LEN];
+	u8	riv[HDCP_2_2_RIV_LEN];
+} __packed;
+
+struct hdcp2_rep_send_receiverid_list {
+	u8	msg_id;
+	u8	rx_info[HDCP_2_2_RXINFO_LEN];
+	u8	seq_num_v[HDCP_2_2_SEQ_NUM_LEN];
+	u8	v_prime[HDCP_2_2_V_PRIME_HALF_LEN];
+	u8	receiver_ids[HDCP_2_2_RECEIVER_IDS_MAX_LEN];
+} __packed;
+
+struct hdcp2_rep_send_ack {
+	u8	msg_id;
+	u8	v[HDCP_2_2_V_PRIME_HALF_LEN];
+} __packed;
+
+struct hdcp2_rep_stream_manage {
+	u8			msg_id;
+	u8			seq_num_m[HDCP_2_2_SEQ_NUM_LEN];
+	__be16			k;
+	struct hdcp2_streamid_type streams[HDCP_2_2_MAX_CONTENT_STREAMS_CNT];
+} __packed;
+
+struct hdcp2_rep_stream_ready {
+	u8	msg_id;
+	u8	m_prime[HDCP_2_2_MPRIME_LEN];
+} __packed;
+
+struct hdcp2_dp_errata_stream_type {
+	u8	msg_id;
+	u8	stream_type;
+} __packed;
+
+/* HDCP2.2 TIMEOUTs in mSec */
+#define HDCP_2_2_CERT_TIMEOUT_MS		100
+#define HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS	1000
+#define HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS	200
+#define HDCP_2_2_PAIRING_TIMEOUT_MS		200
+#define HDCP_2_2_HDMI_LPRIME_TIMEOUT_MS		20
+#define HDCP_2_2_DP_LPRIME_TIMEOUT_MS		7
+#define HDCP_2_2_RECVID_LIST_TIMEOUT_MS		3000
+#define HDCP_2_2_STREAM_READY_TIMEOUT_MS	100
+
+/* HDMI HDCP2.2 Register Offsets */
+#define HDCP_2_2_HDMI_REG_VER_OFFSET		0x50
+#define HDCP_2_2_HDMI_REG_WR_MSG_OFFSET		0x60
+#define HDCP_2_2_HDMI_REG_RXSTATUS_OFFSET	0x70
+#define HDCP_2_2_HDMI_REG_RD_MSG_OFFSET		0x80
+#define HDCP_2_2_HDMI_REG_DBG_OFFSET		0xC0
+
+#define HDCP_2_2_HDMI_SUPPORT_MASK		BIT(2)
+#define HDCP_2_2_RX_CAPS_VERSION_VAL		0x02
+#define HDCP_2_2_SEQ_NUM_MAX			0xFFFFFF
+#define HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN	200
+
+/* Below macros take a byte at a time and mask the bit(s) */
+#define HDCP_2_2_HDMI_RXSTATUS_LEN		2
+#define HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(x)	((x) & 0x3)
+#define HDCP_2_2_HDMI_RXSTATUS_READY(x)		((x) & BIT(2))
+#define HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(x)	((x) & BIT(3))
+
 #endif
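RxInfo packs the repeater topology into two bytes, so the device count is a 5-bit field split across the byte boundary; the HI/LO accessors above each take only the relevant byte. A self-contained sketch of the decode, following the masking convention of these macros (the helper name is hypothetical):

	/* Sketch: device count from the 2-byte RxInfo field. */
	static unsigned int
	example_rxinfo_dev_count(const u8 rx_info[HDCP_2_2_RXINFO_LEN])
	{
		/* rx_info[0] carries the high bit, rx_info[1] the low nibble */
		return (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4) |
		       HDCP_2_2_DEV_COUNT_LO(rx_info[1]);
	}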
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index fd965ffbb92e..192667144693 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -365,16 +365,20 @@
 	INTEL_VGA_DEVICE(0x593B, info) /* Halo GT4 */
 
 /* AML/KBL Y GT2 */
-#define INTEL_AML_GT2_IDS(info) \
+#define INTEL_AML_KBL_GT2_IDS(info) \
 	INTEL_VGA_DEVICE(0x591C, info),  /* ULX GT2 */ \
 	INTEL_VGA_DEVICE(0x87C0, info) /* ULX GT2 */
 
+/* AML/CFL Y GT2 */
+#define INTEL_AML_CFL_GT2_IDS(info) \
+	INTEL_VGA_DEVICE(0x87CA, info)
+
 #define INTEL_KBL_IDS(info) \
 	INTEL_KBL_GT1_IDS(info), \
 	INTEL_KBL_GT2_IDS(info), \
 	INTEL_KBL_GT3_IDS(info), \
 	INTEL_KBL_GT4_IDS(info), \
-	INTEL_AML_GT2_IDS(info)
+	INTEL_AML_KBL_GT2_IDS(info)
 
 /* CFL S */
 #define INTEL_CFL_S_GT1_IDS(info) \
@@ -407,17 +411,17 @@
 
 /* WHL/CFL U GT1 */
 #define INTEL_WHL_U_GT1_IDS(info) \
-	INTEL_VGA_DEVICE(0x3EA1, info)
+	INTEL_VGA_DEVICE(0x3EA1, info), \
+	INTEL_VGA_DEVICE(0x3EA4, info)
 
 /* WHL/CFL U GT2 */
 #define INTEL_WHL_U_GT2_IDS(info) \
-	INTEL_VGA_DEVICE(0x3EA0, info)
+	INTEL_VGA_DEVICE(0x3EA0, info), \
+	INTEL_VGA_DEVICE(0x3EA3, info)
 
 /* WHL/CFL U GT3 */
 #define INTEL_WHL_U_GT3_IDS(info) \
-	INTEL_VGA_DEVICE(0x3EA2, info), \
-	INTEL_VGA_DEVICE(0x3EA3, info), \
-	INTEL_VGA_DEVICE(0x3EA4, info)
+	INTEL_VGA_DEVICE(0x3EA2, info)
 
 #define INTEL_CFL_IDS(info) \
 	INTEL_CFL_S_GT1_IDS(info), \
@@ -427,7 +431,8 @@
 	INTEL_CFL_U_GT3_IDS(info), \
 	INTEL_WHL_U_GT1_IDS(info), \
 	INTEL_WHL_U_GT2_IDS(info), \
-	INTEL_WHL_U_GT3_IDS(info)
+	INTEL_WHL_U_GT3_IDS(info), \
+	INTEL_AML_CFL_GT2_IDS(info)
 
 /* CNL */
 #define INTEL_CNL_IDS(info) \
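Each *_IDS macro expands to comma-separated struct pci_device_id initializers, so the rename and the new 0x87CA entry flow automatically into every table built from INTEL_CFL_IDS(). A sketch of the usual consumption pattern; example_info is a hypothetical driver-data blob:

	#include <linux/pci.h>
	#include <drm/i915_pciids.h>

	static const int example_info;	/* hypothetical driver data */

	static const struct pci_device_id example_pciidlist[] = {
		INTEL_CFL_IDS(&example_info), /* now includes AML/CFL GT2 (0x87CA) */
		{ }
	};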
diff --git a/include/linux/swap.h b/include/linux/swap.h
index d8a07a4f171d..a8f6d5d89524 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -18,6 +18,8 @@ struct notifier_block;
 
 struct bio;
 
+struct pagevec;
+
 #define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
 #define SWAP_FLAG_PRIO_MASK	0x7fff
 #define SWAP_FLAG_PRIO_SHIFT	0
@@ -369,7 +371,7 @@ static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
 #endif
 
 extern int page_evictable(struct page *page);
-extern void check_move_unevictable_pages(struct page **, int nr_pages);
+extern void check_move_unevictable_pages(struct pagevec *pvec);
 
 extern int kswapd_run(int nid);
 extern void kswapd_stop(int nid);
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index a4446f452040..298b2e197744 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -412,6 +412,14 @@ typedef struct drm_i915_irq_wait {
 	int irq_seq;
 } drm_i915_irq_wait_t;
 
+/*
+ * Different modes of per-process Graphics Translation Table,
+ * see I915_PARAM_HAS_ALIASING_PPGTT
+ */
+#define I915_GEM_PPGTT_NONE	0
+#define I915_GEM_PPGTT_ALIASING	1
+#define I915_GEM_PPGTT_FULL	2
+
 /* Ioctl to query kernel params:
  */
 #define I915_PARAM_IRQ_ACTIVE            1
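The GETPARAM value for I915_PARAM_HAS_ALIASING_PPGTT was previously treated as a boolean; with these levels userspace can tell aliasing from full ppGTT. A minimal userspace sketch, assuming an already-open i915 DRM fd:

	#include <sys/ioctl.h>
	#include <xf86drm.h>
	#include <i915_drm.h>

	/* Sketch: query which ppGTT level the kernel exposes. */
	static int example_ppgtt_level(int fd)
	{
		int value = 0;
		struct drm_i915_getparam gp = {
			.param = I915_PARAM_HAS_ALIASING_PPGTT,
			.value = &value,
		};

		if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
			return I915_GEM_PPGTT_NONE; /* treat errors as "none" */

		return value; /* NONE, ALIASING or FULL */
	}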
diff --git a/mm/shmem.c b/mm/shmem.c
index d44991ea5ed4..0e10b06fc7d6 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -756,7 +756,7 @@ void shmem_unlock_mapping(struct address_space *mapping)
 			break;
 		index = indices[pvec.nr - 1] + 1;
 		pagevec_remove_exceptionals(&pvec);
-		check_move_unevictable_pages(pvec.pages, pvec.nr);
+		check_move_unevictable_pages(&pvec);
 		pagevec_release(&pvec);
 		cond_resched();
 	}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 62ac0c488624..24ab1f7394ab 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -46,6 +46,7 @@
 #include <linux/delayacct.h>
 #include <linux/sysctl.h>
 #include <linux/oom.h>
+#include <linux/pagevec.h>
 #include <linux/prefetch.h>
 #include <linux/printk.h>
 #include <linux/dax.h>
@@ -4182,17 +4183,16 @@ int page_evictable(struct page *page)
 	return ret;
 }
 
-#ifdef CONFIG_SHMEM
 /**
- * check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list
- * @pages:	array of pages to check
- * @nr_pages:	number of pages to check
+ * check_move_unevictable_pages - check pages for evictability and move to
+ * appropriate zone lru list
+ * @pvec:	pagevec with lru pages to check
  *
- * Checks pages for evictability and moves them to the appropriate lru list.
- *
- * This function is only used for SysV IPC SHM_UNLOCK.
+ * Checks pages for evictability, if an evictable page is in the unevictable
+ * lru list, moves it to the appropriate evictable lru list. This function
+ * should be only used for lru pages.
  */
-void check_move_unevictable_pages(struct page **pages, int nr_pages)
+void check_move_unevictable_pages(struct pagevec *pvec)
 {
 	struct lruvec *lruvec;
 	struct pglist_data *pgdat = NULL;
@@ -4200,8 +4200,8 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
 	int pgrescued = 0;
 	int i;
 
-	for (i = 0; i < nr_pages; i++) {
-		struct page *page = pages[i];
+	for (i = 0; i < pvec->nr; i++) {
+		struct page *page = pvec->pages[i];
 		struct pglist_data *pagepgdat = page_pgdat(page);
 
 		pgscanned++;
@@ -4233,4 +4233,4 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
 		spin_unlock_irq(&pgdat->lru_lock);
 	}
 }
-#endif /* CONFIG_SHMEM */
+EXPORT_SYMBOL_GPL(check_move_unevictable_pages);
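With the pagevec signature and the EXPORT_SYMBOL_GPL, modular users (the i915 shmemfs backing store is the motivating caller in this series) can batch lru pages and flush them through in pagevec-sized chunks. A hedged sketch of that calling pattern; it assumes the caller holds a reference on each page, which __pagevec_release() drops:

	#include <linux/pagevec.h>
	#include <linux/swap.h>

	/* Sketch: rescue a batch of pages from the unevictable list. */
	static void example_check_move(struct page **pages, unsigned int nr)
	{
		struct pagevec pvec;
		unsigned int i;

		pagevec_init(&pvec);
		for (i = 0; i < nr; i++) {
			if (!pagevec_add(&pvec, pages[i])) {
				/* pagevec full: process and drop the batch */
				check_move_unevictable_pages(&pvec);
				__pagevec_release(&pvec);
			}
		}
		check_move_unevictable_pages(&pvec);
		__pagevec_release(&pvec);
	}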
diff --git a/sound/x86/intel_hdmi_audio.c b/sound/x86/intel_hdmi_audio.c
index 83d76c345940..00c92eb854ce 100644
--- a/sound/x86/intel_hdmi_audio.c
+++ b/sound/x86/intel_hdmi_audio.c
@@ -1648,7 +1648,7 @@ static int had_create_jack(struct snd_intelhad *ctx,
  * PM callbacks
  */
 
-static int hdmi_lpe_audio_runtime_suspend(struct device *dev)
+static int __maybe_unused hdmi_lpe_audio_suspend(struct device *dev)
 {
 	struct snd_intelhad_card *card_ctx = dev_get_drvdata(dev);
 	int port;
@@ -1664,23 +1664,8 @@ static int hdmi_lpe_audio_runtime_suspend(struct device *dev)
 		}
 	}
 
-	return 0;
-}
-
-static int __maybe_unused hdmi_lpe_audio_suspend(struct device *dev)
-{
-	struct snd_intelhad_card *card_ctx = dev_get_drvdata(dev);
-	int err;
+	snd_power_change_state(card_ctx->card, SNDRV_CTL_POWER_D3hot);
 
-	err = hdmi_lpe_audio_runtime_suspend(dev);
-	if (!err)
-		snd_power_change_state(card_ctx->card, SNDRV_CTL_POWER_D3hot);
-	return err;
-}
-
-static int hdmi_lpe_audio_runtime_resume(struct device *dev)
-{
-	pm_runtime_mark_last_busy(dev);
 	return 0;
 }
 
@@ -1688,8 +1673,10 @@ static int __maybe_unused hdmi_lpe_audio_resume(struct device *dev)
 {
 	struct snd_intelhad_card *card_ctx = dev_get_drvdata(dev);
 
-	hdmi_lpe_audio_runtime_resume(dev);
+	pm_runtime_mark_last_busy(dev);
+
 	snd_power_change_state(card_ctx->card, SNDRV_CTL_POWER_D0);
+
 	return 0;
 }
 
@@ -1877,7 +1864,6 @@ static int hdmi_lpe_audio_probe(struct platform_device *pdev)
 
 	pm_runtime_use_autosuspend(&pdev->dev);
 	pm_runtime_mark_last_busy(&pdev->dev);
-	pm_runtime_set_active(&pdev->dev);
 
 	dev_dbg(&pdev->dev, "%s: handle pending notification\n", __func__);
 	for_each_port(card_ctx, port) {
@@ -1908,8 +1894,6 @@ static int hdmi_lpe_audio_remove(struct platform_device *pdev)
 
 static const struct dev_pm_ops hdmi_lpe_audio_pm = {
 	SET_SYSTEM_SLEEP_PM_OPS(hdmi_lpe_audio_suspend, hdmi_lpe_audio_resume)
-	SET_RUNTIME_PM_OPS(hdmi_lpe_audio_runtime_suspend,
-			   hdmi_lpe_audio_runtime_resume, NULL)
 };
 
 static struct platform_driver hdmi_lpe_audio_driver = {
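The net effect of the audio changes is a dev_pm_ops carrying system-sleep callbacks only, with runtime-PM bookkeeping reduced to pm_runtime_mark_last_busy() in the remaining paths. The resulting shape, sketched with hypothetical callbacks:

	#include <linux/pm.h>

	static int __maybe_unused example_suspend(struct device *dev)
	{
		/* quiesce hardware, then report D3hot to the ALSA core */
		return 0;
	}

	static int __maybe_unused example_resume(struct device *dev)
	{
		return 0;
	}

	/* System-sleep ops only; no SET_RUNTIME_PM_OPS() entry remains. */
	static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);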