author     Linus Torvalds <torvalds@linux-foundation.org>   2018-11-16 11:17:29 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2018-11-16 11:17:29 -0500
commit     4efd34602fc0da31f87dca8669388edcafba622d (patch)
tree       099d6bd1e5acb7bc69024f475c592c6ee1f97de5
parent     ef268de19756e6bc78cd3e6d9b15545f7df97ef2 (diff)
parent     20325e8a614377967644cf63050095c9f2ea8ab9 (diff)
Merge tag 'drm-fixes-2018-11-16' of git://anongit.freedesktop.org/drm/drm
Pull drm fixes from Dave Airlie:
 "Live from Vancouver, SoC maintainer talk, this weeks drm fixes pull
  for rc3:

  omapdrm:
   - regression fixes for the reordering bridge stuff that went into rc1

  i915:
   - incorrect EU count fix
   - HPD storm fix
   - MST fix
   - relocation fix for gen4/5

  amdgpu:
   - huge page handling fix
   - IH ring setup
   - XGMI aperture setup
   - watermark setup fix

  misc:
   - docs and MST fix"

* tag 'drm-fixes-2018-11-16' of git://anongit.freedesktop.org/drm/drm: (23 commits)
  drm/i915: Account for scale factor when calculating initial phase
  drm/i915: Clean up skl_program_scaler()
  drm/i915: Move programming plane scaler to its own function.
  drm/i915/icl: Drop spurious register read from icl_dbuf_slices_update
  drm/i915: fix broadwell EU computation
  drm/amdgpu: fix huge page handling on Vega10
  drm/amd/pp: Fix truncated clock value when set watermark
  drm/amdgpu: fix bug with IH ring setup
  drm/meson: venc: dmt mode must use encp
  drm/amdgpu: set system aperture to cover whole FB region
  drm/i915: Fix hpd handling for pins with two encoders
  drm/i915/execlists: Force write serialisation into context image vs execution
  drm/i915/icl: Fix power well 2 wrt. DC-off toggling order
  drm/i915: Fix NULL deref when re-enabling HPD IRQs on systems with MST
  drm/i915: Fix possible race in intel_dp_add_mst_connector()
  drm/i915/ringbuffer: Delay after EMIT_INVALIDATE for gen4/gen5
  drm/omap: dsi: Fix missing of_platform_depopulate()
  drm/omap: Move DISPC runtime PM handling to omapdrm
  drm/omap: dsi: Ensure the device is active during probe
  drm/omap: hdmi4: Ensure the device is active during bind
  ...
 arch/arm/mach-omap2/display.c                    | 111
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c           |  18
 drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c         |   6
 drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c          |   6
 drivers/gpu/drm/amd/amdgpu/vega10_ih.c           |   2
 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c |  32
 drivers/gpu/drm/drm_dp_mst_topology.c            |   3
 drivers/gpu/drm/drm_fourcc.c                     |   2
 drivers/gpu/drm/i915/intel_device_info.c         |   2
 drivers/gpu/drm/i915/intel_display.c             |  45
 drivers/gpu/drm/i915/intel_dp_mst.c              |   8
 drivers/gpu/drm/i915/intel_drv.h                 |   2
 drivers/gpu/drm/i915/intel_hotplug.c             |  70
 drivers/gpu/drm/i915/intel_lrc.c                 |  14
 drivers/gpu/drm/i915/intel_ringbuffer.c          |  38
 drivers/gpu/drm/i915/intel_runtime_pm.c          |  16
 drivers/gpu/drm/i915/intel_sprite.c              |  93
 drivers/gpu/drm/meson/meson_venc.c               |  15
 drivers/gpu/drm/omapdrm/dss/dsi.c                |  22
 drivers/gpu/drm/omapdrm/dss/dss.c                |  11
 drivers/gpu/drm/omapdrm/dss/hdmi4.c              |  37
 drivers/gpu/drm/omapdrm/dss/hdmi5.c              |  27
 drivers/gpu/drm/omapdrm/dss/venc.c               |   7
 drivers/gpu/drm/omapdrm/omap_crtc.c              |   6
 24 files changed, 342 insertions(+), 251 deletions(-)
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index 9500b6e27380..f86b72d1d59e 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -209,11 +209,61 @@ static int __init omapdss_init_fbdev(void)
209 209
210 return 0; 210 return 0;
211} 211}
212#else 212
213static inline int omapdss_init_fbdev(void) 213static const char * const omapdss_compat_names[] __initconst = {
214 "ti,omap2-dss",
215 "ti,omap3-dss",
216 "ti,omap4-dss",
217 "ti,omap5-dss",
218 "ti,dra7-dss",
219};
220
221static struct device_node * __init omapdss_find_dss_of_node(void)
214{ 222{
215 return 0; 223 struct device_node *node;
224 int i;
225
226 for (i = 0; i < ARRAY_SIZE(omapdss_compat_names); ++i) {
227 node = of_find_compatible_node(NULL, NULL,
228 omapdss_compat_names[i]);
229 if (node)
230 return node;
231 }
232
233 return NULL;
216} 234}
235
236static int __init omapdss_init_of(void)
237{
238 int r;
239 struct device_node *node;
240 struct platform_device *pdev;
241
242 /* only create dss helper devices if dss is enabled in the .dts */
243
244 node = omapdss_find_dss_of_node();
245 if (!node)
246 return 0;
247
248 if (!of_device_is_available(node))
249 return 0;
250
251 pdev = of_find_device_by_node(node);
252
253 if (!pdev) {
254 pr_err("Unable to find DSS platform device\n");
255 return -ENODEV;
256 }
257
258 r = of_platform_populate(node, NULL, NULL, &pdev->dev);
259 if (r) {
260 pr_err("Unable to populate DSS submodule devices\n");
261 return r;
262 }
263
264 return omapdss_init_fbdev();
265}
266omap_device_initcall(omapdss_init_of);
217#endif /* CONFIG_FB_OMAP2 */ 267#endif /* CONFIG_FB_OMAP2 */
218 268
219static void dispc_disable_outputs(void) 269static void dispc_disable_outputs(void)
@@ -361,58 +411,3 @@ int omap_dss_reset(struct omap_hwmod *oh)
361 411
362 return r; 412 return r;
363} 413}
364
365static const char * const omapdss_compat_names[] __initconst = {
366 "ti,omap2-dss",
367 "ti,omap3-dss",
368 "ti,omap4-dss",
369 "ti,omap5-dss",
370 "ti,dra7-dss",
371};
372
373static struct device_node * __init omapdss_find_dss_of_node(void)
374{
375 struct device_node *node;
376 int i;
377
378 for (i = 0; i < ARRAY_SIZE(omapdss_compat_names); ++i) {
379 node = of_find_compatible_node(NULL, NULL,
380 omapdss_compat_names[i]);
381 if (node)
382 return node;
383 }
384
385 return NULL;
386}
387
388static int __init omapdss_init_of(void)
389{
390 int r;
391 struct device_node *node;
392 struct platform_device *pdev;
393
394 /* only create dss helper devices if dss is enabled in the .dts */
395
396 node = omapdss_find_dss_of_node();
397 if (!node)
398 return 0;
399
400 if (!of_device_is_available(node))
401 return 0;
402
403 pdev = of_find_device_by_node(node);
404
405 if (!pdev) {
406 pr_err("Unable to find DSS platform device\n");
407 return -ENODEV;
408 }
409
410 r = of_platform_populate(node, NULL, NULL, &pdev->dev);
411 if (r) {
412 pr_err("Unable to populate DSS submodule devices\n");
413 return r;
414 }
415
416 return omapdss_init_fbdev();
417}
418omap_device_initcall(omapdss_init_of);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 352b30409060..dad0e2342df9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1632,13 +1632,6 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
1632 continue; 1632 continue;
1633 } 1633 }
1634 1634
1635 /* First check if the entry is already handled */
1636 if (cursor.pfn < frag_start) {
1637 cursor.entry->huge = true;
1638 amdgpu_vm_pt_next(adev, &cursor);
1639 continue;
1640 }
1641
1642 /* If it isn't already handled it can't be a huge page */ 1635 /* If it isn't already handled it can't be a huge page */
1643 if (cursor.entry->huge) { 1636 if (cursor.entry->huge) {
1644 /* Add the entry to the relocated list to update it. */ 1637 /* Add the entry to the relocated list to update it. */
@@ -1701,8 +1694,17 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
1701 } 1694 }
1702 } while (frag_start < entry_end); 1695 } while (frag_start < entry_end);
1703 1696
1704 if (frag >= shift) 1697 if (amdgpu_vm_pt_descendant(adev, &cursor)) {
1698 /* Mark all child entries as huge */
1699 while (cursor.pfn < frag_start) {
1700 cursor.entry->huge = true;
1701 amdgpu_vm_pt_next(adev, &cursor);
1702 }
1703
1704 } else if (frag >= shift) {
1705 /* or just move on to the next on the same level. */
1705 amdgpu_vm_pt_next(adev, &cursor); 1706 amdgpu_vm_pt_next(adev, &cursor);
1707 }
1706 } 1708 }
1707 1709
1708 return 0; 1710 return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index ceb7847b504f..bfa317ad20a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -72,7 +72,7 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
72 72
73 /* Program the system aperture low logical page number. */ 73 /* Program the system aperture low logical page number. */
74 WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, 74 WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
75 min(adev->gmc.vram_start, adev->gmc.agp_start) >> 18); 75 min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
76 76
77 if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8) 77 if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
78 /* 78 /*
@@ -82,11 +82,11 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
82 * to get rid of the VM fault and hardware hang. 82 * to get rid of the VM fault and hardware hang.
83 */ 83 */
84 WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 84 WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
85 max((adev->gmc.vram_end >> 18) + 0x1, 85 max((adev->gmc.fb_end >> 18) + 0x1,
86 adev->gmc.agp_end >> 18)); 86 adev->gmc.agp_end >> 18));
87 else 87 else
88 WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 88 WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
89 max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18); 89 max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
90 90
91 /* Set default page address. */ 91 /* Set default page address. */
92 value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start 92 value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index fd23ba1226a5..a0db67adc34c 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -90,7 +90,7 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
90 90
91 /* Program the system aperture low logical page number. */ 91 /* Program the system aperture low logical page number. */
92 WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, 92 WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
93 min(adev->gmc.vram_start, adev->gmc.agp_start) >> 18); 93 min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
94 94
95 if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8) 95 if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
96 /* 96 /*
@@ -100,11 +100,11 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
100 * to get rid of the VM fault and hardware hang. 100 * to get rid of the VM fault and hardware hang.
101 */ 101 */
102 WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 102 WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
103 max((adev->gmc.vram_end >> 18) + 0x1, 103 max((adev->gmc.fb_end >> 18) + 0x1,
104 adev->gmc.agp_end >> 18)); 104 adev->gmc.agp_end >> 18));
105 else 105 else
106 WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 106 WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
107 max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18); 107 max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
108 108
109 /* Set default page address. */ 109 /* Set default page address. */
110 value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start + 110 value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index a99f71797aa3..a0fda6f9252a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -129,7 +129,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
129 else 129 else
130 wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4); 130 wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
131 WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off)); 131 WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
132 WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF); 132 WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFFFF);
133 133
134 /* set rptr, wptr to 0 */ 134 /* set rptr, wptr to 0 */
135 WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0); 135 WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
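Editorial note on the vega10_ih.c hunk above: IH_RB_WPTR_ADDR_HI holds the upper 32 bits of the write-back address, and the old 0xFF mask kept only 8 of them, so any address above the 40-bit (1 TiB) boundary was silently truncated; the wider 0xFFFF mask keeps 16 bits. A minimal user-space sketch of just that arithmetic (the address value below is made up for illustration, not taken from the driver):

#include <stdint.h>
#include <stdio.h>

/* Not kernel code: shows what the 0xFF vs 0xFFFF mask does to the upper
 * half of a hypothetical 48-bit write-pointer address. */
static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
	uint64_t wptr_off = 0x012345678000ULL;	/* hypothetical 48-bit GPU address */

	printf("& 0xFF   -> 0x%02x\n", upper_32_bits(wptr_off) & 0xFF);	/* 0x23: bits 40-47 lost */
	printf("& 0xFFFF -> 0x%04x\n", upper_32_bits(wptr_off) & 0xFFFF);	/* 0x0123: full field kept */
	return 0;
}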
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
index 99a33c33a32c..101c09b212ad 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
@@ -713,20 +713,20 @@ int smu_set_watermarks_for_clocks_ranges(void *wt_table,
713 for (i = 0; i < wm_with_clock_ranges->num_wm_dmif_sets; i++) { 713 for (i = 0; i < wm_with_clock_ranges->num_wm_dmif_sets; i++) {
714 table->WatermarkRow[1][i].MinClock = 714 table->WatermarkRow[1][i].MinClock =
715 cpu_to_le16((uint16_t) 715 cpu_to_le16((uint16_t)
716 (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz) / 716 (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
717 1000); 717 1000));
718 table->WatermarkRow[1][i].MaxClock = 718 table->WatermarkRow[1][i].MaxClock =
719 cpu_to_le16((uint16_t) 719 cpu_to_le16((uint16_t)
720 (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz) / 720 (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
721 1000); 721 1000));
722 table->WatermarkRow[1][i].MinUclk = 722 table->WatermarkRow[1][i].MinUclk =
723 cpu_to_le16((uint16_t) 723 cpu_to_le16((uint16_t)
724 (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz) / 724 (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
725 1000); 725 1000));
726 table->WatermarkRow[1][i].MaxUclk = 726 table->WatermarkRow[1][i].MaxUclk =
727 cpu_to_le16((uint16_t) 727 cpu_to_le16((uint16_t)
728 (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz) / 728 (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
729 1000); 729 1000));
730 table->WatermarkRow[1][i].WmSetting = (uint8_t) 730 table->WatermarkRow[1][i].WmSetting = (uint8_t)
731 wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id; 731 wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
732 } 732 }
@@ -734,20 +734,20 @@ int smu_set_watermarks_for_clocks_ranges(void *wt_table,
734 for (i = 0; i < wm_with_clock_ranges->num_wm_mcif_sets; i++) { 734 for (i = 0; i < wm_with_clock_ranges->num_wm_mcif_sets; i++) {
735 table->WatermarkRow[0][i].MinClock = 735 table->WatermarkRow[0][i].MinClock =
736 cpu_to_le16((uint16_t) 736 cpu_to_le16((uint16_t)
737 (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz) / 737 (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
738 1000); 738 1000));
739 table->WatermarkRow[0][i].MaxClock = 739 table->WatermarkRow[0][i].MaxClock =
740 cpu_to_le16((uint16_t) 740 cpu_to_le16((uint16_t)
741 (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz) / 741 (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
742 1000); 742 1000));
743 table->WatermarkRow[0][i].MinUclk = 743 table->WatermarkRow[0][i].MinUclk =
744 cpu_to_le16((uint16_t) 744 cpu_to_le16((uint16_t)
745 (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz) / 745 (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
746 1000); 746 1000));
747 table->WatermarkRow[0][i].MaxUclk = 747 table->WatermarkRow[0][i].MaxUclk =
748 cpu_to_le16((uint16_t) 748 cpu_to_le16((uint16_t)
749 (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz) / 749 (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
750 1000); 750 1000));
751 table->WatermarkRow[0][i].WmSetting = (uint8_t) 751 table->WatermarkRow[0][i].WmSetting = (uint8_t)
752 wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id; 752 wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
753 } 753 }
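Editorial note on the smu_helper.c hunk above: the change only moves a parenthesis, but it changes the arithmetic, since the (uint16_t) cast used to be applied to the raw kHz clock value before the division by 1000, wrapping it modulo 65536. A stand-alone sketch of the effect (the 600000 kHz figure is just an example, not a value from the driver):

#include <stdint.h>
#include <stdio.h>

/* Not kernel code: demonstrates the truncation the patch fixes. */
int main(void)
{
	uint32_t clk_in_khz = 600000;			/* 600 MHz expressed in kHz */

	uint16_t truncated = (uint16_t)(clk_in_khz) / 1000;	/* (600000 % 65536) / 1000 = 10  */
	uint16_t fixed     = (uint16_t)(clk_in_khz / 1000);	/* 600000 / 1000          = 600 */

	printf("old: %u MHz, fixed: %u MHz\n", truncated, fixed);
	return 0;
}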
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 5ff1d79b86c4..0e0df398222d 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1275,6 +1275,9 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
1275 mutex_lock(&mgr->lock); 1275 mutex_lock(&mgr->lock);
1276 mstb = mgr->mst_primary; 1276 mstb = mgr->mst_primary;
1277 1277
1278 if (!mstb)
1279 goto out;
1280
1278 for (i = 0; i < lct - 1; i++) { 1281 for (i = 0; i < lct - 1; i++) {
1279 int shift = (i % 2) ? 0 : 4; 1282 int shift = (i % 2) ? 0 : 4;
1280 int port_num = (rad[i / 2] >> shift) & 0xf; 1283 int port_num = (rad[i / 2] >> shift) & 0xf;
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index 90a1c846fc25..8aaa5e86a979 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -97,9 +97,9 @@ EXPORT_SYMBOL(drm_mode_legacy_fb_format);
97 97
98/** 98/**
99 * drm_driver_legacy_fb_format - compute drm fourcc code from legacy description 99 * drm_driver_legacy_fb_format - compute drm fourcc code from legacy description
100 * @dev: DRM device
100 * @bpp: bits per pixels 101 * @bpp: bits per pixels
101 * @depth: bit depth per pixel 102 * @depth: bit depth per pixel
102 * @native: use host native byte order
103 * 103 *
104 * Computes a drm fourcc pixel format code for the given @bpp/@depth values. 104 * Computes a drm fourcc pixel format code for the given @bpp/@depth values.
105 * Unlike drm_mode_legacy_fb_format() this looks at the drivers mode_config, 105 * Unlike drm_mode_legacy_fb_format() this looks at the drivers mode_config,
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 0ef0c6448d53..01fa98299bae 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -474,7 +474,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
474 u8 eu_disabled_mask; 474 u8 eu_disabled_mask;
475 u32 n_disabled; 475 u32 n_disabled;
476 476
477 if (!(sseu->subslice_mask[ss] & BIT(ss))) 477 if (!(sseu->subslice_mask[s] & BIT(ss)))
478 /* skip disabled subslice */ 478 /* skip disabled subslice */
479 continue; 479 continue;
480 480
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 23d8008a93bb..a54843fdeb2f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4850,8 +4850,31 @@ static void cpt_verify_modeset(struct drm_device *dev, int pipe)
4850 * chroma samples for both of the luma samples, and thus we don't 4850 * chroma samples for both of the luma samples, and thus we don't
4851 * actually get the expected MPEG2 chroma siting convention :( 4851 * actually get the expected MPEG2 chroma siting convention :(
4852 * The same behaviour is observed on pre-SKL platforms as well. 4852 * The same behaviour is observed on pre-SKL platforms as well.
4853 *
4854 * Theory behind the formula (note that we ignore sub-pixel
4855 * source coordinates):
4856 * s = source sample position
4857 * d = destination sample position
4858 *
4859 * Downscaling 4:1:
4860 * -0.5
4861 * | 0.0
4862 * | | 1.5 (initial phase)
4863 * | | |
4864 * v v v
4865 * | s | s | s | s |
4866 * | d |
4867 *
4868 * Upscaling 1:4:
4869 * -0.5
4870 * | -0.375 (initial phase)
4871 * | | 0.0
4872 * | | |
4873 * v v v
4874 * | s |
4875 * | d | d | d | d |
4853 */ 4876 */
4854u16 skl_scaler_calc_phase(int sub, bool chroma_cosited) 4877u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
4855{ 4878{
4856 int phase = -0x8000; 4879 int phase = -0x8000;
4857 u16 trip = 0; 4880 u16 trip = 0;
@@ -4859,6 +4882,15 @@ u16 skl_scaler_calc_phase(int sub, bool chroma_cosited)
4859 if (chroma_cosited) 4882 if (chroma_cosited)
4860 phase += (sub - 1) * 0x8000 / sub; 4883 phase += (sub - 1) * 0x8000 / sub;
4861 4884
4885 phase += scale / (2 * sub);
4886
4887 /*
4888 * Hardware initial phase limited to [-0.5:1.5].
4889 * Since the max hardware scale factor is 3.0, we
4890 * should never actually excdeed 1.0 here.
4891 */
4892 WARN_ON(phase < -0x8000 || phase > 0x18000);
4893
4862 if (phase < 0) 4894 if (phase < 0)
4863 phase = 0x10000 + phase; 4895 phase = 0x10000 + phase;
4864 else 4896 else
@@ -5067,13 +5099,20 @@ static void skylake_pfit_enable(struct intel_crtc *crtc)
5067 5099
5068 if (crtc->config->pch_pfit.enabled) { 5100 if (crtc->config->pch_pfit.enabled) {
5069 u16 uv_rgb_hphase, uv_rgb_vphase; 5101 u16 uv_rgb_hphase, uv_rgb_vphase;
5102 int pfit_w, pfit_h, hscale, vscale;
5070 int id; 5103 int id;
5071 5104
5072 if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) 5105 if (WARN_ON(crtc->config->scaler_state.scaler_id < 0))
5073 return; 5106 return;
5074 5107
5075 uv_rgb_hphase = skl_scaler_calc_phase(1, false); 5108 pfit_w = (crtc->config->pch_pfit.size >> 16) & 0xFFFF;
5076 uv_rgb_vphase = skl_scaler_calc_phase(1, false); 5109 pfit_h = crtc->config->pch_pfit.size & 0xFFFF;
5110
5111 hscale = (crtc->config->pipe_src_w << 16) / pfit_w;
5112 vscale = (crtc->config->pipe_src_h << 16) / pfit_h;
5113
5114 uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
5115 uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
5077 5116
5078 id = scaler_state->scaler_id; 5117 id = scaler_state->scaler_id;
5079 I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN | 5118 I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
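Editorial note on the intel_display.c hunks above: to make the new initial-phase formula in skl_scaler_calc_phase() concrete, here is a minimal sketch of just the arithmetic, with the trip bit and the final register encoding of the real function left out. Values are .16 fixed point (0x10000 == 1.0), and scale is the src/dst ratio as computed by skylake_pfit_enable()/skl_program_scaler():

#include <stdio.h>

/* Not the driver function: only the phase arithmetic, for illustration. */
static int scaler_initial_phase(int sub, int scale, int chroma_cosited)
{
	int phase = -0x8000;			/* start at -0.5 */

	if (chroma_cosited)
		phase += (sub - 1) * 0x8000 / sub;

	phase += scale / (2 * sub);		/* centre of the first dst sample */

	return phase;
}

int main(void)
{
	/* Reproduces the diagrams in the comment (luma plane, sub == 1). */
	printf("1:1 -> %+.3f\n", scaler_initial_phase(1, 0x10000, 0) / 65536.0);	/*  0.000 */
	printf("4:1 -> %+.3f\n", scaler_initial_phase(1, 0x40000, 0) / 65536.0);	/* +1.500 */
	printf("1:4 -> %+.3f\n", scaler_initial_phase(1, 0x4000,  0) / 65536.0);	/* -0.375 */
	return 0;
}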
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 1b00f8ea145b..a911691dbd0f 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -452,6 +452,10 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
452 if (!intel_connector) 452 if (!intel_connector)
453 return NULL; 453 return NULL;
454 454
455 intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
456 intel_connector->mst_port = intel_dp;
457 intel_connector->port = port;
458
455 connector = &intel_connector->base; 459 connector = &intel_connector->base;
456 ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, 460 ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs,
457 DRM_MODE_CONNECTOR_DisplayPort); 461 DRM_MODE_CONNECTOR_DisplayPort);
@@ -462,10 +466,6 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
462 466
463 drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs); 467 drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs);
464 468
465 intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
466 intel_connector->mst_port = intel_dp;
467 intel_connector->port = port;
468
469 for_each_pipe(dev_priv, pipe) { 469 for_each_pipe(dev_priv, pipe) {
470 struct drm_encoder *enc = 470 struct drm_encoder *enc =
471 &intel_dp->mst_encoders[pipe]->base.base; 471 &intel_dp->mst_encoders[pipe]->base.base;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index f8dc84b2d2d3..8b298e5f012d 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1646,7 +1646,7 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
1646void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc, 1646void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
1647 struct intel_crtc_state *crtc_state); 1647 struct intel_crtc_state *crtc_state);
1648 1648
1649u16 skl_scaler_calc_phase(int sub, bool chroma_center); 1649u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center);
1650int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state); 1650int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
1651int skl_max_scale(const struct intel_crtc_state *crtc_state, 1651int skl_max_scale(const struct intel_crtc_state *crtc_state,
1652 u32 pixel_format); 1652 u32 pixel_format);
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index 648a13c6043c..9a8018130237 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -228,7 +228,9 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
228 drm_for_each_connector_iter(connector, &conn_iter) { 228 drm_for_each_connector_iter(connector, &conn_iter) {
229 struct intel_connector *intel_connector = to_intel_connector(connector); 229 struct intel_connector *intel_connector = to_intel_connector(connector);
230 230
231 if (intel_connector->encoder->hpd_pin == pin) { 231 /* Don't check MST ports, they don't have pins */
232 if (!intel_connector->mst_port &&
233 intel_connector->encoder->hpd_pin == pin) {
232 if (connector->polled != intel_connector->polled) 234 if (connector->polled != intel_connector->polled)
233 DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n", 235 DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
234 connector->name); 236 connector->name);
@@ -395,37 +397,54 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
395 struct intel_encoder *encoder; 397 struct intel_encoder *encoder;
396 bool storm_detected = false; 398 bool storm_detected = false;
397 bool queue_dig = false, queue_hp = false; 399 bool queue_dig = false, queue_hp = false;
400 u32 long_hpd_pulse_mask = 0;
401 u32 short_hpd_pulse_mask = 0;
402 enum hpd_pin pin;
398 403
399 if (!pin_mask) 404 if (!pin_mask)
400 return; 405 return;
401 406
402 spin_lock(&dev_priv->irq_lock); 407 spin_lock(&dev_priv->irq_lock);
408
409 /*
410 * Determine whether ->hpd_pulse() exists for each pin, and
411 * whether we have a short or a long pulse. This is needed
412 * as each pin may have up to two encoders (HDMI and DP) and
413 * only the one of them (DP) will have ->hpd_pulse().
414 */
403 for_each_intel_encoder(&dev_priv->drm, encoder) { 415 for_each_intel_encoder(&dev_priv->drm, encoder) {
404 enum hpd_pin pin = encoder->hpd_pin;
405 bool has_hpd_pulse = intel_encoder_has_hpd_pulse(encoder); 416 bool has_hpd_pulse = intel_encoder_has_hpd_pulse(encoder);
417 enum port port = encoder->port;
418 bool long_hpd;
406 419
420 pin = encoder->hpd_pin;
407 if (!(BIT(pin) & pin_mask)) 421 if (!(BIT(pin) & pin_mask))
408 continue; 422 continue;
409 423
410 if (has_hpd_pulse) { 424 if (!has_hpd_pulse)
411 bool long_hpd = long_mask & BIT(pin); 425 continue;
412 enum port port = encoder->port;
413 426
414 DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port), 427 long_hpd = long_mask & BIT(pin);
415 long_hpd ? "long" : "short"); 428
416 /* 429 DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port),
417 * For long HPD pulses we want to have the digital queue happen, 430 long_hpd ? "long" : "short");
418 * but we still want HPD storm detection to function. 431 queue_dig = true;
419 */ 432
420 queue_dig = true; 433 if (long_hpd) {
421 if (long_hpd) { 434 long_hpd_pulse_mask |= BIT(pin);
422 dev_priv->hotplug.long_port_mask |= (1 << port); 435 dev_priv->hotplug.long_port_mask |= BIT(port);
423 } else { 436 } else {
424 /* for short HPD just trigger the digital queue */ 437 short_hpd_pulse_mask |= BIT(pin);
425 dev_priv->hotplug.short_port_mask |= (1 << port); 438 dev_priv->hotplug.short_port_mask |= BIT(port);
426 continue;
427 }
428 } 439 }
440 }
441
442 /* Now process each pin just once */
443 for_each_hpd_pin(pin) {
444 bool long_hpd;
445
446 if (!(BIT(pin) & pin_mask))
447 continue;
429 448
430 if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) { 449 if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) {
431 /* 450 /*
@@ -442,11 +461,22 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
442 if (dev_priv->hotplug.stats[pin].state != HPD_ENABLED) 461 if (dev_priv->hotplug.stats[pin].state != HPD_ENABLED)
443 continue; 462 continue;
444 463
445 if (!has_hpd_pulse) { 464 /*
465 * Delegate to ->hpd_pulse() if one of the encoders for this
466 * pin has it, otherwise let the hotplug_work deal with this
467 * pin directly.
468 */
469 if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) {
470 long_hpd = long_hpd_pulse_mask & BIT(pin);
471 } else {
446 dev_priv->hotplug.event_bits |= BIT(pin); 472 dev_priv->hotplug.event_bits |= BIT(pin);
473 long_hpd = true;
447 queue_hp = true; 474 queue_hp = true;
448 } 475 }
449 476
477 if (!long_hpd)
478 continue;
479
450 if (intel_hpd_irq_storm_detect(dev_priv, pin)) { 480 if (intel_hpd_irq_storm_detect(dev_priv, pin)) {
451 dev_priv->hotplug.event_bits &= ~BIT(pin); 481 dev_priv->hotplug.event_bits &= ~BIT(pin);
452 storm_detected = true; 482 storm_detected = true;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 43957bb37a42..37c94a54efcb 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -424,7 +424,8 @@ static u64 execlists_update_context(struct i915_request *rq)
424 424
425 reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail); 425 reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail);
426 426
427 /* True 32b PPGTT with dynamic page allocation: update PDP 427 /*
428 * True 32b PPGTT with dynamic page allocation: update PDP
428 * registers and point the unallocated PDPs to scratch page. 429 * registers and point the unallocated PDPs to scratch page.
429 * PML4 is allocated during ppgtt init, so this is not needed 430 * PML4 is allocated during ppgtt init, so this is not needed
430 * in 48-bit mode. 431 * in 48-bit mode.
@@ -432,6 +433,17 @@ static u64 execlists_update_context(struct i915_request *rq)
432 if (ppgtt && !i915_vm_is_48bit(&ppgtt->vm)) 433 if (ppgtt && !i915_vm_is_48bit(&ppgtt->vm))
433 execlists_update_context_pdps(ppgtt, reg_state); 434 execlists_update_context_pdps(ppgtt, reg_state);
434 435
436 /*
437 * Make sure the context image is complete before we submit it to HW.
438 *
439 * Ostensibly, writes (including the WCB) should be flushed prior to
440 * an uncached write such as our mmio register access, the empirical
441 * evidence (esp. on Braswell) suggests that the WC write into memory
442 * may not be visible to the HW prior to the completion of the UC
443 * register write and that we may begin execution from the context
444 * before its image is complete leading to invalid PD chasing.
445 */
446 wmb();
435 return ce->lrc_desc; 447 return ce->lrc_desc;
436} 448}
437 449
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index d0ef50bf930a..187bb0ceb4ac 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -91,6 +91,7 @@ static int
91gen4_render_ring_flush(struct i915_request *rq, u32 mode) 91gen4_render_ring_flush(struct i915_request *rq, u32 mode)
92{ 92{
93 u32 cmd, *cs; 93 u32 cmd, *cs;
94 int i;
94 95
95 /* 96 /*
96 * read/write caches: 97 * read/write caches:
@@ -127,12 +128,45 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
127 cmd |= MI_INVALIDATE_ISP; 128 cmd |= MI_INVALIDATE_ISP;
128 } 129 }
129 130
130 cs = intel_ring_begin(rq, 2); 131 i = 2;
132 if (mode & EMIT_INVALIDATE)
133 i += 20;
134
135 cs = intel_ring_begin(rq, i);
131 if (IS_ERR(cs)) 136 if (IS_ERR(cs))
132 return PTR_ERR(cs); 137 return PTR_ERR(cs);
133 138
134 *cs++ = cmd; 139 *cs++ = cmd;
135 *cs++ = MI_NOOP; 140
141 /*
142 * A random delay to let the CS invalidate take effect? Without this
143 * delay, the GPU relocation path fails as the CS does not see
144 * the updated contents. Just as important, if we apply the flushes
145 * to the EMIT_FLUSH branch (i.e. immediately after the relocation
146 * write and before the invalidate on the next batch), the relocations
147 * still fail. This implies that is a delay following invalidation
148 * that is required to reset the caches as opposed to a delay to
149 * ensure the memory is written.
150 */
151 if (mode & EMIT_INVALIDATE) {
152 *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
153 *cs++ = i915_ggtt_offset(rq->engine->scratch) |
154 PIPE_CONTROL_GLOBAL_GTT;
155 *cs++ = 0;
156 *cs++ = 0;
157
158 for (i = 0; i < 12; i++)
159 *cs++ = MI_FLUSH;
160
161 *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
162 *cs++ = i915_ggtt_offset(rq->engine->scratch) |
163 PIPE_CONTROL_GLOBAL_GTT;
164 *cs++ = 0;
165 *cs++ = 0;
166 }
167
168 *cs++ = cmd;
169
136 intel_ring_advance(rq, cs); 170 intel_ring_advance(rq, cs);
137 171
138 return 0; 172 return 0;
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 0fdabce647ab..44e4491a4918 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -2749,6 +2749,12 @@ static const struct i915_power_well_desc icl_power_wells[] = {
2749 }, 2749 },
2750 }, 2750 },
2751 { 2751 {
2752 .name = "DC off",
2753 .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
2754 .ops = &gen9_dc_off_power_well_ops,
2755 .id = DISP_PW_ID_NONE,
2756 },
2757 {
2752 .name = "power well 2", 2758 .name = "power well 2",
2753 .domains = ICL_PW_2_POWER_DOMAINS, 2759 .domains = ICL_PW_2_POWER_DOMAINS,
2754 .ops = &hsw_power_well_ops, 2760 .ops = &hsw_power_well_ops,
@@ -2760,12 +2766,6 @@ static const struct i915_power_well_desc icl_power_wells[] = {
2760 }, 2766 },
2761 }, 2767 },
2762 { 2768 {
2763 .name = "DC off",
2764 .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
2765 .ops = &gen9_dc_off_power_well_ops,
2766 .id = DISP_PW_ID_NONE,
2767 },
2768 {
2769 .name = "power well 3", 2769 .name = "power well 3",
2770 .domains = ICL_PW_3_POWER_DOMAINS, 2770 .domains = ICL_PW_3_POWER_DOMAINS,
2771 .ops = &hsw_power_well_ops, 2771 .ops = &hsw_power_well_ops,
@@ -3176,8 +3176,7 @@ static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
3176void icl_dbuf_slices_update(struct drm_i915_private *dev_priv, 3176void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
3177 u8 req_slices) 3177 u8 req_slices)
3178{ 3178{
3179 u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices; 3179 const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
3180 u32 val;
3181 bool ret; 3180 bool ret;
3182 3181
3183 if (req_slices > intel_dbuf_max_slices(dev_priv)) { 3182 if (req_slices > intel_dbuf_max_slices(dev_priv)) {
@@ -3188,7 +3187,6 @@ void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
3188 if (req_slices == hw_enabled_slices || req_slices == 0) 3187 if (req_slices == hw_enabled_slices || req_slices == 0)
3189 return; 3188 return;
3190 3189
3191 val = I915_READ(DBUF_CTL_S2);
3192 if (req_slices > hw_enabled_slices) 3190 if (req_slices > hw_enabled_slices)
3193 ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true); 3191 ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
3194 else 3192 else
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 5fd2f7bf3927..d3090a7537bb 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -302,13 +302,65 @@ skl_plane_max_stride(struct intel_plane *plane,
302 return min(8192 * cpp, 32768); 302 return min(8192 * cpp, 32768);
303} 303}
304 304
305static void
306skl_program_scaler(struct intel_plane *plane,
307 const struct intel_crtc_state *crtc_state,
308 const struct intel_plane_state *plane_state)
309{
310 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
311 enum pipe pipe = plane->pipe;
312 int scaler_id = plane_state->scaler_id;
313 const struct intel_scaler *scaler =
314 &crtc_state->scaler_state.scalers[scaler_id];
315 int crtc_x = plane_state->base.dst.x1;
316 int crtc_y = plane_state->base.dst.y1;
317 uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
318 uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
319 u16 y_hphase, uv_rgb_hphase;
320 u16 y_vphase, uv_rgb_vphase;
321 int hscale, vscale;
322
323 hscale = drm_rect_calc_hscale(&plane_state->base.src,
324 &plane_state->base.dst,
325 0, INT_MAX);
326 vscale = drm_rect_calc_vscale(&plane_state->base.src,
327 &plane_state->base.dst,
328 0, INT_MAX);
329
330 /* TODO: handle sub-pixel coordinates */
331 if (plane_state->base.fb->format->format == DRM_FORMAT_NV12) {
332 y_hphase = skl_scaler_calc_phase(1, hscale, false);
333 y_vphase = skl_scaler_calc_phase(1, vscale, false);
334
335 /* MPEG2 chroma siting convention */
336 uv_rgb_hphase = skl_scaler_calc_phase(2, hscale, true);
337 uv_rgb_vphase = skl_scaler_calc_phase(2, vscale, false);
338 } else {
339 /* not used */
340 y_hphase = 0;
341 y_vphase = 0;
342
343 uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
344 uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
345 }
346
347 I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id),
348 PS_SCALER_EN | PS_PLANE_SEL(plane->id) | scaler->mode);
349 I915_WRITE_FW(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
350 I915_WRITE_FW(SKL_PS_VPHASE(pipe, scaler_id),
351 PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
352 I915_WRITE_FW(SKL_PS_HPHASE(pipe, scaler_id),
353 PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
354 I915_WRITE_FW(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
355 I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id), (crtc_w << 16) | crtc_h);
356}
357
305void 358void
306skl_update_plane(struct intel_plane *plane, 359skl_update_plane(struct intel_plane *plane,
307 const struct intel_crtc_state *crtc_state, 360 const struct intel_crtc_state *crtc_state,
308 const struct intel_plane_state *plane_state) 361 const struct intel_plane_state *plane_state)
309{ 362{
310 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 363 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
311 const struct drm_framebuffer *fb = plane_state->base.fb;
312 enum plane_id plane_id = plane->id; 364 enum plane_id plane_id = plane->id;
313 enum pipe pipe = plane->pipe; 365 enum pipe pipe = plane->pipe;
314 u32 plane_ctl = plane_state->ctl; 366 u32 plane_ctl = plane_state->ctl;
@@ -318,8 +370,6 @@ skl_update_plane(struct intel_plane *plane,
318 u32 aux_stride = skl_plane_stride(plane_state, 1); 370 u32 aux_stride = skl_plane_stride(plane_state, 1);
319 int crtc_x = plane_state->base.dst.x1; 371 int crtc_x = plane_state->base.dst.x1;
320 int crtc_y = plane_state->base.dst.y1; 372 int crtc_y = plane_state->base.dst.y1;
321 uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
322 uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
323 uint32_t x = plane_state->color_plane[0].x; 373 uint32_t x = plane_state->color_plane[0].x;
324 uint32_t y = plane_state->color_plane[0].y; 374 uint32_t y = plane_state->color_plane[0].y;
325 uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16; 375 uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
@@ -329,8 +379,6 @@ skl_update_plane(struct intel_plane *plane,
329 /* Sizes are 0 based */ 379 /* Sizes are 0 based */
330 src_w--; 380 src_w--;
331 src_h--; 381 src_h--;
332 crtc_w--;
333 crtc_h--;
334 382
335 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 383 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
336 384
@@ -353,41 +401,8 @@ skl_update_plane(struct intel_plane *plane,
353 (plane_state->color_plane[1].y << 16) | 401 (plane_state->color_plane[1].y << 16) |
354 plane_state->color_plane[1].x); 402 plane_state->color_plane[1].x);
355 403
356 /* program plane scaler */
357 if (plane_state->scaler_id >= 0) { 404 if (plane_state->scaler_id >= 0) {
358 int scaler_id = plane_state->scaler_id; 405 skl_program_scaler(plane, crtc_state, plane_state);
359 const struct intel_scaler *scaler =
360 &crtc_state->scaler_state.scalers[scaler_id];
361 u16 y_hphase, uv_rgb_hphase;
362 u16 y_vphase, uv_rgb_vphase;
363
364 /* TODO: handle sub-pixel coordinates */
365 if (fb->format->format == DRM_FORMAT_NV12) {
366 y_hphase = skl_scaler_calc_phase(1, false);
367 y_vphase = skl_scaler_calc_phase(1, false);
368
369 /* MPEG2 chroma siting convention */
370 uv_rgb_hphase = skl_scaler_calc_phase(2, true);
371 uv_rgb_vphase = skl_scaler_calc_phase(2, false);
372 } else {
373 /* not used */
374 y_hphase = 0;
375 y_vphase = 0;
376
377 uv_rgb_hphase = skl_scaler_calc_phase(1, false);
378 uv_rgb_vphase = skl_scaler_calc_phase(1, false);
379 }
380
381 I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id),
382 PS_SCALER_EN | PS_PLANE_SEL(plane_id) | scaler->mode);
383 I915_WRITE_FW(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
384 I915_WRITE_FW(SKL_PS_VPHASE(pipe, scaler_id),
385 PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
386 I915_WRITE_FW(SKL_PS_HPHASE(pipe, scaler_id),
387 PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
388 I915_WRITE_FW(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
389 I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id),
390 ((crtc_w + 1) << 16)|(crtc_h + 1));
391 406
392 I915_WRITE_FW(PLANE_POS(pipe, plane_id), 0); 407 I915_WRITE_FW(PLANE_POS(pipe, plane_id), 0);
393 } else { 408 } else {
diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c
index 514245e69b38..acbbad3e322c 100644
--- a/drivers/gpu/drm/meson/meson_venc.c
+++ b/drivers/gpu/drm/meson/meson_venc.c
@@ -854,6 +854,13 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
854 unsigned int sof_lines; 854 unsigned int sof_lines;
855 unsigned int vsync_lines; 855 unsigned int vsync_lines;
856 856
857 /* Use VENCI for 480i and 576i and double HDMI pixels */
858 if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
859 hdmi_repeat = true;
860 use_enci = true;
861 venc_hdmi_latency = 1;
862 }
863
857 if (meson_venc_hdmi_supported_vic(vic)) { 864 if (meson_venc_hdmi_supported_vic(vic)) {
858 vmode = meson_venc_hdmi_get_vic_vmode(vic); 865 vmode = meson_venc_hdmi_get_vic_vmode(vic);
859 if (!vmode) { 866 if (!vmode) {
@@ -865,13 +872,7 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
865 } else { 872 } else {
866 meson_venc_hdmi_get_dmt_vmode(mode, &vmode_dmt); 873 meson_venc_hdmi_get_dmt_vmode(mode, &vmode_dmt);
867 vmode = &vmode_dmt; 874 vmode = &vmode_dmt;
868 } 875 use_enci = false;
869
870 /* Use VENCI for 480i and 576i and double HDMI pixels */
871 if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
872 hdmi_repeat = true;
873 use_enci = true;
874 venc_hdmi_latency = 1;
875 } 876 }
876 877
877 /* Repeat VENC pixels for 480/576i/p, 720p50/60 and 1080p50/60 */ 878 /* Repeat VENC pixels for 480/576i/p, 720p50/60 and 1080p50/60 */
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 394c129cfb3b..0a485c5b982e 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -5409,11 +5409,14 @@ static int dsi_probe(struct platform_device *pdev)
5409 5409
5410 /* DSI on OMAP3 doesn't have register DSI_GNQ, set number 5410 /* DSI on OMAP3 doesn't have register DSI_GNQ, set number
5411 * of data to 3 by default */ 5411 * of data to 3 by default */
5412 if (dsi->data->quirks & DSI_QUIRK_GNQ) 5412 if (dsi->data->quirks & DSI_QUIRK_GNQ) {
5413 dsi_runtime_get(dsi);
5413 /* NB_DATA_LANES */ 5414 /* NB_DATA_LANES */
5414 dsi->num_lanes_supported = 1 + REG_GET(dsi, DSI_GNQ, 11, 9); 5415 dsi->num_lanes_supported = 1 + REG_GET(dsi, DSI_GNQ, 11, 9);
5415 else 5416 dsi_runtime_put(dsi);
5417 } else {
5416 dsi->num_lanes_supported = 3; 5418 dsi->num_lanes_supported = 3;
5419 }
5417 5420
5418 r = dsi_init_output(dsi); 5421 r = dsi_init_output(dsi);
5419 if (r) 5422 if (r)
@@ -5426,15 +5429,19 @@ static int dsi_probe(struct platform_device *pdev)
5426 } 5429 }
5427 5430
5428 r = of_platform_populate(dev->of_node, NULL, NULL, dev); 5431 r = of_platform_populate(dev->of_node, NULL, NULL, dev);
5429 if (r) 5432 if (r) {
5430 DSSERR("Failed to populate DSI child devices: %d\n", r); 5433 DSSERR("Failed to populate DSI child devices: %d\n", r);
5434 goto err_uninit_output;
5435 }
5431 5436
5432 r = component_add(&pdev->dev, &dsi_component_ops); 5437 r = component_add(&pdev->dev, &dsi_component_ops);
5433 if (r) 5438 if (r)
5434 goto err_uninit_output; 5439 goto err_of_depopulate;
5435 5440
5436 return 0; 5441 return 0;
5437 5442
5443err_of_depopulate:
5444 of_platform_depopulate(dev);
5438err_uninit_output: 5445err_uninit_output:
5439 dsi_uninit_output(dsi); 5446 dsi_uninit_output(dsi);
5440err_pm_disable: 5447err_pm_disable:
@@ -5470,19 +5477,12 @@ static int dsi_runtime_suspend(struct device *dev)
5470 /* wait for current handler to finish before turning the DSI off */ 5477 /* wait for current handler to finish before turning the DSI off */
5471 synchronize_irq(dsi->irq); 5478 synchronize_irq(dsi->irq);
5472 5479
5473 dispc_runtime_put(dsi->dss->dispc);
5474
5475 return 0; 5480 return 0;
5476} 5481}
5477 5482
5478static int dsi_runtime_resume(struct device *dev) 5483static int dsi_runtime_resume(struct device *dev)
5479{ 5484{
5480 struct dsi_data *dsi = dev_get_drvdata(dev); 5485 struct dsi_data *dsi = dev_get_drvdata(dev);
5481 int r;
5482
5483 r = dispc_runtime_get(dsi->dss->dispc);
5484 if (r)
5485 return r;
5486 5486
5487 dsi->is_enabled = true; 5487 dsi->is_enabled = true;
5488 /* ensure the irq handler sees the is_enabled value */ 5488 /* ensure the irq handler sees the is_enabled value */
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index 1aaf260aa9b8..7553c7fc1c45 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -1484,16 +1484,23 @@ static int dss_probe(struct platform_device *pdev)
1484 dss); 1484 dss);
1485 1485
1486 /* Add all the child devices as components. */ 1486 /* Add all the child devices as components. */
1487 r = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
1488 if (r)
1489 goto err_uninit_debugfs;
1490
1487 omapdss_gather_components(&pdev->dev); 1491 omapdss_gather_components(&pdev->dev);
1488 1492
1489 device_for_each_child(&pdev->dev, &match, dss_add_child_component); 1493 device_for_each_child(&pdev->dev, &match, dss_add_child_component);
1490 1494
1491 r = component_master_add_with_match(&pdev->dev, &dss_component_ops, match); 1495 r = component_master_add_with_match(&pdev->dev, &dss_component_ops, match);
1492 if (r) 1496 if (r)
1493 goto err_uninit_debugfs; 1497 goto err_of_depopulate;
1494 1498
1495 return 0; 1499 return 0;
1496 1500
1501err_of_depopulate:
1502 of_platform_depopulate(&pdev->dev);
1503
1497err_uninit_debugfs: 1504err_uninit_debugfs:
1498 dss_debugfs_remove_file(dss->debugfs.clk); 1505 dss_debugfs_remove_file(dss->debugfs.clk);
1499 dss_debugfs_remove_file(dss->debugfs.dss); 1506 dss_debugfs_remove_file(dss->debugfs.dss);
@@ -1522,6 +1529,8 @@ static int dss_remove(struct platform_device *pdev)
1522{ 1529{
1523 struct dss_device *dss = platform_get_drvdata(pdev); 1530 struct dss_device *dss = platform_get_drvdata(pdev);
1524 1531
1532 of_platform_depopulate(&pdev->dev);
1533
1525 component_master_del(&pdev->dev, &dss_component_ops); 1534 component_master_del(&pdev->dev, &dss_component_ops);
1526 1535
1527 dss_debugfs_remove_file(dss->debugfs.clk); 1536 dss_debugfs_remove_file(dss->debugfs.clk);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index cf6230eac31a..aabdda394c9c 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -635,10 +635,14 @@ static int hdmi4_bind(struct device *dev, struct device *master, void *data)
635 635
636 hdmi->dss = dss; 636 hdmi->dss = dss;
637 637
638 r = hdmi_pll_init(dss, hdmi->pdev, &hdmi->pll, &hdmi->wp); 638 r = hdmi_runtime_get(hdmi);
639 if (r) 639 if (r)
640 return r; 640 return r;
641 641
642 r = hdmi_pll_init(dss, hdmi->pdev, &hdmi->pll, &hdmi->wp);
643 if (r)
644 goto err_runtime_put;
645
642 r = hdmi4_cec_init(hdmi->pdev, &hdmi->core, &hdmi->wp); 646 r = hdmi4_cec_init(hdmi->pdev, &hdmi->core, &hdmi->wp);
643 if (r) 647 if (r)
644 goto err_pll_uninit; 648 goto err_pll_uninit;
@@ -652,12 +656,16 @@ static int hdmi4_bind(struct device *dev, struct device *master, void *data)
652 hdmi->debugfs = dss_debugfs_create_file(dss, "hdmi", hdmi_dump_regs, 656 hdmi->debugfs = dss_debugfs_create_file(dss, "hdmi", hdmi_dump_regs,
653 hdmi); 657 hdmi);
654 658
659 hdmi_runtime_put(hdmi);
660
655 return 0; 661 return 0;
656 662
657err_cec_uninit: 663err_cec_uninit:
658 hdmi4_cec_uninit(&hdmi->core); 664 hdmi4_cec_uninit(&hdmi->core);
659err_pll_uninit: 665err_pll_uninit:
660 hdmi_pll_uninit(&hdmi->pll); 666 hdmi_pll_uninit(&hdmi->pll);
667err_runtime_put:
668 hdmi_runtime_put(hdmi);
661 return r; 669 return r;
662} 670}
663 671
@@ -833,32 +841,6 @@ static int hdmi4_remove(struct platform_device *pdev)
833 return 0; 841 return 0;
834} 842}
835 843
836static int hdmi_runtime_suspend(struct device *dev)
837{
838 struct omap_hdmi *hdmi = dev_get_drvdata(dev);
839
840 dispc_runtime_put(hdmi->dss->dispc);
841
842 return 0;
843}
844
845static int hdmi_runtime_resume(struct device *dev)
846{
847 struct omap_hdmi *hdmi = dev_get_drvdata(dev);
848 int r;
849
850 r = dispc_runtime_get(hdmi->dss->dispc);
851 if (r < 0)
852 return r;
853
854 return 0;
855}
856
857static const struct dev_pm_ops hdmi_pm_ops = {
858 .runtime_suspend = hdmi_runtime_suspend,
859 .runtime_resume = hdmi_runtime_resume,
860};
861
862static const struct of_device_id hdmi_of_match[] = { 844static const struct of_device_id hdmi_of_match[] = {
863 { .compatible = "ti,omap4-hdmi", }, 845 { .compatible = "ti,omap4-hdmi", },
864 {}, 846 {},
@@ -869,7 +851,6 @@ struct platform_driver omapdss_hdmi4hw_driver = {
869 .remove = hdmi4_remove, 851 .remove = hdmi4_remove,
870 .driver = { 852 .driver = {
871 .name = "omapdss_hdmi", 853 .name = "omapdss_hdmi",
872 .pm = &hdmi_pm_ops,
873 .of_match_table = hdmi_of_match, 854 .of_match_table = hdmi_of_match,
874 .suppress_bind_attrs = true, 855 .suppress_bind_attrs = true,
875 }, 856 },
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index b0e4a7463f8c..9e8556f67a29 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -825,32 +825,6 @@ static int hdmi5_remove(struct platform_device *pdev)
825 return 0; 825 return 0;
826} 826}
827 827
828static int hdmi_runtime_suspend(struct device *dev)
829{
830 struct omap_hdmi *hdmi = dev_get_drvdata(dev);
831
832 dispc_runtime_put(hdmi->dss->dispc);
833
834 return 0;
835}
836
837static int hdmi_runtime_resume(struct device *dev)
838{
839 struct omap_hdmi *hdmi = dev_get_drvdata(dev);
840 int r;
841
842 r = dispc_runtime_get(hdmi->dss->dispc);
843 if (r < 0)
844 return r;
845
846 return 0;
847}
848
849static const struct dev_pm_ops hdmi_pm_ops = {
850 .runtime_suspend = hdmi_runtime_suspend,
851 .runtime_resume = hdmi_runtime_resume,
852};
853
854static const struct of_device_id hdmi_of_match[] = { 828static const struct of_device_id hdmi_of_match[] = {
855 { .compatible = "ti,omap5-hdmi", }, 829 { .compatible = "ti,omap5-hdmi", },
856 { .compatible = "ti,dra7-hdmi", }, 830 { .compatible = "ti,dra7-hdmi", },
@@ -862,7 +836,6 @@ struct platform_driver omapdss_hdmi5hw_driver = {
862 .remove = hdmi5_remove, 836 .remove = hdmi5_remove,
863 .driver = { 837 .driver = {
864 .name = "omapdss_hdmi5", 838 .name = "omapdss_hdmi5",
865 .pm = &hdmi_pm_ops,
866 .of_match_table = hdmi_of_match, 839 .of_match_table = hdmi_of_match,
867 .suppress_bind_attrs = true, 840 .suppress_bind_attrs = true,
868 }, 841 },
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index ff0b18c8e4ac..b5f52727f8b1 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -946,19 +946,12 @@ static int venc_runtime_suspend(struct device *dev)
946 if (venc->tv_dac_clk) 946 if (venc->tv_dac_clk)
947 clk_disable_unprepare(venc->tv_dac_clk); 947 clk_disable_unprepare(venc->tv_dac_clk);
948 948
949 dispc_runtime_put(venc->dss->dispc);
950
951 return 0; 949 return 0;
952} 950}
953 951
954static int venc_runtime_resume(struct device *dev) 952static int venc_runtime_resume(struct device *dev)
955{ 953{
956 struct venc_device *venc = dev_get_drvdata(dev); 954 struct venc_device *venc = dev_get_drvdata(dev);
957 int r;
958
959 r = dispc_runtime_get(venc->dss->dispc);
960 if (r < 0)
961 return r;
962 955
963 if (venc->tv_dac_clk) 956 if (venc->tv_dac_clk)
964 clk_prepare_enable(venc->tv_dac_clk); 957 clk_prepare_enable(venc->tv_dac_clk);
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 62928ec0e7db..caffc547ef97 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -350,11 +350,14 @@ static void omap_crtc_arm_event(struct drm_crtc *crtc)
350static void omap_crtc_atomic_enable(struct drm_crtc *crtc, 350static void omap_crtc_atomic_enable(struct drm_crtc *crtc,
351 struct drm_crtc_state *old_state) 351 struct drm_crtc_state *old_state)
352{ 352{
353 struct omap_drm_private *priv = crtc->dev->dev_private;
353 struct omap_crtc *omap_crtc = to_omap_crtc(crtc); 354 struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
354 int ret; 355 int ret;
355 356
356 DBG("%s", omap_crtc->name); 357 DBG("%s", omap_crtc->name);
357 358
359 priv->dispc_ops->runtime_get(priv->dispc);
360
358 spin_lock_irq(&crtc->dev->event_lock); 361 spin_lock_irq(&crtc->dev->event_lock);
359 drm_crtc_vblank_on(crtc); 362 drm_crtc_vblank_on(crtc);
360 ret = drm_crtc_vblank_get(crtc); 363 ret = drm_crtc_vblank_get(crtc);
@@ -367,6 +370,7 @@ static void omap_crtc_atomic_enable(struct drm_crtc *crtc,
367static void omap_crtc_atomic_disable(struct drm_crtc *crtc, 370static void omap_crtc_atomic_disable(struct drm_crtc *crtc,
368 struct drm_crtc_state *old_state) 371 struct drm_crtc_state *old_state)
369{ 372{
373 struct omap_drm_private *priv = crtc->dev->dev_private;
370 struct omap_crtc *omap_crtc = to_omap_crtc(crtc); 374 struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
371 375
372 DBG("%s", omap_crtc->name); 376 DBG("%s", omap_crtc->name);
@@ -379,6 +383,8 @@ static void omap_crtc_atomic_disable(struct drm_crtc *crtc,
379 spin_unlock_irq(&crtc->dev->event_lock); 383 spin_unlock_irq(&crtc->dev->event_lock);
380 384
381 drm_crtc_vblank_off(crtc); 385 drm_crtc_vblank_off(crtc);
386
387 priv->dispc_ops->runtime_put(priv->dispc);
382} 388}
383 389
384static enum drm_mode_status omap_crtc_mode_valid(struct drm_crtc *crtc, 390static enum drm_mode_status omap_crtc_mode_valid(struct drm_crtc *crtc,