Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 6
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c | 115
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.h | 10
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio_context.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 28
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 241
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 113
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 241
-rw-r--r--  drivers/gpu/drm/i915/i915_fixed.h | 143
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 67
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.h | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 13
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.h | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 9
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 251
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h | 47
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 33
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.h | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 83
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_bdw.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_bdw.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_bxt.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_bxt.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_cflgt2.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_cflgt2.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_cflgt3.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_cflgt3.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_chv.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_chv.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_cnl.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_cnl.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_glk.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_glk.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_hsw.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_hsw.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_icl.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_icl.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_kblgt2.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_kblgt2.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_kblgt3.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_kblgt3.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_sklgt2.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_sklgt2.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_sklgt3.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_sklgt3.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_sklgt4.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_sklgt4.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c | 9
-rw-r--r--  drivers/gpu/drm/i915/i915_params.h | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c | 71
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c | 49
-rw-r--r--  drivers/gpu/drm/i915/i915_query.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 822
-rw-r--r--  drivers/gpu/drm/i915/i915_request.c | 118
-rw-r--r--  drivers/gpu/drm/i915/i915_request.h | 13
-rw-r--r--  drivers/gpu/drm/i915/i915_scheduler.c | 399
-rw-r--r--  drivers/gpu/drm/i915/i915_scheduler.h | 36
-rw-r--r--  drivers/gpu/drm/i915/i915_syncmap.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_timeline.h | 19
-rw-r--r--  drivers/gpu/drm/i915/i915_utils.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c | 8
-rw-r--r--  drivers/gpu/drm/i915/icl_dsi.c | 858
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic.c | 118
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic_plane.c | 118
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c | 34
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 89
-rw-r--r--  drivers/gpu/drm/i915/intel_cdclk.c | 57
-rw-r--r--  drivers/gpu/drm/i915/intel_color.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_combo_phy.c | 254
-rw-r--r--  drivers/gpu/drm/i915/intel_connector.c (renamed from drivers/gpu/drm/i915/intel_modes.c) | 129
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_csr.c | 162
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 482
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.c | 71
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.h | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 1986
-rw-r--r--  drivers/gpu/drm/i915/intel_display.h | 37
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 831
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c | 41
-rw-r--r--  drivers/gpu/drm/i915/intel_dpio_phy.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_dpll_mgr.c | 117
-rw-r--r--  drivers/gpu/drm/i915/intel_dpll_mgr.h | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 233
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.c | 128
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.h | 30
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_vbt.c | 284
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_engine_cs.c | 31
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_guc.c | 45
-rw-r--r--  drivers/gpu/drm/i915/intel_guc.h | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_fw.c | 113
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_fwif.h | 41
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_reg.h | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_submission.c | 216
-rw-r--r--  drivers/gpu/drm/i915/intel_hdcp.c | 214
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 213
-rw-r--r--  drivers/gpu/drm/i915/intel_hotplug.c | 131
-rw-r--r--  drivers/gpu/drm/i915/intel_huc.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_lpe_audio.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 328
-rw-r--r--  drivers/gpu/drm/i915/intel_lspcon.c | 347
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 67
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 158
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.h | 15
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 13
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 525
-rw-r--r--  drivers/gpu/drm/i915/intel_psr.c | 248
-rw-r--r--  drivers/gpu/drm/i915/intel_quirks.c | 169
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 45
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 24
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 358
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 56
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 737
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_uc.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_uc_fw.h | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_vbt_defs.h | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_workarounds.c | 46
-rw-r--r--  drivers/gpu/drm/i915/selftests/huge_pages.c | 36
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_context.c | 428
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_evict.c | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 10
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_guc.c | 59
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_hangcheck.c | 7
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_lrc.c | 267
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_engine.c | 2
-rw-r--r--  drivers/gpu/drm/i915/vlv_dsi.c | 190
132 files changed, 9482 insertions(+), 5583 deletions(-)
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 1c2857f13ad4..0ff878c994e2 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -75,6 +75,7 @@ i915-y += i915_cmd_parser.o \
     i915_gemfs.o \
     i915_query.o \
     i915_request.o \
+    i915_scheduler.o \
     i915_timeline.o \
     i915_trace_points.o \
     i915_vma.o \
@@ -112,6 +113,8 @@ i915-y += intel_audio.o \
     intel_bios.o \
     intel_cdclk.o \
     intel_color.o \
+    intel_combo_phy.o \
+    intel_connector.o \
     intel_display.o \
     intel_dpio_phy.o \
     intel_dpll_mgr.o \
@@ -120,9 +123,9 @@ i915-y += intel_audio.o \
     intel_frontbuffer.o \
     intel_hdcp.o \
     intel_hotplug.o \
-    intel_modes.o \
     intel_overlay.o \
     intel_psr.o \
+    intel_quirks.o \
     intel_sideband.o \
     intel_sprite.o
 i915-$(CONFIG_ACPI) += intel_acpi.o intel_opregion.o
@@ -142,6 +145,7 @@ i915-y += dvo_ch7017.o \
     intel_dp_link_training.o \
     intel_dp_mst.o \
     intel_dp.o \
+    intel_dsi.o \
     intel_dsi_dcs_backlight.o \
     intel_dsi_vbt.o \
     intel_dvo.o \
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 2402395a068d..58e166effa45 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1905,7 +1905,6 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
         vgpu_free_mm(mm);
         return ERR_PTR(-ENOMEM);
     }
-    mm->ggtt_mm.last_partial_off = -1UL;
 
     return mm;
 }
@@ -1930,7 +1929,6 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
         invalidate_ppgtt_mm(mm);
     } else {
         vfree(mm->ggtt_mm.virtual_ggtt);
-        mm->ggtt_mm.last_partial_off = -1UL;
     }
 
     vgpu_free_mm(mm);
@@ -2168,6 +2166,8 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
     struct intel_gvt_gtt_entry e, m;
     dma_addr_t dma_addr;
     int ret;
+    struct intel_gvt_partial_pte *partial_pte, *pos, *n;
+    bool partial_update = false;
 
     if (bytes != 4 && bytes != 8)
         return -EINVAL;
@@ -2178,68 +2178,57 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
     if (!vgpu_gmadr_is_valid(vgpu, gma))
         return 0;
 
-    ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index);
-
+    e.type = GTT_TYPE_GGTT_PTE;
     memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
             bytes);
 
     /* If ggtt entry size is 8 bytes, and it's split into two 4 bytes
-     * write, we assume the two 4 bytes writes are consecutive.
-     * Otherwise, we abort and report error
+     * write, save the first 4 bytes in a list and update virtual
+     * PTE. Only update shadow PTE when the second 4 bytes comes.
      */
     if (bytes < info->gtt_entry_size) {
-        if (ggtt_mm->ggtt_mm.last_partial_off == -1UL) {
-            /* the first partial part*/
-            ggtt_mm->ggtt_mm.last_partial_off = off;
-            ggtt_mm->ggtt_mm.last_partial_data = e.val64;
-            return 0;
-        } else if ((g_gtt_index ==
-                (ggtt_mm->ggtt_mm.last_partial_off >>
-                info->gtt_entry_size_shift)) &&
-            (off != ggtt_mm->ggtt_mm.last_partial_off)) {
-            /* the second partial part */
-
-            int last_off = ggtt_mm->ggtt_mm.last_partial_off &
-                (info->gtt_entry_size - 1);
-
-            memcpy((void *)&e.val64 + last_off,
-                (void *)&ggtt_mm->ggtt_mm.last_partial_data +
-                last_off, bytes);
-
-            ggtt_mm->ggtt_mm.last_partial_off = -1UL;
-        } else {
-            int last_offset;
-
-            gvt_vgpu_err("failed to populate guest ggtt entry: abnormal ggtt entry write sequence, last_partial_off=%lx, offset=%x, bytes=%d, ggtt entry size=%d\n",
-                    ggtt_mm->ggtt_mm.last_partial_off, off,
-                    bytes, info->gtt_entry_size);
-
-            /* set host ggtt entry to scratch page and clear
-             * virtual ggtt entry as not present for last
-             * partially write offset
-             */
-            last_offset = ggtt_mm->ggtt_mm.last_partial_off &
-                    (~(info->gtt_entry_size - 1));
-
-            ggtt_get_host_entry(ggtt_mm, &m, last_offset);
-            ggtt_invalidate_pte(vgpu, &m);
-            ops->set_pfn(&m, gvt->gtt.scratch_mfn);
-            ops->clear_present(&m);
-            ggtt_set_host_entry(ggtt_mm, &m, last_offset);
-            ggtt_invalidate(gvt->dev_priv);
-
-            ggtt_get_guest_entry(ggtt_mm, &e, last_offset);
-            ops->clear_present(&e);
-            ggtt_set_guest_entry(ggtt_mm, &e, last_offset);
-
-            ggtt_mm->ggtt_mm.last_partial_off = off;
-            ggtt_mm->ggtt_mm.last_partial_data = e.val64;
-
-            return 0;
+        bool found = false;
+
+        list_for_each_entry_safe(pos, n,
+                &ggtt_mm->ggtt_mm.partial_pte_list, list) {
+            if (g_gtt_index == pos->offset >>
+                    info->gtt_entry_size_shift) {
+                if (off != pos->offset) {
+                    /* the second partial part*/
+                    int last_off = pos->offset &
+                        (info->gtt_entry_size - 1);
+
+                    memcpy((void *)&e.val64 + last_off,
+                        (void *)&pos->data + last_off,
+                        bytes);
+
+                    list_del(&pos->list);
+                    kfree(pos);
+                    found = true;
+                    break;
+                }
+
+                /* update of the first partial part */
+                pos->data = e.val64;
+                ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
+                return 0;
+            }
+        }
+
+        if (!found) {
+            /* the first partial part */
+            partial_pte = kzalloc(sizeof(*partial_pte), GFP_KERNEL);
+            if (!partial_pte)
+                return -ENOMEM;
+            partial_pte->offset = off;
+            partial_pte->data = e.val64;
+            list_add_tail(&partial_pte->list,
+                &ggtt_mm->ggtt_mm.partial_pte_list);
+            partial_update = true;
         }
     }
 
-    if (ops->test_present(&e)) {
+    if (!partial_update && (ops->test_present(&e))) {
         gfn = ops->get_pfn(&e);
         m = e;
 
@@ -2263,16 +2252,18 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
     } else
         ops->set_pfn(&m, dma_addr >> PAGE_SHIFT);
     } else {
-        ggtt_get_host_entry(ggtt_mm, &m, g_gtt_index);
-        ggtt_invalidate_pte(vgpu, &m);
         ops->set_pfn(&m, gvt->gtt.scratch_mfn);
         ops->clear_present(&m);
     }
 
 out:
+    ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
+
+    ggtt_get_host_entry(ggtt_mm, &e, g_gtt_index);
+    ggtt_invalidate_pte(vgpu, &e);
+
     ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
     ggtt_invalidate(gvt->dev_priv);
-    ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
     return 0;
 }
 
@@ -2430,6 +2421,8 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
 
     intel_vgpu_reset_ggtt(vgpu, false);
 
+    INIT_LIST_HEAD(&gtt->ggtt_mm->ggtt_mm.partial_pte_list);
+
     return create_scratch_page_tree(vgpu);
 }
 
@@ -2454,6 +2447,14 @@ static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
 
 static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu)
 {
+    struct intel_gvt_partial_pte *pos;
+
+    list_for_each_entry(pos,
+            &vgpu->gtt.ggtt_mm->ggtt_mm.partial_pte_list, list) {
+        gvt_dbg_mm("partial PTE update on hold 0x%lx : 0x%llx\n",
+               pos->offset, pos->data);
+        kfree(pos);
+    }
     intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm);
     vgpu->gtt.ggtt_mm = NULL;
 }
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index 7a9b36176efb..d8cb04cc946d 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -35,7 +35,6 @@
 #define _GVT_GTT_H_
 
 #define I915_GTT_PAGE_SHIFT 12
-#define I915_GTT_PAGE_MASK (~(I915_GTT_PAGE_SIZE - 1))
 
 struct intel_vgpu_mm;
 
@@ -133,6 +132,12 @@ enum intel_gvt_mm_type {
 
 #define GVT_RING_CTX_NR_PDPS GEN8_3LVL_PDPES
 
+struct intel_gvt_partial_pte {
+    unsigned long offset;
+    u64 data;
+    struct list_head list;
+};
+
 struct intel_vgpu_mm {
     enum intel_gvt_mm_type type;
     struct intel_vgpu *vgpu;
@@ -157,8 +162,7 @@ struct intel_vgpu_mm {
     } ppgtt_mm;
     struct {
         void *virtual_ggtt;
-        unsigned long last_partial_off;
-        u64 last_partial_data;
+        struct list_head partial_pte_list;
     } ggtt_mm;
     };
 };
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 90f50f67909a..aa280bb07125 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1609,7 +1609,7 @@ static int bxt_gt_disp_pwron_write(struct intel_vgpu *vgpu,
     return 0;
 }
 
-static int bxt_edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
+static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
         unsigned int offset, void *p_data, unsigned int bytes)
 {
     vgpu_vreg(vgpu, offset) = 0;
@@ -2607,6 +2607,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
     MMIO_DFH(_MMIO(0x1a178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
     MMIO_DFH(_MMIO(0x1a17c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
     MMIO_DFH(_MMIO(0x2217c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+
+    MMIO_DH(EDP_PSR_IMR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
+    MMIO_DH(EDP_PSR_IIR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
     return 0;
 }
 
@@ -3205,9 +3208,6 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt)
     MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT);
     MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C), D_BXT);
 
-    MMIO_DH(EDP_PSR_IMR, D_BXT, NULL, bxt_edp_psr_imr_iir_write);
-    MMIO_DH(EDP_PSR_IIR, D_BXT, NULL, bxt_edp_psr_imr_iir_write);
-
     MMIO_D(RC6_CTX_BASE, D_BXT);
 
     MMIO_D(GEN8_PUSHBUS_CONTROL, D_BXT);
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 10e63eea5492..36a5147cd01e 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -131,7 +131,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
     {RCS, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */
 
     {RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
-    {RCS, GEN9_CSFE_CHICKEN1_RCS, 0x0, false}, /* 0x20d4 */
+    {RCS, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */
 
     {RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
     {RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index ea34003d6dd2..b8fbe3fabea3 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -334,6 +334,28 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
     i915_gem_object_put(wa_ctx->indirect_ctx.obj);
 }
 
+static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
+                     struct i915_gem_context *ctx)
+{
+    struct intel_vgpu_mm *mm = workload->shadow_mm;
+    struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
+    int i = 0;
+
+    if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed)
+        return -1;
+
+    if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
+        px_dma(&ppgtt->pml4) = mm->ppgtt_mm.shadow_pdps[0];
+    } else {
+        for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) {
+            px_dma(ppgtt->pdp.page_directory[i]) =
+                mm->ppgtt_mm.shadow_pdps[i];
+        }
+    }
+
+    return 0;
+}
+
 /**
  * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
  * shadow it as well, include ringbuffer,wa_ctx and ctx.
@@ -358,6 +380,12 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
     if (workload->req)
         return 0;
 
+    ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
+    if (ret < 0) {
+        gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
+        return ret;
+    }
+
     /* pin shadow context by gvt even the shadow context will be pinned
      * when i915 alloc request. That is because gvt will update the guest
      * context from shadow context when workload is completed, and at that
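The set_context_ppgtt_from_shadow() helper added above patches the shadow context's page-table roots before the request is created: a 4-level ppGTT has a single PML4 root, while a 3-level one carries four page-directory pointers. The toy C sketch below mirrors only that branch; the types and names (toy_ppgtt, ROOT_L4/ROOT_L3) are simplified stand-ins of this demo, not the i915 structures.

#include <stdio.h>
#include <stdint.h>

#define NR_PDPS 4

enum root_type { ROOT_L4, ROOT_L3 };

struct toy_ppgtt {
	uint64_t pml4;            /* used when root_type == ROOT_L4 */
	uint64_t pdp[NR_PDPS];    /* used when root_type == ROOT_L3 */
};

/* Copy the shadow page-table roots into the context's ppGTT. */
static void set_ppgtt_from_shadow(struct toy_ppgtt *ppgtt,
				  enum root_type type,
				  const uint64_t shadow_pdps[NR_PDPS])
{
	if (type == ROOT_L4) {
		ppgtt->pml4 = shadow_pdps[0];       /* one root entry */
	} else {
		for (int i = 0; i < NR_PDPS; i++)   /* four directory roots */
			ppgtt->pdp[i] = shadow_pdps[i];
	}
}

int main(void)
{
	const uint64_t shadow[NR_PDPS] = { 0x1000, 0x2000, 0x3000, 0x4000 };
	struct toy_ppgtt ppgtt = {0};

	set_ppgtt_from_shadow(&ppgtt, ROOT_L3, shadow);
	printf("pdp[2] = 0x%llx\n", (unsigned long long)ppgtt.pdp[2]);
	return 0;
}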
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 4f3ac0a12889..7f455bca528e 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1788,6 +1788,8 @@ static int i915_emon_status(struct seq_file *m, void *unused)
     if (!IS_GEN5(dev_priv))
         return -ENODEV;
 
+    intel_runtime_pm_get(dev_priv);
+
     ret = mutex_lock_interruptible(&dev->struct_mutex);
     if (ret)
         return ret;
@@ -1802,6 +1804,8 @@ static int i915_emon_status(struct seq_file *m, void *unused)
     seq_printf(m, "GFX power: %ld\n", gfx);
     seq_printf(m, "Total power: %ld\n", chipset + gfx);
 
+    intel_runtime_pm_put(dev_priv);
+
     return 0;
 }
 
@@ -2215,8 +2219,23 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
     struct drm_i915_private *dev_priv = node_to_i915(m->private);
     struct drm_device *dev = &dev_priv->drm;
     struct intel_rps *rps = &dev_priv->gt_pm.rps;
+    u32 act_freq = rps->cur_freq;
     struct drm_file *file;
 
+    if (intel_runtime_pm_get_if_in_use(dev_priv)) {
+        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+            mutex_lock(&dev_priv->pcu_lock);
+            act_freq = vlv_punit_read(dev_priv,
+                          PUNIT_REG_GPU_FREQ_STS);
+            act_freq = (act_freq >> 8) & 0xff;
+            mutex_unlock(&dev_priv->pcu_lock);
+        } else {
+            act_freq = intel_get_cagf(dev_priv,
+                          I915_READ(GEN6_RPSTAT1));
+        }
+        intel_runtime_pm_put(dev_priv);
+    }
+
     seq_printf(m, "RPS enabled? %d\n", rps->enabled);
     seq_printf(m, "GPU busy? %s [%d requests]\n",
            yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
@@ -2224,8 +2243,9 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
     seq_printf(m, "Boosts outstanding? %d\n",
            atomic_read(&rps->num_waiters));
     seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
-    seq_printf(m, "Frequency requested %d\n",
-           intel_gpu_freq(dev_priv, rps->cur_freq));
+    seq_printf(m, "Frequency requested %d, actual %d\n",
+           intel_gpu_freq(dev_priv, rps->cur_freq),
+           intel_gpu_freq(dev_priv, act_freq));
     seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
            intel_gpu_freq(dev_priv, rps->min_freq),
            intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
@@ -2900,16 +2920,15 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
     seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
            CSR_VERSION_MINOR(csr->version));
 
-    if (IS_KABYLAKE(dev_priv) ||
-        (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6))) {
-        seq_printf(m, "DC3 -> DC5 count: %d\n",
-               I915_READ(SKL_CSR_DC3_DC5_COUNT));
+    if (WARN_ON(INTEL_GEN(dev_priv) > 11))
+        goto out;
+
+    seq_printf(m, "DC3 -> DC5 count: %d\n",
+           I915_READ(IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
+                            SKL_CSR_DC3_DC5_COUNT));
+    if (!IS_GEN9_LP(dev_priv))
         seq_printf(m, "DC5 -> DC6 count: %d\n",
                I915_READ(SKL_CSR_DC5_DC6_COUNT));
-    } else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) {
-        seq_printf(m, "DC3 -> DC5 count: %d\n",
-               I915_READ(BXT_CSR_DC3_DC5_COUNT));
-    }
 
 out:
     seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
@@ -3049,16 +3068,17 @@ static void intel_connector_info(struct seq_file *m,
     seq_printf(m, "connector %d: type %s, status: %s\n",
            connector->base.id, connector->name,
            drm_get_connector_status_name(connector->status));
-    if (connector->status == connector_status_connected) {
-        seq_printf(m, "\tname: %s\n", connector->display_info.name);
-        seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
-               connector->display_info.width_mm,
-               connector->display_info.height_mm);
-        seq_printf(m, "\tsubpixel order: %s\n",
-               drm_get_subpixel_order_name(connector->display_info.subpixel_order));
-        seq_printf(m, "\tCEA rev: %d\n",
-               connector->display_info.cea_rev);
-    }
+
+    if (connector->status == connector_status_disconnected)
+        return;
+
+    seq_printf(m, "\tname: %s\n", connector->display_info.name);
+    seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
+           connector->display_info.width_mm,
+           connector->display_info.height_mm);
+    seq_printf(m, "\tsubpixel order: %s\n",
+           drm_get_subpixel_order_name(connector->display_info.subpixel_order));
+    seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
 
     if (!intel_encoder)
         return;
@@ -4172,6 +4192,7 @@ i915_drop_caches_set(void *data, u64 val)
 
     DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
           val, val & DROP_ALL);
+    intel_runtime_pm_get(i915);
 
     if (val & DROP_RESET_ACTIVE && !intel_engines_are_idle(i915))
         i915_gem_set_wedged(i915);
@@ -4181,7 +4202,7 @@ i915_drop_caches_set(void *data, u64 val)
     if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) {
         ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
         if (ret)
-            return ret;
+            goto out;
 
         if (val & DROP_ACTIVE)
             ret = i915_gem_wait_for_idle(i915,
@@ -4189,11 +4210,8 @@ i915_drop_caches_set(void *data, u64 val)
                          I915_WAIT_LOCKED,
                          MAX_SCHEDULE_TIMEOUT);
 
-        if (ret == 0 && val & DROP_RESET_SEQNO) {
-            intel_runtime_pm_get(i915);
+        if (ret == 0 && val & DROP_RESET_SEQNO)
             ret = i915_gem_set_global_seqno(&i915->drm, 1);
-            intel_runtime_pm_put(i915);
-        }
 
         if (val & DROP_RETIRE)
             i915_retire_requests(i915);
@@ -4231,6 +4249,9 @@ i915_drop_caches_set(void *data, u64 val)
     if (val & DROP_FREED)
         i915_gem_drain_freed_objects(i915);
 
+out:
+    intel_runtime_pm_put(i915);
+
     return ret;
 }
 
@@ -4331,7 +4352,7 @@ static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
     for (s = 0; s < info->sseu.max_slices; s++) {
         /*
          * FIXME: Valid SS Mask respects the spec and read
-         * only valid bits for those registers, excluding reserverd
+         * only valid bits for those registers, excluding reserved
          * although this seems wrong because it would leave many
          * subslices without ACK.
          */
@@ -4641,24 +4662,122 @@ static const struct file_operations i915_hpd_storm_ctl_fops = {
     .write = i915_hpd_storm_ctl_write
 };
 
+static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
+{
+    struct drm_i915_private *dev_priv = m->private;
+
+    seq_printf(m, "Enabled: %s\n",
+           yesno(dev_priv->hotplug.hpd_short_storm_enabled));
+
+    return 0;
+}
+
+static int
+i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
+{
+    return single_open(file, i915_hpd_short_storm_ctl_show,
+               inode->i_private);
+}
+
+static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
+                          const char __user *ubuf,
+                          size_t len, loff_t *offp)
+{
+    struct seq_file *m = file->private_data;
+    struct drm_i915_private *dev_priv = m->private;
+    struct i915_hotplug *hotplug = &dev_priv->hotplug;
+    char *newline;
+    char tmp[16];
+    int i;
+    bool new_state;
+
+    if (len >= sizeof(tmp))
+        return -EINVAL;
+
+    if (copy_from_user(tmp, ubuf, len))
+        return -EFAULT;
+
+    tmp[len] = '\0';
+
+    /* Strip newline, if any */
+    newline = strchr(tmp, '\n');
+    if (newline)
+        *newline = '\0';
+
+    /* Reset to the "default" state for this system */
+    if (strcmp(tmp, "reset") == 0)
+        new_state = !HAS_DP_MST(dev_priv);
+    else if (kstrtobool(tmp, &new_state) != 0)
+        return -EINVAL;
+
+    DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
+              new_state ? "En" : "Dis");
+
+    spin_lock_irq(&dev_priv->irq_lock);
+    hotplug->hpd_short_storm_enabled = new_state;
+    /* Reset the HPD storm stats so we don't accidentally trigger a storm */
+    for_each_hpd_pin(i)
+        hotplug->stats[i].count = 0;
+    spin_unlock_irq(&dev_priv->irq_lock);
+
+    /* Re-enable hpd immediately if we were in an irq storm */
+    flush_delayed_work(&dev_priv->hotplug.reenable_work);
+
+    return len;
+}
+
+static const struct file_operations i915_hpd_short_storm_ctl_fops = {
+    .owner = THIS_MODULE,
+    .open = i915_hpd_short_storm_ctl_open,
+    .read = seq_read,
+    .llseek = seq_lseek,
+    .release = single_release,
+    .write = i915_hpd_short_storm_ctl_write,
+};
+
 static int i915_drrs_ctl_set(void *data, u64 val)
 {
     struct drm_i915_private *dev_priv = data;
     struct drm_device *dev = &dev_priv->drm;
-    struct intel_crtc *intel_crtc;
-    struct intel_encoder *encoder;
-    struct intel_dp *intel_dp;
+    struct intel_crtc *crtc;
 
     if (INTEL_GEN(dev_priv) < 7)
         return -ENODEV;
 
-    drm_modeset_lock_all(dev);
-    for_each_intel_crtc(dev, intel_crtc) {
-        if (!intel_crtc->base.state->active ||
-            !intel_crtc->config->has_drrs)
-            continue;
+    for_each_intel_crtc(dev, crtc) {
+        struct drm_connector_list_iter conn_iter;
+        struct intel_crtc_state *crtc_state;
+        struct drm_connector *connector;
+        struct drm_crtc_commit *commit;
+        int ret;
+
+        ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
+        if (ret)
+            return ret;
+
+        crtc_state = to_intel_crtc_state(crtc->base.state);
+
+        if (!crtc_state->base.active ||
+            !crtc_state->has_drrs)
+            goto out;
 
-        for_each_encoder_on_crtc(dev, &intel_crtc->base, encoder) {
+        commit = crtc_state->base.commit;
+        if (commit) {
+            ret = wait_for_completion_interruptible(&commit->hw_done);
+            if (ret)
+                goto out;
+        }
+
+        drm_connector_list_iter_begin(dev, &conn_iter);
+        drm_for_each_connector_iter(connector, &conn_iter) {
+            struct intel_encoder *encoder;
+            struct intel_dp *intel_dp;
+
+            if (!(crtc_state->base.connector_mask &
+                  drm_connector_mask(connector)))
+                continue;
+
+            encoder = intel_attached_encoder(connector);
             if (encoder->type != INTEL_OUTPUT_EDP)
                 continue;
 
@@ -4668,13 +4787,18 @@ static int i915_drrs_ctl_set(void *data, u64 val)
             intel_dp = enc_to_intel_dp(&encoder->base);
             if (val)
                 intel_edp_drrs_enable(intel_dp,
-                              intel_crtc->config);
+                              crtc_state);
             else
                 intel_edp_drrs_disable(intel_dp,
-                               intel_crtc->config);
+                               crtc_state);
         }
+        drm_connector_list_iter_end(&conn_iter);
+
+out:
+        drm_modeset_unlock(&crtc->base.mutex);
+        if (ret)
+            return ret;
     }
-    drm_modeset_unlock_all(dev);
 
     return 0;
 }
@@ -4818,6 +4942,7 @@ static const struct i915_debugfs_files {
     {"i915_guc_log_level", &i915_guc_log_level_fops},
     {"i915_guc_log_relay", &i915_guc_log_relay_fops},
     {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
+    {"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
     {"i915_ipc_status", &i915_ipc_status_fops},
     {"i915_drrs_ctl", &i915_drrs_ctl_fops},
     {"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
@@ -4899,13 +5024,10 @@ static int i915_dpcd_show(struct seq_file *m, void *data)
             continue;
 
         err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
-        if (err <= 0) {
-            DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
-                  size, b->offset, err);
-            continue;
-        }
-
-        seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
+        if (err < 0)
+            seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
+        else
+            seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
     }
 
     return 0;
@@ -4934,6 +5056,28 @@ static int i915_panel_show(struct seq_file *m, void *data)
 }
 DEFINE_SHOW_ATTRIBUTE(i915_panel);
 
+static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
+{
+    struct drm_connector *connector = m->private;
+    struct intel_connector *intel_connector = to_intel_connector(connector);
+
+    if (connector->status != connector_status_connected)
+        return -ENODEV;
+
+    /* HDCP is supported by connector */
+    if (!intel_connector->hdcp.shim)
+        return -EINVAL;
+
+    seq_printf(m, "%s:%d HDCP version: ", connector->name,
+           connector->base.id);
+    seq_printf(m, "%s ", !intel_hdcp_capable(intel_connector) ?
+           "None" : "HDCP1.4");
+    seq_puts(m, "\n");
+
+    return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
+
 /**
  * i915_debugfs_connector_add - add i915 specific connector debugfs files
  * @connector: pointer to a registered drm_connector
@@ -4963,5 +5107,12 @@ int i915_debugfs_connector_add(struct drm_connector *connector)
                     connector, &i915_psr_sink_status_fops);
     }
 
+    if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+        connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+        connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
+        debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
+                    connector, &i915_hdcp_sink_capability_fops);
+    }
+
     return 0;
 }
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 44e2c0f5ec50..b1d23c73c147 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -345,7 +345,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
         value = HAS_WT(dev_priv);
         break;
     case I915_PARAM_HAS_ALIASING_PPGTT:
-        value = USES_PPGTT(dev_priv);
+        value = min_t(int, INTEL_PPGTT(dev_priv), I915_GEM_PPGTT_FULL);
         break;
     case I915_PARAM_HAS_SEMAPHORES:
         value = HAS_LEGACY_SEMAPHORES(dev_priv);
@@ -645,6 +645,13 @@ static int i915_load_modeset_init(struct drm_device *dev)
     if (i915_inject_load_failure())
         return -ENODEV;
 
+    if (INTEL_INFO(dev_priv)->num_pipes) {
+        ret = drm_vblank_init(&dev_priv->drm,
+                      INTEL_INFO(dev_priv)->num_pipes);
+        if (ret)
+            goto out;
+    }
+
     intel_bios_init(dev_priv);
 
     /* If we have > 1 VGA cards, then we need to arbitrate access
@@ -687,7 +694,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
     if (ret)
         goto cleanup_modeset;
 
-    intel_setup_overlay(dev_priv);
+    intel_overlay_setup(dev_priv);
 
     if (INTEL_INFO(dev_priv)->num_pipes == 0)
         return 0;
@@ -699,6 +706,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
     /* Only enable hotplug handling once the fbdev is fully set up. */
     intel_hpd_init(dev_priv);
 
+    intel_init_ipc(dev_priv);
+
     return 0;
 
 cleanup_gem:
@@ -1030,6 +1039,7 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
 
 err_uncore:
     intel_uncore_fini(dev_priv);
+    i915_mmio_cleanup(dev_priv);
 err_bridge:
     pci_dev_put(dev_priv->bridge_dev);
 
@@ -1049,17 +1059,6 @@ static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
 
 static void intel_sanitize_options(struct drm_i915_private *dev_priv)
 {
-    /*
-     * i915.enable_ppgtt is read-only, so do an early pass to validate the
-     * user's requested state against the hardware/driver capabilities. We
-     * do this now so that we can print out any log messages once rather
-     * than every time we check intel_enable_ppgtt().
-     */
-    i915_modparams.enable_ppgtt =
-        intel_sanitize_enable_ppgtt(dev_priv,
-                        i915_modparams.enable_ppgtt);
-    DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915_modparams.enable_ppgtt);
-
     intel_gvt_sanitize_options(dev_priv);
 }
 
@@ -1175,8 +1174,6 @@ skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
         return -EINVAL;
     }
 
-    dram_info->valid_dimm = true;
-
     /*
      * If any of the channel is single rank channel, worst case output
      * will be same as if single rank memory, so consider single rank
@@ -1193,8 +1190,7 @@ skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
         return -EINVAL;
     }
 
-    if (ch0.is_16gb_dimm || ch1.is_16gb_dimm)
-        dram_info->is_16gb_dimm = true;
+    dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
 
     dev_priv->dram_info.symmetric_memory = intel_is_dram_symmetric(val_ch0,
                                    val_ch1,
@@ -1314,7 +1310,6 @@ bxt_get_dram_info(struct drm_i915_private *dev_priv)
         return -EINVAL;
     }
 
-    dram_info->valid_dimm = true;
     dram_info->valid = true;
     return 0;
 }
@@ -1327,19 +1322,24 @@ intel_get_dram_info(struct drm_i915_private *dev_priv)
     int ret;
 
     dram_info->valid = false;
-    dram_info->valid_dimm = false;
-    dram_info->is_16gb_dimm = false;
     dram_info->rank = I915_DRAM_RANK_INVALID;
     dram_info->bandwidth_kbps = 0;
     dram_info->num_channels = 0;
 
+    /*
+     * Assume 16Gb DIMMs are present until proven otherwise.
+     * This is only used for the level 0 watermark latency
+     * w/a which does not apply to bxt/glk.
+     */
+    dram_info->is_16gb_dimm = !IS_GEN9_LP(dev_priv);
+
     if (INTEL_GEN(dev_priv) < 9 || IS_GEMINILAKE(dev_priv))
         return;
 
     /* Need to calculate bandwidth only for Gen9 */
     if (IS_BROXTON(dev_priv))
         ret = bxt_get_dram_info(dev_priv);
-    else if (INTEL_GEN(dev_priv) == 9)
+    else if (IS_GEN9(dev_priv))
         ret = skl_get_dram_info(dev_priv);
     else
         ret = skl_dram_get_channels_info(dev_priv);
@@ -1374,6 +1374,15 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 
     intel_device_info_runtime_init(mkwrite_device_info(dev_priv));
 
+    if (HAS_PPGTT(dev_priv)) {
+        if (intel_vgpu_active(dev_priv) &&
+            !intel_vgpu_has_full_48bit_ppgtt(dev_priv)) {
+            i915_report_error(dev_priv,
+                      "incompatible vGPU found, support for isolated ppGTT required\n");
+            return -ENXIO;
+        }
+    }
+
     intel_sanitize_options(dev_priv);
 
     i915_perf_init(dev_priv);
@@ -1629,14 +1638,16 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
         (struct intel_device_info *)ent->driver_data;
     struct intel_device_info *device_info;
     struct drm_i915_private *i915;
+    int err;
 
     i915 = kzalloc(sizeof(*i915), GFP_KERNEL);
     if (!i915)
-        return NULL;
+        return ERR_PTR(-ENOMEM);
 
-    if (drm_dev_init(&i915->drm, &driver, &pdev->dev)) {
+    err = drm_dev_init(&i915->drm, &driver, &pdev->dev);
+    if (err) {
         kfree(i915);
-        return NULL;
+        return ERR_PTR(err);
     }
 
     i915->drm.pdev = pdev;
@@ -1649,8 +1660,8 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
     device_info->device_id = pdev->device;
 
     BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
-             sizeof(device_info->platform_mask) * BITS_PER_BYTE);
-    BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
+             BITS_PER_TYPE(device_info->platform_mask));
+    BUG_ON(device_info->gen > BITS_PER_TYPE(device_info->gen_mask));
 
     return i915;
 }
@@ -1685,8 +1696,8 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
     int ret;
 
     dev_priv = i915_driver_create(pdev, ent);
-    if (!dev_priv)
-        return -ENOMEM;
+    if (IS_ERR(dev_priv))
+        return PTR_ERR(dev_priv);
 
     /* Disable nuclear pageflip by default on pre-ILK */
     if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
@@ -1710,26 +1721,12 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
     if (ret < 0)
         goto out_cleanup_mmio;
 
-    /*
-     * TODO: move the vblank init and parts of modeset init steps into one
-     * of the i915_driver_init_/i915_driver_register functions according
-     * to the role/effect of the given init step.
-     */
-    if (INTEL_INFO(dev_priv)->num_pipes) {
-        ret = drm_vblank_init(&dev_priv->drm,
-                      INTEL_INFO(dev_priv)->num_pipes);
-        if (ret)
-            goto out_cleanup_hw;
-    }
-
     ret = i915_load_modeset_init(&dev_priv->drm);
     if (ret < 0)
         goto out_cleanup_hw;
 
     i915_driver_register(dev_priv);
 
-    intel_init_ipc(dev_priv);
-
     enable_rpm_wakeref_asserts(dev_priv);
 
     i915_welcome_messages(dev_priv);
@@ -1781,7 +1778,6 @@ void i915_driver_unload(struct drm_device *dev)
     i915_reset_error_state(dev_priv);
 
     i915_gem_fini(dev_priv);
-    intel_fbc_cleanup_cfb(dev_priv);
 
     intel_power_domains_fini_hw(dev_priv);
 
@@ -1919,9 +1915,7 @@ static int i915_drm_suspend(struct drm_device *dev)
     i915_save_state(dev_priv);
 
     opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
-    intel_opregion_notify_adapter(dev_priv, opregion_target_state);
-
-    intel_opregion_unregister(dev_priv);
+    intel_opregion_suspend(dev_priv, opregion_target_state);
 
     intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
 
@@ -1962,7 +1956,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
                   get_suspend_mode(dev_priv, hibernation));
 
     ret = 0;
-    if (IS_GEN9_LP(dev_priv))
+    if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv))
         bxt_enable_dc9(dev_priv);
     else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
         hsw_enable_pc8(dev_priv);
@@ -2040,7 +2034,6 @@ static int i915_drm_resume(struct drm_device *dev)
 
     i915_restore_state(dev_priv);
     intel_pps_unlock_regs_wa(dev_priv);
-    intel_opregion_setup(dev_priv);
 
     intel_init_pch_refclk(dev_priv);
 
@@ -2082,12 +2075,10 @@ static int i915_drm_resume(struct drm_device *dev)
      * */
     intel_hpd_init(dev_priv);
 
-    intel_opregion_register(dev_priv);
+    intel_opregion_resume(dev_priv);
 
     intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
 
-    intel_opregion_notify_adapter(dev_priv, PCI_D0);
-
     intel_power_domains_enable(dev_priv);
 
     enable_rpm_wakeref_asserts(dev_priv);
@@ -2155,7 +2146,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
 
     intel_uncore_resume_early(dev_priv);
 
-    if (IS_GEN9_LP(dev_priv)) {
+    if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv)) {
         gen9_sanitize_dc_state(dev_priv);
         bxt_disable_dc9(dev_priv);
     } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
@@ -2922,7 +2913,10 @@ static int intel_runtime_suspend(struct device *kdev)
     intel_uncore_suspend(dev_priv);
 
     ret = 0;
-    if (IS_GEN9_LP(dev_priv)) {
+    if (INTEL_GEN(dev_priv) >= 11) {
+        icl_display_core_uninit(dev_priv);
+        bxt_enable_dc9(dev_priv);
+    } else if (IS_GEN9_LP(dev_priv)) {
         bxt_display_core_uninit(dev_priv);
         bxt_enable_dc9(dev_priv);
     } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
@@ -3007,7 +3001,18 @@ static int intel_runtime_resume(struct device *kdev)
     if (intel_uncore_unclaimed_mmio(dev_priv))
         DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
 
-    if (IS_GEN9_LP(dev_priv)) {
+    if (INTEL_GEN(dev_priv) >= 11) {
+        bxt_disable_dc9(dev_priv);
+        icl_display_core_init(dev_priv, true);
+        if (dev_priv->csr.dmc_payload) {
+            if (dev_priv->csr.allowed_dc_mask &
+                DC_STATE_EN_UPTO_DC6)
+                skl_enable_dc6(dev_priv);
+            else if (dev_priv->csr.allowed_dc_mask &
+                 DC_STATE_EN_UPTO_DC5)
+                gen9_enable_dc5(dev_priv);
+        }
+    } else if (IS_GEN9_LP(dev_priv)) {
         bxt_disable_dc9(dev_priv);
         bxt_display_core_init(dev_priv, true);
         if (dev_priv->csr.dmc_payload &&
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8624b4bdc242..4064e49dbf70 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -54,6 +54,7 @@
 #include <drm/drm_cache.h>
 #include <drm/drm_util.h>
 
+#include "i915_fixed.h"
 #include "i915_params.h"
 #include "i915_reg.h"
 #include "i915_utils.h"
@@ -87,8 +88,8 @@
 
 #define DRIVER_NAME "i915"
 #define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20180921"
-#define DRIVER_TIMESTAMP 1537521997
+#define DRIVER_DATE "20181122"
+#define DRIVER_TIMESTAMP 1542898187
 
 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
  * WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -127,144 +128,6 @@ bool i915_error_injected(void);
     __i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
               fmt, ##__VA_ARGS__)
 
-typedef struct {
-    uint32_t val;
-} uint_fixed_16_16_t;
-
-#define FP_16_16_MAX ({ \
-    uint_fixed_16_16_t fp; \
-    fp.val = UINT_MAX; \
-    fp; \
-})
-
-static inline bool is_fixed16_zero(uint_fixed_16_16_t val)
-{
-    if (val.val == 0)
-        return true;
-    return false;
-}
-
-static inline uint_fixed_16_16_t u32_to_fixed16(uint32_t val)
-{
-    uint_fixed_16_16_t fp;
-
-    WARN_ON(val > U16_MAX);
-
-    fp.val = val << 16;
-    return fp;
-}
-
-static inline uint32_t fixed16_to_u32_round_up(uint_fixed_16_16_t fp)
-{
-    return DIV_ROUND_UP(fp.val, 1 << 16);
-}
-
-static inline uint32_t fixed16_to_u32(uint_fixed_16_16_t fp)
-{
-    return fp.val >> 16;
-}
-
-static inline uint_fixed_16_16_t min_fixed16(uint_fixed_16_16_t min1,
-                         uint_fixed_16_16_t min2)
-{
-    uint_fixed_16_16_t min;
-
-    min.val = min(min1.val, min2.val);
-    return min;
-}
-
-static inline uint_fixed_16_16_t max_fixed16(uint_fixed_16_16_t max1,
-                         uint_fixed_16_16_t max2)
-{
-    uint_fixed_16_16_t max;
-
-    max.val = max(max1.val, max2.val);
-    return max;
-}
-
-static inline uint_fixed_16_16_t clamp_u64_to_fixed16(uint64_t val)
-{
-    uint_fixed_16_16_t fp;
-    WARN_ON(val > U32_MAX);
-    fp.val = (uint32_t) val;
-    return fp;
-}
-
-static inline uint32_t div_round_up_fixed16(uint_fixed_16_16_t val,
-                        uint_fixed_16_16_t d)
-{
-    return DIV_ROUND_UP(val.val, d.val);
-}
-
-static inline uint32_t mul_round_up_u32_fixed16(uint32_t val,
-                        uint_fixed_16_16_t mul)
-{
-    uint64_t intermediate_val;
-
-    intermediate_val = (uint64_t) val * mul.val;
-    intermediate_val = DIV_ROUND_UP_ULL(intermediate_val, 1 << 16);
-    WARN_ON(intermediate_val > U32_MAX);
-    return (uint32_t) intermediate_val;
-}
-
-static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val,
-                         uint_fixed_16_16_t mul)
-{
-    uint64_t intermediate_val;
-
-    intermediate_val = (uint64_t) val.val * mul.val;
-    intermediate_val = intermediate_val >> 16;
-    return clamp_u64_to_fixed16(intermediate_val);
-}
-
-static inline uint_fixed_16_16_t div_fixed16(uint32_t val, uint32_t d)
-{
-    uint64_t interm_val;
-
-    interm_val = (uint64_t)val << 16;
-    interm_val = DIV_ROUND_UP_ULL(interm_val, d);
-    return clamp_u64_to_fixed16(interm_val);
-}
-
-static inline uint32_t div_round_up_u32_fixed16(uint32_t val,
-                        uint_fixed_16_16_t d)
-{
-    uint64_t interm_val;
-
-    interm_val = (uint64_t)val << 16;
-    interm_val = DIV_ROUND_UP_ULL(interm_val, d.val);
-    WARN_ON(interm_val > U32_MAX);
-    return (uint32_t) interm_val;
-}
-
-static inline uint_fixed_16_16_t mul_u32_fixed16(uint32_t val,
-                         uint_fixed_16_16_t mul)
-{
-    uint64_t intermediate_val;
-
-    intermediate_val = (uint64_t) val * mul.val;
-    return clamp_u64_to_fixed16(intermediate_val);
-}
-
-static inline uint_fixed_16_16_t add_fixed16(uint_fixed_16_16_t add1,
-                         uint_fixed_16_16_t add2)
-{
-    uint64_t interm_sum;
-
-    interm_sum = (uint64_t) add1.val + add2.val;
-    return clamp_u64_to_fixed16(interm_sum);
-}
-
-static inline uint_fixed_16_16_t add_fixed16_u32(uint_fixed_16_16_t add1,
-                         uint32_t add2)
-{
-    uint64_t interm_sum;
-    uint_fixed_16_16_t interm_add2 = u32_to_fixed16(add2);
-
-    interm_sum = (uint64_t) add1.val + interm_add2.val;
-    return clamp_u64_to_fixed16(interm_sum);
-}
-
 enum hpd_pin {
     HPD_NONE = 0,
     HPD_TV = HPD_NONE, /* TV is known to be unreliable */
@@ -283,7 +146,8 @@ enum hpd_pin {
283#define for_each_hpd_pin(__pin) \ 146#define for_each_hpd_pin(__pin) \
284 for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++) 147 for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
285 148
286#define HPD_STORM_DEFAULT_THRESHOLD 5 149/* Threshold == 5 for long IRQs, 50 for short */
150#define HPD_STORM_DEFAULT_THRESHOLD 50
287 151
288struct i915_hotplug { 152struct i915_hotplug {
289 struct work_struct hotplug_work; 153 struct work_struct hotplug_work;
@@ -308,6 +172,8 @@ struct i915_hotplug {
308 bool poll_enabled; 172 bool poll_enabled;
309 173
310 unsigned int hpd_storm_threshold; 174 unsigned int hpd_storm_threshold;
175 /* Whether or not to count short HPD IRQs in HPD storms */
176 u8 hpd_short_storm_enabled;
311 177
312 /* 178 /*
313 * if we get a HPD irq from DP and a HPD irq from non-DP 179 * if we get a HPD irq from DP and a HPD irq from non-DP
@@ -465,8 +331,10 @@ struct drm_i915_display_funcs {
465struct intel_csr { 331struct intel_csr {
466 struct work_struct work; 332 struct work_struct work;
467 const char *fw_path; 333 const char *fw_path;
334 uint32_t required_version;
335 uint32_t max_fw_size; /* bytes */
468 uint32_t *dmc_payload; 336 uint32_t *dmc_payload;
469 uint32_t dmc_fw_size; 337 uint32_t dmc_fw_size; /* dwords */
470 uint32_t version; 338 uint32_t version;
471 uint32_t mmio_count; 339 uint32_t mmio_count;
472 i915_reg_t mmioaddr[8]; 340 i915_reg_t mmioaddr[8];
@@ -546,6 +414,8 @@ struct intel_fbc {
546 int adjusted_y; 414 int adjusted_y;
547 415
548 int y; 416 int y;
417
418 uint16_t pixel_blend_mode;
549 } plane; 419 } plane;
550 420
551 struct { 421 struct {
@@ -630,7 +500,6 @@ struct i915_psr {
630 bool sink_psr2_support; 500 bool sink_psr2_support;
631 bool link_standby; 501 bool link_standby;
632 bool colorimetry_support; 502 bool colorimetry_support;
633 bool alpm;
634 bool psr2_enabled; 503 bool psr2_enabled;
635 u8 sink_sync_latency; 504 u8 sink_sync_latency;
636 ktime_t last_entry_attempt; 505 ktime_t last_entry_attempt;
@@ -918,6 +787,11 @@ struct i915_power_well_desc {
918 /* The pw is backing the VGA functionality */ 787 /* The pw is backing the VGA functionality */
919 bool has_vga:1; 788 bool has_vga:1;
920 bool has_fuses:1; 789 bool has_fuses:1;
790 /*
791 * The pw is for an ICL+ TypeC PHY port in
792 * Thunderbolt mode.
793 */
794 bool is_tc_tbt:1;
921 } hsw; 795 } hsw;
922 }; 796 };
923 const struct i915_power_well_ops *ops; 797 const struct i915_power_well_ops *ops;
@@ -1042,17 +916,6 @@ struct i915_gem_mm {
1042 916
1043#define I915_ENGINE_WEDGED_TIMEOUT (60 * HZ) /* Reset but no recovery? */ 917#define I915_ENGINE_WEDGED_TIMEOUT (60 * HZ) /* Reset but no recovery? */
1044 918
1045#define DP_AUX_A 0x40
1046#define DP_AUX_B 0x10
1047#define DP_AUX_C 0x20
1048#define DP_AUX_D 0x30
1049#define DP_AUX_E 0x50
1050#define DP_AUX_F 0x60
1051
1052#define DDC_PIN_B 0x05
1053#define DDC_PIN_C 0x04
1054#define DDC_PIN_D 0x06
1055
1056struct ddi_vbt_port_info { 919struct ddi_vbt_port_info {
1057 int max_tmds_clock; 920 int max_tmds_clock;
1058 921
@@ -1099,6 +962,7 @@ struct intel_vbt_data {
1099 unsigned int panel_type:4; 962 unsigned int panel_type:4;
1100 int lvds_ssc_freq; 963 int lvds_ssc_freq;
1101 unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ 964 unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
965 enum drm_panel_orientation orientation;
1102 966
1103 enum drrs_support_type drrs_type; 967 enum drrs_support_type drrs_type;
1104 968
@@ -1144,6 +1008,7 @@ struct intel_vbt_data {
1144 u8 *data; 1008 u8 *data;
1145 const u8 *sequence[MIPI_SEQ_MAX]; 1009 const u8 *sequence[MIPI_SEQ_MAX];
1146 u8 *deassert_seq; /* Used by fixup_mipi_sequences() */ 1010 u8 *deassert_seq; /* Used by fixup_mipi_sequences() */
1011 enum drm_panel_orientation orientation;
1147 } dsi; 1012 } dsi;
1148 1013
1149 int crt_ddc_pin; 1014 int crt_ddc_pin;
@@ -1240,9 +1105,9 @@ struct skl_ddb_values {
1240}; 1105};
1241 1106
1242struct skl_wm_level { 1107struct skl_wm_level {
1243 bool plane_en;
1244 uint16_t plane_res_b; 1108 uint16_t plane_res_b;
1245 uint8_t plane_res_l; 1109 uint8_t plane_res_l;
1110 bool plane_en;
1246}; 1111};
1247 1112
1248/* Stores plane specific WM parameters */ 1113/* Stores plane specific WM parameters */
@@ -1520,30 +1385,12 @@ struct i915_oa_ops {
1520 bool (*is_valid_flex_reg)(struct drm_i915_private *dev_priv, u32 addr); 1385 bool (*is_valid_flex_reg)(struct drm_i915_private *dev_priv, u32 addr);
1521 1386
1522 /** 1387 /**
1523 * @init_oa_buffer: Resets the head and tail pointers of the
1524 * circular buffer for periodic OA reports.
1525 *
1526 * Called when first opening a stream for OA metrics, but also may be
1527 * called in response to an OA buffer overflow or other error
1528 * condition.
1529 *
1530 * Note it may be necessary to clear the full OA buffer here as part of
1531 * maintaining the invariant that new reports must be written to
1532 * zeroed memory for us to be able to reliably detect if an expected
1533 * report has not yet landed in memory. (At least on Haswell the OA
1534 * buffer tail pointer is not synchronized with reports being visible
1535 * to the CPU)
1536 */
1537 void (*init_oa_buffer)(struct drm_i915_private *dev_priv);
1538
1539 /**
1540 * @enable_metric_set: Selects and applies any MUX configuration to set 1388 * @enable_metric_set: Selects and applies any MUX configuration to set
1541 * up the Boolean and Custom (B/C) counters that are part of the 1389 * up the Boolean and Custom (B/C) counters that are part of the
1542 * counter reports being sampled. May apply system constraints such as 1390 * counter reports being sampled. May apply system constraints such as
1543 * disabling EU clock gating as required. 1391 * disabling EU clock gating as required.
1544 */ 1392 */
1545 int (*enable_metric_set)(struct drm_i915_private *dev_priv, 1393 int (*enable_metric_set)(struct i915_perf_stream *stream);
1546 const struct i915_oa_config *oa_config);
1547 1394
1548 /** 1395 /**
1549 * @disable_metric_set: Remove system constraints associated with using 1396 * @disable_metric_set: Remove system constraints associated with using
@@ -1554,12 +1401,12 @@ struct i915_oa_ops {
1554 /** 1401 /**
1555 * @oa_enable: Enable periodic sampling 1402 * @oa_enable: Enable periodic sampling
1556 */ 1403 */
1557 void (*oa_enable)(struct drm_i915_private *dev_priv); 1404 void (*oa_enable)(struct i915_perf_stream *stream);
1558 1405
1559 /** 1406 /**
1560 * @oa_disable: Disable periodic sampling 1407 * @oa_disable: Disable periodic sampling
1561 */ 1408 */
1562 void (*oa_disable)(struct drm_i915_private *dev_priv); 1409 void (*oa_disable)(struct i915_perf_stream *stream);
1563 1410
1564 /** 1411 /**
1565 * @read: Copy data from the circular OA buffer into a given userspace 1412 * @read: Copy data from the circular OA buffer into a given userspace
@@ -1948,7 +1795,6 @@ struct drm_i915_private {
1948 1795
1949 struct dram_info { 1796 struct dram_info {
1950 bool valid; 1797 bool valid;
1951 bool valid_dimm;
1952 bool is_16gb_dimm; 1798 bool is_16gb_dimm;
1953 u8 num_channels; 1799 u8 num_channels;
1954 enum dram_rank { 1800 enum dram_rank {
@@ -2323,6 +2169,8 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
2323 (((__iter).curr += PAGE_SIZE) >= (__iter).max) ? \ 2169 (((__iter).curr += PAGE_SIZE) >= (__iter).max) ? \
2324 (__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0) 2170 (__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0)
2325 2171
2172bool i915_sg_trim(struct sg_table *orig_st);
2173
2326static inline unsigned int i915_sg_page_sizes(struct scatterlist *sg) 2174static inline unsigned int i915_sg_page_sizes(struct scatterlist *sg)
2327{ 2175{
2328 unsigned int page_sizes; 2176 unsigned int page_sizes;
@@ -2368,20 +2216,12 @@ intel_info(const struct drm_i915_private *dev_priv)
2368#define REVID_FOREVER 0xff 2216#define REVID_FOREVER 0xff
2369#define INTEL_REVID(dev_priv) ((dev_priv)->drm.pdev->revision) 2217#define INTEL_REVID(dev_priv) ((dev_priv)->drm.pdev->revision)
2370 2218
2371#define GEN_FOREVER (0)
2372
2373#define INTEL_GEN_MASK(s, e) ( \ 2219#define INTEL_GEN_MASK(s, e) ( \
2374 BUILD_BUG_ON_ZERO(!__builtin_constant_p(s)) + \ 2220 BUILD_BUG_ON_ZERO(!__builtin_constant_p(s)) + \
2375 BUILD_BUG_ON_ZERO(!__builtin_constant_p(e)) + \ 2221 BUILD_BUG_ON_ZERO(!__builtin_constant_p(e)) + \
2376 GENMASK((e) != GEN_FOREVER ? (e) - 1 : BITS_PER_LONG - 1, \ 2222 GENMASK((e) - 1, (s) - 1))
2377 (s) != GEN_FOREVER ? (s) - 1 : 0) \
2378)
2379 2223
2380/* 2224/* Returns true if Gen is in inclusive range [Start, End] */
2381 * Returns true if Gen is in inclusive range [Start, End].
2382 *
2383 * Use GEN_FOREVER for unbound start and or end.
2384 */
2385#define IS_GEN(dev_priv, s, e) \ 2225#define IS_GEN(dev_priv, s, e) \
2386 (!!((dev_priv)->info.gen_mask & INTEL_GEN_MASK((s), (e)))) 2226 (!!((dev_priv)->info.gen_mask & INTEL_GEN_MASK((s), (e))))
2387 2227
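The simplified mask keeps one bit per generation at position (gen - 1), so the range test collapses to a single AND. A worked sketch (illustrative only; values follow the gen_mask convention above):

	/* INTEL_GEN_MASK(6, 7) == GENMASK(6, 5) == 0x60.          */
	/* A gen7 part has info.gen_mask == BIT(7 - 1) == 0x40, so */
	/* IS_GEN(dev_priv, 6, 7) -> !!(0x40 & 0x60) -> true.      */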
@@ -2462,6 +2302,8 @@ intel_info(const struct drm_i915_private *dev_priv)
2462#define IS_KBL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x590E || \ 2302#define IS_KBL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x590E || \
2463 INTEL_DEVID(dev_priv) == 0x5915 || \ 2303 INTEL_DEVID(dev_priv) == 0x5915 || \
2464 INTEL_DEVID(dev_priv) == 0x591E) 2304 INTEL_DEVID(dev_priv) == 0x591E)
2305#define IS_AML_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x591C || \
2306 INTEL_DEVID(dev_priv) == 0x87C0)
2465#define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \ 2307#define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \
2466 (dev_priv)->info.gt == 2) 2308 (dev_priv)->info.gt == 2)
2467#define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \ 2309#define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \
@@ -2593,9 +2435,14 @@ intel_info(const struct drm_i915_private *dev_priv)
2593 2435
2594#define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv) 2436#define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)
2595 2437
2596#define USES_PPGTT(dev_priv) (i915_modparams.enable_ppgtt) 2438#define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt)
2597#define USES_FULL_PPGTT(dev_priv) (i915_modparams.enable_ppgtt >= 2) 2439#define HAS_PPGTT(dev_priv) \
2598#define USES_FULL_48BIT_PPGTT(dev_priv) (i915_modparams.enable_ppgtt == 3) 2440 (INTEL_PPGTT(dev_priv) != INTEL_PPGTT_NONE)
2441#define HAS_FULL_PPGTT(dev_priv) \
2442 (INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL)
2443#define HAS_FULL_48BIT_PPGTT(dev_priv) \
2444 (INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL_4LVL)
2445
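These levels replace the old enable_ppgtt modparam checks. A minimal sketch of how a caller selects behaviour from them (assuming the enum ordering INTEL_PPGTT_NONE < INTEL_PPGTT_ALIASING < INTEL_PPGTT_FULL < INTEL_PPGTT_FULL_4LVL introduced alongside this change; choose_vm_size() is a hypothetical caller):

	/* Sketch: derive the VM layout from device info, not modparams. */
	static u64 choose_vm_size(struct drm_i915_private *i915)
	{
		if (HAS_FULL_48BIT_PPGTT(i915))
			return 1ULL << 48;	/* 4-level page tables */
		if (HAS_FULL_PPGTT(i915))
			return 1ULL << 32;	/* full, 3-level */
		return 0;			/* aliasing or none: GGTT only */
	}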
2599#define HAS_PAGE_SIZES(dev_priv, sizes) ({ \ 2446#define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
2600 GEM_BUG_ON((sizes) == 0); \ 2447 GEM_BUG_ON((sizes) == 0); \
2601 ((sizes) & ~(dev_priv)->info.page_sizes) == 0; \ 2448 ((sizes) & ~(dev_priv)->info.page_sizes) == 0; \
@@ -2743,9 +2590,6 @@ intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
2743 return IS_BROXTON(dev_priv) && intel_vtd_active(); 2590 return IS_BROXTON(dev_priv) && intel_vtd_active();
2744} 2591}
2745 2592
2746int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
2747 int enable_ppgtt);
2748
2749/* i915_drv.c */ 2593/* i915_drv.c */
2750void __printf(3, 4) 2594void __printf(3, 4)
2751__i915_printk(struct drm_i915_private *dev_priv, const char *level, 2595__i915_printk(struct drm_i915_private *dev_priv, const char *level,
@@ -3230,7 +3074,7 @@ int i915_gem_object_wait(struct drm_i915_gem_object *obj,
3230int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, 3074int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
3231 unsigned int flags, 3075 unsigned int flags,
3232 const struct i915_sched_attr *attr); 3076 const struct i915_sched_attr *attr);
3233#define I915_PRIORITY_DISPLAY I915_PRIORITY_MAX 3077#define I915_PRIORITY_DISPLAY I915_USER_PRIORITY(I915_PRIORITY_MAX)
3234 3078
3235int __must_check 3079int __must_check
3236i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write); 3080i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
@@ -3462,6 +3306,7 @@ bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
3462 enum port port); 3306 enum port port);
3463bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv, 3307bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
3464 enum port port); 3308 enum port port);
3309enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, enum port port);
3465 3310
3466/* intel_acpi.c */ 3311/* intel_acpi.c */
3467#ifdef CONFIG_ACPI 3312#ifdef CONFIG_ACPI
@@ -3483,8 +3328,6 @@ mkwrite_device_info(struct drm_i915_private *dev_priv)
3483extern void intel_modeset_init_hw(struct drm_device *dev); 3328extern void intel_modeset_init_hw(struct drm_device *dev);
3484extern int intel_modeset_init(struct drm_device *dev); 3329extern int intel_modeset_init(struct drm_device *dev);
3485extern void intel_modeset_cleanup(struct drm_device *dev); 3330extern void intel_modeset_cleanup(struct drm_device *dev);
3486extern int intel_connector_register(struct drm_connector *);
3487extern void intel_connector_unregister(struct drm_connector *);
3488extern int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, 3331extern int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv,
3489 bool state); 3332 bool state);
3490extern void intel_display_resume(struct drm_device *dev); 3333extern void intel_display_resume(struct drm_device *dev);
@@ -3584,6 +3427,12 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
3584void vlv_phy_reset_lanes(struct intel_encoder *encoder, 3427void vlv_phy_reset_lanes(struct intel_encoder *encoder,
3585 const struct intel_crtc_state *old_crtc_state); 3428 const struct intel_crtc_state *old_crtc_state);
3586 3429
3430/* intel_combo_phy.c */
3431void icl_combo_phys_init(struct drm_i915_private *dev_priv);
3432void icl_combo_phys_uninit(struct drm_i915_private *dev_priv);
3433void cnl_combo_phys_init(struct drm_i915_private *dev_priv);
3434void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv);
3435
3587int intel_gpu_freq(struct drm_i915_private *dev_priv, int val); 3436int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
3588int intel_freq_opcode(struct drm_i915_private *dev_priv, int val); 3437int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
3589u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv, 3438u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/i915_fixed.h b/drivers/gpu/drm/i915/i915_fixed.h
new file mode 100644
index 000000000000..591dd89ba7af
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_fixed.h
@@ -0,0 +1,143 @@
1/* SPDX-License-Identifier: MIT */
2/*
3 * Copyright © 2018 Intel Corporation
4 */
5
6#ifndef _I915_FIXED_H_
7#define _I915_FIXED_H_
8
9typedef struct {
10 u32 val;
11} uint_fixed_16_16_t;
12
13#define FP_16_16_MAX ((uint_fixed_16_16_t){ .val = UINT_MAX })
14
15static inline bool is_fixed16_zero(uint_fixed_16_16_t val)
16{
17 return val.val == 0;
18}
19
20static inline uint_fixed_16_16_t u32_to_fixed16(u32 val)
21{
22 uint_fixed_16_16_t fp = { .val = val << 16 };
23
24 WARN_ON(val > U16_MAX);
25
26 return fp;
27}
28
29static inline u32 fixed16_to_u32_round_up(uint_fixed_16_16_t fp)
30{
31 return DIV_ROUND_UP(fp.val, 1 << 16);
32}
33
34static inline u32 fixed16_to_u32(uint_fixed_16_16_t fp)
35{
36 return fp.val >> 16;
37}
38
39static inline uint_fixed_16_16_t min_fixed16(uint_fixed_16_16_t min1,
40 uint_fixed_16_16_t min2)
41{
42 uint_fixed_16_16_t min = { .val = min(min1.val, min2.val) };
43
44 return min;
45}
46
47static inline uint_fixed_16_16_t max_fixed16(uint_fixed_16_16_t max1,
48 uint_fixed_16_16_t max2)
49{
50 uint_fixed_16_16_t max = { .val = max(max1.val, max2.val) };
51
52 return max;
53}
54
55static inline uint_fixed_16_16_t clamp_u64_to_fixed16(u64 val)
56{
57 uint_fixed_16_16_t fp = { .val = (u32)val };
58
59 WARN_ON(val > U32_MAX);
60
61 return fp;
62}
63
64static inline u32 div_round_up_fixed16(uint_fixed_16_16_t val,
65 uint_fixed_16_16_t d)
66{
67 return DIV_ROUND_UP(val.val, d.val);
68}
69
70static inline u32 mul_round_up_u32_fixed16(u32 val, uint_fixed_16_16_t mul)
71{
72 u64 tmp;
73
74 tmp = (u64)val * mul.val;
75 tmp = DIV_ROUND_UP_ULL(tmp, 1 << 16);
76 WARN_ON(tmp > U32_MAX);
77
78 return (u32)tmp;
79}
80
81static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val,
82 uint_fixed_16_16_t mul)
83{
84 u64 tmp;
85
86 tmp = (u64)val.val * mul.val;
87 tmp = tmp >> 16;
88
89 return clamp_u64_to_fixed16(tmp);
90}
91
92static inline uint_fixed_16_16_t div_fixed16(u32 val, u32 d)
93{
94 u64 tmp;
95
96 tmp = (u64)val << 16;
97 tmp = DIV_ROUND_UP_ULL(tmp, d);
98
99 return clamp_u64_to_fixed16(tmp);
100}
101
102static inline u32 div_round_up_u32_fixed16(u32 val, uint_fixed_16_16_t d)
103{
104 u64 tmp;
105
106 tmp = (u64)val << 16;
107 tmp = DIV_ROUND_UP_ULL(tmp, d.val);
108 WARN_ON(tmp > U32_MAX);
109
110 return (u32)tmp;
111}
112
113static inline uint_fixed_16_16_t mul_u32_fixed16(u32 val, uint_fixed_16_16_t mul)
114{
115 u64 tmp;
116
117 tmp = (u64)val * mul.val;
118
119 return clamp_u64_to_fixed16(tmp);
120}
121
122static inline uint_fixed_16_16_t add_fixed16(uint_fixed_16_16_t add1,
123 uint_fixed_16_16_t add2)
124{
125 u64 tmp;
126
127 tmp = (u64)add1.val + add2.val;
128
129 return clamp_u64_to_fixed16(tmp);
130}
131
132static inline uint_fixed_16_16_t add_fixed16_u32(uint_fixed_16_16_t add1,
133 u32 add2)
134{
135 uint_fixed_16_16_t tmp_add2 = u32_to_fixed16(add2);
136 u64 tmp;
137
138 tmp = (u64)add1.val + tmp_add2.val;
139
140 return clamp_u64_to_fixed16(tmp);
141}
142
143#endif /* _I915_FIXED_H_ */
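To make the 16.16 helpers concrete, a short usage sketch (illustrative, not part of the commit): 1.5 is encoded as 0x18000, and the round-up multiply behaves as ceil().

	/* Sketch: scale a u32 by a 16.16 ratio. */
	uint_fixed_16_16_t ratio = div_fixed16(3, 2);		/* 1.5 == 0x18000 */
	u32 up   = mul_round_up_u32_fixed16(3, ratio);		/* ceil(4.5) == 5 */
	u32 down = fixed16_to_u32(mul_u32_fixed16(3, ratio));	/* floor(4.5) == 4 */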
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0c8aa57ce83b..c55b1f75c980 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1740,6 +1740,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1740 */ 1740 */
1741 err = i915_gem_object_wait(obj, 1741 err = i915_gem_object_wait(obj,
1742 I915_WAIT_INTERRUPTIBLE | 1742 I915_WAIT_INTERRUPTIBLE |
1743 I915_WAIT_PRIORITY |
1743 (write_domain ? I915_WAIT_ALL : 0), 1744 (write_domain ? I915_WAIT_ALL : 0),
1744 MAX_SCHEDULE_TIMEOUT, 1745 MAX_SCHEDULE_TIMEOUT,
1745 to_rps_client(file)); 1746 to_rps_client(file));
@@ -2381,11 +2382,23 @@ void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
2381 invalidate_mapping_pages(mapping, 0, (loff_t)-1); 2382 invalidate_mapping_pages(mapping, 0, (loff_t)-1);
2382} 2383}
2383 2384
2385/*
2386 * Move pages to appropriate lru and release the pagevec, decrementing the
2387 * ref count of those pages.
2388 */
2389static void check_release_pagevec(struct pagevec *pvec)
2390{
2391 check_move_unevictable_pages(pvec);
2392 __pagevec_release(pvec);
2393 cond_resched();
2394}
2395
2384static void 2396static void
2385i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj, 2397i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
2386 struct sg_table *pages) 2398 struct sg_table *pages)
2387{ 2399{
2388 struct sgt_iter sgt_iter; 2400 struct sgt_iter sgt_iter;
2401 struct pagevec pvec;
2389 struct page *page; 2402 struct page *page;
2390 2403
2391 __i915_gem_object_release_shmem(obj, pages, true); 2404 __i915_gem_object_release_shmem(obj, pages, true);
@@ -2395,6 +2408,9 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
2395 if (i915_gem_object_needs_bit17_swizzle(obj)) 2408 if (i915_gem_object_needs_bit17_swizzle(obj))
2396 i915_gem_object_save_bit_17_swizzle(obj, pages); 2409 i915_gem_object_save_bit_17_swizzle(obj, pages);
2397 2410
2411 mapping_clear_unevictable(file_inode(obj->base.filp)->i_mapping);
2412
2413 pagevec_init(&pvec);
2398 for_each_sgt_page(page, sgt_iter, pages) { 2414 for_each_sgt_page(page, sgt_iter, pages) {
2399 if (obj->mm.dirty) 2415 if (obj->mm.dirty)
2400 set_page_dirty(page); 2416 set_page_dirty(page);
@@ -2402,8 +2418,11 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
2402 if (obj->mm.madv == I915_MADV_WILLNEED) 2418 if (obj->mm.madv == I915_MADV_WILLNEED)
2403 mark_page_accessed(page); 2419 mark_page_accessed(page);
2404 2420
2405 put_page(page); 2421 if (!pagevec_add(&pvec, page))
2422 check_release_pagevec(&pvec);
2406 } 2423 }
2424 if (pagevec_count(&pvec))
2425 check_release_pagevec(&pvec);
2407 obj->mm.dirty = false; 2426 obj->mm.dirty = false;
2408 2427
2409 sg_free_table(pages); 2428 sg_free_table(pages);
@@ -2483,7 +2502,7 @@ unlock:
2483 mutex_unlock(&obj->mm.lock); 2502 mutex_unlock(&obj->mm.lock);
2484} 2503}
2485 2504
2486static bool i915_sg_trim(struct sg_table *orig_st) 2505bool i915_sg_trim(struct sg_table *orig_st)
2487{ 2506{
2488 struct sg_table new_st; 2507 struct sg_table new_st;
2489 struct scatterlist *sg, *new_sg; 2508 struct scatterlist *sg, *new_sg;
@@ -2524,6 +2543,7 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2524 unsigned long last_pfn = 0; /* suppress gcc warning */ 2543 unsigned long last_pfn = 0; /* suppress gcc warning */
2525 unsigned int max_segment = i915_sg_segment_size(); 2544 unsigned int max_segment = i915_sg_segment_size();
2526 unsigned int sg_page_sizes; 2545 unsigned int sg_page_sizes;
2546 struct pagevec pvec;
2527 gfp_t noreclaim; 2547 gfp_t noreclaim;
2528 int ret; 2548 int ret;
2529 2549
@@ -2559,6 +2579,7 @@ rebuild_st:
2559 * Fail silently without starting the shrinker 2579 * Fail silently without starting the shrinker
2560 */ 2580 */
2561 mapping = obj->base.filp->f_mapping; 2581 mapping = obj->base.filp->f_mapping;
2582 mapping_set_unevictable(mapping);
2562 noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM); 2583 noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
2563 noreclaim |= __GFP_NORETRY | __GFP_NOWARN; 2584 noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
2564 2585
@@ -2573,6 +2594,7 @@ rebuild_st:
2573 gfp_t gfp = noreclaim; 2594 gfp_t gfp = noreclaim;
2574 2595
2575 do { 2596 do {
2597 cond_resched();
2576 page = shmem_read_mapping_page_gfp(mapping, i, gfp); 2598 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2577 if (likely(!IS_ERR(page))) 2599 if (likely(!IS_ERR(page)))
2578 break; 2600 break;
@@ -2583,7 +2605,6 @@ rebuild_st:
2583 } 2605 }
2584 2606
2585 i915_gem_shrink(dev_priv, 2 * page_count, NULL, *s++); 2607 i915_gem_shrink(dev_priv, 2 * page_count, NULL, *s++);
2586 cond_resched();
2587 2608
2588 /* 2609 /*
2589 * We've tried hard to allocate the memory by reaping 2610 * We've tried hard to allocate the memory by reaping
@@ -2673,8 +2694,14 @@ rebuild_st:
2673err_sg: 2694err_sg:
2674 sg_mark_end(sg); 2695 sg_mark_end(sg);
2675err_pages: 2696err_pages:
2676 for_each_sgt_page(page, sgt_iter, st) 2697 mapping_clear_unevictable(mapping);
2677 put_page(page); 2698 pagevec_init(&pvec);
2699 for_each_sgt_page(page, sgt_iter, st) {
2700 if (!pagevec_add(&pvec, page))
2701 check_release_pagevec(&pvec);
2702 }
2703 if (pagevec_count(&pvec))
2704 check_release_pagevec(&pvec);
2678 sg_free_table(st); 2705 sg_free_table(st);
2679 kfree(st); 2706 kfree(st);
2680 2707
@@ -3530,6 +3557,8 @@ static void __sleep_rcu(struct rcu_head *rcu)
3530 struct sleep_rcu_work *s = container_of(rcu, typeof(*s), rcu); 3557 struct sleep_rcu_work *s = container_of(rcu, typeof(*s), rcu);
3531 struct drm_i915_private *i915 = s->i915; 3558 struct drm_i915_private *i915 = s->i915;
3532 3559
3560 destroy_rcu_head(&s->rcu);
3561
3533 if (same_epoch(i915, s->epoch)) { 3562 if (same_epoch(i915, s->epoch)) {
3534 INIT_WORK(&s->work, __sleep_work); 3563 INIT_WORK(&s->work, __sleep_work);
3535 queue_work(i915->wq, &s->work); 3564 queue_work(i915->wq, &s->work);
@@ -3646,6 +3675,7 @@ out_rearm:
3646 if (same_epoch(dev_priv, epoch)) { 3675 if (same_epoch(dev_priv, epoch)) {
3647 struct sleep_rcu_work *s = kmalloc(sizeof(*s), GFP_KERNEL); 3676 struct sleep_rcu_work *s = kmalloc(sizeof(*s), GFP_KERNEL);
3648 if (s) { 3677 if (s) {
3678 init_rcu_head(&s->rcu);
3649 s->i915 = dev_priv; 3679 s->i915 = dev_priv;
3650 s->epoch = epoch; 3680 s->epoch = epoch;
3651 call_rcu(&s->rcu, __sleep_rcu); 3681 call_rcu(&s->rcu, __sleep_rcu);
@@ -3743,7 +3773,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3743 start = ktime_get(); 3773 start = ktime_get();
3744 3774
3745 ret = i915_gem_object_wait(obj, 3775 ret = i915_gem_object_wait(obj,
3746 I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL, 3776 I915_WAIT_INTERRUPTIBLE |
3777 I915_WAIT_PRIORITY |
3778 I915_WAIT_ALL,
3747 to_wait_timeout(args->timeout_ns), 3779 to_wait_timeout(args->timeout_ns),
3748 to_rps_client(file)); 3780 to_rps_client(file));
3749 3781
@@ -4710,6 +4742,8 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
4710 INIT_LIST_HEAD(&obj->lut_list); 4742 INIT_LIST_HEAD(&obj->lut_list);
4711 INIT_LIST_HEAD(&obj->batch_pool_link); 4743 INIT_LIST_HEAD(&obj->batch_pool_link);
4712 4744
4745 init_rcu_head(&obj->rcu);
4746
4713 obj->ops = ops; 4747 obj->ops = ops;
4714 4748
4715 reservation_object_init(&obj->__builtin_resv); 4749 reservation_object_init(&obj->__builtin_resv);
@@ -4977,6 +5011,13 @@ static void __i915_gem_free_object_rcu(struct rcu_head *head)
4977 struct drm_i915_private *i915 = to_i915(obj->base.dev); 5011 struct drm_i915_private *i915 = to_i915(obj->base.dev);
4978 5012
4979 /* 5013 /*
5014 * We reuse obj->rcu for the freed list, so we had better not treat
5015 * it like a rcu_head from this point forwards. And we expect all
5016 * objects to be freed via this path.
5017 */
5018 destroy_rcu_head(&obj->rcu);
5019
5020 /*
4980 * Since we require blocking on struct_mutex to unbind the freed 5021 * Since we require blocking on struct_mutex to unbind the freed
4981 * object from the GPU before releasing resources back to the 5022 * object from the GPU before releasing resources back to the
4982 * system, we can not do that directly from the RCU callback (which may 5023 * system, we can not do that directly from the RCU callback (which may
@@ -5293,18 +5334,6 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
5293 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ? 5334 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
5294 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED); 5335 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
5295 5336
5296 if (HAS_PCH_NOP(dev_priv)) {
5297 if (IS_IVYBRIDGE(dev_priv)) {
5298 u32 temp = I915_READ(GEN7_MSG_CTL);
5299 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
5300 I915_WRITE(GEN7_MSG_CTL, temp);
5301 } else if (INTEL_GEN(dev_priv) >= 7) {
5302 u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
5303 temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
5304 I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
5305 }
5306 }
5307
5308 intel_gt_workarounds_apply(dev_priv); 5337 intel_gt_workarounds_apply(dev_priv);
5309 5338
5310 i915_gem_init_swizzling(dev_priv); 5339 i915_gem_init_swizzling(dev_priv);
@@ -5951,7 +5980,7 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
5951 * the bits. 5980 * the bits.
5952 */ 5981 */
5953 BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > 5982 BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
5954 sizeof(atomic_t) * BITS_PER_BYTE); 5983 BITS_PER_TYPE(atomic_t));
5955 5984
5956 if (old) { 5985 if (old) {
5957 WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits)); 5986 WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
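As background for the pagevec conversion above, the batching idiom it relies on (standard <linux/pagevec.h> behaviour: pagevec_add() returns the slots left, so a zero return means the vector is full and must be flushed):

	/* Sketch: batch page releases instead of one put_page() per page. */
	struct pagevec pvec;

	pagevec_init(&pvec);
	for_each_sgt_page(page, sgt_iter, pages) {
		if (!pagevec_add(&pvec, page))
			check_release_pagevec(&pvec);	/* full: LRU fixup + release */
	}
	if (pagevec_count(&pvec))
		check_release_pagevec(&pvec);		/* flush the remainder */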
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index 599c4f6eb1ea..b0e4b976880c 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -47,17 +47,19 @@ struct drm_i915_private;
47#define GEM_DEBUG_DECL(var) var 47#define GEM_DEBUG_DECL(var) var
48#define GEM_DEBUG_EXEC(expr) expr 48#define GEM_DEBUG_EXEC(expr) expr
49#define GEM_DEBUG_BUG_ON(expr) GEM_BUG_ON(expr) 49#define GEM_DEBUG_BUG_ON(expr) GEM_BUG_ON(expr)
50#define GEM_DEBUG_WARN_ON(expr) GEM_WARN_ON(expr)
50 51
51#else 52#else
52 53
53#define GEM_SHOW_DEBUG() (0) 54#define GEM_SHOW_DEBUG() (0)
54 55
55#define GEM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr) 56#define GEM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
56#define GEM_WARN_ON(expr) (BUILD_BUG_ON_INVALID(expr), 0) 57#define GEM_WARN_ON(expr) ({ unlikely(!!(expr)); })
57 58
58#define GEM_DEBUG_DECL(var) 59#define GEM_DEBUG_DECL(var)
59#define GEM_DEBUG_EXEC(expr) do { } while (0) 60#define GEM_DEBUG_EXEC(expr) do { } while (0)
60#define GEM_DEBUG_BUG_ON(expr) 61#define GEM_DEBUG_BUG_ON(expr)
62#define GEM_DEBUG_WARN_ON(expr) ({ BUILD_BUG_ON_INVALID(expr); 0; })
61#endif 63#endif
62 64
63#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GEM) 65#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GEM)
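Note on the GEM_WARN_ON() change above: the non-debug definition now evaluates the expression and yields its truth value (previously it compiled to a constant 0), so it can serve as a predicate in both build flavours, while the new GEM_DEBUG_WARN_ON() still compiles away entirely. A usage sketch:

	/* Sketch: same guard works with and without CONFIG_DRM_I915_DEBUG_GEM. */
	if (GEM_WARN_ON(!engine))	/* debug build: warns; release: plain test */
		return -EINVAL;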
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index f772593b99ab..b97963db0287 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -337,7 +337,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
337 kref_init(&ctx->ref); 337 kref_init(&ctx->ref);
338 list_add_tail(&ctx->link, &dev_priv->contexts.list); 338 list_add_tail(&ctx->link, &dev_priv->contexts.list);
339 ctx->i915 = dev_priv; 339 ctx->i915 = dev_priv;
340 ctx->sched.priority = I915_PRIORITY_NORMAL; 340 ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
341 341
342 for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) { 342 for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
343 struct intel_context *ce = &ctx->__engine[n]; 343 struct intel_context *ce = &ctx->__engine[n];
@@ -414,7 +414,7 @@ i915_gem_create_context(struct drm_i915_private *dev_priv,
414 if (IS_ERR(ctx)) 414 if (IS_ERR(ctx))
415 return ctx; 415 return ctx;
416 416
417 if (USES_FULL_PPGTT(dev_priv)) { 417 if (HAS_FULL_PPGTT(dev_priv)) {
418 struct i915_hw_ppgtt *ppgtt; 418 struct i915_hw_ppgtt *ppgtt;
419 419
420 ppgtt = i915_ppgtt_create(dev_priv, file_priv); 420 ppgtt = i915_ppgtt_create(dev_priv, file_priv);
@@ -457,7 +457,7 @@ i915_gem_context_create_gvt(struct drm_device *dev)
457 if (ret) 457 if (ret)
458 return ERR_PTR(ret); 458 return ERR_PTR(ret);
459 459
460 ctx = __create_hw_context(to_i915(dev), NULL); 460 ctx = i915_gem_create_context(to_i915(dev), NULL);
461 if (IS_ERR(ctx)) 461 if (IS_ERR(ctx))
462 goto out; 462 goto out;
463 463
@@ -504,7 +504,7 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
504 } 504 }
505 505
506 i915_gem_context_clear_bannable(ctx); 506 i915_gem_context_clear_bannable(ctx);
507 ctx->sched.priority = prio; 507 ctx->sched.priority = I915_USER_PRIORITY(prio);
508 ctx->ring_size = PAGE_SIZE; 508 ctx->ring_size = PAGE_SIZE;
509 509
510 GEM_BUG_ON(!i915_gem_context_is_kernel(ctx)); 510 GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
@@ -879,7 +879,7 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
879 args->value = i915_gem_context_is_bannable(ctx); 879 args->value = i915_gem_context_is_bannable(ctx);
880 break; 880 break;
881 case I915_CONTEXT_PARAM_PRIORITY: 881 case I915_CONTEXT_PARAM_PRIORITY:
882 args->value = ctx->sched.priority; 882 args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
883 break; 883 break;
884 default: 884 default:
885 ret = -EINVAL; 885 ret = -EINVAL;
@@ -948,7 +948,8 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
948 !capable(CAP_SYS_NICE)) 948 !capable(CAP_SYS_NICE))
949 ret = -EPERM; 949 ret = -EPERM;
950 else 950 else
951 ctx->sched.priority = priority; 951 ctx->sched.priority =
952 I915_USER_PRIORITY(priority);
952 } 953 }
953 break; 954 break;
954 955
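For reference, the packing used by these priority hunks (I915_USER_PRIORITY() shifts the user value up by I915_USER_PRIORITY_SHIFT, reserving the low bits for internal priority bumps; both are defined in i915_scheduler.h elsewhere in this series):

	/* Sketch: round-trip a user priority through the shifted encoding. */
	ctx->sched.priority = I915_USER_PRIORITY(prio);			/* encode */
	args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;	/* decode */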
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index 08165f6a0a84..f6d870b1f73e 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -163,6 +163,7 @@ struct i915_gem_context {
163 /** engine: per-engine logical HW state */ 163 /** engine: per-engine logical HW state */
164 struct intel_context { 164 struct intel_context {
165 struct i915_gem_context *gem_context; 165 struct i915_gem_context *gem_context;
166 struct intel_engine_cs *active;
166 struct i915_vma *state; 167 struct i915_vma *state;
167 struct intel_ring *ring; 168 struct intel_ring *ring;
168 u32 *lrc_reg_state; 169 u32 *lrc_reg_state;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 09187286d346..d4fac09095f8 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -460,7 +460,7 @@ eb_validate_vma(struct i915_execbuffer *eb,
460 * any non-page-aligned or non-canonical addresses. 460 * any non-page-aligned or non-canonical addresses.
461 */ 461 */
462 if (unlikely(entry->flags & EXEC_OBJECT_PINNED && 462 if (unlikely(entry->flags & EXEC_OBJECT_PINNED &&
463 entry->offset != gen8_canonical_addr(entry->offset & PAGE_MASK))) 463 entry->offset != gen8_canonical_addr(entry->offset & I915_GTT_PAGE_MASK)))
464 return -EINVAL; 464 return -EINVAL;
465 465
466 /* pad_to_size was once a reserved field, so sanitize it */ 466 /* pad_to_size was once a reserved field, so sanitize it */
@@ -1268,7 +1268,7 @@ relocate_entry(struct i915_vma *vma,
1268 else if (gen >= 4) 1268 else if (gen >= 4)
1269 len = 4; 1269 len = 4;
1270 else 1270 else
1271 len = 3; 1271 len = 6;
1272 1272
1273 batch = reloc_gpu(eb, vma, len); 1273 batch = reloc_gpu(eb, vma, len);
1274 if (IS_ERR(batch)) 1274 if (IS_ERR(batch))
@@ -1309,6 +1309,11 @@ relocate_entry(struct i915_vma *vma,
1309 *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; 1309 *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
1310 *batch++ = addr; 1310 *batch++ = addr;
1311 *batch++ = target_offset; 1311 *batch++ = target_offset;
1312
1313 /* And again for good measure (blb/pnv) */
1314 *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
1315 *batch++ = addr;
1316 *batch++ = target_offset;
1312 } 1317 }
1313 1318
1314 goto out; 1319 goto out;
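To spell out the gen < 4 branch above: len grows from 3 to 6 dwords because the MI_STORE_DWORD_IMM triple is emitted twice, the second copy being the "for good measure" write for blb/pnv (presumably Bearlake/Pineview). The emitted batch is:

	/* Sketch: the 6-dword relocation batch on gen < 4. */
	*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
	*batch++ = addr;
	*batch++ = target_offset;
	*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;	/* repeated (blb/pnv) */
	*batch++ = addr;
	*batch++ = target_offset;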
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 56c7f8637311..add1fe7aeb93 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -133,55 +133,6 @@ static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
133 i915->ggtt.invalidate(i915); 133 i915->ggtt.invalidate(i915);
134} 134}
135 135
136int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
137 int enable_ppgtt)
138{
139 bool has_full_ppgtt;
140 bool has_full_48bit_ppgtt;
141
142 if (!dev_priv->info.has_aliasing_ppgtt)
143 return 0;
144
145 has_full_ppgtt = dev_priv->info.has_full_ppgtt;
146 has_full_48bit_ppgtt = dev_priv->info.has_full_48bit_ppgtt;
147
148 if (intel_vgpu_active(dev_priv)) {
149 /* GVT-g has no support for 32bit ppgtt */
150 has_full_ppgtt = false;
151 has_full_48bit_ppgtt = intel_vgpu_has_full_48bit_ppgtt(dev_priv);
152 }
153
154 /*
155 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
156 * execlists, the sole mechanism available to submit work.
157 */
158 if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
159 return 0;
160
161 if (enable_ppgtt == 1)
162 return 1;
163
164 if (enable_ppgtt == 2 && has_full_ppgtt)
165 return 2;
166
167 if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
168 return 3;
169
170 /* Disable ppgtt on SNB if VT-d is on. */
171 if (IS_GEN6(dev_priv) && intel_vtd_active()) {
172 DRM_INFO("Disabling PPGTT because VT-d is on\n");
173 return 0;
174 }
175
176 if (has_full_48bit_ppgtt)
177 return 3;
178
179 if (has_full_ppgtt)
180 return 2;
181
182 return 1;
183}
184
185static int ppgtt_bind_vma(struct i915_vma *vma, 136static int ppgtt_bind_vma(struct i915_vma *vma,
186 enum i915_cache_level cache_level, 137 enum i915_cache_level cache_level,
187 u32 unused) 138 u32 unused)
@@ -235,9 +186,9 @@ static void clear_pages(struct i915_vma *vma)
235 memset(&vma->page_sizes, 0, sizeof(vma->page_sizes)); 186 memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
236} 187}
237 188
238static gen8_pte_t gen8_pte_encode(dma_addr_t addr, 189static u64 gen8_pte_encode(dma_addr_t addr,
239 enum i915_cache_level level, 190 enum i915_cache_level level,
240 u32 flags) 191 u32 flags)
241{ 192{
242 gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW; 193 gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
243 194
@@ -274,9 +225,9 @@ static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
274#define gen8_pdpe_encode gen8_pde_encode 225#define gen8_pdpe_encode gen8_pde_encode
275#define gen8_pml4e_encode gen8_pde_encode 226#define gen8_pml4e_encode gen8_pde_encode
276 227
277static gen6_pte_t snb_pte_encode(dma_addr_t addr, 228static u64 snb_pte_encode(dma_addr_t addr,
278 enum i915_cache_level level, 229 enum i915_cache_level level,
279 u32 unused) 230 u32 flags)
280{ 231{
281 gen6_pte_t pte = GEN6_PTE_VALID; 232 gen6_pte_t pte = GEN6_PTE_VALID;
282 pte |= GEN6_PTE_ADDR_ENCODE(addr); 233 pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -296,9 +247,9 @@ static gen6_pte_t snb_pte_encode(dma_addr_t addr,
296 return pte; 247 return pte;
297} 248}
298 249
299static gen6_pte_t ivb_pte_encode(dma_addr_t addr, 250static u64 ivb_pte_encode(dma_addr_t addr,
300 enum i915_cache_level level, 251 enum i915_cache_level level,
301 u32 unused) 252 u32 flags)
302{ 253{
303 gen6_pte_t pte = GEN6_PTE_VALID; 254 gen6_pte_t pte = GEN6_PTE_VALID;
304 pte |= GEN6_PTE_ADDR_ENCODE(addr); 255 pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -320,9 +271,9 @@ static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
320 return pte; 271 return pte;
321} 272}
322 273
323static gen6_pte_t byt_pte_encode(dma_addr_t addr, 274static u64 byt_pte_encode(dma_addr_t addr,
324 enum i915_cache_level level, 275 enum i915_cache_level level,
325 u32 flags) 276 u32 flags)
326{ 277{
327 gen6_pte_t pte = GEN6_PTE_VALID; 278 gen6_pte_t pte = GEN6_PTE_VALID;
328 pte |= GEN6_PTE_ADDR_ENCODE(addr); 279 pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -336,9 +287,9 @@ static gen6_pte_t byt_pte_encode(dma_addr_t addr,
336 return pte; 287 return pte;
337} 288}
338 289
339static gen6_pte_t hsw_pte_encode(dma_addr_t addr, 290static u64 hsw_pte_encode(dma_addr_t addr,
340 enum i915_cache_level level, 291 enum i915_cache_level level,
341 u32 unused) 292 u32 flags)
342{ 293{
343 gen6_pte_t pte = GEN6_PTE_VALID; 294 gen6_pte_t pte = GEN6_PTE_VALID;
344 pte |= HSW_PTE_ADDR_ENCODE(addr); 295 pte |= HSW_PTE_ADDR_ENCODE(addr);
@@ -349,9 +300,9 @@ static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
349 return pte; 300 return pte;
350} 301}
351 302
352static gen6_pte_t iris_pte_encode(dma_addr_t addr, 303static u64 iris_pte_encode(dma_addr_t addr,
353 enum i915_cache_level level, 304 enum i915_cache_level level,
354 u32 unused) 305 u32 flags)
355{ 306{
356 gen6_pte_t pte = GEN6_PTE_VALID; 307 gen6_pte_t pte = GEN6_PTE_VALID;
357 pte |= HSW_PTE_ADDR_ENCODE(addr); 308 pte |= HSW_PTE_ADDR_ENCODE(addr);
@@ -629,10 +580,9 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
629 * region, including any PTEs which happen to point to scratch. 580 * region, including any PTEs which happen to point to scratch.
630 * 581 *
631 * This is only relevant for the 48b PPGTT where we support 582 * This is only relevant for the 48b PPGTT where we support
632 * huge-gtt-pages, see also i915_vma_insert(). 583 * huge-gtt-pages, see also i915_vma_insert(). However, as we share the
633 * 584 * scratch (read-only) between all vm, we create one 64k scratch page
634 * TODO: we should really consider write-protecting the scratch-page and 585 * for all.
635 * sharing between ppgtt
636 */ 586 */
637 size = I915_GTT_PAGE_SIZE_4K; 587 size = I915_GTT_PAGE_SIZE_4K;
638 if (i915_vm_is_48bit(vm) && 588 if (i915_vm_is_48bit(vm) &&
@@ -715,14 +665,13 @@ static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt)
715static void gen8_initialize_pt(struct i915_address_space *vm, 665static void gen8_initialize_pt(struct i915_address_space *vm,
716 struct i915_page_table *pt) 666 struct i915_page_table *pt)
717{ 667{
718 fill_px(vm, pt, 668 fill_px(vm, pt, vm->scratch_pte);
719 gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0));
720} 669}
721 670
722static void gen6_initialize_pt(struct gen6_hw_ppgtt *ppgtt, 671static void gen6_initialize_pt(struct i915_address_space *vm,
723 struct i915_page_table *pt) 672 struct i915_page_table *pt)
724{ 673{
725 fill32_px(&ppgtt->base.vm, pt, ppgtt->scratch_pte); 674 fill32_px(vm, pt, vm->scratch_pte);
726} 675}
727 676
728static struct i915_page_directory *alloc_pd(struct i915_address_space *vm) 677static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
@@ -856,15 +805,13 @@ static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
856/* Removes entries from a single page table, releasing it if it's empty. 805/* Removes entries from a single page table, releasing it if it's empty.
857 * Caller can use the return value to update higher-level entries. 806 * Caller can use the return value to update higher-level entries.
858 */ 807 */
859static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm, 808static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm,
860 struct i915_page_table *pt, 809 struct i915_page_table *pt,
861 u64 start, u64 length) 810 u64 start, u64 length)
862{ 811{
863 unsigned int num_entries = gen8_pte_count(start, length); 812 unsigned int num_entries = gen8_pte_count(start, length);
864 unsigned int pte = gen8_pte_index(start); 813 unsigned int pte = gen8_pte_index(start);
865 unsigned int pte_end = pte + num_entries; 814 unsigned int pte_end = pte + num_entries;
866 const gen8_pte_t scratch_pte =
867 gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
868 gen8_pte_t *vaddr; 815 gen8_pte_t *vaddr;
869 816
870 GEM_BUG_ON(num_entries > pt->used_ptes); 817 GEM_BUG_ON(num_entries > pt->used_ptes);
@@ -875,7 +822,7 @@ static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
875 822
876 vaddr = kmap_atomic_px(pt); 823 vaddr = kmap_atomic_px(pt);
877 while (pte < pte_end) 824 while (pte < pte_end)
878 vaddr[pte++] = scratch_pte; 825 vaddr[pte++] = vm->scratch_pte;
879 kunmap_atomic(vaddr); 826 kunmap_atomic(vaddr);
880 827
881 return false; 828 return false;
@@ -1208,7 +1155,7 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
1208 if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) { 1155 if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
1209 u16 i; 1156 u16 i;
1210 1157
1211 encode = pte_encode | vma->vm->scratch_page.daddr; 1158 encode = vma->vm->scratch_pte;
1212 vaddr = kmap_atomic_px(pd->page_table[idx.pde]); 1159 vaddr = kmap_atomic_px(pd->page_table[idx.pde]);
1213 1160
1214 for (i = 1; i < index; i += 16) 1161 for (i = 1; i < index; i += 16)
@@ -1261,10 +1208,35 @@ static int gen8_init_scratch(struct i915_address_space *vm)
1261{ 1208{
1262 int ret; 1209 int ret;
1263 1210
1211 /*
1212 * If everybody agrees not to write into the scratch page,
1213 * we can reuse it for all vm, keeping contexts and processes separate.
1214 */
1215 if (vm->has_read_only &&
1216 vm->i915->kernel_context &&
1217 vm->i915->kernel_context->ppgtt) {
1218 struct i915_address_space *clone =
1219 &vm->i915->kernel_context->ppgtt->vm;
1220
1221 GEM_BUG_ON(!clone->has_read_only);
1222
1223 vm->scratch_page.order = clone->scratch_page.order;
1224 vm->scratch_pte = clone->scratch_pte;
1225 vm->scratch_pt = clone->scratch_pt;
1226 vm->scratch_pd = clone->scratch_pd;
1227 vm->scratch_pdp = clone->scratch_pdp;
1228 return 0;
1229 }
1230
1264 ret = setup_scratch_page(vm, __GFP_HIGHMEM); 1231 ret = setup_scratch_page(vm, __GFP_HIGHMEM);
1265 if (ret) 1232 if (ret)
1266 return ret; 1233 return ret;
1267 1234
1235 vm->scratch_pte =
1236 gen8_pte_encode(vm->scratch_page.daddr,
1237 I915_CACHE_LLC,
1238 PTE_READ_ONLY);
1239
1268 vm->scratch_pt = alloc_pt(vm); 1240 vm->scratch_pt = alloc_pt(vm);
1269 if (IS_ERR(vm->scratch_pt)) { 1241 if (IS_ERR(vm->scratch_pt)) {
1270 ret = PTR_ERR(vm->scratch_pt); 1242 ret = PTR_ERR(vm->scratch_pt);
@@ -1336,6 +1308,9 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
1336 1308
1337static void gen8_free_scratch(struct i915_address_space *vm) 1309static void gen8_free_scratch(struct i915_address_space *vm)
1338{ 1310{
1311 if (!vm->scratch_page.daddr)
1312 return;
1313
1339 if (use_4lvl(vm)) 1314 if (use_4lvl(vm))
1340 free_pdp(vm, vm->scratch_pdp); 1315 free_pdp(vm, vm->scratch_pdp);
1341 free_pd(vm, vm->scratch_pd); 1316 free_pd(vm, vm->scratch_pd);
@@ -1573,8 +1548,7 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
1573static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m) 1548static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
1574{ 1549{
1575 struct i915_address_space *vm = &ppgtt->vm; 1550 struct i915_address_space *vm = &ppgtt->vm;
1576 const gen8_pte_t scratch_pte = 1551 const gen8_pte_t scratch_pte = vm->scratch_pte;
1577 gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
1578 u64 start = 0, length = ppgtt->vm.total; 1552 u64 start = 0, length = ppgtt->vm.total;
1579 1553
1580 if (use_4lvl(vm)) { 1554 if (use_4lvl(vm)) {
@@ -1647,16 +1621,12 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
1647 ppgtt->vm.i915 = i915; 1621 ppgtt->vm.i915 = i915;
1648 ppgtt->vm.dma = &i915->drm.pdev->dev; 1622 ppgtt->vm.dma = &i915->drm.pdev->dev;
1649 1623
1650 ppgtt->vm.total = USES_FULL_48BIT_PPGTT(i915) ? 1624 ppgtt->vm.total = HAS_FULL_48BIT_PPGTT(i915) ?
1651 1ULL << 48 : 1625 1ULL << 48 :
1652 1ULL << 32; 1626 1ULL << 32;
1653 1627
1654 /* 1628 /* From bdw, there is support for read-only pages in the PPGTT. */
1655 * From bdw, there is support for read-only pages in the PPGTT. 1629 ppgtt->vm.has_read_only = true;
1656 *
1657 * XXX GVT is not honouring the lack of RW in the PTE bits.
1658 */
1659 ppgtt->vm.has_read_only = !intel_vgpu_active(i915);
1660 1630
1661 i915_address_space_init(&ppgtt->vm, i915); 1631 i915_address_space_init(&ppgtt->vm, i915);
1662 1632
@@ -1721,7 +1691,7 @@ err_free:
1721static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m) 1691static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m)
1722{ 1692{
1723 struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base); 1693 struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
1724 const gen6_pte_t scratch_pte = ppgtt->scratch_pte; 1694 const gen6_pte_t scratch_pte = base->vm.scratch_pte;
1725 struct i915_page_table *pt; 1695 struct i915_page_table *pt;
1726 u32 pte, pde; 1696 u32 pte, pde;
1727 1697
@@ -1757,7 +1727,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m)
1757 if (i == 4) 1727 if (i == 4)
1758 continue; 1728 continue;
1759 1729
1760 seq_printf(m, "\t\t(%03d, %04d) %08lx: ", 1730 seq_printf(m, "\t\t(%03d, %04d) %08llx: ",
1761 pde, pte, 1731 pde, pte,
1762 (pde * GEN6_PTES + pte) * I915_GTT_PAGE_SIZE); 1732 (pde * GEN6_PTES + pte) * I915_GTT_PAGE_SIZE);
1763 for (i = 0; i < 4; i++) { 1733 for (i = 0; i < 4; i++) {
@@ -1782,19 +1752,6 @@ static inline void gen6_write_pde(const struct gen6_hw_ppgtt *ppgtt,
1782 ppgtt->pd_addr + pde); 1752 ppgtt->pd_addr + pde);
1783} 1753}
1784 1754
1785static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv)
1786{
1787 struct intel_engine_cs *engine;
1788 enum intel_engine_id id;
1789
1790 for_each_engine(engine, dev_priv, id) {
1791 u32 four_level = USES_FULL_48BIT_PPGTT(dev_priv) ?
1792 GEN8_GFX_PPGTT_48B : 0;
1793 I915_WRITE(RING_MODE_GEN7(engine),
1794 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
1795 }
1796}
1797
1798static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv) 1755static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
1799{ 1756{
1800 struct intel_engine_cs *engine; 1757 struct intel_engine_cs *engine;
@@ -1834,7 +1791,8 @@ static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
1834 ecochk = I915_READ(GAM_ECOCHK); 1791 ecochk = I915_READ(GAM_ECOCHK);
1835 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B); 1792 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
1836 1793
1837 I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); 1794 if (HAS_PPGTT(dev_priv)) /* may be disabled for VT-d */
1795 I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
1838} 1796}
1839 1797
1840/* PPGTT support for Sandybridge/Gen6 and later */ 1798/* PPGTT support for Sandybridge/Gen6 and later */
@@ -1846,7 +1804,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
1846 unsigned int pde = first_entry / GEN6_PTES; 1804 unsigned int pde = first_entry / GEN6_PTES;
1847 unsigned int pte = first_entry % GEN6_PTES; 1805 unsigned int pte = first_entry % GEN6_PTES;
1848 unsigned int num_entries = length / I915_GTT_PAGE_SIZE; 1806 unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
1849 const gen6_pte_t scratch_pte = ppgtt->scratch_pte; 1807 const gen6_pte_t scratch_pte = vm->scratch_pte;
1850 1808
1851 while (num_entries) { 1809 while (num_entries) {
1852 struct i915_page_table *pt = ppgtt->base.pd.page_table[pde++]; 1810 struct i915_page_table *pt = ppgtt->base.pd.page_table[pde++];
@@ -1937,7 +1895,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
1937 if (IS_ERR(pt)) 1895 if (IS_ERR(pt))
1938 goto unwind_out; 1896 goto unwind_out;
1939 1897
1940 gen6_initialize_pt(ppgtt, pt); 1898 gen6_initialize_pt(vm, pt);
1941 ppgtt->base.pd.page_table[pde] = pt; 1899 ppgtt->base.pd.page_table[pde] = pt;
1942 1900
1943 if (i915_vma_is_bound(ppgtt->vma, 1901 if (i915_vma_is_bound(ppgtt->vma,
@@ -1975,9 +1933,9 @@ static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt)
1975 if (ret) 1933 if (ret)
1976 return ret; 1934 return ret;
1977 1935
1978 ppgtt->scratch_pte = 1936 vm->scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
1979 vm->pte_encode(vm->scratch_page.daddr, 1937 I915_CACHE_NONE,
1980 I915_CACHE_NONE, PTE_READ_ONLY); 1938 PTE_READ_ONLY);
1981 1939
1982 vm->scratch_pt = alloc_pt(vm); 1940 vm->scratch_pt = alloc_pt(vm);
1983 if (IS_ERR(vm->scratch_pt)) { 1941 if (IS_ERR(vm->scratch_pt)) {
@@ -1985,7 +1943,7 @@ static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt)
1985 return PTR_ERR(vm->scratch_pt); 1943 return PTR_ERR(vm->scratch_pt);
1986 } 1944 }
1987 1945
1988 gen6_initialize_pt(ppgtt, vm->scratch_pt); 1946 gen6_initialize_pt(vm, vm->scratch_pt);
1989 gen6_for_all_pdes(unused, &ppgtt->base.pd, pde) 1947 gen6_for_all_pdes(unused, &ppgtt->base.pd, pde)
1990 ppgtt->base.pd.page_table[pde] = vm->scratch_pt; 1948 ppgtt->base.pd.page_table[pde] = vm->scratch_pt;
1991 1949
@@ -2237,23 +2195,10 @@ int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
2237{ 2195{
2238 gtt_write_workarounds(dev_priv); 2196 gtt_write_workarounds(dev_priv);
2239 2197
2240 /* In the case of execlists, PPGTT is enabled by the context descriptor
2241 * and the PDPs are contained within the context itself. We don't
2242 * need to do anything here. */
2243 if (HAS_LOGICAL_RING_CONTEXTS(dev_priv))
2244 return 0;
2245
2246 if (!USES_PPGTT(dev_priv))
2247 return 0;
2248
2249 if (IS_GEN6(dev_priv)) 2198 if (IS_GEN6(dev_priv))
2250 gen6_ppgtt_enable(dev_priv); 2199 gen6_ppgtt_enable(dev_priv);
2251 else if (IS_GEN7(dev_priv)) 2200 else if (IS_GEN7(dev_priv))
2252 gen7_ppgtt_enable(dev_priv); 2201 gen7_ppgtt_enable(dev_priv);
2253 else if (INTEL_GEN(dev_priv) >= 8)
2254 gen8_ppgtt_enable(dev_priv);
2255 else
2256 MISSING_CASE(INTEL_GEN(dev_priv));
2257 2202
2258 return 0; 2203 return 0;
2259} 2204}
@@ -2543,8 +2488,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
2543 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); 2488 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2544 unsigned first_entry = start / I915_GTT_PAGE_SIZE; 2489 unsigned first_entry = start / I915_GTT_PAGE_SIZE;
2545 unsigned num_entries = length / I915_GTT_PAGE_SIZE; 2490 unsigned num_entries = length / I915_GTT_PAGE_SIZE;
2546 const gen8_pte_t scratch_pte = 2491 const gen8_pte_t scratch_pte = vm->scratch_pte;
2547 gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
2548 gen8_pte_t __iomem *gtt_base = 2492 gen8_pte_t __iomem *gtt_base =
2549 (gen8_pte_t __iomem *)ggtt->gsm + first_entry; 2493 (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
2550 const int max_entries = ggtt_total_entries(ggtt) - first_entry; 2494 const int max_entries = ggtt_total_entries(ggtt) - first_entry;
@@ -2669,8 +2613,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
2669 first_entry, num_entries, max_entries)) 2613 first_entry, num_entries, max_entries))
2670 num_entries = max_entries; 2614 num_entries = max_entries;
2671 2615
2672 scratch_pte = vm->pte_encode(vm->scratch_page.daddr, 2616 scratch_pte = vm->scratch_pte;
2673 I915_CACHE_LLC, 0);
2674 2617
2675 for (i = 0; i < num_entries; i++) 2618 for (i = 0; i < num_entries; i++)
2676 iowrite32(scratch_pte, &gtt_base[i]); 2619 iowrite32(scratch_pte, &gtt_base[i]);
@@ -2952,7 +2895,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
2952 /* And finally clear the reserved guard page */ 2895 /* And finally clear the reserved guard page */
2953 ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE); 2896 ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
2954 2897
2955 if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) { 2898 if (INTEL_PPGTT(dev_priv) == INTEL_PPGTT_ALIASING) {
2956 ret = i915_gem_init_aliasing_ppgtt(dev_priv); 2899 ret = i915_gem_init_aliasing_ppgtt(dev_priv);
2957 if (ret) 2900 if (ret)
2958 goto err; 2901 goto err;
@@ -3076,6 +3019,10 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
3076 return ret; 3019 return ret;
3077 } 3020 }
3078 3021
3022 ggtt->vm.scratch_pte =
3023 ggtt->vm.pte_encode(ggtt->vm.scratch_page.daddr,
3024 I915_CACHE_NONE, 0);
3025
3079 return 0; 3026 return 0;
3080} 3027}
3081 3028
@@ -3275,7 +3222,7 @@ static void bdw_setup_private_ppat(struct intel_ppat *ppat)
3275 ppat->match = bdw_private_pat_match; 3222 ppat->match = bdw_private_pat_match;
3276 ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3); 3223 ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
3277 3224
3278 if (!USES_PPGTT(ppat->i915)) { 3225 if (!HAS_PPGTT(ppat->i915)) {
3279 /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry, 3226 /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
3280 * so RTL will always use the value corresponding to 3227 * so RTL will always use the value corresponding to
3281 * pat_sel = 000". 3228 * pat_sel = 000".
@@ -3402,7 +3349,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
3402 ggtt->vm.cleanup = gen6_gmch_remove; 3349 ggtt->vm.cleanup = gen6_gmch_remove;
3403 ggtt->vm.insert_page = gen8_ggtt_insert_page; 3350 ggtt->vm.insert_page = gen8_ggtt_insert_page;
3404 ggtt->vm.clear_range = nop_clear_range; 3351 ggtt->vm.clear_range = nop_clear_range;
3405 if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv)) 3352 if (intel_scanout_needs_vtd_wa(dev_priv))
3406 ggtt->vm.clear_range = gen8_ggtt_clear_range; 3353 ggtt->vm.clear_range = gen8_ggtt_clear_range;
3407 3354
3408 ggtt->vm.insert_entries = gen8_ggtt_insert_entries; 3355 ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
@@ -3413,6 +3360,11 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
3413 ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL; 3360 ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
3414 if (ggtt->vm.clear_range != nop_clear_range) 3361 if (ggtt->vm.clear_range != nop_clear_range)
3415 ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL; 3362 ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
3363
3364 /* Prevent recursively calling stop_machine() and deadlocks. */
3365 dev_info(dev_priv->drm.dev,
3366 "Disabling error capture for VT-d workaround\n");
3367 i915_disable_error_state(dev_priv, -ENODEV);
3416 } 3368 }
3417 3369
3418 ggtt->invalidate = gen6_ggtt_invalidate; 3370 ggtt->invalidate = gen6_ggtt_invalidate;
@@ -3422,6 +3374,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
3422 ggtt->vm.vma_ops.set_pages = ggtt_set_pages; 3374 ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
3423 ggtt->vm.vma_ops.clear_pages = clear_pages; 3375 ggtt->vm.vma_ops.clear_pages = clear_pages;
3424 3376
3377 ggtt->vm.pte_encode = gen8_pte_encode;
3378
3425 setup_private_pat(dev_priv); 3379 setup_private_pat(dev_priv);
3426 3380
3427 return ggtt_probe_common(ggtt, size); 3381 return ggtt_probe_common(ggtt, size);
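
Two things happen in this probe hunk: the GGTT vfuncs default to the fast paths and are swapped for the stop_machine()-serialised __BKL variants only when the VT-d workaround applies, and error capture is disabled up front, since a capture would itself call stop_machine() from inside the serialised path and deadlock. A sketch of the vfunc-override half, with stand-in functions (not driver API):

        struct toy_ggtt_ops {
                void (*insert_page)(void);
                void (*clear_range)(void);
        };

        static void fast_insert_page(void) { }
        static void fast_clear_range(void) { }
        static void serialised_insert_page(void) { }    /* stop_machine() variant */
        static void serialised_clear_range(void) { }

        static void toy_pick_ops(struct toy_ggtt_ops *ops, int vtd_active)
        {
                /* Default to the fast paths... */
                ops->insert_page = fast_insert_page;
                ops->clear_range = fast_clear_range;

                /* ...and only swap in the serialised variants when required. */
                if (vtd_active) {
                        ops->insert_page = serialised_insert_page;
                        ops->clear_range = serialised_clear_range;
                }
        }
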
@@ -3609,7 +3563,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
3609 /* Only VLV supports read-only GGTT mappings */ 3563 /* Only VLV supports read-only GGTT mappings */
3610 ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv); 3564 ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv);
3611 3565
3612 if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv)) 3566 if (!HAS_LLC(dev_priv) && !HAS_PPGTT(dev_priv))
3613 ggtt->vm.mm.color_adjust = i915_gtt_color_adjust; 3567 ggtt->vm.mm.color_adjust = i915_gtt_color_adjust;
3614 mutex_unlock(&dev_priv->drm.struct_mutex); 3568 mutex_unlock(&dev_priv->drm.struct_mutex);
3615 3569
@@ -3711,7 +3665,7 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
3711} 3665}
3712 3666
3713static struct scatterlist * 3667static struct scatterlist *
3714rotate_pages(const dma_addr_t *in, unsigned int offset, 3668rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
3715 unsigned int width, unsigned int height, 3669 unsigned int width, unsigned int height,
3716 unsigned int stride, 3670 unsigned int stride,
3717 struct sg_table *st, struct scatterlist *sg) 3671 struct sg_table *st, struct scatterlist *sg)
@@ -3720,7 +3674,7 @@ rotate_pages(const dma_addr_t *in, unsigned int offset,
3720 unsigned int src_idx; 3674 unsigned int src_idx;
3721 3675
3722 for (column = 0; column < width; column++) { 3676 for (column = 0; column < width; column++) {
3723 src_idx = stride * (height - 1) + column; 3677 src_idx = stride * (height - 1) + column + offset;
3724 for (row = 0; row < height; row++) { 3678 for (row = 0; row < height; row++) {
3725 st->nents++; 3679 st->nents++;
3726 /* We don't need the pages, but need to initialize 3680 /* We don't need the pages, but need to initialize
@@ -3728,7 +3682,8 @@ rotate_pages(const dma_addr_t *in, unsigned int offset,
3728 * The only things we need are the DMA addresses. 3682 * The only things we need are the DMA addresses.
3729 */ 3683 */
3730 sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0); 3684 sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
3731 sg_dma_address(sg) = in[offset + src_idx]; 3685 sg_dma_address(sg) =
3686 i915_gem_object_get_dma_address(obj, src_idx);
3732 sg_dma_len(sg) = I915_GTT_PAGE_SIZE; 3687 sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
3733 sg = sg_next(sg); 3688 sg = sg_next(sg);
3734 src_idx -= stride; 3689 src_idx -= stride;
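
rotate_pages() now looks each source page up with i915_gem_object_get_dma_address() and folds the plane offset into the source index, instead of pre-flattening the object into a temporary dma_addr_t array. The walk itself is unchanged: for every destination column it starts at the bottom row and steps up one stride per row. A self-contained toy that prints the index order for a 2x3 view with stride 4 (all values made up):

        #include <stdio.h>

        int main(void)
        {
                unsigned int width = 2, height = 3, stride = 4, offset = 0;
                unsigned int column, row, src_idx;

                for (column = 0; column < width; column++) {
                        src_idx = stride * (height - 1) + column + offset;
                        for (row = 0; row < height; row++) {
                                printf("dst[col=%u,row=%u] <- src page %u\n",
                                       column, row, src_idx);
                                src_idx -= stride;
                        }
                }
                return 0;
        }
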
@@ -3742,22 +3697,11 @@ static noinline struct sg_table *
3742intel_rotate_pages(struct intel_rotation_info *rot_info, 3697intel_rotate_pages(struct intel_rotation_info *rot_info,
3743 struct drm_i915_gem_object *obj) 3698 struct drm_i915_gem_object *obj)
3744{ 3699{
3745 const unsigned long n_pages = obj->base.size / I915_GTT_PAGE_SIZE;
3746 unsigned int size = intel_rotation_info_size(rot_info); 3700 unsigned int size = intel_rotation_info_size(rot_info);
3747 struct sgt_iter sgt_iter;
3748 dma_addr_t dma_addr;
3749 unsigned long i;
3750 dma_addr_t *page_addr_list;
3751 struct sg_table *st; 3701 struct sg_table *st;
3752 struct scatterlist *sg; 3702 struct scatterlist *sg;
3753 int ret = -ENOMEM; 3703 int ret = -ENOMEM;
3754 3704 int i;
3755 /* Allocate a temporary list of source pages for random access. */
3756 page_addr_list = kvmalloc_array(n_pages,
3757 sizeof(dma_addr_t),
3758 GFP_KERNEL);
3759 if (!page_addr_list)
3760 return ERR_PTR(ret);
3761 3705
3762 /* Allocate target SG list. */ 3706 /* Allocate target SG list. */
3763 st = kmalloc(sizeof(*st), GFP_KERNEL); 3707 st = kmalloc(sizeof(*st), GFP_KERNEL);
@@ -3768,29 +3712,20 @@ intel_rotate_pages(struct intel_rotation_info *rot_info,
3768 if (ret) 3712 if (ret)
3769 goto err_sg_alloc; 3713 goto err_sg_alloc;
3770 3714
3771 /* Populate source page list from the object. */
3772 i = 0;
3773 for_each_sgt_dma(dma_addr, sgt_iter, obj->mm.pages)
3774 page_addr_list[i++] = dma_addr;
3775
3776 GEM_BUG_ON(i != n_pages);
3777 st->nents = 0; 3715 st->nents = 0;
3778 sg = st->sgl; 3716 sg = st->sgl;
3779 3717
3780 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) { 3718 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
3781 sg = rotate_pages(page_addr_list, rot_info->plane[i].offset, 3719 sg = rotate_pages(obj, rot_info->plane[i].offset,
3782 rot_info->plane[i].width, rot_info->plane[i].height, 3720 rot_info->plane[i].width, rot_info->plane[i].height,
3783 rot_info->plane[i].stride, st, sg); 3721 rot_info->plane[i].stride, st, sg);
3784 } 3722 }
3785 3723
3786 kvfree(page_addr_list);
3787
3788 return st; 3724 return st;
3789 3725
3790err_sg_alloc: 3726err_sg_alloc:
3791 kfree(st); 3727 kfree(st);
3792err_st_alloc: 3728err_st_alloc:
3793 kvfree(page_addr_list);
3794 3729
3795 DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n", 3730 DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
3796 obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size); 3731 obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
@@ -3835,6 +3770,8 @@ intel_partial_pages(const struct i915_ggtt_view *view,
3835 count -= len >> PAGE_SHIFT; 3770 count -= len >> PAGE_SHIFT;
3836 if (count == 0) { 3771 if (count == 0) {
3837 sg_mark_end(sg); 3772 sg_mark_end(sg);
3773 i915_sg_trim(st); /* Drop any unused tail entries. */
3774
3838 return st; 3775 return st;
3839 } 3776 }
3840 3777
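
intel_partial_pages() sizes the sg_table for the worst case, so once the real end is marked any unused tail entries are pure overhead; the new i915_sg_trim() call releases them. The same over-allocate-then-trim idea in miniature, with a flat array standing in for the scatterlist:

        #include <stdlib.h>

        struct toy_table {
                unsigned int *ents;
                unsigned int nents;     /* entries actually used */
                unsigned int alloc;     /* entries allocated (worst case) */
        };

        static int toy_trim(struct toy_table *t)
        {
                unsigned int *tmp;

                if (t->nents == t->alloc)
                        return 0;       /* nothing to give back */

                tmp = realloc(t->ents, t->nents * sizeof(*t->ents));
                if (!tmp && t->nents)
                        return -1;      /* keep the old, larger buffer */

                t->ents = tmp;
                t->alloc = t->nents;
                return 0;
        }
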
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 7e2af5f4f39b..4874da09a3c4 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -42,13 +42,15 @@
42#include "i915_selftest.h" 42#include "i915_selftest.h"
43#include "i915_timeline.h" 43#include "i915_timeline.h"
44 44
45#define I915_GTT_PAGE_SIZE_4K BIT(12) 45#define I915_GTT_PAGE_SIZE_4K BIT_ULL(12)
46#define I915_GTT_PAGE_SIZE_64K BIT(16) 46#define I915_GTT_PAGE_SIZE_64K BIT_ULL(16)
47#define I915_GTT_PAGE_SIZE_2M BIT(21) 47#define I915_GTT_PAGE_SIZE_2M BIT_ULL(21)
48 48
49#define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K 49#define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K
50#define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M 50#define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M
51 51
52#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE
53
52#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE 54#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE
53 55
54#define I915_FENCE_REG_NONE -1 56#define I915_FENCE_REG_NONE -1
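
The new I915_GTT_PAGE_MASK is defined as -I915_GTT_PAGE_SIZE, relying on two's complement: negating a power of two yields all-ones above the page bits and zeros below, i.e. exactly ~(size - 1), and the BIT_ULL() switch above keeps the arithmetic 64-bit clean. A quick check:

        #include <assert.h>
        #include <stdint.h>

        int main(void)
        {
                uint64_t page_size = 1ull << 12;        /* I915_GTT_PAGE_SIZE_4K */
                uint64_t mask = -page_size;             /* two's complement negate */

                assert(mask == ~(page_size - 1));       /* ...fffffffffffff000 */
                assert((0x12345678ull & mask) == 0x12345000ull);
                return 0;
        }
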
@@ -287,6 +289,7 @@ struct i915_address_space {
287 289
288 struct mutex mutex; /* protects vma and our lists */ 290 struct mutex mutex; /* protects vma and our lists */
289 291
292 u64 scratch_pte;
290 struct i915_page_dma scratch_page; 293 struct i915_page_dma scratch_page;
291 struct i915_page_table *scratch_pt; 294 struct i915_page_table *scratch_pt;
292 struct i915_page_directory *scratch_pd; 295 struct i915_page_directory *scratch_pd;
@@ -333,12 +336,11 @@ struct i915_address_space {
333 /* Some systems support read-only mappings for GGTT and/or PPGTT */ 336 /* Some systems support read-only mappings for GGTT and/or PPGTT */
334 bool has_read_only:1; 337 bool has_read_only:1;
335 338
336 /* FIXME: Need a more generic return type */ 339 u64 (*pte_encode)(dma_addr_t addr,
337 gen6_pte_t (*pte_encode)(dma_addr_t addr, 340 enum i915_cache_level level,
338 enum i915_cache_level level, 341 u32 flags); /* Create a valid PTE */
339 u32 flags); /* Create a valid PTE */
340 /* flags for pte_encode */
341#define PTE_READ_ONLY (1<<0) 342#define PTE_READ_ONLY (1<<0)
343
342 int (*allocate_va_range)(struct i915_address_space *vm, 344 int (*allocate_va_range)(struct i915_address_space *vm,
343 u64 start, u64 length); 345 u64 start, u64 length);
344 void (*clear_range)(struct i915_address_space *vm, 346 void (*clear_range)(struct i915_address_space *vm,
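
Widening pte_encode() to return u64 removes the gen6_pte_t (u32) restriction, so the same vfunc slot can hold gen8+ encoders whose PTEs do not fit in 32 bits; the GGTT scratch-PTE caching above depends on this. A minimal sketch of the widened hook; the bit layout is purely illustrative, not the hardware's:

        #include <stdint.h>

        #define TOY_PTE_READ_ONLY       (1u << 0)       /* mirrors PTE_READ_ONLY */

        typedef uint64_t (*toy_pte_encode_fn)(uint64_t addr, int cache_level,
                                              uint32_t flags);

        static uint64_t toy_gen8_pte_encode(uint64_t addr, int cache_level,
                                            uint32_t flags)
        {
                uint64_t pte = addr | 0x3;      /* toy "present + writable" bits */

                if (flags & TOY_PTE_READ_ONLY)
                        pte &= ~0x2ull;         /* drop the toy write bit */
                (void)cache_level;
                return pte;
        }
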
@@ -420,7 +422,6 @@ struct gen6_hw_ppgtt {
420 422
421 struct i915_vma *vma; 423 struct i915_vma *vma;
422 gen6_pte_t __iomem *pd_addr; 424 gen6_pte_t __iomem *pd_addr;
423 gen6_pte_t scratch_pte;
424 425
425 unsigned int pin_count; 426 unsigned int pin_count;
426 bool scan_for_unused_pt; 427 bool scan_for_unused_pt;
@@ -659,20 +660,20 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
659 u64 start, u64 end, unsigned int flags); 660 u64 start, u64 end, unsigned int flags);
660 661
661/* Flags used by pin/bind&friends. */ 662/* Flags used by pin/bind&friends. */
662#define PIN_NONBLOCK BIT(0) 663#define PIN_NONBLOCK BIT_ULL(0)
663#define PIN_MAPPABLE BIT(1) 664#define PIN_MAPPABLE BIT_ULL(1)
664#define PIN_ZONE_4G BIT(2) 665#define PIN_ZONE_4G BIT_ULL(2)
665#define PIN_NONFAULT BIT(3) 666#define PIN_NONFAULT BIT_ULL(3)
666#define PIN_NOEVICT BIT(4) 667#define PIN_NOEVICT BIT_ULL(4)
667 668
668#define PIN_MBZ BIT(5) /* I915_VMA_PIN_OVERFLOW */ 669#define PIN_MBZ BIT_ULL(5) /* I915_VMA_PIN_OVERFLOW */
669#define PIN_GLOBAL BIT(6) /* I915_VMA_GLOBAL_BIND */ 670#define PIN_GLOBAL BIT_ULL(6) /* I915_VMA_GLOBAL_BIND */
670#define PIN_USER BIT(7) /* I915_VMA_LOCAL_BIND */ 671#define PIN_USER BIT_ULL(7) /* I915_VMA_LOCAL_BIND */
671#define PIN_UPDATE BIT(8) 672#define PIN_UPDATE BIT_ULL(8)
672 673
673#define PIN_HIGH BIT(9) 674#define PIN_HIGH BIT_ULL(9)
674#define PIN_OFFSET_BIAS BIT(10) 675#define PIN_OFFSET_BIAS BIT_ULL(10)
675#define PIN_OFFSET_FIXED BIT(11) 676#define PIN_OFFSET_FIXED BIT_ULL(11)
676#define PIN_OFFSET_MASK (-I915_GTT_PAGE_SIZE) 677#define PIN_OFFSET_MASK (-I915_GTT_PAGE_SIZE)
677 678
678#endif 679#endif
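
The PIN_* flags move from BIT() to BIT_ULL(), presumably so every flag is a u64 from the start and mixes safely with PIN_OFFSET_MASK and 64-bit offset arithmetic; plain BIT() expands to unsigned long, which is only 32 bits on 32-bit builds. Illustrative macros (not the kernel's definitions):

        #include <stdint.h>

        #define TOY_BIT(n)      (1UL << (n))    /* unsigned long: 32-bit on ILP32 */
        #define TOY_BIT_ULL(n)  (1ULL << (n))   /* always 64-bit */

        /* TOY_BIT(40) would be undefined on an ILP32 target; the ULL form
         * is well-defined and promotes cleanly in u64 expressions. */
        static const uint64_t toy_flags = TOY_BIT_ULL(11) | TOY_BIT_ULL(40);
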
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 8762d17b6659..8123bf0e4807 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -27,7 +27,7 @@
27 * 27 *
28 */ 28 */
29 29
30#include <generated/utsrelease.h> 30#include <linux/utsname.h>
31#include <linux/stop_machine.h> 31#include <linux/stop_machine.h>
32#include <linux/zlib.h> 32#include <linux/zlib.h>
33#include <drm/drm_print.h> 33#include <drm/drm_print.h>
@@ -512,7 +512,7 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
512 err_printf(m, " SYNC_2: 0x%08x\n", 512 err_printf(m, " SYNC_2: 0x%08x\n",
513 ee->semaphore_mboxes[2]); 513 ee->semaphore_mboxes[2]);
514 } 514 }
515 if (USES_PPGTT(m->i915)) { 515 if (HAS_PPGTT(m->i915)) {
516 err_printf(m, " GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode); 516 err_printf(m, " GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);
517 517
518 if (INTEL_GEN(m->i915) >= 8) { 518 if (INTEL_GEN(m->i915) >= 8) {
@@ -648,9 +648,12 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
648 return 0; 648 return 0;
649 } 649 }
650 650
651 if (IS_ERR(error))
652 return PTR_ERR(error);
653
651 if (*error->error_msg) 654 if (*error->error_msg)
652 err_printf(m, "%s\n", error->error_msg); 655 err_printf(m, "%s\n", error->error_msg);
653 err_printf(m, "Kernel: " UTS_RELEASE "\n"); 656 err_printf(m, "Kernel: %s\n", init_utsname()->release);
654 ts = ktime_to_timespec64(error->time); 657 ts = ktime_to_timespec64(error->time);
655 err_printf(m, "Time: %lld s %ld us\n", 658 err_printf(m, "Time: %lld s %ld us\n",
656 (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC); 659 (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
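
Replacing the UTS_RELEASE constant with init_utsname()->release means the report shows the release string of the running kernel rather than whatever string was baked in when the module was compiled. The userspace analogue is uname(2):

        #include <stdio.h>
        #include <sys/utsname.h>

        int main(void)
        {
                struct utsname u;

                if (uname(&u) == 0)
                        printf("Kernel: %s\n", u.release);      /* runtime value */
                return 0;
        }
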
@@ -999,7 +1002,6 @@ i915_error_object_create(struct drm_i915_private *i915,
999 } 1002 }
1000 1003
1001 compress_fini(&compress, dst); 1004 compress_fini(&compress, dst);
1002 ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
1003 return dst; 1005 return dst;
1004} 1006}
1005 1007
@@ -1268,7 +1270,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
1268 ee->reset_count = i915_reset_engine_count(&dev_priv->gpu_error, 1270 ee->reset_count = i915_reset_engine_count(&dev_priv->gpu_error,
1269 engine); 1271 engine);
1270 1272
1271 if (USES_PPGTT(dev_priv)) { 1273 if (HAS_PPGTT(dev_priv)) {
1272 int i; 1274 int i;
1273 1275
1274 ee->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine)); 1276 ee->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
@@ -1785,6 +1787,14 @@ static unsigned long capture_find_epoch(const struct i915_gpu_state *error)
1785 return epoch; 1787 return epoch;
1786} 1788}
1787 1789
1790static void capture_finish(struct i915_gpu_state *error)
1791{
1792 struct i915_ggtt *ggtt = &error->i915->ggtt;
1793 const u64 slot = ggtt->error_capture.start;
1794
1795 ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
1796}
1797
1788static int capture(void *data) 1798static int capture(void *data)
1789{ 1799{
1790 struct i915_gpu_state *error = data; 1800 struct i915_gpu_state *error = data;
@@ -1809,6 +1819,7 @@ static int capture(void *data)
1809 1819
1810 error->epoch = capture_find_epoch(error); 1820 error->epoch = capture_find_epoch(error);
1811 1821
1822 capture_finish(error);
1812 return 0; 1823 return 0;
1813} 1824}
1814 1825
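
With the per-object clear_range() call removed from i915_error_object_create() (the hunk above), the reserved error-capture slot is now scrubbed exactly once, in capture_finish(), after the whole capture completes. The hoisted-cleanup shape in miniature:

        struct toy_capture {
                int nobjs;
        };

        static void toy_scrub_slot(void)
        {
                /* stands in for clear_range() on the reserved GGTT slot */
        }

        static void toy_capture_all(struct toy_capture *c)
        {
                int i;

                for (i = 0; i < c->nobjs; i++) {
                        /* compress object i through the slot; no per-object scrub */
                }

                toy_scrub_slot();       /* once, when the whole capture is done */
        }
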
@@ -1859,6 +1870,7 @@ void i915_capture_error_state(struct drm_i915_private *i915,
1859 error = i915_capture_gpu_state(i915); 1870 error = i915_capture_gpu_state(i915);
1860 if (!error) { 1871 if (!error) {
1861 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); 1872 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
1873 i915_disable_error_state(i915, -ENOMEM);
1862 return; 1874 return;
1863 } 1875 }
1864 1876
@@ -1914,5 +1926,14 @@ void i915_reset_error_state(struct drm_i915_private *i915)
1914 i915->gpu_error.first_error = NULL; 1926 i915->gpu_error.first_error = NULL;
1915 spin_unlock_irq(&i915->gpu_error.lock); 1927 spin_unlock_irq(&i915->gpu_error.lock);
1916 1928
1917 i915_gpu_state_put(error); 1929 if (!IS_ERR(error))
1930 i915_gpu_state_put(error);
1931}
1932
1933void i915_disable_error_state(struct drm_i915_private *i915, int err)
1934{
1935 spin_lock_irq(&i915->gpu_error.lock);
1936 if (!i915->gpu_error.first_error)
1937 i915->gpu_error.first_error = ERR_PTR(err);
1938 spin_unlock_irq(&i915->gpu_error.lock);
1918} 1939}
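
i915_disable_error_state() reuses gpu_error.first_error as a tri-state slot: NULL (nothing captured), a real capture, or an ERR_PTR-encoded errno meaning capture is permanently disabled, which is why consumers now check IS_ERR() before printing or putting. The idiom, sketched with toy stand-ins for the kernel's ERR_PTR helpers:

        #include <stdio.h>

        #define TOY_ENODEV      19

        static inline void *toy_err_ptr(long err) { return (void *)err; }
        static inline long toy_ptr_err(const void *p) { return (long)p; }
        static inline int toy_is_err(const void *p)
        {
                return (unsigned long)p >= (unsigned long)-4095;
        }

        int main(void)
        {
                void *first_error = NULL;               /* "no capture yet" */

                if (!first_error)                       /* disable: latch an errno */
                        first_error = toy_err_ptr(-TOY_ENODEV);

                if (toy_is_err(first_error))
                        printf("capture disabled (%ld)\n", toy_ptr_err(first_error));
                return 0;
        }
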
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 8710fb18ed74..3ec89a504de5 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -343,6 +343,7 @@ static inline void i915_gpu_state_put(struct i915_gpu_state *gpu)
343 343
344struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915); 344struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915);
345void i915_reset_error_state(struct drm_i915_private *i915); 345void i915_reset_error_state(struct drm_i915_private *i915);
346void i915_disable_error_state(struct drm_i915_private *i915, int err);
346 347
347#else 348#else
348 349
@@ -355,13 +356,18 @@ static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
355static inline struct i915_gpu_state * 356static inline struct i915_gpu_state *
356i915_first_error_state(struct drm_i915_private *i915) 357i915_first_error_state(struct drm_i915_private *i915)
357{ 358{
358 return NULL; 359 return ERR_PTR(-ENODEV);
359} 360}
360 361
361static inline void i915_reset_error_state(struct drm_i915_private *i915) 362static inline void i915_reset_error_state(struct drm_i915_private *i915)
362{ 363{
363} 364}
364 365
366static inline void i915_disable_error_state(struct drm_i915_private *i915,
367 int err)
368{
369}
370
365#endif /* IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) */ 371#endif /* IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) */
366 372
367#endif /* _I915_GPU_ERROR_H_ */ 373#endif /* _I915_GPU_ERROR_H_ */
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 2e242270e270..d447d7d508f4 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2887,21 +2887,39 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2887 return ret; 2887 return ret;
2888} 2888}
2889 2889
2890static inline u32 gen8_master_intr_disable(void __iomem * const regs)
2891{
2892 raw_reg_write(regs, GEN8_MASTER_IRQ, 0);
2893
2894 /*
2895 * Now with master disabled, get a sample of level indications
2896 * for this interrupt. Indications will be cleared on related acks.
2897 * New indications can and will light up during processing,
2898 * and will generate a new interrupt after the master is re-enabled.
2899 */
2900 return raw_reg_read(regs, GEN8_MASTER_IRQ);
2901}
2902
2903static inline void gen8_master_intr_enable(void __iomem * const regs)
2904{
2905 raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2906}
2907
2890static irqreturn_t gen8_irq_handler(int irq, void *arg) 2908static irqreturn_t gen8_irq_handler(int irq, void *arg)
2891{ 2909{
2892 struct drm_i915_private *dev_priv = to_i915(arg); 2910 struct drm_i915_private *dev_priv = to_i915(arg);
2911 void __iomem * const regs = dev_priv->regs;
2893 u32 master_ctl; 2912 u32 master_ctl;
2894 u32 gt_iir[4]; 2913 u32 gt_iir[4];
2895 2914
2896 if (!intel_irqs_enabled(dev_priv)) 2915 if (!intel_irqs_enabled(dev_priv))
2897 return IRQ_NONE; 2916 return IRQ_NONE;
2898 2917
2899 master_ctl = I915_READ_FW(GEN8_MASTER_IRQ); 2918 master_ctl = gen8_master_intr_disable(regs);
2900 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; 2919 if (!master_ctl) {
2901 if (!master_ctl) 2920 gen8_master_intr_enable(regs);
2902 return IRQ_NONE; 2921 return IRQ_NONE;
2903 2922 }
2904 I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
2905 2923
2906 /* Find, clear, then process each source of interrupt */ 2924 /* Find, clear, then process each source of interrupt */
2907 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 2925 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
@@ -2913,7 +2931,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
2913 enable_rpm_wakeref_asserts(dev_priv); 2931 enable_rpm_wakeref_asserts(dev_priv);
2914 } 2932 }
2915 2933
2916 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2934 gen8_master_intr_enable(regs);
2917 2935
2918 gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir); 2936 gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
2919 2937
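
gen8_master_intr_disable()/enable() formalise the handler's shape: gate the master enable, sample the latched level indications, service them, then re-arm (the gen11 path below gets the identical treatment). A compact sketch of that flow against a toy register; on real hardware the readback after the write returns the latched status bits, not the zero just written:

        #include <stdint.h>

        static uint32_t toy_master_intr_disable(volatile uint32_t *master_reg)
        {
                *master_reg = 0;                /* gate interrupt generation */
                return *master_reg;             /* then sample the indications */
        }

        static void toy_master_intr_enable(volatile uint32_t *master_reg)
        {
                *master_reg = 1u << 31;         /* toy "master enable" bit */
        }

        static int toy_irq_handler(volatile uint32_t *master_reg)
        {
                uint32_t master_ctl = toy_master_intr_disable(master_reg);

                if (!master_ctl) {
                        /* Spurious: re-arm before reporting IRQ_NONE. */
                        toy_master_intr_enable(master_reg);
                        return 0;
                }

                /* ...ack and service each source flagged in master_ctl... */

                toy_master_intr_enable(master_reg);     /* pending events re-fire */
                return 1;
        }
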
@@ -3111,6 +3129,24 @@ gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir)
3111 intel_opregion_asle_intr(dev_priv); 3129 intel_opregion_asle_intr(dev_priv);
3112} 3130}
3113 3131
3132static inline u32 gen11_master_intr_disable(void __iomem * const regs)
3133{
3134 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
3135
3136 /*
3137 * Now with master disabled, get a sample of level indications
3138 * for this interrupt. Indications will be cleared on related acks.
3139 * New indications can and will light up during processing,
3140 * and will generate a new interrupt after the master is re-enabled.
3141 */
3142 return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
3143}
3144
3145static inline void gen11_master_intr_enable(void __iomem * const regs)
3146{
3147 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
3148}
3149
3114static irqreturn_t gen11_irq_handler(int irq, void *arg) 3150static irqreturn_t gen11_irq_handler(int irq, void *arg)
3115{ 3151{
3116 struct drm_i915_private * const i915 = to_i915(arg); 3152 struct drm_i915_private * const i915 = to_i915(arg);
@@ -3121,13 +3157,11 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
3121 if (!intel_irqs_enabled(i915)) 3157 if (!intel_irqs_enabled(i915))
3122 return IRQ_NONE; 3158 return IRQ_NONE;
3123 3159
3124 master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ); 3160 master_ctl = gen11_master_intr_disable(regs);
3125 master_ctl &= ~GEN11_MASTER_IRQ; 3161 if (!master_ctl) {
3126 if (!master_ctl) 3162 gen11_master_intr_enable(regs);
3127 return IRQ_NONE; 3163 return IRQ_NONE;
3128 3164 }
3129 /* Disable interrupts. */
3130 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
3131 3165
3132 /* Find, clear, then process each source of interrupt. */ 3166 /* Find, clear, then process each source of interrupt. */
3133 gen11_gt_irq_handler(i915, master_ctl); 3167 gen11_gt_irq_handler(i915, master_ctl);
@@ -3147,8 +3181,7 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
3147 3181
3148 gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl); 3182 gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
3149 3183
3150 /* Acknowledge and enable interrupts. */ 3184 gen11_master_intr_enable(regs);
3151 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl);
3152 3185
3153 gen11_gu_misc_irq_handler(i915, gu_misc_iir); 3186 gen11_gu_misc_irq_handler(i915, gu_misc_iir);
3154 3187
@@ -3598,8 +3631,7 @@ static void gen8_irq_reset(struct drm_device *dev)
3598 struct drm_i915_private *dev_priv = to_i915(dev); 3631 struct drm_i915_private *dev_priv = to_i915(dev);
3599 int pipe; 3632 int pipe;
3600 3633
3601 I915_WRITE(GEN8_MASTER_IRQ, 0); 3634 gen8_master_intr_disable(dev_priv->regs);
3602 POSTING_READ(GEN8_MASTER_IRQ);
3603 3635
3604 gen8_gt_irq_reset(dev_priv); 3636 gen8_gt_irq_reset(dev_priv);
3605 3637
@@ -3641,13 +3673,15 @@ static void gen11_irq_reset(struct drm_device *dev)
3641 struct drm_i915_private *dev_priv = dev->dev_private; 3673 struct drm_i915_private *dev_priv = dev->dev_private;
3642 int pipe; 3674 int pipe;
3643 3675
3644 I915_WRITE(GEN11_GFX_MSTR_IRQ, 0); 3676 gen11_master_intr_disable(dev_priv->regs);
3645 POSTING_READ(GEN11_GFX_MSTR_IRQ);
3646 3677
3647 gen11_gt_irq_reset(dev_priv); 3678 gen11_gt_irq_reset(dev_priv);
3648 3679
3649 I915_WRITE(GEN11_DISPLAY_INT_CTL, 0); 3680 I915_WRITE(GEN11_DISPLAY_INT_CTL, 0);
3650 3681
3682 I915_WRITE(EDP_PSR_IMR, 0xffffffff);
3683 I915_WRITE(EDP_PSR_IIR, 0xffffffff);
3684
3651 for_each_pipe(dev_priv, pipe) 3685 for_each_pipe(dev_priv, pipe)
3652 if (intel_display_power_is_enabled(dev_priv, 3686 if (intel_display_power_is_enabled(dev_priv,
3653 POWER_DOMAIN_PIPE(pipe))) 3687 POWER_DOMAIN_PIPE(pipe)))
@@ -4244,8 +4278,7 @@ static int gen8_irq_postinstall(struct drm_device *dev)
4244 if (HAS_PCH_SPLIT(dev_priv)) 4278 if (HAS_PCH_SPLIT(dev_priv))
4245 ibx_irq_postinstall(dev); 4279 ibx_irq_postinstall(dev);
4246 4280
4247 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 4281 gen8_master_intr_enable(dev_priv->regs);
4248 POSTING_READ(GEN8_MASTER_IRQ);
4249 4282
4250 return 0; 4283 return 0;
4251} 4284}
@@ -4307,8 +4340,7 @@ static int gen11_irq_postinstall(struct drm_device *dev)
4307 4340
4308 I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE); 4341 I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
4309 4342
4310 I915_WRITE(GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ); 4343 gen11_master_intr_enable(dev_priv->regs);
4311 POSTING_READ(GEN11_GFX_MSTR_IRQ);
4312 4344
4313 return 0; 4345 return 0;
4314} 4346}
@@ -4834,6 +4866,13 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
4834 dev_priv->display_irqs_enabled = false; 4866 dev_priv->display_irqs_enabled = false;
4835 4867
4836 dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD; 4868 dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4869 /* If we have MST support, we want to avoid doing short HPD IRQ storm
4870 * detection, as short HPD storms will occur as a natural part of
4871 * sideband messaging with MST.
 4872 * On older platforms, however, IRQ storms can occur with both long and
4873 * short pulses, as seen on some G4x systems.
4874 */
4875 dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
4837 4876
4838 dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos; 4877 dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
4839 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 4878 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
diff --git a/drivers/gpu/drm/i915/i915_oa_bdw.c b/drivers/gpu/drm/i915/i915_oa_bdw.c
index 4abd2e8b5083..4acdb94555b7 100644
--- a/drivers/gpu/drm/i915/i915_oa_bdw.c
+++ b/drivers/gpu/drm/i915/i915_oa_bdw.c
@@ -1,29 +1,10 @@
1/* 1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop 2 * SPDX-License-Identifier: MIT
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 * 3 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Copyright © 2018 Intel Corporation
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 * 5 *
6 * Autogenerated file by GPU Top : https://github.com/rib/gputop
7 * DO NOT EDIT manually!
27 */ 8 */
28 9
29#include <linux/sysfs.h> 10#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_bdw.h b/drivers/gpu/drm/i915/i915_oa_bdw.h
index b812d16162ac..0e667f1a8aa1 100644
--- a/drivers/gpu/drm/i915/i915_oa_bdw.h
+++ b/drivers/gpu/drm/i915/i915_oa_bdw.h
@@ -1,29 +1,10 @@
1/* 1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop 2 * SPDX-License-Identifier: MIT
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 * 3 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Copyright © 2018 Intel Corporation
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 * 5 *
6 * Autogenerated file by GPU Top : https://github.com/rib/gputop
7 * DO NOT EDIT manually!
27 */ 8 */
28 9
29#ifndef __I915_OA_BDW_H__ 10#ifndef __I915_OA_BDW_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_bxt.c b/drivers/gpu/drm/i915/i915_oa_bxt.c
index cb6f304ec16a..a44195c39923 100644
--- a/drivers/gpu/drm/i915/i915_oa_bxt.c
+++ b/drivers/gpu/drm/i915/i915_oa_bxt.c
@@ -1,29 +1,10 @@
1/* 1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop 2 * SPDX-License-Identifier: MIT
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 * 3 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Copyright © 2018 Intel Corporation
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 * 5 *
6 * Autogenerated file by GPU Top : https://github.com/rib/gputop
7 * DO NOT EDIT manually!
27 */ 8 */
28 9
29#include <linux/sysfs.h> 10#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_bxt.h b/drivers/gpu/drm/i915/i915_oa_bxt.h
index 690b963a2383..679e92cf4f1d 100644
--- a/drivers/gpu/drm/i915/i915_oa_bxt.h
+++ b/drivers/gpu/drm/i915/i915_oa_bxt.h
@@ -1,29 +1,10 @@
1/* 1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop 2 * SPDX-License-Identifier: MIT
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 * 3 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Copyright © 2018 Intel Corporation
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 * 5 *
6 * Autogenerated file by GPU Top : https://github.com/rib/gputop
7 * DO NOT EDIT manually!
27 */ 8 */
28 9
29#ifndef __I915_OA_BXT_H__ 10#ifndef __I915_OA_BXT_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt2.c b/drivers/gpu/drm/i915/i915_oa_cflgt2.c
index 8641ae30e343..7f60d51b8761 100644
--- a/drivers/gpu/drm/i915/i915_oa_cflgt2.c
+++ b/drivers/gpu/drm/i915/i915_oa_cflgt2.c
@@ -1,29 +1,10 @@
1/* 1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop 2 * SPDX-License-Identifier: MIT
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 * 3 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Copyright © 2018 Intel Corporation
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 * 5 *
6 * Autogenerated file by GPU Top : https://github.com/rib/gputop
7 * DO NOT EDIT manually!
27 */ 8 */
28 9
29#include <linux/sysfs.h> 10#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt2.h b/drivers/gpu/drm/i915/i915_oa_cflgt2.h
index 1f3268ef2ea2..4d6025559bbe 100644
--- a/drivers/gpu/drm/i915/i915_oa_cflgt2.h
+++ b/drivers/gpu/drm/i915/i915_oa_cflgt2.h
@@ -1,29 +1,10 @@
1/* 1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop 2 * SPDX-License-Identifier: MIT
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 * 3 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Copyright © 2018 Intel Corporation
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 * 5 *
6 * Autogenerated file by GPU Top : https://github.com/rib/gputop
7 * DO NOT EDIT manually!
27 */ 8 */
28 9
29#ifndef __I915_OA_CFLGT2_H__ 10#ifndef __I915_OA_CFLGT2_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt3.c b/drivers/gpu/drm/i915/i915_oa_cflgt3.c
index 792facdb6702..a92c38e3a0ce 100644
--- a/drivers/gpu/drm/i915/i915_oa_cflgt3.c
+++ b/drivers/gpu/drm/i915/i915_oa_cflgt3.c
@@ -1,29 +1,10 @@
1/* 1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop 2 * SPDX-License-Identifier: MIT
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 * 3 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Copyright © 2018 Intel Corporation
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 * 5 *
6 * Autogenerated file by GPU Top : https://github.com/rib/gputop
7 * DO NOT EDIT manually!
27 */ 8 */
28 9
29#include <linux/sysfs.h> 10#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt3.h b/drivers/gpu/drm/i915/i915_oa_cflgt3.h
index c13b5aac01b9..0697f4077402 100644
--- a/drivers/gpu/drm/i915/i915_oa_cflgt3.h
+++ b/drivers/gpu/drm/i915/i915_oa_cflgt3.h
@@ -1,29 +1,10 @@
1/* 1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop 2 * SPDX-License-Identifier: MIT
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 * 3 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Copyright © 2018 Intel Corporation
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 * 5 *
6 * Autogenerated file by GPU Top : https://github.com/rib/gputop
7 * DO NOT EDIT manually!
27 */ 8 */
28 9
29#ifndef __I915_OA_CFLGT3_H__ 10#ifndef __I915_OA_CFLGT3_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_chv.c b/drivers/gpu/drm/i915/i915_oa_chv.c
index 556febb2c3c8..71ec889a0114 100644
--- a/drivers/gpu/drm/i915/i915_oa_chv.c
+++ b/drivers/gpu/drm/i915/i915_oa_chv.c
@@ -1,29 +1,10 @@
1/* 1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop 2 * SPDX-License-Identifier: MIT
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 * 3 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Copyright © 2018 Intel Corporation
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 * 5 *
6 * Autogenerated file by GPU Top : https://github.com/rib/gputop
7 * DO NOT EDIT manually!
27 */ 8 */
28 9
29#include <linux/sysfs.h> 10#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_chv.h b/drivers/gpu/drm/i915/i915_oa_chv.h
index b9622496979e..0986eae3135f 100644
--- a/drivers/gpu/drm/i915/i915_oa_chv.h
+++ b/drivers/gpu/drm/i915/i915_oa_chv.h
@@ -1,29 +1,10 @@
1/* 1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop 2 * SPDX-License-Identifier: MIT
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 * 3 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Copyright © 2018 Intel Corporation
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 * 5 *
6 * Autogenerated file by GPU Top : https://github.com/rib/gputop
7 * DO NOT EDIT manually!
27 */ 8 */
28 9
29#ifndef __I915_OA_CHV_H__ 10#ifndef __I915_OA_CHV_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_cnl.c b/drivers/gpu/drm/i915/i915_oa_cnl.c
index ba9140c87cc0..5c23d883d6c9 100644
--- a/drivers/gpu/drm/i915/i915_oa_cnl.c
+++ b/drivers/gpu/drm/i915/i915_oa_cnl.c
@@ -1,29 +1,10 @@
1/* 1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop 2 * SPDX-License-Identifier: MIT
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 * 3 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Copyright © 2018 Intel Corporation
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 * 5 *
6 * Autogenerated file by GPU Top : https://github.com/rib/gputop
7 * DO NOT EDIT manually!
27 */ 8 */
28 9
29#include <linux/sysfs.h> 10#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_cnl.h b/drivers/gpu/drm/i915/i915_oa_cnl.h
index fb918b131105..e830a406aff2 100644
--- a/drivers/gpu/drm/i915/i915_oa_cnl.h
+++ b/drivers/gpu/drm/i915/i915_oa_cnl.h
@@ -1,29 +1,10 @@
1/* 1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop 2 * SPDX-License-Identifier: MIT
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 * 3 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Copyright © 2018 Intel Corporation
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 * 5 *
6 * Autogenerated file by GPU Top : https://github.com/rib/gputop
7 * DO NOT EDIT manually!
27 */ 8 */
28 9
29#ifndef __I915_OA_CNL_H__ 10#ifndef __I915_OA_CNL_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_glk.c b/drivers/gpu/drm/i915/i915_oa_glk.c
index 971db587957c..4bdda66df7d2 100644
--- a/drivers/gpu/drm/i915/i915_oa_glk.c
+++ b/drivers/gpu/drm/i915/i915_oa_glk.c
@@ -1,29 +1,10 @@
1/* 1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop 2 * SPDX-License-Identifier: MIT
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 * 3 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Copyright © 2018 Intel Corporation
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 * 5 *
6 * Autogenerated file by GPU Top : https://github.com/rib/gputop
7 * DO NOT EDIT manually!
27 */ 8 */
28 9
29#include <linux/sysfs.h> 10#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_glk.h b/drivers/gpu/drm/i915/i915_oa_glk.h
index 63bd113f4bc9..06dedf991edb 100644
--- a/drivers/gpu/drm/i915/i915_oa_glk.h
+++ b/drivers/gpu/drm/i915/i915_oa_glk.h
@@ -1,29 +1,10 @@
1/* 1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop 2 * SPDX-License-Identifier: MIT
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 * 3 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Copyright © 2018 Intel Corporation
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 * 5 *
6 * Autogenerated file by GPU Top : https://github.com/rib/gputop
7 * DO NOT EDIT manually!
27 */ 8 */
28 9
29#ifndef __I915_OA_GLK_H__ 10#ifndef __I915_OA_GLK_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.c b/drivers/gpu/drm/i915/i915_oa_hsw.c
index 434a9b96d7ab..cc6526fdd2bd 100644
--- a/drivers/gpu/drm/i915/i915_oa_hsw.c
+++ b/drivers/gpu/drm/i915/i915_oa_hsw.c
@@ -1,29 +1,10 @@
1/* 1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop 2 * SPDX-License-Identifier: MIT
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 * 3 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Copyright © 2018 Intel Corporation
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 * 5 *
6 * Autogenerated file by GPU Top : https://github.com/rib/gputop
7 * DO NOT EDIT manually!
27 */ 8 */
28 9
29#include <linux/sysfs.h> 10#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.h b/drivers/gpu/drm/i915/i915_oa_hsw.h
index 74d03439c157..3d0c870cd0bd 100644
--- a/drivers/gpu/drm/i915/i915_oa_hsw.h
+++ b/drivers/gpu/drm/i915/i915_oa_hsw.h
@@ -1,29 +1,10 @@
1/* 1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop 2 * SPDX-License-Identifier: MIT
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 * 3 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Copyright © 2018 Intel Corporation
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 * 5 *
6 * Autogenerated file by GPU Top : https://github.com/rib/gputop
7 * DO NOT EDIT manually!
27 */ 8 */
28 9
29#ifndef __I915_OA_HSW_H__ 10#ifndef __I915_OA_HSW_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_icl.c b/drivers/gpu/drm/i915/i915_oa_icl.c
index a5667926e3de..baa51427a543 100644
--- a/drivers/gpu/drm/i915/i915_oa_icl.c
+++ b/drivers/gpu/drm/i915/i915_oa_icl.c
@@ -1,29 +1,10 @@
1/* 1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop 2 * SPDX-License-Identifier: MIT
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 * 3 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Copyright © 2018 Intel Corporation
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 * 5 *
6 * Autogenerated file by GPU Top : https://github.com/rib/gputop
7 * DO NOT EDIT manually!
27 */ 8 */
28 9
29#include <linux/sysfs.h> 10#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_icl.h b/drivers/gpu/drm/i915/i915_oa_icl.h
index ae1c24aafe4f..24eaa97d61ba 100644
--- a/drivers/gpu/drm/i915/i915_oa_icl.h
+++ b/drivers/gpu/drm/i915/i915_oa_icl.h
@@ -1,29 +1,10 @@
1/* 1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop 2 * SPDX-License-Identifier: MIT
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 * 3 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Copyright © 2018 Intel Corporation
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 * 5 *
6 * Autogenerated file by GPU Top : https://github.com/rib/gputop
7 * DO NOT EDIT manually!
27 */ 8 */
28 9
29#ifndef __I915_OA_ICL_H__ 10#ifndef __I915_OA_ICL_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt2.c b/drivers/gpu/drm/i915/i915_oa_kblgt2.c
index 2fa98a40bbc8..168e49ab0d4d 100644
--- a/drivers/gpu/drm/i915/i915_oa_kblgt2.c
+++ b/drivers/gpu/drm/i915/i915_oa_kblgt2.c
@@ -1,29 +1,10 @@
 /*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
  *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
  *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
  */
 
 #include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt2.h b/drivers/gpu/drm/i915/i915_oa_kblgt2.h
index 25b803546dc1..a55398a904de 100644
--- a/drivers/gpu/drm/i915/i915_oa_kblgt2.h
+++ b/drivers/gpu/drm/i915/i915_oa_kblgt2.h
@@ -1,29 +1,10 @@
 /*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
  *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
  *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
  */
 
 #ifndef __I915_OA_KBLGT2_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt3.c b/drivers/gpu/drm/i915/i915_oa_kblgt3.c
index f3cb6679a1bc..6ffa553c388e 100644
--- a/drivers/gpu/drm/i915/i915_oa_kblgt3.c
+++ b/drivers/gpu/drm/i915/i915_oa_kblgt3.c
@@ -1,29 +1,10 @@
 /*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
  *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
  *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
  */
 
 #include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt3.h b/drivers/gpu/drm/i915/i915_oa_kblgt3.h
index d5b5b5c1923e..3ddd3483b7cc 100644
--- a/drivers/gpu/drm/i915/i915_oa_kblgt3.h
+++ b/drivers/gpu/drm/i915/i915_oa_kblgt3.h
@@ -1,29 +1,10 @@
 /*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
  *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
  *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
  */
 
 #ifndef __I915_OA_KBLGT3_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt2.c b/drivers/gpu/drm/i915/i915_oa_sklgt2.c
index bf8b8cd8a50d..7ce6ee851d43 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt2.c
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt2.c
@@ -1,29 +1,10 @@
 /*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
  *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
  *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
  */
 
 #include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt2.h b/drivers/gpu/drm/i915/i915_oa_sklgt2.h
index fe1aa2c03958..be6256037239 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt2.h
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt2.h
@@ -1,29 +1,10 @@
 /*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
  *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
  *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
  */
 
 #ifndef __I915_OA_SKLGT2_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt3.c b/drivers/gpu/drm/i915/i915_oa_sklgt3.c
index ae534c7c8135..086ca2631e1c 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt3.c
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt3.c
@@ -1,29 +1,10 @@
 /*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
  *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
  *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
  */
 
 #include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt3.h b/drivers/gpu/drm/i915/i915_oa_sklgt3.h
index 06746b2616c8..650beb068e56 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt3.h
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt3.h
@@ -1,29 +1,10 @@
 /*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
  *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
  *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
  */
 
 #ifndef __I915_OA_SKLGT3_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt4.c b/drivers/gpu/drm/i915/i915_oa_sklgt4.c
index 817fba2d82df..b291a6eb8a87 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt4.c
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt4.c
@@ -1,29 +1,10 @@
 /*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
  *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
  *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
  */
 
 #include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt4.h b/drivers/gpu/drm/i915/i915_oa_sklgt4.h
index 944fd525c8b1..8dcf849d131e 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt4.h
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt4.h
@@ -1,29 +1,10 @@
 /*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
  *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
  *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
  */
 
 #ifndef __I915_OA_SKLGT4_H__
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 295e981e4a39..2e0356561839 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -82,10 +82,6 @@ i915_param_named_unsafe(enable_hangcheck, bool, 0644,
82 "WARNING: Disabling this can cause system wide hangs. " 82 "WARNING: Disabling this can cause system wide hangs. "
83 "(default: true)"); 83 "(default: true)");
84 84
85i915_param_named_unsafe(enable_ppgtt, int, 0400,
86 "Override PPGTT usage. "
87 "(-1=auto [default], 0=disabled, 1=aliasing, 2=full, 3=full with extended address space)");
88
89i915_param_named_unsafe(enable_psr, int, 0600, 85i915_param_named_unsafe(enable_psr, int, 0600,
90 "Enable PSR " 86 "Enable PSR "
91 "(0=disabled, 1=enabled) " 87 "(0=disabled, 1=enabled) "
@@ -171,8 +167,10 @@ i915_param_named_unsafe(inject_load_failure, uint, 0400,
 i915_param_named(enable_dpcd_backlight, bool, 0600,
 	"Enable support for DPCD backlight control (default:false)");
 
+#if IS_ENABLED(CONFIG_DRM_I915_GVT)
 i915_param_named(enable_gvt, bool, 0400,
 	"Enable support for Intel GVT-g graphics virtualization host support(default:false)");
+#endif
 
 static __always_inline void _print_param(struct drm_printer *p,
 					 const char *name,
@@ -188,7 +186,8 @@ static __always_inline void _print_param(struct drm_printer *p,
 	else if (!__builtin_strcmp(type, "char *"))
 		drm_printf(p, "i915.%s=%s\n", name, *(const char **)x);
 	else
-		BUILD_BUG();
+		WARN_ONCE(1, "no printer defined for param type %s (i915.%s)\n",
+			  type, name);
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 6c4d4a21474b..7e56c516c815 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -41,7 +41,6 @@ struct drm_printer;
 	param(int, vbt_sdvo_panel_type, -1) \
 	param(int, enable_dc, -1) \
 	param(int, enable_fbc, -1) \
-	param(int, enable_ppgtt, -1) \
 	param(int, enable_psr, -1) \
 	param(int, disable_power_well, -1) \
 	param(int, enable_ips, 1) \
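Taken together, the i915_params.c and i915_params.h hunks show the driver's
X-macro parameter table at work: i915_params.h declares each module parameter
exactly once, and _print_param() dispatches on the stringified type name, now
warning at runtime (WARN_ONCE) on an unknown type instead of failing the
build (BUILD_BUG). A self-contained user-space sketch of the same two idioms,
with hypothetical parameter names and fprintf standing in for WARN_ONCE:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* X-macro table: one line per parameter, expanded several ways below. */
#define I915_PARAMS_FOR_EACH(param) \
	param(int,  enable_psr, -1) \
	param(bool, enable_hangcheck, true)

struct i915_params {
#define MEMBER(T, name, def) T name;
	I915_PARAMS_FOR_EACH(MEMBER)
#undef MEMBER
};

static void print_one(const char *type, const char *name, const void *x)
{
	if (!strcmp(type, "bool"))
		printf("i915.%s=%s\n", name, *(const bool *)x ? "yes" : "no");
	else if (!strcmp(type, "int"))
		printf("i915.%s=%d\n", name, *(const int *)x);
	else	/* unknown type: warn at runtime instead of breaking the build */
		fprintf(stderr, "no printer for param type %s (i915.%s)\n",
			type, name);
}

int main(void)
{
	struct i915_params p = {
#define INIT(T, name, def) .name = (def),
		I915_PARAMS_FOR_EACH(INIT)
#undef INIT
	};
#define PRINT(T, name, def) print_one(#T, #name, &p.name);
	I915_PARAMS_FOR_EACH(PRINT)
#undef PRINT
	return 0;
}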
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index d6f7b9fe1d26..1b81d7cb209e 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -33,19 +33,30 @@
 #define GEN(x) .gen = (x), .gen_mask = BIT((x) - 1)
 
 #define GEN_DEFAULT_PIPEOFFSETS \
-	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
-			  PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
-	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
-			   TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
-	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
+	.pipe_offsets = { \
+		[TRANSCODER_A] = PIPE_A_OFFSET, \
+		[TRANSCODER_B] = PIPE_B_OFFSET, \
+		[TRANSCODER_C] = PIPE_C_OFFSET, \
+		[TRANSCODER_EDP] = PIPE_EDP_OFFSET, \
+	}, \
+	.trans_offsets = { \
+		[TRANSCODER_A] = TRANSCODER_A_OFFSET, \
+		[TRANSCODER_B] = TRANSCODER_B_OFFSET, \
+		[TRANSCODER_C] = TRANSCODER_C_OFFSET, \
+		[TRANSCODER_EDP] = TRANSCODER_EDP_OFFSET, \
+	}
 
 #define GEN_CHV_PIPEOFFSETS \
-	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
-			  CHV_PIPE_C_OFFSET }, \
-	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
-			   CHV_TRANSCODER_C_OFFSET, }, \
-	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
-			     CHV_PALETTE_C_OFFSET }
+	.pipe_offsets = { \
+		[TRANSCODER_A] = PIPE_A_OFFSET, \
+		[TRANSCODER_B] = PIPE_B_OFFSET, \
+		[TRANSCODER_C] = CHV_PIPE_C_OFFSET, \
+	}, \
+	.trans_offsets = { \
+		[TRANSCODER_A] = TRANSCODER_A_OFFSET, \
+		[TRANSCODER_B] = TRANSCODER_B_OFFSET, \
+		[TRANSCODER_C] = CHV_TRANSCODER_C_OFFSET, \
+	}
 
 #define CURSOR_OFFSETS \
 	.cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }
@@ -252,7 +263,7 @@ static const struct intel_device_info intel_ironlake_m_info = {
 	.has_llc = 1, \
 	.has_rc6 = 1, \
 	.has_rc6p = 1, \
-	.has_aliasing_ppgtt = 1, \
+	.ppgtt = INTEL_PPGTT_ALIASING, \
 	GEN_DEFAULT_PIPEOFFSETS, \
 	GEN_DEFAULT_PAGE_SIZES, \
 	CURSOR_OFFSETS
@@ -297,8 +308,7 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = {
 	.has_llc = 1, \
 	.has_rc6 = 1, \
 	.has_rc6p = 1, \
-	.has_aliasing_ppgtt = 1, \
-	.has_full_ppgtt = 1, \
+	.ppgtt = INTEL_PPGTT_FULL, \
 	GEN_DEFAULT_PIPEOFFSETS, \
 	GEN_DEFAULT_PAGE_SIZES, \
 	IVB_CURSOR_OFFSETS
@@ -351,8 +361,7 @@ static const struct intel_device_info intel_valleyview_info = {
 	.has_rc6 = 1,
 	.has_gmch_display = 1,
 	.has_hotplug = 1,
-	.has_aliasing_ppgtt = 1,
-	.has_full_ppgtt = 1,
+	.ppgtt = INTEL_PPGTT_FULL,
 	.has_snoop = true,
 	.has_coherent_ggtt = false,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
@@ -399,7 +408,7 @@ static const struct intel_device_info intel_haswell_gt3_info = {
 	.page_sizes = I915_GTT_PAGE_SIZE_4K | \
 		      I915_GTT_PAGE_SIZE_2M, \
 	.has_logical_ring_contexts = 1, \
-	.has_full_48bit_ppgtt = 1, \
+	.ppgtt = INTEL_PPGTT_FULL_4LVL, \
 	.has_64bit_reloc = 1, \
 	.has_reset_engine = 1
 
@@ -443,8 +452,7 @@ static const struct intel_device_info intel_cherryview_info = {
 	.has_rc6 = 1,
 	.has_logical_ring_contexts = 1,
 	.has_gmch_display = 1,
-	.has_aliasing_ppgtt = 1,
-	.has_full_ppgtt = 1,
+	.ppgtt = INTEL_PPGTT_FULL,
 	.has_reset_engine = 1,
 	.has_snoop = true,
 	.has_coherent_ggtt = false,
@@ -472,6 +480,8 @@ static const struct intel_device_info intel_cherryview_info = {
 
 #define SKL_PLATFORM \
 	GEN9_FEATURES, \
+	/* Display WA #0477 WaDisableIPC: skl */ \
+	.has_ipc = 0, \
 	PLATFORM(INTEL_SKYLAKE)
 
 static const struct intel_device_info intel_skylake_gt1_info = {
@@ -518,9 +528,7 @@ static const struct intel_device_info intel_skylake_gt4_info = {
 	.has_logical_ring_contexts = 1, \
 	.has_logical_ring_preemption = 1, \
 	.has_guc = 1, \
-	.has_aliasing_ppgtt = 1, \
-	.has_full_ppgtt = 1, \
-	.has_full_48bit_ppgtt = 1, \
+	.ppgtt = INTEL_PPGTT_FULL_4LVL, \
 	.has_reset_engine = 1, \
 	.has_snoop = true, \
 	.has_coherent_ggtt = false, \
@@ -598,6 +606,22 @@ static const struct intel_device_info intel_cannonlake_info = {
 
 #define GEN11_FEATURES \
 	GEN10_FEATURES, \
+	.pipe_offsets = { \
+		[TRANSCODER_A] = PIPE_A_OFFSET, \
+		[TRANSCODER_B] = PIPE_B_OFFSET, \
+		[TRANSCODER_C] = PIPE_C_OFFSET, \
+		[TRANSCODER_EDP] = PIPE_EDP_OFFSET, \
+		[TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET, \
+		[TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET, \
+	}, \
+	.trans_offsets = { \
+		[TRANSCODER_A] = TRANSCODER_A_OFFSET, \
+		[TRANSCODER_B] = TRANSCODER_B_OFFSET, \
+		[TRANSCODER_C] = TRANSCODER_C_OFFSET, \
+		[TRANSCODER_EDP] = TRANSCODER_EDP_OFFSET, \
+		[TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET, \
+		[TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \
+	}, \
 	GEN(11), \
 	.ddb_size = 2048, \
 	.has_logical_ring_elsq = 1
@@ -663,7 +687,7 @@ static const struct pci_device_id pciidlist[] = {
 	INTEL_KBL_GT2_IDS(&intel_kabylake_gt2_info),
 	INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
 	INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
-	INTEL_AML_GT2_IDS(&intel_kabylake_gt2_info),
+	INTEL_AML_KBL_GT2_IDS(&intel_kabylake_gt2_info),
 	INTEL_CFL_S_GT1_IDS(&intel_coffeelake_gt1_info),
 	INTEL_CFL_S_GT2_IDS(&intel_coffeelake_gt2_info),
 	INTEL_CFL_H_GT2_IDS(&intel_coffeelake_gt2_info),
@@ -671,6 +695,7 @@ static const struct pci_device_id pciidlist[] = {
 	INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info),
 	INTEL_WHL_U_GT1_IDS(&intel_coffeelake_gt1_info),
 	INTEL_WHL_U_GT2_IDS(&intel_coffeelake_gt2_info),
+	INTEL_AML_CFL_GT2_IDS(&intel_coffeelake_gt2_info),
 	INTEL_WHL_U_GT3_IDS(&intel_coffeelake_gt3_info),
 	INTEL_CNL_IDS(&intel_cannonlake_info),
 	INTEL_ICL_11_IDS(&intel_icelake_11_info),
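Two patterns recur through the i915_pci.c hunks above: the offset tables are
converted to designated initializers indexed by the transcoder enum (so the
entries survive enum renumbering, which the GEN11 DSI transcoders force), and
the three has_*_ppgtt flags collapse into one ordered capability level. A
compilable sketch under those assumptions — the INTEL_PPGTT_* names mirror
the hunks, the struct layout and the example offsets are illustrative only:

#include <stdint.h>

enum transcoder { TRANSCODER_A, TRANSCODER_B, TRANSCODER_C,
		  TRANSCODER_EDP, I915_MAX_TRANSCODERS };

enum intel_ppgtt {
	INTEL_PPGTT_NONE = 0,
	INTEL_PPGTT_ALIASING,
	INTEL_PPGTT_FULL,
	INTEL_PPGTT_FULL_4LVL,
};

struct intel_device_info {
	uint32_t pipe_offsets[I915_MAX_TRANSCODERS];
	enum intel_ppgtt ppgtt;
};

static const struct intel_device_info example_info = {
	.pipe_offsets = {	/* entries keyed by enum, order-independent */
		[TRANSCODER_A] = 0x70000,
		[TRANSCODER_B] = 0x71000,
		[TRANSCODER_C] = 0x72000,
		[TRANSCODER_EDP] = 0x7f000,
	},
	.ppgtt = INTEL_PPGTT_FULL_4LVL,
};

/* Capability checks become ordered comparisons instead of flag soup. */
static int has_full_ppgtt(const struct intel_device_info *i)
{
	return i->ppgtt >= INTEL_PPGTT_FULL;
}

int main(void)
{
	return has_full_ppgtt(&example_info) ? 0 : 1;
}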
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 664b96bb65a3..4529edfdcfc8 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -890,8 +890,8 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
 		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
 			  dev_priv->perf.oa.period_exponent);
 
-		dev_priv->perf.oa.ops.oa_disable(dev_priv);
-		dev_priv->perf.oa.ops.oa_enable(dev_priv);
+		dev_priv->perf.oa.ops.oa_disable(stream);
+		dev_priv->perf.oa.ops.oa_enable(stream);
 
 		/*
 		 * Note: .oa_enable() is expected to re-init the oabuffer and
@@ -1114,8 +1114,8 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
 		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
 			  dev_priv->perf.oa.period_exponent);
 
-		dev_priv->perf.oa.ops.oa_disable(dev_priv);
-		dev_priv->perf.oa.ops.oa_enable(dev_priv);
+		dev_priv->perf.oa.ops.oa_disable(stream);
+		dev_priv->perf.oa.ops.oa_enable(stream);
 
 		oastatus1 = I915_READ(GEN7_OASTATUS1);
 	}
@@ -1528,8 +1528,6 @@ static int alloc_oa_buffer(struct drm_i915_private *dev_priv)
 		goto err_unpin;
 	}
 
-	dev_priv->perf.oa.ops.init_oa_buffer(dev_priv);
-
 	DRM_DEBUG_DRIVER("OA Buffer initialized, gtt offset = 0x%x, vaddr = %p\n",
 			 i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma),
 			 dev_priv->perf.oa.oa_buffer.vaddr);
@@ -1563,9 +1561,11 @@ static void config_oa_regs(struct drm_i915_private *dev_priv,
 	}
 }
 
-static int hsw_enable_metric_set(struct drm_i915_private *dev_priv,
-				 const struct i915_oa_config *oa_config)
+static int hsw_enable_metric_set(struct i915_perf_stream *stream)
 {
+	struct drm_i915_private *dev_priv = stream->dev_priv;
+	const struct i915_oa_config *oa_config = stream->oa_config;
+
 	/* PRM:
 	 *
 	 * OA unit is using “crclk” for its functionality. When trunk
@@ -1767,9 +1767,10 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
 	return 0;
 }
 
-static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
-				  const struct i915_oa_config *oa_config)
+static int gen8_enable_metric_set(struct i915_perf_stream *stream)
 {
+	struct drm_i915_private *dev_priv = stream->dev_priv;
+	const struct i915_oa_config *oa_config = stream->oa_config;
 	int ret;
 
 	/*
@@ -1837,10 +1838,10 @@ static void gen10_disable_metric_set(struct drm_i915_private *dev_priv)
 		   I915_READ(RPM_CONFIG1) & ~GEN10_GT_NOA_ENABLE);
 }
 
-static void gen7_oa_enable(struct drm_i915_private *dev_priv)
+static void gen7_oa_enable(struct i915_perf_stream *stream)
 {
-	struct i915_gem_context *ctx =
-			dev_priv->perf.oa.exclusive_stream->ctx;
+	struct drm_i915_private *dev_priv = stream->dev_priv;
+	struct i915_gem_context *ctx = stream->ctx;
 	u32 ctx_id = dev_priv->perf.oa.specific_ctx_id;
 	bool periodic = dev_priv->perf.oa.periodic;
 	u32 period_exponent = dev_priv->perf.oa.period_exponent;
@@ -1867,8 +1868,9 @@ static void gen7_oa_enable(struct drm_i915_private *dev_priv)
 		   GEN7_OACONTROL_ENABLE);
 }
 
-static void gen8_oa_enable(struct drm_i915_private *dev_priv)
+static void gen8_oa_enable(struct i915_perf_stream *stream)
 {
+	struct drm_i915_private *dev_priv = stream->dev_priv;
 	u32 report_format = dev_priv->perf.oa.oa_buffer.format;
 
 	/*
@@ -1905,7 +1907,7 @@ static void i915_oa_stream_enable(struct i915_perf_stream *stream)
 {
 	struct drm_i915_private *dev_priv = stream->dev_priv;
 
-	dev_priv->perf.oa.ops.oa_enable(dev_priv);
+	dev_priv->perf.oa.ops.oa_enable(stream);
 
 	if (dev_priv->perf.oa.periodic)
 		hrtimer_start(&dev_priv->perf.oa.poll_check_timer,
@@ -1913,8 +1915,10 @@ static void i915_oa_stream_enable(struct i915_perf_stream *stream)
 			      HRTIMER_MODE_REL_PINNED);
 }
 
-static void gen7_oa_disable(struct drm_i915_private *dev_priv)
+static void gen7_oa_disable(struct i915_perf_stream *stream)
 {
+	struct drm_i915_private *dev_priv = stream->dev_priv;
+
 	I915_WRITE(GEN7_OACONTROL, 0);
 	if (intel_wait_for_register(dev_priv,
 				    GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0,
@@ -1922,8 +1926,10 @@ static void gen7_oa_disable(struct drm_i915_private *dev_priv)
 		DRM_ERROR("wait for OA to be disabled timed out\n");
 }
 
-static void gen8_oa_disable(struct drm_i915_private *dev_priv)
+static void gen8_oa_disable(struct i915_perf_stream *stream)
 {
+	struct drm_i915_private *dev_priv = stream->dev_priv;
+
 	I915_WRITE(GEN8_OACONTROL, 0);
 	if (intel_wait_for_register(dev_priv,
 				    GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0,
@@ -1943,7 +1949,7 @@ static void i915_oa_stream_disable(struct i915_perf_stream *stream)
 {
 	struct drm_i915_private *dev_priv = stream->dev_priv;
 
-	dev_priv->perf.oa.ops.oa_disable(dev_priv);
+	dev_priv->perf.oa.ops.oa_disable(stream);
 
 	if (dev_priv->perf.oa.periodic)
 		hrtimer_cancel(&dev_priv->perf.oa.poll_check_timer);
@@ -1998,7 +2004,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
 		return -EINVAL;
 	}
 
-	if (!dev_priv->perf.oa.ops.init_oa_buffer) {
+	if (!dev_priv->perf.oa.ops.enable_metric_set) {
 		DRM_DEBUG("OA unit not supported\n");
 		return -ENODEV;
 	}
@@ -2092,8 +2098,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
 	if (ret)
 		goto err_lock;
 
-	ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv,
-						      stream->oa_config);
+	ret = dev_priv->perf.oa.ops.enable_metric_set(stream);
 	if (ret) {
 		DRM_DEBUG("Unable to enable metric set\n");
 		goto err_enable;
@@ -3387,7 +3392,6 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
 		dev_priv->perf.oa.ops.is_valid_mux_reg =
 			hsw_is_valid_mux_addr;
 		dev_priv->perf.oa.ops.is_valid_flex_reg = NULL;
-		dev_priv->perf.oa.ops.init_oa_buffer = gen7_init_oa_buffer;
 		dev_priv->perf.oa.ops.enable_metric_set = hsw_enable_metric_set;
 		dev_priv->perf.oa.ops.disable_metric_set = hsw_disable_metric_set;
 		dev_priv->perf.oa.ops.oa_enable = gen7_oa_enable;
@@ -3406,7 +3410,6 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
 		 */
 		dev_priv->perf.oa.oa_formats = gen8_plus_oa_formats;
 
-		dev_priv->perf.oa.ops.init_oa_buffer = gen8_init_oa_buffer;
 		dev_priv->perf.oa.ops.oa_enable = gen8_oa_enable;
 		dev_priv->perf.oa.ops.oa_disable = gen8_oa_disable;
 		dev_priv->perf.oa.ops.read = gen8_oa_read;
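The common thread of the i915_perf.c hunks above: every OA hook used to take
dev_priv and reach back through perf.oa.exclusive_stream, and now takes the
stream itself, which carries both the device pointer and the stream's OA
configuration (init_oa_buffer also disappears as a vtable entry, so
enable_metric_set becomes the "is OA supported" probe). A minimal sketch of
the resulting shape — struct contents abbreviated and hypothetical:

struct drm_i915_private;
struct i915_oa_config;

struct i915_perf_stream {
	struct drm_i915_private *dev_priv;
	const struct i915_oa_config *oa_config;
};

struct i915_oa_ops {
	int  (*enable_metric_set)(struct i915_perf_stream *stream);
	void (*oa_enable)(struct i915_perf_stream *stream);
	void (*oa_disable)(struct i915_perf_stream *stream);
};

static int hsw_enable_metric_set_sketch(struct i915_perf_stream *stream)
{
	/* each hook recovers its context from the stream, not the reverse */
	struct drm_i915_private *dev_priv = stream->dev_priv;
	const struct i915_oa_config *oa_config = stream->oa_config;

	(void)dev_priv;
	(void)oa_config;
	/* ... program NOA/boolean counters from oa_config ... */
	return 0;
}

int main(void)
{
	struct i915_oa_ops ops = {
		.enable_metric_set = hsw_enable_metric_set_sketch,
	};
	struct i915_perf_stream stream = { 0 };

	return ops.enable_metric_set(&stream);
}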
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index 3f502eef2431..6fc4b8eeab42 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -27,8 +27,7 @@ static int query_topology_info(struct drm_i915_private *dev_priv,
 
 	slice_length = sizeof(sseu->slice_mask);
 	subslice_length = sseu->max_slices *
-		DIV_ROUND_UP(sseu->max_subslices,
-			     sizeof(sseu->subslice_mask[0]) * BITS_PER_BYTE);
+		DIV_ROUND_UP(sseu->max_subslices, BITS_PER_BYTE);
 	eu_length = sseu->max_slices * sseu->max_subslices *
 		DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE);
 
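The i915_query.c change above is a pure simplification: assuming the subslice
mask is stored as bytes (u8), sizeof(sseu->subslice_mask[0]) is 1, so the old
divisor sizeof(...) * BITS_PER_BYTE already equalled BITS_PER_BYTE. A quick
stand-alone check of that claim:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BYTE 8
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	uint8_t subslice_mask[3];	/* stand-in for sseu->subslice_mask */
	int max_subslices;

	for (max_subslices = 1; max_subslices <= 24; max_subslices++) {
		int old = DIV_ROUND_UP(max_subslices,
				       (int)sizeof(subslice_mask[0]) * BITS_PER_BYTE);
		int new = DIV_ROUND_UP(max_subslices, BITS_PER_BYTE);
		assert(old == new);	/* same bytes-per-slice either way */
	}
	printf("old and new subslice_length math agree\n");
	return 0;
}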
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 7c491ea3d052..47baf2fe8f71 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -157,20 +157,37 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 /*
  * Named helper wrappers around _PICK_EVEN() and _PICK().
  */
 #define _PIPE(pipe, a, b) _PICK_EVEN(pipe, a, b)
-#define _MMIO_PIPE(pipe, a, b) _MMIO(_PIPE(pipe, a, b))
-#define _PLANE(plane, a, b) _PICK_EVEN(plane, a, b)
-#define _MMIO_PLANE(plane, a, b) _MMIO_PIPE(plane, a, b)
-#define _TRANS(tran, a, b) _PICK_EVEN(tran, a, b)
-#define _MMIO_TRANS(tran, a, b) _MMIO(_TRANS(tran, a, b))
-#define _PORT(port, a, b) _PICK_EVEN(port, a, b)
-#define _MMIO_PORT(port, a, b) _MMIO(_PORT(port, a, b))
-#define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
-#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
-#define _PLL(pll, a, b) _PICK_EVEN(pll, a, b)
-#define _MMIO_PLL(pll, a, b) _MMIO(_PLL(pll, a, b))
-#define _PHY3(phy, ...) _PICK(phy, __VA_ARGS__)
-#define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c))
+#define _PLANE(plane, a, b) _PICK_EVEN(plane, a, b)
+#define _TRANS(tran, a, b) _PICK_EVEN(tran, a, b)
+#define _PORT(port, a, b) _PICK_EVEN(port, a, b)
+#define _PLL(pll, a, b) _PICK_EVEN(pll, a, b)
+
+#define _MMIO_PIPE(pipe, a, b) _MMIO(_PIPE(pipe, a, b))
+#define _MMIO_PLANE(plane, a, b) _MMIO(_PLANE(plane, a, b))
+#define _MMIO_TRANS(tran, a, b) _MMIO(_TRANS(tran, a, b))
+#define _MMIO_PORT(port, a, b) _MMIO(_PORT(port, a, b))
+#define _MMIO_PLL(pll, a, b) _MMIO(_PLL(pll, a, b))
+
+#define _PHY3(phy, ...) _PICK(phy, __VA_ARGS__)
+
+#define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
+#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
+#define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c))
+
+/*
+ * Device info offset array based helpers for groups of registers with unevenly
+ * spaced base offsets.
+ */
+#define _MMIO_PIPE2(pipe, reg) _MMIO(dev_priv->info.pipe_offsets[pipe] - \
+	dev_priv->info.pipe_offsets[PIPE_A] + (reg) + \
+	dev_priv->info.display_mmio_offset)
+#define _MMIO_TRANS2(pipe, reg) _MMIO(dev_priv->info.trans_offsets[(pipe)] - \
+	dev_priv->info.trans_offsets[TRANSCODER_A] + (reg) + \
+	dev_priv->info.display_mmio_offset)
+#define _CURSOR2(pipe, reg) _MMIO(dev_priv->info.cursor_offsets[(pipe)] - \
+	dev_priv->info.cursor_offsets[PIPE_A] + (reg) + \
+	dev_priv->info.display_mmio_offset)
 
 #define __MASKED_FIELD(mask, value) ((mask) << 16 | (value))
 #define _MASKED_FIELD(mask, value) ({ \
@@ -1631,35 +1648,6 @@ enum i915_power_well_id {
 #define PHY_RESERVED (1 << 7)
 #define BXT_PORT_CL1CM_DW0(phy) _BXT_PHY((phy), _PORT_CL1CM_DW0_BC)
 
-#define CNL_PORT_CL1CM_DW5 _MMIO(0x162014)
-#define CL_POWER_DOWN_ENABLE (1 << 4)
-#define SUS_CLOCK_CONFIG (3 << 0)
-
-#define _ICL_PORT_CL_DW5_A 0x162014
-#define _ICL_PORT_CL_DW5_B 0x6C014
-#define ICL_PORT_CL_DW5(port) _MMIO_PORT(port, _ICL_PORT_CL_DW5_A, \
-	_ICL_PORT_CL_DW5_B)
-
-#define _CNL_PORT_CL_DW10_A 0x162028
-#define _ICL_PORT_CL_DW10_B 0x6c028
-#define ICL_PORT_CL_DW10(port) _MMIO_PORT(port, \
-	_CNL_PORT_CL_DW10_A, \
-	_ICL_PORT_CL_DW10_B)
-#define PG_SEQ_DELAY_OVERRIDE_MASK (3 << 25)
-#define PG_SEQ_DELAY_OVERRIDE_SHIFT 25
-#define PG_SEQ_DELAY_OVERRIDE_ENABLE (1 << 24)
-#define PWR_UP_ALL_LANES (0x0 << 4)
-#define PWR_DOWN_LN_3_2_1 (0xe << 4)
-#define PWR_DOWN_LN_3_2 (0xc << 4)
-#define PWR_DOWN_LN_3 (0x8 << 4)
-#define PWR_DOWN_LN_2_1_0 (0x7 << 4)
-#define PWR_DOWN_LN_1_0 (0x3 << 4)
-#define PWR_DOWN_LN_1 (0x2 << 4)
-#define PWR_DOWN_LN_3_1 (0xa << 4)
-#define PWR_DOWN_LN_3_1_0 (0xb << 4)
-#define PWR_DOWN_LN_MASK (0xf << 4)
-#define PWR_DOWN_LN_SHIFT 4
-
 #define _PORT_CL1CM_DW9_A 0x162024
 #define _PORT_CL1CM_DW9_BC 0x6C024
 #define IREF0RC_OFFSET_SHIFT 8
@@ -1672,13 +1660,6 @@ enum i915_power_well_id {
 #define IREF1RC_OFFSET_MASK (0xFF << IREF1RC_OFFSET_SHIFT)
 #define BXT_PORT_CL1CM_DW10(phy) _BXT_PHY((phy), _PORT_CL1CM_DW10_BC)
 
-#define _ICL_PORT_CL_DW12_A 0x162030
-#define _ICL_PORT_CL_DW12_B 0x6C030
-#define ICL_LANE_ENABLE_AUX (1 << 0)
-#define ICL_PORT_CL_DW12(port) _MMIO_PORT((port), \
-	_ICL_PORT_CL_DW12_A, \
-	_ICL_PORT_CL_DW12_B)
-
 #define _PORT_CL1CM_DW28_A 0x162070
 #define _PORT_CL1CM_DW28_BC 0x6C070
 #define OCL1_POWER_DOWN_EN (1 << 23)
@@ -1691,6 +1672,74 @@ enum i915_power_well_id {
 #define OCL2_LDOFUSE_PWR_DIS (1 << 6)
 #define BXT_PORT_CL1CM_DW30(phy) _BXT_PHY((phy), _PORT_CL1CM_DW30_BC)
 
+/*
+ * CNL/ICL Port/COMBO-PHY Registers
+ */
+#define _ICL_COMBOPHY_A 0x162000
+#define _ICL_COMBOPHY_B 0x6C000
+#define _ICL_COMBOPHY(port) _PICK(port, _ICL_COMBOPHY_A, \
+	_ICL_COMBOPHY_B)
+
+/* CNL/ICL Port CL_DW registers */
+#define _ICL_PORT_CL_DW(dw, port) (_ICL_COMBOPHY(port) + \
+	4 * (dw))
+
+#define CNL_PORT_CL1CM_DW5 _MMIO(0x162014)
+#define ICL_PORT_CL_DW5(port) _MMIO(_ICL_PORT_CL_DW(5, port))
+#define CL_POWER_DOWN_ENABLE (1 << 4)
+#define SUS_CLOCK_CONFIG (3 << 0)
+
+#define ICL_PORT_CL_DW10(port) _MMIO(_ICL_PORT_CL_DW(10, port))
+#define PG_SEQ_DELAY_OVERRIDE_MASK (3 << 25)
+#define PG_SEQ_DELAY_OVERRIDE_SHIFT 25
+#define PG_SEQ_DELAY_OVERRIDE_ENABLE (1 << 24)
+#define PWR_UP_ALL_LANES (0x0 << 4)
+#define PWR_DOWN_LN_3_2_1 (0xe << 4)
+#define PWR_DOWN_LN_3_2 (0xc << 4)
+#define PWR_DOWN_LN_3 (0x8 << 4)
+#define PWR_DOWN_LN_2_1_0 (0x7 << 4)
+#define PWR_DOWN_LN_1_0 (0x3 << 4)
+#define PWR_DOWN_LN_1 (0x2 << 4)
+#define PWR_DOWN_LN_3_1 (0xa << 4)
+#define PWR_DOWN_LN_3_1_0 (0xb << 4)
+#define PWR_DOWN_LN_MASK (0xf << 4)
+#define PWR_DOWN_LN_SHIFT 4
+
+#define ICL_PORT_CL_DW12(port) _MMIO(_ICL_PORT_CL_DW(12, port))
+#define ICL_LANE_ENABLE_AUX (1 << 0)
+
+/* CNL/ICL Port COMP_DW registers */
+#define _ICL_PORT_COMP 0x100
+#define _ICL_PORT_COMP_DW(dw, port) (_ICL_COMBOPHY(port) + \
+	_ICL_PORT_COMP + 4 * (dw))
+
+#define CNL_PORT_COMP_DW0 _MMIO(0x162100)
+#define ICL_PORT_COMP_DW0(port) _MMIO(_ICL_PORT_COMP_DW(0, port))
+#define COMP_INIT (1 << 31)
+
+#define CNL_PORT_COMP_DW1 _MMIO(0x162104)
+#define ICL_PORT_COMP_DW1(port) _MMIO(_ICL_PORT_COMP_DW(1, port))
+
+#define CNL_PORT_COMP_DW3 _MMIO(0x16210c)
+#define ICL_PORT_COMP_DW3(port) _MMIO(_ICL_PORT_COMP_DW(3, port))
+#define PROCESS_INFO_DOT_0 (0 << 26)
+#define PROCESS_INFO_DOT_1 (1 << 26)
+#define PROCESS_INFO_DOT_4 (2 << 26)
+#define PROCESS_INFO_MASK (7 << 26)
+#define PROCESS_INFO_SHIFT 26
+#define VOLTAGE_INFO_0_85V (0 << 24)
+#define VOLTAGE_INFO_0_95V (1 << 24)
+#define VOLTAGE_INFO_1_05V (2 << 24)
+#define VOLTAGE_INFO_MASK (3 << 24)
+#define VOLTAGE_INFO_SHIFT 24
+
+#define CNL_PORT_COMP_DW9 _MMIO(0x162124)
+#define ICL_PORT_COMP_DW9(port) _MMIO(_ICL_PORT_COMP_DW(9, port))
+
+#define CNL_PORT_COMP_DW10 _MMIO(0x162128)
+#define ICL_PORT_COMP_DW10(port) _MMIO(_ICL_PORT_COMP_DW(10, port))
+
+/* CNL/ICL Port PCS registers */
 #define _CNL_PORT_PCS_DW1_GRP_AE 0x162304
 #define _CNL_PORT_PCS_DW1_GRP_B 0x162384
 #define _CNL_PORT_PCS_DW1_GRP_C 0x162B04
@@ -1708,7 +1757,6 @@ enum i915_power_well_id {
 	_CNL_PORT_PCS_DW1_GRP_D, \
 	_CNL_PORT_PCS_DW1_GRP_AE, \
 	_CNL_PORT_PCS_DW1_GRP_F))
-
 #define CNL_PORT_PCS_DW1_LN0(port) _MMIO(_PICK(port, \
 	_CNL_PORT_PCS_DW1_LN0_AE, \
 	_CNL_PORT_PCS_DW1_LN0_B, \
@@ -1717,24 +1765,21 @@ enum i915_power_well_id {
 	_CNL_PORT_PCS_DW1_LN0_AE, \
 	_CNL_PORT_PCS_DW1_LN0_F))
 
-#define _ICL_PORT_PCS_DW1_GRP_A 0x162604
-#define _ICL_PORT_PCS_DW1_GRP_B 0x6C604
-#define _ICL_PORT_PCS_DW1_LN0_A 0x162804
-#define _ICL_PORT_PCS_DW1_LN0_B 0x6C804
-#define _ICL_PORT_PCS_DW1_AUX_A 0x162304
-#define _ICL_PORT_PCS_DW1_AUX_B 0x6c304
-#define ICL_PORT_PCS_DW1_GRP(port) _MMIO_PORT(port,\
-	_ICL_PORT_PCS_DW1_GRP_A, \
-	_ICL_PORT_PCS_DW1_GRP_B)
-#define ICL_PORT_PCS_DW1_LN0(port) _MMIO_PORT(port, \
-	_ICL_PORT_PCS_DW1_LN0_A, \
-	_ICL_PORT_PCS_DW1_LN0_B)
-#define ICL_PORT_PCS_DW1_AUX(port) _MMIO_PORT(port, \
-	_ICL_PORT_PCS_DW1_AUX_A, \
-	_ICL_PORT_PCS_DW1_AUX_B)
+#define _ICL_PORT_PCS_AUX 0x300
+#define _ICL_PORT_PCS_GRP 0x600
+#define _ICL_PORT_PCS_LN(ln) (0x800 + (ln) * 0x100)
+#define _ICL_PORT_PCS_DW_AUX(dw, port) (_ICL_COMBOPHY(port) + \
+	_ICL_PORT_PCS_AUX + 4 * (dw))
+#define _ICL_PORT_PCS_DW_GRP(dw, port) (_ICL_COMBOPHY(port) + \
+	_ICL_PORT_PCS_GRP + 4 * (dw))
+#define _ICL_PORT_PCS_DW_LN(dw, ln, port) (_ICL_COMBOPHY(port) + \
+	_ICL_PORT_PCS_LN(ln) + 4 * (dw))
+#define ICL_PORT_PCS_DW1_AUX(port) _MMIO(_ICL_PORT_PCS_DW_AUX(1, port))
+#define ICL_PORT_PCS_DW1_GRP(port) _MMIO(_ICL_PORT_PCS_DW_GRP(1, port))
+#define ICL_PORT_PCS_DW1_LN0(port) _MMIO(_ICL_PORT_PCS_DW_LN(1, 0, port))
 #define COMMON_KEEPER_EN (1 << 26)
 
-/* CNL Port TX registers */
+/* CNL/ICL Port TX registers */
 #define _CNL_PORT_TX_AE_GRP_OFFSET 0x162340
 #define _CNL_PORT_TX_B_GRP_OFFSET 0x1623C0
 #define _CNL_PORT_TX_C_GRP_OFFSET 0x162B40
@@ -1762,23 +1807,22 @@ enum i915_power_well_id {
 	_CNL_PORT_TX_F_LN0_OFFSET) + \
 	4 * (dw))
 
-#define CNL_PORT_TX_DW2_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 2))
-#define CNL_PORT_TX_DW2_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 2))
-#define _ICL_PORT_TX_DW2_GRP_A 0x162688
-#define _ICL_PORT_TX_DW2_GRP_B 0x6C688
-#define _ICL_PORT_TX_DW2_LN0_A 0x162888
-#define _ICL_PORT_TX_DW2_LN0_B 0x6C888
-#define _ICL_PORT_TX_DW2_AUX_A 0x162388
-#define _ICL_PORT_TX_DW2_AUX_B 0x6c388
-#define ICL_PORT_TX_DW2_GRP(port) _MMIO_PORT(port, \
-	_ICL_PORT_TX_DW2_GRP_A, \
-	_ICL_PORT_TX_DW2_GRP_B)
-#define ICL_PORT_TX_DW2_LN0(port) _MMIO_PORT(port, \
-	_ICL_PORT_TX_DW2_LN0_A, \
-	_ICL_PORT_TX_DW2_LN0_B)
-#define ICL_PORT_TX_DW2_AUX(port) _MMIO_PORT(port, \
-	_ICL_PORT_TX_DW2_AUX_A, \
-	_ICL_PORT_TX_DW2_AUX_B)
+#define _ICL_PORT_TX_AUX 0x380
+#define _ICL_PORT_TX_GRP 0x680
+#define _ICL_PORT_TX_LN(ln) (0x880 + (ln) * 0x100)
+
+#define _ICL_PORT_TX_DW_AUX(dw, port) (_ICL_COMBOPHY(port) + \
+	_ICL_PORT_TX_AUX + 4 * (dw))
+#define _ICL_PORT_TX_DW_GRP(dw, port) (_ICL_COMBOPHY(port) + \
+	_ICL_PORT_TX_GRP + 4 * (dw))
+#define _ICL_PORT_TX_DW_LN(dw, ln, port) (_ICL_COMBOPHY(port) + \
+	_ICL_PORT_TX_LN(ln) + 4 * (dw))
+
+#define CNL_PORT_TX_DW2_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(2, port))
+#define CNL_PORT_TX_DW2_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(2, port))
+#define ICL_PORT_TX_DW2_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(2, port))
+#define ICL_PORT_TX_DW2_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(2, port))
+#define ICL_PORT_TX_DW2_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(2, 0, port))
 #define SWING_SEL_UPPER(x) (((x) >> 3) << 15)
 #define SWING_SEL_UPPER_MASK (1 << 15)
 #define SWING_SEL_LOWER(x) (((x) & 0x7) << 11)
@@ -1795,24 +1839,10 @@ enum i915_power_well_id {
 #define CNL_PORT_TX_DW4_LN(port, ln) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4) + \
 	((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \
 	_CNL_PORT_TX_DW4_LN0_AE)))
-#define _ICL_PORT_TX_DW4_GRP_A 0x162690
-#define _ICL_PORT_TX_DW4_GRP_B 0x6C690
-#define _ICL_PORT_TX_DW4_LN0_A 0x162890
-#define _ICL_PORT_TX_DW4_LN1_A 0x162990
-#define _ICL_PORT_TX_DW4_LN0_B 0x6C890
-#define _ICL_PORT_TX_DW4_AUX_A 0x162390
-#define _ICL_PORT_TX_DW4_AUX_B 0x6c390
-#define ICL_PORT_TX_DW4_GRP(port) _MMIO_PORT(port, \
-	_ICL_PORT_TX_DW4_GRP_A, \
-	_ICL_PORT_TX_DW4_GRP_B)
-#define ICL_PORT_TX_DW4_LN(port, ln) _MMIO(_PORT(port, \
-	_ICL_PORT_TX_DW4_LN0_A, \
-	_ICL_PORT_TX_DW4_LN0_B) + \
-	((ln) * (_ICL_PORT_TX_DW4_LN1_A - \
-	_ICL_PORT_TX_DW4_LN0_A)))
-#define ICL_PORT_TX_DW4_AUX(port) _MMIO_PORT(port, \
-	_ICL_PORT_TX_DW4_AUX_A, \
-	_ICL_PORT_TX_DW4_AUX_B)
+#define ICL_PORT_TX_DW4_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(4, port))
+#define ICL_PORT_TX_DW4_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(4, port))
+#define ICL_PORT_TX_DW4_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(4, 0, port))
+#define ICL_PORT_TX_DW4_LN(port, ln) _MMIO(_ICL_PORT_TX_DW_LN(4, ln, port))
 #define LOADGEN_SELECT (1 << 31)
 #define POST_CURSOR_1(x) ((x) << 12)
 #define POST_CURSOR_1_MASK (0x3F << 12)
@@ -1821,23 +1851,11 @@ enum i915_power_well_id {
 #define CURSOR_COEFF(x) ((x) << 0)
 #define CURSOR_COEFF_MASK (0x3F << 0)
 
-#define CNL_PORT_TX_DW5_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 5))
-#define CNL_PORT_TX_DW5_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 5))
-#define _ICL_PORT_TX_DW5_GRP_A 0x162694
-#define _ICL_PORT_TX_DW5_GRP_B 0x6C694
-#define _ICL_PORT_TX_DW5_LN0_A 0x162894
-#define _ICL_PORT_TX_DW5_LN0_B 0x6C894
-#define _ICL_PORT_TX_DW5_AUX_A 0x162394
-#define _ICL_PORT_TX_DW5_AUX_B 0x6c394
-#define ICL_PORT_TX_DW5_GRP(port) _MMIO_PORT(port, \
-	_ICL_PORT_TX_DW5_GRP_A, \
-	_ICL_PORT_TX_DW5_GRP_B)
-#define ICL_PORT_TX_DW5_LN0(port) _MMIO_PORT(port, \
-	_ICL_PORT_TX_DW5_LN0_A, \
-	_ICL_PORT_TX_DW5_LN0_B)
-#define ICL_PORT_TX_DW5_AUX(port) _MMIO_PORT(port, \
-	_ICL_PORT_TX_DW5_AUX_A, \
-	_ICL_PORT_TX_DW5_AUX_B)
+#define CNL_PORT_TX_DW5_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(5, port))
+#define CNL_PORT_TX_DW5_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(5, port))
+#define ICL_PORT_TX_DW5_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(5, port))
+#define ICL_PORT_TX_DW5_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(5, port))
+#define ICL_PORT_TX_DW5_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(5, 0, port))
 #define TX_TRAINING_EN (1 << 31)
 #define TAP2_DISABLE (1 << 30)
 #define TAP3_DISABLE (1 << 29)
@@ -2054,49 +2072,16 @@ enum i915_power_well_id {
 #define BXT_PORT_CL2CM_DW6(phy) _BXT_PHY((phy), _PORT_CL2CM_DW6_BC)
 #define DW6_OLDO_DYN_PWR_DOWN_EN (1 << 28)
 
-#define CNL_PORT_COMP_DW0 _MMIO(0x162100)
-#define COMP_INIT (1 << 31)
-#define CNL_PORT_COMP_DW1 _MMIO(0x162104)
-#define CNL_PORT_COMP_DW3 _MMIO(0x16210c)
-#define PROCESS_INFO_DOT_0 (0 << 26)
-#define PROCESS_INFO_DOT_1 (1 << 26)
-#define PROCESS_INFO_DOT_4 (2 << 26)
-#define PROCESS_INFO_MASK (7 << 26)
-#define PROCESS_INFO_SHIFT 26
-#define VOLTAGE_INFO_0_85V (0 << 24)
-#define VOLTAGE_INFO_0_95V (1 << 24)
-#define VOLTAGE_INFO_1_05V (2 << 24)
-#define VOLTAGE_INFO_MASK (3 << 24)
-#define VOLTAGE_INFO_SHIFT 24
-#define CNL_PORT_COMP_DW9 _MMIO(0x162124)
-#define CNL_PORT_COMP_DW10 _MMIO(0x162128)
-
-#define _ICL_PORT_COMP_DW0_A 0x162100
-#define _ICL_PORT_COMP_DW0_B 0x6C100
-#define ICL_PORT_COMP_DW0(port) _MMIO_PORT(port, _ICL_PORT_COMP_DW0_A, \
-	_ICL_PORT_COMP_DW0_B)
-#define _ICL_PORT_COMP_DW1_A 0x162104
-#define _ICL_PORT_COMP_DW1_B 0x6C104
-#define ICL_PORT_COMP_DW1(port) _MMIO_PORT(port, _ICL_PORT_COMP_DW1_A, \
-	_ICL_PORT_COMP_DW1_B)
-#define _ICL_PORT_COMP_DW3_A 0x16210C
-#define _ICL_PORT_COMP_DW3_B 0x6C10C
-#define ICL_PORT_COMP_DW3(port) _MMIO_PORT(port, _ICL_PORT_COMP_DW3_A, \
-	_ICL_PORT_COMP_DW3_B)
-#define _ICL_PORT_COMP_DW9_A 0x162124
-#define _ICL_PORT_COMP_DW9_B 0x6C124
-#define ICL_PORT_COMP_DW9(port) _MMIO_PORT(port, _ICL_PORT_COMP_DW9_A, \
-	_ICL_PORT_COMP_DW9_B)
-#define _ICL_PORT_COMP_DW10_A 0x162128
-#define _ICL_PORT_COMP_DW10_B 0x6C128
-#define ICL_PORT_COMP_DW10(port) _MMIO_PORT(port, \
-	_ICL_PORT_COMP_DW10_A, \
-	_ICL_PORT_COMP_DW10_B)
+#define FIA1_BASE 0x163000
 
 /* ICL PHY DFLEX registers */
-#define PORT_TX_DFLEXDPMLE1 _MMIO(0x1638C0)
-#define DFLEXDPMLE1_DPMLETC_MASK(n) (0xf << (4 * (n)))
-#define DFLEXDPMLE1_DPMLETC(n, x) ((x) << (4 * (n)))
+#define PORT_TX_DFLEXDPMLE1 _MMIO(FIA1_BASE + 0x008C0)
+#define DFLEXDPMLE1_DPMLETC_MASK(tc_port) (0xf << (4 * (tc_port)))
+#define DFLEXDPMLE1_DPMLETC_ML0(tc_port) (1 << (4 * (tc_port)))
+#define DFLEXDPMLE1_DPMLETC_ML1_0(tc_port) (3 << (4 * (tc_port)))
+#define DFLEXDPMLE1_DPMLETC_ML3(tc_port) (8 << (4 * (tc_port)))
+#define DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) (12 << (4 * (tc_port)))
+#define DFLEXDPMLE1_DPMLETC_ML3_0(tc_port) (15 << (4 * (tc_port)))
 
 /* BXT PHY Ref registers */
 #define _PORT_REF_DW3_A 0x16218C
@@ -2413,6 +2398,7 @@ enum i915_power_well_id {
 
 #define GEN8_GAMW_ECO_DEV_RW_IA _MMIO(0x4080)
 #define GAMW_ECO_ENABLE_64K_IPS_FIELD 0xF
+#define GAMW_ECO_DEV_CTX_RELOAD_DISABLE (1 << 7)
 
 #define GAMT_CHKN_BIT_REG _MMIO(0x4ab8)
 #define GAMT_CHKN_DISABLE_L3_COH_PIPE (1 << 31)
@@ -2573,6 +2559,7 @@ enum i915_power_well_id {
 /* chicken reg for WaConextSwitchWithConcurrentTLBInvalidate */
 #define GEN9_CSFE_CHICKEN1_RCS _MMIO(0x20D4)
 #define GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE (1 << 2)
+#define GEN11_ENABLE_32_PLANE_MODE (1 << 7)
 
 /* WaClearTdlStateAckDirtyBits */
 #define GEN8_STATE_ACK _MMIO(0x20F0)
@@ -3475,11 +3462,13 @@ enum i915_power_well_id {
3475/* 3462/*
3476 * Palette regs 3463 * Palette regs
3477 */ 3464 */
3478#define PALETTE_A_OFFSET 0xa000 3465#define _PALETTE_A 0xa000
3479#define PALETTE_B_OFFSET 0xa800 3466#define _PALETTE_B 0xa800
3480#define CHV_PALETTE_C_OFFSET 0xc000 3467#define _CHV_PALETTE_C 0xc000
3481#define PALETTE(pipe, i) _MMIO(dev_priv->info.palette_offsets[pipe] + \ 3468#define PALETTE(pipe, i) _MMIO(dev_priv->info.display_mmio_offset + \
3482 dev_priv->info.display_mmio_offset + (i) * 4) 3469 _PICK((pipe), _PALETTE_A, \
3470 _PALETTE_B, _CHV_PALETTE_C) + \
3471 (i) * 4)
3483 3472
3484/* MCH MMIO space */ 3473/* MCH MMIO space */
3485 3474
@@ -4061,15 +4050,27 @@ enum {
4061#define _VSYNCSHIFT_B 0x61028 4050#define _VSYNCSHIFT_B 0x61028
4062#define _PIPE_MULT_B 0x6102c 4051#define _PIPE_MULT_B 0x6102c
4063 4052
4053/* DSI 0 timing regs */
4054#define _HTOTAL_DSI0 0x6b000
4055#define _HSYNC_DSI0 0x6b008
4056#define _VTOTAL_DSI0 0x6b00c
4057#define _VSYNC_DSI0 0x6b014
4058#define _VSYNCSHIFT_DSI0 0x6b028
4059
4060/* DSI 1 timing regs */
4061#define _HTOTAL_DSI1 0x6b800
4062#define _HSYNC_DSI1 0x6b808
4063#define _VTOTAL_DSI1 0x6b80c
4064#define _VSYNC_DSI1 0x6b814
4065#define _VSYNCSHIFT_DSI1 0x6b828
4066
4064#define TRANSCODER_A_OFFSET 0x60000 4067#define TRANSCODER_A_OFFSET 0x60000
4065#define TRANSCODER_B_OFFSET 0x61000 4068#define TRANSCODER_B_OFFSET 0x61000
4066#define TRANSCODER_C_OFFSET 0x62000 4069#define TRANSCODER_C_OFFSET 0x62000
4067#define CHV_TRANSCODER_C_OFFSET 0x63000 4070#define CHV_TRANSCODER_C_OFFSET 0x63000
4068#define TRANSCODER_EDP_OFFSET 0x6f000 4071#define TRANSCODER_EDP_OFFSET 0x6f000
4069 4072#define TRANSCODER_DSI0_OFFSET 0x6b000
4070#define _MMIO_TRANS2(pipe, reg) _MMIO(dev_priv->info.trans_offsets[(pipe)] - \ 4073#define TRANSCODER_DSI1_OFFSET 0x6b800
4071 dev_priv->info.trans_offsets[TRANSCODER_A] + (reg) + \
4072 dev_priv->info.display_mmio_offset)
4073 4074
4074#define HTOTAL(trans) _MMIO_TRANS2(trans, _HTOTAL_A) 4075#define HTOTAL(trans) _MMIO_TRANS2(trans, _HTOTAL_A)
4075#define HBLANK(trans) _MMIO_TRANS2(trans, _HBLANK_A) 4076#define HBLANK(trans) _MMIO_TRANS2(trans, _HBLANK_A)
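A sketch (not part of the patch) of what the new DSI transcoder offsets buy: assuming info.trans_offsets[] gains entries for the DSI transcoders, the relocated _MMIO_TRANS2() (its old definition is visible in the removed lines above) rebases any transcoder-A register onto the DSI block, so the generic timing macros keep working.

	/* Illustrative only, with _HTOTAL_A == 0x60000:
	 *   HTOTAL(TRANSCODER_DSI_0)
	 *     == 0x6b000 - 0x60000 + 0x60000 + display_mmio_offset
	 *     == _HTOTAL_DSI0 (plus display_mmio_offset)
	 */
	u32 htotal = I915_READ(HTOTAL(TRANSCODER_DSI_0));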
@@ -4149,9 +4150,13 @@ enum {
4149/* Bspec claims those aren't shifted but stay at 0x64800 */ 4150/* Bspec claims those aren't shifted but stay at 0x64800 */
4150#define EDP_PSR_IMR _MMIO(0x64834) 4151#define EDP_PSR_IMR _MMIO(0x64834)
4151#define EDP_PSR_IIR _MMIO(0x64838) 4152#define EDP_PSR_IIR _MMIO(0x64838)
4152#define EDP_PSR_ERROR(trans) (1 << (((trans) * 8 + 10) & 31)) 4153#define EDP_PSR_ERROR(shift) (1 << ((shift) + 2))
4153#define EDP_PSR_POST_EXIT(trans) (1 << (((trans) * 8 + 9) & 31)) 4154#define EDP_PSR_POST_EXIT(shift) (1 << ((shift) + 1))
4154#define EDP_PSR_PRE_ENTRY(trans) (1 << (((trans) * 8 + 8) & 31)) 4155#define EDP_PSR_PRE_ENTRY(shift) (1 << (shift))
4156#define EDP_PSR_TRANSCODER_C_SHIFT 24
4157#define EDP_PSR_TRANSCODER_B_SHIFT 16
4158#define EDP_PSR_TRANSCODER_A_SHIFT 8
4159#define EDP_PSR_TRANSCODER_EDP_SHIFT 0
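With the helpers above now taking an explicit shift, an interrupt handler composes them with the per-transcoder shifts roughly like this (sketch, not from the patch; assumes dev_priv in scope):

	u32 iir = I915_READ(EDP_PSR_IIR);

	if (iir & EDP_PSR_ERROR(EDP_PSR_TRANSCODER_B_SHIFT))	  /* bit 18 */
		DRM_DEBUG_KMS("PSR error on transcoder B\n");
	if (iir & EDP_PSR_PRE_ENTRY(EDP_PSR_TRANSCODER_B_SHIFT))  /* bit 16 */
		DRM_DEBUG_KMS("PSR pre-entry on transcoder B\n");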
4155 4160
4156#define EDP_PSR_AUX_CTL _MMIO(dev_priv->psr_mmio_base + 0x10) 4161#define EDP_PSR_AUX_CTL _MMIO(dev_priv->psr_mmio_base + 0x10)
4157#define EDP_PSR_AUX_CTL_TIME_OUT_MASK (3 << 26) 4162#define EDP_PSR_AUX_CTL_TIME_OUT_MASK (3 << 26)
@@ -4195,7 +4200,7 @@ enum {
4195#define EDP_PSR_DEBUG_MASK_LPSP (1 << 27) 4200#define EDP_PSR_DEBUG_MASK_LPSP (1 << 27)
4196#define EDP_PSR_DEBUG_MASK_MEMUP (1 << 26) 4201#define EDP_PSR_DEBUG_MASK_MEMUP (1 << 26)
4197#define EDP_PSR_DEBUG_MASK_HPD (1 << 25) 4202#define EDP_PSR_DEBUG_MASK_HPD (1 << 25)
4198#define EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (1 << 16) 4203#define EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (1 << 16) /* Reserved in ICL+ */
4199#define EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1 << 15) /* SKL+ */ 4204#define EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1 << 15) /* SKL+ */
4200 4205
4201#define EDP_PSR2_CTL _MMIO(0x6f900) 4206#define EDP_PSR2_CTL _MMIO(0x6f900)
@@ -4232,7 +4237,7 @@ enum {
4232#define PSR_EVENT_FRONT_BUFFER_MODIFY (1 << 9) 4237#define PSR_EVENT_FRONT_BUFFER_MODIFY (1 << 9)
4233#define PSR_EVENT_WD_TIMER_EXPIRE (1 << 8) 4238#define PSR_EVENT_WD_TIMER_EXPIRE (1 << 8)
4234#define PSR_EVENT_PIPE_REGISTERS_UPDATE (1 << 6) 4239#define PSR_EVENT_PIPE_REGISTERS_UPDATE (1 << 6)
4235#define PSR_EVENT_REGISTER_UPDATE (1 << 5) 4240#define PSR_EVENT_REGISTER_UPDATE (1 << 5) /* Reserved in ICL+ */
4236#define PSR_EVENT_HDCP_ENABLE (1 << 4) 4241#define PSR_EVENT_HDCP_ENABLE (1 << 4)
4237#define PSR_EVENT_KVMR_SESSION_ENABLE (1 << 3) 4242#define PSR_EVENT_KVMR_SESSION_ENABLE (1 << 3)
4238#define PSR_EVENT_VBI_ENABLE (1 << 2) 4243#define PSR_EVENT_VBI_ENABLE (1 << 2)
@@ -4584,6 +4589,15 @@ enum {
4584#define VIDEO_DIP_FREQ_2VSYNC (2 << 16) 4589#define VIDEO_DIP_FREQ_2VSYNC (2 << 16)
4585#define VIDEO_DIP_FREQ_MASK (3 << 16) 4590#define VIDEO_DIP_FREQ_MASK (3 << 16)
4586/* HSW and later: */ 4591/* HSW and later: */
4592#define DRM_DIP_ENABLE (1 << 28)
4593#define PSR_VSC_BIT_7_SET (1 << 27)
4594#define VSC_SELECT_MASK (0x3 << 25)
4595#define VSC_SELECT_SHIFT 25
4596#define VSC_DIP_HW_HEA_DATA (0 << 25)
4597#define VSC_DIP_HW_HEA_SW_DATA (1 << 25)
4598#define VSC_DIP_HW_DATA_SW_HEA (2 << 25)
4599#define VSC_DIP_SW_HEA_DATA (3 << 25)
4600#define VDIP_ENABLE_PPS (1 << 24)
4587#define VIDEO_DIP_ENABLE_VSC_HSW (1 << 20) 4601#define VIDEO_DIP_ENABLE_VSC_HSW (1 << 20)
4588#define VIDEO_DIP_ENABLE_GCP_HSW (1 << 16) 4602#define VIDEO_DIP_ENABLE_GCP_HSW (1 << 16)
4589#define VIDEO_DIP_ENABLE_AVI_HSW (1 << 12) 4603#define VIDEO_DIP_ENABLE_AVI_HSW (1 << 12)
@@ -4591,16 +4605,6 @@ enum {
4591#define VIDEO_DIP_ENABLE_GMP_HSW (1 << 4) 4605#define VIDEO_DIP_ENABLE_GMP_HSW (1 << 4)
4592#define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0) 4606#define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0)
4593 4607
4594#define DRM_DIP_ENABLE (1 << 28)
4595#define PSR_VSC_BIT_7_SET (1 << 27)
4596#define VSC_SELECT_MASK (0x3 << 26)
4597#define VSC_SELECT_SHIFT 26
4598#define VSC_DIP_HW_HEA_DATA (0 << 26)
4599#define VSC_DIP_HW_HEA_SW_DATA (1 << 26)
4600#define VSC_DIP_HW_DATA_SW_HEA (2 << 26)
4601#define VSC_DIP_SW_HEA_DATA (3 << 26)
4602#define VDIP_ENABLE_PPS (1 << 24)
4603
4604/* Panel power sequencing */ 4608/* Panel power sequencing */
4605#define PPS_BASE 0x61200 4609#define PPS_BASE 0x61200
4606#define VLV_PPS_BASE (VLV_DISPLAY_BASE + PPS_BASE) 4610#define VLV_PPS_BASE (VLV_DISPLAY_BASE + PPS_BASE)
@@ -5636,9 +5640,9 @@ enum {
5636 */ 5640 */
5637#define PIPE_EDP_OFFSET 0x7f000 5641#define PIPE_EDP_OFFSET 0x7f000
5638 5642
5639#define _MMIO_PIPE2(pipe, reg) _MMIO(dev_priv->info.pipe_offsets[pipe] - \ 5643/* ICL DSI 0 and 1 */
5640 dev_priv->info.pipe_offsets[PIPE_A] + (reg) + \ 5644#define PIPE_DSI0_OFFSET 0x7b000
5641 dev_priv->info.display_mmio_offset) 5645#define PIPE_DSI1_OFFSET 0x7b800
5642 5646
5643#define PIPECONF(pipe) _MMIO_PIPE2(pipe, _PIPEACONF) 5647#define PIPECONF(pipe) _MMIO_PIPE2(pipe, _PIPEACONF)
5644#define PIPEDSL(pipe) _MMIO_PIPE2(pipe, _PIPEADSL) 5648#define PIPEDSL(pipe) _MMIO_PIPE2(pipe, _PIPEADSL)
@@ -6087,10 +6091,6 @@ enum {
6087#define _CURBBASE_IVB 0x71084 6091#define _CURBBASE_IVB 0x71084
6088#define _CURBPOS_IVB 0x71088 6092#define _CURBPOS_IVB 0x71088
6089 6093
6090#define _CURSOR2(pipe, reg) _MMIO(dev_priv->info.cursor_offsets[(pipe)] - \
6091 dev_priv->info.cursor_offsets[PIPE_A] + (reg) + \
6092 dev_priv->info.display_mmio_offset)
6093
6094#define CURCNTR(pipe) _CURSOR2(pipe, _CURACNTR) 6094#define CURCNTR(pipe) _CURSOR2(pipe, _CURACNTR)
6095#define CURBASE(pipe) _CURSOR2(pipe, _CURABASE) 6095#define CURBASE(pipe) _CURSOR2(pipe, _CURABASE)
6096#define CURPOS(pipe) _CURSOR2(pipe, _CURAPOS) 6096#define CURPOS(pipe) _CURSOR2(pipe, _CURAPOS)
@@ -6224,6 +6224,10 @@ enum {
6224#define _DSPBOFFSET (dev_priv->info.display_mmio_offset + 0x711A4) 6224#define _DSPBOFFSET (dev_priv->info.display_mmio_offset + 0x711A4)
6225#define _DSPBSURFLIVE (dev_priv->info.display_mmio_offset + 0x711AC) 6225#define _DSPBSURFLIVE (dev_priv->info.display_mmio_offset + 0x711AC)
6226 6226
6227/* ICL DSI 0 and 1 */
6228#define _PIPEDSI0CONF 0x7b008
6229#define _PIPEDSI1CONF 0x7b808
6230
6227/* Sprite A control */ 6231/* Sprite A control */
6228#define _DVSACNTR 0x72180 6232#define _DVSACNTR 0x72180
6229#define DVS_ENABLE (1 << 31) 6233#define DVS_ENABLE (1 << 31)
@@ -6511,6 +6515,7 @@ enum {
6511#define PLANE_CTL_KEY_ENABLE_DESTINATION (2 << 21) 6515#define PLANE_CTL_KEY_ENABLE_DESTINATION (2 << 21)
6512#define PLANE_CTL_ORDER_BGRX (0 << 20) 6516#define PLANE_CTL_ORDER_BGRX (0 << 20)
6513#define PLANE_CTL_ORDER_RGBX (1 << 20) 6517#define PLANE_CTL_ORDER_RGBX (1 << 20)
6518#define PLANE_CTL_YUV420_Y_PLANE (1 << 19)
6514#define PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709 (1 << 18) 6519#define PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709 (1 << 18)
6515#define PLANE_CTL_YUV422_ORDER_MASK (0x3 << 16) 6520#define PLANE_CTL_YUV422_ORDER_MASK (0x3 << 16)
6516#define PLANE_CTL_YUV422_YUYV (0 << 16) 6521#define PLANE_CTL_YUV422_YUYV (0 << 16)
@@ -6554,17 +6559,33 @@ enum {
6554#define _PLANE_KEYVAL_2_A 0x70294 6559#define _PLANE_KEYVAL_2_A 0x70294
6555#define _PLANE_KEYMSK_1_A 0x70198 6560#define _PLANE_KEYMSK_1_A 0x70198
6556#define _PLANE_KEYMSK_2_A 0x70298 6561#define _PLANE_KEYMSK_2_A 0x70298
6562#define PLANE_KEYMSK_ALPHA_ENABLE (1 << 31)
6557#define _PLANE_KEYMAX_1_A 0x701a0 6563#define _PLANE_KEYMAX_1_A 0x701a0
6558#define _PLANE_KEYMAX_2_A 0x702a0 6564#define _PLANE_KEYMAX_2_A 0x702a0
6565#define PLANE_KEYMAX_ALPHA(a) ((a) << 24)
6559#define _PLANE_AUX_DIST_1_A 0x701c0 6566#define _PLANE_AUX_DIST_1_A 0x701c0
6560#define _PLANE_AUX_DIST_2_A 0x702c0 6567#define _PLANE_AUX_DIST_2_A 0x702c0
6561#define _PLANE_AUX_OFFSET_1_A 0x701c4 6568#define _PLANE_AUX_OFFSET_1_A 0x701c4
6562#define _PLANE_AUX_OFFSET_2_A 0x702c4 6569#define _PLANE_AUX_OFFSET_2_A 0x702c4
6570#define _PLANE_CUS_CTL_1_A 0x701c8
6571#define _PLANE_CUS_CTL_2_A 0x702c8
6572#define PLANE_CUS_ENABLE (1 << 31)
6573#define PLANE_CUS_PLANE_6 (0 << 30)
6574#define PLANE_CUS_PLANE_7 (1 << 30)
6575#define PLANE_CUS_HPHASE_SIGN_NEGATIVE (1 << 19)
6576#define PLANE_CUS_HPHASE_0 (0 << 16)
6577#define PLANE_CUS_HPHASE_0_25 (1 << 16)
6578#define PLANE_CUS_HPHASE_0_5 (2 << 16)
6579#define PLANE_CUS_VPHASE_SIGN_NEGATIVE (1 << 15)
6580#define PLANE_CUS_VPHASE_0 (0 << 12)
6581#define PLANE_CUS_VPHASE_0_25 (1 << 12)
6582#define PLANE_CUS_VPHASE_0_5 (2 << 12)
6563#define _PLANE_COLOR_CTL_1_A 0x701CC /* GLK+ */ 6583#define _PLANE_COLOR_CTL_1_A 0x701CC /* GLK+ */
6564#define _PLANE_COLOR_CTL_2_A 0x702CC /* GLK+ */ 6584#define _PLANE_COLOR_CTL_2_A 0x702CC /* GLK+ */
6565#define _PLANE_COLOR_CTL_3_A 0x703CC /* GLK+ */ 6585#define _PLANE_COLOR_CTL_3_A 0x703CC /* GLK+ */
6566#define PLANE_COLOR_PIPE_GAMMA_ENABLE (1 << 30) /* Pre-ICL */ 6586#define PLANE_COLOR_PIPE_GAMMA_ENABLE (1 << 30) /* Pre-ICL */
6567#define PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE (1 << 28) 6587#define PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE (1 << 28)
6588#define PLANE_COLOR_INPUT_CSC_ENABLE (1 << 20) /* ICL+ */
6568#define PLANE_COLOR_PIPE_CSC_ENABLE (1 << 23) /* Pre-ICL */ 6589#define PLANE_COLOR_PIPE_CSC_ENABLE (1 << 23) /* Pre-ICL */
6569#define PLANE_COLOR_CSC_MODE_BYPASS (0 << 17) 6590#define PLANE_COLOR_CSC_MODE_BYPASS (0 << 17)
6570#define PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709 (1 << 17) 6591#define PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709 (1 << 17)
@@ -6581,6 +6602,55 @@ enum {
6581#define _PLANE_NV12_BUF_CFG_1_A 0x70278 6602#define _PLANE_NV12_BUF_CFG_1_A 0x70278
6582#define _PLANE_NV12_BUF_CFG_2_A 0x70378 6603#define _PLANE_NV12_BUF_CFG_2_A 0x70378
6583 6604
6605/* Input CSC Register Definitions */
6606#define _PLANE_INPUT_CSC_RY_GY_1_A 0x701E0
6607#define _PLANE_INPUT_CSC_RY_GY_2_A 0x702E0
6608
6609#define _PLANE_INPUT_CSC_RY_GY_1_B 0x711E0
6610#define _PLANE_INPUT_CSC_RY_GY_2_B 0x712E0
6611
6612#define _PLANE_INPUT_CSC_RY_GY_1(pipe) \
6613 _PIPE(pipe, _PLANE_INPUT_CSC_RY_GY_1_A, \
6614 _PLANE_INPUT_CSC_RY_GY_1_B)
6615#define _PLANE_INPUT_CSC_RY_GY_2(pipe) \
6616 _PIPE(pipe, _PLANE_INPUT_CSC_RY_GY_2_A, \
6617 _PLANE_INPUT_CSC_RY_GY_2_B)
6618
6619#define PLANE_INPUT_CSC_COEFF(pipe, plane, index) \
6620 _MMIO_PLANE(plane, _PLANE_INPUT_CSC_RY_GY_1(pipe) + (index) * 4, \
6621 _PLANE_INPUT_CSC_RY_GY_2(pipe) + (index) * 4)
6622
6623#define _PLANE_INPUT_CSC_PREOFF_HI_1_A 0x701F8
6624#define _PLANE_INPUT_CSC_PREOFF_HI_2_A 0x702F8
6625
6626#define _PLANE_INPUT_CSC_PREOFF_HI_1_B 0x711F8
6627#define _PLANE_INPUT_CSC_PREOFF_HI_2_B 0x712F8
6628
6629#define _PLANE_INPUT_CSC_PREOFF_HI_1(pipe) \
6630 _PIPE(pipe, _PLANE_INPUT_CSC_PREOFF_HI_1_A, \
6631 _PLANE_INPUT_CSC_PREOFF_HI_1_B)
6632#define _PLANE_INPUT_CSC_PREOFF_HI_2(pipe) \
6633 _PIPE(pipe, _PLANE_INPUT_CSC_PREOFF_HI_2_A, \
6634 _PLANE_INPUT_CSC_PREOFF_HI_2_B)
6635#define PLANE_INPUT_CSC_PREOFF(pipe, plane, index) \
6636 _MMIO_PLANE(plane, _PLANE_INPUT_CSC_PREOFF_HI_1(pipe) + (index) * 4, \
6637 _PLANE_INPUT_CSC_PREOFF_HI_2(pipe) + (index) * 4)
6638
6639#define _PLANE_INPUT_CSC_POSTOFF_HI_1_A 0x70204
6640#define _PLANE_INPUT_CSC_POSTOFF_HI_2_A 0x70304
6641
6642#define _PLANE_INPUT_CSC_POSTOFF_HI_1_B 0x71204
6643#define _PLANE_INPUT_CSC_POSTOFF_HI_2_B 0x71304
6644
6645#define _PLANE_INPUT_CSC_POSTOFF_HI_1(pipe) \
6646 _PIPE(pipe, _PLANE_INPUT_CSC_POSTOFF_HI_1_A, \
6647 _PLANE_INPUT_CSC_POSTOFF_HI_1_B)
6648#define _PLANE_INPUT_CSC_POSTOFF_HI_2(pipe) \
6649 _PIPE(pipe, _PLANE_INPUT_CSC_POSTOFF_HI_2_A, \
6650 _PLANE_INPUT_CSC_POSTOFF_HI_2_B)
6651#define PLANE_INPUT_CSC_POSTOFF(pipe, plane, index) \
6652 _MMIO_PLANE(plane, _PLANE_INPUT_CSC_POSTOFF_HI_1(pipe) + (index) * 4, \
6653 _PLANE_INPUT_CSC_POSTOFF_HI_2(pipe) + (index) * 4)
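A sketch of how the three-level input CSC macros address the hardware: pipe picks the A/B instance, plane picks the _1/_2 copy, and index steps through consecutive dwords. The helper name and coefficient arrays are hypothetical; the counts of 6 coefficients and 3 pre-offsets follow from the register spacing above, and 3 post-offsets are assumed symmetric.

	static void icl_load_input_csc(struct drm_i915_private *dev_priv,
				       enum pipe pipe, enum plane_id plane,
				       const u32 *coeff, const u32 *pre,
				       const u32 *post)
	{
		int i;

		for (i = 0; i < 6; i++)
			I915_WRITE(PLANE_INPUT_CSC_COEFF(pipe, plane, i), coeff[i]);
		for (i = 0; i < 3; i++)
			I915_WRITE(PLANE_INPUT_CSC_PREOFF(pipe, plane, i), pre[i]);
		for (i = 0; i < 3; i++)
			I915_WRITE(PLANE_INPUT_CSC_POSTOFF(pipe, plane, i), post[i]);
	}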
6584 6654
6585#define _PLANE_CTL_1_B 0x71180 6655#define _PLANE_CTL_1_B 0x71180
6586#define _PLANE_CTL_2_B 0x71280 6656#define _PLANE_CTL_2_B 0x71280
@@ -6697,6 +6767,15 @@ enum {
6697#define PLANE_AUX_OFFSET(pipe, plane) \ 6767#define PLANE_AUX_OFFSET(pipe, plane) \
6698 _MMIO_PLANE(plane, _PLANE_AUX_OFFSET_1(pipe), _PLANE_AUX_OFFSET_2(pipe)) 6768 _MMIO_PLANE(plane, _PLANE_AUX_OFFSET_1(pipe), _PLANE_AUX_OFFSET_2(pipe))
6699 6769
6770#define _PLANE_CUS_CTL_1_B 0x711c8
6771#define _PLANE_CUS_CTL_2_B 0x712c8
6772#define _PLANE_CUS_CTL_1(pipe) \
6773 _PIPE(pipe, _PLANE_CUS_CTL_1_A, _PLANE_CUS_CTL_1_B)
6774#define _PLANE_CUS_CTL_2(pipe) \
6775 _PIPE(pipe, _PLANE_CUS_CTL_2_A, _PLANE_CUS_CTL_2_B)
6776#define PLANE_CUS_CTL(pipe, plane) \
6777 _MMIO_PLANE(plane, _PLANE_CUS_CTL_1(pipe), _PLANE_CUS_CTL_2(pipe))
6778
6700#define _PLANE_COLOR_CTL_1_B 0x711CC 6779#define _PLANE_COLOR_CTL_1_B 0x711CC
6701#define _PLANE_COLOR_CTL_2_B 0x712CC 6780#define _PLANE_COLOR_CTL_2_B 0x712CC
6702#define _PLANE_COLOR_CTL_3_B 0x713CC 6781#define _PLANE_COLOR_CTL_3_B 0x713CC
@@ -6850,11 +6929,12 @@ enum {
6850#define _PS_2B_CTRL 0x68A80 6929#define _PS_2B_CTRL 0x68A80
6851#define _PS_1C_CTRL 0x69180 6930#define _PS_1C_CTRL 0x69180
6852#define PS_SCALER_EN (1 << 31) 6931#define PS_SCALER_EN (1 << 31)
6853#define PS_SCALER_MODE_MASK (3 << 28) 6932#define SKL_PS_SCALER_MODE_MASK (3 << 28)
6854#define PS_SCALER_MODE_DYN (0 << 28) 6933#define SKL_PS_SCALER_MODE_DYN (0 << 28)
6855#define PS_SCALER_MODE_HQ (1 << 28) 6934#define SKL_PS_SCALER_MODE_HQ (1 << 28)
6856#define SKL_PS_SCALER_MODE_NV12 (2 << 28) 6935#define SKL_PS_SCALER_MODE_NV12 (2 << 28)
6857#define PS_SCALER_MODE_PLANAR (1 << 29) 6936#define PS_SCALER_MODE_PLANAR (1 << 29)
6937#define PS_SCALER_MODE_NORMAL (0 << 29)
6858#define PS_PLANE_SEL_MASK (7 << 25) 6938#define PS_PLANE_SEL_MASK (7 << 25)
6859#define PS_PLANE_SEL(plane) (((plane) + 1) << 25) 6939#define PS_PLANE_SEL(plane) (((plane) + 1) << 25)
6860#define PS_FILTER_MASK (3 << 23) 6940#define PS_FILTER_MASK (3 << 23)
@@ -6871,6 +6951,8 @@ enum {
6871#define PS_VADAPT_MODE_LEAST_ADAPT (0 << 5) 6951#define PS_VADAPT_MODE_LEAST_ADAPT (0 << 5)
6872#define PS_VADAPT_MODE_MOD_ADAPT (1 << 5) 6952#define PS_VADAPT_MODE_MOD_ADAPT (1 << 5)
6873#define PS_VADAPT_MODE_MOST_ADAPT (3 << 5) 6953#define PS_VADAPT_MODE_MOST_ADAPT (3 << 5)
6954#define PS_PLANE_Y_SEL_MASK (7 << 5)
6955#define PS_PLANE_Y_SEL(plane) (((plane) + 1) << 5)
6874 6956
6875#define _PS_PWR_GATE_1A 0x68160 6957#define _PS_PWR_GATE_1A 0x68160
6876#define _PS_PWR_GATE_2A 0x68260 6958#define _PS_PWR_GATE_2A 0x68260
@@ -7317,9 +7399,10 @@ enum {
7317#define BDW_DPRS_MASK_VBLANK_SRD (1 << 0) 7399#define BDW_DPRS_MASK_VBLANK_SRD (1 << 0)
7318#define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B) 7400#define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
7319 7401
7320#define CHICKEN_TRANS_A 0x420c0 7402#define CHICKEN_TRANS_A _MMIO(0x420c0)
7321#define CHICKEN_TRANS_B 0x420c4 7403#define CHICKEN_TRANS_B _MMIO(0x420c4)
7322#define CHICKEN_TRANS(trans) _MMIO_TRANS(trans, CHICKEN_TRANS_A, CHICKEN_TRANS_B) 7404#define CHICKEN_TRANS_C _MMIO(0x420c8)
7405#define CHICKEN_TRANS_EDP _MMIO(0x420cc)
7323#define VSC_DATA_SEL_SOFTWARE_CONTROL (1 << 25) /* GLK and CNL+ */ 7406#define VSC_DATA_SEL_SOFTWARE_CONTROL (1 << 25) /* GLK and CNL+ */
7324#define DDI_TRAINING_OVERRIDE_ENABLE (1 << 19) 7407#define DDI_TRAINING_OVERRIDE_ENABLE (1 << 19)
7325#define DDI_TRAINING_OVERRIDE_VALUE (1 << 18) 7408#define DDI_TRAINING_OVERRIDE_VALUE (1 << 18)
@@ -7409,6 +7492,10 @@ enum {
7409#define GEN9_SLICE_COMMON_ECO_CHICKEN1 _MMIO(0x731c) 7492#define GEN9_SLICE_COMMON_ECO_CHICKEN1 _MMIO(0x731c)
7410#define GEN11_STATE_CACHE_REDIRECT_TO_CS (1 << 11) 7493#define GEN11_STATE_CACHE_REDIRECT_TO_CS (1 << 11)
7411 7494
7495#define GEN7_SARCHKMD _MMIO(0xB000)
7496#define GEN7_DISABLE_DEMAND_PREFETCH (1 << 31)
7497#define GEN7_DISABLE_SAMPLER_PREFETCH (1 << 30)
7498
7412#define GEN7_L3SQCREG1 _MMIO(0xB010) 7499#define GEN7_L3SQCREG1 _MMIO(0xB010)
7413#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000 7500#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
7414 7501
@@ -7824,8 +7911,7 @@ enum {
7824#define CNP_RAWCLK_DIV_MASK (0x3ff << 16) 7911#define CNP_RAWCLK_DIV_MASK (0x3ff << 16)
7825#define CNP_RAWCLK_DIV(div) ((div) << 16) 7912#define CNP_RAWCLK_DIV(div) ((div) << 16)
7826#define CNP_RAWCLK_FRAC_MASK (0xf << 26) 7913#define CNP_RAWCLK_FRAC_MASK (0xf << 26)
7827#define CNP_RAWCLK_FRAC(frac) ((frac) << 26) 7914#define CNP_RAWCLK_DEN(den) ((den) << 26)
7828#define ICP_RAWCLK_DEN(den) ((den) << 26)
7829#define ICP_RAWCLK_NUM(num) ((num) << 11) 7915#define ICP_RAWCLK_NUM(num) ((num) << 11)
7830 7916
7831#define PCH_DPLL_TMR_CFG _MMIO(0xc6208) 7917#define PCH_DPLL_TMR_CFG _MMIO(0xc6208)
@@ -8625,8 +8711,7 @@ enum {
8625#define GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC (1 << 9) 8711#define GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC (1 << 9)
8626#define GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC (1 << 7) 8712#define GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC (1 << 7)
8627 8713
8628#define GAMW_ECO_DEV_RW_IA_REG _MMIO(0x4080) 8714#define GEN10_SAMPLER_MODE _MMIO(0xE18C)
8629#define GAMW_ECO_DEV_CTX_RELOAD_DISABLE (1 << 7)
8630 8715
8631/* IVYBRIDGE DPF */ 8716/* IVYBRIDGE DPF */
8632#define GEN7_L3CDERRST1(slice) _MMIO(0xB008 + (slice) * 0x200) /* L3CD Error Status 1 */ 8717#define GEN7_L3CDERRST1(slice) _MMIO(0xB008 + (slice) * 0x200) /* L3CD Error Status 1 */
@@ -8927,6 +9012,15 @@ enum skl_power_gate {
8927#define CNL_AUX_ANAOVRD1_ENABLE (1 << 16) 9012#define CNL_AUX_ANAOVRD1_ENABLE (1 << 16)
8928#define CNL_AUX_ANAOVRD1_LDO_BYPASS (1 << 23) 9013#define CNL_AUX_ANAOVRD1_LDO_BYPASS (1 << 23)
8929 9014
9015#define _ICL_AUX_REG_IDX(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
9016#define _ICL_AUX_ANAOVRD1_A 0x162398
9017#define _ICL_AUX_ANAOVRD1_B 0x6C398
9018#define ICL_AUX_ANAOVRD1(pw_idx) _MMIO(_PICK(_ICL_AUX_REG_IDX(pw_idx), \
9019 _ICL_AUX_ANAOVRD1_A, \
9020 _ICL_AUX_ANAOVRD1_B))
9021#define ICL_AUX_ANAOVRD1_LDO_BYPASS (1 << 7)
9022#define ICL_AUX_ANAOVRD1_ENABLE (1 << 0)
9023
8930/* HDCP Key Registers */ 9024/* HDCP Key Registers */
8931#define HDCP_KEY_CONF _MMIO(0x66c00) 9025#define HDCP_KEY_CONF _MMIO(0x66c00)
8932#define HDCP_AKSV_SEND_TRIGGER BIT(31) 9026#define HDCP_AKSV_SEND_TRIGGER BIT(31)
@@ -9009,11 +9103,45 @@ enum skl_power_gate {
9009#define HDCP_STATUS_CIPHER BIT(16) 9103#define HDCP_STATUS_CIPHER BIT(16)
9010#define HDCP_STATUS_FRAME_CNT(x) (((x) >> 8) & 0xff) 9104#define HDCP_STATUS_FRAME_CNT(x) (((x) >> 8) & 0xff)
9011 9105
9106/* HDCP2.2 Registers */
9107#define _PORTA_HDCP2_BASE 0x66800
9108#define _PORTB_HDCP2_BASE 0x66500
9109#define _PORTC_HDCP2_BASE 0x66600
9110#define _PORTD_HDCP2_BASE 0x66700
9111#define _PORTE_HDCP2_BASE 0x66A00
9112#define _PORTF_HDCP2_BASE 0x66900
9113#define _PORT_HDCP2_BASE(port, x) _MMIO(_PICK((port), \
9114 _PORTA_HDCP2_BASE, \
9115 _PORTB_HDCP2_BASE, \
9116 _PORTC_HDCP2_BASE, \
9117 _PORTD_HDCP2_BASE, \
9118 _PORTE_HDCP2_BASE, \
9119 _PORTF_HDCP2_BASE) + (x))
9120
9121#define HDCP2_AUTH_DDI(port) _PORT_HDCP2_BASE(port, 0x98)
9122#define AUTH_LINK_AUTHENTICATED BIT(31)
9123#define AUTH_LINK_TYPE BIT(30)
9124#define AUTH_FORCE_CLR_INPUTCTR BIT(19)
9125#define AUTH_CLR_KEYS BIT(18)
9126
9127#define HDCP2_CTL_DDI(port) _PORT_HDCP2_BASE(port, 0xB0)
9128#define CTL_LINK_ENCRYPTION_REQ BIT(31)
9129
9130#define HDCP2_STATUS_DDI(port) _PORT_HDCP2_BASE(port, 0xB4)
9131#define STREAM_ENCRYPTION_STATUS_A BIT(31)
9132#define STREAM_ENCRYPTION_STATUS_B BIT(30)
9133#define STREAM_ENCRYPTION_STATUS_C BIT(29)
9134#define LINK_TYPE_STATUS BIT(22)
9135#define LINK_AUTH_STATUS BIT(21)
9136#define LINK_ENCRYPTION_STATUS BIT(20)
9137
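A sketch of the new HDCP2.2 addressing: _PORT_HDCP2_BASE() picks the per-DDI base with _PICK() and adds the register offset, so for example (assumes dev_priv and port in scope):

	/* Illustrative only:
	 *   HDCP2_STATUS_DDI(PORT_A) == _MMIO(0x66800 + 0xB4)
	 *   HDCP2_STATUS_DDI(PORT_B) == _MMIO(0x66500 + 0xB4)
	 */
	bool encrypted = I915_READ(HDCP2_STATUS_DDI(port)) &
			 LINK_ENCRYPTION_STATUS;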
9012/* Per-pipe DDI Function Control */ 9138/* Per-pipe DDI Function Control */
9013#define _TRANS_DDI_FUNC_CTL_A 0x60400 9139#define _TRANS_DDI_FUNC_CTL_A 0x60400
9014#define _TRANS_DDI_FUNC_CTL_B 0x61400 9140#define _TRANS_DDI_FUNC_CTL_B 0x61400
9015#define _TRANS_DDI_FUNC_CTL_C 0x62400 9141#define _TRANS_DDI_FUNC_CTL_C 0x62400
9016#define _TRANS_DDI_FUNC_CTL_EDP 0x6F400 9142#define _TRANS_DDI_FUNC_CTL_EDP 0x6F400
9143#define _TRANS_DDI_FUNC_CTL_DSI0 0x6b400
9144#define _TRANS_DDI_FUNC_CTL_DSI1 0x6bc00
9017#define TRANS_DDI_FUNC_CTL(tran) _MMIO_TRANS2(tran, _TRANS_DDI_FUNC_CTL_A) 9145#define TRANS_DDI_FUNC_CTL(tran) _MMIO_TRANS2(tran, _TRANS_DDI_FUNC_CTL_A)
9018 9146
9019#define TRANS_DDI_FUNC_ENABLE (1 << 31) 9147#define TRANS_DDI_FUNC_ENABLE (1 << 31)
@@ -9051,6 +9179,19 @@ enum skl_power_gate {
9051 | TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ \ 9179 | TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ \
9052 | TRANS_DDI_HDMI_SCRAMBLING) 9180 | TRANS_DDI_HDMI_SCRAMBLING)
9053 9181
9182#define _TRANS_DDI_FUNC_CTL2_A 0x60404
9183#define _TRANS_DDI_FUNC_CTL2_B 0x61404
9184#define _TRANS_DDI_FUNC_CTL2_C 0x62404
9185#define _TRANS_DDI_FUNC_CTL2_EDP 0x6f404
9186#define _TRANS_DDI_FUNC_CTL2_DSI0 0x6b404
9187#define _TRANS_DDI_FUNC_CTL2_DSI1 0x6bc04
9188#define TRANS_DDI_FUNC_CTL2(tran) _MMIO_TRANS2(tran, \
9189 _TRANS_DDI_FUNC_CTL2_A)
9190#define PORT_SYNC_MODE_ENABLE (1 << 4)
9190#define PORT_SYNC_MODE_MASTER_SELECT(x) ((x) << 0)
9192#define PORT_SYNC_MODE_MASTER_SELECT_MASK (0x7 << 0)
9193#define PORT_SYNC_MODE_MASTER_SELECT_SHIFT 0
9194
9054/* DisplayPort Transport Control */ 9195/* DisplayPort Transport Control */
9055#define _DP_TP_CTL_A 0x64040 9196#define _DP_TP_CTL_A 0x64040
9056#define _DP_TP_CTL_B 0x64140 9197#define _DP_TP_CTL_B 0x64140
@@ -9222,6 +9363,8 @@ enum skl_power_gate {
9222#define TRANS_MSA_MISC(tran) _MMIO_TRANS2(tran, _TRANSA_MSA_MISC) 9363#define TRANS_MSA_MISC(tran) _MMIO_TRANS2(tran, _TRANSA_MSA_MISC)
9223 9364
9224#define TRANS_MSA_SYNC_CLK (1 << 0) 9365#define TRANS_MSA_SYNC_CLK (1 << 0)
9366#define TRANS_MSA_SAMPLING_444 (2 << 1)
9367#define TRANS_MSA_CLRSP_YCBCR (2 << 3)
9225#define TRANS_MSA_6_BPC (0 << 5) 9368#define TRANS_MSA_6_BPC (0 << 5)
9226#define TRANS_MSA_8_BPC (1 << 5) 9369#define TRANS_MSA_8_BPC (1 << 5)
9227#define TRANS_MSA_10_BPC (2 << 5) 9370#define TRANS_MSA_10_BPC (2 << 5)
@@ -9789,6 +9932,10 @@ enum skl_power_gate {
9789#define _MIPI_PORT(port, a, c) (((port) == PORT_A) ? a : c) /* ports A and C only */ 9932#define _MIPI_PORT(port, a, c) (((port) == PORT_A) ? a : c) /* ports A and C only */
9790#define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c)) 9933#define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c))
9791 9934
9935/* Gen11 DSI */
9936#define _MMIO_DSI(tc, dsi0, dsi1) _MMIO_TRANS((tc) - TRANSCODER_DSI_0, \
9937 dsi0, dsi1)
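A sketch of the new Gen11 DSI indexer: the transcoder is rebased so TRANSCODER_DSI_0/1 become 0/1, then _MMIO_TRANS() interpolates between the two register instances, matching the 0x800 stride of the DSI register blocks defined further down.

	/* Illustrative only, tc being an enum transcoder:
	 *   _MMIO_DSI(TRANSCODER_DSI_0, 0x6b030, 0x6b830) == _MMIO(0x6b030)
	 *   _MMIO_DSI(TRANSCODER_DSI_1, 0x6b030, 0x6b830) == _MMIO(0x6b830)
	 */
	i915_reg_t conf = _MMIO_DSI(tc, 0x6b030, 0x6b830);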
9938
9792#define MIPIO_TXESC_CLK_DIV1 _MMIO(0x160004) 9939#define MIPIO_TXESC_CLK_DIV1 _MMIO(0x160004)
9793#define GLK_TX_ESC_CLK_DIV1_MASK 0x3FF 9940#define GLK_TX_ESC_CLK_DIV1_MASK 0x3FF
9794#define MIPIO_TXESC_CLK_DIV2 _MMIO(0x160008) 9941#define MIPIO_TXESC_CLK_DIV2 _MMIO(0x160008)
@@ -9952,6 +10099,39 @@ enum skl_power_gate {
9952 _ICL_DSI_IO_MODECTL_1) 10099 _ICL_DSI_IO_MODECTL_1)
9953#define COMBO_PHY_MODE_DSI (1 << 0) 10100#define COMBO_PHY_MODE_DSI (1 << 0)
9954 10101
10102/* Display Stream Splitter Control */
10103#define DSS_CTL1 _MMIO(0x67400)
10104#define SPLITTER_ENABLE (1 << 31)
10105#define JOINER_ENABLE (1 << 30)
10106#define DUAL_LINK_MODE_INTERLEAVE (1 << 24)
10107#define DUAL_LINK_MODE_FRONTBACK (0 << 24)
10108#define OVERLAP_PIXELS_MASK (0xf << 16)
10109#define OVERLAP_PIXELS(pixels) ((pixels) << 16)
10110#define LEFT_DL_BUF_TARGET_DEPTH_MASK (0xfff << 0)
10111#define LEFT_DL_BUF_TARGET_DEPTH(pixels) ((pixels) << 0)
10112#define MAX_DL_BUFFER_TARGET_DEPTH 0x5a0
10113
10114#define DSS_CTL2 _MMIO(0x67404)
10115#define LEFT_BRANCH_VDSC_ENABLE (1 << 31)
10116#define RIGHT_BRANCH_VDSC_ENABLE (1 << 15)
10117#define RIGHT_DL_BUF_TARGET_DEPTH_MASK (0xfff << 0)
10118#define RIGHT_DL_BUF_TARGET_DEPTH(pixels) ((pixels) << 0)
10119
10120#define _ICL_PIPE_DSS_CTL1_PB 0x78200
10121#define _ICL_PIPE_DSS_CTL1_PC 0x78400
10122#define ICL_PIPE_DSS_CTL1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
10123 _ICL_PIPE_DSS_CTL1_PB, \
10124 _ICL_PIPE_DSS_CTL1_PC)
10125#define BIG_JOINER_ENABLE (1 << 29)
10126#define MASTER_BIG_JOINER_ENABLE (1 << 28)
10127#define VGA_CENTERING_ENABLE (1 << 27)
10128
10129#define _ICL_PIPE_DSS_CTL2_PB 0x78204
10130#define _ICL_PIPE_DSS_CTL2_PC 0x78404
10131#define ICL_PIPE_DSS_CTL2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
10132 _ICL_PIPE_DSS_CTL2_PB, \
10133 _ICL_PIPE_DSS_CTL2_PC)
10134
9955#define BXT_P_DSI_REGULATOR_CFG _MMIO(0x160020) 10135#define BXT_P_DSI_REGULATOR_CFG _MMIO(0x160020)
9956#define STAP_SELECT (1 << 0) 10136#define STAP_SELECT (1 << 0)
9957 10137
@@ -10288,6 +10468,235 @@ enum skl_power_gate {
10288 _ICL_DSI_T_INIT_MASTER_0,\ 10468 _ICL_DSI_T_INIT_MASTER_0,\
10289 _ICL_DSI_T_INIT_MASTER_1) 10469 _ICL_DSI_T_INIT_MASTER_1)
10290 10470
10471#define _DPHY_CLK_TIMING_PARAM_0 0x162180
10472#define _DPHY_CLK_TIMING_PARAM_1 0x6c180
10473#define DPHY_CLK_TIMING_PARAM(port) _MMIO_PORT(port, \
10474 _DPHY_CLK_TIMING_PARAM_0,\
10475 _DPHY_CLK_TIMING_PARAM_1)
10476#define _DSI_CLK_TIMING_PARAM_0 0x6b080
10477#define _DSI_CLK_TIMING_PARAM_1 0x6b880
10478#define DSI_CLK_TIMING_PARAM(port) _MMIO_PORT(port, \
10479 _DSI_CLK_TIMING_PARAM_0,\
10480 _DSI_CLK_TIMING_PARAM_1)
10481#define CLK_PREPARE_OVERRIDE (1 << 31)
10482#define CLK_PREPARE(x) ((x) << 28)
10483#define CLK_PREPARE_MASK (0x7 << 28)
10484#define CLK_PREPARE_SHIFT 28
10485#define CLK_ZERO_OVERRIDE (1 << 27)
10486#define CLK_ZERO(x) ((x) << 20)
10487#define CLK_ZERO_MASK (0xf << 20)
10488#define CLK_ZERO_SHIFT 20
10489#define CLK_PRE_OVERRIDE (1 << 19)
10490#define CLK_PRE(x) ((x) << 16)
10491#define CLK_PRE_MASK (0x3 << 16)
10492#define CLK_PRE_SHIFT 16
10493#define CLK_POST_OVERRIDE (1 << 15)
10494#define CLK_POST(x) ((x) << 8)
10495#define CLK_POST_MASK (0x7 << 8)
10496#define CLK_POST_SHIFT 8
10497#define CLK_TRAIL_OVERRIDE (1 << 7)
10498#define CLK_TRAIL(x) ((x) << 0)
10499#define CLK_TRAIL_MASK (0xf << 0)
10500#define CLK_TRAIL_SHIFT 0
10501
10502#define _DPHY_DATA_TIMING_PARAM_0 0x162184
10503#define _DPHY_DATA_TIMING_PARAM_1 0x6c184
10504#define DPHY_DATA_TIMING_PARAM(port) _MMIO_PORT(port, \
10505 _DPHY_DATA_TIMING_PARAM_0,\
10506 _DPHY_DATA_TIMING_PARAM_1)
10507#define _DSI_DATA_TIMING_PARAM_0 0x6B084
10508#define _DSI_DATA_TIMING_PARAM_1 0x6B884
10509#define DSI_DATA_TIMING_PARAM(port) _MMIO_PORT(port, \
10510 _DSI_DATA_TIMING_PARAM_0,\
10511 _DSI_DATA_TIMING_PARAM_1)
10512#define HS_PREPARE_OVERRIDE (1 << 31)
10513#define HS_PREPARE(x) ((x) << 24)
10514#define HS_PREPARE_MASK (0x7 << 24)
10515#define HS_PREPARE_SHIFT 24
10516#define HS_ZERO_OVERRIDE (1 << 23)
10517#define HS_ZERO(x) ((x) << 16)
10518#define HS_ZERO_MASK (0xf << 16)
10519#define HS_ZERO_SHIFT 16
10520#define HS_TRAIL_OVERRIDE (1 << 15)
10521#define HS_TRAIL(x) ((x) << 8)
10522#define HS_TRAIL_MASK (0x7 << 8)
10523#define HS_TRAIL_SHIFT 8
10524#define HS_EXIT_OVERRIDE (1 << 7)
10525#define HS_EXIT(x) ((x) << 0)
10526#define HS_EXIT_MASK (0x7 << 0)
10527#define HS_EXIT_SHIFT 0
10528
10529#define _DPHY_TA_TIMING_PARAM_0 0x162188
10530#define _DPHY_TA_TIMING_PARAM_1 0x6c188
10531#define DPHY_TA_TIMING_PARAM(port) _MMIO_PORT(port, \
10532 _DPHY_TA_TIMING_PARAM_0,\
10533 _DPHY_TA_TIMING_PARAM_1)
10534#define _DSI_TA_TIMING_PARAM_0 0x6b098
10535#define _DSI_TA_TIMING_PARAM_1 0x6b898
10536#define DSI_TA_TIMING_PARAM(port) _MMIO_PORT(port, \
10537 _DSI_TA_TIMING_PARAM_0,\
10538 _DSI_TA_TIMING_PARAM_1)
10539#define TA_SURE_OVERRIDE (1 << 31)
10540#define TA_SURE(x) ((x) << 16)
10541#define TA_SURE_MASK (0x1f << 16)
10542#define TA_SURE_SHIFT 16
10543#define TA_GO_OVERRIDE (1 << 15)
10544#define TA_GO(x) ((x) << 8)
10545#define TA_GO_MASK (0xf << 8)
10546#define TA_GO_SHIFT 8
10547#define TA_GET_OVERRIDE (1 << 7)
10548#define TA_GET(x) ((x) << 0)
10549#define TA_GET_MASK (0xf << 0)
10550#define TA_GET_SHIFT 0
10551
10552/* DSI transcoder configuration */
10553#define _DSI_TRANS_FUNC_CONF_0 0x6b030
10554#define _DSI_TRANS_FUNC_CONF_1 0x6b830
10555#define DSI_TRANS_FUNC_CONF(tc) _MMIO_DSI(tc, \
10556 _DSI_TRANS_FUNC_CONF_0,\
10557 _DSI_TRANS_FUNC_CONF_1)
10558#define OP_MODE_MASK (0x3 << 28)
10559#define OP_MODE_SHIFT 28
10560#define CMD_MODE_NO_GATE (0x0 << 28)
10561#define CMD_MODE_TE_GATE (0x1 << 28)
10562#define VIDEO_MODE_SYNC_EVENT (0x2 << 28)
10563#define VIDEO_MODE_SYNC_PULSE (0x3 << 28)
10564#define LINK_READY (1 << 20)
10565#define PIX_FMT_MASK (0x3 << 16)
10566#define PIX_FMT_SHIFT 16
10567#define PIX_FMT_RGB565 (0x0 << 16)
10568#define PIX_FMT_RGB666_PACKED (0x1 << 16)
10569#define PIX_FMT_RGB666_LOOSE (0x2 << 16)
10570#define PIX_FMT_RGB888 (0x3 << 16)
10571#define PIX_FMT_RGB101010 (0x4 << 16)
10572#define PIX_FMT_RGB121212 (0x5 << 16)
10573#define PIX_FMT_COMPRESSED (0x6 << 16)
10574#define BGR_TRANSMISSION (1 << 15)
10575#define PIX_VIRT_CHAN(x) ((x) << 12)
10576#define PIX_VIRT_CHAN_MASK (0x3 << 12)
10577#define PIX_VIRT_CHAN_SHIFT 12
10578#define PIX_BUF_THRESHOLD_MASK (0x3 << 10)
10579#define PIX_BUF_THRESHOLD_SHIFT 10
10580#define PIX_BUF_THRESHOLD_1_4 (0x0 << 10)
10581#define PIX_BUF_THRESHOLD_1_2 (0x1 << 10)
10582#define PIX_BUF_THRESHOLD_3_4 (0x2 << 10)
10583#define PIX_BUF_THRESHOLD_FULL (0x3 << 10)
10584#define CONTINUOUS_CLK_MASK (0x3 << 8)
10585#define CONTINUOUS_CLK_SHIFT 8
10586#define CLK_ENTER_LP_AFTER_DATA (0x0 << 8)
10587#define CLK_HS_OR_LP (0x2 << 8)
10588#define CLK_HS_CONTINUOUS (0x3 << 8)
10589#define LINK_CALIBRATION_MASK (0x3 << 4)
10590#define LINK_CALIBRATION_SHIFT 4
10591#define CALIBRATION_DISABLED (0x0 << 4)
10592#define CALIBRATION_ENABLED_INITIAL_ONLY (0x2 << 4)
10593#define CALIBRATION_ENABLED_INITIAL_PERIODIC (0x3 << 4)
10594#define S3D_ORIENTATION_LANDSCAPE (1 << 1)
10595#define EOTP_DISABLED (1 << 0)
10596
10597#define _DSI_CMD_RXCTL_0 0x6b0d4
10598#define _DSI_CMD_RXCTL_1 0x6b8d4
10599#define DSI_CMD_RXCTL(tc) _MMIO_DSI(tc, \
10600 _DSI_CMD_RXCTL_0,\
10601 _DSI_CMD_RXCTL_1)
10602#define READ_UNLOADS_DW (1 << 16)
10603#define RECEIVED_UNASSIGNED_TRIGGER (1 << 15)
10604#define RECEIVED_ACKNOWLEDGE_TRIGGER (1 << 14)
10605#define RECEIVED_TEAR_EFFECT_TRIGGER (1 << 13)
10606#define RECEIVED_RESET_TRIGGER (1 << 12)
10607#define RECEIVED_PAYLOAD_WAS_LOST (1 << 11)
10608#define RECEIVED_CRC_WAS_LOST (1 << 10)
10609#define NUMBER_RX_PLOAD_DW_MASK (0xff << 0)
10610#define NUMBER_RX_PLOAD_DW_SHIFT 0
10611
10612#define _DSI_CMD_TXCTL_0 0x6b0d0
10613#define _DSI_CMD_TXCTL_1 0x6b8d0
10614#define DSI_CMD_TXCTL(tc) _MMIO_DSI(tc, \
10615 _DSI_CMD_TXCTL_0,\
10616 _DSI_CMD_TXCTL_1)
10617#define KEEP_LINK_IN_HS (1 << 24)
10618#define FREE_HEADER_CREDIT_MASK (0x1f << 8)
10619#define FREE_HEADER_CREDIT_SHIFT 0x8
10620#define FREE_PLOAD_CREDIT_MASK (0xff << 0)
10621#define FREE_PLOAD_CREDIT_SHIFT 0
10622#define MAX_HEADER_CREDIT 0x10
10623#define MAX_PLOAD_CREDIT 0x40
10624
10625#define _DSI_CMD_TXHDR_0 0x6b100
10626#define _DSI_CMD_TXHDR_1 0x6b900
10627#define DSI_CMD_TXHDR(tc) _MMIO_DSI(tc, \
10628 _DSI_CMD_TXHDR_0,\
10629 _DSI_CMD_TXHDR_1)
10630#define PAYLOAD_PRESENT (1 << 31)
10631#define LP_DATA_TRANSFER (1 << 30)
10632#define VBLANK_FENCE (1 << 29)
10633#define PARAM_WC_MASK (0xffff << 8)
10634#define PARAM_WC_LOWER_SHIFT 8
10635#define PARAM_WC_UPPER_SHIFT 16
10636#define VC_MASK (0x3 << 6)
10637#define VC_SHIFT 6
10638#define DT_MASK (0x3f << 0)
10639#define DT_SHIFT 0
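A sketch of packing a command header with the fields above; note the 16-bit word count is split across the two byte fields. The helper and its parameters are hypothetical, not part of this series.

	static void dsi_queue_hdr(struct drm_i915_private *dev_priv,
				  enum transcoder tc, u8 dt, u8 vc, u16 wc,
				  bool payload, bool lp)
	{
		u32 hdr = 0;

		if (payload)
			hdr |= PAYLOAD_PRESENT;
		if (lp)
			hdr |= LP_DATA_TRANSFER;
		hdr |= (wc & 0xff) << PARAM_WC_LOWER_SHIFT; /* WC[7:0]  -> 15:8  */
		hdr |= (wc >> 8) << PARAM_WC_UPPER_SHIFT;   /* WC[15:8] -> 23:16 */
		hdr |= vc << VC_SHIFT;
		hdr |= dt << DT_SHIFT;

		I915_WRITE(DSI_CMD_TXHDR(tc), hdr);
	}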
10640
10641#define _DSI_CMD_TXPYLD_0 0x6b104
10642#define _DSI_CMD_TXPYLD_1 0x6b904
10643#define DSI_CMD_TXPYLD(tc) _MMIO_DSI(tc, \
10644 _DSI_CMD_TXPYLD_0,\
10645 _DSI_CMD_TXPYLD_1)
10646
10647#define _DSI_LP_MSG_0 0x6b0d8
10648#define _DSI_LP_MSG_1 0x6b8d8
10649#define DSI_LP_MSG(tc) _MMIO_DSI(tc, \
10650 _DSI_LP_MSG_0,\
10651 _DSI_LP_MSG_1)
10652#define LPTX_IN_PROGRESS (1 << 17)
10653#define LINK_IN_ULPS (1 << 16)
10654#define LINK_ULPS_TYPE_LP11 (1 << 8)
10655#define LINK_ENTER_ULPS (1 << 0)
10656
10657/* DSI timeout registers */
10658#define _DSI_HSTX_TO_0 0x6b044
10659#define _DSI_HSTX_TO_1 0x6b844
10660#define DSI_HSTX_TO(tc) _MMIO_DSI(tc, \
10661 _DSI_HSTX_TO_0,\
10662 _DSI_HSTX_TO_1)
10663#define HSTX_TIMEOUT_VALUE_MASK (0xffff << 16)
10664#define HSTX_TIMEOUT_VALUE_SHIFT 16
10665#define HSTX_TIMEOUT_VALUE(x) ((x) << 16)
10666#define HSTX_TIMED_OUT (1 << 0)
10667
10668#define _DSI_LPRX_HOST_TO_0 0x6b048
10669#define _DSI_LPRX_HOST_TO_1 0x6b848
10670#define DSI_LPRX_HOST_TO(tc) _MMIO_DSI(tc, \
10671 _DSI_LPRX_HOST_TO_0,\
10672 _DSI_LPRX_HOST_TO_1)
10673#define LPRX_TIMED_OUT (1 << 16)
10674#define LPRX_TIMEOUT_VALUE_MASK (0xffff << 0)
10675#define LPRX_TIMEOUT_VALUE_SHIFT 0
10676#define LPRX_TIMEOUT_VALUE(x) ((x) << 0)
10677
10678#define _DSI_PWAIT_TO_0 0x6b040
10679#define _DSI_PWAIT_TO_1 0x6b840
10680#define DSI_PWAIT_TO(tc) _MMIO_DSI(tc, \
10681 _DSI_PWAIT_TO_0,\
10682 _DSI_PWAIT_TO_1)
10683#define PRESET_TIMEOUT_VALUE_MASK (0xffff << 16)
10684#define PRESET_TIMEOUT_VALUE_SHIFT 16
10685#define PRESET_TIMEOUT_VALUE(x) ((x) << 16)
10686#define PRESPONSE_TIMEOUT_VALUE_MASK (0xffff << 0)
10687#define PRESPONSE_TIMEOUT_VALUE_SHIFT 0
10688#define PRESPONSE_TIMEOUT_VALUE(x) ((x) << 0)
10689
10690#define _DSI_TA_TO_0 0x6b04c
10691#define _DSI_TA_TO_1 0x6b84c
10692#define DSI_TA_TO(tc) _MMIO_DSI(tc, \
10693 _DSI_TA_TO_0,\
10694 _DSI_TA_TO_1)
10695#define TA_TIMED_OUT (1 << 16)
10696#define TA_TIMEOUT_VALUE_MASK (0xffff << 0)
10697#define TA_TIMEOUT_VALUE_SHIFT 0
10698#define TA_TIMEOUT_VALUE(x) ((x) << 0)
10699
10291/* bits 31:0 */ 10700/* bits 31:0 */
10292#define _MIPIA_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb084) 10701#define _MIPIA_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb084)
10293#define _MIPIC_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884) 10702#define _MIPIC_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884)
@@ -10400,10 +10809,6 @@ enum skl_power_gate {
10400#define MIPI_READ_DATA_VALID(port) _MMIO_MIPI(port, _MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID) 10809#define MIPI_READ_DATA_VALID(port) _MMIO_MIPI(port, _MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID)
10401#define READ_DATA_VALID(n) (1 << (n)) 10810#define READ_DATA_VALID(n) (1 << (n))
10402 10811
10403/* For UMS only (deprecated): */
10404#define _PALETTE_A (dev_priv->info.display_mmio_offset + 0xa000)
10405#define _PALETTE_B (dev_priv->info.display_mmio_offset + 0xa800)
10406
10407/* MOCS (Memory Object Control State) registers */ 10812/* MOCS (Memory Object Control State) registers */
10408#define GEN9_LNCFCMOCS(i) _MMIO(0xb020 + (i) * 4) /* L3 Cache Control */ 10813#define GEN9_LNCFCMOCS(i) _MMIO(0xb020 + (i) * 4) /* L3 Cache Control */
10409 10814
@@ -10689,6 +11094,7 @@ enum skl_power_gate {
10689#define ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 11094#define ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
10690 _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB, \ 11095 _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB, \
10691 _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC) 11096 _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC)
11097#define DSC_SLICE_ROW_PER_FRAME(slice_row_per_frame) ((slice_row_per_frame) << 20)
10692#define DSC_SLICE_PER_LINE(slice_per_line) ((slice_per_line) << 16) 11098#define DSC_SLICE_PER_LINE(slice_per_line) ((slice_per_line) << 16)
10693#define DSC_SLICE_CHUNK_SIZE(slice_chunk_size) ((slice_chunk_size) << 0) 11099#define DSC_SLICE_CHUNK_SIZE(slice_chunk_size) ((slice_chunk_size) << 0)
10694 11100
@@ -10743,17 +11149,17 @@ enum skl_power_gate {
10743 _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB, \ 11149 _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB, \
10744 _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC) 11150 _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC)
10745 11151
10746#define PORT_TX_DFLEXDPSP _MMIO(0x1638A0) 11152#define PORT_TX_DFLEXDPSP _MMIO(FIA1_BASE + 0x008A0)
10747#define TC_LIVE_STATE_TBT(tc_port) (1 << ((tc_port) * 8 + 6)) 11153#define TC_LIVE_STATE_TBT(tc_port) (1 << ((tc_port) * 8 + 6))
10748#define TC_LIVE_STATE_TC(tc_port) (1 << ((tc_port) * 8 + 5)) 11154#define TC_LIVE_STATE_TC(tc_port) (1 << ((tc_port) * 8 + 5))
10749#define DP_LANE_ASSIGNMENT_SHIFT(tc_port) ((tc_port) * 8) 11155#define DP_LANE_ASSIGNMENT_SHIFT(tc_port) ((tc_port) * 8)
10750#define DP_LANE_ASSIGNMENT_MASK(tc_port) (0xf << ((tc_port) * 8)) 11156#define DP_LANE_ASSIGNMENT_MASK(tc_port) (0xf << ((tc_port) * 8))
10751#define DP_LANE_ASSIGNMENT(tc_port, x) ((x) << ((tc_port) * 8)) 11157#define DP_LANE_ASSIGNMENT(tc_port, x) ((x) << ((tc_port) * 8))
10752 11158
10753#define PORT_TX_DFLEXDPPMS _MMIO(0x163890) 11159#define PORT_TX_DFLEXDPPMS _MMIO(FIA1_BASE + 0x00890)
10754#define DP_PHY_MODE_STATUS_COMPLETED(tc_port) (1 << (tc_port)) 11160#define DP_PHY_MODE_STATUS_COMPLETED(tc_port) (1 << (tc_port))
10755 11161
10756#define PORT_TX_DFLEXDPCSSS _MMIO(0x163894) 11162#define PORT_TX_DFLEXDPCSSS _MMIO(FIA1_BASE + 0x00894)
10757#define DP_PHY_MODE_STATUS_NOT_SAFE(tc_port) (1 << (tc_port)) 11163#define DP_PHY_MODE_STATUS_NOT_SAFE(tc_port) (1 << (tc_port))
10758 11164
10759#endif /* _I915_REG_H_ */ 11165#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index a492385b2089..71107540581d 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -111,91 +111,6 @@ i915_request_remove_from_client(struct i915_request *request)
111 spin_unlock(&file_priv->mm.lock); 111 spin_unlock(&file_priv->mm.lock);
112} 112}
113 113
114static struct i915_dependency *
115i915_dependency_alloc(struct drm_i915_private *i915)
116{
117 return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
118}
119
120static void
121i915_dependency_free(struct drm_i915_private *i915,
122 struct i915_dependency *dep)
123{
124 kmem_cache_free(i915->dependencies, dep);
125}
126
127static void
128__i915_sched_node_add_dependency(struct i915_sched_node *node,
129 struct i915_sched_node *signal,
130 struct i915_dependency *dep,
131 unsigned long flags)
132{
133 INIT_LIST_HEAD(&dep->dfs_link);
134 list_add(&dep->wait_link, &signal->waiters_list);
135 list_add(&dep->signal_link, &node->signalers_list);
136 dep->signaler = signal;
137 dep->flags = flags;
138}
139
140static int
141i915_sched_node_add_dependency(struct drm_i915_private *i915,
142 struct i915_sched_node *node,
143 struct i915_sched_node *signal)
144{
145 struct i915_dependency *dep;
146
147 dep = i915_dependency_alloc(i915);
148 if (!dep)
149 return -ENOMEM;
150
151 __i915_sched_node_add_dependency(node, signal, dep,
152 I915_DEPENDENCY_ALLOC);
153 return 0;
154}
155
156static void
157i915_sched_node_fini(struct drm_i915_private *i915,
158 struct i915_sched_node *node)
159{
160 struct i915_dependency *dep, *tmp;
161
162 GEM_BUG_ON(!list_empty(&node->link));
163
164 /*
165 * Everyone we depended upon (the fences we wait to be signaled)
166 * should retire before us and remove themselves from our list.
167 * However, retirement is run independently on each timeline and
168 * so we may be called out-of-order.
169 */
170 list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
171 GEM_BUG_ON(!i915_sched_node_signaled(dep->signaler));
172 GEM_BUG_ON(!list_empty(&dep->dfs_link));
173
174 list_del(&dep->wait_link);
175 if (dep->flags & I915_DEPENDENCY_ALLOC)
176 i915_dependency_free(i915, dep);
177 }
178
179 /* Remove ourselves from everyone who depends upon us */
180 list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
181 GEM_BUG_ON(dep->signaler != node);
182 GEM_BUG_ON(!list_empty(&dep->dfs_link));
183
184 list_del(&dep->signal_link);
185 if (dep->flags & I915_DEPENDENCY_ALLOC)
186 i915_dependency_free(i915, dep);
187 }
188}
189
190static void
191i915_sched_node_init(struct i915_sched_node *node)
192{
193 INIT_LIST_HEAD(&node->signalers_list);
194 INIT_LIST_HEAD(&node->waiters_list);
195 INIT_LIST_HEAD(&node->link);
196 node->attr.priority = I915_PRIORITY_INVALID;
197}
198
199static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno) 114static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
200{ 115{
201 struct intel_engine_cs *engine; 116 struct intel_engine_cs *engine;
@@ -221,6 +136,8 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
221 intel_engine_get_seqno(engine), 136 intel_engine_get_seqno(engine),
222 seqno); 137 seqno);
223 138
139 kthread_park(engine->breadcrumbs.signaler);
140
224 if (!i915_seqno_passed(seqno, engine->timeline.seqno)) { 141 if (!i915_seqno_passed(seqno, engine->timeline.seqno)) {
225 /* Flush any waiters before we reuse the seqno */ 142 /* Flush any waiters before we reuse the seqno */
226 intel_engine_disarm_breadcrumbs(engine); 143 intel_engine_disarm_breadcrumbs(engine);
@@ -235,6 +152,8 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
235 /* Finally reset hw state */ 152 /* Finally reset hw state */
236 intel_engine_init_global_seqno(engine, seqno); 153 intel_engine_init_global_seqno(engine, seqno);
237 engine->timeline.seqno = seqno; 154 engine->timeline.seqno = seqno;
155
156 kthread_unpark(engine->breadcrumbs.signaler);
238 } 157 }
239 158
240 list_for_each_entry(timeline, &i915->gt.timelines, link) 159 list_for_each_entry(timeline, &i915->gt.timelines, link)
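The kthread_park()/kthread_unpark() pair added above quiesces the per-engine signaler thread while the global seqno is rewritten. The general pattern, as a sketch with hypothetical names:

	/* kthread_park() returns only once the thread is parked, so the
	 * state it normally polls can be rewritten without racing it. */
	kthread_park(worker);              /* worker: some task_struct * */
	rewrite_state_polled_by_worker();  /* hypothetical update */
	kthread_unpark(worker);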
@@ -740,17 +659,6 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
740 if (rq) 659 if (rq)
741 cond_synchronize_rcu(rq->rcustate); 660 cond_synchronize_rcu(rq->rcustate);
742 661
743 /*
744 * We've forced the client to stall and catch up with whatever
745 * backlog there might have been. As we are assuming that we
746 * caused the mempressure, now is an opportune time to
747 * recover as much memory from the request pool as is possible.
748 * Having already penalized the client to stall, we spend
749 * a little extra time to re-optimise page allocation.
750 */
751 kmem_cache_shrink(i915->requests);
752 rcu_barrier(); /* Recover the TYPESAFE_BY_RCU pages */
753
754 rq = kmem_cache_alloc(i915->requests, GFP_KERNEL); 662 rq = kmem_cache_alloc(i915->requests, GFP_KERNEL);
755 if (!rq) { 663 if (!rq) {
756 ret = -ENOMEM; 664 ret = -ENOMEM;
@@ -1127,8 +1035,20 @@ void i915_request_add(struct i915_request *request)
1127 */ 1035 */
1128 local_bh_disable(); 1036 local_bh_disable();
1129 rcu_read_lock(); /* RCU serialisation for set-wedged protection */ 1037 rcu_read_lock(); /* RCU serialisation for set-wedged protection */
1130 if (engine->schedule) 1038 if (engine->schedule) {
1131 engine->schedule(request, &request->gem_context->sched); 1039 struct i915_sched_attr attr = request->gem_context->sched;
1040
1041 /*
1042 * Boost priorities to new clients (new request flows).
1043 *
1044 * Allow interactive/synchronous clients to jump ahead of
1045 * the bulk clients. (FQ_CODEL)
1046 */
1047 if (!prev || i915_request_completed(prev))
1048 attr.priority |= I915_PRIORITY_NEWCLIENT;
1049
1050 engine->schedule(request, &attr);
1051 }
1132 rcu_read_unlock(); 1052 rcu_read_unlock();
1133 i915_sw_fence_commit(&request->submit); 1053 i915_sw_fence_commit(&request->submit);
1134 local_bh_enable(); /* Kick the execlists tasklet if just scheduled */ 1054 local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
@@ -1310,6 +1230,8 @@ long i915_request_wait(struct i915_request *rq,
1310 add_wait_queue(errq, &reset); 1230 add_wait_queue(errq, &reset);
1311 1231
1312 intel_wait_init(&wait); 1232 intel_wait_init(&wait);
1233 if (flags & I915_WAIT_PRIORITY)
1234 i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
1313 1235
1314restart: 1236restart:
1315 do { 1237 do {
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 7fa94b024968..90e9d170a0cd 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -277,8 +277,9 @@ long i915_request_wait(struct i915_request *rq,
277 __attribute__((nonnull(1))); 277 __attribute__((nonnull(1)));
278#define I915_WAIT_INTERRUPTIBLE BIT(0) 278#define I915_WAIT_INTERRUPTIBLE BIT(0)
279#define I915_WAIT_LOCKED BIT(1) /* struct_mutex held, handle GPU reset */ 279#define I915_WAIT_LOCKED BIT(1) /* struct_mutex held, handle GPU reset */
280#define I915_WAIT_ALL BIT(2) /* used by i915_gem_object_wait() */ 280#define I915_WAIT_PRIORITY BIT(2) /* small priority bump for the request */
281#define I915_WAIT_FOR_IDLE_BOOST BIT(3) 281#define I915_WAIT_ALL BIT(3) /* used by i915_gem_object_wait() */
282#define I915_WAIT_FOR_IDLE_BOOST BIT(4)
282 283
283static inline bool intel_engine_has_started(struct intel_engine_cs *engine, 284static inline bool intel_engine_has_started(struct intel_engine_cs *engine,
284 u32 seqno); 285 u32 seqno);
@@ -332,14 +333,6 @@ static inline bool i915_request_completed(const struct i915_request *rq)
332 return __i915_request_completed(rq, seqno); 333 return __i915_request_completed(rq, seqno);
333} 334}
334 335
335static inline bool i915_sched_node_signaled(const struct i915_sched_node *node)
336{
337 const struct i915_request *rq =
338 container_of(node, const struct i915_request, sched);
339
340 return i915_request_completed(rq);
341}
342
343void i915_retire_requests(struct drm_i915_private *i915); 336void i915_retire_requests(struct drm_i915_private *i915);
344 337
345/* 338/*
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
new file mode 100644
index 000000000000..340faea6c08a
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -0,0 +1,399 @@
1/*
2 * SPDX-License-Identifier: MIT
3 *
4 * Copyright © 2018 Intel Corporation
5 */
6
7#include <linux/mutex.h>
8
9#include "i915_drv.h"
10#include "i915_request.h"
11#include "i915_scheduler.h"
12
13static DEFINE_SPINLOCK(schedule_lock);
14
15static const struct i915_request *
16node_to_request(const struct i915_sched_node *node)
17{
18 return container_of(node, const struct i915_request, sched);
19}
20
21static inline bool node_signaled(const struct i915_sched_node *node)
22{
23 return i915_request_completed(node_to_request(node));
24}
25
26void i915_sched_node_init(struct i915_sched_node *node)
27{
28 INIT_LIST_HEAD(&node->signalers_list);
29 INIT_LIST_HEAD(&node->waiters_list);
30 INIT_LIST_HEAD(&node->link);
31 node->attr.priority = I915_PRIORITY_INVALID;
32}
33
34static struct i915_dependency *
35i915_dependency_alloc(struct drm_i915_private *i915)
36{
37 return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
38}
39
40static void
41i915_dependency_free(struct drm_i915_private *i915,
42 struct i915_dependency *dep)
43{
44 kmem_cache_free(i915->dependencies, dep);
45}
46
47bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
48 struct i915_sched_node *signal,
49 struct i915_dependency *dep,
50 unsigned long flags)
51{
52 bool ret = false;
53
54 spin_lock(&schedule_lock);
55
56 if (!node_signaled(signal)) {
57 INIT_LIST_HEAD(&dep->dfs_link);
58 list_add(&dep->wait_link, &signal->waiters_list);
59 list_add(&dep->signal_link, &node->signalers_list);
60 dep->signaler = signal;
61 dep->flags = flags;
62
63 ret = true;
64 }
65
66 spin_unlock(&schedule_lock);
67
68 return ret;
69}
70
71int i915_sched_node_add_dependency(struct drm_i915_private *i915,
72 struct i915_sched_node *node,
73 struct i915_sched_node *signal)
74{
75 struct i915_dependency *dep;
76
77 dep = i915_dependency_alloc(i915);
78 if (!dep)
79 return -ENOMEM;
80
81 if (!__i915_sched_node_add_dependency(node, signal, dep,
82 I915_DEPENDENCY_ALLOC))
83 i915_dependency_free(i915, dep);
84
85 return 0;
86}
87
88void i915_sched_node_fini(struct drm_i915_private *i915,
89 struct i915_sched_node *node)
90{
91 struct i915_dependency *dep, *tmp;
92
93 GEM_BUG_ON(!list_empty(&node->link));
94
95 spin_lock(&schedule_lock);
96
97 /*
98 * Everyone we depended upon (the fences we wait to be signaled)
99 * should retire before us and remove themselves from our list.
100 * However, retirement is run independently on each timeline and
101 * so we may be called out-of-order.
102 */
103 list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
104 GEM_BUG_ON(!node_signaled(dep->signaler));
105 GEM_BUG_ON(!list_empty(&dep->dfs_link));
106
107 list_del(&dep->wait_link);
108 if (dep->flags & I915_DEPENDENCY_ALLOC)
109 i915_dependency_free(i915, dep);
110 }
111
112 /* Remove ourselves from everyone who depends upon us */
113 list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
114 GEM_BUG_ON(dep->signaler != node);
115 GEM_BUG_ON(!list_empty(&dep->dfs_link));
116
117 list_del(&dep->signal_link);
118 if (dep->flags & I915_DEPENDENCY_ALLOC)
119 i915_dependency_free(i915, dep);
120 }
121
122 spin_unlock(&schedule_lock);
123}
124
125static inline struct i915_priolist *to_priolist(struct rb_node *rb)
126{
127 return rb_entry(rb, struct i915_priolist, node);
128}
129
130static void assert_priolists(struct intel_engine_execlists * const execlists,
131 long queue_priority)
132{
133 struct rb_node *rb;
134 long last_prio, i;
135
136 if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
137 return;
138
139 GEM_BUG_ON(rb_first_cached(&execlists->queue) !=
140 rb_first(&execlists->queue.rb_root));
141
142 last_prio = (queue_priority >> I915_USER_PRIORITY_SHIFT) + 1;
143 for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
144 const struct i915_priolist *p = to_priolist(rb);
145
146 GEM_BUG_ON(p->priority >= last_prio);
147 last_prio = p->priority;
148
149 GEM_BUG_ON(!p->used);
150 for (i = 0; i < ARRAY_SIZE(p->requests); i++) {
151 if (list_empty(&p->requests[i]))
152 continue;
153
154 GEM_BUG_ON(!(p->used & BIT(i)));
155 }
156 }
157}
158
159struct list_head *
160i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
161{
162 struct intel_engine_execlists * const execlists = &engine->execlists;
163 struct i915_priolist *p;
164 struct rb_node **parent, *rb;
165 bool first = true;
166 int idx, i;
167
168 lockdep_assert_held(&engine->timeline.lock);
169 assert_priolists(execlists, INT_MAX);
170
171 /* buckets sorted from highest [in slot 0] to lowest priority */
172 idx = I915_PRIORITY_COUNT - (prio & I915_PRIORITY_MASK) - 1;
173 prio >>= I915_USER_PRIORITY_SHIFT;
174 if (unlikely(execlists->no_priolist))
175 prio = I915_PRIORITY_NORMAL;
176
177find_priolist:
178 /* most positive priority is scheduled first, equal priorities fifo */
179 rb = NULL;
180 parent = &execlists->queue.rb_root.rb_node;
181 while (*parent) {
182 rb = *parent;
183 p = to_priolist(rb);
184 if (prio > p->priority) {
185 parent = &rb->rb_left;
186 } else if (prio < p->priority) {
187 parent = &rb->rb_right;
188 first = false;
189 } else {
190 goto out;
191 }
192 }
193
194 if (prio == I915_PRIORITY_NORMAL) {
195 p = &execlists->default_priolist;
196 } else {
197 p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
198 /* Convert an allocation failure to a priority bump */
199 if (unlikely(!p)) {
200 prio = I915_PRIORITY_NORMAL; /* recurses just once */
201
202 /* To maintain ordering with all rendering, after an
203 * allocation failure we have to disable all scheduling.
204 * Requests will then be executed in fifo, and schedule
205 * will ensure that dependencies are emitted in fifo.
206 * There will be still some reordering with existing
207 * requests, so if userspace lied about their
208 * dependencies that reordering may be visible.
209 */
210 execlists->no_priolist = true;
211 goto find_priolist;
212 }
213 }
214
215 p->priority = prio;
216 for (i = 0; i < ARRAY_SIZE(p->requests); i++)
217 INIT_LIST_HEAD(&p->requests[i]);
218 rb_link_node(&p->node, rb, parent);
219 rb_insert_color_cached(&p->node, &execlists->queue, first);
220 p->used = 0;
221
222out:
223 p->used |= BIT(idx);
224 return &p->requests[idx];
225}
226
227static struct intel_engine_cs *
228sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
229{
230 struct intel_engine_cs *engine = node_to_request(node)->engine;
231
232 GEM_BUG_ON(!locked);
233
234 if (engine != locked) {
235 spin_unlock(&locked->timeline.lock);
236 spin_lock(&engine->timeline.lock);
237 }
238
239 return engine;
240}
241
242static void __i915_schedule(struct i915_request *rq,
243 const struct i915_sched_attr *attr)
244{
245 struct list_head *uninitialized_var(pl);
246 struct intel_engine_cs *engine, *last;
247 struct i915_dependency *dep, *p;
248 struct i915_dependency stack;
249 const int prio = attr->priority;
250 LIST_HEAD(dfs);
251
252 /* Needed in order to use the temporary link inside i915_dependency */
253 lockdep_assert_held(&schedule_lock);
254 GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
255
256 if (i915_request_completed(rq))
257 return;
258
259 if (prio <= READ_ONCE(rq->sched.attr.priority))
260 return;
261
262 stack.signaler = &rq->sched;
263 list_add(&stack.dfs_link, &dfs);
264
265 /*
266 * Recursively bump all dependent priorities to match the new request.
267 *
268 * A naive approach would be to use recursion:
269 * static void update_priorities(struct i915_sched_node *node, prio) {
270 * list_for_each_entry(dep, &node->signalers_list, signal_link)
271 * update_priorities(dep->signal, prio)
272 * queue_request(node);
273 * }
274 * but that may have unlimited recursion depth and so runs a very
 275 * real risk of overrunning the kernel stack. Instead, we build
276 * a flat list of all dependencies starting with the current request.
277 * As we walk the list of dependencies, we add all of its dependencies
278 * to the end of the list (this may include an already visited
279 * request) and continue to walk onwards onto the new dependencies. The
280 * end result is a topological list of requests in reverse order, the
281 * last element in the list is the request we must execute first.
282 */
283 list_for_each_entry(dep, &dfs, dfs_link) {
284 struct i915_sched_node *node = dep->signaler;
285
286 /*
287 * Within an engine, there can be no cycle, but we may
288 * refer to the same dependency chain multiple times
289 * (redundant dependencies are not eliminated) and across
290 * engines.
291 */
292 list_for_each_entry(p, &node->signalers_list, signal_link) {
293 GEM_BUG_ON(p == dep); /* no cycles! */
294
295 if (node_signaled(p->signaler))
296 continue;
297
298 GEM_BUG_ON(p->signaler->attr.priority < node->attr.priority);
299 if (prio > READ_ONCE(p->signaler->attr.priority))
300 list_move_tail(&p->dfs_link, &dfs);
301 }
302 }
303
304 /*
305 * If we didn't need to bump any existing priorities, and we haven't
306 * yet submitted this request (i.e. there is no potential race with
307 * execlists_submit_request()), we can set our own priority and skip
308 * acquiring the engine locks.
309 */
310 if (rq->sched.attr.priority == I915_PRIORITY_INVALID) {
311 GEM_BUG_ON(!list_empty(&rq->sched.link));
312 rq->sched.attr = *attr;
313
314 if (stack.dfs_link.next == stack.dfs_link.prev)
315 return;
316
317 __list_del_entry(&stack.dfs_link);
318 }
319
320 last = NULL;
321 engine = rq->engine;
322 spin_lock_irq(&engine->timeline.lock);
323
324 /* Fifo and depth-first replacement ensure our deps execute before us */
325 list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
326 struct i915_sched_node *node = dep->signaler;
327
328 INIT_LIST_HEAD(&dep->dfs_link);
329
330 engine = sched_lock_engine(node, engine);
331
332 /* Recheck after acquiring the engine->timeline.lock */
333 if (prio <= node->attr.priority || node_signaled(node))
334 continue;
335
336 node->attr.priority = prio;
337 if (!list_empty(&node->link)) {
338 if (last != engine) {
339 pl = i915_sched_lookup_priolist(engine, prio);
340 last = engine;
341 }
342 list_move_tail(&node->link, pl);
343 } else {
344 /*
345 * If the request is not in the priolist queue because
346 * it is not yet runnable, then it doesn't contribute
347 * to our preemption decisions. On the other hand,
348 * if the request is on the HW, it too is not in the
349 * queue; but in that case we may still need to reorder
350 * the inflight requests.
351 */
352 if (!i915_sw_fence_done(&node_to_request(node)->submit))
353 continue;
354 }
355
356 if (prio <= engine->execlists.queue_priority)
357 continue;
358
359 /*
360 * If we are already the currently executing context, don't
361 * bother evaluating if we should preempt ourselves.
362 */
363 if (node_to_request(node)->global_seqno &&
364 i915_seqno_passed(port_request(engine->execlists.port)->global_seqno,
365 node_to_request(node)->global_seqno))
366 continue;
367
368 /* Defer (tasklet) submission until after all of our updates. */
369 engine->execlists.queue_priority = prio;
370 tasklet_hi_schedule(&engine->execlists.tasklet);
371 }
372
373 spin_unlock_irq(&engine->timeline.lock);
374}
375
376void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
377{
378 spin_lock(&schedule_lock);
379 __i915_schedule(rq, attr);
380 spin_unlock(&schedule_lock);
381}
382
383void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
384{
385 struct i915_sched_attr attr;
386
387 GEM_BUG_ON(bump & ~I915_PRIORITY_MASK);
388
389 if (READ_ONCE(rq->sched.attr.priority) == I915_PRIORITY_INVALID)
390 return;
391
392 spin_lock_bh(&schedule_lock);
393
394 attr = rq->sched.attr;
395 attr.priority |= bump;
396 __i915_schedule(rq, &attr);
397
398 spin_unlock_bh(&schedule_lock);
399}
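
For readers following the comment above __i915_schedule(): the same flatten-the-recursion idea can be shown in isolation. A minimal sketch, assuming hypothetical node types and a fixed-size worklist rather than the i915 lists (illustrative only, not the driver's API):

    /* Illustrative sketch only: hypothetical types, not the i915 API. */
    #include <assert.h>

    #define MAX_NODES 8

    struct node {
        int prio;
        int nr_deps;
        struct node *deps[MAX_NODES];  /* nodes that must execute before us */
    };

    /* Iteratively bump 'root' and everything it depends on to 'prio'. */
    static void bump_priority(struct node *root, int prio)
    {
        struct node *list[MAX_NODES * MAX_NODES];  /* generous for a demo */
        int head = 0, tail = 0, i;

        list[tail++] = root;
        /*
         * The list grows while we walk it: a flattened depth-first search.
         * Unlike the driver, which moves an already-queued dependency to
         * the tail, this sketch tolerates duplicate entries; the priority
         * check below makes reprocessing a node a no-op.
         */
        while (head < tail) {
            struct node *n = list[head++];

            for (i = 0; i < n->nr_deps; i++)
                if (prio > n->deps[i]->prio)
                    list[tail++] = n->deps[i];
        }
        /* Walk back to front: the last element must execute first. */
        while (tail--)
            if (prio > list[tail]->prio)
                list[tail]->prio = prio;
    }

    int main(void)
    {
        struct node a = { .prio = 0 };  /* the deepest dependency */
        struct node b = { .prio = 0, .nr_deps = 1, .deps = { &a } };
        struct node c = { .prio = 0, .nr_deps = 1, .deps = { &b } };

        bump_priority(&c, 2);
        assert(a.prio == 2 && b.prio == 2 && c.prio == 2);
        return 0;
    }

As in the driver, processing the list back to front visits dependencies before the nodes that wait on them, so no recursion depth is ever needed.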
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 70a42220358d..dbe9cb7ecd82 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -8,9 +8,14 @@
8#define _I915_SCHEDULER_H_ 8#define _I915_SCHEDULER_H_
9 9
10#include <linux/bitops.h> 10#include <linux/bitops.h>
11#include <linux/kernel.h>
11 12
12#include <uapi/drm/i915_drm.h> 13#include <uapi/drm/i915_drm.h>
13 14
15struct drm_i915_private;
16struct i915_request;
17struct intel_engine_cs;
18
14enum { 19enum {
15 I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1, 20 I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
16 I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY, 21 I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
@@ -19,6 +24,15 @@ enum {
19 I915_PRIORITY_INVALID = INT_MIN 24 I915_PRIORITY_INVALID = INT_MIN
20}; 25};
21 26
27#define I915_USER_PRIORITY_SHIFT 2
28#define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT)
29
30#define I915_PRIORITY_COUNT BIT(I915_USER_PRIORITY_SHIFT)
31#define I915_PRIORITY_MASK (I915_PRIORITY_COUNT - 1)
32
33#define I915_PRIORITY_WAIT ((u8)BIT(0))
34#define I915_PRIORITY_NEWCLIENT ((u8)BIT(1))
35
22struct i915_sched_attr { 36struct i915_sched_attr {
23 /** 37 /**
24 * @priority: execution and service priority 38 * @priority: execution and service priority
@@ -69,4 +83,26 @@ struct i915_dependency {
69#define I915_DEPENDENCY_ALLOC BIT(0) 83#define I915_DEPENDENCY_ALLOC BIT(0)
70}; 84};
71 85
86void i915_sched_node_init(struct i915_sched_node *node);
87
88bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
89 struct i915_sched_node *signal,
90 struct i915_dependency *dep,
91 unsigned long flags);
92
93int i915_sched_node_add_dependency(struct drm_i915_private *i915,
94 struct i915_sched_node *node,
95 struct i915_sched_node *signal);
96
97void i915_sched_node_fini(struct drm_i915_private *i915,
98 struct i915_sched_node *node);
99
100void i915_schedule(struct i915_request *request,
101 const struct i915_sched_attr *attr);
102
103void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump);
104
105struct list_head *
106i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio);
107
72#endif /* _I915_SCHEDULER_H_ */ 108#endif /* _I915_SCHEDULER_H_ */
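
To see how the new packing macros compose, here is a self-contained sketch that mirrors their values with local copies (not the kernel headers):

    /* Local mirrors of the macros above, for a standalone check. */
    #include <assert.h>

    #define USER_PRIORITY_SHIFT 2
    #define USER_PRIORITY(x)    ((x) << USER_PRIORITY_SHIFT)
    #define PRIORITY_MASK       ((1 << USER_PRIORITY_SHIFT) - 1)  /* 0x3 */
    #define PRIORITY_WAIT       (1 << 0)
    #define PRIORITY_NEWCLIENT  (1 << 1)

    int main(void)
    {
        /* user level 1, bumped because it comes from a new client */
        int prio = USER_PRIORITY(1) | PRIORITY_NEWCLIENT;  /* 4 | 2 == 6 */

        assert((prio >> USER_PRIORITY_SHIFT) == 1);           /* user level */
        assert((prio & PRIORITY_MASK) == PRIORITY_NEWCLIENT); /* bump bits */
        return 0;
    }

i915_schedule_bump_priority() in i915_scheduler.c relies on exactly this split: the bump is OR'ed into the low bits without disturbing the user-visible level.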
diff --git a/drivers/gpu/drm/i915/i915_syncmap.c b/drivers/gpu/drm/i915/i915_syncmap.c
index 58f8d0cc125c..60404dbb2e9f 100644
--- a/drivers/gpu/drm/i915/i915_syncmap.c
+++ b/drivers/gpu/drm/i915/i915_syncmap.c
@@ -92,7 +92,7 @@ void i915_syncmap_init(struct i915_syncmap **root)
92{ 92{
93 BUILD_BUG_ON_NOT_POWER_OF_2(KSYNCMAP); 93 BUILD_BUG_ON_NOT_POWER_OF_2(KSYNCMAP);
94 BUILD_BUG_ON_NOT_POWER_OF_2(SHIFT); 94 BUILD_BUG_ON_NOT_POWER_OF_2(SHIFT);
95 BUILD_BUG_ON(KSYNCMAP > BITS_PER_BYTE * sizeof((*root)->bitmap)); 95 BUILD_BUG_ON(KSYNCMAP > BITS_PER_TYPE((*root)->bitmap));
96 *root = NULL; 96 *root = NULL;
97} 97}
98 98
diff --git a/drivers/gpu/drm/i915/i915_timeline.h b/drivers/gpu/drm/i915/i915_timeline.h
index a2c2c3ab5fb0..ebd71b487220 100644
--- a/drivers/gpu/drm/i915/i915_timeline.h
+++ b/drivers/gpu/drm/i915/i915_timeline.h
@@ -83,6 +83,25 @@ void i915_timeline_init(struct drm_i915_private *i915,
83 const char *name); 83 const char *name);
84void i915_timeline_fini(struct i915_timeline *tl); 84void i915_timeline_fini(struct i915_timeline *tl);
85 85
86static inline void
87i915_timeline_set_subclass(struct i915_timeline *timeline,
88 unsigned int subclass)
89{
90 lockdep_set_subclass(&timeline->lock, subclass);
91
92 /*
93 * Due to an interesting quirk in lockdep's internal debug tracking,
94 * after setting a subclass we must ensure the lock is used. Otherwise,
95 * nr_unused_locks is incremented once too often.
96 */
97#ifdef CONFIG_DEBUG_LOCK_ALLOC
98 local_irq_disable();
99 lock_map_acquire(&timeline->lock.dep_map);
100 lock_map_release(&timeline->lock.dep_map);
101 local_irq_enable();
102#endif
103}
104
86struct i915_timeline * 105struct i915_timeline *
87i915_timeline_create(struct drm_i915_private *i915, const char *name); 106i915_timeline_create(struct drm_i915_private *i915, const char *name);
88 107
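
A plausible call site for the new helper, sketched with a hypothetical subclass value, would set the subclass right after init so that two timeline locks can nest without a false lockdep report:

    /* Hypothetical caller fragment; the subclass value is illustrative. */
    i915_timeline_init(i915, timeline, "example");
    i915_timeline_set_subclass(timeline, 1);  /* nest under subclass 1 */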
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 395dd2511568..5858a43e19da 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -68,7 +68,7 @@
68 68
69/* Note we don't consider signbits :| */ 69/* Note we don't consider signbits :| */
70#define overflows_type(x, T) \ 70#define overflows_type(x, T) \
71 (sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE)) 71 (sizeof(x) > sizeof(T) && (x) >> BITS_PER_TYPE(T))
72 72
73#define ptr_mask_bits(ptr, n) ({ \ 73#define ptr_mask_bits(ptr, n) ({ \
74 unsigned long __v = (unsigned long)(ptr); \ 74 unsigned long __v = (unsigned long)(ptr); \
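
Both this hunk and the i915_syncmap.c one above replace open-coded sizeof() arithmetic with the equivalent BITS_PER_TYPE(). A standalone sketch, re-deriving the macros locally so it builds outside the kernel:

    #include <assert.h>
    #include <limits.h>  /* CHAR_BIT */

    #define BITS_PER_TYPE(T) (sizeof(T) * CHAR_BIT)
    /* Note we don't consider signbits, as in the original. */
    #define overflows_type(x, T) \
        (sizeof(x) > sizeof(T) && (x) >> BITS_PER_TYPE(T))

    int main(void)
    {
        unsigned long long big = 1ULL << 40;

        assert(overflows_type(big, unsigned int));      /* 2^40 needs > 32 bits */
        assert(!overflows_type(123ULL, unsigned int));  /* fits in 32 bits */
        return 0;
    }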
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 35fce4c88629..5b4d78cdb4ca 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -305,12 +305,12 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
305 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); 305 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
306 GEM_BUG_ON(vma->size > vma->node.size); 306 GEM_BUG_ON(vma->size > vma->node.size);
307 307
308 if (GEM_WARN_ON(range_overflows(vma->node.start, 308 if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
309 vma->node.size, 309 vma->node.size,
310 vma->vm->total))) 310 vma->vm->total)))
311 return -ENODEV; 311 return -ENODEV;
312 312
313 if (GEM_WARN_ON(!flags)) 313 if (GEM_DEBUG_WARN_ON(!flags))
314 return -EINVAL; 314 return -EINVAL;
315 315
316 bind_flags = 0; 316 bind_flags = 0;
diff --git a/drivers/gpu/drm/i915/icl_dsi.c b/drivers/gpu/drm/i915/icl_dsi.c
index 13830e43a4d1..01f422df8c23 100644
--- a/drivers/gpu/drm/i915/icl_dsi.c
+++ b/drivers/gpu/drm/i915/icl_dsi.c
@@ -25,8 +25,153 @@
25 * Jani Nikula <jani.nikula@intel.com> 25 * Jani Nikula <jani.nikula@intel.com>
26 */ 26 */
27 27
28#include <drm/drm_mipi_dsi.h>
28#include "intel_dsi.h" 29#include "intel_dsi.h"
29 30
31static inline int header_credits_available(struct drm_i915_private *dev_priv,
32 enum transcoder dsi_trans)
33{
34 return (I915_READ(DSI_CMD_TXCTL(dsi_trans)) & FREE_HEADER_CREDIT_MASK)
35 >> FREE_HEADER_CREDIT_SHIFT;
36}
37
38static inline int payload_credits_available(struct drm_i915_private *dev_priv,
39 enum transcoder dsi_trans)
40{
41 return (I915_READ(DSI_CMD_TXCTL(dsi_trans)) & FREE_PLOAD_CREDIT_MASK)
42 >> FREE_PLOAD_CREDIT_SHIFT;
43}
44
45static void wait_for_header_credits(struct drm_i915_private *dev_priv,
46 enum transcoder dsi_trans)
47{
48 if (wait_for_us(header_credits_available(dev_priv, dsi_trans) >=
49 MAX_HEADER_CREDIT, 100))
50 DRM_ERROR("DSI header credits not released\n");
51}
52
53static void wait_for_payload_credits(struct drm_i915_private *dev_priv,
54 enum transcoder dsi_trans)
55{
56 if (wait_for_us(payload_credits_available(dev_priv, dsi_trans) >=
57 MAX_PLOAD_CREDIT, 100))
58 DRM_ERROR("DSI payload credits not released\n");
59}
60
61static enum transcoder dsi_port_to_transcoder(enum port port)
62{
63 if (port == PORT_A)
64 return TRANSCODER_DSI_0;
65 else
66 return TRANSCODER_DSI_1;
67}
68
69static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder)
70{
71 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
72 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
73 struct mipi_dsi_device *dsi;
74 enum port port;
75 enum transcoder dsi_trans;
76 int ret;
77
78 /* wait for header/payload credits to be released */
79 for_each_dsi_port(port, intel_dsi->ports) {
80 dsi_trans = dsi_port_to_transcoder(port);
81 wait_for_header_credits(dev_priv, dsi_trans);
82 wait_for_payload_credits(dev_priv, dsi_trans);
83 }
84
85 /* send nop DCS command */
86 for_each_dsi_port(port, intel_dsi->ports) {
87 dsi = intel_dsi->dsi_hosts[port]->device;
88 dsi->mode_flags |= MIPI_DSI_MODE_LPM;
89 dsi->channel = 0;
90 ret = mipi_dsi_dcs_nop(dsi);
91 if (ret < 0)
92 DRM_ERROR("error sending DCS NOP command\n");
93 }
94
95 /* wait for header credits to be released */
96 for_each_dsi_port(port, intel_dsi->ports) {
97 dsi_trans = dsi_port_to_transcoder(port);
98 wait_for_header_credits(dev_priv, dsi_trans);
99 }
100
101 /* wait for LP TX in progress bit to be cleared */
102 for_each_dsi_port(port, intel_dsi->ports) {
103 dsi_trans = dsi_port_to_transcoder(port);
104 if (wait_for_us(!(I915_READ(DSI_LP_MSG(dsi_trans)) &
105 LPTX_IN_PROGRESS), 20))
106 DRM_ERROR("LPTX bit not cleared\n");
107 }
108}
109
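
The credit waits above are plain poll-until-threshold loops over a status register. A rough userspace approximation of the pattern, where read_free_credits() is a hypothetical stand-in for the DSI_CMD_TXCTL read:

    #include <stdbool.h>
    #include <time.h>

    static long now_us(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000L + ts.tv_nsec / 1000;
    }

    /* Poll until 'wanted' credits are free or 'timeout_us' elapses. */
    static bool wait_for_credits(int (*read_free_credits)(void),
                                 int wanted, long timeout_us)
    {
        long deadline = now_us() + timeout_us;

        do {
            if (read_free_credits() >= wanted)
                return true;  /* all credits released */
        } while (now_us() < deadline);
        return false;  /* caller reports the error, as DRM_ERROR does here */
    }

    static int stub_credits(void) { return 8; }

    int main(void)
    {
        return wait_for_credits(stub_credits, 8, 100) ? 0 : 1;
    }

The kernel's wait_for_us() additionally relaxes the CPU between polls; this sketch omits that detail.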
110static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
111{
112 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
113 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
114 enum port port;
115 u32 tmp;
116 int lane;
117
118 for_each_dsi_port(port, intel_dsi->ports) {
119
120 /*
121 * Program voltage swing and pre-emphasis level values as per
122 * the table in BSPEC under DDI buffer programming
123 */
124 tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port));
125 tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK);
126 tmp |= SCALING_MODE_SEL(0x2);
127 tmp |= TAP2_DISABLE | TAP3_DISABLE;
128 tmp |= RTERM_SELECT(0x6);
129 I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp);
130
131 tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port));
132 tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK);
133 tmp |= SCALING_MODE_SEL(0x2);
134 tmp |= TAP2_DISABLE | TAP3_DISABLE;
135 tmp |= RTERM_SELECT(0x6);
136 I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp);
137
138 tmp = I915_READ(ICL_PORT_TX_DW2_LN0(port));
139 tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
140 RCOMP_SCALAR_MASK);
141 tmp |= SWING_SEL_UPPER(0x2);
142 tmp |= SWING_SEL_LOWER(0x2);
143 tmp |= RCOMP_SCALAR(0x98);
144 I915_WRITE(ICL_PORT_TX_DW2_GRP(port), tmp);
145
146 tmp = I915_READ(ICL_PORT_TX_DW2_AUX(port));
147 tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
148 RCOMP_SCALAR_MASK);
149 tmp |= SWING_SEL_UPPER(0x2);
150 tmp |= SWING_SEL_LOWER(0x2);
151 tmp |= RCOMP_SCALAR(0x98);
152 I915_WRITE(ICL_PORT_TX_DW2_AUX(port), tmp);
153
154 tmp = I915_READ(ICL_PORT_TX_DW4_AUX(port));
155 tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
156 CURSOR_COEFF_MASK);
157 tmp |= POST_CURSOR_1(0x0);
158 tmp |= POST_CURSOR_2(0x0);
159 tmp |= CURSOR_COEFF(0x3f);
160 I915_WRITE(ICL_PORT_TX_DW4_AUX(port), tmp);
161
162 for (lane = 0; lane <= 3; lane++) {
163 /* Bspec: must not use GRP register for write */
164 tmp = I915_READ(ICL_PORT_TX_DW4_LN(port, lane));
165 tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
166 CURSOR_COEFF_MASK);
167 tmp |= POST_CURSOR_1(0x0);
168 tmp |= POST_CURSOR_2(0x0);
169 tmp |= CURSOR_COEFF(0x3f);
170 I915_WRITE(ICL_PORT_TX_DW4_LN(port, lane), tmp);
171 }
172 }
173}
174
30static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder) 175static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder)
31{ 176{
32 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 177 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -105,10 +250,553 @@ static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder)
105 } 250 }
106} 251}
107 252
108static void gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder) 253static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
254{
255 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
256 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
257 enum port port;
258 u32 tmp;
259 int lane;
260
261 /* Step 4b(i) set loadgen select for transmit and aux lanes */
262 for_each_dsi_port(port, intel_dsi->ports) {
263 tmp = I915_READ(ICL_PORT_TX_DW4_AUX(port));
264 tmp &= ~LOADGEN_SELECT;
265 I915_WRITE(ICL_PORT_TX_DW4_AUX(port), tmp);
266 for (lane = 0; lane <= 3; lane++) {
267 tmp = I915_READ(ICL_PORT_TX_DW4_LN(port, lane));
268 tmp &= ~LOADGEN_SELECT;
269 if (lane != 2)
270 tmp |= LOADGEN_SELECT;
271 I915_WRITE(ICL_PORT_TX_DW4_LN(port, lane), tmp);
272 }
273 }
274
275 /* Step 4b(ii) set latency optimization for transmit and aux lanes */
276 for_each_dsi_port(port, intel_dsi->ports) {
277 tmp = I915_READ(ICL_PORT_TX_DW2_AUX(port));
278 tmp &= ~FRC_LATENCY_OPTIM_MASK;
279 tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
280 I915_WRITE(ICL_PORT_TX_DW2_AUX(port), tmp);
281 tmp = I915_READ(ICL_PORT_TX_DW2_LN0(port));
282 tmp &= ~FRC_LATENCY_OPTIM_MASK;
283 tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
284 I915_WRITE(ICL_PORT_TX_DW2_GRP(port), tmp);
285 }
286
287}
288
289static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
290{
291 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
292 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
293 u32 tmp;
294 enum port port;
295
296 /* clear common keeper enable bit */
297 for_each_dsi_port(port, intel_dsi->ports) {
298 tmp = I915_READ(ICL_PORT_PCS_DW1_LN0(port));
299 tmp &= ~COMMON_KEEPER_EN;
300 I915_WRITE(ICL_PORT_PCS_DW1_GRP(port), tmp);
301 tmp = I915_READ(ICL_PORT_PCS_DW1_AUX(port));
302 tmp &= ~COMMON_KEEPER_EN;
303 I915_WRITE(ICL_PORT_PCS_DW1_AUX(port), tmp);
304 }
305
306 /*
307 * Set SUS Clock Config bitfield to 11b
308 * Note: loadgen select program is done
309 * as part of lane phy sequence configuration
310 */
311 for_each_dsi_port(port, intel_dsi->ports) {
312 tmp = I915_READ(ICL_PORT_CL_DW5(port));
313 tmp |= SUS_CLOCK_CONFIG;
314 I915_WRITE(ICL_PORT_CL_DW5(port), tmp);
315 }
316
317 /* Clear training enable to change swing values */
318 for_each_dsi_port(port, intel_dsi->ports) {
319 tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port));
320 tmp &= ~TX_TRAINING_EN;
321 I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp);
322 tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port));
323 tmp &= ~TX_TRAINING_EN;
324 I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp);
325 }
326
327 /* Program swing and de-emphasis */
328 dsi_program_swing_and_deemphasis(encoder);
329
330 /* Set training enable to trigger update */
331 for_each_dsi_port(port, intel_dsi->ports) {
332 tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port));
333 tmp |= TX_TRAINING_EN;
334 I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp);
335 tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port));
336 tmp |= TX_TRAINING_EN;
337 I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp);
338 }
339}
340
341static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder)
342{
343 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
344 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
345 u32 tmp;
346 enum port port;
347
348 for_each_dsi_port(port, intel_dsi->ports) {
349 tmp = I915_READ(DDI_BUF_CTL(port));
350 tmp |= DDI_BUF_CTL_ENABLE;
351 I915_WRITE(DDI_BUF_CTL(port), tmp);
352
353 if (wait_for_us(!(I915_READ(DDI_BUF_CTL(port)) &
354 DDI_BUF_IS_IDLE),
355 500))
356 DRM_ERROR("DDI port:%c buffer still idle\n", port_name(port));
357 }
358}
359
360static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder)
361{
362 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
363 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
364 u32 tmp;
365 enum port port;
366
367 /* Program T-INIT master registers */
368 for_each_dsi_port(port, intel_dsi->ports) {
369 tmp = I915_READ(ICL_DSI_T_INIT_MASTER(port));
370 tmp &= ~MASTER_INIT_TIMER_MASK;
371 tmp |= intel_dsi->init_count;
372 I915_WRITE(ICL_DSI_T_INIT_MASTER(port), tmp);
373 }
374
375 /* Program DPHY clock lanes timings */
376 for_each_dsi_port(port, intel_dsi->ports) {
377 I915_WRITE(DPHY_CLK_TIMING_PARAM(port), intel_dsi->dphy_reg);
378
379 /* shadow register inside display core */
380 I915_WRITE(DSI_CLK_TIMING_PARAM(port), intel_dsi->dphy_reg);
381 }
382
383 /* Program DPHY data lanes timings */
384 for_each_dsi_port(port, intel_dsi->ports) {
385 I915_WRITE(DPHY_DATA_TIMING_PARAM(port),
386 intel_dsi->dphy_data_lane_reg);
387
388 /* shadow register inside display core */
389 I915_WRITE(DSI_DATA_TIMING_PARAM(port),
390 intel_dsi->dphy_data_lane_reg);
391 }
392
393 /*
394 * If the DSI link is operating at or below 800 MHz,
395 * TA_SURE should be overridden and programmed to
396 * a value of '0' inside TA_PARAM_REGISTERS; otherwise
397 * leave all fields at HW default values.
398 */
399 if (intel_dsi_bitrate(intel_dsi) <= 800000) {
400 for_each_dsi_port(port, intel_dsi->ports) {
401 tmp = I915_READ(DPHY_TA_TIMING_PARAM(port));
402 tmp &= ~TA_SURE_MASK;
403 tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
404 I915_WRITE(DPHY_TA_TIMING_PARAM(port), tmp);
405
406 /* shadow register inside display core */
407 tmp = I915_READ(DSI_TA_TIMING_PARAM(port));
408 tmp &= ~TA_SURE_MASK;
409 tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
410 I915_WRITE(DSI_TA_TIMING_PARAM(port), tmp);
411 }
412 }
413}
414
415static void
416gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
417 const struct intel_crtc_state *pipe_config)
418{
419 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
420 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
421 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
422 enum pipe pipe = intel_crtc->pipe;
423 u32 tmp;
424 enum port port;
425 enum transcoder dsi_trans;
426
427 for_each_dsi_port(port, intel_dsi->ports) {
428 dsi_trans = dsi_port_to_transcoder(port);
429 tmp = I915_READ(DSI_TRANS_FUNC_CONF(dsi_trans));
430
431 if (intel_dsi->eotp_pkt)
432 tmp &= ~EOTP_DISABLED;
433 else
434 tmp |= EOTP_DISABLED;
435
436 /* enable link calibration if bitrate >= 1.5 Gbps */
437 if (intel_dsi_bitrate(intel_dsi) >= 1500 * 1000) {
438 tmp &= ~LINK_CALIBRATION_MASK;
439 tmp |= CALIBRATION_ENABLED_INITIAL_ONLY;
440 }
441
442 /* configure continuous clock */
443 tmp &= ~CONTINUOUS_CLK_MASK;
444 if (intel_dsi->clock_stop)
445 tmp |= CLK_ENTER_LP_AFTER_DATA;
446 else
447 tmp |= CLK_HS_CONTINUOUS;
448
449 /* configure buffer threshold limit to minimum */
450 tmp &= ~PIX_BUF_THRESHOLD_MASK;
451 tmp |= PIX_BUF_THRESHOLD_1_4;
452
453 /* set virtual channel to '0' */
454 tmp &= ~PIX_VIRT_CHAN_MASK;
455 tmp |= PIX_VIRT_CHAN(0);
456
457 /* program BGR transmission */
458 if (intel_dsi->bgr_enabled)
459 tmp |= BGR_TRANSMISSION;
460
461 /* select pixel format */
462 tmp &= ~PIX_FMT_MASK;
463 switch (intel_dsi->pixel_format) {
464 default:
465 MISSING_CASE(intel_dsi->pixel_format);
466 /* fallthrough */
467 case MIPI_DSI_FMT_RGB565:
468 tmp |= PIX_FMT_RGB565;
469 break;
470 case MIPI_DSI_FMT_RGB666_PACKED:
471 tmp |= PIX_FMT_RGB666_PACKED;
472 break;
473 case MIPI_DSI_FMT_RGB666:
474 tmp |= PIX_FMT_RGB666_LOOSE;
475 break;
476 case MIPI_DSI_FMT_RGB888:
477 tmp |= PIX_FMT_RGB888;
478 break;
479 }
480
481 /* program DSI operation mode */
482 if (is_vid_mode(intel_dsi)) {
483 tmp &= ~OP_MODE_MASK;
484 switch (intel_dsi->video_mode_format) {
485 default:
486 MISSING_CASE(intel_dsi->video_mode_format);
487 /* fallthrough */
488 case VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS:
489 tmp |= VIDEO_MODE_SYNC_EVENT;
490 break;
491 case VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE:
492 tmp |= VIDEO_MODE_SYNC_PULSE;
493 break;
494 }
495 }
496
497 I915_WRITE(DSI_TRANS_FUNC_CONF(dsi_trans), tmp);
498 }
499
500 /* enable port sync mode if dual link */
501 if (intel_dsi->dual_link) {
502 for_each_dsi_port(port, intel_dsi->ports) {
503 dsi_trans = dsi_port_to_transcoder(port);
504 tmp = I915_READ(TRANS_DDI_FUNC_CTL2(dsi_trans));
505 tmp |= PORT_SYNC_MODE_ENABLE;
506 I915_WRITE(TRANS_DDI_FUNC_CTL2(dsi_trans), tmp);
507 }
508
509 /* TODO: configure DSS_CTL1 */
510 }
511
512 for_each_dsi_port(port, intel_dsi->ports) {
513 dsi_trans = dsi_port_to_transcoder(port);
514
515 /* select data lane width */
516 tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans));
517 tmp &= ~DDI_PORT_WIDTH_MASK;
518 tmp |= DDI_PORT_WIDTH(intel_dsi->lane_count);
519
520 /* select input pipe */
521 tmp &= ~TRANS_DDI_EDP_INPUT_MASK;
522 switch (pipe) {
523 default:
524 MISSING_CASE(pipe);
525 /* fallthrough */
526 case PIPE_A:
527 tmp |= TRANS_DDI_EDP_INPUT_A_ON;
528 break;
529 case PIPE_B:
530 tmp |= TRANS_DDI_EDP_INPUT_B_ONOFF;
531 break;
532 case PIPE_C:
533 tmp |= TRANS_DDI_EDP_INPUT_C_ONOFF;
534 break;
535 }
536
537 /* enable transcoder DDI function */
538 tmp |= TRANS_DDI_FUNC_ENABLE;
539 I915_WRITE(TRANS_DDI_FUNC_CTL(dsi_trans), tmp);
540 }
541
542 /* wait for link ready */
543 for_each_dsi_port(port, intel_dsi->ports) {
544 dsi_trans = dsi_port_to_transcoder(port);
545 if (wait_for_us((I915_READ(DSI_TRANS_FUNC_CONF(dsi_trans)) &
546 LINK_READY), 2500))
547 DRM_ERROR("DSI link not ready\n");
548 }
549}
550
551static void
552gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
553 const struct intel_crtc_state *pipe_config)
554{
555 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
556 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
557 const struct drm_display_mode *adjusted_mode =
558 &pipe_config->base.adjusted_mode;
559 enum port port;
560 enum transcoder dsi_trans;
561 /* horizontal timings */
562 u16 htotal, hactive, hsync_start, hsync_end, hsync_size;
563 u16 hfront_porch, hback_porch;
564 /* vertical timings */
565 u16 vtotal, vactive, vsync_start, vsync_end, vsync_shift;
566
567 hactive = adjusted_mode->crtc_hdisplay;
568 htotal = adjusted_mode->crtc_htotal;
569 hsync_start = adjusted_mode->crtc_hsync_start;
570 hsync_end = adjusted_mode->crtc_hsync_end;
571 hsync_size = hsync_end - hsync_start;
572 hfront_porch = (adjusted_mode->crtc_hsync_start -
573 adjusted_mode->crtc_hdisplay);
574 hback_porch = (adjusted_mode->crtc_htotal -
575 adjusted_mode->crtc_hsync_end);
576 vactive = adjusted_mode->crtc_vdisplay;
577 vtotal = adjusted_mode->crtc_vtotal;
578 vsync_start = adjusted_mode->crtc_vsync_start;
579 vsync_end = adjusted_mode->crtc_vsync_end;
580 vsync_shift = hsync_start - htotal / 2;
581
582 if (intel_dsi->dual_link) {
583 hactive /= 2;
584 if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
585 hactive += intel_dsi->pixel_overlap;
586 htotal /= 2;
587 }
588
589 /* minimum hactive as per bspec: 256 pixels */
590 if (adjusted_mode->crtc_hdisplay < 256)
591 DRM_ERROR("hactive is less than 256 pixels\n");
592
593 /* if RGB666 format, then hactive must be multiple of 4 pixels */
594 if (intel_dsi->pixel_format == MIPI_DSI_FMT_RGB666 && hactive % 4 != 0)
595 DRM_ERROR("hactive is not a multiple of 4 pixels\n");
596
597 /* program TRANS_HTOTAL register */
598 for_each_dsi_port(port, intel_dsi->ports) {
599 dsi_trans = dsi_port_to_transcoder(port);
600 I915_WRITE(HTOTAL(dsi_trans),
601 (hactive - 1) | ((htotal - 1) << 16));
602 }
603
604 /* TRANS_HSYNC register to be programmed only for video mode */
605 if (intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE) {
606 if (intel_dsi->video_mode_format ==
607 VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE) {
608 /* BSPEC: hsync size should be at least 16 pixels */
609 if (hsync_size < 16)
610 DRM_ERROR("hsync size < 16 pixels\n");
611 }
612
613 if (hback_porch < 16)
614 DRM_ERROR("hback porch < 16 pixels\n");
615
616 if (intel_dsi->dual_link) {
617 hsync_start /= 2;
618 hsync_end /= 2;
619 }
620
621 for_each_dsi_port(port, intel_dsi->ports) {
622 dsi_trans = dsi_port_to_transcoder(port);
623 I915_WRITE(HSYNC(dsi_trans),
624 (hsync_start - 1) | ((hsync_end - 1) << 16));
625 }
626 }
627
628 /* program TRANS_VTOTAL register */
629 for_each_dsi_port(port, intel_dsi->ports) {
630 dsi_trans = dsi_port_to_transcoder(port);
631 /*
632 * FIXME: Programming this assuming progressive mode, since
633 * the interlaced vs. progressive info from VBT is not saved
634 * inside struct drm_display_mode.
635 * For interlaced modes: program the required pixel count minus 2
636 */
637 I915_WRITE(VTOTAL(dsi_trans),
638 (vactive - 1) | ((vtotal - 1) << 16));
639 }
640
641 if (vsync_end < vsync_start || vsync_end > vtotal)
642 DRM_ERROR("Invalid vsync_end value\n");
643
644 if (vsync_start < vactive)
645 DRM_ERROR("vsync_start less than vactive\n");
646
647 /* program TRANS_VSYNC register */
648 for_each_dsi_port(port, intel_dsi->ports) {
649 dsi_trans = dsi_port_to_transcoder(port);
650 I915_WRITE(VSYNC(dsi_trans),
651 (vsync_start - 1) | ((vsync_end - 1) << 16));
652 }
653
654 /*
655 * FIXME: VSYNCSHIFT has to be programmed only for interlaced
656 * modes. Put the check condition here once the interlaced
657 * info is available, as described above.
658 * program TRANS_VSYNCSHIFT register
659 */
660 for_each_dsi_port(port, intel_dsi->ports) {
661 dsi_trans = dsi_port_to_transcoder(port);
662 I915_WRITE(VSYNCSHIFT(dsi_trans), vsync_shift);
663 }
664}
665
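
To make the porch and vsync-shift arithmetic above concrete, here is a worked example using the standard 1920x1080 CEA timings as hypothetical inputs (not values taken from this driver):

    #include <assert.h>

    int main(void)
    {
        int hdisplay = 1920, hsync_start = 2008, hsync_end = 2052,
            htotal = 2200;
        int hfront_porch = hsync_start - hdisplay;   /* 2008 - 1920 = 88 */
        int hback_porch = htotal - hsync_end;        /* 2200 - 2052 = 148 */
        /* computed before any dual-link halving, as in the driver */
        int vsync_shift = hsync_start - htotal / 2;  /* 2008 - 1100 = 908 */

        assert(hfront_porch == 88 && hback_porch == 148);
        assert(vsync_shift == 908);
        /* Dual link: each pipe drives half of the horizontal timings. */
        assert(hdisplay / 2 == 960 && htotal / 2 == 1100);
        return 0;
    }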
666static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
667{
668 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
669 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
670 enum port port;
671 enum transcoder dsi_trans;
672 u32 tmp;
673
674 for_each_dsi_port(port, intel_dsi->ports) {
675 dsi_trans = dsi_port_to_transcoder(port);
676 tmp = I915_READ(PIPECONF(dsi_trans));
677 tmp |= PIPECONF_ENABLE;
678 I915_WRITE(PIPECONF(dsi_trans), tmp);
679
680 /* wait for transcoder to be enabled */
681 if (intel_wait_for_register(dev_priv, PIPECONF(dsi_trans),
682 I965_PIPECONF_ACTIVE,
683 I965_PIPECONF_ACTIVE, 10))
684 DRM_ERROR("DSI transcoder not enabled\n");
685 }
686}
687
688static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder)
689{
690 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
691 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
692 enum port port;
693 enum transcoder dsi_trans;
694 u32 tmp, hs_tx_timeout, lp_rx_timeout, ta_timeout, divisor, mul;
695
696 /*
697 * escape clock count calculation:
698 * BYTE_CLK_COUNT = TIME_NS/(8 * UI)
699 * UI (nsec) = (10^6)/Bitrate
700 * TIME_NS = (BYTE_CLK_COUNT * 8 * 10^6)/Bitrate
701 * ESCAPE_CLK_COUNT = TIME_NS/ESC_CLK_NS
702 */
703 divisor = intel_dsi_tlpx_ns(intel_dsi) * intel_dsi_bitrate(intel_dsi) * 1000;
704 mul = 8 * 1000000;
705 hs_tx_timeout = DIV_ROUND_UP(intel_dsi->hs_tx_timeout * mul,
706 divisor);
707 lp_rx_timeout = DIV_ROUND_UP(intel_dsi->lp_rx_timeout * mul, divisor);
708 ta_timeout = DIV_ROUND_UP(intel_dsi->turn_arnd_val * mul, divisor);
709
710 for_each_dsi_port(port, intel_dsi->ports) {
711 dsi_trans = dsi_port_to_transcoder(port);
712
713 /* program hst_tx_timeout */
714 tmp = I915_READ(DSI_HSTX_TO(dsi_trans));
715 tmp &= ~HSTX_TIMEOUT_VALUE_MASK;
716 tmp |= HSTX_TIMEOUT_VALUE(hs_tx_timeout);
717 I915_WRITE(DSI_HSTX_TO(dsi_trans), tmp);
718
719 /* FIXME: DSI_CALIB_TO */
720
721 /* program lp_rx_host timeout */
722 tmp = I915_READ(DSI_LPRX_HOST_TO(dsi_trans));
723 tmp &= ~LPRX_TIMEOUT_VALUE_MASK;
724 tmp |= LPRX_TIMEOUT_VALUE(lp_rx_timeout);
725 I915_WRITE(DSI_LPRX_HOST_TO(dsi_trans), tmp);
726
727 /* FIXME: DSI_PWAIT_TO */
728
729 /* program turn around timeout */
730 tmp = I915_READ(DSI_TA_TO(dsi_trans));
731 tmp &= ~TA_TIMEOUT_VALUE_MASK;
732 tmp |= TA_TIMEOUT_VALUE(ta_timeout);
733 I915_WRITE(DSI_TA_TO(dsi_trans), tmp);
734 }
735}
736
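
A worked example of the timeout scaling above, with hypothetical VBT values of tlpx = 50 ns on a 1 Gbps link (bitrate = 1,000,000 kHz):

    #include <assert.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned long long tlpx_ns = 50, bitrate_khz = 1000000;
        unsigned long long divisor = tlpx_ns * bitrate_khz * 1000;
        unsigned long long mul = 8 * 1000000ULL;
        unsigned long long hs_tx_timeout = 16500;  /* hypothetical VBT value */

        assert(divisor == 50000000000ULL);
        /* 16500 * 8e6 = 1.32e11; 1.32e11 / 5e10 = 2.64, rounded up to 3 */
        assert(DIV_ROUND_UP(hs_tx_timeout * mul, divisor) == 3);
        return 0;
    }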
737static void
738gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
739 const struct intel_crtc_state *pipe_config)
109{ 740{
110 /* step 4a: power up all lanes of the DDI used by DSI */ 741 /* step 4a: power up all lanes of the DDI used by DSI */
111 gen11_dsi_power_up_lanes(encoder); 742 gen11_dsi_power_up_lanes(encoder);
743
744 /* step 4b: configure lane sequencing of the Combo-PHY transmitters */
745 gen11_dsi_config_phy_lanes_sequence(encoder);
746
747 /* step 4c: configure voltage swing and skew */
748 gen11_dsi_voltage_swing_program_seq(encoder);
749
750 /* enable DDI buffer */
751 gen11_dsi_enable_ddi_buffer(encoder);
752
753 /* setup D-PHY timings */
754 gen11_dsi_setup_dphy_timings(encoder);
755
756 /* step 4h: setup DSI protocol timeouts */
757 gen11_dsi_setup_timeouts(encoder);
758
759 /* Step (4h, 4i, 4j, 4k): Configure transcoder */
760 gen11_dsi_configure_transcoder(encoder, pipe_config);
761}
762
763static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
764{
765 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
766 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
767 struct mipi_dsi_device *dsi;
768 enum port port;
769 enum transcoder dsi_trans;
770 u32 tmp;
771 int ret;
772
773 /* set maximum return packet size */
774 for_each_dsi_port(port, intel_dsi->ports) {
775 dsi_trans = dsi_port_to_transcoder(port);
776
777 /*
778 * FIXME: This uses the number of DW's currently in the payload
779 * receive queue. This is probably not what we want here.
780 */
781 tmp = I915_READ(DSI_CMD_RXCTL(dsi_trans));
782 tmp &= NUMBER_RX_PLOAD_DW_MASK;
783 /* multiply "Number Rx Payload DW" by 4 to get max value */
784 tmp = tmp * 4;
785 dsi = intel_dsi->dsi_hosts[port]->device;
786 ret = mipi_dsi_set_maximum_return_packet_size(dsi, tmp);
787 if (ret < 0)
788 DRM_ERROR("error setting max return pkt size %d\n", tmp);
789 }
790
791 /* panel power on related mipi dsi vbt sequences */
792 intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
793 intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
794 intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
795 intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP);
796 intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
797
798 /* ensure all panel commands dispatched before enabling transcoder */
799 wait_for_cmds_dispatched_to_panel(encoder);
112} 800}
113 801
114static void __attribute__((unused)) 802static void __attribute__((unused))
@@ -116,6 +804,8 @@ gen11_dsi_pre_enable(struct intel_encoder *encoder,
116 const struct intel_crtc_state *pipe_config, 804 const struct intel_crtc_state *pipe_config,
117 const struct drm_connector_state *conn_state) 805 const struct drm_connector_state *conn_state)
118{ 806{
807 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
808
119 /* step2: enable IO power */ 809 /* step2: enable IO power */
120 gen11_dsi_enable_io_power(encoder); 810 gen11_dsi_enable_io_power(encoder);
121 811
@@ -123,5 +813,169 @@ gen11_dsi_pre_enable(struct intel_encoder *encoder,
123 gen11_dsi_program_esc_clk_div(encoder); 813 gen11_dsi_program_esc_clk_div(encoder);
124 814
125 /* step4: enable DSI port and DPHY */ 815 /* step4: enable DSI port and DPHY */
126 gen11_dsi_enable_port_and_phy(encoder); 816 gen11_dsi_enable_port_and_phy(encoder, pipe_config);
817
818 /* step5: program and powerup panel */
819 gen11_dsi_powerup_panel(encoder);
820
821 /* step6c: configure transcoder timings */
822 gen11_dsi_set_transcoder_timings(encoder, pipe_config);
823
824 /* step6d: enable dsi transcoder */
825 gen11_dsi_enable_transcoder(encoder);
826
827 /* step7: enable backlight */
828 intel_panel_enable_backlight(pipe_config, conn_state);
829 intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
830}
831
832static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
833{
834 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
835 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
836 enum port port;
837 enum transcoder dsi_trans;
838 u32 tmp;
839
840 for_each_dsi_port(port, intel_dsi->ports) {
841 dsi_trans = dsi_port_to_transcoder(port);
842
843 /* disable transcoder */
844 tmp = I915_READ(PIPECONF(dsi_trans));
845 tmp &= ~PIPECONF_ENABLE;
846 I915_WRITE(PIPECONF(dsi_trans), tmp);
847
848 /* wait for transcoder to be disabled */
849 if (intel_wait_for_register(dev_priv, PIPECONF(dsi_trans),
850 I965_PIPECONF_ACTIVE, 0, 50))
851 DRM_ERROR("DSI transcoder not disabled\n");
852 }
853}
854
855static void gen11_dsi_powerdown_panel(struct intel_encoder *encoder)
856{
857 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
858
859 intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_OFF);
860 intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_ASSERT_RESET);
861 intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_OFF);
862
863 /* ensure cmds dispatched to panel */
864 wait_for_cmds_dispatched_to_panel(encoder);
865}
866
867static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
868{
869 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
870 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
871 enum port port;
872 enum transcoder dsi_trans;
873 u32 tmp;
874
875 /* put dsi link in ULPS */
876 for_each_dsi_port(port, intel_dsi->ports) {
877 dsi_trans = dsi_port_to_transcoder(port);
878 tmp = I915_READ(DSI_LP_MSG(dsi_trans));
879 tmp |= LINK_ENTER_ULPS;
880 tmp &= ~LINK_ULPS_TYPE_LP11;
881 I915_WRITE(DSI_LP_MSG(dsi_trans), tmp);
882
883 if (wait_for_us((I915_READ(DSI_LP_MSG(dsi_trans)) &
884 LINK_IN_ULPS),
885 10))
886 DRM_ERROR("DSI link not in ULPS\n");
887 }
888
889 /* disable ddi function */
890 for_each_dsi_port(port, intel_dsi->ports) {
891 dsi_trans = dsi_port_to_transcoder(port);
892 tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans));
893 tmp &= ~TRANS_DDI_FUNC_ENABLE;
894 I915_WRITE(TRANS_DDI_FUNC_CTL(dsi_trans), tmp);
895 }
896
897 /* disable port sync mode if dual link */
898 if (intel_dsi->dual_link) {
899 for_each_dsi_port(port, intel_dsi->ports) {
900 dsi_trans = dsi_port_to_transcoder(port);
901 tmp = I915_READ(TRANS_DDI_FUNC_CTL2(dsi_trans));
902 tmp &= ~PORT_SYNC_MODE_ENABLE;
903 I915_WRITE(TRANS_DDI_FUNC_CTL2(dsi_trans), tmp);
904 }
905 }
906}
907
908static void gen11_dsi_disable_port(struct intel_encoder *encoder)
909{
910 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
911 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
912 u32 tmp;
913 enum port port;
914
915 for_each_dsi_port(port, intel_dsi->ports) {
916 tmp = I915_READ(DDI_BUF_CTL(port));
917 tmp &= ~DDI_BUF_CTL_ENABLE;
918 I915_WRITE(DDI_BUF_CTL(port), tmp);
919
920 if (wait_for_us((I915_READ(DDI_BUF_CTL(port)) &
921 DDI_BUF_IS_IDLE),
922 8))
923 DRM_ERROR("DDI port:%c buffer not idle\n",
924 port_name(port));
925 }
926}
927
928static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
929{
930 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
931 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
932 enum port port;
933 u32 tmp;
934
935 intel_display_power_put(dev_priv, POWER_DOMAIN_PORT_DDI_A_IO);
936
937 if (intel_dsi->dual_link)
938 intel_display_power_put(dev_priv, POWER_DOMAIN_PORT_DDI_B_IO);
939
940 /* set mode to DDI */
941 for_each_dsi_port(port, intel_dsi->ports) {
942 tmp = I915_READ(ICL_DSI_IO_MODECTL(port));
943 tmp &= ~COMBO_PHY_MODE_DSI;
944 I915_WRITE(ICL_DSI_IO_MODECTL(port), tmp);
945 }
946}
947
948static void __attribute__((unused)) gen11_dsi_disable(
949 struct intel_encoder *encoder,
950 const struct intel_crtc_state *old_crtc_state,
951 const struct drm_connector_state *old_conn_state)
952{
953 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
954
955 /* step1: turn off backlight */
956 intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
957 intel_panel_disable_backlight(old_conn_state);
958
959 /* step2d,e: disable transcoder and wait */
960 gen11_dsi_disable_transcoder(encoder);
961
962 /* step2f,g: powerdown panel */
963 gen11_dsi_powerdown_panel(encoder);
964
965 /* step2h,i,j: deconfigure transcoder */
966 gen11_dsi_deconfigure_trancoder(encoder);
967
968 /* step3: disable port */
969 gen11_dsi_disable_port(encoder);
970
971 /* step4: disable IO power */
972 gen11_dsi_disable_io_power(encoder);
973}
974
975void icl_dsi_init(struct drm_i915_private *dev_priv)
976{
977 enum port port;
978
979 if (!intel_bios_is_dsi_present(dev_priv, &port))
980 return;
127} 981}
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index b04952bacf77..a5a2c8fe58a7 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -203,6 +203,72 @@ intel_crtc_destroy_state(struct drm_crtc *crtc,
203 drm_atomic_helper_crtc_destroy_state(crtc, state); 203 drm_atomic_helper_crtc_destroy_state(crtc, state);
204} 204}
205 205
206static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state,
207 int num_scalers_need, struct intel_crtc *intel_crtc,
208 const char *name, int idx,
209 struct intel_plane_state *plane_state,
210 int *scaler_id)
211{
212 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
213 int j;
214 u32 mode;
215
216 if (*scaler_id < 0) {
217 /* find a free scaler */
218 for (j = 0; j < intel_crtc->num_scalers; j++) {
219 if (scaler_state->scalers[j].in_use)
220 continue;
221
222 *scaler_id = j;
223 scaler_state->scalers[*scaler_id].in_use = 1;
224 break;
225 }
226 }
227
228 if (WARN(*scaler_id < 0, "Cannot find scaler for %s:%d\n", name, idx))
229 return;
230
231 /* set scaler mode */
232 if (plane_state && plane_state->base.fb &&
233 plane_state->base.fb->format->is_yuv &&
234 plane_state->base.fb->format->num_planes > 1) {
235 if (IS_GEN9(dev_priv) &&
236 !IS_GEMINILAKE(dev_priv)) {
237 mode = SKL_PS_SCALER_MODE_NV12;
238 } else if (icl_is_hdr_plane(to_intel_plane(plane_state->base.plane))) {
239 /*
240 * On gen11+'s HDR planes we only use the scaler for
241 * scaling. They have a dedicated chroma upsampler, so
242 * we don't need the scaler to upsample the UV plane.
243 */
244 mode = PS_SCALER_MODE_NORMAL;
245 } else {
246 mode = PS_SCALER_MODE_PLANAR;
247
248 if (plane_state->linked_plane)
249 mode |= PS_PLANE_Y_SEL(plane_state->linked_plane->id);
250 }
251 } else if (INTEL_GEN(dev_priv) > 9 || IS_GEMINILAKE(dev_priv)) {
252 mode = PS_SCALER_MODE_NORMAL;
253 } else if (num_scalers_need == 1 && intel_crtc->num_scalers > 1) {
254 /*
255 * When only 1 scaler is in use on a pipe with 2 scalers,
256 * scaler 0 operates in high quality (HQ) mode.
257 * In this case use scaler 0 to take advantage of HQ mode.
258 */
259 scaler_state->scalers[*scaler_id].in_use = 0;
260 *scaler_id = 0;
261 scaler_state->scalers[0].in_use = 1;
262 mode = SKL_PS_SCALER_MODE_HQ;
263 } else {
264 mode = SKL_PS_SCALER_MODE_DYN;
265 }
266
267 DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
268 intel_crtc->pipe, *scaler_id, name, idx);
269 scaler_state->scalers[*scaler_id].mode = mode;
270}
271
206/** 272/**
207 * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests 273 * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
208 * @dev_priv: i915 device 274 * @dev_priv: i915 device
@@ -232,7 +298,7 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
232 struct drm_atomic_state *drm_state = crtc_state->base.state; 298 struct drm_atomic_state *drm_state = crtc_state->base.state;
233 struct intel_atomic_state *intel_state = to_intel_atomic_state(drm_state); 299 struct intel_atomic_state *intel_state = to_intel_atomic_state(drm_state);
234 int num_scalers_need; 300 int num_scalers_need;
235 int i, j; 301 int i;
236 302
237 num_scalers_need = hweight32(scaler_state->scaler_users); 303 num_scalers_need = hweight32(scaler_state->scaler_users);
238 304
@@ -304,59 +370,17 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
304 idx = plane->base.id; 370 idx = plane->base.id;
305 371
306 /* plane on different crtc cannot be a scaler user of this crtc */ 372 /* plane on different crtc cannot be a scaler user of this crtc */
307 if (WARN_ON(intel_plane->pipe != intel_crtc->pipe)) { 373 if (WARN_ON(intel_plane->pipe != intel_crtc->pipe))
308 continue; 374 continue;
309 }
310 375
311 plane_state = intel_atomic_get_new_plane_state(intel_state, 376 plane_state = intel_atomic_get_new_plane_state(intel_state,
312 intel_plane); 377 intel_plane);
313 scaler_id = &plane_state->scaler_id; 378 scaler_id = &plane_state->scaler_id;
314 } 379 }
315 380
316 if (*scaler_id < 0) { 381 intel_atomic_setup_scaler(scaler_state, num_scalers_need,
317 /* find a free scaler */ 382 intel_crtc, name, idx,
318 for (j = 0; j < intel_crtc->num_scalers; j++) { 383 plane_state, scaler_id);
319 if (!scaler_state->scalers[j].in_use) {
320 scaler_state->scalers[j].in_use = 1;
321 *scaler_id = j;
322 DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
323 intel_crtc->pipe, *scaler_id, name, idx);
324 break;
325 }
326 }
327 }
328
329 if (WARN_ON(*scaler_id < 0)) {
330 DRM_DEBUG_KMS("Cannot find scaler for %s:%d\n", name, idx);
331 continue;
332 }
333
334 /* set scaler mode */
335 if ((INTEL_GEN(dev_priv) >= 9) &&
336 plane_state && plane_state->base.fb &&
337 plane_state->base.fb->format->format ==
338 DRM_FORMAT_NV12) {
339 if (INTEL_GEN(dev_priv) == 9 &&
340 !IS_GEMINILAKE(dev_priv) &&
341 !IS_SKYLAKE(dev_priv))
342 scaler_state->scalers[*scaler_id].mode =
343 SKL_PS_SCALER_MODE_NV12;
344 else
345 scaler_state->scalers[*scaler_id].mode =
346 PS_SCALER_MODE_PLANAR;
347 } else if (num_scalers_need == 1 && intel_crtc->pipe != PIPE_C) {
348 /*
349 * when only 1 scaler is in use on either pipe A or B,
350 * scaler 0 operates in high quality (HQ) mode.
351 * In this case use scaler 0 to take advantage of HQ mode
352 */
353 *scaler_id = 0;
354 scaler_state->scalers[0].in_use = 1;
355 scaler_state->scalers[0].mode = PS_SCALER_MODE_HQ;
356 scaler_state->scalers[1].in_use = 0;
357 } else {
358 scaler_state->scalers[*scaler_id].mode = PS_SCALER_MODE_DYN;
359 }
360 } 384 }
361 385
362 return 0; 386 return 0;
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index aabebe0d2e9b..905f8ef3ba4f 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -36,28 +36,31 @@
36#include <drm/drm_plane_helper.h> 36#include <drm/drm_plane_helper.h>
37#include "intel_drv.h" 37#include "intel_drv.h"
38 38
39/** 39struct intel_plane *intel_plane_alloc(void)
40 * intel_create_plane_state - create plane state object
41 * @plane: drm plane
42 *
43 * Allocates a fresh plane state for the given plane and sets some of
44 * the state values to sensible initial values.
45 *
46 * Returns: A newly allocated plane state, or NULL on failure
47 */
48struct intel_plane_state *
49intel_create_plane_state(struct drm_plane *plane)
50{ 40{
51 struct intel_plane_state *state; 41 struct intel_plane_state *plane_state;
42 struct intel_plane *plane;
52 43
53 state = kzalloc(sizeof(*state), GFP_KERNEL); 44 plane = kzalloc(sizeof(*plane), GFP_KERNEL);
54 if (!state) 45 if (!plane)
55 return NULL; 46 return ERR_PTR(-ENOMEM);
56 47
57 state->base.plane = plane; 48 plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
58 state->base.rotation = DRM_MODE_ROTATE_0; 49 if (!plane_state) {
50 kfree(plane);
51 return ERR_PTR(-ENOMEM);
52 }
59 53
60 return state; 54 __drm_atomic_helper_plane_reset(&plane->base, &plane_state->base);
55 plane_state->scaler_id = -1;
56
57 return plane;
58}
59
60void intel_plane_free(struct intel_plane *plane)
61{
62 intel_plane_destroy_state(&plane->base, plane->base.state);
63 kfree(plane);
61} 64}
62 65
63/** 66/**
@@ -117,10 +120,14 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
117 struct intel_plane *intel_plane = to_intel_plane(plane); 120 struct intel_plane *intel_plane = to_intel_plane(plane);
118 int ret; 121 int ret;
119 122
123 crtc_state->active_planes &= ~BIT(intel_plane->id);
124 crtc_state->nv12_planes &= ~BIT(intel_plane->id);
125 intel_state->base.visible = false;
126
127 /* If the plane is not attached to a crtc, no further checks are needed. */
120 if (!intel_state->base.crtc && !old_plane_state->base.crtc) 128 if (!intel_state->base.crtc && !old_plane_state->base.crtc)
121 return 0; 129 return 0;
122 130
123 intel_state->base.visible = false;
124 ret = intel_plane->check_plane(crtc_state, intel_state); 131 ret = intel_plane->check_plane(crtc_state, intel_state);
125 if (ret) 132 if (ret)
126 return ret; 133 return ret;
@@ -128,13 +135,9 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
128 /* FIXME pre-g4x don't work like this */ 135 /* FIXME pre-g4x don't work like this */
129 if (state->visible) 136 if (state->visible)
130 crtc_state->active_planes |= BIT(intel_plane->id); 137 crtc_state->active_planes |= BIT(intel_plane->id);
131 else
132 crtc_state->active_planes &= ~BIT(intel_plane->id);
133 138
134 if (state->visible && state->fb->format->format == DRM_FORMAT_NV12) 139 if (state->visible && state->fb->format->format == DRM_FORMAT_NV12)
135 crtc_state->nv12_planes |= BIT(intel_plane->id); 140 crtc_state->nv12_planes |= BIT(intel_plane->id);
136 else
137 crtc_state->nv12_planes &= ~BIT(intel_plane->id);
138 141
139 return intel_plane_atomic_calc_changes(old_crtc_state, 142 return intel_plane_atomic_calc_changes(old_crtc_state,
140 &crtc_state->base, 143 &crtc_state->base,
@@ -152,6 +155,7 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
152 const struct drm_crtc_state *old_crtc_state; 155 const struct drm_crtc_state *old_crtc_state;
153 struct drm_crtc_state *new_crtc_state; 156 struct drm_crtc_state *new_crtc_state;
154 157
158 new_plane_state->visible = false;
155 if (!crtc) 159 if (!crtc)
156 return 0; 160 return 0;
157 161
@@ -164,29 +168,52 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
164 to_intel_plane_state(new_plane_state)); 168 to_intel_plane_state(new_plane_state));
165} 169}
166 170
167static void intel_plane_atomic_update(struct drm_plane *plane, 171void intel_update_planes_on_crtc(struct intel_atomic_state *old_state,
168 struct drm_plane_state *old_state) 172 struct intel_crtc *crtc,
173 struct intel_crtc_state *old_crtc_state,
174 struct intel_crtc_state *new_crtc_state)
169{ 175{
170 struct intel_atomic_state *state = to_intel_atomic_state(old_state->state); 176 struct intel_plane_state *new_plane_state;
171 struct intel_plane *intel_plane = to_intel_plane(plane); 177 struct intel_plane *plane;
172 const struct intel_plane_state *new_plane_state = 178 u32 update_mask;
173 intel_atomic_get_new_plane_state(state, intel_plane); 179 int i;
174 struct drm_crtc *crtc = new_plane_state->base.crtc ?: old_state->crtc; 180
175 181 update_mask = old_crtc_state->active_planes;
176 if (new_plane_state->base.visible) { 182 update_mask |= new_crtc_state->active_planes;
177 const struct intel_crtc_state *new_crtc_state = 183
178 intel_atomic_get_new_crtc_state(state, to_intel_crtc(crtc)); 184 for_each_new_intel_plane_in_state(old_state, plane, new_plane_state, i) {
179 185 if (crtc->pipe != plane->pipe ||
180 trace_intel_update_plane(plane, 186 !(update_mask & BIT(plane->id)))
181 to_intel_crtc(crtc)); 187 continue;
182 188
183 intel_plane->update_plane(intel_plane, 189 if (new_plane_state->base.visible) {
184 new_crtc_state, new_plane_state); 190 trace_intel_update_plane(&plane->base, crtc);
185 } else { 191
186 trace_intel_disable_plane(plane, 192 plane->update_plane(plane, new_crtc_state, new_plane_state);
187 to_intel_crtc(crtc)); 193 } else if (new_plane_state->slave) {
188 194 struct intel_plane *master =
189 intel_plane->disable_plane(intel_plane, to_intel_crtc(crtc)); 195 new_plane_state->linked_plane;
196
197 /*
198 * We update the slave plane from this function because
199 * programming it from the master plane's update_plane
200 * callback runs into issues when the Y plane is
201 * reassigned, disabled or used by a different plane.
202 *
203 * The slave plane is updated with the master plane's
204 * plane_state.
205 */
206 new_plane_state =
207 intel_atomic_get_new_plane_state(old_state, master);
208
209 trace_intel_update_plane(&plane->base, crtc);
210
211 plane->update_slave(plane, new_crtc_state, new_plane_state);
212 } else {
213 trace_intel_disable_plane(&plane->base, crtc);
214
215 plane->disable_plane(plane, crtc);
216 }
190 } 217 }
191} 218}
192 219
@@ -194,7 +221,6 @@ const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
194 .prepare_fb = intel_prepare_plane_fb, 221 .prepare_fb = intel_prepare_plane_fb,
195 .cleanup_fb = intel_cleanup_plane_fb, 222 .cleanup_fb = intel_cleanup_plane_fb,
196 .atomic_check = intel_plane_atomic_check, 223 .atomic_check = intel_plane_atomic_check,
197 .atomic_update = intel_plane_atomic_update,
198}; 224};
199 225
200/** 226/**
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 769f3f586661..ae55a6865d5c 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -144,26 +144,43 @@ static const struct {
144/* HDMI N/CTS table */ 144/* HDMI N/CTS table */
145#define TMDS_297M 297000 145#define TMDS_297M 297000
146#define TMDS_296M 296703 146#define TMDS_296M 296703
147#define TMDS_594M 594000
148#define TMDS_593M 593407
149
147static const struct { 150static const struct {
148 int sample_rate; 151 int sample_rate;
149 int clock; 152 int clock;
150 int n; 153 int n;
151 int cts; 154 int cts;
152} hdmi_aud_ncts[] = { 155} hdmi_aud_ncts[] = {
153 { 44100, TMDS_296M, 4459, 234375 },
154 { 44100, TMDS_297M, 4704, 247500 },
155 { 48000, TMDS_296M, 5824, 281250 },
156 { 48000, TMDS_297M, 5120, 247500 },
157 { 32000, TMDS_296M, 5824, 421875 }, 156 { 32000, TMDS_296M, 5824, 421875 },
158 { 32000, TMDS_297M, 3072, 222750 }, 157 { 32000, TMDS_297M, 3072, 222750 },
158 { 32000, TMDS_593M, 5824, 843750 },
159 { 32000, TMDS_594M, 3072, 445500 },
160 { 44100, TMDS_296M, 4459, 234375 },
161 { 44100, TMDS_297M, 4704, 247500 },
162 { 44100, TMDS_593M, 8918, 937500 },
163 { 44100, TMDS_594M, 9408, 990000 },
159 { 88200, TMDS_296M, 8918, 234375 }, 164 { 88200, TMDS_296M, 8918, 234375 },
160 { 88200, TMDS_297M, 9408, 247500 }, 165 { 88200, TMDS_297M, 9408, 247500 },
161 { 96000, TMDS_296M, 11648, 281250 }, 166 { 88200, TMDS_593M, 17836, 937500 },
162 { 96000, TMDS_297M, 10240, 247500 }, 167 { 88200, TMDS_594M, 18816, 990000 },
163 { 176400, TMDS_296M, 17836, 234375 }, 168 { 176400, TMDS_296M, 17836, 234375 },
164 { 176400, TMDS_297M, 18816, 247500 }, 169 { 176400, TMDS_297M, 18816, 247500 },
170 { 176400, TMDS_593M, 35672, 937500 },
171 { 176400, TMDS_594M, 37632, 990000 },
172 { 48000, TMDS_296M, 5824, 281250 },
173 { 48000, TMDS_297M, 5120, 247500 },
174 { 48000, TMDS_593M, 5824, 562500 },
175 { 48000, TMDS_594M, 6144, 594000 },
176 { 96000, TMDS_296M, 11648, 281250 },
177 { 96000, TMDS_297M, 10240, 247500 },
178 { 96000, TMDS_593M, 11648, 562500 },
179 { 96000, TMDS_594M, 12288, 594000 },
165 { 192000, TMDS_296M, 23296, 281250 }, 180 { 192000, TMDS_296M, 23296, 281250 },
166 { 192000, TMDS_297M, 20480, 247500 }, 181 { 192000, TMDS_297M, 20480, 247500 },
182 { 192000, TMDS_593M, 23296, 562500 },
183 { 192000, TMDS_594M, 24576, 594000 },
167}; 184};
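
The new 593/594 MHz rows can be sanity-checked against the HDMI audio clock regeneration relation 128 * fs = f_TMDS * N / CTS. A self-check of one added entry (any row verifies the same way):

    #include <assert.h>

    int main(void)
    {
        unsigned long long tmds_khz = 594000;  /* TMDS_594M */
        unsigned long long n = 3072, cts = 445500, fs_hz = 32000;

        /* 128 * 32000 Hz = 4096 kHz; 594000 * 3072 == 445500 * 4096 */
        assert(tmds_khz * n == cts * (128 * fs_hz / 1000));
        return 0;
    }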
168 185
169/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */ 186/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
@@ -912,6 +929,9 @@ static int i915_audio_component_bind(struct device *i915_kdev,
912 if (WARN_ON(acomp->base.ops || acomp->base.dev)) 929 if (WARN_ON(acomp->base.ops || acomp->base.dev))
913 return -EEXIST; 930 return -EEXIST;
914 931
932 if (WARN_ON(!device_link_add(hda_kdev, i915_kdev, DL_FLAG_STATELESS)))
933 return -ENOMEM;
934
915 drm_modeset_lock_all(&dev_priv->drm); 935 drm_modeset_lock_all(&dev_priv->drm);
916 acomp->base.ops = &i915_audio_component_ops; 936 acomp->base.ops = &i915_audio_component_ops;
917 acomp->base.dev = i915_kdev; 937 acomp->base.dev = i915_kdev;
@@ -935,6 +955,8 @@ static void i915_audio_component_unbind(struct device *i915_kdev,
935 acomp->base.dev = NULL; 955 acomp->base.dev = NULL;
936 dev_priv->audio_component = NULL; 956 dev_priv->audio_component = NULL;
937 drm_modeset_unlock_all(&dev_priv->drm); 957 drm_modeset_unlock_all(&dev_priv->drm);
958
959 device_link_remove(hda_kdev, i915_kdev);
938} 960}
939 961
940static const struct component_ops i915_audio_component_bind_ops = { 962static const struct component_ops i915_audio_component_bind_ops = {
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 1faa494e2bc9..0694aa8bb9bc 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -420,6 +420,13 @@ parse_general_features(struct drm_i915_private *dev_priv,
420 intel_bios_ssc_frequency(dev_priv, general->ssc_freq); 420 intel_bios_ssc_frequency(dev_priv, general->ssc_freq);
421 dev_priv->vbt.display_clock_mode = general->display_clock_mode; 421 dev_priv->vbt.display_clock_mode = general->display_clock_mode;
422 dev_priv->vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted; 422 dev_priv->vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
423 if (bdb->version >= 181) {
424 dev_priv->vbt.orientation = general->rotate_180 ?
425 DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP :
426 DRM_MODE_PANEL_ORIENTATION_NORMAL;
427 } else {
428 dev_priv->vbt.orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
429 }
423 DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n", 430 DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
424 dev_priv->vbt.int_tv_support, 431 dev_priv->vbt.int_tv_support,
425 dev_priv->vbt.int_crt_support, 432 dev_priv->vbt.int_crt_support,
@@ -852,6 +859,30 @@ parse_mipi_config(struct drm_i915_private *dev_priv,
852 859
853 parse_dsi_backlight_ports(dev_priv, bdb->version, port); 860 parse_dsi_backlight_ports(dev_priv, bdb->version, port);
854 861
862 /* FIXME is the 90 vs. 270 correct? */
863 switch (config->rotation) {
864 case ENABLE_ROTATION_0:
865 /*
866 * Most (all?) VBTs claim 0 degrees despite having
867 * an upside down panel, thus we do not trust this.
868 */
869 dev_priv->vbt.dsi.orientation =
870 DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
871 break;
872 case ENABLE_ROTATION_90:
873 dev_priv->vbt.dsi.orientation =
874 DRM_MODE_PANEL_ORIENTATION_RIGHT_UP;
875 break;
876 case ENABLE_ROTATION_180:
877 dev_priv->vbt.dsi.orientation =
878 DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
879 break;
880 case ENABLE_ROTATION_270:
881 dev_priv->vbt.dsi.orientation =
882 DRM_MODE_PANEL_ORIENTATION_LEFT_UP;
883 break;
884 }
885
855 /* We have mandatory mipi config blocks. Initialize as generic panel */ 886 /* We have mandatory mipi config blocks. Initialize as generic panel */
856 dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID; 887 dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
857} 888}
@@ -2039,17 +2070,17 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv,
2039 2070
2040 dvo_port = child->dvo_port; 2071 dvo_port = child->dvo_port;
2041 2072
2042 switch (dvo_port) { 2073 if (dvo_port == DVO_PORT_MIPIA ||
2043 case DVO_PORT_MIPIA: 2074 (dvo_port == DVO_PORT_MIPIB && IS_ICELAKE(dev_priv)) ||
2044 case DVO_PORT_MIPIC: 2075 (dvo_port == DVO_PORT_MIPIC && !IS_ICELAKE(dev_priv))) {
2045 if (port) 2076 if (port)
2046 *port = dvo_port - DVO_PORT_MIPIA; 2077 *port = dvo_port - DVO_PORT_MIPIA;
2047 return true; 2078 return true;
2048 case DVO_PORT_MIPIB: 2079 } else if (dvo_port == DVO_PORT_MIPIB ||
2049 case DVO_PORT_MIPID: 2080 dvo_port == DVO_PORT_MIPIC ||
2081 dvo_port == DVO_PORT_MIPID) {
2050 DRM_DEBUG_KMS("VBT has unsupported DSI port %c\n", 2082 DRM_DEBUG_KMS("VBT has unsupported DSI port %c\n",
2051 port_name(dvo_port - DVO_PORT_MIPIA)); 2083 port_name(dvo_port - DVO_PORT_MIPIA));
2052 break;
2053 } 2084 }
2054 } 2085 }
2055 2086
@@ -2159,3 +2190,49 @@ intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
2159 2190
2160 return false; 2191 return false;
2161} 2192}
2193
2194enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv,
2195 enum port port)
2196{
2197 const struct ddi_vbt_port_info *info =
2198 &dev_priv->vbt.ddi_port_info[port];
2199 enum aux_ch aux_ch;
2200
2201 if (!info->alternate_aux_channel) {
2202 aux_ch = (enum aux_ch)port;
2203
2204 DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n",
2205 aux_ch_name(aux_ch), port_name(port));
2206 return aux_ch;
2207 }
2208
2209 switch (info->alternate_aux_channel) {
2210 case DP_AUX_A:
2211 aux_ch = AUX_CH_A;
2212 break;
2213 case DP_AUX_B:
2214 aux_ch = AUX_CH_B;
2215 break;
2216 case DP_AUX_C:
2217 aux_ch = AUX_CH_C;
2218 break;
2219 case DP_AUX_D:
2220 aux_ch = AUX_CH_D;
2221 break;
2222 case DP_AUX_E:
2223 aux_ch = AUX_CH_E;
2224 break;
2225 case DP_AUX_F:
2226 aux_ch = AUX_CH_F;
2227 break;
2228 default:
2229 MISSING_CASE(info->alternate_aux_channel);
2230 aux_ch = AUX_CH_A;
2231 break;
2232 }
2233
2234 DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n",
2235 aux_ch_name(aux_ch), port_name(port));
2236
2237 return aux_ch;
2238}
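
The new helper above resolves a port's AUX channel: without a VBT override each port uses its namesake channel, otherwise the VBT's alternate_aux_channel code is translated, with MISSING_CASE plus a safe fallback for unknown values. A compilable sketch of that shape, using placeholder values instead of the real VBT DP_AUX_* codes:

#include <stdio.h>

/* Placeholder codes; the real DP_AUX_* values come from the VBT spec. */
enum vbt_aux { VBT_AUX_DEFAULT = 0, VBT_AUX_A, VBT_AUX_B, VBT_AUX_C };
enum aux_ch { AUX_CH_A, AUX_CH_B, AUX_CH_C };
enum port { PORT_A, PORT_B, PORT_C };

static enum aux_ch port_aux_ch(enum port port, enum vbt_aux override)
{
	/* No VBT override: each port uses its namesake AUX channel. */
	if (override == VBT_AUX_DEFAULT)
		return (enum aux_ch)port;

	switch (override) {
	case VBT_AUX_A: return AUX_CH_A;
	case VBT_AUX_B: return AUX_CH_B;
	case VBT_AUX_C: return AUX_CH_C;
	default:
		/* Unknown value: warn and fall back, as the driver does. */
		fprintf(stderr, "unknown VBT aux override %d\n", override);
		return AUX_CH_A;
	}
}

int main(void)
{
	printf("port B, VBT says AUX A -> %d\n", port_aux_ch(PORT_B, VBT_AUX_A));
	return 0;
}
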
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index 29075c763428..25e3aba9cded 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -2138,16 +2138,8 @@ void intel_set_cdclk(struct drm_i915_private *dev_priv,
2138static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv, 2138static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv,
2139 int pixel_rate) 2139 int pixel_rate)
2140{ 2140{
2141 if (INTEL_GEN(dev_priv) >= 10) 2141 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
2142 return DIV_ROUND_UP(pixel_rate, 2); 2142 return DIV_ROUND_UP(pixel_rate, 2);
2143 else if (IS_GEMINILAKE(dev_priv))
2144 /*
2145 * FIXME: Avoid using a pixel clock that is more than 99% of the cdclk
2146 * as a temporary workaround. Use a higher cdclk instead. (Note that
2147 * intel_compute_max_dotclk() limits the max pixel clock to 99% of max
2148 * cdclk.)
2149 */
2150 return DIV_ROUND_UP(pixel_rate * 100, 2 * 99);
2151 else if (IS_GEN9(dev_priv) || 2143 else if (IS_GEN9(dev_priv) ||
2152 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 2144 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
2153 return pixel_rate; 2145 return pixel_rate;
@@ -2543,14 +2535,8 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
2543{ 2535{
2544 int max_cdclk_freq = dev_priv->max_cdclk_freq; 2536 int max_cdclk_freq = dev_priv->max_cdclk_freq;
2545 2537
2546 if (INTEL_GEN(dev_priv) >= 10) 2538 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
2547 return 2 * max_cdclk_freq; 2539 return 2 * max_cdclk_freq;
2548 else if (IS_GEMINILAKE(dev_priv))
2549 /*
2550 * FIXME: Limiting to 99% as a temporary workaround. See
2551 * intel_min_cdclk() for details.
2552 */
2553 return 2 * max_cdclk_freq * 99 / 100;
2554 else if (IS_GEN9(dev_priv) || 2540 else if (IS_GEN9(dev_priv) ||
2555 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 2541 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
2556 return max_cdclk_freq; 2542 return max_cdclk_freq;
@@ -2674,37 +2660,18 @@ static int cnp_rawclk(struct drm_i915_private *dev_priv)
2674 fraction = 200; 2660 fraction = 200;
2675 } 2661 }
2676 2662
2677 rawclk = CNP_RAWCLK_DIV((divider / 1000) - 1); 2663 rawclk = CNP_RAWCLK_DIV(divider / 1000);
2678 if (fraction) 2664 if (fraction) {
2679 rawclk |= CNP_RAWCLK_FRAC(DIV_ROUND_CLOSEST(1000, 2665 int numerator = 1;
2680 fraction) - 1);
2681
2682 I915_WRITE(PCH_RAWCLK_FREQ, rawclk);
2683 return divider + fraction;
2684}
2685 2666
2686static int icp_rawclk(struct drm_i915_private *dev_priv) 2667 rawclk |= CNP_RAWCLK_DEN(DIV_ROUND_CLOSEST(numerator * 1000,
2687{ 2668 fraction) - 1);
2688 u32 rawclk; 2669 if (HAS_PCH_ICP(dev_priv))
2689 int divider, numerator, denominator, frequency; 2670 rawclk |= ICP_RAWCLK_NUM(numerator);
2690
2691 if (I915_READ(SFUSE_STRAP) & SFUSE_STRAP_RAW_FREQUENCY) {
2692 frequency = 24000;
2693 divider = 23;
2694 numerator = 0;
2695 denominator = 0;
2696 } else {
2697 frequency = 19200;
2698 divider = 18;
2699 numerator = 1;
2700 denominator = 4;
2701 } 2671 }
2702 2672
2703 rawclk = CNP_RAWCLK_DIV(divider) | ICP_RAWCLK_NUM(numerator) |
2704 ICP_RAWCLK_DEN(denominator);
2705
2706 I915_WRITE(PCH_RAWCLK_FREQ, rawclk); 2673 I915_WRITE(PCH_RAWCLK_FREQ, rawclk);
2707 return frequency; 2674 return divider + fraction;
2708} 2675}
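
With icp_rawclk() folded into cnp_rawclk(), the register fields are derived as: DIV is the integer MHz of the divider, DEN is round_closest(numerator * 1000, fraction) - 1, and the ICP-only NUM field carries the numerator; the function returns divider + fraction in kHz. The arithmetic for the 19.2 MHz reference, checked in plain C (the field-packing macros are omitted; this only reproduces the math):

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	/* 19.2 MHz reference: divider = 19000 kHz, fraction = 200 kHz. */
	int divider = 19000, fraction = 200, numerator = 1;

	int div_field = divider / 1000;                                    /* 19 */
	int den_field = DIV_ROUND_CLOSEST(numerator * 1000, fraction) - 1; /* 4 */

	printf("DIV=%d NUM=%d DEN=%d rawclk=%d kHz\n",
	       div_field, numerator, den_field, divider + fraction);
	return 0;
}
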
2709 2676
2710static int pch_rawclk(struct drm_i915_private *dev_priv) 2677static int pch_rawclk(struct drm_i915_private *dev_priv)
@@ -2754,9 +2721,7 @@ static int g4x_hrawclk(struct drm_i915_private *dev_priv)
2754 */ 2721 */
2755void intel_update_rawclk(struct drm_i915_private *dev_priv) 2722void intel_update_rawclk(struct drm_i915_private *dev_priv)
2756{ 2723{
2757 if (HAS_PCH_ICP(dev_priv)) 2724 if (HAS_PCH_CNP(dev_priv) || HAS_PCH_ICP(dev_priv))
2758 dev_priv->rawclk_freq = icp_rawclk(dev_priv);
2759 else if (HAS_PCH_CNP(dev_priv))
2760 dev_priv->rawclk_freq = cnp_rawclk(dev_priv); 2725 dev_priv->rawclk_freq = cnp_rawclk(dev_priv);
2761 else if (HAS_PCH_SPLIT(dev_priv)) 2726 else if (HAS_PCH_SPLIT(dev_priv))
2762 dev_priv->rawclk_freq = pch_rawclk(dev_priv); 2727 dev_priv->rawclk_freq = pch_rawclk(dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index c6a7beabd58d..5127da286a2b 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -149,7 +149,8 @@ static void ilk_load_csc_matrix(struct drm_crtc_state *crtc_state)
149 if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) 149 if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv))
150 limited_color_range = intel_crtc_state->limited_color_range; 150 limited_color_range = intel_crtc_state->limited_color_range;
151 151
152 if (intel_crtc_state->ycbcr420) { 152 if (intel_crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
153 intel_crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) {
153 ilk_load_ycbcr_conversion_matrix(intel_crtc); 154 ilk_load_ycbcr_conversion_matrix(intel_crtc);
154 return; 155 return;
155 } else if (crtc_state->ctm) { 156 } else if (crtc_state->ctm) {
diff --git a/drivers/gpu/drm/i915/intel_combo_phy.c b/drivers/gpu/drm/i915/intel_combo_phy.c
new file mode 100644
index 000000000000..3d0271cebf99
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_combo_phy.c
@@ -0,0 +1,254 @@
1// SPDX-License-Identifier: MIT
2/*
3 * Copyright © 2018 Intel Corporation
4 */
5
6#include "intel_drv.h"
7
8#define for_each_combo_port(__dev_priv, __port) \
9 for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
10 for_each_if(intel_port_is_combophy(__dev_priv, __port))
11
12#define for_each_combo_port_reverse(__dev_priv, __port) \
13 for ((__port) = I915_MAX_PORTS; (__port)-- > PORT_A;) \
14 for_each_if(intel_port_is_combophy(__dev_priv, __port))
15
16enum {
17 PROCMON_0_85V_DOT_0,
18 PROCMON_0_95V_DOT_0,
19 PROCMON_0_95V_DOT_1,
20 PROCMON_1_05V_DOT_0,
21 PROCMON_1_05V_DOT_1,
22};
23
24static const struct cnl_procmon {
25 u32 dw1, dw9, dw10;
26} cnl_procmon_values[] = {
27 [PROCMON_0_85V_DOT_0] =
28 { .dw1 = 0x00000000, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96, },
29 [PROCMON_0_95V_DOT_0] =
30 { .dw1 = 0x00000000, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB, },
31 [PROCMON_0_95V_DOT_1] =
32 { .dw1 = 0x00000000, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5, },
33 [PROCMON_1_05V_DOT_0] =
34 { .dw1 = 0x00000000, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1, },
35 [PROCMON_1_05V_DOT_1] =
36 { .dw1 = 0x00440000, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, },
37};
38
39/*
40 * CNL has just one set of registers, while ICL has two sets: one for port A and
41 * the other for port B. The CNL registers are equivalent to the ICL port A
 42 * registers, which is why we use the ICL macros even though the function
 43 * has CNL in its name.
44 */
45static const struct cnl_procmon *
46cnl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum port port)
47{
48 const struct cnl_procmon *procmon;
49 u32 val;
50
51 val = I915_READ(ICL_PORT_COMP_DW3(port));
52 switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
53 default:
54 MISSING_CASE(val);
55 /* fall through */
56 case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0:
57 procmon = &cnl_procmon_values[PROCMON_0_85V_DOT_0];
58 break;
59 case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0:
60 procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_0];
61 break;
62 case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1:
63 procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_1];
64 break;
65 case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0:
66 procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_0];
67 break;
68 case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1:
69 procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_1];
70 break;
71 }
72
73 return procmon;
74}
75
76static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
77 enum port port)
78{
79 const struct cnl_procmon *procmon;
80 u32 val;
81
82 procmon = cnl_get_procmon_ref_values(dev_priv, port);
83
84 val = I915_READ(ICL_PORT_COMP_DW1(port));
85 val &= ~((0xff << 16) | 0xff);
86 val |= procmon->dw1;
87 I915_WRITE(ICL_PORT_COMP_DW1(port), val);
88
89 I915_WRITE(ICL_PORT_COMP_DW9(port), procmon->dw9);
90 I915_WRITE(ICL_PORT_COMP_DW10(port), procmon->dw10);
91}
92
93static bool check_phy_reg(struct drm_i915_private *dev_priv,
94 enum port port, i915_reg_t reg, u32 mask,
95 u32 expected_val)
96{
97 u32 val = I915_READ(reg);
98
99 if ((val & mask) != expected_val) {
100 DRM_DEBUG_DRIVER("Port %c combo PHY reg %08x state mismatch: "
101 "current %08x mask %08x expected %08x\n",
102 port_name(port),
103 reg.reg, val, mask, expected_val);
104 return false;
105 }
106
107 return true;
108}
109
110static bool cnl_verify_procmon_ref_values(struct drm_i915_private *dev_priv,
111 enum port port)
112{
113 const struct cnl_procmon *procmon;
114 bool ret;
115
116 procmon = cnl_get_procmon_ref_values(dev_priv, port);
117
118 ret = check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW1(port),
119 (0xff << 16) | 0xff, procmon->dw1);
120 ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW9(port),
121 -1U, procmon->dw9);
122 ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW10(port),
123 -1U, procmon->dw10);
124
125 return ret;
126}
127
128static bool cnl_combo_phy_enabled(struct drm_i915_private *dev_priv)
129{
130 return !(I915_READ(CHICKEN_MISC_2) & CNL_COMP_PWR_DOWN) &&
131 (I915_READ(CNL_PORT_COMP_DW0) & COMP_INIT);
132}
133
134static bool cnl_combo_phy_verify_state(struct drm_i915_private *dev_priv)
135{
136 enum port port = PORT_A;
137 bool ret;
138
139 if (!cnl_combo_phy_enabled(dev_priv))
140 return false;
141
142 ret = cnl_verify_procmon_ref_values(dev_priv, port);
143
144 ret &= check_phy_reg(dev_priv, port, CNL_PORT_CL1CM_DW5,
145 CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE);
146
147 return ret;
148}
149
150void cnl_combo_phys_init(struct drm_i915_private *dev_priv)
151{
152 u32 val;
153
154 val = I915_READ(CHICKEN_MISC_2);
155 val &= ~CNL_COMP_PWR_DOWN;
156 I915_WRITE(CHICKEN_MISC_2, val);
157
158 /* Dummy PORT_A to get the correct CNL register from the ICL macro */
159 cnl_set_procmon_ref_values(dev_priv, PORT_A);
160
161 val = I915_READ(CNL_PORT_COMP_DW0);
162 val |= COMP_INIT;
163 I915_WRITE(CNL_PORT_COMP_DW0, val);
164
165 val = I915_READ(CNL_PORT_CL1CM_DW5);
166 val |= CL_POWER_DOWN_ENABLE;
167 I915_WRITE(CNL_PORT_CL1CM_DW5, val);
168}
169
170void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv)
171{
172 u32 val;
173
174 if (!cnl_combo_phy_verify_state(dev_priv))
175 DRM_WARN("Combo PHY HW state changed unexpectedly.\n");
176
177 val = I915_READ(CHICKEN_MISC_2);
178 val |= CNL_COMP_PWR_DOWN;
179 I915_WRITE(CHICKEN_MISC_2, val);
180}
181
182static bool icl_combo_phy_enabled(struct drm_i915_private *dev_priv,
183 enum port port)
184{
185 return !(I915_READ(ICL_PHY_MISC(port)) &
186 ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN) &&
187 (I915_READ(ICL_PORT_COMP_DW0(port)) & COMP_INIT);
188}
189
190static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
191 enum port port)
192{
193 bool ret;
194
195 if (!icl_combo_phy_enabled(dev_priv, port))
196 return false;
197
198 ret = cnl_verify_procmon_ref_values(dev_priv, port);
199
200 ret &= check_phy_reg(dev_priv, port, ICL_PORT_CL_DW5(port),
201 CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE);
202
203 return ret;
204}
205
206void icl_combo_phys_init(struct drm_i915_private *dev_priv)
207{
208 enum port port;
209
210 for_each_combo_port(dev_priv, port) {
211 u32 val;
212
213 if (icl_combo_phy_verify_state(dev_priv, port)) {
214 DRM_DEBUG_DRIVER("Port %c combo PHY already enabled, won't reprogram it.\n",
215 port_name(port));
216 continue;
217 }
218
219 val = I915_READ(ICL_PHY_MISC(port));
220 val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
221 I915_WRITE(ICL_PHY_MISC(port), val);
222
223 cnl_set_procmon_ref_values(dev_priv, port);
224
225 val = I915_READ(ICL_PORT_COMP_DW0(port));
226 val |= COMP_INIT;
227 I915_WRITE(ICL_PORT_COMP_DW0(port), val);
228
229 val = I915_READ(ICL_PORT_CL_DW5(port));
230 val |= CL_POWER_DOWN_ENABLE;
231 I915_WRITE(ICL_PORT_CL_DW5(port), val);
232 }
233}
234
235void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
236{
237 enum port port;
238
239 for_each_combo_port_reverse(dev_priv, port) {
240 u32 val;
241
242 if (!icl_combo_phy_verify_state(dev_priv, port))
243 DRM_WARN("Port %c combo PHY HW state changed unexpectedly\n",
244 port_name(port));
245
246 val = I915_READ(ICL_PHY_MISC(port));
247 val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
248 I915_WRITE(ICL_PHY_MISC(port), val);
249
250 val = I915_READ(ICL_PORT_COMP_DW0(port));
251 val &= ~COMP_INIT;
252 I915_WRITE(ICL_PORT_COMP_DW0(port), val);
253 }
254}
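
The new file's central pattern is a fuse-keyed lookup table (voltage/process point -> DW1/DW9/DW10 reference values) with a warned fallback, used both to program the PHY and to verify it later. A self-contained sketch of that lookup, with illustrative bit encodings standing in for the real PROCESS_INFO/VOLTAGE_INFO masks:

#include <stdio.h>

/* Illustrative field encodings; the real masks live in i915_reg.h. */
#define VOLTAGE_0_85V  (0u << 0)
#define VOLTAGE_0_95V  (1u << 0)
#define PROCESS_DOT_0  (0u << 4)
#define PROCESS_DOT_1  (1u << 4)

struct procmon { unsigned int dw1, dw9, dw10; };

static const struct procmon *lookup(unsigned int fuse)
{
	static const struct procmon v085_d0 = { 0x0, 0x62AB67BB, 0x51914F96 };
	static const struct procmon v095_d1 = { 0x0, 0x93F87FE1, 0x8AE871C5 };

	switch (fuse) {
	case VOLTAGE_0_85V | PROCESS_DOT_0: return &v085_d0;
	case VOLTAGE_0_95V | PROCESS_DOT_1: return &v095_d1;
	default:
		/* Unknown fuse: complain and pick a safe default. */
		fprintf(stderr, "unknown voltage/process fuse %#x\n", fuse);
		return &v085_d0;
	}
}

int main(void)
{
	printf("dw9 = %#x\n", lookup(VOLTAGE_0_95V | PROCESS_DOT_1)->dw9);
	return 0;
}
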
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_connector.c
index ca44bf368e24..18e370f607bc 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_connector.c
@@ -25,11 +25,140 @@
25 25
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/i2c.h> 27#include <linux/i2c.h>
28#include <drm/drm_atomic_helper.h>
28#include <drm/drm_edid.h> 29#include <drm/drm_edid.h>
29#include <drm/drmP.h> 30#include <drm/drmP.h>
30#include "intel_drv.h" 31#include "intel_drv.h"
31#include "i915_drv.h" 32#include "i915_drv.h"
32 33
34int intel_connector_init(struct intel_connector *connector)
35{
36 struct intel_digital_connector_state *conn_state;
37
38 /*
 39 * Allocate enough memory to hold intel_digital_connector_state.
 40 * This might be a few bytes too many, but for connectors that don't
41 * need it we'll free the state and allocate a smaller one on the first
42 * successful commit anyway.
43 */
44 conn_state = kzalloc(sizeof(*conn_state), GFP_KERNEL);
45 if (!conn_state)
46 return -ENOMEM;
47
48 __drm_atomic_helper_connector_reset(&connector->base,
49 &conn_state->base);
50
51 return 0;
52}
53
54struct intel_connector *intel_connector_alloc(void)
55{
56 struct intel_connector *connector;
57
58 connector = kzalloc(sizeof(*connector), GFP_KERNEL);
59 if (!connector)
60 return NULL;
61
62 if (intel_connector_init(connector) < 0) {
63 kfree(connector);
64 return NULL;
65 }
66
67 return connector;
68}
69
70/*
71 * Free the bits allocated by intel_connector_alloc.
72 * This should only be used after intel_connector_alloc has returned
73 * successfully, and before drm_connector_init returns successfully.
74 * Otherwise the destroy callbacks for the connector and the state should
75 * take care of proper cleanup/free (see intel_connector_destroy).
76 */
77void intel_connector_free(struct intel_connector *connector)
78{
79 kfree(to_intel_digital_connector_state(connector->base.state));
80 kfree(connector);
81}
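
intel_connector_free() is deliberately narrow: it is only safe between a successful intel_connector_alloc() and the point where drm_connector_init() succeeds and the destroy callback takes over ownership. A small userspace model of that error-path contract (the names here are illustrative, not the driver's):

#include <stdlib.h>

struct state { int dummy; };
struct connector { struct state *st; };

static struct connector *connector_alloc(void)
{
	struct connector *c = calloc(1, sizeof(*c));
	if (!c)
		return NULL;
	c->st = calloc(1, sizeof(*c->st));
	if (!c->st) {
		free(c);
		return NULL;
	}
	return c;
}

/* Only valid between a successful alloc and handing ownership to the core. */
static void connector_free(struct connector *c)
{
	free(c->st);
	free(c);
}

int main(void)
{
	struct connector *c = connector_alloc();
	if (!c)
		return 1;
	/* ...core init failed, so we still own both allocations... */
	connector_free(c);
	return 0;
}
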
82
83/*
84 * Connector type independent destroy hook for drm_connector_funcs.
85 */
86void intel_connector_destroy(struct drm_connector *connector)
87{
88 struct intel_connector *intel_connector = to_intel_connector(connector);
89
90 kfree(intel_connector->detect_edid);
91
92 if (!IS_ERR_OR_NULL(intel_connector->edid))
93 kfree(intel_connector->edid);
94
95 intel_panel_fini(&intel_connector->panel);
96
97 drm_connector_cleanup(connector);
98 kfree(connector);
99}
100
101int intel_connector_register(struct drm_connector *connector)
102{
103 struct intel_connector *intel_connector = to_intel_connector(connector);
104 int ret;
105
106 ret = intel_backlight_device_register(intel_connector);
107 if (ret)
108 goto err;
109
110 if (i915_inject_load_failure()) {
111 ret = -EFAULT;
112 goto err_backlight;
113 }
114
115 return 0;
116
117err_backlight:
118 intel_backlight_device_unregister(intel_connector);
119err:
120 return ret;
121}
122
123void intel_connector_unregister(struct drm_connector *connector)
124{
125 struct intel_connector *intel_connector = to_intel_connector(connector);
126
127 intel_backlight_device_unregister(intel_connector);
128}
129
130void intel_connector_attach_encoder(struct intel_connector *connector,
131 struct intel_encoder *encoder)
132{
133 connector->encoder = encoder;
134 drm_connector_attach_encoder(&connector->base, &encoder->base);
135}
136
137/*
138 * Simple connector->get_hw_state implementation for encoders that support only
 139 * one connector and no cloning, and hence the encoder state determines
 140 * the state of the connector.
141 */
142bool intel_connector_get_hw_state(struct intel_connector *connector)
143{
144 enum pipe pipe = 0;
145 struct intel_encoder *encoder = connector->encoder;
146
147 return encoder->get_hw_state(encoder, &pipe);
148}
149
150enum pipe intel_connector_get_pipe(struct intel_connector *connector)
151{
152 struct drm_device *dev = connector->base.dev;
153
154 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
155
156 if (!connector->base.state->crtc)
157 return INVALID_PIPE;
158
159 return to_intel_crtc(connector->base.state->crtc)->pipe;
160}
161
33/** 162/**
34 * intel_connector_update_modes - update connector from edid 163 * intel_connector_update_modes - update connector from edid
35 * @connector: DRM connector device to use 164 * @connector: DRM connector device to use
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 0c6bf82bb059..68f2fb89ece3 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -354,6 +354,7 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
354 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) 354 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
355 return false; 355 return false;
356 356
357 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
357 return true; 358 return true;
358} 359}
359 360
@@ -368,6 +369,7 @@ static bool pch_crt_compute_config(struct intel_encoder *encoder,
368 return false; 369 return false;
369 370
370 pipe_config->has_pch_encoder = true; 371 pipe_config->has_pch_encoder = true;
372 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
371 373
372 return true; 374 return true;
373} 375}
@@ -389,6 +391,7 @@ static bool hsw_crt_compute_config(struct intel_encoder *encoder,
389 return false; 391 return false;
390 392
391 pipe_config->has_pch_encoder = true; 393 pipe_config->has_pch_encoder = true;
394 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
392 395
393 /* LPT FDI RX only supports 8bpc. */ 396 /* LPT FDI RX only supports 8bpc. */
394 if (HAS_PCH_LPT(dev_priv)) { 397 if (HAS_PCH_LPT(dev_priv)) {
@@ -849,12 +852,6 @@ out:
849 return status; 852 return status;
850} 853}
851 854
852static void intel_crt_destroy(struct drm_connector *connector)
853{
854 drm_connector_cleanup(connector);
855 kfree(connector);
856}
857
858static int intel_crt_get_modes(struct drm_connector *connector) 855static int intel_crt_get_modes(struct drm_connector *connector)
859{ 856{
860 struct drm_device *dev = connector->dev; 857 struct drm_device *dev = connector->dev;
@@ -909,7 +906,7 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
909 .fill_modes = drm_helper_probe_single_connector_modes, 906 .fill_modes = drm_helper_probe_single_connector_modes,
910 .late_register = intel_connector_register, 907 .late_register = intel_connector_register,
911 .early_unregister = intel_connector_unregister, 908 .early_unregister = intel_connector_unregister,
912 .destroy = intel_crt_destroy, 909 .destroy = intel_connector_destroy,
913 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 910 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
914 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, 911 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
915}; 912};
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index d48186e9ddad..a516697bf57d 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -34,34 +34,38 @@
34 * low-power state and comes back to normal. 34 * low-power state and comes back to normal.
35 */ 35 */
36 36
37#define I915_CSR_ICL "i915/icl_dmc_ver1_07.bin" 37#define GEN12_CSR_MAX_FW_SIZE ICL_CSR_MAX_FW_SIZE
38MODULE_FIRMWARE(I915_CSR_ICL);
39#define ICL_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
40 38
41#define I915_CSR_GLK "i915/glk_dmc_ver1_04.bin" 39#define ICL_CSR_PATH "i915/icl_dmc_ver1_07.bin"
42MODULE_FIRMWARE(I915_CSR_GLK); 40#define ICL_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
43#define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 4) 41#define ICL_CSR_MAX_FW_SIZE 0x6000
42MODULE_FIRMWARE(ICL_CSR_PATH);
44 43
45#define I915_CSR_CNL "i915/cnl_dmc_ver1_07.bin" 44#define CNL_CSR_PATH "i915/cnl_dmc_ver1_07.bin"
46MODULE_FIRMWARE(I915_CSR_CNL);
47#define CNL_CSR_VERSION_REQUIRED CSR_VERSION(1, 7) 45#define CNL_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
46#define CNL_CSR_MAX_FW_SIZE GLK_CSR_MAX_FW_SIZE
47MODULE_FIRMWARE(CNL_CSR_PATH);
48
49#define GLK_CSR_PATH "i915/glk_dmc_ver1_04.bin"
50#define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 4)
51#define GLK_CSR_MAX_FW_SIZE 0x4000
52MODULE_FIRMWARE(GLK_CSR_PATH);
48 53
49#define I915_CSR_KBL "i915/kbl_dmc_ver1_04.bin" 54#define KBL_CSR_PATH "i915/kbl_dmc_ver1_04.bin"
50MODULE_FIRMWARE(I915_CSR_KBL);
51#define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 4) 55#define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 4)
56#define KBL_CSR_MAX_FW_SIZE BXT_CSR_MAX_FW_SIZE
57MODULE_FIRMWARE(KBL_CSR_PATH);
52 58
53#define I915_CSR_SKL "i915/skl_dmc_ver1_27.bin" 59#define SKL_CSR_PATH "i915/skl_dmc_ver1_27.bin"
54MODULE_FIRMWARE(I915_CSR_SKL);
55#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 27) 60#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 27)
61#define SKL_CSR_MAX_FW_SIZE BXT_CSR_MAX_FW_SIZE
62MODULE_FIRMWARE(SKL_CSR_PATH);
56 63
57#define I915_CSR_BXT "i915/bxt_dmc_ver1_07.bin" 64#define BXT_CSR_PATH "i915/bxt_dmc_ver1_07.bin"
58MODULE_FIRMWARE(I915_CSR_BXT);
59#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7) 65#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
60
61
62#define BXT_CSR_MAX_FW_SIZE 0x3000 66#define BXT_CSR_MAX_FW_SIZE 0x3000
63#define GLK_CSR_MAX_FW_SIZE 0x4000 67MODULE_FIRMWARE(BXT_CSR_PATH);
64#define ICL_CSR_MAX_FW_SIZE 0x6000 68
65#define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF 69#define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF
66 70
67struct intel_css_header { 71struct intel_css_header {
@@ -190,6 +194,12 @@ static const struct stepping_info bxt_stepping_info[] = {
190 {'B', '0'}, {'B', '1'}, {'B', '2'} 194 {'B', '0'}, {'B', '1'}, {'B', '2'}
191}; 195};
192 196
197static const struct stepping_info icl_stepping_info[] = {
198 {'A', '0'}, {'A', '1'}, {'A', '2'},
199 {'B', '0'}, {'B', '2'},
200 {'C', '0'}
201};
202
193static const struct stepping_info no_stepping_info = { '*', '*' }; 203static const struct stepping_info no_stepping_info = { '*', '*' };
194 204
195static const struct stepping_info * 205static const struct stepping_info *
@@ -198,7 +208,10 @@ intel_get_stepping_info(struct drm_i915_private *dev_priv)
198 const struct stepping_info *si; 208 const struct stepping_info *si;
199 unsigned int size; 209 unsigned int size;
200 210
201 if (IS_SKYLAKE(dev_priv)) { 211 if (IS_ICELAKE(dev_priv)) {
212 size = ARRAY_SIZE(icl_stepping_info);
213 si = icl_stepping_info;
214 } else if (IS_SKYLAKE(dev_priv)) {
202 size = ARRAY_SIZE(skl_stepping_info); 215 size = ARRAY_SIZE(skl_stepping_info);
203 si = skl_stepping_info; 216 si = skl_stepping_info;
204 } else if (IS_BROXTON(dev_priv)) { 217 } else if (IS_BROXTON(dev_priv)) {
@@ -285,10 +298,8 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
285 struct intel_csr *csr = &dev_priv->csr; 298 struct intel_csr *csr = &dev_priv->csr;
286 const struct stepping_info *si = intel_get_stepping_info(dev_priv); 299 const struct stepping_info *si = intel_get_stepping_info(dev_priv);
287 uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes; 300 uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
288 uint32_t max_fw_size = 0;
289 uint32_t i; 301 uint32_t i;
290 uint32_t *dmc_payload; 302 uint32_t *dmc_payload;
291 uint32_t required_version;
292 303
293 if (!fw) 304 if (!fw)
294 return NULL; 305 return NULL;
@@ -303,38 +314,19 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
303 return NULL; 314 return NULL;
304 } 315 }
305 316
306 csr->version = css_header->version; 317 if (csr->required_version &&
307 318 css_header->version != csr->required_version) {
308 if (csr->fw_path == i915_modparams.dmc_firmware_path) {
309 /* Bypass version check for firmware override. */
310 required_version = csr->version;
311 } else if (IS_ICELAKE(dev_priv)) {
312 required_version = ICL_CSR_VERSION_REQUIRED;
313 } else if (IS_CANNONLAKE(dev_priv)) {
314 required_version = CNL_CSR_VERSION_REQUIRED;
315 } else if (IS_GEMINILAKE(dev_priv)) {
316 required_version = GLK_CSR_VERSION_REQUIRED;
317 } else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
318 required_version = KBL_CSR_VERSION_REQUIRED;
319 } else if (IS_SKYLAKE(dev_priv)) {
320 required_version = SKL_CSR_VERSION_REQUIRED;
321 } else if (IS_BROXTON(dev_priv)) {
322 required_version = BXT_CSR_VERSION_REQUIRED;
323 } else {
324 MISSING_CASE(INTEL_REVID(dev_priv));
325 required_version = 0;
326 }
327
328 if (csr->version != required_version) {
329 DRM_INFO("Refusing to load DMC firmware v%u.%u," 319 DRM_INFO("Refusing to load DMC firmware v%u.%u,"
330 " please use v%u.%u\n", 320 " please use v%u.%u\n",
331 CSR_VERSION_MAJOR(csr->version), 321 CSR_VERSION_MAJOR(css_header->version),
332 CSR_VERSION_MINOR(csr->version), 322 CSR_VERSION_MINOR(css_header->version),
333 CSR_VERSION_MAJOR(required_version), 323 CSR_VERSION_MAJOR(csr->required_version),
334 CSR_VERSION_MINOR(required_version)); 324 CSR_VERSION_MINOR(csr->required_version));
335 return NULL; 325 return NULL;
336 } 326 }
337 327
328 csr->version = css_header->version;
329
338 readcount += sizeof(struct intel_css_header); 330 readcount += sizeof(struct intel_css_header);
339 331
340 /* Extract Package Header information*/ 332 /* Extract Package Header information*/
@@ -402,15 +394,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
402 394
403 /* fw_size is in dwords, so multiplied by 4 to convert into bytes. */ 395 /* fw_size is in dwords, so multiplied by 4 to convert into bytes. */
404 nbytes = dmc_header->fw_size * 4; 396 nbytes = dmc_header->fw_size * 4;
405 if (INTEL_GEN(dev_priv) >= 11) 397 if (nbytes > csr->max_fw_size) {
406 max_fw_size = ICL_CSR_MAX_FW_SIZE;
407 else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
408 max_fw_size = GLK_CSR_MAX_FW_SIZE;
409 else if (IS_GEN9(dev_priv))
410 max_fw_size = BXT_CSR_MAX_FW_SIZE;
411 else
412 MISSING_CASE(INTEL_REVID(dev_priv));
413 if (nbytes > max_fw_size) {
414 DRM_ERROR("DMC FW too big (%u bytes)\n", nbytes); 398 DRM_ERROR("DMC FW too big (%u bytes)\n", nbytes);
415 return NULL; 399 return NULL;
416 } 400 }
@@ -475,27 +459,57 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
475 if (!HAS_CSR(dev_priv)) 459 if (!HAS_CSR(dev_priv))
476 return; 460 return;
477 461
478 if (i915_modparams.dmc_firmware_path)
479 csr->fw_path = i915_modparams.dmc_firmware_path;
480 else if (IS_ICELAKE(dev_priv))
481 csr->fw_path = I915_CSR_ICL;
482 else if (IS_CANNONLAKE(dev_priv))
483 csr->fw_path = I915_CSR_CNL;
484 else if (IS_GEMINILAKE(dev_priv))
485 csr->fw_path = I915_CSR_GLK;
486 else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
487 csr->fw_path = I915_CSR_KBL;
488 else if (IS_SKYLAKE(dev_priv))
489 csr->fw_path = I915_CSR_SKL;
490 else if (IS_BROXTON(dev_priv))
491 csr->fw_path = I915_CSR_BXT;
492
493 /* 462 /*
494 * Obtain a runtime pm reference, until CSR is loaded, 463 * Obtain a runtime pm reference, until CSR is loaded, to avoid entering
495 * to avoid entering runtime-suspend. 464 * runtime-suspend.
465 *
466 * On error, we return with the rpm wakeref held to prevent runtime
467 * suspend as runtime suspend *requires* a working CSR for whatever
468 * reason.
496 */ 469 */
497 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); 470 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
498 471
472 if (INTEL_GEN(dev_priv) >= 12) {
 473 /* Allow loading fw via parameter using the last known size */
474 csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE;
475 } else if (IS_ICELAKE(dev_priv)) {
476 csr->fw_path = ICL_CSR_PATH;
477 csr->required_version = ICL_CSR_VERSION_REQUIRED;
478 csr->max_fw_size = ICL_CSR_MAX_FW_SIZE;
479 } else if (IS_CANNONLAKE(dev_priv)) {
480 csr->fw_path = CNL_CSR_PATH;
481 csr->required_version = CNL_CSR_VERSION_REQUIRED;
482 csr->max_fw_size = CNL_CSR_MAX_FW_SIZE;
483 } else if (IS_GEMINILAKE(dev_priv)) {
484 csr->fw_path = GLK_CSR_PATH;
485 csr->required_version = GLK_CSR_VERSION_REQUIRED;
486 csr->max_fw_size = GLK_CSR_MAX_FW_SIZE;
487 } else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
488 csr->fw_path = KBL_CSR_PATH;
489 csr->required_version = KBL_CSR_VERSION_REQUIRED;
490 csr->max_fw_size = KBL_CSR_MAX_FW_SIZE;
491 } else if (IS_SKYLAKE(dev_priv)) {
492 csr->fw_path = SKL_CSR_PATH;
493 csr->required_version = SKL_CSR_VERSION_REQUIRED;
494 csr->max_fw_size = SKL_CSR_MAX_FW_SIZE;
495 } else if (IS_BROXTON(dev_priv)) {
496 csr->fw_path = BXT_CSR_PATH;
497 csr->required_version = BXT_CSR_VERSION_REQUIRED;
498 csr->max_fw_size = BXT_CSR_MAX_FW_SIZE;
499 }
500
501 if (i915_modparams.dmc_firmware_path) {
502 if (strlen(i915_modparams.dmc_firmware_path) == 0) {
503 csr->fw_path = NULL;
504 DRM_INFO("Disabling CSR firmware and runtime PM\n");
505 return;
506 }
507
508 csr->fw_path = i915_modparams.dmc_firmware_path;
509 /* Bypass version check for firmware override. */
510 csr->required_version = 0;
511 }
512
499 if (csr->fw_path == NULL) { 513 if (csr->fw_path == NULL) {
500 DRM_DEBUG_KMS("No known CSR firmware for platform, disabling runtime PM\n"); 514 DRM_DEBUG_KMS("No known CSR firmware for platform, disabling runtime PM\n");
501 WARN_ON(!IS_ALPHA_SUPPORT(INTEL_INFO(dev_priv))); 515 WARN_ON(!IS_ALPHA_SUPPORT(INTEL_INFO(dev_priv)));
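
The refactor above moves path, required version, and maximum size into per-platform csr fields, with required_version == 0 meaning "user override, accept any version". A sketch of that acceptance rule (the version encoding here is illustrative, not the driver's CSR_VERSION packing):

#include <stdio.h>

struct csr_fw {
	const char *path;
	unsigned int required_version; /* 0 = bypass the check */
	unsigned int max_fw_size;
};

static int version_ok(const struct csr_fw *fw, unsigned int header_version)
{
	/* An override (required_version == 0) accepts any firmware version. */
	return !fw->required_version || header_version == fw->required_version;
}

int main(void)
{
	struct csr_fw platform = { "i915/xxx_dmc.bin", 0x0107, 0x6000 };
	struct csr_fw override = { "/lib/firmware/custom.bin", 0, 0x6000 };

	printf("platform accepts v1.8: %d\n", version_ok(&platform, 0x0108));
	printf("override accepts v1.8: %d\n", version_ok(&override, 0x0108));
	return 0;
}
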
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 5186cd7075f9..ad11540ac436 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -642,7 +642,7 @@ skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
642static const struct ddi_buf_trans * 642static const struct ddi_buf_trans *
643kbl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries) 643kbl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
644{ 644{
645 if (IS_KBL_ULX(dev_priv)) { 645 if (IS_KBL_ULX(dev_priv) || IS_AML_ULX(dev_priv)) {
646 *n_entries = ARRAY_SIZE(kbl_y_ddi_translations_dp); 646 *n_entries = ARRAY_SIZE(kbl_y_ddi_translations_dp);
647 return kbl_y_ddi_translations_dp; 647 return kbl_y_ddi_translations_dp;
648 } else if (IS_KBL_ULT(dev_priv) || IS_CFL_ULT(dev_priv)) { 648 } else if (IS_KBL_ULT(dev_priv) || IS_CFL_ULT(dev_priv)) {
@@ -658,7 +658,7 @@ static const struct ddi_buf_trans *
658skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries) 658skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
659{ 659{
660 if (dev_priv->vbt.edp.low_vswing) { 660 if (dev_priv->vbt.edp.low_vswing) {
661 if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) { 661 if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv) || IS_AML_ULX(dev_priv)) {
662 *n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp); 662 *n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
663 return skl_y_ddi_translations_edp; 663 return skl_y_ddi_translations_edp;
664 } else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv) || 664 } else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv) ||
@@ -680,7 +680,7 @@ skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
680static const struct ddi_buf_trans * 680static const struct ddi_buf_trans *
681skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries) 681skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
682{ 682{
683 if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) { 683 if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv) || IS_AML_ULX(dev_priv)) {
684 *n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi); 684 *n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi);
685 return skl_y_ddi_translations_hdmi; 685 return skl_y_ddi_translations_hdmi;
686 } else { 686 } else {
@@ -1060,10 +1060,10 @@ static uint32_t hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
1060} 1060}
1061 1061
1062static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder, 1062static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder,
1063 const struct intel_shared_dpll *pll) 1063 const struct intel_crtc_state *crtc_state)
1064{ 1064{
1065 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 1065 const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
1066 int clock = crtc->config->port_clock; 1066 int clock = crtc_state->port_clock;
1067 const enum intel_dpll_id id = pll->info->id; 1067 const enum intel_dpll_id id = pll->info->id;
1068 1068
1069 switch (id) { 1069 switch (id) {
@@ -1517,7 +1517,7 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
1517 else 1517 else
1518 dotclock = pipe_config->port_clock; 1518 dotclock = pipe_config->port_clock;
1519 1519
1520 if (pipe_config->ycbcr420) 1520 if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
1521 dotclock *= 2; 1521 dotclock *= 2;
1522 1522
1523 if (pipe_config->pixel_multiplier) 1523 if (pipe_config->pixel_multiplier)
@@ -1737,16 +1737,16 @@ static void intel_ddi_clock_get(struct intel_encoder *encoder,
1737{ 1737{
1738 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 1738 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1739 1739
1740 if (INTEL_GEN(dev_priv) <= 8) 1740 if (IS_ICELAKE(dev_priv))
1741 hsw_ddi_clock_get(encoder, pipe_config); 1741 icl_ddi_clock_get(encoder, pipe_config);
1742 else if (IS_GEN9_BC(dev_priv))
1743 skl_ddi_clock_get(encoder, pipe_config);
1744 else if (IS_GEN9_LP(dev_priv))
1745 bxt_ddi_clock_get(encoder, pipe_config);
1746 else if (IS_CANNONLAKE(dev_priv)) 1742 else if (IS_CANNONLAKE(dev_priv))
1747 cnl_ddi_clock_get(encoder, pipe_config); 1743 cnl_ddi_clock_get(encoder, pipe_config);
1748 else if (IS_ICELAKE(dev_priv)) 1744 else if (IS_GEN9_LP(dev_priv))
1749 icl_ddi_clock_get(encoder, pipe_config); 1745 bxt_ddi_clock_get(encoder, pipe_config);
1746 else if (IS_GEN9_BC(dev_priv))
1747 skl_ddi_clock_get(encoder, pipe_config);
1748 else if (INTEL_GEN(dev_priv) <= 8)
1749 hsw_ddi_clock_get(encoder, pipe_config);
1750} 1750}
1751 1751
1752void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state) 1752void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
@@ -1784,6 +1784,13 @@ void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
1784 break; 1784 break;
1785 } 1785 }
1786 1786
1787 /*
 1788 * As per DP 1.2 spec section 2.3.4.3, while sending
1789 * YCBCR 444 signals we should program MSA MISC1/0 fields with
1790 * colorspace information. The output colorspace encoding is BT601.
1791 */
1792 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
1793 temp |= TRANS_MSA_SAMPLING_444 | TRANS_MSA_CLRSP_YCBCR;
1787 I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp); 1794 I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
1788} 1795}
1789 1796
@@ -1998,24 +2005,24 @@ out:
1998 return ret; 2005 return ret;
1999} 2006}
2000 2007
2001bool intel_ddi_get_hw_state(struct intel_encoder *encoder, 2008static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
2002 enum pipe *pipe) 2009 u8 *pipe_mask, bool *is_dp_mst)
2003{ 2010{
2004 struct drm_device *dev = encoder->base.dev; 2011 struct drm_device *dev = encoder->base.dev;
2005 struct drm_i915_private *dev_priv = to_i915(dev); 2012 struct drm_i915_private *dev_priv = to_i915(dev);
2006 enum port port = encoder->port; 2013 enum port port = encoder->port;
2007 enum pipe p; 2014 enum pipe p;
2008 u32 tmp; 2015 u32 tmp;
2009 bool ret; 2016 u8 mst_pipe_mask;
2017
2018 *pipe_mask = 0;
2019 *is_dp_mst = false;
2010 2020
2011 if (!intel_display_power_get_if_enabled(dev_priv, 2021 if (!intel_display_power_get_if_enabled(dev_priv,
2012 encoder->power_domain)) 2022 encoder->power_domain))
2013 return false; 2023 return;
2014
2015 ret = false;
2016 2024
2017 tmp = I915_READ(DDI_BUF_CTL(port)); 2025 tmp = I915_READ(DDI_BUF_CTL(port));
2018
2019 if (!(tmp & DDI_BUF_CTL_ENABLE)) 2026 if (!(tmp & DDI_BUF_CTL_ENABLE))
2020 goto out; 2027 goto out;
2021 2028
@@ -2023,44 +2030,58 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
2023 tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); 2030 tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
2024 2031
2025 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { 2032 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
2033 default:
2034 MISSING_CASE(tmp & TRANS_DDI_EDP_INPUT_MASK);
2035 /* fallthrough */
2026 case TRANS_DDI_EDP_INPUT_A_ON: 2036 case TRANS_DDI_EDP_INPUT_A_ON:
2027 case TRANS_DDI_EDP_INPUT_A_ONOFF: 2037 case TRANS_DDI_EDP_INPUT_A_ONOFF:
2028 *pipe = PIPE_A; 2038 *pipe_mask = BIT(PIPE_A);
2029 break; 2039 break;
2030 case TRANS_DDI_EDP_INPUT_B_ONOFF: 2040 case TRANS_DDI_EDP_INPUT_B_ONOFF:
2031 *pipe = PIPE_B; 2041 *pipe_mask = BIT(PIPE_B);
2032 break; 2042 break;
2033 case TRANS_DDI_EDP_INPUT_C_ONOFF: 2043 case TRANS_DDI_EDP_INPUT_C_ONOFF:
2034 *pipe = PIPE_C; 2044 *pipe_mask = BIT(PIPE_C);
2035 break; 2045 break;
2036 } 2046 }
2037 2047
2038 ret = true;
2039
2040 goto out; 2048 goto out;
2041 } 2049 }
2042 2050
2051 mst_pipe_mask = 0;
2043 for_each_pipe(dev_priv, p) { 2052 for_each_pipe(dev_priv, p) {
2044 enum transcoder cpu_transcoder = (enum transcoder) p; 2053 enum transcoder cpu_transcoder = (enum transcoder)p;
2045 2054
2046 tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); 2055 tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
2047 2056
2048 if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(port)) { 2057 if ((tmp & TRANS_DDI_PORT_MASK) != TRANS_DDI_SELECT_PORT(port))
2049 if ((tmp & TRANS_DDI_MODE_SELECT_MASK) == 2058 continue;
2050 TRANS_DDI_MODE_SELECT_DP_MST)
2051 goto out;
2052 2059
2053 *pipe = p; 2060 if ((tmp & TRANS_DDI_MODE_SELECT_MASK) ==
2054 ret = true; 2061 TRANS_DDI_MODE_SELECT_DP_MST)
2062 mst_pipe_mask |= BIT(p);
2055 2063
2056 goto out; 2064 *pipe_mask |= BIT(p);
2057 }
2058 } 2065 }
2059 2066
2060 DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port)); 2067 if (!*pipe_mask)
2068 DRM_DEBUG_KMS("No pipe for ddi port %c found\n",
2069 port_name(port));
2070
2071 if (!mst_pipe_mask && hweight8(*pipe_mask) > 1) {
2072 DRM_DEBUG_KMS("Multiple pipes for non DP-MST port %c (pipe_mask %02x)\n",
2073 port_name(port), *pipe_mask);
2074 *pipe_mask = BIT(ffs(*pipe_mask) - 1);
2075 }
2076
2077 if (mst_pipe_mask && mst_pipe_mask != *pipe_mask)
2078 DRM_DEBUG_KMS("Conflicting MST and non-MST encoders for port %c (pipe_mask %02x mst_pipe_mask %02x)\n",
2079 port_name(port), *pipe_mask, mst_pipe_mask);
2080 else
2081 *is_dp_mst = mst_pipe_mask;
2061 2082
2062out: 2083out:
2063 if (ret && IS_GEN9_LP(dev_priv)) { 2084 if (*pipe_mask && IS_GEN9_LP(dev_priv)) {
2064 tmp = I915_READ(BXT_PHY_CTL(port)); 2085 tmp = I915_READ(BXT_PHY_CTL(port));
2065 if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK | 2086 if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK |
2066 BXT_PHY_LANE_POWERDOWN_ACK | 2087 BXT_PHY_LANE_POWERDOWN_ACK |
@@ -2070,12 +2091,26 @@ out:
2070 } 2091 }
2071 2092
2072 intel_display_power_put(dev_priv, encoder->power_domain); 2093 intel_display_power_put(dev_priv, encoder->power_domain);
2094}
2073 2095
2074 return ret; 2096bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
2097 enum pipe *pipe)
2098{
2099 u8 pipe_mask;
2100 bool is_mst;
2101
2102 intel_ddi_get_encoder_pipes(encoder, &pipe_mask, &is_mst);
2103
2104 if (is_mst || !pipe_mask)
2105 return false;
2106
2107 *pipe = ffs(pipe_mask) - 1;
2108
2109 return true;
2075} 2110}
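
The readout now collects a pipe bitmask instead of a single pipe; the legacy hook then refuses MST, treats an empty mask as disabled, and otherwise reports the lowest set bit via ffs(). The mask-collapsing logic in isolation:

#include <stdio.h>
#include <strings.h> /* ffs() */

int main(void)
{
	unsigned char pipe_mask = 1u << 2; /* only pipe C active */

	/* No pipe -> disabled; multiple pipes -> lowest set bit wins. */
	if (pipe_mask && (pipe_mask & (pipe_mask - 1)) == 0)
		printf("single pipe %d\n", ffs(pipe_mask) - 1);
	else if (pipe_mask)
		printf("multiple pipes (mask %#x), picking %d\n",
		       pipe_mask, ffs(pipe_mask) - 1);
	else
		printf("port not enabled on any pipe\n");
	return 0;
}
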
2076 2111
2077static inline enum intel_display_power_domain 2112static inline enum intel_display_power_domain
2078intel_ddi_main_link_aux_domain(struct intel_dp *intel_dp) 2113intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port)
2079{ 2114{
2080 /* CNL+ HW requires corresponding AUX IOs to be powered up for PSR with 2115 /* CNL+ HW requires corresponding AUX IOs to be powered up for PSR with
2081 * DC states enabled at the same time, while for driver initiated AUX 2116 * DC states enabled at the same time, while for driver initiated AUX
@@ -2089,13 +2124,14 @@ intel_ddi_main_link_aux_domain(struct intel_dp *intel_dp)
2089 * Note that PSR is enabled only on Port A even though this function 2124 * Note that PSR is enabled only on Port A even though this function
2090 * returns the correct domain for other ports too. 2125 * returns the correct domain for other ports too.
2091 */ 2126 */
2092 return intel_dp->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A : 2127 return dig_port->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A :
2093 intel_dp->aux_power_domain; 2128 intel_aux_power_domain(dig_port);
2094} 2129}
2095 2130
2096static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder, 2131static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
2097 struct intel_crtc_state *crtc_state) 2132 struct intel_crtc_state *crtc_state)
2098{ 2133{
2134 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2099 struct intel_digital_port *dig_port; 2135 struct intel_digital_port *dig_port;
2100 u64 domains; 2136 u64 domains;
2101 2137
@@ -2110,12 +2146,13 @@ static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
2110 dig_port = enc_to_dig_port(&encoder->base); 2146 dig_port = enc_to_dig_port(&encoder->base);
2111 domains = BIT_ULL(dig_port->ddi_io_power_domain); 2147 domains = BIT_ULL(dig_port->ddi_io_power_domain);
2112 2148
2113 /* AUX power is only needed for (e)DP mode, not for HDMI. */ 2149 /*
2114 if (intel_crtc_has_dp_encoder(crtc_state)) { 2150 * AUX power is only needed for (e)DP mode, and for HDMI mode on TC
2115 struct intel_dp *intel_dp = &dig_port->dp; 2151 * ports.
2116 2152 */
2117 domains |= BIT_ULL(intel_ddi_main_link_aux_domain(intel_dp)); 2153 if (intel_crtc_has_dp_encoder(crtc_state) ||
2118 } 2154 intel_port_is_tc(dev_priv, encoder->port))
2155 domains |= BIT_ULL(intel_ddi_main_link_aux_domain(dig_port));
2119 2156
2120 return domains; 2157 return domains;
2121} 2158}
@@ -2813,12 +2850,59 @@ void icl_unmap_plls_to_ports(struct drm_crtc *crtc,
2813 } 2850 }
2814} 2851}
2815 2852
2853void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
2854{
2855 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2856 u32 val;
2857 enum port port = encoder->port;
2858 bool clk_enabled;
2859
2860 /*
2861 * In case of DP MST, we sanitize the primary encoder only, not the
2862 * virtual ones.
2863 */
2864 if (encoder->type == INTEL_OUTPUT_DP_MST)
2865 return;
2866
2867 val = I915_READ(DPCLKA_CFGCR0_ICL);
2868 clk_enabled = !(val & icl_dpclka_cfgcr0_clk_off(dev_priv, port));
2869
2870 if (!encoder->base.crtc && intel_encoder_is_dp(encoder)) {
2871 u8 pipe_mask;
2872 bool is_mst;
2873
2874 intel_ddi_get_encoder_pipes(encoder, &pipe_mask, &is_mst);
2875 /*
2876 * In the unlikely case that BIOS enables DP in MST mode, just
2877 * warn since our MST HW readout is incomplete.
2878 */
2879 if (WARN_ON(is_mst))
2880 return;
2881 }
2882
2883 if (clk_enabled == !!encoder->base.crtc)
2884 return;
2885
2886 /*
 2887 * Punt for now on the case where the clock is disabled but the encoder
 2888 * is enabled; something else is really broken then.
2889 */
2890 if (WARN_ON(!clk_enabled))
2891 return;
2892
2893 DRM_NOTE("Port %c is disabled but it has a mapped PLL, unmap it\n",
2894 port_name(port));
2895 val |= icl_dpclka_cfgcr0_clk_off(dev_priv, port);
2896 I915_WRITE(DPCLKA_CFGCR0_ICL, val);
2897}
2898
2816static void intel_ddi_clk_select(struct intel_encoder *encoder, 2899static void intel_ddi_clk_select(struct intel_encoder *encoder,
2817 const struct intel_shared_dpll *pll) 2900 const struct intel_crtc_state *crtc_state)
2818{ 2901{
2819 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2902 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2820 enum port port = encoder->port; 2903 enum port port = encoder->port;
2821 uint32_t val; 2904 uint32_t val;
2905 const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
2822 2906
2823 if (WARN_ON(!pll)) 2907 if (WARN_ON(!pll))
2824 return; 2908 return;
@@ -2828,7 +2912,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
2828 if (IS_ICELAKE(dev_priv)) { 2912 if (IS_ICELAKE(dev_priv)) {
2829 if (!intel_port_is_combophy(dev_priv, port)) 2913 if (!intel_port_is_combophy(dev_priv, port))
2830 I915_WRITE(DDI_CLK_SEL(port), 2914 I915_WRITE(DDI_CLK_SEL(port),
2831 icl_pll_to_ddi_pll_sel(encoder, pll)); 2915 icl_pll_to_ddi_pll_sel(encoder, crtc_state));
2832 } else if (IS_CANNONLAKE(dev_priv)) { 2916 } else if (IS_CANNONLAKE(dev_priv)) {
2833 /* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */ 2917 /* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */
2834 val = I915_READ(DPCLKA_CFGCR0); 2918 val = I915_READ(DPCLKA_CFGCR0);
@@ -2881,6 +2965,137 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder)
2881 } 2965 }
2882} 2966}
2883 2967
2968static void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port)
2969{
2970 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2971 enum port port = dig_port->base.port;
2972 enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
2973 i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) };
2974 u32 val;
2975 int i;
2976
2977 if (tc_port == PORT_TC_NONE)
2978 return;
2979
2980 for (i = 0; i < ARRAY_SIZE(mg_regs); i++) {
2981 val = I915_READ(mg_regs[i]);
2982 val |= MG_DP_MODE_CFG_TR2PWR_GATING |
2983 MG_DP_MODE_CFG_TRPWR_GATING |
2984 MG_DP_MODE_CFG_CLNPWR_GATING |
2985 MG_DP_MODE_CFG_DIGPWR_GATING |
2986 MG_DP_MODE_CFG_GAONPWR_GATING;
2987 I915_WRITE(mg_regs[i], val);
2988 }
2989
2990 val = I915_READ(MG_MISC_SUS0(tc_port));
2991 val |= MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(3) |
2992 MG_MISC_SUS0_CFG_TR2PWR_GATING |
2993 MG_MISC_SUS0_CFG_CL2PWR_GATING |
2994 MG_MISC_SUS0_CFG_GAONPWR_GATING |
2995 MG_MISC_SUS0_CFG_TRPWR_GATING |
2996 MG_MISC_SUS0_CFG_CL1PWR_GATING |
2997 MG_MISC_SUS0_CFG_DGPWR_GATING;
2998 I915_WRITE(MG_MISC_SUS0(tc_port), val);
2999}
3000
3001static void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port)
3002{
3003 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
3004 enum port port = dig_port->base.port;
3005 enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
3006 i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) };
3007 u32 val;
3008 int i;
3009
3010 if (tc_port == PORT_TC_NONE)
3011 return;
3012
3013 for (i = 0; i < ARRAY_SIZE(mg_regs); i++) {
3014 val = I915_READ(mg_regs[i]);
3015 val &= ~(MG_DP_MODE_CFG_TR2PWR_GATING |
3016 MG_DP_MODE_CFG_TRPWR_GATING |
3017 MG_DP_MODE_CFG_CLNPWR_GATING |
3018 MG_DP_MODE_CFG_DIGPWR_GATING |
3019 MG_DP_MODE_CFG_GAONPWR_GATING);
3020 I915_WRITE(mg_regs[i], val);
3021 }
3022
3023 val = I915_READ(MG_MISC_SUS0(tc_port));
3024 val &= ~(MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK |
3025 MG_MISC_SUS0_CFG_TR2PWR_GATING |
3026 MG_MISC_SUS0_CFG_CL2PWR_GATING |
3027 MG_MISC_SUS0_CFG_GAONPWR_GATING |
3028 MG_MISC_SUS0_CFG_TRPWR_GATING |
3029 MG_MISC_SUS0_CFG_CL1PWR_GATING |
3030 MG_MISC_SUS0_CFG_DGPWR_GATING);
3031 I915_WRITE(MG_MISC_SUS0(tc_port), val);
3032}
3033
3034static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port)
3035{
3036 struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
3037 enum port port = intel_dig_port->base.port;
3038 enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
3039 u32 ln0, ln1, lane_info;
3040
3041 if (tc_port == PORT_TC_NONE || intel_dig_port->tc_type == TC_PORT_TBT)
3042 return;
3043
3044 ln0 = I915_READ(MG_DP_MODE(port, 0));
3045 ln1 = I915_READ(MG_DP_MODE(port, 1));
3046
3047 switch (intel_dig_port->tc_type) {
3048 case TC_PORT_TYPEC:
3049 ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
3050 ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
3051
3052 lane_info = (I915_READ(PORT_TX_DFLEXDPSP) &
3053 DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
3054 DP_LANE_ASSIGNMENT_SHIFT(tc_port);
3055
3056 switch (lane_info) {
3057 case 0x1:
3058 case 0x4:
3059 break;
3060 case 0x2:
3061 ln0 |= MG_DP_MODE_CFG_DP_X1_MODE;
3062 break;
3063 case 0x3:
3064 ln0 |= MG_DP_MODE_CFG_DP_X1_MODE |
3065 MG_DP_MODE_CFG_DP_X2_MODE;
3066 break;
3067 case 0x8:
3068 ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
3069 break;
3070 case 0xC:
3071 ln1 |= MG_DP_MODE_CFG_DP_X1_MODE |
3072 MG_DP_MODE_CFG_DP_X2_MODE;
3073 break;
3074 case 0xF:
3075 ln0 |= MG_DP_MODE_CFG_DP_X1_MODE |
3076 MG_DP_MODE_CFG_DP_X2_MODE;
3077 ln1 |= MG_DP_MODE_CFG_DP_X1_MODE |
3078 MG_DP_MODE_CFG_DP_X2_MODE;
3079 break;
3080 default:
3081 MISSING_CASE(lane_info);
3082 }
3083 break;
3084
3085 case TC_PORT_LEGACY:
3086 ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE;
3087 ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE;
3088 break;
3089
3090 default:
3091 MISSING_CASE(intel_dig_port->tc_type);
3092 return;
3093 }
3094
3095 I915_WRITE(MG_DP_MODE(port, 0), ln0);
3096 I915_WRITE(MG_DP_MODE(port, 1), ln1);
3097}
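
For Type-C ports, the 4-bit lane-assignment nibble read from the FIA selects which x1/x2 mode bits each MG_DP_MODE half gets. The table below reproduces exactly the switch in the hunk above (X1/X2 are local stand-ins for the MG_DP_MODE_CFG_DP_X*_MODE bits):

#include <stdio.h>

#define X1 0x1
#define X2 0x2

int main(void)
{
	/* lane_info nibble -> (ln0 mode bits, ln1 mode bits), per the hunk. */
	static const struct { unsigned int info, ln0, ln1; } map[] = {
		{ 0x1, 0,       0       },
		{ 0x2, X1,      0       },
		{ 0x3, X1 | X2, 0       },
		{ 0x4, 0,       0       },
		{ 0x8, 0,       X1      },
		{ 0xC, 0,       X1 | X2 },
		{ 0xF, X1 | X2, X1 | X2 },
	};
	for (unsigned int i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		printf("lane_info %#x -> ln0 %#x ln1 %#x\n",
		       map[i].info, map[i].ln0, map[i].ln1);
	return 0;
}
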
3098
2884static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, 3099static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
2885 const struct intel_crtc_state *crtc_state, 3100 const struct intel_crtc_state *crtc_state,
2886 const struct drm_connector_state *conn_state) 3101 const struct drm_connector_state *conn_state)
@@ -2894,19 +3109,16 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
2894 3109
2895 WARN_ON(is_mst && (port == PORT_A || port == PORT_E)); 3110 WARN_ON(is_mst && (port == PORT_A || port == PORT_E));
2896 3111
2897 intel_display_power_get(dev_priv,
2898 intel_ddi_main_link_aux_domain(intel_dp));
2899
2900 intel_dp_set_link_params(intel_dp, crtc_state->port_clock, 3112 intel_dp_set_link_params(intel_dp, crtc_state->port_clock,
2901 crtc_state->lane_count, is_mst); 3113 crtc_state->lane_count, is_mst);
2902 3114
2903 intel_edp_panel_on(intel_dp); 3115 intel_edp_panel_on(intel_dp);
2904 3116
2905 intel_ddi_clk_select(encoder, crtc_state->shared_dpll); 3117 intel_ddi_clk_select(encoder, crtc_state);
2906 3118
2907 intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); 3119 intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
2908 3120
2909 icl_program_mg_dp_mode(intel_dp); 3121 icl_program_mg_dp_mode(dig_port);
2910 icl_disable_phy_clock_gating(dig_port); 3122 icl_disable_phy_clock_gating(dig_port);
2911 3123
2912 if (IS_ICELAKE(dev_priv)) 3124 if (IS_ICELAKE(dev_priv))
@@ -2944,10 +3156,13 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
2944 struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); 3156 struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
2945 3157
2946 intel_dp_dual_mode_set_tmds_output(intel_hdmi, true); 3158 intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
2947 intel_ddi_clk_select(encoder, crtc_state->shared_dpll); 3159 intel_ddi_clk_select(encoder, crtc_state);
2948 3160
2949 intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); 3161 intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
2950 3162
3163 icl_program_mg_dp_mode(dig_port);
3164 icl_disable_phy_clock_gating(dig_port);
3165
2951 if (IS_ICELAKE(dev_priv)) 3166 if (IS_ICELAKE(dev_priv))
2952 icl_ddi_vswing_sequence(encoder, crtc_state->port_clock, 3167 icl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
2953 level, INTEL_OUTPUT_HDMI); 3168 level, INTEL_OUTPUT_HDMI);
@@ -2958,12 +3173,14 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
2958 else 3173 else
2959 intel_prepare_hdmi_ddi_buffers(encoder, level); 3174 intel_prepare_hdmi_ddi_buffers(encoder, level);
2960 3175
3176 icl_enable_phy_clock_gating(dig_port);
3177
2961 if (IS_GEN9_BC(dev_priv)) 3178 if (IS_GEN9_BC(dev_priv))
2962 skl_ddi_set_iboost(encoder, level, INTEL_OUTPUT_HDMI); 3179 skl_ddi_set_iboost(encoder, level, INTEL_OUTPUT_HDMI);
2963 3180
2964 intel_ddi_enable_pipe_clock(crtc_state); 3181 intel_ddi_enable_pipe_clock(crtc_state);
2965 3182
2966 intel_dig_port->set_infoframes(&encoder->base, 3183 intel_dig_port->set_infoframes(encoder,
2967 crtc_state->has_infoframe, 3184 crtc_state->has_infoframe,
2968 crtc_state, conn_state); 3185 crtc_state, conn_state);
2969} 3186}
@@ -2993,10 +3210,22 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder,
2993 3210
2994 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 3211 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
2995 3212
2996 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) 3213 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
2997 intel_ddi_pre_enable_hdmi(encoder, crtc_state, conn_state); 3214 intel_ddi_pre_enable_hdmi(encoder, crtc_state, conn_state);
2998 else 3215 } else {
3216 struct intel_lspcon *lspcon =
3217 enc_to_intel_lspcon(&encoder->base);
3218
2999 intel_ddi_pre_enable_dp(encoder, crtc_state, conn_state); 3219 intel_ddi_pre_enable_dp(encoder, crtc_state, conn_state);
3220 if (lspcon->active) {
3221 struct intel_digital_port *dig_port =
3222 enc_to_dig_port(&encoder->base);
3223
3224 dig_port->set_infoframes(encoder,
3225 crtc_state->has_infoframe,
3226 crtc_state, conn_state);
3227 }
3228 }
3000} 3229}
3001 3230
3002static void intel_disable_ddi_buf(struct intel_encoder *encoder) 3231static void intel_disable_ddi_buf(struct intel_encoder *encoder)
@@ -3049,9 +3278,6 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
3049 intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain); 3278 intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain);
3050 3279
3051 intel_ddi_clk_disable(encoder); 3280 intel_ddi_clk_disable(encoder);
3052
3053 intel_display_power_put(dev_priv,
3054 intel_ddi_main_link_aux_domain(intel_dp));
3055} 3281}
3056 3282
3057static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder, 3283static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
@@ -3062,7 +3288,7 @@ static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
3062 struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); 3288 struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
3063 struct intel_hdmi *intel_hdmi = &dig_port->hdmi; 3289 struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
3064 3290
3065 dig_port->set_infoframes(&encoder->base, false, 3291 dig_port->set_infoframes(encoder, false,
3066 old_crtc_state, old_conn_state); 3292 old_crtc_state, old_conn_state);
3067 3293
3068 intel_ddi_disable_pipe_clock(old_crtc_state); 3294 intel_ddi_disable_pipe_clock(old_crtc_state);
@@ -3154,6 +3380,26 @@ static void intel_enable_ddi_dp(struct intel_encoder *encoder,
3154 intel_audio_codec_enable(encoder, crtc_state, conn_state); 3380 intel_audio_codec_enable(encoder, crtc_state, conn_state);
3155} 3381}
3156 3382
3383static i915_reg_t
3384gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv,
3385 enum port port)
3386{
3387 static const i915_reg_t regs[] = {
3388 [PORT_A] = CHICKEN_TRANS_EDP,
3389 [PORT_B] = CHICKEN_TRANS_A,
3390 [PORT_C] = CHICKEN_TRANS_B,
3391 [PORT_D] = CHICKEN_TRANS_C,
3392 [PORT_E] = CHICKEN_TRANS_A,
3393 };
3394
3395 WARN_ON(INTEL_GEN(dev_priv) < 9);
3396
3397 if (WARN_ON(port < PORT_A || port > PORT_E))
3398 port = PORT_A;
3399
3400 return regs[port];
3401}
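
The helper above replaces the open-coded transcoder array with a bounds-checked, port-indexed register table that clamps bad ports to PORT_A under a WARN. The same pattern in a standalone sketch (register offsets are placeholders for the CHICKEN_TRANS_* definitions):

#include <stdio.h>

enum port { PORT_A, PORT_B, PORT_C, PORT_D, PORT_E, NUM_PORTS };

/* Placeholder offsets; the real values live in i915_reg.h. */
static const unsigned int chicken_trans[NUM_PORTS] = {
	[PORT_A] = 0x420cc, /* CHICKEN_TRANS_EDP */
	[PORT_B] = 0x420c0, /* CHICKEN_TRANS_A   */
	[PORT_C] = 0x420c4, /* CHICKEN_TRANS_B   */
	[PORT_D] = 0x420c8, /* CHICKEN_TRANS_C   */
	[PORT_E] = 0x420c0, /* CHICKEN_TRANS_A   */
};

static unsigned int reg_by_port(enum port port)
{
	if (port < PORT_A || port > PORT_E) {
		fprintf(stderr, "bad port %d, falling back to A\n", port);
		port = PORT_A; /* clamp rather than index out of bounds */
	}
	return chicken_trans[port];
}

int main(void)
{
	printf("PORT_E -> %#x\n", reg_by_port(PORT_E));
	return 0;
}
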
3402
3157static void intel_enable_ddi_hdmi(struct intel_encoder *encoder, 3403static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
3158 const struct intel_crtc_state *crtc_state, 3404 const struct intel_crtc_state *crtc_state,
3159 const struct drm_connector_state *conn_state) 3405 const struct drm_connector_state *conn_state)
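The new gen9_chicken_trans_reg_by_port() above replaces an open-coded transcoder mapping with a designated-initializer lookup table plus a clamped fallback. A minimal standalone sketch of the same pattern follows; the enum names mirror the driver, but the register offsets and the fallback choice are illustrative stand-ins, not i915_reg.h values.

#include <stdint.h>
#include <stdio.h>

enum port { PORT_A, PORT_B, PORT_C, PORT_D, PORT_E, PORT_MAX };

/* Designated initializers keep the port -> register mapping explicit;
 * the offsets below are placeholders for illustration only. */
static const uint32_t chicken_trans_reg[PORT_MAX] = {
	[PORT_A] = 0xcc, /* CHICKEN_TRANS_EDP */
	[PORT_B] = 0xc0, /* CHICKEN_TRANS_A */
	[PORT_C] = 0xc4, /* CHICKEN_TRANS_B */
	[PORT_D] = 0xc8, /* CHICKEN_TRANS_C */
	[PORT_E] = 0xc0, /* CHICKEN_TRANS_A again: DDI E reuses transcoder A */
};

static uint32_t reg_by_port(enum port port)
{
	/* Clamp bad input to a safe entry instead of indexing past the
	 * table, mirroring the WARN_ON() fallback in the patch. */
	if (port < PORT_A || port > PORT_E)
		port = PORT_A;

	return chicken_trans_reg[port];
}

int main(void)
{
	printf("PORT_E -> %#x\n", reg_by_port(PORT_E)); /* same as PORT_B */
	return 0;
}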
@@ -3177,17 +3423,10 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
3177 * the bits affect a specific DDI port rather than 3423 * the bits affect a specific DDI port rather than
3178 * a specific transcoder. 3424 * a specific transcoder.
3179 */ 3425 */
3180 static const enum transcoder port_to_transcoder[] = { 3426 i915_reg_t reg = gen9_chicken_trans_reg_by_port(dev_priv, port);
3181 [PORT_A] = TRANSCODER_EDP,
3182 [PORT_B] = TRANSCODER_A,
3183 [PORT_C] = TRANSCODER_B,
3184 [PORT_D] = TRANSCODER_C,
3185 [PORT_E] = TRANSCODER_A,
3186 };
3187 enum transcoder transcoder = port_to_transcoder[port];
3188 u32 val; 3427 u32 val;
3189 3428
3190 val = I915_READ(CHICKEN_TRANS(transcoder)); 3429 val = I915_READ(reg);
3191 3430
3192 if (port == PORT_E) 3431 if (port == PORT_E)
3193 val |= DDIE_TRAINING_OVERRIDE_ENABLE | 3432 val |= DDIE_TRAINING_OVERRIDE_ENABLE |
@@ -3196,8 +3435,8 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
3196 val |= DDI_TRAINING_OVERRIDE_ENABLE | 3435 val |= DDI_TRAINING_OVERRIDE_ENABLE |
3197 DDI_TRAINING_OVERRIDE_VALUE; 3436 DDI_TRAINING_OVERRIDE_VALUE;
3198 3437
3199 I915_WRITE(CHICKEN_TRANS(transcoder), val); 3438 I915_WRITE(reg, val);
3200 POSTING_READ(CHICKEN_TRANS(transcoder)); 3439 POSTING_READ(reg);
3201 3440
3202 udelay(1); 3441 udelay(1);
3203 3442
@@ -3208,7 +3447,7 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
3208 val &= ~(DDI_TRAINING_OVERRIDE_ENABLE | 3447 val &= ~(DDI_TRAINING_OVERRIDE_ENABLE |
3209 DDI_TRAINING_OVERRIDE_VALUE); 3448 DDI_TRAINING_OVERRIDE_VALUE);
3210 3449
3211 I915_WRITE(CHICKEN_TRANS(transcoder), val); 3450 I915_WRITE(reg, val);
3212 } 3451 }
3213 3452
 3214 /* In HDMI/DVI mode, the port width and swing/emphasis values 3453 /* In HDMI/DVI mode, the port width and swing/emphasis values
@@ -3282,13 +3521,76 @@ static void intel_disable_ddi(struct intel_encoder *encoder,
3282 intel_disable_ddi_dp(encoder, old_crtc_state, old_conn_state); 3521 intel_disable_ddi_dp(encoder, old_crtc_state, old_conn_state);
3283} 3522}
3284 3523
3285static void bxt_ddi_pre_pll_enable(struct intel_encoder *encoder, 3524static void intel_ddi_set_fia_lane_count(struct intel_encoder *encoder,
3286 const struct intel_crtc_state *pipe_config, 3525 const struct intel_crtc_state *pipe_config,
3287 const struct drm_connector_state *conn_state) 3526 enum port port)
3527{
3528 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3529 struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
3530 enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
3531 u32 val = I915_READ(PORT_TX_DFLEXDPMLE1);
3532 bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
3533
3534 val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc_port);
3535 switch (pipe_config->lane_count) {
3536 case 1:
3537 val |= (lane_reversal) ? DFLEXDPMLE1_DPMLETC_ML3(tc_port) :
3538 DFLEXDPMLE1_DPMLETC_ML0(tc_port);
3539 break;
3540 case 2:
3541 val |= (lane_reversal) ? DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) :
3542 DFLEXDPMLE1_DPMLETC_ML1_0(tc_port);
3543 break;
3544 case 4:
3545 val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc_port);
3546 break;
3547 default:
3548 MISSING_CASE(pipe_config->lane_count);
3549 }
3550 I915_WRITE(PORT_TX_DFLEXDPMLE1, val);
3551}
3552
3553static void
3554intel_ddi_pre_pll_enable(struct intel_encoder *encoder,
3555 const struct intel_crtc_state *crtc_state,
3556 const struct drm_connector_state *conn_state)
3557{
3558 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3559 struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
3560 enum port port = encoder->port;
3561
3562 if (intel_crtc_has_dp_encoder(crtc_state) ||
3563 intel_port_is_tc(dev_priv, encoder->port))
3564 intel_display_power_get(dev_priv,
3565 intel_ddi_main_link_aux_domain(dig_port));
3566
3567 if (IS_GEN9_LP(dev_priv))
3568 bxt_ddi_phy_set_lane_optim_mask(encoder,
3569 crtc_state->lane_lat_optim_mask);
3570
3571 /*
3572 * Program the lane count for static/dynamic connections on Type-C ports.
3573 * Skip this step for TBT.
3574 */
3575 if (dig_port->tc_type == TC_PORT_UNKNOWN ||
3576 dig_port->tc_type == TC_PORT_TBT)
3577 return;
3578
3579 intel_ddi_set_fia_lane_count(encoder, crtc_state, port);
3580}
3581
3582static void
3583intel_ddi_post_pll_disable(struct intel_encoder *encoder,
3584 const struct intel_crtc_state *crtc_state,
3585 const struct drm_connector_state *conn_state)
3288{ 3586{
3289 uint8_t mask = pipe_config->lane_lat_optim_mask; 3587 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3588 struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
3290 3589
3291 bxt_ddi_phy_set_lane_optim_mask(encoder, mask); 3590 if (intel_crtc_has_dp_encoder(crtc_state) ||
3591 intel_port_is_tc(dev_priv, encoder->port))
3592 intel_display_power_put(dev_priv,
3593 intel_ddi_main_link_aux_domain(dig_port));
3292} 3594}
3293 3595
3294void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp) 3596void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
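intel_ddi_set_fia_lane_count() above picks which Type-C PHY lanes carry the main link, and with lane reversal the selection is mirrored (ML3 instead of ML0, ML3_2 instead of ML1_0; x4 uses all lanes either way). A hedged standalone sketch of just the selection logic; the mask values model a single port's 4-bit field and are not the real DFLEXDPMLE1 encoding.

#include <stdio.h>

#define ML0    0x1u /* lane 0 only */
#define ML1_0  0x3u /* lanes 0-1 */
#define ML3    0x8u /* lane 3 only (reversed 1-lane) */
#define ML3_2  0xcu /* lanes 2-3 (reversed 2-lane) */
#define ML3_0  0xfu /* all four lanes */

static unsigned int fia_lane_mask(int lane_count, int lane_reversal)
{
	switch (lane_count) {
	case 1: return lane_reversal ? ML3 : ML0;
	case 2: return lane_reversal ? ML3_2 : ML1_0;
	case 4: return ML3_0; /* reversal is symmetric at x4 */
	default:
		fprintf(stderr, "unsupported lane count %d\n", lane_count);
		return 0;
	}
}

int main(void)
{
	printf("x2 normal:   %#x\n", fia_lane_mask(2, 0)); /* 0x3 */
	printf("x2 reversed: %#x\n", fia_lane_mask(2, 1)); /* 0xc */
	return 0;
}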
@@ -3353,10 +3655,10 @@ static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
3353void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv, 3655void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
3354 struct intel_crtc_state *crtc_state) 3656 struct intel_crtc_state *crtc_state)
3355{ 3657{
3356 if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000) 3658 if (IS_ICELAKE(dev_priv) && crtc_state->port_clock > 594000)
3357 crtc_state->min_voltage_level = 2;
3358 else if (IS_ICELAKE(dev_priv) && crtc_state->port_clock > 594000)
3359 crtc_state->min_voltage_level = 1; 3659 crtc_state->min_voltage_level = 1;
3660 else if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000)
3661 crtc_state->min_voltage_level = 2;
3360} 3662}
3361 3663
3362void intel_ddi_get_config(struct intel_encoder *encoder, 3664void intel_ddi_get_config(struct intel_encoder *encoder,
@@ -3406,7 +3708,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
3406 pipe_config->has_hdmi_sink = true; 3708 pipe_config->has_hdmi_sink = true;
3407 intel_dig_port = enc_to_dig_port(&encoder->base); 3709 intel_dig_port = enc_to_dig_port(&encoder->base);
3408 3710
3409 if (intel_dig_port->infoframe_enabled(&encoder->base, pipe_config)) 3711 if (intel_dig_port->infoframe_enabled(encoder, pipe_config))
3410 pipe_config->has_infoframe = true; 3712 pipe_config->has_infoframe = true;
3411 3713
3412 if ((temp & TRANS_DDI_HDMI_SCRAMBLING_MASK) == 3714 if ((temp & TRANS_DDI_HDMI_SCRAMBLING_MASK) ==
@@ -3767,6 +4069,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
3767 struct intel_encoder *intel_encoder; 4069 struct intel_encoder *intel_encoder;
3768 struct drm_encoder *encoder; 4070 struct drm_encoder *encoder;
3769 bool init_hdmi, init_dp, init_lspcon = false; 4071 bool init_hdmi, init_dp, init_lspcon = false;
4072 enum pipe pipe;
3770 4073
3771 4074
3772 init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi || 4075 init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
@@ -3805,8 +4108,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
3805 intel_encoder->compute_output_type = intel_ddi_compute_output_type; 4108 intel_encoder->compute_output_type = intel_ddi_compute_output_type;
3806 intel_encoder->compute_config = intel_ddi_compute_config; 4109 intel_encoder->compute_config = intel_ddi_compute_config;
3807 intel_encoder->enable = intel_enable_ddi; 4110 intel_encoder->enable = intel_enable_ddi;
3808 if (IS_GEN9_LP(dev_priv)) 4111 intel_encoder->pre_pll_enable = intel_ddi_pre_pll_enable;
3809 intel_encoder->pre_pll_enable = bxt_ddi_pre_pll_enable; 4112 intel_encoder->post_pll_disable = intel_ddi_post_pll_disable;
3810 intel_encoder->pre_enable = intel_ddi_pre_enable; 4113 intel_encoder->pre_enable = intel_ddi_pre_enable;
3811 intel_encoder->disable = intel_disable_ddi; 4114 intel_encoder->disable = intel_disable_ddi;
3812 intel_encoder->post_disable = intel_ddi_post_disable; 4115 intel_encoder->post_disable = intel_ddi_post_disable;
@@ -3817,8 +4120,9 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
3817 intel_encoder->type = INTEL_OUTPUT_DDI; 4120 intel_encoder->type = INTEL_OUTPUT_DDI;
3818 intel_encoder->power_domain = intel_port_to_power_domain(port); 4121 intel_encoder->power_domain = intel_port_to_power_domain(port);
3819 intel_encoder->port = port; 4122 intel_encoder->port = port;
3820 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
3821 intel_encoder->cloneable = 0; 4123 intel_encoder->cloneable = 0;
4124 for_each_pipe(dev_priv, pipe)
4125 intel_encoder->crtc_mask |= BIT(pipe);
3822 4126
3823 if (INTEL_GEN(dev_priv) >= 11) 4127 if (INTEL_GEN(dev_priv) >= 11)
3824 intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) & 4128 intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
@@ -3828,6 +4132,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
3828 (DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES); 4132 (DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES);
3829 intel_dig_port->dp.output_reg = INVALID_MMIO_REG; 4133 intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
3830 intel_dig_port->max_lanes = intel_ddi_max_lanes(intel_dig_port); 4134 intel_dig_port->max_lanes = intel_ddi_max_lanes(intel_dig_port);
4135 intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
3831 4136
3832 switch (port) { 4137 switch (port) {
3833 case PORT_A: 4138 case PORT_A:
@@ -3858,8 +4163,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
3858 MISSING_CASE(port); 4163 MISSING_CASE(port);
3859 } 4164 }
3860 4165
3861 intel_infoframe_init(intel_dig_port);
3862
3863 if (init_dp) { 4166 if (init_dp) {
3864 if (!intel_ddi_init_dp_connector(intel_dig_port)) 4167 if (!intel_ddi_init_dp_connector(intel_dig_port))
3865 goto err; 4168 goto err;
@@ -3888,6 +4191,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
3888 port_name(port)); 4191 port_name(port));
3889 } 4192 }
3890 4193
4194 intel_infoframe_init(intel_dig_port);
3891 return; 4195 return;
3892 4196
3893err: 4197err:
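One small change in intel_ddi_init() worth noting: the hard-coded crtc_mask of (1 << 0) | (1 << 1) | (1 << 2) silently capped encoders to three pipes, which breaks once a platform grows a fourth pipe. Building the mask with for_each_pipe() sizes it to the device. A sketch under the assumption of a four-pipe part:

#include <stdio.h>

/* Stand-in for the driver's per-device pipe count. */
#define NUM_PIPES 4

int main(void)
{
	unsigned int crtc_mask = 0;
	int pipe;

	/* Equivalent of for_each_pipe(): one bit per available pipe. */
	for (pipe = 0; pipe < NUM_PIPES; pipe++)
		crtc_mask |= 1u << pipe;

	printf("crtc_mask = %#x\n", crtc_mask); /* 0xf on a 4-pipe part */
	return 0;
}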
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 0ef0c6448d53..ceecb5bd5226 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -474,7 +474,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
474 u8 eu_disabled_mask; 474 u8 eu_disabled_mask;
475 u32 n_disabled; 475 u32 n_disabled;
476 476
477 if (!(sseu->subslice_mask[ss] & BIT(ss))) 477 if (!(sseu->subslice_mask[s] & BIT(ss)))
478 /* skip disabled subslice */ 478 /* skip disabled subslice */
479 continue; 479 continue;
480 480
@@ -744,27 +744,30 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
744 if (INTEL_GEN(dev_priv) >= 10) { 744 if (INTEL_GEN(dev_priv) >= 10) {
745 for_each_pipe(dev_priv, pipe) 745 for_each_pipe(dev_priv, pipe)
746 info->num_scalers[pipe] = 2; 746 info->num_scalers[pipe] = 2;
747 } else if (INTEL_GEN(dev_priv) == 9) { 747 } else if (IS_GEN9(dev_priv)) {
748 info->num_scalers[PIPE_A] = 2; 748 info->num_scalers[PIPE_A] = 2;
749 info->num_scalers[PIPE_B] = 2; 749 info->num_scalers[PIPE_B] = 2;
750 info->num_scalers[PIPE_C] = 1; 750 info->num_scalers[PIPE_C] = 1;
751 } 751 }
752 752
753 BUILD_BUG_ON(I915_NUM_ENGINES > 753 BUILD_BUG_ON(I915_NUM_ENGINES > BITS_PER_TYPE(intel_ring_mask_t));
754 sizeof(intel_ring_mask_t) * BITS_PER_BYTE);
755 754
756 /* 755 if (IS_GEN11(dev_priv))
757 * Skylake and Broxton currently don't expose the topmost plane as its 756 for_each_pipe(dev_priv, pipe)
758 * use is exclusive with the legacy cursor and we only want to expose 757 info->num_sprites[pipe] = 6;
759 * one of those, not both. Until we can safely expose the topmost plane 758 else if (IS_GEN10(dev_priv) || IS_GEMINILAKE(dev_priv))
760 * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
761 * we don't expose the topmost plane at all to prevent ABI breakage
762 * down the line.
763 */
764 if (IS_GEN10(dev_priv) || IS_GEMINILAKE(dev_priv))
765 for_each_pipe(dev_priv, pipe) 759 for_each_pipe(dev_priv, pipe)
766 info->num_sprites[pipe] = 3; 760 info->num_sprites[pipe] = 3;
767 else if (IS_BROXTON(dev_priv)) { 761 else if (IS_BROXTON(dev_priv)) {
762 /*
763 * Skylake and Broxton currently don't expose the topmost plane as its
764 * use is exclusive with the legacy cursor and we only want to expose
765 * one of those, not both. Until we can safely expose the topmost plane
766 * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
767 * we don't expose the topmost plane at all to prevent ABI breakage
768 * down the line.
769 */
770
768 info->num_sprites[PIPE_A] = 2; 771 info->num_sprites[PIPE_A] = 2;
769 info->num_sprites[PIPE_B] = 2; 772 info->num_sprites[PIPE_B] = 2;
770 info->num_sprites[PIPE_C] = 1; 773 info->num_sprites[PIPE_C] = 1;
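The BUILD_BUG_ON above now spells the width check with the kernel's BITS_PER_TYPE() helper, which is sizeof(t) * BITS_PER_BYTE. A compile-time sketch of the same guard using C11 _Static_assert; the typedef's width is assumed for illustration.

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BYTE 8
/* Same shape as the kernel helper the patch switches to. */
#define BITS_PER_TYPE(t) (sizeof(t) * BITS_PER_BYTE)

typedef uint8_t intel_ring_mask_t; /* assumed width, illustration only */

#define I915_NUM_ENGINES 8

/* Fails to compile if the engine count can no longer be represented
 * in the ring mask type, just like the BUILD_BUG_ON() in the patch. */
_Static_assert(I915_NUM_ENGINES <= BITS_PER_TYPE(intel_ring_mask_t),
	       "ring mask type too narrow");

int main(void)
{
	printf("%zu bits available\n", BITS_PER_TYPE(intel_ring_mask_t));
	return 0;
}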
@@ -844,13 +847,18 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
844 cherryview_sseu_info_init(dev_priv); 847 cherryview_sseu_info_init(dev_priv);
845 else if (IS_BROADWELL(dev_priv)) 848 else if (IS_BROADWELL(dev_priv))
846 broadwell_sseu_info_init(dev_priv); 849 broadwell_sseu_info_init(dev_priv);
847 else if (INTEL_GEN(dev_priv) == 9) 850 else if (IS_GEN9(dev_priv))
848 gen9_sseu_info_init(dev_priv); 851 gen9_sseu_info_init(dev_priv);
849 else if (INTEL_GEN(dev_priv) == 10) 852 else if (IS_GEN10(dev_priv))
850 gen10_sseu_info_init(dev_priv); 853 gen10_sseu_info_init(dev_priv);
851 else if (INTEL_GEN(dev_priv) >= 11) 854 else if (INTEL_GEN(dev_priv) >= 11)
852 gen11_sseu_info_init(dev_priv); 855 gen11_sseu_info_init(dev_priv);
853 856
857 if (IS_GEN6(dev_priv) && intel_vtd_active()) {
858 DRM_INFO("Disabling ppGTT for VT-d support\n");
859 info->ppgtt = INTEL_PPGTT_NONE;
860 }
861
854 /* Initialize command stream timestamp frequency */ 862 /* Initialize command stream timestamp frequency */
855 info->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv); 863 info->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);
856} 864}
@@ -872,40 +880,37 @@ void intel_driver_caps_print(const struct intel_driver_caps *caps,
872void intel_device_info_init_mmio(struct drm_i915_private *dev_priv) 880void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
873{ 881{
874 struct intel_device_info *info = mkwrite_device_info(dev_priv); 882 struct intel_device_info *info = mkwrite_device_info(dev_priv);
875 u8 vdbox_disable, vebox_disable;
876 u32 media_fuse; 883 u32 media_fuse;
877 int i; 884 unsigned int i;
878 885
879 if (INTEL_GEN(dev_priv) < 11) 886 if (INTEL_GEN(dev_priv) < 11)
880 return; 887 return;
881 888
882 media_fuse = I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE); 889 media_fuse = ~I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);
883 890
884 vdbox_disable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK; 891 info->vdbox_enable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
885 vebox_disable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >> 892 info->vebox_enable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
886 GEN11_GT_VEBOX_DISABLE_SHIFT; 893 GEN11_GT_VEBOX_DISABLE_SHIFT;
887 894
888 DRM_DEBUG_DRIVER("vdbox disable: %04x\n", vdbox_disable); 895 DRM_DEBUG_DRIVER("vdbox enable: %04x\n", info->vdbox_enable);
889 for (i = 0; i < I915_MAX_VCS; i++) { 896 for (i = 0; i < I915_MAX_VCS; i++) {
890 if (!HAS_ENGINE(dev_priv, _VCS(i))) 897 if (!HAS_ENGINE(dev_priv, _VCS(i)))
891 continue; 898 continue;
892 899
893 if (!(BIT(i) & vdbox_disable)) 900 if (!(BIT(i) & info->vdbox_enable)) {
894 continue; 901 info->ring_mask &= ~ENGINE_MASK(_VCS(i));
895 902 DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
896 info->ring_mask &= ~ENGINE_MASK(_VCS(i)); 903 }
897 DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
898 } 904 }
899 905
900 DRM_DEBUG_DRIVER("vebox disable: %04x\n", vebox_disable); 906 DRM_DEBUG_DRIVER("vebox enable: %04x\n", info->vebox_enable);
901 for (i = 0; i < I915_MAX_VECS; i++) { 907 for (i = 0; i < I915_MAX_VECS; i++) {
902 if (!HAS_ENGINE(dev_priv, _VECS(i))) 908 if (!HAS_ENGINE(dev_priv, _VECS(i)))
903 continue; 909 continue;
904 910
905 if (!(BIT(i) & vebox_disable)) 911 if (!(BIT(i) & info->vebox_enable)) {
906 continue; 912 info->ring_mask &= ~ENGINE_MASK(_VECS(i));
907 913 DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
908 info->ring_mask &= ~ENGINE_MASK(_VECS(i)); 914 }
909 DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
910 } 915 }
911} 916}
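The fuse register has a bit set for each fused-off engine; complementing it once up front lets both loops test an enable mask directly, which is also what the debug output now reports. A runnable sketch of the arithmetic with invented sample fuse values (the field layout is illustrative, not the GEN11 register format):

#include <stdint.h>
#include <stdio.h>

#define I915_MAX_VCS 4
#define VDBOX_DISABLE_MASK 0x0fu /* illustrative field layout */

int main(void)
{
	/* Pretend the fuse register says VCS1 and VCS3 are fused off. */
	uint32_t media_fuse_reg = (1u << 1) | (1u << 3);

	/* One complement up front: work with "enabled" bits from here on. */
	uint32_t vdbox_enable = ~media_fuse_reg & VDBOX_DISABLE_MASK;
	uint32_t ring_mask = 0xfu; /* assume four VCS engines present */

	for (int i = 0; i < I915_MAX_VCS; i++) {
		if (!(vdbox_enable & (1u << i))) {
			ring_mask &= ~(1u << i);
			printf("vcs%d fused off\n", i);
		}
	}
	printf("vdbox enable %#x, ring_mask %#x\n", vdbox_enable, ring_mask);
	return 0; /* prints vcs1/vcs3 fused off, ring_mask 0x5 */
}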
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 6eecd64734d5..88f97210dc49 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -25,6 +25,8 @@
25#ifndef _INTEL_DEVICE_INFO_H_ 25#ifndef _INTEL_DEVICE_INFO_H_
26#define _INTEL_DEVICE_INFO_H_ 26#define _INTEL_DEVICE_INFO_H_
27 27
28#include <uapi/drm/i915_drm.h>
29
28#include "intel_display.h" 30#include "intel_display.h"
29 31
30struct drm_printer; 32struct drm_printer;
@@ -74,21 +76,25 @@ enum intel_platform {
74 INTEL_MAX_PLATFORMS 76 INTEL_MAX_PLATFORMS
75}; 77};
76 78
79enum intel_ppgtt {
80 INTEL_PPGTT_NONE = I915_GEM_PPGTT_NONE,
81 INTEL_PPGTT_ALIASING = I915_GEM_PPGTT_ALIASING,
82 INTEL_PPGTT_FULL = I915_GEM_PPGTT_FULL,
83 INTEL_PPGTT_FULL_4LVL,
84};
85
77#define DEV_INFO_FOR_EACH_FLAG(func) \ 86#define DEV_INFO_FOR_EACH_FLAG(func) \
78 func(is_mobile); \ 87 func(is_mobile); \
79 func(is_lp); \ 88 func(is_lp); \
80 func(is_alpha_support); \ 89 func(is_alpha_support); \
81 /* Keep has_* in alphabetical order */ \ 90 /* Keep has_* in alphabetical order */ \
82 func(has_64bit_reloc); \ 91 func(has_64bit_reloc); \
83 func(has_aliasing_ppgtt); \
84 func(has_csr); \ 92 func(has_csr); \
85 func(has_ddi); \ 93 func(has_ddi); \
86 func(has_dp_mst); \ 94 func(has_dp_mst); \
87 func(has_reset_engine); \ 95 func(has_reset_engine); \
88 func(has_fbc); \ 96 func(has_fbc); \
89 func(has_fpga_dbg); \ 97 func(has_fpga_dbg); \
90 func(has_full_ppgtt); \
91 func(has_full_48bit_ppgtt); \
92 func(has_gmch_display); \ 98 func(has_gmch_display); \
93 func(has_guc); \ 99 func(has_guc); \
94 func(has_guc_ct); \ 100 func(has_guc_ct); \
@@ -118,7 +124,7 @@ enum intel_platform {
118 124
119struct sseu_dev_info { 125struct sseu_dev_info {
120 u8 slice_mask; 126 u8 slice_mask;
121 u8 subslice_mask[GEN_MAX_SUBSLICES]; 127 u8 subslice_mask[GEN_MAX_SLICES];
122 u16 eu_total; 128 u16 eu_total;
123 u8 eu_per_subslice; 129 u8 eu_per_subslice;
124 u8 min_eu_in_pool; 130 u8 min_eu_in_pool;
@@ -154,6 +160,7 @@ struct intel_device_info {
154 enum intel_platform platform; 160 enum intel_platform platform;
155 u32 platform_mask; 161 u32 platform_mask;
156 162
163 enum intel_ppgtt ppgtt;
157 unsigned int page_sizes; /* page sizes supported by the HW */ 164 unsigned int page_sizes; /* page sizes supported by the HW */
158 165
159 u32 display_mmio_offset; 166 u32 display_mmio_offset;
@@ -170,7 +177,6 @@ struct intel_device_info {
170 /* Register offsets for the various display pipes and transcoders */ 177 /* Register offsets for the various display pipes and transcoders */
171 int pipe_offsets[I915_MAX_TRANSCODERS]; 178 int pipe_offsets[I915_MAX_TRANSCODERS];
172 int trans_offsets[I915_MAX_TRANSCODERS]; 179 int trans_offsets[I915_MAX_TRANSCODERS];
173 int palette_offsets[I915_MAX_PIPES];
174 int cursor_offsets[I915_MAX_PIPES]; 180 int cursor_offsets[I915_MAX_PIPES];
175 181
176 /* Slice/subslice/EU info */ 182 /* Slice/subslice/EU info */
@@ -178,6 +184,10 @@ struct intel_device_info {
178 184
179 u32 cs_timestamp_frequency_khz; 185 u32 cs_timestamp_frequency_khz;
180 186
187 /* Enabled (not fused off) media engine bitmasks. */
188 u8 vdbox_enable;
189 u8 vebox_enable;
190
181 struct color_luts { 191 struct color_luts {
182 u16 degamma_lut_size; 192 u16 degamma_lut_size;
183 u16 gamma_lut_size; 193 u16 gamma_lut_size;
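The ordered intel_ppgtt enum added above (its first three values track the uapi I915_GEM_PPGTT_* constants) is what lets the has_aliasing_ppgtt/has_full_ppgtt/has_full_48bit_ppgtt flag trio disappear from DEV_INFO_FOR_EACH_FLAG: each capability becomes a comparison against one field. A sketch under the assumption that the replacement HAS_* checks take this shape; the macro definitions here are illustrative, not quoted from i915_drv.h.

#include <stdio.h>

enum intel_ppgtt {
	INTEL_PPGTT_NONE,
	INTEL_PPGTT_ALIASING,
	INTEL_PPGTT_FULL,
	INTEL_PPGTT_FULL_4LVL,
};

/* Assumed shape of the replacement checks: an ordered enum makes each
 * capability a single comparison instead of a dedicated flag. */
#define HAS_PPGTT(p)            ((p) != INTEL_PPGTT_NONE)
#define HAS_FULL_PPGTT(p)       ((p) >= INTEL_PPGTT_FULL)
#define HAS_FULL_48BIT_PPGTT(p) ((p) >= INTEL_PPGTT_FULL_4LVL)

int main(void)
{
	enum intel_ppgtt p = INTEL_PPGTT_FULL;

	printf("ppgtt: %d, full: %d, 48bit: %d\n",
	       HAS_PPGTT(p), HAS_FULL_PPGTT(p), HAS_FULL_48BIT_PPGTT(p));
	return 0; /* prints 1, 1, 0 */
}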
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 9741cc419e1b..812ec5ae5c7b 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -24,7 +24,6 @@
24 * Eric Anholt <eric@anholt.net> 24 * Eric Anholt <eric@anholt.net>
25 */ 25 */
26 26
27#include <linux/dmi.h>
28#include <linux/module.h> 27#include <linux/module.h>
29#include <linux/input.h> 28#include <linux/input.h>
30#include <linux/i2c.h> 29#include <linux/i2c.h>
@@ -74,55 +73,6 @@ static const uint64_t i9xx_format_modifiers[] = {
74 DRM_FORMAT_MOD_INVALID 73 DRM_FORMAT_MOD_INVALID
75}; 74};
76 75
77static const uint32_t skl_primary_formats[] = {
78 DRM_FORMAT_C8,
79 DRM_FORMAT_RGB565,
80 DRM_FORMAT_XRGB8888,
81 DRM_FORMAT_XBGR8888,
82 DRM_FORMAT_ARGB8888,
83 DRM_FORMAT_ABGR8888,
84 DRM_FORMAT_XRGB2101010,
85 DRM_FORMAT_XBGR2101010,
86 DRM_FORMAT_YUYV,
87 DRM_FORMAT_YVYU,
88 DRM_FORMAT_UYVY,
89 DRM_FORMAT_VYUY,
90};
91
92static const uint32_t skl_pri_planar_formats[] = {
93 DRM_FORMAT_C8,
94 DRM_FORMAT_RGB565,
95 DRM_FORMAT_XRGB8888,
96 DRM_FORMAT_XBGR8888,
97 DRM_FORMAT_ARGB8888,
98 DRM_FORMAT_ABGR8888,
99 DRM_FORMAT_XRGB2101010,
100 DRM_FORMAT_XBGR2101010,
101 DRM_FORMAT_YUYV,
102 DRM_FORMAT_YVYU,
103 DRM_FORMAT_UYVY,
104 DRM_FORMAT_VYUY,
105 DRM_FORMAT_NV12,
106};
107
108static const uint64_t skl_format_modifiers_noccs[] = {
109 I915_FORMAT_MOD_Yf_TILED,
110 I915_FORMAT_MOD_Y_TILED,
111 I915_FORMAT_MOD_X_TILED,
112 DRM_FORMAT_MOD_LINEAR,
113 DRM_FORMAT_MOD_INVALID
114};
115
116static const uint64_t skl_format_modifiers_ccs[] = {
117 I915_FORMAT_MOD_Yf_TILED_CCS,
118 I915_FORMAT_MOD_Y_TILED_CCS,
119 I915_FORMAT_MOD_Yf_TILED,
120 I915_FORMAT_MOD_Y_TILED,
121 I915_FORMAT_MOD_X_TILED,
122 DRM_FORMAT_MOD_LINEAR,
123 DRM_FORMAT_MOD_INVALID
124};
125
126/* Cursor formats */ 76/* Cursor formats */
127static const uint32_t intel_cursor_formats[] = { 77static const uint32_t intel_cursor_formats[] = {
128 DRM_FORMAT_ARGB8888, 78 DRM_FORMAT_ARGB8888,
@@ -141,15 +91,15 @@ static void ironlake_pch_clock_get(struct intel_crtc *crtc,
141static int intel_framebuffer_init(struct intel_framebuffer *ifb, 91static int intel_framebuffer_init(struct intel_framebuffer *ifb,
142 struct drm_i915_gem_object *obj, 92 struct drm_i915_gem_object *obj,
143 struct drm_mode_fb_cmd2 *mode_cmd); 93 struct drm_mode_fb_cmd2 *mode_cmd);
144static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc); 94static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
145static void intel_set_pipe_timings(struct intel_crtc *intel_crtc); 95static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
146static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc); 96static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
147static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc, 97 const struct intel_link_m_n *m_n,
148 struct intel_link_m_n *m_n, 98 const struct intel_link_m_n *m2_n2);
149 struct intel_link_m_n *m2_n2); 99static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
150static void ironlake_set_pipeconf(struct drm_crtc *crtc); 100static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
151static void haswell_set_pipeconf(struct drm_crtc *crtc); 101static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
152static void haswell_set_pipemisc(struct drm_crtc *crtc); 102static void haswell_set_pipemisc(const struct intel_crtc_state *crtc_state);
153static void vlv_prepare_pll(struct intel_crtc *crtc, 103static void vlv_prepare_pll(struct intel_crtc *crtc,
154 const struct intel_crtc_state *pipe_config); 104 const struct intel_crtc_state *pipe_config);
155static void chv_prepare_pll(struct intel_crtc *crtc, 105static void chv_prepare_pll(struct intel_crtc *crtc,
@@ -158,9 +108,9 @@ static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
158static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *); 108static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
159static void intel_crtc_init_scalers(struct intel_crtc *crtc, 109static void intel_crtc_init_scalers(struct intel_crtc *crtc,
160 struct intel_crtc_state *crtc_state); 110 struct intel_crtc_state *crtc_state);
161static void skylake_pfit_enable(struct intel_crtc *crtc); 111static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
162static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force); 112static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
163static void ironlake_pfit_enable(struct intel_crtc *crtc); 113static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
164static void intel_modeset_setup_hw_state(struct drm_device *dev, 114static void intel_modeset_setup_hw_state(struct drm_device *dev,
165 struct drm_modeset_acquire_ctx *ctx); 115 struct drm_modeset_acquire_ctx *ctx);
166static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc); 116static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
@@ -506,23 +456,8 @@ static const struct intel_limit intel_limits_bxt = {
506}; 456};
507 457
508static void 458static void
509skl_wa_528(struct drm_i915_private *dev_priv, int pipe, bool enable)
510{
511 if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
512 return;
513
514 if (enable)
515 I915_WRITE(CHICKEN_PIPESL_1(pipe), HSW_FBCQ_DIS);
516 else
517 I915_WRITE(CHICKEN_PIPESL_1(pipe), 0);
518}
519
520static void
521skl_wa_clkgate(struct drm_i915_private *dev_priv, int pipe, bool enable) 459skl_wa_clkgate(struct drm_i915_private *dev_priv, int pipe, bool enable)
522{ 460{
523 if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
524 return;
525
526 if (enable) 461 if (enable)
527 I915_WRITE(CLKGATE_DIS_PSL(pipe), 462 I915_WRITE(CLKGATE_DIS_PSL(pipe),
528 DUPS1_GATING_DIS | DUPS2_GATING_DIS); 463 DUPS1_GATING_DIS | DUPS2_GATING_DIS);
@@ -1381,6 +1316,7 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1381 "PCH LVDS enabled on transcoder %c, should be disabled\n", 1316 "PCH LVDS enabled on transcoder %c, should be disabled\n",
1382 pipe_name(pipe)); 1317 pipe_name(pipe));
1383 1318
1319 /* PCH SDVOB multiplex with HDMIB */
1384 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB); 1320 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
1385 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC); 1321 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
1386 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID); 1322 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
@@ -1565,14 +1501,15 @@ static void i9xx_enable_pll(struct intel_crtc *crtc,
1565 } 1501 }
1566} 1502}
1567 1503
1568static void i9xx_disable_pll(struct intel_crtc *crtc) 1504static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
1569{ 1505{
1506 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1570 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1507 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1571 enum pipe pipe = crtc->pipe; 1508 enum pipe pipe = crtc->pipe;
1572 1509
1573 /* Disable DVO 2x clock on both PLLs if necessary */ 1510 /* Disable DVO 2x clock on both PLLs if necessary */
1574 if (IS_I830(dev_priv) && 1511 if (IS_I830(dev_priv) &&
1575 intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) && 1512 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO) &&
1576 !intel_num_dvo_pipes(dev_priv)) { 1513 !intel_num_dvo_pipes(dev_priv)) {
1577 I915_WRITE(DPLL(PIPE_B), 1514 I915_WRITE(DPLL(PIPE_B),
1578 I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE); 1515 I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
@@ -1666,16 +1603,16 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1666 I915_READ(dpll_reg) & port_mask, expected_mask); 1603 I915_READ(dpll_reg) & port_mask, expected_mask);
1667} 1604}
1668 1605
1669static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, 1606static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
1670 enum pipe pipe)
1671{ 1607{
1672 struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv, 1608 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1673 pipe); 1609 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1610 enum pipe pipe = crtc->pipe;
1674 i915_reg_t reg; 1611 i915_reg_t reg;
1675 uint32_t val, pipeconf_val; 1612 uint32_t val, pipeconf_val;
1676 1613
1677 /* Make sure PCH DPLL is enabled */ 1614 /* Make sure PCH DPLL is enabled */
1678 assert_shared_dpll_enabled(dev_priv, intel_crtc->config->shared_dpll); 1615 assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);
1679 1616
1680 /* FDI must be feeding us bits for PCH ports */ 1617 /* FDI must be feeding us bits for PCH ports */
1681 assert_fdi_tx_enabled(dev_priv, pipe); 1618 assert_fdi_tx_enabled(dev_priv, pipe);
@@ -1701,7 +1638,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1701 * here for both 8bpc and 12bpc. 1638 * here for both 8bpc and 12bpc.
1702 */ 1639 */
1703 val &= ~PIPECONF_BPC_MASK; 1640 val &= ~PIPECONF_BPC_MASK;
1704 if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_HDMI)) 1641 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1705 val |= PIPECONF_8BPC; 1642 val |= PIPECONF_8BPC;
1706 else 1643 else
1707 val |= pipeconf_val & PIPECONF_BPC_MASK; 1644 val |= pipeconf_val & PIPECONF_BPC_MASK;
@@ -1710,7 +1647,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1710 val &= ~TRANS_INTERLACE_MASK; 1647 val &= ~TRANS_INTERLACE_MASK;
1711 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) 1648 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
1712 if (HAS_PCH_IBX(dev_priv) && 1649 if (HAS_PCH_IBX(dev_priv) &&
1713 intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO)) 1650 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
1714 val |= TRANS_LEGACY_INTERLACED_ILK; 1651 val |= TRANS_LEGACY_INTERLACED_ILK;
1715 else 1652 else
1716 val |= TRANS_INTERLACED; 1653 val |= TRANS_INTERLACED;
@@ -2254,6 +2191,11 @@ static u32 intel_adjust_tile_offset(int *x, int *y,
2254 return new_offset; 2191 return new_offset;
2255} 2192}
2256 2193
2194static bool is_surface_linear(u64 modifier, int color_plane)
2195{
2196 return modifier == DRM_FORMAT_MOD_LINEAR;
2197}
2198
2257static u32 intel_adjust_aligned_offset(int *x, int *y, 2199static u32 intel_adjust_aligned_offset(int *x, int *y,
2258 const struct drm_framebuffer *fb, 2200 const struct drm_framebuffer *fb,
2259 int color_plane, 2201 int color_plane,
@@ -2266,7 +2208,7 @@ static u32 intel_adjust_aligned_offset(int *x, int *y,
2266 2208
2267 WARN_ON(new_offset > old_offset); 2209 WARN_ON(new_offset > old_offset);
2268 2210
2269 if (fb->modifier != DRM_FORMAT_MOD_LINEAR) { 2211 if (!is_surface_linear(fb->modifier, color_plane)) {
2270 unsigned int tile_size, tile_width, tile_height; 2212 unsigned int tile_size, tile_width, tile_height;
2271 unsigned int pitch_tiles; 2213 unsigned int pitch_tiles;
2272 2214
@@ -2330,14 +2272,13 @@ static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
2330 unsigned int rotation, 2272 unsigned int rotation,
2331 u32 alignment) 2273 u32 alignment)
2332{ 2274{
2333 uint64_t fb_modifier = fb->modifier;
2334 unsigned int cpp = fb->format->cpp[color_plane]; 2275 unsigned int cpp = fb->format->cpp[color_plane];
2335 u32 offset, offset_aligned; 2276 u32 offset, offset_aligned;
2336 2277
2337 if (alignment) 2278 if (alignment)
2338 alignment--; 2279 alignment--;
2339 2280
2340 if (fb_modifier != DRM_FORMAT_MOD_LINEAR) { 2281 if (!is_surface_linear(fb->modifier, color_plane)) {
2341 unsigned int tile_size, tile_width, tile_height; 2282 unsigned int tile_size, tile_width, tile_height;
2342 unsigned int tile_rows, tiles, pitch_tiles; 2283 unsigned int tile_rows, tiles, pitch_tiles;
2343 2284
@@ -2574,7 +2515,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
2574 tile_size); 2515 tile_size);
2575 offset /= tile_size; 2516 offset /= tile_size;
2576 2517
2577 if (fb->modifier != DRM_FORMAT_MOD_LINEAR) { 2518 if (!is_surface_linear(fb->modifier, i)) {
2578 unsigned int tile_width, tile_height; 2519 unsigned int tile_width, tile_height;
2579 unsigned int pitch_tiles; 2520 unsigned int pitch_tiles;
2580 struct drm_rect r; 2521 struct drm_rect r;
@@ -2788,10 +2729,6 @@ intel_set_plane_visible(struct intel_crtc_state *crtc_state,
2788 crtc_state->base.plane_mask |= drm_plane_mask(&plane->base); 2729 crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
2789 else 2730 else
2790 crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base); 2731 crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
2791
2792 DRM_DEBUG_KMS("%s active planes 0x%x\n",
2793 crtc_state->base.crtc->name,
2794 crtc_state->active_planes);
2795} 2732}
2796 2733
2797static void fixup_active_planes(struct intel_crtc_state *crtc_state) 2734static void fixup_active_planes(struct intel_crtc_state *crtc_state)
@@ -2819,6 +2756,10 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
2819 struct intel_plane_state *plane_state = 2756 struct intel_plane_state *plane_state =
2820 to_intel_plane_state(plane->base.state); 2757 to_intel_plane_state(plane->base.state);
2821 2758
2759 DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
2760 plane->base.base.id, plane->base.name,
2761 crtc->base.base.id, crtc->base.name);
2762
2822 intel_set_plane_visible(crtc_state, plane_state, false); 2763 intel_set_plane_visible(crtc_state, plane_state, false);
2823 fixup_active_planes(crtc_state); 2764 fixup_active_planes(crtc_state);
2824 2765
@@ -2890,6 +2831,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
2890 return; 2831 return;
2891 2832
2892valid_fb: 2833valid_fb:
2834 intel_state->base.rotation = plane_config->rotation;
2893 intel_fill_fb_ggtt_view(&intel_state->view, fb, 2835 intel_fill_fb_ggtt_view(&intel_state->view, fb,
2894 intel_state->base.rotation); 2836 intel_state->base.rotation);
2895 intel_state->color_plane[0].stride = 2837 intel_state->color_plane[0].stride =
@@ -3098,28 +3040,6 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
3098 return 0; 3040 return 0;
3099} 3041}
3100 3042
3101static int
3102skl_check_nv12_surface(struct intel_plane_state *plane_state)
3103{
3104 /* Display WA #1106 */
3105 if (plane_state->base.rotation !=
3106 (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90) &&
3107 plane_state->base.rotation != DRM_MODE_ROTATE_270)
3108 return 0;
3109
3110 /*
3111 * src coordinates are rotated here.
3112 * We check height but report it as width
3113 */
3114 if (((drm_rect_height(&plane_state->base.src) >> 16) % 4) != 0) {
3115 DRM_DEBUG_KMS("src width must be multiple "
3116 "of 4 for rotated NV12\n");
3117 return -EINVAL;
3118 }
3119
3120 return 0;
3121}
3122
3123static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state) 3043static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
3124{ 3044{
3125 const struct drm_framebuffer *fb = plane_state->base.fb; 3045 const struct drm_framebuffer *fb = plane_state->base.fb;
@@ -3198,9 +3118,6 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
3198 * the main surface setup depends on it. 3118 * the main surface setup depends on it.
3199 */ 3119 */
3200 if (fb->format->format == DRM_FORMAT_NV12) { 3120 if (fb->format->format == DRM_FORMAT_NV12) {
3201 ret = skl_check_nv12_surface(plane_state);
3202 if (ret)
3203 return ret;
3204 ret = skl_check_nv12_aux_surface(plane_state); 3121 ret = skl_check_nv12_aux_surface(plane_state);
3205 if (ret) 3122 if (ret)
3206 return ret; 3123 return ret;
@@ -3448,7 +3365,6 @@ static void i9xx_update_plane(struct intel_plane *plane,
3448 intel_plane_ggtt_offset(plane_state) + 3365 intel_plane_ggtt_offset(plane_state) +
3449 dspaddr_offset); 3366 dspaddr_offset);
3450 } 3367 }
3451 POSTING_READ_FW(reg);
3452 3368
3453 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 3369 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
3454} 3370}
@@ -3467,7 +3383,6 @@ static void i9xx_disable_plane(struct intel_plane *plane,
3467 I915_WRITE_FW(DSPSURF(i9xx_plane), 0); 3383 I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
3468 else 3384 else
3469 I915_WRITE_FW(DSPADDR(i9xx_plane), 0); 3385 I915_WRITE_FW(DSPADDR(i9xx_plane), 0);
3470 POSTING_READ_FW(DSPCNTR(i9xx_plane));
3471 3386
3472 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 3387 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
3473} 3388}
@@ -3527,13 +3442,13 @@ static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
3527/* 3442/*
3528 * This function detaches (aka. unbinds) unused scalers in hardware 3443 * This function detaches (aka. unbinds) unused scalers in hardware
3529 */ 3444 */
3530static void skl_detach_scalers(struct intel_crtc *intel_crtc) 3445static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
3531{ 3446{
3532 struct intel_crtc_scaler_state *scaler_state; 3447 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
3448 const struct intel_crtc_scaler_state *scaler_state =
3449 &crtc_state->scaler_state;
3533 int i; 3450 int i;
3534 3451
3535 scaler_state = &intel_crtc->config->scaler_state;
3536
3537 /* loop through and disable scalers that aren't in use */ 3452 /* loop through and disable scalers that aren't in use */
3538 for (i = 0; i < intel_crtc->num_scalers; i++) { 3453 for (i = 0; i < intel_crtc->num_scalers; i++) {
3539 if (!scaler_state->scalers[i].in_use) 3454 if (!scaler_state->scalers[i].in_use)
@@ -3597,29 +3512,38 @@ static u32 skl_plane_ctl_format(uint32_t pixel_format)
3597 return 0; 3512 return 0;
3598} 3513}
3599 3514
3600/* 3515static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
3601 * XXX: For ARBG/ABGR formats we default to expecting scanout buffers
3602 * to be already pre-multiplied. We need to add a knob (or a different
3603 * DRM_FORMAT) for user-space to configure that.
3604 */
3605static u32 skl_plane_ctl_alpha(uint32_t pixel_format)
3606{ 3516{
3607 switch (pixel_format) { 3517 if (!plane_state->base.fb->format->has_alpha)
3608 case DRM_FORMAT_ABGR8888: 3518 return PLANE_CTL_ALPHA_DISABLE;
3609 case DRM_FORMAT_ARGB8888: 3519
3520 switch (plane_state->base.pixel_blend_mode) {
3521 case DRM_MODE_BLEND_PIXEL_NONE:
3522 return PLANE_CTL_ALPHA_DISABLE;
3523 case DRM_MODE_BLEND_PREMULTI:
3610 return PLANE_CTL_ALPHA_SW_PREMULTIPLY; 3524 return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
3525 case DRM_MODE_BLEND_COVERAGE:
3526 return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
3611 default: 3527 default:
3528 MISSING_CASE(plane_state->base.pixel_blend_mode);
3612 return PLANE_CTL_ALPHA_DISABLE; 3529 return PLANE_CTL_ALPHA_DISABLE;
3613 } 3530 }
3614} 3531}
3615 3532
3616static u32 glk_plane_color_ctl_alpha(uint32_t pixel_format) 3533static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
3617{ 3534{
3618 switch (pixel_format) { 3535 if (!plane_state->base.fb->format->has_alpha)
3619 case DRM_FORMAT_ABGR8888: 3536 return PLANE_COLOR_ALPHA_DISABLE;
3620 case DRM_FORMAT_ARGB8888: 3537
3538 switch (plane_state->base.pixel_blend_mode) {
3539 case DRM_MODE_BLEND_PIXEL_NONE:
3540 return PLANE_COLOR_ALPHA_DISABLE;
3541 case DRM_MODE_BLEND_PREMULTI:
3621 return PLANE_COLOR_ALPHA_SW_PREMULTIPLY; 3542 return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
3543 case DRM_MODE_BLEND_COVERAGE:
3544 return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
3622 default: 3545 default:
3546 MISSING_CASE(plane_state->base.pixel_blend_mode);
3623 return PLANE_COLOR_ALPHA_DISABLE; 3547 return PLANE_COLOR_ALPHA_DISABLE;
3624 } 3548 }
3625} 3549}
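With this change the alpha helpers key off the generic "pixel blend mode" plane property rather than guessing from ARGB/ABGR formats. A hedged libdrm fragment showing how a compositor would request coverage blending; plane_id, the property id, and the enum value for "Coverage" are assumed to have been discovered via drmModeObjectGetProperties(), and error handling is trimmed.

#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int set_coverage_blend(int fd, uint32_t plane_id,
			      uint32_t blend_prop_id,
			      uint64_t coverage_enum_value)
{
	drmModeAtomicReq *req = drmModeAtomicAlloc();
	int ret;

	if (!req)
		return -1;

	/* "pixel blend mode" is a plane property; the numeric value for
	 * the "Coverage" enum must be read from the property metadata. */
	drmModeAtomicAddProperty(req, plane_id, blend_prop_id,
				 coverage_enum_value);

	ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_ALLOW_MODESET, NULL);
	drmModeAtomicFree(req);
	return ret;
}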
@@ -3696,7 +3620,7 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
3696 plane_ctl = PLANE_CTL_ENABLE; 3620 plane_ctl = PLANE_CTL_ENABLE;
3697 3621
3698 if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) { 3622 if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
3699 plane_ctl |= skl_plane_ctl_alpha(fb->format->format); 3623 plane_ctl |= skl_plane_ctl_alpha(plane_state);
3700 plane_ctl |= 3624 plane_ctl |=
3701 PLANE_CTL_PIPE_GAMMA_ENABLE | 3625 PLANE_CTL_PIPE_GAMMA_ENABLE |
3702 PLANE_CTL_PIPE_CSC_ENABLE | 3626 PLANE_CTL_PIPE_CSC_ENABLE |
@@ -3731,6 +3655,7 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
3731 struct drm_i915_private *dev_priv = 3655 struct drm_i915_private *dev_priv =
3732 to_i915(plane_state->base.plane->dev); 3656 to_i915(plane_state->base.plane->dev);
3733 const struct drm_framebuffer *fb = plane_state->base.fb; 3657 const struct drm_framebuffer *fb = plane_state->base.fb;
3658 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
3734 u32 plane_color_ctl = 0; 3659 u32 plane_color_ctl = 0;
3735 3660
3736 if (INTEL_GEN(dev_priv) < 11) { 3661 if (INTEL_GEN(dev_priv) < 11) {
@@ -3738,9 +3663,9 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
3738 plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE; 3663 plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
3739 } 3664 }
3740 plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE; 3665 plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
3741 plane_color_ctl |= glk_plane_color_ctl_alpha(fb->format->format); 3666 plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);
3742 3667
3743 if (fb->format->is_yuv) { 3668 if (fb->format->is_yuv && !icl_is_hdr_plane(plane)) {
3744 if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709) 3669 if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
3745 plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709; 3670 plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
3746 else 3671 else
@@ -3748,6 +3673,8 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
3748 3673
3749 if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE) 3674 if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
3750 plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE; 3675 plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
3676 } else if (fb->format->is_yuv) {
3677 plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
3751 } 3678 }
3752 3679
3753 return plane_color_ctl; 3680 return plane_color_ctl;
@@ -3932,15 +3859,15 @@ static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_sta
3932 3859
3933 /* on skylake this is done by detaching scalers */ 3860 /* on skylake this is done by detaching scalers */
3934 if (INTEL_GEN(dev_priv) >= 9) { 3861 if (INTEL_GEN(dev_priv) >= 9) {
3935 skl_detach_scalers(crtc); 3862 skl_detach_scalers(new_crtc_state);
3936 3863
3937 if (new_crtc_state->pch_pfit.enabled) 3864 if (new_crtc_state->pch_pfit.enabled)
3938 skylake_pfit_enable(crtc); 3865 skylake_pfit_enable(new_crtc_state);
3939 } else if (HAS_PCH_SPLIT(dev_priv)) { 3866 } else if (HAS_PCH_SPLIT(dev_priv)) {
3940 if (new_crtc_state->pch_pfit.enabled) 3867 if (new_crtc_state->pch_pfit.enabled)
3941 ironlake_pfit_enable(crtc); 3868 ironlake_pfit_enable(new_crtc_state);
3942 else if (old_crtc_state->pch_pfit.enabled) 3869 else if (old_crtc_state->pch_pfit.enabled)
3943 ironlake_pfit_disable(crtc, true); 3870 ironlake_pfit_disable(old_crtc_state);
3944 } 3871 }
3945} 3872}
3946 3873
@@ -4339,10 +4266,10 @@ train_done:
4339 DRM_DEBUG_KMS("FDI train done.\n"); 4266 DRM_DEBUG_KMS("FDI train done.\n");
4340} 4267}
4341 4268
4342static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc) 4269static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
4343{ 4270{
4344 struct drm_device *dev = intel_crtc->base.dev; 4271 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
4345 struct drm_i915_private *dev_priv = to_i915(dev); 4272 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
4346 int pipe = intel_crtc->pipe; 4273 int pipe = intel_crtc->pipe;
4347 i915_reg_t reg; 4274 i915_reg_t reg;
4348 u32 temp; 4275 u32 temp;
@@ -4351,7 +4278,7 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
4351 reg = FDI_RX_CTL(pipe); 4278 reg = FDI_RX_CTL(pipe);
4352 temp = I915_READ(reg); 4279 temp = I915_READ(reg);
4353 temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16)); 4280 temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
4354 temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes); 4281 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
4355 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; 4282 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
4356 I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE); 4283 I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
4357 4284
@@ -4500,10 +4427,11 @@ void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
4500} 4427}
4501 4428
4502/* Program iCLKIP clock to the desired frequency */ 4429/* Program iCLKIP clock to the desired frequency */
4503static void lpt_program_iclkip(struct intel_crtc *crtc) 4430static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
4504{ 4431{
4432 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4505 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4433 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4506 int clock = crtc->config->base.adjusted_mode.crtc_clock; 4434 int clock = crtc_state->base.adjusted_mode.crtc_clock;
4507 u32 divsel, phaseinc, auxdiv, phasedir = 0; 4435 u32 divsel, phaseinc, auxdiv, phasedir = 0;
4508 u32 temp; 4436 u32 temp;
4509 4437
@@ -4614,12 +4542,12 @@ int lpt_get_iclkip(struct drm_i915_private *dev_priv)
4614 desired_divisor << auxdiv); 4542 desired_divisor << auxdiv);
4615} 4543}
4616 4544
4617static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc, 4545static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
4618 enum pipe pch_transcoder) 4546 enum pipe pch_transcoder)
4619{ 4547{
4620 struct drm_device *dev = crtc->base.dev; 4548 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4621 struct drm_i915_private *dev_priv = to_i915(dev); 4549 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4622 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; 4550 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
4623 4551
4624 I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder), 4552 I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
4625 I915_READ(HTOTAL(cpu_transcoder))); 4553 I915_READ(HTOTAL(cpu_transcoder)));
@@ -4638,9 +4566,8 @@ static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
4638 I915_READ(VSYNCSHIFT(cpu_transcoder))); 4566 I915_READ(VSYNCSHIFT(cpu_transcoder)));
4639} 4567}
4640 4568
4641static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable) 4569static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
4642{ 4570{
4643 struct drm_i915_private *dev_priv = to_i915(dev);
4644 uint32_t temp; 4571 uint32_t temp;
4645 4572
4646 temp = I915_READ(SOUTH_CHICKEN1); 4573 temp = I915_READ(SOUTH_CHICKEN1);
@@ -4659,22 +4586,23 @@ static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
4659 POSTING_READ(SOUTH_CHICKEN1); 4586 POSTING_READ(SOUTH_CHICKEN1);
4660} 4587}
4661 4588
4662static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc) 4589static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
4663{ 4590{
4664 struct drm_device *dev = intel_crtc->base.dev; 4591 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4592 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4665 4593
4666 switch (intel_crtc->pipe) { 4594 switch (crtc->pipe) {
4667 case PIPE_A: 4595 case PIPE_A:
4668 break; 4596 break;
4669 case PIPE_B: 4597 case PIPE_B:
4670 if (intel_crtc->config->fdi_lanes > 2) 4598 if (crtc_state->fdi_lanes > 2)
4671 cpt_set_fdi_bc_bifurcation(dev, false); 4599 cpt_set_fdi_bc_bifurcation(dev_priv, false);
4672 else 4600 else
4673 cpt_set_fdi_bc_bifurcation(dev, true); 4601 cpt_set_fdi_bc_bifurcation(dev_priv, true);
4674 4602
4675 break; 4603 break;
4676 case PIPE_C: 4604 case PIPE_C:
4677 cpt_set_fdi_bc_bifurcation(dev, true); 4605 cpt_set_fdi_bc_bifurcation(dev_priv, true);
4678 4606
4679 break; 4607 break;
4680 default: 4608 default:
@@ -4731,7 +4659,7 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state,
4731 assert_pch_transcoder_disabled(dev_priv, pipe); 4659 assert_pch_transcoder_disabled(dev_priv, pipe);
4732 4660
4733 if (IS_IVYBRIDGE(dev_priv)) 4661 if (IS_IVYBRIDGE(dev_priv))
4734 ivybridge_update_fdi_bc_bifurcation(crtc); 4662 ivybridge_update_fdi_bc_bifurcation(crtc_state);
4735 4663
4736 /* Write the TU size bits before fdi link training, so that error 4664 /* Write the TU size bits before fdi link training, so that error
4737 * detection works. */ 4665 * detection works. */
@@ -4764,11 +4692,11 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state,
4764 * Note that enable_shared_dpll tries to do the right thing, but 4692 * Note that enable_shared_dpll tries to do the right thing, but
4765 * get_shared_dpll unconditionally resets the pll - we need that to have 4693 * get_shared_dpll unconditionally resets the pll - we need that to have
4766 * the right LVDS enable sequence. */ 4694 * the right LVDS enable sequence. */
4767 intel_enable_shared_dpll(crtc); 4695 intel_enable_shared_dpll(crtc_state);
4768 4696
4769 /* set transcoder timing, panel must allow it */ 4697 /* set transcoder timing, panel must allow it */
4770 assert_panel_unlocked(dev_priv, pipe); 4698 assert_panel_unlocked(dev_priv, pipe);
4771 ironlake_pch_transcoder_set_timings(crtc, pipe); 4699 ironlake_pch_transcoder_set_timings(crtc_state, pipe);
4772 4700
4773 intel_fdi_normal_train(crtc); 4701 intel_fdi_normal_train(crtc);
4774 4702
@@ -4800,7 +4728,7 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state,
4800 I915_WRITE(reg, temp); 4728 I915_WRITE(reg, temp);
4801 } 4729 }
4802 4730
4803 ironlake_enable_pch_transcoder(dev_priv, pipe); 4731 ironlake_enable_pch_transcoder(crtc_state);
4804} 4732}
4805 4733
4806static void lpt_pch_enable(const struct intel_atomic_state *state, 4734static void lpt_pch_enable(const struct intel_atomic_state *state,
@@ -4812,10 +4740,10 @@ static void lpt_pch_enable(const struct intel_atomic_state *state,
4812 4740
4813 assert_pch_transcoder_disabled(dev_priv, PIPE_A); 4741 assert_pch_transcoder_disabled(dev_priv, PIPE_A);
4814 4742
4815 lpt_program_iclkip(crtc); 4743 lpt_program_iclkip(crtc_state);
4816 4744
4817 /* Set transcoder timing. */ 4745 /* Set transcoder timing. */
4818 ironlake_pch_transcoder_set_timings(crtc, PIPE_A); 4746 ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A);
4819 4747
4820 lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); 4748 lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
4821} 4749}
@@ -4850,8 +4778,31 @@ static void cpt_verify_modeset(struct drm_device *dev, int pipe)
4850 * chroma samples for both of the luma samples, and thus we don't 4778 * chroma samples for both of the luma samples, and thus we don't
4851 * actually get the expected MPEG2 chroma siting convention :( 4779 * actually get the expected MPEG2 chroma siting convention :(
4852 * The same behaviour is observed on pre-SKL platforms as well. 4780 * The same behaviour is observed on pre-SKL platforms as well.
4781 *
4782 * Theory behind the formula (note that we ignore sub-pixel
4783 * source coordinates):
4784 * s = source sample position
4785 * d = destination sample position
4786 *
4787 * Downscaling 4:1:
4788 * -0.5
4789 * | 0.0
4790 * | | 1.5 (initial phase)
4791 * | | |
4792 * v v v
4793 * | s | s | s | s |
4794 * | d |
4795 *
4796 * Upscaling 1:4:
4797 * -0.5
4798 * | -0.375 (initial phase)
4799 * | | 0.0
4800 * | | |
4801 * v v v
4802 * | s |
4803 * | d | d | d | d |
4853 */ 4804 */
4854u16 skl_scaler_calc_phase(int sub, bool chroma_cosited) 4805u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
4855{ 4806{
4856 int phase = -0x8000; 4807 int phase = -0x8000;
4857 u16 trip = 0; 4808 u16 trip = 0;
@@ -4859,6 +4810,15 @@ u16 skl_scaler_calc_phase(int sub, bool chroma_cosited)
4859 if (chroma_cosited) 4810 if (chroma_cosited)
4860 phase += (sub - 1) * 0x8000 / sub; 4811 phase += (sub - 1) * 0x8000 / sub;
4861 4812
4813 phase += scale / (2 * sub);
4814
4815 /*
4816 * Hardware initial phase limited to [-0.5:1.5].
4817 * Since the max hardware scale factor is 3.0, we
 4818 * should never actually exceed 1.0 here.
4819 */
4820 WARN_ON(phase < -0x8000 || phase > 0x18000);
4821
4862 if (phase < 0) 4822 if (phase < 0)
4863 phase = 0x10000 + phase; 4823 phase = 0x10000 + phase;
4864 else 4824 else
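skl_scaler_calc_phase() now centres the filter for the actual scale factor, as the diagrams above describe: in u16.16 fixed point, phase = -0.5, plus (sub - 1)/(2 * sub) when chroma is co-sited, plus scale/(2 * sub). A runnable sketch of the arithmetic; the trip-bit value and the final register packing are illustrative, not the PS_PHASE_* encoding.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PS_PHASE_TRIP 0x1u /* illustrative bit position */

/* u16.16 fixed point throughout: 0x10000 == 1.0 */
static uint32_t calc_phase(int sub, int scale, int cosited)
{
	int phase = -0x8000;            /* start at -0.5 */
	uint32_t trip = 0;

	if (cosited)
		phase += (sub - 1) * 0x8000 / sub;

	/* New in this patch: account for the scale factor. */
	phase += scale / (2 * sub);

	assert(phase >= -0x8000 && phase <= 0x18000); /* hw range [-0.5, 1.5] */

	if (phase < 0)
		phase = 0x10000 + phase; /* hardware's wrapped encoding */
	else
		trip = PS_PHASE_TRIP;

	return phase | trip; /* real code also shifts/masks for the register */
}

int main(void)
{
	/* 2x chroma subsampling (NV12), 1:1 scale, co-sited:
	 * -0.5 + 0.25 + 0.25 = 0.0, so only the trip bit remains. */
	printf("%#x\n", calc_phase(2, 0x10000, 1)); /* prints 0x1 */
	return 0;
}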
@@ -4871,8 +4831,7 @@ static int
4871skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, 4831skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
4872 unsigned int scaler_user, int *scaler_id, 4832 unsigned int scaler_user, int *scaler_id,
4873 int src_w, int src_h, int dst_w, int dst_h, 4833 int src_w, int src_h, int dst_w, int dst_h,
4874 bool plane_scaler_check, 4834 const struct drm_format_info *format, bool need_scaler)
4875 uint32_t pixel_format)
4876{ 4835{
4877 struct intel_crtc_scaler_state *scaler_state = 4836 struct intel_crtc_scaler_state *scaler_state =
4878 &crtc_state->scaler_state; 4837 &crtc_state->scaler_state;
@@ -4881,21 +4840,14 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
4881 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); 4840 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
4882 const struct drm_display_mode *adjusted_mode = 4841 const struct drm_display_mode *adjusted_mode =
4883 &crtc_state->base.adjusted_mode; 4842 &crtc_state->base.adjusted_mode;
4884 int need_scaling;
4885 4843
4886 /* 4844 /*
4887 * Src coordinates are already rotated by 270 degrees for 4845 * Src coordinates are already rotated by 270 degrees for
4888 * the 90/270 degree plane rotation cases (to match the 4846 * the 90/270 degree plane rotation cases (to match the
4889 * GTT mapping), hence no need to account for rotation here. 4847 * GTT mapping), hence no need to account for rotation here.
4890 */ 4848 */
4891 need_scaling = src_w != dst_w || src_h != dst_h; 4849 if (src_w != dst_w || src_h != dst_h)
4892 4850 need_scaler = true;
4893 if (plane_scaler_check)
4894 if (pixel_format == DRM_FORMAT_NV12)
4895 need_scaling = true;
4896
4897 if (crtc_state->ycbcr420 && scaler_user == SKL_CRTC_INDEX)
4898 need_scaling = true;
4899 4851
4900 /* 4852 /*
4901 * Scaling/fitting not supported in IF-ID mode in GEN9+ 4853 * Scaling/fitting not supported in IF-ID mode in GEN9+
@@ -4904,7 +4856,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
4904 * for NV12. 4856 * for NV12.
4905 */ 4857 */
4906 if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable && 4858 if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
4907 need_scaling && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 4859 need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
4908 DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n"); 4860 DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
4909 return -EINVAL; 4861 return -EINVAL;
4910 } 4862 }
@@ -4919,7 +4871,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
4919 * update to free the scaler is done in plane/panel-fit programming. 4871 * update to free the scaler is done in plane/panel-fit programming.
4920 * For this purpose crtc/plane_state->scaler_id isn't reset here. 4872 * For this purpose crtc/plane_state->scaler_id isn't reset here.
4921 */ 4873 */
4922 if (force_detach || !need_scaling) { 4874 if (force_detach || !need_scaler) {
4923 if (*scaler_id >= 0) { 4875 if (*scaler_id >= 0) {
4924 scaler_state->scaler_users &= ~(1 << scaler_user); 4876 scaler_state->scaler_users &= ~(1 << scaler_user);
4925 scaler_state->scalers[*scaler_id].in_use = 0; 4877 scaler_state->scalers[*scaler_id].in_use = 0;
@@ -4933,7 +4885,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
4933 return 0; 4885 return 0;
4934 } 4886 }
4935 4887
4936 if (plane_scaler_check && pixel_format == DRM_FORMAT_NV12 && 4888 if (format && format->format == DRM_FORMAT_NV12 &&
4937 (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) { 4889 (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
4938 DRM_DEBUG_KMS("NV12: src dimensions not met\n"); 4890 DRM_DEBUG_KMS("NV12: src dimensions not met\n");
4939 return -EINVAL; 4891 return -EINVAL;
@@ -4976,12 +4928,16 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
4976int skl_update_scaler_crtc(struct intel_crtc_state *state) 4928int skl_update_scaler_crtc(struct intel_crtc_state *state)
4977{ 4929{
4978 const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode; 4930 const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
4931 bool need_scaler = false;
4932
4933 if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
4934 need_scaler = true;
4979 4935
4980 return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX, 4936 return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
4981 &state->scaler_state.scaler_id, 4937 &state->scaler_state.scaler_id,
4982 state->pipe_src_w, state->pipe_src_h, 4938 state->pipe_src_w, state->pipe_src_h,
4983 adjusted_mode->crtc_hdisplay, 4939 adjusted_mode->crtc_hdisplay,
4984 adjusted_mode->crtc_vdisplay, false, 0); 4940 adjusted_mode->crtc_vdisplay, NULL, need_scaler);
4985} 4941}
4986 4942
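This hunk changes skl_update_scaler()'s contract: instead of a plane_scaler_check/pixel_format pair, callers now pass the drm_format_info and a precomputed need_scaler flag, so the 4:2:0 and NV12 special cases live at the call sites and skl_update_scaler() itself only adds the size-mismatch check. A minimal, compilable sketch of that split, using hypothetical stand-in types rather than the kernel structures:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the relevant crtc state; illustration only. */
struct crtc_cfg {
        int src_w, src_h, dst_w, dst_h;
        bool ycbcr420_output;   /* caller-side special case */
};

static bool crtc_needs_scaler(const struct crtc_cfg *c)
{
        bool need_scaler = false;

        if (c->ycbcr420_output)                 /* decided by the caller now */
                need_scaler = true;
        if (c->src_w != c->dst_w || c->src_h != c->dst_h)
                need_scaler = true;             /* added inside skl_update_scaler() */
        return need_scaler;
}

int main(void)
{
        struct crtc_cfg c = { 1920, 1080, 1920, 1080, true };
        printf("need scaler: %d\n", crtc_needs_scaler(&c));    /* 1: 4:2:0 output */
        return 0;
}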
4987/** 4943/**
@@ -4996,13 +4952,17 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
4996static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, 4952static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
4997 struct intel_plane_state *plane_state) 4953 struct intel_plane_state *plane_state)
4998{ 4954{
4999
5000 struct intel_plane *intel_plane = 4955 struct intel_plane *intel_plane =
5001 to_intel_plane(plane_state->base.plane); 4956 to_intel_plane(plane_state->base.plane);
5002 struct drm_framebuffer *fb = plane_state->base.fb; 4957 struct drm_framebuffer *fb = plane_state->base.fb;
5003 int ret; 4958 int ret;
5004
5005 bool force_detach = !fb || !plane_state->base.visible; 4959 bool force_detach = !fb || !plane_state->base.visible;
4960 bool need_scaler = false;
4961
4962 /* Pre-gen11 and SDR planes always need a scaler for planar formats. */
4963 if (!icl_is_hdr_plane(intel_plane) &&
4964 fb && fb->format->format == DRM_FORMAT_NV12)
4965 need_scaler = true;
5006 4966
5007 ret = skl_update_scaler(crtc_state, force_detach, 4967 ret = skl_update_scaler(crtc_state, force_detach,
5008 drm_plane_index(&intel_plane->base), 4968 drm_plane_index(&intel_plane->base),
@@ -5011,7 +4971,7 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
5011 drm_rect_height(&plane_state->base.src) >> 16, 4971 drm_rect_height(&plane_state->base.src) >> 16,
5012 drm_rect_width(&plane_state->base.dst), 4972 drm_rect_width(&plane_state->base.dst),
5013 drm_rect_height(&plane_state->base.dst), 4973 drm_rect_height(&plane_state->base.dst),
5014 fb ? true : false, fb ? fb->format->format : 0); 4974 fb ? fb->format : NULL, need_scaler);
5015 4975
5016 if (ret || plane_state->scaler_id < 0) 4976 if (ret || plane_state->scaler_id < 0)
5017 return ret; 4977 return ret;
@@ -5057,23 +5017,30 @@ static void skylake_scaler_disable(struct intel_crtc *crtc)
5057 skl_detach_scaler(crtc, i); 5017 skl_detach_scaler(crtc, i);
5058} 5018}
5059 5019
5060static void skylake_pfit_enable(struct intel_crtc *crtc) 5020static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
5061{ 5021{
5062 struct drm_device *dev = crtc->base.dev; 5022 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5063 struct drm_i915_private *dev_priv = to_i915(dev); 5023 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5064 int pipe = crtc->pipe; 5024 enum pipe pipe = crtc->pipe;
5065 struct intel_crtc_scaler_state *scaler_state = 5025 const struct intel_crtc_scaler_state *scaler_state =
5066 &crtc->config->scaler_state; 5026 &crtc_state->scaler_state;
5067 5027
5068 if (crtc->config->pch_pfit.enabled) { 5028 if (crtc_state->pch_pfit.enabled) {
5069 u16 uv_rgb_hphase, uv_rgb_vphase; 5029 u16 uv_rgb_hphase, uv_rgb_vphase;
5030 int pfit_w, pfit_h, hscale, vscale;
5070 int id; 5031 int id;
5071 5032
5072 if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) 5033 if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
5073 return; 5034 return;
5074 5035
5075 uv_rgb_hphase = skl_scaler_calc_phase(1, false); 5036 pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
5076 uv_rgb_vphase = skl_scaler_calc_phase(1, false); 5037 pfit_h = crtc_state->pch_pfit.size & 0xFFFF;
5038
5039 hscale = (crtc_state->pipe_src_w << 16) / pfit_w;
5040 vscale = (crtc_state->pipe_src_h << 16) / pfit_h;
5041
5042 uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
5043 uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
5077 5044
5078 id = scaler_state->scaler_id; 5045 id = scaler_state->scaler_id;
5079 I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN | 5046 I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
@@ -5082,18 +5049,18 @@ static void skylake_pfit_enable(struct intel_crtc *crtc)
5082 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase)); 5049 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
5083 I915_WRITE_FW(SKL_PS_HPHASE(pipe, id), 5050 I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
5084 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase)); 5051 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
5085 I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos); 5052 I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos);
5086 I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size); 5053 I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size);
5087 } 5054 }
5088} 5055}
5089 5056
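skylake_pfit_enable() now derives real scaling ratios for skl_scaler_calc_phase() instead of hardcoding them: the pfit window width sits in bits 31:16 of pch_pfit.size, the height in bits 15:0, and each ratio is 16.16 fixed point (source dimension << 16, divided by the destination dimension). A self-contained sketch of that arithmetic with made-up sizes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* WxH packed the way SKL_PS_WIN_SZ / pch_pfit.size stores it */
        uint32_t pfit_size = (1920u << 16) | 1080u;
        int pipe_src_w = 1280, pipe_src_h = 720;

        int pfit_w = (pfit_size >> 16) & 0xffff;
        int pfit_h = pfit_size & 0xffff;

        /* 16.16 fixed point: 0x10000 == 1.0 */
        int hscale = (pipe_src_w << 16) / pfit_w;
        int vscale = (pipe_src_h << 16) / pfit_h;

        printf("hscale=0x%x (%.3f) vscale=0x%x (%.3f)\n",
               hscale, hscale / 65536.0, vscale, vscale / 65536.0);
        return 0;
}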
5090static void ironlake_pfit_enable(struct intel_crtc *crtc) 5057static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
5091{ 5058{
5092 struct drm_device *dev = crtc->base.dev; 5059 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5093 struct drm_i915_private *dev_priv = to_i915(dev); 5060 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5094 int pipe = crtc->pipe; 5061 int pipe = crtc->pipe;
5095 5062
5096 if (crtc->config->pch_pfit.enabled) { 5063 if (crtc_state->pch_pfit.enabled) {
5097 /* Force use of hard-coded filter coefficients 5064 /* Force use of hard-coded filter coefficients
5098 * as some pre-programmed values are broken, 5065 * as some pre-programmed values are broken,
5099 * e.g. x201. 5066 * e.g. x201.
@@ -5103,8 +5070,8 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc)
5103 PF_PIPE_SEL_IVB(pipe)); 5070 PF_PIPE_SEL_IVB(pipe));
5104 else 5071 else
5105 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); 5072 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
5106 I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos); 5073 I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos);
5107 I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size); 5074 I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size);
5108 } 5075 }
5109} 5076}
5110 5077
@@ -5299,11 +5266,8 @@ static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
5299 if (!crtc_state->nv12_planes) 5266 if (!crtc_state->nv12_planes)
5300 return false; 5267 return false;
5301 5268
5302 if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv)) 5269 /* WA Display #0827: Gen9:all */
5303 return false; 5270 if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv))
5304
5305 if ((INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv)) ||
5306 IS_CANNONLAKE(dev_priv))
5307 return true; 5271 return true;
5308 5272
5309 return false; 5273 return false;
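needs_nv12_wa() collapses the SKL/BXT and CNL special cases into one predicate: Display WA #0827 now applies to every gen9 platform except Geminilake. A tiny model of the new check; the platform struct stands in for the kernel's IS_GEN9()/IS_GEMINILAKE() macros:

#include <stdbool.h>
#include <stdio.h>

struct platform { int gen; bool is_geminilake; };

/* New predicate: WA Display #0827 covers Gen9:all except GLK. */
static bool needs_nv12_wa(const struct platform *p, bool has_nv12_planes)
{
        if (!has_nv12_planes)
                return false;
        return p->gen == 9 && !p->is_geminilake;
}

int main(void)
{
        struct platform skl = { 9, false }, glk = { 9, true };
        printf("skl:%d glk:%d\n",
               needs_nv12_wa(&skl, true), needs_nv12_wa(&glk, true)); /* skl:1 glk:0 */
        return 0;
}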
@@ -5346,7 +5310,6 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
5346 if (needs_nv12_wa(dev_priv, old_crtc_state) && 5310 if (needs_nv12_wa(dev_priv, old_crtc_state) &&
5347 !needs_nv12_wa(dev_priv, pipe_config)) { 5311 !needs_nv12_wa(dev_priv, pipe_config)) {
5348 skl_wa_clkgate(dev_priv, crtc->pipe, false); 5312 skl_wa_clkgate(dev_priv, crtc->pipe, false);
5349 skl_wa_528(dev_priv, crtc->pipe, false);
5350 } 5313 }
5351} 5314}
5352 5315
@@ -5386,7 +5349,6 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
5386 if (!needs_nv12_wa(dev_priv, old_crtc_state) && 5349 if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
5387 needs_nv12_wa(dev_priv, pipe_config)) { 5350 needs_nv12_wa(dev_priv, pipe_config)) {
5388 skl_wa_clkgate(dev_priv, crtc->pipe, true); 5351 skl_wa_clkgate(dev_priv, crtc->pipe, true);
5389 skl_wa_528(dev_priv, crtc->pipe, true);
5390 } 5352 }
5391 5353
5392 /* 5354 /*
@@ -5409,7 +5371,8 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
5409 * 5371 *
5410 * WaCxSRDisabledForSpriteScaling:ivb 5372 * WaCxSRDisabledForSpriteScaling:ivb
5411 */ 5373 */
5412 if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev)) 5374 if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev) &&
5375 old_crtc_state->base.active)
5413 intel_wait_for_vblank(dev_priv, crtc->pipe); 5376 intel_wait_for_vblank(dev_priv, crtc->pipe);
5414 5377
5415 /* 5378 /*
@@ -5440,24 +5403,23 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
5440 intel_update_watermarks(crtc); 5403 intel_update_watermarks(crtc);
5441} 5404}
5442 5405
5443static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask) 5406static void intel_crtc_disable_planes(struct intel_crtc *crtc, unsigned plane_mask)
5444{ 5407{
5445 struct drm_device *dev = crtc->dev; 5408 struct drm_device *dev = crtc->base.dev;
5446 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5409 struct intel_plane *plane;
5447 struct drm_plane *p; 5410 unsigned fb_bits = 0;
5448 int pipe = intel_crtc->pipe;
5449 5411
5450 intel_crtc_dpms_overlay_disable(intel_crtc); 5412 intel_crtc_dpms_overlay_disable(crtc);
5451 5413
5452 drm_for_each_plane_mask(p, dev, plane_mask) 5414 for_each_intel_plane_on_crtc(dev, crtc, plane) {
5453 to_intel_plane(p)->disable_plane(to_intel_plane(p), intel_crtc); 5415 if (plane_mask & BIT(plane->id)) {
5416 plane->disable_plane(plane, crtc);
5454 5417
5455 /* 5418 fb_bits |= plane->frontbuffer_bit;
5456 * FIXME: Once we grow proper nuclear flip support out of this we need 5419 }
5457 * to compute the mask of flip planes precisely. For the time being 5420 }
5458 * consider this a flip to a NULL plane. 5421
5459 */ 5422 intel_frontbuffer_flip(to_i915(dev), fb_bits);
5460 intel_frontbuffer_flip(to_i915(dev), INTEL_FRONTBUFFER_ALL_MASK(pipe));
5461} 5423}
5462 5424
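intel_crtc_disable_planes() now iterates the CRTC's own planes, disables only those selected in plane_mask, and collects their exact frontbuffer bits rather than flipping INTEL_FRONTBUFFER_ALL_MASK for the whole pipe. A sketch of the bitmask walk with a hypothetical plane table in place of for_each_intel_plane_on_crtc():

#include <stdio.h>

#define BIT(n) (1u << (n))

struct plane { int id; unsigned frontbuffer_bit; };

int main(void)
{
        struct plane planes[] = { {0, BIT(0)}, {1, BIT(1)}, {2, BIT(2)} };
        unsigned plane_mask = BIT(0) | BIT(2);  /* planes to disable */
        unsigned fb_bits = 0;

        for (unsigned i = 0; i < sizeof(planes) / sizeof(planes[0]); i++) {
                if (plane_mask & BIT(planes[i].id)) {
                        /* plane->disable_plane(plane, crtc) would run here */
                        fb_bits |= planes[i].frontbuffer_bit;
                }
        }
        printf("frontbuffer flip bits: 0x%x\n", fb_bits);      /* 0x5 */
        return 0;
}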
5463static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc, 5425static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
@@ -5515,7 +5477,8 @@ static void intel_encoders_enable(struct drm_crtc *crtc,
5515 if (conn_state->crtc != crtc) 5477 if (conn_state->crtc != crtc)
5516 continue; 5478 continue;
5517 5479
5518 encoder->enable(encoder, crtc_state, conn_state); 5480 if (encoder->enable)
5481 encoder->enable(encoder, crtc_state, conn_state);
5519 intel_opregion_notify_encoder(encoder, true); 5482 intel_opregion_notify_encoder(encoder, true);
5520 } 5483 }
5521} 5484}
@@ -5536,7 +5499,8 @@ static void intel_encoders_disable(struct drm_crtc *crtc,
5536 continue; 5499 continue;
5537 5500
5538 intel_opregion_notify_encoder(encoder, false); 5501 intel_opregion_notify_encoder(encoder, false);
5539 encoder->disable(encoder, old_crtc_state, old_conn_state); 5502 if (encoder->disable)
5503 encoder->disable(encoder, old_crtc_state, old_conn_state);
5540 } 5504 }
5541} 5505}
5542 5506
@@ -5607,37 +5571,37 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
5607 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 5571 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
5608 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); 5572 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
5609 5573
5610 if (intel_crtc->config->has_pch_encoder) 5574 if (pipe_config->has_pch_encoder)
5611 intel_prepare_shared_dpll(intel_crtc); 5575 intel_prepare_shared_dpll(pipe_config);
5612 5576
5613 if (intel_crtc_has_dp_encoder(intel_crtc->config)) 5577 if (intel_crtc_has_dp_encoder(pipe_config))
5614 intel_dp_set_m_n(intel_crtc, M1_N1); 5578 intel_dp_set_m_n(pipe_config, M1_N1);
5615 5579
5616 intel_set_pipe_timings(intel_crtc); 5580 intel_set_pipe_timings(pipe_config);
5617 intel_set_pipe_src_size(intel_crtc); 5581 intel_set_pipe_src_size(pipe_config);
5618 5582
5619 if (intel_crtc->config->has_pch_encoder) { 5583 if (pipe_config->has_pch_encoder) {
5620 intel_cpu_transcoder_set_m_n(intel_crtc, 5584 intel_cpu_transcoder_set_m_n(pipe_config,
5621 &intel_crtc->config->fdi_m_n, NULL); 5585 &pipe_config->fdi_m_n, NULL);
5622 } 5586 }
5623 5587
5624 ironlake_set_pipeconf(crtc); 5588 ironlake_set_pipeconf(pipe_config);
5625 5589
5626 intel_crtc->active = true; 5590 intel_crtc->active = true;
5627 5591
5628 intel_encoders_pre_enable(crtc, pipe_config, old_state); 5592 intel_encoders_pre_enable(crtc, pipe_config, old_state);
5629 5593
5630 if (intel_crtc->config->has_pch_encoder) { 5594 if (pipe_config->has_pch_encoder) {
5631 /* Note: FDI PLL enabling _must_ be done before we enable the 5595 /* Note: FDI PLL enabling _must_ be done before we enable the
5632 * cpu pipes, hence this is separate from all the other fdi/pch 5596 * cpu pipes, hence this is separate from all the other fdi/pch
5633 * enabling. */ 5597 * enabling. */
5634 ironlake_fdi_pll_enable(intel_crtc); 5598 ironlake_fdi_pll_enable(pipe_config);
5635 } else { 5599 } else {
5636 assert_fdi_tx_disabled(dev_priv, pipe); 5600 assert_fdi_tx_disabled(dev_priv, pipe);
5637 assert_fdi_rx_disabled(dev_priv, pipe); 5601 assert_fdi_rx_disabled(dev_priv, pipe);
5638 } 5602 }
5639 5603
5640 ironlake_pfit_enable(intel_crtc); 5604 ironlake_pfit_enable(pipe_config);
5641 5605
5642 /* 5606 /*
5643 * On ILK+ LUT must be loaded before the pipe is running but with 5607 * On ILK+ LUT must be loaded before the pipe is running but with
@@ -5646,10 +5610,10 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
5646 intel_color_load_luts(&pipe_config->base); 5610 intel_color_load_luts(&pipe_config->base);
5647 5611
5648 if (dev_priv->display.initial_watermarks != NULL) 5612 if (dev_priv->display.initial_watermarks != NULL)
5649 dev_priv->display.initial_watermarks(old_intel_state, intel_crtc->config); 5613 dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
5650 intel_enable_pipe(pipe_config); 5614 intel_enable_pipe(pipe_config);
5651 5615
5652 if (intel_crtc->config->has_pch_encoder) 5616 if (pipe_config->has_pch_encoder)
5653 ironlake_pch_enable(old_intel_state, pipe_config); 5617 ironlake_pch_enable(old_intel_state, pipe_config);
5654 5618
5655 assert_vblank_disabled(crtc); 5619 assert_vblank_disabled(crtc);
@@ -5666,7 +5630,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
5666 * some interlaced HDMI modes. Let's do the double wait always 5630 * some interlaced HDMI modes. Let's do the double wait always
5667 * in case there are more corner cases we don't know about. 5631 * in case there are more corner cases we don't know about.
5668 */ 5632 */
5669 if (intel_crtc->config->has_pch_encoder) { 5633 if (pipe_config->has_pch_encoder) {
5670 intel_wait_for_vblank(dev_priv, pipe); 5634 intel_wait_for_vblank(dev_priv, pipe);
5671 intel_wait_for_vblank(dev_priv, pipe); 5635 intel_wait_for_vblank(dev_priv, pipe);
5672 } 5636 }
@@ -5700,10 +5664,9 @@ static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
5700 enum pipe pipe = crtc->pipe; 5664 enum pipe pipe = crtc->pipe;
5701 uint32_t val; 5665 uint32_t val;
5702 5666
5703 val = MBUS_DBOX_BW_CREDIT(1) | MBUS_DBOX_A_CREDIT(2); 5667 val = MBUS_DBOX_A_CREDIT(2);
5704 5668 val |= MBUS_DBOX_BW_CREDIT(1);
5705 /* Program B credit equally to all pipes */ 5669 val |= MBUS_DBOX_B_CREDIT(8);
5706 val |= MBUS_DBOX_B_CREDIT(24 / INTEL_INFO(dev_priv)->num_pipes);
5707 5670
5708 I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val); 5671 I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
5709} 5672}
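The ICL MBUS hunk drops the per-pipe split of B credits (24 divided by the number of pipes) for a fixed 8 per pipe; on a three-pipe part the two formulas agree, which is presumably why the constant is safe. A compile-and-run sketch comparing the two register values; the bit shifts below are placeholders, not the real PIPE_MBUS_DBOX_CTL layout:

#include <stdio.h>

#define MBUS_DBOX_A_CREDIT(x)   ((x) << 0)      /* illustrative shifts */
#define MBUS_DBOX_BW_CREDIT(x)  ((x) << 8)
#define MBUS_DBOX_B_CREDIT(x)   ((x) << 16)

int main(void)
{
        int num_pipes = 3;
        unsigned old_val = MBUS_DBOX_BW_CREDIT(1) | MBUS_DBOX_A_CREDIT(2) |
                           MBUS_DBOX_B_CREDIT(24 / num_pipes); /* 8 on 3 pipes */
        unsigned new_val = MBUS_DBOX_A_CREDIT(2) | MBUS_DBOX_BW_CREDIT(1) |
                           MBUS_DBOX_B_CREDIT(8);              /* fixed 8 */

        printf("old=0x%06x new=0x%06x\n", old_val, new_val);
        return 0;
}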
@@ -5715,7 +5678,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
5715 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 5678 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
5716 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5679 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5717 int pipe = intel_crtc->pipe, hsw_workaround_pipe; 5680 int pipe = intel_crtc->pipe, hsw_workaround_pipe;
5718 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 5681 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
5719 struct intel_atomic_state *old_intel_state = 5682 struct intel_atomic_state *old_intel_state =
5720 to_intel_atomic_state(old_state); 5683 to_intel_atomic_state(old_state);
5721 bool psl_clkgate_wa; 5684 bool psl_clkgate_wa;
@@ -5726,37 +5689,37 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
5726 5689
5727 intel_encoders_pre_pll_enable(crtc, pipe_config, old_state); 5690 intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
5728 5691
5729 if (intel_crtc->config->shared_dpll) 5692 if (pipe_config->shared_dpll)
5730 intel_enable_shared_dpll(intel_crtc); 5693 intel_enable_shared_dpll(pipe_config);
5731 5694
5732 if (INTEL_GEN(dev_priv) >= 11) 5695 if (INTEL_GEN(dev_priv) >= 11)
5733 icl_map_plls_to_ports(crtc, pipe_config, old_state); 5696 icl_map_plls_to_ports(crtc, pipe_config, old_state);
5734 5697
5735 intel_encoders_pre_enable(crtc, pipe_config, old_state); 5698 intel_encoders_pre_enable(crtc, pipe_config, old_state);
5736 5699
5737 if (intel_crtc_has_dp_encoder(intel_crtc->config)) 5700 if (intel_crtc_has_dp_encoder(pipe_config))
5738 intel_dp_set_m_n(intel_crtc, M1_N1); 5701 intel_dp_set_m_n(pipe_config, M1_N1);
5739 5702
5740 if (!transcoder_is_dsi(cpu_transcoder)) 5703 if (!transcoder_is_dsi(cpu_transcoder))
5741 intel_set_pipe_timings(intel_crtc); 5704 intel_set_pipe_timings(pipe_config);
5742 5705
5743 intel_set_pipe_src_size(intel_crtc); 5706 intel_set_pipe_src_size(pipe_config);
5744 5707
5745 if (cpu_transcoder != TRANSCODER_EDP && 5708 if (cpu_transcoder != TRANSCODER_EDP &&
5746 !transcoder_is_dsi(cpu_transcoder)) { 5709 !transcoder_is_dsi(cpu_transcoder)) {
5747 I915_WRITE(PIPE_MULT(cpu_transcoder), 5710 I915_WRITE(PIPE_MULT(cpu_transcoder),
5748 intel_crtc->config->pixel_multiplier - 1); 5711 pipe_config->pixel_multiplier - 1);
5749 } 5712 }
5750 5713
5751 if (intel_crtc->config->has_pch_encoder) { 5714 if (pipe_config->has_pch_encoder) {
5752 intel_cpu_transcoder_set_m_n(intel_crtc, 5715 intel_cpu_transcoder_set_m_n(pipe_config,
5753 &intel_crtc->config->fdi_m_n, NULL); 5716 &pipe_config->fdi_m_n, NULL);
5754 } 5717 }
5755 5718
5756 if (!transcoder_is_dsi(cpu_transcoder)) 5719 if (!transcoder_is_dsi(cpu_transcoder))
5757 haswell_set_pipeconf(crtc); 5720 haswell_set_pipeconf(pipe_config);
5758 5721
5759 haswell_set_pipemisc(crtc); 5722 haswell_set_pipemisc(pipe_config);
5760 5723
5761 intel_color_set_csc(&pipe_config->base); 5724 intel_color_set_csc(&pipe_config->base);
5762 5725
@@ -5764,14 +5727,14 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
5764 5727
5765 /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */ 5728 /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
5766 psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) && 5729 psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
5767 intel_crtc->config->pch_pfit.enabled; 5730 pipe_config->pch_pfit.enabled;
5768 if (psl_clkgate_wa) 5731 if (psl_clkgate_wa)
5769 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true); 5732 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
5770 5733
5771 if (INTEL_GEN(dev_priv) >= 9) 5734 if (INTEL_GEN(dev_priv) >= 9)
5772 skylake_pfit_enable(intel_crtc); 5735 skylake_pfit_enable(pipe_config);
5773 else 5736 else
5774 ironlake_pfit_enable(intel_crtc); 5737 ironlake_pfit_enable(pipe_config);
5775 5738
5776 /* 5739 /*
5777 * On ILK+ LUT must be loaded before the pipe is running but with 5740 * On ILK+ LUT must be loaded before the pipe is running but with
@@ -5804,10 +5767,10 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
5804 if (!transcoder_is_dsi(cpu_transcoder)) 5767 if (!transcoder_is_dsi(cpu_transcoder))
5805 intel_enable_pipe(pipe_config); 5768 intel_enable_pipe(pipe_config);
5806 5769
5807 if (intel_crtc->config->has_pch_encoder) 5770 if (pipe_config->has_pch_encoder)
5808 lpt_pch_enable(old_intel_state, pipe_config); 5771 lpt_pch_enable(old_intel_state, pipe_config);
5809 5772
5810 if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST)) 5773 if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
5811 intel_ddi_set_vc_payload_alloc(pipe_config, true); 5774 intel_ddi_set_vc_payload_alloc(pipe_config, true);
5812 5775
5813 assert_vblank_disabled(crtc); 5776 assert_vblank_disabled(crtc);
@@ -5829,15 +5792,15 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
5829 } 5792 }
5830} 5793}
5831 5794
5832static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force) 5795static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
5833{ 5796{
5834 struct drm_device *dev = crtc->base.dev; 5797 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
5835 struct drm_i915_private *dev_priv = to_i915(dev); 5798 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5836 int pipe = crtc->pipe; 5799 enum pipe pipe = crtc->pipe;
5837 5800
5838 /* To avoid upsetting the power well on haswell only disable the pfit if 5801 /* To avoid upsetting the power well on haswell only disable the pfit if
5839 * it's in use. The hw state code will make sure we get this right. */ 5802 * it's in use. The hw state code will make sure we get this right. */
5840 if (force || crtc->config->pch_pfit.enabled) { 5803 if (old_crtc_state->pch_pfit.enabled) {
5841 I915_WRITE(PF_CTL(pipe), 0); 5804 I915_WRITE(PF_CTL(pipe), 0);
5842 I915_WRITE(PF_WIN_POS(pipe), 0); 5805 I915_WRITE(PF_WIN_POS(pipe), 0);
5843 I915_WRITE(PF_WIN_SZ(pipe), 0); 5806 I915_WRITE(PF_WIN_SZ(pipe), 0);
@@ -5868,14 +5831,14 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
5868 5831
5869 intel_disable_pipe(old_crtc_state); 5832 intel_disable_pipe(old_crtc_state);
5870 5833
5871 ironlake_pfit_disable(intel_crtc, false); 5834 ironlake_pfit_disable(old_crtc_state);
5872 5835
5873 if (intel_crtc->config->has_pch_encoder) 5836 if (old_crtc_state->has_pch_encoder)
5874 ironlake_fdi_disable(crtc); 5837 ironlake_fdi_disable(crtc);
5875 5838
5876 intel_encoders_post_disable(crtc, old_crtc_state, old_state); 5839 intel_encoders_post_disable(crtc, old_crtc_state, old_state);
5877 5840
5878 if (intel_crtc->config->has_pch_encoder) { 5841 if (old_crtc_state->has_pch_encoder) {
5879 ironlake_disable_pch_transcoder(dev_priv, pipe); 5842 ironlake_disable_pch_transcoder(dev_priv, pipe);
5880 5843
5881 if (HAS_PCH_CPT(dev_priv)) { 5844 if (HAS_PCH_CPT(dev_priv)) {
@@ -5929,21 +5892,22 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
5929 if (INTEL_GEN(dev_priv) >= 9) 5892 if (INTEL_GEN(dev_priv) >= 9)
5930 skylake_scaler_disable(intel_crtc); 5893 skylake_scaler_disable(intel_crtc);
5931 else 5894 else
5932 ironlake_pfit_disable(intel_crtc, false); 5895 ironlake_pfit_disable(old_crtc_state);
5933 5896
5934 intel_encoders_post_disable(crtc, old_crtc_state, old_state); 5897 intel_encoders_post_disable(crtc, old_crtc_state, old_state);
5935 5898
5936 if (INTEL_GEN(dev_priv) >= 11) 5899 if (INTEL_GEN(dev_priv) >= 11)
5937 icl_unmap_plls_to_ports(crtc, old_crtc_state, old_state); 5900 icl_unmap_plls_to_ports(crtc, old_crtc_state, old_state);
5901
5902 intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
5938} 5903}
5939 5904
5940static void i9xx_pfit_enable(struct intel_crtc *crtc) 5905static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
5941{ 5906{
5942 struct drm_device *dev = crtc->base.dev; 5907 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5943 struct drm_i915_private *dev_priv = to_i915(dev); 5908 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5944 struct intel_crtc_state *pipe_config = crtc->config;
5945 5909
5946 if (!pipe_config->gmch_pfit.control) 5910 if (!crtc_state->gmch_pfit.control)
5947 return; 5911 return;
5948 5912
5949 /* 5913 /*
@@ -5953,8 +5917,8 @@ static void i9xx_pfit_enable(struct intel_crtc *crtc)
5953 WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE); 5917 WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
5954 assert_pipe_disabled(dev_priv, crtc->pipe); 5918 assert_pipe_disabled(dev_priv, crtc->pipe);
5955 5919
5956 I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios); 5920 I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
5957 I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control); 5921 I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);
5958 5922
5959 /* Border color in case we don't scale up to the full screen. Black by 5923 /* Border color in case we don't scale up to the full screen. Black by
5960 * default, change to something else for debugging. */ 5924 * default, change to something else for debugging. */
@@ -6009,6 +5973,28 @@ enum intel_display_power_domain intel_port_to_power_domain(enum port port)
6009 } 5973 }
6010} 5974}
6011 5975
5976enum intel_display_power_domain
5977intel_aux_power_domain(struct intel_digital_port *dig_port)
5978{
5979 switch (dig_port->aux_ch) {
5980 case AUX_CH_A:
5981 return POWER_DOMAIN_AUX_A;
5982 case AUX_CH_B:
5983 return POWER_DOMAIN_AUX_B;
5984 case AUX_CH_C:
5985 return POWER_DOMAIN_AUX_C;
5986 case AUX_CH_D:
5987 return POWER_DOMAIN_AUX_D;
5988 case AUX_CH_E:
5989 return POWER_DOMAIN_AUX_E;
5990 case AUX_CH_F:
5991 return POWER_DOMAIN_AUX_F;
5992 default:
5993 MISSING_CASE(dig_port->aux_ch);
5994 return POWER_DOMAIN_AUX_A;
5995 }
5996}
5997
6012static u64 get_crtc_power_domains(struct drm_crtc *crtc, 5998static u64 get_crtc_power_domains(struct drm_crtc *crtc,
6013 struct intel_crtc_state *crtc_state) 5999 struct intel_crtc_state *crtc_state)
6014{ 6000{
@@ -6088,20 +6074,18 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
6088 if (WARN_ON(intel_crtc->active)) 6074 if (WARN_ON(intel_crtc->active))
6089 return; 6075 return;
6090 6076
6091 if (intel_crtc_has_dp_encoder(intel_crtc->config)) 6077 if (intel_crtc_has_dp_encoder(pipe_config))
6092 intel_dp_set_m_n(intel_crtc, M1_N1); 6078 intel_dp_set_m_n(pipe_config, M1_N1);
6093 6079
6094 intel_set_pipe_timings(intel_crtc); 6080 intel_set_pipe_timings(pipe_config);
6095 intel_set_pipe_src_size(intel_crtc); 6081 intel_set_pipe_src_size(pipe_config);
6096 6082
6097 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { 6083 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
6098 struct drm_i915_private *dev_priv = to_i915(dev);
6099
6100 I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY); 6084 I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
6101 I915_WRITE(CHV_CANVAS(pipe), 0); 6085 I915_WRITE(CHV_CANVAS(pipe), 0);
6102 } 6086 }
6103 6087
6104 i9xx_set_pipeconf(intel_crtc); 6088 i9xx_set_pipeconf(pipe_config);
6105 6089
6106 intel_color_set_csc(&pipe_config->base); 6090 intel_color_set_csc(&pipe_config->base);
6107 6091
@@ -6112,16 +6096,16 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
6112 intel_encoders_pre_pll_enable(crtc, pipe_config, old_state); 6096 intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
6113 6097
6114 if (IS_CHERRYVIEW(dev_priv)) { 6098 if (IS_CHERRYVIEW(dev_priv)) {
6115 chv_prepare_pll(intel_crtc, intel_crtc->config); 6099 chv_prepare_pll(intel_crtc, pipe_config);
6116 chv_enable_pll(intel_crtc, intel_crtc->config); 6100 chv_enable_pll(intel_crtc, pipe_config);
6117 } else { 6101 } else {
6118 vlv_prepare_pll(intel_crtc, intel_crtc->config); 6102 vlv_prepare_pll(intel_crtc, pipe_config);
6119 vlv_enable_pll(intel_crtc, intel_crtc->config); 6103 vlv_enable_pll(intel_crtc, pipe_config);
6120 } 6104 }
6121 6105
6122 intel_encoders_pre_enable(crtc, pipe_config, old_state); 6106 intel_encoders_pre_enable(crtc, pipe_config, old_state);
6123 6107
6124 i9xx_pfit_enable(intel_crtc); 6108 i9xx_pfit_enable(pipe_config);
6125 6109
6126 intel_color_load_luts(&pipe_config->base); 6110 intel_color_load_luts(&pipe_config->base);
6127 6111
@@ -6135,13 +6119,13 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
6135 intel_encoders_enable(crtc, pipe_config, old_state); 6119 intel_encoders_enable(crtc, pipe_config, old_state);
6136} 6120}
6137 6121
6138static void i9xx_set_pll_dividers(struct intel_crtc *crtc) 6122static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
6139{ 6123{
6140 struct drm_device *dev = crtc->base.dev; 6124 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6141 struct drm_i915_private *dev_priv = to_i915(dev); 6125 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6142 6126
6143 I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0); 6127 I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
6144 I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1); 6128 I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
6145} 6129}
6146 6130
6147static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config, 6131static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
@@ -6158,15 +6142,15 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
6158 if (WARN_ON(intel_crtc->active)) 6142 if (WARN_ON(intel_crtc->active))
6159 return; 6143 return;
6160 6144
6161 i9xx_set_pll_dividers(intel_crtc); 6145 i9xx_set_pll_dividers(pipe_config);
6162 6146
6163 if (intel_crtc_has_dp_encoder(intel_crtc->config)) 6147 if (intel_crtc_has_dp_encoder(pipe_config))
6164 intel_dp_set_m_n(intel_crtc, M1_N1); 6148 intel_dp_set_m_n(pipe_config, M1_N1);
6165 6149
6166 intel_set_pipe_timings(intel_crtc); 6150 intel_set_pipe_timings(pipe_config);
6167 intel_set_pipe_src_size(intel_crtc); 6151 intel_set_pipe_src_size(pipe_config);
6168 6152
6169 i9xx_set_pipeconf(intel_crtc); 6153 i9xx_set_pipeconf(pipe_config);
6170 6154
6171 intel_crtc->active = true; 6155 intel_crtc->active = true;
6172 6156
@@ -6177,13 +6161,13 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
6177 6161
6178 i9xx_enable_pll(intel_crtc, pipe_config); 6162 i9xx_enable_pll(intel_crtc, pipe_config);
6179 6163
6180 i9xx_pfit_enable(intel_crtc); 6164 i9xx_pfit_enable(pipe_config);
6181 6165
6182 intel_color_load_luts(&pipe_config->base); 6166 intel_color_load_luts(&pipe_config->base);
6183 6167
6184 if (dev_priv->display.initial_watermarks != NULL) 6168 if (dev_priv->display.initial_watermarks != NULL)
6185 dev_priv->display.initial_watermarks(old_intel_state, 6169 dev_priv->display.initial_watermarks(old_intel_state,
6186 intel_crtc->config); 6170 pipe_config);
6187 else 6171 else
6188 intel_update_watermarks(intel_crtc); 6172 intel_update_watermarks(intel_crtc);
6189 intel_enable_pipe(pipe_config); 6173 intel_enable_pipe(pipe_config);
@@ -6194,12 +6178,12 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
6194 intel_encoders_enable(crtc, pipe_config, old_state); 6178 intel_encoders_enable(crtc, pipe_config, old_state);
6195} 6179}
6196 6180
6197static void i9xx_pfit_disable(struct intel_crtc *crtc) 6181static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
6198{ 6182{
6199 struct drm_device *dev = crtc->base.dev; 6183 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
6200 struct drm_i915_private *dev_priv = to_i915(dev); 6184 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6201 6185
6202 if (!crtc->config->gmch_pfit.control) 6186 if (!old_crtc_state->gmch_pfit.control)
6203 return; 6187 return;
6204 6188
6205 assert_pipe_disabled(dev_priv, crtc->pipe); 6189 assert_pipe_disabled(dev_priv, crtc->pipe);
@@ -6232,17 +6216,17 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
6232 6216
6233 intel_disable_pipe(old_crtc_state); 6217 intel_disable_pipe(old_crtc_state);
6234 6218
6235 i9xx_pfit_disable(intel_crtc); 6219 i9xx_pfit_disable(old_crtc_state);
6236 6220
6237 intel_encoders_post_disable(crtc, old_crtc_state, old_state); 6221 intel_encoders_post_disable(crtc, old_crtc_state, old_state);
6238 6222
6239 if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) { 6223 if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
6240 if (IS_CHERRYVIEW(dev_priv)) 6224 if (IS_CHERRYVIEW(dev_priv))
6241 chv_disable_pll(dev_priv, pipe); 6225 chv_disable_pll(dev_priv, pipe);
6242 else if (IS_VALLEYVIEW(dev_priv)) 6226 else if (IS_VALLEYVIEW(dev_priv))
6243 vlv_disable_pll(dev_priv, pipe); 6227 vlv_disable_pll(dev_priv, pipe);
6244 else 6228 else
6245 i9xx_disable_pll(intel_crtc); 6229 i9xx_disable_pll(old_crtc_state);
6246 } 6230 }
6247 6231
6248 intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state); 6232 intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
@@ -6316,7 +6300,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
6316 6300
6317 intel_fbc_disable(intel_crtc); 6301 intel_fbc_disable(intel_crtc);
6318 intel_update_watermarks(intel_crtc); 6302 intel_update_watermarks(intel_crtc);
6319 intel_disable_shared_dpll(intel_crtc); 6303 intel_disable_shared_dpll(to_intel_crtc_state(crtc->state));
6320 6304
6321 domains = intel_crtc->enabled_power_domains; 6305 domains = intel_crtc->enabled_power_domains;
6322 for_each_power_domain(domain, domains) 6306 for_each_power_domain(domain, domains)
@@ -6394,66 +6378,6 @@ static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
6394 } 6378 }
6395} 6379}
6396 6380
6397int intel_connector_init(struct intel_connector *connector)
6398{
6399 struct intel_digital_connector_state *conn_state;
6400
6401 /*
6402 * Allocate enough memory to hold intel_digital_connector_state,
6403 * This might be a few bytes too many, but for connectors that don't
6404 * need it we'll free the state and allocate a smaller one on the first
6405 * successful commit anyway.
6406 */
6407 conn_state = kzalloc(sizeof(*conn_state), GFP_KERNEL);
6408 if (!conn_state)
6409 return -ENOMEM;
6410
6411 __drm_atomic_helper_connector_reset(&connector->base,
6412 &conn_state->base);
6413
6414 return 0;
6415}
6416
6417struct intel_connector *intel_connector_alloc(void)
6418{
6419 struct intel_connector *connector;
6420
6421 connector = kzalloc(sizeof *connector, GFP_KERNEL);
6422 if (!connector)
6423 return NULL;
6424
6425 if (intel_connector_init(connector) < 0) {
6426 kfree(connector);
6427 return NULL;
6428 }
6429
6430 return connector;
6431}
6432
6433/*
6434 * Free the bits allocated by intel_connector_alloc.
6435 * This should only be used after intel_connector_alloc has returned
6436 * successfully, and before drm_connector_init returns successfully.
6437 * Otherwise the destroy callbacks for the connector and the state should
6438 * take care of proper cleanup/free
6439 */
6440void intel_connector_free(struct intel_connector *connector)
6441{
6442 kfree(to_intel_digital_connector_state(connector->base.state));
6443 kfree(connector);
6444}
6445
6446/* Simple connector->get_hw_state implementation for encoders that support only
6447 * one connector and no cloning and hence the encoder state determines the state
6448 * of the connector. */
6449bool intel_connector_get_hw_state(struct intel_connector *connector)
6450{
6451 enum pipe pipe = 0;
6452 struct intel_encoder *encoder = connector->encoder;
6453
6454 return encoder->get_hw_state(encoder, &pipe);
6455}
6456
6457static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state) 6381static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
6458{ 6382{
6459 if (crtc_state->base.enable && crtc_state->has_pch_encoder) 6383 if (crtc_state->base.enable && crtc_state->has_pch_encoder)
@@ -6564,6 +6488,9 @@ retry:
6564 link_bw, &pipe_config->fdi_m_n, false); 6488 link_bw, &pipe_config->fdi_m_n, false);
6565 6489
6566 ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config); 6490 ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
6491 if (ret == -EDEADLK)
6492 return ret;
6493
6567 if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) { 6494 if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
6568 pipe_config->pipe_bpp -= 2*3; 6495 pipe_config->pipe_bpp -= 2*3;
6569 DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n", 6496 DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
@@ -6720,7 +6647,9 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
6720 return -EINVAL; 6647 return -EINVAL;
6721 } 6648 }
6722 6649
6723 if (pipe_config->ycbcr420 && pipe_config->base.ctm) { 6650 if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
6651 pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
6652 pipe_config->base.ctm) {
6724 /* 6653 /*
6725 * There is only one pipe CSC unit per pipe, and we need that 6654 * There is only one pipe CSC unit per pipe, and we need that
6726 * for output conversion from RGB->YCBCR. So if CTM is already 6655 * for output conversion from RGB->YCBCR. So if CTM is already
@@ -6886,12 +6815,12 @@ static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
6886 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); 6815 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
6887} 6816}
6888 6817
6889static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc, 6818static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
6890 struct intel_link_m_n *m_n) 6819 const struct intel_link_m_n *m_n)
6891{ 6820{
6892 struct drm_device *dev = crtc->base.dev; 6821 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6893 struct drm_i915_private *dev_priv = to_i915(dev); 6822 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6894 int pipe = crtc->pipe; 6823 enum pipe pipe = crtc->pipe;
6895 6824
6896 I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m); 6825 I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
6897 I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n); 6826 I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
@@ -6899,25 +6828,39 @@ static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
6899 I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n); 6828 I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
6900} 6829}
6901 6830
6902static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc, 6831static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
6903 struct intel_link_m_n *m_n, 6832 enum transcoder transcoder)
6904 struct intel_link_m_n *m2_n2)
6905{ 6833{
6834 if (IS_HASWELL(dev_priv))
6835 return transcoder == TRANSCODER_EDP;
6836
6837 /*
6838 * Strictly speaking some registers are available before
6839 * gen7, but we only support DRRS on gen7+
6840 */
6841 return IS_GEN7(dev_priv) || IS_CHERRYVIEW(dev_priv);
6842}
6843
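The new transcoder_has_m2_n2() helper centralizes where the M2/N2 register pair may be touched: on Haswell only the eDP transcoder has it, and otherwise access is limited to gen7 and Cherryview since DRRS is only supported there. A standalone model of the predicate; the device flags stand in for the kernel's platform macros:

#include <stdbool.h>
#include <stdio.h>

enum transcoder { TRANS_A, TRANS_B, TRANS_EDP };

struct device { bool is_haswell, is_gen7, is_cherryview; };

static bool transcoder_has_m2_n2(const struct device *d, enum transcoder t)
{
        if (d->is_haswell)
                return t == TRANS_EDP;
        /* registers exist earlier, but DRRS is only supported on gen7+ */
        return d->is_gen7 || d->is_cherryview;
}

int main(void)
{
        struct device hsw = { true, false, false };
        printf("hsw A:%d eDP:%d\n",
               transcoder_has_m2_n2(&hsw, TRANS_A),
               transcoder_has_m2_n2(&hsw, TRANS_EDP));  /* A:0 eDP:1 */
        return 0;
}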
6844static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
6845 const struct intel_link_m_n *m_n,
6846 const struct intel_link_m_n *m2_n2)
6847{
6848 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6906 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6849 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6907 int pipe = crtc->pipe; 6850 enum pipe pipe = crtc->pipe;
6908 enum transcoder transcoder = crtc->config->cpu_transcoder; 6851 enum transcoder transcoder = crtc_state->cpu_transcoder;
6909 6852
6910 if (INTEL_GEN(dev_priv) >= 5) { 6853 if (INTEL_GEN(dev_priv) >= 5) {
6911 I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m); 6854 I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
6912 I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n); 6855 I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
6913 I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m); 6856 I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
6914 I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n); 6857 I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
6915 /* M2_N2 registers to be set only for gen < 8 (M2_N2 available 6858 /*
6916 * for gen < 8) and if DRRS is supported (to make sure the 6859 * M2_N2 registers are set only if DRRS is supported
6917 * registers are not unnecessarily accessed). 6860 * (to make sure the registers are not unnecessarily accessed).
6918 */ 6861 */
6919 if (m2_n2 && (IS_CHERRYVIEW(dev_priv) || 6862 if (m2_n2 && crtc_state->has_drrs &&
6920 INTEL_GEN(dev_priv) < 8) && crtc->config->has_drrs) { 6863 transcoder_has_m2_n2(dev_priv, transcoder)) {
6921 I915_WRITE(PIPE_DATA_M2(transcoder), 6864 I915_WRITE(PIPE_DATA_M2(transcoder),
6922 TU_SIZE(m2_n2->tu) | m2_n2->gmch_m); 6865 TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
6923 I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n); 6866 I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
@@ -6932,29 +6875,29 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
6932 } 6875 }
6933} 6876}
6934 6877
6935void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n) 6878void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
6936{ 6879{
6937 struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL; 6880 const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
6938 6881
6939 if (m_n == M1_N1) { 6882 if (m_n == M1_N1) {
6940 dp_m_n = &crtc->config->dp_m_n; 6883 dp_m_n = &crtc_state->dp_m_n;
6941 dp_m2_n2 = &crtc->config->dp_m2_n2; 6884 dp_m2_n2 = &crtc_state->dp_m2_n2;
6942 } else if (m_n == M2_N2) { 6885 } else if (m_n == M2_N2) {
6943 6886
6944 /* 6887 /*
6945 * M2_N2 registers are not supported. Hence m2_n2 divider value 6888 * M2_N2 registers are not supported. Hence m2_n2 divider value
6946 * needs to be programmed into M1_N1. 6889 * needs to be programmed into M1_N1.
6947 */ 6890 */
6948 dp_m_n = &crtc->config->dp_m2_n2; 6891 dp_m_n = &crtc_state->dp_m2_n2;
6949 } else { 6892 } else {
6950 DRM_ERROR("Unsupported divider value\n"); 6893 DRM_ERROR("Unsupported divider value\n");
6951 return; 6894 return;
6952 } 6895 }
6953 6896
6954 if (crtc->config->has_pch_encoder) 6897 if (crtc_state->has_pch_encoder)
6955 intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n); 6898 intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
6956 else 6899 else
6957 intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2); 6900 intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
6958} 6901}
6959 6902
6960static void vlv_compute_dpll(struct intel_crtc *crtc, 6903static void vlv_compute_dpll(struct intel_crtc *crtc,
@@ -7053,8 +6996,8 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
7053 6996
7054 /* Set HBR and RBR LPF coefficients */ 6997 /* Set HBR and RBR LPF coefficients */
7055 if (pipe_config->port_clock == 162000 || 6998 if (pipe_config->port_clock == 162000 ||
7056 intel_crtc_has_type(crtc->config, INTEL_OUTPUT_ANALOG) || 6999 intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
7057 intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) 7000 intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
7058 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 7001 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7059 0x009f0003); 7002 0x009f0003);
7060 else 7003 else
@@ -7081,7 +7024,7 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
7081 7024
7082 coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe)); 7025 coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
7083 coreclk = (coreclk & 0x0000ff00) | 0x01c00000; 7026 coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
7084 if (intel_crtc_has_dp_encoder(crtc->config)) 7027 if (intel_crtc_has_dp_encoder(pipe_config))
7085 coreclk |= 0x01000000; 7028 coreclk |= 0x01000000;
7086 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk); 7029 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
7087 7030
@@ -7360,12 +7303,13 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
7360 crtc_state->dpll_hw_state.dpll = dpll; 7303 crtc_state->dpll_hw_state.dpll = dpll;
7361} 7304}
7362 7305
7363static void intel_set_pipe_timings(struct intel_crtc *intel_crtc) 7306static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
7364{ 7307{
7365 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); 7308 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7366 enum pipe pipe = intel_crtc->pipe; 7309 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7367 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 7310 enum pipe pipe = crtc->pipe;
7368 const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; 7311 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
7312 const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
7369 uint32_t crtc_vtotal, crtc_vblank_end; 7313 uint32_t crtc_vtotal, crtc_vblank_end;
7370 int vsyncshift = 0; 7314 int vsyncshift = 0;
7371 7315
@@ -7379,7 +7323,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
7379 crtc_vtotal -= 1; 7323 crtc_vtotal -= 1;
7380 crtc_vblank_end -= 1; 7324 crtc_vblank_end -= 1;
7381 7325
7382 if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO)) 7326 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
7383 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2; 7327 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
7384 else 7328 else
7385 vsyncshift = adjusted_mode->crtc_hsync_start - 7329 vsyncshift = adjusted_mode->crtc_hsync_start -
@@ -7421,18 +7365,18 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
7421 7365
7422} 7366}
7423 7367
7424static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc) 7368static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
7425{ 7369{
7426 struct drm_device *dev = intel_crtc->base.dev; 7370 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7427 struct drm_i915_private *dev_priv = to_i915(dev); 7371 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7428 enum pipe pipe = intel_crtc->pipe; 7372 enum pipe pipe = crtc->pipe;
7429 7373
7430 /* pipesrc controls the size that is scaled from, which should 7374 /* pipesrc controls the size that is scaled from, which should
7431 * always be the user's requested size. 7375 * always be the user's requested size.
7432 */ 7376 */
7433 I915_WRITE(PIPESRC(pipe), 7377 I915_WRITE(PIPESRC(pipe),
7434 ((intel_crtc->config->pipe_src_w - 1) << 16) | 7378 ((crtc_state->pipe_src_w - 1) << 16) |
7435 (intel_crtc->config->pipe_src_h - 1)); 7379 (crtc_state->pipe_src_h - 1));
7436} 7380}
7437 7381
7438static void intel_get_pipe_timings(struct intel_crtc *crtc, 7382static void intel_get_pipe_timings(struct intel_crtc *crtc,
@@ -7508,29 +7452,30 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
7508 drm_mode_set_name(mode); 7452 drm_mode_set_name(mode);
7509} 7453}
7510 7454
7511static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) 7455static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
7512{ 7456{
7513 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); 7457 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7458 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7514 uint32_t pipeconf; 7459 uint32_t pipeconf;
7515 7460
7516 pipeconf = 0; 7461 pipeconf = 0;
7517 7462
7518 /* we keep both pipes enabled on 830 */ 7463 /* we keep both pipes enabled on 830 */
7519 if (IS_I830(dev_priv)) 7464 if (IS_I830(dev_priv))
7520 pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE; 7465 pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
7521 7466
7522 if (intel_crtc->config->double_wide) 7467 if (crtc_state->double_wide)
7523 pipeconf |= PIPECONF_DOUBLE_WIDE; 7468 pipeconf |= PIPECONF_DOUBLE_WIDE;
7524 7469
7525 /* only g4x and later have fancy bpc/dither controls */ 7470 /* only g4x and later have fancy bpc/dither controls */
7526 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 7471 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
7527 IS_CHERRYVIEW(dev_priv)) { 7472 IS_CHERRYVIEW(dev_priv)) {
7528 /* Bspec claims that we can't use dithering for 30bpp pipes. */ 7473 /* Bspec claims that we can't use dithering for 30bpp pipes. */
7529 if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30) 7474 if (crtc_state->dither && crtc_state->pipe_bpp != 30)
7530 pipeconf |= PIPECONF_DITHER_EN | 7475 pipeconf |= PIPECONF_DITHER_EN |
7531 PIPECONF_DITHER_TYPE_SP; 7476 PIPECONF_DITHER_TYPE_SP;
7532 7477
7533 switch (intel_crtc->config->pipe_bpp) { 7478 switch (crtc_state->pipe_bpp) {
7534 case 18: 7479 case 18:
7535 pipeconf |= PIPECONF_6BPC; 7480 pipeconf |= PIPECONF_6BPC;
7536 break; 7481 break;
@@ -7546,9 +7491,9 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
7546 } 7491 }
7547 } 7492 }
7548 7493
7549 if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { 7494 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
7550 if (INTEL_GEN(dev_priv) < 4 || 7495 if (INTEL_GEN(dev_priv) < 4 ||
7551 intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO)) 7496 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
7552 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; 7497 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
7553 else 7498 else
7554 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT; 7499 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
@@ -7556,11 +7501,11 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
7556 pipeconf |= PIPECONF_PROGRESSIVE; 7501 pipeconf |= PIPECONF_PROGRESSIVE;
7557 7502
7558 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 7503 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
7559 intel_crtc->config->limited_color_range) 7504 crtc_state->limited_color_range)
7560 pipeconf |= PIPECONF_COLOR_RANGE_SELECT; 7505 pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
7561 7506
7562 I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf); 7507 I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
7563 POSTING_READ(PIPECONF(intel_crtc->pipe)); 7508 POSTING_READ(PIPECONF(crtc->pipe));
7564} 7509}
7565 7510
7566static int i8xx_crtc_compute_clock(struct intel_crtc *crtc, 7511static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
@@ -7843,8 +7788,15 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
7843 plane_config->tiling = I915_TILING_X; 7788 plane_config->tiling = I915_TILING_X;
7844 fb->modifier = I915_FORMAT_MOD_X_TILED; 7789 fb->modifier = I915_FORMAT_MOD_X_TILED;
7845 } 7790 }
7791
7792 if (val & DISPPLANE_ROTATE_180)
7793 plane_config->rotation = DRM_MODE_ROTATE_180;
7846 } 7794 }
7847 7795
7796 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
7797 val & DISPPLANE_MIRROR)
7798 plane_config->rotation |= DRM_MODE_REFLECT_X;
7799
7848 pixel_format = val & DISPPLANE_PIXFORMAT_MASK; 7800 pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
7849 fourcc = i9xx_format_to_fourcc(pixel_format); 7801 fourcc = i9xx_format_to_fourcc(pixel_format);
7850 fb->format = drm_format_info(fourcc); 7802 fb->format = drm_format_info(fourcc);
@@ -7916,6 +7868,49 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
7916 pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock); 7868 pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
7917} 7869}
7918 7870
7871static void intel_get_crtc_ycbcr_config(struct intel_crtc *crtc,
7872 struct intel_crtc_state *pipe_config)
7873{
7874 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7875 enum intel_output_format output = INTEL_OUTPUT_FORMAT_RGB;
7876
7877 pipe_config->lspcon_downsampling = false;
7878
7879 if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
7880 u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
7881
7882 if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
7883 bool ycbcr420_enabled = tmp & PIPEMISC_YUV420_ENABLE;
7884 bool blend = tmp & PIPEMISC_YUV420_MODE_FULL_BLEND;
7885
7886 if (ycbcr420_enabled) {
7887 /* We support 4:2:0 in full blend mode only */
7888 if (!blend)
7889 output = INTEL_OUTPUT_FORMAT_INVALID;
7890 else if (!(IS_GEMINILAKE(dev_priv) ||
7891 INTEL_GEN(dev_priv) >= 10))
7892 output = INTEL_OUTPUT_FORMAT_INVALID;
7893 else
7894 output = INTEL_OUTPUT_FORMAT_YCBCR420;
7895 } else {
7896 /*
7897 * Currently there is no interface defined to
7898 * check user preference between RGB/YCBCR444
7899 * and YCBCR420. So the only possible case for
7900 * YCBCR444 usage is driving YCBCR420 output
7901 * with LSPCON, when pipe is configured for
7902 * YCBCR444 output and LSPCON takes care of
7903 * downsampling it.
7904 */
7905 pipe_config->lspcon_downsampling = true;
7906 output = INTEL_OUTPUT_FORMAT_YCBCR444;
7907 }
7908 }
7909 }
7910
7911 pipe_config->output_format = output;
7912}
7913
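intel_get_crtc_ycbcr_config() reconstructs the output format from PIPEMISC: the YUV colorspace bit plus the 4:2:0 enable is only valid in full-blend mode on GLK/gen10+, while the YUV colorspace bit without the 4:2:0 enable means the pipe drives YCbCr 4:4:4 for an LSPCON to downsample. A runnable sketch of that decision tree, with the gen gating reduced to a single boolean:

#include <stdbool.h>
#include <stdio.h>

enum out_fmt { FMT_RGB, FMT_YCBCR420, FMT_YCBCR444, FMT_INVALID };

static enum out_fmt decode(bool csp_yuv, bool yuv420_en, bool full_blend,
                           bool glk_or_gen10plus, bool *lspcon_downsampling)
{
        *lspcon_downsampling = false;
        if (!csp_yuv)
                return FMT_RGB;
        if (yuv420_en) {
                /* 4:2:0 is supported in full blend mode only */
                if (!full_blend || !glk_or_gen10plus)
                        return FMT_INVALID;
                return FMT_YCBCR420;
        }
        /* YUV colorspace without 4:2:0: pipe drives 4:4:4, LSPCON downsamples */
        *lspcon_downsampling = true;
        return FMT_YCBCR444;
}

int main(void)
{
        bool ds;
        enum out_fmt fmt = decode(true, false, false, true, &ds);
        printf("fmt=%d lspcon_downsampling=%d\n", fmt, ds); /* YCBCR444, 1 */
        return 0;
}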
7919static bool i9xx_get_pipe_config(struct intel_crtc *crtc, 7914static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
7920 struct intel_crtc_state *pipe_config) 7915 struct intel_crtc_state *pipe_config)
7921{ 7916{
@@ -7928,6 +7923,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
7928 if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) 7923 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
7929 return false; 7924 return false;
7930 7925
7926 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
7931 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 7927 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
7932 pipe_config->shared_dpll = NULL; 7928 pipe_config->shared_dpll = NULL;
7933 7929
@@ -8459,16 +8455,16 @@ void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
 		lpt_init_pch_refclk(dev_priv);
 }
 
-static void ironlake_set_pipeconf(struct drm_crtc *crtc)
+static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int pipe = intel_crtc->pipe;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum pipe pipe = crtc->pipe;
 	uint32_t val;
 
 	val = 0;
 
-	switch (intel_crtc->config->pipe_bpp) {
+	switch (crtc_state->pipe_bpp) {
 	case 18:
 		val |= PIPECONF_6BPC;
 		break;
@@ -8486,32 +8482,32 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc)
 		BUG();
 	}
 
-	if (intel_crtc->config->dither)
+	if (crtc_state->dither)
 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
 
-	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+	if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
 		val |= PIPECONF_INTERLACED_ILK;
 	else
 		val |= PIPECONF_PROGRESSIVE;
 
-	if (intel_crtc->config->limited_color_range)
+	if (crtc_state->limited_color_range)
 		val |= PIPECONF_COLOR_RANGE_SELECT;
 
 	I915_WRITE(PIPECONF(pipe), val);
 	POSTING_READ(PIPECONF(pipe));
 }
 
-static void haswell_set_pipeconf(struct drm_crtc *crtc)
+static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
 	u32 val = 0;
 
-	if (IS_HASWELL(dev_priv) && intel_crtc->config->dither)
+	if (IS_HASWELL(dev_priv) && crtc_state->dither)
 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
 
-	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+	if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
 		val |= PIPECONF_INTERLACED_ILK;
 	else
 		val |= PIPECONF_PROGRESSIVE;
@@ -8520,16 +8516,15 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc)
 	POSTING_READ(PIPECONF(cpu_transcoder));
 }
 
-static void haswell_set_pipemisc(struct drm_crtc *crtc)
+static void haswell_set_pipemisc(const struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_crtc_state *config = intel_crtc->config;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
 
 	if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
 		u32 val = 0;
 
-		switch (intel_crtc->config->pipe_bpp) {
+		switch (crtc_state->pipe_bpp) {
 		case 18:
 			val |= PIPEMISC_DITHER_6_BPC;
 			break;
@@ -8547,14 +8542,16 @@ static void haswell_set_pipemisc(struct drm_crtc *crtc)
 			BUG();
 		}
 
-		if (intel_crtc->config->dither)
+		if (crtc_state->dither)
 			val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
 
-		if (config->ycbcr420) {
-			val |= PIPEMISC_OUTPUT_COLORSPACE_YUV |
-				PIPEMISC_YUV420_ENABLE |
+		if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
+		    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
+			val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
+
+		if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+			val |= PIPEMISC_YUV420_ENABLE |
 				PIPEMISC_YUV420_MODE_FULL_BLEND;
-		}
 
 		I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
 	}
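The hunk above splits what used to be a single ycbcr420 flag into an output_format enum. As a reading aid, a minimal standalone sketch of the bit derivation the new code performs — the helper name is hypothetical, only the register bits come from the hunk:

/* Sketch (hypothetical helper): PIPEMISC colorspace bits derived
 * from the output format, mirroring the hunk above. */
static u32 pipemisc_colorspace_bits(enum intel_output_format fmt)
{
	u32 val = 0;

	/* both YCbCr flavours select the YUV colorspace ... */
	if (fmt == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	    fmt == INTEL_OUTPUT_FORMAT_YCBCR444)
		val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;

	/* ... but only 4:2:0 also enables the pipe's own chroma
	 * downsampling in full blend mode */
	if (fmt == INTEL_OUTPUT_FORMAT_YCBCR420)
		val |= PIPEMISC_YUV420_ENABLE |
		       PIPEMISC_YUV420_MODE_FULL_BLEND;

	return val;
}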
@@ -8765,12 +8762,8 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
 	m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
 	m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
 		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
-	/* Read M2_N2 registers only for gen < 8 (M2_N2 available for
-	 * gen < 8) and if DRRS is supported (to make sure the
-	 * registers are not unnecessarily read).
-	 */
-	if (m2_n2 && INTEL_GEN(dev_priv) < 8 &&
-	    crtc->config->has_drrs) {
+
+	if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
 		m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
 		m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
 		m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
@@ -8913,6 +8906,29 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
 		goto error;
 	}
 
+	/*
+	 * DRM_MODE_ROTATE_ is counter-clockwise to stay compatible with Xrandr
+	 * while i915 HW rotation is clockwise, hence the swap here.
+	 */
+	switch (val & PLANE_CTL_ROTATE_MASK) {
+	case PLANE_CTL_ROTATE_0:
+		plane_config->rotation = DRM_MODE_ROTATE_0;
+		break;
+	case PLANE_CTL_ROTATE_90:
+		plane_config->rotation = DRM_MODE_ROTATE_270;
+		break;
+	case PLANE_CTL_ROTATE_180:
+		plane_config->rotation = DRM_MODE_ROTATE_180;
+		break;
+	case PLANE_CTL_ROTATE_270:
+		plane_config->rotation = DRM_MODE_ROTATE_90;
+		break;
+	}
+
+	if (INTEL_GEN(dev_priv) >= 10 &&
+	    val & PLANE_CTL_FLIP_HORIZONTAL)
+		plane_config->rotation |= DRM_MODE_REFLECT_X;
+
 	base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
 	plane_config->base = base;
 
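The same mapping, condensed into one hypothetical helper for readers tracing the readout direction (the switch above is authoritative; this only restates it):

/* Sketch: PLANE_CTL rotation field -> DRM rotation. DRM counts
 * counter-clockwise, the hardware clockwise, so 90 and 270 trade
 * places; 0 and 180 are their own mirror images. */
static unsigned int plane_ctl_to_drm_rotation(u32 plane_ctl)
{
	switch (plane_ctl & PLANE_CTL_ROTATE_MASK) {
	case PLANE_CTL_ROTATE_90:
		return DRM_MODE_ROTATE_270;
	case PLANE_CTL_ROTATE_270:
		return DRM_MODE_ROTATE_90;
	case PLANE_CTL_ROTATE_180:
		return DRM_MODE_ROTATE_180;
	case PLANE_CTL_ROTATE_0:
	default:
		return DRM_MODE_ROTATE_0;
	}
}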
@@ -8979,6 +8995,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
 	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
 		return false;
 
+	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
 	pipe_config->shared_dpll = NULL;
 
@@ -9327,30 +9344,17 @@ static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
 	u32 temp;
 
 	/* TODO: TBT pll not implemented. */
-	switch (port) {
-	case PORT_A:
-	case PORT_B:
+	if (intel_port_is_combophy(dev_priv, port)) {
 		temp = I915_READ(DPCLKA_CFGCR0_ICL) &
 		       DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
 		id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
 
-		if (WARN_ON(id != DPLL_ID_ICL_DPLL0 && id != DPLL_ID_ICL_DPLL1))
+		if (WARN_ON(!intel_dpll_is_combophy(id)))
 			return;
-		break;
-	case PORT_C:
-		id = DPLL_ID_ICL_MGPLL1;
-		break;
-	case PORT_D:
-		id = DPLL_ID_ICL_MGPLL2;
-		break;
-	case PORT_E:
-		id = DPLL_ID_ICL_MGPLL3;
-		break;
-	case PORT_F:
-		id = DPLL_ID_ICL_MGPLL4;
-		break;
-	default:
-		MISSING_CASE(port);
+	} else if (intel_port_is_tc(dev_priv, port)) {
+		id = icl_port_to_mg_pll_id(port);
+	} else {
+		WARN(1, "Invalid port %x\n", port);
 		return;
 	}
 
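The removed cases pin down what icl_port_to_mg_pll_id() must return: PORT_C..PORT_F map to MGPLL1..MGPLL4 in order. A sketch consistent with that, assuming both enum ranges are contiguous (the real helper lives elsewhere in the driver):

/* Sketch: Type-C port -> MG PLL id, equivalent to the removed
 * switch. Assumes PORT_C..PORT_F and DPLL_ID_ICL_MGPLL1..4 are
 * contiguous enum ranges. */
static enum intel_dpll_id port_to_mg_pll_id(enum port port)
{
	return (enum intel_dpll_id)(DPLL_ID_ICL_MGPLL1 + (port - PORT_C));
}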
@@ -9613,27 +9617,11 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
 	}
 
 	intel_get_pipe_src_size(crtc, pipe_config);
+	intel_get_crtc_ycbcr_config(crtc, pipe_config);
 
 	pipe_config->gamma_mode =
 		I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
 
-	if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
-		u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
-		bool clrspace_yuv = tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV;
-
-		if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
-			bool blend_mode_420 = tmp &
-					      PIPEMISC_YUV420_MODE_FULL_BLEND;
-
-			pipe_config->ycbcr420 = tmp & PIPEMISC_YUV420_ENABLE;
-			if (pipe_config->ycbcr420 != clrspace_yuv ||
-			    pipe_config->ycbcr420 != blend_mode_420)
-				DRM_DEBUG_KMS("Bad 4:2:0 mode (%08x)\n", tmp);
-		} else if (clrspace_yuv) {
-			DRM_DEBUG_KMS("YCbCr 4:2:0 Unsupported\n");
-		}
-	}
-
 	power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
 	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
 		power_domain_mask |= BIT_ULL(power_domain);
@@ -9902,8 +9890,6 @@ static void i845_update_cursor(struct intel_plane *plane,
 		I915_WRITE_FW(CURPOS(PIPE_A), pos);
 	}
 
-	POSTING_READ_FW(CURCNTR(PIPE_A));
-
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
@@ -10132,8 +10118,6 @@ static void i9xx_update_cursor(struct intel_plane *plane,
 		I915_WRITE_FW(CURBASE(pipe), base);
 	}
 
-	POSTING_READ_FW(CURBASE(pipe));
-
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
@@ -10738,14 +10722,40 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
 		pipe_config->fb_bits |= plane->frontbuffer_bit;
 
 	/*
+	 * ILK/SNB DVSACNTR/Sprite Enable
+	 * IVB SPR_CTL/Sprite Enable
+	 * "When in Self Refresh Big FIFO mode, a write to enable the
+	 *  plane will be internally buffered and delayed while Big FIFO
+	 *  mode is exiting."
+	 *
+	 * Which means that enabling the sprite can take an extra frame
+	 * when we start in big FIFO mode (LP1+). Thus we need to drop
+	 * down to LP0 and wait for vblank in order to make sure the
+	 * sprite gets enabled on the next vblank after the register write.
+	 * Doing otherwise would risk enabling the sprite one frame after
+	 * we've already signalled flip completion. We can resume LP1+
+	 * once the sprite has been enabled.
+	 *
+	 *
 	 * WaCxSRDisabledForSpriteScaling:ivb
+	 * IVB SPR_SCALE/Scaling Enable
+	 * "Low Power watermarks must be disabled for at least one
+	 *  frame before enabling sprite scaling, and kept disabled
+	 *  until sprite scaling is disabled."
 	 *
-	 * cstate->update_wm was already set above, so this flag will
-	 * take effect when we commit and program watermarks.
+	 * ILK/SNB DVSASCALE/Scaling Enable
+	 * "When in Self Refresh Big FIFO mode, scaling enable will be
+	 *  masked off while Big FIFO mode is exiting."
+	 *
+	 * Despite the w/a only being listed for IVB we assume that
+	 * the ILK/SNB note has similar ramifications, hence we apply
+	 * the w/a on all three platforms.
 	 */
-	if (plane->id == PLANE_SPRITE0 && IS_IVYBRIDGE(dev_priv) &&
-	    needs_scaling(to_intel_plane_state(plane_state)) &&
-	    !needs_scaling(old_plane_state))
+	if (plane->id == PLANE_SPRITE0 &&
+	    (IS_GEN5(dev_priv) || IS_GEN6(dev_priv) ||
+	     IS_IVYBRIDGE(dev_priv)) &&
+	    (turn_on || (!needs_scaling(old_plane_state) &&
+			 needs_scaling(to_intel_plane_state(plane_state)))))
		pipe_config->disable_lp_wm = true;
 
 	return 0;
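The rewritten trigger condition is dense; an equivalent expanded predicate, with hypothetical parameter names, for readers comparing old and new behaviour:

/* Sketch: the expanded LP-watermark-disable trigger from the hunk
 * above. New behaviour: fires on plane enable too, and on ILK/SNB
 * as well as IVB, not only when enabling scaling on IVB. */
static bool sprite_needs_lp_wm_disable(struct drm_i915_private *dev_priv,
				       enum plane_id id, bool turn_on,
				       bool was_scaled, bool will_scale)
{
	if (id != PLANE_SPRITE0)
		return false;

	if (!IS_GEN5(dev_priv) && !IS_GEN6(dev_priv) &&
	    !IS_IVYBRIDGE(dev_priv))
		return false;

	/* enabling the plane, or enabling scaling on an already
	 * enabled plane, both hit the Big FIFO exit hazard */
	return turn_on || (!was_scaled && will_scale);
}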
@@ -10781,6 +10791,98 @@ static bool check_single_encoder_cloning(struct drm_atomic_state *state,
 	return true;
 }
 
+static int icl_add_linked_planes(struct intel_atomic_state *state)
+{
+	struct intel_plane *plane, *linked;
+	struct intel_plane_state *plane_state, *linked_plane_state;
+	int i;
+
+	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+		linked = plane_state->linked_plane;
+
+		if (!linked)
+			continue;
+
+		linked_plane_state = intel_atomic_get_plane_state(state, linked);
+		if (IS_ERR(linked_plane_state))
+			return PTR_ERR(linked_plane_state);
+
+		WARN_ON(linked_plane_state->linked_plane != plane);
+		WARN_ON(linked_plane_state->slave == plane_state->slave);
+	}
+
+	return 0;
+}
+
+static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state);
+	struct intel_plane *plane, *linked;
+	struct intel_plane_state *plane_state;
+	int i;
+
+	if (INTEL_GEN(dev_priv) < 11)
+		return 0;
+
+	/*
+	 * Destroy all old plane links and make the slave plane invisible
+	 * in the crtc_state->active_planes mask.
+	 */
+	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+		if (plane->pipe != crtc->pipe || !plane_state->linked_plane)
+			continue;
+
+		plane_state->linked_plane = NULL;
+		if (plane_state->slave && !plane_state->base.visible)
+			crtc_state->active_planes &= ~BIT(plane->id);
+
+		plane_state->slave = false;
+	}
+
+	if (!crtc_state->nv12_planes)
+		return 0;
+
+	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+		struct intel_plane_state *linked_state = NULL;
+
+		if (plane->pipe != crtc->pipe ||
+		    !(crtc_state->nv12_planes & BIT(plane->id)))
+			continue;
+
+		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
+			if (!icl_is_nv12_y_plane(linked->id))
+				continue;
+
+			if (crtc_state->active_planes & BIT(linked->id))
+				continue;
+
+			linked_state = intel_atomic_get_plane_state(state, linked);
+			if (IS_ERR(linked_state))
+				return PTR_ERR(linked_state);
+
+			break;
+		}
+
+		if (!linked_state) {
+			DRM_DEBUG_KMS("Need %d free Y planes for NV12\n",
+				      hweight8(crtc_state->nv12_planes));
+
+			return -EINVAL;
+		}
+
+		plane_state->linked_plane = linked;
+
+		linked_state->slave = true;
+		linked_state->linked_plane = plane;
+		crtc_state->active_planes |= BIT(linked->id);
+		DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);
+	}
+
+	return 0;
+}
+
 static int intel_crtc_atomic_check(struct drm_crtc *crtc,
 				   struct drm_crtc_state *crtc_state)
 {
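For orientation, the three roles the new linked_plane/slave fields encode, as one hypothetical classifier (the real consumers are the gen11 plane update paths, not shown in this diff):

/* Sketch: role classification implied by icl_check_nv12_planes().
 * A normal plane has no link; the plane userspace addressed keeps
 * slave == false and points at its borrowed Y plane; the Y plane
 * has slave == true and points back at its master. */
static bool is_nv12_master(const struct intel_plane_state *ps)
{
	return ps->linked_plane && !ps->slave;
}

static bool is_nv12_slave(const struct intel_plane_state *ps)
{
	return ps->linked_plane && ps->slave;
}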
@@ -10789,7 +10891,6 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_crtc_state *pipe_config =
 		to_intel_crtc_state(crtc_state);
-	struct drm_atomic_state *state = crtc_state->state;
 	int ret;
 	bool mode_changed = needs_modeset(crtc_state);
 
@@ -10826,8 +10927,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
 		}
 	}
 
-	if (dev_priv->display.compute_intermediate_wm &&
-	    !to_intel_atomic_state(state)->skip_intermediate_wm) {
+	if (dev_priv->display.compute_intermediate_wm) {
 		if (WARN_ON(!dev_priv->display.compute_pipe_wm))
 			return 0;
 
@@ -10843,9 +10943,6 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
 			DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
 			return ret;
 		}
-	} else if (dev_priv->display.compute_intermediate_wm) {
-		if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
-			pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal;
 	}
 
 	if (INTEL_GEN(dev_priv) >= 9) {
@@ -10853,6 +10950,8 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
 		ret = skl_update_scaler_crtc(pipe_config);
 
 		if (!ret)
+			ret = icl_check_nv12_planes(pipe_config);
+		if (!ret)
 			ret = skl_check_pipe_max_pixel_rate(intel_crtc,
 							    pipe_config);
 		if (!ret)
@@ -10867,8 +10966,6 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
 }
 
 static const struct drm_crtc_helper_funcs intel_helper_funcs = {
-	.atomic_begin = intel_begin_crtc_commit,
-	.atomic_flush = intel_finish_crtc_commit,
 	.atomic_check = intel_crtc_atomic_check,
 };
 
@@ -10897,30 +10994,42 @@ static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
 	drm_connector_list_iter_end(&conn_iter);
 }
 
-static void
-connected_sink_compute_bpp(struct intel_connector *connector,
-			   struct intel_crtc_state *pipe_config)
+static int
+compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
+		      struct intel_crtc_state *pipe_config)
 {
-	const struct drm_display_info *info = &connector->base.display_info;
-	int bpp = pipe_config->pipe_bpp;
-
-	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
-		      connector->base.base.id,
-		      connector->base.name);
-
-	/* Don't use an invalid EDID bpc value */
-	if (info->bpc != 0 && info->bpc * 3 < bpp) {
-		DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
-			      bpp, info->bpc * 3);
-		pipe_config->pipe_bpp = info->bpc * 3;
+	struct drm_connector *connector = conn_state->connector;
+	const struct drm_display_info *info = &connector->display_info;
+	int bpp;
+
+	switch (conn_state->max_bpc) {
+	case 6 ... 7:
+		bpp = 6 * 3;
+		break;
+	case 8 ... 9:
+		bpp = 8 * 3;
+		break;
+	case 10 ... 11:
+		bpp = 10 * 3;
+		break;
+	case 12:
+		bpp = 12 * 3;
+		break;
+	default:
+		return -EINVAL;
 	}
 
-	/* Clamp bpp to 8 on screens without EDID 1.4 */
-	if (info->bpc == 0 && bpp > 24) {
-		DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
-			      bpp);
-		pipe_config->pipe_bpp = 24;
+	if (bpp < pipe_config->pipe_bpp) {
+		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
+			      "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
+			      connector->base.id, connector->name,
+			      bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc,
+			      pipe_config->pipe_bpp);
+
+		pipe_config->pipe_bpp = bpp;
 	}
+
+	return 0;
 }
 
 static int
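The max_bpc quantisation above in isolation, for reference — same ranges, hypothetical function name (the case-range syntax is the GCC extension the kernel already uses):

/* Sketch: connector max_bpc -> pipe bpp. Only 6/8/10/12 bpc are
 * programmable, so the request is rounded down to a legal step;
 * anything outside the table is rejected. */
static int max_bpc_to_pipe_bpp(unsigned int max_bpc)
{
	switch (max_bpc) {
	case 6 ... 7:
		return 6 * 3;
	case 8 ... 9:
		return 8 * 3;
	case 10 ... 11:
		return 10 * 3;
	case 12:
		return 12 * 3;
	default:
		return -EINVAL;
	}
}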
@@ -10928,7 +11037,7 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
 			  struct intel_crtc_state *pipe_config)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	struct drm_atomic_state *state;
+	struct drm_atomic_state *state = pipe_config->base.state;
 	struct drm_connector *connector;
 	struct drm_connector_state *connector_state;
 	int bpp, i;
@@ -10941,21 +11050,21 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
 	else
 		bpp = 8*3;
 
-
 	pipe_config->pipe_bpp = bpp;
 
-	state = pipe_config->base.state;
-
-	/* Clamp display bpp to EDID value */
+	/* Clamp display bpp to connector max bpp */
 	for_each_new_connector_in_state(state, connector, connector_state, i) {
+		int ret;
+
 		if (connector_state->crtc != &crtc->base)
 			continue;
 
-		connected_sink_compute_bpp(to_intel_connector(connector),
-					   pipe_config);
+		ret = compute_sink_pipe_bpp(connector_state, pipe_config);
+		if (ret)
+			return ret;
 	}
 
-	return bpp;
+	return 0;
 }
 
 static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
@@ -11025,6 +11134,20 @@ static void snprintf_output_types(char *buf, size_t len,
 	WARN_ON_ONCE(output_types != 0);
 }
 
+static const char * const output_format_str[] = {
+	[INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
+	[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
+	[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
+	[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
+};
+
+static const char *output_formats(enum intel_output_format format)
+{
+	if (format >= ARRAY_SIZE(output_format_str))
+		format = INTEL_OUTPUT_FORMAT_INVALID;
+	return output_format_str[format];
+}
+
 static void intel_dump_pipe_config(struct intel_crtc *crtc,
 				   struct intel_crtc_state *pipe_config,
 				   const char *context)
@@ -11044,6 +11167,9 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
 	DRM_DEBUG_KMS("output_types: %s (0x%x)\n",
 		      buf, pipe_config->output_types);
 
+	DRM_DEBUG_KMS("output format: %s\n",
+		      output_formats(pipe_config->output_format));
+
 	DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
 		      transcoder_name(pipe_config->cpu_transcoder),
 		      pipe_config->pipe_bpp, pipe_config->dither);
@@ -11053,9 +11179,6 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
 				      pipe_config->fdi_lanes,
 				      &pipe_config->fdi_m_n);
 
-	if (pipe_config->ycbcr420)
-		DRM_DEBUG_KMS("YCbCr 4:2:0 output enabled\n");
-
 	if (intel_crtc_has_dp_encoder(pipe_config)) {
 		intel_dump_m_n_config(pipe_config, "dp m_n",
 				      pipe_config->lane_count, &pipe_config->dp_m_n);
@@ -11244,7 +11367,7 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
 	struct intel_encoder *encoder;
 	struct drm_connector *connector;
 	struct drm_connector_state *connector_state;
-	int base_bpp, ret = -EINVAL;
+	int base_bpp, ret;
 	int i;
 	bool retry = true;
 
@@ -11266,10 +11389,12 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
 	    (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
 		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
 
-	base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
-					     pipe_config);
-	if (base_bpp < 0)
-		goto fail;
+	ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
+					pipe_config);
+	if (ret)
+		return ret;
+
+	base_bpp = pipe_config->pipe_bpp;
 
 	/*
 	 * Determine the real pipe dimensions. Note that stereo modes can
@@ -11291,7 +11416,7 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
 
 		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
 			DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
-			goto fail;
+			return -EINVAL;
 		}
 
 		/*
@@ -11327,7 +11452,7 @@ encoder_retry:
 
 		if (!(encoder->compute_config(encoder, pipe_config, connector_state))) {
 			DRM_DEBUG_KMS("Encoder config failure\n");
-			goto fail;
+			return -EINVAL;
 		}
 	}
 
@@ -11338,16 +11463,16 @@ encoder_retry:
 		* pipe_config->pixel_multiplier;
 
 	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
+	if (ret == -EDEADLK)
+		return ret;
 	if (ret < 0) {
 		DRM_DEBUG_KMS("CRTC fixup failed\n");
-		goto fail;
+		return ret;
 	}
 
 	if (ret == RETRY) {
-		if (WARN(!retry, "loop in pipe configuration computation\n")) {
-			ret = -EINVAL;
-			goto fail;
-		}
+		if (WARN(!retry, "loop in pipe configuration computation\n"))
+			return -EINVAL;
 
 		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
 		retry = false;
@@ -11363,8 +11488,7 @@ encoder_retry:
 	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
 		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
 
-fail:
-	return ret;
+	return 0;
 }
 
 static bool intel_fuzzy_clock_check(int clock1, int clock2)
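With the fail: label gone, the function's control flow reduces to the skeleton below; compute_step() is a hypothetical stand-in for the compute-config calls, and RETRY is the driver-internal positive return code seen above. The key invariant: -EDEADLK is returned untouched so the atomic core can drop its locks and back off, and encoder negotiation is re-run exactly once:

static int compute_step(void);	/* hypothetical stand-in */

/* Sketch: the error-propagation shape after the rewrite. */
static int pipe_config_skeleton(void)
{
	bool retry = true;
	int ret;

encoder_retry:
	ret = compute_step();
	if (ret == -EDEADLK)
		return ret;	/* never swallow the locking backoff */
	if (ret < 0)
		return ret;
	if (ret == RETRY) {
		if (WARN(!retry, "loop in pipe configuration computation\n"))
			return -EINVAL;
		retry = false;
		goto encoder_retry;
	}
	return 0;
}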
@@ -11633,6 +11757,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
 
 	PIPE_CONF_CHECK_I(pixel_multiplier);
+	PIPE_CONF_CHECK_I(output_format);
 	PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
 	if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
 	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
@@ -11641,7 +11766,6 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
 	PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
 	PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
 	PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_infoframe);
-	PIPE_CONF_CHECK_BOOL(ycbcr420);
 
 	PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
 
@@ -12150,8 +12274,9 @@ intel_modeset_verify_disabled(struct drm_device *dev,
 	verify_disabled_dpll_state(dev);
 }
 
-static void update_scanline_offset(struct intel_crtc *crtc)
+static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
 {
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
 	/*
@@ -12182,7 +12307,7 @@ static void update_scanline_offset(struct intel_crtc *crtc)
 	 * answer that's slightly in the future.
 	 */
 	if (IS_GEN2(dev_priv)) {
-		const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+		const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
 		int vtotal;
 
 		vtotal = adjusted_mode->crtc_vtotal;
@@ -12191,7 +12316,7 @@ static void update_scanline_offset(struct intel_crtc *crtc)
 
 		crtc->scanline_offset = vtotal - 1;
 	} else if (HAS_DDI(dev_priv) &&
-		   intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) {
+		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
 		crtc->scanline_offset = 2;
 	} else
 		crtc->scanline_offset = 1;
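Condensed, and assuming the usual gen2 interlace halving in the lines the hunk elides, the selection now reads as follows (hypothetical helper, crtc_state-driven as in the rewrite):

/* Sketch: scanline_offset selection per the rewritten function. */
static int scanline_offset(const struct intel_crtc_state *crtc_state,
			   struct drm_i915_private *dev_priv)
{
	const struct drm_display_mode *mode = &crtc_state->base.adjusted_mode;

	if (IS_GEN2(dev_priv)) {
		int vtotal = mode->crtc_vtotal;

		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			vtotal /= 2;	/* assumed from the elided lines */

		return vtotal - 1;	/* gen2 reports one line ahead */
	}

	if (HAS_DDI(dev_priv) &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		return 2;

	return 1;
}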
@@ -12474,6 +12599,8 @@ static int intel_atomic_check(struct drm_device *dev,
 		}
 
 		ret = intel_modeset_pipe_config(crtc, pipe_config);
+		if (ret == -EDEADLK)
+			return ret;
 		if (ret) {
 			intel_dump_pipe_config(to_intel_crtc(crtc),
 					       pipe_config, "[failed]");
@@ -12505,6 +12632,10 @@ static int intel_atomic_check(struct drm_device *dev,
 		intel_state->cdclk.logical = dev_priv->cdclk.logical;
 	}
 
+	ret = icl_add_linked_planes(intel_state);
+	if (ret)
+		return ret;
+
 	ret = drm_atomic_helper_check_planes(dev, state);
 	if (ret)
 		return ret;
@@ -12537,6 +12668,7 @@ static void intel_update_crtc(struct drm_crtc *crtc,
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_crtc_state *old_intel_cstate = to_intel_crtc_state(old_crtc_state);
 	struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state);
 	bool modeset = needs_modeset(new_crtc_state);
 	struct intel_plane_state *new_plane_state =
@@ -12544,7 +12676,7 @@ static void intel_update_crtc(struct drm_crtc *crtc,
 		to_intel_plane(crtc->primary));
 
 	if (modeset) {
		update_scanline_offset(pipe_config);
 		dev_priv->display.crtc_enable(pipe_config, state);
 
 		/* vblanks work again, re-enable pipe CRC. */
@@ -12557,7 +12689,12 @@ static void intel_update_crtc(struct drm_crtc *crtc,
 	if (new_plane_state)
 		intel_fbc_enable(intel_crtc, pipe_config, new_plane_state);
 
-	drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
+	intel_begin_crtc_commit(crtc, old_crtc_state);
+
+	intel_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc,
+				    old_intel_cstate, pipe_config);
+
+	intel_finish_crtc_commit(crtc, old_crtc_state);
 }
 
 static void intel_update_crtcs(struct drm_atomic_state *state)
@@ -12589,13 +12726,12 @@ static void skl_update_crtcs(struct drm_atomic_state *state)
 	int i;
 	u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
 	u8 required_slices = intel_state->wm_results.ddb.enabled_slices;
-
-	const struct skl_ddb_entry *entries[I915_MAX_PIPES] = {};
+	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
 
 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
 		/* ignore allocations for crtcs that have been turned off. */
 		if (new_crtc_state->active)
-			entries[i] = &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
+			entries[i] = to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
 
 	/* If 2nd DBuf slice required, enable it here */
 	if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
@@ -12621,14 +12757,13 @@ static void skl_update_crtcs(struct drm_atomic_state *state)
 			if (updated & cmask || !cstate->base.active)
 				continue;
 
-			if (skl_ddb_allocation_overlaps(dev_priv,
+			if (skl_ddb_allocation_overlaps(&cstate->wm.skl.ddb,
 							entries,
-							&cstate->wm.skl.ddb,
-							i))
+							INTEL_INFO(dev_priv)->num_pipes, i))
 				continue;
 
 			updated |= cmask;
-			entries[i] = &cstate->wm.skl.ddb;
+			entries[i] = cstate->wm.skl.ddb;
 
 			/*
 			 * If this is an already active pipe, its DDB changed,
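The new skl_ddb_allocation_overlaps() signature implies a plain range-intersection scan over by-value entries; a sketch under the assumption that skl_ddb_entry carries a half-open [start, end) pair:

/* Sketch: overlap test implied by the new call above. */
static bool ddb_overlaps(const struct skl_ddb_entry *ddb,
			 const struct skl_ddb_entry *entries,
			 int num_entries, int ignore_idx)
{
	int i;

	for (i = 0; i < num_entries; i++) {
		if (i == ignore_idx)
			continue;

		/* half-open ranges intersect iff each starts
		 * before the other ends */
		if (ddb->start < entries[i].end &&
		    entries[i].start < ddb->end)
			return true;
	}

	return false;
}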
@@ -12718,8 +12853,9 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+	struct intel_crtc_state *new_intel_crtc_state, *old_intel_crtc_state;
 	struct drm_crtc *crtc;
-	struct intel_crtc_state *intel_cstate;
+	struct intel_crtc *intel_crtc;
 	u64 put_domains[I915_MAX_PIPES] = {};
 	int i;
 
@@ -12731,24 +12867,25 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 		intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
 
 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
-		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+		old_intel_crtc_state = to_intel_crtc_state(old_crtc_state);
+		new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
+		intel_crtc = to_intel_crtc(crtc);
 
 		if (needs_modeset(new_crtc_state) ||
 		    to_intel_crtc_state(new_crtc_state)->update_pipe) {
 
-			put_domains[to_intel_crtc(crtc)->pipe] =
+			put_domains[intel_crtc->pipe] =
 				modeset_get_crtc_power_domains(crtc,
-					to_intel_crtc_state(new_crtc_state));
+					new_intel_crtc_state);
 		}
 
 		if (!needs_modeset(new_crtc_state))
 			continue;
 
-		intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
-				       to_intel_crtc_state(new_crtc_state));
+		intel_pre_plane_update(old_intel_crtc_state, new_intel_crtc_state);
 
 		if (old_crtc_state->active) {
-			intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask);
+			intel_crtc_disable_planes(intel_crtc, old_intel_crtc_state->active_planes);
 
 			/*
 			 * We need to disable pipe CRC before disabling the pipe,
@@ -12756,10 +12893,10 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 			 */
 			intel_crtc_disable_pipe_crc(intel_crtc);
 
-			dev_priv->display.crtc_disable(to_intel_crtc_state(old_crtc_state), state);
+			dev_priv->display.crtc_disable(old_intel_crtc_state, state);
 			intel_crtc->active = false;
 			intel_fbc_disable(intel_crtc);
-			intel_disable_shared_dpll(intel_crtc);
+			intel_disable_shared_dpll(old_intel_crtc_state);
 
 			/*
 			 * Underruns don't always raise
@@ -12768,17 +12905,12 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 			intel_check_cpu_fifo_underruns(dev_priv);
 			intel_check_pch_fifo_underruns(dev_priv);
 
-			if (!new_crtc_state->active) {
-				/*
-				 * Make sure we don't call initial_watermarks
-				 * for ILK-style watermark updates.
-				 *
-				 * No clue what this is supposed to achieve.
-				 */
-				if (INTEL_GEN(dev_priv) >= 9)
-					dev_priv->display.initial_watermarks(intel_state,
-									     to_intel_crtc_state(new_crtc_state));
-			}
+			/* FIXME unify this for all platforms */
+			if (!new_crtc_state->active &&
+			    !HAS_GMCH_DISPLAY(dev_priv) &&
+			    dev_priv->display.initial_watermarks)
+				dev_priv->display.initial_watermarks(intel_state,
+								     new_intel_crtc_state);
 		}
 	}
 
@@ -12837,11 +12969,11 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 	 * TODO: Move this (and other cleanup) to an async worker eventually.
 	 */
 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
-		intel_cstate = to_intel_crtc_state(new_crtc_state);
+		new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
 
 		if (dev_priv->display.optimize_watermarks)
 			dev_priv->display.optimize_watermarks(intel_state,
-							      intel_cstate);
+							      new_intel_crtc_state);
 	}
 
 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
@@ -13224,13 +13356,12 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 
 	ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
 
-	fb_obj_bump_render_priority(obj);
-
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 	i915_gem_object_unpin_pages(obj);
 	if (ret)
 		return ret;
 
+	fb_obj_bump_render_priority(obj);
 	intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
 
 	if (!new_state->fence) { /* implicit fencing */
@@ -13361,7 +13492,7 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
 	if (intel_cstate->update_pipe)
 		intel_update_pipe_config(old_intel_cstate, intel_cstate);
 	else if (INTEL_GEN(dev_priv) >= 9)
-		skl_detach_scalers(intel_crtc);
+		skl_detach_scalers(intel_cstate);
 
 out:
 	if (dev_priv->display.atomic_update_watermarks)
@@ -13463,56 +13594,6 @@ static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
 	}
 }
 
-static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
-					   u32 format, u64 modifier)
-{
-	struct intel_plane *plane = to_intel_plane(_plane);
-
-	switch (modifier) {
-	case DRM_FORMAT_MOD_LINEAR:
-	case I915_FORMAT_MOD_X_TILED:
-	case I915_FORMAT_MOD_Y_TILED:
-	case I915_FORMAT_MOD_Yf_TILED:
-		break;
-	case I915_FORMAT_MOD_Y_TILED_CCS:
-	case I915_FORMAT_MOD_Yf_TILED_CCS:
-		if (!plane->has_ccs)
-			return false;
-		break;
-	default:
-		return false;
-	}
-
-	switch (format) {
-	case DRM_FORMAT_XRGB8888:
-	case DRM_FORMAT_XBGR8888:
-	case DRM_FORMAT_ARGB8888:
-	case DRM_FORMAT_ABGR8888:
-		if (is_ccs_modifier(modifier))
-			return true;
-		/* fall through */
-	case DRM_FORMAT_RGB565:
-	case DRM_FORMAT_XRGB2101010:
-	case DRM_FORMAT_XBGR2101010:
-	case DRM_FORMAT_YUYV:
-	case DRM_FORMAT_YVYU:
-	case DRM_FORMAT_UYVY:
-	case DRM_FORMAT_VYUY:
-	case DRM_FORMAT_NV12:
-		if (modifier == I915_FORMAT_MOD_Yf_TILED)
-			return true;
-		/* fall through */
-	case DRM_FORMAT_C8:
-		if (modifier == DRM_FORMAT_MOD_LINEAR ||
-		    modifier == I915_FORMAT_MOD_X_TILED ||
-		    modifier == I915_FORMAT_MOD_Y_TILED)
-			return true;
-		/* fall through */
-	default:
-		return false;
-	}
-}
-
 static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
 					      u32 format, u64 modifier)
 {
@@ -13520,18 +13601,7 @@ static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
 	       format == DRM_FORMAT_ARGB8888;
 }
 
-static struct drm_plane_funcs skl_plane_funcs = {
-	.update_plane = drm_atomic_helper_update_plane,
-	.disable_plane = drm_atomic_helper_disable_plane,
-	.destroy = intel_plane_destroy,
-	.atomic_get_property = intel_plane_atomic_get_property,
-	.atomic_set_property = intel_plane_atomic_set_property,
-	.atomic_duplicate_state = intel_plane_duplicate_state,
-	.atomic_destroy_state = intel_plane_destroy_state,
-	.format_mod_supported = skl_plane_format_mod_supported,
-};
-
-static struct drm_plane_funcs i965_plane_funcs = {
+static const struct drm_plane_funcs i965_plane_funcs = {
 	.update_plane = drm_atomic_helper_update_plane,
 	.disable_plane = drm_atomic_helper_disable_plane,
 	.destroy = intel_plane_destroy,
@@ -13542,7 +13612,7 @@ static struct drm_plane_funcs i965_plane_funcs = {
 	.format_mod_supported = i965_plane_format_mod_supported,
 };
 
-static struct drm_plane_funcs i8xx_plane_funcs = {
+static const struct drm_plane_funcs i8xx_plane_funcs = {
 	.update_plane = drm_atomic_helper_update_plane,
 	.disable_plane = drm_atomic_helper_disable_plane,
 	.destroy = intel_plane_destroy,
@@ -13568,14 +13638,16 @@ intel_legacy_cursor_update(struct drm_plane *plane,
 	struct drm_plane_state *old_plane_state, *new_plane_state;
 	struct intel_plane *intel_plane = to_intel_plane(plane);
 	struct drm_framebuffer *old_fb;
-	struct drm_crtc_state *crtc_state = crtc->state;
+	struct intel_crtc_state *crtc_state =
+		to_intel_crtc_state(crtc->state);
+	struct intel_crtc_state *new_crtc_state;
 
 	/*
 	 * When crtc is inactive or there is a modeset pending,
 	 * wait for it to complete in the slowpath
 	 */
-	if (!crtc_state->active || needs_modeset(crtc_state) ||
-	    to_intel_crtc_state(crtc_state)->update_pipe)
+	if (!crtc_state->base.active || needs_modeset(&crtc_state->base) ||
+	    crtc_state->update_pipe)
 		goto slow;
 
 	old_plane_state = plane->state;
@@ -13605,6 +13677,12 @@ intel_legacy_cursor_update(struct drm_plane *plane,
 	if (!new_plane_state)
 		return -ENOMEM;
 
+	new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(crtc));
+	if (!new_crtc_state) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+
 	drm_atomic_set_fb_for_plane(new_plane_state, fb);
 
 	new_plane_state->src_x = src_x;
@@ -13616,9 +13694,8 @@ intel_legacy_cursor_update(struct drm_plane *plane,
 	new_plane_state->crtc_w = crtc_w;
 	new_plane_state->crtc_h = crtc_h;
 
-	ret = intel_plane_atomic_check_with_state(to_intel_crtc_state(crtc->state),
-						  to_intel_crtc_state(crtc->state), /* FIXME need a new crtc state? */
-						  to_intel_plane_state(plane->state),
+	ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
+						  to_intel_plane_state(old_plane_state),
 						  to_intel_plane_state(new_plane_state));
 	if (ret)
 		goto out_free;
@@ -13640,10 +13717,21 @@ intel_legacy_cursor_update(struct drm_plane *plane,
 	/* Swap plane state */
 	plane->state = new_plane_state;
 
+	/*
+	 * We cannot swap crtc_state as it may be in use by an atomic commit or
+	 * page flip that's running simultaneously. If we swap crtc_state and
+	 * destroy the old state, we will cause a use-after-free there.
+	 *
+	 * Only update active_planes, which is needed for our internal
+	 * bookkeeping. Either value will do the right thing when updating
+	 * planes atomically. If the cursor was part of the atomic update then
+	 * we would have taken the slowpath.
+	 */
+	crtc_state->active_planes = new_crtc_state->active_planes;
+
 	if (plane->state->visible) {
 		trace_intel_update_plane(plane, to_intel_crtc(crtc));
-		intel_plane->update_plane(intel_plane,
-					  to_intel_crtc_state(crtc->state),
+		intel_plane->update_plane(intel_plane, crtc_state,
 					  to_intel_plane_state(plane->state));
 	} else {
 		trace_intel_disable_plane(plane, to_intel_crtc(crtc));
@@ -13655,6 +13743,8 @@ intel_legacy_cursor_update(struct drm_plane *plane,
 out_unlock:
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 out_free:
+	if (new_crtc_state)
+		intel_crtc_destroy_state(crtc, &new_crtc_state->base);
 	if (ret)
 		intel_plane_destroy_state(plane, new_plane_state);
 	else
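The use-after-free fix above follows a duplicate/check/copy-back/destroy pattern; reduced to its shape (check_step() is a hypothetical stand-in for the plane atomic check):

static int check_step(struct intel_crtc_state *tmp);	/* hypothetical */

/* Sketch: the fastpath shape. The live crtc_state is never swapped,
 * only one scalar is copied back, and the throwaway duplicate is
 * destroyed on every path, so a concurrent commit that still holds
 * the old pointer stays safe. */
static int cursor_fastpath_shape(struct drm_crtc *crtc,
				 struct intel_crtc_state *live)
{
	struct intel_crtc_state *tmp =
		to_intel_crtc_state(intel_crtc_duplicate_state(crtc));
	int ret;

	if (!tmp)
		return -ENOMEM;

	ret = check_step(tmp);
	if (!ret)
		live->active_planes = tmp->active_planes;

	intel_crtc_destroy_state(crtc, &tmp->base);
	return ret;
}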
@@ -13695,176 +13785,90 @@ static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
13695 return i9xx_plane == PLANE_A; 13785 return i9xx_plane == PLANE_A;
13696} 13786}
13697 13787
13698static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
13699 enum pipe pipe, enum plane_id plane_id)
13700{
13701 if (!HAS_FBC(dev_priv))
13702 return false;
13703
13704 return pipe == PIPE_A && plane_id == PLANE_PRIMARY;
13705}
13706
13707bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
13708 enum pipe pipe, enum plane_id plane_id)
13709{
13710 /*
13711 * FIXME: ICL requires two hardware planes for scanning out NV12
13712 * framebuffers. Do not advertize support until this is implemented.
13713 */
13714 if (INTEL_GEN(dev_priv) >= 11)
13715 return false;
13716
13717 if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
13718 return false;
13719
13720 if (INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C)
13721 return false;
13722
13723 if (plane_id != PLANE_PRIMARY && plane_id != PLANE_SPRITE0)
13724 return false;
13725
13726 return true;
13727}
13728
13729static struct intel_plane * 13788static struct intel_plane *
13730intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) 13789intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
13731{ 13790{
13732 struct intel_plane *primary = NULL; 13791 struct intel_plane *plane;
13733 struct intel_plane_state *state = NULL;
13734 const struct drm_plane_funcs *plane_funcs; 13792 const struct drm_plane_funcs *plane_funcs;
13735 const uint32_t *intel_primary_formats;
13736 unsigned int supported_rotations; 13793 unsigned int supported_rotations;
13737 unsigned int num_formats; 13794 unsigned int possible_crtcs;
13738 const uint64_t *modifiers; 13795 const u64 *modifiers;
13796 const u32 *formats;
13797 int num_formats;
13739 int ret; 13798 int ret;
13740 13799
13741 primary = kzalloc(sizeof(*primary), GFP_KERNEL); 13800 if (INTEL_GEN(dev_priv) >= 9)
13742 if (!primary) { 13801 return skl_universal_plane_create(dev_priv, pipe,
13743 ret = -ENOMEM; 13802 PLANE_PRIMARY);
13744 goto fail;
13745 }
13746
13747 state = intel_create_plane_state(&primary->base);
13748 if (!state) {
13749 ret = -ENOMEM;
13750 goto fail;
13751 }
13752 13803
13753 primary->base.state = &state->base; 13804 plane = intel_plane_alloc();
13805 if (IS_ERR(plane))
13806 return plane;
13754 13807
13755 if (INTEL_GEN(dev_priv) >= 9) 13808 plane->pipe = pipe;
13756 state->scaler_id = -1;
13757 primary->pipe = pipe;
13758 /* 13809 /*
13759 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS 13810 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
13760 * port is hooked to pipe B. Hence we want plane A feeding pipe B. 13811 * port is hooked to pipe B. Hence we want plane A feeding pipe B.
13761 */ 13812 */
13762 if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4) 13813 if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
13763 primary->i9xx_plane = (enum i9xx_plane_id) !pipe; 13814 plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
13764 else 13815 else
13765 primary->i9xx_plane = (enum i9xx_plane_id) pipe; 13816 plane->i9xx_plane = (enum i9xx_plane_id) pipe;
13766 primary->id = PLANE_PRIMARY; 13817 plane->id = PLANE_PRIMARY;
13767 primary->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, primary->id); 13818 plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
13768 13819
13769 if (INTEL_GEN(dev_priv) >= 9) 13820 plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
13770 primary->has_fbc = skl_plane_has_fbc(dev_priv, 13821 if (plane->has_fbc) {
13771 primary->pipe,
13772 primary->id);
13773 else
13774 primary->has_fbc = i9xx_plane_has_fbc(dev_priv,
13775 primary->i9xx_plane);
13776
13777 if (primary->has_fbc) {
13778 struct intel_fbc *fbc = &dev_priv->fbc; 13822 struct intel_fbc *fbc = &dev_priv->fbc;
13779 13823
13780 fbc->possible_framebuffer_bits |= primary->frontbuffer_bit; 13824 fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
13781 } 13825 }
13782 13826
13783 if (INTEL_GEN(dev_priv) >= 9) { 13827 if (INTEL_GEN(dev_priv) >= 4) {
13784 primary->has_ccs = skl_plane_has_ccs(dev_priv, pipe, 13828 formats = i965_primary_formats;
13785 PLANE_PRIMARY);
13786
13787 if (skl_plane_has_planar(dev_priv, pipe, PLANE_PRIMARY)) {
13788 intel_primary_formats = skl_pri_planar_formats;
13789 num_formats = ARRAY_SIZE(skl_pri_planar_formats);
13790 } else {
13791 intel_primary_formats = skl_primary_formats;
13792 num_formats = ARRAY_SIZE(skl_primary_formats);
13793 }
13794
13795 if (primary->has_ccs)
13796 modifiers = skl_format_modifiers_ccs;
13797 else
13798 modifiers = skl_format_modifiers_noccs;
13799
13800 primary->max_stride = skl_plane_max_stride;
13801 primary->update_plane = skl_update_plane;
13802 primary->disable_plane = skl_disable_plane;
13803 primary->get_hw_state = skl_plane_get_hw_state;
13804 primary->check_plane = skl_plane_check;
13805
13806 plane_funcs = &skl_plane_funcs;
13807 } else if (INTEL_GEN(dev_priv) >= 4) {
13808 intel_primary_formats = i965_primary_formats;
13809 num_formats = ARRAY_SIZE(i965_primary_formats); 13829 num_formats = ARRAY_SIZE(i965_primary_formats);
13810 modifiers = i9xx_format_modifiers; 13830 modifiers = i9xx_format_modifiers;
13811 13831
13812 primary->max_stride = i9xx_plane_max_stride; 13832 plane->max_stride = i9xx_plane_max_stride;
13813 primary->update_plane = i9xx_update_plane; 13833 plane->update_plane = i9xx_update_plane;
13814 primary->disable_plane = i9xx_disable_plane; 13834 plane->disable_plane = i9xx_disable_plane;
13815 primary->get_hw_state = i9xx_plane_get_hw_state; 13835 plane->get_hw_state = i9xx_plane_get_hw_state;
13816 primary->check_plane = i9xx_plane_check; 13836 plane->check_plane = i9xx_plane_check;
13817 13837
13818 plane_funcs = &i965_plane_funcs; 13838 plane_funcs = &i965_plane_funcs;
13819 } else { 13839 } else {
13820 intel_primary_formats = i8xx_primary_formats; 13840 formats = i8xx_primary_formats;
13821 num_formats = ARRAY_SIZE(i8xx_primary_formats); 13841 num_formats = ARRAY_SIZE(i8xx_primary_formats);
13822 modifiers = i9xx_format_modifiers; 13842 modifiers = i9xx_format_modifiers;
13823 13843
13824 primary->max_stride = i9xx_plane_max_stride; 13844 plane->max_stride = i9xx_plane_max_stride;
13825 primary->update_plane = i9xx_update_plane; 13845 plane->update_plane = i9xx_update_plane;
13826 primary->disable_plane = i9xx_disable_plane; 13846 plane->disable_plane = i9xx_disable_plane;
13827 primary->get_hw_state = i9xx_plane_get_hw_state; 13847 plane->get_hw_state = i9xx_plane_get_hw_state;
13828 primary->check_plane = i9xx_plane_check; 13848 plane->check_plane = i9xx_plane_check;
13829 13849
13830 plane_funcs = &i8xx_plane_funcs; 13850 plane_funcs = &i8xx_plane_funcs;
13831 } 13851 }
13832 13852
13833 if (INTEL_GEN(dev_priv) >= 9) 13853 possible_crtcs = BIT(pipe);
13834 ret = drm_universal_plane_init(&dev_priv->drm, &primary->base, 13854
13835 0, plane_funcs, 13855 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
13836 intel_primary_formats, num_formats, 13856 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
13837 modifiers, 13857 possible_crtcs, plane_funcs,
13838 DRM_PLANE_TYPE_PRIMARY, 13858 formats, num_formats, modifiers,
13839 "plane 1%c", pipe_name(pipe));
13840 else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
13841 ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
13842 0, plane_funcs,
13843 intel_primary_formats, num_formats,
13844 modifiers,
13845 DRM_PLANE_TYPE_PRIMARY, 13859 DRM_PLANE_TYPE_PRIMARY,
13846 "primary %c", pipe_name(pipe)); 13860 "primary %c", pipe_name(pipe));
13847 else 13861 else
13848 ret = drm_universal_plane_init(&dev_priv->drm, &primary->base, 13862 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
13849 0, plane_funcs, 13863 possible_crtcs, plane_funcs,
13850 intel_primary_formats, num_formats, 13864 formats, num_formats, modifiers,
13851 modifiers,
13852 DRM_PLANE_TYPE_PRIMARY, 13865 DRM_PLANE_TYPE_PRIMARY,
13853 "plane %c", 13866 "plane %c",
13854 plane_name(primary->i9xx_plane)); 13867 plane_name(plane->i9xx_plane));
13855 if (ret) 13868 if (ret)
13856 goto fail; 13869 goto fail;
13857 13870
13858 if (INTEL_GEN(dev_priv) >= 10) { 13871 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
13859 supported_rotations =
13860 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
13861 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270 |
13862 DRM_MODE_REFLECT_X;
13863 } else if (INTEL_GEN(dev_priv) >= 9) {
13864 supported_rotations =
13865 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
13866 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
13867 } else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
13868 supported_rotations = 13872 supported_rotations =
13869 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 | 13873 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
13870 DRM_MODE_REFLECT_X; 13874 DRM_MODE_REFLECT_X;
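/*
 * Illustrative aside on possible_crtcs above (a minimal sketch, not part
 * of the patch): it is a bitmask of CRTC indexes the plane may be bound
 * to. i915 registers one CRTC per pipe, in pipe order, so BIT(pipe)
 * pins the plane to its own pipe's CRTC, e.g.:
 *
 *	possible_crtcs = BIT(PIPE_B);	/* == 0x2, CRTC index 1 only */
 */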
@@ -13876,26 +13880,16 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
13876 } 13880 }
13877 13881
13878 if (INTEL_GEN(dev_priv) >= 4) 13882 if (INTEL_GEN(dev_priv) >= 4)
13879 drm_plane_create_rotation_property(&primary->base, 13883 drm_plane_create_rotation_property(&plane->base,
13880 DRM_MODE_ROTATE_0, 13884 DRM_MODE_ROTATE_0,
13881 supported_rotations); 13885 supported_rotations);
13882 13886
13883 if (INTEL_GEN(dev_priv) >= 9) 13887 drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
13884 drm_plane_create_color_properties(&primary->base,
13885 BIT(DRM_COLOR_YCBCR_BT601) |
13886 BIT(DRM_COLOR_YCBCR_BT709),
13887 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
13888 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
13889 DRM_COLOR_YCBCR_BT709,
13890 DRM_COLOR_YCBCR_LIMITED_RANGE);
13891
13892 drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
13893 13888
13894 return primary; 13889 return plane;
13895 13890
13896fail: 13891fail:
13897 kfree(state); 13892 intel_plane_free(plane);
13898 kfree(primary);
13899 13893
13900 return ERR_PTR(ret); 13894 return ERR_PTR(ret);
13901} 13895}
@@ -13904,23 +13898,13 @@ static struct intel_plane *
13904intel_cursor_plane_create(struct drm_i915_private *dev_priv, 13898intel_cursor_plane_create(struct drm_i915_private *dev_priv,
13905 enum pipe pipe) 13899 enum pipe pipe)
13906{ 13900{
13907 struct intel_plane *cursor = NULL; 13901 unsigned int possible_crtcs;
13908 struct intel_plane_state *state = NULL; 13902 struct intel_plane *cursor;
13909 int ret; 13903 int ret;
13910 13904
13911 cursor = kzalloc(sizeof(*cursor), GFP_KERNEL); 13905 cursor = intel_plane_alloc();
13912 if (!cursor) { 13906 if (IS_ERR(cursor))
13913 ret = -ENOMEM; 13907 return cursor;
13914 goto fail;
13915 }
13916
13917 state = intel_create_plane_state(&cursor->base);
13918 if (!state) {
13919 ret = -ENOMEM;
13920 goto fail;
13921 }
13922
13923 cursor->base.state = &state->base;
13924 13908
13925 cursor->pipe = pipe; 13909 cursor->pipe = pipe;
13926 cursor->i9xx_plane = (enum i9xx_plane_id) pipe; 13910 cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
@@ -13947,8 +13931,10 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
13947 if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv)) 13931 if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
13948 cursor->cursor.size = ~0; 13932 cursor->cursor.size = ~0;
13949 13933
13934 possible_crtcs = BIT(pipe);
13935
13950 ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base, 13936 ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
13951 0, &intel_cursor_plane_funcs, 13937 possible_crtcs, &intel_cursor_plane_funcs,
13952 intel_cursor_formats, 13938 intel_cursor_formats,
13953 ARRAY_SIZE(intel_cursor_formats), 13939 ARRAY_SIZE(intel_cursor_formats),
13954 cursor_format_modifiers, 13940 cursor_format_modifiers,
@@ -13963,16 +13949,12 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
13963 DRM_MODE_ROTATE_0 | 13949 DRM_MODE_ROTATE_0 |
13964 DRM_MODE_ROTATE_180); 13950 DRM_MODE_ROTATE_180);
13965 13951
13966 if (INTEL_GEN(dev_priv) >= 9)
13967 state->scaler_id = -1;
13968
13969 drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs); 13952 drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
13970 13953
13971 return cursor; 13954 return cursor;
13972 13955
13973fail: 13956fail:
13974 kfree(state); 13957 intel_plane_free(cursor);
13975 kfree(cursor);
13976 13958
13977 return ERR_PTR(ret); 13959 return ERR_PTR(ret);
13978} 13960}
@@ -13993,7 +13975,7 @@ static void intel_crtc_init_scalers(struct intel_crtc *crtc,
13993 struct intel_scaler *scaler = &scaler_state->scalers[i]; 13975 struct intel_scaler *scaler = &scaler_state->scalers[i];
13994 13976
13995 scaler->in_use = 0; 13977 scaler->in_use = 0;
13996 scaler->mode = PS_SCALER_MODE_DYN; 13978 scaler->mode = 0;
13997 } 13979 }
13998 13980
13999 scaler_state->scaler_id = -1; 13981 scaler_state->scaler_id = -1;
@@ -14088,18 +14070,6 @@ fail:
14088 return ret; 14070 return ret;
14089} 14071}
14090 14072
14091enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
14092{
14093 struct drm_device *dev = connector->base.dev;
14094
14095 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
14096
14097 if (!connector->base.state->crtc)
14098 return INVALID_PIPE;
14099
14100 return to_intel_crtc(connector->base.state->crtc)->pipe;
14101}
14102
14103int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data, 14073int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
14104 struct drm_file *file) 14074 struct drm_file *file)
14105{ 14075{
@@ -14236,6 +14206,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
14236 intel_ddi_init(dev_priv, PORT_D); 14206 intel_ddi_init(dev_priv, PORT_D);
14237 intel_ddi_init(dev_priv, PORT_E); 14207 intel_ddi_init(dev_priv, PORT_E);
14238 intel_ddi_init(dev_priv, PORT_F); 14208 intel_ddi_init(dev_priv, PORT_F);
14209 icl_dsi_init(dev_priv);
14239 } else if (IS_GEN9_LP(dev_priv)) { 14210 } else if (IS_GEN9_LP(dev_priv)) {
14240 /* 14211 /*
14241 * FIXME: Broxton doesn't support port detection via the 14212 * FIXME: Broxton doesn't support port detection via the
@@ -14458,7 +14429,7 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
14458 14429
14459static 14430static
14460u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv, 14431u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv,
14461 uint64_t fb_modifier, uint32_t pixel_format) 14432 u32 pixel_format, u64 fb_modifier)
14462{ 14433{
14463 struct intel_crtc *crtc; 14434 struct intel_crtc *crtc;
14464 struct intel_plane *plane; 14435 struct intel_plane *plane;
@@ -14526,13 +14497,19 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
14526 goto err; 14497 goto err;
14527 } 14498 }
14528 /* fall through */ 14499 /* fall through */
14529 case I915_FORMAT_MOD_Y_TILED:
14530 case I915_FORMAT_MOD_Yf_TILED: 14500 case I915_FORMAT_MOD_Yf_TILED:
14501 if (mode_cmd->pixel_format == DRM_FORMAT_C8) {
14502 DRM_DEBUG_KMS("Indexed format does not support Yf tiling\n");
14503 goto err;
14504 }
14505 /* fall through */
14506 case I915_FORMAT_MOD_Y_TILED:
14531 if (INTEL_GEN(dev_priv) < 9) { 14507 if (INTEL_GEN(dev_priv) < 9) {
14532 DRM_DEBUG_KMS("Unsupported tiling 0x%llx!\n", 14508 DRM_DEBUG_KMS("Unsupported tiling 0x%llx!\n",
14533 mode_cmd->modifier[0]); 14509 mode_cmd->modifier[0]);
14534 goto err; 14510 goto err;
14535 } 14511 }
14512 break;
14536 case DRM_FORMAT_MOD_LINEAR: 14513 case DRM_FORMAT_MOD_LINEAR:
14537 case I915_FORMAT_MOD_X_TILED: 14514 case I915_FORMAT_MOD_X_TILED:
14538 break; 14515 break;
@@ -14552,8 +14529,8 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
14552 goto err; 14529 goto err;
14553 } 14530 }
14554 14531
14555 pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->modifier[0], 14532 pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->pixel_format,
14556 mode_cmd->pixel_format); 14533 mode_cmd->modifier[0]);
14557 if (mode_cmd->pitches[0] > pitch_limit) { 14534 if (mode_cmd->pitches[0] > pitch_limit) {
14558 DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n", 14535 DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
14559 mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ? 14536 mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
@@ -14622,7 +14599,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
14622 break; 14599 break;
14623 case DRM_FORMAT_NV12: 14600 case DRM_FORMAT_NV12:
14624 if (INTEL_GEN(dev_priv) < 9 || IS_SKYLAKE(dev_priv) || 14601 if (INTEL_GEN(dev_priv) < 9 || IS_SKYLAKE(dev_priv) ||
14625 IS_BROXTON(dev_priv) || INTEL_GEN(dev_priv) >= 11) { 14602 IS_BROXTON(dev_priv)) {
14626 DRM_DEBUG_KMS("unsupported pixel format: %s\n", 14603 DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14627 drm_get_format_name(mode_cmd->pixel_format, 14604 drm_get_format_name(mode_cmd->pixel_format,
14628 &format_name)); 14605 &format_name));
@@ -14646,7 +14623,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
14646 fb->height < SKL_MIN_YUV_420_SRC_H || 14623 fb->height < SKL_MIN_YUV_420_SRC_H ||
14647 (fb->width % 4) != 0 || (fb->height % 4) != 0)) { 14624 (fb->width % 4) != 0 || (fb->height % 4) != 0)) {
14648 DRM_DEBUG_KMS("src dimensions not correct for NV12\n"); 14625 DRM_DEBUG_KMS("src dimensions not correct for NV12\n");
14649 return -EINVAL; 14626 goto err;
14650 } 14627 }
14651 14628
14652 for (i = 0; i < fb->format->num_planes; i++) { 14629 for (i = 0; i < fb->format->num_planes; i++) {
@@ -14906,174 +14883,6 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
14906 dev_priv->display.update_crtcs = intel_update_crtcs; 14883 dev_priv->display.update_crtcs = intel_update_crtcs;
14907} 14884}
14908 14885
14909/*
14910 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
14911 */
14912static void quirk_ssc_force_disable(struct drm_device *dev)
14913{
14914 struct drm_i915_private *dev_priv = to_i915(dev);
14915 dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
14916 DRM_INFO("applying lvds SSC disable quirk\n");
14917}
14918
14919/*
14920 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
14921 * brightness value
14922 */
14923static void quirk_invert_brightness(struct drm_device *dev)
14924{
14925 struct drm_i915_private *dev_priv = to_i915(dev);
14926 dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
14927 DRM_INFO("applying inverted panel brightness quirk\n");
14928}
14929
14930/* Some VBTs incorrectly indicate no backlight is present */
14931static void quirk_backlight_present(struct drm_device *dev)
14932{
14933 struct drm_i915_private *dev_priv = to_i915(dev);
14934 dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
14935 DRM_INFO("applying backlight present quirk\n");
14936}
14937
14938/* Toshiba Satellite P50-C-18C requires the T12 delay to be at least 800ms,
14939 * which is 300 ms greater than the eDP spec T12 minimum.
14940 */
14941static void quirk_increase_t12_delay(struct drm_device *dev)
14942{
14943 struct drm_i915_private *dev_priv = to_i915(dev);
14944
14945 dev_priv->quirks |= QUIRK_INCREASE_T12_DELAY;
14946 DRM_INFO("Applying T12 delay quirk\n");
14947}
14948
14949/*
14950 * GeminiLake NUC HDMI outputs require additional off time;
14951 * this allows the onboard retimer to correctly sync to the signal
14952 */
14953static void quirk_increase_ddi_disabled_time(struct drm_device *dev)
14954{
14955 struct drm_i915_private *dev_priv = to_i915(dev);
14956
14957 dev_priv->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME;
14958 DRM_INFO("Applying Increase DDI Disabled quirk\n");
14959}
14960
14961struct intel_quirk {
14962 int device;
14963 int subsystem_vendor;
14964 int subsystem_device;
14965 void (*hook)(struct drm_device *dev);
14966};
14967
14968/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
14969struct intel_dmi_quirk {
14970 void (*hook)(struct drm_device *dev);
14971 const struct dmi_system_id (*dmi_id_list)[];
14972};
14973
14974static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
14975{
14976 DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
14977 return 1;
14978}
14979
14980static const struct intel_dmi_quirk intel_dmi_quirks[] = {
14981 {
14982 .dmi_id_list = &(const struct dmi_system_id[]) {
14983 {
14984 .callback = intel_dmi_reverse_brightness,
14985 .ident = "NCR Corporation",
14986 .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
14987 DMI_MATCH(DMI_PRODUCT_NAME, ""),
14988 },
14989 },
14990 { } /* terminating entry */
14991 },
14992 .hook = quirk_invert_brightness,
14993 },
14994};
14995
14996static struct intel_quirk intel_quirks[] = {
14997 /* Lenovo U160 cannot use SSC on LVDS */
14998 { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
14999
15000 /* Sony Vaio Y cannot use SSC on LVDS */
15001 { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
15002
15003 /* Acer Aspire 5734Z must invert backlight brightness */
15004 { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
15005
15006 /* Acer/eMachines G725 */
15007 { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
15008
15009 /* Acer/eMachines e725 */
15010 { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
15011
15012 /* Acer/Packard Bell NCL20 */
15013 { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
15014
15015 /* Acer Aspire 4736Z */
15016 { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
15017
15018 /* Acer Aspire 5336 */
15019 { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
15020
15021 /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
15022 { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
15023
15024 /* Acer C720 Chromebook (Core i3 4005U) */
15025 { 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
15026
15027 /* Apple Macbook 2,1 (Core 2 T7400) */
15028 { 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
15029
15030 /* Apple Macbook 4,1 */
15031 { 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },
15032
15033 /* Toshiba CB35 Chromebook (Celeron 2955U) */
15034 { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
15035
15036 /* HP Chromebook 14 (Celeron 2955U) */
15037 { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
15038
15039 /* Dell Chromebook 11 */
15040 { 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
15041
15042 /* Dell Chromebook 11 (2015 version) */
15043 { 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
15044
15045 /* Toshiba Satellite P50-C-18C */
15046 { 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },
15047
15048 /* GeminiLake NUC */
15049 { 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
15050 { 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
15051 /* ASRock ITX*/
15052 { 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
15053 { 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
15054};
15055
15056static void intel_init_quirks(struct drm_device *dev)
15057{
15058 struct pci_dev *d = dev->pdev;
15059 int i;
15060
15061 for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
15062 struct intel_quirk *q = &intel_quirks[i];
15063
15064 if (d->device == q->device &&
15065 (d->subsystem_vendor == q->subsystem_vendor ||
15066 q->subsystem_vendor == PCI_ANY_ID) &&
15067 (d->subsystem_device == q->subsystem_device ||
15068 q->subsystem_device == PCI_ANY_ID))
15069 q->hook(dev);
15070 }
15071 for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
15072 if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
15073 intel_dmi_quirks[i].hook(dev);
15074 }
15075}
15076
15077/* Disable the VGA plane that we never use */ 14886/* Disable the VGA plane that we never use */
15078static void i915_disable_vga(struct drm_i915_private *dev_priv) 14887static void i915_disable_vga(struct drm_i915_private *dev_priv)
15079{ 14888{
@@ -15233,6 +15042,14 @@ retry:
15233 ret = drm_atomic_add_affected_planes(state, crtc); 15042 ret = drm_atomic_add_affected_planes(state, crtc);
15234 if (ret) 15043 if (ret)
15235 goto out; 15044 goto out;
15045
15046 /*
15047 * FIXME hack to force a LUT update to avoid the
15048 * plane update forcing the pipe gamma on without
15049 * having a proper LUT loaded. Remove once we
15050 * have readout for pipe gamma enable.
15051 */
15052 crtc_state->color_mgmt_changed = true;
15236 } 15053 }
15237 } 15054 }
15238 15055
@@ -15279,7 +15096,9 @@ int intel_modeset_init(struct drm_device *dev)
15279 INIT_WORK(&dev_priv->atomic_helper.free_work, 15096 INIT_WORK(&dev_priv->atomic_helper.free_work,
15280 intel_atomic_helper_free_state_worker); 15097 intel_atomic_helper_free_state_worker);
15281 15098
15282 intel_init_quirks(dev); 15099 intel_init_quirks(dev_priv);
15100
15101 intel_fbc_init(dev_priv);
15283 15102
15284 intel_init_pm(dev_priv); 15103 intel_init_pm(dev_priv);
15285 15104
@@ -15511,8 +15330,8 @@ intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
15511 if (pipe == crtc->pipe) 15330 if (pipe == crtc->pipe)
15512 continue; 15331 continue;
15513 15332
15514 DRM_DEBUG_KMS("%s attached to the wrong pipe, disabling plane\n", 15333 DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
15515 plane->base.name); 15334 plane->base.base.id, plane->base.name);
15516 15335
15517 plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 15336 plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
15518 intel_plane_disable_noatomic(plane_crtc, plane); 15337 intel_plane_disable_noatomic(plane_crtc, plane);
@@ -15553,7 +15372,8 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
15553{ 15372{
15554 struct drm_device *dev = crtc->base.dev; 15373 struct drm_device *dev = crtc->base.dev;
15555 struct drm_i915_private *dev_priv = to_i915(dev); 15374 struct drm_i915_private *dev_priv = to_i915(dev);
15556 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; 15375 struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
15376 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
15557 15377
15558 /* Clear any frame start delays used for debugging left by the BIOS */ 15378 /* Clear any frame start delays used for debugging left by the BIOS */
15559 if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) { 15379 if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
@@ -15563,7 +15383,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
15563 I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); 15383 I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
15564 } 15384 }
15565 15385
15566 if (crtc->active) { 15386 if (crtc_state->base.active) {
15567 struct intel_plane *plane; 15387 struct intel_plane *plane;
15568 15388
15569 /* Disable everything but the primary plane */ 15389 /* Disable everything but the primary plane */
@@ -15579,10 +15399,10 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
15579 15399
15580 /* Adjust the state of the output pipe according to whether we 15400 /* Adjust the state of the output pipe according to whether we
15581 * have active connectors/encoders. */ 15401 * have active connectors/encoders. */
15582 if (crtc->active && !intel_crtc_has_encoders(crtc)) 15402 if (crtc_state->base.active && !intel_crtc_has_encoders(crtc))
15583 intel_crtc_disable_noatomic(&crtc->base, ctx); 15403 intel_crtc_disable_noatomic(&crtc->base, ctx);
15584 15404
15585 if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) { 15405 if (crtc_state->base.active || HAS_GMCH_DISPLAY(dev_priv)) {
15586 /* 15406 /*
15587 * We start out with underrun reporting disabled to avoid races. 15407 * We start out with underrun reporting disabled to avoid races.
15588 * For correct bookkeeping mark this on active crtcs. 15408 * For correct bookkeeping mark this on active crtcs.
@@ -15613,6 +15433,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
15613 15433
15614static void intel_sanitize_encoder(struct intel_encoder *encoder) 15434static void intel_sanitize_encoder(struct intel_encoder *encoder)
15615{ 15435{
15436 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
15616 struct intel_connector *connector; 15437 struct intel_connector *connector;
15617 15438
15618 /* We need to check both for a crtc link (meaning that the 15439 /* We need to check both for a crtc link (meaning that the
@@ -15636,7 +15457,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
15636 DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n", 15457 DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
15637 encoder->base.base.id, 15458 encoder->base.base.id,
15638 encoder->base.name); 15459 encoder->base.name);
15639 encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state); 15460 if (encoder->disable)
15461 encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
15640 if (encoder->post_disable) 15462 if (encoder->post_disable)
15641 encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state); 15463 encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
15642 } 15464 }
@@ -15653,6 +15475,9 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
15653 15475
15654 /* notify opregion of the sanitized encoder state */ 15476 /* notify opregion of the sanitized encoder state */
15655 intel_opregion_notify_encoder(encoder, connector && has_active_crtc); 15477 intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
15478
15479 if (INTEL_GEN(dev_priv) >= 11)
15480 icl_sanitize_encoder_pll_mapping(encoder);
15656} 15481}
15657 15482
15658void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv) 15483void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
@@ -15701,6 +15526,10 @@ static void readout_plane_state(struct drm_i915_private *dev_priv)
15701 crtc_state = to_intel_crtc_state(crtc->base.state); 15526 crtc_state = to_intel_crtc_state(crtc->base.state);
15702 15527
15703 intel_set_plane_visible(crtc_state, plane_state, visible); 15528 intel_set_plane_visible(crtc_state, plane_state, visible);
15529
15530 DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
15531 plane->base.base.id, plane->base.name,
15532 enableddisabled(visible), pipe_name(pipe));
15704 } 15533 }
15705 15534
15706 for_each_intel_crtc(&dev_priv->drm, crtc) { 15535 for_each_intel_crtc(&dev_priv->drm, crtc) {
@@ -15853,7 +15682,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
15853 15682
15854 drm_calc_timestamping_constants(&crtc->base, 15683 drm_calc_timestamping_constants(&crtc->base,
15855 &crtc_state->base.adjusted_mode); 15684 &crtc_state->base.adjusted_mode);
15856 update_scanline_offset(crtc); 15685 update_scanline_offset(crtc_state);
15857 } 15686 }
15858 15687
15859 dev_priv->min_cdclk[crtc->pipe] = min_cdclk; 15688 dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
@@ -15908,6 +15737,65 @@ static void intel_early_display_was(struct drm_i915_private *dev_priv)
15908 } 15737 }
15909} 15738}
15910 15739
15740static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
15741 enum port port, i915_reg_t hdmi_reg)
15742{
15743 u32 val = I915_READ(hdmi_reg);
15744
15745 if (val & SDVO_ENABLE ||
15746 (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
15747 return;
15748
15749 DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
15750 port_name(port));
15751
15752 val &= ~SDVO_PIPE_SEL_MASK;
15753 val |= SDVO_PIPE_SEL(PIPE_A);
15754
15755 I915_WRITE(hdmi_reg, val);
15756}
15757
15758static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
15759 enum port port, i915_reg_t dp_reg)
15760{
15761 u32 val = I915_READ(dp_reg);
15762
15763 if (val & DP_PORT_EN ||
15764 (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
15765 return;
15766
15767 DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
15768 port_name(port));
15769
15770 val &= ~DP_PIPE_SEL_MASK;
15771 val |= DP_PIPE_SEL(PIPE_A);
15772
15773 I915_WRITE(dp_reg, val);
15774}
15775
15776static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
15777{
15778 /*
15779 * The BIOS may select transcoder B on some of the PCH
15780 * ports even if it doesn't enable the port. This would trip
15781 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
15782 * Sanitize the transcoder select bits to prevent that. We
15783 * assume that the BIOS never actually enabled the port,
15784 * because if it did we'd actually have to toggle the port
15785 * on and back off to make the transcoder A select stick
15786 * (see intel_dp_link_down(), intel_disable_hdmi(),
15787 * intel_disable_sdvo()).
15788 */
15789 ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
15790 ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
15791 ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
15792
15793 /* PCH SDVOB multiplex with HDMIB */
15794 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
15795 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
15796 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
15797}
15798
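/*
 * Illustrative register walk-through for the sanitizer above (bit
 * positions assumed, not stated in the patch): on IBX the PCH DP pipe
 * select sits at bit 30, so a BIOS that parked port B on transcoder B
 * leaves PCH_DP_B reading 0x40000000 with the port disabled. The
 * sanitizer rewrites that to DP_PIPE_SEL(PIPE_A) (0x0), which is the
 * idle value assert_pch_dp_disabled() expects.
 */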
15911/* Scan out the current hw modeset state, 15799/* Scan out the current hw modeset state,
15912 * and sanitizes it to the current state 15800 * and sanitizes it to the current state
15913 */ 15801 */
@@ -15917,6 +15805,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
15917{ 15805{
15918 struct drm_i915_private *dev_priv = to_i915(dev); 15806 struct drm_i915_private *dev_priv = to_i915(dev);
15919 struct intel_crtc *crtc; 15807 struct intel_crtc *crtc;
15808 struct intel_crtc_state *crtc_state;
15920 struct intel_encoder *encoder; 15809 struct intel_encoder *encoder;
15921 int i; 15810 int i;
15922 15811
@@ -15928,6 +15817,9 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
15928 /* HW state is read out, now we need to sanitize this mess. */ 15817 /* HW state is read out, now we need to sanitize this mess. */
15929 get_encoder_power_domains(dev_priv); 15818 get_encoder_power_domains(dev_priv);
15930 15819
15820 if (HAS_PCH_IBX(dev_priv))
15821 ibx_sanitize_pch_ports(dev_priv);
15822
15931 /* 15823 /*
15932 * intel_sanitize_plane_mapping() may need to do vblank 15824 * intel_sanitize_plane_mapping() may need to do vblank
15933 * waits, so we need vblank interrupts restored beforehand. 15825 * waits, so we need vblank interrupts restored beforehand.
@@ -15935,7 +15827,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
15935 for_each_intel_crtc(&dev_priv->drm, crtc) { 15827 for_each_intel_crtc(&dev_priv->drm, crtc) {
15936 drm_crtc_vblank_reset(&crtc->base); 15828 drm_crtc_vblank_reset(&crtc->base);
15937 15829
15938 if (crtc->active) 15830 if (crtc->base.state->active)
15939 drm_crtc_vblank_on(&crtc->base); 15831 drm_crtc_vblank_on(&crtc->base);
15940 } 15832 }
15941 15833
@@ -15945,8 +15837,9 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
15945 intel_sanitize_encoder(encoder); 15837 intel_sanitize_encoder(encoder);
15946 15838
15947 for_each_intel_crtc(&dev_priv->drm, crtc) { 15839 for_each_intel_crtc(&dev_priv->drm, crtc) {
15840 crtc_state = to_intel_crtc_state(crtc->base.state);
15948 intel_sanitize_crtc(crtc, ctx); 15841 intel_sanitize_crtc(crtc, ctx);
15949 intel_dump_pipe_config(crtc, crtc->config, 15842 intel_dump_pipe_config(crtc, crtc_state,
15950 "[setup_hw_state]"); 15843 "[setup_hw_state]");
15951 } 15844 }
15952 15845
@@ -15980,7 +15873,8 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
15980 for_each_intel_crtc(dev, crtc) { 15873 for_each_intel_crtc(dev, crtc) {
15981 u64 put_domains; 15874 u64 put_domains;
15982 15875
15983 put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config); 15876 crtc_state = to_intel_crtc_state(crtc->base.state);
15877 put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc_state);
15984 if (WARN_ON(put_domains)) 15878 if (WARN_ON(put_domains))
15985 modeset_put_power_domains(dev_priv, put_domains); 15879 modeset_put_power_domains(dev_priv, put_domains);
15986 } 15880 }
@@ -16024,29 +15918,6 @@ void intel_display_resume(struct drm_device *dev)
16024 drm_atomic_state_put(state); 15918 drm_atomic_state_put(state);
16025} 15919}
16026 15920
16027int intel_connector_register(struct drm_connector *connector)
16028{
16029 struct intel_connector *intel_connector = to_intel_connector(connector);
16030 int ret;
16031
16032 ret = intel_backlight_device_register(intel_connector);
16033 if (ret)
16034 goto err;
16035
16036 return 0;
16037
16038err:
16039 return ret;
16040}
16041
16042void intel_connector_unregister(struct drm_connector *connector)
16043{
16044 struct intel_connector *intel_connector = to_intel_connector(connector);
16045
16046 intel_backlight_device_unregister(intel_connector);
16047 intel_panel_destroy_backlight(connector);
16048}
16049
16050static void intel_hpd_poll_fini(struct drm_device *dev) 15921static void intel_hpd_poll_fini(struct drm_device *dev)
16051{ 15922{
16052 struct intel_connector *connector; 15923 struct intel_connector *connector;
@@ -16057,9 +15928,9 @@ static void intel_hpd_poll_fini(struct drm_device *dev)
16057 for_each_intel_connector_iter(connector, &conn_iter) { 15928 for_each_intel_connector_iter(connector, &conn_iter) {
16058 if (connector->modeset_retry_work.func) 15929 if (connector->modeset_retry_work.func)
16059 cancel_work_sync(&connector->modeset_retry_work); 15930 cancel_work_sync(&connector->modeset_retry_work);
16060 if (connector->hdcp_shim) { 15931 if (connector->hdcp.shim) {
16061 cancel_delayed_work_sync(&connector->hdcp_check_work); 15932 cancel_delayed_work_sync(&connector->hdcp.check_work);
16062 cancel_work_sync(&connector->hdcp_prop_work); 15933 cancel_work_sync(&connector->hdcp.prop_work);
16063 } 15934 }
16064 } 15935 }
16065 drm_connector_list_iter_end(&conn_iter); 15936 drm_connector_list_iter_end(&conn_iter);
@@ -16099,18 +15970,13 @@ void intel_modeset_cleanup(struct drm_device *dev)
16099 15970
16100 drm_mode_config_cleanup(dev); 15971 drm_mode_config_cleanup(dev);
16101 15972
16102 intel_cleanup_overlay(dev_priv); 15973 intel_overlay_cleanup(dev_priv);
16103 15974
16104 intel_teardown_gmbus(dev_priv); 15975 intel_teardown_gmbus(dev_priv);
16105 15976
16106 destroy_workqueue(dev_priv->modeset_wq); 15977 destroy_workqueue(dev_priv->modeset_wq);
16107}
16108 15978
16109void intel_connector_attach_encoder(struct intel_connector *connector, 15979 intel_fbc_cleanup_cfb(dev_priv);
16110 struct intel_encoder *encoder)
16111{
16112 connector->encoder = encoder;
16113 drm_connector_attach_encoder(&connector->base, &encoder->base);
16114} 15980}
16115 15981
16116/* 15982/*
diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h
index 9fac67e31205..5f2955b944da 100644
--- a/drivers/gpu/drm/i915/intel_display.h
+++ b/drivers/gpu/drm/i915/intel_display.h
@@ -43,6 +43,11 @@ enum i915_gpio {
43 GPIOM, 43 GPIOM,
44}; 44};
45 45
46/*
47 * Keep the pipe enum values fixed: the code assumes that PIPE_A=0, the
48 * rest have consecutive values and match the enum values of transcoders
49 * with a 1:1 transcoder -> pipe mapping.
50 */
46enum pipe { 51enum pipe {
47 INVALID_PIPE = -1, 52 INVALID_PIPE = -1,
48 53
@@ -57,12 +62,25 @@ enum pipe {
57#define pipe_name(p) ((p) + 'A') 62#define pipe_name(p) ((p) + 'A')
58 63
59enum transcoder { 64enum transcoder {
60 TRANSCODER_A = 0, 65 /*
61 TRANSCODER_B, 66 * The following transcoders have a 1:1 transcoder -> pipe mapping,
62 TRANSCODER_C, 67 * keep their values fixed: the code assumes that TRANSCODER_A=0, the
68 * rest have consecutive values and match the enum values of the pipes
69 * they map to.
70 */
71 TRANSCODER_A = PIPE_A,
72 TRANSCODER_B = PIPE_B,
73 TRANSCODER_C = PIPE_C,
74
75 /*
76 * The following transcoders can map to any pipe, their enum value
77 * doesn't need to stay fixed.
78 */
63 TRANSCODER_EDP, 79 TRANSCODER_EDP,
64 TRANSCODER_DSI_A, 80 TRANSCODER_DSI_0,
65 TRANSCODER_DSI_C, 81 TRANSCODER_DSI_1,
82 TRANSCODER_DSI_A = TRANSCODER_DSI_0, /* legacy DSI */
83 TRANSCODER_DSI_C = TRANSCODER_DSI_1, /* legacy DSI */
66 84
67 I915_MAX_TRANSCODERS 85 I915_MAX_TRANSCODERS
68}; 86};
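/*
 * A minimal sketch of what the fixed enum values above buy us: for the
 * 1:1-mapped transcoders, a pipe converts with a plain cast. The helper
 * name is hypothetical, not from this patch:
 */
static inline enum transcoder pipe_to_transcoder(enum pipe pipe)
{
	/* Only valid for PIPE_A..PIPE_C, which mirror TRANSCODER_A..C. */
	return (enum transcoder)pipe;
}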
@@ -120,6 +138,9 @@ enum plane_id {
120 PLANE_SPRITE0, 138 PLANE_SPRITE0,
121 PLANE_SPRITE1, 139 PLANE_SPRITE1,
122 PLANE_SPRITE2, 140 PLANE_SPRITE2,
141 PLANE_SPRITE3,
142 PLANE_SPRITE4,
143 PLANE_SPRITE5,
123 PLANE_CURSOR, 144 PLANE_CURSOR,
124 145
125 I915_MAX_PLANES, 146 I915_MAX_PLANES,
@@ -363,7 +384,7 @@ struct intel_link_m_n {
363 (__dev_priv)->power_domains.power_well_count; \ 384 (__dev_priv)->power_domains.power_well_count; \
364 (__power_well)++) 385 (__power_well)++)
365 386
366#define for_each_power_well_rev(__dev_priv, __power_well) \ 387#define for_each_power_well_reverse(__dev_priv, __power_well) \
367 for ((__power_well) = (__dev_priv)->power_domains.power_wells + \ 388 for ((__power_well) = (__dev_priv)->power_domains.power_wells + \
368 (__dev_priv)->power_domains.power_well_count - 1; \ 389 (__dev_priv)->power_domains.power_well_count - 1; \
369 (__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \ 390 (__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \
@@ -373,8 +394,8 @@ struct intel_link_m_n {
373 for_each_power_well(__dev_priv, __power_well) \ 394 for_each_power_well(__dev_priv, __power_well) \
374 for_each_if((__power_well)->desc->domains & (__domain_mask)) 395 for_each_if((__power_well)->desc->domains & (__domain_mask))
375 396
376#define for_each_power_domain_well_rev(__dev_priv, __power_well, __domain_mask) \ 397#define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain_mask) \
377 for_each_power_well_rev(__dev_priv, __power_well) \ 398 for_each_power_well_reverse(__dev_priv, __power_well) \
378 for_each_if((__power_well)->desc->domains & (__domain_mask)) 399 for_each_if((__power_well)->desc->domains & (__domain_mask))
379 400
380#define for_each_new_intel_plane_in_state(__state, plane, new_plane_state, __i) \ 401#define for_each_new_intel_plane_in_state(__state, plane, new_plane_state, __i) \
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 13f9b56a9ce7..7699f9b7b2d2 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -45,6 +45,17 @@
45 45
46#define DP_DPRX_ESI_LEN 14 46#define DP_DPRX_ESI_LEN 14
47 47
48/* DP DSC small joiner has 2 FIFOs each of 640 x 6 bytes */
49#define DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER 61440
50
51/* DP DSC throughput values used for slice count calculations, in KPixels/s */
52#define DP_DSC_PEAK_PIXEL_RATE 2720000
53#define DP_DSC_MAX_ENC_THROUGHPUT_0 340000
54#define DP_DSC_MAX_ENC_THROUGHPUT_1 400000
55
56/* DP DSC FEC Overhead factor = (100 - 2.4)/100 */
57#define DP_DSC_FEC_OVERHEAD_FACTOR 976
58
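/*
 * Unit check on the constants above (derivation assumed, not stated in
 * the patch): the small joiner buffer is 2 FIFOs * 640 * 6 bytes =
 * 7680 bytes = 61440 bits, matching DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER,
 * and DP_DSC_FEC_OVERHEAD_FACTOR is (100 - 2.4)/100 = 0.976 scaled by
 * 1000, so a FEC-adjusted rate would be computed along the lines of:
 *
 *	rate = rate * DP_DSC_FEC_OVERHEAD_FACTOR / 1000;
 */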
48/* Compliance test status bits */ 59/* Compliance test status bits */
49#define INTEL_DP_RESOLUTION_SHIFT_MASK 0 60#define INTEL_DP_RESOLUTION_SHIFT_MASK 0
50#define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK) 61#define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
@@ -93,6 +104,14 @@ static const struct dp_link_dpll chv_dpll[] = {
93 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }, 104 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
94}; 105};
95 106
107/* Constants for DP DSC configurations */
108static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};
109
110/* With a single pipe configuration, the HW is capable of supporting a
111 * maximum of 4 slices per line.
112 */
113static const u8 valid_dsc_slicecount[] = {1, 2, 4};
114
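/*
 * A minimal sketch (helper name and limit parameter hypothetical) of
 * how the table above is typically consumed: walk the valid counts in
 * ascending order and take the first one that keeps the per-slice
 * throughput within the encoder's limit.
 */
static u8 pick_dsc_slice_count(int pixel_rate_khz, int max_slice_rate_khz)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
		/* Throughput per slice must fit the DSC engine limit. */
		if (pixel_rate_khz / valid_dsc_slicecount[i] <= max_slice_rate_khz)
			return valid_dsc_slicecount[i];
	}

	return 0; /* no supported slice count is fast enough */
}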
96/** 115/**
97 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH) 116 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
98 * @intel_dp: DP struct 117 * @intel_dp: DP struct
@@ -222,138 +241,6 @@ intel_dp_link_required(int pixel_clock, int bpp)
222 return DIV_ROUND_UP(pixel_clock * bpp, 8); 241 return DIV_ROUND_UP(pixel_clock * bpp, 8);
223} 242}
224 243
225void icl_program_mg_dp_mode(struct intel_dp *intel_dp)
226{
227 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
228 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
229 enum port port = intel_dig_port->base.port;
230 enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
231 u32 ln0, ln1, lane_info;
232
233 if (tc_port == PORT_TC_NONE || intel_dig_port->tc_type == TC_PORT_TBT)
234 return;
235
236 ln0 = I915_READ(MG_DP_MODE(port, 0));
237 ln1 = I915_READ(MG_DP_MODE(port, 1));
238
239 switch (intel_dig_port->tc_type) {
240 case TC_PORT_TYPEC:
241 ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
242 ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
243
244 lane_info = (I915_READ(PORT_TX_DFLEXDPSP) &
245 DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
246 DP_LANE_ASSIGNMENT_SHIFT(tc_port);
247
248 switch (lane_info) {
249 case 0x1:
250 case 0x4:
251 break;
252 case 0x2:
253 ln0 |= MG_DP_MODE_CFG_DP_X1_MODE;
254 break;
255 case 0x3:
256 ln0 |= MG_DP_MODE_CFG_DP_X1_MODE |
257 MG_DP_MODE_CFG_DP_X2_MODE;
258 break;
259 case 0x8:
260 ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
261 break;
262 case 0xC:
263 ln1 |= MG_DP_MODE_CFG_DP_X1_MODE |
264 MG_DP_MODE_CFG_DP_X2_MODE;
265 break;
266 case 0xF:
267 ln0 |= MG_DP_MODE_CFG_DP_X1_MODE |
268 MG_DP_MODE_CFG_DP_X2_MODE;
269 ln1 |= MG_DP_MODE_CFG_DP_X1_MODE |
270 MG_DP_MODE_CFG_DP_X2_MODE;
271 break;
272 default:
273 MISSING_CASE(lane_info);
274 }
275 break;
276
277 case TC_PORT_LEGACY:
278 ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE;
279 ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE;
280 break;
281
282 default:
283 MISSING_CASE(intel_dig_port->tc_type);
284 return;
285 }
286
287 I915_WRITE(MG_DP_MODE(port, 0), ln0);
288 I915_WRITE(MG_DP_MODE(port, 1), ln1);
289}
290
291void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port)
292{
293 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
294 enum port port = dig_port->base.port;
295 enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
296 i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) };
297 u32 val;
298 int i;
299
300 if (tc_port == PORT_TC_NONE)
301 return;
302
303 for (i = 0; i < ARRAY_SIZE(mg_regs); i++) {
304 val = I915_READ(mg_regs[i]);
305 val |= MG_DP_MODE_CFG_TR2PWR_GATING |
306 MG_DP_MODE_CFG_TRPWR_GATING |
307 MG_DP_MODE_CFG_CLNPWR_GATING |
308 MG_DP_MODE_CFG_DIGPWR_GATING |
309 MG_DP_MODE_CFG_GAONPWR_GATING;
310 I915_WRITE(mg_regs[i], val);
311 }
312
313 val = I915_READ(MG_MISC_SUS0(tc_port));
314 val |= MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(3) |
315 MG_MISC_SUS0_CFG_TR2PWR_GATING |
316 MG_MISC_SUS0_CFG_CL2PWR_GATING |
317 MG_MISC_SUS0_CFG_GAONPWR_GATING |
318 MG_MISC_SUS0_CFG_TRPWR_GATING |
319 MG_MISC_SUS0_CFG_CL1PWR_GATING |
320 MG_MISC_SUS0_CFG_DGPWR_GATING;
321 I915_WRITE(MG_MISC_SUS0(tc_port), val);
322}
323
324void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port)
325{
326 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
327 enum port port = dig_port->base.port;
328 enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
329 i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) };
330 u32 val;
331 int i;
332
333 if (tc_port == PORT_TC_NONE)
334 return;
335
336 for (i = 0; i < ARRAY_SIZE(mg_regs); i++) {
337 val = I915_READ(mg_regs[i]);
338 val &= ~(MG_DP_MODE_CFG_TR2PWR_GATING |
339 MG_DP_MODE_CFG_TRPWR_GATING |
340 MG_DP_MODE_CFG_CLNPWR_GATING |
341 MG_DP_MODE_CFG_DIGPWR_GATING |
342 MG_DP_MODE_CFG_GAONPWR_GATING);
343 I915_WRITE(mg_regs[i], val);
344 }
345
346 val = I915_READ(MG_MISC_SUS0(tc_port));
347 val &= ~(MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK |
348 MG_MISC_SUS0_CFG_TR2PWR_GATING |
349 MG_MISC_SUS0_CFG_CL2PWR_GATING |
350 MG_MISC_SUS0_CFG_GAONPWR_GATING |
351 MG_MISC_SUS0_CFG_TRPWR_GATING |
352 MG_MISC_SUS0_CFG_CL1PWR_GATING |
353 MG_MISC_SUS0_CFG_DGPWR_GATING);
354 I915_WRITE(MG_MISC_SUS0(tc_port), val);
355}
356
357int 244int
358intel_dp_max_data_rate(int max_link_clock, int max_lanes) 245intel_dp_max_data_rate(int max_link_clock, int max_lanes)
359{ 246{
@@ -455,7 +342,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
455 if (INTEL_GEN(dev_priv) >= 10) { 342 if (INTEL_GEN(dev_priv) >= 10) {
456 source_rates = cnl_rates; 343 source_rates = cnl_rates;
457 size = ARRAY_SIZE(cnl_rates); 344 size = ARRAY_SIZE(cnl_rates);
458 if (INTEL_GEN(dev_priv) == 10) 345 if (IS_GEN10(dev_priv))
459 max_rate = cnl_max_source_rate(intel_dp); 346 max_rate = cnl_max_source_rate(intel_dp);
460 else 347 else
461 max_rate = icl_max_source_rate(intel_dp); 348 max_rate = icl_max_source_rate(intel_dp);
@@ -616,9 +503,12 @@ intel_dp_mode_valid(struct drm_connector *connector,
616 struct intel_dp *intel_dp = intel_attached_dp(connector); 503 struct intel_dp *intel_dp = intel_attached_dp(connector);
617 struct intel_connector *intel_connector = to_intel_connector(connector); 504 struct intel_connector *intel_connector = to_intel_connector(connector);
618 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; 505 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
506 struct drm_i915_private *dev_priv = to_i915(connector->dev);
619 int target_clock = mode->clock; 507 int target_clock = mode->clock;
620 int max_rate, mode_rate, max_lanes, max_link_clock; 508 int max_rate, mode_rate, max_lanes, max_link_clock;
621 int max_dotclk; 509 int max_dotclk;
510 u16 dsc_max_output_bpp = 0;
511 u8 dsc_slice_count = 0;
622 512
623 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 513 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
624 return MODE_NO_DBLESCAN; 514 return MODE_NO_DBLESCAN;
@@ -641,7 +531,33 @@ intel_dp_mode_valid(struct drm_connector *connector,
641 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); 531 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
642 mode_rate = intel_dp_link_required(target_clock, 18); 532 mode_rate = intel_dp_link_required(target_clock, 18);
643 533
644 if (mode_rate > max_rate || target_clock > max_dotclk) 534 /*
 535 * Output bpp is stored in 6.4 fixed-point format, so right shift by 4
 536 * to get the integer value, since we support only integer values of bpp.
537 */
538 if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
539 drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
540 if (intel_dp_is_edp(intel_dp)) {
541 dsc_max_output_bpp =
542 drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
543 dsc_slice_count =
544 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
545 true);
546 } else {
547 dsc_max_output_bpp =
548 intel_dp_dsc_get_output_bpp(max_link_clock,
549 max_lanes,
550 target_clock,
551 mode->hdisplay) >> 4;
552 dsc_slice_count =
553 intel_dp_dsc_get_slice_count(intel_dp,
554 target_clock,
555 mode->hdisplay);
556 }
557 }
558
559 if ((mode_rate > max_rate && !(dsc_max_output_bpp && dsc_slice_count)) ||
560 target_clock > max_dotclk)
645 return MODE_CLOCK_HIGH; 561 return MODE_CLOCK_HIGH;
646 562
647 if (mode->clock < 10000) 563 if (mode->clock < 10000)
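/*
 * Worked example of the 6.4 fixed-point conversion noted above (sink
 * value assumed): a DPCD output bpp of 0x78 encodes 0111.1000b, i.e.
 * 7.5 bpp, and ">> 4" keeps only the integer part:
 *
 *	u16 dsc_bpp = 0x78 >> 4;	/* == 7, the .5 is discarded */
 */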
@@ -690,7 +606,8 @@ static void pps_lock(struct intel_dp *intel_dp)
690 * See intel_power_sequencer_reset() why we need 606 * See intel_power_sequencer_reset() why we need
691 * a power domain reference here. 607 * a power domain reference here.
692 */ 608 */
693 intel_display_power_get(dev_priv, intel_dp->aux_power_domain); 609 intel_display_power_get(dev_priv,
610 intel_aux_power_domain(dp_to_dig_port(intel_dp)));
694 611
695 mutex_lock(&dev_priv->pps_mutex); 612 mutex_lock(&dev_priv->pps_mutex);
696} 613}
@@ -701,7 +618,8 @@ static void pps_unlock(struct intel_dp *intel_dp)
701 618
702 mutex_unlock(&dev_priv->pps_mutex); 619 mutex_unlock(&dev_priv->pps_mutex);
703 620
704 intel_display_power_put(dev_priv, intel_dp->aux_power_domain); 621 intel_display_power_put(dev_priv,
622 intel_aux_power_domain(dp_to_dig_port(intel_dp)));
705} 623}
706 624
707static void 625static void
@@ -1156,6 +1074,7 @@ static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
1156static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index) 1074static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
1157{ 1075{
1158 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1076 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1077 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1159 1078
1160 if (index) 1079 if (index)
1161 return 0; 1080 return 0;
@@ -1165,7 +1084,7 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
1165 * like to run at 2MHz. So, take the cdclk or PCH rawclk value and 1084 * like to run at 2MHz. So, take the cdclk or PCH rawclk value and
1166 * divide by 2000 and use that 1085 * divide by 2000 and use that
1167 */ 1086 */
1168 if (intel_dp->aux_ch == AUX_CH_A) 1087 if (dig_port->aux_ch == AUX_CH_A)
1169 return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000); 1088 return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000);
1170 else 1089 else
1171 return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000); 1090 return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
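/*
 * Worked example of the divide-by-2000 above (clock value assumed):
 * with cdclk = 450000 kHz, DIV_ROUND_CLOSEST(450000, 2000) = 225, and
 * 450000 kHz / 225 yields the 2 MHz AUX clock the comment asks for.
 */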
@@ -1174,8 +1093,9 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
1174static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index) 1093static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
1175{ 1094{
1176 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1095 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1096 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1177 1097
1178 if (intel_dp->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) { 1098 if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
1179 /* Workaround for non-ULT HSW */ 1099 /* Workaround for non-ULT HSW */
1180 switch (index) { 1100 switch (index) {
1181 case 0: return 63; 1101 case 0: return 63;
@@ -1503,80 +1423,12 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
1503 return ret; 1423 return ret;
1504} 1424}
1505 1425
1506static enum aux_ch intel_aux_ch(struct intel_dp *intel_dp)
1507{
1508 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
1509 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1510 enum port port = encoder->port;
1511 const struct ddi_vbt_port_info *info =
1512 &dev_priv->vbt.ddi_port_info[port];
1513 enum aux_ch aux_ch;
1514
1515 if (!info->alternate_aux_channel) {
1516 aux_ch = (enum aux_ch) port;
1517
1518 DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n",
1519 aux_ch_name(aux_ch), port_name(port));
1520 return aux_ch;
1521 }
1522
1523 switch (info->alternate_aux_channel) {
1524 case DP_AUX_A:
1525 aux_ch = AUX_CH_A;
1526 break;
1527 case DP_AUX_B:
1528 aux_ch = AUX_CH_B;
1529 break;
1530 case DP_AUX_C:
1531 aux_ch = AUX_CH_C;
1532 break;
1533 case DP_AUX_D:
1534 aux_ch = AUX_CH_D;
1535 break;
1536 case DP_AUX_E:
1537 aux_ch = AUX_CH_E;
1538 break;
1539 case DP_AUX_F:
1540 aux_ch = AUX_CH_F;
1541 break;
1542 default:
1543 MISSING_CASE(info->alternate_aux_channel);
1544 aux_ch = AUX_CH_A;
1545 break;
1546 }
1547
1548 DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n",
1549 aux_ch_name(aux_ch), port_name(port));
1550
1551 return aux_ch;
1552}
1553
1554static enum intel_display_power_domain
1555intel_aux_power_domain(struct intel_dp *intel_dp)
1556{
1557 switch (intel_dp->aux_ch) {
1558 case AUX_CH_A:
1559 return POWER_DOMAIN_AUX_A;
1560 case AUX_CH_B:
1561 return POWER_DOMAIN_AUX_B;
1562 case AUX_CH_C:
1563 return POWER_DOMAIN_AUX_C;
1564 case AUX_CH_D:
1565 return POWER_DOMAIN_AUX_D;
1566 case AUX_CH_E:
1567 return POWER_DOMAIN_AUX_E;
1568 case AUX_CH_F:
1569 return POWER_DOMAIN_AUX_F;
1570 default:
1571 MISSING_CASE(intel_dp->aux_ch);
1572 return POWER_DOMAIN_AUX_A;
1573 }
1574}
1575 1426
1576static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp) 1427static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
1577{ 1428{
1578 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1429 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1579 enum aux_ch aux_ch = intel_dp->aux_ch; 1430 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1431 enum aux_ch aux_ch = dig_port->aux_ch;
1580 1432
1581 switch (aux_ch) { 1433 switch (aux_ch) {
1582 case AUX_CH_B: 1434 case AUX_CH_B:
@@ -1592,7 +1444,8 @@ static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
1592static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index) 1444static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
1593{ 1445{
1594 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1446 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1595 enum aux_ch aux_ch = intel_dp->aux_ch; 1447 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1448 enum aux_ch aux_ch = dig_port->aux_ch;
1596 1449
1597 switch (aux_ch) { 1450 switch (aux_ch) {
1598 case AUX_CH_B: 1451 case AUX_CH_B:
@@ -1608,7 +1461,8 @@ static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
1608static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp) 1461static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
1609{ 1462{
1610 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1463 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1611 enum aux_ch aux_ch = intel_dp->aux_ch; 1464 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1465 enum aux_ch aux_ch = dig_port->aux_ch;
1612 1466
1613 switch (aux_ch) { 1467 switch (aux_ch) {
1614 case AUX_CH_A: 1468 case AUX_CH_A:
@@ -1626,7 +1480,8 @@ static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
1626static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index) 1480static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
1627{ 1481{
1628 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1482 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1629 enum aux_ch aux_ch = intel_dp->aux_ch; 1483 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1484 enum aux_ch aux_ch = dig_port->aux_ch;
1630 1485
1631 switch (aux_ch) { 1486 switch (aux_ch) {
1632 case AUX_CH_A: 1487 case AUX_CH_A:
@@ -1644,7 +1499,8 @@ static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
1644static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp) 1499static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
1645{ 1500{
1646 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1501 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1647 enum aux_ch aux_ch = intel_dp->aux_ch; 1502 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1503 enum aux_ch aux_ch = dig_port->aux_ch;
1648 1504
1649 switch (aux_ch) { 1505 switch (aux_ch) {
1650 case AUX_CH_A: 1506 case AUX_CH_A:
@@ -1663,7 +1519,8 @@ static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
1663static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index) 1519static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
1664{ 1520{
1665 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1521 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1666 enum aux_ch aux_ch = intel_dp->aux_ch; 1522 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1523 enum aux_ch aux_ch = dig_port->aux_ch;
1667 1524
1668 switch (aux_ch) { 1525 switch (aux_ch) {
1669 case AUX_CH_A: 1526 case AUX_CH_A:
@@ -1689,10 +1546,8 @@ static void
1689intel_dp_aux_init(struct intel_dp *intel_dp) 1546intel_dp_aux_init(struct intel_dp *intel_dp)
1690{ 1547{
1691 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1548 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1692 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 1549 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1693 1550 struct intel_encoder *encoder = &dig_port->base;
1694 intel_dp->aux_ch = intel_aux_ch(intel_dp);
1695 intel_dp->aux_power_domain = intel_aux_power_domain(intel_dp);
1696 1551
1697 if (INTEL_GEN(dev_priv) >= 9) { 1552 if (INTEL_GEN(dev_priv) >= 9) {
1698 intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg; 1553 intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
@@ -1951,6 +1806,42 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
1951 return false; 1806 return false;
1952} 1807}
1953 1808
1809/* Optimize link config in order: max bpp, min lanes, min clock */
1810static bool
1811intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
1812 struct intel_crtc_state *pipe_config,
1813 const struct link_config_limits *limits)
1814{
1815 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1816 int bpp, clock, lane_count;
1817 int mode_rate, link_clock, link_avail;
1818
1819 for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
1820 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1821 bpp);
1822
1823 for (lane_count = limits->min_lane_count;
1824 lane_count <= limits->max_lane_count;
1825 lane_count <<= 1) {
1826 for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
1827 link_clock = intel_dp->common_rates[clock];
1828 link_avail = intel_dp_max_data_rate(link_clock,
1829 lane_count);
1830
1831 if (mode_rate <= link_avail) {
1832 pipe_config->lane_count = lane_count;
1833 pipe_config->pipe_bpp = bpp;
1834 pipe_config->port_clock = link_clock;
1835
1836 return true;
1837 }
1838 }
1839 }
1840 }
1841
1842 return false;
1843}
1844
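/*
 * Search-order sketch for the function above (limit values assumed):
 * with bpp limits {18..30}, lanes {1, 2, 4} and clocks {162000,
 * 270000}, the loops visit
 *
 *	(30, 1, 162000), (30, 1, 270000), (30, 2, 162000), ...
 *
 * and return at the first combination whose link_avail covers
 * mode_rate: highest bpp first, then fewest lanes, then lowest clock,
 * which is what "fast and narrow" means below.
 */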
1954static bool 1845static bool
1955intel_dp_compute_link_config(struct intel_encoder *encoder, 1846intel_dp_compute_link_config(struct intel_encoder *encoder,
1956 struct intel_crtc_state *pipe_config) 1847 struct intel_crtc_state *pipe_config)
@@ -1975,13 +1866,15 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
1975 limits.min_bpp = 6 * 3; 1866 limits.min_bpp = 6 * 3;
1976 limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config); 1867 limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
1977 1868
1978 if (intel_dp_is_edp(intel_dp)) { 1869 if (intel_dp_is_edp(intel_dp) && intel_dp->edp_dpcd[0] < DP_EDP_14) {
1979 /* 1870 /*
1980 * Use the maximum clock and number of lanes the eDP panel 1871 * Use the maximum clock and number of lanes the eDP panel
1981 * advertises being capable of. The panels are generally 1872 * advertises being capable of. The eDP 1.3 and earlier panels
1982 * designed to support only a single clock and lane 1873 * are generally designed to support only a single clock and
1983 * configuration, and typically these values correspond to the 1874 * lane configuration, and typically these values correspond to
1984 * native resolution of the panel. 1875 * the native resolution of the panel. With eDP 1.4 rate select
1876 * and DSC, this is decreasingly the case, and we need to be
1877 * able to select less than maximum link config.
1985 */ 1878 */
1986 limits.min_lane_count = limits.max_lane_count; 1879 limits.min_lane_count = limits.max_lane_count;
1987 limits.min_clock = limits.max_clock; 1880 limits.min_clock = limits.max_clock;
@@ -1995,12 +1888,25 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
1995 intel_dp->common_rates[limits.max_clock], 1888 intel_dp->common_rates[limits.max_clock],
1996 limits.max_bpp, adjusted_mode->crtc_clock); 1889 limits.max_bpp, adjusted_mode->crtc_clock);
1997 1890
1998 /* 1891 if (intel_dp_is_edp(intel_dp)) {
1999 * Optimize for slow and wide. This is the place to add alternative 1892 /*
2000 * optimization policy. 1893 * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
2001 */ 1894 * section A.1: "It is recommended that the minimum number of
2002 if (!intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits)) 1895 * lanes be used, using the minimum link rate allowed for that
2003 return false; 1896 * lane configuration."
1897 *
1898 * Note that we use the max clock and lane count for eDP 1.3 and
1899 * earlier, and fast vs. wide is irrelevant.
1900 */
1901 if (!intel_dp_compute_link_config_fast(intel_dp, pipe_config,
1902 &limits))
1903 return false;
1904 } else {
1905 /* Optimize for slow and wide. */
1906 if (!intel_dp_compute_link_config_wide(intel_dp, pipe_config,
1907 &limits))
1908 return false;
1909 }
2004 1910
2005 DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n", 1911 DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n",
2006 pipe_config->lane_count, pipe_config->port_clock, 1912 pipe_config->lane_count, pipe_config->port_clock,
@@ -2023,6 +1929,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
2023 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 1929 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2024 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 1930 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
2025 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1931 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1932 struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base);
2026 enum port port = encoder->port; 1933 enum port port = encoder->port;
2027 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc); 1934 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
2028 struct intel_connector *intel_connector = intel_dp->attached_connector; 1935 struct intel_connector *intel_connector = intel_dp->attached_connector;
@@ -2034,6 +1941,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 	if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
 		pipe_config->has_pch_encoder = true;
 
+	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+	if (lspcon->active)
+		lspcon_ycbcr420_config(&intel_connector->base, pipe_config);
+
 	pipe_config->has_drrs = false;
 	if (IS_G4X(dev_priv) || port == PORT_A)
 		pipe_config->has_audio = false;
@@ -2338,7 +2249,8 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
 	if (edp_have_panel_vdd(intel_dp))
 		return need_to_disable;
 
-	intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
+	intel_display_power_get(dev_priv,
+				intel_aux_power_domain(intel_dig_port));
 
 	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
 		      port_name(intel_dig_port->base.port));
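This intel_aux_power_domain(intel_dig_port) call is the recurring theme of the rest of this diff: the AUX power domain is now derived from the port's AUX channel on demand rather than cached in intel_dp->aux_power_domain. The mapping it performs is essentially a switch over the channel; the sketch below is abbreviated and its fallback is illustrative only (the real helper covers every channel):

	static enum intel_display_power_domain
	example_aux_power_domain(enum aux_ch aux_ch)
	{
		switch (aux_ch) {
		case AUX_CH_A:
			return POWER_DOMAIN_AUX_A;
		case AUX_CH_B:
			return POWER_DOMAIN_AUX_B;
		default:
			/* illustrative fallback only */
			return POWER_DOMAIN_AUX_A;
		}
	}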
@@ -2424,7 +2336,8 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
 	if ((pp & PANEL_POWER_ON) == 0)
 		intel_dp->panel_power_off_time = ktime_get_boottime();
 
-	intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
+	intel_display_power_put(dev_priv,
+				intel_aux_power_domain(intel_dig_port));
 }
 
 static void edp_panel_vdd_work(struct work_struct *__work)
@@ -2537,6 +2450,7 @@ void intel_edp_panel_on(struct intel_dp *intel_dp)
 static void edp_panel_off(struct intel_dp *intel_dp)
 {
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 	u32 pp;
 	i915_reg_t pp_ctrl_reg;
 
@@ -2546,10 +2460,10 @@ static void edp_panel_off(struct intel_dp *intel_dp)
 		return;
 
 	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
-		      port_name(dp_to_dig_port(intel_dp)->base.port));
+		      port_name(dig_port->base.port));
 
 	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
-	     port_name(dp_to_dig_port(intel_dp)->base.port));
+	     port_name(dig_port->base.port));
 
 	pp = ironlake_get_pp_control(intel_dp);
 	/* We need to switch off panel power _and_ force vdd, for otherwise some
@@ -2568,7 +2482,7 @@ static void edp_panel_off(struct intel_dp *intel_dp)
 	intel_dp->panel_power_off_time = ktime_get_boottime();
 
 	/* We got a reference when we enabled the VDD. */
-	intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
+	intel_display_power_put(dev_priv, intel_aux_power_domain(dig_port));
 }
 
 void intel_edp_panel_off(struct intel_dp *intel_dp)
@@ -3900,6 +3814,41 @@ intel_dp_read_dpcd(struct intel_dp *intel_dp)
 	return intel_dp->dpcd[DP_DPCD_REV] != 0;
 }
 
+static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
+{
+	/*
+	 * Clear the cached register set to avoid using stale values
+	 * for the sinks that do not support DSC.
+	 */
+	memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
+
+	/* Clear fec_capable to avoid using stale values */
+	intel_dp->fec_capable = 0;
+
+	/* Cache the DSC DPCD if eDP or DP rev >= 1.4 */
+	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 ||
+	    intel_dp->edp_dpcd[0] >= DP_EDP_14) {
+		if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
+				     intel_dp->dsc_dpcd,
+				     sizeof(intel_dp->dsc_dpcd)) < 0)
+			DRM_ERROR("Failed to read DPCD register 0x%x\n",
+				  DP_DSC_SUPPORT);
+
+		DRM_DEBUG_KMS("DSC DPCD: %*ph\n",
+			      (int)sizeof(intel_dp->dsc_dpcd),
+			      intel_dp->dsc_dpcd);
+		/* FEC is supported only on DP 1.4 */
+		if (!intel_dp_is_edp(intel_dp)) {
+			if (drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
+					      &intel_dp->fec_capable) < 0)
+				DRM_ERROR("Failed to read FEC DPCD register\n");
+
+			DRM_DEBUG_KMS("FEC CAPABILITY: %x\n",
+				      intel_dp->fec_capable);
+		}
+	}
+}
+
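Index 0 of the cache filled above corresponds to DPCD address DP_DSC_SUPPORT (0x60), so a consumer can test the sink's decompression bit directly. A minimal sketch, assuming the DP 1.4 bit layout (DP_DSC_DECOMPRESSION_IS_SUPPORTED is bit 0 of that register):

	static bool example_sink_supports_dsc(const u8 *dsc_dpcd)
	{
		/* dsc_dpcd[0] caches DPCD 0x60 (DP_DSC_SUPPORT) */
		return dsc_dpcd[0] & DP_DSC_DECOMPRESSION_IS_SUPPORTED;
	}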
 static bool
 intel_edp_init_dpcd(struct intel_dp *intel_dp)
 {
@@ -3976,6 +3925,10 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
 
 	intel_dp_set_common_rates(intel_dp);
 
+	/* Read the eDP DSC DPCD registers */
+	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+		intel_dp_get_dsc_sink_cap(intel_dp);
+
 	return true;
 }
 
3981 3934
@@ -4029,16 +3982,10 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
4029} 3982}
4030 3983
4031static bool 3984static bool
4032intel_dp_can_mst(struct intel_dp *intel_dp) 3985intel_dp_sink_can_mst(struct intel_dp *intel_dp)
4033{ 3986{
4034 u8 mstm_cap; 3987 u8 mstm_cap;
4035 3988
4036 if (!i915_modparams.enable_dp_mst)
4037 return false;
4038
4039 if (!intel_dp->can_mst)
4040 return false;
4041
4042 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12) 3989 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4043 return false; 3990 return false;
4044 3991
@@ -4048,34 +3995,36 @@ intel_dp_can_mst(struct intel_dp *intel_dp)
 	return mstm_cap & DP_MST_CAP;
 }
 
+static bool
+intel_dp_can_mst(struct intel_dp *intel_dp)
+{
+	return i915_modparams.enable_dp_mst &&
+		intel_dp->can_mst &&
+		intel_dp_sink_can_mst(intel_dp);
+}
+
 static void
 intel_dp_configure_mst(struct intel_dp *intel_dp)
 {
-	if (!i915_modparams.enable_dp_mst)
-		return;
+	struct intel_encoder *encoder =
+		&dp_to_dig_port(intel_dp)->base;
+	bool sink_can_mst = intel_dp_sink_can_mst(intel_dp);
+
+	DRM_DEBUG_KMS("MST support? port %c: %s, sink: %s, modparam: %s\n",
+		      port_name(encoder->port), yesno(intel_dp->can_mst),
+		      yesno(sink_can_mst), yesno(i915_modparams.enable_dp_mst));
 
 	if (!intel_dp->can_mst)
 		return;
 
-	intel_dp->is_mst = intel_dp_can_mst(intel_dp);
-
-	if (intel_dp->is_mst)
-		DRM_DEBUG_KMS("Sink is MST capable\n");
-	else
-		DRM_DEBUG_KMS("Sink is not MST capable\n");
+	intel_dp->is_mst = sink_can_mst &&
+		i915_modparams.enable_dp_mst;
 
 	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
 					intel_dp->is_mst);
 }
 
 static bool
-intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
-{
-	return drm_dp_dpcd_readb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
-				 sink_irq_vector) == 1;
-}
-
-static bool
 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
 {
 	return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
@@ -4083,6 +4032,91 @@ intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
 		DP_DPRX_ESI_LEN;
 }
 
+u16 intel_dp_dsc_get_output_bpp(int link_clock, uint8_t lane_count,
+				int mode_clock, int mode_hdisplay)
+{
+	u16 bits_per_pixel, max_bpp_small_joiner_ram;
+	int i;
+
+	/*
+	 * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
+	 * (LinkSymbolClock)* 8 * ((100-FECOverhead)/100)*(TimeSlotsPerMTP)
+	 * FECOverhead = 2.4%, for SST -> TimeSlotsPerMTP is 1,
+	 * for MST -> TimeSlotsPerMTP has to be calculated
+	 */
+	bits_per_pixel = (link_clock * lane_count * 8 *
+			  DP_DSC_FEC_OVERHEAD_FACTOR) /
+		mode_clock;
+
+	/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
+	max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER /
+		mode_hdisplay;
+
+	/*
+	 * Greatest allowed DSC BPP = MIN (output BPP from avaialble Link BW
+	 * check, output bpp from small joiner RAM check)
+	 */
+	bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);
+
+	/* Error out if the max bpp is less than smallest allowed valid bpp */
+	if (bits_per_pixel < valid_dsc_bpp[0]) {
+		DRM_DEBUG_KMS("Unsupported BPP %d\n", bits_per_pixel);
+		return 0;
+	}
+
+	/* Find the nearest match in the array of known BPPs from VESA */
+	for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
+		if (bits_per_pixel < valid_dsc_bpp[i + 1])
+			break;
+	}
+	bits_per_pixel = valid_dsc_bpp[i];
+
+	/*
+	 * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
+	 * fractional part is 0
+	 */
+	return bits_per_pixel << 4;
+}
+
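To make the bandwidth formula concrete, a self-contained arithmetic check with assumed numbers: a 4-lane HBR2 link (link_clock = 540000 kHz per lane) driving a 3840x2160@60 mode (mode_clock of roughly 533250 kHz). The 972/1000 factor below stands in for the ~2.4% FEC overhead and is written out rather than taken from the i915 header:

	#include <stdio.h>

	int main(void)
	{
		const long long link_clock = 540000; /* kHz, HBR2 symbol rate per lane */
		const long long lane_count = 4;
		const long long mode_clock = 533250; /* kHz, 4K@60 CVT-RB, approx. */
		long long bpp = link_clock * lane_count * 8 * 972 / 1000 / mode_clock;

		/* ~31 bpp of post-FEC link capacity; the function above would
		 * then clamp this against the small-joiner RAM limit, snap it
		 * down to the largest valid_dsc_bpp[] entry that fits, and
		 * return the result in U6.4 fixed point (value << 4). */
		printf("pre-clamp DSC bpp: %lld\n", bpp);
		return 0;
	}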
+u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
+				int mode_clock,
+				int mode_hdisplay)
+{
+	u8 min_slice_count, i;
+	int max_slice_width;
+
+	if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
+		min_slice_count = DIV_ROUND_UP(mode_clock,
+					       DP_DSC_MAX_ENC_THROUGHPUT_0);
+	else
+		min_slice_count = DIV_ROUND_UP(mode_clock,
+					       DP_DSC_MAX_ENC_THROUGHPUT_1);
+
+	max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
+	if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
+		DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
+			      max_slice_width);
+		return 0;
+	}
+	/* Also take into account max slice width */
+	min_slice_count = min_t(uint8_t, min_slice_count,
+				DIV_ROUND_UP(mode_hdisplay,
+					     max_slice_width));
+
+	/* Find the closest match to the valid slice count values */
+	for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
+		if (valid_dsc_slicecount[i] >
+		    drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
+						    false))
+			break;
+		if (min_slice_count <= valid_dsc_slicecount[i])
+			return valid_dsc_slicecount[i];
+	}
+
+	DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
+	return 0;
+}
+
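The slice-count side with the same assumed mode: the 340000 kHz per-slice encoder throughput and the 2560-pixel sink max slice width here are assumptions for the example; in the function above they come from i915 header constants and the cached DSC DPCD respectively:

	#define EXAMPLE_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	static int example_min_slice_count(void)
	{
		const int mode_clock = 533250;     /* kHz */
		const int enc_throughput = 340000; /* kHz per slice, assumed */
		const int mode_hdisplay = 3840;
		const int max_slice_width = 2560;  /* from DSC DPCD, assumed */
		int by_throughput = EXAMPLE_DIV_ROUND_UP(mode_clock, enc_throughput); /* 2 */
		int by_width = EXAMPLE_DIV_ROUND_UP(mode_hdisplay, max_slice_width);  /* 2 */

		/* mirrors the min_t() combination above; both terms are 2 here,
		 * so the loop would return the smallest valid slice count >= 2
		 * that the sink advertises */
		return by_throughput < by_width ? by_throughput : by_width;
	}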
 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
 {
 	int status = 0;
@@ -4403,7 +4437,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
 
 	/* Suppress underruns caused by re-training */
 	intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
-	if (crtc->config->has_pch_encoder)
+	if (crtc_state->has_pch_encoder)
 		intel_set_pch_fifo_underrun_reporting(dev_priv,
 						      intel_crtc_pch_transcoder(crtc), false);
 
@@ -4414,7 +4448,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
 	intel_wait_for_vblank(dev_priv, crtc->pipe);
 
 	intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
-	if (crtc->config->has_pch_encoder)
+	if (crtc_state->has_pch_encoder)
 		intel_set_pch_fifo_underrun_reporting(dev_priv,
 						      intel_crtc_pch_transcoder(crtc), true);
 
@@ -4462,6 +4496,29 @@ static bool intel_dp_hotplug(struct intel_encoder *encoder,
 	return changed;
 }
 
+static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
+{
+	u8 val;
+
+	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
+		return;
+
+	if (drm_dp_dpcd_readb(&intel_dp->aux,
+			      DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
+		return;
+
+	drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);
+
+	if (val & DP_AUTOMATED_TEST_REQUEST)
+		intel_dp_handle_test_request(intel_dp);
+
+	if (val & DP_CP_IRQ)
+		intel_hdcp_check_link(intel_dp->attached_connector);
+
+	if (val & DP_SINK_SPECIFIC_IRQ)
+		DRM_DEBUG_DRIVER("Sink specific irq unhandled\n");
+}
+
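The helper above follows the standard DP device-service-IRQ protocol: read the vector, write the set bits back to acknowledge them, then dispatch. Stripped to that core (a sketch using the same drm_dp_dpcd_readb()/drm_dp_dpcd_writeb() helpers, with the dispatch elided):

	#include <drm/drm_dp_helper.h>

	static void example_ack_service_irq(struct drm_dp_aux *aux)
	{
		u8 vec;

		if (drm_dp_dpcd_readb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR, &vec) != 1 || !vec)
			return;

		/* writing the latched bits back clears them in the sink */
		drm_dp_dpcd_writeb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR, vec);

		/* ...dispatch on vec (test request, CP IRQ, sink-specific)... */
	}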
 /*
  * According to DP spec
  * 5.1.2:
@@ -4479,7 +4536,6 @@ static bool
 intel_dp_short_pulse(struct intel_dp *intel_dp)
 {
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	u8 sink_irq_vector = 0;
 	u8 old_sink_count = intel_dp->sink_count;
 	bool ret;
 
@@ -4502,20 +4558,7 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
 		return false;
 	}
 
-	/* Try to read the source of the interrupt */
-	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
-	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) &&
-	    sink_irq_vector != 0) {
-		/* Clear interrupt source */
-		drm_dp_dpcd_writeb(&intel_dp->aux,
-				   DP_DEVICE_SERVICE_IRQ_VECTOR,
-				   sink_irq_vector);
-
-		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
-			intel_dp_handle_test_request(intel_dp);
-		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
-			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
-	}
+	intel_dp_check_service_irq(intel_dp);
 
 	/* Handle CEC interrupts, if any */
 	drm_dp_cec_irq(&intel_dp->aux);
@@ -4810,6 +4853,9 @@ static void icl_update_tc_port_type(struct drm_i915_private *dev_priv,
 			      type_str);
 }
 
+static void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
+				  struct intel_digital_port *dig_port);
+
 /*
  * This function implements the first part of the Connect Flow described by our
  * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
@@ -4864,9 +4910,7 @@ static bool icl_tc_phy_connect(struct drm_i915_private *dev_priv,
 	if (dig_port->tc_type == TC_PORT_TYPEC &&
 	    !(I915_READ(PORT_TX_DFLEXDPSP) & TC_LIVE_STATE_TC(tc_port))) {
 		DRM_DEBUG_KMS("TC PHY %d sudden disconnect.\n", tc_port);
-		val = I915_READ(PORT_TX_DFLEXDPCSSS);
-		val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
-		I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
+		icl_tc_phy_disconnect(dev_priv, dig_port);
 		return false;
 	}
 
@@ -4881,21 +4925,24 @@ static void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
 				  struct intel_digital_port *dig_port)
 {
 	enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
-	u32 val;
 
-	if (dig_port->tc_type != TC_PORT_LEGACY &&
-	    dig_port->tc_type != TC_PORT_TYPEC)
+	if (dig_port->tc_type == TC_PORT_UNKNOWN)
 		return;
 
 	/*
-	 * This function may be called many times in a row without an HPD event
-	 * in between, so try to avoid the write when we can.
+	 * TBT disconnection flow is read the live status, what was done in
+	 * caller.
 	 */
-	val = I915_READ(PORT_TX_DFLEXDPCSSS);
-	if (val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port)) {
+	if (dig_port->tc_type == TC_PORT_TYPEC ||
+	    dig_port->tc_type == TC_PORT_LEGACY) {
+		u32 val;
+
+		val = I915_READ(PORT_TX_DFLEXDPCSSS);
 		val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
 		I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
 	}
+
+	dig_port->tc_type = TC_PORT_UNKNOWN;
 }
 
 /*
@@ -4945,19 +4992,14 @@ static bool icl_digital_port_connected(struct intel_encoder *encoder)
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
 
-	switch (encoder->hpd_pin) {
-	case HPD_PORT_A:
-	case HPD_PORT_B:
+	if (intel_port_is_combophy(dev_priv, encoder->port))
 		return icl_combo_port_connected(dev_priv, dig_port);
-	case HPD_PORT_C:
-	case HPD_PORT_D:
-	case HPD_PORT_E:
-	case HPD_PORT_F:
+	else if (intel_port_is_tc(dev_priv, encoder->port))
 		return icl_tc_port_connected(dev_priv, dig_port);
-	default:
+	else
 		MISSING_CASE(encoder->hpd_pin);
-		return false;
-	}
+
+	return false;
 }
 
 /*
@@ -4982,20 +5024,23 @@ bool intel_digital_port_connected(struct intel_encoder *encoder)
 		return g4x_digital_port_connected(encoder);
 	}
 
-	if (IS_GEN5(dev_priv))
-		return ilk_digital_port_connected(encoder);
-	else if (IS_GEN6(dev_priv))
-		return snb_digital_port_connected(encoder);
-	else if (IS_GEN7(dev_priv))
-		return ivb_digital_port_connected(encoder);
-	else if (IS_GEN8(dev_priv))
-		return bdw_digital_port_connected(encoder);
+	if (INTEL_GEN(dev_priv) >= 11)
+		return icl_digital_port_connected(encoder);
+	else if (IS_GEN10(dev_priv) || IS_GEN9_BC(dev_priv))
+		return spt_digital_port_connected(encoder);
 	else if (IS_GEN9_LP(dev_priv))
 		return bxt_digital_port_connected(encoder);
-	else if (IS_GEN9_BC(dev_priv) || IS_GEN10(dev_priv))
-		return spt_digital_port_connected(encoder);
-	else
-		return icl_digital_port_connected(encoder);
+	else if (IS_GEN8(dev_priv))
+		return bdw_digital_port_connected(encoder);
+	else if (IS_GEN7(dev_priv))
+		return ivb_digital_port_connected(encoder);
+	else if (IS_GEN6(dev_priv))
+		return snb_digital_port_connected(encoder);
+	else if (IS_GEN5(dev_priv))
+		return ilk_digital_port_connected(encoder);
+
+	MISSING_CASE(INTEL_GEN(dev_priv));
+	return false;
 }
 
 static struct edid *
@@ -5042,28 +5087,35 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
 }
 
 static int
-intel_dp_long_pulse(struct intel_connector *connector,
-		    struct drm_modeset_acquire_ctx *ctx)
+intel_dp_detect(struct drm_connector *connector,
+		struct drm_modeset_acquire_ctx *ctx,
+		bool force)
 {
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-	struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
+	struct drm_i915_private *dev_priv = to_i915(connector->dev);
+	struct intel_dp *intel_dp = intel_attached_dp(connector);
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct intel_encoder *encoder = &dig_port->base;
 	enum drm_connector_status status;
-	u8 sink_irq_vector = 0;
+	enum intel_display_power_domain aux_domain =
+		intel_aux_power_domain(dig_port);
 
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+		      connector->base.id, connector->name);
 	WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
 
-	intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
+	intel_display_power_get(dev_priv, aux_domain);
 
 	/* Can't disconnect eDP */
 	if (intel_dp_is_edp(intel_dp))
 		status = edp_detect(intel_dp);
-	else if (intel_digital_port_connected(&dp_to_dig_port(intel_dp)->base))
+	else if (intel_digital_port_connected(encoder))
 		status = intel_dp_detect_dpcd(intel_dp);
 	else
 		status = connector_status_disconnected;
 
 	if (status == connector_status_disconnected) {
 		memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
+		memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
 
 		if (intel_dp->is_mst) {
 			DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
@@ -5089,6 +5141,10 @@ intel_dp_long_pulse(struct intel_connector *connector,
 
 	intel_dp_print_rates(intel_dp);
 
+	/* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
+	if (INTEL_GEN(dev_priv) >= 11)
+		intel_dp_get_dsc_sink_cap(intel_dp);
+
 	drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
 			 drm_dp_is_branch(intel_dp->dpcd));
 
@@ -5109,9 +5165,13 @@ intel_dp_long_pulse(struct intel_connector *connector,
 	 * with an IRQ_HPD, so force a link status check.
 	 */
 	if (!intel_dp_is_edp(intel_dp)) {
-		struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+		int ret;
 
-		intel_dp_retrain_link(encoder, ctx);
+		ret = intel_dp_retrain_link(encoder, ctx);
+		if (ret) {
+			intel_display_power_put(dev_priv, aux_domain);
+			return ret;
+		}
 	}
 
 	/*
@@ -5123,61 +5183,17 @@ intel_dp_long_pulse(struct intel_connector *connector,
 	intel_dp->aux.i2c_defer_count = 0;
 
 	intel_dp_set_edid(intel_dp);
-	if (intel_dp_is_edp(intel_dp) || connector->detect_edid)
+	if (intel_dp_is_edp(intel_dp) ||
+	    to_intel_connector(connector)->detect_edid)
 		status = connector_status_connected;
-	intel_dp->detect_done = true;
 
-	/* Try to read the source of the interrupt */
-	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
-	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) &&
-	    sink_irq_vector != 0) {
-		/* Clear interrupt source */
-		drm_dp_dpcd_writeb(&intel_dp->aux,
-				   DP_DEVICE_SERVICE_IRQ_VECTOR,
-				   sink_irq_vector);
-
-		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
-			intel_dp_handle_test_request(intel_dp);
-		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
-			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
-	}
+	intel_dp_check_service_irq(intel_dp);
 
 out:
 	if (status != connector_status_connected && !intel_dp->is_mst)
 		intel_dp_unset_edid(intel_dp);
 
-	intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
-	return status;
-}
-
-static int
-intel_dp_detect(struct drm_connector *connector,
-		struct drm_modeset_acquire_ctx *ctx,
-		bool force)
-{
-	struct intel_dp *intel_dp = intel_attached_dp(connector);
-	int status = connector->status;
-
-	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
-		      connector->base.id, connector->name);
-
-	/* If full detect is not performed yet, do a full detect */
-	if (!intel_dp->detect_done) {
-		struct drm_crtc *crtc;
-		int ret;
-
-		crtc = connector->state->crtc;
-		if (crtc) {
-			ret = drm_modeset_lock(&crtc->mutex, ctx);
-			if (ret)
-				return ret;
-		}
-
-		status = intel_dp_long_pulse(intel_dp->attached_connector, ctx);
-	}
-
-	intel_dp->detect_done = false;
-
+	intel_display_power_put(dev_priv, aux_domain);
 	return status;
 }
 
@@ -5185,8 +5201,11 @@ static void
 intel_dp_force(struct drm_connector *connector)
 {
 	struct intel_dp *intel_dp = intel_attached_dp(connector);
-	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct intel_encoder *intel_encoder = &dig_port->base;
 	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
+	enum intel_display_power_domain aux_domain =
+		intel_aux_power_domain(dig_port);
 
 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
 		      connector->base.id, connector->name);
@@ -5195,11 +5214,11 @@ intel_dp_force(struct drm_connector *connector)
 	if (connector->status != connector_status_connected)
 		return;
 
-	intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
+	intel_display_power_get(dev_priv, aux_domain);
 
 	intel_dp_set_edid(intel_dp);
 
-	intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
+	intel_display_power_put(dev_priv, aux_domain);
 }
 
 static int intel_dp_get_modes(struct drm_connector *connector)
@@ -5264,27 +5283,6 @@ intel_dp_connector_unregister(struct drm_connector *connector)
 	intel_connector_unregister(connector);
 }
 
-static void
-intel_dp_connector_destroy(struct drm_connector *connector)
-{
-	struct intel_connector *intel_connector = to_intel_connector(connector);
-
-	kfree(intel_connector->detect_edid);
-
-	if (!IS_ERR_OR_NULL(intel_connector->edid))
-		kfree(intel_connector->edid);
-
-	/*
-	 * Can't call intel_dp_is_edp() since the encoder may have been
-	 * destroyed already.
-	 */
-	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
-		intel_panel_fini(&intel_connector->panel);
-
-	drm_connector_cleanup(connector);
-	kfree(connector);
-}
-
 void intel_dp_encoder_destroy(struct drm_encoder *encoder)
 {
 	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
@@ -5348,7 +5346,8 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
 	dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN,
 				     an, DRM_HDCP_AN_LEN);
 	if (dpcd_ret != DRM_HDCP_AN_LEN) {
-		DRM_ERROR("Failed to write An over DP/AUX (%zd)\n", dpcd_ret);
+		DRM_DEBUG_KMS("Failed to write An over DP/AUX (%zd)\n",
+			      dpcd_ret);
 		return dpcd_ret >= 0 ? -EIO : dpcd_ret;
 	}
 
@@ -5364,10 +5363,10 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
 				rxbuf, sizeof(rxbuf),
 				DP_AUX_CH_CTL_AUX_AKSV_SELECT);
 	if (ret < 0) {
-		DRM_ERROR("Write Aksv over DP/AUX failed (%d)\n", ret);
+		DRM_DEBUG_KMS("Write Aksv over DP/AUX failed (%d)\n", ret);
 		return ret;
 	} else if (ret == 0) {
-		DRM_ERROR("Aksv write over DP/AUX was empty\n");
+		DRM_DEBUG_KMS("Aksv write over DP/AUX was empty\n");
 		return -EIO;
 	}
 
@@ -5382,7 +5381,7 @@ static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
 	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
 			       DRM_HDCP_KSV_LEN);
 	if (ret != DRM_HDCP_KSV_LEN) {
-		DRM_ERROR("Read Bksv from DP/AUX failed (%zd)\n", ret);
+		DRM_DEBUG_KMS("Read Bksv from DP/AUX failed (%zd)\n", ret);
 		return ret >= 0 ? -EIO : ret;
 	}
 	return 0;
@@ -5400,7 +5399,7 @@ static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
 	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO,
 			       bstatus, DRM_HDCP_BSTATUS_LEN);
 	if (ret != DRM_HDCP_BSTATUS_LEN) {
-		DRM_ERROR("Read bstatus from DP/AUX failed (%zd)\n", ret);
+		DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
 		return ret >= 0 ? -EIO : ret;
 	}
 	return 0;
@@ -5415,7 +5414,7 @@ int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port,
 	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
 			       bcaps, 1);
 	if (ret != 1) {
-		DRM_ERROR("Read bcaps from DP/AUX failed (%zd)\n", ret);
+		DRM_DEBUG_KMS("Read bcaps from DP/AUX failed (%zd)\n", ret);
 		return ret >= 0 ? -EIO : ret;
 	}
 
@@ -5445,7 +5444,7 @@ int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
 	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
 			       ri_prime, DRM_HDCP_RI_LEN);
 	if (ret != DRM_HDCP_RI_LEN) {
-		DRM_ERROR("Read Ri' from DP/AUX failed (%zd)\n", ret);
+		DRM_DEBUG_KMS("Read Ri' from DP/AUX failed (%zd)\n", ret);
 		return ret >= 0 ? -EIO : ret;
 	}
 	return 0;
@@ -5460,7 +5459,7 @@ int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
 	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
 			       &bstatus, 1);
 	if (ret != 1) {
-		DRM_ERROR("Read bstatus from DP/AUX failed (%zd)\n", ret);
+		DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
 		return ret >= 0 ? -EIO : ret;
 	}
 	*ksv_ready = bstatus & DP_BSTATUS_READY;
@@ -5482,8 +5481,8 @@ int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
 				       ksv_fifo + i * DRM_HDCP_KSV_LEN,
 				       len);
 		if (ret != len) {
-			DRM_ERROR("Read ksv[%d] from DP/AUX failed (%zd)\n", i,
-				  ret);
+			DRM_DEBUG_KMS("Read ksv[%d] from DP/AUX failed (%zd)\n",
+				      i, ret);
 			return ret >= 0 ? -EIO : ret;
 		}
 	}
@@ -5503,7 +5502,7 @@ int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
 				       DP_AUX_HDCP_V_PRIME(i), part,
 				       DRM_HDCP_V_PRIME_PART_LEN);
 	if (ret != DRM_HDCP_V_PRIME_PART_LEN) {
-		DRM_ERROR("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
+		DRM_DEBUG_KMS("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
 		return ret >= 0 ? -EIO : ret;
 	}
 	return 0;
@@ -5526,7 +5525,7 @@ bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port)
 	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
 			       &bstatus, 1);
 	if (ret != 1) {
-		DRM_ERROR("Read bstatus from DP/AUX failed (%zd)\n", ret);
+		DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
 		return false;
 	}
 
@@ -5565,6 +5564,7 @@ static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
 {
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 
 	lockdep_assert_held(&dev_priv->pps_mutex);
 
@@ -5578,7 +5578,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
 	 * indefinitely.
 	 */
 	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
-	intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
+	intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port));
 
 	edp_panel_vdd_schedule_off(intel_dp);
 }
@@ -5631,7 +5631,7 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
 	.atomic_set_property = intel_digital_connector_atomic_set_property,
 	.late_register = intel_dp_connector_register,
 	.early_unregister = intel_dp_connector_unregister,
-	.destroy = intel_dp_connector_destroy,
+	.destroy = intel_connector_destroy,
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
 };
@@ -5673,11 +5673,11 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
 
 	if (long_hpd) {
 		intel_dp->reset_link_params = true;
-		intel_dp->detect_done = false;
 		return IRQ_NONE;
 	}
 
-	intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
+	intel_display_power_get(dev_priv,
+				intel_aux_power_domain(intel_dig_port));
 
 	if (intel_dp->is_mst) {
 		if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
@@ -5690,7 +5690,6 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
 			intel_dp->is_mst = false;
 			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
 							intel_dp->is_mst);
-			intel_dp->detect_done = false;
 			goto put_power;
 		}
 	}
@@ -5700,19 +5699,15 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
 
 		handled = intel_dp_short_pulse(intel_dp);
 
-		/* Short pulse can signify loss of hdcp authentication */
-		intel_hdcp_check_link(intel_dp->attached_connector);
-
-		if (!handled) {
-			intel_dp->detect_done = false;
+		if (!handled)
 			goto put_power;
-		}
 	}
 
 	ret = IRQ_HANDLED;
 
 put_power:
-	intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
+	intel_display_power_put(dev_priv,
+				intel_aux_power_domain(intel_dig_port));
 
 	return ret;
 }
@@ -5743,6 +5738,10 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
 	intel_attach_force_audio_property(connector);
 
 	intel_attach_broadcast_rgb_property(connector);
+	if (HAS_GMCH_DISPLAY(dev_priv))
+		drm_connector_attach_max_bpc_property(connector, 6, 10);
+	else if (INTEL_GEN(dev_priv) >= 5)
+		drm_connector_attach_max_bpc_property(connector, 6, 12);
 
 	if (intel_dp_is_edp(intel_dp)) {
 		u32 allowed_scalers;
@@ -6099,10 +6098,10 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
 	if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
 		switch (index) {
 		case DRRS_HIGH_RR:
-			intel_dp_set_m_n(intel_crtc, M1_N1);
+			intel_dp_set_m_n(crtc_state, M1_N1);
 			break;
 		case DRRS_LOW_RR:
-			intel_dp_set_m_n(intel_crtc, M2_N2);
+			intel_dp_set_m_n(crtc_state, M2_N2);
 			break;
 		case DRRS_MAX_RR:
 		default:
@@ -6422,6 +6421,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
 	if (!intel_dp_is_edp(intel_dp))
 		return true;
 
+	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work);
+
 	/*
 	 * On IBX/CPT we may get here with LVDS already registered. Since the
 	 * driver uses the only internal power sequencer available for both
@@ -6514,6 +6515,10 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
 	intel_connector->panel.backlight.power = intel_edp_backlight_power;
 	intel_panel_setup_backlight(connector, pipe);
 
+	if (fixed_mode)
+		drm_connector_init_panel_orientation_property(
+				connector, fixed_mode->hdisplay, fixed_mode->vdisplay);
+
 	return true;
 
 out_vdd_off:
@@ -6624,9 +6629,6 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 
 	intel_dp_aux_init(intel_dp);
 
-	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
-			  edp_panel_vdd_work);
-
 	intel_connector_attach_encoder(intel_connector, intel_encoder);
 
 	if (HAS_DDI(dev_priv))
@@ -6743,6 +6745,7 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
 	if (port != PORT_A)
 		intel_infoframe_init(intel_dig_port);
 
+	intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
 	if (!intel_dp_init_connector(intel_dig_port, intel_connector))
 		goto err_init_connector;
 
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 1b00f8ea145b..4de247ddf05f 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -51,6 +51,7 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
 	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
 		return false;
 
+	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
 	pipe_config->has_pch_encoder = false;
 	bpp = 24;
 	if (intel_dp->compliance.test_data.bpc) {
@@ -208,12 +209,25 @@ static void intel_mst_pre_pll_enable_dp(struct intel_encoder *encoder,
 	struct intel_digital_port *intel_dig_port = intel_mst->primary;
 	struct intel_dp *intel_dp = &intel_dig_port->dp;
 
-	if (intel_dp->active_mst_links == 0 &&
-	    intel_dig_port->base.pre_pll_enable)
+	if (intel_dp->active_mst_links == 0)
 		intel_dig_port->base.pre_pll_enable(&intel_dig_port->base,
 						    pipe_config, NULL);
 }
 
+static void intel_mst_post_pll_disable_dp(struct intel_encoder *encoder,
+					  const struct intel_crtc_state *old_crtc_state,
+					  const struct drm_connector_state *old_conn_state)
+{
+	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+	struct intel_digital_port *intel_dig_port = intel_mst->primary;
+	struct intel_dp *intel_dp = &intel_dig_port->dp;
+
+	if (intel_dp->active_mst_links == 0)
+		intel_dig_port->base.post_pll_disable(&intel_dig_port->base,
+						      old_crtc_state,
+						      old_conn_state);
+}
+
 static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
 				    const struct intel_crtc_state *pipe_config,
 				    const struct drm_connector_state *conn_state)
@@ -335,24 +349,12 @@ intel_dp_mst_detect(struct drm_connector *connector, bool force)
 				      intel_connector->port);
 }
 
-static void
-intel_dp_mst_connector_destroy(struct drm_connector *connector)
-{
-	struct intel_connector *intel_connector = to_intel_connector(connector);
-
-	if (!IS_ERR_OR_NULL(intel_connector->edid))
-		kfree(intel_connector->edid);
-
-	drm_connector_cleanup(connector);
-	kfree(connector);
-}
-
 static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
 	.detect = intel_dp_mst_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.late_register = intel_connector_register,
 	.early_unregister = intel_connector_unregister,
-	.destroy = intel_dp_mst_connector_destroy,
+	.destroy = intel_connector_destroy,
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
 };
@@ -452,6 +454,10 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
 	if (!intel_connector)
 		return NULL;
 
+	intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
+	intel_connector->mst_port = intel_dp;
+	intel_connector->port = port;
+
 	connector = &intel_connector->base;
 	ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs,
 				 DRM_MODE_CONNECTOR_DisplayPort);
@@ -462,10 +468,6 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
 
 	drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs);
 
-	intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
-	intel_connector->mst_port = intel_dp;
-	intel_connector->port = port;
-
 	for_each_pipe(dev_priv, pipe) {
 		struct drm_encoder *enc =
 			&intel_dp->mst_encoders[pipe]->base.base;
@@ -560,6 +562,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
 	intel_encoder->disable = intel_mst_disable_dp;
 	intel_encoder->post_disable = intel_mst_post_disable_dp;
 	intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp;
+	intel_encoder->post_pll_disable = intel_mst_post_pll_disable_dp;
 	intel_encoder->pre_enable = intel_mst_pre_enable_dp;
 	intel_encoder->enable = intel_mst_enable_dp;
 	intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state;
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c
index 00b3ab656b06..3c7f10d17658 100644
--- a/drivers/gpu/drm/i915/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/intel_dpio_phy.c
@@ -748,7 +748,7 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
 	val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
 
-	if (crtc->config->lane_count > 2) {
+	if (crtc_state->lane_count > 2) {
 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
 		if (reset)
 			val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
@@ -765,7 +765,7 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
 	val |= DPIO_PCS_CLK_SOFT_RESET;
 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
 
-	if (crtc->config->lane_count > 2) {
+	if (crtc_state->lane_count > 2) {
 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
 		val |= CHV_PCS_REQ_SOFTRESET_EN;
 		if (reset)
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index e6cac9225536..901e15063b24 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -126,16 +126,16 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
 
 /**
  * intel_prepare_shared_dpll - call a dpll's prepare hook
- * @crtc: CRTC which has a shared dpll
+ * @crtc_state: CRTC, and its state, which has a shared dpll
  *
  * This calls the PLL's prepare hook if it has one and if the PLL is not
  * already enabled. The prepare hook is platform specific.
  */
-void intel_prepare_shared_dpll(struct intel_crtc *crtc)
+void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
 {
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_shared_dpll *pll = crtc->config->shared_dpll;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
 
 	if (WARN_ON(pll == NULL))
 		return;
@@ -154,15 +154,15 @@ void intel_prepare_shared_dpll(struct intel_crtc *crtc)
 
 /**
  * intel_enable_shared_dpll - enable a CRTC's shared DPLL
- * @crtc: CRTC which has a shared DPLL
+ * @crtc_state: CRTC, and its state, which has a shared DPLL
  *
  * Enable the shared DPLL used by @crtc.
  */
-void intel_enable_shared_dpll(struct intel_crtc *crtc)
+void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
 {
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_shared_dpll *pll = crtc->config->shared_dpll;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
 	unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
 	unsigned int old_mask;
 
@@ -199,14 +199,15 @@ out:
 
 /**
  * intel_disable_shared_dpll - disable a CRTC's shared DPLL
- * @crtc: CRTC which has a shared DPLL
+ * @crtc_state: CRTC, and its state, which has a shared DPLL
  *
  * Disable the shared DPLL used by @crtc.
  */
-void intel_disable_shared_dpll(struct intel_crtc *crtc)
+void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
 {
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	struct intel_shared_dpll *pll = crtc->config->shared_dpll;
+	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
 	unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
 
 	/* PCH only available on ILK+ */
@@ -409,14 +410,6 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
 				  struct intel_shared_dpll *pll)
 {
 	const enum intel_dpll_id id = pll->info->id;
-	struct drm_device *dev = &dev_priv->drm;
-	struct intel_crtc *crtc;
-
-	/* Make sure no transcoder isn't still depending on us. */
-	for_each_intel_crtc(dev, crtc) {
-		if (crtc->config->shared_dpll == pll)
-			assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
-	}
 
 	I915_WRITE(PCH_DPLL(id), 0);
 	POSTING_READ(PCH_DPLL(id));
@@ -2628,11 +2621,16 @@ static enum port icl_mg_pll_id_to_port(enum intel_dpll_id id)
 	return id - DPLL_ID_ICL_MGPLL1 + PORT_C;
 }
 
-static enum intel_dpll_id icl_port_to_mg_pll_id(enum port port)
+enum intel_dpll_id icl_port_to_mg_pll_id(enum port port)
 {
 	return port - PORT_C + DPLL_ID_ICL_MGPLL1;
 }
 
+bool intel_dpll_is_combophy(enum intel_dpll_id id)
+{
+	return id == DPLL_ID_ICL_DPLL0 || id == DPLL_ID_ICL_DPLL1;
+}
+
 static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
 				     uint32_t *target_dco_khz,
 				     struct intel_dpll_hw_state *state)
@@ -2874,8 +2872,8 @@ static struct intel_shared_dpll *
 icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
 	     struct intel_encoder *encoder)
 {
-	struct intel_digital_port *intel_dig_port =
-		enc_to_dig_port(&encoder->base);
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_digital_port *intel_dig_port;
 	struct intel_shared_dpll *pll;
 	struct intel_dpll_hw_state pll_state = {};
 	enum port port = encoder->port;
@@ -2883,18 +2881,21 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
 	int clock = crtc_state->port_clock;
 	bool ret;
 
-	switch (port) {
-	case PORT_A:
-	case PORT_B:
+	if (intel_port_is_combophy(dev_priv, port)) {
 		min = DPLL_ID_ICL_DPLL0;
 		max = DPLL_ID_ICL_DPLL1;
 		ret = icl_calc_dpll_state(crtc_state, encoder, clock,
 					  &pll_state);
-		break;
-	case PORT_C:
-	case PORT_D:
-	case PORT_E:
-	case PORT_F:
+	} else if (intel_port_is_tc(dev_priv, port)) {
+		if (encoder->type == INTEL_OUTPUT_DP_MST) {
+			struct intel_dp_mst_encoder *mst_encoder;
+
+			mst_encoder = enc_to_mst(&encoder->base);
+			intel_dig_port = mst_encoder->primary;
+		} else {
+			intel_dig_port = enc_to_dig_port(&encoder->base);
+		}
+
 		if (intel_dig_port->tc_type == TC_PORT_TBT) {
 			min = DPLL_ID_ICL_TBTPLL;
 			max = min;
@@ -2906,8 +2907,7 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
 			ret = icl_calc_mg_pll_state(crtc_state, encoder, clock,
 						    &pll_state);
 		}
-		break;
-	default:
+	} else {
 		MISSING_CASE(port);
 		return NULL;
 	}
@@ -2932,21 +2932,16 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
 
 static i915_reg_t icl_pll_id_to_enable_reg(enum intel_dpll_id id)
 {
-	switch (id) {
-	default:
-		MISSING_CASE(id);
-		/* fall through */
-	case DPLL_ID_ICL_DPLL0:
-	case DPLL_ID_ICL_DPLL1:
+	if (intel_dpll_is_combophy(id))
 		return CNL_DPLL_ENABLE(id);
-	case DPLL_ID_ICL_TBTPLL:
+	else if (id == DPLL_ID_ICL_TBTPLL)
 		return TBT_PLL_ENABLE;
-	case DPLL_ID_ICL_MGPLL1:
-	case DPLL_ID_ICL_MGPLL2:
-	case DPLL_ID_ICL_MGPLL3:
-	case DPLL_ID_ICL_MGPLL4:
+	else
+		/*
+		 * TODO: Make MG_PLL macros use
+		 * tc port id instead of port id
+		 */
 		return MG_PLL_ENABLE(icl_mg_pll_id_to_port(id));
-	}
 }
 
 static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
@@ -2965,17 +2960,11 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
 	if (!(val & PLL_ENABLE))
 		goto out;
 
-	switch (id) {
-	case DPLL_ID_ICL_DPLL0:
-	case DPLL_ID_ICL_DPLL1:
-	case DPLL_ID_ICL_TBTPLL:
+	if (intel_dpll_is_combophy(id) ||
+	    id == DPLL_ID_ICL_TBTPLL) {
 		hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
 		hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
-		break;
-	case DPLL_ID_ICL_MGPLL1:
-	case DPLL_ID_ICL_MGPLL2:
-	case DPLL_ID_ICL_MGPLL3:
-	case DPLL_ID_ICL_MGPLL4:
+	} else {
 		port = icl_mg_pll_id_to_port(id);
 		hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(port));
 		hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
@@ -3013,9 +3002,6 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
 
 	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
 	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
-		break;
-	default:
-		MISSING_CASE(id);
 	}
 
 	ret = true;
@@ -3104,21 +3090,10 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv,
 					  PLL_POWER_STATE, 1))
 		DRM_ERROR("PLL %d Power not enabled\n", id);
 
-	switch (id) {
-	case DPLL_ID_ICL_DPLL0:
-	case DPLL_ID_ICL_DPLL1:
-	case DPLL_ID_ICL_TBTPLL:
+	if (intel_dpll_is_combophy(id) || id == DPLL_ID_ICL_TBTPLL)
 		icl_dpll_write(dev_priv, pll);
-		break;
-	case DPLL_ID_ICL_MGPLL1:
-	case DPLL_ID_ICL_MGPLL2:
-	case DPLL_ID_ICL_MGPLL3:
-	case DPLL_ID_ICL_MGPLL4:
+	else
 		icl_mg_pll_write(dev_priv, pll);
-		break;
-	default:
-		MISSING_CASE(id);
-	}
 
 	/*
 	 * DVFS pre sequence would be here, but in our driver the cdclk code
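With intel_dpll_is_combophy() in place, the same classification drives the register-path choices in icl_pll_id_to_enable_reg(), icl_pll_get_hw_state() and icl_pll_enable() above. A compilable stand-in showing the pattern (the enum and function names below are simplified, not the driver's):

#include <stdio.h>

enum dpll_id { DPLL0, DPLL1, TBTPLL, MGPLL1, MGPLL2, MGPLL3, MGPLL4 };

/* Mirror of intel_dpll_is_combophy(): only DPLL0/1 live in the combo PHY. */
static int dpll_is_combophy(enum dpll_id id)
{
	return id == DPLL0 || id == DPLL1;
}

/* One predicate now drives every register-path decision. */
static const char *pll_write_path(enum dpll_id id)
{
	if (dpll_is_combophy(id) || id == TBTPLL)
		return "icl_dpll_write";   /* CFGCR0/CFGCR1 based PLLs */
	return "icl_mg_pll_write";         /* MG PLL register block */
}

int main(void)
{
	for (enum dpll_id id = DPLL0; id <= MGPLL4; id++)
		printf("PLL %d -> %s\n", id, pll_write_path(id));
	return 0;
}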
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h
index bf0de8a4dc63..a033d8f06d4a 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h
@@ -334,9 +334,9 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
 void intel_release_shared_dpll(struct intel_shared_dpll *dpll,
 			       struct intel_crtc *crtc,
 			       struct drm_atomic_state *state);
-void intel_prepare_shared_dpll(struct intel_crtc *crtc);
-void intel_enable_shared_dpll(struct intel_crtc *crtc);
-void intel_disable_shared_dpll(struct intel_crtc *crtc);
+void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state);
+void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state);
+void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state);
 void intel_shared_dpll_swap_state(struct drm_atomic_state *state);
 void intel_shared_dpll_init(struct drm_device *dev);
 
@@ -345,5 +345,7 @@ void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
 int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv,
 			       uint32_t pll_id);
 int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv);
+enum intel_dpll_id icl_port_to_mg_pll_id(enum port port);
+bool intel_dpll_is_combophy(enum intel_dpll_id id);
 
 #endif /* _INTEL_DPLL_MGR_H_ */
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index f8dc84b2d2d3..a7d9ac912125 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -381,6 +381,15 @@ struct intel_hdcp_shim {
 			    bool *hdcp_capable);
 };
 
+struct intel_hdcp {
+	const struct intel_hdcp_shim *shim;
+	/* Mutex for hdcp state of the connector */
+	struct mutex mutex;
+	u64 value;
+	struct delayed_work check_work;
+	struct work_struct prop_work;
+};
+
 struct intel_connector {
 	struct drm_connector base;
 	/*
@@ -413,11 +422,7 @@ struct intel_connector {
 	/* Work struct to schedule a uevent on link train failure */
 	struct work_struct modeset_retry_work;
 
-	const struct intel_hdcp_shim *hdcp_shim;
-	struct mutex hdcp_mutex;
-	uint64_t hdcp_value; /* protected by hdcp_mutex */
-	struct delayed_work hdcp_check_work;
-	struct work_struct hdcp_prop_work;
+	struct intel_hdcp hdcp;
 };
 
 struct intel_digital_connector_state {
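Folding the five hdcp_* members into one struct intel_hdcp keeps the shim, lock and work items travelling together, and the comment preserves the old rule that hdcp.value is protected by hdcp.mutex. A userspace sketch of that locking pattern, using pthreads as a stand-in for the kernel mutex:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the consolidated intel_hdcp state. */
struct hdcp_state {
	pthread_mutex_t mutex;  /* guards 'value', like hdcp.mutex */
	uint64_t value;         /* desired/current content protection */
};

static void hdcp_set_value(struct hdcp_state *hdcp, uint64_t value)
{
	pthread_mutex_lock(&hdcp->mutex);
	hdcp->value = value;    /* all readers and writers take the same lock */
	pthread_mutex_unlock(&hdcp->mutex);
}

int main(void)
{
	struct hdcp_state hdcp = { PTHREAD_MUTEX_INITIALIZER, 0 };

	hdcp_set_value(&hdcp, 1);
	printf("hdcp value: %llu\n", (unsigned long long)hdcp.value);
	return 0;
}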
@@ -539,6 +544,26 @@ struct intel_plane_state {
 	 */
 	int scaler_id;
 
+	/*
+	 * linked_plane:
+	 *
+	 * ICL planar formats require 2 planes that are updated as pairs.
+	 * This member is used to make sure the other plane is also updated
+	 * when required, and for update_slave() to find the correct
+	 * plane_state to pass as argument.
+	 */
+	struct intel_plane *linked_plane;
+
+	/*
+	 * slave:
+	 * If set, this plane is not updated directly; the linked plane's
+	 * state is used to update it during atomic commit, via the
+	 * update_slave() callback.
+	 *
+	 * It's also used by the watermark code to ignore wm calculations on
+	 * this plane. They're calculated by the linked plane's wm code.
+	 */
+	u32 slave;
+
 	struct drm_intel_sprite_colorkey ckey;
 };
 
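The linked_plane/slave pair above is how gen11 planar (NV12) updates stay paired: the master plane's update also programs its slave, and slaves are never updated directly. A simplified standalone model of that dispatch (types and names here are illustrative only, not driver code):

#include <stdio.h>

/* Simplified stand-ins for intel_plane / intel_plane_state. */
struct plane;
struct plane_state {
	struct plane *plane;
	struct plane *linked_plane;  /* the paired Y/UV plane, if any */
	unsigned int slave;          /* non-zero: driven via the master */
};

struct plane { const char *name; };

/* Commit-time dispatch: slaves are skipped, masters update both. */
static void update_planes(struct plane_state *states, int n)
{
	for (int i = 0; i < n; i++) {
		if (states[i].slave)
			continue;                 /* handled by its master */
		printf("update_plane(%s)\n", states[i].plane->name);
		if (states[i].linked_plane)
			printf("update_slave(%s)\n",
			       states[i].linked_plane->name);
	}
}

int main(void)
{
	struct plane uv = { "UV plane" }, y = { "Y plane" };
	struct plane_state states[] = {
		{ &uv, &y, 0 },   /* master carries the pair */
		{ &y, &uv, 1 },   /* slave: no direct update */
	};

	update_planes(states, 2);
	return 0;
}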
@@ -547,6 +572,7 @@ struct intel_initial_plane_config {
 	unsigned int tiling;
 	int size;
 	u32 base;
+	u8 rotation;
 };
 
 #define SKL_MIN_SRC_W 8
@@ -712,6 +738,13 @@ struct intel_crtc_wm_state {
 	bool need_postvbl_update;
 };
 
+enum intel_output_format {
+	INTEL_OUTPUT_FORMAT_INVALID,
+	INTEL_OUTPUT_FORMAT_RGB,
+	INTEL_OUTPUT_FORMAT_YCBCR420,
+	INTEL_OUTPUT_FORMAT_YCBCR444,
+};
+
 struct intel_crtc_state {
 	struct drm_crtc_state base;
 
@@ -899,8 +932,11 @@ struct intel_crtc_state {
 	/* HDMI High TMDS char rate ratio */
 	bool hdmi_high_tmds_clock_ratio;
 
-	/* output format is YCBCR 4:2:0 */
-	bool ycbcr420;
+	/* Output format RGB/YCBCR etc */
+	enum intel_output_format output_format;
+
+	/* Output down scaling is done in LSPCON device */
+	bool lspcon_downsampling;
 };
 
 struct intel_crtc {
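Moving from the ycbcr420 bool to enum intel_output_format lets code distinguish RGB, 4:2:0 and 4:4:4 instead of overloading a single flag. A small sketch of how a check might migrate (the helper below is hypothetical, not driver code):

#include <stdio.h>

enum output_format { FMT_INVALID, FMT_RGB, FMT_YCBCR420, FMT_YCBCR444 };

/* What used to be 'if (state->ycbcr420)' becomes an enum comparison,
 * leaving room for 4:4:4 and future formats. */
static int needs_chroma_downsampling(enum output_format fmt)
{
	return fmt == FMT_YCBCR420;
}

int main(void)
{
	printf("4:2:0 downsamples: %d\n", needs_chroma_downsampling(FMT_YCBCR420));
	printf("4:4:4 downsamples: %d\n", needs_chroma_downsampling(FMT_YCBCR444));
	return 0;
}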
@@ -973,6 +1009,9 @@ struct intel_plane {
 	void (*update_plane)(struct intel_plane *plane,
 			     const struct intel_crtc_state *crtc_state,
 			     const struct intel_plane_state *plane_state);
+	void (*update_slave)(struct intel_plane *plane,
+			     const struct intel_crtc_state *crtc_state,
+			     const struct intel_plane_state *plane_state);
 	void (*disable_plane)(struct intel_plane *plane,
 			      struct intel_crtc *crtc);
 	bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe);
@@ -1070,13 +1109,13 @@ struct intel_dp {
 	bool link_mst;
 	bool link_trained;
 	bool has_audio;
-	bool detect_done;
 	bool reset_link_params;
-	enum aux_ch aux_ch;
 	uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
 	uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
 	uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
 	uint8_t edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
+	u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE];
+	u8 fec_capable;
 	/* source rates */
 	int num_source_rates;
 	const int *source_rates;
@@ -1094,7 +1133,6 @@ struct intel_dp {
 	/* sink or branch descriptor */
 	struct drm_dp_desc desc;
 	struct drm_dp_aux aux;
-	enum intel_display_power_domain aux_power_domain;
 	uint8_t train_set[4];
 	int panel_power_up_delay;
 	int panel_power_down_delay;
@@ -1156,9 +1194,15 @@ struct intel_dp {
 	struct intel_dp_compliance compliance;
 };
 
+enum lspcon_vendor {
+	LSPCON_VENDOR_MCA,
+	LSPCON_VENDOR_PARADE
+};
+
 struct intel_lspcon {
 	bool active;
 	enum drm_lspcon_mode mode;
+	enum lspcon_vendor vendor;
 };
 
 struct intel_digital_port {
@@ -1170,18 +1214,20 @@ struct intel_digital_port {
 	enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool);
 	bool release_cl2_override;
 	uint8_t max_lanes;
+	/* Used for DP and ICL+ TypeC/DP and TypeC/HDMI ports. */
+	enum aux_ch aux_ch;
 	enum intel_display_power_domain ddi_io_power_domain;
 	enum tc_port_type tc_type;
 
-	void (*write_infoframe)(struct drm_encoder *encoder,
+	void (*write_infoframe)(struct intel_encoder *encoder,
 				const struct intel_crtc_state *crtc_state,
 				unsigned int type,
 				const void *frame, ssize_t len);
-	void (*set_infoframes)(struct drm_encoder *encoder,
+	void (*set_infoframes)(struct intel_encoder *encoder,
 			       bool enable,
 			       const struct intel_crtc_state *crtc_state,
 			       const struct drm_connector_state *conn_state);
-	bool (*infoframe_enabled)(struct drm_encoder *encoder,
+	bool (*infoframe_enabled)(struct intel_encoder *encoder,
 				  const struct intel_crtc_state *pipe_config);
 };
 
1187 1233
@@ -1281,6 +1327,12 @@ enc_to_dig_port(struct drm_encoder *encoder)
 	return NULL;
 }
 
+static inline struct intel_digital_port *
+conn_to_dig_port(struct intel_connector *connector)
+{
+	return enc_to_dig_port(&intel_attached_encoder(&connector->base)->base);
+}
+
 static inline struct intel_dp_mst_encoder *
 enc_to_mst(struct drm_encoder *encoder)
 {
@@ -1306,6 +1358,12 @@ static inline bool intel_encoder_is_dp(struct intel_encoder *encoder)
 	}
 }
 
+static inline struct intel_lspcon *
+enc_to_intel_lspcon(struct drm_encoder *encoder)
+{
+	return &enc_to_dig_port(encoder)->lspcon;
+}
+
 static inline struct intel_digital_port *
 dp_to_dig_port(struct intel_dp *intel_dp)
 {
@@ -1331,6 +1389,27 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
 }
 
 static inline struct intel_plane_state *
+intel_atomic_get_plane_state(struct intel_atomic_state *state,
+			     struct intel_plane *plane)
+{
+	struct drm_plane_state *ret =
+		drm_atomic_get_plane_state(&state->base, &plane->base);
+
+	if (IS_ERR(ret))
+		return ERR_CAST(ret);
+
+	return to_intel_plane_state(ret);
+}
+
+static inline struct intel_plane_state *
+intel_atomic_get_old_plane_state(struct intel_atomic_state *state,
+				 struct intel_plane *plane)
+{
+	return to_intel_plane_state(drm_atomic_get_old_plane_state(&state->base,
+								   &plane->base));
+}
+
+static inline struct intel_plane_state *
 intel_atomic_get_new_plane_state(struct intel_atomic_state *state,
 				 struct intel_plane *plane)
 {
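The new accessors mirror intel_atomic_get_new_plane_state() for the old and to-be-acquired plane state, so callers can compare before and after without open-coding the drm casts. A toy illustration of the old-vs-new comparison such helpers enable (stand-in types, not the driver's):

#include <stdio.h>

/* Stand-in for intel_plane_state; only the field we compare. */
struct plane_state { int visible; };

int main(void)
{
	/* What intel_atomic_get_old/new_plane_state() would hand back. */
	struct plane_state old_state = { .visible = 1 };
	struct plane_state new_state = { .visible = 0 };

	if (old_state.visible && !new_state.visible)
		printf("plane is being disabled in this commit\n");
	return 0;
}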
@@ -1444,6 +1523,7 @@ void icl_map_plls_to_ports(struct drm_crtc *crtc,
 void icl_unmap_plls_to_ports(struct drm_crtc *crtc,
 			     struct intel_crtc_state *crtc_state,
 			     struct drm_atomic_state *old_state);
+void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
 
 unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
 				   int color_plane, unsigned int height);
@@ -1488,7 +1568,6 @@ void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
 void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
 void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc);
-void intel_update_rawclk(struct drm_i915_private *dev_priv);
 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv);
 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
 		      const char *name, u32 reg, int ref_freq);
@@ -1509,20 +1588,12 @@ void intel_mark_idle(struct drm_i915_private *dev_priv);
 int intel_display_suspend(struct drm_device *dev);
 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv);
 void intel_encoder_destroy(struct drm_encoder *encoder);
-int intel_connector_init(struct intel_connector *);
-struct intel_connector *intel_connector_alloc(void);
-void intel_connector_free(struct intel_connector *connector);
-bool intel_connector_get_hw_state(struct intel_connector *connector);
-void intel_connector_attach_encoder(struct intel_connector *connector,
-				    struct intel_encoder *encoder);
 struct drm_display_mode *
 intel_encoder_current_mode(struct intel_encoder *encoder);
 bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port);
 bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port);
 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv,
 			      enum port port);
-
-enum pipe intel_get_pipe_from_connector(struct intel_connector *connector);
 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
 				      struct drm_file *file_priv);
 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
@@ -1628,9 +1699,11 @@ void bxt_enable_dc9(struct drm_i915_private *dev_priv);
 void bxt_disable_dc9(struct drm_i915_private *dev_priv);
 void gen9_enable_dc5(struct drm_i915_private *dev_priv);
 unsigned int skl_cdclk_get_vco(unsigned int freq);
+void skl_enable_dc6(struct drm_i915_private *dev_priv);
 void intel_dp_get_m_n(struct intel_crtc *crtc,
 		      struct intel_crtc_state *pipe_config);
-void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n);
+void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state,
+		      enum link_m_n_set m_n);
 int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
 			struct dpll *best_clock);
@@ -1641,12 +1714,14 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state);
 void hsw_enable_ips(const struct intel_crtc_state *crtc_state);
 void hsw_disable_ips(const struct intel_crtc_state *crtc_state);
 enum intel_display_power_domain intel_port_to_power_domain(enum port port);
+enum intel_display_power_domain
+intel_aux_power_domain(struct intel_digital_port *dig_port);
 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
 				 struct intel_crtc_state *pipe_config);
 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
 				  struct intel_crtc_state *crtc_state);
 
-u16 skl_scaler_calc_phase(int sub, bool chroma_center);
+u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center);
 int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
 int skl_max_scale(const struct intel_crtc_state *crtc_state,
 		  u32 pixel_format);
@@ -1670,6 +1745,24 @@ unsigned int i9xx_plane_max_stride(struct intel_plane *plane,
 				   u32 pixel_format, u64 modifier,
 				   unsigned int rotation);
 
+/* intel_connector.c */
+int intel_connector_init(struct intel_connector *connector);
+struct intel_connector *intel_connector_alloc(void);
+void intel_connector_free(struct intel_connector *connector);
+void intel_connector_destroy(struct drm_connector *connector);
+int intel_connector_register(struct drm_connector *connector);
+void intel_connector_unregister(struct drm_connector *connector);
+void intel_connector_attach_encoder(struct intel_connector *connector,
+				    struct intel_encoder *encoder);
+bool intel_connector_get_hw_state(struct intel_connector *connector);
+enum pipe intel_connector_get_pipe(struct intel_connector *connector);
+int intel_connector_update_modes(struct drm_connector *connector,
+				 struct edid *edid);
+int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
+void intel_attach_force_audio_property(struct drm_connector *connector);
+void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
+void intel_attach_aspect_ratio_property(struct drm_connector *connector);
+
 /* intel_csr.c */
 void intel_csr_ucode_init(struct drm_i915_private *);
 void intel_csr_load_program(struct drm_i915_private *);
@@ -1728,9 +1821,6 @@ void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
 			       unsigned int frontbuffer_bits);
 void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
 			  unsigned int frontbuffer_bits);
-void icl_program_mg_dp_mode(struct intel_dp *intel_dp);
-void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port);
-void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port);
 
 void
 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
@@ -1748,6 +1838,10 @@ bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
 bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp);
 bool
 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]);
+uint16_t intel_dp_dsc_get_output_bpp(int link_clock, uint8_t lane_count,
+				     int mode_clock, int mode_hdisplay);
+uint8_t intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock,
+				     int mode_hdisplay);
 
 static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
 {
@@ -1768,6 +1862,9 @@ void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
 /* vlv_dsi.c */
 void vlv_dsi_init(struct drm_i915_private *dev_priv);
 
+/* icl_dsi.c */
+void icl_dsi_init(struct drm_i915_private *dev_priv);
+
 /* intel_dsi_dcs_backlight.c */
 int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector);
 
@@ -1858,7 +1955,6 @@ bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
 void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
 void intel_infoframe_init(struct intel_digital_port *intel_dig_port);
 
-
 /* intel_lvds.c */
 bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
 			     i915_reg_t lvds_reg, enum pipe *pipe);
@@ -1866,19 +1962,9 @@ void intel_lvds_init(struct drm_i915_private *dev_priv);
 struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev);
 bool intel_is_dual_link_lvds(struct drm_device *dev);
 
-
-/* intel_modes.c */
-int intel_connector_update_modes(struct drm_connector *connector,
-				 struct edid *edid);
-int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
-void intel_attach_force_audio_property(struct drm_connector *connector);
-void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
-void intel_attach_aspect_ratio_property(struct drm_connector *connector);
-
-
 /* intel_overlay.c */
-void intel_setup_overlay(struct drm_i915_private *dev_priv);
-void intel_cleanup_overlay(struct drm_i915_private *dev_priv);
+void intel_overlay_setup(struct drm_i915_private *dev_priv);
+void intel_overlay_cleanup(struct drm_i915_private *dev_priv);
 int intel_overlay_switch_off(struct intel_overlay *overlay);
 int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file_priv);
@@ -1907,7 +1993,6 @@ int intel_panel_setup_backlight(struct drm_connector *connector,
 void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
 				  const struct drm_connector_state *conn_state);
 void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state);
-void intel_panel_destroy_backlight(struct drm_connector *connector);
 extern struct drm_display_mode *intel_find_panel_downclock(
 				struct drm_i915_private *dev_priv,
 				struct drm_display_mode *fixed_mode,
@@ -1936,6 +2021,7 @@ int intel_hdcp_enable(struct intel_connector *connector);
 int intel_hdcp_disable(struct intel_connector *connector);
 int intel_hdcp_check_link(struct intel_connector *connector);
 bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);
+bool intel_hdcp_capable(struct intel_connector *connector);
 
 /* intel_psr.c */
 #define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support)
@@ -1962,11 +2048,16 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp);
 int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
 			    u32 *out_value);
 
+/* intel_quirks.c */
+void intel_init_quirks(struct drm_i915_private *dev_priv);
+
 /* intel_runtime_pm.c */
 int intel_power_domains_init(struct drm_i915_private *);
 void intel_power_domains_cleanup(struct drm_i915_private *dev_priv);
 void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
 void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv);
+void icl_display_core_init(struct drm_i915_private *dev_priv, bool resume);
+void icl_display_core_uninit(struct drm_i915_private *dev_priv);
 void intel_power_domains_enable(struct drm_i915_private *dev_priv);
 void intel_power_domains_disable(struct drm_i915_private *dev_priv);
 
@@ -2101,10 +2192,9 @@ int intel_enable_sagv(struct drm_i915_private *dev_priv);
 int intel_disable_sagv(struct drm_i915_private *dev_priv);
 bool skl_wm_level_equals(const struct skl_wm_level *l1,
 			 const struct skl_wm_level *l2);
-bool skl_ddb_allocation_overlaps(struct drm_i915_private *dev_priv,
-				 const struct skl_ddb_entry **entries,
-				 const struct skl_ddb_entry *ddb,
-				 int ignore);
+bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
+				 const struct skl_ddb_entry entries[],
+				 int num_entries, int ignore_idx);
 bool ilk_disable_lp_wm(struct drm_device *dev);
 int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
 				  struct intel_crtc_state *cstate);
@@ -2127,23 +2217,29 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
 				    struct drm_file *file_priv);
 void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state);
 void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state);
-void skl_update_plane(struct intel_plane *plane,
-		      const struct intel_crtc_state *crtc_state,
-		      const struct intel_plane_state *plane_state);
-void skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc);
-bool skl_plane_get_hw_state(struct intel_plane *plane, enum pipe *pipe);
-bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
-		       enum pipe pipe, enum plane_id plane_id);
-bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
-			  enum pipe pipe, enum plane_id plane_id);
-unsigned int skl_plane_max_stride(struct intel_plane *plane,
-				  u32 pixel_format, u64 modifier,
-				  unsigned int rotation);
-int skl_plane_check(struct intel_crtc_state *crtc_state,
-		    struct intel_plane_state *plane_state);
 int intel_plane_check_stride(const struct intel_plane_state *plane_state);
 int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state);
 int chv_plane_check_rotation(const struct intel_plane_state *plane_state);
+struct intel_plane *
+skl_universal_plane_create(struct drm_i915_private *dev_priv,
+			   enum pipe pipe, enum plane_id plane_id);
+
+static inline bool icl_is_nv12_y_plane(enum plane_id id)
+{
+	/* Don't need to do a gen check, these planes are only available on gen11 */
+	if (id == PLANE_SPRITE4 || id == PLANE_SPRITE5)
+		return true;
+
+	return false;
+}
+
+static inline bool icl_is_hdr_plane(struct intel_plane *plane)
+{
+	if (INTEL_GEN(to_i915(plane->base.dev)) < 11)
+		return false;
+
+	return plane->id < PLANE_SPRITE2;
+}
 
 /* intel_tv.c */
 void intel_tv_init(struct drm_i915_private *dev_priv);
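icl_is_nv12_y_plane() and icl_is_hdr_plane() above encode gen11 plane-layout facts: sprites 4 and 5 are the NV12 Y planes, and only planes below sprite 2 are HDR capable. A standalone rendition of the Y-plane check with simplified ids:

#include <stdio.h>

/* Simplified plane ids: gen11 reserves sprites 4 and 5 as Y planes. */
enum plane_id { PLANE_PRIMARY, PLANE_SPRITE0, PLANE_SPRITE1, PLANE_SPRITE2,
		PLANE_SPRITE3, PLANE_SPRITE4, PLANE_SPRITE5 };

static int is_nv12_y_plane(enum plane_id id)
{
	return id == PLANE_SPRITE4 || id == PLANE_SPRITE5;
}

int main(void)
{
	for (enum plane_id id = PLANE_PRIMARY; id <= PLANE_SPRITE5; id++)
		printf("plane %d: %s\n", id,
		       is_nv12_y_plane(id) ? "Y-only plane" : "full plane");
	return 0;
}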
@@ -2185,11 +2281,16 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
 				  struct intel_crtc_state *crtc_state);
 
 /* intel_atomic_plane.c */
-struct intel_plane_state *intel_create_plane_state(struct drm_plane *plane);
+struct intel_plane *intel_plane_alloc(void);
+void intel_plane_free(struct intel_plane *plane);
 struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane);
 void intel_plane_destroy_state(struct drm_plane *plane,
 			       struct drm_plane_state *state);
 extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
+void intel_update_planes_on_crtc(struct intel_atomic_state *old_state,
+				 struct intel_crtc *crtc,
+				 struct intel_crtc_state *old_crtc_state,
+				 struct intel_crtc_state *new_crtc_state);
 int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
 					struct intel_crtc_state *crtc_state,
 					const struct intel_plane_state *old_plane_state,
@@ -2205,6 +2306,18 @@ void intel_color_load_luts(struct drm_crtc_state *crtc_state);
 bool lspcon_init(struct intel_digital_port *intel_dig_port);
 void lspcon_resume(struct intel_lspcon *lspcon);
 void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon);
+void lspcon_write_infoframe(struct intel_encoder *encoder,
+			    const struct intel_crtc_state *crtc_state,
+			    unsigned int type,
+			    const void *buf, ssize_t len);
+void lspcon_set_infoframes(struct intel_encoder *encoder,
+			   bool enable,
+			   const struct intel_crtc_state *crtc_state,
+			   const struct drm_connector_state *conn_state);
+bool lspcon_infoframe_enabled(struct intel_encoder *encoder,
+			      const struct intel_crtc_state *pipe_config);
+void lspcon_ycbcr420_config(struct drm_connector *connector,
+			    struct intel_crtc_state *crtc_state);
 
 /* intel_pipe_crc.c */
 #ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
new file mode 100644
index 000000000000..5fec02aceaed
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <drm/drm_mipi_dsi.h>
+#include "intel_dsi.h"
+
+int intel_dsi_bitrate(const struct intel_dsi *intel_dsi)
+{
+	int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+
+	if (WARN_ON(bpp < 0))
+		bpp = 16;
+
+	return intel_dsi->pclk * bpp / intel_dsi->lane_count;
+}
+
+int intel_dsi_tlpx_ns(const struct intel_dsi *intel_dsi)
+{
+	switch (intel_dsi->escape_clk_div) {
+	default:
+	case 0:
+		return 50;
+	case 1:
+		return 100;
+	case 2:
+		return 200;
+	}
+}
+
+int intel_dsi_get_modes(struct drm_connector *connector)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct drm_display_mode *mode;
+
+	DRM_DEBUG_KMS("\n");
+
+	if (!intel_connector->panel.fixed_mode) {
+		DRM_DEBUG_KMS("no fixed mode\n");
+		return 0;
+	}
+
+	mode = drm_mode_duplicate(connector->dev,
+				  intel_connector->panel.fixed_mode);
+	if (!mode) {
+		DRM_DEBUG_KMS("drm_mode_duplicate failed\n");
+		return 0;
+	}
+
+	drm_mode_probed_add(connector, mode);
+	return 1;
+}
+
+enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
+					  struct drm_display_mode *mode)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+
+	DRM_DEBUG_KMS("\n");
+
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return MODE_NO_DBLESCAN;
+
+	if (fixed_mode) {
+		if (mode->hdisplay > fixed_mode->hdisplay)
+			return MODE_PANEL;
+		if (mode->vdisplay > fixed_mode->vdisplay)
+			return MODE_PANEL;
+		if (fixed_mode->clock > max_dotclk)
+			return MODE_CLOCK_HIGH;
+	}
+
+	return MODE_OK;
+}
+
+struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
+					   const struct mipi_dsi_host_ops *funcs,
+					   enum port port)
+{
+	struct intel_dsi_host *host;
+	struct mipi_dsi_device *device;
+
+	host = kzalloc(sizeof(*host), GFP_KERNEL);
+	if (!host)
+		return NULL;
+
+	host->base.ops = funcs;
+	host->intel_dsi = intel_dsi;
+	host->port = port;
+
+	/*
+	 * We should call mipi_dsi_host_register(&host->base) here, but we don't
+	 * have a host->dev, and we don't have OF stuff either. So just use the
+	 * dsi framework as a library and hope for the best. Create the dsi
+	 * devices by ourselves here too. Need to be careful though, because we
+	 * don't initialize any of the driver model devices here.
+	 */
+	device = kzalloc(sizeof(*device), GFP_KERNEL);
+	if (!device) {
+		kfree(host);
+		return NULL;
+	}
+
+	device->host = &host->base;
+	host->device = device;
+
+	return host;
+}
+
+enum drm_panel_orientation
+intel_dsi_get_panel_orientation(struct intel_connector *connector)
+{
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	enum drm_panel_orientation orientation;
+
+	orientation = dev_priv->vbt.dsi.orientation;
+	if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
+		return orientation;
+
+	orientation = dev_priv->vbt.orientation;
+	if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
+		return orientation;
+
+	return DRM_MODE_PANEL_ORIENTATION_NORMAL;
+}
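intel_dsi_bitrate() above is just pclk * bpp / lanes. A worked example with illustrative numbers (not taken from any real panel's VBT):

#include <stdio.h>

int main(void)
{
	int pclk = 148500;  /* pixel clock in kHz */
	int bpp = 24;       /* RGB888 */
	int lanes = 4;

	/* link bitrate per lane, in kbps */
	printf("bitrate = %d kbps\n", pclk * bpp / lanes); /* 891000 */
	return 0;
}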
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index ad7c1cb32983..ee93137f4433 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -81,14 +81,21 @@ struct intel_dsi {
 	u16 dcs_backlight_ports;
 	u16 dcs_cabc_ports;
 
+	/* RGB or BGR */
+	bool bgr_enabled;
+
 	u8 pixel_overlap;
 	u32 port_bits;
 	u32 bw_timer;
 	u32 dphy_reg;
+
+	/* data lanes dphy timing */
+	u32 dphy_data_lane_reg;
 	u32 video_frmt_cfg_bits;
 	u16 lp_byte_clk;
 
 	/* timeouts in byte clocks */
+	u16 hs_tx_timeout;
 	u16 lp_rx_timeout;
 	u16 turn_arnd_val;
 	u16 rst_timer_val;
@@ -129,9 +136,31 @@ static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
 	return container_of(encoder, struct intel_dsi, base.base);
 }
 
+static inline bool is_vid_mode(struct intel_dsi *intel_dsi)
+{
+	return intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE;
+}
+
+static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
+{
+	return intel_dsi->operation_mode == INTEL_DSI_COMMAND_MODE;
+}
+
+/* intel_dsi.c */
+int intel_dsi_bitrate(const struct intel_dsi *intel_dsi);
+int intel_dsi_tlpx_ns(const struct intel_dsi *intel_dsi);
+enum drm_panel_orientation
+intel_dsi_get_panel_orientation(struct intel_connector *connector);
+
 /* vlv_dsi.c */
 void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port);
 enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt);
+int intel_dsi_get_modes(struct drm_connector *connector);
+enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
+					  struct drm_display_mode *mode);
+struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
+					   const struct mipi_dsi_host_ops *funcs,
+					   enum port port);
 
 /* vlv_dsi_pll.c */
 int vlv_dsi_pll_compute(struct intel_encoder *encoder,
@@ -158,5 +187,6 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id);
 int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi);
 void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
 				 enum mipi_seq seq_id);
+void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec);
 
 #endif /* _INTEL_DSI_H */
diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c
index ac83d6b89ae0..a72de81f4832 100644
--- a/drivers/gpu/drm/i915/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c
@@ -111,6 +111,7 @@ static inline enum port intel_dsi_seq_port_to_port(u8 port)
 static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
 				       const u8 *data)
 {
+	struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
 	struct mipi_dsi_device *dsi_device;
 	u8 type, flags, seq_port;
 	u16 len;
@@ -181,7 +182,8 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
 		break;
 	}
 
-	vlv_dsi_wait_for_fifo_empty(intel_dsi, port);
+	if (!IS_ICELAKE(dev_priv))
+		vlv_dsi_wait_for_fifo_empty(intel_dsi, port);
 
 out:
 	data += len;
@@ -481,6 +483,17 @@ void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
 	}
 }
 
+void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
+{
+	struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+
+	/* For v3 VBTs in vid-mode the delays are part of the VBT sequences */
+	if (is_vid_mode(intel_dsi) && dev_priv->vbt.dsi.seq_version >= 3)
+		return;
+
+	msleep(msec);
+}
+
 int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi)
 {
 	struct intel_connector *connector = intel_dsi->attached_connector;
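intel_dsi_msleep() centralizes a quirk: v3+ video-mode VBTs carry their delays inside the sequences themselves, so the explicit sleep must be skipped there. A standalone sketch of that policy decision (printf stands in for msleep):

#include <stdio.h>

/* Stand-in sketch of the intel_dsi_msleep() policy: v3+ video-mode VBTs
 * embed their delays in the sequences, so the explicit sleep is skipped. */
static void dsi_msleep(int vid_mode, int seq_version, int msec)
{
	if (vid_mode && seq_version >= 3)
		return;                    /* delay already in VBT sequence */
	printf("sleeping %d ms\n", msec);  /* msleep(msec) in the driver */
}

int main(void)
{
	dsi_msleep(1, 3, 100);  /* skipped */
	dsi_msleep(1, 2, 100);  /* sleeps */
	return 0;
}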
@@ -499,110 +512,125 @@ int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi)
 	return 1;
 }
 
-bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
+#define ICL_PREPARE_CNT_MAX	0x7
+#define ICL_CLK_ZERO_CNT_MAX	0xf
+#define ICL_TRAIL_CNT_MAX	0x7
+#define ICL_TCLK_PRE_CNT_MAX	0x3
+#define ICL_TCLK_POST_CNT_MAX	0x7
+#define ICL_HS_ZERO_CNT_MAX	0xf
+#define ICL_EXIT_ZERO_CNT_MAX	0x7
+
+static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
 {
 	struct drm_device *dev = intel_dsi->base.base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
-	struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps;
-	struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode;
-	u32 bpp;
-	u32 tlpx_ns, extra_byte_count, bitrate, tlpx_ui;
-	u32 ui_num, ui_den;
+	u32 tlpx_ns;
 	u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
 	u32 ths_prepare_ns, tclk_trail_ns;
-	u32 tclk_prepare_clkzero, ths_prepare_hszero;
-	u32 lp_to_hs_switch, hs_to_lp_switch;
-	u32 pclk, computed_ddr;
-	u32 mul;
-	u16 burst_mode_ratio;
-	enum port port;
+	u32 hs_zero_cnt;
+	u32 tclk_pre_cnt, tclk_post_cnt;
 
-	DRM_DEBUG_KMS("\n");
+	tlpx_ns = intel_dsi_tlpx_ns(intel_dsi);
 
-	intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1;
-	intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
-	intel_dsi->lane_count = mipi_config->lane_cnt + 1;
-	intel_dsi->pixel_format =
-			pixel_format_from_register_bits(
-				mipi_config->videomode_color_format << 7);
-	bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
-
-	intel_dsi->dual_link = mipi_config->dual_link;
-	intel_dsi->pixel_overlap = mipi_config->pixel_overlap;
-	intel_dsi->operation_mode = mipi_config->is_cmd_mode;
-	intel_dsi->video_mode_format = mipi_config->video_transfer_mode;
-	intel_dsi->escape_clk_div = mipi_config->byte_clk_sel;
-	intel_dsi->lp_rx_timeout = mipi_config->lp_rx_timeout;
-	intel_dsi->turn_arnd_val = mipi_config->turn_around_timeout;
-	intel_dsi->rst_timer_val = mipi_config->device_reset_timer;
-	intel_dsi->init_count = mipi_config->master_init_timer;
-	intel_dsi->bw_timer = mipi_config->dbi_bw_timer;
-	intel_dsi->video_frmt_cfg_bits =
-		mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0;
-
-	pclk = mode->clock;
+	tclk_trail_ns = max(mipi_config->tclk_trail, mipi_config->ths_trail);
+	ths_prepare_ns = max(mipi_config->ths_prepare,
+			     mipi_config->tclk_prepare);
 
-	/* In dual link mode each port needs half of pixel clock */
-	if (intel_dsi->dual_link) {
-		pclk = pclk / 2;
+	/*
+	 * prepare cnt in escape clocks
+	 * this field represents a hexadecimal value with a precision
+	 * of 1.2 – i.e. the most significant bit is the integer
+	 * and the least significant 2 bits are fraction bits.
+	 * so, the field can represent a range of 0.25 to 1.75
+	 */
+	prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * 4, tlpx_ns);
+	if (prepare_cnt > ICL_PREPARE_CNT_MAX) {
+		DRM_DEBUG_KMS("prepare_cnt out of range (%d)\n", prepare_cnt);
+		prepare_cnt = ICL_PREPARE_CNT_MAX;
+	}
 
-		/* we can enable pixel_overlap if needed by panel. In this
-		 * case we need to increase the pixelclock for extra pixels
-		 */
-		if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
-			pclk += DIV_ROUND_UP(mode->vtotal *
-					     intel_dsi->pixel_overlap *
-					     60, 1000);
-		}
+	/* clk zero count in escape clocks */
+	clk_zero_cnt = DIV_ROUND_UP(mipi_config->tclk_prepare_clkzero -
+				    ths_prepare_ns, tlpx_ns);
+	if (clk_zero_cnt > ICL_CLK_ZERO_CNT_MAX) {
+		DRM_DEBUG_KMS("clk_zero_cnt out of range (%d)\n", clk_zero_cnt);
+		clk_zero_cnt = ICL_CLK_ZERO_CNT_MAX;
 	}
 
-	/* Burst Mode Ratio
-	 * Target ddr frequency from VBT / non burst ddr freq
-	 * multiply by 100 to preserve remainder
-	 */
-	if (intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
-		if (mipi_config->target_burst_mode_freq) {
-			computed_ddr = (pclk * bpp) / intel_dsi->lane_count;
+	/* trail cnt in escape clocks*/
+	trail_cnt = DIV_ROUND_UP(tclk_trail_ns, tlpx_ns);
+	if (trail_cnt > ICL_TRAIL_CNT_MAX) {
+		DRM_DEBUG_KMS("trail_cnt out of range (%d)\n", trail_cnt);
+		trail_cnt = ICL_TRAIL_CNT_MAX;
+	}
 
-			if (mipi_config->target_burst_mode_freq <
-			    computed_ddr) {
-				DRM_ERROR("Burst mode freq is less than computed\n");
-				return false;
-			}
+	/* tclk pre count in escape clocks */
+	tclk_pre_cnt = DIV_ROUND_UP(mipi_config->tclk_pre, tlpx_ns);
+	if (tclk_pre_cnt > ICL_TCLK_PRE_CNT_MAX) {
+		DRM_DEBUG_KMS("tclk_pre_cnt out of range (%d)\n", tclk_pre_cnt);
+		tclk_pre_cnt = ICL_TCLK_PRE_CNT_MAX;
+	}
 
-			burst_mode_ratio = DIV_ROUND_UP(
-				mipi_config->target_burst_mode_freq * 100,
-				computed_ddr);
+	/* tclk post count in escape clocks */
+	tclk_post_cnt = DIV_ROUND_UP(mipi_config->tclk_post, tlpx_ns);
+	if (tclk_post_cnt > ICL_TCLK_POST_CNT_MAX) {
+		DRM_DEBUG_KMS("tclk_post_cnt out of range (%d)\n", tclk_post_cnt);
+		tclk_post_cnt = ICL_TCLK_POST_CNT_MAX;
+	}
 
-			pclk = DIV_ROUND_UP(pclk * burst_mode_ratio, 100);
-		} else {
-			DRM_ERROR("Burst mode target is not set\n");
-			return false;
-		}
-	} else
-		burst_mode_ratio = 100;
+	/* hs zero cnt in escape clocks */
+	hs_zero_cnt = DIV_ROUND_UP(mipi_config->ths_prepare_hszero -
+				   ths_prepare_ns, tlpx_ns);
+	if (hs_zero_cnt > ICL_HS_ZERO_CNT_MAX) {
+		DRM_DEBUG_KMS("hs_zero_cnt out of range (%d)\n", hs_zero_cnt);
+		hs_zero_cnt = ICL_HS_ZERO_CNT_MAX;
+	}
 
-	intel_dsi->burst_mode_ratio = burst_mode_ratio;
-	intel_dsi->pclk = pclk;
+	/* hs exit zero cnt in escape clocks */
+	exit_zero_cnt = DIV_ROUND_UP(mipi_config->ths_exit, tlpx_ns);
+	if (exit_zero_cnt > ICL_EXIT_ZERO_CNT_MAX) {
+		DRM_DEBUG_KMS("exit_zero_cnt out of range (%d)\n", exit_zero_cnt);
+		exit_zero_cnt = ICL_EXIT_ZERO_CNT_MAX;
+	}
 
-	bitrate = (pclk * bpp) / intel_dsi->lane_count;
+	/* clock lane dphy timings */
+	intel_dsi->dphy_reg = (CLK_PREPARE_OVERRIDE |
+			       CLK_PREPARE(prepare_cnt) |
+			       CLK_ZERO_OVERRIDE |
+			       CLK_ZERO(clk_zero_cnt) |
+			       CLK_PRE_OVERRIDE |
+			       CLK_PRE(tclk_pre_cnt) |
+			       CLK_POST_OVERRIDE |
+			       CLK_POST(tclk_post_cnt) |
+			       CLK_TRAIL_OVERRIDE |
+			       CLK_TRAIL(trail_cnt));
+
+	/* data lanes dphy timings */
+	intel_dsi->dphy_data_lane_reg = (HS_PREPARE_OVERRIDE |
+					 HS_PREPARE(prepare_cnt) |
+					 HS_ZERO_OVERRIDE |
+					 HS_ZERO(hs_zero_cnt) |
+					 HS_TRAIL_OVERRIDE |
+					 HS_TRAIL(trail_cnt) |
+					 HS_EXIT_OVERRIDE |
+					 HS_EXIT(exit_zero_cnt));
+}
 
-	switch (intel_dsi->escape_clk_div) {
-	case 0:
-		tlpx_ns = 50;
-		break;
-	case 1:
-		tlpx_ns = 100;
-		break;
+static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
+{
+	struct drm_device *dev = intel_dsi->base.base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
+	u32 tlpx_ns, extra_byte_count, tlpx_ui;
+	u32 ui_num, ui_den;
+	u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
+	u32 ths_prepare_ns, tclk_trail_ns;
+	u32 tclk_prepare_clkzero, ths_prepare_hszero;
+	u32 lp_to_hs_switch, hs_to_lp_switch;
+	u32 mul;
 
-	case 2:
-		tlpx_ns = 200;
-		break;
-	default:
-		tlpx_ns = 50;
-		break;
-	}
+	tlpx_ns = intel_dsi_tlpx_ns(intel_dsi);
 
 	switch (intel_dsi->lane_count) {
 	case 1:
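A worked example of the 1.2 fixed-point prepare count described in the comment above: the value is kept in quarter-TLPX units, which is why the nanosecond value is multiplied by 4 before dividing. Sample inputs below are illustrative only:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define ICL_PREPARE_CNT_MAX 0x7

int main(void)
{
	unsigned int ths_prepare_ns = 60;  /* sample VBT value */
	unsigned int tlpx_ns = 50;         /* escape clock period */

	unsigned int prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * 4, tlpx_ns);
	if (prepare_cnt > ICL_PREPARE_CNT_MAX)
		prepare_cnt = ICL_PREPARE_CNT_MAX;

	/* 60 * 4 / 50 -> 5, i.e. 1.25 TLPX in 1.2 fixed point */
	printf("prepare_cnt = %u (%u.%02u TLPX)\n",
	       prepare_cnt, prepare_cnt / 4, (prepare_cnt % 4) * 25);
	return 0;
}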
@@ -620,7 +648,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
 
 	/* in Kbps */
 	ui_num = NS_KHZ_RATIO;
-	ui_den = bitrate;
+	ui_den = intel_dsi_bitrate(intel_dsi);
 
 	tclk_prepare_clkzero = mipi_config->tclk_prepare_clkzero;
 	ths_prepare_hszero = mipi_config->ths_prepare_hszero;
@@ -746,6 +774,88 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
 					DIV_ROUND_UP(2 * tlpx_ui + trail_cnt * 2 + 8,
 						     8);
 	intel_dsi->clk_hs_to_lp_count += extra_byte_count;
+}
+
+bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
+{
+	struct drm_device *dev = intel_dsi->base.base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
+	struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps;
+	struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode;
+	u16 burst_mode_ratio;
+	enum port port;
+
+	DRM_DEBUG_KMS("\n");
+
+	intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1;
+	intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
+	intel_dsi->lane_count = mipi_config->lane_cnt + 1;
+	intel_dsi->pixel_format =
+			pixel_format_from_register_bits(
+				mipi_config->videomode_color_format << 7);
+
+	intel_dsi->dual_link = mipi_config->dual_link;
+	intel_dsi->pixel_overlap = mipi_config->pixel_overlap;
+	intel_dsi->operation_mode = mipi_config->is_cmd_mode;
+	intel_dsi->video_mode_format = mipi_config->video_transfer_mode;
+	intel_dsi->escape_clk_div = mipi_config->byte_clk_sel;
+	intel_dsi->lp_rx_timeout = mipi_config->lp_rx_timeout;
+	intel_dsi->hs_tx_timeout = mipi_config->hs_tx_timeout;
+	intel_dsi->turn_arnd_val = mipi_config->turn_around_timeout;
+	intel_dsi->rst_timer_val = mipi_config->device_reset_timer;
+	intel_dsi->init_count = mipi_config->master_init_timer;
+	intel_dsi->bw_timer = mipi_config->dbi_bw_timer;
+	intel_dsi->video_frmt_cfg_bits =
+		mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0;
+	intel_dsi->bgr_enabled = mipi_config->rgb_flip;
+
+	/* Starting point, adjusted depending on dual link and burst mode */
+	intel_dsi->pclk = mode->clock;
+
+	/* In dual link mode each port needs half of pixel clock */
+	if (intel_dsi->dual_link) {
+		intel_dsi->pclk /= 2;
+
+		/* we can enable pixel_overlap if needed by panel. In this
+		 * case we need to increase the pixelclock for extra pixels
+		 */
+		if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
+			intel_dsi->pclk += DIV_ROUND_UP(mode->vtotal * intel_dsi->pixel_overlap * 60, 1000);
+		}
+	}
+
+	/* Burst Mode Ratio
+	 * Target ddr frequency from VBT / non burst ddr freq
+	 * multiply by 100 to preserve remainder
+	 */
+	if (intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
+		if (mipi_config->target_burst_mode_freq) {
+			u32 bitrate = intel_dsi_bitrate(intel_dsi);
+
+			if (mipi_config->target_burst_mode_freq < bitrate) {
+				DRM_ERROR("Burst mode freq is less than computed\n");
+				return false;
+			}
+
+			burst_mode_ratio = DIV_ROUND_UP(
+				mipi_config->target_burst_mode_freq * 100,
+				bitrate);
+
+			intel_dsi->pclk = DIV_ROUND_UP(intel_dsi->pclk * burst_mode_ratio, 100);
+		} else {
+			DRM_ERROR("Burst mode target is not set\n");
+			return false;
+		}
+	} else
+		burst_mode_ratio = 100;
+
+	intel_dsi->burst_mode_ratio = burst_mode_ratio;
+
+	if (IS_ICELAKE(dev_priv))
+		icl_dphy_param_init(intel_dsi);
+	else
+		vlv_dphy_param_init(intel_dsi);
 
 	DRM_DEBUG_KMS("Pclk %d\n", intel_dsi->pclk);
 	DRM_DEBUG_KMS("Pixel overlap %d\n", intel_dsi->pixel_overlap);
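The burst-mode ratio in intel_dsi_vbt_init() above is the target DDR frequency over the non-burst bitrate, scaled by 100 to keep two digits of remainder. A worked example with made-up numbers:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int bitrate = 891000;       /* non-burst link rate, kbps */
	unsigned int target_freq = 1000000;  /* VBT burst-mode target, kbps */
	unsigned int pclk = 148500;          /* pixel clock, kHz */

	unsigned int ratio = DIV_ROUND_UP(target_freq * 100, bitrate);
	pclk = DIV_ROUND_UP(pclk * ratio, 100);

	printf("burst ratio %u%%, adjusted pclk %u kHz\n", ratio, pclk);
	return 0;
}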
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 4e142ff49708..0042a7f69387 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -256,6 +256,7 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
 	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
 		return false;
 
+	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
 	return true;
 }
 
@@ -333,18 +334,11 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
 	return 0;
 }
 
-static void intel_dvo_destroy(struct drm_connector *connector)
-{
-	drm_connector_cleanup(connector);
-	intel_panel_fini(&to_intel_connector(connector)->panel);
-	kfree(connector);
-}
-
 static const struct drm_connector_funcs intel_dvo_connector_funcs = {
 	.detect = intel_dvo_detect,
 	.late_register = intel_connector_register,
 	.early_unregister = intel_connector_unregister,
-	.destroy = intel_dvo_destroy,
+	.destroy = intel_connector_destroy,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 217ed3ee1cab..759c0fd58f8c 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -273,13 +273,13 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
273 BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH)); 273 BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
274 BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH)); 274 BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));
275 275
276 if (GEM_WARN_ON(info->class > MAX_ENGINE_CLASS)) 276 if (GEM_DEBUG_WARN_ON(info->class > MAX_ENGINE_CLASS))
277 return -EINVAL; 277 return -EINVAL;
278 278
279 if (GEM_WARN_ON(info->instance > MAX_ENGINE_INSTANCE)) 279 if (GEM_DEBUG_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
280 return -EINVAL; 280 return -EINVAL;
281 281
282 if (GEM_WARN_ON(dev_priv->engine_class[info->class][info->instance])) 282 if (GEM_DEBUG_WARN_ON(dev_priv->engine_class[info->class][info->instance]))
283 return -EINVAL; 283 return -EINVAL;
284 284
285 GEM_BUG_ON(dev_priv->engine[id]); 285 GEM_BUG_ON(dev_priv->engine[id]);
@@ -335,7 +335,10 @@ int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
335 335
336 WARN_ON(ring_mask == 0); 336 WARN_ON(ring_mask == 0);
337 WARN_ON(ring_mask & 337 WARN_ON(ring_mask &
338 GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES)); 338 GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));
339
340 if (i915_inject_load_failure())
341 return -ENODEV;
339 342
340 for (i = 0; i < ARRAY_SIZE(intel_engines); i++) { 343 for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
341 if (!HAS_ENGINE(dev_priv, i)) 344 if (!HAS_ENGINE(dev_priv, i))
@@ -399,7 +402,7 @@ int intel_engines_init(struct drm_i915_private *dev_priv)
399 err = -EINVAL; 402 err = -EINVAL;
400 err_id = id; 403 err_id = id;
401 404
402 if (GEM_WARN_ON(!init)) 405 if (GEM_DEBUG_WARN_ON(!init))
403 goto cleanup; 406 goto cleanup;
404 407
405 err = init(engine); 408 err = init(engine);
@@ -463,7 +466,7 @@ static void intel_engine_init_execlist(struct intel_engine_cs *engine)
463 struct intel_engine_execlists * const execlists = &engine->execlists; 466 struct intel_engine_execlists * const execlists = &engine->execlists;
464 467
465 execlists->port_mask = 1; 468 execlists->port_mask = 1;
466 BUILD_BUG_ON_NOT_POWER_OF_2(execlists_num_ports(execlists)); 469 GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists)));
467 GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS); 470 GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);
468 471
469 execlists->queue_priority = INT_MIN; 472 execlists->queue_priority = INT_MIN;
@@ -482,7 +485,7 @@ static void intel_engine_init_execlist(struct intel_engine_cs *engine)
482void intel_engine_setup_common(struct intel_engine_cs *engine) 485void intel_engine_setup_common(struct intel_engine_cs *engine)
483{ 486{
484 i915_timeline_init(engine->i915, &engine->timeline, engine->name); 487 i915_timeline_init(engine->i915, &engine->timeline, engine->name);
485 lockdep_set_subclass(&engine->timeline.lock, TIMELINE_ENGINE); 488 i915_timeline_set_subclass(&engine->timeline, TIMELINE_ENGINE);
486 489
487 intel_engine_init_execlist(engine); 490 intel_engine_init_execlist(engine);
488 intel_engine_init_hangcheck(engine); 491 intel_engine_init_hangcheck(engine);
@@ -809,7 +812,7 @@ u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv)
809 u32 slice = fls(sseu->slice_mask); 812 u32 slice = fls(sseu->slice_mask);
810 u32 subslice = fls(sseu->subslice_mask[slice]); 813 u32 subslice = fls(sseu->subslice_mask[slice]);
811 814
812 if (INTEL_GEN(dev_priv) == 10) 815 if (IS_GEN10(dev_priv))
813 mcr_s_ss_select = GEN8_MCR_SLICE(slice) | 816 mcr_s_ss_select = GEN8_MCR_SLICE(slice) |
814 GEN8_MCR_SUBSLICE(subslice); 817 GEN8_MCR_SUBSLICE(subslice);
815 else if (INTEL_GEN(dev_priv) >= 11) 818 else if (INTEL_GEN(dev_priv) >= 11)
@@ -1534,10 +1537,10 @@ void intel_engine_dump(struct intel_engine_cs *engine,
1534 count = 0; 1537 count = 0;
1535 drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority); 1538 drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority);
1536 for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) { 1539 for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
1537 struct i915_priolist *p = 1540 struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
1538 rb_entry(rb, typeof(*p), node); 1541 int i;
1539 1542
1540 list_for_each_entry(rq, &p->requests, sched.link) { 1543 priolist_for_each_request(rq, p, i) {
1541 if (count++ < MAX_REQUESTS_TO_SHOW - 1) 1544 if (count++ < MAX_REQUESTS_TO_SHOW - 1)
1542 print_request(m, rq, "\t\tQ "); 1545 print_request(m, rq, "\t\tQ ");
1543 else 1546 else
@@ -1559,8 +1562,10 @@ void intel_engine_dump(struct intel_engine_cs *engine,
1559 for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) { 1562 for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
1560 struct intel_wait *w = rb_entry(rb, typeof(*w), node); 1563 struct intel_wait *w = rb_entry(rb, typeof(*w), node);
1561 1564
1562 drm_printf(m, "\t%s [%d] waiting for %x\n", 1565 drm_printf(m, "\t%s [%d:%c] waiting for %x\n",
1563 w->tsk->comm, w->tsk->pid, w->seqno); 1566 w->tsk->comm, w->tsk->pid,
1567 task_state_to_char(w->tsk),
1568 w->seqno);
1564 } 1569 }
1565 spin_unlock(&b->rb_lock); 1570 spin_unlock(&b->rb_lock);
1566 local_irq_restore(flags); 1571 local_irq_restore(flags);
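Aside: the WARN_ON change above swaps the open-coded sizeof(mask) * BITS_PER_BYTE for BITS_PER_TYPE(mask). A small sketch of what that mask catches, with a hypothetical engine count and a deliberately bogus ring bit; the GENMASK here is a 32-bit reimplementation for illustration only:

#include <stdio.h>

#define BITS_PER_TYPE(t) (sizeof(t) * 8)
#define GENMASK(h, l) \
	(((~0u) >> (BITS_PER_TYPE(unsigned int) - 1 - (h))) & ((~0u) << (l)))

#define NUM_ENGINES 8	/* hypothetical, stands in for I915_NUM_ENGINES */

int main(void)
{
	unsigned int ring_mask = (1u << 3) | (1u << 9); /* bit 9 is bogus */
	/* every bit at or above NUM_ENGINES cannot name a real engine */
	unsigned int stray = ring_mask &
			     GENMASK(BITS_PER_TYPE(ring_mask) - 1, NUM_ENGINES);

	printf("stray engine bits: %#x\n", stray); /* prints 0x200 */
	return 0;
}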
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 74d425c700ef..14cbaf4a0e93 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -84,7 +84,7 @@ static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
84 int lines; 84 int lines;
85 85
86 intel_fbc_get_plane_source_size(cache, NULL, &lines); 86 intel_fbc_get_plane_source_size(cache, NULL, &lines);
87 if (INTEL_GEN(dev_priv) == 7) 87 if (IS_GEN7(dev_priv))
88 lines = min(lines, 2048); 88 lines = min(lines, 2048);
89 else if (INTEL_GEN(dev_priv) >= 8) 89 else if (INTEL_GEN(dev_priv) >= 8)
90 lines = min(lines, 2560); 90 lines = min(lines, 2560);
@@ -674,6 +674,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
674 cache->plane.adjusted_y = plane_state->color_plane[0].y; 674 cache->plane.adjusted_y = plane_state->color_plane[0].y;
675 cache->plane.y = plane_state->base.src.y1 >> 16; 675 cache->plane.y = plane_state->base.src.y1 >> 16;
676 676
677 cache->plane.pixel_blend_mode = plane_state->base.pixel_blend_mode;
678
677 if (!cache->plane.visible) 679 if (!cache->plane.visible)
678 return; 680 return;
679 681
@@ -748,6 +750,12 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
748 return false; 750 return false;
749 } 751 }
750 752
753 if (cache->plane.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE &&
754 cache->fb.format->has_alpha) {
755 fbc->no_fbc_reason = "per-pixel alpha blending is incompatible with FBC";
756 return false;
757 }
758
751 /* WaFbcExceedCdClockThreshold:hsw,bdw */ 759 /* WaFbcExceedCdClockThreshold:hsw,bdw */
752 if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) && 760 if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
753 cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk.hw.cdclk * 95 / 100) { 761 cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk.hw.cdclk * 95 / 100) {
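Aside: the new FBC guard reduces to a simple predicate: compression is refused whenever the plane honours per-pixel alpha and the format actually carries an alpha channel. A toy model of that rule (the enum values and names are illustrative, not the DRM API):

#include <stdbool.h>
#include <stdio.h>

/* Mirrors DRM_MODE_BLEND_* in spirit only; values are made up. */
enum blend_mode { BLEND_PIXEL_NONE, BLEND_PREMULTI, BLEND_COVERAGE };

static bool fbc_blend_ok(enum blend_mode mode, bool fb_has_alpha)
{
	/* FBC is fine if alpha is ignored, or the format has none */
	return mode == BLEND_PIXEL_NONE || !fb_has_alpha;
}

int main(void)
{
	printf("%d %d\n",
	       fbc_blend_ok(BLEND_PREMULTI, true),   /* 0: rejected */
	       fbc_blend_ok(BLEND_PREMULTI, false)); /* 1: allowed */
	return 0;
}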
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index f99332972b7a..2480c7d6edee 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -593,7 +593,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
593 * pipe. Note we need to use the selected fb's pitch and bpp 593 * pipe. Note we need to use the selected fb's pitch and bpp
594 * rather than the current pipe's, since they differ. 594 * rather than the current pipe's, since they differ.
595 */ 595 */
596 cur_size = intel_crtc->config->base.adjusted_mode.crtc_hdisplay; 596 cur_size = crtc->state->adjusted_mode.crtc_hdisplay;
597 cur_size = cur_size * fb->base.format->cpp[0]; 597 cur_size = cur_size * fb->base.format->cpp[0];
598 if (fb->base.pitches[0] < cur_size) { 598 if (fb->base.pitches[0] < cur_size) {
599 DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n", 599 DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n",
@@ -603,13 +603,13 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
603 break; 603 break;
604 } 604 }
605 605
606 cur_size = intel_crtc->config->base.adjusted_mode.crtc_vdisplay; 606 cur_size = crtc->state->adjusted_mode.crtc_vdisplay;
607 cur_size = intel_fb_align_height(&fb->base, 0, cur_size); 607 cur_size = intel_fb_align_height(&fb->base, 0, cur_size);
608 cur_size *= fb->base.pitches[0]; 608 cur_size *= fb->base.pitches[0];
609 DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n", 609 DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n",
610 pipe_name(intel_crtc->pipe), 610 pipe_name(intel_crtc->pipe),
611 intel_crtc->config->base.adjusted_mode.crtc_hdisplay, 611 crtc->state->adjusted_mode.crtc_hdisplay,
612 intel_crtc->config->base.adjusted_mode.crtc_vdisplay, 612 crtc->state->adjusted_mode.crtc_vdisplay,
613 fb->base.format->cpp[0] * 8, 613 fb->base.format->cpp[0] * 8,
614 cur_size); 614 cur_size);
615 615
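Aside: the fbdev checks above size the BIOS-inherited framebuffer in two steps: bytes per row against what the pipe needs, then aligned rows times the fb's own pitch. A standalone sketch with made-up mode numbers and an assumed 64-row tile alignment:

#include <stdio.h>

/* Sketch of the BIOS-fb reuse check; the 64-row alignment is an
 * illustrative stand-in for intel_fb_align_height(). */
static unsigned int align_up(unsigned int v, unsigned int a)
{
	return (v + a - 1) / a * a;
}

int main(void)
{
	unsigned int hdisplay = 1920, vdisplay = 1080, cpp = 4;
	unsigned int pitch = 7680;	/* bytes per row of the BIOS fb */

	if (pitch < hdisplay * cpp) {
		printf("fb not wide enough (%u vs %u)\n",
		       pitch, hdisplay * cpp);
		return 1;
	}
	printf("size needed: %u bytes\n", align_up(vdisplay, 64) * pitch);
	return 0;
}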
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index 230aea69385d..8660af3fd755 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -50,7 +50,8 @@ void intel_guc_init_send_regs(struct intel_guc *guc)
50 unsigned int i; 50 unsigned int i;
51 51
52 guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0)); 52 guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
53 guc->send_regs.count = SOFT_SCRATCH_COUNT - 1; 53 guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
54 BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
54 55
55 for (i = 0; i < guc->send_regs.count; i++) { 56 for (i = 0; i < guc->send_regs.count; i++) {
56 fw_domains |= intel_uncore_forcewake_for_reg(dev_priv, 57 fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
@@ -521,6 +522,44 @@ int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
521 return intel_guc_send(guc, action, ARRAY_SIZE(action)); 522 return intel_guc_send(guc, action, ARRAY_SIZE(action));
522} 523}
523 524
525/*
526 * The ENTER/EXIT_S_STATE actions queue the save/restore operation in GuC FW and
527 * then return, so waiting on the H2G is not enough to guarantee GuC is done.
528 * When all the processing is done, GuC writes INTEL_GUC_SLEEP_STATE_SUCCESS to
529 * scratch register 14, so we can poll on that. Note that GuC does not ensure
530 * that the value in the register is different from
531 * INTEL_GUC_SLEEP_STATE_SUCCESS while the action is in progress so we need to
532 * take care of that ourselves as well.
533 */
534static int guc_sleep_state_action(struct intel_guc *guc,
535 const u32 *action, u32 len)
536{
537 struct drm_i915_private *dev_priv = guc_to_i915(guc);
538 int ret;
539 u32 status;
540
541 I915_WRITE(SOFT_SCRATCH(14), INTEL_GUC_SLEEP_STATE_INVALID_MASK);
542
543 ret = intel_guc_send(guc, action, len);
544 if (ret)
545 return ret;
546
547 ret = __intel_wait_for_register(dev_priv, SOFT_SCRATCH(14),
548 INTEL_GUC_SLEEP_STATE_INVALID_MASK,
549 0, 0, 10, &status);
550 if (ret)
551 return ret;
552
553 if (status != INTEL_GUC_SLEEP_STATE_SUCCESS) {
554 DRM_ERROR("GuC failed to change sleep state. "
555 "action=0x%x, err=%u\n",
556 action[0], status);
557 return -EIO;
558 }
559
560 return 0;
561}
562
524/** 563/**
525 * intel_guc_suspend() - notify GuC entering suspend state 564 * intel_guc_suspend() - notify GuC entering suspend state
526 * @guc: the guc 565 * @guc: the guc
@@ -533,7 +572,7 @@ int intel_guc_suspend(struct intel_guc *guc)
533 intel_guc_ggtt_offset(guc, guc->shared_data) 572 intel_guc_ggtt_offset(guc, guc->shared_data)
534 }; 573 };
535 574
536 return intel_guc_send(guc, data, ARRAY_SIZE(data)); 575 return guc_sleep_state_action(guc, data, ARRAY_SIZE(data));
537} 576}
538 577
539/** 578/**
@@ -571,7 +610,7 @@ int intel_guc_resume(struct intel_guc *guc)
571 intel_guc_ggtt_offset(guc, guc->shared_data) 610 intel_guc_ggtt_offset(guc, guc->shared_data)
572 }; 611 };
573 612
574 return intel_guc_send(guc, data, ARRAY_SIZE(data)); 613 return guc_sleep_state_action(guc, data, ARRAY_SIZE(data));
575} 614}
576 615
577/** 616/**
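Aside: the handshake guc_sleep_state_action() implements, modelled in plain C with the scratch register as a variable. Seeding SOFT_SCRATCH(14) with the invalid pattern before the action is the key step; all function names below are stand-ins, not driver API.

#include <stdio.h>
#include <stdint.h>

#define SLEEP_STATE_SUCCESS		0x0u
#define SLEEP_STATE_INVALID_MASK	0x80000000u

/* Stand-in for SOFT_SCRATCH(14); the driver reads this over MMIO. */
static uint32_t scratch14;

/* Models GuC finishing the queued save/restore some time later. */
static void fake_guc_completes(void)
{
	scratch14 = SLEEP_STATE_SUCCESS;
}

static int sleep_state_action(void)
{
	int tries;

	/*
	 * Seed the register with a pattern GuC never reports, so a
	 * stale SUCCESS from an earlier action cannot satisfy the poll.
	 */
	scratch14 = SLEEP_STATE_INVALID_MASK;

	fake_guc_completes();	/* the H2G action would be sent here */

	for (tries = 0; tries < 10; tries++)
		if (!(scratch14 & SLEEP_STATE_INVALID_MASK))
			break;
	if (tries == 10)
		return -1;	/* timed out */

	return scratch14 == SLEEP_STATE_SUCCESS ? 0 : -5; /* -EIO-like */
}

int main(void)
{
	printf("sleep state action: %d\n", sleep_state_action());
	return 0;
}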
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index ad42faf48c46..0f1c4f9ebfd8 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -95,6 +95,11 @@ struct intel_guc {
95 void (*notify)(struct intel_guc *guc); 95 void (*notify)(struct intel_guc *guc);
96}; 96};
97 97
98static inline bool intel_guc_is_alive(struct intel_guc *guc)
99{
100 return intel_uc_fw_is_loaded(&guc->fw);
101}
102
98static 103static
99inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len) 104inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
100{ 105{
diff --git a/drivers/gpu/drm/i915/intel_guc_fw.c b/drivers/gpu/drm/i915/intel_guc_fw.c
index a9e6fcce467c..a67144ee5ceb 100644
--- a/drivers/gpu/drm/i915/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/intel_guc_fw.c
@@ -78,7 +78,8 @@ static void guc_fw_select(struct intel_uc_fw *guc_fw)
78 guc_fw->major_ver_wanted = KBL_FW_MAJOR; 78 guc_fw->major_ver_wanted = KBL_FW_MAJOR;
79 guc_fw->minor_ver_wanted = KBL_FW_MINOR; 79 guc_fw->minor_ver_wanted = KBL_FW_MINOR;
80 } else { 80 } else {
81 DRM_WARN("%s: No firmware known for this platform!\n", 81 dev_info(dev_priv->drm.dev,
82 "%s: No firmware known for this platform!\n",
82 intel_uc_fw_type_repr(guc_fw->type)); 83 intel_uc_fw_type_repr(guc_fw->type));
83 } 84 }
84} 85}
@@ -125,66 +126,26 @@ static void guc_prepare_xfer(struct intel_guc *guc)
125} 126}
126 127
127/* Copy RSA signature from the fw image to HW for verification */ 128/* Copy RSA signature from the fw image to HW for verification */
128static int guc_xfer_rsa(struct intel_guc *guc, struct i915_vma *vma) 129static void guc_xfer_rsa(struct intel_guc *guc, struct i915_vma *vma)
129{ 130{
130 struct drm_i915_private *dev_priv = guc_to_i915(guc); 131 struct drm_i915_private *dev_priv = guc_to_i915(guc);
131 struct intel_uc_fw *guc_fw = &guc->fw;
132 struct sg_table *sg = vma->pages;
133 u32 rsa[UOS_RSA_SCRATCH_COUNT]; 132 u32 rsa[UOS_RSA_SCRATCH_COUNT];
134 int i; 133 int i;
135 134
136 if (sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, sizeof(rsa), 135 sg_pcopy_to_buffer(vma->pages->sgl, vma->pages->nents,
137 guc_fw->rsa_offset) != sizeof(rsa)) 136 rsa, sizeof(rsa), guc->fw.rsa_offset);
138 return -EINVAL;
139 137
140 for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++) 138 for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
141 I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]); 139 I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);
142
143 return 0;
144} 140}
145 141
146/* 142static bool guc_xfer_completed(struct intel_guc *guc, u32 *status)
147 * Transfer the firmware image to RAM for execution by the microcontroller.
148 *
149 * Architecturally, the DMA engine is bidirectional, and can potentially even
150 * transfer between GTT locations. This functionality is left out of the API
151 * for now as there is no need for it.
152 */
153static int guc_xfer_ucode(struct intel_guc *guc, struct i915_vma *vma)
154{ 143{
155 struct drm_i915_private *dev_priv = guc_to_i915(guc); 144 struct drm_i915_private *dev_priv = guc_to_i915(guc);
156 struct intel_uc_fw *guc_fw = &guc->fw;
157 unsigned long offset;
158 u32 status;
159 int ret;
160
161 /*
162 * The header plus uCode will be copied to WOPCM via DMA, excluding any
163 * other components
164 */
165 I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
166
167 /* Set the source address for the new blob */
168 offset = intel_guc_ggtt_offset(guc, vma) + guc_fw->header_offset;
169 I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
170 I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
171 145
172 /* 146 /* Did we complete the xfer? */
173 * Set the DMA destination. Current uCode expects the code to be 147 *status = I915_READ(DMA_CTRL);
174 * loaded at 8k; locations below this are used for the stack. 148 return !(*status & START_DMA);
175 */
176 I915_WRITE(DMA_ADDR_1_LOW, 0x2000);
177 I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
178
179 /* Finally start the DMA */
180 I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));
181
182 /* Wait for DMA to finish */
183 ret = __intel_wait_for_register_fw(dev_priv, DMA_CTRL, START_DMA, 0,
184 2, 100, &status);
185 DRM_DEBUG_DRIVER("GuC DMA status %#x\n", status);
186
187 return ret;
188} 149}
189 150
190/* 151/*
@@ -217,8 +178,8 @@ static int guc_wait_ucode(struct intel_guc *guc)
217 * NB: Docs recommend not using the interrupt for completion. 178 * NB: Docs recommend not using the interrupt for completion.
218 * Measurements indicate this should take no more than 20ms, so a 179 * Measurements indicate this should take no more than 20ms, so a
219 * timeout here indicates that the GuC has failed and is unusable. 180 * timeout here indicates that the GuC has failed and is unusable.
220 * (Higher levels of the driver will attempt to fall back to 181 * (Higher levels of the driver may decide to reset the GuC and
221 * execlist mode if this happens.) 182 * attempt the ucode load again if this happens.)
222 */ 183 */
223 ret = wait_for(guc_ready(guc, &status), 100); 184 ret = wait_for(guc_ready(guc, &status), 100);
224 DRM_DEBUG_DRIVER("GuC status %#x\n", status); 185 DRM_DEBUG_DRIVER("GuC status %#x\n", status);
@@ -228,10 +189,52 @@ static int guc_wait_ucode(struct intel_guc *guc)
228 ret = -ENOEXEC; 189 ret = -ENOEXEC;
229 } 190 }
230 191
192 if (ret == 0 && !guc_xfer_completed(guc, &status)) {
193 DRM_ERROR("GuC is ready, but the xfer %08x is incomplete\n",
194 status);
195 ret = -ENXIO;
196 }
197
231 return ret; 198 return ret;
232} 199}
233 200
234/* 201/*
202 * Transfer the firmware image to RAM for execution by the microcontroller.
203 *
204 * Architecturally, the DMA engine is bidirectional, and can potentially even
205 * transfer between GTT locations. This functionality is left out of the API
206 * for now as there is no need for it.
207 */
208static int guc_xfer_ucode(struct intel_guc *guc, struct i915_vma *vma)
209{
210 struct drm_i915_private *dev_priv = guc_to_i915(guc);
211 struct intel_uc_fw *guc_fw = &guc->fw;
212 unsigned long offset;
213
214 /*
215 * The header plus uCode will be copied to WOPCM via DMA, excluding any
216 * other components
217 */
218 I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
219
220 /* Set the source address for the new blob */
221 offset = intel_guc_ggtt_offset(guc, vma) + guc_fw->header_offset;
222 I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
223 I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
224
225 /*
226 * Set the DMA destination. Current uCode expects the code to be
227 * loaded at 8k; locations below this are used for the stack.
228 */
229 I915_WRITE(DMA_ADDR_1_LOW, 0x2000);
230 I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
231
232 /* Finally start the DMA */
233 I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));
234
235 return guc_wait_ucode(guc);
236}
237/*
235 * Load the GuC firmware blob into the MinuteIA. 238 * Load the GuC firmware blob into the MinuteIA.
236 */ 239 */
237static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma) 240static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
@@ -251,17 +254,9 @@ static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
251 * by the DMA engine in one operation, whereas the RSA signature is 254 * by the DMA engine in one operation, whereas the RSA signature is
252 * loaded via MMIO. 255 * loaded via MMIO.
253 */ 256 */
254 ret = guc_xfer_rsa(guc, vma); 257 guc_xfer_rsa(guc, vma);
255 if (ret)
256 DRM_WARN("GuC firmware signature xfer error %d\n", ret);
257 258
258 ret = guc_xfer_ucode(guc, vma); 259 ret = guc_xfer_ucode(guc, vma);
259 if (ret)
260 DRM_WARN("GuC firmware code xfer error %d\n", ret);
261
262 ret = guc_wait_ucode(guc);
263 if (ret)
264 DRM_ERROR("GuC firmware xfer error %d\n", ret);
265 260
266 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 261 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
267 262
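Aside: with the wait folded into guc_xfer_ucode(), "done" is simply DMA_CTRL's START_DMA bit going low. A toy poll loop over a fake register; the real code reads MMIO and sleeps between samples, which a counter stands in for here:

#include <stdio.h>
#include <stdint.h>

#define START_DMA (1u << 0)

/* Stand-in for the DMA_CTRL register. */
static uint32_t dma_ctrl = START_DMA;

static void fake_hw_tick(void) { dma_ctrl &= ~START_DMA; }

/* Mirrors guc_xfer_completed(): done once hardware clears START_DMA. */
static int xfer_completed(uint32_t *status)
{
	*status = dma_ctrl;
	return !(*status & START_DMA);
}

int main(void)
{
	uint32_t status = 0;
	int tries;

	for (tries = 0; tries < 100 && !xfer_completed(&status); tries++)
		fake_hw_tick();	/* stands in for a delay between reads */

	printf("DMA status %#x after %d polls\n", status, tries);
	return 0;
}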
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index 8382d591c784..b2f5148f4f17 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -39,6 +39,11 @@
39#define GUC_VIDEO_ENGINE2 4 39#define GUC_VIDEO_ENGINE2 4
40#define GUC_MAX_ENGINES_NUM (GUC_VIDEO_ENGINE2 + 1) 40#define GUC_MAX_ENGINES_NUM (GUC_VIDEO_ENGINE2 + 1)
41 41
42#define GUC_DOORBELL_INVALID 256
43
44#define GUC_DB_SIZE (PAGE_SIZE)
45#define GUC_WQ_SIZE (PAGE_SIZE * 2)
46
42/* Work queue item header definitions */ 47/* Work queue item header definitions */
43#define WQ_STATUS_ACTIVE 1 48#define WQ_STATUS_ACTIVE 1
44#define WQ_STATUS_SUSPENDED 2 49#define WQ_STATUS_SUSPENDED 2
@@ -59,9 +64,6 @@
59#define WQ_RING_TAIL_MAX 0x7FF /* 2^11 QWords */ 64#define WQ_RING_TAIL_MAX 0x7FF /* 2^11 QWords */
60#define WQ_RING_TAIL_MASK (WQ_RING_TAIL_MAX << WQ_RING_TAIL_SHIFT) 65#define WQ_RING_TAIL_MASK (WQ_RING_TAIL_MAX << WQ_RING_TAIL_SHIFT)
61 66
62#define GUC_DOORBELL_ENABLED 1
63#define GUC_DOORBELL_DISABLED 0
64
65#define GUC_STAGE_DESC_ATTR_ACTIVE BIT(0) 67#define GUC_STAGE_DESC_ATTR_ACTIVE BIT(0)
66#define GUC_STAGE_DESC_ATTR_PENDING_DB BIT(1) 68#define GUC_STAGE_DESC_ATTR_PENDING_DB BIT(1)
67#define GUC_STAGE_DESC_ATTR_KERNEL BIT(2) 69#define GUC_STAGE_DESC_ATTR_KERNEL BIT(2)
@@ -219,26 +221,6 @@ struct uc_css_header {
219 u32 header_info; 221 u32 header_info;
220} __packed; 222} __packed;
221 223
222struct guc_doorbell_info {
223 u32 db_status;
224 u32 cookie;
225 u32 reserved[14];
226} __packed;
227
228union guc_doorbell_qw {
229 struct {
230 u32 db_status;
231 u32 cookie;
232 };
233 u64 value_qw;
234} __packed;
235
236#define GUC_NUM_DOORBELLS 256
237#define GUC_DOORBELL_INVALID (GUC_NUM_DOORBELLS)
238
239#define GUC_DB_SIZE (PAGE_SIZE)
240#define GUC_WQ_SIZE (PAGE_SIZE * 2)
241
242/* Work item for submitting workloads into work queue of GuC. */ 224/* Work item for submitting workloads into work queue of GuC. */
243struct guc_wq_item { 225struct guc_wq_item {
244 u32 header; 226 u32 header;
@@ -601,7 +583,9 @@ struct guc_shared_ctx_data {
601 * registers, where first register holds data treated as message header, 583 * registers, where first register holds data treated as message header,
602 * and other registers are used to hold message payload. 584 * and other registers are used to hold message payload.
603 * 585 *
604 * For Gen9+, GuC uses software scratch registers 0xC180-0xC1B8 586 * For Gen9+, GuC uses software scratch registers 0xC180-0xC1B8,
587 * but no H2G command takes more than 8 parameters and the GuC FW
588 * itself uses an 8-element array to store the H2G message.
605 * 589 *
606 * +-----------+---------+---------+---------+ 590 * +-----------+---------+---------+---------+
607 * | MMIO[0] | MMIO[1] | ... | MMIO[n] | 591 * | MMIO[0] | MMIO[1] | ... | MMIO[n] |
@@ -633,6 +617,8 @@ struct guc_shared_ctx_data {
633 * field. 617 * field.
634 */ 618 */
635 619
620#define GUC_MAX_MMIO_MSG_LEN 8
621
636#define INTEL_GUC_MSG_TYPE_SHIFT 28 622#define INTEL_GUC_MSG_TYPE_SHIFT 28
637#define INTEL_GUC_MSG_TYPE_MASK (0xF << INTEL_GUC_MSG_TYPE_SHIFT) 623#define INTEL_GUC_MSG_TYPE_MASK (0xF << INTEL_GUC_MSG_TYPE_SHIFT)
638#define INTEL_GUC_MSG_DATA_SHIFT 16 624#define INTEL_GUC_MSG_DATA_SHIFT 16
@@ -687,6 +673,13 @@ enum intel_guc_report_status {
687 INTEL_GUC_REPORT_STATUS_COMPLETE = 0x4, 673 INTEL_GUC_REPORT_STATUS_COMPLETE = 0x4,
688}; 674};
689 675
676enum intel_guc_sleep_state_status {
677 INTEL_GUC_SLEEP_STATE_SUCCESS = 0x0,
678 INTEL_GUC_SLEEP_STATE_PREEMPT_TO_IDLE_FAILED = 0x1,
679 INTEL_GUC_SLEEP_STATE_ENGINE_RESET_FAILED = 0x2
680#define INTEL_GUC_SLEEP_STATE_INVALID_MASK 0x80000000
681};
682
690#define GUC_LOG_CONTROL_LOGGING_ENABLED (1 << 0) 683#define GUC_LOG_CONTROL_LOGGING_ENABLED (1 << 0)
691#define GUC_LOG_CONTROL_VERBOSITY_SHIFT 4 684#define GUC_LOG_CONTROL_VERBOSITY_SHIFT 4
692#define GUC_LOG_CONTROL_VERBOSITY_MASK (0xF << GUC_LOG_CONTROL_VERBOSITY_SHIFT) 685#define GUC_LOG_CONTROL_VERBOSITY_MASK (0xF << GUC_LOG_CONTROL_VERBOSITY_SHIFT)
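Aside: the MMIO message header described above packs a 4-bit type, a data field, and a 16-bit code into MMIO[0]. A pack/unpack sketch of that layout; the 12-bit data width is inferred from the shifts shown and should be treated as an assumption:

#include <stdio.h>
#include <stdint.h>

#define MSG_TYPE_SHIFT	28
#define MSG_TYPE_MASK	(0xFu << MSG_TYPE_SHIFT)
#define MSG_DATA_SHIFT	16
#define MSG_DATA_MASK	(0xFFFu << MSG_DATA_SHIFT)	/* inferred width */
#define MSG_CODE_MASK	0xFFFFu

static uint32_t msg_pack(uint32_t type, uint32_t data, uint32_t code)
{
	return ((type << MSG_TYPE_SHIFT) & MSG_TYPE_MASK) |
	       ((data << MSG_DATA_SHIFT) & MSG_DATA_MASK) |
	       (code & MSG_CODE_MASK);
}

int main(void)
{
	uint32_t hdr = msg_pack(0xF, 0x123, 0xBEEF);

	printf("hdr=%#x type=%#x data=%#x code=%#x\n",
	       (unsigned int)hdr,
	       (unsigned int)((hdr & MSG_TYPE_MASK) >> MSG_TYPE_SHIFT),
	       (unsigned int)((hdr & MSG_DATA_MASK) >> MSG_DATA_SHIFT),
	       (unsigned int)(hdr & MSG_CODE_MASK));
	return 0;
}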
diff --git a/drivers/gpu/drm/i915/intel_guc_reg.h b/drivers/gpu/drm/i915/intel_guc_reg.h
index d86084742a4a..57e7ad522c2f 100644
--- a/drivers/gpu/drm/i915/intel_guc_reg.h
+++ b/drivers/gpu/drm/i915/intel_guc_reg.h
@@ -104,6 +104,18 @@
104#define GUC_SEND_INTERRUPT _MMIO(0xc4c8) 104#define GUC_SEND_INTERRUPT _MMIO(0xc4c8)
105#define GUC_SEND_TRIGGER (1<<0) 105#define GUC_SEND_TRIGGER (1<<0)
106 106
107#define GUC_NUM_DOORBELLS 256
108
109/* format of the HW-monitored doorbell cacheline */
110struct guc_doorbell_info {
111 u32 db_status;
112#define GUC_DOORBELL_DISABLED 0
113#define GUC_DOORBELL_ENABLED 1
114
115 u32 cookie;
116 u32 reserved[14];
117} __packed;
118
107#define GEN8_DRBREGL(x) _MMIO(0x1000 + (x) * 8) 119#define GEN8_DRBREGL(x) _MMIO(0x1000 + (x) * 8)
108#define GEN8_DRB_VALID (1<<0) 120#define GEN8_DRB_VALID (1<<0)
109#define GEN8_DRBREGU(x) _MMIO(0x1000 + (x) * 8 + 4) 121#define GEN8_DRBREGU(x) _MMIO(0x1000 + (x) * 8 + 4)
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index a81f04d46e87..1570dcbe249c 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -192,7 +192,15 @@ static struct guc_doorbell_info *__get_doorbell(struct intel_guc_client *client)
192 return client->vaddr + client->doorbell_offset; 192 return client->vaddr + client->doorbell_offset;
193} 193}
194 194
195static void __create_doorbell(struct intel_guc_client *client) 195static bool __doorbell_valid(struct intel_guc *guc, u16 db_id)
196{
197 struct drm_i915_private *dev_priv = guc_to_i915(guc);
198
199 GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS);
200 return I915_READ(GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID;
201}
202
203static void __init_doorbell(struct intel_guc_client *client)
196{ 204{
197 struct guc_doorbell_info *doorbell; 205 struct guc_doorbell_info *doorbell;
198 206
@@ -201,21 +209,19 @@ static void __create_doorbell(struct intel_guc_client *client)
201 doorbell->cookie = 0; 209 doorbell->cookie = 0;
202} 210}
203 211
204static void __destroy_doorbell(struct intel_guc_client *client) 212static void __fini_doorbell(struct intel_guc_client *client)
205{ 213{
206 struct drm_i915_private *dev_priv = guc_to_i915(client->guc);
207 struct guc_doorbell_info *doorbell; 214 struct guc_doorbell_info *doorbell;
208 u16 db_id = client->doorbell_id; 215 u16 db_id = client->doorbell_id;
209 216
210 doorbell = __get_doorbell(client); 217 doorbell = __get_doorbell(client);
211 doorbell->db_status = GUC_DOORBELL_DISABLED; 218 doorbell->db_status = GUC_DOORBELL_DISABLED;
212 doorbell->cookie = 0;
213 219
214 /* Doorbell release flow requires that we wait for GEN8_DRB_VALID bit 220 /* Doorbell release flow requires that we wait for GEN8_DRB_VALID bit
215 * to go to zero after updating db_status before we call the GuC to 221 * to go to zero after updating db_status before we call the GuC to
216 * release the doorbell 222 * release the doorbell
217 */ 223 */
218 if (wait_for_us(!(I915_READ(GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID), 10)) 224 if (wait_for_us(!__doorbell_valid(client->guc, db_id), 10))
219 WARN_ONCE(true, "Doorbell never became invalid after disable\n"); 225 WARN_ONCE(true, "Doorbell never became invalid after disable\n");
220} 226}
221 227
@@ -227,11 +233,11 @@ static int create_doorbell(struct intel_guc_client *client)
227 return -ENODEV; /* internal setup error, should never happen */ 233 return -ENODEV; /* internal setup error, should never happen */
228 234
229 __update_doorbell_desc(client, client->doorbell_id); 235 __update_doorbell_desc(client, client->doorbell_id);
230 __create_doorbell(client); 236 __init_doorbell(client);
231 237
232 ret = __guc_allocate_doorbell(client->guc, client->stage_id); 238 ret = __guc_allocate_doorbell(client->guc, client->stage_id);
233 if (ret) { 239 if (ret) {
234 __destroy_doorbell(client); 240 __fini_doorbell(client);
235 __update_doorbell_desc(client, GUC_DOORBELL_INVALID); 241 __update_doorbell_desc(client, GUC_DOORBELL_INVALID);
236 DRM_DEBUG_DRIVER("Couldn't create client %u doorbell: %d\n", 242 DRM_DEBUG_DRIVER("Couldn't create client %u doorbell: %d\n",
237 client->stage_id, ret); 243 client->stage_id, ret);
@@ -247,7 +253,7 @@ static int destroy_doorbell(struct intel_guc_client *client)
247 253
248 GEM_BUG_ON(!has_doorbell(client)); 254 GEM_BUG_ON(!has_doorbell(client));
249 255
250 __destroy_doorbell(client); 256 __fini_doorbell(client);
251 ret = __guc_deallocate_doorbell(client->guc, client->stage_id); 257 ret = __guc_deallocate_doorbell(client->guc, client->stage_id);
252 if (ret) 258 if (ret)
253 DRM_ERROR("Couldn't destroy client %u doorbell: %d\n", 259 DRM_ERROR("Couldn't destroy client %u doorbell: %d\n",
@@ -282,8 +288,7 @@ __get_process_desc(struct intel_guc_client *client)
282/* 288/*
283 * Initialise the process descriptor shared with the GuC firmware. 289 * Initialise the process descriptor shared with the GuC firmware.
284 */ 290 */
285static void guc_proc_desc_init(struct intel_guc *guc, 291static void guc_proc_desc_init(struct intel_guc_client *client)
286 struct intel_guc_client *client)
287{ 292{
288 struct guc_process_desc *desc; 293 struct guc_process_desc *desc;
289 294
@@ -304,6 +309,14 @@ static void guc_proc_desc_init(struct intel_guc *guc,
304 desc->priority = client->priority; 309 desc->priority = client->priority;
305} 310}
306 311
312static void guc_proc_desc_fini(struct intel_guc_client *client)
313{
314 struct guc_process_desc *desc;
315
316 desc = __get_process_desc(client);
317 memset(desc, 0, sizeof(*desc));
318}
319
307static int guc_stage_desc_pool_create(struct intel_guc *guc) 320static int guc_stage_desc_pool_create(struct intel_guc *guc)
308{ 321{
309 struct i915_vma *vma; 322 struct i915_vma *vma;
@@ -341,9 +354,9 @@ static void guc_stage_desc_pool_destroy(struct intel_guc *guc)
341 * data structures relating to this client (doorbell, process descriptor, 354 * data structures relating to this client (doorbell, process descriptor,
342 * write queue, etc). 355 * write queue, etc).
343 */ 356 */
344static void guc_stage_desc_init(struct intel_guc *guc, 357static void guc_stage_desc_init(struct intel_guc_client *client)
345 struct intel_guc_client *client)
346{ 358{
359 struct intel_guc *guc = client->guc;
347 struct drm_i915_private *dev_priv = guc_to_i915(guc); 360 struct drm_i915_private *dev_priv = guc_to_i915(guc);
348 struct intel_engine_cs *engine; 361 struct intel_engine_cs *engine;
349 struct i915_gem_context *ctx = client->owner; 362 struct i915_gem_context *ctx = client->owner;
@@ -424,8 +437,7 @@ static void guc_stage_desc_init(struct intel_guc *guc,
424 desc->desc_private = ptr_to_u64(client); 437 desc->desc_private = ptr_to_u64(client);
425} 438}
426 439
427static void guc_stage_desc_fini(struct intel_guc *guc, 440static void guc_stage_desc_fini(struct intel_guc_client *client)
428 struct intel_guc_client *client)
429{ 441{
430 struct guc_stage_desc *desc; 442 struct guc_stage_desc *desc;
431 443
@@ -486,14 +498,6 @@ static void guc_wq_item_append(struct intel_guc_client *client,
486 WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1)); 498 WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1));
487} 499}
488 500
489static void guc_reset_wq(struct intel_guc_client *client)
490{
491 struct guc_process_desc *desc = __get_process_desc(client);
492
493 desc->head = 0;
494 desc->tail = 0;
495}
496
497static void guc_ring_doorbell(struct intel_guc_client *client) 501static void guc_ring_doorbell(struct intel_guc_client *client)
498{ 502{
499 struct guc_doorbell_info *db; 503 struct guc_doorbell_info *db;
@@ -746,30 +750,28 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
746 while ((rb = rb_first_cached(&execlists->queue))) { 750 while ((rb = rb_first_cached(&execlists->queue))) {
747 struct i915_priolist *p = to_priolist(rb); 751 struct i915_priolist *p = to_priolist(rb);
748 struct i915_request *rq, *rn; 752 struct i915_request *rq, *rn;
753 int i;
749 754
750 list_for_each_entry_safe(rq, rn, &p->requests, sched.link) { 755 priolist_for_each_request_consume(rq, rn, p, i) {
751 if (last && rq->hw_context != last->hw_context) { 756 if (last && rq->hw_context != last->hw_context) {
752 if (port == last_port) { 757 if (port == last_port)
753 __list_del_many(&p->requests,
754 &rq->sched.link);
755 goto done; 758 goto done;
756 }
757 759
758 if (submit) 760 if (submit)
759 port_assign(port, last); 761 port_assign(port, last);
760 port++; 762 port++;
761 } 763 }
762 764
763 INIT_LIST_HEAD(&rq->sched.link); 765 list_del_init(&rq->sched.link);
764 766
765 __i915_request_submit(rq); 767 __i915_request_submit(rq);
766 trace_i915_request_in(rq, port_index(port, execlists)); 768 trace_i915_request_in(rq, port_index(port, execlists));
769
767 last = rq; 770 last = rq;
768 submit = true; 771 submit = true;
769 } 772 }
770 773
771 rb_erase_cached(&p->node, &execlists->queue); 774 rb_erase_cached(&p->node, &execlists->queue);
772 INIT_LIST_HEAD(&p->requests);
773 if (p->priority != I915_PRIORITY_NORMAL) 775 if (p->priority != I915_PRIORITY_NORMAL)
774 kmem_cache_free(engine->i915->priorities, p); 776 kmem_cache_free(engine->i915->priorities, p);
775 } 777 }
@@ -791,19 +793,8 @@ done:
791 793
792static void guc_dequeue(struct intel_engine_cs *engine) 794static void guc_dequeue(struct intel_engine_cs *engine)
793{ 795{
794 unsigned long flags; 796 if (__guc_dequeue(engine))
795 bool submit;
796
797 local_irq_save(flags);
798
799 spin_lock(&engine->timeline.lock);
800 submit = __guc_dequeue(engine);
801 spin_unlock(&engine->timeline.lock);
802
803 if (submit)
804 guc_submit(engine); 797 guc_submit(engine);
805
806 local_irq_restore(flags);
807} 798}
808 799
809static void guc_submission_tasklet(unsigned long data) 800static void guc_submission_tasklet(unsigned long data)
@@ -812,6 +803,9 @@ static void guc_submission_tasklet(unsigned long data)
812 struct intel_engine_execlists * const execlists = &engine->execlists; 803 struct intel_engine_execlists * const execlists = &engine->execlists;
813 struct execlist_port *port = execlists->port; 804 struct execlist_port *port = execlists->port;
814 struct i915_request *rq; 805 struct i915_request *rq;
806 unsigned long flags;
807
808 spin_lock_irqsave(&engine->timeline.lock, flags);
815 809
816 rq = port_request(port); 810 rq = port_request(port);
817 while (rq && i915_request_completed(rq)) { 811 while (rq && i915_request_completed(rq)) {
@@ -835,6 +829,8 @@ static void guc_submission_tasklet(unsigned long data)
835 829
836 if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT)) 830 if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT))
837 guc_dequeue(engine); 831 guc_dequeue(engine);
832
833 spin_unlock_irqrestore(&engine->timeline.lock, flags);
838} 834}
839 835
840static struct i915_request * 836static struct i915_request *
@@ -877,72 +873,31 @@ guc_reset_prepare(struct intel_engine_cs *engine)
877/* Check that a doorbell register is in the expected state */ 873/* Check that a doorbell register is in the expected state */
878static bool doorbell_ok(struct intel_guc *guc, u16 db_id) 874static bool doorbell_ok(struct intel_guc *guc, u16 db_id)
879{ 875{
880 struct drm_i915_private *dev_priv = guc_to_i915(guc);
881 u32 drbregl;
882 bool valid; 876 bool valid;
883 877
884 GEM_BUG_ON(db_id >= GUC_DOORBELL_INVALID); 878 GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS);
885 879
886 drbregl = I915_READ(GEN8_DRBREGL(db_id)); 880 valid = __doorbell_valid(guc, db_id);
887 valid = drbregl & GEN8_DRB_VALID;
888 881
889 if (test_bit(db_id, guc->doorbell_bitmap) == valid) 882 if (test_bit(db_id, guc->doorbell_bitmap) == valid)
890 return true; 883 return true;
891 884
892 DRM_DEBUG_DRIVER("Doorbell %d has unexpected state (0x%x): valid=%s\n", 885 DRM_DEBUG_DRIVER("Doorbell %u has unexpected state: valid=%s\n",
893 db_id, drbregl, yesno(valid)); 886 db_id, yesno(valid));
894 887
895 return false; 888 return false;
896} 889}
897 890
898static bool guc_verify_doorbells(struct intel_guc *guc) 891static bool guc_verify_doorbells(struct intel_guc *guc)
899{ 892{
893 bool doorbells_ok = true;
900 u16 db_id; 894 u16 db_id;
901 895
902 for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id) 896 for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id)
903 if (!doorbell_ok(guc, db_id)) 897 if (!doorbell_ok(guc, db_id))
904 return false; 898 doorbells_ok = false;
905
906 return true;
907}
908
909static int guc_clients_doorbell_init(struct intel_guc *guc)
910{
911 int ret;
912
913 ret = create_doorbell(guc->execbuf_client);
914 if (ret)
915 return ret;
916
917 if (guc->preempt_client) {
918 ret = create_doorbell(guc->preempt_client);
919 if (ret) {
920 destroy_doorbell(guc->execbuf_client);
921 return ret;
922 }
923 }
924
925 return 0;
926}
927
928static void guc_clients_doorbell_fini(struct intel_guc *guc)
929{
930 /*
931 * By the time we're here, GuC has already been reset.
932 * Instead of trying (in vain) to communicate with it, let's just
933 * cleanup the doorbell HW and our internal state.
934 */
935 if (guc->preempt_client) {
936 __destroy_doorbell(guc->preempt_client);
937 __update_doorbell_desc(guc->preempt_client,
938 GUC_DOORBELL_INVALID);
939 }
940 899
941 if (guc->execbuf_client) { 900 return doorbells_ok;
942 __destroy_doorbell(guc->execbuf_client);
943 __update_doorbell_desc(guc->execbuf_client,
944 GUC_DOORBELL_INVALID);
945 }
946} 901}
947 902
948/** 903/**
@@ -1005,6 +960,10 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
1005 } 960 }
1006 client->vaddr = vaddr; 961 client->vaddr = vaddr;
1007 962
963 ret = reserve_doorbell(client);
964 if (ret)
965 goto err_vaddr;
966
1008 client->doorbell_offset = __select_cacheline(guc); 967 client->doorbell_offset = __select_cacheline(guc);
1009 968
1010 /* 969 /*
@@ -1017,13 +976,6 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
1017 else 976 else
1018 client->proc_desc_offset = (GUC_DB_SIZE / 2); 977 client->proc_desc_offset = (GUC_DB_SIZE / 2);
1019 978
1020 guc_proc_desc_init(guc, client);
1021 guc_stage_desc_init(guc, client);
1022
1023 ret = reserve_doorbell(client);
1024 if (ret)
1025 goto err_vaddr;
1026
1027 DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: stage_id %u\n", 979 DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: stage_id %u\n",
1028 priority, client, client->engines, client->stage_id); 980 priority, client, client->engines, client->stage_id);
1029 DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%lx\n", 981 DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%lx\n",
@@ -1045,7 +997,6 @@ err_client:
1045static void guc_client_free(struct intel_guc_client *client) 997static void guc_client_free(struct intel_guc_client *client)
1046{ 998{
1047 unreserve_doorbell(client); 999 unreserve_doorbell(client);
1048 guc_stage_desc_fini(client->guc, client);
1049 i915_vma_unpin_and_release(&client->vma, I915_VMA_RELEASE_MAP); 1000 i915_vma_unpin_and_release(&client->vma, I915_VMA_RELEASE_MAP);
1050 ida_simple_remove(&client->guc->stage_ids, client->stage_id); 1001 ida_simple_remove(&client->guc->stage_ids, client->stage_id);
1051 kfree(client); 1002 kfree(client);
@@ -1112,6 +1063,69 @@ static void guc_clients_destroy(struct intel_guc *guc)
1112 guc_client_free(client); 1063 guc_client_free(client);
1113} 1064}
1114 1065
1066static int __guc_client_enable(struct intel_guc_client *client)
1067{
1068 int ret;
1069
1070 guc_proc_desc_init(client);
1071 guc_stage_desc_init(client);
1072
1073 ret = create_doorbell(client);
1074 if (ret)
1075 goto fail;
1076
1077 return 0;
1078
1079fail:
1080 guc_stage_desc_fini(client);
1081 guc_proc_desc_fini(client);
1082 return ret;
1083}
1084
1085static void __guc_client_disable(struct intel_guc_client *client)
1086{
1087 /*
1088 * By the time we're here, GuC may have already been reset. If that is
1089 * the case, instead of trying (in vain) to communicate with it, let's
1090 * just cleanup the doorbell HW and our internal state.
1091 */
1092 if (intel_guc_is_alive(client->guc))
1093 destroy_doorbell(client);
1094 else
1095 __fini_doorbell(client);
1096
1097 guc_stage_desc_fini(client);
1098 guc_proc_desc_fini(client);
1099}
1100
1101static int guc_clients_enable(struct intel_guc *guc)
1102{
1103 int ret;
1104
1105 ret = __guc_client_enable(guc->execbuf_client);
1106 if (ret)
1107 return ret;
1108
1109 if (guc->preempt_client) {
1110 ret = __guc_client_enable(guc->preempt_client);
1111 if (ret) {
1112 __guc_client_disable(guc->execbuf_client);
1113 return ret;
1114 }
1115 }
1116
1117 return 0;
1118}
1119
1120static void guc_clients_disable(struct intel_guc *guc)
1121{
1122 if (guc->preempt_client)
1123 __guc_client_disable(guc->preempt_client);
1124
1125 if (guc->execbuf_client)
1126 __guc_client_disable(guc->execbuf_client);
1127}
1128
1115/* 1129/*
1116 * Set up the memory resources to be shared with the GuC (via the GGTT) 1130 * Set up the memory resources to be shared with the GuC (via the GGTT)
1117 * at firmware loading time. 1131 * at firmware loading time.
@@ -1295,15 +1309,11 @@ int intel_guc_submission_enable(struct intel_guc *guc)
1295 1309
1296 GEM_BUG_ON(!guc->execbuf_client); 1310 GEM_BUG_ON(!guc->execbuf_client);
1297 1311
1298 guc_reset_wq(guc->execbuf_client);
1299 if (guc->preempt_client)
1300 guc_reset_wq(guc->preempt_client);
1301
1302 err = intel_guc_sample_forcewake(guc); 1312 err = intel_guc_sample_forcewake(guc);
1303 if (err) 1313 if (err)
1304 return err; 1314 return err;
1305 1315
1306 err = guc_clients_doorbell_init(guc); 1316 err = guc_clients_enable(guc);
1307 if (err) 1317 if (err)
1308 return err; 1318 return err;
1309 1319
@@ -1325,7 +1335,7 @@ void intel_guc_submission_disable(struct intel_guc *guc)
1325 GEM_BUG_ON(dev_priv->gt.awake); /* GT should be parked first */ 1335 GEM_BUG_ON(dev_priv->gt.awake); /* GT should be parked first */
1326 1336
1327 guc_interrupts_release(dev_priv); 1337 guc_interrupts_release(dev_priv);
1328 guc_clients_doorbell_fini(guc); 1338 guc_clients_disable(guc);
1329} 1339}
1330 1340
1331#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) 1341#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
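Aside: the enable/disable split introduced above follows the usual init-then-unwind shape: descriptors first, doorbell last, and a doorbell failure tears the descriptors back down in reverse order, so disable() never sees half-initialised state. A toy model (every function is a stand-in, not driver API):

#include <stdio.h>

static int doorbell_should_fail;

static void proc_desc_init(void)  { puts("proc desc init");  }
static void proc_desc_fini(void)  { puts("proc desc fini");  }
static void stage_desc_init(void) { puts("stage desc init"); }
static void stage_desc_fini(void) { puts("stage desc fini"); }

static int create_doorbell(void)
{
	return doorbell_should_fail ? -19 : 0; /* -ENODEV-like */
}

static int client_enable(void)
{
	int ret;

	proc_desc_init();
	stage_desc_init();

	ret = create_doorbell();
	if (ret) {
		/* unwind in reverse order of setup */
		stage_desc_fini();
		proc_desc_fini();
	}
	return ret;
}

int main(void)
{
	doorbell_should_fail = 1;
	printf("enable: %d\n", client_enable());
	return 0;
}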
diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/intel_hdcp.c
index 26e48fc95543..1bf487f94254 100644
--- a/drivers/gpu/drm/i915/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/intel_hdcp.c
@@ -16,6 +16,62 @@
16 16
17#define KEY_LOAD_TRIES 5 17#define KEY_LOAD_TRIES 5
18 18
19static
20bool intel_hdcp_is_ksv_valid(u8 *ksv)
21{
22 int i, ones = 0;
23 /* KSV has 20 1's and 20 0's */
24 for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
25 ones += hweight8(ksv[i]);
26 if (ones != 20)
27 return false;
28
29 return true;
30}
31
32static
33int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port,
34 const struct intel_hdcp_shim *shim, u8 *bksv)
35{
36 int ret, i, tries = 2;
37
38 /* HDCP spec states that we must retry the bksv if it is invalid */
39 for (i = 0; i < tries; i++) {
40 ret = shim->read_bksv(intel_dig_port, bksv);
41 if (ret)
42 return ret;
43 if (intel_hdcp_is_ksv_valid(bksv))
44 break;
45 }
46 if (i == tries) {
47 DRM_DEBUG_KMS("Bksv is invalid\n");
48 return -ENODEV;
49 }
50
51 return 0;
52}
53
54/* Is HDCP1.4 capable on Platform and Sink */
55bool intel_hdcp_capable(struct intel_connector *connector)
56{
57 struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
58 const struct intel_hdcp_shim *shim = connector->hdcp.shim;
59 bool capable = false;
60 u8 bksv[5];
61
62 if (!shim)
63 return capable;
64
65 if (shim->hdcp_capable) {
66 shim->hdcp_capable(intel_dig_port, &capable);
67 } else {
68 if (!intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv))
69 capable = true;
70 }
71
72 return capable;
73}
74
19static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port, 75static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port,
20 const struct intel_hdcp_shim *shim) 76 const struct intel_hdcp_shim *shim)
21{ 77{
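Aside: the KSV validity rule used above is exactly "twenty 1 bits across five bytes". The same check as standalone C, with GCC's __builtin_popcount standing in for the kernel's hweight8():

#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

#define KSV_LEN 5	/* DRM_HDCP_KSV_LEN */

static bool ksv_valid(const uint8_t *ksv)
{
	int i, ones = 0;

	/* a valid KSV has 20 ones and 20 zeros */
	for (i = 0; i < KSV_LEN; i++)
		ones += __builtin_popcount(ksv[i]);

	return ones == 20;
}

int main(void)
{
	const uint8_t good[KSV_LEN] = { 0xFF, 0xFF, 0x0F, 0x00, 0x00 };
	const uint8_t bad[KSV_LEN]  = { 0xFF, 0x00, 0x00, 0x00, 0x00 };

	printf("good: %d, bad: %d\n", ksv_valid(good), ksv_valid(bad));
	return 0;
}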
@@ -168,18 +224,6 @@ u32 intel_hdcp_get_repeater_ctl(struct intel_digital_port *intel_dig_port)
168} 224}
169 225
170static 226static
171bool intel_hdcp_is_ksv_valid(u8 *ksv)
172{
173 int i, ones = 0;
174 /* KSV has 20 1's and 20 0's */
175 for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
176 ones += hweight8(ksv[i]);
177 if (ones != 20)
178 return false;
179 return true;
180}
181
182static
183int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port, 227int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
184 const struct intel_hdcp_shim *shim, 228 const struct intel_hdcp_shim *shim,
185 u8 *ksv_fifo, u8 num_downstream, u8 *bstatus) 229 u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
@@ -383,7 +427,7 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
383 if (intel_wait_for_register(dev_priv, HDCP_REP_CTL, 427 if (intel_wait_for_register(dev_priv, HDCP_REP_CTL,
384 HDCP_SHA1_COMPLETE, 428 HDCP_SHA1_COMPLETE,
385 HDCP_SHA1_COMPLETE, 1)) { 429 HDCP_SHA1_COMPLETE, 1)) {
386 DRM_DEBUG_KMS("Timed out waiting for SHA1 complete\n"); 430 DRM_ERROR("Timed out waiting for SHA1 complete\n");
387 return -ETIMEDOUT; 431 return -ETIMEDOUT;
388 } 432 }
389 if (!(I915_READ(HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) { 433 if (!(I915_READ(HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
@@ -404,7 +448,7 @@ int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
404 448
405 ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim); 449 ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim);
406 if (ret) { 450 if (ret) {
407 DRM_ERROR("KSV list failed to become ready (%d)\n", ret); 451 DRM_DEBUG_KMS("KSV list failed to become ready (%d)\n", ret);
408 return ret; 452 return ret;
409 } 453 }
410 454
@@ -414,7 +458,7 @@ int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
414 458
415 if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) || 459 if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
416 DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) { 460 DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
417 DRM_ERROR("Max Topology Limit Exceeded\n"); 461 DRM_DEBUG_KMS("Max Topology Limit Exceeded\n");
418 return -EPERM; 462 return -EPERM;
419 } 463 }
420 464
@@ -450,7 +494,7 @@ int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
450 } 494 }
451 495
452 if (i == tries) { 496 if (i == tries) {
453 DRM_ERROR("V Prime validation failed.(%d)\n", ret); 497 DRM_DEBUG_KMS("V Prime validation failed.(%d)\n", ret);
454 goto err; 498 goto err;
455 } 499 }
456 500
@@ -499,7 +543,7 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
499 if (ret) 543 if (ret)
500 return ret; 544 return ret;
501 if (!hdcp_capable) { 545 if (!hdcp_capable) {
502 DRM_ERROR("Panel is not HDCP capable\n"); 546 DRM_DEBUG_KMS("Panel is not HDCP capable\n");
503 return -EINVAL; 547 return -EINVAL;
504 } 548 }
505 } 549 }
@@ -527,18 +571,9 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
527 571
528 memset(&bksv, 0, sizeof(bksv)); 572 memset(&bksv, 0, sizeof(bksv));
529 573
530 /* HDCP spec states that we must retry the bksv if it is invalid */ 574 ret = intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv.shim);
531 for (i = 0; i < tries; i++) { 575 if (ret < 0)
532 ret = shim->read_bksv(intel_dig_port, bksv.shim); 576 return ret;
533 if (ret)
534 return ret;
535 if (intel_hdcp_is_ksv_valid(bksv.shim))
536 break;
537 }
538 if (i == tries) {
539 DRM_ERROR("HDCP failed, Bksv is invalid\n");
540 return -ENODEV;
541 }
542 577
543 I915_WRITE(PORT_HDCP_BKSVLO(port), bksv.reg[0]); 578 I915_WRITE(PORT_HDCP_BKSVLO(port), bksv.reg[0]);
544 I915_WRITE(PORT_HDCP_BKSVHI(port), bksv.reg[1]); 579 I915_WRITE(PORT_HDCP_BKSVHI(port), bksv.reg[1]);
@@ -594,8 +629,8 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
594 } 629 }
595 630
596 if (i == tries) { 631 if (i == tries) {
597 DRM_ERROR("Timed out waiting for Ri prime match (%x)\n", 632 DRM_DEBUG_KMS("Timed out waiting for Ri prime match (%x)\n",
598 I915_READ(PORT_HDCP_STATUS(port))); 633 I915_READ(PORT_HDCP_STATUS(port)));
599 return -ETIMEDOUT; 634 return -ETIMEDOUT;
600 } 635 }
601 636
@@ -618,14 +653,9 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
618 return 0; 653 return 0;
619} 654}
620 655
621static
622struct intel_digital_port *conn_to_dig_port(struct intel_connector *connector)
623{
624 return enc_to_dig_port(&intel_attached_encoder(&connector->base)->base);
625}
626
627static int _intel_hdcp_disable(struct intel_connector *connector) 656static int _intel_hdcp_disable(struct intel_connector *connector)
628{ 657{
658 struct intel_hdcp *hdcp = &connector->hdcp;
629 struct drm_i915_private *dev_priv = connector->base.dev->dev_private; 659 struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
630 struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); 660 struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
631 enum port port = intel_dig_port->base.port; 661 enum port port = intel_dig_port->base.port;
@@ -641,7 +671,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
641 return -ETIMEDOUT; 671 return -ETIMEDOUT;
642 } 672 }
643 673
644 ret = connector->hdcp_shim->toggle_signalling(intel_dig_port, false); 674 ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
645 if (ret) { 675 if (ret) {
646 DRM_ERROR("Failed to disable HDCP signalling\n"); 676 DRM_ERROR("Failed to disable HDCP signalling\n");
647 return ret; 677 return ret;
@@ -653,6 +683,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
653 683
654static int _intel_hdcp_enable(struct intel_connector *connector) 684static int _intel_hdcp_enable(struct intel_connector *connector)
655{ 685{
686 struct intel_hdcp *hdcp = &connector->hdcp;
656 struct drm_i915_private *dev_priv = connector->base.dev->dev_private; 687 struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
657 int i, ret, tries = 3; 688 int i, ret, tries = 3;
658 689
@@ -677,8 +708,7 @@ static int _intel_hdcp_enable(struct intel_connector *connector)
677 708
678 /* In case of authentication failures, HDCP spec expects reauth. */ 709 /* In case of authentication failures, HDCP spec expects reauth. */
679 for (i = 0; i < tries; i++) { 710 for (i = 0; i < tries; i++) {
680 ret = intel_hdcp_auth(conn_to_dig_port(connector), 711 ret = intel_hdcp_auth(conn_to_dig_port(connector), hdcp->shim);
681 connector->hdcp_shim);
682 if (!ret) 712 if (!ret)
683 return 0; 713 return 0;
684 714
@@ -688,42 +718,50 @@ static int _intel_hdcp_enable(struct intel_connector *connector)
688 _intel_hdcp_disable(connector); 718 _intel_hdcp_disable(connector);
689 } 719 }
690 720
691 DRM_ERROR("HDCP authentication failed (%d tries/%d)\n", tries, ret); 721 DRM_DEBUG_KMS("HDCP authentication failed (%d tries/%d)\n", tries, ret);
692 return ret; 722 return ret;
693} 723}
694 724
725static inline
726struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
727{
728 return container_of(hdcp, struct intel_connector, hdcp);
729}
730
695static void intel_hdcp_check_work(struct work_struct *work) 731static void intel_hdcp_check_work(struct work_struct *work)
696{ 732{
697 struct intel_connector *connector = container_of(to_delayed_work(work), 733 struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
698 struct intel_connector, 734 struct intel_hdcp,
699 hdcp_check_work); 735 check_work);
736 struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
737
700 if (!intel_hdcp_check_link(connector)) 738 if (!intel_hdcp_check_link(connector))
701 schedule_delayed_work(&connector->hdcp_check_work, 739 schedule_delayed_work(&hdcp->check_work,
702 DRM_HDCP_CHECK_PERIOD_MS); 740 DRM_HDCP_CHECK_PERIOD_MS);
703} 741}
704 742
705static void intel_hdcp_prop_work(struct work_struct *work) 743static void intel_hdcp_prop_work(struct work_struct *work)
706{ 744{
707 struct intel_connector *connector = container_of(work, 745 struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
708 struct intel_connector, 746 prop_work);
709 hdcp_prop_work); 747 struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
710 struct drm_device *dev = connector->base.dev; 748 struct drm_device *dev = connector->base.dev;
711 struct drm_connector_state *state; 749 struct drm_connector_state *state;
712 750
713 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); 751 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
714 mutex_lock(&connector->hdcp_mutex); 752 mutex_lock(&hdcp->mutex);
715 753
716 /* 754 /*
717 * This worker is only used to flip between ENABLED/DESIRED. Either of 755 * This worker is only used to flip between ENABLED/DESIRED. Either of
718 * those to UNDESIRED is handled by core. If hdcp_value == UNDESIRED, 756 * those to UNDESIRED is handled by core. If value == UNDESIRED,
719 * we're running just after hdcp has been disabled, so just exit 757 * we're running just after hdcp has been disabled, so just exit
720 */ 758 */
721 if (connector->hdcp_value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { 759 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
722 state = connector->base.state; 760 state = connector->base.state;
723 state->content_protection = connector->hdcp_value; 761 state->content_protection = hdcp->value;
724 } 762 }
725 763
726 mutex_unlock(&connector->hdcp_mutex); 764 mutex_unlock(&hdcp->mutex);
727 drm_modeset_unlock(&dev->mode_config.connection_mutex); 765 drm_modeset_unlock(&dev->mode_config.connection_mutex);
728} 766}
729 767
@@ -735,8 +773,9 @@ bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
735} 773}
736 774
737int intel_hdcp_init(struct intel_connector *connector, 775int intel_hdcp_init(struct intel_connector *connector,
738 const struct intel_hdcp_shim *hdcp_shim) 776 const struct intel_hdcp_shim *shim)
739{ 777{
778 struct intel_hdcp *hdcp = &connector->hdcp;
740 int ret; 779 int ret;
741 780
742 ret = drm_connector_attach_content_protection_property( 781 ret = drm_connector_attach_content_protection_property(
@@ -744,51 +783,53 @@ int intel_hdcp_init(struct intel_connector *connector,
744 if (ret) 783 if (ret)
745 return ret; 784 return ret;
746 785
747 connector->hdcp_shim = hdcp_shim; 786 hdcp->shim = shim;
748 mutex_init(&connector->hdcp_mutex); 787 mutex_init(&hdcp->mutex);
749 INIT_DELAYED_WORK(&connector->hdcp_check_work, intel_hdcp_check_work); 788 INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
750 INIT_WORK(&connector->hdcp_prop_work, intel_hdcp_prop_work); 789 INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
751 return 0; 790 return 0;
752} 791}
753 792
754int intel_hdcp_enable(struct intel_connector *connector) 793int intel_hdcp_enable(struct intel_connector *connector)
755{ 794{
795 struct intel_hdcp *hdcp = &connector->hdcp;
756 int ret; 796 int ret;
757 797
758 if (!connector->hdcp_shim) 798 if (!hdcp->shim)
759 return -ENOENT; 799 return -ENOENT;
760 800
761 mutex_lock(&connector->hdcp_mutex); 801 mutex_lock(&hdcp->mutex);
762 802
763 ret = _intel_hdcp_enable(connector); 803 ret = _intel_hdcp_enable(connector);
764 if (ret) 804 if (ret)
765 goto out; 805 goto out;
766 806
767 connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_ENABLED; 807 hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
768 schedule_work(&connector->hdcp_prop_work); 808 schedule_work(&hdcp->prop_work);
769 schedule_delayed_work(&connector->hdcp_check_work, 809 schedule_delayed_work(&hdcp->check_work,
770 DRM_HDCP_CHECK_PERIOD_MS); 810 DRM_HDCP_CHECK_PERIOD_MS);
771out: 811out:
772 mutex_unlock(&connector->hdcp_mutex); 812 mutex_unlock(&hdcp->mutex);
773 return ret; 813 return ret;
774} 814}
775 815
776int intel_hdcp_disable(struct intel_connector *connector) 816int intel_hdcp_disable(struct intel_connector *connector)
777{ 817{
818 struct intel_hdcp *hdcp = &connector->hdcp;
778 int ret = 0; 819 int ret = 0;
779 820
780 if (!connector->hdcp_shim) 821 if (!hdcp->shim)
781 return -ENOENT; 822 return -ENOENT;
782 823
783 mutex_lock(&connector->hdcp_mutex); 824 mutex_lock(&hdcp->mutex);
784 825
785 if (connector->hdcp_value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { 826 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
786 connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED; 827 hdcp->value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
787 ret = _intel_hdcp_disable(connector); 828 ret = _intel_hdcp_disable(connector);
788 } 829 }
789 830
790 mutex_unlock(&connector->hdcp_mutex); 831 mutex_unlock(&hdcp->mutex);
791 cancel_delayed_work_sync(&connector->hdcp_check_work); 832 cancel_delayed_work_sync(&hdcp->check_work);
792 return ret; 833 return ret;
793} 834}
794 835
@@ -828,17 +869,18 @@ void intel_hdcp_atomic_check(struct drm_connector *connector,
828/* Implements Part 3 of the HDCP authorization procedure */ 869/* Implements Part 3 of the HDCP authorization procedure */
829int intel_hdcp_check_link(struct intel_connector *connector) 870int intel_hdcp_check_link(struct intel_connector *connector)
830{ 871{
872 struct intel_hdcp *hdcp = &connector->hdcp;
831 struct drm_i915_private *dev_priv = connector->base.dev->dev_private; 873 struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
832 struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); 874 struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
833 enum port port = intel_dig_port->base.port; 875 enum port port = intel_dig_port->base.port;
834 int ret = 0; 876 int ret = 0;
835 877
836 if (!connector->hdcp_shim) 878 if (!hdcp->shim)
837 return -ENOENT; 879 return -ENOENT;
838 880
839 mutex_lock(&connector->hdcp_mutex); 881 mutex_lock(&hdcp->mutex);
840 882
841 if (connector->hdcp_value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) 883 if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
842 goto out; 884 goto out;
843 885
844 if (!(I915_READ(PORT_HDCP_STATUS(port)) & HDCP_STATUS_ENC)) { 886 if (!(I915_READ(PORT_HDCP_STATUS(port)) & HDCP_STATUS_ENC)) {
@@ -846,17 +888,15 @@ int intel_hdcp_check_link(struct intel_connector *connector)
846 connector->base.name, connector->base.base.id, 888 connector->base.name, connector->base.base.id,
847 I915_READ(PORT_HDCP_STATUS(port))); 889 I915_READ(PORT_HDCP_STATUS(port)));
848 ret = -ENXIO; 890 ret = -ENXIO;
849 connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED; 891 hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
850 schedule_work(&connector->hdcp_prop_work); 892 schedule_work(&hdcp->prop_work);
851 goto out; 893 goto out;
852 } 894 }
853 895
854 if (connector->hdcp_shim->check_link(intel_dig_port)) { 896 if (hdcp->shim->check_link(intel_dig_port)) {
855 if (connector->hdcp_value != 897 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
856 DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { 898 hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
857 connector->hdcp_value = 899 schedule_work(&hdcp->prop_work);
858 DRM_MODE_CONTENT_PROTECTION_ENABLED;
859 schedule_work(&connector->hdcp_prop_work);
860 } 900 }
861 goto out; 901 goto out;
862 } 902 }
@@ -867,20 +907,20 @@ int intel_hdcp_check_link(struct intel_connector *connector)
867 ret = _intel_hdcp_disable(connector); 907 ret = _intel_hdcp_disable(connector);
868 if (ret) { 908 if (ret) {
869 DRM_ERROR("Failed to disable hdcp (%d)\n", ret); 909 DRM_ERROR("Failed to disable hdcp (%d)\n", ret);
870 connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED; 910 hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
871 schedule_work(&connector->hdcp_prop_work); 911 schedule_work(&hdcp->prop_work);
872 goto out; 912 goto out;
873 } 913 }
874 914
875 ret = _intel_hdcp_enable(connector); 915 ret = _intel_hdcp_enable(connector);
876 if (ret) { 916 if (ret) {
877 DRM_ERROR("Failed to enable hdcp (%d)\n", ret); 917 DRM_DEBUG_KMS("Failed to enable hdcp (%d)\n", ret);
878 connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED; 918 hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
879 schedule_work(&connector->hdcp_prop_work); 919 schedule_work(&hdcp->prop_work);
880 goto out; 920 goto out;
881 } 921 }
882 922
883out: 923out:
884 mutex_unlock(&connector->hdcp_mutex); 924 mutex_unlock(&hdcp->mutex);
885 return ret; 925 return ret;
886} 926}
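
Stripped of locking and register access, the Part 3 retry policy above is small. A self-contained sketch, where check_link(), disable() and enable() are hypothetical stand-ins for the shim's check_link() hook and the _intel_hdcp_disable()/_intel_hdcp_enable() pair:

enum hdcp_cp { HDCP_CP_UNDESIRED, HDCP_CP_DESIRED, HDCP_CP_ENABLED };

static int check_link_sketch(enum hdcp_cp *value,
			     int (*check_link)(void),
			     int (*disable)(void),
			     int (*enable)(void))
{
	if (*value == HDCP_CP_UNDESIRED)
		return 0;		/* nothing to police */

	if (check_link())
		return 0;		/* link still authenticated */

	/* Retry once: tear the link down and bring it back up. */
	if (disable() || enable()) {
		*value = HDCP_CP_DESIRED;	/* surface the failure */
		return -1;
	}
	return 0;
}

On any failure the real function also schedules prop_work so userspace sees the property drop back to DESIRED.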
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index d7234e03fdb0..e2c6a2b3e8f2 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -148,14 +148,13 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv,
148 } 148 }
149} 149}
150 150
151static void g4x_write_infoframe(struct drm_encoder *encoder, 151static void g4x_write_infoframe(struct intel_encoder *encoder,
152 const struct intel_crtc_state *crtc_state, 152 const struct intel_crtc_state *crtc_state,
153 unsigned int type, 153 unsigned int type,
154 const void *frame, ssize_t len) 154 const void *frame, ssize_t len)
155{ 155{
156 const u32 *data = frame; 156 const u32 *data = frame;
157 struct drm_device *dev = encoder->dev; 157 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
158 struct drm_i915_private *dev_priv = to_i915(dev);
159 u32 val = I915_READ(VIDEO_DIP_CTL); 158 u32 val = I915_READ(VIDEO_DIP_CTL);
160 int i; 159 int i;
161 160
@@ -186,31 +185,29 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
186 POSTING_READ(VIDEO_DIP_CTL); 185 POSTING_READ(VIDEO_DIP_CTL);
187} 186}
188 187
189static bool g4x_infoframe_enabled(struct drm_encoder *encoder, 188static bool g4x_infoframe_enabled(struct intel_encoder *encoder,
190 const struct intel_crtc_state *pipe_config) 189 const struct intel_crtc_state *pipe_config)
191{ 190{
192 struct drm_i915_private *dev_priv = to_i915(encoder->dev); 191 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
193 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
194 u32 val = I915_READ(VIDEO_DIP_CTL); 192 u32 val = I915_READ(VIDEO_DIP_CTL);
195 193
196 if ((val & VIDEO_DIP_ENABLE) == 0) 194 if ((val & VIDEO_DIP_ENABLE) == 0)
197 return false; 195 return false;
198 196
199 if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->base.port)) 197 if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port))
200 return false; 198 return false;
201 199
202 return val & (VIDEO_DIP_ENABLE_AVI | 200 return val & (VIDEO_DIP_ENABLE_AVI |
203 VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD); 201 VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
204} 202}
205 203
206static void ibx_write_infoframe(struct drm_encoder *encoder, 204static void ibx_write_infoframe(struct intel_encoder *encoder,
207 const struct intel_crtc_state *crtc_state, 205 const struct intel_crtc_state *crtc_state,
208 unsigned int type, 206 unsigned int type,
209 const void *frame, ssize_t len) 207 const void *frame, ssize_t len)
210{ 208{
211 const u32 *data = frame; 209 const u32 *data = frame;
212 struct drm_device *dev = encoder->dev; 210 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
213 struct drm_i915_private *dev_priv = to_i915(dev);
214 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); 211 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
215 i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); 212 i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
216 u32 val = I915_READ(reg); 213 u32 val = I915_READ(reg);
@@ -243,11 +240,10 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
243 POSTING_READ(reg); 240 POSTING_READ(reg);
244} 241}
245 242
246static bool ibx_infoframe_enabled(struct drm_encoder *encoder, 243static bool ibx_infoframe_enabled(struct intel_encoder *encoder,
247 const struct intel_crtc_state *pipe_config) 244 const struct intel_crtc_state *pipe_config)
248{ 245{
249 struct drm_i915_private *dev_priv = to_i915(encoder->dev); 246 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
250 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
251 enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe; 247 enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
252 i915_reg_t reg = TVIDEO_DIP_CTL(pipe); 248 i915_reg_t reg = TVIDEO_DIP_CTL(pipe);
253 u32 val = I915_READ(reg); 249 u32 val = I915_READ(reg);
@@ -255,7 +251,7 @@ static bool ibx_infoframe_enabled(struct drm_encoder *encoder,
255 if ((val & VIDEO_DIP_ENABLE) == 0) 251 if ((val & VIDEO_DIP_ENABLE) == 0)
256 return false; 252 return false;
257 253
258 if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->base.port)) 254 if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port))
259 return false; 255 return false;
260 256
261 return val & (VIDEO_DIP_ENABLE_AVI | 257 return val & (VIDEO_DIP_ENABLE_AVI |
@@ -263,14 +259,13 @@ static bool ibx_infoframe_enabled(struct drm_encoder *encoder,
263 VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); 259 VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
264} 260}
265 261
266static void cpt_write_infoframe(struct drm_encoder *encoder, 262static void cpt_write_infoframe(struct intel_encoder *encoder,
267 const struct intel_crtc_state *crtc_state, 263 const struct intel_crtc_state *crtc_state,
268 unsigned int type, 264 unsigned int type,
269 const void *frame, ssize_t len) 265 const void *frame, ssize_t len)
270{ 266{
271 const u32 *data = frame; 267 const u32 *data = frame;
272 struct drm_device *dev = encoder->dev; 268 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
273 struct drm_i915_private *dev_priv = to_i915(dev);
274 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); 269 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
275 i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); 270 i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
276 u32 val = I915_READ(reg); 271 u32 val = I915_READ(reg);
@@ -306,10 +301,10 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
306 POSTING_READ(reg); 301 POSTING_READ(reg);
307} 302}
308 303
309static bool cpt_infoframe_enabled(struct drm_encoder *encoder, 304static bool cpt_infoframe_enabled(struct intel_encoder *encoder,
310 const struct intel_crtc_state *pipe_config) 305 const struct intel_crtc_state *pipe_config)
311{ 306{
312 struct drm_i915_private *dev_priv = to_i915(encoder->dev); 307 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
313 enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe; 308 enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
314 u32 val = I915_READ(TVIDEO_DIP_CTL(pipe)); 309 u32 val = I915_READ(TVIDEO_DIP_CTL(pipe));
315 310
@@ -321,14 +316,13 @@ static bool cpt_infoframe_enabled(struct drm_encoder *encoder,
321 VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); 316 VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
322} 317}
323 318
324static void vlv_write_infoframe(struct drm_encoder *encoder, 319static void vlv_write_infoframe(struct intel_encoder *encoder,
325 const struct intel_crtc_state *crtc_state, 320 const struct intel_crtc_state *crtc_state,
326 unsigned int type, 321 unsigned int type,
327 const void *frame, ssize_t len) 322 const void *frame, ssize_t len)
328{ 323{
329 const u32 *data = frame; 324 const u32 *data = frame;
330 struct drm_device *dev = encoder->dev; 325 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
331 struct drm_i915_private *dev_priv = to_i915(dev);
332 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); 326 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
333 i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); 327 i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
334 u32 val = I915_READ(reg); 328 u32 val = I915_READ(reg);
@@ -361,18 +355,17 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
361 POSTING_READ(reg); 355 POSTING_READ(reg);
362} 356}
363 357
364static bool vlv_infoframe_enabled(struct drm_encoder *encoder, 358static bool vlv_infoframe_enabled(struct intel_encoder *encoder,
365 const struct intel_crtc_state *pipe_config) 359 const struct intel_crtc_state *pipe_config)
366{ 360{
367 struct drm_i915_private *dev_priv = to_i915(encoder->dev); 361 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
368 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
369 enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe; 362 enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
370 u32 val = I915_READ(VLV_TVIDEO_DIP_CTL(pipe)); 363 u32 val = I915_READ(VLV_TVIDEO_DIP_CTL(pipe));
371 364
372 if ((val & VIDEO_DIP_ENABLE) == 0) 365 if ((val & VIDEO_DIP_ENABLE) == 0)
373 return false; 366 return false;
374 367
375 if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->base.port)) 368 if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port))
376 return false; 369 return false;
377 370
378 return val & (VIDEO_DIP_ENABLE_AVI | 371 return val & (VIDEO_DIP_ENABLE_AVI |
@@ -380,14 +373,13 @@ static bool vlv_infoframe_enabled(struct drm_encoder *encoder,
380 VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); 373 VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
381} 374}
382 375
383static void hsw_write_infoframe(struct drm_encoder *encoder, 376static void hsw_write_infoframe(struct intel_encoder *encoder,
384 const struct intel_crtc_state *crtc_state, 377 const struct intel_crtc_state *crtc_state,
385 unsigned int type, 378 unsigned int type,
386 const void *frame, ssize_t len) 379 const void *frame, ssize_t len)
387{ 380{
388 const u32 *data = frame; 381 const u32 *data = frame;
389 struct drm_device *dev = encoder->dev; 382 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
390 struct drm_i915_private *dev_priv = to_i915(dev);
391 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 383 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
392 i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder); 384 i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
393 int data_size = type == DP_SDP_VSC ? 385 int data_size = type == DP_SDP_VSC ?
@@ -415,10 +407,10 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
415 POSTING_READ(ctl_reg); 407 POSTING_READ(ctl_reg);
416} 408}
417 409
418static bool hsw_infoframe_enabled(struct drm_encoder *encoder, 410static bool hsw_infoframe_enabled(struct intel_encoder *encoder,
419 const struct intel_crtc_state *pipe_config) 411 const struct intel_crtc_state *pipe_config)
420{ 412{
421 struct drm_i915_private *dev_priv = to_i915(encoder->dev); 413 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
422 u32 val = I915_READ(HSW_TVIDEO_DIP_CTL(pipe_config->cpu_transcoder)); 414 u32 val = I915_READ(HSW_TVIDEO_DIP_CTL(pipe_config->cpu_transcoder));
423 415
424 return val & (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW | 416 return val & (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
@@ -443,11 +435,11 @@ static bool hsw_infoframe_enabled(struct drm_encoder *encoder,
443 * trick them by giving an offset into the buffer and moving back the header 435 * trick them by giving an offset into the buffer and moving back the header
444 * bytes by one. 436 * bytes by one.
445 */ 437 */
446static void intel_write_infoframe(struct drm_encoder *encoder, 438static void intel_write_infoframe(struct intel_encoder *encoder,
447 const struct intel_crtc_state *crtc_state, 439 const struct intel_crtc_state *crtc_state,
448 union hdmi_infoframe *frame) 440 union hdmi_infoframe *frame)
449{ 441{
450 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 442 struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
451 u8 buffer[VIDEO_DIP_DATA_SIZE]; 443 u8 buffer[VIDEO_DIP_DATA_SIZE];
452 ssize_t len; 444 ssize_t len;
453 445
@@ -457,20 +449,20 @@ static void intel_write_infoframe(struct drm_encoder *encoder,
457 return; 449 return;
458 450
459 /* Insert the 'hole' (see big comment above) at position 3 */ 451 /* Insert the 'hole' (see big comment above) at position 3 */
460 buffer[0] = buffer[1]; 452 memmove(&buffer[0], &buffer[1], 3);
461 buffer[1] = buffer[2];
462 buffer[2] = buffer[3];
463 buffer[3] = 0; 453 buffer[3] = 0;
464 len++; 454 len++;
465 455
466 intel_dig_port->write_infoframe(encoder, crtc_state, frame->any.type, buffer, len); 456 intel_dig_port->write_infoframe(encoder,
457 crtc_state,
458 frame->any.type, buffer, len);
467} 459}
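
The memmove that replaces the three byte copies implements the "hole" trick: the frame is packed one byte into the buffer, the three header bytes are slid down a slot, and a zero byte is left at offset 3 for the ECC/reserved byte the hardware expects there. A toy demonstration with made-up byte values:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	/* buffer[0] is the spare slot; the packed frame occupies [1..7] */
	uint8_t buffer[8] = { 0xaa, 0x82, 0x02, 0x0d, 0x11, 0x22, 0x33, 0x44 };
	size_t len = 7;

	memmove(&buffer[0], &buffer[1], 3);	/* shift header down by one */
	buffer[3] = 0;				/* the ECC "hole" */
	len++;					/* frame grew by one byte */

	for (size_t i = 0; i < len; i++)
		printf("%02x ", buffer[i]);
	printf("\n");	/* prints: 82 02 0d 00 11 22 33 44 */
	return 0;
}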
468 460
469static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, 461static void intel_hdmi_set_avi_infoframe(struct intel_encoder *encoder,
470 const struct intel_crtc_state *crtc_state, 462 const struct intel_crtc_state *crtc_state,
471 const struct drm_connector_state *conn_state) 463 const struct drm_connector_state *conn_state)
472{ 464{
473 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 465 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
474 const struct drm_display_mode *adjusted_mode = 466 const struct drm_display_mode *adjusted_mode =
475 &crtc_state->base.adjusted_mode; 467 &crtc_state->base.adjusted_mode;
476 struct drm_connector *connector = &intel_hdmi->attached_connector->base; 468 struct drm_connector *connector = &intel_hdmi->attached_connector->base;
@@ -487,8 +479,10 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
487 return; 479 return;
488 } 480 }
489 481
490 if (crtc_state->ycbcr420) 482 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
491 frame.avi.colorspace = HDMI_COLORSPACE_YUV420; 483 frame.avi.colorspace = HDMI_COLORSPACE_YUV420;
484 else if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
485 frame.avi.colorspace = HDMI_COLORSPACE_YUV444;
492 else 486 else
493 frame.avi.colorspace = HDMI_COLORSPACE_RGB; 487 frame.avi.colorspace = HDMI_COLORSPACE_RGB;
494 488
@@ -503,10 +497,11 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
503 conn_state); 497 conn_state);
504 498
505 /* TODO: handle pixel repetition for YCBCR420 outputs */ 499 /* TODO: handle pixel repetition for YCBCR420 outputs */
506 intel_write_infoframe(encoder, crtc_state, &frame); 500 intel_write_infoframe(encoder, crtc_state,
501 &frame);
507} 502}
508 503
509static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder, 504static void intel_hdmi_set_spd_infoframe(struct intel_encoder *encoder,
510 const struct intel_crtc_state *crtc_state) 505 const struct intel_crtc_state *crtc_state)
511{ 506{
512 union hdmi_infoframe frame; 507 union hdmi_infoframe frame;
@@ -520,11 +515,12 @@ static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder,
520 515
521 frame.spd.sdi = HDMI_SPD_SDI_PC; 516 frame.spd.sdi = HDMI_SPD_SDI_PC;
522 517
523 intel_write_infoframe(encoder, crtc_state, &frame); 518 intel_write_infoframe(encoder, crtc_state,
519 &frame);
524} 520}
525 521
526static void 522static void
527intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder, 523intel_hdmi_set_hdmi_infoframe(struct intel_encoder *encoder,
528 const struct intel_crtc_state *crtc_state, 524 const struct intel_crtc_state *crtc_state,
529 const struct drm_connector_state *conn_state) 525 const struct drm_connector_state *conn_state)
530{ 526{
@@ -537,20 +533,21 @@ intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder,
537 if (ret < 0) 533 if (ret < 0)
538 return; 534 return;
539 535
540 intel_write_infoframe(encoder, crtc_state, &frame); 536 intel_write_infoframe(encoder, crtc_state,
537 &frame);
541} 538}
542 539
543static void g4x_set_infoframes(struct drm_encoder *encoder, 540static void g4x_set_infoframes(struct intel_encoder *encoder,
544 bool enable, 541 bool enable,
545 const struct intel_crtc_state *crtc_state, 542 const struct intel_crtc_state *crtc_state,
546 const struct drm_connector_state *conn_state) 543 const struct drm_connector_state *conn_state)
547{ 544{
548 struct drm_i915_private *dev_priv = to_i915(encoder->dev); 545 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
549 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 546 struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
550 struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; 547 struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
551 i915_reg_t reg = VIDEO_DIP_CTL; 548 i915_reg_t reg = VIDEO_DIP_CTL;
552 u32 val = I915_READ(reg); 549 u32 val = I915_READ(reg);
553 u32 port = VIDEO_DIP_PORT(intel_dig_port->base.port); 550 u32 port = VIDEO_DIP_PORT(encoder->port);
554 551
555 assert_hdmi_port_disabled(intel_hdmi); 552 assert_hdmi_port_disabled(intel_hdmi);
556 553
@@ -658,11 +655,11 @@ static bool gcp_default_phase_possible(int pipe_bpp,
658 mode->crtc_htotal/2 % pixels_per_group == 0); 655 mode->crtc_htotal/2 % pixels_per_group == 0);
659} 656}
660 657
661static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder, 658static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
662 const struct intel_crtc_state *crtc_state, 659 const struct intel_crtc_state *crtc_state,
663 const struct drm_connector_state *conn_state) 660 const struct drm_connector_state *conn_state)
664{ 661{
665 struct drm_i915_private *dev_priv = to_i915(encoder->dev); 662 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
666 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 663 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
667 i915_reg_t reg; 664 i915_reg_t reg;
668 u32 val = 0; 665 u32 val = 0;
@@ -690,18 +687,18 @@ static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder,
690 return val != 0; 687 return val != 0;
691} 688}
692 689
693static void ibx_set_infoframes(struct drm_encoder *encoder, 690static void ibx_set_infoframes(struct intel_encoder *encoder,
694 bool enable, 691 bool enable,
695 const struct intel_crtc_state *crtc_state, 692 const struct intel_crtc_state *crtc_state,
696 const struct drm_connector_state *conn_state) 693 const struct drm_connector_state *conn_state)
697{ 694{
698 struct drm_i915_private *dev_priv = to_i915(encoder->dev); 695 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
699 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); 696 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
700 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 697 struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
701 struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; 698 struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
702 i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); 699 i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
703 u32 val = I915_READ(reg); 700 u32 val = I915_READ(reg);
704 u32 port = VIDEO_DIP_PORT(intel_dig_port->base.port); 701 u32 port = VIDEO_DIP_PORT(encoder->port);
705 702
706 assert_hdmi_port_disabled(intel_hdmi); 703 assert_hdmi_port_disabled(intel_hdmi);
707 704
@@ -743,14 +740,14 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
743 intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state); 740 intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
744} 741}
745 742
746static void cpt_set_infoframes(struct drm_encoder *encoder, 743static void cpt_set_infoframes(struct intel_encoder *encoder,
747 bool enable, 744 bool enable,
748 const struct intel_crtc_state *crtc_state, 745 const struct intel_crtc_state *crtc_state,
749 const struct drm_connector_state *conn_state) 746 const struct drm_connector_state *conn_state)
750{ 747{
751 struct drm_i915_private *dev_priv = to_i915(encoder->dev); 748 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
752 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); 749 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
753 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 750 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
754 i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); 751 i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
755 u32 val = I915_READ(reg); 752 u32 val = I915_READ(reg);
756 753
@@ -786,18 +783,17 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
786 intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state); 783 intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
787} 784}
788 785
789static void vlv_set_infoframes(struct drm_encoder *encoder, 786static void vlv_set_infoframes(struct intel_encoder *encoder,
790 bool enable, 787 bool enable,
791 const struct intel_crtc_state *crtc_state, 788 const struct intel_crtc_state *crtc_state,
792 const struct drm_connector_state *conn_state) 789 const struct drm_connector_state *conn_state)
793{ 790{
794 struct drm_i915_private *dev_priv = to_i915(encoder->dev); 791 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
795 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
796 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); 792 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
797 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 793 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
798 i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); 794 i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
799 u32 val = I915_READ(reg); 795 u32 val = I915_READ(reg);
800 u32 port = VIDEO_DIP_PORT(intel_dig_port->base.port); 796 u32 port = VIDEO_DIP_PORT(encoder->port);
801 797
802 assert_hdmi_port_disabled(intel_hdmi); 798 assert_hdmi_port_disabled(intel_hdmi);
803 799
@@ -839,12 +835,12 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
839 intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state); 835 intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
840} 836}
841 837
842static void hsw_set_infoframes(struct drm_encoder *encoder, 838static void hsw_set_infoframes(struct intel_encoder *encoder,
843 bool enable, 839 bool enable,
844 const struct intel_crtc_state *crtc_state, 840 const struct intel_crtc_state *crtc_state,
845 const struct drm_connector_state *conn_state) 841 const struct drm_connector_state *conn_state)
846{ 842{
847 struct drm_i915_private *dev_priv = to_i915(encoder->dev); 843 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
848 i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder); 844 i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
849 u32 val = I915_READ(reg); 845 u32 val = I915_READ(reg);
850 846
@@ -966,13 +962,13 @@ int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
966 ret = intel_hdmi_hdcp_write(intel_dig_port, DRM_HDCP_DDC_AN, an, 962 ret = intel_hdmi_hdcp_write(intel_dig_port, DRM_HDCP_DDC_AN, an,
967 DRM_HDCP_AN_LEN); 963 DRM_HDCP_AN_LEN);
968 if (ret) { 964 if (ret) {
969 DRM_ERROR("Write An over DDC failed (%d)\n", ret); 965 DRM_DEBUG_KMS("Write An over DDC failed (%d)\n", ret);
970 return ret; 966 return ret;
971 } 967 }
972 968
973 ret = intel_gmbus_output_aksv(adapter); 969 ret = intel_gmbus_output_aksv(adapter);
974 if (ret < 0) { 970 if (ret < 0) {
975 DRM_ERROR("Failed to output aksv (%d)\n", ret); 971 DRM_DEBUG_KMS("Failed to output aksv (%d)\n", ret);
976 return ret; 972 return ret;
977 } 973 }
978 return 0; 974 return 0;
@@ -985,7 +981,7 @@ static int intel_hdmi_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
985 ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BKSV, bksv, 981 ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BKSV, bksv,
986 DRM_HDCP_KSV_LEN); 982 DRM_HDCP_KSV_LEN);
987 if (ret) 983 if (ret)
988 DRM_ERROR("Read Bksv over DDC failed (%d)\n", ret); 984 DRM_DEBUG_KMS("Read Bksv over DDC failed (%d)\n", ret);
989 return ret; 985 return ret;
990} 986}
991 987
@@ -997,7 +993,7 @@ int intel_hdmi_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
997 ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BSTATUS, 993 ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BSTATUS,
998 bstatus, DRM_HDCP_BSTATUS_LEN); 994 bstatus, DRM_HDCP_BSTATUS_LEN);
999 if (ret) 995 if (ret)
1000 DRM_ERROR("Read bstatus over DDC failed (%d)\n", ret); 996 DRM_DEBUG_KMS("Read bstatus over DDC failed (%d)\n", ret);
1001 return ret; 997 return ret;
1002} 998}
1003 999
@@ -1010,7 +1006,7 @@ int intel_hdmi_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
1010 1006
1011 ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1); 1007 ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
1012 if (ret) { 1008 if (ret) {
1013 DRM_ERROR("Read bcaps over DDC failed (%d)\n", ret); 1009 DRM_DEBUG_KMS("Read bcaps over DDC failed (%d)\n", ret);
1014 return ret; 1010 return ret;
1015 } 1011 }
1016 *repeater_present = val & DRM_HDCP_DDC_BCAPS_REPEATER_PRESENT; 1012 *repeater_present = val & DRM_HDCP_DDC_BCAPS_REPEATER_PRESENT;
@@ -1025,7 +1021,7 @@ int intel_hdmi_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
1025 ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_RI_PRIME, 1021 ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_RI_PRIME,
1026 ri_prime, DRM_HDCP_RI_LEN); 1022 ri_prime, DRM_HDCP_RI_LEN);
1027 if (ret) 1023 if (ret)
1028 DRM_ERROR("Read Ri' over DDC failed (%d)\n", ret); 1024 DRM_DEBUG_KMS("Read Ri' over DDC failed (%d)\n", ret);
1029 return ret; 1025 return ret;
1030} 1026}
1031 1027
@@ -1038,7 +1034,7 @@ int intel_hdmi_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
1038 1034
1039 ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1); 1035 ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
1040 if (ret) { 1036 if (ret) {
1041 DRM_ERROR("Read bcaps over DDC failed (%d)\n", ret); 1037 DRM_DEBUG_KMS("Read bcaps over DDC failed (%d)\n", ret);
1042 return ret; 1038 return ret;
1043 } 1039 }
1044 *ksv_ready = val & DRM_HDCP_DDC_BCAPS_KSV_FIFO_READY; 1040 *ksv_ready = val & DRM_HDCP_DDC_BCAPS_KSV_FIFO_READY;
@@ -1053,7 +1049,7 @@ int intel_hdmi_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
1053 ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_KSV_FIFO, 1049 ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_KSV_FIFO,
1054 ksv_fifo, num_downstream * DRM_HDCP_KSV_LEN); 1050 ksv_fifo, num_downstream * DRM_HDCP_KSV_LEN);
1055 if (ret) { 1051 if (ret) {
1056 DRM_ERROR("Read ksv fifo over DDC failed (%d)\n", ret); 1052 DRM_DEBUG_KMS("Read ksv fifo over DDC failed (%d)\n", ret);
1057 return ret; 1053 return ret;
1058 } 1054 }
1059 return 0; 1055 return 0;
@@ -1071,7 +1067,7 @@ int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
1071 ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_V_PRIME(i), 1067 ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_V_PRIME(i),
1072 part, DRM_HDCP_V_PRIME_PART_LEN); 1068 part, DRM_HDCP_V_PRIME_PART_LEN);
1073 if (ret) 1069 if (ret)
1074 DRM_ERROR("Read V'[%d] over DDC failed (%d)\n", i, ret); 1070 DRM_DEBUG_KMS("Read V'[%d] over DDC failed (%d)\n", i, ret);
1075 return ret; 1071 return ret;
1076} 1072}
1077 1073
@@ -1218,7 +1214,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
1218 if (tmp & HDMI_MODE_SELECT_HDMI) 1214 if (tmp & HDMI_MODE_SELECT_HDMI)
1219 pipe_config->has_hdmi_sink = true; 1215 pipe_config->has_hdmi_sink = true;
1220 1216
1221 if (intel_dig_port->infoframe_enabled(&encoder->base, pipe_config)) 1217 if (intel_dig_port->infoframe_enabled(encoder, pipe_config))
1222 pipe_config->has_infoframe = true; 1218 pipe_config->has_infoframe = true;
1223 1219
1224 if (tmp & SDVO_AUDIO_ENABLE) 1220 if (tmp & SDVO_AUDIO_ENABLE)
@@ -1439,7 +1435,8 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
1439 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); 1435 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
1440 } 1436 }
1441 1437
1442 intel_dig_port->set_infoframes(&encoder->base, false, 1438 intel_dig_port->set_infoframes(encoder,
1439 false,
1443 old_crtc_state, old_conn_state); 1440 old_crtc_state, old_conn_state);
1444 1441
1445 intel_dp_dual_mode_set_tmds_output(intel_hdmi, false); 1442 intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
@@ -1598,6 +1595,8 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
1598 struct drm_atomic_state *state = crtc_state->base.state; 1595 struct drm_atomic_state *state = crtc_state->base.state;
1599 struct drm_connector_state *connector_state; 1596 struct drm_connector_state *connector_state;
1600 struct drm_connector *connector; 1597 struct drm_connector *connector;
1598 const struct drm_display_mode *adjusted_mode =
1599 &crtc_state->base.adjusted_mode;
1601 int i; 1600 int i;
1602 1601
1603 if (HAS_GMCH_DISPLAY(dev_priv)) 1602 if (HAS_GMCH_DISPLAY(dev_priv))
@@ -1625,7 +1624,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
1625 if (connector_state->crtc != crtc_state->base.crtc) 1624 if (connector_state->crtc != crtc_state->base.crtc)
1626 continue; 1625 continue;
1627 1626
1628 if (crtc_state->ycbcr420) { 1627 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
1629 const struct drm_hdmi_info *hdmi = &info->hdmi; 1628 const struct drm_hdmi_info *hdmi = &info->hdmi;
1630 1629
1631 if (bpc == 12 && !(hdmi->y420_dc_modes & 1630 if (bpc == 12 && !(hdmi->y420_dc_modes &
@@ -1646,7 +1645,14 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
1646 1645
1647 /* Display WA #1139: glk */ 1646 /* Display WA #1139: glk */
1648 if (bpc == 12 && IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1) && 1647 if (bpc == 12 && IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1) &&
1649 crtc_state->base.adjusted_mode.htotal > 5460) 1648 adjusted_mode->htotal > 5460)
1649 return false;
1650
1651 /* Display Wa_1405510057:icl */
1652 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
1653 bpc == 10 && IS_ICELAKE(dev_priv) &&
1654 (adjusted_mode->crtc_hblank_end -
1655 adjusted_mode->crtc_hblank_start) % 8 == 2)
1650 return false; 1656 return false;
1651 1657
1652 return true; 1658 return true;
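
The two workaround checks above combine into one predicate. A sketch under stated assumptions: the mode fields mirror drm_display_mode, and the platform flags are hypothetical stand-ins for IS_GLK_REVID()/IS_ICELAKE():

#include <stdbool.h>

struct mode_sketch {
	int htotal;
	int crtc_hblank_start, crtc_hblank_end;
};

static bool deep_color_wa_blocks(const struct mode_sketch *m, int bpc,
				 bool is_glk_a1, bool is_icl,
				 bool is_ycbcr420)
{
	/* Display WA #1139: glk - no 12 bpc on very wide modes */
	if (bpc == 12 && is_glk_a1 && m->htotal > 5460)
		return true;

	/* Display Wa_1405510057: icl - 10 bpc 4:2:0 with hblank % 8 == 2 */
	if (is_ycbcr420 && bpc == 10 && is_icl &&
	    (m->crtc_hblank_end - m->crtc_hblank_start) % 8 == 2)
		return true;

	return false;
}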
@@ -1670,7 +1676,7 @@ intel_hdmi_ycbcr420_config(struct drm_connector *connector,
1670 *clock_12bpc /= 2; 1676 *clock_12bpc /= 2;
1671 *clock_10bpc /= 2; 1677 *clock_10bpc /= 2;
1672 *clock_8bpc /= 2; 1678 *clock_8bpc /= 2;
1673 config->ycbcr420 = true; 1679 config->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
1674 1680
1675 /* YCBCR 420 output conversion needs a scaler */ 1681 /* YCBCR 420 output conversion needs a scaler */
1676 if (skl_update_scaler_crtc(config)) { 1682 if (skl_update_scaler_crtc(config)) {
@@ -1704,6 +1710,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
1704 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) 1710 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
1705 return false; 1711 return false;
1706 1712
1713 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
1707 pipe_config->has_hdmi_sink = !force_dvi && intel_hdmi->has_hdmi_sink; 1714 pipe_config->has_hdmi_sink = !force_dvi && intel_hdmi->has_hdmi_sink;
1708 1715
1709 if (pipe_config->has_hdmi_sink) 1716 if (pipe_config->has_hdmi_sink)
@@ -1974,7 +1981,7 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder,
1974 1981
1975 intel_hdmi_prepare(encoder, pipe_config); 1982 intel_hdmi_prepare(encoder, pipe_config);
1976 1983
1977 intel_dig_port->set_infoframes(&encoder->base, 1984 intel_dig_port->set_infoframes(encoder,
1978 pipe_config->has_infoframe, 1985 pipe_config->has_infoframe,
1979 pipe_config, conn_state); 1986 pipe_config, conn_state);
1980} 1987}
@@ -1992,7 +1999,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
1992 vlv_set_phy_signal_level(encoder, 0x2b245f5f, 0x00002000, 0x5578b83a, 1999 vlv_set_phy_signal_level(encoder, 0x2b245f5f, 0x00002000, 0x5578b83a,
1993 0x2b247878); 2000 0x2b247878);
1994 2001
1995 dport->set_infoframes(&encoder->base, 2002 dport->set_infoframes(encoder,
1996 pipe_config->has_infoframe, 2003 pipe_config->has_infoframe,
1997 pipe_config, conn_state); 2004 pipe_config, conn_state);
1998 2005
@@ -2063,7 +2070,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
2063 /* Use 800mV-0dB */ 2070 /* Use 800mV-0dB */
2064 chv_set_phy_signal_level(encoder, 128, 102, false); 2071 chv_set_phy_signal_level(encoder, 128, 102, false);
2065 2072
2066 dport->set_infoframes(&encoder->base, 2073 dport->set_infoframes(encoder,
2067 pipe_config->has_infoframe, 2074 pipe_config->has_infoframe,
2068 pipe_config, conn_state); 2075 pipe_config, conn_state);
2069 2076
@@ -2075,13 +2082,26 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
2075 chv_phy_release_cl2_override(encoder); 2082 chv_phy_release_cl2_override(encoder);
2076} 2083}
2077 2084
2085static int
2086intel_hdmi_connector_register(struct drm_connector *connector)
2087{
2088 int ret;
2089
2090 ret = intel_connector_register(connector);
2091 if (ret)
2092 return ret;
2093
2094 i915_debugfs_connector_add(connector);
2095
2096 return ret;
2097}
2098
2078static void intel_hdmi_destroy(struct drm_connector *connector) 2099static void intel_hdmi_destroy(struct drm_connector *connector)
2079{ 2100{
2080 if (intel_attached_hdmi(connector)->cec_notifier) 2101 if (intel_attached_hdmi(connector)->cec_notifier)
2081 cec_notifier_put(intel_attached_hdmi(connector)->cec_notifier); 2102 cec_notifier_put(intel_attached_hdmi(connector)->cec_notifier);
2082 kfree(to_intel_connector(connector)->detect_edid); 2103
2083 drm_connector_cleanup(connector); 2104 intel_connector_destroy(connector);
2084 kfree(connector);
2085} 2105}
2086 2106
2087static const struct drm_connector_funcs intel_hdmi_connector_funcs = { 2107static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
@@ -2090,7 +2110,7 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
2090 .fill_modes = drm_helper_probe_single_connector_modes, 2110 .fill_modes = drm_helper_probe_single_connector_modes,
2091 .atomic_get_property = intel_digital_connector_atomic_get_property, 2111 .atomic_get_property = intel_digital_connector_atomic_get_property,
2092 .atomic_set_property = intel_digital_connector_atomic_set_property, 2112 .atomic_set_property = intel_digital_connector_atomic_set_property,
2093 .late_register = intel_connector_register, 2113 .late_register = intel_hdmi_connector_register,
2094 .early_unregister = intel_connector_unregister, 2114 .early_unregister = intel_connector_unregister,
2095 .destroy = intel_hdmi_destroy, 2115 .destroy = intel_hdmi_destroy,
2096 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 2116 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
@@ -2110,11 +2130,16 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
2110static void 2130static void
2111intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector) 2131intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
2112{ 2132{
2133 struct drm_i915_private *dev_priv = to_i915(connector->dev);
2134
2113 intel_attach_force_audio_property(connector); 2135 intel_attach_force_audio_property(connector);
2114 intel_attach_broadcast_rgb_property(connector); 2136 intel_attach_broadcast_rgb_property(connector);
2115 intel_attach_aspect_ratio_property(connector); 2137 intel_attach_aspect_ratio_property(connector);
2116 drm_connector_attach_content_type_property(connector); 2138 drm_connector_attach_content_type_property(connector);
2117 connector->state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE; 2139 connector->state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
2140
2141 if (!HAS_GMCH_DISPLAY(dev_priv))
2142 drm_connector_attach_max_bpc_property(connector, 8, 12);
2118} 2143}
2119 2144
2120/* 2145/*
@@ -2325,9 +2350,18 @@ void intel_infoframe_init(struct intel_digital_port *intel_dig_port)
2325 intel_dig_port->set_infoframes = g4x_set_infoframes; 2350 intel_dig_port->set_infoframes = g4x_set_infoframes;
2326 intel_dig_port->infoframe_enabled = g4x_infoframe_enabled; 2351 intel_dig_port->infoframe_enabled = g4x_infoframe_enabled;
2327 } else if (HAS_DDI(dev_priv)) { 2352 } else if (HAS_DDI(dev_priv)) {
2328 intel_dig_port->write_infoframe = hsw_write_infoframe; 2353 if (intel_dig_port->lspcon.active) {
2329 intel_dig_port->set_infoframes = hsw_set_infoframes; 2354 intel_dig_port->write_infoframe =
2330 intel_dig_port->infoframe_enabled = hsw_infoframe_enabled; 2355 lspcon_write_infoframe;
2356 intel_dig_port->set_infoframes = lspcon_set_infoframes;
2357 intel_dig_port->infoframe_enabled =
2358 lspcon_infoframe_enabled;
2359 } else {
2360 intel_dig_port->set_infoframes = hsw_set_infoframes;
2361 intel_dig_port->infoframe_enabled =
2362 hsw_infoframe_enabled;
2363 intel_dig_port->write_infoframe = hsw_write_infoframe;
2364 }
2331 } else if (HAS_PCH_IBX(dev_priv)) { 2365 } else if (HAS_PCH_IBX(dev_priv)) {
2332 intel_dig_port->write_infoframe = ibx_write_infoframe; 2366 intel_dig_port->write_infoframe = ibx_write_infoframe;
2333 intel_dig_port->set_infoframes = ibx_set_infoframes; 2367 intel_dig_port->set_infoframes = ibx_set_infoframes;
@@ -2486,5 +2520,6 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv,
2486 2520
2487 intel_infoframe_init(intel_dig_port); 2521 intel_infoframe_init(intel_dig_port);
2488 2522
2523 intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
2489 intel_hdmi_init_connector(intel_dig_port, intel_connector); 2524 intel_hdmi_init_connector(intel_dig_port, intel_connector);
2490} 2525}
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index 648a13c6043c..e24174d08fed 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -114,51 +114,68 @@ enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
114#define HPD_STORM_REENABLE_DELAY (2 * 60 * 1000) 114#define HPD_STORM_REENABLE_DELAY (2 * 60 * 1000)
115 115
116/** 116/**
117 * intel_hpd_irq_storm_detect - gather stats and detect HPD irq storm on a pin 117 * intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin
118 * @dev_priv: private driver data pointer 118 * @dev_priv: private driver data pointer
119 * @pin: the pin to gather stats on 119 * @pin: the pin to gather stats on
120 * @long_hpd: whether the HPD IRQ was long or short
120 * 121 *
121 * Gather stats about HPD irqs from the specified @pin, and detect irq 122 * Gather stats about HPD IRQs from the specified @pin, and detect IRQ
122 * storms. Only the pin specific stats and state are changed, the caller is 123 * storms. Only the pin specific stats and state are changed, the caller is
123 * responsible for further action. 124 * responsible for further action.
124 * 125 *
125 * The number of irqs that are allowed within @HPD_STORM_DETECT_PERIOD is 126 * The number of IRQs that are allowed within @HPD_STORM_DETECT_PERIOD is
126 * stored in @dev_priv->hotplug.hpd_storm_threshold which defaults to 127 * stored in @dev_priv->hotplug.hpd_storm_threshold which defaults to
127 * @HPD_STORM_DEFAULT_THRESHOLD. If this threshold is exceeded, it's 128 * @HPD_STORM_DEFAULT_THRESHOLD. Long IRQs count as +10 to this threshold, and
128 * considered an irq storm and the irq state is set to @HPD_MARK_DISABLED. 129 * short IRQs count as +1. If this threshold is exceeded, it's considered an
130 * IRQ storm and the IRQ state is set to @HPD_MARK_DISABLED.
131 *
132 * By default, most systems will only count long IRQs towards
133 * &dev_priv->hotplug.hpd_storm_threshold. However, some older systems also
134 * suffer from short IRQ storms and must also track these. Because short IRQ
135 * storms are naturally caused by sideband interactions with DP MST devices,
136 * short IRQ detection is only enabled for systems without DP MST support.
137 * Systems which are new enough to support DP MST are far less likely to
138 * suffer from IRQ storms at all, so this is fine.
129 * 139 *
130 * The HPD threshold can be controlled through i915_hpd_storm_ctl in debugfs, 140 * The HPD threshold can be controlled through i915_hpd_storm_ctl in debugfs,
131 * and should only be adjusted for automated hotplug testing. 141 * and should only be adjusted for automated hotplug testing.
132 * 142 *
133 * Return true if an irq storm was detected on @pin. 143 * Return true if an IRQ storm was detected on @pin.
134 */ 144 */
135static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv, 145static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
136 enum hpd_pin pin) 146 enum hpd_pin pin, bool long_hpd)
137{ 147{
138 unsigned long start = dev_priv->hotplug.stats[pin].last_jiffies; 148 struct i915_hotplug *hpd = &dev_priv->hotplug;
149 unsigned long start = hpd->stats[pin].last_jiffies;
139 unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD); 150 unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
140 const int threshold = dev_priv->hotplug.hpd_storm_threshold; 151 const int increment = long_hpd ? 10 : 1;
152 const int threshold = hpd->hpd_storm_threshold;
141 bool storm = false; 153 bool storm = false;
142 154
155 if (!threshold ||
156 (!long_hpd && !dev_priv->hotplug.hpd_short_storm_enabled))
157 return false;
158
143 if (!time_in_range(jiffies, start, end)) { 159 if (!time_in_range(jiffies, start, end)) {
144 dev_priv->hotplug.stats[pin].last_jiffies = jiffies; 160 hpd->stats[pin].last_jiffies = jiffies;
145 dev_priv->hotplug.stats[pin].count = 0; 161 hpd->stats[pin].count = 0;
146 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", pin); 162 }
147 } else if (dev_priv->hotplug.stats[pin].count > threshold && 163
148 threshold) { 164 hpd->stats[pin].count += increment;
149 dev_priv->hotplug.stats[pin].state = HPD_MARK_DISABLED; 165 if (hpd->stats[pin].count > threshold) {
166 hpd->stats[pin].state = HPD_MARK_DISABLED;
150 DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", pin); 167 DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", pin);
151 storm = true; 168 storm = true;
152 } else { 169 } else {
153 dev_priv->hotplug.stats[pin].count++;
154 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", pin, 170 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", pin,
155 dev_priv->hotplug.stats[pin].count); 171 hpd->stats[pin].count);
156 } 172 }
157 173
158 return storm; 174 return storm;
159} 175}
160 176
161static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv) 177static void
178intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
162{ 179{
163 struct drm_device *dev = &dev_priv->drm; 180 struct drm_device *dev = &dev_priv->drm;
164 struct intel_connector *intel_connector; 181 struct intel_connector *intel_connector;
@@ -228,7 +245,9 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
228 drm_for_each_connector_iter(connector, &conn_iter) { 245 drm_for_each_connector_iter(connector, &conn_iter) {
229 struct intel_connector *intel_connector = to_intel_connector(connector); 246 struct intel_connector *intel_connector = to_intel_connector(connector);
230 247
231 if (intel_connector->encoder->hpd_pin == pin) { 248 /* Don't check MST ports, they don't have pins */
249 if (!intel_connector->mst_port &&
250 intel_connector->encoder->hpd_pin == pin) {
232 if (connector->polled != intel_connector->polled) 251 if (connector->polled != intel_connector->polled)
233 DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n", 252 DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
234 connector->name); 253 connector->name);
@@ -346,8 +365,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
346 hpd_event_bits = dev_priv->hotplug.event_bits; 365 hpd_event_bits = dev_priv->hotplug.event_bits;
347 dev_priv->hotplug.event_bits = 0; 366 dev_priv->hotplug.event_bits = 0;
348 367
349 /* Disable hotplug on connectors that hit an irq storm. */ 368 /* Enable polling for connectors which had HPD IRQ storms */
350 intel_hpd_irq_storm_disable(dev_priv); 369 intel_hpd_irq_storm_switch_to_polling(dev_priv);
351 370
352 spin_unlock_irq(&dev_priv->irq_lock); 371 spin_unlock_irq(&dev_priv->irq_lock);
353 372
@@ -395,37 +414,54 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
395 struct intel_encoder *encoder; 414 struct intel_encoder *encoder;
396 bool storm_detected = false; 415 bool storm_detected = false;
397 bool queue_dig = false, queue_hp = false; 416 bool queue_dig = false, queue_hp = false;
417 u32 long_hpd_pulse_mask = 0;
418 u32 short_hpd_pulse_mask = 0;
419 enum hpd_pin pin;
398 420
399 if (!pin_mask) 421 if (!pin_mask)
400 return; 422 return;
401 423
402 spin_lock(&dev_priv->irq_lock); 424 spin_lock(&dev_priv->irq_lock);
425
426 /*
427 * Determine whether ->hpd_pulse() exists for each pin, and
428 * whether we have a short or a long pulse. This is needed
429 * as each pin may have up to two encoders (HDMI and DP) and
430 * only one of them (DP) will have ->hpd_pulse().
431 */
403 for_each_intel_encoder(&dev_priv->drm, encoder) { 432 for_each_intel_encoder(&dev_priv->drm, encoder) {
404 enum hpd_pin pin = encoder->hpd_pin;
405 bool has_hpd_pulse = intel_encoder_has_hpd_pulse(encoder); 433 bool has_hpd_pulse = intel_encoder_has_hpd_pulse(encoder);
434 enum port port = encoder->port;
435 bool long_hpd;
406 436
437 pin = encoder->hpd_pin;
407 if (!(BIT(pin) & pin_mask)) 438 if (!(BIT(pin) & pin_mask))
408 continue; 439 continue;
409 440
410 if (has_hpd_pulse) { 441 if (!has_hpd_pulse)
411 bool long_hpd = long_mask & BIT(pin); 442 continue;
412 enum port port = encoder->port;
413 443
414 DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port), 444 long_hpd = long_mask & BIT(pin);
415 long_hpd ? "long" : "short"); 445
416 /* 446 DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port),
417 * For long HPD pulses we want to have the digital queue happen, 447 long_hpd ? "long" : "short");
418 * but we still want HPD storm detection to function. 448 queue_dig = true;
419 */ 449
420 queue_dig = true; 450 if (long_hpd) {
421 if (long_hpd) { 451 long_hpd_pulse_mask |= BIT(pin);
422 dev_priv->hotplug.long_port_mask |= (1 << port); 452 dev_priv->hotplug.long_port_mask |= BIT(port);
423 } else { 453 } else {
424 /* for short HPD just trigger the digital queue */ 454 short_hpd_pulse_mask |= BIT(pin);
425 dev_priv->hotplug.short_port_mask |= (1 << port); 455 dev_priv->hotplug.short_port_mask |= BIT(port);
426 continue;
427 }
428 } 456 }
457 }
458
459 /* Now process each pin just once */
460 for_each_hpd_pin(pin) {
461 bool long_hpd;
462
463 if (!(BIT(pin) & pin_mask))
464 continue;
429 465
430 if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) { 466 if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) {
431 /* 467 /*
@@ -442,17 +478,30 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
442 if (dev_priv->hotplug.stats[pin].state != HPD_ENABLED) 478 if (dev_priv->hotplug.stats[pin].state != HPD_ENABLED)
443 continue; 479 continue;
444 480
445 if (!has_hpd_pulse) { 481 /*
482 * Delegate to ->hpd_pulse() if one of the encoders for this
483 * pin has it, otherwise let the hotplug_work deal with this
484 * pin directly.
485 */
486 if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) {
487 long_hpd = long_hpd_pulse_mask & BIT(pin);
488 } else {
446 dev_priv->hotplug.event_bits |= BIT(pin); 489 dev_priv->hotplug.event_bits |= BIT(pin);
490 long_hpd = true;
447 queue_hp = true; 491 queue_hp = true;
448 } 492 }
449 493
450 if (intel_hpd_irq_storm_detect(dev_priv, pin)) { 494 if (intel_hpd_irq_storm_detect(dev_priv, pin, long_hpd)) {
451 dev_priv->hotplug.event_bits &= ~BIT(pin); 495 dev_priv->hotplug.event_bits &= ~BIT(pin);
452 storm_detected = true; 496 storm_detected = true;
497 queue_hp = true;
453 } 498 }
454 } 499 }
455 500
501 /*
502 * Disable any IRQs that storms were detected on. Polling enablement
503 * happens later in our hotplug work.
504 */
456 if (storm_detected && dev_priv->display_irqs_enabled) 505 if (storm_detected && dev_priv->display_irqs_enabled)
457 dev_priv->display.hpd_irq_setup(dev_priv); 506 dev_priv->display.hpd_irq_setup(dev_priv);
458 spin_unlock(&dev_priv->irq_lock); 507 spin_unlock(&dev_priv->irq_lock);
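
The reworked handler is a two-pass dispatch over bitmasks. A sketch of the shape, where pulse_pins is a hypothetical precomputed mask of pins that have an encoder with ->hpd_pulse():

#include <stdint.h>
#include <stdbool.h>

static void hpd_dispatch_sketch(uint32_t pin_mask, uint32_t pulse_pins,
				uint32_t *event_bits,
				bool *queue_dig, bool *queue_hp)
{
	/* Pass one: pulse-capable pins go to the digital worker. */
	if (pin_mask & pulse_pins)
		*queue_dig = true;

	/* Pass two: visit each pin exactly once; pins without a
	 * ->hpd_pulse() encoder become plain hotplug events. */
	for (unsigned int pin = 0; pin < 32; pin++) {
		uint32_t bit = UINT32_C(1) << pin;

		if (!(pin_mask & bit) || (pulse_pins & bit))
			continue;
		*event_bits |= bit;	/* no ->hpd_pulse(), treat as long */
		*queue_hp = true;
	}
}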
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
index 37ef540dd280..bc27b691d824 100644
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ b/drivers/gpu/drm/i915/intel_huc.c
@@ -108,13 +108,14 @@ fail:
108 * This function reads status register to verify if HuC 108 * This function reads status register to verify if HuC
109 * firmware was successfully loaded. 109 * firmware was successfully loaded.
110 * 110 *
111 * Returns positive value if HuC firmware is loaded and verified 111 * Returns: 1 if HuC firmware is loaded and verified,
112 * and -ENODEV if HuC is not present. 112 * 0 if HuC firmware is not loaded, and -ENODEV if HuC
113 * is not present on this platform.
113 */ 114 */
114int intel_huc_check_status(struct intel_huc *huc) 115int intel_huc_check_status(struct intel_huc *huc)
115{ 116{
116 struct drm_i915_private *dev_priv = huc_to_i915(huc); 117 struct drm_i915_private *dev_priv = huc_to_i915(huc);
117 u32 status; 118 bool status;
118 119
119 if (!HAS_HUC(dev_priv)) 120 if (!HAS_HUC(dev_priv))
120 return -ENODEV; 121 return -ENODEV;
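
With the clarified return contract, callers should distinguish "firmware not loaded" from "no HuC at all". A hedged usage sketch:

#include <errno.h>
#include <stdio.h>

static void report_huc(int status)
{
	if (status == -ENODEV)
		printf("platform has no HuC\n");
	else if (status == 0)
		printf("HuC present but firmware not loaded\n");
	else
		printf("HuC firmware loaded and verified\n");
}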
diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c
index cdf19553ffac..5d5336fbe7b0 100644
--- a/drivers/gpu/drm/i915/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/intel_lpe_audio.c
@@ -297,8 +297,10 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
297 lpe_audio_platdev_destroy(dev_priv); 297 lpe_audio_platdev_destroy(dev_priv);
298 298
299 irq_free_desc(dev_priv->lpe_audio.irq); 299 irq_free_desc(dev_priv->lpe_audio.irq);
300}
301 300
301 dev_priv->lpe_audio.irq = -1;
302 dev_priv->lpe_audio.platdev = NULL;
303}
302 304
303/** 305/**
304 * intel_lpe_audio_notify() - notify lpe audio event 306 * intel_lpe_audio_notify() - notify lpe audio event
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 43957bb37a42..08fd9b12e4d7 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -259,63 +259,6 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
259 ce->lrc_desc = desc; 259 ce->lrc_desc = desc;
260} 260}
261 261
262static struct i915_priolist *
263lookup_priolist(struct intel_engine_cs *engine, int prio)
264{
265 struct intel_engine_execlists * const execlists = &engine->execlists;
266 struct i915_priolist *p;
267 struct rb_node **parent, *rb;
268 bool first = true;
269
270 if (unlikely(execlists->no_priolist))
271 prio = I915_PRIORITY_NORMAL;
272
273find_priolist:
274 /* most positive priority is scheduled first, equal priorities fifo */
275 rb = NULL;
276 parent = &execlists->queue.rb_root.rb_node;
277 while (*parent) {
278 rb = *parent;
279 p = to_priolist(rb);
280 if (prio > p->priority) {
281 parent = &rb->rb_left;
282 } else if (prio < p->priority) {
283 parent = &rb->rb_right;
284 first = false;
285 } else {
286 return p;
287 }
288 }
289
290 if (prio == I915_PRIORITY_NORMAL) {
291 p = &execlists->default_priolist;
292 } else {
293 p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
294 /* Convert an allocation failure to a priority bump */
295 if (unlikely(!p)) {
296 prio = I915_PRIORITY_NORMAL; /* recurses just once */
297
298 /* To maintain ordering with all rendering, after an
299 * allocation failure we have to disable all scheduling.
300 * Requests will then be executed in fifo, and schedule
301 * will ensure that dependencies are emitted in fifo.
302 * There will be still some reordering with existing
303 * requests, so if userspace lied about their
304 * dependencies that reordering may be visible.
305 */
306 execlists->no_priolist = true;
307 goto find_priolist;
308 }
309 }
310
311 p->priority = prio;
312 INIT_LIST_HEAD(&p->requests);
313 rb_link_node(&p->node, rb, parent);
314 rb_insert_color_cached(&p->node, &execlists->queue, first);
315
316 return p;
317}
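
The allocation-failure comment in the (relocated) lookup above describes a deliberate degradation: a failed node allocation collapses all scheduling to a single FIFO bucket at normal priority. A sketch of that fallback, with the rbtree replaced by plain malloc for brevity:

#include <stdlib.h>

struct prio_bucket { int priority; /* ... request list ... */ };

static struct prio_bucket default_bucket;
static int no_priolist;			/* sticky degradation flag */

static struct prio_bucket *lookup_bucket_sketch(int prio)
{
	struct prio_bucket *p;

	if (no_priolist)
		prio = 0;		/* I915_PRIORITY_NORMAL */
retry:
	if (prio == 0)
		return &default_bucket;	/* preallocated, cannot fail */

	p = malloc(sizeof(*p));
	if (!p) {
		no_priolist = 1;	/* degrade to FIFO from now on */
		prio = 0;
		goto retry;		/* recurses just once */
	}
	p->priority = prio;
	return p;
}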
318
319static void unwind_wa_tail(struct i915_request *rq) 262static void unwind_wa_tail(struct i915_request *rq)
320{ 263{
321 rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES); 264 rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES);
@@ -324,9 +267,9 @@ static void unwind_wa_tail(struct i915_request *rq)
324 267
325static void __unwind_incomplete_requests(struct intel_engine_cs *engine) 268static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
326{ 269{
327 struct i915_request *rq, *rn; 270 struct i915_request *rq, *rn, *active = NULL;
328 struct i915_priolist *uninitialized_var(p); 271 struct list_head *uninitialized_var(pl);
329 int last_prio = I915_PRIORITY_INVALID; 272 int prio = I915_PRIORITY_INVALID | I915_PRIORITY_NEWCLIENT;
330 273
331 lockdep_assert_held(&engine->timeline.lock); 274 lockdep_assert_held(&engine->timeline.lock);
332 275
@@ -334,19 +277,34 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
334 &engine->timeline.requests, 277 &engine->timeline.requests,
335 link) { 278 link) {
336 if (i915_request_completed(rq)) 279 if (i915_request_completed(rq))
337 return; 280 break;
338 281
339 __i915_request_unsubmit(rq); 282 __i915_request_unsubmit(rq);
340 unwind_wa_tail(rq); 283 unwind_wa_tail(rq);
341 284
285 GEM_BUG_ON(rq->hw_context->active);
286
342 GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID); 287 GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
343 if (rq_prio(rq) != last_prio) { 288 if (rq_prio(rq) != prio) {
344 last_prio = rq_prio(rq); 289 prio = rq_prio(rq);
345 p = lookup_priolist(engine, last_prio); 290 pl = i915_sched_lookup_priolist(engine, prio);
346 } 291 }
292 GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
293
294 list_add(&rq->sched.link, pl);
347 295
348 GEM_BUG_ON(p->priority != rq_prio(rq)); 296 active = rq;
349 list_add(&rq->sched.link, &p->requests); 297 }
298
299 /*
300 * The active request is now effectively the start of a new client
301 * stream, so give it the equivalent small priority bump to prevent
302 * it being gazumped a second time by another peer.
303 */
304 if (!(prio & I915_PRIORITY_NEWCLIENT)) {
305 prio |= I915_PRIORITY_NEWCLIENT;
306 list_move_tail(&active->sched.link,
307 i915_sched_lookup_priolist(engine, prio));
350 } 308 }
351} 309}
352 310
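
The reworked unwind path gives the preempted request a one-time I915_PRIORITY_NEWCLIENT bump so it is requeued ahead of equal-priority peers but never ahead of a genuinely higher user priority. A toy sketch of that level-plus-flag encoding (the shift and flag value here are illustrative, not i915's real layout):

#include <stdio.h>

#define PRIO_NEWCLIENT	(1 << 0)	/* internal bump flag (illustrative) */
#define USER_PRIO(x)	((x) << 1)	/* user level sits above the flag bits */

int main(void)
{
	int prio = USER_PRIO(0);

	if (!(prio & PRIO_NEWCLIENT))
		prio |= PRIO_NEWCLIENT;	/* applied at most once */

	/* 1 > 0: beats peers at the same user level... */
	printf("%d > %d\n", prio, USER_PRIO(0));
	/* ...but 2 > 1: never beats a higher user level */
	printf("%d > %d\n", USER_PRIO(1), prio);
	return 0;
}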
@@ -355,13 +313,8 @@ execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
355{ 313{
356 struct intel_engine_cs *engine = 314 struct intel_engine_cs *engine =
357 container_of(execlists, typeof(*engine), execlists); 315 container_of(execlists, typeof(*engine), execlists);
358 unsigned long flags;
359
360 spin_lock_irqsave(&engine->timeline.lock, flags);
361 316
362 __unwind_incomplete_requests(engine); 317 __unwind_incomplete_requests(engine);
363
364 spin_unlock_irqrestore(&engine->timeline.lock, flags);
365} 318}
366 319
367static inline void 320static inline void
@@ -394,13 +347,17 @@ execlists_user_end(struct intel_engine_execlists *execlists)
394static inline void 347static inline void
395execlists_context_schedule_in(struct i915_request *rq) 348execlists_context_schedule_in(struct i915_request *rq)
396{ 349{
350 GEM_BUG_ON(rq->hw_context->active);
351
397 execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN); 352 execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
398 intel_engine_context_in(rq->engine); 353 intel_engine_context_in(rq->engine);
354 rq->hw_context->active = rq->engine;
399} 355}
400 356
401static inline void 357static inline void
402execlists_context_schedule_out(struct i915_request *rq, unsigned long status) 358execlists_context_schedule_out(struct i915_request *rq, unsigned long status)
403{ 359{
360 rq->hw_context->active = NULL;
404 intel_engine_context_out(rq->engine); 361 intel_engine_context_out(rq->engine);
405 execlists_context_status_change(rq, status); 362 execlists_context_status_change(rq, status);
406 trace_i915_request_out(rq); 363 trace_i915_request_out(rq);
@@ -417,21 +374,32 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
417 374
418static u64 execlists_update_context(struct i915_request *rq) 375static u64 execlists_update_context(struct i915_request *rq)
419{ 376{
377 struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt;
420 struct intel_context *ce = rq->hw_context; 378 struct intel_context *ce = rq->hw_context;
421 struct i915_hw_ppgtt *ppgtt =
422 rq->gem_context->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
423 u32 *reg_state = ce->lrc_reg_state; 379 u32 *reg_state = ce->lrc_reg_state;
424 380
425 reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail); 381 reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail);
426 382
427 /* True 32b PPGTT with dynamic page allocation: update PDP 383 /*
384 * True 32b PPGTT with dynamic page allocation: update PDP
428 * registers and point the unallocated PDPs to scratch page. 385 * registers and point the unallocated PDPs to scratch page.
429 * PML4 is allocated during ppgtt init, so this is not needed 386 * PML4 is allocated during ppgtt init, so this is not needed
430 * in 48-bit mode. 387 * in 48-bit mode.
431 */ 388 */
432 if (ppgtt && !i915_vm_is_48bit(&ppgtt->vm)) 389 if (!i915_vm_is_48bit(&ppgtt->vm))
433 execlists_update_context_pdps(ppgtt, reg_state); 390 execlists_update_context_pdps(ppgtt, reg_state);
434 391
392 /*
393 * Make sure the context image is complete before we submit it to HW.
394 *
395 * Ostensibly, writes (including the WCB) should be flushed prior to
396 * an uncached write such as our mmio register access, the empirical
397 * evidence (esp. on Braswell) suggests that the WC write into memory
398 * may not be visible to the HW prior to the completion of the UC
399 * register write and that we may begin execution from the context
 400 * before its image is complete, leading to invalid PD chasing.
401 */
402 wmb();
435 return ce->lrc_desc; 403 return ce->lrc_desc;
436} 404}
437 405
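
The wmb() added above orders the write-combined context-image stores before the uncached ELSP register write that tells the hardware to read them. A rough userspace analogue of the store-then-publish shape, using a C11 release fence in place of wmb() (the i915 case needs the real barrier because WC and UC mappings are involved):

#include <stdatomic.h>
#include <stdio.h>

static int context_image[2];	/* stand-in for the WC-mapped image */
static _Atomic int doorbell;	/* stand-in for the UC mmio register */

static void submit(int tail)
{
	context_image[0] = tail;	/* build the image with plain stores */
	atomic_thread_fence(memory_order_release);	/* wmb() analogue */
	atomic_store_explicit(&doorbell, 1, memory_order_relaxed);
}

int main(void)
{
	submit(42);
	printf("doorbell=%d tail=%d\n", doorbell, context_image[0]);
	return 0;
}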
@@ -669,8 +637,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
669 while ((rb = rb_first_cached(&execlists->queue))) { 637 while ((rb = rb_first_cached(&execlists->queue))) {
670 struct i915_priolist *p = to_priolist(rb); 638 struct i915_priolist *p = to_priolist(rb);
671 struct i915_request *rq, *rn; 639 struct i915_request *rq, *rn;
640 int i;
672 641
673 list_for_each_entry_safe(rq, rn, &p->requests, sched.link) { 642 priolist_for_each_request_consume(rq, rn, p, i) {
674 /* 643 /*
675 * Can we combine this request with the current port? 644 * Can we combine this request with the current port?
676 * It has to be the same context/ringbuffer and not 645 * It has to be the same context/ringbuffer and not
@@ -689,11 +658,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
689 * combine this request with the last, then we 658 * combine this request with the last, then we
690 * are done. 659 * are done.
691 */ 660 */
692 if (port == last_port) { 661 if (port == last_port)
693 __list_del_many(&p->requests,
694 &rq->sched.link);
695 goto done; 662 goto done;
696 }
697 663
698 /* 664 /*
699 * If GVT overrides us we only ever submit 665 * If GVT overrides us we only ever submit
@@ -703,11 +669,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
703 * request) to the second port. 669 * request) to the second port.
704 */ 670 */
705 if (ctx_single_port_submission(last->hw_context) || 671 if (ctx_single_port_submission(last->hw_context) ||
706 ctx_single_port_submission(rq->hw_context)) { 672 ctx_single_port_submission(rq->hw_context))
707 __list_del_many(&p->requests,
708 &rq->sched.link);
709 goto done; 673 goto done;
710 }
711 674
712 GEM_BUG_ON(last->hw_context == rq->hw_context); 675 GEM_BUG_ON(last->hw_context == rq->hw_context);
713 676
@@ -718,15 +681,16 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
718 GEM_BUG_ON(port_isset(port)); 681 GEM_BUG_ON(port_isset(port));
719 } 682 }
720 683
721 INIT_LIST_HEAD(&rq->sched.link); 684 list_del_init(&rq->sched.link);
685
722 __i915_request_submit(rq); 686 __i915_request_submit(rq);
723 trace_i915_request_in(rq, port_index(port, execlists)); 687 trace_i915_request_in(rq, port_index(port, execlists));
688
724 last = rq; 689 last = rq;
725 submit = true; 690 submit = true;
726 } 691 }
727 692
728 rb_erase_cached(&p->node, &execlists->queue); 693 rb_erase_cached(&p->node, &execlists->queue);
729 INIT_LIST_HEAD(&p->requests);
730 if (p->priority != I915_PRIORITY_NORMAL) 694 if (p->priority != I915_PRIORITY_NORMAL)
731 kmem_cache_free(engine->i915->priorities, p); 695 kmem_cache_free(engine->i915->priorities, p);
732 } 696 }
@@ -861,16 +825,16 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
861 /* Flush the queued requests to the timeline list (for retiring). */ 825 /* Flush the queued requests to the timeline list (for retiring). */
862 while ((rb = rb_first_cached(&execlists->queue))) { 826 while ((rb = rb_first_cached(&execlists->queue))) {
863 struct i915_priolist *p = to_priolist(rb); 827 struct i915_priolist *p = to_priolist(rb);
828 int i;
864 829
865 list_for_each_entry_safe(rq, rn, &p->requests, sched.link) { 830 priolist_for_each_request_consume(rq, rn, p, i) {
866 INIT_LIST_HEAD(&rq->sched.link); 831 list_del_init(&rq->sched.link);
867 832
868 dma_fence_set_error(&rq->fence, -EIO); 833 dma_fence_set_error(&rq->fence, -EIO);
869 __i915_request_submit(rq); 834 __i915_request_submit(rq);
870 } 835 }
871 836
872 rb_erase_cached(&p->node, &execlists->queue); 837 rb_erase_cached(&p->node, &execlists->queue);
873 INIT_LIST_HEAD(&p->requests);
874 if (p->priority != I915_PRIORITY_NORMAL) 838 if (p->priority != I915_PRIORITY_NORMAL)
875 kmem_cache_free(engine->i915->priorities, p); 839 kmem_cache_free(engine->i915->priorities, p);
876 } 840 }
@@ -1076,13 +1040,7 @@ static void queue_request(struct intel_engine_cs *engine,
1076 struct i915_sched_node *node, 1040 struct i915_sched_node *node,
1077 int prio) 1041 int prio)
1078{ 1042{
1079 list_add_tail(&node->link, 1043 list_add_tail(&node->link, i915_sched_lookup_priolist(engine, prio));
1080 &lookup_priolist(engine, prio)->requests);
1081}
1082
1083static void __update_queue(struct intel_engine_cs *engine, int prio)
1084{
1085 engine->execlists.queue_priority = prio;
1086} 1044}
1087 1045
1088static void __submit_queue_imm(struct intel_engine_cs *engine) 1046static void __submit_queue_imm(struct intel_engine_cs *engine)
@@ -1101,7 +1059,7 @@ static void __submit_queue_imm(struct intel_engine_cs *engine)
1101static void submit_queue(struct intel_engine_cs *engine, int prio) 1059static void submit_queue(struct intel_engine_cs *engine, int prio)
1102{ 1060{
1103 if (prio > engine->execlists.queue_priority) { 1061 if (prio > engine->execlists.queue_priority) {
1104 __update_queue(engine, prio); 1062 engine->execlists.queue_priority = prio;
1105 __submit_queue_imm(engine); 1063 __submit_queue_imm(engine);
1106 } 1064 }
1107} 1065}
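
With __update_queue() folded away, submit_queue() is just a compare-and-raise: only a request that beats the queue's best-known priority kicks the tasklet immediately; anything else waits for the normal dequeue. A sketch of that gate with simplified state:

#include <stdbool.h>
#include <stdio.h>

static int queue_priority = -1;	/* stand-in for the engine's low-water mark */

static bool submit_queue(int prio)
{
	if (prio <= queue_priority)
		return false;	/* dequeue will pick it up eventually */
	queue_priority = prio;
	return true;		/* caller kicks the submission tasklet */
}

int main(void)
{
	printf("kick: %d\n", submit_queue(0));	/* raises priority */
	printf("kick: %d\n", submit_queue(0));	/* no change, no kick */
	return 0;
}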
@@ -1124,139 +1082,6 @@ static void execlists_submit_request(struct i915_request *request)
1124 spin_unlock_irqrestore(&engine->timeline.lock, flags); 1082 spin_unlock_irqrestore(&engine->timeline.lock, flags);
1125} 1083}
1126 1084
1127static struct i915_request *sched_to_request(struct i915_sched_node *node)
1128{
1129 return container_of(node, struct i915_request, sched);
1130}
1131
1132static struct intel_engine_cs *
1133sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
1134{
1135 struct intel_engine_cs *engine = sched_to_request(node)->engine;
1136
1137 GEM_BUG_ON(!locked);
1138
1139 if (engine != locked) {
1140 spin_unlock(&locked->timeline.lock);
1141 spin_lock(&engine->timeline.lock);
1142 }
1143
1144 return engine;
1145}
1146
1147static void execlists_schedule(struct i915_request *request,
1148 const struct i915_sched_attr *attr)
1149{
1150 struct i915_priolist *uninitialized_var(pl);
1151 struct intel_engine_cs *engine, *last;
1152 struct i915_dependency *dep, *p;
1153 struct i915_dependency stack;
1154 const int prio = attr->priority;
1155 LIST_HEAD(dfs);
1156
1157 GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
1158
1159 if (i915_request_completed(request))
1160 return;
1161
1162 if (prio <= READ_ONCE(request->sched.attr.priority))
1163 return;
1164
1165 /* Need BKL in order to use the temporary link inside i915_dependency */
1166 lockdep_assert_held(&request->i915->drm.struct_mutex);
1167
1168 stack.signaler = &request->sched;
1169 list_add(&stack.dfs_link, &dfs);
1170
1171 /*
1172 * Recursively bump all dependent priorities to match the new request.
1173 *
1174 * A naive approach would be to use recursion:
1175 * static void update_priorities(struct i915_sched_node *node, prio) {
1176 * list_for_each_entry(dep, &node->signalers_list, signal_link)
1177 * update_priorities(dep->signal, prio)
1178 * queue_request(node);
1179 * }
1180 * but that may have unlimited recursion depth and so runs a very
 1181 * real risk of overrunning the kernel stack. Instead, we build
1182 * a flat list of all dependencies starting with the current request.
1183 * As we walk the list of dependencies, we add all of its dependencies
1184 * to the end of the list (this may include an already visited
1185 * request) and continue to walk onwards onto the new dependencies. The
1186 * end result is a topological list of requests in reverse order, the
1187 * last element in the list is the request we must execute first.
1188 */
1189 list_for_each_entry(dep, &dfs, dfs_link) {
1190 struct i915_sched_node *node = dep->signaler;
1191
1192 /*
1193 * Within an engine, there can be no cycle, but we may
1194 * refer to the same dependency chain multiple times
1195 * (redundant dependencies are not eliminated) and across
1196 * engines.
1197 */
1198 list_for_each_entry(p, &node->signalers_list, signal_link) {
1199 GEM_BUG_ON(p == dep); /* no cycles! */
1200
1201 if (i915_sched_node_signaled(p->signaler))
1202 continue;
1203
1204 GEM_BUG_ON(p->signaler->attr.priority < node->attr.priority);
1205 if (prio > READ_ONCE(p->signaler->attr.priority))
1206 list_move_tail(&p->dfs_link, &dfs);
1207 }
1208 }
1209
1210 /*
1211 * If we didn't need to bump any existing priorities, and we haven't
1212 * yet submitted this request (i.e. there is no potential race with
1213 * execlists_submit_request()), we can set our own priority and skip
1214 * acquiring the engine locks.
1215 */
1216 if (request->sched.attr.priority == I915_PRIORITY_INVALID) {
1217 GEM_BUG_ON(!list_empty(&request->sched.link));
1218 request->sched.attr = *attr;
1219 if (stack.dfs_link.next == stack.dfs_link.prev)
1220 return;
1221 __list_del_entry(&stack.dfs_link);
1222 }
1223
1224 last = NULL;
1225 engine = request->engine;
1226 spin_lock_irq(&engine->timeline.lock);
1227
1228 /* Fifo and depth-first replacement ensure our deps execute before us */
1229 list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
1230 struct i915_sched_node *node = dep->signaler;
1231
1232 INIT_LIST_HEAD(&dep->dfs_link);
1233
1234 engine = sched_lock_engine(node, engine);
1235
1236 if (prio <= node->attr.priority)
1237 continue;
1238
1239 node->attr.priority = prio;
1240 if (!list_empty(&node->link)) {
1241 if (last != engine) {
1242 pl = lookup_priolist(engine, prio);
1243 last = engine;
1244 }
1245 GEM_BUG_ON(pl->priority != prio);
1246 list_move_tail(&node->link, &pl->requests);
1247 }
1248
1249 if (prio > engine->execlists.queue_priority &&
1250 i915_sw_fence_done(&sched_to_request(node)->submit)) {
1251 /* defer submission until after all of our updates */
1252 __update_queue(engine, prio);
1253 tasklet_hi_schedule(&engine->execlists.tasklet);
1254 }
1255 }
1256
1257 spin_unlock_irq(&engine->timeline.lock);
1258}
1259
1260static void execlists_context_destroy(struct intel_context *ce) 1085static void execlists_context_destroy(struct intel_context *ce)
1261{ 1086{
1262 GEM_BUG_ON(ce->pin_count); 1087 GEM_BUG_ON(ce->pin_count);
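
The removed execlists_schedule() (its job now lives in the common i915_schedule(), wired up below) avoids recursing through the dependency graph by growing a flat work list and then applying the bump in reverse, topological order, exactly as its comment explains. A self-contained sketch of that pattern with an invented two-dependency node type (the kernel also dedups revisited nodes by moving list entries, which this toy array skips):

#include <stdio.h>

struct node {
	int prio;
	struct node *signalers[2];	/* invented: up to two deps */
};

static void bump_priority(struct node *req, int prio)
{
	struct node *dfs[16];		/* flat work list, no recursion */
	int head, tail = 0, i;

	dfs[tail++] = req;
	for (head = 0; head < tail; head++)	/* list grows as we walk */
		for (i = 0; i < 2; i++) {
			struct node *s = dfs[head]->signalers[i];

			if (s && prio > s->prio)
				dfs[tail++] = s;
		}

	while (tail--)			/* reverse order: deps before req */
		if (prio > dfs[tail]->prio)
			dfs[tail]->prio = prio;
}

int main(void)
{
	struct node dep = { .prio = 0 };
	struct node req = { .prio = 0, .signalers = { &dep } };

	bump_priority(&req, 5);
	printf("dep=%d req=%d\n", dep.prio, req.prio);
	return 0;
}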
@@ -1272,6 +1097,28 @@ static void execlists_context_destroy(struct intel_context *ce)
1272 1097
1273static void execlists_context_unpin(struct intel_context *ce) 1098static void execlists_context_unpin(struct intel_context *ce)
1274{ 1099{
1100 struct intel_engine_cs *engine;
1101
1102 /*
1103 * The tasklet may still be using a pointer to our state, via an
1104 * old request. However, since we know we only unpin the context
1105 * on retirement of the following request, we know that the last
1106 * request referencing us will have had a completion CS interrupt.
1107 * If we see that it is still active, it means that the tasklet hasn't
 1108 * had the chance to run yet; let it run before we tear down the
1109 * reference it may use.
1110 */
1111 engine = READ_ONCE(ce->active);
1112 if (unlikely(engine)) {
1113 unsigned long flags;
1114
1115 spin_lock_irqsave(&engine->timeline.lock, flags);
1116 process_csb(engine);
1117 spin_unlock_irqrestore(&engine->timeline.lock, flags);
1118
1119 GEM_BUG_ON(READ_ONCE(ce->active));
1120 }
1121
1275 i915_gem_context_unpin_hw_id(ce->gem_context); 1122 i915_gem_context_unpin_hw_id(ce->gem_context);
1276 1123
1277 intel_ring_unpin(ce->ring); 1124 intel_ring_unpin(ce->ring);
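
The unpin change above closes a teardown race: if ce->active says the tasklet may still hold a pointer to the context, the unpinner drains the CSB itself under the engine lock instead of racing the tasklet. A toy version of that drain-before-teardown step (types invented):

#include <stdio.h>

struct ctx {
	int active;		/* nonzero while the tasklet may read us */
	int pending_events;	/* stand-in for unprocessed CSB entries */
};

static void process_csb(struct ctx *c)
{
	for (; c->pending_events > 0; c->pending_events--)
		;		/* retire one completion event */
	c->active = 0;		/* the last event drops the reference */
}

static void unpin(struct ctx *c)
{
	if (c->active)		/* tasklet hasn't run yet: do its work */
		process_csb(c);
	printf("teardown safe, active=%d\n", c->active);
}

int main(void)
{
	struct ctx c = { 1, 3 };

	unpin(&c);
	return 0;
}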
@@ -1375,6 +1222,7 @@ execlists_context_pin(struct intel_engine_cs *engine,
1375 struct intel_context *ce = to_intel_context(ctx, engine); 1222 struct intel_context *ce = to_intel_context(ctx, engine);
1376 1223
1377 lockdep_assert_held(&ctx->i915->drm.struct_mutex); 1224 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
1225 GEM_BUG_ON(!ctx->ppgtt);
1378 1226
1379 if (likely(ce->pin_count++)) 1227 if (likely(ce->pin_count++))
1380 return ce; 1228 return ce;
@@ -1679,7 +1527,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
1679 unsigned int i; 1527 unsigned int i;
1680 int ret; 1528 int ret;
1681 1529
1682 if (GEM_WARN_ON(engine->id != RCS)) 1530 if (GEM_DEBUG_WARN_ON(engine->id != RCS))
1683 return -EINVAL; 1531 return -EINVAL;
1684 1532
1685 switch (INTEL_GEN(engine->i915)) { 1533 switch (INTEL_GEN(engine->i915)) {
@@ -1718,8 +1566,8 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
1718 */ 1566 */
1719 for (i = 0; i < ARRAY_SIZE(wa_bb_fn); i++) { 1567 for (i = 0; i < ARRAY_SIZE(wa_bb_fn); i++) {
1720 wa_bb[i]->offset = batch_ptr - batch; 1568 wa_bb[i]->offset = batch_ptr - batch;
1721 if (GEM_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset, 1569 if (GEM_DEBUG_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset,
1722 CACHELINE_BYTES))) { 1570 CACHELINE_BYTES))) {
1723 ret = -EINVAL; 1571 ret = -EINVAL;
1724 break; 1572 break;
1725 } 1573 }
@@ -1902,7 +1750,7 @@ static void execlists_reset(struct intel_engine_cs *engine,
1902 unsigned long flags; 1750 unsigned long flags;
1903 u32 *regs; 1751 u32 *regs;
1904 1752
1905 GEM_TRACE("%s request global=%x, current=%d\n", 1753 GEM_TRACE("%s request global=%d, current=%d\n",
1906 engine->name, request ? request->global_seqno : 0, 1754 engine->name, request ? request->global_seqno : 0,
1907 intel_engine_get_seqno(engine)); 1755 intel_engine_get_seqno(engine));
1908 1756
@@ -2029,8 +1877,7 @@ static int gen8_emit_bb_start(struct i915_request *rq,
2029 * it is unsafe in case of lite-restore (because the ctx is 1877 * it is unsafe in case of lite-restore (because the ctx is
2030 * not idle). PML4 is allocated during ppgtt init so this is 1878 * not idle). PML4 is allocated during ppgtt init so this is
2031 * not needed in 48-bit.*/ 1879 * not needed in 48-bit.*/
2032 if (rq->gem_context->ppgtt && 1880 if ((intel_engine_flag(rq->engine) & rq->gem_context->ppgtt->pd_dirty_rings) &&
2033 (intel_engine_flag(rq->engine) & rq->gem_context->ppgtt->pd_dirty_rings) &&
2034 !i915_vm_is_48bit(&rq->gem_context->ppgtt->vm) && 1881 !i915_vm_is_48bit(&rq->gem_context->ppgtt->vm) &&
2035 !intel_vgpu_active(rq->i915)) { 1882 !intel_vgpu_active(rq->i915)) {
2036 ret = intel_logical_ring_emit_pdps(rq); 1883 ret = intel_logical_ring_emit_pdps(rq);
@@ -2109,7 +1956,7 @@ static int gen8_emit_flush(struct i915_request *request, u32 mode)
2109 1956
2110 if (mode & EMIT_INVALIDATE) { 1957 if (mode & EMIT_INVALIDATE) {
2111 cmd |= MI_INVALIDATE_TLB; 1958 cmd |= MI_INVALIDATE_TLB;
2112 if (request->engine->id == VCS) 1959 if (request->engine->class == VIDEO_DECODE_CLASS)
2113 cmd |= MI_INVALIDATE_BSD; 1960 cmd |= MI_INVALIDATE_BSD;
2114 } 1961 }
2115 1962
@@ -2294,7 +2141,7 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
2294{ 2141{
2295 engine->submit_request = execlists_submit_request; 2142 engine->submit_request = execlists_submit_request;
2296 engine->cancel_requests = execlists_cancel_requests; 2143 engine->cancel_requests = execlists_cancel_requests;
2297 engine->schedule = execlists_schedule; 2144 engine->schedule = i915_schedule;
2298 engine->execlists.tasklet.func = execlists_submission_tasklet; 2145 engine->execlists.tasklet.func = execlists_submission_tasklet;
2299 2146
2300 engine->reset.prepare = execlists_reset_prepare; 2147 engine->reset.prepare = execlists_reset_prepare;
@@ -2632,7 +2479,6 @@ static void execlists_init_reg_state(u32 *regs,
2632 struct intel_ring *ring) 2479 struct intel_ring *ring)
2633{ 2480{
2634 struct drm_i915_private *dev_priv = engine->i915; 2481 struct drm_i915_private *dev_priv = engine->i915;
2635 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: dev_priv->mm.aliasing_ppgtt;
2636 u32 base = engine->mmio_base; 2482 u32 base = engine->mmio_base;
2637 bool rcs = engine->class == RENDER_CLASS; 2483 bool rcs = engine->class == RENDER_CLASS;
2638 2484
@@ -2704,12 +2550,12 @@ static void execlists_init_reg_state(u32 *regs,
2704 CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0), 0); 2550 CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0), 0);
2705 CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0), 0); 2551 CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0), 0);
2706 2552
2707 if (ppgtt && i915_vm_is_48bit(&ppgtt->vm)) { 2553 if (i915_vm_is_48bit(&ctx->ppgtt->vm)) {
2708 /* 64b PPGTT (48bit canonical) 2554 /* 64b PPGTT (48bit canonical)
2709 * PDP0_DESCRIPTOR contains the base address to PML4 and 2555 * PDP0_DESCRIPTOR contains the base address to PML4 and
2710 * other PDP Descriptors are ignored. 2556 * other PDP Descriptors are ignored.
2711 */ 2557 */
2712 ASSIGN_CTX_PML4(ppgtt, regs); 2558 ASSIGN_CTX_PML4(ctx->ppgtt, regs);
2713 } 2559 }
2714 2560
2715 if (rcs) { 2561 if (rcs) {
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c
index 3e085c5f2b81..96a8d9524b0c 100644
--- a/drivers/gpu/drm/i915/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/intel_lspcon.c
@@ -27,6 +27,22 @@
27#include <drm/drm_dp_dual_mode_helper.h> 27#include <drm/drm_dp_dual_mode_helper.h>
28#include "intel_drv.h" 28#include "intel_drv.h"
29 29
30/* LSPCON OUI Vendor ID(signatures) */
31#define LSPCON_VENDOR_PARADE_OUI 0x001CF8
32#define LSPCON_VENDOR_MCA_OUI 0x0060AD
33
34/* AUX addresses to write MCA AVI IF */
35#define LSPCON_MCA_AVI_IF_WRITE_OFFSET 0x5C0
36#define LSPCON_MCA_AVI_IF_CTRL 0x5DF
37#define LSPCON_MCA_AVI_IF_KICKOFF (1 << 0)
38#define LSPCON_MCA_AVI_IF_HANDLED (1 << 1)
39
40/* AUX addresses to write Parade AVI IF */
41#define LSPCON_PARADE_AVI_IF_WRITE_OFFSET 0x516
42#define LSPCON_PARADE_AVI_IF_CTRL 0x51E
43#define LSPCON_PARADE_AVI_IF_KICKOFF (1 << 7)
44#define LSPCON_PARADE_AVI_IF_DATA_SIZE 32
45
30static struct intel_dp *lspcon_to_intel_dp(struct intel_lspcon *lspcon) 46static struct intel_dp *lspcon_to_intel_dp(struct intel_lspcon *lspcon)
31{ 47{
32 struct intel_digital_port *dig_port = 48 struct intel_digital_port *dig_port =
@@ -50,6 +66,40 @@ static const char *lspcon_mode_name(enum drm_lspcon_mode mode)
50 } 66 }
51} 67}
52 68
69static bool lspcon_detect_vendor(struct intel_lspcon *lspcon)
70{
71 struct intel_dp *dp = lspcon_to_intel_dp(lspcon);
72 struct drm_dp_dpcd_ident *ident;
73 u32 vendor_oui;
74
75 if (drm_dp_read_desc(&dp->aux, &dp->desc, drm_dp_is_branch(dp->dpcd))) {
76 DRM_ERROR("Can't read description\n");
77 return false;
78 }
79
80 ident = &dp->desc.ident;
81 vendor_oui = (ident->oui[0] << 16) | (ident->oui[1] << 8) |
82 ident->oui[2];
83
84 switch (vendor_oui) {
85 case LSPCON_VENDOR_MCA_OUI:
86 lspcon->vendor = LSPCON_VENDOR_MCA;
87 DRM_DEBUG_KMS("Vendor: Mega Chips\n");
88 break;
89
90 case LSPCON_VENDOR_PARADE_OUI:
91 lspcon->vendor = LSPCON_VENDOR_PARADE;
92 DRM_DEBUG_KMS("Vendor: Parade Tech\n");
93 break;
94
95 default:
96 DRM_ERROR("Invalid/Unknown vendor OUI\n");
97 return false;
98 }
99
100 return true;
101}
102
53static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon) 103static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon)
54{ 104{
55 enum drm_lspcon_mode current_mode; 105 enum drm_lspcon_mode current_mode;
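
lspcon_detect_vendor() above packs the three OUI bytes from the DPCD ident big-endian into one value before switching on it. The packing, checked standalone with MCA's signature as sample input:

#include <stdint.h>
#include <stdio.h>

#define LSPCON_VENDOR_PARADE_OUI 0x001CF8
#define LSPCON_VENDOR_MCA_OUI    0x0060AD

int main(void)
{
	uint8_t oui[3] = { 0x00, 0x60, 0xAD };	/* sample DPCD ident bytes */
	unsigned int vendor = (oui[0] << 16) | (oui[1] << 8) | oui[2];

	switch (vendor) {
	case LSPCON_VENDOR_MCA_OUI:
		printf("Vendor: Mega Chips\n");
		break;
	case LSPCON_VENDOR_PARADE_OUI:
		printf("Vendor: Parade Tech\n");
		break;
	default:
		printf("Unknown OUI 0x%06x\n", vendor);
	}
	return 0;
}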
@@ -130,6 +180,21 @@ static bool lspcon_wake_native_aux_ch(struct intel_lspcon *lspcon)
130 return true; 180 return true;
131} 181}
132 182
183void lspcon_ycbcr420_config(struct drm_connector *connector,
184 struct intel_crtc_state *crtc_state)
185{
186 const struct drm_display_info *info = &connector->display_info;
187 const struct drm_display_mode *adjusted_mode =
188 &crtc_state->base.adjusted_mode;
189
190 if (drm_mode_is_420_only(info, adjusted_mode) &&
191 connector->ycbcr_420_allowed) {
192 crtc_state->port_clock /= 2;
193 crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
194 crtc_state->lspcon_downsampling = true;
195 }
196}
197
133static bool lspcon_probe(struct intel_lspcon *lspcon) 198static bool lspcon_probe(struct intel_lspcon *lspcon)
134{ 199{
135 int retry; 200 int retry;
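
lspcon_ycbcr420_config() halves port_clock because the LSPCON receives full 4:4:4 pixels and downsamples to 4:2:0 itself, so the DP link carries half the data per pixel. A toy check of that halving, assuming a 594 MHz HDMI 2.0 mode for illustration:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	int port_clock = 594000;	/* kHz; assumed 4k@60 4:2:0-only mode */
	bool mode_420_only = true, ycbcr_420_allowed = true;

	if (mode_420_only && ycbcr_420_allowed)
		port_clock /= 2;	/* 4:2:0 halves the data per pixel */

	printf("effective port clock: %d kHz\n", port_clock);
	return 0;
}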
@@ -159,7 +224,18 @@ static bool lspcon_probe(struct intel_lspcon *lspcon)
159 /* Yay ... got a LSPCON device */ 224 /* Yay ... got a LSPCON device */
160 DRM_DEBUG_KMS("LSPCON detected\n"); 225 DRM_DEBUG_KMS("LSPCON detected\n");
161 lspcon->mode = lspcon_wait_mode(lspcon, expected_mode); 226 lspcon->mode = lspcon_wait_mode(lspcon, expected_mode);
162 lspcon->active = true; 227
228 /*
 229 * In the SW state machine, let's put LSPCON in PCON mode only.
230 * In this way, it will work with both HDMI 1.4 sinks as well as HDMI
231 * 2.0 sinks.
232 */
233 if (lspcon->mode != DRM_LSPCON_MODE_PCON) {
234 if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON) < 0) {
235 DRM_ERROR("LSPCON mode change to PCON failed\n");
236 return false;
237 }
238 }
163 return true; 239 return true;
164} 240}
165 241
@@ -185,6 +261,255 @@ static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon)
185 DRM_DEBUG_KMS("LSPCON DP descriptor mismatch after resume\n"); 261 DRM_DEBUG_KMS("LSPCON DP descriptor mismatch after resume\n");
186} 262}
187 263
264static bool lspcon_parade_fw_ready(struct drm_dp_aux *aux)
265{
266 u8 avi_if_ctrl;
267 u8 retry;
268 ssize_t ret;
269
270 /* Check if LSPCON FW is ready for data */
271 for (retry = 0; retry < 5; retry++) {
272 if (retry)
273 usleep_range(200, 300);
274
275 ret = drm_dp_dpcd_read(aux, LSPCON_PARADE_AVI_IF_CTRL,
276 &avi_if_ctrl, 1);
277 if (ret < 0) {
278 DRM_ERROR("Failed to read AVI IF control\n");
279 return false;
280 }
281
282 if ((avi_if_ctrl & LSPCON_PARADE_AVI_IF_KICKOFF) == 0)
283 return true;
284 }
285
286 DRM_ERROR("Parade FW not ready to accept AVI IF\n");
287 return false;
288}
289
290static bool _lspcon_parade_write_infoframe_blocks(struct drm_dp_aux *aux,
291 uint8_t *avi_buf)
292{
293 u8 avi_if_ctrl;
294 u8 block_count = 0;
295 u8 *data;
296 uint16_t reg;
297 ssize_t ret;
298
299 while (block_count < 4) {
300 if (!lspcon_parade_fw_ready(aux)) {
301 DRM_DEBUG_KMS("LSPCON FW not ready, block %d\n",
302 block_count);
303 return false;
304 }
305
306 reg = LSPCON_PARADE_AVI_IF_WRITE_OFFSET;
307 data = avi_buf + block_count * 8;
308 ret = drm_dp_dpcd_write(aux, reg, data, 8);
309 if (ret < 0) {
310 DRM_ERROR("Failed to write AVI IF block %d\n",
311 block_count);
312 return false;
313 }
314
315 /*
316 * Once a block of data is written, we have to inform the FW
 317 * about this by writing into the AVI infoframe control register:
318 * - set the kickoff bit[7] to 1
319 * - write the block no. to bits[1:0]
320 */
321 reg = LSPCON_PARADE_AVI_IF_CTRL;
322 avi_if_ctrl = LSPCON_PARADE_AVI_IF_KICKOFF | block_count;
323 ret = drm_dp_dpcd_write(aux, reg, &avi_if_ctrl, 1);
324 if (ret < 0) {
325 DRM_ERROR("Failed to update (0x%x), block %d\n",
326 reg, block_count);
327 return false;
328 }
329
330 block_count++;
331 }
332
333 DRM_DEBUG_KMS("Wrote AVI IF blocks successfully\n");
334 return true;
335}
336
337static bool _lspcon_write_avi_infoframe_parade(struct drm_dp_aux *aux,
338 const uint8_t *frame,
339 ssize_t len)
340{
341 uint8_t avi_if[LSPCON_PARADE_AVI_IF_DATA_SIZE] = {1, };
342
343 /*
 344 * Parade's frame buffer contains 32 bytes of data, divided
 345 * into 4 blocks of 8 bytes:
 346 * Token byte (first byte of first block, must be non-zero)
 347 * HB0 to HB2 from AVI IF (3 bytes header)
 348 * PB0 to PB27 from AVI IF (28 bytes data)
 349 * So it should look like this:
 350 * first block: | <token> <HB0-HB2> <PB0-PB3> |
 351 * next 3 blocks: |<PB4-PB11>|<PB12-PB19>|<PB20-PB27>|
352 */
353
354 if (len > LSPCON_PARADE_AVI_IF_DATA_SIZE - 1) {
355 DRM_ERROR("Invalid length of infoframes\n");
356 return false;
357 }
358
359 memcpy(&avi_if[1], frame, len);
360
361 if (!_lspcon_parade_write_infoframe_blocks(aux, avi_if)) {
362 DRM_DEBUG_KMS("Failed to write infoframe blocks\n");
363 return false;
364 }
365
366 return true;
367}
368
369static bool _lspcon_write_avi_infoframe_mca(struct drm_dp_aux *aux,
370 const uint8_t *buffer, ssize_t len)
371{
372 int ret;
373 uint32_t val = 0;
374 uint32_t retry;
375 uint16_t reg;
376 const uint8_t *data = buffer;
377
378 reg = LSPCON_MCA_AVI_IF_WRITE_OFFSET;
379 while (val < len) {
380 /* DPCD write for AVI IF can fail on a slow FW day, so retry */
381 for (retry = 0; retry < 5; retry++) {
382 ret = drm_dp_dpcd_write(aux, reg, (void *)data, 1);
383 if (ret == 1) {
384 break;
385 } else if (retry < 4) {
386 mdelay(50);
387 continue;
388 } else {
389 DRM_ERROR("DPCD write failed at:0x%x\n", reg);
390 return false;
391 }
392 }
393 val++; reg++; data++;
394 }
395
396 val = 0;
397 reg = LSPCON_MCA_AVI_IF_CTRL;
398 ret = drm_dp_dpcd_read(aux, reg, &val, 1);
399 if (ret < 0) {
400 DRM_ERROR("DPCD read failed, address 0x%x\n", reg);
401 return false;
402 }
403
 404 /* Tell the LSPCON chip about the infoframe: clear bit 1, set bit 0 */
405 val &= ~LSPCON_MCA_AVI_IF_HANDLED;
406 val |= LSPCON_MCA_AVI_IF_KICKOFF;
407
408 ret = drm_dp_dpcd_write(aux, reg, &val, 1);
409 if (ret < 0) {
410 DRM_ERROR("DPCD read failed, address 0x%x\n", reg);
411 return false;
412 }
413
414 val = 0;
415 ret = drm_dp_dpcd_read(aux, reg, &val, 1);
416 if (ret < 0) {
417 DRM_ERROR("DPCD read failed, address 0x%x\n", reg);
418 return false;
419 }
420
421 if (val == LSPCON_MCA_AVI_IF_HANDLED)
422 DRM_DEBUG_KMS("AVI IF handled by FW\n");
423
424 return true;
425}
426
427void lspcon_write_infoframe(struct intel_encoder *encoder,
428 const struct intel_crtc_state *crtc_state,
429 unsigned int type,
430 const void *frame, ssize_t len)
431{
432 bool ret;
433 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
434 struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base);
435
436 /* LSPCON only needs AVI IF */
437 if (type != HDMI_INFOFRAME_TYPE_AVI)
438 return;
439
440 if (lspcon->vendor == LSPCON_VENDOR_MCA)
441 ret = _lspcon_write_avi_infoframe_mca(&intel_dp->aux,
442 frame, len);
443 else
444 ret = _lspcon_write_avi_infoframe_parade(&intel_dp->aux,
445 frame, len);
446
447 if (!ret) {
448 DRM_ERROR("Failed to write AVI infoframes\n");
449 return;
450 }
451
452 DRM_DEBUG_DRIVER("AVI infoframes updated successfully\n");
453}
454
455void lspcon_set_infoframes(struct intel_encoder *encoder,
456 bool enable,
457 const struct intel_crtc_state *crtc_state,
458 const struct drm_connector_state *conn_state)
459{
460 ssize_t ret;
461 union hdmi_infoframe frame;
462 uint8_t buf[VIDEO_DIP_DATA_SIZE];
463 struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
464 struct intel_lspcon *lspcon = &dig_port->lspcon;
465 struct intel_dp *intel_dp = &dig_port->dp;
466 struct drm_connector *connector = &intel_dp->attached_connector->base;
467 const struct drm_display_mode *mode = &crtc_state->base.adjusted_mode;
468 bool is_hdmi2_sink = connector->display_info.hdmi.scdc.supported;
469
470 if (!lspcon->active) {
471 DRM_ERROR("Writing infoframes while LSPCON disabled ?\n");
472 return;
473 }
474
475 ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
476 mode, is_hdmi2_sink);
477 if (ret < 0) {
478 DRM_ERROR("couldn't fill AVI infoframe\n");
479 return;
480 }
481
482 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) {
483 if (crtc_state->lspcon_downsampling)
484 frame.avi.colorspace = HDMI_COLORSPACE_YUV420;
485 else
486 frame.avi.colorspace = HDMI_COLORSPACE_YUV444;
487 } else {
488 frame.avi.colorspace = HDMI_COLORSPACE_RGB;
489 }
490
491 drm_hdmi_avi_infoframe_quant_range(&frame.avi, mode,
492 crtc_state->limited_color_range ?
493 HDMI_QUANTIZATION_RANGE_LIMITED :
494 HDMI_QUANTIZATION_RANGE_FULL,
495 false, is_hdmi2_sink);
496
497 ret = hdmi_infoframe_pack(&frame, buf, sizeof(buf));
498 if (ret < 0) {
499 DRM_ERROR("Failed to pack AVI IF\n");
500 return;
501 }
502
503 dig_port->write_infoframe(encoder, crtc_state, HDMI_INFOFRAME_TYPE_AVI,
504 buf, ret);
505}
506
507bool lspcon_infoframe_enabled(struct intel_encoder *encoder,
508 const struct intel_crtc_state *pipe_config)
509{
510 return enc_to_intel_lspcon(&encoder->base)->active;
511}
512
188void lspcon_resume(struct intel_lspcon *lspcon) 513void lspcon_resume(struct intel_lspcon *lspcon)
189{ 514{
190 enum drm_lspcon_mode expected_mode; 515 enum drm_lspcon_mode expected_mode;
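
The MCA write path above retries every one-byte DPCD write up to five times because the LSPCON firmware can be slow to accept data ("a slow FW day"). The skeleton of that loop, with an invented stub in place of drm_dp_dpcd_write():

#include <stdio.h>

/* Invented stand-in for drm_dp_dpcd_write(): fails twice, then acks. */
static int fake_dpcd_write(unsigned int reg)
{
	static int calls;

	(void)reg;
	return ++calls < 3 ? -1 : 1;	/* 1 == one byte written */
}

static int write_with_retry(unsigned int reg)
{
	int retry;

	for (retry = 0; retry < 5; retry++) {
		if (fake_dpcd_write(reg) == 1)
			return 0;
		/* the driver sleeps ~50ms between attempts */
	}
	return -1;
}

int main(void)
{
	printf("write %s\n", write_with_retry(0x5C0) ? "failed" : "ok");
	return 0;
}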
@@ -216,6 +541,7 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port)
216 struct intel_lspcon *lspcon = &intel_dig_port->lspcon; 541 struct intel_lspcon *lspcon = &intel_dig_port->lspcon;
217 struct drm_device *dev = intel_dig_port->base.base.dev; 542 struct drm_device *dev = intel_dig_port->base.base.dev;
218 struct drm_i915_private *dev_priv = to_i915(dev); 543 struct drm_i915_private *dev_priv = to_i915(dev);
544 struct drm_connector *connector = &dp->attached_connector->base;
219 545
220 if (!HAS_LSPCON(dev_priv)) { 546 if (!HAS_LSPCON(dev_priv)) {
221 DRM_ERROR("LSPCON is not supported on this platform\n"); 547 DRM_ERROR("LSPCON is not supported on this platform\n");
@@ -230,25 +556,18 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port)
230 return false; 556 return false;
231 } 557 }
232 558
233 /*
234 * In the SW state machine, let's put LSPCON in PCON mode only.
235 * In this way, it will work with both HDMI 1.4 sinks as well as HDMI
236 * 2.0 sinks.
237 */
238 if (lspcon->active && lspcon->mode != DRM_LSPCON_MODE_PCON) {
239 if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON) < 0) {
240 DRM_ERROR("LSPCON mode change to PCON failed\n");
241 return false;
242 }
243 }
244
245 if (!intel_dp_read_dpcd(dp)) { 559 if (!intel_dp_read_dpcd(dp)) {
246 DRM_ERROR("LSPCON DPCD read failed\n"); 560 DRM_ERROR("LSPCON DPCD read failed\n");
247 return false; 561 return false;
248 } 562 }
249 563
250 drm_dp_read_desc(&dp->aux, &dp->desc, drm_dp_is_branch(dp->dpcd)); 564 if (!lspcon_detect_vendor(lspcon)) {
565 DRM_ERROR("LSPCON vendor detection failed\n");
566 return false;
567 }
251 568
569 connector->ycbcr_420_allowed = true;
570 lspcon->active = true;
252 DRM_DEBUG_KMS("Success: LSPCON init\n"); 571 DRM_DEBUG_KMS("Success: LSPCON init\n");
253 return true; 572 return true;
254} 573}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index f9f3b0885ba5..e6c5d985ea0a 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -42,10 +42,6 @@
42#include <linux/acpi.h> 42#include <linux/acpi.h>
43 43
44/* Private structure for the integrated LVDS support */ 44/* Private structure for the integrated LVDS support */
45struct intel_lvds_connector {
46 struct intel_connector base;
47};
48
49struct intel_lvds_pps { 45struct intel_lvds_pps {
50 /* 100us units */ 46 /* 100us units */
51 int t1_t2; 47 int t1_t2;
@@ -70,7 +66,7 @@ struct intel_lvds_encoder {
70 struct intel_lvds_pps init_pps; 66 struct intel_lvds_pps init_pps;
71 u32 init_lvds_val; 67 u32 init_lvds_val;
72 68
73 struct intel_lvds_connector *attached_connector; 69 struct intel_connector *attached_connector;
74}; 70};
75 71
76static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder) 72static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder)
@@ -78,11 +74,6 @@ static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder)
78 return container_of(encoder, struct intel_lvds_encoder, base.base); 74 return container_of(encoder, struct intel_lvds_encoder, base.base);
79} 75}
80 76
81static struct intel_lvds_connector *to_lvds_connector(struct drm_connector *connector)
82{
83 return container_of(connector, struct intel_lvds_connector, base.base);
84}
85
86bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv, 77bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
87 i915_reg_t lvds_reg, enum pipe *pipe) 78 i915_reg_t lvds_reg, enum pipe *pipe)
88{ 79{
@@ -396,7 +387,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
396 struct intel_lvds_encoder *lvds_encoder = 387 struct intel_lvds_encoder *lvds_encoder =
397 to_lvds_encoder(&intel_encoder->base); 388 to_lvds_encoder(&intel_encoder->base);
398 struct intel_connector *intel_connector = 389 struct intel_connector *intel_connector =
399 &lvds_encoder->attached_connector->base; 390 lvds_encoder->attached_connector;
400 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 391 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
401 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc); 392 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
402 unsigned int lvds_bpp; 393 unsigned int lvds_bpp;
@@ -418,6 +409,8 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
418 pipe_config->pipe_bpp = lvds_bpp; 409 pipe_config->pipe_bpp = lvds_bpp;
419 } 410 }
420 411
412 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
413
421 /* 414 /*
422 * We have timings from the BIOS for the panel, put them in 415 * We have timings from the BIOS for the panel, put them in
423 * to the adjusted mode. The CRTC will be set up for this mode, 416 * to the adjusted mode. The CRTC will be set up for this mode,
@@ -461,15 +454,15 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
461 */ 454 */
462static int intel_lvds_get_modes(struct drm_connector *connector) 455static int intel_lvds_get_modes(struct drm_connector *connector)
463{ 456{
464 struct intel_lvds_connector *lvds_connector = to_lvds_connector(connector); 457 struct intel_connector *intel_connector = to_intel_connector(connector);
465 struct drm_device *dev = connector->dev; 458 struct drm_device *dev = connector->dev;
466 struct drm_display_mode *mode; 459 struct drm_display_mode *mode;
467 460
468 /* use cached edid if we have one */ 461 /* use cached edid if we have one */
469 if (!IS_ERR_OR_NULL(lvds_connector->base.edid)) 462 if (!IS_ERR_OR_NULL(intel_connector->edid))
470 return drm_add_edid_modes(connector, lvds_connector->base.edid); 463 return drm_add_edid_modes(connector, intel_connector->edid);
471 464
472 mode = drm_mode_duplicate(dev, lvds_connector->base.panel.fixed_mode); 465 mode = drm_mode_duplicate(dev, intel_connector->panel.fixed_mode);
473 if (mode == NULL) 466 if (mode == NULL)
474 return 0; 467 return 0;
475 468
@@ -477,27 +470,6 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
477 return 1; 470 return 1;
478} 471}
479 472
480/**
481 * intel_lvds_destroy - unregister and free LVDS structures
482 * @connector: connector to free
483 *
484 * Unregister the DDC bus for this connector then free the driver private
485 * structure.
486 */
487static void intel_lvds_destroy(struct drm_connector *connector)
488{
489 struct intel_lvds_connector *lvds_connector =
490 to_lvds_connector(connector);
491
492 if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
493 kfree(lvds_connector->base.edid);
494
495 intel_panel_fini(&lvds_connector->base.panel);
496
497 drm_connector_cleanup(connector);
498 kfree(connector);
499}
500
501static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = { 473static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
502 .get_modes = intel_lvds_get_modes, 474 .get_modes = intel_lvds_get_modes,
503 .mode_valid = intel_lvds_mode_valid, 475 .mode_valid = intel_lvds_mode_valid,
@@ -511,7 +483,7 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
511 .atomic_set_property = intel_digital_connector_atomic_set_property, 483 .atomic_set_property = intel_digital_connector_atomic_set_property,
512 .late_register = intel_connector_register, 484 .late_register = intel_connector_register,
513 .early_unregister = intel_connector_unregister, 485 .early_unregister = intel_connector_unregister,
514 .destroy = intel_lvds_destroy, 486 .destroy = intel_connector_destroy,
515 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 487 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
516 .atomic_duplicate_state = intel_digital_connector_duplicate_state, 488 .atomic_duplicate_state = intel_digital_connector_duplicate_state,
517}; 489};
@@ -802,8 +774,7 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
802 return i915_modparams.lvds_channel_mode == 2; 774 return i915_modparams.lvds_channel_mode == 2;
803 775
804 /* single channel LVDS is limited to 112 MHz */ 776 /* single channel LVDS is limited to 112 MHz */
805 if (lvds_encoder->attached_connector->base.panel.fixed_mode->clock 777 if (lvds_encoder->attached_connector->panel.fixed_mode->clock > 112999)
806 > 112999)
807 return true; 778 return true;
808 779
809 if (dmi_check_system(intel_dual_link_lvds)) 780 if (dmi_check_system(intel_dual_link_lvds))
@@ -858,7 +829,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
858 struct drm_device *dev = &dev_priv->drm; 829 struct drm_device *dev = &dev_priv->drm;
859 struct intel_lvds_encoder *lvds_encoder; 830 struct intel_lvds_encoder *lvds_encoder;
860 struct intel_encoder *intel_encoder; 831 struct intel_encoder *intel_encoder;
861 struct intel_lvds_connector *lvds_connector;
862 struct intel_connector *intel_connector; 832 struct intel_connector *intel_connector;
863 struct drm_connector *connector; 833 struct drm_connector *connector;
864 struct drm_encoder *encoder; 834 struct drm_encoder *encoder;
@@ -911,23 +881,16 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
911 if (!lvds_encoder) 881 if (!lvds_encoder)
912 return; 882 return;
913 883
914 lvds_connector = kzalloc(sizeof(*lvds_connector), GFP_KERNEL); 884 intel_connector = intel_connector_alloc();
915 if (!lvds_connector) { 885 if (!intel_connector) {
916 kfree(lvds_encoder);
917 return;
918 }
919
920 if (intel_connector_init(&lvds_connector->base) < 0) {
921 kfree(lvds_connector);
922 kfree(lvds_encoder); 886 kfree(lvds_encoder);
923 return; 887 return;
924 } 888 }
925 889
926 lvds_encoder->attached_connector = lvds_connector; 890 lvds_encoder->attached_connector = intel_connector;
927 891
928 intel_encoder = &lvds_encoder->base; 892 intel_encoder = &lvds_encoder->base;
929 encoder = &intel_encoder->base; 893 encoder = &intel_encoder->base;
930 intel_connector = &lvds_connector->base;
931 connector = &intel_connector->base; 894 connector = &intel_connector->base;
932 drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs, 895 drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
933 DRM_MODE_CONNECTOR_LVDS); 896 DRM_MODE_CONNECTOR_LVDS);
@@ -1008,7 +971,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
1008 } else { 971 } else {
1009 edid = ERR_PTR(-ENOENT); 972 edid = ERR_PTR(-ENOENT);
1010 } 973 }
1011 lvds_connector->base.edid = edid; 974 intel_connector->edid = edid;
1012 975
1013 list_for_each_entry(scan, &connector->probed_modes, head) { 976 list_for_each_entry(scan, &connector->probed_modes, head) {
1014 if (scan->type & DRM_MODE_TYPE_PREFERRED) { 977 if (scan->type & DRM_MODE_TYPE_PREFERRED) {
@@ -1072,6 +1035,6 @@ failed:
1072 drm_connector_cleanup(connector); 1035 drm_connector_cleanup(connector);
1073 drm_encoder_cleanup(encoder); 1036 drm_encoder_cleanup(encoder);
1074 kfree(lvds_encoder); 1037 kfree(lvds_encoder);
1075 kfree(lvds_connector); 1038 intel_connector_free(intel_connector);
1076 return; 1039 return;
1077} 1040}
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index e034b4166d32..b8f106d9ecf8 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -773,70 +773,6 @@ static void intel_setup_cadls(struct drm_i915_private *dev_priv)
773 opregion->acpi->cadl[i] = 0; 773 opregion->acpi->cadl[i] = 0;
774} 774}
775 775
776void intel_opregion_register(struct drm_i915_private *dev_priv)
777{
778 struct intel_opregion *opregion = &dev_priv->opregion;
779
780 if (!opregion->header)
781 return;
782
783 if (opregion->acpi) {
784 intel_didl_outputs(dev_priv);
785 intel_setup_cadls(dev_priv);
786
787 /* Notify BIOS we are ready to handle ACPI video ext notifs.
788 * Right now, all the events are handled by the ACPI video module.
789 * We don't actually need to do anything with them. */
790 opregion->acpi->csts = 0;
791 opregion->acpi->drdy = 1;
792
793 opregion->acpi_notifier.notifier_call = intel_opregion_video_event;
794 register_acpi_notifier(&opregion->acpi_notifier);
795 }
796
797 if (opregion->asle) {
798 opregion->asle->tche = ASLE_TCHE_BLC_EN;
799 opregion->asle->ardy = ASLE_ARDY_READY;
800 }
801}
802
803void intel_opregion_unregister(struct drm_i915_private *dev_priv)
804{
805 struct intel_opregion *opregion = &dev_priv->opregion;
806
807 if (!opregion->header)
808 return;
809
810 if (opregion->asle)
811 opregion->asle->ardy = ASLE_ARDY_NOT_READY;
812
813 cancel_work_sync(&dev_priv->opregion.asle_work);
814
815 if (opregion->acpi) {
816 opregion->acpi->drdy = 0;
817
818 unregister_acpi_notifier(&opregion->acpi_notifier);
819 opregion->acpi_notifier.notifier_call = NULL;
820 }
821
822 /* just clear all opregion memory pointers now */
823 memunmap(opregion->header);
824 if (opregion->rvda) {
825 memunmap(opregion->rvda);
826 opregion->rvda = NULL;
827 }
828 if (opregion->vbt_firmware) {
829 kfree(opregion->vbt_firmware);
830 opregion->vbt_firmware = NULL;
831 }
832 opregion->header = NULL;
833 opregion->acpi = NULL;
834 opregion->swsci = NULL;
835 opregion->asle = NULL;
836 opregion->vbt = NULL;
837 opregion->lid_state = NULL;
838}
839
840static void swsci_setup(struct drm_i915_private *dev_priv) 776static void swsci_setup(struct drm_i915_private *dev_priv)
841{ 777{
842 struct intel_opregion *opregion = &dev_priv->opregion; 778 struct intel_opregion *opregion = &dev_priv->opregion;
@@ -1115,3 +1051,97 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
1115 1051
1116 return ret - 1; 1052 return ret - 1;
1117} 1053}
1054
1055void intel_opregion_register(struct drm_i915_private *i915)
1056{
1057 struct intel_opregion *opregion = &i915->opregion;
1058
1059 if (!opregion->header)
1060 return;
1061
1062 if (opregion->acpi) {
1063 opregion->acpi_notifier.notifier_call =
1064 intel_opregion_video_event;
1065 register_acpi_notifier(&opregion->acpi_notifier);
1066 }
1067
1068 intel_opregion_resume(i915);
1069}
1070
1071void intel_opregion_resume(struct drm_i915_private *i915)
1072{
1073 struct intel_opregion *opregion = &i915->opregion;
1074
1075 if (!opregion->header)
1076 return;
1077
1078 if (opregion->acpi) {
1079 intel_didl_outputs(i915);
1080 intel_setup_cadls(i915);
1081
1082 /*
1083 * Notify BIOS we are ready to handle ACPI video ext notifs.
1084 * Right now, all the events are handled by the ACPI video
1085 * module. We don't actually need to do anything with them.
1086 */
1087 opregion->acpi->csts = 0;
1088 opregion->acpi->drdy = 1;
1089 }
1090
1091 if (opregion->asle) {
1092 opregion->asle->tche = ASLE_TCHE_BLC_EN;
1093 opregion->asle->ardy = ASLE_ARDY_READY;
1094 }
1095
1096 intel_opregion_notify_adapter(i915, PCI_D0);
1097}
1098
1099void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state)
1100{
1101 struct intel_opregion *opregion = &i915->opregion;
1102
1103 if (!opregion->header)
1104 return;
1105
1106 intel_opregion_notify_adapter(i915, state);
1107
1108 if (opregion->asle)
1109 opregion->asle->ardy = ASLE_ARDY_NOT_READY;
1110
1111 cancel_work_sync(&i915->opregion.asle_work);
1112
1113 if (opregion->acpi)
1114 opregion->acpi->drdy = 0;
1115}
1116
1117void intel_opregion_unregister(struct drm_i915_private *i915)
1118{
1119 struct intel_opregion *opregion = &i915->opregion;
1120
1121 intel_opregion_suspend(i915, PCI_D1);
1122
1123 if (!opregion->header)
1124 return;
1125
1126 if (opregion->acpi_notifier.notifier_call) {
1127 unregister_acpi_notifier(&opregion->acpi_notifier);
1128 opregion->acpi_notifier.notifier_call = NULL;
1129 }
1130
1131 /* just clear all opregion memory pointers now */
1132 memunmap(opregion->header);
1133 if (opregion->rvda) {
1134 memunmap(opregion->rvda);
1135 opregion->rvda = NULL;
1136 }
1137 if (opregion->vbt_firmware) {
1138 kfree(opregion->vbt_firmware);
1139 opregion->vbt_firmware = NULL;
1140 }
1141 opregion->header = NULL;
1142 opregion->acpi = NULL;
1143 opregion->swsci = NULL;
1144 opregion->asle = NULL;
1145 opregion->vbt = NULL;
1146 opregion->lid_state = NULL;
1147}
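
The opregion rework above splits one-time register/unregister from the work that must also run across power transitions: register ends by calling resume, and unregister begins by calling suspend. A stub sketch of that call ordering (printfs stand in for the real ACPI programming):

#include <stdio.h>

static void opregion_resume(void)  { printf("didl/cadls, drdy=1, notify D0\n"); }
static void opregion_suspend(void) { printf("notify state, ardy=0, drdy=0\n"); }

static void opregion_register(void)
{
	printf("register acpi notifier\n");	/* one-time, at driver load */
	opregion_resume();			/* plus the per-resume setup */
}

static void opregion_unregister(void)
{
	opregion_suspend();			/* per-suspend teardown first */
	printf("unregister acpi notifier\n");	/* then the one-time part */
}

int main(void)
{
	opregion_register();
	opregion_unregister();
	return 0;
}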
diff --git a/drivers/gpu/drm/i915/intel_opregion.h b/drivers/gpu/drm/i915/intel_opregion.h
index e8498a8cda3d..d84b6d2d2fae 100644
--- a/drivers/gpu/drm/i915/intel_opregion.h
+++ b/drivers/gpu/drm/i915/intel_opregion.h
@@ -57,8 +57,14 @@ struct intel_opregion {
57#ifdef CONFIG_ACPI 57#ifdef CONFIG_ACPI
58 58
59int intel_opregion_setup(struct drm_i915_private *dev_priv); 59int intel_opregion_setup(struct drm_i915_private *dev_priv);
60
60void intel_opregion_register(struct drm_i915_private *dev_priv); 61void intel_opregion_register(struct drm_i915_private *dev_priv);
61void intel_opregion_unregister(struct drm_i915_private *dev_priv); 62void intel_opregion_unregister(struct drm_i915_private *dev_priv);
63
64void intel_opregion_resume(struct drm_i915_private *dev_priv);
65void intel_opregion_suspend(struct drm_i915_private *dev_priv,
66 pci_power_t state);
67
62void intel_opregion_asle_intr(struct drm_i915_private *dev_priv); 68void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
63int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, 69int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
64 bool enable); 70 bool enable);
@@ -81,6 +87,15 @@ static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv)
81{ 87{
82} 88}
83 89
 90static inline void intel_opregion_resume(struct drm_i915_private *dev_priv)
 91{
 92}
 93
 94static inline void intel_opregion_suspend(struct drm_i915_private *dev_priv,
 95 pci_power_t state)
 96{
 97}
98
84static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv) 99static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
85{ 100{
86} 101}
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 72eb7e48e8bc..20ea7c99d13a 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -1338,7 +1338,7 @@ err_put_bo:
1338 return err; 1338 return err;
1339} 1339}
1340 1340
1341void intel_setup_overlay(struct drm_i915_private *dev_priv) 1341void intel_overlay_setup(struct drm_i915_private *dev_priv)
1342{ 1342{
1343 struct intel_overlay *overlay; 1343 struct intel_overlay *overlay;
1344 int ret; 1344 int ret;
@@ -1387,7 +1387,7 @@ out_free:
1387 kfree(overlay); 1387 kfree(overlay);
1388} 1388}
1389 1389
1390void intel_cleanup_overlay(struct drm_i915_private *dev_priv) 1390void intel_overlay_cleanup(struct drm_i915_private *dev_priv)
1391{ 1391{
1392 struct intel_overlay *overlay; 1392 struct intel_overlay *overlay;
1393 1393
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 4a9f139e7b73..e6cd7b55c018 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -111,7 +111,7 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
111 /* Native modes don't need fitting */ 111 /* Native modes don't need fitting */
112 if (adjusted_mode->crtc_hdisplay == pipe_config->pipe_src_w && 112 if (adjusted_mode->crtc_hdisplay == pipe_config->pipe_src_w &&
113 adjusted_mode->crtc_vdisplay == pipe_config->pipe_src_h && 113 adjusted_mode->crtc_vdisplay == pipe_config->pipe_src_h &&
114 !pipe_config->ycbcr420) 114 pipe_config->output_format != INTEL_OUTPUT_FORMAT_YCBCR420)
115 goto done; 115 goto done;
116 116
117 switch (fitting_mode) { 117 switch (fitting_mode) {
@@ -505,7 +505,7 @@ static u32 _vlv_get_backlight(struct drm_i915_private *dev_priv, enum pipe pipe)
505static u32 vlv_get_backlight(struct intel_connector *connector) 505static u32 vlv_get_backlight(struct intel_connector *connector)
506{ 506{
507 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 507 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
508 enum pipe pipe = intel_get_pipe_from_connector(connector); 508 enum pipe pipe = intel_connector_get_pipe(connector);
509 509
510 return _vlv_get_backlight(dev_priv, pipe); 510 return _vlv_get_backlight(dev_priv, pipe);
511} 511}
@@ -763,7 +763,7 @@ static void pwm_disable_backlight(const struct drm_connector_state *old_conn_sta
763 struct intel_panel *panel = &connector->panel; 763 struct intel_panel *panel = &connector->panel;
764 764
765 /* Disable the backlight */ 765 /* Disable the backlight */
766 pwm_config(panel->backlight.pwm, 0, CRC_PMIC_PWM_PERIOD_NS); 766 intel_panel_actually_set_backlight(old_conn_state, 0);
767 usleep_range(2000, 3000); 767 usleep_range(2000, 3000);
768 pwm_disable(panel->backlight.pwm); 768 pwm_disable(panel->backlight.pwm);
769} 769}
@@ -1814,11 +1814,8 @@ int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
1814 return 0; 1814 return 0;
1815} 1815}
1816 1816
1817void intel_panel_destroy_backlight(struct drm_connector *connector) 1817static void intel_panel_destroy_backlight(struct intel_panel *panel)
1818{ 1818{
1819 struct intel_connector *intel_connector = to_intel_connector(connector);
1820 struct intel_panel *panel = &intel_connector->panel;
1821
1822 /* dispose of the pwm */ 1819 /* dispose of the pwm */
1823 if (panel->backlight.pwm) 1820 if (panel->backlight.pwm)
1824 pwm_put(panel->backlight.pwm); 1821 pwm_put(panel->backlight.pwm);
@@ -1923,6 +1920,8 @@ void intel_panel_fini(struct intel_panel *panel)
1923 struct intel_connector *intel_connector = 1920 struct intel_connector *intel_connector =
1924 container_of(panel, struct intel_connector, panel); 1921 container_of(panel, struct intel_connector, panel);
1925 1922
1923 intel_panel_destroy_backlight(panel);
1924
1926 if (panel->fixed_mode) 1925 if (panel->fixed_mode)
1927 drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode); 1926 drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode);
1928 1927
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 1db9b8328275..897a791662c5 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2493,6 +2493,9 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
2493 uint32_t method1, method2; 2493 uint32_t method1, method2;
2494 int cpp; 2494 int cpp;
2495 2495
2496 if (mem_value == 0)
2497 return U32_MAX;
2498
2496 if (!intel_wm_plane_visible(cstate, pstate)) 2499 if (!intel_wm_plane_visible(cstate, pstate))
2497 return 0; 2500 return 0;
2498 2501
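
The mem_value == 0 guards added in these watermark hunks turn an unusable latency into an impossibly large watermark, so the later merge/validate step rejects the whole LP level instead of dividing by zero or enabling it with garbage. A sketch of the sentinel idea (the division is a placeholder, not the real method):

#include <stdint.h>
#include <stdio.h>

static uint32_t compute_wm(uint32_t mem_value)
{
	if (mem_value == 0)
		return UINT32_MAX;	/* level can never validate */
	return 64 / mem_value;		/* placeholder for the real method */
}

int main(void)
{
	printf("wm(0)=%u wm(4)=%u\n", compute_wm(0), compute_wm(4));
	return 0;
}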
@@ -2522,6 +2525,9 @@ static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
2522 uint32_t method1, method2; 2525 uint32_t method1, method2;
2523 int cpp; 2526 int cpp;
2524 2527
2528 if (mem_value == 0)
2529 return U32_MAX;
2530
2525 if (!intel_wm_plane_visible(cstate, pstate)) 2531 if (!intel_wm_plane_visible(cstate, pstate))
2526 return 0; 2532 return 0;
2527 2533
@@ -2545,6 +2551,9 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
2545{ 2551{
2546 int cpp; 2552 int cpp;
2547 2553
2554 if (mem_value == 0)
2555 return U32_MAX;
2556
2548 if (!intel_wm_plane_visible(cstate, pstate)) 2557 if (!intel_wm_plane_visible(cstate, pstate))
2549 return 0; 2558 return 0;
2550 2559
@@ -2881,8 +2890,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
2881 * any underrun. If not able to get Dimm info assume 16GB dimm 2890 * any underrun. If not able to get Dimm info assume 16GB dimm
2882 * to avoid any underrun. 2891 * to avoid any underrun.
2883 */ 2892 */
2884 if (!dev_priv->dram_info.valid_dimm || 2893 if (dev_priv->dram_info.is_16gb_dimm)
2885 dev_priv->dram_info.is_16gb_dimm)
2886 wm[0] += 1; 2894 wm[0] += 1;
2887 2895
2888 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 2896 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
@@ -3009,6 +3017,34 @@ static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
3009 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); 3017 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
3010} 3018}
3011 3019
3020static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
3021{
3022 /*
3023 * On some SNB machines (Thinkpad X220 Tablet at least)
3024 * LP3 usage can cause vblank interrupts to be lost.
3025 * The DEIIR bit will go high but it looks like the CPU
3026 * never gets interrupted.
3027 *
 3028 * It's not clear whether other interrupt sources could
3029 * be affected or if this is somehow limited to vblank
3030 * interrupts only. To play it safe we disable LP3
3031 * watermarks entirely.
3032 */
3033 if (dev_priv->wm.pri_latency[3] == 0 &&
3034 dev_priv->wm.spr_latency[3] == 0 &&
3035 dev_priv->wm.cur_latency[3] == 0)
3036 return;
3037
3038 dev_priv->wm.pri_latency[3] = 0;
3039 dev_priv->wm.spr_latency[3] = 0;
3040 dev_priv->wm.cur_latency[3] = 0;
3041
3042 DRM_DEBUG_KMS("LP3 watermarks disabled due to potential for lost interrupts\n");
3043 intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
3044 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
3045 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
3046}
3047
3012static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv) 3048static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
3013{ 3049{
3014 intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency); 3050 intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
@@ -3025,8 +3061,10 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
3025 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency); 3061 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
3026 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); 3062 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
3027 3063
3028 if (IS_GEN6(dev_priv)) 3064 if (IS_GEN6(dev_priv)) {
3029 snb_wm_latency_quirk(dev_priv); 3065 snb_wm_latency_quirk(dev_priv);
3066 snb_wm_lp3_irq_quirk(dev_priv);
3067 }
3030} 3068}
3031 3069
3032static void skl_setup_wm_latency(struct drm_i915_private *dev_priv) 3070static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
@@ -3160,7 +3198,8 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
3160 * and after the vblank. 3198 * and after the vblank.
3161 */ 3199 */
3162 *a = newstate->wm.ilk.optimal; 3200 *a = newstate->wm.ilk.optimal;
3163 if (!newstate->base.active || drm_atomic_crtc_needs_modeset(&newstate->base)) 3201 if (!newstate->base.active || drm_atomic_crtc_needs_modeset(&newstate->base) ||
3202 intel_state->skip_intermediate_wm)
3164 return 0; 3203 return 0;
3165 3204
3166 a->pipe_enabled |= b->pipe_enabled; 3205 a->pipe_enabled |= b->pipe_enabled;
@@ -3612,15 +3651,8 @@ static bool skl_needs_memory_bw_wa(struct intel_atomic_state *state)
3612static bool 3651static bool
3613intel_has_sagv(struct drm_i915_private *dev_priv) 3652intel_has_sagv(struct drm_i915_private *dev_priv)
3614{ 3653{
3615 if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || 3654 return (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) &&
3616 IS_CANNONLAKE(dev_priv)) 3655 dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED;
3617 return true;
3618
3619 if (IS_SKYLAKE(dev_priv) &&
3620 dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED)
3621 return true;
3622
3623 return false;
3624} 3656}
3625 3657
3626/* 3658/*
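The rewritten intel_has_sagv() folds the platform checks into one predicate: IS_GEN9_BC() covers SKL/KBL/CFL, INTEL_GEN() >= 10 covers CNL and later, and the I915_SAGV_NOT_CONTROLLED latch, previously consulted only on SKL, now gates every platform. An equivalent expansion, as a sketch:

    /* Sketch: the condensed predicate spelled out. */
    bool has_sagv = (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
                     IS_COFFEELAKE(dev_priv) ||     /* IS_GEN9_BC()  */
                     INTEL_GEN(dev_priv) >= 10) &&  /* CNL, ICL, ... */
                    dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED;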
@@ -3784,7 +3816,7 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
3784 3816
3785static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv, 3817static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
3786 const struct intel_crtc_state *cstate, 3818 const struct intel_crtc_state *cstate,
3787 const unsigned int total_data_rate, 3819 const u64 total_data_rate,
3788 const int num_active, 3820 const int num_active,
3789 struct skl_ddb_allocation *ddb) 3821 struct skl_ddb_allocation *ddb)
3790{ 3822{
@@ -3798,12 +3830,12 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
3798 return ddb_size - 4; /* 4 blocks for bypass path allocation */ 3830 return ddb_size - 4; /* 4 blocks for bypass path allocation */
3799 3831
3800 adjusted_mode = &cstate->base.adjusted_mode; 3832 adjusted_mode = &cstate->base.adjusted_mode;
3801 total_data_bw = (u64)total_data_rate * drm_mode_vrefresh(adjusted_mode); 3833 total_data_bw = total_data_rate * drm_mode_vrefresh(adjusted_mode);
3802 3834
3803 /* 3835 /*
 3804 * 12GB/s is the maximum BW supported by a single DBuf slice. 3836 * 12GB/s is the maximum BW supported by a single DBuf slice.
3805 */ 3837 */
3806 if (total_data_bw >= GBps(12) || num_active > 1) { 3838 if (num_active > 1 || total_data_bw >= GBps(12)) {
3807 ddb->enabled_slices = 2; 3839 ddb->enabled_slices = 2;
3808 } else { 3840 } else {
3809 ddb->enabled_slices = 1; 3841 ddb->enabled_slices = 1;
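With total_data_rate now u64, the (u64) cast on the bandwidth product goes away and the cheap num_active test moves in front of the comparison. For a sense of scale (illustrative numbers, not from the patch): one full-screen 3840x2160 XRGB8888 plane at 60 Hz moves about 2 GB/s, so a single pipe needs roughly six such planes before the 12 GB/s threshold enables the second DBuf slice; two or more active pipes always get both slices.

    /* Illustrative arithmetic for the GBps(12) threshold. */
    u64 frame_bytes = 3840ULL * 2160 * 4;  /* ~33.2 MB per frame */
    u64 plane_bw    = frame_bytes * 60;    /* ~1.99 GB/s         */
    u64 six_planes  = plane_bw * 6;        /* ~11.9 GB/s, < 12   */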
@@ -3814,16 +3846,15 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
3814} 3846}
3815 3847
3816static void 3848static void
3817skl_ddb_get_pipe_allocation_limits(struct drm_device *dev, 3849skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
3818 const struct intel_crtc_state *cstate, 3850 const struct intel_crtc_state *cstate,
3819 const unsigned int total_data_rate, 3851 const u64 total_data_rate,
3820 struct skl_ddb_allocation *ddb, 3852 struct skl_ddb_allocation *ddb,
3821 struct skl_ddb_entry *alloc, /* out */ 3853 struct skl_ddb_entry *alloc, /* out */
3822 int *num_active /* out */) 3854 int *num_active /* out */)
3823{ 3855{
3824 struct drm_atomic_state *state = cstate->base.state; 3856 struct drm_atomic_state *state = cstate->base.state;
3825 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 3857 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
3826 struct drm_i915_private *dev_priv = to_i915(dev);
3827 struct drm_crtc *for_crtc = cstate->base.crtc; 3858 struct drm_crtc *for_crtc = cstate->base.crtc;
3828 const struct drm_crtc_state *crtc_state; 3859 const struct drm_crtc_state *crtc_state;
3829 const struct drm_crtc *crtc; 3860 const struct drm_crtc *crtc;
@@ -3945,14 +3976,9 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
3945 val & PLANE_CTL_ALPHA_MASK); 3976 val & PLANE_CTL_ALPHA_MASK);
3946 3977
3947 val = I915_READ(PLANE_BUF_CFG(pipe, plane_id)); 3978 val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
3948 /* 3979 if (fourcc == DRM_FORMAT_NV12 && INTEL_GEN(dev_priv) < 11) {
3949 * FIXME: add proper NV12 support for ICL. Avoid reading unclaimed
3950 * registers for now.
3951 */
3952 if (INTEL_GEN(dev_priv) < 11)
3953 val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id)); 3980 val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));
3954 3981
3955 if (fourcc == DRM_FORMAT_NV12) {
3956 skl_ddb_entry_init_from_hw(dev_priv, 3982 skl_ddb_entry_init_from_hw(dev_priv,
3957 &ddb->plane[pipe][plane_id], val2); 3983 &ddb->plane[pipe][plane_id], val2);
3958 skl_ddb_entry_init_from_hw(dev_priv, 3984 skl_ddb_entry_init_from_hw(dev_priv,
@@ -4139,23 +4165,24 @@ int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
4139 return 0; 4165 return 0;
4140} 4166}
4141 4167
4142static unsigned int 4168static u64
4143skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, 4169skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
4144 const struct drm_plane_state *pstate, 4170 const struct intel_plane_state *intel_pstate,
4145 const int plane) 4171 const int plane)
4146{ 4172{
4147 struct intel_plane *intel_plane = to_intel_plane(pstate->plane); 4173 struct intel_plane *intel_plane =
4148 struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate); 4174 to_intel_plane(intel_pstate->base.plane);
4149 uint32_t data_rate; 4175 uint32_t data_rate;
4150 uint32_t width = 0, height = 0; 4176 uint32_t width = 0, height = 0;
4151 struct drm_framebuffer *fb; 4177 struct drm_framebuffer *fb;
4152 u32 format; 4178 u32 format;
4153 uint_fixed_16_16_t down_scale_amount; 4179 uint_fixed_16_16_t down_scale_amount;
4180 u64 rate;
4154 4181
4155 if (!intel_pstate->base.visible) 4182 if (!intel_pstate->base.visible)
4156 return 0; 4183 return 0;
4157 4184
4158 fb = pstate->fb; 4185 fb = intel_pstate->base.fb;
4159 format = fb->format->format; 4186 format = fb->format->format;
4160 4187
4161 if (intel_plane->id == PLANE_CURSOR) 4188 if (intel_plane->id == PLANE_CURSOR)
@@ -4177,28 +4204,26 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
4177 height /= 2; 4204 height /= 2;
4178 } 4205 }
4179 4206
4180 data_rate = width * height * fb->format->cpp[plane]; 4207 data_rate = width * height;
4181 4208
4182 down_scale_amount = skl_plane_downscale_amount(cstate, intel_pstate); 4209 down_scale_amount = skl_plane_downscale_amount(cstate, intel_pstate);
4183 4210
4184 return mul_round_up_u32_fixed16(data_rate, down_scale_amount); 4211 rate = mul_round_up_u32_fixed16(data_rate, down_scale_amount);
4212
4213 rate *= fb->format->cpp[plane];
4214 return rate;
4185} 4215}
4186 4216
4187/* 4217static u64
4188 * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
4189 * a 8192x4096@32bpp framebuffer:
4190 * 3 * 4096 * 8192 * 4 < 2^32
4191 */
4192static unsigned int
4193skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate, 4218skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
4194 unsigned int *plane_data_rate, 4219 u64 *plane_data_rate,
4195 unsigned int *uv_plane_data_rate) 4220 u64 *uv_plane_data_rate)
4196{ 4221{
4197 struct drm_crtc_state *cstate = &intel_cstate->base; 4222 struct drm_crtc_state *cstate = &intel_cstate->base;
4198 struct drm_atomic_state *state = cstate->state; 4223 struct drm_atomic_state *state = cstate->state;
4199 struct drm_plane *plane; 4224 struct drm_plane *plane;
4200 const struct drm_plane_state *pstate; 4225 const struct drm_plane_state *pstate;
4201 unsigned int total_data_rate = 0; 4226 u64 total_data_rate = 0;
4202 4227
4203 if (WARN_ON(!state)) 4228 if (WARN_ON(!state))
4204 return 0; 4229 return 0;
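The deleted comment's overflow bound (3 * 4096 * 8192 * 4 < 2^32) no longer holds once gen11's seven universal planes, larger cpp values and downscale factors enter the sums, so the cpp multiply is applied after the fixed-point rounding and the result carried as u64. Illustrative worst-case arithmetic (assumed numbers):

    /* Illustrative: why u32 sums no longer suffice. */
    u64 per_plane = 8192ULL * 4096 * 2;  /* src pixels, 2x downscale  */
    per_plane *= 8;                      /* 8 cpp, e.g. 16-bit float  */
    u64 total = per_plane * 7;           /* 7 planes: ~3.8e9; a       */
                                         /* slightly larger downscale */
                                         /* already tops 2^32         */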
@@ -4206,26 +4231,81 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
4206 /* Calculate and cache data rate for each plane */ 4231 /* Calculate and cache data rate for each plane */
4207 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) { 4232 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
4208 enum plane_id plane_id = to_intel_plane(plane)->id; 4233 enum plane_id plane_id = to_intel_plane(plane)->id;
4209 unsigned int rate; 4234 u64 rate;
4235 const struct intel_plane_state *intel_pstate =
4236 to_intel_plane_state(pstate);
4210 4237
4211 /* packed/y */ 4238 /* packed/y */
4212 rate = skl_plane_relative_data_rate(intel_cstate, 4239 rate = skl_plane_relative_data_rate(intel_cstate,
4213 pstate, 0); 4240 intel_pstate, 0);
4214 plane_data_rate[plane_id] = rate; 4241 plane_data_rate[plane_id] = rate;
4215
4216 total_data_rate += rate; 4242 total_data_rate += rate;
4217 4243
4218 /* uv-plane */ 4244 /* uv-plane */
4219 rate = skl_plane_relative_data_rate(intel_cstate, 4245 rate = skl_plane_relative_data_rate(intel_cstate,
4220 pstate, 1); 4246 intel_pstate, 1);
4221 uv_plane_data_rate[plane_id] = rate; 4247 uv_plane_data_rate[plane_id] = rate;
4222
4223 total_data_rate += rate; 4248 total_data_rate += rate;
4224 } 4249 }
4225 4250
4226 return total_data_rate; 4251 return total_data_rate;
4227} 4252}
4228 4253
4254static u64
4255icl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
4256 u64 *plane_data_rate)
4257{
4258 struct drm_crtc_state *cstate = &intel_cstate->base;
4259 struct drm_atomic_state *state = cstate->state;
4260 struct drm_plane *plane;
4261 const struct drm_plane_state *pstate;
4262 u64 total_data_rate = 0;
4263
4264 if (WARN_ON(!state))
4265 return 0;
4266
4267 /* Calculate and cache data rate for each plane */
4268 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
4269 const struct intel_plane_state *intel_pstate =
4270 to_intel_plane_state(pstate);
4271 enum plane_id plane_id = to_intel_plane(plane)->id;
4272 u64 rate;
4273
4274 if (!intel_pstate->linked_plane) {
4275 rate = skl_plane_relative_data_rate(intel_cstate,
4276 intel_pstate, 0);
4277 plane_data_rate[plane_id] = rate;
4278 total_data_rate += rate;
4279 } else {
4280 enum plane_id y_plane_id;
4281
4282 /*
 4283 * The slave plane might not be iterated over by
 4284 * drm_atomic_crtc_state_for_each_plane_state(),
 4285 * and needs the master plane state, which may be
 4286 * NULL if we try to fetch it via get_new_plane_state(), so we
4287 * always calculate from the master.
4288 */
4289 if (intel_pstate->slave)
4290 continue;
4291
4292 /* Y plane rate is calculated on the slave */
4293 rate = skl_plane_relative_data_rate(intel_cstate,
4294 intel_pstate, 0);
4295 y_plane_id = intel_pstate->linked_plane->id;
4296 plane_data_rate[y_plane_id] = rate;
4297 total_data_rate += rate;
4298
4299 rate = skl_plane_relative_data_rate(intel_cstate,
4300 intel_pstate, 1);
4301 plane_data_rate[plane_id] = rate;
4302 total_data_rate += rate;
4303 }
4304 }
4305
4306 return total_data_rate;
4307}
4308
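On gen11 a planar framebuffer is handled by two hardware planes: the plane the user configured (the master, fetching UV) plus a linked slave that fetches Y. The slave may be skipped by the state iterator, so both rates are computed from the master's state and filed under the two plane ids. A sketch with hypothetical ids, where rate() stands in for skl_plane_relative_data_rate():

    /* Hypothetical: master is PLANE_PRIMARY, its linked slave is
     * PLANE_SPRITE4; both entries come from the master's state.
     */
    plane_data_rate[PLANE_SPRITE4] = rate(master_state, 0); /* Y  */
    plane_data_rate[PLANE_PRIMARY] = rate(master_state, 1); /* UV */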
4229static uint16_t 4309static uint16_t
4230skl_ddb_min_alloc(const struct drm_plane_state *pstate, const int plane) 4310skl_ddb_min_alloc(const struct drm_plane_state *pstate, const int plane)
4231{ 4311{
@@ -4298,15 +4378,25 @@ skl_ddb_calc_min(const struct intel_crtc_state *cstate, int num_active,
4298 4378
4299 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, &cstate->base) { 4379 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, &cstate->base) {
4300 enum plane_id plane_id = to_intel_plane(plane)->id; 4380 enum plane_id plane_id = to_intel_plane(plane)->id;
4381 struct intel_plane_state *plane_state = to_intel_plane_state(pstate);
4301 4382
4302 if (plane_id == PLANE_CURSOR) 4383 if (plane_id == PLANE_CURSOR)
4303 continue; 4384 continue;
4304 4385
4305 if (!pstate->visible) 4386 /* slave plane must be invisible and calculated from master */
4387 if (!pstate->visible || WARN_ON(plane_state->slave))
4306 continue; 4388 continue;
4307 4389
4308 minimum[plane_id] = skl_ddb_min_alloc(pstate, 0); 4390 if (!plane_state->linked_plane) {
4309 uv_minimum[plane_id] = skl_ddb_min_alloc(pstate, 1); 4391 minimum[plane_id] = skl_ddb_min_alloc(pstate, 0);
4392 uv_minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
4393 } else {
4394 enum plane_id y_plane_id =
4395 plane_state->linked_plane->id;
4396
4397 minimum[y_plane_id] = skl_ddb_min_alloc(pstate, 0);
4398 minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
4399 }
4310 } 4400 }
4311 4401
4312 minimum[PLANE_CURSOR] = skl_cursor_allocation(num_active); 4402 minimum[PLANE_CURSOR] = skl_cursor_allocation(num_active);
@@ -4318,18 +4408,18 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
4318{ 4408{
4319 struct drm_atomic_state *state = cstate->base.state; 4409 struct drm_atomic_state *state = cstate->base.state;
4320 struct drm_crtc *crtc = cstate->base.crtc; 4410 struct drm_crtc *crtc = cstate->base.crtc;
4321 struct drm_device *dev = crtc->dev; 4411 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
4322 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4412 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4323 enum pipe pipe = intel_crtc->pipe; 4413 enum pipe pipe = intel_crtc->pipe;
4324 struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb; 4414 struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
4325 uint16_t alloc_size, start; 4415 uint16_t alloc_size, start;
4326 uint16_t minimum[I915_MAX_PLANES] = {}; 4416 uint16_t minimum[I915_MAX_PLANES] = {};
4327 uint16_t uv_minimum[I915_MAX_PLANES] = {}; 4417 uint16_t uv_minimum[I915_MAX_PLANES] = {};
4328 unsigned int total_data_rate; 4418 u64 total_data_rate;
4329 enum plane_id plane_id; 4419 enum plane_id plane_id;
4330 int num_active; 4420 int num_active;
4331 unsigned int plane_data_rate[I915_MAX_PLANES] = {}; 4421 u64 plane_data_rate[I915_MAX_PLANES] = {};
4332 unsigned int uv_plane_data_rate[I915_MAX_PLANES] = {}; 4422 u64 uv_plane_data_rate[I915_MAX_PLANES] = {};
4333 uint16_t total_min_blocks = 0; 4423 uint16_t total_min_blocks = 0;
4334 4424
4335 /* Clear the partitioning for disabled planes. */ 4425 /* Clear the partitioning for disabled planes. */
@@ -4344,11 +4434,18 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
4344 return 0; 4434 return 0;
4345 } 4435 }
4346 4436
4347 total_data_rate = skl_get_total_relative_data_rate(cstate, 4437 if (INTEL_GEN(dev_priv) < 11)
4348 plane_data_rate, 4438 total_data_rate =
4349 uv_plane_data_rate); 4439 skl_get_total_relative_data_rate(cstate,
4350 skl_ddb_get_pipe_allocation_limits(dev, cstate, total_data_rate, ddb, 4440 plane_data_rate,
4351 alloc, &num_active); 4441 uv_plane_data_rate);
4442 else
4443 total_data_rate =
4444 icl_get_total_relative_data_rate(cstate,
4445 plane_data_rate);
4446
4447 skl_ddb_get_pipe_allocation_limits(dev_priv, cstate, total_data_rate,
4448 ddb, alloc, &num_active);
4352 alloc_size = skl_ddb_entry_size(alloc); 4449 alloc_size = skl_ddb_entry_size(alloc);
4353 if (alloc_size == 0) 4450 if (alloc_size == 0)
4354 return 0; 4451 return 0;
@@ -4388,7 +4485,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
4388 4485
4389 start = alloc->start; 4486 start = alloc->start;
4390 for_each_plane_id_on_crtc(intel_crtc, plane_id) { 4487 for_each_plane_id_on_crtc(intel_crtc, plane_id) {
4391 unsigned int data_rate, uv_data_rate; 4488 u64 data_rate, uv_data_rate;
4392 uint16_t plane_blocks, uv_plane_blocks; 4489 uint16_t plane_blocks, uv_plane_blocks;
4393 4490
4394 if (plane_id == PLANE_CURSOR) 4491 if (plane_id == PLANE_CURSOR)
@@ -4402,8 +4499,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
4402 * result is < available as data_rate / total_data_rate < 1 4499 * result is < available as data_rate / total_data_rate < 1
4403 */ 4500 */
4404 plane_blocks = minimum[plane_id]; 4501 plane_blocks = minimum[plane_id];
4405 plane_blocks += div_u64((uint64_t)alloc_size * data_rate, 4502 plane_blocks += div64_u64(alloc_size * data_rate, total_data_rate);
4406 total_data_rate);
4407 4503
4408 /* Leave disabled planes at (0,0) */ 4504 /* Leave disabled planes at (0,0) */
4409 if (data_rate) { 4505 if (data_rate) {
@@ -4417,8 +4513,10 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
4417 uv_data_rate = uv_plane_data_rate[plane_id]; 4513 uv_data_rate = uv_plane_data_rate[plane_id];
4418 4514
4419 uv_plane_blocks = uv_minimum[plane_id]; 4515 uv_plane_blocks = uv_minimum[plane_id];
4420 uv_plane_blocks += div_u64((uint64_t)alloc_size * uv_data_rate, 4516 uv_plane_blocks += div64_u64(alloc_size * uv_data_rate, total_data_rate);
4421 total_data_rate); 4517
4518 /* Gen11+ uses a separate plane for UV watermarks */
4519 WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_plane_blocks);
4422 4520
4423 if (uv_data_rate) { 4521 if (uv_data_rate) {
4424 ddb->uv_plane[pipe][plane_id].start = start; 4522 ddb->uv_plane[pipe][plane_id].start = start;
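The split itself is unchanged: each plane keeps its minimum and gets a share of the remaining pool proportional to its data rate; only the helper becomes div64_u64() since both operands are 64-bit now. A worked example with assumed numbers:

    /* Assumed: 512 blocks left after minimums; data rates 300
     * and 100 (total 400).
     */
    u16 blocks_a = min_a + div64_u64(512ULL * 300, 400);  /* +384 */
    u16 blocks_b = min_b + div64_u64(512ULL * 100, 400);  /* +128 */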
@@ -4476,7 +4574,7 @@ static uint_fixed_16_16_t skl_wm_method2(uint32_t pixel_rate,
4476} 4574}
4477 4575
4478static uint_fixed_16_16_t 4576static uint_fixed_16_16_t
4479intel_get_linetime_us(struct intel_crtc_state *cstate) 4577intel_get_linetime_us(const struct intel_crtc_state *cstate)
4480{ 4578{
4481 uint32_t pixel_rate; 4579 uint32_t pixel_rate;
4482 uint32_t crtc_htotal; 4580 uint32_t crtc_htotal;
@@ -4520,7 +4618,7 @@ skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
4520 4618
4521static int 4619static int
4522skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv, 4620skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
4523 struct intel_crtc_state *cstate, 4621 const struct intel_crtc_state *cstate,
4524 const struct intel_plane_state *intel_pstate, 4622 const struct intel_plane_state *intel_pstate,
4525 struct skl_wm_params *wp, int plane_id) 4623 struct skl_wm_params *wp, int plane_id)
4526{ 4624{
@@ -4627,7 +4725,7 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
4627} 4725}
4628 4726
4629static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, 4727static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
4630 struct intel_crtc_state *cstate, 4728 const struct intel_crtc_state *cstate,
4631 const struct intel_plane_state *intel_pstate, 4729 const struct intel_plane_state *intel_pstate,
4632 uint16_t ddb_allocation, 4730 uint16_t ddb_allocation,
4633 int level, 4731 int level,
@@ -4672,15 +4770,24 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
4672 } else { 4770 } else {
4673 if ((wp->cpp * cstate->base.adjusted_mode.crtc_htotal / 4771 if ((wp->cpp * cstate->base.adjusted_mode.crtc_htotal /
4674 wp->dbuf_block_size < 1) && 4772 wp->dbuf_block_size < 1) &&
4675 (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) 4773 (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
4676 selected_result = method2; 4774 selected_result = method2;
4677 else if (ddb_allocation >= 4775 } else if (ddb_allocation >=
4678 fixed16_to_u32_round_up(wp->plane_blocks_per_line)) 4776 fixed16_to_u32_round_up(wp->plane_blocks_per_line)) {
4679 selected_result = min_fixed16(method1, method2); 4777 if (IS_GEN9(dev_priv) &&
4680 else if (latency >= wp->linetime_us) 4778 !IS_GEMINILAKE(dev_priv))
4681 selected_result = min_fixed16(method1, method2); 4779 selected_result = min_fixed16(method1, method2);
4682 else 4780 else
4781 selected_result = method2;
4782 } else if (latency >= wp->linetime_us) {
4783 if (IS_GEN9(dev_priv) &&
4784 !IS_GEMINILAKE(dev_priv))
4785 selected_result = min_fixed16(method1, method2);
4786 else
4787 selected_result = method2;
4788 } else {
4683 selected_result = method1; 4789 selected_result = method1;
4790 }
4684 } 4791 }
4685 4792
4686 res_blocks = fixed16_to_u32_round_up(selected_result) + 1; 4793 res_blocks = fixed16_to_u32_round_up(selected_result) + 1;
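The rebracketing keeps gen9 behaviour (take the smaller of the two methods) while gen10+ now always selects method2 in the two middle branches; note IS_GEN9() && !IS_GEMINILAKE() still includes Broxton. The resulting decision tree, paraphrased:

    /* Paraphrase of the branch structure above:
     *   line fits in < 1 dbuf block   -> method2
     *   ddb >= plane_blocks_per_line  -> gen9 (not GLK): min(m1, m2)
     *                                    gen10+:         method2
     *   latency >= linetime           -> same split as above
     *   otherwise                     -> method1
     */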
@@ -4756,17 +4863,6 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
4756 } 4863 }
4757 } 4864 }
4758 4865
4759 /*
4760 * Display WA #826 (SKL:ALL, BXT:ALL) & #1059 (CNL:A)
4761 * disable wm level 1-7 on NV12 planes
4762 */
4763 if (wp->is_planar && level >= 1 &&
4764 (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) ||
4765 IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))) {
4766 result->plane_en = false;
4767 return 0;
4768 }
4769
4770 /* The number of lines are ignored for the level 0 watermark. */ 4866 /* The number of lines are ignored for the level 0 watermark. */
4771 result->plane_res_b = res_blocks; 4867 result->plane_res_b = res_blocks;
4772 result->plane_res_l = res_lines; 4868 result->plane_res_l = res_lines;
@@ -4778,38 +4874,22 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
4778static int 4874static int
4779skl_compute_wm_levels(const struct drm_i915_private *dev_priv, 4875skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
4780 struct skl_ddb_allocation *ddb, 4876 struct skl_ddb_allocation *ddb,
4781 struct intel_crtc_state *cstate, 4877 const struct intel_crtc_state *cstate,
4782 const struct intel_plane_state *intel_pstate, 4878 const struct intel_plane_state *intel_pstate,
4879 uint16_t ddb_blocks,
4783 const struct skl_wm_params *wm_params, 4880 const struct skl_wm_params *wm_params,
4784 struct skl_plane_wm *wm, 4881 struct skl_plane_wm *wm,
4785 int plane_id) 4882 struct skl_wm_level *levels)
4786{ 4883{
4787 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
4788 struct drm_plane *plane = intel_pstate->base.plane;
4789 struct intel_plane *intel_plane = to_intel_plane(plane);
4790 uint16_t ddb_blocks;
4791 enum pipe pipe = intel_crtc->pipe;
4792 int level, max_level = ilk_wm_max_level(dev_priv); 4884 int level, max_level = ilk_wm_max_level(dev_priv);
4793 enum plane_id intel_plane_id = intel_plane->id; 4885 struct skl_wm_level *result_prev = &levels[0];
4794 int ret; 4886 int ret;
4795 4887
4796 if (WARN_ON(!intel_pstate->base.fb)) 4888 if (WARN_ON(!intel_pstate->base.fb))
4797 return -EINVAL; 4889 return -EINVAL;
4798 4890
4799 ddb_blocks = plane_id ?
4800 skl_ddb_entry_size(&ddb->uv_plane[pipe][intel_plane_id]) :
4801 skl_ddb_entry_size(&ddb->plane[pipe][intel_plane_id]);
4802
4803 for (level = 0; level <= max_level; level++) { 4891 for (level = 0; level <= max_level; level++) {
4804 struct skl_wm_level *result = plane_id ? &wm->uv_wm[level] : 4892 struct skl_wm_level *result = &levels[level];
4805 &wm->wm[level];
4806 struct skl_wm_level *result_prev;
4807
4808 if (level)
4809 result_prev = plane_id ? &wm->uv_wm[level - 1] :
4810 &wm->wm[level - 1];
4811 else
4812 result_prev = plane_id ? &wm->uv_wm[0] : &wm->wm[0];
4813 4893
4814 ret = skl_compute_plane_wm(dev_priv, 4894 ret = skl_compute_plane_wm(dev_priv,
4815 cstate, 4895 cstate,
@@ -4821,6 +4901,8 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
4821 result); 4901 result);
4822 if (ret) 4902 if (ret)
4823 return ret; 4903 return ret;
4904
4905 result_prev = result;
4824 } 4906 }
4825 4907
4826 if (intel_pstate->base.fb->format->format == DRM_FORMAT_NV12) 4908 if (intel_pstate->base.fb->format->format == DRM_FORMAT_NV12)
@@ -4830,7 +4912,7 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
4830} 4912}
4831 4913
4832static uint32_t 4914static uint32_t
4833skl_compute_linetime_wm(struct intel_crtc_state *cstate) 4915skl_compute_linetime_wm(const struct intel_crtc_state *cstate)
4834{ 4916{
4835 struct drm_atomic_state *state = cstate->base.state; 4917 struct drm_atomic_state *state = cstate->base.state;
4836 struct drm_i915_private *dev_priv = to_i915(state->dev); 4918 struct drm_i915_private *dev_priv = to_i915(state->dev);
@@ -4852,7 +4934,7 @@ skl_compute_linetime_wm(struct intel_crtc_state *cstate)
4852 return linetime_wm; 4934 return linetime_wm;
4853} 4935}
4854 4936
4855static void skl_compute_transition_wm(struct intel_crtc_state *cstate, 4937static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
4856 struct skl_wm_params *wp, 4938 struct skl_wm_params *wp,
4857 struct skl_wm_level *wm_l0, 4939 struct skl_wm_level *wm_l0,
4858 uint16_t ddb_allocation, 4940 uint16_t ddb_allocation,
@@ -4862,7 +4944,7 @@ static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
4862 const struct drm_i915_private *dev_priv = to_i915(dev); 4944 const struct drm_i915_private *dev_priv = to_i915(dev);
4863 uint16_t trans_min, trans_y_tile_min; 4945 uint16_t trans_min, trans_y_tile_min;
4864 const uint16_t trans_amount = 10; /* This is configurable amount */ 4946 const uint16_t trans_amount = 10; /* This is configurable amount */
4865 uint16_t trans_offset_b, res_blocks; 4947 uint16_t wm0_sel_res_b, trans_offset_b, res_blocks;
4866 4948
4867 if (!cstate->base.active) 4949 if (!cstate->base.active)
4868 goto exit; 4950 goto exit;
@@ -4875,19 +4957,31 @@ static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
4875 if (!dev_priv->ipc_enabled) 4957 if (!dev_priv->ipc_enabled)
4876 goto exit; 4958 goto exit;
4877 4959
4878 trans_min = 0; 4960 trans_min = 14;
4879 if (INTEL_GEN(dev_priv) >= 10) 4961 if (INTEL_GEN(dev_priv) >= 11)
4880 trans_min = 4; 4962 trans_min = 4;
4881 4963
4882 trans_offset_b = trans_min + trans_amount; 4964 trans_offset_b = trans_min + trans_amount;
4883 4965
4966 /*
4967 * The spec asks for Selected Result Blocks for wm0 (the real value),
4968 * not Result Blocks (the integer value). Pay attention to the capital
4969 * letters. The value wm_l0->plane_res_b is actually Result Blocks, but
4970 * since Result Blocks is the ceiling of Selected Result Blocks plus 1,
4971 * and since we later will have to get the ceiling of the sum in the
4972 * transition watermarks calculation, we can just pretend Selected
4973 * Result Blocks is Result Blocks minus 1 and it should work for the
4974 * current platforms.
4975 */
4976 wm0_sel_res_b = wm_l0->plane_res_b - 1;
4977
4884 if (wp->y_tiled) { 4978 if (wp->y_tiled) {
4885 trans_y_tile_min = (uint16_t) mul_round_up_u32_fixed16(2, 4979 trans_y_tile_min = (uint16_t) mul_round_up_u32_fixed16(2,
4886 wp->y_tile_minimum); 4980 wp->y_tile_minimum);
4887 res_blocks = max(wm_l0->plane_res_b, trans_y_tile_min) + 4981 res_blocks = max(wm0_sel_res_b, trans_y_tile_min) +
4888 trans_offset_b; 4982 trans_offset_b;
4889 } else { 4983 } else {
4890 res_blocks = wm_l0->plane_res_b + trans_offset_b; 4984 res_blocks = wm0_sel_res_b + trans_offset_b;
4891 4985
4892 /* WA BUG:1938466 add one block for non y-tile planes */ 4986 /* WA BUG:1938466 add one block for non y-tile planes */
4893 if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0)) 4987 if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))
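Two things change here: trans_min now carries the spec values (14 blocks on gen9/10, 4 on gen11+, where it used to be 0 and 4), and the calculation starts from Selected Result Blocks rather than Result Blocks. Because res_blocks is computed as ceil(selected) + 1, subtracting one recovers ceil(selected); that overshoots the true value by less than a block, which is absorbed by the later round-up. With assumed numbers:

    /* Assumed wm0 selected result: 13.2 blocks. */
    u16 plane_res_b   = 14 + 1;          /* ceil(13.2) + 1 = 15  */
    u16 wm0_sel_res_b = plane_res_b - 1; /* 14; overshoots 13.2  */
                                         /* by < 1, harmless     */
                                         /* under a later ceil   */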
@@ -4907,16 +5001,101 @@ exit:
4907 trans_wm->plane_en = false; 5001 trans_wm->plane_en = false;
4908} 5002}
4909 5003
5004static int __skl_build_plane_wm_single(struct skl_ddb_allocation *ddb,
5005 struct skl_pipe_wm *pipe_wm,
5006 enum plane_id plane_id,
5007 const struct intel_crtc_state *cstate,
5008 const struct intel_plane_state *pstate,
5009 int color_plane)
5010{
5011 struct drm_i915_private *dev_priv = to_i915(pstate->base.plane->dev);
5012 struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
5013 enum pipe pipe = to_intel_plane(pstate->base.plane)->pipe;
5014 struct skl_wm_params wm_params;
5015 uint16_t ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]);
5016 int ret;
5017
5018 ret = skl_compute_plane_wm_params(dev_priv, cstate, pstate,
5019 &wm_params, color_plane);
5020 if (ret)
5021 return ret;
5022
5023 ret = skl_compute_wm_levels(dev_priv, ddb, cstate, pstate,
5024 ddb_blocks, &wm_params, wm, wm->wm);
5025
5026 if (ret)
5027 return ret;
5028
5029 skl_compute_transition_wm(cstate, &wm_params, &wm->wm[0],
5030 ddb_blocks, &wm->trans_wm);
5031
5032 return 0;
5033}
5034
5035static int skl_build_plane_wm_single(struct skl_ddb_allocation *ddb,
5036 struct skl_pipe_wm *pipe_wm,
5037 const struct intel_crtc_state *cstate,
5038 const struct intel_plane_state *pstate)
5039{
5040 enum plane_id plane_id = to_intel_plane(pstate->base.plane)->id;
5041
5042 return __skl_build_plane_wm_single(ddb, pipe_wm, plane_id, cstate, pstate, 0);
5043}
5044
5045static int skl_build_plane_wm_planar(struct skl_ddb_allocation *ddb,
5046 struct skl_pipe_wm *pipe_wm,
5047 const struct intel_crtc_state *cstate,
5048 const struct intel_plane_state *pstate)
5049{
5050 struct intel_plane *plane = to_intel_plane(pstate->base.plane);
5051 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
5052 enum plane_id plane_id = plane->id;
5053 struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
5054 struct skl_wm_params wm_params;
5055 enum pipe pipe = plane->pipe;
5056 uint16_t ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]);
5057 int ret;
5058
5059 ret = __skl_build_plane_wm_single(ddb, pipe_wm, plane_id, cstate, pstate, 0);
5060 if (ret)
5061 return ret;
5062
5063 /* uv plane watermarks must also be validated for NV12/Planar */
5064 ddb_blocks = skl_ddb_entry_size(&ddb->uv_plane[pipe][plane_id]);
5065
5066 ret = skl_compute_plane_wm_params(dev_priv, cstate, pstate, &wm_params, 1);
5067 if (ret)
5068 return ret;
5069
5070 return skl_compute_wm_levels(dev_priv, ddb, cstate, pstate,
5071 ddb_blocks, &wm_params, wm, wm->uv_wm);
5072}
5073
5074static int icl_build_plane_wm_planar(struct skl_ddb_allocation *ddb,
5075 struct skl_pipe_wm *pipe_wm,
5076 const struct intel_crtc_state *cstate,
5077 const struct intel_plane_state *pstate)
5078{
5079 int ret;
5080 enum plane_id y_plane_id = pstate->linked_plane->id;
5081 enum plane_id uv_plane_id = to_intel_plane(pstate->base.plane)->id;
5082
5083 ret = __skl_build_plane_wm_single(ddb, pipe_wm, y_plane_id,
5084 cstate, pstate, 0);
5085 if (ret)
5086 return ret;
5087
5088 return __skl_build_plane_wm_single(ddb, pipe_wm, uv_plane_id,
5089 cstate, pstate, 1);
5090}
5091
4910static int skl_build_pipe_wm(struct intel_crtc_state *cstate, 5092static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
4911 struct skl_ddb_allocation *ddb, 5093 struct skl_ddb_allocation *ddb,
4912 struct skl_pipe_wm *pipe_wm) 5094 struct skl_pipe_wm *pipe_wm)
4913{ 5095{
4914 struct drm_device *dev = cstate->base.crtc->dev;
4915 struct drm_crtc_state *crtc_state = &cstate->base; 5096 struct drm_crtc_state *crtc_state = &cstate->base;
4916 const struct drm_i915_private *dev_priv = to_i915(dev);
4917 struct drm_plane *plane; 5097 struct drm_plane *plane;
4918 const struct drm_plane_state *pstate; 5098 const struct drm_plane_state *pstate;
4919 struct skl_plane_wm *wm;
4920 int ret; 5099 int ret;
4921 5100
4922 /* 5101 /*
@@ -4928,44 +5107,21 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
4928 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) { 5107 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
4929 const struct intel_plane_state *intel_pstate = 5108 const struct intel_plane_state *intel_pstate =
4930 to_intel_plane_state(pstate); 5109 to_intel_plane_state(pstate);
4931 enum plane_id plane_id = to_intel_plane(plane)->id;
4932 struct skl_wm_params wm_params;
4933 enum pipe pipe = to_intel_crtc(cstate->base.crtc)->pipe;
4934 uint16_t ddb_blocks;
4935 5110
4936 wm = &pipe_wm->planes[plane_id]; 5111 /* Watermarks calculated in master */
4937 ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]); 5112 if (intel_pstate->slave)
5113 continue;
4938 5114
4939 ret = skl_compute_plane_wm_params(dev_priv, cstate, 5115 if (intel_pstate->linked_plane)
4940 intel_pstate, &wm_params, 0); 5116 ret = icl_build_plane_wm_planar(ddb, pipe_wm, cstate, intel_pstate);
4941 if (ret) 5117 else if (intel_pstate->base.fb &&
4942 return ret; 5118 intel_pstate->base.fb->format->format == DRM_FORMAT_NV12)
5119 ret = skl_build_plane_wm_planar(ddb, pipe_wm, cstate, intel_pstate);
5120 else
5121 ret = skl_build_plane_wm_single(ddb, pipe_wm, cstate, intel_pstate);
4943 5122
4944 ret = skl_compute_wm_levels(dev_priv, ddb, cstate,
4945 intel_pstate, &wm_params, wm, 0);
4946 if (ret) 5123 if (ret)
4947 return ret; 5124 return ret;
4948
4949 skl_compute_transition_wm(cstate, &wm_params, &wm->wm[0],
4950 ddb_blocks, &wm->trans_wm);
4951
4952 /* uv plane watermarks must also be validated for NV12/Planar */
4953 if (wm_params.is_planar) {
4954 memset(&wm_params, 0, sizeof(struct skl_wm_params));
4955 wm->is_planar = true;
4956
4957 ret = skl_compute_plane_wm_params(dev_priv, cstate,
4958 intel_pstate,
4959 &wm_params, 1);
4960 if (ret)
4961 return ret;
4962
4963 ret = skl_compute_wm_levels(dev_priv, ddb, cstate,
4964 intel_pstate, &wm_params,
4965 wm, 1);
4966 if (ret)
4967 return ret;
4968 }
4969 } 5125 }
4970 5126
4971 pipe_wm->linetime = skl_compute_linetime_wm(cstate); 5127 pipe_wm->linetime = skl_compute_linetime_wm(cstate);
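skl_build_pipe_wm() is now a thin dispatcher; the per-plane bookkeeping that used to live inline moved into the helpers above. The routing, paraphrased:

    /* Paraphrase of the dispatch above:
     *   pstate->slave        -> skip, the master computed it
     *   pstate->linked_plane -> icl_build_plane_wm_planar() (gen11 NV12)
     *   fb format is NV12    -> skl_build_plane_wm_planar() (gen9/10)
     *   anything else        -> skl_build_plane_wm_single()
     */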
@@ -5016,14 +5172,7 @@ static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
5016 skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id), 5172 skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
5017 &wm->trans_wm); 5173 &wm->trans_wm);
5018 5174
5019 skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id), 5175 if (wm->is_planar && INTEL_GEN(dev_priv) < 11) {
5020 &ddb->plane[pipe][plane_id]);
5021 /* FIXME: add proper NV12 support for ICL. */
5022 if (INTEL_GEN(dev_priv) >= 11)
5023 return skl_ddb_entry_write(dev_priv,
5024 PLANE_BUF_CFG(pipe, plane_id),
5025 &ddb->plane[pipe][plane_id]);
5026 if (wm->is_planar) {
5027 skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id), 5176 skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
5028 &ddb->uv_plane[pipe][plane_id]); 5177 &ddb->uv_plane[pipe][plane_id]);
5029 skl_ddb_entry_write(dev_priv, 5178 skl_ddb_entry_write(dev_priv,
@@ -5032,7 +5181,8 @@ static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
5032 } else { 5181 } else {
5033 skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id), 5182 skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
5034 &ddb->plane[pipe][plane_id]); 5183 &ddb->plane[pipe][plane_id]);
5035 I915_WRITE(PLANE_NV12_BUF_CFG(pipe, plane_id), 0x0); 5184 if (INTEL_GEN(dev_priv) < 11)
5185 I915_WRITE(PLANE_NV12_BUF_CFG(pipe, plane_id), 0x0);
5036 } 5186 }
5037} 5187}
5038 5188
@@ -5076,16 +5226,15 @@ static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
5076 return a->start < b->end && b->start < a->end; 5226 return a->start < b->end && b->start < a->end;
5077} 5227}
5078 5228
5079bool skl_ddb_allocation_overlaps(struct drm_i915_private *dev_priv, 5229bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
5080 const struct skl_ddb_entry **entries, 5230 const struct skl_ddb_entry entries[],
5081 const struct skl_ddb_entry *ddb, 5231 int num_entries, int ignore_idx)
5082 int ignore)
5083{ 5232{
5084 enum pipe pipe; 5233 int i;
5085 5234
5086 for_each_pipe(dev_priv, pipe) { 5235 for (i = 0; i < num_entries; i++) {
5087 if (pipe != ignore && entries[pipe] && 5236 if (i != ignore_idx &&
5088 skl_ddb_entries_overlap(ddb, entries[pipe])) 5237 skl_ddb_entries_overlap(ddb, &entries[i]))
5089 return true; 5238 return true;
5090 } 5239 }
5091 5240
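The helper now takes a caller-supplied array plus an index to ignore instead of walking dev_priv per pipe, which also lets it compare per-plane entries. skl_ddb_entries_overlap() is the standard half-open interval test; a quick check with assumed values:

    /* [start, end) intervals overlap iff a.start < b.end &&
     * b.start < a.end.  Assumed values:
     *   [0, 512) vs [512, 1024) -> no overlap (512 excluded)
     *   [0, 512) vs [511, 1024) -> overlap at block 511
     */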
@@ -5137,11 +5286,12 @@ skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
5137 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 5286 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
5138 struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb; 5287 struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
5139 struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb; 5288 struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
5140 struct drm_plane_state *plane_state;
5141 struct drm_plane *plane; 5289 struct drm_plane *plane;
5142 enum pipe pipe = intel_crtc->pipe; 5290 enum pipe pipe = intel_crtc->pipe;
5143 5291
5144 drm_for_each_plane_mask(plane, dev, cstate->base.plane_mask) { 5292 drm_for_each_plane_mask(plane, dev, cstate->base.plane_mask) {
5293 struct drm_plane_state *plane_state;
5294 struct intel_plane *linked;
5145 enum plane_id plane_id = to_intel_plane(plane)->id; 5295 enum plane_id plane_id = to_intel_plane(plane)->id;
5146 5296
5147 if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][plane_id], 5297 if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][plane_id],
@@ -5153,6 +5303,15 @@ skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
5153 plane_state = drm_atomic_get_plane_state(state, plane); 5303 plane_state = drm_atomic_get_plane_state(state, plane);
5154 if (IS_ERR(plane_state)) 5304 if (IS_ERR(plane_state))
5155 return PTR_ERR(plane_state); 5305 return PTR_ERR(plane_state);
5306
5307 /* Make sure linked plane is updated too */
5308 linked = to_intel_plane_state(plane_state)->linked_plane;
5309 if (!linked)
5310 continue;
5311
5312 plane_state = drm_atomic_get_plane_state(state, &linked->base);
5313 if (IS_ERR(plane_state))
5314 return PTR_ERR(plane_state);
5156 } 5315 }
5157 5316
5158 return 0; 5317 return 0;
@@ -5211,11 +5370,11 @@ skl_print_wm_changes(const struct drm_atomic_state *state)
5211 if (skl_ddb_entry_equal(old, new)) 5370 if (skl_ddb_entry_equal(old, new))
5212 continue; 5371 continue;
5213 5372
5214 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n", 5373 DRM_DEBUG_KMS("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n",
5215 intel_plane->base.base.id, 5374 intel_plane->base.base.id,
5216 intel_plane->base.name, 5375 intel_plane->base.name,
5217 old->start, old->end, 5376 old->start, old->end,
5218 new->start, new->end); 5377 new->start, new->end);
5219 } 5378 }
5220 } 5379 }
5221} 5380}
@@ -6117,14 +6276,8 @@ void intel_enable_ipc(struct drm_i915_private *dev_priv)
6117{ 6276{
6118 u32 val; 6277 u32 val;
6119 6278
6120 /* Display WA #0477 WaDisableIPC: skl */ 6279 if (!HAS_IPC(dev_priv))
6121 if (IS_SKYLAKE(dev_priv)) 6280 return;
6122 dev_priv->ipc_enabled = false;
6123
6124 /* Display WA #1141: SKL:all KBL:all CFL */
6125 if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) &&
6126 !dev_priv->dram_info.symmetric_memory)
6127 dev_priv->ipc_enabled = false;
6128 6281
6129 val = I915_READ(DISP_ARB_CTL2); 6282 val = I915_READ(DISP_ARB_CTL2);
6130 6283
@@ -6138,11 +6291,15 @@ void intel_enable_ipc(struct drm_i915_private *dev_priv)
6138 6291
6139void intel_init_ipc(struct drm_i915_private *dev_priv) 6292void intel_init_ipc(struct drm_i915_private *dev_priv)
6140{ 6293{
6141 dev_priv->ipc_enabled = false;
6142 if (!HAS_IPC(dev_priv)) 6294 if (!HAS_IPC(dev_priv))
6143 return; 6295 return;
6144 6296
6145 dev_priv->ipc_enabled = true; 6297 /* Display WA #1141: SKL:all KBL:all CFL */
6298 if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
6299 dev_priv->ipc_enabled = dev_priv->dram_info.symmetric_memory;
6300 else
6301 dev_priv->ipc_enabled = true;
6302
6146 intel_enable_ipc(dev_priv); 6303 intel_enable_ipc(dev_priv);
6147} 6304}
6148 6305
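The IPC decision is now made once at init instead of being re-derived on every enable, and the SKL Display WA #0477 check disappears from the enable path (SKL is presumably excluded via HAS_IPC() elsewhere in the series; that change is not visible in this hunk). The resulting policy, sketched:

    /* Sketch of the init-time policy:
     *   !HAS_IPC(i915)                         -> stays disabled
     *   KBL/CFL && !dram_info.symmetric_memory -> disabled (WA #1141)
     *   otherwise                              -> enabled
     * intel_enable_ipc() then mirrors ipc_enabled into DISP_ARB_CTL2.
     */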
@@ -8736,6 +8893,10 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
8736 /* This is not an Wa. Enable to reduce Sampler power */ 8893 /* This is not an Wa. Enable to reduce Sampler power */
8737 I915_WRITE(GEN10_DFR_RATIO_EN_AND_CHICKEN, 8894 I915_WRITE(GEN10_DFR_RATIO_EN_AND_CHICKEN,
8738 I915_READ(GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE); 8895 I915_READ(GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE);
8896
8897 /* WaEnable32PlaneMode:icl */
8898 I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
8899 _MASKED_BIT_ENABLE(GEN11_ENABLE_32_PLANE_MODE));
8739} 8900}
8740 8901
8741static void cnp_init_clock_gating(struct drm_i915_private *dev_priv) 8902static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
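Unlike the read-modify-write just above it, the WaEnable32PlaneMode write uses the masked-register convention, where the high 16 bits select which low bits the write may change, so the other chicken bits are left untouched without a read:

    /* From the driver's helper definitions:
     *   _MASKED_BIT_ENABLE(bit)  == (bit << 16) | bit
     *   _MASKED_BIT_DISABLE(bit) == (bit << 16)
     * e.g. enabling bit 7 writes 0x00800080: "unmask bit 7, set it".
     */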
@@ -9313,8 +9474,6 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
9313/* Set up chip specific power management-related functions */ 9474/* Set up chip specific power management-related functions */
9314void intel_init_pm(struct drm_i915_private *dev_priv) 9475void intel_init_pm(struct drm_i915_private *dev_priv)
9315{ 9476{
9316 intel_fbc_init(dev_priv);
9317
9318 /* For cxsr */ 9477 /* For cxsr */
9319 if (IS_PINEVIEW(dev_priv)) 9478 if (IS_PINEVIEW(dev_priv))
9320 i915_pineview_get_mem_freq(dev_priv); 9479 i915_pineview_get_mem_freq(dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index b6838b525502..54fa17a5596a 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -71,6 +71,10 @@ static bool psr_global_enabled(u32 debug)
71static bool intel_psr2_enabled(struct drm_i915_private *dev_priv, 71static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
72 const struct intel_crtc_state *crtc_state) 72 const struct intel_crtc_state *crtc_state)
73{ 73{
74 /* Disable PSR2 by default for all platforms */
75 if (i915_modparams.enable_psr == -1)
76 return false;
77
74 switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) { 78 switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
75 case I915_PSR_DEBUG_FORCE_PSR1: 79 case I915_PSR_DEBUG_FORCE_PSR1:
76 return false; 80 return false;
@@ -79,25 +83,42 @@ static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
79 } 83 }
80} 84}
81 85
86static int edp_psr_shift(enum transcoder cpu_transcoder)
87{
88 switch (cpu_transcoder) {
89 case TRANSCODER_A:
90 return EDP_PSR_TRANSCODER_A_SHIFT;
91 case TRANSCODER_B:
92 return EDP_PSR_TRANSCODER_B_SHIFT;
93 case TRANSCODER_C:
94 return EDP_PSR_TRANSCODER_C_SHIFT;
95 default:
96 MISSING_CASE(cpu_transcoder);
97 /* fallthrough */
98 case TRANSCODER_EDP:
99 return EDP_PSR_TRANSCODER_EDP_SHIFT;
100 }
101}
102
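The per-transcoder PSR status bits sit at fixed shifts inside EDP_PSR_IIR/IMR that don't follow the transcoder enum, hence the explicit mapping with TRANSCODER_EDP as the WARNed fallback. Assuming the shift layout from this series' register changes (EDP=0, A=8, B=16, C=24, with error/post-exit/pre-entry at bits 2/1/0 of each field):

    /* Assumed layout; e.g. the transcoder B error bit: */
    u32 err_b = EDP_PSR_ERROR(edp_psr_shift(TRANSCODER_B));
                    /* = 1 << (16 + 2), i.e. bit 18 */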
82void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug) 103void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug)
83{ 104{
84 u32 debug_mask, mask; 105 u32 debug_mask, mask;
106 enum transcoder cpu_transcoder;
107 u32 transcoders = BIT(TRANSCODER_EDP);
108
109 if (INTEL_GEN(dev_priv) >= 8)
110 transcoders |= BIT(TRANSCODER_A) |
111 BIT(TRANSCODER_B) |
112 BIT(TRANSCODER_C);
85 113
86 mask = EDP_PSR_ERROR(TRANSCODER_EDP); 114 debug_mask = 0;
87 debug_mask = EDP_PSR_POST_EXIT(TRANSCODER_EDP) | 115 mask = 0;
88 EDP_PSR_PRE_ENTRY(TRANSCODER_EDP); 116 for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
89 117 int shift = edp_psr_shift(cpu_transcoder);
90 if (INTEL_GEN(dev_priv) >= 8) { 118
91 mask |= EDP_PSR_ERROR(TRANSCODER_A) | 119 mask |= EDP_PSR_ERROR(shift);
92 EDP_PSR_ERROR(TRANSCODER_B) | 120 debug_mask |= EDP_PSR_POST_EXIT(shift) |
93 EDP_PSR_ERROR(TRANSCODER_C); 121 EDP_PSR_PRE_ENTRY(shift);
94
95 debug_mask |= EDP_PSR_POST_EXIT(TRANSCODER_A) |
96 EDP_PSR_PRE_ENTRY(TRANSCODER_A) |
97 EDP_PSR_POST_EXIT(TRANSCODER_B) |
98 EDP_PSR_PRE_ENTRY(TRANSCODER_B) |
99 EDP_PSR_POST_EXIT(TRANSCODER_C) |
100 EDP_PSR_PRE_ENTRY(TRANSCODER_C);
101 } 122 }
102 123
103 if (debug & I915_PSR_DEBUG_IRQ) 124 if (debug & I915_PSR_DEBUG_IRQ)
@@ -155,18 +176,20 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
155 BIT(TRANSCODER_C); 176 BIT(TRANSCODER_C);
156 177
157 for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) { 178 for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
179 int shift = edp_psr_shift(cpu_transcoder);
180
158 /* FIXME: Exit PSR and link train manually when this happens. */ 181 /* FIXME: Exit PSR and link train manually when this happens. */
159 if (psr_iir & EDP_PSR_ERROR(cpu_transcoder)) 182 if (psr_iir & EDP_PSR_ERROR(shift))
160 DRM_DEBUG_KMS("[transcoder %s] PSR aux error\n", 183 DRM_DEBUG_KMS("[transcoder %s] PSR aux error\n",
161 transcoder_name(cpu_transcoder)); 184 transcoder_name(cpu_transcoder));
162 185
163 if (psr_iir & EDP_PSR_PRE_ENTRY(cpu_transcoder)) { 186 if (psr_iir & EDP_PSR_PRE_ENTRY(shift)) {
164 dev_priv->psr.last_entry_attempt = time_ns; 187 dev_priv->psr.last_entry_attempt = time_ns;
165 DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n", 188 DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
166 transcoder_name(cpu_transcoder)); 189 transcoder_name(cpu_transcoder));
167 } 190 }
168 191
169 if (psr_iir & EDP_PSR_POST_EXIT(cpu_transcoder)) { 192 if (psr_iir & EDP_PSR_POST_EXIT(shift)) {
170 dev_priv->psr.last_exit = time_ns; 193 dev_priv->psr.last_exit = time_ns;
171 DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n", 194 DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
172 transcoder_name(cpu_transcoder)); 195 transcoder_name(cpu_transcoder));
@@ -294,7 +317,8 @@ static void intel_psr_setup_vsc(struct intel_dp *intel_dp,
294 psr_vsc.sdp_header.HB3 = 0x8; 317 psr_vsc.sdp_header.HB3 = 0x8;
295 } 318 }
296 319
297 intel_dig_port->write_infoframe(&intel_dig_port->base.base, crtc_state, 320 intel_dig_port->write_infoframe(&intel_dig_port->base,
321 crtc_state,
298 DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc)); 322 DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
299} 323}
300 324
@@ -553,11 +577,31 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
553 dev_priv->psr.active = true; 577 dev_priv->psr.active = true;
554} 578}
555 579
580static i915_reg_t gen9_chicken_trans_reg(struct drm_i915_private *dev_priv,
581 enum transcoder cpu_transcoder)
582{
583 static const i915_reg_t regs[] = {
584 [TRANSCODER_A] = CHICKEN_TRANS_A,
585 [TRANSCODER_B] = CHICKEN_TRANS_B,
586 [TRANSCODER_C] = CHICKEN_TRANS_C,
587 [TRANSCODER_EDP] = CHICKEN_TRANS_EDP,
588 };
589
590 WARN_ON(INTEL_GEN(dev_priv) < 9);
591
592 if (WARN_ON(cpu_transcoder >= ARRAY_SIZE(regs) ||
593 !regs[cpu_transcoder].reg))
594 cpu_transcoder = TRANSCODER_A;
595
596 return regs[cpu_transcoder];
597}
598
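The table replaces an arithmetic CHICKEN_TRANS(transcoder) lookup (still visible in the removed line below): a transcoder outside the table, or one whose entry is zero-initialized (the DSI transcoders, for instance), trips the WARN and falls back to TRANSCODER_A rather than issuing an MMIO access at a computed, possibly bogus, offset. Usage sketch, matching the caller below:

    i915_reg_t reg = gen9_chicken_trans_reg(dev_priv, cpu_transcoder);
    u32 chicken = I915_READ(reg);   /* then modify and write back */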
556static void intel_psr_enable_source(struct intel_dp *intel_dp, 599static void intel_psr_enable_source(struct intel_dp *intel_dp,
557 const struct intel_crtc_state *crtc_state) 600 const struct intel_crtc_state *crtc_state)
558{ 601{
559 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 602 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
560 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 603 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
604 u32 mask;
561 605
562 /* Only HSW and BDW have PSR AUX registers that need to be setup. SKL+ 606 /* Only HSW and BDW have PSR AUX registers that need to be setup. SKL+
563 * use hardcoded values PSR AUX transactions 607 * use hardcoded values PSR AUX transactions
@@ -566,37 +610,34 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
566 hsw_psr_setup_aux(intel_dp); 610 hsw_psr_setup_aux(intel_dp);
567 611
568 if (dev_priv->psr.psr2_enabled) { 612 if (dev_priv->psr.psr2_enabled) {
569 u32 chicken = I915_READ(CHICKEN_TRANS(cpu_transcoder)); 613 i915_reg_t reg = gen9_chicken_trans_reg(dev_priv,
614 cpu_transcoder);
615 u32 chicken = I915_READ(reg);
570 616
571 if (INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv)) 617 if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv))
572 chicken |= (PSR2_VSC_ENABLE_PROG_HEADER 618 chicken |= (PSR2_VSC_ENABLE_PROG_HEADER
573 | PSR2_ADD_VERTICAL_LINE_COUNT); 619 | PSR2_ADD_VERTICAL_LINE_COUNT);
574 620
575 else 621 else
576 chicken &= ~VSC_DATA_SEL_SOFTWARE_CONTROL; 622 chicken &= ~VSC_DATA_SEL_SOFTWARE_CONTROL;
577 I915_WRITE(CHICKEN_TRANS(cpu_transcoder), chicken); 623 I915_WRITE(reg, chicken);
578
579 I915_WRITE(EDP_PSR_DEBUG,
580 EDP_PSR_DEBUG_MASK_MEMUP |
581 EDP_PSR_DEBUG_MASK_HPD |
582 EDP_PSR_DEBUG_MASK_LPSP |
583 EDP_PSR_DEBUG_MASK_MAX_SLEEP |
584 EDP_PSR_DEBUG_MASK_DISP_REG_WRITE);
585 } else {
586 /*
587 * Per Spec: Avoid continuous PSR exit by masking MEMUP
588 * and HPD. also mask LPSP to avoid dependency on other
589 * drivers that might block runtime_pm besides
590 * preventing other hw tracking issues now we can rely
591 * on frontbuffer tracking.
592 */
593 I915_WRITE(EDP_PSR_DEBUG,
594 EDP_PSR_DEBUG_MASK_MEMUP |
595 EDP_PSR_DEBUG_MASK_HPD |
596 EDP_PSR_DEBUG_MASK_LPSP |
597 EDP_PSR_DEBUG_MASK_DISP_REG_WRITE |
598 EDP_PSR_DEBUG_MASK_MAX_SLEEP);
599 } 624 }
625
626 /*
 627 * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD.
 628 * Also mask LPSP to avoid dependency on other drivers that might
 629 * block runtime_pm besides preventing other hw tracking issues,
 630 * now that we can rely on frontbuffer tracking.
631 */
632 mask = EDP_PSR_DEBUG_MASK_MEMUP |
633 EDP_PSR_DEBUG_MASK_HPD |
634 EDP_PSR_DEBUG_MASK_LPSP |
635 EDP_PSR_DEBUG_MASK_MAX_SLEEP;
636
637 if (INTEL_GEN(dev_priv) < 11)
638 mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
639
640 I915_WRITE(EDP_PSR_DEBUG, mask);
600} 641}
601 642
602static void intel_psr_enable_locked(struct drm_i915_private *dev_priv, 643static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
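The EDP_PSR_DEBUG write, previously duplicated with identical bits in both branches, is hoisted out so PSR1 and PSR2 share it; the only split left is that EDP_PSR_DEBUG_MASK_DISP_REG_WRITE is set only below gen11, that bit not being present on ICL+. The resulting values, sketched:

    /* What gets written:
     *   gen9/10: MEMUP | HPD | LPSP | MAX_SLEEP | DISP_REG_WRITE
     *   gen11+:  MEMUP | HPD | LPSP | MAX_SLEEP
     */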
@@ -656,49 +697,34 @@ unlock:
656 mutex_unlock(&dev_priv->psr.lock); 697 mutex_unlock(&dev_priv->psr.lock);
657} 698}
658 699
659static void 700static void intel_psr_exit(struct drm_i915_private *dev_priv)
660intel_psr_disable_source(struct intel_dp *intel_dp)
661{ 701{
662 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 702 u32 val;
663
664 if (dev_priv->psr.active) {
665 i915_reg_t psr_status;
666 u32 psr_status_mask;
667
668 if (dev_priv->psr.psr2_enabled) {
669 psr_status = EDP_PSR2_STATUS;
670 psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
671
672 I915_WRITE(EDP_PSR2_CTL,
673 I915_READ(EDP_PSR2_CTL) &
674 ~(EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE));
675
676 } else {
677 psr_status = EDP_PSR_STATUS;
678 psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
679
680 I915_WRITE(EDP_PSR_CTL,
681 I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
682 }
683 703
684 /* Wait till PSR is idle */ 704 if (!dev_priv->psr.active) {
685 if (intel_wait_for_register(dev_priv, 705 if (INTEL_GEN(dev_priv) >= 9)
686 psr_status, psr_status_mask, 0, 706 WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
687 2000)) 707 WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
688 DRM_ERROR("Timed out waiting for PSR Idle State\n"); 708 return;
709 }
689 710
690 dev_priv->psr.active = false; 711 if (dev_priv->psr.psr2_enabled) {
712 val = I915_READ(EDP_PSR2_CTL);
713 WARN_ON(!(val & EDP_PSR2_ENABLE));
714 I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
691 } else { 715 } else {
692 if (dev_priv->psr.psr2_enabled) 716 val = I915_READ(EDP_PSR_CTL);
693 WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE); 717 WARN_ON(!(val & EDP_PSR_ENABLE));
694 else 718 I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
695 WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
696 } 719 }
720 dev_priv->psr.active = false;
697} 721}
698 722
699static void intel_psr_disable_locked(struct intel_dp *intel_dp) 723static void intel_psr_disable_locked(struct intel_dp *intel_dp)
700{ 724{
701 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 725 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
726 i915_reg_t psr_status;
727 u32 psr_status_mask;
702 728
703 lockdep_assert_held(&dev_priv->psr.lock); 729 lockdep_assert_held(&dev_priv->psr.lock);
704 730
@@ -707,7 +733,21 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
707 733
708 DRM_DEBUG_KMS("Disabling PSR%s\n", 734 DRM_DEBUG_KMS("Disabling PSR%s\n",
709 dev_priv->psr.psr2_enabled ? "2" : "1"); 735 dev_priv->psr.psr2_enabled ? "2" : "1");
710 intel_psr_disable_source(intel_dp); 736
737 intel_psr_exit(dev_priv);
738
739 if (dev_priv->psr.psr2_enabled) {
740 psr_status = EDP_PSR2_STATUS;
741 psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
742 } else {
743 psr_status = EDP_PSR_STATUS;
744 psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
745 }
746
747 /* Wait till PSR is idle */
748 if (intel_wait_for_register(dev_priv, psr_status, psr_status_mask, 0,
749 2000))
750 DRM_ERROR("Timed out waiting PSR idle state\n");
711 751
712 /* Disable PSR on Sink */ 752 /* Disable PSR on Sink */
713 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0); 753 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
@@ -925,25 +965,6 @@ unlock:
925 mutex_unlock(&dev_priv->psr.lock); 965 mutex_unlock(&dev_priv->psr.lock);
926} 966}
927 967
928static void intel_psr_exit(struct drm_i915_private *dev_priv)
929{
930 u32 val;
931
932 if (!dev_priv->psr.active)
933 return;
934
935 if (dev_priv->psr.psr2_enabled) {
936 val = I915_READ(EDP_PSR2_CTL);
937 WARN_ON(!(val & EDP_PSR2_ENABLE));
938 I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
939 } else {
940 val = I915_READ(EDP_PSR_CTL);
941 WARN_ON(!(val & EDP_PSR_ENABLE));
942 I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
943 }
944 dev_priv->psr.active = false;
945}
946
947/** 968/**
948 * intel_psr_invalidate - Invalidade PSR 969 * intel_psr_invalidate - Invalidade PSR
949 * @dev_priv: i915 device 970 * @dev_priv: i915 device
@@ -1026,20 +1047,16 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
1026 1047
1027 /* By definition flush = invalidate + flush */ 1048 /* By definition flush = invalidate + flush */
1028 if (frontbuffer_bits) { 1049 if (frontbuffer_bits) {
1029 if (dev_priv->psr.psr2_enabled) { 1050 /*
1030 intel_psr_exit(dev_priv); 1051 * Display WA #0884: all
1031 } else { 1052 * This documented WA for bxt can be safely applied
1032 /* 1053 * broadly so we can force HW tracking to exit PSR
1033 * Display WA #0884: all 1054 * instead of disabling and re-enabling.
1034 * This documented WA for bxt can be safely applied 1055 * Workaround tells us to write 0 to CUR_SURFLIVE_A,
 1035 * broadly so we can force HW tracking to exit PSR 1056 * but it makes more sense to write to the current active
1036 * instead of disabling and re-enabling. 1057 * pipe.
1037 * Workaround tells us to write 0 to CUR_SURFLIVE_A, 1058 */
 1038 * but it makes more sense to write to the current active 1059 I915_WRITE(CURSURFLIVE(pipe), 0);
1039 * pipe.
1040 */
1041 I915_WRITE(CURSURFLIVE(pipe), 0);
1042 }
1043 } 1060 }
1044 1061
1045 if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits) 1062 if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
@@ -1065,12 +1082,9 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
1065 if (!dev_priv->psr.sink_support) 1082 if (!dev_priv->psr.sink_support)
1066 return; 1083 return;
1067 1084
1068 if (i915_modparams.enable_psr == -1) { 1085 if (i915_modparams.enable_psr == -1)
1069 i915_modparams.enable_psr = dev_priv->vbt.psr.enable; 1086 if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
1070 1087 i915_modparams.enable_psr = 0;
1071 /* Per platform default: all disabled. */
1072 i915_modparams.enable_psr = 0;
1073 }
1074 1088
1075 /* Set link_standby x link_off defaults */ 1089 /* Set link_standby x link_off defaults */
1076 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 1090 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
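Together with the early-out added to intel_psr2_enabled() above, the reworked modparam handling gives enable_psr == -1 ('auto') a real meaning instead of collapsing it to 0: PSR1 follows the VBT on gen9+, and PSR2 stays off unless explicitly requested. The default policy, sketched:

    /* Sketch of the auto (-1) policy:
     *   gen < 9 or VBT disables PSR -> enable_psr forced to 0
     *   otherwise                   -> PSR1 may be enabled;
     *                                  intel_psr2_enabled() still
     *                                  returns false for -1
     */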
@@ -1130,8 +1144,6 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
1130 intel_psr_disable_locked(intel_dp); 1144 intel_psr_disable_locked(intel_dp);
1131 /* clear status register */ 1145 /* clear status register */
1132 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, val); 1146 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, val);
1133
1134 /* TODO: handle PSR2 errors */
1135exit: 1147exit:
1136 mutex_unlock(&psr->lock); 1148 mutex_unlock(&psr->lock);
1137} 1149}
diff --git a/drivers/gpu/drm/i915/intel_quirks.c b/drivers/gpu/drm/i915/intel_quirks.c
new file mode 100644
index 000000000000..ec2b0fc92b8b
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_quirks.c
@@ -0,0 +1,169 @@
1// SPDX-License-Identifier: MIT
2/*
3 * Copyright © 2018 Intel Corporation
4 */
5
6#include <linux/dmi.h>
7
8#include "intel_drv.h"
9
10/*
11 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
12 */
13static void quirk_ssc_force_disable(struct drm_i915_private *i915)
14{
15 i915->quirks |= QUIRK_LVDS_SSC_DISABLE;
16 DRM_INFO("applying lvds SSC disable quirk\n");
17}
18
19/*
20 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
21 * brightness value
22 */
23static void quirk_invert_brightness(struct drm_i915_private *i915)
24{
25 i915->quirks |= QUIRK_INVERT_BRIGHTNESS;
26 DRM_INFO("applying inverted panel brightness quirk\n");
27}
28
 29/* Some VBTs incorrectly indicate that no backlight is present */
30static void quirk_backlight_present(struct drm_i915_private *i915)
31{
32 i915->quirks |= QUIRK_BACKLIGHT_PRESENT;
33 DRM_INFO("applying backlight present quirk\n");
34}
35
 36/* Toshiba Satellite P50-C-18C requires the T12 delay to be at least
 37 * 800 ms, which is 300 ms greater than the eDP spec T12 minimum.
 38 */
39static void quirk_increase_t12_delay(struct drm_i915_private *i915)
40{
41 i915->quirks |= QUIRK_INCREASE_T12_DELAY;
42 DRM_INFO("Applying T12 delay quirk\n");
43}
44
45/*
 46 * GeminiLake NUC HDMI outputs require additional off time;
 47 * this allows the onboard retimer to correctly sync to the signal.
48 */
49static void quirk_increase_ddi_disabled_time(struct drm_i915_private *i915)
50{
51 i915->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME;
52 DRM_INFO("Applying Increase DDI Disabled quirk\n");
53}
54
55struct intel_quirk {
56 int device;
57 int subsystem_vendor;
58 int subsystem_device;
59 void (*hook)(struct drm_i915_private *i915);
60};
61
62/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
63struct intel_dmi_quirk {
64 void (*hook)(struct drm_i915_private *i915);
65 const struct dmi_system_id (*dmi_id_list)[];
66};
67
68static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
69{
70 DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
71 return 1;
72}
73
74static const struct intel_dmi_quirk intel_dmi_quirks[] = {
75 {
76 .dmi_id_list = &(const struct dmi_system_id[]) {
77 {
78 .callback = intel_dmi_reverse_brightness,
79 .ident = "NCR Corporation",
80 .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
81 DMI_MATCH(DMI_PRODUCT_NAME, ""),
82 },
83 },
84 { } /* terminating entry */
85 },
86 .hook = quirk_invert_brightness,
87 },
88};
89
90static struct intel_quirk intel_quirks[] = {
91 /* Lenovo U160 cannot use SSC on LVDS */
92 { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
93
94 /* Sony Vaio Y cannot use SSC on LVDS */
95 { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
96
97 /* Acer Aspire 5734Z must invert backlight brightness */
98 { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
99
100 /* Acer/eMachines G725 */
101 { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
102
103 /* Acer/eMachines e725 */
104 { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
105
106 /* Acer/Packard Bell NCL20 */
107 { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
108
109 /* Acer Aspire 4736Z */
110 { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
111
112 /* Acer Aspire 5336 */
113 { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
114
115 /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
116 { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
117
118 /* Acer C720 Chromebook (Core i3 4005U) */
119 { 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
120
121 /* Apple Macbook 2,1 (Core 2 T7400) */
122 { 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
123
124 /* Apple Macbook 4,1 */
125 { 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },
126
127 /* Toshiba CB35 Chromebook (Celeron 2955U) */
128 { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
129
130 /* HP Chromebook 14 (Celeron 2955U) */
131 { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
132
133 /* Dell Chromebook 11 */
134 { 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
135
136 /* Dell Chromebook 11 (2015 version) */
137 { 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
138
139 /* Toshiba Satellite P50-C-18C */
140 { 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },
141
142 /* GeminiLake NUC */
143 { 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
144 { 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
 145 	/* ASRock ITX */
146 { 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
147 { 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
148};
149
150void intel_init_quirks(struct drm_i915_private *i915)
151{
152 struct pci_dev *d = i915->drm.pdev;
153 int i;
154
155 for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
156 struct intel_quirk *q = &intel_quirks[i];
157
158 if (d->device == q->device &&
159 (d->subsystem_vendor == q->subsystem_vendor ||
160 q->subsystem_vendor == PCI_ANY_ID) &&
161 (d->subsystem_device == q->subsystem_device ||
162 q->subsystem_device == PCI_ANY_ID))
163 q->hook(i915);
164 }
165 for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
166 if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
167 intel_dmi_quirks[i].hook(i915);
168 }
169}
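
The matching loop in intel_init_quirks() treats PCI_ANY_ID as a wildcard for the subsystem fields, so a table entry can target one exact machine or a whole family. A self-contained sketch of the predicate, using raw integers in place of struct pci_dev:

    #include <stdbool.h>
    #include <stdint.h>

    #define ANY_ID 0xffffu  /* stands in for PCI_ANY_ID */

    /* Sketch of the subsystem-ID match used by intel_init_quirks() above. */
    static bool quirk_matches(uint16_t dev, uint16_t sub_ven, uint16_t sub_dev,
                              uint16_t q_dev, uint16_t q_sub_ven, uint16_t q_sub_dev)
    {
            return dev == q_dev &&
                   (sub_ven == q_sub_ven || q_sub_ven == ANY_ID) &&
                   (sub_dev == q_sub_dev || q_sub_dev == ANY_ID);
    }

    /* e.g. the Lenovo U160 entry: quirk_matches(0x0046, 0x17aa, 0x3920,
     *                                           0x0046, 0x17aa, 0x3920) is true */
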
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index d0ef50bf930a..87eebc13c0d8 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -91,6 +91,7 @@ static int
91gen4_render_ring_flush(struct i915_request *rq, u32 mode) 91gen4_render_ring_flush(struct i915_request *rq, u32 mode)
92{ 92{
93 u32 cmd, *cs; 93 u32 cmd, *cs;
94 int i;
94 95
95 /* 96 /*
96 * read/write caches: 97 * read/write caches:
@@ -127,12 +128,45 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
127 cmd |= MI_INVALIDATE_ISP; 128 cmd |= MI_INVALIDATE_ISP;
128 } 129 }
129 130
130 cs = intel_ring_begin(rq, 2); 131 i = 2;
132 if (mode & EMIT_INVALIDATE)
133 i += 20;
134
135 cs = intel_ring_begin(rq, i);
131 if (IS_ERR(cs)) 136 if (IS_ERR(cs))
132 return PTR_ERR(cs); 137 return PTR_ERR(cs);
133 138
134 *cs++ = cmd; 139 *cs++ = cmd;
135 *cs++ = MI_NOOP; 140
141 /*
142 * A random delay to let the CS invalidate take effect? Without this
143 * delay, the GPU relocation path fails as the CS does not see
144 * the updated contents. Just as important, if we apply the flushes
145 * to the EMIT_FLUSH branch (i.e. immediately after the relocation
146 * write and before the invalidate on the next batch), the relocations
 147 	 * still fail. This implies there is a delay following invalidation
148 * that is required to reset the caches as opposed to a delay to
149 * ensure the memory is written.
150 */
151 if (mode & EMIT_INVALIDATE) {
152 *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
153 *cs++ = i915_ggtt_offset(rq->engine->scratch) |
154 PIPE_CONTROL_GLOBAL_GTT;
155 *cs++ = 0;
156 *cs++ = 0;
157
158 for (i = 0; i < 12; i++)
159 *cs++ = MI_FLUSH;
160
161 *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
162 *cs++ = i915_ggtt_offset(rq->engine->scratch) |
163 PIPE_CONTROL_GLOBAL_GTT;
164 *cs++ = 0;
165 *cs++ = 0;
166 }
167
168 *cs++ = cmd;
169
136 intel_ring_advance(rq, cs); 170 intel_ring_advance(rq, cs);
137 171
138 return 0; 172 return 0;
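
The ring-space request grows from 2 to 22 dwords on the EMIT_INVALIDATE path: the command is emitted twice, and the delay sequence in between is two 4-dword PIPE_CONTROL QW writes bracketing twelve MI_FLUSHes. A quick accounting of the i computed above, assuming GFX_OP_PIPE_CONTROL(4) expands to a 4-dword packet:

    /* Dword budget for the EMIT_INVALIDATE path in gen4_render_ring_flush(). */
    enum {
            CMD_DWORDS      = 2,    /* cmd written before and after the delay */
            PIPE_CTL_DWORDS = 4,    /* one GFX_OP_PIPE_CONTROL(4) packet */
            FLUSH_DWORDS    = 12,   /* twelve MI_FLUSH dwords */
            DELAY_DWORDS    = 2 * PIPE_CTL_DWORDS + FLUSH_DWORDS,   /* 20 */
            TOTAL_DWORDS    = CMD_DWORDS + DELAY_DWORDS             /* 22 = 2 + 20 */
    };
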
@@ -574,7 +608,9 @@ static void skip_request(struct i915_request *rq)
574 608
575static void reset_ring(struct intel_engine_cs *engine, struct i915_request *rq) 609static void reset_ring(struct intel_engine_cs *engine, struct i915_request *rq)
576{ 610{
577 GEM_TRACE("%s seqno=%x\n", engine->name, rq ? rq->global_seqno : 0); 611 GEM_TRACE("%s request global=%d, current=%d\n",
612 engine->name, rq ? rq->global_seqno : 0,
613 intel_engine_get_seqno(engine));
578 614
579 /* 615 /*
580 * Try to restore the logical GPU state to match the continuation 616 * Try to restore the logical GPU state to match the continuation
@@ -1021,8 +1057,7 @@ i915_emit_bb_start(struct i915_request *rq,
1021int intel_ring_pin(struct intel_ring *ring) 1057int intel_ring_pin(struct intel_ring *ring)
1022{ 1058{
1023 struct i915_vma *vma = ring->vma; 1059 struct i915_vma *vma = ring->vma;
1024 enum i915_map_type map = 1060 enum i915_map_type map = i915_coherent_map_type(vma->vm->i915);
1025 HAS_LLC(vma->vm->i915) ? I915_MAP_WB : I915_MAP_WC;
1026 unsigned int flags; 1061 unsigned int flags;
1027 void *addr; 1062 void *addr;
1028 int ret; 1063 int ret;
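
The open-coded HAS_LLC() ternary is folded into i915_coherent_map_type(), so every caller picks write-back mappings on LLC parts and write-combined mappings otherwise. A standalone analogue of the choice the helper presumably makes (the real definition lives elsewhere in this series):

    #include <stdbool.h>

    enum map_type { MAP_WB, MAP_WC };       /* analogues of I915_MAP_WB/WC */

    /* Sketch: pick a cached (write-back) mapping when the GPU shares the
     * CPU's last-level cache, write-combined otherwise; this mirrors the
     * HAS_LLC() ternary removed above. */
    static enum map_type coherent_map_type(bool has_llc)
    {
            return has_llc ? MAP_WB : MAP_WC;
    }
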
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 2dfa585712c2..8a2270b209b0 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: MIT */
2#ifndef _INTEL_RINGBUFFER_H_ 2#ifndef _INTEL_RINGBUFFER_H_
3#define _INTEL_RINGBUFFER_H_ 3#define _INTEL_RINGBUFFER_H_
4 4
@@ -93,11 +93,11 @@ hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
93#define I915_MAX_SUBSLICES 8 93#define I915_MAX_SUBSLICES 8
94 94
95#define instdone_slice_mask(dev_priv__) \ 95#define instdone_slice_mask(dev_priv__) \
96 (INTEL_GEN(dev_priv__) == 7 ? \ 96 (IS_GEN7(dev_priv__) ? \
97 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask) 97 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)
98 98
99#define instdone_subslice_mask(dev_priv__) \ 99#define instdone_subslice_mask(dev_priv__) \
100 (INTEL_GEN(dev_priv__) == 7 ? \ 100 (IS_GEN7(dev_priv__) ? \
101 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask[0]) 101 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask[0])
102 102
103#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \ 103#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
@@ -190,11 +190,22 @@ enum intel_engine_id {
190}; 190};
191 191
192struct i915_priolist { 192struct i915_priolist {
193 struct list_head requests[I915_PRIORITY_COUNT];
193 struct rb_node node; 194 struct rb_node node;
194 struct list_head requests; 195 unsigned long used;
195 int priority; 196 int priority;
196}; 197};
197 198
199#define priolist_for_each_request(it, plist, idx) \
200 for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \
201 list_for_each_entry(it, &(plist)->requests[idx], sched.link)
202
203#define priolist_for_each_request_consume(it, n, plist, idx) \
204 for (; (idx = ffs((plist)->used)); (plist)->used &= ~BIT(idx - 1)) \
205 list_for_each_entry_safe(it, n, \
206 &(plist)->requests[idx - 1], \
207 sched.link)
208
198struct st_preempt_hang { 209struct st_preempt_hang {
199 struct completion completion; 210 struct completion completion;
200 bool inject_hang; 211 bool inject_hang;
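
The single requests list keyed by an int priority becomes an array of lists plus a used bitmask; the consume variant walks set bits with ffs(), clearing each bit as its list is drained, so the scheduler only ever visits non-empty priority levels. A standalone sketch of that bit-driven walk, using the libc ffs() (1-based index, 0 when no bits are set) in place of the kernel's:

    #include <stdio.h>
    #include <strings.h>        /* ffs(); the kernel has its own ffs() */

    /* Sketch: visit only the non-empty priority buckets flagged in 'used',
     * clearing each bit as its bucket is drained, as
     * priolist_for_each_request_consume() does above. */
    static void drain_buckets(unsigned int used)
    {
            int idx;

            while ((idx = ffs((int)used))) {        /* 1-based lowest set bit */
                    used &= ~(1u << (idx - 1));
                    printf("drain bucket %d\n", idx - 1);
                    /* real code: list_for_each_entry_safe() over
                     * plist->requests[idx - 1] */
            }
    }
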
@@ -487,11 +498,10 @@ struct intel_engine_cs {
487 */ 498 */
488 void (*submit_request)(struct i915_request *rq); 499 void (*submit_request)(struct i915_request *rq);
489 500
490 /* Call when the priority on a request has changed and it and its 501 /*
502 * Call when the priority on a request has changed and it and its
491 * dependencies may need rescheduling. Note the request itself may 503 * dependencies may need rescheduling. Note the request itself may
492 * not be ready to run! 504 * not be ready to run!
493 *
494 * Called under the struct_mutex.
495 */ 505 */
496 void (*schedule)(struct i915_request *request, 506 void (*schedule)(struct i915_request *request,
497 const struct i915_sched_attr *attr); 507 const struct i915_sched_attr *attr);
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 0fdabce647ab..1c2de9b69a19 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -208,7 +208,7 @@ bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
208 208
209 is_enabled = true; 209 is_enabled = true;
210 210
211 for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain)) { 211 for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
212 if (power_well->desc->always_on) 212 if (power_well->desc->always_on)
213 continue; 213 continue;
214 214
@@ -436,6 +436,15 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
436 I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX); 436 I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX);
437 437
438 hsw_wait_for_power_well_enable(dev_priv, power_well); 438 hsw_wait_for_power_well_enable(dev_priv, power_well);
439
440 /* Display WA #1178: icl */
441 if (IS_ICELAKE(dev_priv) &&
442 pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
443 !intel_bios_is_port_edp(dev_priv, port)) {
444 val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx));
445 val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
446 I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val);
447 }
439} 448}
440 449
441static void 450static void
@@ -456,6 +465,25 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
456 hsw_wait_for_power_well_disable(dev_priv, power_well); 465 hsw_wait_for_power_well_disable(dev_priv, power_well);
457} 466}
458 467
468#define ICL_AUX_PW_TO_CH(pw_idx) \
469 ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
470
471static void
472icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
473 struct i915_power_well *power_well)
474{
475 enum aux_ch aux_ch = ICL_AUX_PW_TO_CH(power_well->desc->hsw.idx);
476 u32 val;
477
478 val = I915_READ(DP_AUX_CH_CTL(aux_ch));
479 val &= ~DP_AUX_CH_CTL_TBT_IO;
480 if (power_well->desc->hsw.is_tc_tbt)
481 val |= DP_AUX_CH_CTL_TBT_IO;
482 I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);
483
484 hsw_power_well_enable(dev_priv, power_well);
485}
486
459/* 487/*
460 * We should only use the power well if we explicitly asked the hardware to 488 * We should only use the power well if we explicitly asked the hardware to
461 * enable it, so check if it's enabled and also check if we've requested it to 489 * enable it, so check if it's enabled and also check if we've requested it to
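
ICL_AUX_PW_TO_CH() only works because the AUX power-well indices and the aux_ch enum are laid out in the same order, so a fixed offset recovers the channel. A sketch of the arithmetic with illustrative values (not the real i915 constants):

    /* Sketch: index-to-channel mapping, assuming contiguous enums on both
     * sides; the numbers are illustrative only. */
    enum { PW_IDX_AUX_A = 10, PW_IDX_AUX_B, PW_IDX_AUX_C, PW_IDX_AUX_D };
    enum { CH_A, CH_B, CH_C, CH_D };

    #define AUX_PW_TO_CH(pw_idx) ((pw_idx) - PW_IDX_AUX_A + CH_A)

    /* AUX_PW_TO_CH(PW_IDX_AUX_C) == 12 - 10 + 0 == CH_C */
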
@@ -465,11 +493,25 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
465 struct i915_power_well *power_well) 493 struct i915_power_well *power_well)
466{ 494{
467 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; 495 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
496 enum i915_power_well_id id = power_well->desc->id;
468 int pw_idx = power_well->desc->hsw.idx; 497 int pw_idx = power_well->desc->hsw.idx;
469 u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) | 498 u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
470 HSW_PWR_WELL_CTL_STATE(pw_idx); 499 HSW_PWR_WELL_CTL_STATE(pw_idx);
500 u32 val;
471 501
472 return (I915_READ(regs->driver) & mask) == mask; 502 val = I915_READ(regs->driver);
503
504 /*
505 * On GEN9 big core due to a DMC bug the driver's request bits for PW1
506 * and the MISC_IO PW will be not restored, so check instead for the
507 * BIOS's own request bits, which are forced-on for these power wells
508 * when exiting DC5/6.
509 */
510 if (IS_GEN9(dev_priv) && !IS_GEN9_LP(dev_priv) &&
511 (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
512 val |= I915_READ(regs->bios);
513
514 return (val & mask) == mask;
473} 515}
474 516
475static void assert_can_enable_dc9(struct drm_i915_private *dev_priv) 517static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
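
On GEN9 big core the DMC loses the driver's request bits for PW1 and MISC_IO, so the enabled check above ORs in the BIOS copy before comparing against the request+state mask. A reduced sketch of that widened check, where dmc_affected abstracts the platform and well-ID test and the registers are passed as raw values:

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch: power-well enabled check that tolerates the DMC clearing
     * the driver's request bits. */
    static bool well_enabled(uint32_t driver_reg, uint32_t bios_reg,
                             uint32_t mask, bool dmc_affected)
    {
            uint32_t val = driver_reg;

            if (dmc_affected)
                    val |= bios_reg;  /* BIOS bits forced on when exiting DC5/6 */

            return (val & mask) == mask;
    }
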
@@ -551,7 +593,9 @@ static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
551 u32 mask; 593 u32 mask;
552 594
553 mask = DC_STATE_EN_UPTO_DC5; 595 mask = DC_STATE_EN_UPTO_DC5;
554 if (IS_GEN9_LP(dev_priv)) 596 if (INTEL_GEN(dev_priv) >= 11)
597 mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
598 else if (IS_GEN9_LP(dev_priv))
555 mask |= DC_STATE_EN_DC9; 599 mask |= DC_STATE_EN_DC9;
556 else 600 else
557 mask |= DC_STATE_EN_UPTO_DC6; 601 mask |= DC_STATE_EN_UPTO_DC6;
@@ -624,8 +668,13 @@ void bxt_enable_dc9(struct drm_i915_private *dev_priv)
624 assert_can_enable_dc9(dev_priv); 668 assert_can_enable_dc9(dev_priv);
625 669
626 DRM_DEBUG_KMS("Enabling DC9\n"); 670 DRM_DEBUG_KMS("Enabling DC9\n");
627 671 /*
628 intel_power_sequencer_reset(dev_priv); 672 * Power sequencer reset is not needed on
673 * platforms with South Display Engine on PCH,
674 * because PPS registers are always on.
675 */
676 if (!HAS_PCH_SPLIT(dev_priv))
677 intel_power_sequencer_reset(dev_priv);
629 gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9); 678 gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
630} 679}
631 680
@@ -707,7 +756,7 @@ static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
707 assert_csr_loaded(dev_priv); 756 assert_csr_loaded(dev_priv);
708} 757}
709 758
710static void skl_enable_dc6(struct drm_i915_private *dev_priv) 759void skl_enable_dc6(struct drm_i915_private *dev_priv)
711{ 760{
712 assert_can_enable_dc6(dev_priv); 761 assert_can_enable_dc6(dev_priv);
713 762
@@ -808,6 +857,14 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
808 857
809 if (IS_GEN9_LP(dev_priv)) 858 if (IS_GEN9_LP(dev_priv))
810 bxt_verify_ddi_phy_power_wells(dev_priv); 859 bxt_verify_ddi_phy_power_wells(dev_priv);
860
861 if (INTEL_GEN(dev_priv) >= 11)
862 /*
 863 		 * DMC retains HW context only for port A; the other combo
864 * PHY's HW context for port B is lost after DC transitions,
865 * so we need to restore it manually.
866 */
867 icl_combo_phys_init(dev_priv);
811} 868}
812 869
813static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv, 870static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
@@ -1608,7 +1665,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
1608 intel_display_power_domain_str(domain)); 1665 intel_display_power_domain_str(domain));
1609 power_domains->domain_use_count[domain]--; 1666 power_domains->domain_use_count[domain]--;
1610 1667
1611 for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain)) 1668 for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
1612 intel_power_well_put(dev_priv, power_well); 1669 intel_power_well_put(dev_priv, power_well);
1613 1670
1614 mutex_unlock(&power_domains->lock); 1671 mutex_unlock(&power_domains->lock);
@@ -2041,7 +2098,7 @@ static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
2041static const struct i915_power_well_desc i9xx_always_on_power_well[] = { 2098static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
2042 { 2099 {
2043 .name = "always-on", 2100 .name = "always-on",
2044 .always_on = 1, 2101 .always_on = true,
2045 .domains = POWER_DOMAIN_MASK, 2102 .domains = POWER_DOMAIN_MASK,
2046 .ops = &i9xx_always_on_power_well_ops, 2103 .ops = &i9xx_always_on_power_well_ops,
2047 .id = DISP_PW_ID_NONE, 2104 .id = DISP_PW_ID_NONE,
@@ -2058,7 +2115,7 @@ static const struct i915_power_well_ops i830_pipes_power_well_ops = {
2058static const struct i915_power_well_desc i830_power_wells[] = { 2115static const struct i915_power_well_desc i830_power_wells[] = {
2059 { 2116 {
2060 .name = "always-on", 2117 .name = "always-on",
2061 .always_on = 1, 2118 .always_on = true,
2062 .domains = POWER_DOMAIN_MASK, 2119 .domains = POWER_DOMAIN_MASK,
2063 .ops = &i9xx_always_on_power_well_ops, 2120 .ops = &i9xx_always_on_power_well_ops,
2064 .id = DISP_PW_ID_NONE, 2121 .id = DISP_PW_ID_NONE,
@@ -2102,7 +2159,7 @@ static const struct i915_power_well_regs hsw_power_well_regs = {
2102static const struct i915_power_well_desc hsw_power_wells[] = { 2159static const struct i915_power_well_desc hsw_power_wells[] = {
2103 { 2160 {
2104 .name = "always-on", 2161 .name = "always-on",
2105 .always_on = 1, 2162 .always_on = true,
2106 .domains = POWER_DOMAIN_MASK, 2163 .domains = POWER_DOMAIN_MASK,
2107 .ops = &i9xx_always_on_power_well_ops, 2164 .ops = &i9xx_always_on_power_well_ops,
2108 .id = DISP_PW_ID_NONE, 2165 .id = DISP_PW_ID_NONE,
@@ -2123,7 +2180,7 @@ static const struct i915_power_well_desc hsw_power_wells[] = {
2123static const struct i915_power_well_desc bdw_power_wells[] = { 2180static const struct i915_power_well_desc bdw_power_wells[] = {
2124 { 2181 {
2125 .name = "always-on", 2182 .name = "always-on",
2126 .always_on = 1, 2183 .always_on = true,
2127 .domains = POWER_DOMAIN_MASK, 2184 .domains = POWER_DOMAIN_MASK,
2128 .ops = &i9xx_always_on_power_well_ops, 2185 .ops = &i9xx_always_on_power_well_ops,
2129 .id = DISP_PW_ID_NONE, 2186 .id = DISP_PW_ID_NONE,
@@ -2166,7 +2223,7 @@ static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
2166static const struct i915_power_well_desc vlv_power_wells[] = { 2223static const struct i915_power_well_desc vlv_power_wells[] = {
2167 { 2224 {
2168 .name = "always-on", 2225 .name = "always-on",
2169 .always_on = 1, 2226 .always_on = true,
2170 .domains = POWER_DOMAIN_MASK, 2227 .domains = POWER_DOMAIN_MASK,
2171 .ops = &i9xx_always_on_power_well_ops, 2228 .ops = &i9xx_always_on_power_well_ops,
2172 .id = DISP_PW_ID_NONE, 2229 .id = DISP_PW_ID_NONE,
@@ -2242,7 +2299,7 @@ static const struct i915_power_well_desc vlv_power_wells[] = {
2242static const struct i915_power_well_desc chv_power_wells[] = { 2299static const struct i915_power_well_desc chv_power_wells[] = {
2243 { 2300 {
2244 .name = "always-on", 2301 .name = "always-on",
2245 .always_on = 1, 2302 .always_on = true,
2246 .domains = POWER_DOMAIN_MASK, 2303 .domains = POWER_DOMAIN_MASK,
2247 .ops = &i9xx_always_on_power_well_ops, 2304 .ops = &i9xx_always_on_power_well_ops,
2248 .id = DISP_PW_ID_NONE, 2305 .id = DISP_PW_ID_NONE,
@@ -2293,7 +2350,7 @@ bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
2293static const struct i915_power_well_desc skl_power_wells[] = { 2350static const struct i915_power_well_desc skl_power_wells[] = {
2294 { 2351 {
2295 .name = "always-on", 2352 .name = "always-on",
2296 .always_on = 1, 2353 .always_on = true,
2297 .domains = POWER_DOMAIN_MASK, 2354 .domains = POWER_DOMAIN_MASK,
2298 .ops = &i9xx_always_on_power_well_ops, 2355 .ops = &i9xx_always_on_power_well_ops,
2299 .id = DISP_PW_ID_NONE, 2356 .id = DISP_PW_ID_NONE,
@@ -2301,6 +2358,7 @@ static const struct i915_power_well_desc skl_power_wells[] = {
2301 { 2358 {
2302 .name = "power well 1", 2359 .name = "power well 1",
2303 /* Handled by the DMC firmware */ 2360 /* Handled by the DMC firmware */
2361 .always_on = true,
2304 .domains = 0, 2362 .domains = 0,
2305 .ops = &hsw_power_well_ops, 2363 .ops = &hsw_power_well_ops,
2306 .id = SKL_DISP_PW_1, 2364 .id = SKL_DISP_PW_1,
@@ -2313,6 +2371,7 @@ static const struct i915_power_well_desc skl_power_wells[] = {
2313 { 2371 {
2314 .name = "MISC IO power well", 2372 .name = "MISC IO power well",
2315 /* Handled by the DMC firmware */ 2373 /* Handled by the DMC firmware */
2374 .always_on = true,
2316 .domains = 0, 2375 .domains = 0,
2317 .ops = &hsw_power_well_ops, 2376 .ops = &hsw_power_well_ops,
2318 .id = SKL_DISP_PW_MISC_IO, 2377 .id = SKL_DISP_PW_MISC_IO,
@@ -2385,13 +2444,15 @@ static const struct i915_power_well_desc skl_power_wells[] = {
2385static const struct i915_power_well_desc bxt_power_wells[] = { 2444static const struct i915_power_well_desc bxt_power_wells[] = {
2386 { 2445 {
2387 .name = "always-on", 2446 .name = "always-on",
2388 .always_on = 1, 2447 .always_on = true,
2389 .domains = POWER_DOMAIN_MASK, 2448 .domains = POWER_DOMAIN_MASK,
2390 .ops = &i9xx_always_on_power_well_ops, 2449 .ops = &i9xx_always_on_power_well_ops,
2391 .id = DISP_PW_ID_NONE, 2450 .id = DISP_PW_ID_NONE,
2392 }, 2451 },
2393 { 2452 {
2394 .name = "power well 1", 2453 .name = "power well 1",
2454 /* Handled by the DMC firmware */
2455 .always_on = true,
2395 .domains = 0, 2456 .domains = 0,
2396 .ops = &hsw_power_well_ops, 2457 .ops = &hsw_power_well_ops,
2397 .id = SKL_DISP_PW_1, 2458 .id = SKL_DISP_PW_1,
@@ -2443,7 +2504,7 @@ static const struct i915_power_well_desc bxt_power_wells[] = {
2443static const struct i915_power_well_desc glk_power_wells[] = { 2504static const struct i915_power_well_desc glk_power_wells[] = {
2444 { 2505 {
2445 .name = "always-on", 2506 .name = "always-on",
2446 .always_on = 1, 2507 .always_on = true,
2447 .domains = POWER_DOMAIN_MASK, 2508 .domains = POWER_DOMAIN_MASK,
2448 .ops = &i9xx_always_on_power_well_ops, 2509 .ops = &i9xx_always_on_power_well_ops,
2449 .id = DISP_PW_ID_NONE, 2510 .id = DISP_PW_ID_NONE,
@@ -2451,6 +2512,7 @@ static const struct i915_power_well_desc glk_power_wells[] = {
2451 { 2512 {
2452 .name = "power well 1", 2513 .name = "power well 1",
2453 /* Handled by the DMC firmware */ 2514 /* Handled by the DMC firmware */
2515 .always_on = true,
2454 .domains = 0, 2516 .domains = 0,
2455 .ops = &hsw_power_well_ops, 2517 .ops = &hsw_power_well_ops,
2456 .id = SKL_DISP_PW_1, 2518 .id = SKL_DISP_PW_1,
@@ -2571,7 +2633,7 @@ static const struct i915_power_well_desc glk_power_wells[] = {
2571static const struct i915_power_well_desc cnl_power_wells[] = { 2633static const struct i915_power_well_desc cnl_power_wells[] = {
2572 { 2634 {
2573 .name = "always-on", 2635 .name = "always-on",
2574 .always_on = 1, 2636 .always_on = true,
2575 .domains = POWER_DOMAIN_MASK, 2637 .domains = POWER_DOMAIN_MASK,
2576 .ops = &i9xx_always_on_power_well_ops, 2638 .ops = &i9xx_always_on_power_well_ops,
2577 .id = DISP_PW_ID_NONE, 2639 .id = DISP_PW_ID_NONE,
@@ -2579,6 +2641,7 @@ static const struct i915_power_well_desc cnl_power_wells[] = {
2579 { 2641 {
2580 .name = "power well 1", 2642 .name = "power well 1",
2581 /* Handled by the DMC firmware */ 2643 /* Handled by the DMC firmware */
2644 .always_on = true,
2582 .domains = 0, 2645 .domains = 0,
2583 .ops = &hsw_power_well_ops, 2646 .ops = &hsw_power_well_ops,
2584 .id = SKL_DISP_PW_1, 2647 .id = SKL_DISP_PW_1,
@@ -2716,6 +2779,13 @@ static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
2716 .is_enabled = hsw_power_well_enabled, 2779 .is_enabled = hsw_power_well_enabled,
2717}; 2780};
2718 2781
2782static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = {
2783 .sync_hw = hsw_power_well_sync_hw,
2784 .enable = icl_tc_phy_aux_power_well_enable,
2785 .disable = hsw_power_well_disable,
2786 .is_enabled = hsw_power_well_enabled,
2787};
2788
2719static const struct i915_power_well_regs icl_aux_power_well_regs = { 2789static const struct i915_power_well_regs icl_aux_power_well_regs = {
2720 .bios = ICL_PWR_WELL_CTL_AUX1, 2790 .bios = ICL_PWR_WELL_CTL_AUX1,
2721 .driver = ICL_PWR_WELL_CTL_AUX2, 2791 .driver = ICL_PWR_WELL_CTL_AUX2,
@@ -2731,7 +2801,7 @@ static const struct i915_power_well_regs icl_ddi_power_well_regs = {
2731static const struct i915_power_well_desc icl_power_wells[] = { 2801static const struct i915_power_well_desc icl_power_wells[] = {
2732 { 2802 {
2733 .name = "always-on", 2803 .name = "always-on",
2734 .always_on = 1, 2804 .always_on = true,
2735 .domains = POWER_DOMAIN_MASK, 2805 .domains = POWER_DOMAIN_MASK,
2736 .ops = &i9xx_always_on_power_well_ops, 2806 .ops = &i9xx_always_on_power_well_ops,
2737 .id = DISP_PW_ID_NONE, 2807 .id = DISP_PW_ID_NONE,
@@ -2739,6 +2809,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
2739 { 2809 {
2740 .name = "power well 1", 2810 .name = "power well 1",
2741 /* Handled by the DMC firmware */ 2811 /* Handled by the DMC firmware */
2812 .always_on = true,
2742 .domains = 0, 2813 .domains = 0,
2743 .ops = &hsw_power_well_ops, 2814 .ops = &hsw_power_well_ops,
2744 .id = SKL_DISP_PW_1, 2815 .id = SKL_DISP_PW_1,
@@ -2749,6 +2820,12 @@ static const struct i915_power_well_desc icl_power_wells[] = {
2749 }, 2820 },
2750 }, 2821 },
2751 { 2822 {
2823 .name = "DC off",
2824 .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
2825 .ops = &gen9_dc_off_power_well_ops,
2826 .id = DISP_PW_ID_NONE,
2827 },
2828 {
2752 .name = "power well 2", 2829 .name = "power well 2",
2753 .domains = ICL_PW_2_POWER_DOMAINS, 2830 .domains = ICL_PW_2_POWER_DOMAINS,
2754 .ops = &hsw_power_well_ops, 2831 .ops = &hsw_power_well_ops,
@@ -2760,12 +2837,6 @@ static const struct i915_power_well_desc icl_power_wells[] = {
2760 }, 2837 },
2761 }, 2838 },
2762 { 2839 {
2763 .name = "DC off",
2764 .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
2765 .ops = &gen9_dc_off_power_well_ops,
2766 .id = DISP_PW_ID_NONE,
2767 },
2768 {
2769 .name = "power well 3", 2840 .name = "power well 3",
2770 .domains = ICL_PW_3_POWER_DOMAINS, 2841 .domains = ICL_PW_3_POWER_DOMAINS,
2771 .ops = &hsw_power_well_ops, 2842 .ops = &hsw_power_well_ops,
@@ -2861,81 +2932,89 @@ static const struct i915_power_well_desc icl_power_wells[] = {
2861 { 2932 {
2862 .name = "AUX C", 2933 .name = "AUX C",
2863 .domains = ICL_AUX_C_IO_POWER_DOMAINS, 2934 .domains = ICL_AUX_C_IO_POWER_DOMAINS,
2864 .ops = &hsw_power_well_ops, 2935 .ops = &icl_tc_phy_aux_power_well_ops,
2865 .id = DISP_PW_ID_NONE, 2936 .id = DISP_PW_ID_NONE,
2866 { 2937 {
2867 .hsw.regs = &icl_aux_power_well_regs, 2938 .hsw.regs = &icl_aux_power_well_regs,
2868 .hsw.idx = ICL_PW_CTL_IDX_AUX_C, 2939 .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
2940 .hsw.is_tc_tbt = false,
2869 }, 2941 },
2870 }, 2942 },
2871 { 2943 {
2872 .name = "AUX D", 2944 .name = "AUX D",
2873 .domains = ICL_AUX_D_IO_POWER_DOMAINS, 2945 .domains = ICL_AUX_D_IO_POWER_DOMAINS,
2874 .ops = &hsw_power_well_ops, 2946 .ops = &icl_tc_phy_aux_power_well_ops,
2875 .id = DISP_PW_ID_NONE, 2947 .id = DISP_PW_ID_NONE,
2876 { 2948 {
2877 .hsw.regs = &icl_aux_power_well_regs, 2949 .hsw.regs = &icl_aux_power_well_regs,
2878 .hsw.idx = ICL_PW_CTL_IDX_AUX_D, 2950 .hsw.idx = ICL_PW_CTL_IDX_AUX_D,
2951 .hsw.is_tc_tbt = false,
2879 }, 2952 },
2880 }, 2953 },
2881 { 2954 {
2882 .name = "AUX E", 2955 .name = "AUX E",
2883 .domains = ICL_AUX_E_IO_POWER_DOMAINS, 2956 .domains = ICL_AUX_E_IO_POWER_DOMAINS,
2884 .ops = &hsw_power_well_ops, 2957 .ops = &icl_tc_phy_aux_power_well_ops,
2885 .id = DISP_PW_ID_NONE, 2958 .id = DISP_PW_ID_NONE,
2886 { 2959 {
2887 .hsw.regs = &icl_aux_power_well_regs, 2960 .hsw.regs = &icl_aux_power_well_regs,
2888 .hsw.idx = ICL_PW_CTL_IDX_AUX_E, 2961 .hsw.idx = ICL_PW_CTL_IDX_AUX_E,
2962 .hsw.is_tc_tbt = false,
2889 }, 2963 },
2890 }, 2964 },
2891 { 2965 {
2892 .name = "AUX F", 2966 .name = "AUX F",
2893 .domains = ICL_AUX_F_IO_POWER_DOMAINS, 2967 .domains = ICL_AUX_F_IO_POWER_DOMAINS,
2894 .ops = &hsw_power_well_ops, 2968 .ops = &icl_tc_phy_aux_power_well_ops,
2895 .id = DISP_PW_ID_NONE, 2969 .id = DISP_PW_ID_NONE,
2896 { 2970 {
2897 .hsw.regs = &icl_aux_power_well_regs, 2971 .hsw.regs = &icl_aux_power_well_regs,
2898 .hsw.idx = ICL_PW_CTL_IDX_AUX_F, 2972 .hsw.idx = ICL_PW_CTL_IDX_AUX_F,
2973 .hsw.is_tc_tbt = false,
2899 }, 2974 },
2900 }, 2975 },
2901 { 2976 {
2902 .name = "AUX TBT1", 2977 .name = "AUX TBT1",
2903 .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS, 2978 .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
2904 .ops = &hsw_power_well_ops, 2979 .ops = &icl_tc_phy_aux_power_well_ops,
2905 .id = DISP_PW_ID_NONE, 2980 .id = DISP_PW_ID_NONE,
2906 { 2981 {
2907 .hsw.regs = &icl_aux_power_well_regs, 2982 .hsw.regs = &icl_aux_power_well_regs,
2908 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1, 2983 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
2984 .hsw.is_tc_tbt = true,
2909 }, 2985 },
2910 }, 2986 },
2911 { 2987 {
2912 .name = "AUX TBT2", 2988 .name = "AUX TBT2",
2913 .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS, 2989 .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
2914 .ops = &hsw_power_well_ops, 2990 .ops = &icl_tc_phy_aux_power_well_ops,
2915 .id = DISP_PW_ID_NONE, 2991 .id = DISP_PW_ID_NONE,
2916 { 2992 {
2917 .hsw.regs = &icl_aux_power_well_regs, 2993 .hsw.regs = &icl_aux_power_well_regs,
2918 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2, 2994 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
2995 .hsw.is_tc_tbt = true,
2919 }, 2996 },
2920 }, 2997 },
2921 { 2998 {
2922 .name = "AUX TBT3", 2999 .name = "AUX TBT3",
2923 .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS, 3000 .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
2924 .ops = &hsw_power_well_ops, 3001 .ops = &icl_tc_phy_aux_power_well_ops,
2925 .id = DISP_PW_ID_NONE, 3002 .id = DISP_PW_ID_NONE,
2926 { 3003 {
2927 .hsw.regs = &icl_aux_power_well_regs, 3004 .hsw.regs = &icl_aux_power_well_regs,
2928 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3, 3005 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
3006 .hsw.is_tc_tbt = true,
2929 }, 3007 },
2930 }, 3008 },
2931 { 3009 {
2932 .name = "AUX TBT4", 3010 .name = "AUX TBT4",
2933 .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS, 3011 .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
2934 .ops = &hsw_power_well_ops, 3012 .ops = &icl_tc_phy_aux_power_well_ops,
2935 .id = DISP_PW_ID_NONE, 3013 .id = DISP_PW_ID_NONE,
2936 { 3014 {
2937 .hsw.regs = &icl_aux_power_well_regs, 3015 .hsw.regs = &icl_aux_power_well_regs,
2938 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4, 3016 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
3017 .hsw.is_tc_tbt = true,
2939 }, 3018 },
2940 }, 3019 },
2941 { 3020 {
@@ -2969,17 +3048,20 @@ static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
2969 int requested_dc; 3048 int requested_dc;
2970 int max_dc; 3049 int max_dc;
2971 3050
2972 if (IS_GEN9_BC(dev_priv) || INTEL_INFO(dev_priv)->gen >= 10) { 3051 if (INTEL_GEN(dev_priv) >= 11) {
2973 max_dc = 2; 3052 max_dc = 2;
2974 mask = 0;
2975 } else if (IS_GEN9_LP(dev_priv)) {
2976 max_dc = 1;
2977 /* 3053 /*
2978 * DC9 has a separate HW flow from the rest of the DC states, 3054 * DC9 has a separate HW flow from the rest of the DC states,
2979 * not depending on the DMC firmware. It's needed by system 3055 * not depending on the DMC firmware. It's needed by system
2980 * suspend/resume, so allow it unconditionally. 3056 * suspend/resume, so allow it unconditionally.
2981 */ 3057 */
2982 mask = DC_STATE_EN_DC9; 3058 mask = DC_STATE_EN_DC9;
3059 } else if (IS_GEN10(dev_priv) || IS_GEN9_BC(dev_priv)) {
3060 max_dc = 2;
3061 mask = 0;
3062 } else if (IS_GEN9_LP(dev_priv)) {
3063 max_dc = 1;
3064 mask = DC_STATE_EN_DC9;
2983 } else { 3065 } else {
2984 max_dc = 0; 3066 max_dc = 0;
2985 mask = 0; 3067 mask = 0;
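
The platform ladder now handles GEN11 first (DC9 allowed unconditionally since suspend/resume needs it and it does not depend on the DMC), then GEN10/GEN9_BC, then GEN9_LP. A sketch of the resulting max_dc/mask selection, with plain booleans standing in for the INTEL_GEN()/IS_GEN10()/IS_GEN9_BC()/IS_GEN9_LP() predicates:

    #include <stdbool.h>

    #define DC_EN_DC9 0x8   /* illustrative stand-in for DC_STATE_EN_DC9 */

    /* Sketch of the reordered ladder above. */
    static void allowed_dc(bool gen11, bool gen10_or_gen9_bc, bool gen9_lp,
                           int *max_dc, unsigned int *mask)
    {
            if (gen11) {
                    *max_dc = 2;
                    *mask = DC_EN_DC9;      /* DC9 allowed: needed for suspend */
            } else if (gen10_or_gen9_bc) {
                    *max_dc = 2;
                    *mask = 0;
            } else if (gen9_lp) {
                    *max_dc = 1;
                    *mask = DC_EN_DC9;
            } else {
                    *max_dc = 0;
                    *mask = 0;
            }
    }
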
@@ -3075,12 +3157,6 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
3075 */ 3157 */
3076 if (IS_ICELAKE(dev_priv)) { 3158 if (IS_ICELAKE(dev_priv)) {
3077 err = set_power_wells(power_domains, icl_power_wells); 3159 err = set_power_wells(power_domains, icl_power_wells);
3078 } else if (IS_HASWELL(dev_priv)) {
3079 err = set_power_wells(power_domains, hsw_power_wells);
3080 } else if (IS_BROADWELL(dev_priv)) {
3081 err = set_power_wells(power_domains, bdw_power_wells);
3082 } else if (IS_GEN9_BC(dev_priv)) {
3083 err = set_power_wells(power_domains, skl_power_wells);
3084 } else if (IS_CANNONLAKE(dev_priv)) { 3160 } else if (IS_CANNONLAKE(dev_priv)) {
3085 err = set_power_wells(power_domains, cnl_power_wells); 3161 err = set_power_wells(power_domains, cnl_power_wells);
3086 3162
@@ -3092,13 +3168,18 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
3092 */ 3168 */
3093 if (!IS_CNL_WITH_PORT_F(dev_priv)) 3169 if (!IS_CNL_WITH_PORT_F(dev_priv))
3094 power_domains->power_well_count -= 2; 3170 power_domains->power_well_count -= 2;
3095
3096 } else if (IS_BROXTON(dev_priv)) {
3097 err = set_power_wells(power_domains, bxt_power_wells);
3098 } else if (IS_GEMINILAKE(dev_priv)) { 3171 } else if (IS_GEMINILAKE(dev_priv)) {
3099 err = set_power_wells(power_domains, glk_power_wells); 3172 err = set_power_wells(power_domains, glk_power_wells);
3173 } else if (IS_BROXTON(dev_priv)) {
3174 err = set_power_wells(power_domains, bxt_power_wells);
3175 } else if (IS_GEN9_BC(dev_priv)) {
3176 err = set_power_wells(power_domains, skl_power_wells);
3100 } else if (IS_CHERRYVIEW(dev_priv)) { 3177 } else if (IS_CHERRYVIEW(dev_priv)) {
3101 err = set_power_wells(power_domains, chv_power_wells); 3178 err = set_power_wells(power_domains, chv_power_wells);
3179 } else if (IS_BROADWELL(dev_priv)) {
3180 err = set_power_wells(power_domains, bdw_power_wells);
3181 } else if (IS_HASWELL(dev_priv)) {
3182 err = set_power_wells(power_domains, hsw_power_wells);
3102 } else if (IS_VALLEYVIEW(dev_priv)) { 3183 } else if (IS_VALLEYVIEW(dev_priv)) {
3103 err = set_power_wells(power_domains, vlv_power_wells); 3184 err = set_power_wells(power_domains, vlv_power_wells);
3104 } else if (IS_I830(dev_priv)) { 3185 } else if (IS_I830(dev_priv)) {
@@ -3176,8 +3257,7 @@ static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
3176void icl_dbuf_slices_update(struct drm_i915_private *dev_priv, 3257void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
3177 u8 req_slices) 3258 u8 req_slices)
3178{ 3259{
3179 u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices; 3260 const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
3180 u32 val;
3181 bool ret; 3261 bool ret;
3182 3262
3183 if (req_slices > intel_dbuf_max_slices(dev_priv)) { 3263 if (req_slices > intel_dbuf_max_slices(dev_priv)) {
@@ -3188,7 +3268,6 @@ void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
3188 if (req_slices == hw_enabled_slices || req_slices == 0) 3268 if (req_slices == hw_enabled_slices || req_slices == 0)
3189 return; 3269 return;
3190 3270
3191 val = I915_READ(DBUF_CTL_S2);
3192 if (req_slices > hw_enabled_slices) 3271 if (req_slices > hw_enabled_slices)
3193 ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true); 3272 ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
3194 else 3273 else
@@ -3240,18 +3319,40 @@ static void icl_mbus_init(struct drm_i915_private *dev_priv)
3240 I915_WRITE(MBUS_ABOX_CTL, val); 3319 I915_WRITE(MBUS_ABOX_CTL, val);
3241} 3320}
3242 3321
3322static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
3323 bool enable)
3324{
3325 i915_reg_t reg;
3326 u32 reset_bits, val;
3327
3328 if (IS_IVYBRIDGE(dev_priv)) {
3329 reg = GEN7_MSG_CTL;
3330 reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
3331 } else {
3332 reg = HSW_NDE_RSTWRN_OPT;
3333 reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
3334 }
3335
3336 val = I915_READ(reg);
3337
3338 if (enable)
3339 val |= reset_bits;
3340 else
3341 val &= ~reset_bits;
3342
3343 I915_WRITE(reg, val);
3344}
3345
3243static void skl_display_core_init(struct drm_i915_private *dev_priv, 3346static void skl_display_core_init(struct drm_i915_private *dev_priv,
3244 bool resume) 3347 bool resume)
3245{ 3348{
3246 struct i915_power_domains *power_domains = &dev_priv->power_domains; 3349 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3247 struct i915_power_well *well; 3350 struct i915_power_well *well;
3248 uint32_t val;
3249 3351
3250 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 3352 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3251 3353
3252 /* enable PCH reset handshake */ 3354 /* enable PCH reset handshake */
3253 val = I915_READ(HSW_NDE_RSTWRN_OPT); 3355 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
3254 I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);
3255 3356
3256 /* enable PG1 and Misc I/O */ 3357 /* enable PG1 and Misc I/O */
3257 mutex_lock(&power_domains->lock); 3358 mutex_lock(&power_domains->lock);
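
intel_pch_reset_handshake() centralizes the read-modify-write that several platform init paths previously open-coded, selecting GEN7_MSG_CTL on Ivybridge and HSW_NDE_RSTWRN_OPT elsewhere. A sketch of the generic set/clear-bits idiom it wraps, with placeholder MMIO helpers in place of I915_READ()/I915_WRITE():

    #include <stdbool.h>

    extern unsigned int read_reg(unsigned int reg);             /* placeholder */
    extern void write_reg(unsigned int reg, unsigned int val);  /* placeholder */

    /* Sketch: set or clear a bit mask in a register, as the new helper
     * does with the platform-selected register and reset bits. */
    static void rmw_bits(unsigned int reg, unsigned int bits, bool enable)
    {
            unsigned int val = read_reg(reg);

            if (enable)
                    val |= bits;
            else
                    val &= ~bits;

            write_reg(reg, val);
    }
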
@@ -3307,7 +3408,6 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv,
3307{ 3408{
3308 struct i915_power_domains *power_domains = &dev_priv->power_domains; 3409 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3309 struct i915_power_well *well; 3410 struct i915_power_well *well;
3310 uint32_t val;
3311 3411
3312 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 3412 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3313 3413
@@ -3317,9 +3417,7 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv,
3317 * Move the handshake programming to initialization sequence. 3417 * Move the handshake programming to initialization sequence.
3318 * Previously was left up to BIOS. 3418 * Previously was left up to BIOS.
3319 */ 3419 */
3320 val = I915_READ(HSW_NDE_RSTWRN_OPT); 3420 intel_pch_reset_handshake(dev_priv, false);
3321 val &= ~RESET_PCH_HANDSHAKE_ENABLE;
3322 I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
3323 3421
3324 /* Enable PG1 */ 3422 /* Enable PG1 */
3325 mutex_lock(&power_domains->lock); 3423 mutex_lock(&power_domains->lock);
@@ -3365,101 +3463,18 @@ void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
3365 usleep_range(10, 30); /* 10 us delay per Bspec */ 3463 usleep_range(10, 30); /* 10 us delay per Bspec */
3366} 3464}
3367 3465
3368enum {
3369 PROCMON_0_85V_DOT_0,
3370 PROCMON_0_95V_DOT_0,
3371 PROCMON_0_95V_DOT_1,
3372 PROCMON_1_05V_DOT_0,
3373 PROCMON_1_05V_DOT_1,
3374};
3375
3376static const struct cnl_procmon {
3377 u32 dw1, dw9, dw10;
3378} cnl_procmon_values[] = {
3379 [PROCMON_0_85V_DOT_0] =
3380 { .dw1 = 0x00000000, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96, },
3381 [PROCMON_0_95V_DOT_0] =
3382 { .dw1 = 0x00000000, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB, },
3383 [PROCMON_0_95V_DOT_1] =
3384 { .dw1 = 0x00000000, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5, },
3385 [PROCMON_1_05V_DOT_0] =
3386 { .dw1 = 0x00000000, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1, },
3387 [PROCMON_1_05V_DOT_1] =
3388 { .dw1 = 0x00440000, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, },
3389};
3390
3391/*
3392 * CNL has just one set of registers, while ICL has two sets: one for port A and
3393 * the other for port B. The CNL registers are equivalent to the ICL port A
3393 * CNL has just one set of registers, while ICL has two sets: one for port A and
3394 * the other for port B. The CNL registers are equivalent to the ICL port A
3395 * registers; that's why we call the ICL macros even though the function has CNL
3396 * in its name.
3396 */
3397static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
3398 enum port port)
3399{
3400 const struct cnl_procmon *procmon;
3401 u32 val;
3402
3403 val = I915_READ(ICL_PORT_COMP_DW3(port));
3404 switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
3405 default:
3406 MISSING_CASE(val);
3407 /* fall through */
3408 case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0:
3409 procmon = &cnl_procmon_values[PROCMON_0_85V_DOT_0];
3410 break;
3411 case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0:
3412 procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_0];
3413 break;
3414 case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1:
3415 procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_1];
3416 break;
3417 case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0:
3418 procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_0];
3419 break;
3420 case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1:
3421 procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_1];
3422 break;
3423 }
3424
3425 val = I915_READ(ICL_PORT_COMP_DW1(port));
3426 val &= ~((0xff << 16) | 0xff);
3427 val |= procmon->dw1;
3428 I915_WRITE(ICL_PORT_COMP_DW1(port), val);
3429
3430 I915_WRITE(ICL_PORT_COMP_DW9(port), procmon->dw9);
3431 I915_WRITE(ICL_PORT_COMP_DW10(port), procmon->dw10);
3432}
3433
3434static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume) 3466static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
3435{ 3467{
3436 struct i915_power_domains *power_domains = &dev_priv->power_domains; 3468 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3437 struct i915_power_well *well; 3469 struct i915_power_well *well;
3438 u32 val;
3439 3470
3440 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 3471 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3441 3472
3442 /* 1. Enable PCH Reset Handshake */ 3473 /* 1. Enable PCH Reset Handshake */
3443 val = I915_READ(HSW_NDE_RSTWRN_OPT); 3474 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
3444 val |= RESET_PCH_HANDSHAKE_ENABLE;
3445 I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
3446 3475
3447 /* 2. Enable Comp */ 3476 /* 2-3. */
3448 val = I915_READ(CHICKEN_MISC_2); 3477 cnl_combo_phys_init(dev_priv);
3449 val &= ~CNL_COMP_PWR_DOWN;
3450 I915_WRITE(CHICKEN_MISC_2, val);
3451
3452 /* Dummy PORT_A to get the correct CNL register from the ICL macro */
3453 cnl_set_procmon_ref_values(dev_priv, PORT_A);
3454
3455 val = I915_READ(CNL_PORT_COMP_DW0);
3456 val |= COMP_INIT;
3457 I915_WRITE(CNL_PORT_COMP_DW0, val);
3458
3459 /* 3. */
3460 val = I915_READ(CNL_PORT_CL1CM_DW5);
3461 val |= CL_POWER_DOWN_ENABLE;
3462 I915_WRITE(CNL_PORT_CL1CM_DW5, val);
3463 3478
3464 /* 3479 /*
3465 * 4. Enable Power Well 1 (PG1). 3480 * 4. Enable Power Well 1 (PG1).
@@ -3484,7 +3499,6 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
3484{ 3499{
3485 struct i915_power_domains *power_domains = &dev_priv->power_domains; 3500 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3486 struct i915_power_well *well; 3501 struct i915_power_well *well;
3487 u32 val;
3488 3502
3489 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 3503 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3490 3504
@@ -3508,44 +3522,23 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
3508 3522
3509 usleep_range(10, 30); /* 10 us delay per Bspec */ 3523 usleep_range(10, 30); /* 10 us delay per Bspec */
3510 3524
3511 /* 5. Disable Comp */ 3525 /* 5. */
3512 val = I915_READ(CHICKEN_MISC_2); 3526 cnl_combo_phys_uninit(dev_priv);
3513 val |= CNL_COMP_PWR_DOWN;
3514 I915_WRITE(CHICKEN_MISC_2, val);
3515} 3527}
3516 3528
3517static void icl_display_core_init(struct drm_i915_private *dev_priv, 3529void icl_display_core_init(struct drm_i915_private *dev_priv,
3518 bool resume) 3530 bool resume)
3519{ 3531{
3520 struct i915_power_domains *power_domains = &dev_priv->power_domains; 3532 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3521 struct i915_power_well *well; 3533 struct i915_power_well *well;
3522 enum port port;
3523 u32 val;
3524 3534
3525 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 3535 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3526 3536
3527 /* 1. Enable PCH reset handshake. */ 3537 /* 1. Enable PCH reset handshake. */
3528 val = I915_READ(HSW_NDE_RSTWRN_OPT); 3538 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
3529 val |= RESET_PCH_HANDSHAKE_ENABLE; 3539
3530 I915_WRITE(HSW_NDE_RSTWRN_OPT, val); 3540 /* 2-3. */
3531 3541 icl_combo_phys_init(dev_priv);
3532 for (port = PORT_A; port <= PORT_B; port++) {
3533 /* 2. Enable DDI combo PHY comp. */
3534 val = I915_READ(ICL_PHY_MISC(port));
3535 val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
3536 I915_WRITE(ICL_PHY_MISC(port), val);
3537
3538 cnl_set_procmon_ref_values(dev_priv, port);
3539
3540 val = I915_READ(ICL_PORT_COMP_DW0(port));
3541 val |= COMP_INIT;
3542 I915_WRITE(ICL_PORT_COMP_DW0(port), val);
3543
3544 /* 3. Set power down enable. */
3545 val = I915_READ(ICL_PORT_CL_DW5(port));
3546 val |= CL_POWER_DOWN_ENABLE;
3547 I915_WRITE(ICL_PORT_CL_DW5(port), val);
3548 }
3549 3542
3550 /* 3543 /*
3551 * 4. Enable Power Well 1 (PG1). 3544 * 4. Enable Power Well 1 (PG1).
@@ -3569,12 +3562,10 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
3569 intel_csr_load_program(dev_priv); 3562 intel_csr_load_program(dev_priv);
3570} 3563}
3571 3564
3572static void icl_display_core_uninit(struct drm_i915_private *dev_priv) 3565void icl_display_core_uninit(struct drm_i915_private *dev_priv)
3573{ 3566{
3574 struct i915_power_domains *power_domains = &dev_priv->power_domains; 3567 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3575 struct i915_power_well *well; 3568 struct i915_power_well *well;
3576 enum port port;
3577 u32 val;
3578 3569
3579 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 3570 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3580 3571
@@ -3596,12 +3587,8 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
3596 intel_power_well_disable(dev_priv, well); 3587 intel_power_well_disable(dev_priv, well);
3597 mutex_unlock(&power_domains->lock); 3588 mutex_unlock(&power_domains->lock);
3598 3589
3599 /* 5. Disable Comp */ 3590 /* 5. */
3600 for (port = PORT_A; port <= PORT_B; port++) { 3591 icl_combo_phys_uninit(dev_priv);
3601 val = I915_READ(ICL_PHY_MISC(port));
3602 val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
3603 I915_WRITE(ICL_PHY_MISC(port), val);
3604 }
3605} 3592}
3606 3593
3607static void chv_phy_control_init(struct drm_i915_private *dev_priv) 3594static void chv_phy_control_init(struct drm_i915_private *dev_priv)
@@ -3759,7 +3746,8 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
3759 mutex_lock(&power_domains->lock); 3746 mutex_lock(&power_domains->lock);
3760 vlv_cmnlane_wa(dev_priv); 3747 vlv_cmnlane_wa(dev_priv);
3761 mutex_unlock(&power_domains->lock); 3748 mutex_unlock(&power_domains->lock);
3762 } 3749 } else if (IS_IVYBRIDGE(dev_priv) || INTEL_GEN(dev_priv) >= 7)
3750 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
3763 3751
3764 /* 3752 /*
3765 * Keep all power wells enabled for any dependent HW access during 3753 * Keep all power wells enabled for any dependent HW access during
@@ -3953,14 +3941,6 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
3953 int domains_count; 3941 int domains_count;
3954 bool enabled; 3942 bool enabled;
3955 3943
3956 /*
3957 * Power wells not belonging to any domain (like the MISC_IO
3958 * and PW1 power wells) are under FW control, so ignore them,
3959 * since their state can change asynchronously.
3960 */
3961 if (!power_well->desc->domains)
3962 continue;
3963
3964 enabled = power_well->desc->ops->is_enabled(dev_priv, 3944 enabled = power_well->desc->ops->is_enabled(dev_priv,
3965 power_well); 3945 power_well);
3966 if ((power_well->count || power_well->desc->always_on) != 3946 if ((power_well->count || power_well->desc->always_on) !=
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 701372e512a8..5805ec1aba12 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -105,11 +105,6 @@ struct intel_sdvo {
105 bool has_hdmi_audio; 105 bool has_hdmi_audio;
106 bool rgb_quant_range_selectable; 106 bool rgb_quant_range_selectable;
107 107
108 /**
109 	 * This is the SDVO fixed panel mode pointer
110 */
111 struct drm_display_mode *sdvo_lvds_fixed_mode;
112
113 /* DDC bus used by this SDVO encoder */ 108 /* DDC bus used by this SDVO encoder */
114 uint8_t ddc_bus; 109 uint8_t ddc_bus;
115 110
@@ -765,10 +760,14 @@ intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
765 args.height = height; 760 args.height = height;
766 args.interlace = 0; 761 args.interlace = 0;
767 762
768 if (IS_LVDS(intel_sdvo_connector) && 763 if (IS_LVDS(intel_sdvo_connector)) {
769 (intel_sdvo->sdvo_lvds_fixed_mode->hdisplay != width || 764 const struct drm_display_mode *fixed_mode =
770 intel_sdvo->sdvo_lvds_fixed_mode->vdisplay != height)) 765 intel_sdvo_connector->base.panel.fixed_mode;
771 args.scaled = 1; 766
767 if (fixed_mode->hdisplay != width ||
768 fixed_mode->vdisplay != height)
769 args.scaled = 1;
770 }
772 771
773 return intel_sdvo_set_value(intel_sdvo, 772 return intel_sdvo_set_value(intel_sdvo,
774 SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING, 773 SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
@@ -1123,6 +1122,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
1123 1122
1124 DRM_DEBUG_KMS("forcing bpc to 8 for SDVO\n"); 1123 DRM_DEBUG_KMS("forcing bpc to 8 for SDVO\n");
1125 pipe_config->pipe_bpp = 8*3; 1124 pipe_config->pipe_bpp = 8*3;
1125 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
1126 1126
1127 if (HAS_PCH_SPLIT(to_i915(encoder->base.dev))) 1127 if (HAS_PCH_SPLIT(to_i915(encoder->base.dev)))
1128 pipe_config->has_pch_encoder = true; 1128 pipe_config->has_pch_encoder = true;
@@ -1144,7 +1144,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
1144 pipe_config->sdvo_tv_clock = true; 1144 pipe_config->sdvo_tv_clock = true;
1145 } else if (IS_LVDS(intel_sdvo_connector)) { 1145 } else if (IS_LVDS(intel_sdvo_connector)) {
1146 if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, 1146 if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
1147 intel_sdvo->sdvo_lvds_fixed_mode)) 1147 intel_sdvo_connector->base.panel.fixed_mode))
1148 return false; 1148 return false;
1149 1149
1150 (void) intel_sdvo_get_preferred_input_mode(intel_sdvo, 1150 (void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
@@ -1301,7 +1301,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
1301 /* lvds has a special fixed output timing. */ 1301 /* lvds has a special fixed output timing. */
1302 if (IS_LVDS(intel_sdvo_connector)) 1302 if (IS_LVDS(intel_sdvo_connector))
1303 intel_sdvo_get_dtd_from_mode(&output_dtd, 1303 intel_sdvo_get_dtd_from_mode(&output_dtd,
1304 intel_sdvo->sdvo_lvds_fixed_mode); 1304 intel_sdvo_connector->base.panel.fixed_mode);
1305 else 1305 else
1306 intel_sdvo_get_dtd_from_mode(&output_dtd, mode); 1306 intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
1307 if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd)) 1307 if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd))
@@ -1642,10 +1642,13 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
1642 return MODE_CLOCK_HIGH; 1642 return MODE_CLOCK_HIGH;
1643 1643
1644 if (IS_LVDS(intel_sdvo_connector)) { 1644 if (IS_LVDS(intel_sdvo_connector)) {
1645 if (mode->hdisplay > intel_sdvo->sdvo_lvds_fixed_mode->hdisplay) 1645 const struct drm_display_mode *fixed_mode =
1646 intel_sdvo_connector->base.panel.fixed_mode;
1647
1648 if (mode->hdisplay > fixed_mode->hdisplay)
1646 return MODE_PANEL; 1649 return MODE_PANEL;
1647 1650
1648 if (mode->vdisplay > intel_sdvo->sdvo_lvds_fixed_mode->vdisplay) 1651 if (mode->vdisplay > fixed_mode->vdisplay)
1649 return MODE_PANEL; 1652 return MODE_PANEL;
1650 } 1653 }
1651 1654
@@ -2058,14 +2061,6 @@ static int intel_sdvo_get_modes(struct drm_connector *connector)
2058 return !list_empty(&connector->probed_modes); 2061 return !list_empty(&connector->probed_modes);
2059} 2062}
2060 2063
2061static void intel_sdvo_destroy(struct drm_connector *connector)
2062{
2063 struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
2064
2065 drm_connector_cleanup(connector);
2066 kfree(intel_sdvo_connector);
2067}
2068
2069static int 2064static int
2070intel_sdvo_connector_atomic_get_property(struct drm_connector *connector, 2065intel_sdvo_connector_atomic_get_property(struct drm_connector *connector,
2071 const struct drm_connector_state *state, 2066 const struct drm_connector_state *state,
@@ -2228,7 +2223,7 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
2228 .atomic_set_property = intel_sdvo_connector_atomic_set_property, 2223 .atomic_set_property = intel_sdvo_connector_atomic_set_property,
2229 .late_register = intel_sdvo_connector_register, 2224 .late_register = intel_sdvo_connector_register,
2230 .early_unregister = intel_sdvo_connector_unregister, 2225 .early_unregister = intel_sdvo_connector_unregister,
2231 .destroy = intel_sdvo_destroy, 2226 .destroy = intel_connector_destroy,
2232 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 2227 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
2233 .atomic_duplicate_state = intel_sdvo_connector_duplicate_state, 2228 .atomic_duplicate_state = intel_sdvo_connector_duplicate_state,
2234}; 2229};
@@ -2267,10 +2262,6 @@ static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
2267{ 2262{
2268 struct intel_sdvo *intel_sdvo = to_sdvo(to_intel_encoder(encoder)); 2263 struct intel_sdvo *intel_sdvo = to_sdvo(to_intel_encoder(encoder));
2269 2264
2270 if (intel_sdvo->sdvo_lvds_fixed_mode != NULL)
2271 drm_mode_destroy(encoder->dev,
2272 intel_sdvo->sdvo_lvds_fixed_mode);
2273
2274 i2c_del_adapter(&intel_sdvo->ddc); 2265 i2c_del_adapter(&intel_sdvo->ddc);
2275 intel_encoder_destroy(encoder); 2266 intel_encoder_destroy(encoder);
2276} 2267}
@@ -2583,7 +2574,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
2583 return true; 2574 return true;
2584 2575
2585err: 2576err:
2586 intel_sdvo_destroy(connector); 2577 intel_connector_destroy(connector);
2587 return false; 2578 return false;
2588} 2579}
2589 2580
@@ -2663,19 +2654,22 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
2663 2654
2664 list_for_each_entry(mode, &connector->probed_modes, head) { 2655 list_for_each_entry(mode, &connector->probed_modes, head) {
2665 if (mode->type & DRM_MODE_TYPE_PREFERRED) { 2656 if (mode->type & DRM_MODE_TYPE_PREFERRED) {
2666 intel_sdvo->sdvo_lvds_fixed_mode = 2657 struct drm_display_mode *fixed_mode =
2667 drm_mode_duplicate(connector->dev, mode); 2658 drm_mode_duplicate(connector->dev, mode);
2659
2660 intel_panel_init(&intel_connector->panel,
2661 fixed_mode, NULL);
2668 break; 2662 break;
2669 } 2663 }
2670 } 2664 }
2671 2665
2672 if (!intel_sdvo->sdvo_lvds_fixed_mode) 2666 if (!intel_connector->panel.fixed_mode)
2673 goto err; 2667 goto err;
2674 2668
2675 return true; 2669 return true;
2676 2670
2677err: 2671err:
2678 intel_sdvo_destroy(connector); 2672 intel_connector_destroy(connector);
2679 return false; 2673 return false;
2680} 2674}
2681 2675
@@ -2745,7 +2739,7 @@ static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo)
2745 &dev->mode_config.connector_list, head) { 2739 &dev->mode_config.connector_list, head) {
2746 if (intel_attached_encoder(connector) == &intel_sdvo->base) { 2740 if (intel_attached_encoder(connector) == &intel_sdvo->base) {
2747 drm_connector_unregister(connector); 2741 drm_connector_unregister(connector);
2748 intel_sdvo_destroy(connector); 2742 intel_connector_destroy(connector);
2749 } 2743 }
2750 } 2744 }
2751} 2745}
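
All three intel_sdvo call sites above now converge on a shared intel_connector_destroy() helper in intel_connector.c, with the LVDS fixed mode owned by the common panel state instead of being freed by the encoder. A minimal sketch of what such a shared destructor plausibly looks like; the EDID fields and intel_panel_fini() are assumptions, not shown in this diff:

        /* Sketch of the shared destructor; edid fields and intel_panel_fini()
         * are assumptions, not taken from this hunk. */
        void intel_connector_destroy(struct drm_connector *connector)
        {
                struct intel_connector *intel_connector = to_intel_connector(connector);

                kfree(intel_connector->detect_edid);

                if (!IS_ERR_OR_NULL(intel_connector->edid))
                        kfree(intel_connector->edid);

                intel_panel_fini(&intel_connector->panel); /* frees panel.fixed_mode */

                drm_connector_cleanup(connector);
                kfree(connector);
        }
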
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 5fd2f7bf3927..abe193815ccc 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -40,6 +40,7 @@
40#include "intel_frontbuffer.h" 40#include "intel_frontbuffer.h"
41#include <drm/i915_drm.h> 41#include <drm/i915_drm.h>
42#include "i915_drv.h" 42#include "i915_drv.h"
43#include <drm/drm_color_mgmt.h>
43 44
44int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, 45int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
45 int usecs) 46 int usecs)
@@ -275,17 +276,24 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
275 src->y2 = (src_y + src_h) << 16; 276 src->y2 = (src_y + src_h) << 16;
276 277
277 if (fb->format->is_yuv && 278 if (fb->format->is_yuv &&
278 fb->format->format != DRM_FORMAT_NV12 &&
279 (src_x & 1 || src_w & 1)) { 279 (src_x & 1 || src_w & 1)) {
280 DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of 2 for YUV planes\n", 280 DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of 2 for YUV planes\n",
281 src_x, src_w); 281 src_x, src_w);
282 return -EINVAL; 282 return -EINVAL;
283 } 283 }
284 284
285 if (fb->format->is_yuv &&
286 fb->format->num_planes > 1 &&
287 (src_y & 1 || src_h & 1)) {
288 DRM_DEBUG_KMS("src y/h (%u, %u) must be a multiple of 2 for planar YUV planes\n",
289 src_y, src_h);
290 return -EINVAL;
291 }
292
285 return 0; 293 return 0;
286} 294}
287 295
288unsigned int 296static unsigned int
289skl_plane_max_stride(struct intel_plane *plane, 297skl_plane_max_stride(struct intel_plane *plane,
290 u32 pixel_format, u64 modifier, 298 u32 pixel_format, u64 modifier,
291 unsigned int rotation) 299 unsigned int rotation)
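
The new check extends the even-coordinate rule to the vertical axis for multi-plane YUV: with 2x2 chroma subsampling (NV12 and friends) an odd src_y or src_h would cut a chroma sample in half. Restated as a standalone sketch, using the same drm_format_info fields (is_yuv, num_planes) as the hunk above:

        /* Sketch: source-rectangle parity rules for YUV, per the checks above. */
        static int check_yuv_src_coordinates(const struct drm_format_info *format,
                                             u32 src_x, u32 src_y,
                                             u32 src_w, u32 src_h)
        {
                /* Any YUV format: horizontally adjacent pixels share chroma. */
                if (format->is_yuv && (src_x & 1 || src_w & 1))
                        return -EINVAL;

                /* Planar YUV (e.g. NV12): vertically adjacent lines share chroma too. */
                if (format->is_yuv && format->num_planes > 1 &&
                    (src_y & 1 || src_h & 1))
                        return -EINVAL;

                return 0;
        }
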
@@ -302,35 +310,201 @@ skl_plane_max_stride(struct intel_plane *plane,
302 return min(8192 * cpp, 32768); 310 return min(8192 * cpp, 32768);
303} 311}
304 312
305void 313static void
306skl_update_plane(struct intel_plane *plane, 314skl_program_scaler(struct intel_plane *plane,
307 const struct intel_crtc_state *crtc_state, 315 const struct intel_crtc_state *crtc_state,
308 const struct intel_plane_state *plane_state) 316 const struct intel_plane_state *plane_state)
317{
318 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
319 enum pipe pipe = plane->pipe;
320 int scaler_id = plane_state->scaler_id;
321 const struct intel_scaler *scaler =
322 &crtc_state->scaler_state.scalers[scaler_id];
323 int crtc_x = plane_state->base.dst.x1;
324 int crtc_y = plane_state->base.dst.y1;
325 uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
326 uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
327 u16 y_hphase, uv_rgb_hphase;
328 u16 y_vphase, uv_rgb_vphase;
329 int hscale, vscale;
330
331 hscale = drm_rect_calc_hscale(&plane_state->base.src,
332 &plane_state->base.dst,
333 0, INT_MAX);
334 vscale = drm_rect_calc_vscale(&plane_state->base.src,
335 &plane_state->base.dst,
336 0, INT_MAX);
337
338 /* TODO: handle sub-pixel coordinates */
339 if (plane_state->base.fb->format->format == DRM_FORMAT_NV12 &&
340 !icl_is_hdr_plane(plane)) {
341 y_hphase = skl_scaler_calc_phase(1, hscale, false);
342 y_vphase = skl_scaler_calc_phase(1, vscale, false);
343
344 /* MPEG2 chroma siting convention */
345 uv_rgb_hphase = skl_scaler_calc_phase(2, hscale, true);
346 uv_rgb_vphase = skl_scaler_calc_phase(2, vscale, false);
347 } else {
348 /* not used */
349 y_hphase = 0;
350 y_vphase = 0;
351
352 uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
353 uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
354 }
355
356 I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id),
357 PS_SCALER_EN | PS_PLANE_SEL(plane->id) | scaler->mode);
358 I915_WRITE_FW(SKL_PS_VPHASE(pipe, scaler_id),
359 PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
360 I915_WRITE_FW(SKL_PS_HPHASE(pipe, scaler_id),
361 PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
362 I915_WRITE_FW(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
363 I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id), (crtc_w << 16) | crtc_h);
364}
365
366/* Preoffset values for YUV to RGB Conversion */
367#define PREOFF_YUV_TO_RGB_HI 0x1800
368#define PREOFF_YUV_TO_RGB_ME 0x1F00
369#define PREOFF_YUV_TO_RGB_LO 0x1800
370
371#define ROFF(x) (((x) & 0xffff) << 16)
372#define GOFF(x) (((x) & 0xffff) << 0)
373#define BOFF(x) (((x) & 0xffff) << 16)
374
375static void
376icl_program_input_csc_coeff(const struct intel_crtc_state *crtc_state,
377 const struct intel_plane_state *plane_state)
378{
379 struct drm_i915_private *dev_priv =
380 to_i915(plane_state->base.plane->dev);
381 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
382 enum pipe pipe = crtc->pipe;
383 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
384 enum plane_id plane_id = plane->id;
385
386 static const u16 input_csc_matrix[][9] = {
387 /*
388 * BT.601 full range YCbCr -> full range RGB
389 * The matrix required is :
390 * [1.000, 0.000, 1.371,
391 * 1.000, -0.336, -0.698,
392 * 1.000, 1.732, 0.0000]
393 */
394 [DRM_COLOR_YCBCR_BT601] = {
395 0x7AF8, 0x7800, 0x0,
396 0x8B28, 0x7800, 0x9AC0,
397 0x0, 0x7800, 0x7DD8,
398 },
399 /*
400 * BT.709 full range YCbCr -> full range RGB
401 * The matrix required is :
402 * [1.000, 0.000, 1.574,
403 * 1.000, -0.187, -0.468,
404 * 1.000, 1.855, 0.0000]
405 */
406 [DRM_COLOR_YCBCR_BT709] = {
407 0x7C98, 0x7800, 0x0,
408 0x9EF8, 0x7800, 0xABF8,
409 0x0, 0x7800, 0x7ED8,
410 },
411 };
412
413 /* Matrix for Limited Range to Full Range Conversion */
414 static const u16 input_csc_matrix_lr[][9] = {
415 /*
416 * BT.601 Limited range YCbCr -> full range RGB
417 * The matrix required is :
418 * [1.164384, 0.000, 1.596370,
419 * 1.138393, -0.382500, -0.794598,
420 * 1.138393, 1.971696, 0.0000]
421 */
422 [DRM_COLOR_YCBCR_BT601] = {
423 0x7CC8, 0x7950, 0x0,
424 0x8CB8, 0x7918, 0x9C40,
425 0x0, 0x7918, 0x7FC8,
426 },
427 /*
428 * BT.709 Limited range YCbCr -> full range RGB
429 * The matrix required is :
430 * [1.164, 0.000, 1.833671,
431 * 1.138393, -0.213249, -0.532909,
432 * 1.138393, 2.112402, 0.0000]
433 */
434 [DRM_COLOR_YCBCR_BT709] = {
435 0x7EA8, 0x7950, 0x0,
436 0x8888, 0x7918, 0xADA8,
437 0x0, 0x7918, 0x6870,
438 },
439 };
440 const u16 *csc;
441
442 if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
443 csc = input_csc_matrix[plane_state->base.color_encoding];
444 else
445 csc = input_csc_matrix_lr[plane_state->base.color_encoding];
446
447 I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0), ROFF(csc[0]) |
448 GOFF(csc[1]));
449 I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 1), BOFF(csc[2]));
450 I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 2), ROFF(csc[3]) |
451 GOFF(csc[4]));
452 I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 3), BOFF(csc[5]));
453 I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 4), ROFF(csc[6]) |
454 GOFF(csc[7]));
455 I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 5), BOFF(csc[8]));
456
457 I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0),
458 PREOFF_YUV_TO_RGB_HI);
459 I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
460 PREOFF_YUV_TO_RGB_ME);
461 I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2),
462 PREOFF_YUV_TO_RGB_LO);
463 I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 0), 0x0);
464 I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 1), 0x0);
465 I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 2), 0x0);
466}
467
468static void
469skl_program_plane(struct intel_plane *plane,
470 const struct intel_crtc_state *crtc_state,
471 const struct intel_plane_state *plane_state,
472 int color_plane, bool slave, u32 plane_ctl)
309{ 473{
310 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 474 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
311 const struct drm_framebuffer *fb = plane_state->base.fb;
312 enum plane_id plane_id = plane->id; 475 enum plane_id plane_id = plane->id;
313 enum pipe pipe = plane->pipe; 476 enum pipe pipe = plane->pipe;
314 u32 plane_ctl = plane_state->ctl;
315 const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; 477 const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
316 u32 surf_addr = plane_state->color_plane[0].offset; 478 u32 surf_addr = plane_state->color_plane[color_plane].offset;
317 u32 stride = skl_plane_stride(plane_state, 0); 479 u32 stride = skl_plane_stride(plane_state, color_plane);
318 u32 aux_stride = skl_plane_stride(plane_state, 1); 480 u32 aux_stride = skl_plane_stride(plane_state, 1);
319 int crtc_x = plane_state->base.dst.x1; 481 int crtc_x = plane_state->base.dst.x1;
320 int crtc_y = plane_state->base.dst.y1; 482 int crtc_y = plane_state->base.dst.y1;
321 uint32_t crtc_w = drm_rect_width(&plane_state->base.dst); 483 uint32_t x = plane_state->color_plane[color_plane].x;
322 uint32_t crtc_h = drm_rect_height(&plane_state->base.dst); 484 uint32_t y = plane_state->color_plane[color_plane].y;
323 uint32_t x = plane_state->color_plane[0].x;
324 uint32_t y = plane_state->color_plane[0].y;
325 uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16; 485 uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
326 uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16; 486 uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
487 struct intel_plane *linked = plane_state->linked_plane;
488 const struct drm_framebuffer *fb = plane_state->base.fb;
489 u8 alpha = plane_state->base.alpha >> 8;
327 unsigned long irqflags; 490 unsigned long irqflags;
491 u32 keymsk, keymax;
328 492
329 /* Sizes are 0 based */ 493 /* Sizes are 0 based */
330 src_w--; 494 src_w--;
331 src_h--; 495 src_h--;
332 crtc_w--; 496
333 crtc_h--; 497 keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha);
498
499 keymsk = key->channel_mask & 0x3ffffff;
500 if (alpha < 0xff)
501 keymsk |= PLANE_KEYMSK_ALPHA_ENABLE;
502
503 /* The scaler will handle the output position */
504 if (plane_state->scaler_id >= 0) {
505 crtc_x = 0;
506 crtc_y = 0;
507 }
334 508
335 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 509 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
336 510
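
skl_program_scaler() now carries the initial-phase setup that used to live inline in the update path. The three-argument skl_scaler_calc_phase() it calls is not part of this hunk; the sketch below is consistent with the calls above, assuming an 0x8000-per-half-sample fixed-point layout for the PS_*PHASE fields:

        /* Sketch of the phase helper; the constants are assumptions about
         * the scaler's fixed-point layout, not taken from this diff. */
        static u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
        {
                int phase = -0x8000;                    /* start at -0.5 samples */
                u16 trip = 0;

                if (chroma_cosited)
                        phase += (sub - 1) * 0x8000 / sub; /* chroma siting offset */

                phase += scale / (2 * sub);             /* center of first output tap */

                if (phase < 0)
                        phase = 0x10000 + phase;        /* wrap negatives into range */
                else
                        trip = PS_PHASE_TRIP;

                return ((phase >> 2) & PS_PHASE_MASK) | trip;
        }

icl_program_input_csc_coeff() in the same hunk programs the per-plane YCbCr-to-RGB matrices; the register values encode the floating-point coefficients spelled out in the comments, selected by the plane's color encoding and range properties.
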
@@ -338,71 +512,83 @@ skl_update_plane(struct intel_plane *plane,
338 I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id), 512 I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id),
339 plane_state->color_ctl); 513 plane_state->color_ctl);
340 514
341 if (key->flags) { 515 if (fb->format->is_yuv && icl_is_hdr_plane(plane))
342 I915_WRITE_FW(PLANE_KEYVAL(pipe, plane_id), key->min_value); 516 icl_program_input_csc_coeff(crtc_state, plane_state);
343 I915_WRITE_FW(PLANE_KEYMAX(pipe, plane_id), key->max_value); 517
344 I915_WRITE_FW(PLANE_KEYMSK(pipe, plane_id), key->channel_mask); 518 I915_WRITE_FW(PLANE_KEYVAL(pipe, plane_id), key->min_value);
345 } 519 I915_WRITE_FW(PLANE_KEYMAX(pipe, plane_id), keymax);
520 I915_WRITE_FW(PLANE_KEYMSK(pipe, plane_id), keymsk);
346 521
347 I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (y << 16) | x); 522 I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (y << 16) | x);
348 I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride); 523 I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride);
349 I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w); 524 I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);
350 I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id), 525 I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id),
351 (plane_state->color_plane[1].offset - surf_addr) | aux_stride); 526 (plane_state->color_plane[1].offset - surf_addr) | aux_stride);
352 I915_WRITE_FW(PLANE_AUX_OFFSET(pipe, plane_id),
353 (plane_state->color_plane[1].y << 16) |
354 plane_state->color_plane[1].x);
355 527
356 /* program plane scaler */ 528 if (INTEL_GEN(dev_priv) < 11)
357 if (plane_state->scaler_id >= 0) { 529 I915_WRITE_FW(PLANE_AUX_OFFSET(pipe, plane_id),
358 int scaler_id = plane_state->scaler_id; 530 (plane_state->color_plane[1].y << 16) |
359 const struct intel_scaler *scaler = 531 plane_state->color_plane[1].x);
360 &crtc_state->scaler_state.scalers[scaler_id]; 532
361 u16 y_hphase, uv_rgb_hphase; 533 if (icl_is_hdr_plane(plane)) {
362 u16 y_vphase, uv_rgb_vphase; 534 u32 cus_ctl = 0;
363 535
364 /* TODO: handle sub-pixel coordinates */ 536 if (linked) {
365 if (fb->format->format == DRM_FORMAT_NV12) { 537 /* Enable and use MPEG-2 chroma siting */
366 y_hphase = skl_scaler_calc_phase(1, false); 538 cus_ctl = PLANE_CUS_ENABLE |
367 y_vphase = skl_scaler_calc_phase(1, false); 539 PLANE_CUS_HPHASE_0 |
368 540 PLANE_CUS_VPHASE_SIGN_NEGATIVE |
369 /* MPEG2 chroma siting convention */ 541 PLANE_CUS_VPHASE_0_25;
370 uv_rgb_hphase = skl_scaler_calc_phase(2, true); 542
371 uv_rgb_vphase = skl_scaler_calc_phase(2, false); 543 if (linked->id == PLANE_SPRITE5)
372 } else { 544 cus_ctl |= PLANE_CUS_PLANE_7;
373 /* not used */ 545 else if (linked->id == PLANE_SPRITE4)
374 y_hphase = 0; 546 cus_ctl |= PLANE_CUS_PLANE_6;
375 y_vphase = 0; 547 else
376 548 MISSING_CASE(linked->id);
377 uv_rgb_hphase = skl_scaler_calc_phase(1, false);
378 uv_rgb_vphase = skl_scaler_calc_phase(1, false);
379 } 549 }
380 550
381 I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id), 551 I915_WRITE_FW(PLANE_CUS_CTL(pipe, plane_id), cus_ctl);
382 PS_SCALER_EN | PS_PLANE_SEL(plane_id) | scaler->mode);
383 I915_WRITE_FW(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
384 I915_WRITE_FW(SKL_PS_VPHASE(pipe, scaler_id),
385 PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
386 I915_WRITE_FW(SKL_PS_HPHASE(pipe, scaler_id),
387 PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
388 I915_WRITE_FW(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
389 I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id),
390 ((crtc_w + 1) << 16)|(crtc_h + 1));
391
392 I915_WRITE_FW(PLANE_POS(pipe, plane_id), 0);
393 } else {
394 I915_WRITE_FW(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x);
395 } 552 }
396 553
554 if (!slave && plane_state->scaler_id >= 0)
555 skl_program_scaler(plane, crtc_state, plane_state);
556
557 I915_WRITE_FW(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x);
558
397 I915_WRITE_FW(PLANE_CTL(pipe, plane_id), plane_ctl); 559 I915_WRITE_FW(PLANE_CTL(pipe, plane_id), plane_ctl);
398 I915_WRITE_FW(PLANE_SURF(pipe, plane_id), 560 I915_WRITE_FW(PLANE_SURF(pipe, plane_id),
399 intel_plane_ggtt_offset(plane_state) + surf_addr); 561 intel_plane_ggtt_offset(plane_state) + surf_addr);
400 POSTING_READ_FW(PLANE_SURF(pipe, plane_id));
401 562
402 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 563 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
403} 564}
404 565
405void 566static void
567skl_update_plane(struct intel_plane *plane,
568 const struct intel_crtc_state *crtc_state,
569 const struct intel_plane_state *plane_state)
570{
571 int color_plane = 0;
572
573 if (plane_state->linked_plane) {
574 /* Program the UV plane */
575 color_plane = 1;
576 }
577
578 skl_program_plane(plane, crtc_state, plane_state,
579 color_plane, false, plane_state->ctl);
580}
581
582static void
583icl_update_slave(struct intel_plane *plane,
584 const struct intel_crtc_state *crtc_state,
585 const struct intel_plane_state *plane_state)
586{
587 skl_program_plane(plane, crtc_state, plane_state, 0, true,
588 plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE);
589}
590
591static void
406skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc) 592skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
407{ 593{
408 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 594 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
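
Two behavioural changes hide in the rewritten programming path above: the plane's 16-bit alpha property is truncated to 8 bits and folded into the colorkey registers, and on ICL HDR planes a linked slave plane carries the Y samples of NV12 while the master scans out UV (the PLANE_CUS_CTL block). A minimal restatement of the alpha packing, assuming PLANE_KEYMAX_ALPHA() shifts the value into bits 31:24:

        /* Sketch: folding 16-bit plane alpha into PLANE_KEYMAX/PLANE_KEYMSK. */
        static void skl_pack_plane_alpha(const struct drm_plane_state *state,
                                         const struct drm_intel_sprite_colorkey *key,
                                         u32 *keymax, u32 *keymsk)
        {
                u8 alpha = state->alpha >> 8;   /* 0xffff (opaque) -> 0xff */

                *keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha);

                *keymsk = key->channel_mask & 0x3ffffff;
                if (alpha < 0xff)               /* only enable blending when needed */
                        *keymsk |= PLANE_KEYMSK_ALPHA_ENABLE;
        }
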
@@ -413,14 +599,12 @@ skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
413 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 599 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
414 600
415 I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0); 601 I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0);
416
417 I915_WRITE_FW(PLANE_SURF(pipe, plane_id), 0); 602 I915_WRITE_FW(PLANE_SURF(pipe, plane_id), 0);
418 POSTING_READ_FW(PLANE_SURF(pipe, plane_id));
419 603
420 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 604 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
421} 605}
422 606
423bool 607static bool
424skl_plane_get_hw_state(struct intel_plane *plane, 608skl_plane_get_hw_state(struct intel_plane *plane,
425 enum pipe *pipe) 609 enum pipe *pipe)
426{ 610{
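
The POSTING_READ_FW() calls dropped in the skl hunk above (and in the vlv/ivb/g4x hunks below) only forced the posted MMIO write to complete before the function returned; nothing in the update path depends on that, and each uncached read costs real time inside the vblank-evasion critical section. The idiom being removed, for reference:

        I915_WRITE_FW(PLANE_SURF(pipe, plane_id), 0);
        POSTING_READ_FW(PLANE_SURF(pipe, plane_id));    /* flush the posted write */
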
@@ -613,7 +797,6 @@ vlv_update_plane(struct intel_plane *plane,
613 const struct intel_plane_state *plane_state) 797 const struct intel_plane_state *plane_state)
614{ 798{
615 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 799 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
616 const struct drm_framebuffer *fb = plane_state->base.fb;
617 enum pipe pipe = plane->pipe; 800 enum pipe pipe = plane->pipe;
618 enum plane_id plane_id = plane->id; 801 enum plane_id plane_id = plane->id;
619 u32 sprctl = plane_state->ctl; 802 u32 sprctl = plane_state->ctl;
@@ -650,10 +833,8 @@ vlv_update_plane(struct intel_plane *plane,
650 plane_state->color_plane[0].stride); 833 plane_state->color_plane[0].stride);
651 I915_WRITE_FW(SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x); 834 I915_WRITE_FW(SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x);
652 835
653 if (fb->modifier == I915_FORMAT_MOD_X_TILED) 836 I915_WRITE_FW(SPTILEOFF(pipe, plane_id), (y << 16) | x);
654 I915_WRITE_FW(SPTILEOFF(pipe, plane_id), (y << 16) | x); 837 I915_WRITE_FW(SPLINOFF(pipe, plane_id), linear_offset);
655 else
656 I915_WRITE_FW(SPLINOFF(pipe, plane_id), linear_offset);
657 838
658 I915_WRITE_FW(SPCONSTALPHA(pipe, plane_id), 0); 839 I915_WRITE_FW(SPCONSTALPHA(pipe, plane_id), 0);
659 840
@@ -661,7 +842,6 @@ vlv_update_plane(struct intel_plane *plane,
661 I915_WRITE_FW(SPCNTR(pipe, plane_id), sprctl); 842 I915_WRITE_FW(SPCNTR(pipe, plane_id), sprctl);
662 I915_WRITE_FW(SPSURF(pipe, plane_id), 843 I915_WRITE_FW(SPSURF(pipe, plane_id),
663 intel_plane_ggtt_offset(plane_state) + sprsurf_offset); 844 intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
664 POSTING_READ_FW(SPSURF(pipe, plane_id));
665 845
666 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 846 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
667} 847}
@@ -677,9 +857,7 @@ vlv_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
677 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 857 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
678 858
679 I915_WRITE_FW(SPCNTR(pipe, plane_id), 0); 859 I915_WRITE_FW(SPCNTR(pipe, plane_id), 0);
680
681 I915_WRITE_FW(SPSURF(pipe, plane_id), 0); 860 I915_WRITE_FW(SPSURF(pipe, plane_id), 0);
682 POSTING_READ_FW(SPSURF(pipe, plane_id));
683 861
684 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 862 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
685} 863}
@@ -774,7 +952,6 @@ ivb_update_plane(struct intel_plane *plane,
774 const struct intel_plane_state *plane_state) 952 const struct intel_plane_state *plane_state)
775{ 953{
776 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 954 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
777 const struct drm_framebuffer *fb = plane_state->base.fb;
778 enum pipe pipe = plane->pipe; 955 enum pipe pipe = plane->pipe;
779 u32 sprctl = plane_state->ctl, sprscale = 0; 956 u32 sprctl = plane_state->ctl, sprscale = 0;
780 u32 sprsurf_offset = plane_state->color_plane[0].offset; 957 u32 sprsurf_offset = plane_state->color_plane[0].offset;
@@ -814,12 +991,12 @@ ivb_update_plane(struct intel_plane *plane,
814 991
815 /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET 992 /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
816 * register */ 993 * register */
817 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 994 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
818 I915_WRITE_FW(SPROFFSET(pipe), (y << 16) | x); 995 I915_WRITE_FW(SPROFFSET(pipe), (y << 16) | x);
819 else if (fb->modifier == I915_FORMAT_MOD_X_TILED) 996 } else {
820 I915_WRITE_FW(SPRTILEOFF(pipe), (y << 16) | x); 997 I915_WRITE_FW(SPRTILEOFF(pipe), (y << 16) | x);
821 else
822 I915_WRITE_FW(SPRLINOFF(pipe), linear_offset); 998 I915_WRITE_FW(SPRLINOFF(pipe), linear_offset);
999 }
823 1000
824 I915_WRITE_FW(SPRSIZE(pipe), (crtc_h << 16) | crtc_w); 1001 I915_WRITE_FW(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
825 if (IS_IVYBRIDGE(dev_priv)) 1002 if (IS_IVYBRIDGE(dev_priv))
@@ -827,7 +1004,6 @@ ivb_update_plane(struct intel_plane *plane,
827 I915_WRITE_FW(SPRCTL(pipe), sprctl); 1004 I915_WRITE_FW(SPRCTL(pipe), sprctl);
828 I915_WRITE_FW(SPRSURF(pipe), 1005 I915_WRITE_FW(SPRSURF(pipe),
829 intel_plane_ggtt_offset(plane_state) + sprsurf_offset); 1006 intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
830 POSTING_READ_FW(SPRSURF(pipe));
831 1007
832 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 1008 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
833} 1009}
@@ -845,9 +1021,7 @@ ivb_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
845 /* Can't leave the scaler enabled... */ 1021 /* Can't leave the scaler enabled... */
846 if (IS_IVYBRIDGE(dev_priv)) 1022 if (IS_IVYBRIDGE(dev_priv))
847 I915_WRITE_FW(SPRSCALE(pipe), 0); 1023 I915_WRITE_FW(SPRSCALE(pipe), 0);
848
849 I915_WRITE_FW(SPRSURF(pipe), 0); 1024 I915_WRITE_FW(SPRSURF(pipe), 0);
850 POSTING_READ_FW(SPRSURF(pipe));
851 1025
852 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 1026 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
853} 1027}
@@ -946,7 +1120,6 @@ g4x_update_plane(struct intel_plane *plane,
946 const struct intel_plane_state *plane_state) 1120 const struct intel_plane_state *plane_state)
947{ 1121{
948 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 1122 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
949 const struct drm_framebuffer *fb = plane_state->base.fb;
950 enum pipe pipe = plane->pipe; 1123 enum pipe pipe = plane->pipe;
951 u32 dvscntr = plane_state->ctl, dvsscale = 0; 1124 u32 dvscntr = plane_state->ctl, dvsscale = 0;
952 u32 dvssurf_offset = plane_state->color_plane[0].offset; 1125 u32 dvssurf_offset = plane_state->color_plane[0].offset;
@@ -984,17 +1157,14 @@ g4x_update_plane(struct intel_plane *plane,
984 I915_WRITE_FW(DVSSTRIDE(pipe), plane_state->color_plane[0].stride); 1157 I915_WRITE_FW(DVSSTRIDE(pipe), plane_state->color_plane[0].stride);
985 I915_WRITE_FW(DVSPOS(pipe), (crtc_y << 16) | crtc_x); 1158 I915_WRITE_FW(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
986 1159
987 if (fb->modifier == I915_FORMAT_MOD_X_TILED) 1160 I915_WRITE_FW(DVSTILEOFF(pipe), (y << 16) | x);
988 I915_WRITE_FW(DVSTILEOFF(pipe), (y << 16) | x); 1161 I915_WRITE_FW(DVSLINOFF(pipe), linear_offset);
989 else
990 I915_WRITE_FW(DVSLINOFF(pipe), linear_offset);
991 1162
992 I915_WRITE_FW(DVSSIZE(pipe), (crtc_h << 16) | crtc_w); 1163 I915_WRITE_FW(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
993 I915_WRITE_FW(DVSSCALE(pipe), dvsscale); 1164 I915_WRITE_FW(DVSSCALE(pipe), dvsscale);
994 I915_WRITE_FW(DVSCNTR(pipe), dvscntr); 1165 I915_WRITE_FW(DVSCNTR(pipe), dvscntr);
995 I915_WRITE_FW(DVSSURF(pipe), 1166 I915_WRITE_FW(DVSSURF(pipe),
996 intel_plane_ggtt_offset(plane_state) + dvssurf_offset); 1167 intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
997 POSTING_READ_FW(DVSSURF(pipe));
998 1168
999 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 1169 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
1000} 1170}
@@ -1011,9 +1181,7 @@ g4x_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
1011 I915_WRITE_FW(DVSCNTR(pipe), 0); 1181 I915_WRITE_FW(DVSCNTR(pipe), 0);
1012 /* Disable the scaler */ 1182 /* Disable the scaler */
1013 I915_WRITE_FW(DVSSCALE(pipe), 0); 1183 I915_WRITE_FW(DVSSCALE(pipe), 0);
1014
1015 I915_WRITE_FW(DVSSURF(pipe), 0); 1184 I915_WRITE_FW(DVSSURF(pipe), 0);
1016 POSTING_READ_FW(DVSSURF(pipe));
1017 1185
1018 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 1186 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
1019} 1187}
@@ -1039,6 +1207,19 @@ g4x_plane_get_hw_state(struct intel_plane *plane,
1039 return ret; 1207 return ret;
1040} 1208}
1041 1209
1210static bool intel_fb_scalable(const struct drm_framebuffer *fb)
1211{
1212 if (!fb)
1213 return false;
1214
1215 switch (fb->format->format) {
1216 case DRM_FORMAT_C8:
1217 return false;
1218 default:
1219 return true;
1220 }
1221}
1222
1042static int 1223static int
1043g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state, 1224g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state,
1044 struct intel_plane_state *plane_state) 1225 struct intel_plane_state *plane_state)
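
intel_fb_scalable() centralizes the "can this framebuffer be scaled at all" decision: no fb, or C8 (whose palette indices presumably cannot be resampled), means no scaling. The check functions then start from DRM_PLANE_HELPER_NO_SCALING and only widen the range for scalable formats, mirroring the g4x hunk that follows:

        /* Sketch: scale limits default to 'none' unless the fb is scalable. */
        int min_scale = DRM_PLANE_HELPER_NO_SCALING;
        int max_scale = DRM_PLANE_HELPER_NO_SCALING;

        if (intel_fb_scalable(plane_state->base.fb)) {
                /* 16.16 fixed-point bounds on the src/dst ratio */
                min_scale = 1;
                max_scale = 16 << 16;
        }
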
@@ -1106,18 +1287,18 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state,
1106{ 1287{
1107 struct intel_plane *plane = to_intel_plane(plane_state->base.plane); 1288 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
1108 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 1289 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1109 int max_scale, min_scale; 1290 int min_scale = DRM_PLANE_HELPER_NO_SCALING;
1291 int max_scale = DRM_PLANE_HELPER_NO_SCALING;
1110 int ret; 1292 int ret;
1111 1293
1112 if (INTEL_GEN(dev_priv) < 7) { 1294 if (intel_fb_scalable(plane_state->base.fb)) {
1113 min_scale = 1; 1295 if (INTEL_GEN(dev_priv) < 7) {
1114 max_scale = 16 << 16; 1296 min_scale = 1;
1115 } else if (IS_IVYBRIDGE(dev_priv)) { 1297 max_scale = 16 << 16;
1116 min_scale = 1; 1298 } else if (IS_IVYBRIDGE(dev_priv)) {
1117 max_scale = 2 << 16; 1299 min_scale = 1;
1118 } else { 1300 max_scale = 2 << 16;
1119 min_scale = DRM_PLANE_HELPER_NO_SCALING; 1301 }
1120 max_scale = DRM_PLANE_HELPER_NO_SCALING;
1121 } 1302 }
1122 1303
1123 ret = drm_atomic_helper_check_plane_state(&plane_state->base, 1304 ret = drm_atomic_helper_check_plane_state(&plane_state->base,
@@ -1204,6 +1385,8 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state,
1204static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state, 1385static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
1205 const struct intel_plane_state *plane_state) 1386 const struct intel_plane_state *plane_state)
1206{ 1387{
1388 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
1389 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1207 const struct drm_framebuffer *fb = plane_state->base.fb; 1390 const struct drm_framebuffer *fb = plane_state->base.fb;
1208 unsigned int rotation = plane_state->base.rotation; 1391 unsigned int rotation = plane_state->base.rotation;
1209 struct drm_format_name_buf format_name; 1392 struct drm_format_name_buf format_name;
@@ -1232,13 +1415,17 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
1232 } 1415 }
1233 1416
1234 /* 1417 /*
1235 * 90/270 is not allowed with RGB64 16:16:16:16, 1418 * 90/270 is not allowed with RGB64 16:16:16:16 and
1236 * RGB 16-bit 5:6:5, and Indexed 8-bit. 1419 * Indexed 8-bit. RGB 16-bit 5:6:5 is allowed gen11 onwards.
1237 * TBD: Add RGB64 case once it's added in supported format list. 1420 * TBD: Add RGB64 case once it's added in supported format
1421 * list.
1238 */ 1422 */
1239 switch (fb->format->format) { 1423 switch (fb->format->format) {
1240 case DRM_FORMAT_C8:
1241 case DRM_FORMAT_RGB565: 1424 case DRM_FORMAT_RGB565:
1425 if (INTEL_GEN(dev_priv) >= 11)
1426 break;
1427 /* fall through */
1428 case DRM_FORMAT_C8:
1242 DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n", 1429 DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n",
1243 drm_get_format_name(fb->format->format, 1430 drm_get_format_name(fb->format->format,
1244 &format_name)); 1431 &format_name));
@@ -1292,12 +1479,31 @@ static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_s
1292 return 0; 1479 return 0;
1293} 1480}
1294 1481
1295int skl_plane_check(struct intel_crtc_state *crtc_state, 1482static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_state)
1296 struct intel_plane_state *plane_state) 1483{
1484 const struct drm_framebuffer *fb = plane_state->base.fb;
1485 unsigned int rotation = plane_state->base.rotation;
1486 int src_w = drm_rect_width(&plane_state->base.src) >> 16;
1487
1488 /* Display WA #1106 */
1489 if (fb->format->format == DRM_FORMAT_NV12 && src_w & 3 &&
1490 (rotation == DRM_MODE_ROTATE_270 ||
1491 rotation == (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90))) {
1492 DRM_DEBUG_KMS("src width must be multiple of 4 for rotated NV12\n");
1493 return -EINVAL;
1494 }
1495
1496 return 0;
1497}
1498
1499static int skl_plane_check(struct intel_crtc_state *crtc_state,
1500 struct intel_plane_state *plane_state)
1297{ 1501{
1298 struct intel_plane *plane = to_intel_plane(plane_state->base.plane); 1502 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
1299 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 1503 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1300 int max_scale, min_scale; 1504 const struct drm_framebuffer *fb = plane_state->base.fb;
1505 int min_scale = DRM_PLANE_HELPER_NO_SCALING;
1506 int max_scale = DRM_PLANE_HELPER_NO_SCALING;
1301 int ret; 1507 int ret;
1302 1508
1303 ret = skl_plane_check_fb(crtc_state, plane_state); 1509 ret = skl_plane_check_fb(crtc_state, plane_state);
@@ -1305,15 +1511,9 @@ int skl_plane_check(struct intel_crtc_state *crtc_state,
1305 return ret; 1511 return ret;
1306 1512
1307 /* use scaler when colorkey is not required */ 1513 /* use scaler when colorkey is not required */
1308 if (!plane_state->ckey.flags) { 1514 if (!plane_state->ckey.flags && intel_fb_scalable(fb)) {
1309 const struct drm_framebuffer *fb = plane_state->base.fb;
1310
1311 min_scale = 1; 1515 min_scale = 1;
1312 max_scale = skl_max_scale(crtc_state, 1516 max_scale = skl_max_scale(crtc_state, fb->format->format);
1313 fb ? fb->format->format : 0);
1314 } else {
1315 min_scale = DRM_PLANE_HELPER_NO_SCALING;
1316 max_scale = DRM_PLANE_HELPER_NO_SCALING;
1317 } 1517 }
1318 1518
1319 ret = drm_atomic_helper_check_plane_state(&plane_state->base, 1519 ret = drm_atomic_helper_check_plane_state(&plane_state->base,
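
skl_plane_check_nv12_rotation() implements Display WA #1106: 270 degree rotation (or 90 degree plus X reflection) of NV12 requires the source width to be a multiple of 4. As a standalone predicate with example values (the function name here is hypothetical):

        /* Sketch: the WA #1106 predicate in isolation. */
        static bool nv12_rotation_needs_w4(u32 format, int src_w,
                                           unsigned int rotation)
        {
                return format == DRM_FORMAT_NV12 && (src_w & 3) &&
                       (rotation == DRM_MODE_ROTATE_270 ||
                        rotation == (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90));
        }

        /* e.g. src_w = 1366 rotated 270 is rejected; src_w = 1368 passes. */
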
@@ -1334,10 +1534,18 @@ int skl_plane_check(struct intel_crtc_state *crtc_state,
1334 if (ret) 1534 if (ret)
1335 return ret; 1535 return ret;
1336 1536
1537 ret = skl_plane_check_nv12_rotation(plane_state);
1538 if (ret)
1539 return ret;
1540
1337 ret = skl_check_plane_surface(plane_state); 1541 ret = skl_check_plane_surface(plane_state);
1338 if (ret) 1542 if (ret)
1339 return ret; 1543 return ret;
1340 1544
1545 /* HW only has 8 bits pixel precision, disable plane if invisible */
1546 if (!(plane_state->base.alpha >> 8))
1547 plane_state->base.visible = false;
1548
1341 plane_state->ctl = skl_plane_ctl(crtc_state, plane_state); 1549 plane_state->ctl = skl_plane_ctl(crtc_state, plane_state);
1342 1550
1343 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) 1551 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
@@ -1502,24 +1710,30 @@ static const uint32_t vlv_plane_formats[] = {
1502 DRM_FORMAT_VYUY, 1710 DRM_FORMAT_VYUY,
1503}; 1711};
1504 1712
1505static uint32_t skl_plane_formats[] = { 1713static const uint32_t skl_plane_formats[] = {
1714 DRM_FORMAT_C8,
1506 DRM_FORMAT_RGB565, 1715 DRM_FORMAT_RGB565,
1507 DRM_FORMAT_ABGR8888,
1508 DRM_FORMAT_ARGB8888,
1509 DRM_FORMAT_XBGR8888,
1510 DRM_FORMAT_XRGB8888, 1716 DRM_FORMAT_XRGB8888,
1717 DRM_FORMAT_XBGR8888,
1718 DRM_FORMAT_ARGB8888,
1719 DRM_FORMAT_ABGR8888,
1720 DRM_FORMAT_XRGB2101010,
1721 DRM_FORMAT_XBGR2101010,
1511 DRM_FORMAT_YUYV, 1722 DRM_FORMAT_YUYV,
1512 DRM_FORMAT_YVYU, 1723 DRM_FORMAT_YVYU,
1513 DRM_FORMAT_UYVY, 1724 DRM_FORMAT_UYVY,
1514 DRM_FORMAT_VYUY, 1725 DRM_FORMAT_VYUY,
1515}; 1726};
1516 1727
1517static uint32_t skl_planar_formats[] = { 1728static const uint32_t skl_planar_formats[] = {
1729 DRM_FORMAT_C8,
1518 DRM_FORMAT_RGB565, 1730 DRM_FORMAT_RGB565,
1519 DRM_FORMAT_ABGR8888,
1520 DRM_FORMAT_ARGB8888,
1521 DRM_FORMAT_XBGR8888,
1522 DRM_FORMAT_XRGB8888, 1731 DRM_FORMAT_XRGB8888,
1732 DRM_FORMAT_XBGR8888,
1733 DRM_FORMAT_ARGB8888,
1734 DRM_FORMAT_ABGR8888,
1735 DRM_FORMAT_XRGB2101010,
1736 DRM_FORMAT_XBGR2101010,
1523 DRM_FORMAT_YUYV, 1737 DRM_FORMAT_YUYV,
1524 DRM_FORMAT_YVYU, 1738 DRM_FORMAT_YVYU,
1525 DRM_FORMAT_UYVY, 1739 DRM_FORMAT_UYVY,
@@ -1724,8 +1938,36 @@ static const struct drm_plane_funcs skl_plane_funcs = {
1724 .format_mod_supported = skl_plane_format_mod_supported, 1938 .format_mod_supported = skl_plane_format_mod_supported,
1725}; 1939};
1726 1940
1727bool skl_plane_has_ccs(struct drm_i915_private *dev_priv, 1941static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
1728 enum pipe pipe, enum plane_id plane_id) 1942 enum pipe pipe, enum plane_id plane_id)
1943{
1944 if (!HAS_FBC(dev_priv))
1945 return false;
1946
1947 return pipe == PIPE_A && plane_id == PLANE_PRIMARY;
1948}
1949
1950static bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
1951 enum pipe pipe, enum plane_id plane_id)
1952{
1953 if (INTEL_GEN(dev_priv) >= 11)
1954 return plane_id <= PLANE_SPRITE3;
1955
1956 /* Display WA #0870: skl, bxt */
1957 if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
1958 return false;
1959
1960 if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C)
1961 return false;
1962
1963 if (plane_id != PLANE_PRIMARY && plane_id != PLANE_SPRITE0)
1964 return false;
1965
1966 return true;
1967}
1968
1969static bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
1970 enum pipe pipe, enum plane_id plane_id)
1729{ 1971{
1730 if (plane_id == PLANE_CURSOR) 1972 if (plane_id == PLANE_CURSOR)
1731 return false; 1973 return false;
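
skl_plane_has_planar() encodes the per-platform NV12 story: the first five planes on every pipe for gen11, nothing on SKL/BXT (Display WA #0870), and primary plus sprite0 elsewhere on gen9/gen10, with pipe C excluded on non-GLK gen9 parts. Expected answers, as a usage sketch:

        /* Sketch: expected results from the helper above. */
        bool a = skl_plane_has_planar(dev_priv, PIPE_A, PLANE_SPRITE0);
        /* true on GLK/CNL/ICL, false on SKL/BXT (WA #0870) */
        bool c = skl_plane_has_planar(dev_priv, PIPE_C, PLANE_SPRITE1);
        /* false before gen11; true on ICL (plane_id <= PLANE_SPRITE3) */
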
@@ -1742,109 +1984,173 @@ bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
1742} 1984}
1743 1985
1744struct intel_plane * 1986struct intel_plane *
1745intel_sprite_plane_create(struct drm_i915_private *dev_priv, 1987skl_universal_plane_create(struct drm_i915_private *dev_priv,
1746 enum pipe pipe, int plane) 1988 enum pipe pipe, enum plane_id plane_id)
1747{ 1989{
1748 struct intel_plane *intel_plane = NULL; 1990 struct intel_plane *plane;
1749 struct intel_plane_state *state = NULL; 1991 enum drm_plane_type plane_type;
1750 const struct drm_plane_funcs *plane_funcs;
1751 unsigned long possible_crtcs;
1752 const uint32_t *plane_formats;
1753 const uint64_t *modifiers;
1754 unsigned int supported_rotations; 1992 unsigned int supported_rotations;
1755 int num_plane_formats; 1993 unsigned int possible_crtcs;
1994 const u64 *modifiers;
1995 const u32 *formats;
1996 int num_formats;
1756 int ret; 1997 int ret;
1757 1998
1758 intel_plane = kzalloc(sizeof(*intel_plane), GFP_KERNEL); 1999 plane = intel_plane_alloc();
1759 if (!intel_plane) { 2000 if (IS_ERR(plane))
1760 ret = -ENOMEM; 2001 return plane;
1761 goto fail; 2002
2003 plane->pipe = pipe;
2004 plane->id = plane_id;
2005 plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane_id);
2006
2007 plane->has_fbc = skl_plane_has_fbc(dev_priv, pipe, plane_id);
2008 if (plane->has_fbc) {
2009 struct intel_fbc *fbc = &dev_priv->fbc;
2010
2011 fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
1762 } 2012 }
1763 2013
1764 state = intel_create_plane_state(&intel_plane->base); 2014 plane->max_stride = skl_plane_max_stride;
1765 if (!state) { 2015 plane->update_plane = skl_update_plane;
1766 ret = -ENOMEM; 2016 plane->disable_plane = skl_disable_plane;
1767 goto fail; 2017 plane->get_hw_state = skl_plane_get_hw_state;
2018 plane->check_plane = skl_plane_check;
2019 if (icl_is_nv12_y_plane(plane_id))
2020 plane->update_slave = icl_update_slave;
2021
2022 if (skl_plane_has_planar(dev_priv, pipe, plane_id)) {
2023 formats = skl_planar_formats;
2024 num_formats = ARRAY_SIZE(skl_planar_formats);
2025 } else {
2026 formats = skl_plane_formats;
2027 num_formats = ARRAY_SIZE(skl_plane_formats);
1768 } 2028 }
1769 intel_plane->base.state = &state->base;
1770 2029
1771 if (INTEL_GEN(dev_priv) >= 9) { 2030 plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id);
1772 state->scaler_id = -1; 2031 if (plane->has_ccs)
2032 modifiers = skl_plane_format_modifiers_ccs;
2033 else
2034 modifiers = skl_plane_format_modifiers_noccs;
1773 2035
1774 intel_plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, 2036 if (plane_id == PLANE_PRIMARY)
1775 PLANE_SPRITE0 + plane); 2037 plane_type = DRM_PLANE_TYPE_PRIMARY;
2038 else
2039 plane_type = DRM_PLANE_TYPE_OVERLAY;
1776 2040
1777 intel_plane->max_stride = skl_plane_max_stride; 2041 possible_crtcs = BIT(pipe);
1778 intel_plane->update_plane = skl_update_plane;
1779 intel_plane->disable_plane = skl_disable_plane;
1780 intel_plane->get_hw_state = skl_plane_get_hw_state;
1781 intel_plane->check_plane = skl_plane_check;
1782 2042
1783 if (skl_plane_has_planar(dev_priv, pipe, 2043 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
1784 PLANE_SPRITE0 + plane)) { 2044 possible_crtcs, &skl_plane_funcs,
1785 plane_formats = skl_planar_formats; 2045 formats, num_formats, modifiers,
1786 num_plane_formats = ARRAY_SIZE(skl_planar_formats); 2046 plane_type,
1787 } else { 2047 "plane %d%c", plane_id + 1,
1788 plane_formats = skl_plane_formats; 2048 pipe_name(pipe));
1789 num_plane_formats = ARRAY_SIZE(skl_plane_formats); 2049 if (ret)
1790 } 2050 goto fail;
1791 2051
1792 if (intel_plane->has_ccs) 2052 supported_rotations =
1793 modifiers = skl_plane_format_modifiers_ccs; 2053 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
1794 else 2054 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
1795 modifiers = skl_plane_format_modifiers_noccs; 2055
1796 2056 if (INTEL_GEN(dev_priv) >= 10)
1797 plane_funcs = &skl_plane_funcs; 2057 supported_rotations |= DRM_MODE_REFLECT_X;
1798 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 2058
1799 intel_plane->max_stride = i9xx_plane_max_stride; 2059 drm_plane_create_rotation_property(&plane->base,
1800 intel_plane->update_plane = vlv_update_plane; 2060 DRM_MODE_ROTATE_0,
1801 intel_plane->disable_plane = vlv_disable_plane; 2061 supported_rotations);
1802 intel_plane->get_hw_state = vlv_plane_get_hw_state; 2062
1803 intel_plane->check_plane = vlv_sprite_check; 2063 drm_plane_create_color_properties(&plane->base,
1804 2064 BIT(DRM_COLOR_YCBCR_BT601) |
1805 plane_formats = vlv_plane_formats; 2065 BIT(DRM_COLOR_YCBCR_BT709),
1806 num_plane_formats = ARRAY_SIZE(vlv_plane_formats); 2066 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
2067 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
2068 DRM_COLOR_YCBCR_BT709,
2069 DRM_COLOR_YCBCR_LIMITED_RANGE);
2070
2071 drm_plane_create_alpha_property(&plane->base);
2072 drm_plane_create_blend_mode_property(&plane->base,
2073 BIT(DRM_MODE_BLEND_PIXEL_NONE) |
2074 BIT(DRM_MODE_BLEND_PREMULTI) |
2075 BIT(DRM_MODE_BLEND_COVERAGE));
2076
2077 drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
2078
2079 return plane;
2080
2081fail:
2082 intel_plane_free(plane);
2083
2084 return ERR_PTR(ret);
2085}
2086
2087struct intel_plane *
2088intel_sprite_plane_create(struct drm_i915_private *dev_priv,
2089 enum pipe pipe, int sprite)
2090{
2091 struct intel_plane *plane;
2092 const struct drm_plane_funcs *plane_funcs;
2093 unsigned long possible_crtcs;
2094 unsigned int supported_rotations;
2095 const u64 *modifiers;
2096 const u32 *formats;
2097 int num_formats;
2098 int ret;
2099
2100 if (INTEL_GEN(dev_priv) >= 9)
2101 return skl_universal_plane_create(dev_priv, pipe,
2102 PLANE_SPRITE0 + sprite);
2103
2104 plane = intel_plane_alloc();
2105 if (IS_ERR(plane))
2106 return plane;
2107
2108 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
2109 plane->max_stride = i9xx_plane_max_stride;
2110 plane->update_plane = vlv_update_plane;
2111 plane->disable_plane = vlv_disable_plane;
2112 plane->get_hw_state = vlv_plane_get_hw_state;
2113 plane->check_plane = vlv_sprite_check;
2114
2115 formats = vlv_plane_formats;
2116 num_formats = ARRAY_SIZE(vlv_plane_formats);
1807 modifiers = i9xx_plane_format_modifiers; 2117 modifiers = i9xx_plane_format_modifiers;
1808 2118
1809 plane_funcs = &vlv_sprite_funcs; 2119 plane_funcs = &vlv_sprite_funcs;
1810 } else if (INTEL_GEN(dev_priv) >= 7) { 2120 } else if (INTEL_GEN(dev_priv) >= 7) {
1811 intel_plane->max_stride = g4x_sprite_max_stride; 2121 plane->max_stride = g4x_sprite_max_stride;
1812 intel_plane->update_plane = ivb_update_plane; 2122 plane->update_plane = ivb_update_plane;
1813 intel_plane->disable_plane = ivb_disable_plane; 2123 plane->disable_plane = ivb_disable_plane;
1814 intel_plane->get_hw_state = ivb_plane_get_hw_state; 2124 plane->get_hw_state = ivb_plane_get_hw_state;
1815 intel_plane->check_plane = g4x_sprite_check; 2125 plane->check_plane = g4x_sprite_check;
1816 2126
1817 plane_formats = snb_plane_formats; 2127 formats = snb_plane_formats;
1818 num_plane_formats = ARRAY_SIZE(snb_plane_formats); 2128 num_formats = ARRAY_SIZE(snb_plane_formats);
1819 modifiers = i9xx_plane_format_modifiers; 2129 modifiers = i9xx_plane_format_modifiers;
1820 2130
1821 plane_funcs = &snb_sprite_funcs; 2131 plane_funcs = &snb_sprite_funcs;
1822 } else { 2132 } else {
1823 intel_plane->max_stride = g4x_sprite_max_stride; 2133 plane->max_stride = g4x_sprite_max_stride;
1824 intel_plane->update_plane = g4x_update_plane; 2134 plane->update_plane = g4x_update_plane;
1825 intel_plane->disable_plane = g4x_disable_plane; 2135 plane->disable_plane = g4x_disable_plane;
1826 intel_plane->get_hw_state = g4x_plane_get_hw_state; 2136 plane->get_hw_state = g4x_plane_get_hw_state;
1827 intel_plane->check_plane = g4x_sprite_check; 2137 plane->check_plane = g4x_sprite_check;
1828 2138
1829 modifiers = i9xx_plane_format_modifiers; 2139 modifiers = i9xx_plane_format_modifiers;
1830 if (IS_GEN6(dev_priv)) { 2140 if (IS_GEN6(dev_priv)) {
1831 plane_formats = snb_plane_formats; 2141 formats = snb_plane_formats;
1832 num_plane_formats = ARRAY_SIZE(snb_plane_formats); 2142 num_formats = ARRAY_SIZE(snb_plane_formats);
1833 2143
1834 plane_funcs = &snb_sprite_funcs; 2144 plane_funcs = &snb_sprite_funcs;
1835 } else { 2145 } else {
1836 plane_formats = g4x_plane_formats; 2146 formats = g4x_plane_formats;
1837 num_plane_formats = ARRAY_SIZE(g4x_plane_formats); 2147 num_formats = ARRAY_SIZE(g4x_plane_formats);
1838 2148
1839 plane_funcs = &g4x_sprite_funcs; 2149 plane_funcs = &g4x_sprite_funcs;
1840 } 2150 }
1841 } 2151 }
1842 2152
1843 if (INTEL_GEN(dev_priv) >= 9) { 2153 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
1844 supported_rotations =
1845 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
1846 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
1847 } else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
1848 supported_rotations = 2154 supported_rotations =
1849 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 | 2155 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
1850 DRM_MODE_REFLECT_X; 2156 DRM_MODE_REFLECT_X;
@@ -1853,35 +2159,25 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
1853 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180; 2159 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
1854 } 2160 }
1855 2161
1856 intel_plane->pipe = pipe; 2162 plane->pipe = pipe;
1857 intel_plane->i9xx_plane = plane; 2163 plane->id = PLANE_SPRITE0 + sprite;
1858 intel_plane->id = PLANE_SPRITE0 + plane; 2164 plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
1859 intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, intel_plane->id);
1860 2165
1861 possible_crtcs = (1 << pipe); 2166 possible_crtcs = BIT(pipe);
1862 2167
1863 if (INTEL_GEN(dev_priv) >= 9) 2168 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
1864 ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base, 2169 possible_crtcs, plane_funcs,
1865 possible_crtcs, plane_funcs, 2170 formats, num_formats, modifiers,
1866 plane_formats, num_plane_formats, 2171 DRM_PLANE_TYPE_OVERLAY,
1867 modifiers, 2172 "sprite %c", sprite_name(pipe, sprite));
1868 DRM_PLANE_TYPE_OVERLAY,
1869 "plane %d%c", plane + 2, pipe_name(pipe));
1870 else
1871 ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base,
1872 possible_crtcs, plane_funcs,
1873 plane_formats, num_plane_formats,
1874 modifiers,
1875 DRM_PLANE_TYPE_OVERLAY,
1876 "sprite %c", sprite_name(pipe, plane));
1877 if (ret) 2173 if (ret)
1878 goto fail; 2174 goto fail;
1879 2175
1880 drm_plane_create_rotation_property(&intel_plane->base, 2176 drm_plane_create_rotation_property(&plane->base,
1881 DRM_MODE_ROTATE_0, 2177 DRM_MODE_ROTATE_0,
1882 supported_rotations); 2178 supported_rotations);
1883 2179
1884 drm_plane_create_color_properties(&intel_plane->base, 2180 drm_plane_create_color_properties(&plane->base,
1885 BIT(DRM_COLOR_YCBCR_BT601) | 2181 BIT(DRM_COLOR_YCBCR_BT601) |
1886 BIT(DRM_COLOR_YCBCR_BT709), 2182 BIT(DRM_COLOR_YCBCR_BT709),
1887 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) | 2183 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
@@ -1889,13 +2185,12 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
1889 DRM_COLOR_YCBCR_BT709, 2185 DRM_COLOR_YCBCR_BT709,
1890 DRM_COLOR_YCBCR_LIMITED_RANGE); 2186 DRM_COLOR_YCBCR_LIMITED_RANGE);
1891 2187
1892 drm_plane_helper_add(&intel_plane->base, &intel_plane_helper_funcs); 2188 drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
1893 2189
1894 return intel_plane; 2190 return plane;
1895 2191
1896fail: 2192fail:
1897 kfree(state); 2193 intel_plane_free(plane);
1898 kfree(intel_plane);
1899 2194
1900 return ERR_PTR(ret); 2195 return ERR_PTR(ret);
1901} 2196}
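
skl_universal_plane_create() now owns the full gen9+ plane setup, rotation, color, alpha, and blend properties included, and intel_sprite_plane_create() simply forwards to it for sprites. The primary plane presumably takes the same route; a hedged sketch of such a call site (the legacy helper name is hypothetical):

        /* Sketch: gen9+ primary planes go through the same constructor. */
        struct intel_plane *primary;

        if (INTEL_GEN(dev_priv) >= 9)
                primary = skl_universal_plane_create(dev_priv, pipe,
                                                     PLANE_PRIMARY);
        else
                primary = legacy_primary_plane_create(dev_priv, pipe); /* hypothetical */
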
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index b5b04cb892e9..860f306a23ba 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -885,6 +885,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
885 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) 885 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
886 return false; 886 return false;
887 887
888 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
888 adjusted_mode->crtc_clock = tv_mode->clock; 889 adjusted_mode->crtc_clock = tv_mode->clock;
889 DRM_DEBUG_KMS("forcing bpc to 8 for TV\n"); 890 DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
890 pipe_config->pipe_bpp = 8*3; 891 pipe_config->pipe_bpp = 8*3;
@@ -1377,17 +1378,10 @@ intel_tv_get_modes(struct drm_connector *connector)
1377 return count; 1378 return count;
1378} 1379}
1379 1380
1380static void
1381intel_tv_destroy(struct drm_connector *connector)
1382{
1383 drm_connector_cleanup(connector);
1384 kfree(connector);
1385}
1386
1387static const struct drm_connector_funcs intel_tv_connector_funcs = { 1381static const struct drm_connector_funcs intel_tv_connector_funcs = {
1388 .late_register = intel_connector_register, 1382 .late_register = intel_connector_register,
1389 .early_unregister = intel_connector_unregister, 1383 .early_unregister = intel_connector_unregister,
1390 .destroy = intel_tv_destroy, 1384 .destroy = intel_connector_destroy,
1391 .fill_modes = drm_helper_probe_single_connector_modes, 1385 .fill_modes = drm_helper_probe_single_connector_modes,
1392 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 1386 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
1393 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, 1387 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index b1b3e81b6e24..b34c318b238d 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -376,7 +376,7 @@ int intel_uc_init_hw(struct drm_i915_private *i915)
376 376
377 intel_guc_init_params(guc); 377 intel_guc_init_params(guc);
378 ret = intel_guc_fw_upload(guc); 378 ret = intel_guc_fw_upload(guc);
379 if (ret == 0 || ret != -EAGAIN) 379 if (ret == 0 || ret != -ETIMEDOUT)
380 break; 380 break;
381 381
382 DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and " 382 DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and "
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.h b/drivers/gpu/drm/i915/intel_uc_fw.h
index 87910aa83267..0e3bd580e267 100644
--- a/drivers/gpu/drm/i915/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/intel_uc_fw.h
@@ -115,9 +115,14 @@ static inline bool intel_uc_fw_is_selected(struct intel_uc_fw *uc_fw)
115 return uc_fw->path != NULL; 115 return uc_fw->path != NULL;
116} 116}
117 117
118static inline bool intel_uc_fw_is_loaded(struct intel_uc_fw *uc_fw)
119{
120 return uc_fw->load_status == INTEL_UC_FIRMWARE_SUCCESS;
121}
122
118static inline void intel_uc_fw_sanitize(struct intel_uc_fw *uc_fw) 123static inline void intel_uc_fw_sanitize(struct intel_uc_fw *uc_fw)
119{ 124{
120 if (uc_fw->load_status == INTEL_UC_FIRMWARE_SUCCESS) 125 if (intel_uc_fw_is_loaded(uc_fw))
121 uc_fw->load_status = INTEL_UC_FIRMWARE_PENDING; 126 uc_fw->load_status = INTEL_UC_FIRMWARE_PENDING;
122} 127}
123 128
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 3ad302c66254..9289515108c3 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1437,7 +1437,7 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
1437 FORCEWAKE_MEDIA_VEBOX_GEN11(i), 1437 FORCEWAKE_MEDIA_VEBOX_GEN11(i),
1438 FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i)); 1438 FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
1439 } 1439 }
1440 } else if (IS_GEN9(dev_priv) || IS_GEN10(dev_priv)) { 1440 } else if (IS_GEN10(dev_priv) || IS_GEN9(dev_priv)) {
1441 dev_priv->uncore.funcs.force_wake_get = 1441 dev_priv->uncore.funcs.force_wake_get =
1442 fw_domains_get_with_fallback; 1442 fw_domains_get_with_fallback;
1443 dev_priv->uncore.funcs.force_wake_put = fw_domains_put; 1443 dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
index bba98cf83cbd..bf3662ad5fed 100644
--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
@@ -326,6 +326,13 @@ enum vbt_gmbus_ddi {
326 ICL_DDC_BUS_PORT_4, 326 ICL_DDC_BUS_PORT_4,
327}; 327};
328 328
329#define DP_AUX_A 0x40
330#define DP_AUX_B 0x10
331#define DP_AUX_C 0x20
332#define DP_AUX_D 0x30
333#define DP_AUX_E 0x50
334#define DP_AUX_F 0x60
335
329#define VBT_DP_MAX_LINK_RATE_HBR3 0 336#define VBT_DP_MAX_LINK_RATE_HBR3 0
330#define VBT_DP_MAX_LINK_RATE_HBR2 1 337#define VBT_DP_MAX_LINK_RATE_HBR2 1
331#define VBT_DP_MAX_LINK_RATE_HBR 2 338#define VBT_DP_MAX_LINK_RATE_HBR 2
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index 4bcdeaf8d98f..ca1f78a42b17 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -823,18 +823,21 @@ static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
823 _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE)); 823 _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));
824 824
825 /* WaInPlaceDecompressionHang:icl */ 825 /* WaInPlaceDecompressionHang:icl */
826 I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | 826 I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
827 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); 827 I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
828 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
828 829
829 /* WaPipelineFlushCoherentLines:icl */ 830 /* WaPipelineFlushCoherentLines:icl */
830 I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | 831 I915_WRITE(GEN8_L3SQCREG4,
831 GEN8_LQSC_FLUSH_COHERENT_LINES); 832 I915_READ(GEN8_L3SQCREG4) |
833 GEN8_LQSC_FLUSH_COHERENT_LINES);
832 834
833 /* Wa_1405543622:icl 835 /* Wa_1405543622:icl
834 * Formerly known as WaGAPZPriorityScheme 836 * Formerly known as WaGAPZPriorityScheme
835 */ 837 */
836 I915_WRITE(GEN8_GARBCNTL, I915_READ(GEN8_GARBCNTL) | 838 I915_WRITE(GEN8_GARBCNTL,
837 GEN11_ARBITRATION_PRIO_ORDER_MASK); 839 I915_READ(GEN8_GARBCNTL) |
840 GEN11_ARBITRATION_PRIO_ORDER_MASK);
838 841
839 /* Wa_1604223664:icl 842 /* Wa_1604223664:icl
840 * Formerly known as WaL3BankAddressHashing 843 * Formerly known as WaL3BankAddressHashing
@@ -854,21 +857,24 @@ static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
854 /* Wa_1405733216:icl 857 /* Wa_1405733216:icl
855 * Formerly known as WaDisableCleanEvicts 858 * Formerly known as WaDisableCleanEvicts
856 */ 859 */
857 I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | 860 I915_WRITE(GEN8_L3SQCREG4,
858 GEN11_LQSC_CLEAN_EVICT_DISABLE); 861 I915_READ(GEN8_L3SQCREG4) |
862 GEN11_LQSC_CLEAN_EVICT_DISABLE);
859 863
860 /* Wa_1405766107:icl 864 /* Wa_1405766107:icl
861 * Formerly known as WaCL2SFHalfMaxAlloc 865 * Formerly known as WaCL2SFHalfMaxAlloc
862 */ 866 */
863 I915_WRITE(GEN11_LSN_UNSLCVC, I915_READ(GEN11_LSN_UNSLCVC) | 867 I915_WRITE(GEN11_LSN_UNSLCVC,
864 GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC | 868 I915_READ(GEN11_LSN_UNSLCVC) |
865 GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC); 869 GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
870 GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
866 871
867 /* Wa_220166154:icl 872 /* Wa_220166154:icl
868 * Formerly known as WaDisCtxReload 873 * Formerly known as WaDisCtxReload
869 */ 874 */
870 I915_WRITE(GAMW_ECO_DEV_RW_IA_REG, I915_READ(GAMW_ECO_DEV_RW_IA_REG) | 875 I915_WRITE(GEN8_GAMW_ECO_DEV_RW_IA,
871 GAMW_ECO_DEV_CTX_RELOAD_DISABLE); 876 I915_READ(GEN8_GAMW_ECO_DEV_RW_IA) |
877 GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
872 878
873 /* Wa_1405779004:icl (pre-prod) */ 879 /* Wa_1405779004:icl (pre-prod) */
874 if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0)) 880 if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
@@ -905,6 +911,13 @@ static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
905 I915_WRITE(GAMT_CHKN_BIT_REG, 911 I915_WRITE(GAMT_CHKN_BIT_REG,
906 I915_READ(GAMT_CHKN_BIT_REG) | 912 I915_READ(GAMT_CHKN_BIT_REG) |
907 GAMT_CHKN_DISABLE_L3_COH_PIPE); 913 GAMT_CHKN_DISABLE_L3_COH_PIPE);
914
915 /* Wa_1406609255:icl (pre-prod) */
916 if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_B0))
917 I915_WRITE(GEN7_SARCHKMD,
918 I915_READ(GEN7_SARCHKMD) |
919 GEN7_DISABLE_DEMAND_PREFETCH |
920 GEN7_DISABLE_SAMPLER_PREFETCH);
908} 921}
909 922
910void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv) 923void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)
@@ -941,7 +954,7 @@ struct whitelist {
941 954
942static void whitelist_reg(struct whitelist *w, i915_reg_t reg) 955static void whitelist_reg(struct whitelist *w, i915_reg_t reg)
943{ 956{
944 if (GEM_WARN_ON(w->count >= RING_MAX_NONPRIV_SLOTS)) 957 if (GEM_DEBUG_WARN_ON(w->count >= RING_MAX_NONPRIV_SLOTS))
945 return; 958 return;
946 959
947 w->reg[w->count++] = reg; 960 w->reg[w->count++] = reg;
@@ -1009,6 +1022,11 @@ static void cnl_whitelist_build(struct whitelist *w)
1009 1022
1010static void icl_whitelist_build(struct whitelist *w) 1023static void icl_whitelist_build(struct whitelist *w)
1011{ 1024{
1025 /* WaAllowUMDToModifyHalfSliceChicken7:icl */
1026 whitelist_reg(w, GEN9_HALF_SLICE_CHICKEN7);
1027
1028 /* WaAllowUMDToModifySamplerMode:icl */
1029 whitelist_reg(w, GEN10_SAMPLER_MODE);
1012} 1030}
1013 1031
1014static struct whitelist *whitelist_build(struct intel_engine_cs *engine, 1032static struct whitelist *whitelist_build(struct intel_engine_cs *engine,
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index 8d03f64eabd7..26c065c8d2c0 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -551,7 +551,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
551 err = igt_check_page_sizes(vma); 551 err = igt_check_page_sizes(vma);
552 552
553 if (vma->page_sizes.gtt != I915_GTT_PAGE_SIZE_4K) { 553 if (vma->page_sizes.gtt != I915_GTT_PAGE_SIZE_4K) {
554 pr_err("page_sizes.gtt=%u, expected %lu\n", 554 pr_err("page_sizes.gtt=%u, expected %llu\n",
555 vma->page_sizes.gtt, I915_GTT_PAGE_SIZE_4K); 555 vma->page_sizes.gtt, I915_GTT_PAGE_SIZE_4K);
556 err = -EINVAL; 556 err = -EINVAL;
557 } 557 }
@@ -1135,7 +1135,8 @@ static int igt_write_huge(struct i915_gem_context *ctx,
1135 n = 0; 1135 n = 0;
1136 for_each_engine(engine, i915, id) { 1136 for_each_engine(engine, i915, id) {
1137 if (!intel_engine_can_store_dword(engine)) { 1137 if (!intel_engine_can_store_dword(engine)) {
1138 pr_info("store-dword-imm not supported on engine=%u\n", id); 1138 pr_info("store-dword-imm not supported on engine=%u\n",
1139 id);
1139 continue; 1140 continue;
1140 } 1141 }
1141 engines[n++] = engine; 1142 engines[n++] = engine;
@@ -1167,17 +1168,30 @@ static int igt_write_huge(struct i915_gem_context *ctx,
1167 engine = engines[order[i] % n]; 1168 engine = engines[order[i] % n];
1168 i = (i + 1) % (n * I915_NUM_ENGINES); 1169 i = (i + 1) % (n * I915_NUM_ENGINES);
1169 1170
1170 err = __igt_write_huge(ctx, engine, obj, size, offset_low, dword, num + 1); 1171 /*
1172 * In order to utilize 64K pages we need to both pad the vma
1173 * size and ensure the vma offset is at the start of the pt
1174 * boundary, however to improve coverage we opt for testing both
1175 * aligned and unaligned offsets.
1176 */
1177 if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
1178 offset_low = round_down(offset_low,
1179 I915_GTT_PAGE_SIZE_2M);
1180
1181 err = __igt_write_huge(ctx, engine, obj, size, offset_low,
1182 dword, num + 1);
1171 if (err) 1183 if (err)
1172 break; 1184 break;
1173 1185
1174 err = __igt_write_huge(ctx, engine, obj, size, offset_high, dword, num + 1); 1186 err = __igt_write_huge(ctx, engine, obj, size, offset_high,
1187 dword, num + 1);
1175 if (err) 1188 if (err)
1176 break; 1189 break;
1177 1190
1178 if (igt_timeout(end_time, 1191 if (igt_timeout(end_time,
1179 "%s timed out on engine=%u, offset_low=%llx offset_high=%llx, max_page_size=%x\n", 1192 "%s timed out on engine=%u, offset_low=%llx offset_high=%llx, max_page_size=%x\n",
1180 __func__, engine->id, offset_low, offset_high, max_page_size)) 1193 __func__, engine->id, offset_low, offset_high,
1194 max_page_size))
1181 break; 1195 break;
1182 } 1196 }
1183 1197
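
The alignment comment in the hunk above is the crux of this change: the GPU can only use a 64K page entry when the object backs the whole 2M page-table range it sits in, so the test snaps offset_low down to the enclosing 2M boundary. A minimal userspace sketch of that arithmetic, assuming only the page-size constant from i915_gem_gtt.h (round_down() here is a stand-in for the kernel helper of the same name):

#include <stdint.h>
#include <stdio.h>

#define I915_GTT_PAGE_SIZE_2M (1ull << 21)	/* BIT_ULL(21), as in i915_gem_gtt.h */

/* Stand-in for the kernel's round_down(): clear the low bits of x. */
#define round_down(x, y) ((x) & ~((uint64_t)(y) - 1))

int main(void)
{
	uint64_t offset_low = 0x345678;	/* arbitrary, unaligned */

	printf("0x%llx -> 0x%llx\n",
	       (unsigned long long)offset_low,
	       (unsigned long long)round_down(offset_low, I915_GTT_PAGE_SIZE_2M));
	return 0;
}
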
@@ -1436,7 +1450,7 @@ static int igt_ppgtt_pin_update(void *arg)
1436 * huge-gtt-pages. 1450 * huge-gtt-pages.
1437 */ 1451 */
1438 1452
1439 if (!USES_FULL_48BIT_PPGTT(dev_priv)) { 1453 if (!HAS_FULL_48BIT_PPGTT(dev_priv)) {
1440 pr_info("48b PPGTT not supported, skipping\n"); 1454 pr_info("48b PPGTT not supported, skipping\n");
1441 return 0; 1455 return 0;
1442 } 1456 }
@@ -1687,10 +1701,9 @@ int i915_gem_huge_page_mock_selftests(void)
1687 SUBTEST(igt_mock_ppgtt_huge_fill), 1701 SUBTEST(igt_mock_ppgtt_huge_fill),
1688 SUBTEST(igt_mock_ppgtt_64K), 1702 SUBTEST(igt_mock_ppgtt_64K),
1689 }; 1703 };
1690 int saved_ppgtt = i915_modparams.enable_ppgtt;
1691 struct drm_i915_private *dev_priv; 1704 struct drm_i915_private *dev_priv;
1692 struct pci_dev *pdev;
1693 struct i915_hw_ppgtt *ppgtt; 1705 struct i915_hw_ppgtt *ppgtt;
1706 struct pci_dev *pdev;
1694 int err; 1707 int err;
1695 1708
1696 dev_priv = mock_gem_device(); 1709 dev_priv = mock_gem_device();
@@ -1698,7 +1711,7 @@ int i915_gem_huge_page_mock_selftests(void)
1698 return -ENOMEM; 1711 return -ENOMEM;
1699 1712
1700 /* Pretend to be a device which supports the 48b PPGTT */ 1713 /* Pretend to be a device which supports the 48b PPGTT */
1701 i915_modparams.enable_ppgtt = 3; 1714 mkwrite_device_info(dev_priv)->ppgtt = INTEL_PPGTT_FULL_4LVL;
1702 1715
1703 pdev = dev_priv->drm.pdev; 1716 pdev = dev_priv->drm.pdev;
1704 dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(39)); 1717 dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(39));
@@ -1731,9 +1744,6 @@ out_close:
1731 1744
1732out_unlock: 1745out_unlock:
1733 mutex_unlock(&dev_priv->drm.struct_mutex); 1746 mutex_unlock(&dev_priv->drm.struct_mutex);
1734
1735 i915_modparams.enable_ppgtt = saved_ppgtt;
1736
1737 drm_dev_put(&dev_priv->drm); 1747 drm_dev_put(&dev_priv->drm);
1738 1748
1739 return err; 1749 return err;
@@ -1753,7 +1763,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
1753 struct i915_gem_context *ctx; 1763 struct i915_gem_context *ctx;
1754 int err; 1764 int err;
1755 1765
1756 if (!USES_PPGTT(dev_priv)) { 1766 if (!HAS_PPGTT(dev_priv)) {
1757 pr_info("PPGTT not supported, skipping live-selftests\n"); 1767 pr_info("PPGTT not supported, skipping live-selftests\n");
1758 return 0; 1768 return 0;
1759 } 1769 }
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index 76df25aa90c9..7d82043aff10 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -39,7 +39,8 @@ struct live_test {
39 const char *func; 39 const char *func;
40 const char *name; 40 const char *name;
41 41
42 unsigned int reset_count; 42 unsigned int reset_global;
43 unsigned int reset_engine[I915_NUM_ENGINES];
43}; 44};
44 45
45static int begin_live_test(struct live_test *t, 46static int begin_live_test(struct live_test *t,
@@ -47,6 +48,8 @@ static int begin_live_test(struct live_test *t,
47 const char *func, 48 const char *func,
48 const char *name) 49 const char *name)
49{ 50{
51 struct intel_engine_cs *engine;
52 enum intel_engine_id id;
50 int err; 53 int err;
51 54
52 t->i915 = i915; 55 t->i915 = i915;
@@ -63,7 +66,11 @@ static int begin_live_test(struct live_test *t,
63 } 66 }
64 67
65 i915->gpu_error.missed_irq_rings = 0; 68 i915->gpu_error.missed_irq_rings = 0;
66 t->reset_count = i915_reset_count(&i915->gpu_error); 69 t->reset_global = i915_reset_count(&i915->gpu_error);
70
71 for_each_engine(engine, i915, id)
72 t->reset_engine[id] =
73 i915_reset_engine_count(&i915->gpu_error, engine);
67 74
68 return 0; 75 return 0;
69} 76}
@@ -71,14 +78,28 @@ static int begin_live_test(struct live_test *t,
71static int end_live_test(struct live_test *t) 78static int end_live_test(struct live_test *t)
72{ 79{
73 struct drm_i915_private *i915 = t->i915; 80 struct drm_i915_private *i915 = t->i915;
81 struct intel_engine_cs *engine;
82 enum intel_engine_id id;
74 83
75 if (igt_flush_test(i915, I915_WAIT_LOCKED)) 84 if (igt_flush_test(i915, I915_WAIT_LOCKED))
76 return -EIO; 85 return -EIO;
77 86
78 if (t->reset_count != i915_reset_count(&i915->gpu_error)) { 87 if (t->reset_global != i915_reset_count(&i915->gpu_error)) {
79 pr_err("%s(%s): GPU was reset %d times!\n", 88 pr_err("%s(%s): GPU was reset %d times!\n",
80 t->func, t->name, 89 t->func, t->name,
81 i915_reset_count(&i915->gpu_error) - t->reset_count); 90 i915_reset_count(&i915->gpu_error) - t->reset_global);
91 return -EIO;
92 }
93
94 for_each_engine(engine, i915, id) {
95 if (t->reset_engine[id] ==
96 i915_reset_engine_count(&i915->gpu_error, engine))
97 continue;
98
99 pr_err("%s(%s): engine '%s' was reset %d times!\n",
100 t->func, t->name, engine->name,
101 i915_reset_engine_count(&i915->gpu_error, engine) -
102 t->reset_engine[id]);
82 return -EIO; 103 return -EIO;
83 } 104 }
84 105
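
The calling pattern for the extended live_test bookkeeping is unchanged: bracket the test body with begin_live_test()/end_live_test(), and the end check now flags engine-local resets as well as full-GPU ones. A minimal sketch of a test using it (the body is a placeholder; the helper names are taken from this patch):

static int example_live_test(struct drm_i915_private *i915)
{
	struct live_test t;
	int err;

	mutex_lock(&i915->drm.struct_mutex);

	err = begin_live_test(&t, i915, __func__, "");
	if (err)
		goto out_unlock;

	/* ... submit and wait for requests ... */

out_unlock:
	if (end_live_test(&t)) /* catches global and per-engine resets */
		err = -EIO;
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
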
@@ -531,11 +552,11 @@ static int igt_ctx_exec(void *arg)
531{ 552{
532 struct drm_i915_private *i915 = arg; 553 struct drm_i915_private *i915 = arg;
533 struct drm_i915_gem_object *obj = NULL; 554 struct drm_i915_gem_object *obj = NULL;
555 unsigned long ncontexts, ndwords, dw;
534 struct drm_file *file; 556 struct drm_file *file;
535 IGT_TIMEOUT(end_time); 557 IGT_TIMEOUT(end_time);
536 LIST_HEAD(objects); 558 LIST_HEAD(objects);
537 unsigned long ncontexts, ndwords, dw; 559 struct live_test t;
538 bool first_shared_gtt = true;
539 int err = -ENODEV; 560 int err = -ENODEV;
540 561
541 /* 562 /*
@@ -553,6 +574,10 @@ static int igt_ctx_exec(void *arg)
553 574
554 mutex_lock(&i915->drm.struct_mutex); 575 mutex_lock(&i915->drm.struct_mutex);
555 576
577 err = begin_live_test(&t, i915, __func__, "");
578 if (err)
579 goto out_unlock;
580
556 ncontexts = 0; 581 ncontexts = 0;
557 ndwords = 0; 582 ndwords = 0;
558 dw = 0; 583 dw = 0;
@@ -561,12 +586,7 @@ static int igt_ctx_exec(void *arg)
561 struct i915_gem_context *ctx; 586 struct i915_gem_context *ctx;
562 unsigned int id; 587 unsigned int id;
563 588
564 if (first_shared_gtt) { 589 ctx = i915_gem_create_context(i915, file->driver_priv);
565 ctx = __create_hw_context(i915, file->driver_priv);
566 first_shared_gtt = false;
567 } else {
568 ctx = i915_gem_create_context(i915, file->driver_priv);
569 }
570 if (IS_ERR(ctx)) { 590 if (IS_ERR(ctx)) {
571 err = PTR_ERR(ctx); 591 err = PTR_ERR(ctx);
572 goto out_unlock; 592 goto out_unlock;
@@ -622,7 +642,7 @@ static int igt_ctx_exec(void *arg)
622 } 642 }
623 643
624out_unlock: 644out_unlock:
625 if (igt_flush_test(i915, I915_WAIT_LOCKED)) 645 if (end_live_test(&t))
626 err = -EIO; 646 err = -EIO;
627 mutex_unlock(&i915->drm.struct_mutex); 647 mutex_unlock(&i915->drm.struct_mutex);
628 648
@@ -634,13 +654,14 @@ static int igt_ctx_readonly(void *arg)
634{ 654{
635 struct drm_i915_private *i915 = arg; 655 struct drm_i915_private *i915 = arg;
636 struct drm_i915_gem_object *obj = NULL; 656 struct drm_i915_gem_object *obj = NULL;
657 struct i915_gem_context *ctx;
658 struct i915_hw_ppgtt *ppgtt;
659 unsigned long ndwords, dw;
637 struct drm_file *file; 660 struct drm_file *file;
638 I915_RND_STATE(prng); 661 I915_RND_STATE(prng);
639 IGT_TIMEOUT(end_time); 662 IGT_TIMEOUT(end_time);
640 LIST_HEAD(objects); 663 LIST_HEAD(objects);
641 struct i915_gem_context *ctx; 664 struct live_test t;
642 struct i915_hw_ppgtt *ppgtt;
643 unsigned long ndwords, dw;
644 int err = -ENODEV; 665 int err = -ENODEV;
645 666
646 /* 667 /*
@@ -655,6 +676,10 @@ static int igt_ctx_readonly(void *arg)
655 676
656 mutex_lock(&i915->drm.struct_mutex); 677 mutex_lock(&i915->drm.struct_mutex);
657 678
679 err = begin_live_test(&t, i915, __func__, "");
680 if (err)
681 goto out_unlock;
682
658 ctx = i915_gem_create_context(i915, file->driver_priv); 683 ctx = i915_gem_create_context(i915, file->driver_priv);
659 if (IS_ERR(ctx)) { 684 if (IS_ERR(ctx)) {
660 err = PTR_ERR(ctx); 685 err = PTR_ERR(ctx);
@@ -727,7 +752,324 @@ static int igt_ctx_readonly(void *arg)
727 } 752 }
728 753
729out_unlock: 754out_unlock:
730 if (igt_flush_test(i915, I915_WAIT_LOCKED)) 755 if (end_live_test(&t))
756 err = -EIO;
757 mutex_unlock(&i915->drm.struct_mutex);
758
759 mock_file_free(i915, file);
760 return err;
761}
762
763static int check_scratch(struct i915_gem_context *ctx, u64 offset)
764{
765 struct drm_mm_node *node =
766 __drm_mm_interval_first(&ctx->ppgtt->vm.mm,
767 offset, offset + sizeof(u32) - 1);
768 if (!node || node->start > offset)
769 return 0;
770
771 GEM_BUG_ON(offset >= node->start + node->size);
772
773 pr_err("Target offset 0x%08x_%08x overlaps with a node in the mm!\n",
774 upper_32_bits(offset), lower_32_bits(offset));
775 return -EINVAL;
776}
777
778static int write_to_scratch(struct i915_gem_context *ctx,
779 struct intel_engine_cs *engine,
780 u64 offset, u32 value)
781{
782 struct drm_i915_private *i915 = ctx->i915;
783 struct drm_i915_gem_object *obj;
784 struct i915_request *rq;
785 struct i915_vma *vma;
786 u32 *cmd;
787 int err;
788
789 GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);
790
791 obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
792 if (IS_ERR(obj))
793 return PTR_ERR(obj);
794
795 cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
796 if (IS_ERR(cmd)) {
797 err = PTR_ERR(cmd);
798 goto err;
799 }
800
801 *cmd++ = MI_STORE_DWORD_IMM_GEN4;
802 if (INTEL_GEN(i915) >= 8) {
803 *cmd++ = lower_32_bits(offset);
804 *cmd++ = upper_32_bits(offset);
805 } else {
806 *cmd++ = 0;
807 *cmd++ = offset;
808 }
809 *cmd++ = value;
810 *cmd = MI_BATCH_BUFFER_END;
811 i915_gem_object_unpin_map(obj);
812
813 err = i915_gem_object_set_to_gtt_domain(obj, false);
814 if (err)
815 goto err;
816
817 vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
818 if (IS_ERR(vma)) {
819 err = PTR_ERR(vma);
820 goto err;
821 }
822
823 err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
824 if (err)
825 goto err;
826
827 err = check_scratch(ctx, offset);
828 if (err)
829 goto err_unpin;
830
831 rq = i915_request_alloc(engine, ctx);
832 if (IS_ERR(rq)) {
833 err = PTR_ERR(rq);
834 goto err_unpin;
835 }
836
837 err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
838 if (err)
839 goto err_request;
840
841 err = i915_vma_move_to_active(vma, rq, 0);
842 if (err)
843 goto skip_request;
844
845 i915_gem_object_set_active_reference(obj);
846 i915_vma_unpin(vma);
847 i915_vma_close(vma);
848
849 i915_request_add(rq);
850
851 return 0;
852
853skip_request:
854 i915_request_skip(rq, err);
855err_request:
856 i915_request_add(rq);
857err_unpin:
858 i915_vma_unpin(vma);
859err:
860 i915_gem_object_put(obj);
861 return err;
862}
863
864static int read_from_scratch(struct i915_gem_context *ctx,
865 struct intel_engine_cs *engine,
866 u64 offset, u32 *value)
867{
868 struct drm_i915_private *i915 = ctx->i915;
869 struct drm_i915_gem_object *obj;
870 const u32 RCS_GPR0 = 0x2600; /* not all engines have their own GPR! */
871 const u32 result = 0x100;
872 struct i915_request *rq;
873 struct i915_vma *vma;
874 u32 *cmd;
875 int err;
876
877 GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);
878
879 obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
880 if (IS_ERR(obj))
881 return PTR_ERR(obj);
882
883 cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
884 if (IS_ERR(cmd)) {
885 err = PTR_ERR(cmd);
886 goto err;
887 }
888
889 memset(cmd, POISON_INUSE, PAGE_SIZE);
890 if (INTEL_GEN(i915) >= 8) {
891 *cmd++ = MI_LOAD_REGISTER_MEM_GEN8;
892 *cmd++ = RCS_GPR0;
893 *cmd++ = lower_32_bits(offset);
894 *cmd++ = upper_32_bits(offset);
895 *cmd++ = MI_STORE_REGISTER_MEM_GEN8;
896 *cmd++ = RCS_GPR0;
897 *cmd++ = result;
898 *cmd++ = 0;
899 } else {
900 *cmd++ = MI_LOAD_REGISTER_MEM;
901 *cmd++ = RCS_GPR0;
902 *cmd++ = offset;
903 *cmd++ = MI_STORE_REGISTER_MEM;
904 *cmd++ = RCS_GPR0;
905 *cmd++ = result;
906 }
907 *cmd = MI_BATCH_BUFFER_END;
908 i915_gem_object_unpin_map(obj);
909
910 err = i915_gem_object_set_to_gtt_domain(obj, false);
911 if (err)
912 goto err;
913
914 vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
915 if (IS_ERR(vma)) {
916 err = PTR_ERR(vma);
917 goto err;
918 }
919
920 err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
921 if (err)
922 goto err;
923
924 err = check_scratch(ctx, offset);
925 if (err)
926 goto err_unpin;
927
928 rq = i915_request_alloc(engine, ctx);
929 if (IS_ERR(rq)) {
930 err = PTR_ERR(rq);
931 goto err_unpin;
932 }
933
934 err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
935 if (err)
936 goto err_request;
937
938 err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
939 if (err)
940 goto skip_request;
941
942 i915_vma_unpin(vma);
943 i915_vma_close(vma);
944
945 i915_request_add(rq);
946
947 err = i915_gem_object_set_to_cpu_domain(obj, false);
948 if (err)
949 goto err;
950
951 cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
952 if (IS_ERR(cmd)) {
953 err = PTR_ERR(cmd);
954 goto err;
955 }
956
957 *value = cmd[result / sizeof(*cmd)];
958 i915_gem_object_unpin_map(obj);
959 i915_gem_object_put(obj);
960
961 return 0;
962
963skip_request:
964 i915_request_skip(rq, err);
965err_request:
966 i915_request_add(rq);
967err_unpin:
968 i915_vma_unpin(vma);
969err:
970 i915_gem_object_put(obj);
971 return err;
972}
973
974static int igt_vm_isolation(void *arg)
975{
976 struct drm_i915_private *i915 = arg;
977 struct i915_gem_context *ctx_a, *ctx_b;
978 struct intel_engine_cs *engine;
979 struct drm_file *file;
980 I915_RND_STATE(prng);
981 unsigned long count;
982 struct live_test t;
983 unsigned int id;
984 u64 vm_total;
985 int err;
986
987 if (INTEL_GEN(i915) < 7)
988 return 0;
989
990 /*
991 * The simple goal here is that a write into one context is not
992 * observed in a second (separate page tables and scratch).
993 */
994
995 file = mock_file(i915);
996 if (IS_ERR(file))
997 return PTR_ERR(file);
998
999 mutex_lock(&i915->drm.struct_mutex);
1000
1001 err = begin_live_test(&t, i915, __func__, "");
1002 if (err)
1003 goto out_unlock;
1004
1005 ctx_a = i915_gem_create_context(i915, file->driver_priv);
1006 if (IS_ERR(ctx_a)) {
1007 err = PTR_ERR(ctx_a);
1008 goto out_unlock;
1009 }
1010
1011 ctx_b = i915_gem_create_context(i915, file->driver_priv);
1012 if (IS_ERR(ctx_b)) {
1013 err = PTR_ERR(ctx_b);
1014 goto out_unlock;
1015 }
1016
1017 /* We can only test VM isolation if the VMs are distinct */
1018 if (ctx_a->ppgtt == ctx_b->ppgtt)
1019 goto out_unlock;
1020
1021 vm_total = ctx_a->ppgtt->vm.total;
1022 GEM_BUG_ON(ctx_b->ppgtt->vm.total != vm_total);
1023 vm_total -= I915_GTT_PAGE_SIZE;
1024
1025 intel_runtime_pm_get(i915);
1026
1027 count = 0;
1028 for_each_engine(engine, i915, id) {
1029 IGT_TIMEOUT(end_time);
1030 unsigned long this = 0;
1031
1032 if (!intel_engine_can_store_dword(engine))
1033 continue;
1034
1035 while (!__igt_timeout(end_time, NULL)) {
1036 u32 value = 0xc5c5c5c5;
1037 u64 offset;
1038
1039 div64_u64_rem(i915_prandom_u64_state(&prng),
1040 vm_total, &offset);
1041 offset &= ~sizeof(u32);
1042 offset += I915_GTT_PAGE_SIZE;
1043
1044 err = write_to_scratch(ctx_a, engine,
1045 offset, 0xdeadbeef);
1046 if (err == 0)
1047 err = read_from_scratch(ctx_b, engine,
1048 offset, &value);
1049 if (err)
1050 goto out_rpm;
1051
1052 if (value) {
1053 pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n",
1054 engine->name, value,
1055 upper_32_bits(offset),
1056 lower_32_bits(offset),
1057 this);
1058 err = -EINVAL;
1059 goto out_rpm;
1060 }
1061
1062 this++;
1063 }
1064 count += this;
1065 }
1066 pr_info("Checked %lu scratch offsets across %d engines\n",
1067 count, INTEL_INFO(i915)->num_rings);
1068
1069out_rpm:
1070 intel_runtime_pm_put(i915);
1071out_unlock:
1072 if (end_live_test(&t))
731 err = -EIO; 1073 err = -EIO;
732 mutex_unlock(&i915->drm.struct_mutex); 1074 mutex_unlock(&i915->drm.struct_mutex);
733 1075
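
A note on the offset dance in the loop above: the randomized offset is aligned down to a u32 boundary with offset &= -sizeof(u32). The negated size is an all-ones mask with the two low bits clear (...fffc), the usual two's-complement align-down idiom; complementing it instead (~sizeof(u32)) would clear only bit 2 and let misaligned offsets through. A standalone check of the idiom:

#include <assert.h>
#include <stdint.h>

static uint64_t align_down_u32(uint64_t offset)
{
	/* -4 is ...fffffffc in two's complement: clears bits 0 and 1 */
	return offset & -(uint64_t)sizeof(uint32_t);
}

int main(void)
{
	assert(align_down_u32(0x1007) == 0x1004);
	assert(align_down_u32(0x1004) == 0x1004);
	return 0;
}
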
@@ -865,33 +1207,6 @@ out_unlock:
865 return err; 1207 return err;
866} 1208}
867 1209
868static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915)
869{
870 struct drm_i915_gem_object *obj;
871 int err;
872
873 err = i915_gem_init_aliasing_ppgtt(i915);
874 if (err)
875 return err;
876
877 list_for_each_entry(obj, &i915->mm.bound_list, mm.link) {
878 struct i915_vma *vma;
879
880 vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
881 if (IS_ERR(vma))
882 continue;
883
884 vma->flags &= ~I915_VMA_LOCAL_BIND;
885 }
886
887 return 0;
888}
889
890static void fake_aliasing_ppgtt_disable(struct drm_i915_private *i915)
891{
892 i915_gem_fini_aliasing_ppgtt(i915);
893}
894
895int i915_gem_context_mock_selftests(void) 1210int i915_gem_context_mock_selftests(void)
896{ 1211{
897 static const struct i915_subtest tests[] = { 1212 static const struct i915_subtest tests[] = {
@@ -917,32 +1232,11 @@ int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
917 SUBTEST(live_nop_switch), 1232 SUBTEST(live_nop_switch),
918 SUBTEST(igt_ctx_exec), 1233 SUBTEST(igt_ctx_exec),
919 SUBTEST(igt_ctx_readonly), 1234 SUBTEST(igt_ctx_readonly),
1235 SUBTEST(igt_vm_isolation),
920 }; 1236 };
921 bool fake_alias = false;
922 int err;
923 1237
924 if (i915_terminally_wedged(&dev_priv->gpu_error)) 1238 if (i915_terminally_wedged(&dev_priv->gpu_error))
925 return 0; 1239 return 0;
926 1240
927 /* Install a fake aliasing gtt for exercise */ 1241 return i915_subtests(tests, dev_priv);
928 if (USES_PPGTT(dev_priv) && !dev_priv->mm.aliasing_ppgtt) {
929 mutex_lock(&dev_priv->drm.struct_mutex);
930 err = fake_aliasing_ppgtt_enable(dev_priv);
931 mutex_unlock(&dev_priv->drm.struct_mutex);
932 if (err)
933 return err;
934
935 GEM_BUG_ON(!dev_priv->mm.aliasing_ppgtt);
936 fake_alias = true;
937 }
938
939 err = i915_subtests(tests, dev_priv);
940
941 if (fake_alias) {
942 mutex_lock(&dev_priv->drm.struct_mutex);
943 fake_aliasing_ppgtt_disable(dev_priv);
944 mutex_unlock(&dev_priv->drm.struct_mutex);
945 }
946
947 return err;
948} 1242}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 128ad1cf0647..4365979d8222 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -351,7 +351,7 @@ static int igt_evict_contexts(void *arg)
351 * where the GTT space of the request is separate from the GGTT 351 * where the GTT space of the request is separate from the GGTT
352 * allocation required to build the request. 352 * allocation required to build the request.
353 */ 353 */
354 if (!USES_FULL_PPGTT(i915)) 354 if (!HAS_FULL_PPGTT(i915))
355 return 0; 355 return 0;
356 356
357 mutex_lock(&i915->drm.struct_mutex); 357 mutex_lock(&i915->drm.struct_mutex);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 8e2e269db97e..69fe86b30fbb 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -153,7 +153,7 @@ static int igt_ppgtt_alloc(void *arg)
153 153
154 /* Allocate a ppgtt and try to fill the entire range */ 154 /* Allocate a ppgtt and try to fill the entire range */
155 155
156 if (!USES_PPGTT(dev_priv)) 156 if (!HAS_PPGTT(dev_priv))
157 return 0; 157 return 0;
158 158
159 ppgtt = __hw_ppgtt_create(dev_priv); 159 ppgtt = __hw_ppgtt_create(dev_priv);
@@ -1001,7 +1001,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
1001 IGT_TIMEOUT(end_time); 1001 IGT_TIMEOUT(end_time);
1002 int err; 1002 int err;
1003 1003
1004 if (!USES_FULL_PPGTT(dev_priv)) 1004 if (!HAS_FULL_PPGTT(dev_priv))
1005 return 0; 1005 return 0;
1006 1006
1007 file = mock_file(dev_priv); 1007 file = mock_file(dev_priv);
@@ -1337,7 +1337,7 @@ static int igt_gtt_reserve(void *arg)
1337 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); 1337 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1338 if (vma->node.start != total || 1338 if (vma->node.start != total ||
1339 vma->node.size != 2*I915_GTT_PAGE_SIZE) { 1339 vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1340 pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %lx)\n", 1340 pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1341 vma->node.start, vma->node.size, 1341 vma->node.start, vma->node.size,
1342 total, 2*I915_GTT_PAGE_SIZE); 1342 total, 2*I915_GTT_PAGE_SIZE);
1343 err = -EINVAL; 1343 err = -EINVAL;
@@ -1386,7 +1386,7 @@ static int igt_gtt_reserve(void *arg)
1386 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); 1386 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1387 if (vma->node.start != total || 1387 if (vma->node.start != total ||
1388 vma->node.size != 2*I915_GTT_PAGE_SIZE) { 1388 vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1389 pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %lx)\n", 1389 pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1390 vma->node.start, vma->node.size, 1390 vma->node.start, vma->node.size,
1391 total, 2*I915_GTT_PAGE_SIZE); 1391 total, 2*I915_GTT_PAGE_SIZE);
1392 err = -EINVAL; 1392 err = -EINVAL;
@@ -1430,7 +1430,7 @@ static int igt_gtt_reserve(void *arg)
1430 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); 1430 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1431 if (vma->node.start != offset || 1431 if (vma->node.start != offset ||
1432 vma->node.size != 2*I915_GTT_PAGE_SIZE) { 1432 vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1433 pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %lx)\n", 1433 pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1434 vma->node.start, vma->node.size, 1434 vma->node.start, vma->node.size,
1435 offset, 2*I915_GTT_PAGE_SIZE); 1435 offset, 2*I915_GTT_PAGE_SIZE);
1436 err = -EINVAL; 1436 err = -EINVAL;
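
The three %lx -> %llx fixes in this file (and the %lu -> %llu fix in huge_pages.c earlier) are all the same bug: I915_GTT_PAGE_SIZE and friends are BIT_ULL() values, i.e. u64, and %lx only matches a 64-bit argument on LP64 targets, so 32-bit builds would pull garbage varargs. A userspace illustration of the width mismatch, using the standard portable macros:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t page_size = 1ull << 12;	/* like BIT_ULL(12) */

	/* PRIx64 (userspace) or "%llx" (kernel) match a u64 argument on
	 * both 32-bit and 64-bit builds; "%lx" is 32 bits wide on ILP32. */
	printf("expected (%" PRIx64 " + %" PRIx64 ")\n",
	       page_size, 2 * page_size);
	return 0;
}
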
diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c
index 0c0ab82b6228..32cba4cae31a 100644
--- a/drivers/gpu/drm/i915/selftests/intel_guc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_guc.c
@@ -159,6 +159,7 @@ static int igt_guc_clients(void *args)
159 * Get rid of clients created during driver load because the test will 159 * Get rid of clients created during driver load because the test will
160 * recreate them. 160 * recreate them.
161 */ 161 */
162 guc_clients_disable(guc);
162 guc_clients_destroy(guc); 163 guc_clients_destroy(guc);
163 if (guc->execbuf_client || guc->preempt_client) { 164 if (guc->execbuf_client || guc->preempt_client) {
164 pr_err("guc_clients_destroy lied!\n"); 165 pr_err("guc_clients_destroy lied!\n");
@@ -197,8 +198,8 @@ static int igt_guc_clients(void *args)
197 goto out; 198 goto out;
198 } 199 }
199 200
200 /* Now create the doorbells */ 201 /* Now enable the clients */
201 guc_clients_doorbell_init(guc); 202 guc_clients_enable(guc);
202 203
203 /* each client should now have received a doorbell */ 204 /* each client should now have received a doorbell */
204 if (!client_doorbell_in_sync(guc->execbuf_client) || 205 if (!client_doorbell_in_sync(guc->execbuf_client) ||
@@ -212,63 +213,17 @@ static int igt_guc_clients(void *args)
212 * Basic test - an attempt to reallocate a valid doorbell to the 213 * Basic test - an attempt to reallocate a valid doorbell to the
213 * client it is currently assigned should not cause a failure. 214 * client it is currently assigned should not cause a failure.
214 */ 215 */
215 err = guc_clients_doorbell_init(guc);
216 if (err)
217 goto out;
218
219 /*
220 * Negative test - a client with no doorbell (invalid db id).
221 * After destroying the doorbell, the db id is changed to
222 * GUC_DOORBELL_INVALID and the firmware will reject any attempt to
223 * allocate a doorbell with an invalid id (db has to be reserved before
224 * allocation).
225 */
226 destroy_doorbell(guc->execbuf_client);
227 if (client_doorbell_in_sync(guc->execbuf_client)) {
228 pr_err("destroy db did not work\n");
229 err = -EINVAL;
230 goto out;
231 }
232
233 unreserve_doorbell(guc->execbuf_client);
234
235 __create_doorbell(guc->execbuf_client);
236 err = __guc_allocate_doorbell(guc, guc->execbuf_client->stage_id);
237 if (err != -EIO) {
238 pr_err("unexpected (err = %d)", err);
239 goto out_db;
240 }
241
242 if (!available_dbs(guc, guc->execbuf_client->priority)) {
243 pr_err("doorbell not available when it should\n");
244 err = -EIO;
245 goto out_db;
246 }
247
248out_db:
249 /* clean after test */
250 __destroy_doorbell(guc->execbuf_client);
251 err = reserve_doorbell(guc->execbuf_client);
252 if (err) {
253 pr_err("failed to reserve back the doorbell back\n");
254 }
255 err = create_doorbell(guc->execbuf_client); 216 err = create_doorbell(guc->execbuf_client);
256 if (err) {
257 pr_err("recreate doorbell failed\n");
258 goto out;
259 }
260 217
261out: 218out:
262 /* 219 /*
263 * Leave clean state for other tests; the driver always destroys the 220 * Leave clean state for other tests; the driver always destroys the
264 * clients during unload. 221 * clients during unload.
265 */ 222 */
266 destroy_doorbell(guc->execbuf_client); 223 guc_clients_disable(guc);
267 if (guc->preempt_client)
268 destroy_doorbell(guc->preempt_client);
269 guc_clients_destroy(guc); 224 guc_clients_destroy(guc);
270 guc_clients_create(guc); 225 guc_clients_create(guc);
271 guc_clients_doorbell_init(guc); 226 guc_clients_enable(guc);
272unlock: 227unlock:
273 intel_runtime_pm_put(dev_priv); 228 intel_runtime_pm_put(dev_priv);
274 mutex_unlock(&dev_priv->drm.struct_mutex); 229 mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -352,7 +307,7 @@ static int igt_guc_doorbells(void *arg)
352 307
353 db_id = clients[i]->doorbell_id; 308 db_id = clients[i]->doorbell_id;
354 309
355 err = create_doorbell(clients[i]); 310 err = __guc_client_enable(clients[i]);
356 if (err) { 311 if (err) {
357 pr_err("[%d] Failed to create a doorbell\n", i); 312 pr_err("[%d] Failed to create a doorbell\n", i);
358 goto out; 313 goto out;
@@ -378,7 +333,7 @@ static int igt_guc_doorbells(void *arg)
378out: 333out:
379 for (i = 0; i < ATTEMPTS; i++) 334 for (i = 0; i < ATTEMPTS; i++)
380 if (!IS_ERR_OR_NULL(clients[i])) { 335 if (!IS_ERR_OR_NULL(clients[i])) {
381 destroy_doorbell(clients[i]); 336 __guc_client_disable(clients[i]);
382 guc_client_free(clients[i]); 337 guc_client_free(clients[i]);
383 } 338 }
384unlock: 339unlock:
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index db378226ac10..defe671130ab 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -76,7 +76,7 @@ static int hang_init(struct hang *h, struct drm_i915_private *i915)
76 h->seqno = memset(vaddr, 0xff, PAGE_SIZE); 76 h->seqno = memset(vaddr, 0xff, PAGE_SIZE);
77 77
78 vaddr = i915_gem_object_pin_map(h->obj, 78 vaddr = i915_gem_object_pin_map(h->obj,
79 HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC); 79 i915_coherent_map_type(i915));
80 if (IS_ERR(vaddr)) { 80 if (IS_ERR(vaddr)) {
81 err = PTR_ERR(vaddr); 81 err = PTR_ERR(vaddr);
82 goto err_unpin_hws; 82 goto err_unpin_hws;
@@ -234,7 +234,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
234 return ERR_CAST(obj); 234 return ERR_CAST(obj);
235 235
236 vaddr = i915_gem_object_pin_map(obj, 236 vaddr = i915_gem_object_pin_map(obj,
237 HAS_LLC(h->i915) ? I915_MAP_WB : I915_MAP_WC); 237 i915_coherent_map_type(h->i915));
238 if (IS_ERR(vaddr)) { 238 if (IS_ERR(vaddr)) {
239 i915_gem_object_put(obj); 239 i915_gem_object_put(obj);
240 return ERR_CAST(vaddr); 240 return ERR_CAST(vaddr);
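
Both hunks above swap the open-coded LLC check for the new i915_coherent_map_type() helper. Presumably (the helper is introduced elsewhere in this series, in i915_drv.h) it is the same ternary, centralised so every caller picks a CPU-coherent mapping the same way; a sketch of its likely shape:

static inline enum i915_map_type
i915_coherent_map_type(struct drm_i915_private *i915)
{
	/* Write-back is only coherent with the GPU when there is an LLC;
	 * otherwise fall back to write-combining. */
	return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
}
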
@@ -1150,6 +1150,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
1150 tsk = NULL; 1150 tsk = NULL;
1151 goto out_reset; 1151 goto out_reset;
1152 } 1152 }
1153 get_task_struct(tsk);
1153 1154
1154 wait_for_completion(&arg.completion); 1155 wait_for_completion(&arg.completion);
1155 1156
@@ -1172,6 +1173,8 @@ out_reset:
1172 /* The reset, even indirectly, should take less than 10ms. */ 1173 /* The reset, even indirectly, should take less than 10ms. */
1173 igt_wedge_on_timeout(&w, i915, HZ / 10 /* 100ms timeout*/) 1174 igt_wedge_on_timeout(&w, i915, HZ / 10 /* 100ms timeout*/)
1174 err = kthread_stop(tsk); 1175 err = kthread_stop(tsk);
1176
1177 put_task_struct(tsk);
1175 } 1178 }
1176 1179
1177 mutex_lock(&i915->drm.struct_mutex); 1180 mutex_lock(&i915->drm.struct_mutex);
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index 1aea7a8f2224..94fc0e5c8766 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -6,6 +6,7 @@
6 6
7#include "../i915_selftest.h" 7#include "../i915_selftest.h"
8#include "igt_flush_test.h" 8#include "igt_flush_test.h"
9#include "i915_random.h"
9 10
10#include "mock_context.h" 11#include "mock_context.h"
11 12
@@ -48,7 +49,7 @@ static int spinner_init(struct spinner *spin, struct drm_i915_private *i915)
48 } 49 }
49 spin->seqno = memset(vaddr, 0xff, PAGE_SIZE); 50 spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
50 51
51 mode = HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC; 52 mode = i915_coherent_map_type(i915);
52 vaddr = i915_gem_object_pin_map(spin->obj, mode); 53 vaddr = i915_gem_object_pin_map(spin->obj, mode);
53 if (IS_ERR(vaddr)) { 54 if (IS_ERR(vaddr)) {
54 err = PTR_ERR(vaddr); 55 err = PTR_ERR(vaddr);
@@ -291,12 +292,14 @@ static int live_preempt(void *arg)
291 ctx_hi = kernel_context(i915); 292 ctx_hi = kernel_context(i915);
292 if (!ctx_hi) 293 if (!ctx_hi)
293 goto err_spin_lo; 294 goto err_spin_lo;
294 ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY; 295 ctx_hi->sched.priority =
296 I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
295 297
296 ctx_lo = kernel_context(i915); 298 ctx_lo = kernel_context(i915);
297 if (!ctx_lo) 299 if (!ctx_lo)
298 goto err_ctx_hi; 300 goto err_ctx_hi;
299 ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY; 301 ctx_lo->sched.priority =
302 I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
300 303
301 for_each_engine(engine, i915, id) { 304 for_each_engine(engine, i915, id) {
302 struct i915_request *rq; 305 struct i915_request *rq;
@@ -417,7 +420,7 @@ static int live_late_preempt(void *arg)
417 goto err_wedged; 420 goto err_wedged;
418 } 421 }
419 422
420 attr.priority = I915_PRIORITY_MAX; 423 attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
421 engine->schedule(rq, &attr); 424 engine->schedule(rq, &attr);
422 425
423 if (!wait_for_spinner(&spin_hi, rq)) { 426 if (!wait_for_spinner(&spin_hi, rq)) {
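
The priority writes in these hunks now go through I915_USER_PRIORITY(), which comes from the i915_scheduler.h changes in this series. Its presumed shape is a left shift that reserves the low bits of the effective priority for internal boosts; the shift value below is illustrative only, not taken from the patch:

/* Illustrative only: the real definitions live in i915_scheduler.h. */
#define I915_USER_PRIORITY_SHIFT 2
#define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT)
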
@@ -573,6 +576,261 @@ err_unlock:
573 return err; 576 return err;
574} 577}
575 578
579static int random_range(struct rnd_state *rnd, int min, int max)
580{
581 return i915_prandom_u32_max_state(max - min, rnd) + min;
582}
583
584static int random_priority(struct rnd_state *rnd)
585{
586 return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX);
587}
588
589struct preempt_smoke {
590 struct drm_i915_private *i915;
591 struct i915_gem_context **contexts;
592 struct intel_engine_cs *engine;
593 struct drm_i915_gem_object *batch;
594 unsigned int ncontext;
595 struct rnd_state prng;
596 unsigned long count;
597};
598
599static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
600{
601 return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext,
602 &smoke->prng)];
603}
604
605static int smoke_submit(struct preempt_smoke *smoke,
606 struct i915_gem_context *ctx, int prio,
607 struct drm_i915_gem_object *batch)
608{
609 struct i915_request *rq;
610 struct i915_vma *vma = NULL;
611 int err = 0;
612
613 if (batch) {
614 vma = i915_vma_instance(batch, &ctx->ppgtt->vm, NULL);
615 if (IS_ERR(vma))
616 return PTR_ERR(vma);
617
618 err = i915_vma_pin(vma, 0, 0, PIN_USER);
619 if (err)
620 return err;
621 }
622
623 ctx->sched.priority = prio;
624
625 rq = i915_request_alloc(smoke->engine, ctx);
626 if (IS_ERR(rq)) {
627 err = PTR_ERR(rq);
628 goto unpin;
629 }
630
631 if (vma) {
632 err = rq->engine->emit_bb_start(rq,
633 vma->node.start,
634 PAGE_SIZE, 0);
635 if (!err)
636 err = i915_vma_move_to_active(vma, rq, 0);
637 }
638
639 i915_request_add(rq);
640
641unpin:
642 if (vma)
643 i915_vma_unpin(vma);
644
645 return err;
646}
647
648static int smoke_crescendo_thread(void *arg)
649{
650 struct preempt_smoke *smoke = arg;
651 IGT_TIMEOUT(end_time);
652 unsigned long count;
653
654 count = 0;
655 do {
656 struct i915_gem_context *ctx = smoke_context(smoke);
657 int err;
658
659 mutex_lock(&smoke->i915->drm.struct_mutex);
660 err = smoke_submit(smoke,
661 ctx, count % I915_PRIORITY_MAX,
662 smoke->batch);
663 mutex_unlock(&smoke->i915->drm.struct_mutex);
664 if (err)
665 return err;
666
667 count++;
668 } while (!__igt_timeout(end_time, NULL));
669
670 smoke->count = count;
671 return 0;
672}
673
674static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
675#define BATCH BIT(0)
676{
677 struct task_struct *tsk[I915_NUM_ENGINES] = {};
678 struct preempt_smoke arg[I915_NUM_ENGINES];
679 struct intel_engine_cs *engine;
680 enum intel_engine_id id;
681 unsigned long count;
682 int err = 0;
683
684 mutex_unlock(&smoke->i915->drm.struct_mutex);
685
686 for_each_engine(engine, smoke->i915, id) {
687 arg[id] = *smoke;
688 arg[id].engine = engine;
689 if (!(flags & BATCH))
690 arg[id].batch = NULL;
691 arg[id].count = 0;
692
693 tsk[id] = kthread_run(smoke_crescendo_thread, &arg,
694 "igt/smoke:%d", id);
695 if (IS_ERR(tsk[id])) {
696 err = PTR_ERR(tsk[id]);
697 break;
698 }
699 get_task_struct(tsk[id]);
700 }
701
702 count = 0;
703 for_each_engine(engine, smoke->i915, id) {
704 int status;
705
706 if (IS_ERR_OR_NULL(tsk[id]))
707 continue;
708
709 status = kthread_stop(tsk[id]);
710 if (status && !err)
711 err = status;
712
713 count += arg[id].count;
714
715 put_task_struct(tsk[id]);
716 }
717
718 mutex_lock(&smoke->i915->drm.struct_mutex);
719
720 pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
721 count, flags,
722 INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
723 return err;
724}
725
726static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
727{
728 enum intel_engine_id id;
729 IGT_TIMEOUT(end_time);
730 unsigned long count;
731
732 count = 0;
733 do {
734 for_each_engine(smoke->engine, smoke->i915, id) {
735 struct i915_gem_context *ctx = smoke_context(smoke);
736 int err;
737
738 err = smoke_submit(smoke,
739 ctx, random_priority(&smoke->prng),
740 flags & BATCH ? smoke->batch : NULL);
741 if (err)
742 return err;
743
744 count++;
745 }
746 } while (!__igt_timeout(end_time, NULL));
747
748 pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
749 count, flags,
750 INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
751 return 0;
752}
753
754static int live_preempt_smoke(void *arg)
755{
756 struct preempt_smoke smoke = {
757 .i915 = arg,
758 .prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
759 .ncontext = 1024,
760 };
761 const unsigned int phase[] = { 0, BATCH };
762 int err = -ENOMEM;
763 u32 *cs;
764 int n;
765
766 if (!HAS_LOGICAL_RING_PREEMPTION(smoke.i915))
767 return 0;
768
769 smoke.contexts = kmalloc_array(smoke.ncontext,
770 sizeof(*smoke.contexts),
771 GFP_KERNEL);
772 if (!smoke.contexts)
773 return -ENOMEM;
774
775 mutex_lock(&smoke.i915->drm.struct_mutex);
776 intel_runtime_pm_get(smoke.i915);
777
778 smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
779 if (IS_ERR(smoke.batch)) {
780 err = PTR_ERR(smoke.batch);
781 goto err_unlock;
782 }
783
784 cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
785 if (IS_ERR(cs)) {
786 err = PTR_ERR(cs);
787 goto err_batch;
788 }
789 for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
790 cs[n] = MI_ARB_CHECK;
791 cs[n] = MI_BATCH_BUFFER_END;
792 i915_gem_object_unpin_map(smoke.batch);
793
794 err = i915_gem_object_set_to_gtt_domain(smoke.batch, false);
795 if (err)
796 goto err_batch;
797
798 for (n = 0; n < smoke.ncontext; n++) {
799 smoke.contexts[n] = kernel_context(smoke.i915);
800 if (!smoke.contexts[n])
801 goto err_ctx;
802 }
803
804 for (n = 0; n < ARRAY_SIZE(phase); n++) {
805 err = smoke_crescendo(&smoke, phase[n]);
806 if (err)
807 goto err_ctx;
808
809 err = smoke_random(&smoke, phase[n]);
810 if (err)
811 goto err_ctx;
812 }
813
814err_ctx:
815 if (igt_flush_test(smoke.i915, I915_WAIT_LOCKED))
816 err = -EIO;
817
818 for (n = 0; n < smoke.ncontext; n++) {
819 if (!smoke.contexts[n])
820 break;
821 kernel_context_close(smoke.contexts[n]);
822 }
823
824err_batch:
825 i915_gem_object_put(smoke.batch);
826err_unlock:
827 intel_runtime_pm_put(smoke.i915);
828 mutex_unlock(&smoke.i915->drm.struct_mutex);
829 kfree(smoke.contexts);
830
831 return err;
832}
833
576int intel_execlists_live_selftests(struct drm_i915_private *i915) 834int intel_execlists_live_selftests(struct drm_i915_private *i915)
577{ 835{
578 static const struct i915_subtest tests[] = { 836 static const struct i915_subtest tests[] = {
@@ -580,6 +838,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
580 SUBTEST(live_preempt), 838 SUBTEST(live_preempt),
581 SUBTEST(live_late_preempt), 839 SUBTEST(live_late_preempt),
582 SUBTEST(live_preempt_hang), 840 SUBTEST(live_preempt_hang),
841 SUBTEST(live_preempt_smoke),
583 }; 842 };
584 843
585 if (!HAS_EXECLISTS(i915)) 844 if (!HAS_EXECLISTS(i915))
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index 22a73da45ad5..d0c44c18db42 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -200,7 +200,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
200 engine->base.submit_request = mock_submit_request; 200 engine->base.submit_request = mock_submit_request;
201 201
202 i915_timeline_init(i915, &engine->base.timeline, engine->base.name); 202 i915_timeline_init(i915, &engine->base.timeline, engine->base.name);
203 lockdep_set_subclass(&engine->base.timeline.lock, TIMELINE_ENGINE); 203 i915_timeline_set_subclass(&engine->base.timeline, TIMELINE_ENGINE);
204 204
205 intel_engine_init_breadcrumbs(&engine->base); 205 intel_engine_init_breadcrumbs(&engine->base);
206 engine->base.breadcrumbs.mock = true; /* prevent touching HW for irqs */ 206 engine->base.breadcrumbs.mock = true; /* prevent touching HW for irqs */
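
The mock engine now goes through i915_timeline_set_subclass() instead of calling lockdep_set_subclass() directly. At minimum the wrapper (added to i915_timeline.h by this series) forwards to lockdep; a sketch of the presumed shape, noting the real helper may also do extra lockdep bookkeeping:

static inline void
i915_timeline_set_subclass(struct i915_timeline *timeline,
			   unsigned int subclass)
{
	lockdep_set_subclass(&timeline->lock, subclass);
}
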
diff --git a/drivers/gpu/drm/i915/vlv_dsi.c b/drivers/gpu/drm/i915/vlv_dsi.c
index 435a2c35ee8c..361e962a7969 100644
--- a/drivers/gpu/drm/i915/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/vlv_dsi.c
@@ -206,39 +206,6 @@ static const struct mipi_dsi_host_ops intel_dsi_host_ops = {
206 .transfer = intel_dsi_host_transfer, 206 .transfer = intel_dsi_host_transfer,
207}; 207};
208 208
209static struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
210 enum port port)
211{
212 struct intel_dsi_host *host;
213 struct mipi_dsi_device *device;
214
215 host = kzalloc(sizeof(*host), GFP_KERNEL);
216 if (!host)
217 return NULL;
218
219 host->base.ops = &intel_dsi_host_ops;
220 host->intel_dsi = intel_dsi;
221 host->port = port;
222
223 /*
224 * We should call mipi_dsi_host_register(&host->base) here, but we don't
225 * have a host->dev, and we don't have OF stuff either. So just use the
226 * dsi framework as a library and hope for the best. Create the dsi
227 * devices by ourselves here too. Need to be careful though, because we
228 * don't initialize any of the driver model devices here.
229 */
230 device = kzalloc(sizeof(*device), GFP_KERNEL);
231 if (!device) {
232 kfree(host);
233 return NULL;
234 }
235
236 device->host = &host->base;
237 host->device = device;
238
239 return host;
240}
241
242/* 209/*
243 * send a video mode command 210 * send a video mode command
244 * 211 *
@@ -290,16 +257,6 @@ static void band_gap_reset(struct drm_i915_private *dev_priv)
290 mutex_unlock(&dev_priv->sb_lock); 257 mutex_unlock(&dev_priv->sb_lock);
291} 258}
292 259
293static inline bool is_vid_mode(struct intel_dsi *intel_dsi)
294{
295 return intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE;
296}
297
298static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
299{
300 return intel_dsi->operation_mode == INTEL_DSI_COMMAND_MODE;
301}
302
303static bool intel_dsi_compute_config(struct intel_encoder *encoder, 260static bool intel_dsi_compute_config(struct intel_encoder *encoder,
304 struct intel_crtc_state *pipe_config, 261 struct intel_crtc_state *pipe_config,
305 struct drm_connector_state *conn_state) 262 struct drm_connector_state *conn_state)
@@ -314,6 +271,7 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
314 int ret; 271 int ret;
315 272
316 DRM_DEBUG_KMS("\n"); 273 DRM_DEBUG_KMS("\n");
274 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
317 275
318 if (fixed_mode) { 276 if (fixed_mode) {
319 intel_fixed_panel_mode(fixed_mode, adjusted_mode); 277 intel_fixed_panel_mode(fixed_mode, adjusted_mode);
@@ -745,17 +703,6 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
745 const struct intel_crtc_state *pipe_config); 703 const struct intel_crtc_state *pipe_config);
746static void intel_dsi_unprepare(struct intel_encoder *encoder); 704static void intel_dsi_unprepare(struct intel_encoder *encoder);
747 705
748static void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
749{
750 struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
751
752 /* For v3 VBTs in vid-mode the delays are part of the VBT sequences */
753 if (is_vid_mode(intel_dsi) && dev_priv->vbt.dsi.seq_version >= 3)
754 return;
755
756 msleep(msec);
757}
758
759/* 706/*
760 * Panel enable/disable sequences from the VBT spec. 707 * Panel enable/disable sequences from the VBT spec.
761 * 708 *
@@ -793,6 +740,10 @@ static void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
793 * - wait t4 - wait t4 740 * - wait t4 - wait t4
794 */ 741 */
795 742
743/*
744 * DSI port enable has to be done before pipe and plane enable, so we do it in
745 * the pre_enable hook instead of the enable hook.
746 */
796static void intel_dsi_pre_enable(struct intel_encoder *encoder, 747static void intel_dsi_pre_enable(struct intel_encoder *encoder,
797 const struct intel_crtc_state *pipe_config, 748 const struct intel_crtc_state *pipe_config,
798 const struct drm_connector_state *conn_state) 749 const struct drm_connector_state *conn_state)
@@ -895,17 +846,6 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
895} 846}
896 847
897/* 848/*
898 * DSI port enable has to be done before pipe and plane enable, so we do it in
899 * the pre_enable hook.
900 */
901static void intel_dsi_enable_nop(struct intel_encoder *encoder,
902 const struct intel_crtc_state *pipe_config,
903 const struct drm_connector_state *conn_state)
904{
905 DRM_DEBUG_KMS("\n");
906}
907
908/*
909 * DSI port disable has to be done after pipe and plane disable, so we do it in 849 * DSI port disable has to be done after pipe and plane disable, so we do it in
910 * the post_disable hook. 850 * the post_disable hook.
911 */ 851 */
@@ -1272,31 +1212,6 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
1272 } 1212 }
1273} 1213}
1274 1214
1275static enum drm_mode_status
1276intel_dsi_mode_valid(struct drm_connector *connector,
1277 struct drm_display_mode *mode)
1278{
1279 struct intel_connector *intel_connector = to_intel_connector(connector);
1280 const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
1281 int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
1282
1283 DRM_DEBUG_KMS("\n");
1284
1285 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
1286 return MODE_NO_DBLESCAN;
1287
1288 if (fixed_mode) {
1289 if (mode->hdisplay > fixed_mode->hdisplay)
1290 return MODE_PANEL;
1291 if (mode->vdisplay > fixed_mode->vdisplay)
1292 return MODE_PANEL;
1293 if (fixed_mode->clock > max_dotclk)
1294 return MODE_CLOCK_HIGH;
1295 }
1296
1297 return MODE_OK;
1298}
1299
1300/* return txclkesc cycles in terms of divider and duration in us */ 1215/* return txclkesc cycles in terms of divider and duration in us */
1301static u16 txclkesc(u32 divider, unsigned int us) 1216static u16 txclkesc(u32 divider, unsigned int us)
1302{ 1217{
@@ -1619,39 +1534,6 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder)
1619 } 1534 }
1620} 1535}
1621 1536
1622static int intel_dsi_get_modes(struct drm_connector *connector)
1623{
1624 struct intel_connector *intel_connector = to_intel_connector(connector);
1625 struct drm_display_mode *mode;
1626
1627 DRM_DEBUG_KMS("\n");
1628
1629 if (!intel_connector->panel.fixed_mode) {
1630 DRM_DEBUG_KMS("no fixed mode\n");
1631 return 0;
1632 }
1633
1634 mode = drm_mode_duplicate(connector->dev,
1635 intel_connector->panel.fixed_mode);
1636 if (!mode) {
1637 DRM_DEBUG_KMS("drm_mode_duplicate failed\n");
1638 return 0;
1639 }
1640
1641 drm_mode_probed_add(connector, mode);
1642 return 1;
1643}
1644
1645static void intel_dsi_connector_destroy(struct drm_connector *connector)
1646{
1647 struct intel_connector *intel_connector = to_intel_connector(connector);
1648
1649 DRM_DEBUG_KMS("\n");
1650 intel_panel_fini(&intel_connector->panel);
1651 drm_connector_cleanup(connector);
1652 kfree(connector);
1653}
1654
1655static void intel_dsi_encoder_destroy(struct drm_encoder *encoder) 1537static void intel_dsi_encoder_destroy(struct drm_encoder *encoder)
1656{ 1538{
1657 struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); 1539 struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
@@ -1676,7 +1558,7 @@ static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs
1676static const struct drm_connector_funcs intel_dsi_connector_funcs = { 1558static const struct drm_connector_funcs intel_dsi_connector_funcs = {
1677 .late_register = intel_connector_register, 1559 .late_register = intel_connector_register,
1678 .early_unregister = intel_connector_unregister, 1560 .early_unregister = intel_connector_unregister,
1679 .destroy = intel_dsi_connector_destroy, 1561 .destroy = intel_connector_destroy,
1680 .fill_modes = drm_helper_probe_single_connector_modes, 1562 .fill_modes = drm_helper_probe_single_connector_modes,
1681 .atomic_get_property = intel_digital_connector_atomic_get_property, 1563 .atomic_get_property = intel_digital_connector_atomic_get_property,
1682 .atomic_set_property = intel_digital_connector_atomic_set_property, 1564 .atomic_set_property = intel_digital_connector_atomic_set_property,
@@ -1684,27 +1566,57 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = {
1684 .atomic_duplicate_state = intel_digital_connector_duplicate_state, 1566 .atomic_duplicate_state = intel_digital_connector_duplicate_state,
1685}; 1567};
1686 1568
1687static int intel_dsi_get_panel_orientation(struct intel_connector *connector) 1569static enum drm_panel_orientation
1570vlv_dsi_get_hw_panel_orientation(struct intel_connector *connector)
1688{ 1571{
1689 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 1572 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1690 int orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL; 1573 struct intel_encoder *encoder = connector->encoder;
1691 enum i9xx_plane_id i9xx_plane; 1574 enum intel_display_power_domain power_domain;
1575 enum drm_panel_orientation orientation;
1576 struct intel_plane *plane;
1577 struct intel_crtc *crtc;
1578 enum pipe pipe;
1692 u32 val; 1579 u32 val;
1693 1580
1694 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 1581 if (!encoder->get_hw_state(encoder, &pipe))
1695 if (connector->encoder->crtc_mask == BIT(PIPE_B)) 1582 return DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
1696 i9xx_plane = PLANE_B;
1697 else
1698 i9xx_plane = PLANE_A;
1699 1583
1700 val = I915_READ(DSPCNTR(i9xx_plane)); 1584 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
1701 if (val & DISPPLANE_ROTATE_180) 1585 plane = to_intel_plane(crtc->base.primary);
1702 orientation = DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP; 1586
1703 } 1587 power_domain = POWER_DOMAIN_PIPE(pipe);
1588 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
1589 return DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
1590
1591 val = I915_READ(DSPCNTR(plane->i9xx_plane));
1592
1593 if (!(val & DISPLAY_PLANE_ENABLE))
1594 orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
1595 else if (val & DISPPLANE_ROTATE_180)
1596 orientation = DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
1597 else
1598 orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL;
1599
1600 intel_display_power_put(dev_priv, power_domain);
1704 1601
1705 return orientation; 1602 return orientation;
1706} 1603}
1707 1604
1605static enum drm_panel_orientation
1606vlv_dsi_get_panel_orientation(struct intel_connector *connector)
1607{
1608 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1609 enum drm_panel_orientation orientation;
1610
1611 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1612 orientation = vlv_dsi_get_hw_panel_orientation(connector);
1613 if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
1614 return orientation;
1615 }
1616
1617 return intel_dsi_get_panel_orientation(connector);
1618}
1619
1708static void intel_dsi_add_properties(struct intel_connector *connector) 1620static void intel_dsi_add_properties(struct intel_connector *connector)
1709{ 1621{
1710 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 1622 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -1722,7 +1634,7 @@ static void intel_dsi_add_properties(struct intel_connector *connector)
1722 connector->base.state->scaling_mode = DRM_MODE_SCALE_ASPECT; 1634 connector->base.state->scaling_mode = DRM_MODE_SCALE_ASPECT;
1723 1635
1724 connector->base.display_info.panel_orientation = 1636 connector->base.display_info.panel_orientation =
1725 intel_dsi_get_panel_orientation(connector); 1637 vlv_dsi_get_panel_orientation(connector);
1726 drm_connector_init_panel_orientation_property( 1638 drm_connector_init_panel_orientation_property(
1727 &connector->base, 1639 &connector->base,
1728 connector->panel.fixed_mode->hdisplay, 1640 connector->panel.fixed_mode->hdisplay,
@@ -1773,7 +1685,6 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
1773 1685
1774 intel_encoder->compute_config = intel_dsi_compute_config; 1686 intel_encoder->compute_config = intel_dsi_compute_config;
1775 intel_encoder->pre_enable = intel_dsi_pre_enable; 1687 intel_encoder->pre_enable = intel_dsi_pre_enable;
1776 intel_encoder->enable = intel_dsi_enable_nop;
1777 intel_encoder->disable = intel_dsi_disable; 1688 intel_encoder->disable = intel_dsi_disable;
1778 intel_encoder->post_disable = intel_dsi_post_disable; 1689 intel_encoder->post_disable = intel_dsi_post_disable;
1779 intel_encoder->get_hw_state = intel_dsi_get_hw_state; 1690 intel_encoder->get_hw_state = intel_dsi_get_hw_state;
@@ -1806,7 +1717,8 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
1806 for_each_dsi_port(port, intel_dsi->ports) { 1717 for_each_dsi_port(port, intel_dsi->ports) {
1807 struct intel_dsi_host *host; 1718 struct intel_dsi_host *host;
1808 1719
1809 host = intel_dsi_host_init(intel_dsi, port); 1720 host = intel_dsi_host_init(intel_dsi, &intel_dsi_host_ops,
1721 port);
1810 if (!host) 1722 if (!host)
1811 goto err; 1723 goto err;
1812 1724