 drivers/gpu/drm/i915/gvt/cfg_space.c    |  4
 drivers/gpu/drm/i915/gvt/gtt.c          | 55
 drivers/gpu/drm/i915/gvt/gtt.h          |  4
 drivers/gpu/drm/i915/gvt/gvt.h          |  1
 drivers/gpu/drm/i915/gvt/kvmgt.c        | 46
 drivers/gpu/drm/i915/gvt/opregion.c     |  2
 drivers/gpu/drm/i915/i915_gem.c         | 22
 drivers/gpu/drm/i915/i915_gem_request.h | 19
 drivers/gpu/drm/i915/intel_display.c    | 32
 drivers/gpu/drm/i915/intel_dp.c         | 41
 drivers/gpu/drm/i915/intel_overlay.c    |  9
 11 files changed, 190 insertions(+), 45 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index db516382a4d4..711c31c8d8b4 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -123,6 +123,7 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
 	u8 changed = old ^ new;
 	int ret;
 
+	memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
 	if (!(changed & PCI_COMMAND_MEMORY))
 		return 0;
 
@@ -142,7 +143,6 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
 		return ret;
 	}
 
-	memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
 	return 0;
 }
 
@@ -240,7 +240,7 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
 	if (WARN_ON(bytes > 4))
 		return -EINVAL;
 
-	if (WARN_ON(offset + bytes >= INTEL_GVT_MAX_CFG_SPACE_SZ))
+	if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ))
 		return -EINVAL;
 
 	/* First check if it's PCI_COMMAND */
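
The first two hunks move the config-space memcpy ahead of the PCI_COMMAND_MEMORY early return, so a write that does not toggle memory decoding still lands in the vGPU's config-space copy; the third hunk relaxes an off-by-one, since a write covering [offset, offset + bytes) is legal when it ends exactly at INTEL_GVT_MAX_CFG_SPACE_SZ. A standalone sketch of that boundary condition (the size constant below is illustrative, not the GVT value):

    #include <stdbool.h>
    #include <stdio.h>

    #define CFG_SPACE_SZ 256 /* illustrative; not the GVT constant */

    /* A write of `bytes` at `offset` touches [offset, offset + bytes),
     * so offset + bytes == CFG_SPACE_SZ is still in bounds: reject only
     * when offset + bytes > CFG_SPACE_SZ, as the fixed WARN_ON does. */
    static bool write_in_bounds(unsigned int offset, unsigned int bytes)
    {
            return !(offset + bytes > CFG_SPACE_SZ);
    }

    int main(void)
    {
            printf("4 bytes at 252: %s\n", write_in_bounds(252, 4) ? "ok" : "rejected");
            printf("4 bytes at 253: %s\n", write_in_bounds(253, 4) ? "ok" : "rejected");
            return 0;
    }
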
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 7eaaf1c9ed2b..6c5fdf5b2ce2 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1998,6 +1998,8 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
 	INIT_LIST_HEAD(&gtt->oos_page_list_head);
 	INIT_LIST_HEAD(&gtt->post_shadow_list_head);
 
+	intel_vgpu_reset_ggtt(vgpu);
+
 	ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
 			NULL, 1, 0);
 	if (IS_ERR(ggtt_mm)) {
@@ -2206,6 +2208,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
 int intel_gvt_init_gtt(struct intel_gvt *gvt)
 {
 	int ret;
+	void *page_addr;
 
 	gvt_dbg_core("init gtt\n");
 
@@ -2218,6 +2221,23 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
 		return -ENODEV;
 	}
 
+	gvt->gtt.scratch_ggtt_page =
+		alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO);
+	if (!gvt->gtt.scratch_ggtt_page) {
+		gvt_err("fail to allocate scratch ggtt page\n");
+		return -ENOMEM;
+	}
+
+	page_addr = page_address(gvt->gtt.scratch_ggtt_page);
+
+	gvt->gtt.scratch_ggtt_mfn =
+		intel_gvt_hypervisor_virt_to_mfn(page_addr);
+	if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) {
+		gvt_err("fail to translate scratch ggtt page\n");
+		__free_page(gvt->gtt.scratch_ggtt_page);
+		return -EFAULT;
+	}
+
 	if (enable_out_of_sync) {
 		ret = setup_spt_oos(gvt);
 		if (ret) {
@@ -2239,6 +2259,41 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
  */
 void intel_gvt_clean_gtt(struct intel_gvt *gvt)
 {
+	__free_page(gvt->gtt.scratch_ggtt_page);
+
 	if (enable_out_of_sync)
 		clean_spt_oos(gvt);
 }
+
+/**
+ * intel_vgpu_reset_ggtt - reset the GGTT entry
+ * @vgpu: a vGPU
+ *
+ * This function is called at the vGPU create stage
+ * to reset all the GGTT entries.
+ *
+ */
+void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
+{
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+	u32 index;
+	u32 offset;
+	u32 num_entries;
+	struct intel_gvt_gtt_entry e;
+
+	memset(&e, 0, sizeof(struct intel_gvt_gtt_entry));
+	e.type = GTT_TYPE_GGTT_PTE;
+	ops->set_pfn(&e, gvt->gtt.scratch_ggtt_mfn);
+	e.val64 |= _PAGE_PRESENT;
+
+	index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
+	num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
+	for (offset = 0; offset < num_entries; offset++)
+		ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
+
+	index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
+	num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
+	for (offset = 0; offset < num_entries; offset++)
+		ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
+}
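
Together these hunks give the device a single zero-filled scratch page and point all of a vGPU's GGTT entries at that page's MFN on creation, so an entry the guest has not yet programmed can never translate to arbitrary host memory. A toy userspace sketch of the same scratch-page pattern (names and types are illustrative, not the GVT API):

    #include <stdio.h>
    #include <stdlib.h>

    #define NUM_ENTRIES 16
    #define PAGE_SZ     4096

    struct toy_gtt {
            void *entries[NUM_ENTRIES]; /* each slot: its backing page */
            void *scratch;              /* shared fallback page */
    };

    static int toy_gtt_init(struct toy_gtt *gtt)
    {
            gtt->scratch = calloc(1, PAGE_SZ); /* zeroed, like __GFP_ZERO */
            if (!gtt->scratch)
                    return -1;
            for (int i = 0; i < NUM_ENTRIES; i++)
                    gtt->entries[i] = gtt->scratch; /* "reset" every entry */
            return 0;
    }

    int main(void)
    {
            struct toy_gtt gtt;

            if (toy_gtt_init(&gtt))
                    return 1;
            /* A stray access through any slot hits the harmless page. */
            printf("slot 5 first byte: %d\n", ((char *)gtt.entries[5])[0]);
            free(gtt.scratch);
            return 0;
    }
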
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index d250013bc37b..b315ab3593ec 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -81,6 +81,9 @@ struct intel_gvt_gtt {
 	struct list_head oos_page_use_list_head;
 	struct list_head oos_page_free_list_head;
 	struct list_head mm_lru_list_head;
+
+	struct page *scratch_ggtt_page;
+	unsigned long scratch_ggtt_mfn;
 };
 
 enum {
@@ -202,6 +205,7 @@ struct intel_vgpu_gtt {
 
 extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
 extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
+void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu);
 
 extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
 extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index ad0e9364ee70..0af17016f33f 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -175,6 +175,7 @@ struct intel_vgpu {
 		struct notifier_block group_notifier;
 		struct kvm *kvm;
 		struct work_struct release_work;
+		atomic_t released;
 	} vdev;
 #endif
 };
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 4dd6722a7339..934963970288 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -114,12 +114,15 @@ out:
 static kvm_pfn_t gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
 {
 	struct gvt_dma *entry;
+	kvm_pfn_t pfn;
 
 	mutex_lock(&vgpu->vdev.cache_lock);
+
 	entry = __gvt_cache_find(vgpu, gfn);
-	mutex_unlock(&vgpu->vdev.cache_lock);
+	pfn = (entry == NULL) ? 0 : entry->pfn;
 
-	return entry == NULL ? 0 : entry->pfn;
+	mutex_unlock(&vgpu->vdev.cache_lock);
+	return pfn;
 }
 
 static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn)
@@ -497,7 +500,16 @@ static int intel_vgpu_open(struct mdev_device *mdev)
 		goto undo_iommu;
 	}
 
-	return kvmgt_guest_init(mdev);
+	ret = kvmgt_guest_init(mdev);
+	if (ret)
+		goto undo_group;
+
+	atomic_set(&vgpu->vdev.released, 0);
+	return ret;
+
+undo_group:
+	vfio_unregister_notifier(&mdev->dev, VFIO_GROUP_NOTIFY,
+					&vgpu->vdev.group_notifier);
 
 undo_iommu:
 	vfio_unregister_notifier(&mdev->dev, VFIO_IOMMU_NOTIFY,
@@ -509,17 +521,26 @@ out:
 static void __intel_vgpu_release(struct intel_vgpu *vgpu)
 {
 	struct kvmgt_guest_info *info;
+	int ret;
 
 	if (!handle_valid(vgpu->handle))
 		return;
 
-	vfio_unregister_notifier(&vgpu->vdev.mdev->dev, VFIO_IOMMU_NOTIFY,
+	if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
+		return;
+
+	ret = vfio_unregister_notifier(&vgpu->vdev.mdev->dev, VFIO_IOMMU_NOTIFY,
 					&vgpu->vdev.iommu_notifier);
-	vfio_unregister_notifier(&vgpu->vdev.mdev->dev, VFIO_GROUP_NOTIFY,
+	WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);
+
+	ret = vfio_unregister_notifier(&vgpu->vdev.mdev->dev, VFIO_GROUP_NOTIFY,
 					&vgpu->vdev.group_notifier);
+	WARN(ret, "vfio_unregister_notifier for group failed: %d\n", ret);
 
 	info = (struct kvmgt_guest_info *)vgpu->handle;
 	kvmgt_guest_exit(info);
+
+	vgpu->vdev.kvm = NULL;
 	vgpu->handle = 0;
 }
 
@@ -534,6 +555,7 @@ static void intel_vgpu_release_work(struct work_struct *work)
 {
 	struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,
 					vdev.release_work);
+
 	__intel_vgpu_release(vgpu);
 }
 
@@ -1134,6 +1156,10 @@ static int kvmgt_write_protect_add(unsigned long handle, u64 gfn)
 
 	idx = srcu_read_lock(&kvm->srcu);
 	slot = gfn_to_memslot(kvm, gfn);
+	if (!slot) {
+		srcu_read_unlock(&kvm->srcu, idx);
+		return -EINVAL;
+	}
 
 	spin_lock(&kvm->mmu_lock);
 
@@ -1164,6 +1190,10 @@ static int kvmgt_write_protect_remove(unsigned long handle, u64 gfn)
 
 	idx = srcu_read_lock(&kvm->srcu);
 	slot = gfn_to_memslot(kvm, gfn);
+	if (!slot) {
+		srcu_read_unlock(&kvm->srcu, idx);
+		return -EINVAL;
+	}
 
 	spin_lock(&kvm->mmu_lock);
 
@@ -1311,18 +1341,14 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
 
 static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
 {
-	struct intel_vgpu *vgpu;
-
 	if (!info) {
 		gvt_err("kvmgt_guest_info invalid\n");
 		return false;
 	}
 
-	vgpu = info->vgpu;
-
 	kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
 	kvmgt_protect_table_destroy(info);
-	gvt_cache_destroy(vgpu);
+	gvt_cache_destroy(info->vgpu);
 	vfree(info);
 
 	return true;
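
The new released flag turns vGPU teardown into a once-only operation: both the release work and the VFIO notifier path can reach __intel_vgpu_release(), and atomic_cmpxchg(&vgpu->vdev.released, 0, 1) lets exactly one of them proceed. A standalone C11 sketch of that pattern (the kernel's atomic_cmpxchg() returns the old value; compare_exchange expresses the same decision):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int released; /* 0 at open time, like atomic_set(..., 0) */

    static void release_once(const char *who)
    {
            int expected = 0;

            /* Only the caller that flips 0 -> 1 performs the teardown;
             * everyone else sees the exchange fail and bails out. */
            if (!atomic_compare_exchange_strong(&released, &expected, 1)) {
                    printf("%s: already released, skipping\n", who);
                    return;
            }
            printf("%s: performing teardown\n", who);
    }

    int main(void)
    {
            release_once("release work");   /* wins the race */
            release_once("group notifier"); /* sees released == 1 */
            return 0;
    }
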
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index d2a0fbc896c3..81cd921770c6 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -65,7 +65,7 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
 	int i, ret;
 
 	for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) {
-		mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)
+		mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va
 			+ i * PAGE_SIZE);
 		if (mfn == INTEL_GVT_INVALID_ADDR) {
 			gvt_err("fail to get MFN from VA\n");
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4a31b7a891ec..1e505d30b71e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -244,14 +244,16 @@ err_phys:
 
 static void
 __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
-				struct sg_table *pages)
+				struct sg_table *pages,
+				bool needs_clflush)
 {
 	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
 
 	if (obj->mm.madv == I915_MADV_DONTNEED)
 		obj->mm.dirty = false;
 
-	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
+	if (needs_clflush &&
+	    (obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
 	    !cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
 		drm_clflush_sg(pages);
 
@@ -263,7 +265,7 @@ static void
 i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
 			       struct sg_table *pages)
 {
-	__i915_gem_object_release_shmem(obj, pages);
+	__i915_gem_object_release_shmem(obj, pages, false);
 
 	if (obj->mm.dirty) {
 		struct address_space *mapping = obj->base.filp->f_mapping;
@@ -2231,7 +2233,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
 	struct sgt_iter sgt_iter;
 	struct page *page;
 
-	__i915_gem_object_release_shmem(obj, pages);
+	__i915_gem_object_release_shmem(obj, pages, true);
 
 	i915_gem_gtt_finish_pages(obj, pages);
 
@@ -2322,7 +2324,7 @@ static void i915_sg_trim(struct sg_table *orig_st)
 	if (orig_st->nents == orig_st->orig_nents)
 		return;
 
-	if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL))
+	if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))
 		return;
 
 	new_sg = new_st.sgl;
@@ -2728,6 +2730,7 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
 	struct drm_i915_gem_request *request;
 	struct i915_gem_context *incomplete_ctx;
 	struct intel_timeline *timeline;
+	unsigned long flags;
 	bool ring_hung;
 
 	if (engine->irq_seqno_barrier)
@@ -2763,13 +2766,20 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
 	if (i915_gem_context_is_default(incomplete_ctx))
 		return;
 
+	timeline = i915_gem_context_lookup_timeline(incomplete_ctx, engine);
+
+	spin_lock_irqsave(&engine->timeline->lock, flags);
+	spin_lock(&timeline->lock);
+
 	list_for_each_entry_continue(request, &engine->timeline->requests, link)
 		if (request->ctx == incomplete_ctx)
 			reset_request(request);
 
-	timeline = i915_gem_context_lookup_timeline(incomplete_ctx, engine);
 	list_for_each_entry(request, &timeline->requests, link)
 		reset_request(request);
+
+	spin_unlock(&timeline->lock);
+	spin_unlock_irqrestore(&engine->timeline->lock, flags);
 }
 
 void i915_gem_reset(struct drm_i915_private *dev_priv)
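
The reset path now looks the context timeline up front and takes the engine timeline lock (outer, with interrupts saved) and then the context timeline lock (inner) before walking either request list. A minimal sketch of that fixed lock ordering, using pthread mutexes purely as stand-ins for the two spinlocks:

    #include <pthread.h>
    #include <stdio.h>

    /* Stand-ins: engine->timeline->lock is always the outer lock,
     * the per-context timeline lock always the inner one. */
    static pthread_mutex_t engine_timeline = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t ctx_timeline    = PTHREAD_MUTEX_INITIALIZER;

    static void reset_requests(void)
    {
            pthread_mutex_lock(&engine_timeline); /* spin_lock_irqsave(...) */
            pthread_mutex_lock(&ctx_timeline);    /* spin_lock(&timeline->lock) */

            puts("walk engine->timeline->requests, then timeline->requests");

            pthread_mutex_unlock(&ctx_timeline);
            pthread_mutex_unlock(&engine_timeline);
    }

    int main(void)
    {
            reset_requests();
            return 0;
    }
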
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index e2b077df2da0..d229f47d1028 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -413,6 +413,25 @@ i915_gem_active_set(struct i915_gem_active *active,
 	rcu_assign_pointer(active->request, request);
 }
 
+/**
+ * i915_gem_active_set_retire_fn - updates the retirement callback
+ * @active - the active tracker
+ * @fn - the routine called when the request is retired
+ * @mutex - struct_mutex used to guard retirements
+ *
+ * i915_gem_active_set_retire_fn() updates the function pointer that
+ * is called when the final request associated with the @active tracker
+ * is retired.
+ */
+static inline void
+i915_gem_active_set_retire_fn(struct i915_gem_active *active,
+			      i915_gem_retire_fn fn,
+			      struct mutex *mutex)
+{
+	lockdep_assert_held(mutex);
+	active->retire = fn ?: i915_gem_retire_noop;
+}
+
 static inline struct drm_i915_gem_request *
 __i915_gem_active_peek(const struct i915_gem_active *active)
 {
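
This helper is what the intel_overlay.c hunk below switches to: callers must hold the guarding mutex, and a NULL callback falls back to i915_gem_retire_noop via the GNU `?:` operator instead of leaving a NULL pointer to be called at retirement. A standalone sketch of that fallback idiom (names are illustrative):

    #include <stddef.h>
    #include <stdio.h>

    typedef void (*retire_fn)(void);

    static void retire_noop(void) { }
    static void retire_flip(void) { puts("flip retired"); }

    /* GNU C's `a ?: b` yields a unless a is NULL, as in the kernel
     * helper; a NULL fn therefore collapses to the no-op handler. */
    static retire_fn pick(retire_fn fn)
    {
            return fn ?: retire_noop;
    }

    int main(void)
    {
            pick(retire_flip)(); /* prints */
            pick(NULL)();        /* safely does nothing */
            return 0;
    }
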
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 6daad8613760..3dc8724df400 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -16791,7 +16791,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 
 	for_each_intel_crtc(dev, crtc) {
 		struct intel_crtc_state *crtc_state = crtc->config;
-		int pixclk = 0;
 
 		__drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
 		memset(crtc_state, 0, sizeof(*crtc_state));
@@ -16803,23 +16802,9 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 		crtc->base.enabled = crtc_state->base.enable;
 		crtc->active = crtc_state->base.active;
 
-		if (crtc_state->base.active) {
+		if (crtc_state->base.active)
 			dev_priv->active_crtcs |= 1 << crtc->pipe;
 
-			if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
-				pixclk = ilk_pipe_pixel_rate(crtc_state);
-			else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-				pixclk = crtc_state->base.adjusted_mode.crtc_clock;
-			else
-				WARN_ON(dev_priv->display.modeset_calc_cdclk);
-
-			/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
-			if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
-				pixclk = DIV_ROUND_UP(pixclk * 100, 95);
-		}
-
-		dev_priv->min_pixclk[crtc->pipe] = pixclk;
-
 		readout_plane_state(crtc);
 
 		DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
@@ -16892,6 +16877,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 	}
 
 	for_each_intel_crtc(dev, crtc) {
+		int pixclk = 0;
+
 		crtc->base.hwmode = crtc->config->base.adjusted_mode;
 
 		memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
@@ -16919,10 +16906,23 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 			 */
 			crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
 
+			if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
+				pixclk = ilk_pipe_pixel_rate(crtc->config);
+			else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+				pixclk = crtc->config->base.adjusted_mode.crtc_clock;
+			else
+				WARN_ON(dev_priv->display.modeset_calc_cdclk);
+
+			/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+			if (IS_BROADWELL(dev_priv) && crtc->config->ips_enabled)
+				pixclk = DIV_ROUND_UP(pixclk * 100, 95);
+
 			drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
 			update_scanline_offset(crtc);
 		}
 
+		dev_priv->min_pixclk[crtc->pipe] = pixclk;
+
 		intel_pipe_config_sanity_check(dev_priv, crtc->config);
 	}
 }
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index d9bc19be855e..0b8e8eb85c19 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -355,7 +355,8 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
 			      struct intel_dp *intel_dp);
 static void
 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
-					      struct intel_dp *intel_dp);
+					      struct intel_dp *intel_dp,
+					      bool force_disable_vdd);
 static void
 intel_dp_pps_init(struct drm_device *dev, struct intel_dp *intel_dp);
 
@@ -516,7 +517,7 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
 
 	/* init power sequencer on this pipe and port */
 	intel_dp_init_panel_power_sequencer(dev, intel_dp);
-	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, true);
 
 	/*
 	 * Even vdd force doesn't work until we've made
@@ -553,7 +554,7 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp)
 	 * Only the HW needs to be reprogrammed, the SW state is fixed and
 	 * has been setup during connector init.
 	 */
-	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
 
 	return 0;
 }
@@ -636,7 +637,7 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
 		      port_name(port), pipe_name(intel_dp->pps_pipe));
 
 	intel_dp_init_panel_power_sequencer(dev, intel_dp);
-	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
 }
 
 void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
@@ -2912,7 +2913,7 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
 
 	/* init power sequencer on this pipe and port */
 	intel_dp_init_panel_power_sequencer(dev, intel_dp);
-	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, true);
 }
 
 static void vlv_pre_enable_dp(struct intel_encoder *encoder,
@@ -5055,7 +5056,8 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
 
 static void
 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
-					      struct intel_dp *intel_dp)
+					      struct intel_dp *intel_dp,
+					      bool force_disable_vdd)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 pp_on, pp_off, pp_div, port_sel = 0;
@@ -5068,6 +5070,31 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
 
 	intel_pps_get_registers(dev_priv, intel_dp, &regs);
 
+	/*
+	 * On some VLV machines the BIOS can leave the VDD
+	 * enabled even on power sequencers which aren't
+	 * hooked up to any port. This would mess up the
+	 * power domain tracking the first time we pick
+	 * one of these power sequencers for use since
+	 * edp_panel_vdd_on() would notice that the VDD was
+	 * already on and therefore wouldn't grab the power
+	 * domain reference. Disable VDD first to avoid this.
+	 * This also avoids spuriously turning the VDD on as
+	 * soon as the new power sequencer gets initialized.
+	 */
+	if (force_disable_vdd) {
+		u32 pp = ironlake_get_pp_control(intel_dp);
+
+		WARN(pp & PANEL_POWER_ON, "Panel power already on\n");
+
+		if (pp & EDP_FORCE_VDD)
+			DRM_DEBUG_KMS("VDD already on, disabling first\n");
+
+		pp &= ~EDP_FORCE_VDD;
+
+		I915_WRITE(regs.pp_ctrl, pp);
+	}
+
 	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
 		(seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
 	pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
@@ -5122,7 +5149,7 @@ static void intel_dp_pps_init(struct drm_device *dev,
 		vlv_initial_power_sequencer_setup(intel_dp);
 	} else {
 		intel_dp_init_panel_power_sequencer(dev, intel_dp);
-		intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+		intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
 	}
 }
 
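
The force_disable_vdd branch boils down to a read-modify-write of the panel-power control register: read it back, clear EDP_FORCE_VDD, and write the result before programming the new sequencer values. A standalone sketch of that bit-clear (the register variable and bit positions here are illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    #define PANEL_POWER_ON (1u << 0) /* illustrative bit positions */
    #define EDP_FORCE_VDD  (1u << 3)

    static uint32_t pp_ctrl_reg = EDP_FORCE_VDD; /* BIOS left VDD forced on */

    int main(void)
    {
            uint32_t pp = pp_ctrl_reg;      /* read, like ironlake_get_pp_control() */

            if (pp & PANEL_POWER_ON)
                    puts("warn: panel power already on");
            if (pp & EDP_FORCE_VDD)
                    puts("VDD already on, disabling first");

            pp &= ~EDP_FORCE_VDD;           /* drop the force-VDD bit... */
            pp_ctrl_reg = pp;               /* ...and write it back */

            printf("pp_ctrl now 0x%08x\n", pp_ctrl_reg);
            return 0;
    }
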
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index fd0e4dac7cc1..e589e17876dc 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -216,7 +216,8 @@ static void intel_overlay_submit_request(struct intel_overlay *overlay,
 {
 	GEM_BUG_ON(i915_gem_active_peek(&overlay->last_flip,
 					&overlay->i915->drm.struct_mutex));
-	overlay->last_flip.retire = retire;
+	i915_gem_active_set_retire_fn(&overlay->last_flip, retire,
+				      &overlay->i915->drm.struct_mutex);
 	i915_gem_active_set(&overlay->last_flip, req);
 	i915_add_request(req);
 }
@@ -839,8 +840,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 	if (ret)
 		goto out_unpin;
 
-	i915_gem_track_fb(overlay->vma->obj, new_bo,
-			  INTEL_FRONTBUFFER_OVERLAY(pipe));
+	i915_gem_track_fb(overlay->vma ? overlay->vma->obj : NULL,
+			  vma->obj, INTEL_FRONTBUFFER_OVERLAY(pipe));
 
 	overlay->old_vma = overlay->vma;
 	overlay->vma = vma;
@@ -1430,6 +1431,8 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
 	overlay->contrast = 75;
 	overlay->saturation = 146;
 
+	init_request_active(&overlay->last_flip, NULL);
+
 	regs = intel_overlay_map_regs(overlay);
 	if (!regs)
 		goto out_unpin_bo;
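
The i915_gem_track_fb() change guards the very first overlay flip, when overlay->vma is still NULL and the old frontbuffer object must be reported as NULL rather than dereferenced; init_request_active() likewise gives last_flip a valid no-op retire callback from the start. A toy sketch of that NULL-safe handoff (types are illustrative):

    #include <stddef.h>
    #include <stdio.h>

    struct obj { const char *name; };
    struct vma { struct obj obj; };

    /* i915_gem_track_fb()-shaped call: either side may be NULL */
    static void track_fb(const struct obj *old, const struct obj *new)
    {
            printf("untrack %s, track %s\n",
                   old ? old->name : "(none)", new ? new->name : "(none)");
    }

    int main(void)
    {
            struct vma first = { { "frame0" } }, second = { { "frame1" } };
            struct vma *cur = NULL;

            /* first flip: no previous vma, so pass NULL, not cur->obj */
            track_fb(cur ? &cur->obj : NULL, &first.obj);
            cur = &first;

            /* subsequent flips hand over the previous object */
            track_fb(cur ? &cur->obj : NULL, &second.obj);
            return 0;
    }
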