author		Chuanxiao Dong <chuanxiao.dong@intel.com>	2017-07-07 01:21:52 -0400
committer	Zhenyu Wang <zhenyuw@linux.intel.com>		2017-07-11 01:46:58 -0400
commit		08673c3e27aa4407899e4fbb4738dac25370f706 (patch)
tree		ddd9217207927fdbacb917f7bcce0e7a6167ae56
parent		3364bf5fd00f0391ad090f547932a5c4b2068dbc (diff)
drm/i915/gvt: Revert "drm/i915/gvt: Fix possible recursive locking issue"
This reverts commit 62d02fd1f807bf5a259a242c483c9fb98a242630.

The rwsem recursive trace should not be fixed from the kvmgt side by using a
workqueue; it is an issue that should be fixed in VFIO. So this one should be
reverted.

Signed-off-by: Chuanxiao Dong <chuanxiao.dong@intel.com>
Cc: Zhenyu Wang <zhenyuw@linux.intel.com>
Cc: stable@vger.kernel.org # v4.10+
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
-rw-r--r--	drivers/gpu/drm/i915/gvt/gvt.h	3
-rw-r--r--	drivers/gpu/drm/i915/gvt/kvmgt.c	55
2 files changed, 10 insertions, 48 deletions
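The net effect of the revert is that gvt_cache_remove() once again unpins the guest page directly while holding cache_lock, instead of deferring the vfio_unpin_pages() call to a workqueue. Reconstructed from the new side of the hunks below (a sketch for orientation, not the verbatim file contents), the restored function looks roughly like this:

static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
{
	struct device *dev = mdev_dev(vgpu->vdev.mdev);
	struct gvt_dma *this;
	unsigned long g1;
	int rc;

	mutex_lock(&vgpu->vdev.cache_lock);
	this = __gvt_cache_find(vgpu, gfn);
	if (!this) {
		mutex_unlock(&vgpu->vdev.cache_lock);
		return;
	}

	g1 = gfn;
	gvt_dma_unmap_iova(vgpu, this->iova);
	rc = vfio_unpin_pages(dev, &g1, 1);	/* unpin directly, no deferral */
	WARN_ON(rc != 1);
	__gvt_cache_remove_entry(vgpu, this);
	mutex_unlock(&vgpu->vdev.cache_lock);
}

Unpinning from this path (ultimately from the VFIO DMA-unmap notifier) is exactly what the reverted workaround tried to avoid; the commit message argues that the resulting rwsem recursion report should instead be addressed in VFIO itself.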
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 28d817e96e58..3a74e79eac2f 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -182,9 +182,6 @@ struct intel_vgpu {
 		struct kvm *kvm;
 		struct work_struct release_work;
 		atomic_t released;
-		struct work_struct unpin_work;
-		spinlock_t unpin_lock; /* To protect unpin_list */
-		struct list_head unpin_list;
 	} vdev;
 #endif
 };
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 75a6e1d8af0d..fd0c85f9ef3c 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -78,7 +78,6 @@ struct gvt_dma {
 	struct rb_node node;
 	gfn_t gfn;
 	unsigned long iova;
-	struct list_head list;
 };
 
 static inline bool handle_valid(unsigned long handle)
@@ -167,7 +166,6 @@ static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
 
 	new->gfn = gfn;
 	new->iova = iova;
-	INIT_LIST_HEAD(&new->list);
 
 	mutex_lock(&vgpu->vdev.cache_lock);
 	while (*link) {
@@ -199,52 +197,26 @@ static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
 	kfree(entry);
 }
 
-static void intel_vgpu_unpin_work(struct work_struct *work)
+static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
 {
-	struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,
-			vdev.unpin_work);
 	struct device *dev = mdev_dev(vgpu->vdev.mdev);
 	struct gvt_dma *this;
-	unsigned long gfn;
-
-	for (;;) {
-		spin_lock(&vgpu->vdev.unpin_lock);
-		if (list_empty(&vgpu->vdev.unpin_list)) {
-			spin_unlock(&vgpu->vdev.unpin_lock);
-			break;
-		}
-		this = list_first_entry(&vgpu->vdev.unpin_list,
-				struct gvt_dma, list);
-		list_del(&this->list);
-		spin_unlock(&vgpu->vdev.unpin_lock);
-
-		gfn = this->gfn;
-		vfio_unpin_pages(dev, &gfn, 1);
-		kfree(this);
-	}
-}
-
-static bool gvt_cache_mark_remove(struct intel_vgpu *vgpu, gfn_t gfn)
-{
-	struct gvt_dma *this;
+	unsigned long g1;
+	int rc;
 
 	mutex_lock(&vgpu->vdev.cache_lock);
 	this = __gvt_cache_find(vgpu, gfn);
 	if (!this) {
 		mutex_unlock(&vgpu->vdev.cache_lock);
-		return false;
+		return;
 	}
+
+	g1 = gfn;
 	gvt_dma_unmap_iova(vgpu, this->iova);
-	/* remove this from rb tree */
-	rb_erase(&this->node, &vgpu->vdev.cache);
+	rc = vfio_unpin_pages(dev, &g1, 1);
+	WARN_ON(rc != 1);
+	__gvt_cache_remove_entry(vgpu, this);
 	mutex_unlock(&vgpu->vdev.cache_lock);
-
-	/* put this to the unpin_list */
-	spin_lock(&vgpu->vdev.unpin_lock);
-	list_move_tail(&this->list, &vgpu->vdev.unpin_list);
-	spin_unlock(&vgpu->vdev.unpin_lock);
-
-	return true;
 }
 
 static void gvt_cache_init(struct intel_vgpu *vgpu)
@@ -485,9 +457,6 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
 	}
 
 	INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);
-	INIT_WORK(&vgpu->vdev.unpin_work, intel_vgpu_unpin_work);
-	spin_lock_init(&vgpu->vdev.unpin_lock);
-	INIT_LIST_HEAD(&vgpu->vdev.unpin_list);
 
 	vgpu->vdev.mdev = mdev;
 	mdev_set_drvdata(mdev, vgpu);
@@ -517,7 +486,6 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
 	struct intel_vgpu *vgpu = container_of(nb,
 					struct intel_vgpu,
 					vdev.iommu_notifier);
-	bool sched_unmap = false;
 
 	if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
 		struct vfio_iommu_type1_dma_unmap *unmap = data;
@@ -527,10 +495,7 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
 		end_gfn = gfn + unmap->size / PAGE_SIZE;
 
 		while (gfn < end_gfn)
-			sched_unmap |= gvt_cache_mark_remove(vgpu, gfn++);
-
-		if (sched_unmap)
-			schedule_work(&vgpu->vdev.unpin_work);
+			gvt_cache_remove(vgpu, gfn++);
 	}
 
 	return NOTIFY_OK;