diff options
author | Jani Nikula <jani.nikula@intel.com> | 2016-12-27 05:59:38 -0500 |
---|---|---|
committer | Jani Nikula <jani.nikula@intel.com> | 2016-12-27 05:59:50 -0500 |
commit | ade4d4410f8b8816a8e9d85bfdb4bdcc9464065a (patch) | |
tree | a8085776d1fdda2d2366cf74bfb074fc3b55b8e0 | |
parent | 7ce7d89f48834cefece7804d38fc5d85382edf77 (diff) | |
parent | 4e0203ba11e735694600d7c704d7d56f069f9eb6 (diff) |
Merge tag 'gvt-fixes-2016-12-26' of https://github.com/01org/gvt-linux into drm-intel-fixes
From Zhenyu, "This is current GVT-g device model fixes for 4.10. I need
to base on v4.10-rc1 for merged vfio and KVMGT support."
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
-rw-r--r-- | drivers/gpu/drm/i915/gvt/cfg_space.c | 4 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/gvt/gtt.c | 55 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/gvt/gtt.h | 4 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/gvt/gvt.h | 1 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/gvt/kvmgt.c | 46 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/gvt/opregion.c | 2 |
6 files changed, 99 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c index db516382a4d4..711c31c8d8b4 100644 --- a/drivers/gpu/drm/i915/gvt/cfg_space.c +++ b/drivers/gpu/drm/i915/gvt/cfg_space.c | |||
@@ -123,6 +123,7 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu, | |||
123 | u8 changed = old ^ new; | 123 | u8 changed = old ^ new; |
124 | int ret; | 124 | int ret; |
125 | 125 | ||
126 | memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes); | ||
126 | if (!(changed & PCI_COMMAND_MEMORY)) | 127 | if (!(changed & PCI_COMMAND_MEMORY)) |
127 | return 0; | 128 | return 0; |
128 | 129 | ||
@@ -142,7 +143,6 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu, | |||
142 | return ret; | 143 | return ret; |
143 | } | 144 | } |
144 | 145 | ||
145 | memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes); | ||
146 | return 0; | 146 | return 0; |
147 | } | 147 | } |
148 | 148 | ||
@@ -240,7 +240,7 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset, | |||
240 | if (WARN_ON(bytes > 4)) | 240 | if (WARN_ON(bytes > 4)) |
241 | return -EINVAL; | 241 | return -EINVAL; |
242 | 242 | ||
243 | if (WARN_ON(offset + bytes >= INTEL_GVT_MAX_CFG_SPACE_SZ)) | 243 | if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ)) |
244 | return -EINVAL; | 244 | return -EINVAL; |
245 | 245 | ||
246 | /* First check if it's PCI_COMMAND */ | 246 | /* First check if it's PCI_COMMAND */ |
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 7eaaf1c9ed2b..6c5fdf5b2ce2 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c | |||
@@ -1998,6 +1998,8 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu) | |||
1998 | INIT_LIST_HEAD(&gtt->oos_page_list_head); | 1998 | INIT_LIST_HEAD(&gtt->oos_page_list_head); |
1999 | INIT_LIST_HEAD(&gtt->post_shadow_list_head); | 1999 | INIT_LIST_HEAD(&gtt->post_shadow_list_head); |
2000 | 2000 | ||
2001 | intel_vgpu_reset_ggtt(vgpu); | ||
2002 | |||
2001 | ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT, | 2003 | ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT, |
2002 | NULL, 1, 0); | 2004 | NULL, 1, 0); |
2003 | if (IS_ERR(ggtt_mm)) { | 2005 | if (IS_ERR(ggtt_mm)) { |
@@ -2206,6 +2208,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu, | |||
2206 | int intel_gvt_init_gtt(struct intel_gvt *gvt) | 2208 | int intel_gvt_init_gtt(struct intel_gvt *gvt) |
2207 | { | 2209 | { |
2208 | int ret; | 2210 | int ret; |
2211 | void *page_addr; | ||
2209 | 2212 | ||
2210 | gvt_dbg_core("init gtt\n"); | 2213 | gvt_dbg_core("init gtt\n"); |
2211 | 2214 | ||
@@ -2218,6 +2221,23 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt) | |||
2218 | return -ENODEV; | 2221 | return -ENODEV; |
2219 | } | 2222 | } |
2220 | 2223 | ||
2224 | gvt->gtt.scratch_ggtt_page = | ||
2225 | alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO); | ||
2226 | if (!gvt->gtt.scratch_ggtt_page) { | ||
2227 | gvt_err("fail to allocate scratch ggtt page\n"); | ||
2228 | return -ENOMEM; | ||
2229 | } | ||
2230 | |||
2231 | page_addr = page_address(gvt->gtt.scratch_ggtt_page); | ||
2232 | |||
2233 | gvt->gtt.scratch_ggtt_mfn = | ||
2234 | intel_gvt_hypervisor_virt_to_mfn(page_addr); | ||
2235 | if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) { | ||
2236 | gvt_err("fail to translate scratch ggtt page\n"); | ||
2237 | __free_page(gvt->gtt.scratch_ggtt_page); | ||
2238 | return -EFAULT; | ||
2239 | } | ||
2240 | |||
2221 | if (enable_out_of_sync) { | 2241 | if (enable_out_of_sync) { |
2222 | ret = setup_spt_oos(gvt); | 2242 | ret = setup_spt_oos(gvt); |
2223 | if (ret) { | 2243 | if (ret) { |
@@ -2239,6 +2259,41 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt) | |||
2239 | */ | 2259 | */ |
2240 | void intel_gvt_clean_gtt(struct intel_gvt *gvt) | 2260 | void intel_gvt_clean_gtt(struct intel_gvt *gvt) |
2241 | { | 2261 | { |
2262 | __free_page(gvt->gtt.scratch_ggtt_page); | ||
2263 | |||
2242 | if (enable_out_of_sync) | 2264 | if (enable_out_of_sync) |
2243 | clean_spt_oos(gvt); | 2265 | clean_spt_oos(gvt); |
2244 | } | 2266 | } |
2267 | |||
2268 | /** | ||
2269 | * intel_vgpu_reset_ggtt - reset the GGTT entry | ||
2270 | * @vgpu: a vGPU | ||
2271 | * | ||
2272 | * This function is called at the vGPU create stage | ||
2273 | * to reset all the GGTT entries. | ||
2274 | * | ||
2275 | */ | ||
2276 | void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu) | ||
2277 | { | ||
2278 | struct intel_gvt *gvt = vgpu->gvt; | ||
2279 | struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; | ||
2280 | u32 index; | ||
2281 | u32 offset; | ||
2282 | u32 num_entries; | ||
2283 | struct intel_gvt_gtt_entry e; | ||
2284 | |||
2285 | memset(&e, 0, sizeof(struct intel_gvt_gtt_entry)); | ||
2286 | e.type = GTT_TYPE_GGTT_PTE; | ||
2287 | ops->set_pfn(&e, gvt->gtt.scratch_ggtt_mfn); | ||
2288 | e.val64 |= _PAGE_PRESENT; | ||
2289 | |||
2290 | index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT; | ||
2291 | num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT; | ||
2292 | for (offset = 0; offset < num_entries; offset++) | ||
2293 | ops->set_entry(NULL, &e, index + offset, false, 0, vgpu); | ||
2294 | |||
2295 | index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT; | ||
2296 | num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT; | ||
2297 | for (offset = 0; offset < num_entries; offset++) | ||
2298 | ops->set_entry(NULL, &e, index + offset, false, 0, vgpu); | ||
2299 | } | ||
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h index d250013bc37b..b315ab3593ec 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.h +++ b/drivers/gpu/drm/i915/gvt/gtt.h | |||
@@ -81,6 +81,9 @@ struct intel_gvt_gtt { | |||
81 | struct list_head oos_page_use_list_head; | 81 | struct list_head oos_page_use_list_head; |
82 | struct list_head oos_page_free_list_head; | 82 | struct list_head oos_page_free_list_head; |
83 | struct list_head mm_lru_list_head; | 83 | struct list_head mm_lru_list_head; |
84 | |||
85 | struct page *scratch_ggtt_page; | ||
86 | unsigned long scratch_ggtt_mfn; | ||
84 | }; | 87 | }; |
85 | 88 | ||
86 | enum { | 89 | enum { |
@@ -202,6 +205,7 @@ struct intel_vgpu_gtt { | |||
202 | 205 | ||
203 | extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu); | 206 | extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu); |
204 | extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu); | 207 | extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu); |
208 | void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu); | ||
205 | 209 | ||
206 | extern int intel_gvt_init_gtt(struct intel_gvt *gvt); | 210 | extern int intel_gvt_init_gtt(struct intel_gvt *gvt); |
207 | extern void intel_gvt_clean_gtt(struct intel_gvt *gvt); | 211 | extern void intel_gvt_clean_gtt(struct intel_gvt *gvt); |
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index ad0e9364ee70..0af17016f33f 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h | |||
@@ -175,6 +175,7 @@ struct intel_vgpu { | |||
175 | struct notifier_block group_notifier; | 175 | struct notifier_block group_notifier; |
176 | struct kvm *kvm; | 176 | struct kvm *kvm; |
177 | struct work_struct release_work; | 177 | struct work_struct release_work; |
178 | atomic_t released; | ||
178 | } vdev; | 179 | } vdev; |
179 | #endif | 180 | #endif |
180 | }; | 181 | }; |
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 4dd6722a7339..934963970288 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c | |||
@@ -114,12 +114,15 @@ out: | |||
114 | static kvm_pfn_t gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn) | 114 | static kvm_pfn_t gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn) |
115 | { | 115 | { |
116 | struct gvt_dma *entry; | 116 | struct gvt_dma *entry; |
117 | kvm_pfn_t pfn; | ||
117 | 118 | ||
118 | mutex_lock(&vgpu->vdev.cache_lock); | 119 | mutex_lock(&vgpu->vdev.cache_lock); |
120 | |||
119 | entry = __gvt_cache_find(vgpu, gfn); | 121 | entry = __gvt_cache_find(vgpu, gfn); |
120 | mutex_unlock(&vgpu->vdev.cache_lock); | 122 | pfn = (entry == NULL) ? 0 : entry->pfn; |
121 | 123 | ||
122 | return entry == NULL ? 0 : entry->pfn; | 124 | mutex_unlock(&vgpu->vdev.cache_lock); |
125 | return pfn; | ||
123 | } | 126 | } |
124 | 127 | ||
125 | static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn) | 128 | static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn) |
@@ -497,7 +500,16 @@ static int intel_vgpu_open(struct mdev_device *mdev) | |||
497 | goto undo_iommu; | 500 | goto undo_iommu; |
498 | } | 501 | } |
499 | 502 | ||
500 | return kvmgt_guest_init(mdev); | 503 | ret = kvmgt_guest_init(mdev); |
504 | if (ret) | ||
505 | goto undo_group; | ||
506 | |||
507 | atomic_set(&vgpu->vdev.released, 0); | ||
508 | return ret; | ||
509 | |||
510 | undo_group: | ||
511 | vfio_unregister_notifier(&mdev->dev, VFIO_GROUP_NOTIFY, | ||
512 | &vgpu->vdev.group_notifier); | ||
501 | 513 | ||
502 | undo_iommu: | 514 | undo_iommu: |
503 | vfio_unregister_notifier(&mdev->dev, VFIO_IOMMU_NOTIFY, | 515 | vfio_unregister_notifier(&mdev->dev, VFIO_IOMMU_NOTIFY, |
@@ -509,17 +521,26 @@ out: | |||
509 | static void __intel_vgpu_release(struct intel_vgpu *vgpu) | 521 | static void __intel_vgpu_release(struct intel_vgpu *vgpu) |
510 | { | 522 | { |
511 | struct kvmgt_guest_info *info; | 523 | struct kvmgt_guest_info *info; |
524 | int ret; | ||
512 | 525 | ||
513 | if (!handle_valid(vgpu->handle)) | 526 | if (!handle_valid(vgpu->handle)) |
514 | return; | 527 | return; |
515 | 528 | ||
516 | vfio_unregister_notifier(&vgpu->vdev.mdev->dev, VFIO_IOMMU_NOTIFY, | 529 | if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1)) |
530 | return; | ||
531 | |||
532 | ret = vfio_unregister_notifier(&vgpu->vdev.mdev->dev, VFIO_IOMMU_NOTIFY, | ||
517 | &vgpu->vdev.iommu_notifier); | 533 | &vgpu->vdev.iommu_notifier); |
518 | vfio_unregister_notifier(&vgpu->vdev.mdev->dev, VFIO_GROUP_NOTIFY, | 534 | WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret); |
535 | |||
536 | ret = vfio_unregister_notifier(&vgpu->vdev.mdev->dev, VFIO_GROUP_NOTIFY, | ||
519 | &vgpu->vdev.group_notifier); | 537 | &vgpu->vdev.group_notifier); |
538 | WARN(ret, "vfio_unregister_notifier for group failed: %d\n", ret); | ||
520 | 539 | ||
521 | info = (struct kvmgt_guest_info *)vgpu->handle; | 540 | info = (struct kvmgt_guest_info *)vgpu->handle; |
522 | kvmgt_guest_exit(info); | 541 | kvmgt_guest_exit(info); |
542 | |||
543 | vgpu->vdev.kvm = NULL; | ||
523 | vgpu->handle = 0; | 544 | vgpu->handle = 0; |
524 | } | 545 | } |
525 | 546 | ||
@@ -534,6 +555,7 @@ static void intel_vgpu_release_work(struct work_struct *work) | |||
534 | { | 555 | { |
535 | struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu, | 556 | struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu, |
536 | vdev.release_work); | 557 | vdev.release_work); |
558 | |||
537 | __intel_vgpu_release(vgpu); | 559 | __intel_vgpu_release(vgpu); |
538 | } | 560 | } |
539 | 561 | ||
@@ -1134,6 +1156,10 @@ static int kvmgt_write_protect_add(unsigned long handle, u64 gfn) | |||
1134 | 1156 | ||
1135 | idx = srcu_read_lock(&kvm->srcu); | 1157 | idx = srcu_read_lock(&kvm->srcu); |
1136 | slot = gfn_to_memslot(kvm, gfn); | 1158 | slot = gfn_to_memslot(kvm, gfn); |
1159 | if (!slot) { | ||
1160 | srcu_read_unlock(&kvm->srcu, idx); | ||
1161 | return -EINVAL; | ||
1162 | } | ||
1137 | 1163 | ||
1138 | spin_lock(&kvm->mmu_lock); | 1164 | spin_lock(&kvm->mmu_lock); |
1139 | 1165 | ||
@@ -1164,6 +1190,10 @@ static int kvmgt_write_protect_remove(unsigned long handle, u64 gfn) | |||
1164 | 1190 | ||
1165 | idx = srcu_read_lock(&kvm->srcu); | 1191 | idx = srcu_read_lock(&kvm->srcu); |
1166 | slot = gfn_to_memslot(kvm, gfn); | 1192 | slot = gfn_to_memslot(kvm, gfn); |
1193 | if (!slot) { | ||
1194 | srcu_read_unlock(&kvm->srcu, idx); | ||
1195 | return -EINVAL; | ||
1196 | } | ||
1167 | 1197 | ||
1168 | spin_lock(&kvm->mmu_lock); | 1198 | spin_lock(&kvm->mmu_lock); |
1169 | 1199 | ||
@@ -1311,18 +1341,14 @@ static int kvmgt_guest_init(struct mdev_device *mdev) | |||
1311 | 1341 | ||
1312 | static bool kvmgt_guest_exit(struct kvmgt_guest_info *info) | 1342 | static bool kvmgt_guest_exit(struct kvmgt_guest_info *info) |
1313 | { | 1343 | { |
1314 | struct intel_vgpu *vgpu; | ||
1315 | |||
1316 | if (!info) { | 1344 | if (!info) { |
1317 | gvt_err("kvmgt_guest_info invalid\n"); | 1345 | gvt_err("kvmgt_guest_info invalid\n"); |
1318 | return false; | 1346 | return false; |
1319 | } | 1347 | } |
1320 | 1348 | ||
1321 | vgpu = info->vgpu; | ||
1322 | |||
1323 | kvm_page_track_unregister_notifier(info->kvm, &info->track_node); | 1349 | kvm_page_track_unregister_notifier(info->kvm, &info->track_node); |
1324 | kvmgt_protect_table_destroy(info); | 1350 | kvmgt_protect_table_destroy(info); |
1325 | gvt_cache_destroy(vgpu); | 1351 | gvt_cache_destroy(info->vgpu); |
1326 | vfree(info); | 1352 | vfree(info); |
1327 | 1353 | ||
1328 | return true; | 1354 | return true; |
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c index d2a0fbc896c3..81cd921770c6 100644 --- a/drivers/gpu/drm/i915/gvt/opregion.c +++ b/drivers/gpu/drm/i915/gvt/opregion.c | |||
@@ -65,7 +65,7 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map) | |||
65 | int i, ret; | 65 | int i, ret; |
66 | 66 | ||
67 | for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) { | 67 | for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) { |
68 | mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu) | 68 | mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va |
69 | + i * PAGE_SIZE); | 69 | + i * PAGE_SIZE); |
70 | if (mfn == INTEL_GVT_INVALID_ADDR) { | 70 | if (mfn == INTEL_GVT_INVALID_ADDR) { |
71 | gvt_err("fail to get MFN from VA\n"); | 71 | gvt_err("fail to get MFN from VA\n"); |