Diffstat (limited to 'drivers/gpu/drm/i915/gvt/kvmgt.c')
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c  116
1 file changed, 56 insertions, 60 deletions
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 0c9234a87a20..0f7f5d97f582 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -77,7 +77,7 @@ struct kvmgt_guest_info {
 struct gvt_dma {
 	struct rb_node node;
 	gfn_t gfn;
-	kvm_pfn_t pfn;
+	unsigned long iova;
 };
 
 static inline bool handle_valid(unsigned long handle)
@@ -89,6 +89,35 @@ static int kvmgt_guest_init(struct mdev_device *mdev);
 static void intel_vgpu_release_work(struct work_struct *work);
 static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
 
+static int gvt_dma_map_iova(struct intel_vgpu *vgpu, kvm_pfn_t pfn,
+		unsigned long *iova)
+{
+	struct page *page;
+	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+	dma_addr_t daddr;
+
+	page = pfn_to_page(pfn);
+	if (is_error_page(page))
+		return -EFAULT;
+
+	daddr = dma_map_page(dev, page, 0, PAGE_SIZE,
+			PCI_DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, daddr))
+		return -ENOMEM;
+
+	*iova = (unsigned long)(daddr >> PAGE_SHIFT);
+	return 0;
+}
+
+static void gvt_dma_unmap_iova(struct intel_vgpu *vgpu, unsigned long iova)
+{
+	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+	dma_addr_t daddr;
+
+	daddr = (dma_addr_t)(iova << PAGE_SHIFT);
+	dma_unmap_page(dev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+}
+
 static struct gvt_dma *__gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
 {
 	struct rb_node *node = vgpu->vdev.cache.rb_node;
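
A minimal usage sketch (not part of the patch; the surrounding caller is hypothetical): the two helpers above are meant to be paired around the lifetime of a cache entry. gvt_dma_map_iova() turns a pinned kvm pfn into a host IOVA page frame via dma_map_page(), and gvt_dma_unmap_iova() reverses it when the entry is dropped.

	/* Hypothetical caller sketch, assuming pfn was pinned via vfio_pin_pages(). */
	unsigned long iova;
	int ret = gvt_dma_map_iova(vgpu, pfn, &iova);

	if (ret == 0) {
		gvt_cache_add(vgpu, gfn, iova);		/* remember gfn -> iova */
		/* later, from gvt_cache_remove()/gvt_cache_destroy(): */
		gvt_dma_unmap_iova(vgpu, iova);		/* undo dma_map_page() */
	}
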
@@ -111,21 +140,22 @@ out:
 	return ret;
 }
 
-static kvm_pfn_t gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
+static unsigned long gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
 {
 	struct gvt_dma *entry;
-	kvm_pfn_t pfn;
+	unsigned long iova;
 
 	mutex_lock(&vgpu->vdev.cache_lock);
 
 	entry = __gvt_cache_find(vgpu, gfn);
-	pfn = (entry == NULL) ? 0 : entry->pfn;
+	iova = (entry == NULL) ? INTEL_GVT_INVALID_ADDR : entry->iova;
 
 	mutex_unlock(&vgpu->vdev.cache_lock);
-	return pfn;
+	return iova;
 }
 
-static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn)
+static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
+		unsigned long iova)
 {
 	struct gvt_dma *new, *itr;
 	struct rb_node **link = &vgpu->vdev.cache.rb_node, *parent = NULL;
@@ -135,7 +165,7 @@ static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn)
 		return;
 
 	new->gfn = gfn;
-	new->pfn = pfn;
+	new->iova = iova;
 
 	mutex_lock(&vgpu->vdev.cache_lock);
 	while (*link) {
@@ -182,6 +212,7 @@ static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
 	}
 
 	g1 = gfn;
+	gvt_dma_unmap_iova(vgpu, this->iova);
 	rc = vfio_unpin_pages(dev, &g1, 1);
 	WARN_ON(rc != 1);
 	__gvt_cache_remove_entry(vgpu, this);
@@ -204,6 +235,7 @@ static void gvt_cache_destroy(struct intel_vgpu *vgpu)
 	mutex_lock(&vgpu->vdev.cache_lock);
 	while ((node = rb_first(&vgpu->vdev.cache))) {
 		dma = rb_entry(node, struct gvt_dma, node);
+		gvt_dma_unmap_iova(vgpu, dma->iova);
 		gfn = dma->gfn;
 
 		vfio_unpin_pages(dev, &gfn, 1);
@@ -230,8 +262,8 @@ static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
 	return NULL;
 }
 
-static ssize_t available_instance_show(struct kobject *kobj, struct device *dev,
-		char *buf)
+static ssize_t available_instances_show(struct kobject *kobj,
+		struct device *dev, char *buf)
 {
 	struct intel_vgpu_type *type;
 	unsigned int num = 0;
@@ -269,12 +301,12 @@ static ssize_t description_show(struct kobject *kobj, struct device *dev,
 			type->fence);
 }
 
-static MDEV_TYPE_ATTR_RO(available_instance);
+static MDEV_TYPE_ATTR_RO(available_instances);
 static MDEV_TYPE_ATTR_RO(device_api);
 static MDEV_TYPE_ATTR_RO(description);
 
 static struct attribute *type_attrs[] = {
-	&mdev_type_attr_available_instance.attr,
+	&mdev_type_attr_available_instances.attr,
 	&mdev_type_attr_device_api.attr,
 	&mdev_type_attr_description.attr,
 	NULL,
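
For context (a hedged paraphrase of the mdev core header, not part of this patch): MDEV_TYPE_ATTR_RO(_name) expands roughly to

	struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_RO(_name);

and __ATTR_RO(_name) both stringifies _name for the sysfs file name and binds the attribute to a show routine named _name##_show. Userspace expects the standard mdev attribute to be literally called "available_instances", so the show function, the macro argument, and the attribute reference all have to carry the plural spelling, which is what the three renames in this hunk fix.
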
@@ -965,11 +997,6 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
 		sparse->areas[0].offset =
 				PAGE_ALIGN(vgpu_aperture_offset(vgpu));
 		sparse->areas[0].size = vgpu_aperture_sz(vgpu);
-		if (!caps.buf) {
-			kfree(caps.buf);
-			caps.buf = NULL;
-			caps.size = 0;
-		}
 		break;
 
 	case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
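
The block dropped here could never do useful work: it ran only when caps.buf was already NULL, so the kfree() freed nothing and the assignments were dead stores. An annotated copy of the removed lines (for illustration only):

	if (!caps.buf) {		/* taken only when caps.buf == NULL */
		kfree(caps.buf);	/* kfree(NULL) is a documented no-op */
		caps.buf = NULL;	/* already NULL */
		caps.size = 0;
	}
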
@@ -1248,43 +1275,6 @@ static void kvmgt_page_track_flush_slot(struct kvm *kvm,
 	spin_unlock(&kvm->mmu_lock);
 }
 
-static bool kvmgt_check_guest(void)
-{
-	unsigned int eax, ebx, ecx, edx;
-	char s[12];
-	unsigned int *i;
-
-	eax = KVM_CPUID_SIGNATURE;
-	ebx = ecx = edx = 0;
-
-	asm volatile ("cpuid"
-		: "+a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
-		:
-		: "cc", "memory");
-	i = (unsigned int *)s;
-	i[0] = ebx;
-	i[1] = ecx;
-	i[2] = edx;
-
-	return !strncmp(s, "KVMKVMKVM", strlen("KVMKVMKVM"));
-}
-
-/**
- * NOTE:
- * It's actually impossible to check if we are running in KVM host,
- * since the "KVM host" is simply native. So we only dectect guest here.
- */
-static int kvmgt_detect_host(void)
-{
-#ifdef CONFIG_INTEL_IOMMU
-	if (intel_iommu_gfx_mapped) {
-		gvt_err("Hardware IOMMU compatibility not yet supported, try to boot with intel_iommu=igfx_off\n");
-		return -ENODEV;
-	}
-#endif
-	return kvmgt_check_guest() ? -ENODEV : 0;
-}
-
 static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm)
 {
 	struct intel_vgpu *itr;
@@ -1390,7 +1380,7 @@ static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
 
 static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
 {
-	unsigned long pfn;
+	unsigned long iova, pfn;
 	struct kvmgt_guest_info *info;
 	struct device *dev;
 	int rc;
@@ -1399,9 +1389,9 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
 		return INTEL_GVT_INVALID_ADDR;
 
 	info = (struct kvmgt_guest_info *)handle;
-	pfn = gvt_cache_find(info->vgpu, gfn);
-	if (pfn != 0)
-		return pfn;
+	iova = gvt_cache_find(info->vgpu, gfn);
+	if (iova != INTEL_GVT_INVALID_ADDR)
+		return iova;
 
 	pfn = INTEL_GVT_INVALID_ADDR;
 	dev = mdev_dev(info->vgpu->vdev.mdev);
@@ -1410,9 +1400,16 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
 		gvt_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", gfn, rc);
 		return INTEL_GVT_INVALID_ADDR;
 	}
+	/* transfer to host iova for GFX to use DMA */
+	rc = gvt_dma_map_iova(info->vgpu, pfn, &iova);
+	if (rc) {
+		gvt_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn);
+		vfio_unpin_pages(dev, &gfn, 1);
+		return INTEL_GVT_INVALID_ADDR;
+	}
 
-	gvt_cache_add(info->vgpu, gfn, pfn);
-	return pfn;
+	gvt_cache_add(info->vgpu, gfn, iova);
+	return iova;
 }
 
 static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
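
After this hunk, kvmgt_gfn_to_pfn() pins the guest page, translates the resulting pfn into a host IOVA page frame, caches it, and returns the IOVA, so callers receive a DMA-remappable address rather than a raw CPU pfn. A hedged caller-side sketch (the function below is hypothetical, only kvmgt_gfn_to_pfn() comes from this file):

	/* Illustrative only: the returned "pfn" is now an IOVA page frame. */
	static u64 shadow_gtt_pte(unsigned long handle, unsigned long gfn)
	{
		unsigned long mfn = kvmgt_gfn_to_pfn(handle, gfn);

		if (mfn == INTEL_GVT_INVALID_ADDR)
			return 0;

		/* shift back to a bus address the GPU can DMA to */
		return (u64)mfn << PAGE_SHIFT;
	}
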
@@ -1459,7 +1456,6 @@ static unsigned long kvmgt_virt_to_pfn(void *addr)
 }
 
 struct intel_gvt_mpt kvmgt_mpt = {
-	.detect_host = kvmgt_detect_host,
 	.host_init = kvmgt_host_init,
 	.host_exit = kvmgt_host_exit,
 	.attach_vgpu = kvmgt_attach_vgpu,