Diffstat (limited to 'drivers/vfio/pci/vfio_pci.c')
-rw-r--r--  drivers/vfio/pci/vfio_pci.c  175
1 file changed, 168 insertions(+), 7 deletions(-)
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 8c80a48e3233..712a84978e97 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -111,6 +111,7 @@ static inline bool vfio_pci_is_vga(struct pci_dev *pdev)
 }
 
 static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev);
+static void vfio_pci_disable(struct vfio_pci_device *vdev);
 
 static int vfio_pci_enable(struct vfio_pci_device *vdev)
 {
@@ -169,13 +170,26 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev)
 	if (!vfio_vga_disabled() && vfio_pci_is_vga(pdev))
 		vdev->has_vga = true;
 
+
+	if (vfio_pci_is_vga(pdev) &&
+	    pdev->vendor == PCI_VENDOR_ID_INTEL &&
+	    IS_ENABLED(CONFIG_VFIO_PCI_IGD)) {
+		ret = vfio_pci_igd_init(vdev);
+		if (ret) {
+			dev_warn(&vdev->pdev->dev,
+				 "Failed to setup Intel IGD regions\n");
+			vfio_pci_disable(vdev);
+			return ret;
+		}
+	}
+
 	return 0;
 }
 
 static void vfio_pci_disable(struct vfio_pci_device *vdev)
 {
 	struct pci_dev *pdev = vdev->pdev;
-	int bar;
+	int i, bar;
 
 	/* Stop the device from further DMA */
 	pci_clear_master(pdev);
@@ -186,6 +200,13 @@ static void vfio_pci_disable(struct vfio_pci_device *vdev)
 
 	vdev->virq_disabled = false;
 
+	for (i = 0; i < vdev->num_regions; i++)
+		vdev->region[i].ops->release(vdev, &vdev->region[i]);
+
+	vdev->num_regions = 0;
+	kfree(vdev->region);
+	vdev->region = NULL; /* don't krealloc a freed pointer */
+
 	vfio_config_free(vdev);
 
 	for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
@@ -421,6 +442,93 @@ static int vfio_pci_for_each_slot_or_bus(struct pci_dev *pdev,
 	return walk.ret;
 }
 
+static int msix_sparse_mmap_cap(struct vfio_pci_device *vdev,
+				struct vfio_info_cap *caps)
+{
+	struct vfio_info_cap_header *header;
+	struct vfio_region_info_cap_sparse_mmap *sparse;
+	size_t end, size;
+	int nr_areas = 2, i = 0;
+
+	end = pci_resource_len(vdev->pdev, vdev->msix_bar);
+
+	/* If MSI-X table is aligned to the start or end, only one area */
+	if (((vdev->msix_offset & PAGE_MASK) == 0) ||
+	    (PAGE_ALIGN(vdev->msix_offset + vdev->msix_size) >= end))
+		nr_areas = 1;
+
+	size = sizeof(*sparse) + (nr_areas * sizeof(*sparse->areas));
+
+	header = vfio_info_cap_add(caps, size,
+				   VFIO_REGION_INFO_CAP_SPARSE_MMAP, 1);
+	if (IS_ERR(header))
+		return PTR_ERR(header);
+
+	sparse = container_of(header,
+			      struct vfio_region_info_cap_sparse_mmap, header);
+	sparse->nr_areas = nr_areas;
+
+	if (vdev->msix_offset & PAGE_MASK) {
+		sparse->areas[i].offset = 0;
+		sparse->areas[i].size = vdev->msix_offset & PAGE_MASK;
+		i++;
+	}
+
+	if (PAGE_ALIGN(vdev->msix_offset + vdev->msix_size) < end) {
+		sparse->areas[i].offset = PAGE_ALIGN(vdev->msix_offset +
+						     vdev->msix_size);
+		sparse->areas[i].size = end - sparse->areas[i].offset;
+		i++;
+	}
+
+	return 0;
+}
+
+static int region_type_cap(struct vfio_pci_device *vdev,
+			   struct vfio_info_cap *caps,
+			   unsigned int type, unsigned int subtype)
+{
+	struct vfio_info_cap_header *header;
+	struct vfio_region_info_cap_type *cap;
+
+	header = vfio_info_cap_add(caps, sizeof(*cap),
+				   VFIO_REGION_INFO_CAP_TYPE, 1);
+	if (IS_ERR(header))
+		return PTR_ERR(header);
+
+	cap = container_of(header, struct vfio_region_info_cap_type, header);
+	cap->type = type;
+	cap->subtype = subtype;
+
+	return 0;
+}
+
+int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
+				 unsigned int type, unsigned int subtype,
+				 const struct vfio_pci_regops *ops,
+				 size_t size, u32 flags, void *data)
+{
+	struct vfio_pci_region *region;
+
+	region = krealloc(vdev->region,
+			  (vdev->num_regions + 1) * sizeof(*region),
+			  GFP_KERNEL);
+	if (!region)
+		return -ENOMEM;
+
+	vdev->region = region;
+	vdev->region[vdev->num_regions].type = type;
+	vdev->region[vdev->num_regions].subtype = subtype;
+	vdev->region[vdev->num_regions].ops = ops;
+	vdev->region[vdev->num_regions].size = size;
+	vdev->region[vdev->num_regions].flags = flags;
+	vdev->region[vdev->num_regions].data = data;
+
+	vdev->num_regions++;
+
+	return 0;
+}
+
 static long vfio_pci_ioctl(void *device_data,
 			   unsigned int cmd, unsigned long arg)
 {
@@ -443,7 +551,7 @@ static long vfio_pci_ioctl(void *device_data,
 		if (vdev->reset_works)
 			info.flags |= VFIO_DEVICE_FLAGS_RESET;
 
-		info.num_regions = VFIO_PCI_NUM_REGIONS;
+		info.num_regions = VFIO_PCI_NUM_REGIONS + vdev->num_regions;
 		info.num_irqs = VFIO_PCI_NUM_IRQS;
 
 		return copy_to_user((void __user *)arg, &info, minsz) ?
@@ -452,6 +560,8 @@ static long vfio_pci_ioctl(void *device_data,
 	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
 		struct pci_dev *pdev = vdev->pdev;
 		struct vfio_region_info info;
+		struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
+		int i, ret;
 
 		minsz = offsetofend(struct vfio_region_info, offset);
 
@@ -480,8 +590,15 @@ static long vfio_pci_ioctl(void *device_data,
 				     VFIO_REGION_INFO_FLAG_WRITE;
 			if (IS_ENABLED(CONFIG_VFIO_PCI_MMAP) &&
 			    pci_resource_flags(pdev, info.index) &
-			    IORESOURCE_MEM && info.size >= PAGE_SIZE)
+			    IORESOURCE_MEM && info.size >= PAGE_SIZE) {
 				info.flags |= VFIO_REGION_INFO_FLAG_MMAP;
+				if (info.index == vdev->msix_bar) {
+					ret = msix_sparse_mmap_cap(vdev, &caps);
+					if (ret)
+						return ret;
+				}
+			}
+
 			break;
 		case VFIO_PCI_ROM_REGION_INDEX:
 		{
@@ -493,8 +610,14 @@ static long vfio_pci_ioctl(void *device_data,
 
 			/* Report the BAR size, not the ROM size */
 			info.size = pci_resource_len(pdev, info.index);
-			if (!info.size)
-				break;
+			if (!info.size) {
+				/* Shadow ROMs appear as PCI option ROMs */
+				if (pdev->resource[PCI_ROM_RESOURCE].flags &
+				    IORESOURCE_ROM_SHADOW)
+					info.size = 0x20000;
+				else
+					break;
+			}
 
 			/* Is it really there? */
 			io = pci_map_rom(pdev, &size);
@@ -518,7 +641,40 @@ static long vfio_pci_ioctl(void *device_data,
 
 			break;
 		default:
-			return -EINVAL;
+			if (info.index >=
+			    VFIO_PCI_NUM_REGIONS + vdev->num_regions)
+				return -EINVAL;
+
+			i = info.index - VFIO_PCI_NUM_REGIONS;
+
+			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+			info.size = vdev->region[i].size;
+			info.flags = vdev->region[i].flags;
+
+			ret = region_type_cap(vdev, &caps,
+					      vdev->region[i].type,
+					      vdev->region[i].subtype);
+			if (ret)
+				return ret;
+		}
+
+		if (caps.size) {
+			info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
+			if (info.argsz < sizeof(info) + caps.size) {
+				info.argsz = sizeof(info) + caps.size;
+				info.cap_offset = 0;
+			} else {
+				vfio_info_cap_shift(&caps, sizeof(info));
+				if (copy_to_user((void __user *)arg +
+						 sizeof(info), caps.buf,
+						 caps.size)) {
+					kfree(caps.buf);
+					return -EFAULT;
+				}
+				info.cap_offset = sizeof(info);
+			}
+
+			kfree(caps.buf);
 		}
 
 		return copy_to_user((void __user *)arg, &info, minsz) ?
@@ -798,7 +954,7 @@ static ssize_t vfio_pci_rw(void *device_data, char __user *buf,
 	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
 	struct vfio_pci_device *vdev = device_data;
 
-	if (index >= VFIO_PCI_NUM_REGIONS)
+	if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
 		return -EINVAL;
 
 	switch (index) {
@@ -815,6 +971,10 @@ static ssize_t vfio_pci_rw(void *device_data, char __user *buf,
 
 	case VFIO_PCI_VGA_REGION_INDEX:
 		return vfio_pci_vga_rw(vdev, buf, count, ppos, iswrite);
+	default:
+		index -= VFIO_PCI_NUM_REGIONS;
+		return vdev->region[index].ops->rw(vdev, buf,
+						   count, ppos, iswrite);
 	}
 
 	return -EINVAL;
@@ -997,6 +1157,7 @@ static void vfio_pci_remove(struct pci_dev *pdev)
 		return;
 
 	vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev);
+	kfree(vdev->region);
 	kfree(vdev);
 
 	if (vfio_pci_is_vga(pdev)) {
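
Example (not part of the patch): the exported vfio_pci_register_dev_region() above is the hook that device specific code, such as the Intel IGD support enabled in vfio_pci_enable(), uses to expose an extra region at an index past VFIO_PCI_NUM_REGIONS. The sketch below registers a read-only region backed by a kernel buffer. The "demo" names and the VFIO_REGION_TYPE_DEMO/VFIO_REGION_SUBTYPE_DEMO constants are hypothetical placeholders, and the vfio_pci_regops callback prototypes are inferred from the ops->rw() and ops->release() call sites added by this patch, so treat it as an illustration rather than in-tree code.

/*
 * Illustrative sketch only; assumes it sits next to vfio_pci.c and
 * includes "vfio_pci_private.h".  Names marked "demo" are hypothetical.
 */
static size_t vfio_pci_demo_rw(struct vfio_pci_device *vdev, char __user *buf,
			       size_t count, loff_t *ppos, bool iswrite)
{
	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
	struct vfio_pci_region *region = &vdev->region[i];

	/* Region was registered read-only; reject writes and bad offsets */
	if (iswrite || pos >= region->size)
		return -EINVAL;

	count = min(count, (size_t)(region->size - pos));
	if (copy_to_user(buf, region->data + pos, count))
		return -EFAULT;

	*ppos += count;
	return count;
}

static void vfio_pci_demo_release(struct vfio_pci_device *vdev,
				  struct vfio_pci_region *region)
{
	kfree(region->data);	/* paired with the kzalloc() in init below */
}

static const struct vfio_pci_regops vfio_pci_demo_regops = {
	.rw		= vfio_pci_demo_rw,
	.release	= vfio_pci_demo_release,
};

/* Would be called from device specific setup, e.g. from vfio_pci_enable() */
static int vfio_pci_demo_init(struct vfio_pci_device *vdev)
{
	void *data = kzalloc(PAGE_SIZE, GFP_KERNEL);

	if (!data)
		return -ENOMEM;

	/* Type/subtype values below are placeholders, not real uAPI */
	return vfio_pci_register_dev_region(vdev, VFIO_REGION_TYPE_DEMO,
					    VFIO_REGION_SUBTYPE_DEMO,
					    &vfio_pci_demo_regops, PAGE_SIZE,
					    VFIO_REGION_INFO_FLAG_READ, data);
}

Userspace then discovers such a region through VFIO_DEVICE_GET_REGION_INFO, where the VFIO_REGION_INFO_CAP_TYPE capability added by region_type_cap() carries the type/subtype pair back out, and reads it through the ops->rw() path added to vfio_pci_rw().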