 arch/x86/kernel/amd_iommu.c | 34 ++++++++++++++++++++++++++++++++++
 1 file changed, 34 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index aab9125ac0b2..bed5f820898d 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -872,3 +872,37 @@ free_mem:
 	free_pages((unsigned long)virt_addr, get_order(size));
 }
 
+/*
+ * If the driver core informs the DMA layer if a driver grabs a device
+ * we don't need to preallocate the protection domains anymore.
+ * For now we have to.
+ */
+void prealloc_protection_domains(void)
+{
+	struct pci_dev *dev = NULL;
+	struct dma_ops_domain *dma_dom;
+	struct amd_iommu *iommu;
+	int order = amd_iommu_aperture_order;
+	u16 devid;
+
+	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+		devid = (dev->bus->number << 8) | dev->devfn;
+		if (devid >= amd_iommu_last_bdf)
+			continue;
+		devid = amd_iommu_alias_table[devid];
+		if (domain_for_device(devid))
+			continue;
+		iommu = amd_iommu_rlookup_table[devid];
+		if (!iommu)
+			continue;
+		dma_dom = dma_ops_domain_alloc(iommu, order);
+		if (!dma_dom)
+			continue;
+		init_unity_mappings_for_device(dma_dom, devid);
+		set_device_domain(iommu, &dma_dom->domain, devid);
+		printk(KERN_INFO "AMD IOMMU: Allocated domain %d for device ",
+		       dma_dom->domain.id);
+		print_devid(devid, 1);
+	}
+}
+
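For reference, and not part of the patch itself: the 16-bit devid the loop computes is the PCI requestor id, with the bus number in the high byte and devfn in the low byte. Below is a minimal, self-contained sketch of that encoding; the helper name bdf_to_devid and the sample values are illustrative only.

/*
 * Standalone illustration, not part of the patch: how the 16-bit devid
 * used above maps to a PCI bus/device/function triple. The loop computes
 * (bus << 8) | devfn, where devfn itself packs a 5-bit slot number and a
 * 3-bit function number. Helper name and sample values are made up here.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t bdf_to_devid(uint8_t bus, uint8_t slot, uint8_t func)
{
	uint8_t devfn = (uint8_t)((slot << 3) | (func & 0x7)); /* PCI devfn encoding */

	return (uint16_t)((bus << 8) | devfn); /* same layout as the patch's devid */
}

int main(void)
{
	/* e.g. device 00:02.0 -> devfn 0x10 -> devid 0x0010 */
	printf("devid = 0x%04x\n", bdf_to_devid(0x00, 0x02, 0x0));
	return 0;
}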