diff options
Diffstat (limited to 'drivers/pci/intel-iommu.c')
| -rw-r--r-- | drivers/pci/intel-iommu.c | 55 |
1 file changed, 43 insertions, 12 deletions
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index 505c1c7075f0..d552d2c77844 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c | |||
| @@ -1299,7 +1299,7 @@ static void iommu_detach_domain(struct dmar_domain *domain, | |||
| 1299 | static struct iova_domain reserved_iova_list; | 1299 | static struct iova_domain reserved_iova_list; |
| 1300 | static struct lock_class_key reserved_rbtree_key; | 1300 | static struct lock_class_key reserved_rbtree_key; |
| 1301 | 1301 | ||
| 1302 | static void dmar_init_reserved_ranges(void) | 1302 | static int dmar_init_reserved_ranges(void) |
| 1303 | { | 1303 | { |
| 1304 | struct pci_dev *pdev = NULL; | 1304 | struct pci_dev *pdev = NULL; |
| 1305 | struct iova *iova; | 1305 | struct iova *iova; |
| @@ -1313,8 +1313,10 @@ static void dmar_init_reserved_ranges(void) | |||
| 1313 | /* IOAPIC ranges shouldn't be accessed by DMA */ | 1313 | /* IOAPIC ranges shouldn't be accessed by DMA */ |
| 1314 | iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START), | 1314 | iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START), |
| 1315 | IOVA_PFN(IOAPIC_RANGE_END)); | 1315 | IOVA_PFN(IOAPIC_RANGE_END)); |
| 1316 | if (!iova) | 1316 | if (!iova) { |
| 1317 | printk(KERN_ERR "Reserve IOAPIC range failed\n"); | 1317 | printk(KERN_ERR "Reserve IOAPIC range failed\n"); |
| 1318 | return -ENODEV; | ||
| 1319 | } | ||
| 1318 | 1320 | ||
| 1319 | /* Reserve all PCI MMIO to avoid peer-to-peer access */ | 1321 | /* Reserve all PCI MMIO to avoid peer-to-peer access */ |
| 1320 | for_each_pci_dev(pdev) { | 1322 | for_each_pci_dev(pdev) { |
| @@ -1327,11 +1329,13 @@ static void dmar_init_reserved_ranges(void) | |||
| 1327 | iova = reserve_iova(&reserved_iova_list, | 1329 | iova = reserve_iova(&reserved_iova_list, |
| 1328 | IOVA_PFN(r->start), | 1330 | IOVA_PFN(r->start), |
| 1329 | IOVA_PFN(r->end)); | 1331 | IOVA_PFN(r->end)); |
| 1330 | if (!iova) | 1332 | if (!iova) { |
| 1331 | printk(KERN_ERR "Reserve iova failed\n"); | 1333 | printk(KERN_ERR "Reserve iova failed\n"); |
| 1334 | return -ENODEV; | ||
| 1335 | } | ||
| 1332 | } | 1336 | } |
| 1333 | } | 1337 | } |
| 1334 | 1338 | return 0; | |
| 1335 | } | 1339 | } |
| 1336 | 1340 | ||
| 1337 | static void domain_reserve_special_ranges(struct dmar_domain *domain) | 1341 | static void domain_reserve_special_ranges(struct dmar_domain *domain) |
| @@ -1835,7 +1839,7 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw) | |||
| 1835 | 1839 | ||
| 1836 | ret = iommu_attach_domain(domain, iommu); | 1840 | ret = iommu_attach_domain(domain, iommu); |
| 1837 | if (ret) { | 1841 | if (ret) { |
| 1838 | domain_exit(domain); | 1842 | free_domain_mem(domain); |
| 1839 | goto error; | 1843 | goto error; |
| 1840 | } | 1844 | } |
| 1841 | 1845 | ||
| @@ -2213,7 +2217,7 @@ static int __init iommu_prepare_static_identity_mapping(int hw) | |||
| 2213 | return 0; | 2217 | return 0; |
| 2214 | } | 2218 | } |
| 2215 | 2219 | ||
| 2216 | int __init init_dmars(void) | 2220 | static int __init init_dmars(int force_on) |
| 2217 | { | 2221 | { |
| 2218 | struct dmar_drhd_unit *drhd; | 2222 | struct dmar_drhd_unit *drhd; |
| 2219 | struct dmar_rmrr_unit *rmrr; | 2223 | struct dmar_rmrr_unit *rmrr; |
| @@ -2393,8 +2397,15 @@ int __init init_dmars(void) | |||
| 2393 | * enable translation | 2397 | * enable translation |
| 2394 | */ | 2398 | */ |
| 2395 | for_each_drhd_unit(drhd) { | 2399 | for_each_drhd_unit(drhd) { |
| 2396 | if (drhd->ignored) | 2400 | if (drhd->ignored) { |
| 2401 | /* | ||
| 2402 | * we always have to disable PMRs or DMA may fail on | ||
| 2403 | * this device | ||
| 2404 | */ | ||
| 2405 | if (force_on) | ||
| 2406 | iommu_disable_protect_mem_regions(drhd->iommu); | ||
| 2397 | continue; | 2407 | continue; |
| 2408 | } | ||
| 2398 | iommu = drhd->iommu; | 2409 | iommu = drhd->iommu; |
| 2399 | 2410 | ||
| 2400 | iommu_flush_write_buffer(iommu); | 2411 | iommu_flush_write_buffer(iommu); |
| @@ -3240,9 +3251,15 @@ static int device_notifier(struct notifier_block *nb, | |||
| 3240 | if (!domain) | 3251 | if (!domain) |
| 3241 | return 0; | 3252 | return 0; |
| 3242 | 3253 | ||
| 3243 | if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) | 3254 | if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) { |
| 3244 | domain_remove_one_dev_info(domain, pdev); | 3255 | domain_remove_one_dev_info(domain, pdev); |
| 3245 | 3256 | ||
| 3257 | if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) && | ||
| 3258 | !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) && | ||
| 3259 | list_empty(&domain->devices)) | ||
| 3260 | domain_exit(domain); | ||
| 3261 | } | ||
| 3262 | |||
| 3246 | return 0; | 3263 | return 0; |
| 3247 | } | 3264 | } |
| 3248 | 3265 | ||
| @@ -3277,12 +3294,21 @@ int __init intel_iommu_init(void) | |||
| 3277 | if (no_iommu || dmar_disabled) | 3294 | if (no_iommu || dmar_disabled) |
| 3278 | return -ENODEV; | 3295 | return -ENODEV; |
| 3279 | 3296 | ||
| 3280 | iommu_init_mempool(); | 3297 | if (iommu_init_mempool()) { |
| 3281 | dmar_init_reserved_ranges(); | 3298 | if (force_on) |
| 3299 | panic("tboot: Failed to initialize iommu memory\n"); | ||
| 3300 | return -ENODEV; | ||
| 3301 | } | ||
| 3302 | |||
| 3303 | if (dmar_init_reserved_ranges()) { | ||
| 3304 | if (force_on) | ||
| 3305 | panic("tboot: Failed to reserve iommu ranges\n"); | ||
| 3306 | return -ENODEV; | ||
| 3307 | } | ||
| 3282 | 3308 | ||
| 3283 | init_no_remapping_devices(); | 3309 | init_no_remapping_devices(); |
| 3284 | 3310 | ||
| 3285 | ret = init_dmars(); | 3311 | ret = init_dmars(force_on); |
| 3286 | if (ret) { | 3312 | if (ret) { |
| 3287 | if (force_on) | 3313 | if (force_on) |
| 3288 | panic("tboot: Failed to initialize DMARs\n"); | 3314 | panic("tboot: Failed to initialize DMARs\n"); |
| @@ -3391,6 +3417,11 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain, | |||
| 3391 | domain->iommu_count--; | 3417 | domain->iommu_count--; |
| 3392 | domain_update_iommu_cap(domain); | 3418 | domain_update_iommu_cap(domain); |
| 3393 | spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags); | 3419 | spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags); |
| 3420 | |||
| 3421 | spin_lock_irqsave(&iommu->lock, tmp_flags); | ||
| 3422 | clear_bit(domain->id, iommu->domain_ids); | ||
| 3423 | iommu->domains[domain->id] = NULL; | ||
| 3424 | spin_unlock_irqrestore(&iommu->lock, tmp_flags); | ||
| 3394 | } | 3425 | } |
| 3395 | 3426 | ||
| 3396 | spin_unlock_irqrestore(&device_domain_lock, flags); | 3427 | spin_unlock_irqrestore(&device_domain_lock, flags); |
| @@ -3607,9 +3638,9 @@ static int intel_iommu_attach_device(struct iommu_domain *domain, | |||
| 3607 | 3638 | ||
| 3608 | pte = dmar_domain->pgd; | 3639 | pte = dmar_domain->pgd; |
| 3609 | if (dma_pte_present(pte)) { | 3640 | if (dma_pte_present(pte)) { |
| 3610 | free_pgtable_page(dmar_domain->pgd); | ||
| 3611 | dmar_domain->pgd = (struct dma_pte *) | 3641 | dmar_domain->pgd = (struct dma_pte *) |
| 3612 | phys_to_virt(dma_pte_addr(pte)); | 3642 | phys_to_virt(dma_pte_addr(pte)); |
| 3643 | free_pgtable_page(pte); | ||
| 3613 | } | 3644 | } |
| 3614 | dmar_domain->agaw--; | 3645 | dmar_domain->agaw--; |
| 3615 | } | 3646 | } |
