 drivers/pci/intel-iommu.c | 171 ++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 166 insertions(+), 5 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index ffbe4c573729..6ed18faa1198 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -228,6 +228,8 @@ struct dmar_domain {
         int             flags;          /* flags to find out type of domain */
 
         int             iommu_coherency;/* indicate coherency of iommu access */
+        int             iommu_count;    /* reference count of iommu */
+        spinlock_t      iommu_lock;     /* protect iommu set in domain */
 };
 
 /* PCI domain-device relationship */
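
Note: the two added fields turn a domain's IOMMU association into a counted set: iommu_count tracks how many IOMMUs currently serve the domain, and iommu_lock guards the updates. A minimal user-space model of that bookkeeping, used as a running reference by the sketches below; the toy_* names are illustrative stand-ins, a pthread mutex replaces the kernel spinlock, and the pre-existing iommu_bmp field is included because later hunks use it:

#include <pthread.h>

struct toy_iommu {
        int seq_id;                     /* one bit per IOMMU, indexed by this id */
};

struct toy_domain {
        unsigned long   iommu_bmp;      /* set of IOMMU seq_ids serving this domain */
        int             iommu_count;    /* number of bits set in iommu_bmp */
        pthread_mutex_t iommu_lock;     /* protects the two fields above */
};
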
@@ -422,6 +424,27 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
         }
 }
 
+static struct intel_iommu *device_to_iommu(u8 bus, u8 devfn)
+{
+        struct dmar_drhd_unit *drhd = NULL;
+        int i;
+
+        for_each_drhd_unit(drhd) {
+                if (drhd->ignored)
+                        continue;
+
+                for (i = 0; i < drhd->devices_cnt; i++)
+                        if (drhd->devices[i]->bus->number == bus &&
+                            drhd->devices[i]->devfn == devfn)
+                                return drhd->iommu;
+
+                if (drhd->include_all)
+                        return drhd->iommu;
+        }
+
+        return NULL;
+}
+
 /* Gets context entry for a given bus and devfn */
 static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
                 u8 bus, u8 devfn)
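
Note: the new helper resolves which hardware unit owns a (bus, devfn) pair without going through a domain, which is what lets later hunks detach devices from multi-IOMMU domains. A compilable sketch of the same walk over plain arrays; toy_* types are illustrative (reusing struct toy_iommu from above), while for_each_drhd_unit and struct dmar_drhd_unit are the real kernel names. Note the ordering assumption: within each unit the explicit device list is consulted before the include_all catch-all, which in practice is listed last by firmware:

struct toy_unit {
        int ignored;                    /* firmware told us to skip this unit */
        int include_all;                /* catch-all unit, conventionally last */
        int devices_cnt;
        struct { unsigned char bus, devfn; } *devices;
        struct toy_iommu *iommu;
};

static struct toy_iommu *toy_device_to_iommu(struct toy_unit *units, int n,
                                             unsigned char bus,
                                             unsigned char devfn)
{
        int u, i;

        for (u = 0; u < n; u++) {
                if (units[u].ignored)
                        continue;
                for (i = 0; i < units[u].devices_cnt; i++)
                        if (units[u].devices[i].bus == bus &&
                            units[u].devices[i].devfn == devfn)
                                return units[u].iommu;
                /* the catch-all claims anything not matched explicitly */
                if (units[u].include_all)
                        return units[u].iommu;
        }
        return NULL;    /* no DRHD unit covers this device */
}
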
@@ -1196,12 +1219,18 @@ void free_dmar_iommu(struct intel_iommu *iommu)
 {
         struct dmar_domain *domain;
         int i;
+        unsigned long flags;
 
         i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
         for (; i < cap_ndoms(iommu->cap); ) {
                 domain = iommu->domains[i];
                 clear_bit(i, iommu->domain_ids);
-                domain_exit(domain);
+
+                spin_lock_irqsave(&domain->iommu_lock, flags);
+                if (--domain->iommu_count == 0)
+                        domain_exit(domain);
+                spin_unlock_irqrestore(&domain->iommu_lock, flags);
+
                 i = find_next_bit(iommu->domain_ids,
                         cap_ndoms(iommu->cap), i+1);
         }
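
Note: teardown now drops a reference instead of unconditionally destroying the domain; only the last IOMMU to let go calls domain_exit(). One thing a reviewer might flag: if domain_exit() frees the domain, as free_domain_mem() in this driver suggests, the spin_unlock_irqrestore() that follows it runs on the freed domain's lock. The toy counterpart below (reusing struct toy_domain from above) releases the lock before freeing to avoid that:

static void toy_domain_exit(struct toy_domain *d);     /* frees d and its state */

static void toy_domain_put(struct toy_domain *d)
{
        int last;

        pthread_mutex_lock(&d->iommu_lock);
        last = (--d->iommu_count == 0);
        pthread_mutex_unlock(&d->iommu_lock);
        if (last)
                toy_domain_exit(d);     /* safe: the lock is no longer held */
}
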
@@ -1351,6 +1380,7 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
 
         init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
         spin_lock_init(&domain->mapping_lock);
+        spin_lock_init(&domain->iommu_lock);
 
         domain_reserve_special_ranges(domain);
 
@@ -1377,6 +1407,8 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
         else
                 domain->iommu_coherency = 0;
 
+        domain->iommu_count = 1;
+
         /* always allocate the top pgd */
         domain->pgd = (struct dma_pte *)alloc_pgtable_page();
         if (!domain->pgd)
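
Note: the two domain_init() hunks pair up: the new lock is initialized alongside the existing mapping_lock, and a fresh domain starts with a count of one for the IOMMU that created it. The counterpart in the toy model; whether the creator's bit in iommu_bmp is set here or elsewhere is not shown in this hunk, so the sketch assumes it happens elsewhere:

static void toy_domain_init(struct toy_domain *d)
{
        d->iommu_bmp = 0;               /* creator's bit assumed set elsewhere */
        d->iommu_count = 1;             /* the creating IOMMU's reference */
        pthread_mutex_init(&d->iommu_lock, NULL);
}
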
@@ -1445,6 +1477,13 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
         iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0);
 
         spin_unlock_irqrestore(&iommu->lock, flags);
+
+        spin_lock_irqsave(&domain->iommu_lock, flags);
+        if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
+                domain->iommu_count++;
+                domain_update_iommu_coherency(domain);
+        }
+        spin_unlock_irqrestore(&domain->iommu_lock, flags);
 
         return 0;
 }
 
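
Note: context mapping is the attach point, and the bitmap test makes the count per-IOMMU rather than per-device: only the first device mapped behind a given IOMMU bumps the count and triggers a coherency recompute. The same 0-to-1 logic in the toy model; test_and_set_bit is a single atomic step in the kernel, and under the mutex a plain read-modify-write is equivalent:

static void toy_domain_attach_iommu(struct toy_domain *d, int seq_id)
{
        unsigned long bit = 1UL << seq_id;

        pthread_mutex_lock(&d->iommu_lock);
        if (!(d->iommu_bmp & bit)) {    /* first device behind this IOMMU */
                d->iommu_bmp |= bit;
                d->iommu_count++;
                /* here the kernel recomputes domain->iommu_coherency */
        }
        pthread_mutex_unlock(&d->iommu_lock);
}
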
@@ -1547,9 +1586,10 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
         return 0;
 }
 
-static void detach_domain_for_dev(struct dmar_domain *domain, u8 bus, u8 devfn)
+static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
 {
-        struct intel_iommu *iommu = domain_get_iommu(domain);
+        if (!iommu)
+                return;
 
         clear_context_table(iommu, bus, devfn);
         iommu->flush.flush_context(iommu, 0, 0, 0,
@@ -1562,6 +1602,7 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
 {
         struct device_domain_info *info;
         unsigned long flags;
+        struct intel_iommu *iommu;
 
         spin_lock_irqsave(&device_domain_lock, flags);
         while (!list_empty(&domain->devices)) {
@@ -1573,7 +1614,8 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
                 info->dev->dev.archdata.iommu = NULL;
                 spin_unlock_irqrestore(&device_domain_lock, flags);
 
-                detach_domain_for_dev(info->domain, info->bus, info->devfn);
+                iommu = device_to_iommu(info->bus, info->devfn);
+                iommu_detach_dev(iommu, info->bus, info->devfn);
                 free_devinfo_mem(info);
 
                 spin_lock_irqsave(&device_domain_lock, flags);
@@ -2625,6 +2667,122 @@ int __init intel_iommu_init(void)
         return 0;
 }
 
+static int vm_domain_add_dev_info(struct dmar_domain *domain,
+                                  struct pci_dev *pdev)
+{
+        struct device_domain_info *info;
+        unsigned long flags;
+
+        info = alloc_devinfo_mem();
+        if (!info)
+                return -ENOMEM;
+
+        info->bus = pdev->bus->number;
+        info->devfn = pdev->devfn;
+        info->dev = pdev;
+        info->domain = domain;
+
+        spin_lock_irqsave(&device_domain_lock, flags);
+        list_add(&info->link, &domain->devices);
+        list_add(&info->global, &device_domain_list);
+        pdev->dev.archdata.iommu = info;
+        spin_unlock_irqrestore(&device_domain_lock, flags);
+
+        return 0;
+}
+
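Note: the add side for virtual-machine domains is pure bookkeeping: one device_domain_info node per (bus, devfn), linked into both the domain's list and the global list, with a back-pointer from the device for O(1) reverse lookup. A compilable sketch of the same shape, with singly-linked lists instead of the kernel's list_head, locking elided, and toy_* names illustrative (struct toy_domain reused from above):

#include <stdlib.h>

struct toy_dev_info {
        unsigned char bus, devfn;
        int iommu_seq_id;               /* cached result of the lookup helper */
        struct toy_dev_info *next;      /* chain of devices in the domain */
};

struct toy_vm_domain {
        struct toy_dev_info *devices;
        struct toy_domain accounting;   /* bitmap/count/lock from earlier */
};

static int toy_add_dev_info(struct toy_vm_domain *d, unsigned char bus,
                            unsigned char devfn, int seq_id)
{
        struct toy_dev_info *info = malloc(sizeof(*info));

        if (!info)
                return -1;              /* the kernel returns -ENOMEM */
        info->bus = bus;
        info->devfn = devfn;
        info->iommu_seq_id = seq_id;
        info->next = d->devices;        /* push onto the domain's list */
        d->devices = info;
        return 0;
}
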
+static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
+                                          struct pci_dev *pdev)
+{
+        struct device_domain_info *info;
+        struct intel_iommu *iommu;
+        unsigned long flags;
+        int found = 0;
+        struct list_head *entry, *tmp;
+
+        iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
+        if (!iommu)
+                return;
+
+        spin_lock_irqsave(&device_domain_lock, flags);
+        list_for_each_safe(entry, tmp, &domain->devices) {
+                info = list_entry(entry, struct device_domain_info, link);
+                if (info->bus == pdev->bus->number &&
+                    info->devfn == pdev->devfn) {
+                        list_del(&info->link);
+                        list_del(&info->global);
+                        if (info->dev)
+                                info->dev->dev.archdata.iommu = NULL;
+                        spin_unlock_irqrestore(&device_domain_lock, flags);
+
+                        iommu_detach_dev(iommu, info->bus, info->devfn);
+                        free_devinfo_mem(info);
+
+                        spin_lock_irqsave(&device_domain_lock, flags);
+
+                        if (found)
+                                break;
+                        else
+                                continue;
+                }
+
+                /* if there are no other devices under the same iommu
+                 * owned by this domain, clear this iommu from iommu_bmp
+                 * and update the iommu count and coherency
+                 */
+                if (device_to_iommu(info->bus, info->devfn) == iommu)
+                        found = 1;
+        }
+
+        if (found == 0) {
+                unsigned long tmp_flags;
+                spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
+                clear_bit(iommu->seq_id, &domain->iommu_bmp);
+                domain->iommu_count--;
+                domain_update_iommu_coherency(domain);
+                spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
+        }
+
+        spin_unlock_irqrestore(&device_domain_lock, flags);
+}
+
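Note: removal has a second job beyond unlinking the node: it must decide whether this IOMMU still serves any *other* device in the domain, and only clear the bitmap bit and drop the count when it does not; the found flag carries that scan's result. The whole pass in the toy model, locking elided (types reused from the sketch above):

static void toy_remove_one(struct toy_vm_domain *d, unsigned char bus,
                           unsigned char devfn, int seq_id)
{
        struct toy_dev_info **pp = &d->devices;
        int found = 0;

        while (*pp) {
                struct toy_dev_info *info = *pp;

                if (info->bus == bus && info->devfn == devfn) {
                        *pp = info->next;       /* unlink the matching device */
                        /* hardware detach: clear context entry, then flush */
                        free(info);
                        continue;
                }
                if (info->iommu_seq_id == seq_id)
                        found = 1;      /* IOMMU still used by this domain */
                pp = &info->next;
        }
        if (!found) {
                d->accounting.iommu_bmp &= ~(1UL << seq_id);
                d->accounting.iommu_count--;
                /* plus a coherency recompute, as in the kernel */
        }
}
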
+static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
+{
+        struct device_domain_info *info;
+        struct intel_iommu *iommu;
+        unsigned long flags1, flags2;
+
+        spin_lock_irqsave(&device_domain_lock, flags1);
+        while (!list_empty(&domain->devices)) {
+                info = list_entry(domain->devices.next,
+                        struct device_domain_info, link);
+                list_del(&info->link);
+                list_del(&info->global);
+                if (info->dev)
+                        info->dev->dev.archdata.iommu = NULL;
+
+                spin_unlock_irqrestore(&device_domain_lock, flags1);
+
+                iommu = device_to_iommu(info->bus, info->devfn);
+                iommu_detach_dev(iommu, info->bus, info->devfn);
+
+                /* clear this iommu in iommu_bmp, update iommu count
+                 * and coherency
+                 */
+                spin_lock_irqsave(&domain->iommu_lock, flags2);
+                if (test_and_clear_bit(iommu->seq_id,
+                                       &domain->iommu_bmp)) {
+                        domain->iommu_count--;
+                        domain_update_iommu_coherency(domain);
+                }
+                spin_unlock_irqrestore(&domain->iommu_lock, flags2);
+
+                free_devinfo_mem(info);
+                spin_lock_irqsave(&device_domain_lock, flags1);
+        }
+        spin_unlock_irqrestore(&device_domain_lock, flags1);
+}
+
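Note: the drain-everything variant repeats the same two steps per node: detach at the hardware, then conditionally drop the IOMMU from the domain's accounting. test_and_clear_bit keeps the count correct when several devices shared one IOMMU: only the first removal for that IOMMU decrements. The toy equivalent, locking elided:

static void toy_remove_all(struct toy_vm_domain *d)
{
        while (d->devices) {
                struct toy_dev_info *info = d->devices;
                unsigned long bit = 1UL << info->iommu_seq_id;

                d->devices = info->next;        /* unlink the head node */
                /* hardware detach: clear context entry, then flush */
                if (d->accounting.iommu_bmp & bit) {
                        /* first removal for this IOMMU: drop it from the set */
                        d->accounting.iommu_bmp &= ~bit;
                        d->accounting.iommu_count--;
                        /* plus a coherency recompute, as in the kernel */
                }
                free(info);
        }
}
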
 void intel_iommu_domain_exit(struct dmar_domain *domain)
 {
         u64 end;
@@ -2702,7 +2860,10 @@ EXPORT_SYMBOL_GPL(intel_iommu_page_mapping);
 
 void intel_iommu_detach_dev(struct dmar_domain *domain, u8 bus, u8 devfn)
 {
-        detach_domain_for_dev(domain, bus, devfn);
+        struct intel_iommu *iommu;
+
+        iommu = device_to_iommu(bus, devfn);
+        iommu_detach_dev(iommu, bus, devfn);
 }
 EXPORT_SYMBOL_GPL(intel_iommu_detach_dev);
 