author		Weidong Han <weidong.han@intel.com>	2008-12-08 10:00:00 -0500
committer	Joerg Roedel <joerg.roedel@amd.com>	2009-01-03 08:02:18 -0500
commit		5331fe6f5627e06eec7d0dc154a0a3a9c27813c5 (patch)
tree		9546a411d65d6f2abaa1a0506d289f042178656b /drivers/pci/intel-iommu.c
parent		c7151a8dfefd11108de5b4293af2390962bcff71 (diff)
Add domain_flush_cache
Because a virtual machine domain may contain devices behind different
IOMMUs, it cannot use __iommu_flush_cache. Use domain_flush_cache instead
of __iommu_flush_cache in the common low-level functions. In functions
where the IOMMU is explicitly specified, or where the domain cannot be
obtained, __iommu_flush_cache is still used.

Signed-off-by: Weidong Han <weidong.han@intel.com>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
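The new helper only needs a clflush when the domain spans at least one
non-coherent IOMMU, so domain->iommu_coherency has to be recomputed as
IOMMUs attach to or detach from the domain. A minimal sketch of how that
flag could be derived; the iommu_bmp bitmap and the
g_iommus[]/g_num_of_iommus globals are assumptions for illustration,
loosely following this file's conventions, not part of this patch:

	/*
	 * Sketch only: the domain is treated as coherent only if every
	 * IOMMU attached to it advertises coherency in its extended
	 * capability register (ecap_coherent()).
	 */
	static void domain_update_iommu_coherency(struct dmar_domain *domain)
	{
		int i;

		domain->iommu_coherency = 1;

		i = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
		for (; i < g_num_of_iommus;
		     i = find_next_bit(domain->iommu_bmp, g_num_of_iommus, i + 1)) {
			if (!ecap_coherent(g_iommus[i]->ecap)) {
				domain->iommu_coherency = 0;
				break;
			}
		}
	}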
Diffstat (limited to 'drivers/pci/intel-iommu.c')
-rw-r--r--	drivers/pci/intel-iommu.c	43
1 file changed, 26 insertions(+), 17 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 6ed18faa1198..f0a21995b135 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -445,6 +445,13 @@ static struct intel_iommu *device_to_iommu(u8 bus, u8 devfn)
 	return NULL;
 }
 
+static void domain_flush_cache(struct dmar_domain *domain,
+			       void *addr, int size)
+{
+	if (!domain->iommu_coherency)
+		clflush_cache_range(addr, size);
+}
+
 /* Gets context entry for a given bus and devfn */
 static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
 		u8 bus, u8 devfn)
@@ -585,7 +592,6 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
 	int level = agaw_to_level(domain->agaw);
 	int offset;
 	unsigned long flags;
-	struct intel_iommu *iommu = domain_get_iommu(domain);
 
 	BUG_ON(!domain->pgd);
 
@@ -609,8 +615,7 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
 					flags);
 				return NULL;
 			}
-			__iommu_flush_cache(iommu, tmp_page,
-					PAGE_SIZE);
+			domain_flush_cache(domain, tmp_page, PAGE_SIZE);
 			dma_set_pte_addr(pte, virt_to_phys(tmp_page));
 			/*
 			 * high level table always sets r/w, last level page
@@ -618,7 +623,7 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
 			 */
 			dma_set_pte_readable(pte);
 			dma_set_pte_writable(pte);
-			__iommu_flush_cache(iommu, pte, sizeof(*pte));
+			domain_flush_cache(domain, pte, sizeof(*pte));
 		}
 		parent = phys_to_virt(dma_pte_addr(pte));
 		level--;
@@ -655,14 +660,13 @@ static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr,
 static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr)
 {
 	struct dma_pte *pte = NULL;
-	struct intel_iommu *iommu = domain_get_iommu(domain);
 
 	/* get last level pte */
 	pte = dma_addr_level_pte(domain, addr, 1);
 
 	if (pte) {
 		dma_clear_pte(pte);
-		__iommu_flush_cache(iommu, pte, sizeof(*pte));
+		domain_flush_cache(domain, pte, sizeof(*pte));
 	}
 }
 
@@ -693,7 +697,6 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
 	int total = agaw_to_level(domain->agaw);
 	int level;
 	u64 tmp;
-	struct intel_iommu *iommu = domain_get_iommu(domain);
 
 	start &= (((u64)1) << addr_width) - 1;
 	end &= (((u64)1) << addr_width) - 1;
@@ -711,8 +714,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
 				free_pgtable_page(
 					phys_to_virt(dma_pte_addr(pte)));
 				dma_clear_pte(pte);
-				__iommu_flush_cache(iommu,
-						pte, sizeof(*pte));
+				domain_flush_cache(domain, pte, sizeof(*pte));
 			}
 			tmp += level_size(level);
 		}
@@ -1445,12 +1447,17 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 		u8 bus, u8 devfn)
 {
 	struct context_entry *context;
-	struct intel_iommu *iommu = domain_get_iommu(domain);
 	unsigned long flags;
+	struct intel_iommu *iommu;
 
 	pr_debug("Set context mapping for %02x:%02x.%d\n",
 		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
 	BUG_ON(!domain->pgd);
+
+	iommu = device_to_iommu(bus, devfn);
+	if (!iommu)
+		return -ENODEV;
+
 	context = device_to_context_entry(iommu, bus, devfn);
 	if (!context)
 		return -ENOMEM;
@@ -1466,7 +1473,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 	context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
 	context_set_fault_enable(context);
 	context_set_present(context);
-	__iommu_flush_cache(iommu, context, sizeof(*context));
+	domain_flush_cache(domain, context, sizeof(*context));
 
 	/* it's a non-present to present mapping */
 	if (iommu->flush.flush_context(iommu, domain->id,
@@ -1519,12 +1526,15 @@ domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
 					tmp->bus->number, tmp->devfn);
 }
 
-static int domain_context_mapped(struct dmar_domain *domain,
-	struct pci_dev *pdev)
+static int domain_context_mapped(struct pci_dev *pdev)
 {
 	int ret;
 	struct pci_dev *tmp, *parent;
-	struct intel_iommu *iommu = domain_get_iommu(domain);
+	struct intel_iommu *iommu;
+
+	iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
+	if (!iommu)
+		return -ENODEV;
 
 	ret = device_context_mapped(iommu,
 		pdev->bus->number, pdev->devfn);
@@ -1559,7 +1569,6 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
 	struct dma_pte *pte;
 	int index;
 	int addr_width = agaw_to_width(domain->agaw);
-	struct intel_iommu *iommu = domain_get_iommu(domain);
 
 	hpa &= (((u64)1) << addr_width) - 1;
 
@@ -1579,7 +1588,7 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
 		BUG_ON(dma_pte_addr(pte));
 		dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT);
 		dma_set_pte_prot(pte, prot);
-		__iommu_flush_cache(iommu, pte, sizeof(*pte));
+		domain_flush_cache(domain, pte, sizeof(*pte));
 		start_pfn++;
 		index++;
 	}
@@ -2129,7 +2138,7 @@ get_valid_domain_for_dev(struct pci_dev *pdev)
 	}
 
 	/* make sure context mapping is ok */
-	if (unlikely(!domain_context_mapped(domain, pdev))) {
+	if (unlikely(!domain_context_mapped(pdev))) {
 		ret = domain_context_mapping(domain, pdev);
 		if (ret) {
 			printk(KERN_ERR
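For context, domain_flush_cache() falls back to clflush_cache_range()
whenever the domain is non-coherent. A simplified sketch of what such a
range flush does on x86, so a non-coherent IOMMU reads up-to-date
page-table and context entries from memory; the real clflush_cache_range()
lives in the arch code, and the fencing details here are an assumption:

	/*
	 * Illustration only: flush every cache line covering
	 * [vaddr, vaddr + size).
	 */
	static void clflush_range_sketch(void *vaddr, unsigned int size)
	{
		unsigned long clsize = boot_cpu_data.x86_clflush_size;
		void *vend = vaddr + size - 1;
		void *p;

		mb();	/* order earlier PTE stores before the flushes */
		for (p = (void *)((unsigned long)vaddr & ~(clsize - 1));
		     p <= vend; p += clsize)
			clflush(p);
		mb();	/* complete the flushes before hardware may look */
	}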