author	Linus Torvalds <torvalds@linux-foundation.org>	2009-04-03 13:36:57 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-04-03 13:36:57 -0400
commit	ca1ee219c070eab755712d50638bbcd1f8630fc1
tree	c0c252a9095830aadc5dc9ffdd16d9167dd605c9
parent	3cc50ac0dbda5100684e570247782330155d35e0
parent	afeeb7cebbd223ffee303fd8de4ba97458b13581
Merge git://git.infradead.org/iommu-2.6
* git://git.infradead.org/iommu-2.6:
  intel-iommu: Fix address wrap on 32-bit kernel.
  intel-iommu: Enable DMAR on 32-bit kernel.
  intel-iommu: fix PCI device detach from virtual machine
  intel-iommu: VT-d page table to support snooping control bit
  iommu: Add domain_has_cap iommu_ops
  intel-iommu: Snooping control support

Fixed trivial conflicts in arch/x86/Kconfig and drivers/pci/intel-iommu.c
-rw-r--r--	arch/x86/Kconfig	4
-rw-r--r--	arch/x86/kernel/amd_iommu.c	7
-rw-r--r--	drivers/base/iommu.c	7
-rw-r--r--	drivers/pci/intel-iommu.c	115
-rw-r--r--	include/linux/dma_remapping.h	1
-rw-r--r--	include/linux/intel-iommu.h	2
-rw-r--r--	include/linux/iommu.h	13
7 files changed, 127 insertions(+), 22 deletions(-)
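
The headline addition in this merge is a capability-query interface: a consumer can ask whether every IOMMU backing a domain enforces cache coherency (VT-d "Snooping Control") before requesting snooped mappings. Below is a minimal sketch of the consumer side, assuming the iommu_map_range() signature from this kernel's include/linux/iommu.h; the helper name map_page_coherently is illustrative, not part of the merge.

	#include <linux/iommu.h>

	/*
	 * Hypothetical helper: map one page for an assigned device,
	 * requesting snooped (cache-coherent) DMA only when the hardware
	 * can honour it. iommu_domain_has_cap(), IOMMU_CACHE and
	 * IOMMU_CAP_CACHE_COHERENCY come from this merge; everything
	 * else is illustrative.
	 */
	static int map_page_coherently(struct iommu_domain *domain,
				       unsigned long iova, phys_addr_t paddr)
	{
		int prot = IOMMU_READ | IOMMU_WRITE;

		/* Request snooping only if every IOMMU in the domain
		 * supports it; intel-iommu then sets DMA_PTE_SNP. */
		if (iommu_domain_has_cap(domain, IOMMU_CAP_CACHE_COHERENCY))
			prot |= IOMMU_CACHE;

		return iommu_map_range(domain, iova, paddr, PAGE_SIZE, prot);
	}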
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 748e50a1a152..3f27e5c0c9c9 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1837,8 +1837,8 @@ config PCI_MMCONFIG
 
 config DMAR
 	bool "Support for DMA Remapping Devices (EXPERIMENTAL)"
-	depends on X86_64 && PCI_MSI && ACPI && EXPERIMENTAL
-	---help---
+	depends on PCI_MSI && ACPI && EXPERIMENTAL
+	help
 	  DMA remapping (DMAR) devices support enables independent address
 	  translations for Direct Memory Access (DMA) from devices.
 	  These DMA remapping devices are reported via ACPI tables
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index c5962fe3796f..a97db99dad52 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -1928,6 +1928,12 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
 	return paddr;
 }
 
+static int amd_iommu_domain_has_cap(struct iommu_domain *domain,
+				    unsigned long cap)
+{
+	return 0;
+}
+
 static struct iommu_ops amd_iommu_ops = {
 	.domain_init = amd_iommu_domain_init,
 	.domain_destroy = amd_iommu_domain_destroy,
@@ -1936,5 +1942,6 @@ static struct iommu_ops amd_iommu_ops = {
 	.map = amd_iommu_map_range,
 	.unmap = amd_iommu_unmap_range,
 	.iova_to_phys = amd_iommu_iova_to_phys,
+	.domain_has_cap = amd_iommu_domain_has_cap,
 };
 
diff --git a/drivers/base/iommu.c b/drivers/base/iommu.c
index c2d1eed90376..9f0e672f4be8 100644
--- a/drivers/base/iommu.c
+++ b/drivers/base/iommu.c
@@ -98,3 +98,10 @@ phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
 	return iommu_ops->iova_to_phys(domain, iova);
 }
 EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
+
+int iommu_domain_has_cap(struct iommu_domain *domain,
+			 unsigned long cap)
+{
+	return iommu_ops->domain_has_cap(domain, cap);
+}
+EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
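
Note that this generic wrapper dispatches to the registered iommu_ops with no NULL check, so every provider must now implement domain_has_cap, which is why both the AMD and Intel drivers gain a callback in this merge. A defensive variant (a sketch only, not what the merge installs) would tolerate ops that predate the callback:

	/* Sketch: fall back to "no capabilities" if the callback is missing. */
	int iommu_domain_has_cap(struct iommu_domain *domain, unsigned long cap)
	{
		if (!iommu_ops->domain_has_cap)
			return 0;
		return iommu_ops->domain_has_cap(domain, cap);
	}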
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 9dbd5066acaf..23e56a564e05 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -164,7 +164,8 @@ static inline void context_clear_entry(struct context_entry *context)
  * 1: writable
  * 2-6: reserved
  * 7: super page
- * 8-11: available
+ * 8-10: available
+ * 11: snoop behavior
  * 12-63: Host physical address
  */
 struct dma_pte {
@@ -186,6 +187,11 @@ static inline void dma_set_pte_writable(struct dma_pte *pte)
 	pte->val |= DMA_PTE_WRITE;
 }
 
+static inline void dma_set_pte_snp(struct dma_pte *pte)
+{
+	pte->val |= DMA_PTE_SNP;
+}
+
 static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
 {
 	pte->val = (pte->val & ~3) | (prot & 3);
@@ -231,6 +237,7 @@ struct dmar_domain {
 	int		flags;		/* flags to find out type of domain */
 
 	int		iommu_coherency;/* indicate coherency of iommu access */
+	int		iommu_snooping;	/* indicate snooping control feature */
 	int		iommu_count;	/* reference count of iommu */
 	spinlock_t	iommu_lock;	/* protect iommu set in domain */
 	u64		max_addr;	/* maximum mapped address */
@@ -421,7 +428,6 @@ static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
 	return g_iommus[iommu_id];
 }
 
-/* "Coherency" capability may be different across iommus */
 static void domain_update_iommu_coherency(struct dmar_domain *domain)
 {
 	int i;
@@ -438,6 +444,29 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
 	}
 }
 
+static void domain_update_iommu_snooping(struct dmar_domain *domain)
+{
+	int i;
+
+	domain->iommu_snooping = 1;
+
+	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
+	for (; i < g_num_of_iommus; ) {
+		if (!ecap_sc_support(g_iommus[i]->ecap)) {
+			domain->iommu_snooping = 0;
+			break;
+		}
+		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
+	}
+}
+
+/* Some capabilities may be different across iommus */
+static void domain_update_iommu_cap(struct dmar_domain *domain)
+{
+	domain_update_iommu_coherency(domain);
+	domain_update_iommu_snooping(domain);
+}
+
 static struct intel_iommu *device_to_iommu(u8 bus, u8 devfn)
 {
 	struct dmar_drhd_unit *drhd = NULL;
@@ -689,15 +718,17 @@ static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr)
 static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end)
 {
 	int addr_width = agaw_to_width(domain->agaw);
+	int npages;
 
 	start &= (((u64)1) << addr_width) - 1;
 	end &= (((u64)1) << addr_width) - 1;
 	/* in case it's partial page */
 	start = PAGE_ALIGN(start);
 	end &= PAGE_MASK;
+	npages = (end - start) / VTD_PAGE_SIZE;
 
 	/* we don't need lock here, nobody else touches the iova range */
-	while (start < end) {
+	while (npages--) {
 		dma_pte_clear_one(domain, start);
 		start += VTD_PAGE_SIZE;
 	}
@@ -1241,6 +1272,11 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
 	else
 		domain->iommu_coherency = 0;
 
+	if (ecap_sc_support(iommu->ecap))
+		domain->iommu_snooping = 1;
+	else
+		domain->iommu_snooping = 0;
+
 	domain->iommu_count = 1;
 
 	/* always allocate the top pgd */
@@ -1369,7 +1405,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 	spin_lock_irqsave(&domain->iommu_lock, flags);
 	if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
 		domain->iommu_count++;
-		domain_update_iommu_coherency(domain);
+		domain_update_iommu_cap(domain);
 	}
 	spin_unlock_irqrestore(&domain->iommu_lock, flags);
 	return 0;
@@ -1469,6 +1505,8 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
 		BUG_ON(dma_pte_addr(pte));
 		dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT);
 		dma_set_pte_prot(pte, prot);
+		if (prot & DMA_PTE_SNP)
+			dma_set_pte_snp(pte);
 		domain_flush_cache(domain, pte, sizeof(*pte));
 		start_pfn++;
 		index++;
@@ -2119,7 +2157,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 error:
 	if (iova)
 		__free_iova(&domain->iovad, iova);
-	printk(KERN_ERR"Device %s request: %lx@%llx dir %d --- failed\n",
+	printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n",
 		pci_name(pdev), size, (unsigned long long)paddr, dir);
 	return 0;
 }
@@ -2218,7 +2256,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
 	start_addr = iova->pfn_lo << PAGE_SHIFT;
 	size = aligned_size((u64)dev_addr, size);
 
-	pr_debug("Device %s unmapping: %lx@%llx\n",
+	pr_debug("Device %s unmapping: %zx@%llx\n",
 		pci_name(pdev), size, (unsigned long long)start_addr);
 
 	/* clear the whole page */
@@ -2282,8 +2320,6 @@ static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 	free_pages((unsigned long)vaddr, order);
 }
 
-#define SG_ENT_VIRT_ADDRESS(sg)	(sg_virt((sg)))
-
 static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
 			   int nelems, enum dma_data_direction dir,
 			   struct dma_attrs *attrs)
@@ -2294,7 +2330,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
 	unsigned long start_addr;
 	struct iova *iova;
 	size_t size = 0;
-	void *addr;
+	phys_addr_t addr;
 	struct scatterlist *sg;
 	struct intel_iommu *iommu;
 
@@ -2310,7 +2346,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
 	if (!iova)
 		return;
 	for_each_sg(sglist, sg, nelems, i) {
-		addr = SG_ENT_VIRT_ADDRESS(sg);
+		addr = page_to_phys(sg_page(sg)) + sg->offset;
 		size += aligned_size((u64)addr, sg->length);
 	}
 
@@ -2337,7 +2373,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
 
 	for_each_sg(sglist, sg, nelems, i) {
 		BUG_ON(!sg_page(sg));
-		sg->dma_address = virt_to_bus(SG_ENT_VIRT_ADDRESS(sg));
+		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
 		sg->dma_length = sg->length;
 	}
 	return nelems;
@@ -2346,7 +2382,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
 static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
 			enum dma_data_direction dir, struct dma_attrs *attrs)
 {
-	void *addr;
+	phys_addr_t addr;
 	int i;
 	struct pci_dev *pdev = to_pci_dev(hwdev);
 	struct dmar_domain *domain;
@@ -2370,8 +2406,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
 	iommu = domain_get_iommu(domain);
 
 	for_each_sg(sglist, sg, nelems, i) {
-		addr = SG_ENT_VIRT_ADDRESS(sg);
-		addr = (void *)virt_to_phys(addr);
+		addr = page_to_phys(sg_page(sg)) + sg->offset;
 		size += aligned_size((u64)addr, sg->length);
 	}
 
@@ -2394,8 +2429,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
 	start_addr = iova->pfn_lo << PAGE_SHIFT;
 	offset = 0;
 	for_each_sg(sglist, sg, nelems, i) {
-		addr = SG_ENT_VIRT_ADDRESS(sg);
-		addr = (void *)virt_to_phys(addr);
+		addr = page_to_phys(sg_page(sg)) + sg->offset;
 		size = aligned_size((u64)addr, sg->length);
 		ret = domain_page_mapping(domain, start_addr + offset,
 					((u64)addr) & PAGE_MASK,
@@ -2628,6 +2662,33 @@ static int vm_domain_add_dev_info(struct dmar_domain *domain,
 	return 0;
 }
 
+static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
+					   struct pci_dev *pdev)
+{
+	struct pci_dev *tmp, *parent;
+
+	if (!iommu || !pdev)
+		return;
+
+	/* dependent device detach */
+	tmp = pci_find_upstream_pcie_bridge(pdev);
+	/* Secondary interface's bus number and devfn 0 */
+	if (tmp) {
+		parent = pdev->bus->self;
+		while (parent != tmp) {
+			iommu_detach_dev(iommu, parent->bus->number,
+				parent->devfn);
+			parent = parent->bus->self;
+		}
+		if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
+			iommu_detach_dev(iommu,
+				tmp->subordinate->number, 0);
+		else /* this is a legacy PCI bridge */
+			iommu_detach_dev(iommu,
+				tmp->bus->number, tmp->devfn);
+	}
+}
+
 static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
 					  struct pci_dev *pdev)
 {
@@ -2653,6 +2714,7 @@ static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
 	spin_unlock_irqrestore(&device_domain_lock, flags);
 
 	iommu_detach_dev(iommu, info->bus, info->devfn);
+	iommu_detach_dependent_devices(iommu, pdev);
 	free_devinfo_mem(info);
 
 	spin_lock_irqsave(&device_domain_lock, flags);
@@ -2676,7 +2738,7 @@ static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
 		spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
 		clear_bit(iommu->seq_id, &domain->iommu_bmp);
 		domain->iommu_count--;
-		domain_update_iommu_coherency(domain);
+		domain_update_iommu_cap(domain);
 		spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
 	}
 
@@ -2702,15 +2764,16 @@ static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
 
 		iommu = device_to_iommu(info->bus, info->devfn);
 		iommu_detach_dev(iommu, info->bus, info->devfn);
+		iommu_detach_dependent_devices(iommu, info->dev);
 
 		/* clear this iommu in iommu_bmp, update iommu count
-		 * and coherency
+		 * and capabilities
 		 */
 		spin_lock_irqsave(&domain->iommu_lock, flags2);
 		if (test_and_clear_bit(iommu->seq_id,
				       &domain->iommu_bmp)) {
 			domain->iommu_count--;
-			domain_update_iommu_coherency(domain);
+			domain_update_iommu_cap(domain);
 		}
 		spin_unlock_irqrestore(&domain->iommu_lock, flags2);
 
@@ -2933,6 +2996,8 @@ static int intel_iommu_map_range(struct iommu_domain *domain,
 		prot |= DMA_PTE_READ;
 	if (iommu_prot & IOMMU_WRITE)
 		prot |= DMA_PTE_WRITE;
+	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
+		prot |= DMA_PTE_SNP;
 
 	max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size);
 	if (dmar_domain->max_addr < max_addr) {
@@ -2986,6 +3051,17 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
 	return phys;
 }
 
+static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
+				      unsigned long cap)
+{
+	struct dmar_domain *dmar_domain = domain->priv;
+
+	if (cap == IOMMU_CAP_CACHE_COHERENCY)
+		return dmar_domain->iommu_snooping;
+
+	return 0;
+}
+
 static struct iommu_ops intel_iommu_ops = {
 	.domain_init = intel_iommu_domain_init,
 	.domain_destroy = intel_iommu_domain_destroy,
@@ -2994,6 +3070,7 @@ static struct iommu_ops intel_iommu_ops = {
 	.map = intel_iommu_map_range,
 	.unmap = intel_iommu_unmap_range,
 	.iova_to_phys = intel_iommu_iova_to_phys,
+	.domain_has_cap = intel_iommu_domain_has_cap,
 };
 
 static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
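
For reference, the new snoop bit sits in the previously "available" range of the leaf PTE. Composing a PTE value by hand (a sketch only, using DMA_PTE_* from the dma_remapping.h hunk below and VTD_PAGE_MASK from intel-iommu.h; the driver uses the dma_set_pte_* helpers instead):

	/* bits 0-1: read/write, bit 11: snoop behavior, bits 12-63: host address */
	u64 pte_val = (paddr & VTD_PAGE_MASK) | DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;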
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h
index af1dab41674b..1a455f1f86d7 100644
--- a/include/linux/dma_remapping.h
+++ b/include/linux/dma_remapping.h
@@ -11,6 +11,7 @@
 
 #define DMA_PTE_READ (1)
 #define DMA_PTE_WRITE (2)
+#define DMA_PTE_SNP (1 << 11)
 
 struct intel_iommu;
 struct dmar_domain;
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 1d6c71d96ede..77214ead1a36 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -123,7 +123,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
 #define ecap_eim_support(e)	((e >> 4) & 0x1)
 #define ecap_ir_support(e)	((e >> 3) & 0x1)
 #define ecap_max_handle_mask(e) ((e >> 20) & 0xf)
-
+#define ecap_sc_support(e)	((e >> 7) & 0x1) /* Snooping Control */
 
 /* IOTLB_REG */
 #define DMA_TLB_FLUSH_GRANU_OFFSET 60
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 8a7bfb1b6ca0..3af4ffd591b9 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -21,6 +21,7 @@
 
 #define IOMMU_READ	(1)
 #define IOMMU_WRITE	(2)
+#define IOMMU_CACHE	(4) /* DMA cache coherency */
 
 struct device;
 
@@ -28,6 +29,8 @@ struct iommu_domain {
 	void *priv;
 };
 
+#define IOMMU_CAP_CACHE_COHERENCY	0x1
+
 struct iommu_ops {
 	int (*domain_init)(struct iommu_domain *domain);
 	void (*domain_destroy)(struct iommu_domain *domain);
@@ -39,6 +42,8 @@ struct iommu_ops {
 		      size_t size);
 	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
 				    unsigned long iova);
+	int (*domain_has_cap)(struct iommu_domain *domain,
+			      unsigned long cap);
 };
 
 #ifdef CONFIG_IOMMU_API
@@ -57,6 +62,8 @@ extern void iommu_unmap_range(struct iommu_domain *domain, unsigned long iova,
 		      size_t size);
 extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
 				      unsigned long iova);
+extern int iommu_domain_has_cap(struct iommu_domain *domain,
+				unsigned long cap);
 
 #else /* CONFIG_IOMMU_API */
 
@@ -107,6 +114,12 @@ static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
 	return 0;
 }
 
+static inline int iommu_domain_has_cap(struct iommu_domain *domain,
+				       unsigned long cap)
+{
+	return 0;
+}
+
 #endif /* CONFIG_IOMMU_API */
 
 #endif /* __LINUX_IOMMU_H */