author		Fenghua Yu <fenghua.yu@intel.com>	2008-10-16 21:02:32 -0400
committer	David Woodhouse <David.Woodhouse@intel.com>	2008-10-18 09:29:15 -0400
commit		5b6985ce8ec7127b4d60ad450b64ca8b82748a3b
tree		f1d5a27601df04a3481690a1a2f90fc688034aff
parent		cacd4213d8ffed83676f38d5d8e93c673e0f1af7
intel-iommu: IA64 support
The current Intel IOMMU code assumes that both the host page size and the Intel IOMMU (VT-d) page size are 4KiB. This patch supports a variable host page size, which provides support for IA64 and its multiple page sizes. It also adds some other code hooks for the IA64 platform, including the DMAR_OPERATION_TIMEOUT definition.

[dwmw2: some cleanup]

Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
-rw-r--r--	arch/x86/kernel/pci-dma.c	|  16
-rw-r--r--	drivers/pci/dmar.c		|  19
-rw-r--r--	drivers/pci/intel-iommu.c	| 128
-rw-r--r--	drivers/pci/quirks.c		|  14
-rw-r--r--	include/asm-x86/iommu.h		|   4
-rw-r--r--	include/linux/dma_remapping.h	|  27
-rw-r--r--	include/linux/intel-iommu.h	|  39
7 files changed, 131 insertions(+), 116 deletions(-)
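
The heart of the change is the split between the host's PAGE_* macros and the new VTD_PAGE_* macros, which stay fixed at 4KiB regardless of the host page size. The standalone sketch below is illustrative, not kernel code: it mirrors the new macro definitions from dma_remapping.h, and the 16KiB host page size is only an assumed example of an IA64 configuration. It shows how the page-frame arithmetic used in domain_page_mapping() stays in 4KiB units even when host pages are larger.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;	/* stand-in for the kernel type */

/* VT-d hardware always uses 4KiB pages (as defined in dma_remapping.h) */
#define VTD_PAGE_SHIFT		(12)
#define VTD_PAGE_SIZE		(1UL << VTD_PAGE_SHIFT)
#define VTD_PAGE_MASK		(((u64)-1) << VTD_PAGE_SHIFT)
#define VTD_PAGE_ALIGN(addr)	(((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)

/* Assumed host page size for illustration; IA64 commonly runs with 16KiB pages */
#define HOST_PAGE_SHIFT		(14)
#define HOST_PAGE_SIZE		(1UL << HOST_PAGE_SHIFT)

int main(void)
{
	u64 hpa = 0x12345678ULL;	/* arbitrary host physical address */
	u64 size = 8192;		/* arbitrary mapping length in bytes */

	/* One host page covers several VT-d pages when PAGE_SIZE > 4KiB */
	printf("VT-d pages per host page: %lu\n",
	       HOST_PAGE_SIZE / VTD_PAGE_SIZE);

	/* Page-frame range computed the way domain_page_mapping() does it */
	u64 start_pfn = hpa >> VTD_PAGE_SHIFT;
	u64 end_pfn = VTD_PAGE_ALIGN(hpa + size) >> VTD_PAGE_SHIFT;
	printf("IOMMU PTEs needed: %llu\n",
	       (unsigned long long)(end_pfn - start_pfn));
	return 0;
}

With a 16KiB host page, each host page needs four VT-d PTEs, which is why the patch replaces the old PAGE_*_4K names with either the host macros (for IOVA/host arithmetic) or the new VTD_* macros (for IOMMU page-table arithmetic), as the diff below shows.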
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 192624820217..1972266e8ba5 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -9,8 +9,6 @@
 #include <asm/calgary.h>
 #include <asm/amd_iommu.h>
 
-static int forbid_dac __read_mostly;
-
 struct dma_mapping_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);
 
@@ -293,17 +291,3 @@ void pci_iommu_shutdown(void)
 }
 /* Must execute after PCI subsystem */
 fs_initcall(pci_iommu_init);
-
-#ifdef CONFIG_PCI
-/* Many VIA bridges seem to corrupt data for DAC. Disable it here */
-
-static __devinit void via_no_dac(struct pci_dev *dev)
-{
-	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
-		printk(KERN_INFO "PCI: VIA PCI bridge detected."
-			"Disabling DAC.\n");
-		forbid_dac = 1;
-	}
-}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
-#endif
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 44d6c7081b8f..b65173828bc2 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -277,14 +277,15 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
 		drhd = (struct acpi_dmar_hardware_unit *)header;
 		printk (KERN_INFO PREFIX
 			"DRHD (flags: 0x%08x)base: 0x%016Lx\n",
-			drhd->flags, drhd->address);
+			drhd->flags, (unsigned long long)drhd->address);
 		break;
 	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
 		rmrr = (struct acpi_dmar_reserved_memory *)header;
 
 		printk (KERN_INFO PREFIX
 			"RMRR base: 0x%016Lx end: 0x%016Lx\n",
-			rmrr->base_address, rmrr->end_address);
+			(unsigned long long)rmrr->base_address,
+			(unsigned long long)rmrr->end_address);
 		break;
 	}
 }
@@ -304,7 +305,7 @@ parse_dmar_table(void)
 	if (!dmar)
 		return -ENODEV;
 
-	if (dmar->width < PAGE_SHIFT_4K - 1) {
+	if (dmar->width < PAGE_SHIFT - 1) {
 		printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
 		return -EINVAL;
 	}
@@ -493,7 +494,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 
 	iommu->seq_id = iommu_allocated++;
 
-	iommu->reg = ioremap(drhd->reg_base_addr, PAGE_SIZE_4K);
+	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
 	if (!iommu->reg) {
 		printk(KERN_ERR "IOMMU: can't map the region\n");
 		goto error;
@@ -504,8 +505,8 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	/* the registers might be more than one page */
 	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
 		cap_max_fault_reg_offset(iommu->cap));
-	map_size = PAGE_ALIGN_4K(map_size);
-	if (map_size > PAGE_SIZE_4K) {
+	map_size = VTD_PAGE_ALIGN(map_size);
+	if (map_size > VTD_PAGE_SIZE) {
 		iounmap(iommu->reg);
 		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
 		if (!iommu->reg) {
@@ -516,8 +517,10 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 
 	ver = readl(iommu->reg + DMAR_VER_REG);
 	pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
-		drhd->reg_base_addr, DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
-		iommu->cap, iommu->ecap);
+		(unsigned long long)drhd->reg_base_addr,
+		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
+		(unsigned long long)iommu->cap,
+		(unsigned long long)iommu->ecap);
 
 	spin_lock_init(&iommu->register_lock);
 
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 509470419130..2bf96babbc4f 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -18,6 +18,7 @@
  * Author: Ashok Raj <ashok.raj@intel.com>
  * Author: Shaohua Li <shaohua.li@intel.com>
  * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
+ * Author: Fenghua Yu <fenghua.yu@intel.com>
  */
 
 #include <linux/init.h>
@@ -35,11 +36,13 @@
 #include <linux/timer.h>
 #include <linux/iova.h>
 #include <linux/intel-iommu.h>
-#include <asm/proto.h> /* force_iommu in this header in x86-64*/
 #include <asm/cacheflush.h>
 #include <asm/iommu.h>
 #include "pci.h"
 
+#define ROOT_SIZE		VTD_PAGE_SIZE
+#define CONTEXT_SIZE		VTD_PAGE_SIZE
+
 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
 
@@ -199,7 +202,7 @@ static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
 		spin_unlock_irqrestore(&iommu->lock, flags);
 		return NULL;
 	}
-	__iommu_flush_cache(iommu, (void *)context, PAGE_SIZE_4K);
+	__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
 	phy_addr = virt_to_phys((void *)context);
 	set_root_value(root, phy_addr);
 	set_root_present(root);
@@ -345,7 +348,7 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
 				return NULL;
 			}
 			__iommu_flush_cache(domain->iommu, tmp_page,
-					PAGE_SIZE_4K);
+					PAGE_SIZE);
 			dma_set_pte_addr(*pte, virt_to_phys(tmp_page));
 			/*
 			 * high level table always sets r/w, last level page
@@ -408,13 +411,13 @@ static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end)
 	start &= (((u64)1) << addr_width) - 1;
 	end &= (((u64)1) << addr_width) - 1;
 	/* in case it's partial page */
-	start = PAGE_ALIGN_4K(start);
-	end &= PAGE_MASK_4K;
+	start = PAGE_ALIGN(start);
+	end &= PAGE_MASK;
 
 	/* we don't need lock here, nobody else touches the iova range */
 	while (start < end) {
 		dma_pte_clear_one(domain, start);
-		start += PAGE_SIZE_4K;
+		start += VTD_PAGE_SIZE;
 	}
 }
 
@@ -468,7 +471,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
 	if (!root)
 		return -ENOMEM;
 
-	__iommu_flush_cache(iommu, root, PAGE_SIZE_4K);
+	__iommu_flush_cache(iommu, root, ROOT_SIZE);
 
 	spin_lock_irqsave(&iommu->lock, flags);
 	iommu->root_entry = root;
@@ -634,7 +637,8 @@ static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
 		printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
 	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
 		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
-			DMA_TLB_IIRG(type), DMA_TLB_IAIG(val));
+			(unsigned long long)DMA_TLB_IIRG(type),
+			(unsigned long long)DMA_TLB_IAIG(val));
 	/* flush context entry will implictly flush write buffer */
 	return 0;
 }
@@ -644,7 +648,7 @@ static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
 {
 	unsigned int mask;
 
-	BUG_ON(addr & (~PAGE_MASK_4K));
+	BUG_ON(addr & (~VTD_PAGE_MASK));
 	BUG_ON(pages == 0);
 
 	/* Fallback to domain selective flush if no PSI support */
@@ -798,7 +802,7 @@ void dmar_msi_read(int irq, struct msi_msg *msg)
 }
 
 static int iommu_page_fault_do_one(struct intel_iommu *iommu, int type,
-		u8 fault_reason, u16 source_id, u64 addr)
+		u8 fault_reason, u16 source_id, unsigned long long addr)
 {
 	const char *reason;
 
@@ -1051,9 +1055,9 @@ static void dmar_init_reserved_ranges(void)
 			if (!r->flags || !(r->flags & IORESOURCE_MEM))
 				continue;
 			addr = r->start;
-			addr &= PAGE_MASK_4K;
+			addr &= PAGE_MASK;
 			size = r->end - addr;
-			size = PAGE_ALIGN_4K(size);
+			size = PAGE_ALIGN(size);
 			iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr),
 				IOVA_PFN(size + addr) - 1);
 			if (!iova)
@@ -1115,7 +1119,7 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
 	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
 	if (!domain->pgd)
 		return -ENOMEM;
-	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE_4K);
+	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
 	return 0;
 }
 
@@ -1131,7 +1135,7 @@ static void domain_exit(struct dmar_domain *domain)
 	/* destroy iovas */
 	put_iova_domain(&domain->iovad);
 	end = DOMAIN_MAX_ADDR(domain->gaw);
-	end = end & (~PAGE_MASK_4K);
+	end = end & (~PAGE_MASK);
 
 	/* clear ptes */
 	dma_pte_clear_range(domain, 0, end);
@@ -1252,22 +1256,25 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
 	u64 start_pfn, end_pfn;
 	struct dma_pte *pte;
 	int index;
+	int addr_width = agaw_to_width(domain->agaw);
+
+	hpa &= (((u64)1) << addr_width) - 1;
 
 	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
 		return -EINVAL;
-	iova &= PAGE_MASK_4K;
-	start_pfn = ((u64)hpa) >> PAGE_SHIFT_4K;
-	end_pfn = (PAGE_ALIGN_4K(((u64)hpa) + size)) >> PAGE_SHIFT_4K;
+	iova &= PAGE_MASK;
+	start_pfn = ((u64)hpa) >> VTD_PAGE_SHIFT;
+	end_pfn = (VTD_PAGE_ALIGN(((u64)hpa) + size)) >> VTD_PAGE_SHIFT;
 	index = 0;
 	while (start_pfn < end_pfn) {
-		pte = addr_to_dma_pte(domain, iova + PAGE_SIZE_4K * index);
+		pte = addr_to_dma_pte(domain, iova + VTD_PAGE_SIZE * index);
 		if (!pte)
 			return -ENOMEM;
 		/* We don't need lock here, nobody else
 		 * touches the iova range
 		 */
 		BUG_ON(dma_pte_addr(*pte));
-		dma_set_pte_addr(*pte, start_pfn << PAGE_SHIFT_4K);
+		dma_set_pte_addr(*pte, start_pfn << VTD_PAGE_SHIFT);
 		dma_set_pte_prot(*pte, prot);
 		__iommu_flush_cache(domain->iommu, pte, sizeof(*pte));
 		start_pfn++;
@@ -1445,11 +1452,13 @@ error:
 	return find_domain(pdev);
 }
 
-static int iommu_prepare_identity_map(struct pci_dev *pdev, u64 start, u64 end)
+static int iommu_prepare_identity_map(struct pci_dev *pdev,
+				      unsigned long long start,
+				      unsigned long long end)
 {
 	struct dmar_domain *domain;
 	unsigned long size;
-	u64 base;
+	unsigned long long base;
 	int ret;
 
 	printk(KERN_INFO
@@ -1461,9 +1470,9 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev, u64 start, u64 end)
 		return -ENOMEM;
 
 	/* The address might not be aligned */
-	base = start & PAGE_MASK_4K;
+	base = start & PAGE_MASK;
 	size = end - base;
-	size = PAGE_ALIGN_4K(size);
+	size = PAGE_ALIGN(size);
 	if (!reserve_iova(&domain->iovad, IOVA_PFN(base),
 			IOVA_PFN(base + size) - 1)) {
 		printk(KERN_ERR "IOMMU: reserve iova failed\n");
@@ -1732,8 +1741,8 @@ error:
 static inline u64 aligned_size(u64 host_addr, size_t size)
 {
 	u64 addr;
-	addr = (host_addr & (~PAGE_MASK_4K)) + size;
-	return PAGE_ALIGN_4K(addr);
+	addr = (host_addr & (~PAGE_MASK)) + size;
+	return PAGE_ALIGN(addr);
 }
 
 struct iova *
@@ -1747,7 +1756,7 @@ iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
 		return NULL;
 
 	piova = alloc_iova(&domain->iovad,
-			size >> PAGE_SHIFT_4K, IOVA_PFN(end), 1);
+			size >> PAGE_SHIFT, IOVA_PFN(end), 1);
 	return piova;
 }
 
@@ -1807,12 +1816,12 @@ get_valid_domain_for_dev(struct pci_dev *pdev)
 	return domain;
 }
 
-static dma_addr_t
+dma_addr_t
 intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir)
 {
 	struct pci_dev *pdev = to_pci_dev(hwdev);
 	struct dmar_domain *domain;
-	unsigned long start_paddr;
+	phys_addr_t start_paddr;
 	struct iova *iova;
 	int prot = 0;
 	int ret;
@@ -1831,7 +1840,7 @@ intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir)
 	if (!iova)
 		goto error;
 
-	start_paddr = iova->pfn_lo << PAGE_SHIFT_4K;
+	start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
 
 	/*
 	 * Check if DMAR supports zero-length reads on write only
@@ -1849,27 +1858,23 @@ intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir)
 	 * is not a big problem
 	 */
 	ret = domain_page_mapping(domain, start_paddr,
-		((u64)paddr) & PAGE_MASK_4K, size, prot);
+		((u64)paddr) & PAGE_MASK, size, prot);
 	if (ret)
 		goto error;
 
-	pr_debug("Device %s request: %lx@%llx mapping: %lx@%llx, dir %d\n",
-		pci_name(pdev), size, (u64)paddr,
-		size, (u64)start_paddr, dir);
-
 	/* it's a non-present to present mapping */
 	ret = iommu_flush_iotlb_psi(domain->iommu, domain->id,
-			start_paddr, size >> PAGE_SHIFT_4K, 1);
+			start_paddr, size >> VTD_PAGE_SHIFT, 1);
 	if (ret)
 		iommu_flush_write_buffer(domain->iommu);
 
-	return (start_paddr + ((u64)paddr & (~PAGE_MASK_4K)));
+	return start_paddr + ((u64)paddr & (~PAGE_MASK));
 
 error:
 	if (iova)
 		__free_iova(&domain->iovad, iova);
 	printk(KERN_ERR"Device %s request: %lx@%llx dir %d --- failed\n",
-		pci_name(pdev), size, (u64)paddr, dir);
+		pci_name(pdev), size, (unsigned long long)paddr, dir);
 	return 0;
 }
 
@@ -1931,8 +1936,8 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
 	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
 }
 
-static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr,
-	size_t size, int dir)
+void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
+			int dir)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct dmar_domain *domain;
@@ -1948,11 +1953,11 @@ static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr,
 	if (!iova)
 		return;
 
-	start_addr = iova->pfn_lo << PAGE_SHIFT_4K;
+	start_addr = iova->pfn_lo << PAGE_SHIFT;
 	size = aligned_size((u64)dev_addr, size);
 
 	pr_debug("Device %s unmapping: %lx@%llx\n",
-		pci_name(pdev), size, (u64)start_addr);
+		pci_name(pdev), size, (unsigned long long)start_addr);
 
 	/* clear the whole page */
 	dma_pte_clear_range(domain, start_addr, start_addr + size);
@@ -1960,7 +1965,7 @@ static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr,
 	dma_pte_free_pagetable(domain, start_addr, start_addr + size);
 	if (intel_iommu_strict) {
 		if (iommu_flush_iotlb_psi(domain->iommu,
-			domain->id, start_addr, size >> PAGE_SHIFT_4K, 0))
+			domain->id, start_addr, size >> VTD_PAGE_SHIFT, 0))
 			iommu_flush_write_buffer(domain->iommu);
 		/* free iova */
 		__free_iova(&domain->iovad, iova);
@@ -1973,13 +1978,13 @@ static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr,
 	}
 }
 
-static void * intel_alloc_coherent(struct device *hwdev, size_t size,
+void *intel_alloc_coherent(struct device *hwdev, size_t size,
 			dma_addr_t *dma_handle, gfp_t flags)
 {
 	void *vaddr;
 	int order;
 
-	size = PAGE_ALIGN_4K(size);
+	size = PAGE_ALIGN(size);
 	order = get_order(size);
 	flags &= ~(GFP_DMA | GFP_DMA32);
 
@@ -1995,12 +2000,12 @@ static void * intel_alloc_coherent(struct device *hwdev, size_t size,
 	return NULL;
 }
 
-static void intel_free_coherent(struct device *hwdev, size_t size,
-	void *vaddr, dma_addr_t dma_handle)
+void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
+			 dma_addr_t dma_handle)
 {
 	int order;
 
-	size = PAGE_ALIGN_4K(size);
+	size = PAGE_ALIGN(size);
 	order = get_order(size);
 
 	intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
@@ -2008,8 +2013,9 @@ static void intel_free_coherent(struct device *hwdev, size_t size,
 }
 
 #define SG_ENT_VIRT_ADDRESS(sg)	(sg_virt((sg)))
-static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
-	int nelems, int dir)
+
+void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
+		    int nelems, int dir)
 {
 	int i;
 	struct pci_dev *pdev = to_pci_dev(hwdev);
@@ -2033,7 +2039,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
 		size += aligned_size((u64)addr, sg->length);
 	}
 
-	start_addr = iova->pfn_lo << PAGE_SHIFT_4K;
+	start_addr = iova->pfn_lo << PAGE_SHIFT;
 
 	/* clear the whole page */
 	dma_pte_clear_range(domain, start_addr, start_addr + size);
@@ -2041,7 +2047,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
 	dma_pte_free_pagetable(domain, start_addr, start_addr + size);
 
 	if (iommu_flush_iotlb_psi(domain->iommu, domain->id, start_addr,
-			size >> PAGE_SHIFT_4K, 0))
+			size >> VTD_PAGE_SHIFT, 0))
 		iommu_flush_write_buffer(domain->iommu);
 
 	/* free iova */
@@ -2062,8 +2068,8 @@ static int intel_nontranslate_map_sg(struct device *hddev,
 	return nelems;
 }
 
-static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist,
-	int nelems, int dir)
+int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
+		 int dir)
 {
 	void *addr;
 	int i;
@@ -2107,14 +2113,14 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist,
 	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
 		prot |= DMA_PTE_WRITE;
 
-	start_addr = iova->pfn_lo << PAGE_SHIFT_4K;
+	start_addr = iova->pfn_lo << PAGE_SHIFT;
 	offset = 0;
 	for_each_sg(sglist, sg, nelems, i) {
 		addr = SG_ENT_VIRT_ADDRESS(sg);
 		addr = (void *)virt_to_phys(addr);
 		size = aligned_size((u64)addr, sg->length);
 		ret = domain_page_mapping(domain, start_addr + offset,
-			((u64)addr) & PAGE_MASK_4K,
+			((u64)addr) & PAGE_MASK,
 			size, prot);
 		if (ret) {
 			/* clear the page */
@@ -2128,14 +2134,14 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist,
 			return 0;
 		}
 		sg->dma_address = start_addr + offset +
-				((u64)addr & (~PAGE_MASK_4K));
+				((u64)addr & (~PAGE_MASK));
 		sg->dma_length = sg->length;
 		offset += size;
 	}
 
 	/* it's a non-present to present mapping */
 	if (iommu_flush_iotlb_psi(domain->iommu, domain->id,
-			start_addr, offset >> PAGE_SHIFT_4K, 1))
+			start_addr, offset >> VTD_PAGE_SHIFT, 1))
 		iommu_flush_write_buffer(domain->iommu);
 	return nelems;
 }
@@ -2175,7 +2181,6 @@ static inline int iommu_devinfo_cache_init(void)
 					 sizeof(struct device_domain_info),
 					 0,
 					 SLAB_HWCACHE_ALIGN,
-
 					 NULL);
 	if (!iommu_devinfo_cache) {
 		printk(KERN_ERR "Couldn't create devinfo cache\n");
@@ -2193,7 +2198,6 @@ static inline int iommu_iova_cache_init(void)
 			sizeof(struct iova),
 			0,
 			SLAB_HWCACHE_ALIGN,
-
 			NULL);
 	if (!iommu_iova_cache) {
 		printk(KERN_ERR "Couldn't create iova cache\n");
@@ -2322,7 +2326,7 @@ void intel_iommu_domain_exit(struct dmar_domain *domain)
 		return;
 
 	end = DOMAIN_MAX_ADDR(domain->gaw);
-	end = end & (~PAGE_MASK_4K);
+	end = end & (~VTD_PAGE_MASK);
 
 	/* clear ptes */
 	dma_pte_clear_range(domain, 0, end);
@@ -2418,6 +2422,6 @@ u64 intel_iommu_iova_to_pfn(struct dmar_domain *domain, u64 iova)
 	if (pte)
 		pfn = dma_pte_addr(*pte);
 
-	return pfn >> PAGE_SHIFT_4K;
+	return pfn >> VTD_PAGE_SHIFT;
 }
 EXPORT_SYMBOL_GPL(intel_iommu_iova_to_pfn);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index e872ac925b4b..832175d9ca25 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -35,6 +35,20 @@ static void __devinit quirk_mellanox_tavor(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR,quirk_mellanox_tavor);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE,quirk_mellanox_tavor);
 
+/* Many VIA bridges seem to corrupt data for DAC. Disable it here */
+int forbid_dac __read_mostly;
+EXPORT_SYMBOL(forbid_dac);
+
+static __devinit void via_no_dac(struct pci_dev *dev)
+{
+	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
+		dev_info(&dev->dev,
+			"VIA PCI bridge detected. Disabling DAC.\n");
+		forbid_dac = 1;
+	}
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
+
 /* Deal with broken BIOS'es that neglect to enable passive release,
    which can cause problems in combination with the 82441FX/PPro MTRRs */
 static void quirk_passive_release(struct pci_dev *dev)
diff --git a/include/asm-x86/iommu.h b/include/asm-x86/iommu.h
index 961e746da977..2daaffcda52f 100644
--- a/include/asm-x86/iommu.h
+++ b/include/asm-x86/iommu.h
@@ -7,9 +7,13 @@ extern struct dma_mapping_ops nommu_dma_ops;
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
 extern int dmar_disabled;
+extern int forbid_dac;
 
 extern unsigned long iommu_nr_pages(unsigned long addr, unsigned long len);
 
+/* 10 seconds */
+#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
+
 #ifdef CONFIG_GART_IOMMU
 extern int gart_iommu_aperture;
 extern int gart_iommu_aperture_allowed;
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h
index bff5c65f81dc..952df39c989d 100644
--- a/include/linux/dma_remapping.h
+++ b/include/linux/dma_remapping.h
@@ -2,15 +2,14 @@
 #define _DMA_REMAPPING_H
 
 /*
- * We need a fixed PAGE_SIZE of 4K irrespective of
- * arch PAGE_SIZE for IOMMU page tables.
+ * VT-d hardware uses 4KiB page size regardless of host page size.
  */
-#define PAGE_SHIFT_4K		(12)
-#define PAGE_SIZE_4K		(1UL << PAGE_SHIFT_4K)
-#define PAGE_MASK_4K		(((u64)-1) << PAGE_SHIFT_4K)
-#define PAGE_ALIGN_4K(addr)	(((addr) + PAGE_SIZE_4K - 1) & PAGE_MASK_4K)
+#define VTD_PAGE_SHIFT		(12)
+#define VTD_PAGE_SIZE		(1UL << VTD_PAGE_SHIFT)
+#define VTD_PAGE_MASK		(((u64)-1) << VTD_PAGE_SHIFT)
+#define VTD_PAGE_ALIGN(addr)	(((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)
 
-#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT_4K)
+#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
 #define DMA_32BIT_PFN		IOVA_PFN(DMA_32BIT_MASK)
 #define DMA_64BIT_PFN		IOVA_PFN(DMA_64BIT_MASK)
 
@@ -25,7 +24,7 @@ struct root_entry {
 	u64	val;
 	u64	rsvd1;
 };
-#define ROOT_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct root_entry))
+#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
 static inline bool root_present(struct root_entry *root)
 {
 	return (root->val & 1);
@@ -36,7 +35,7 @@ static inline void set_root_present(struct root_entry *root)
 }
 static inline void set_root_value(struct root_entry *root, unsigned long value)
 {
-	root->val |= value & PAGE_MASK_4K;
+	root->val |= value & VTD_PAGE_MASK;
 }
 
 struct context_entry;
@@ -45,7 +44,7 @@ get_context_addr_from_root(struct root_entry *root)
 {
 	return (struct context_entry *)
 		(root_present(root)?phys_to_virt(
-		root->val & PAGE_MASK_4K):
+		root->val & VTD_PAGE_MASK) :
 		NULL);
 }
 
@@ -67,7 +66,7 @@ struct context_entry {
 #define context_present(c) ((c).lo & 1)
 #define context_fault_disable(c) (((c).lo >> 1) & 1)
 #define context_translation_type(c) (((c).lo >> 2) & 3)
-#define context_address_root(c) ((c).lo & PAGE_MASK_4K)
+#define context_address_root(c) ((c).lo & VTD_PAGE_MASK)
 #define context_address_width(c) ((c).hi & 7)
 #define context_domain_id(c) (((c).hi >> 8) & ((1 << 16) - 1))
 
@@ -81,7 +80,7 @@ struct context_entry {
 	} while (0)
 #define CONTEXT_TT_MULTI_LEVEL 0
 #define context_set_address_root(c, val) \
-	do {(c).lo |= (val) & PAGE_MASK_4K;} while (0)
+	do {(c).lo |= (val) & VTD_PAGE_MASK; } while (0)
 #define context_set_address_width(c, val) do {(c).hi |= (val) & 7;} while (0)
 #define context_set_domain_id(c, val) \
 	do {(c).hi |= ((val) & ((1 << 16) - 1)) << 8;} while (0)
@@ -107,9 +106,9 @@ struct dma_pte {
 #define dma_set_pte_writable(p) do {(p).val |= DMA_PTE_WRITE;} while (0)
 #define dma_set_pte_prot(p, prot) \
 		do {(p).val = ((p).val & ~3) | ((prot) & 3); } while (0)
-#define dma_pte_addr(p) ((p).val & PAGE_MASK_4K)
+#define dma_pte_addr(p) ((p).val & VTD_PAGE_MASK)
 #define dma_set_pte_addr(p, addr) do {\
-		(p).val |= ((addr) & PAGE_MASK_4K); } while (0)
+		(p).val |= ((addr) & VTD_PAGE_MASK); } while (0)
 #define dma_pte_present(p) (((p).val & 3) != 0)
 
 struct intel_iommu;
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index afb0d2a5b7cd..3d017cfd245b 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -29,6 +29,7 @@
 #include <linux/io.h>
 #include <linux/dma_remapping.h>
 #include <asm/cacheflush.h>
+#include <asm/iommu.h>
 
 /*
  * Intel IOMMU register specification per version 1.0 public spec.
@@ -202,22 +203,21 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
 #define dma_frcd_type(d) ((d >> 30) & 1)
 #define dma_frcd_fault_reason(c) (c & 0xff)
 #define dma_frcd_source_id(c) (c & 0xffff)
-#define dma_frcd_page_addr(d) (d & (((u64)-1) << 12)) /* low 64 bit */
-
-#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) /* 10sec */
-
-#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
-{\
-	cycles_t start_time = get_cycles();\
-	while (1) {\
-		sts = op (iommu->reg + offset);\
-		if (cond)\
-			break;\
+/* low 64 bit */
+#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))
+
+#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
+do { \
+	cycles_t start_time = get_cycles(); \
+	while (1) { \
+		sts = op(iommu->reg + offset); \
+		if (cond) \
+			break; \
 		if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\
-			panic("DMAR hardware is malfunctioning\n");\
-		cpu_relax();\
-	}\
-}
+			panic("DMAR hardware is malfunctioning\n"); \
+		cpu_relax(); \
+	} \
+} while (0)
 
 #define QI_LENGTH	256	/* queue length */
 
@@ -244,7 +244,7 @@ enum {
 #define QI_IOTLB_DR(dr)		(((u64)dr) << 7)
 #define QI_IOTLB_DW(dw)		(((u64)dw) << 6)
 #define QI_IOTLB_GRAN(gran)	(((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4))
-#define QI_IOTLB_ADDR(addr)	(((u64)addr) & PAGE_MASK_4K)
+#define QI_IOTLB_ADDR(addr)	(((u64)addr) & VTD_PAGE_MASK)
 #define QI_IOTLB_IH(ih)		(((u64)ih) << 6)
 #define QI_IOTLB_AM(am)		(((u8)am))
 
@@ -353,4 +353,11 @@ static inline int intel_iommu_found(void)
 }
 #endif /* CONFIG_DMAR */
 
+extern void *intel_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
+extern void intel_free_coherent(struct device *, size_t, void *, dma_addr_t);
+extern dma_addr_t intel_map_single(struct device *, phys_addr_t, size_t, int);
+extern void intel_unmap_single(struct device *, dma_addr_t, size_t, int);
+extern int intel_map_sg(struct device *, struct scatterlist *, int, int);
+extern void intel_unmap_sg(struct device *, struct scatterlist *, int, int);
+
+#endif
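
The last hunk removes `static` from the intel_* DMA primitives and declares them in <linux/intel-iommu.h>, so that code outside intel-iommu.c (the IA64 hooks the commit message mentions) can reference them. A minimal sketch of such wiring is shown below; it is not part of this patch, and the dma_mapping_ops field names (.alloc_coherent, .map_single, ...) are assumed from that era's x86 definition, so treat them as illustrative only.

/* Illustrative fragment: hooking the exported intel_* helpers into the
 * arch's dma_mapping_ops table. Assumes <asm/dma-mapping.h> of that era
 * for struct dma_mapping_ops and the global dma_ops pointer seen in
 * arch/x86/kernel/pci-dma.c above. */
static struct dma_mapping_ops intel_dma_ops = {
	.alloc_coherent	= intel_alloc_coherent,
	.free_coherent	= intel_free_coherent,
	.map_single	= intel_map_single,
	.unmap_single	= intel_unmap_single,
	.map_sg		= intel_map_sg,
	.unmap_sg	= intel_unmap_sg,
};

static void __init setup_intel_iommu_dma_ops(void)
{
	/* route all DMA API calls through the VT-d implementation */
	dma_ops = &intel_dma_ops;
}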