author    Fenghua Yu <fenghua.yu@intel.com>    2008-10-16 21:02:32 -0400
committer David Woodhouse <David.Woodhouse@intel.com>    2008-10-18 09:29:15 -0400
commit    5b6985ce8ec7127b4d60ad450b64ca8b82748a3b (patch)
tree      f1d5a27601df04a3481690a1a2f90fc688034aff /drivers/pci/intel-iommu.c
parent    cacd4213d8ffed83676f38d5d8e93c673e0f1af7 (diff)
intel-iommu: IA64 support
The current Intel IOMMU code assumes that both the host page size and the Intel IOMMU page size are 4KiB. This patch adds support for variable page size, which provides the support needed for IA64 and its multiple page sizes. It also adds some other code hooks for the IA64 platform, including the DMAR_OPERATION_TIMEOUT definition.

[dwmw2: some cleanup]

Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Diffstat (limited to 'drivers/pci/intel-iommu.c')
-rw-r--r--    drivers/pci/intel-iommu.c    128
1 file changed, 66 insertions(+), 62 deletions(-)
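The patch splits the old hard-coded PAGE_*_4K macros into two families: the host's generic PAGE_SIZE/PAGE_MASK/PAGE_ALIGN for objects that live in host memory, and VT-d-specific VTD_PAGE_* macros for anything programmed into the IOMMU hardware. The VTD_PAGE_* helpers themselves are defined in include/linux/intel-iommu.h and are not part of this file's diff; the following is an illustrative sketch of what such definitions look like, pinning the IOMMU page size at 4KiB independently of the host page size:

/* Sketch of the VT-d page-size helpers (the real definitions live in
 * include/linux/intel-iommu.h, outside this file's diff). The IOMMU
 * always operates on 4KiB pages, regardless of the host PAGE_SIZE. */
#define VTD_PAGE_SHIFT          (12)
#define VTD_PAGE_SIZE           (1UL << VTD_PAGE_SHIFT)
#define VTD_PAGE_MASK           (((u64)-1) << VTD_PAGE_SHIFT)
#define VTD_PAGE_ALIGN(addr)    (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)

With the two families separated, host-page-sized objects (page-table pages, coherent buffers, IOVA accounting) keep using PAGE_SIZE/PAGE_MASK/PAGE_ALIGN, while PTE programming and IOTLB invalidation use the VTD_* variants; that substitution is what the hunks below perform.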
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 509470419130..2bf96babbc4f 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -18,6 +18,7 @@
  * Author: Ashok Raj <ashok.raj@intel.com>
  * Author: Shaohua Li <shaohua.li@intel.com>
  * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
+ * Author: Fenghua Yu <fenghua.yu@intel.com>
  */
 
 #include <linux/init.h>
@@ -35,11 +36,13 @@
 #include <linux/timer.h>
 #include <linux/iova.h>
 #include <linux/intel-iommu.h>
-#include <asm/proto.h> /* force_iommu in this header in x86-64*/
 #include <asm/cacheflush.h>
 #include <asm/iommu.h>
 #include "pci.h"
 
+#define ROOT_SIZE       VTD_PAGE_SIZE
+#define CONTEXT_SIZE    VTD_PAGE_SIZE
+
 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
 
@@ -199,7 +202,7 @@ static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
                spin_unlock_irqrestore(&iommu->lock, flags);
                return NULL;
        }
-       __iommu_flush_cache(iommu, (void *)context, PAGE_SIZE_4K);
+       __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
        phy_addr = virt_to_phys((void *)context);
        set_root_value(root, phy_addr);
        set_root_present(root);
@@ -345,7 +348,7 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
                        return NULL;
                }
                __iommu_flush_cache(domain->iommu, tmp_page,
-                               PAGE_SIZE_4K);
+                               PAGE_SIZE);
                dma_set_pte_addr(*pte, virt_to_phys(tmp_page));
                /*
                 * high level table always sets r/w, last level page
@@ -408,13 +411,13 @@ static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end)
        start &= (((u64)1) << addr_width) - 1;
        end &= (((u64)1) << addr_width) - 1;
        /* in case it's partial page */
-       start = PAGE_ALIGN_4K(start);
-       end &= PAGE_MASK_4K;
+       start = PAGE_ALIGN(start);
+       end &= PAGE_MASK;
 
        /* we don't need lock here, nobody else touches the iova range */
        while (start < end) {
                dma_pte_clear_one(domain, start);
-               start += PAGE_SIZE_4K;
+               start += VTD_PAGE_SIZE;
        }
 }
 
@@ -468,7 +471,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
        if (!root)
                return -ENOMEM;
 
-       __iommu_flush_cache(iommu, root, PAGE_SIZE_4K);
+       __iommu_flush_cache(iommu, root, ROOT_SIZE);
 
        spin_lock_irqsave(&iommu->lock, flags);
        iommu->root_entry = root;
@@ -634,7 +637,8 @@ static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
                printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
        if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
                pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
-                       DMA_TLB_IIRG(type), DMA_TLB_IAIG(val));
+                       (unsigned long long)DMA_TLB_IIRG(type),
+                       (unsigned long long)DMA_TLB_IAIG(val));
        /* flush context entry will implictly flush write buffer */
        return 0;
 }
@@ -644,7 +648,7 @@ static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
 {
        unsigned int mask;
 
-       BUG_ON(addr & (~PAGE_MASK_4K));
+       BUG_ON(addr & (~VTD_PAGE_MASK));
        BUG_ON(pages == 0);
 
        /* Fallback to domain selective flush if no PSI support */
@@ -798,7 +802,7 @@ void dmar_msi_read(int irq, struct msi_msg *msg)
 }
 
 static int iommu_page_fault_do_one(struct intel_iommu *iommu, int type,
-               u8 fault_reason, u16 source_id, u64 addr)
+               u8 fault_reason, u16 source_id, unsigned long long addr)
 {
        const char *reason;
 
@@ -1051,9 +1055,9 @@ static void dmar_init_reserved_ranges(void)
                        if (!r->flags || !(r->flags & IORESOURCE_MEM))
                                continue;
                        addr = r->start;
-                       addr &= PAGE_MASK_4K;
+                       addr &= PAGE_MASK;
                        size = r->end - addr;
-                       size = PAGE_ALIGN_4K(size);
+                       size = PAGE_ALIGN(size);
                        iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr),
                                IOVA_PFN(size + addr) - 1);
                        if (!iova)
@@ -1115,7 +1119,7 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
        domain->pgd = (struct dma_pte *)alloc_pgtable_page();
        if (!domain->pgd)
                return -ENOMEM;
-       __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE_4K);
+       __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
        return 0;
 }
 
@@ -1131,7 +1135,7 @@ static void domain_exit(struct dmar_domain *domain)
        /* destroy iovas */
        put_iova_domain(&domain->iovad);
        end = DOMAIN_MAX_ADDR(domain->gaw);
-       end = end & (~PAGE_MASK_4K);
+       end = end & (~PAGE_MASK);
 
        /* clear ptes */
        dma_pte_clear_range(domain, 0, end);
@@ -1252,22 +1256,25 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
        u64 start_pfn, end_pfn;
        struct dma_pte *pte;
        int index;
+       int addr_width = agaw_to_width(domain->agaw);
+
+       hpa &= (((u64)1) << addr_width) - 1;
 
        if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
                return -EINVAL;
-       iova &= PAGE_MASK_4K;
-       start_pfn = ((u64)hpa) >> PAGE_SHIFT_4K;
-       end_pfn = (PAGE_ALIGN_4K(((u64)hpa) + size)) >> PAGE_SHIFT_4K;
+       iova &= PAGE_MASK;
+       start_pfn = ((u64)hpa) >> VTD_PAGE_SHIFT;
+       end_pfn = (VTD_PAGE_ALIGN(((u64)hpa) + size)) >> VTD_PAGE_SHIFT;
        index = 0;
        while (start_pfn < end_pfn) {
-               pte = addr_to_dma_pte(domain, iova + PAGE_SIZE_4K * index);
+               pte = addr_to_dma_pte(domain, iova + VTD_PAGE_SIZE * index);
                if (!pte)
                        return -ENOMEM;
                /* We don't need lock here, nobody else
                 * touches the iova range
                 */
                BUG_ON(dma_pte_addr(*pte));
-               dma_set_pte_addr(*pte, start_pfn << PAGE_SHIFT_4K);
+               dma_set_pte_addr(*pte, start_pfn << VTD_PAGE_SHIFT);
                dma_set_pte_prot(*pte, prot);
                __iommu_flush_cache(domain->iommu, pte, sizeof(*pte));
                start_pfn++;
@@ -1445,11 +1452,13 @@ error:
        return find_domain(pdev);
 }
 
-static int iommu_prepare_identity_map(struct pci_dev *pdev, u64 start, u64 end)
+static int iommu_prepare_identity_map(struct pci_dev *pdev,
+                                     unsigned long long start,
+                                     unsigned long long end)
 {
        struct dmar_domain *domain;
        unsigned long size;
-       u64 base;
+       unsigned long long base;
        int ret;
 
        printk(KERN_INFO
@@ -1461,9 +1470,9 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev, u64 start, u64 end)
                return -ENOMEM;
 
        /* The address might not be aligned */
-       base = start & PAGE_MASK_4K;
+       base = start & PAGE_MASK;
        size = end - base;
-       size = PAGE_ALIGN_4K(size);
+       size = PAGE_ALIGN(size);
        if (!reserve_iova(&domain->iovad, IOVA_PFN(base),
                        IOVA_PFN(base + size) - 1)) {
                printk(KERN_ERR "IOMMU: reserve iova failed\n");
@@ -1732,8 +1741,8 @@ error:
 static inline u64 aligned_size(u64 host_addr, size_t size)
 {
        u64 addr;
-       addr = (host_addr & (~PAGE_MASK_4K)) + size;
-       return PAGE_ALIGN_4K(addr);
+       addr = (host_addr & (~PAGE_MASK)) + size;
+       return PAGE_ALIGN(addr);
 }
 
 struct iova *
@@ -1747,7 +1756,7 @@ iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
                return NULL;
 
        piova = alloc_iova(&domain->iovad,
-                       size >> PAGE_SHIFT_4K, IOVA_PFN(end), 1);
+                       size >> PAGE_SHIFT, IOVA_PFN(end), 1);
        return piova;
 }
 
@@ -1807,12 +1816,12 @@ get_valid_domain_for_dev(struct pci_dev *pdev)
        return domain;
 }
 
-static dma_addr_t
+dma_addr_t
 intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir)
 {
        struct pci_dev *pdev = to_pci_dev(hwdev);
        struct dmar_domain *domain;
-       unsigned long start_paddr;
+       phys_addr_t start_paddr;
        struct iova *iova;
        int prot = 0;
        int ret;
@@ -1831,7 +1840,7 @@ intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir)
        if (!iova)
                goto error;
 
-       start_paddr = iova->pfn_lo << PAGE_SHIFT_4K;
+       start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
 
        /*
         * Check if DMAR supports zero-length reads on write only
@@ -1849,27 +1858,23 @@ intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir)
         * is not a big problem
         */
        ret = domain_page_mapping(domain, start_paddr,
-               ((u64)paddr) & PAGE_MASK_4K, size, prot);
+               ((u64)paddr) & PAGE_MASK, size, prot);
        if (ret)
                goto error;
 
-       pr_debug("Device %s request: %lx@%llx mapping: %lx@%llx, dir %d\n",
-               pci_name(pdev), size, (u64)paddr,
-               size, (u64)start_paddr, dir);
-
        /* it's a non-present to present mapping */
        ret = iommu_flush_iotlb_psi(domain->iommu, domain->id,
-                       start_paddr, size >> PAGE_SHIFT_4K, 1);
+                       start_paddr, size >> VTD_PAGE_SHIFT, 1);
        if (ret)
                iommu_flush_write_buffer(domain->iommu);
 
-       return (start_paddr + ((u64)paddr & (~PAGE_MASK_4K)));
+       return start_paddr + ((u64)paddr & (~PAGE_MASK));
 
 error:
        if (iova)
                __free_iova(&domain->iovad, iova);
        printk(KERN_ERR"Device %s request: %lx@%llx dir %d --- failed\n",
-               pci_name(pdev), size, (u64)paddr, dir);
+               pci_name(pdev), size, (unsigned long long)paddr, dir);
        return 0;
 }
 
@@ -1931,8 +1936,8 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
        spin_unlock_irqrestore(&async_umap_flush_lock, flags);
 }
 
-static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr,
-       size_t size, int dir)
+void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
+                       int dir)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct dmar_domain *domain;
@@ -1948,11 +1953,11 @@ static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr,
        if (!iova)
                return;
 
-       start_addr = iova->pfn_lo << PAGE_SHIFT_4K;
+       start_addr = iova->pfn_lo << PAGE_SHIFT;
        size = aligned_size((u64)dev_addr, size);
 
        pr_debug("Device %s unmapping: %lx@%llx\n",
-               pci_name(pdev), size, (u64)start_addr);
+               pci_name(pdev), size, (unsigned long long)start_addr);
 
        /* clear the whole page */
        dma_pte_clear_range(domain, start_addr, start_addr + size);
@@ -1960,7 +1965,7 @@ static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr,
        dma_pte_free_pagetable(domain, start_addr, start_addr + size);
        if (intel_iommu_strict) {
                if (iommu_flush_iotlb_psi(domain->iommu,
-                       domain->id, start_addr, size >> PAGE_SHIFT_4K, 0))
+                       domain->id, start_addr, size >> VTD_PAGE_SHIFT, 0))
                        iommu_flush_write_buffer(domain->iommu);
                /* free iova */
                __free_iova(&domain->iovad, iova);
@@ -1973,13 +1978,13 @@ static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr,
        }
 }
 
-static void * intel_alloc_coherent(struct device *hwdev, size_t size,
-                              dma_addr_t *dma_handle, gfp_t flags)
+void *intel_alloc_coherent(struct device *hwdev, size_t size,
+                          dma_addr_t *dma_handle, gfp_t flags)
 {
        void *vaddr;
        int order;
 
-       size = PAGE_ALIGN_4K(size);
+       size = PAGE_ALIGN(size);
        order = get_order(size);
        flags &= ~(GFP_DMA | GFP_DMA32);
 
@@ -1995,12 +2000,12 @@ static void * intel_alloc_coherent(struct device *hwdev, size_t size,
        return NULL;
 }
 
-static void intel_free_coherent(struct device *hwdev, size_t size,
-       void *vaddr, dma_addr_t dma_handle)
+void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
+                        dma_addr_t dma_handle)
 {
        int order;
 
-       size = PAGE_ALIGN_4K(size);
+       size = PAGE_ALIGN(size);
        order = get_order(size);
 
        intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
@@ -2008,8 +2013,9 @@ static void intel_free_coherent(struct device *hwdev, size_t size,
 }
 
 #define SG_ENT_VIRT_ADDRESS(sg)        (sg_virt((sg)))
-static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
-       int nelems, int dir)
+
+void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
+                   int nelems, int dir)
 {
        int i;
        struct pci_dev *pdev = to_pci_dev(hwdev);
@@ -2033,7 +2039,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
                size += aligned_size((u64)addr, sg->length);
        }
 
-       start_addr = iova->pfn_lo << PAGE_SHIFT_4K;
+       start_addr = iova->pfn_lo << PAGE_SHIFT;
 
        /* clear the whole page */
        dma_pte_clear_range(domain, start_addr, start_addr + size);
@@ -2041,7 +2047,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
        dma_pte_free_pagetable(domain, start_addr, start_addr + size);
 
        if (iommu_flush_iotlb_psi(domain->iommu, domain->id, start_addr,
-                       size >> PAGE_SHIFT_4K, 0))
+                       size >> VTD_PAGE_SHIFT, 0))
                iommu_flush_write_buffer(domain->iommu);
 
        /* free iova */
@@ -2062,8 +2068,8 @@ static int intel_nontranslate_map_sg(struct device *hddev,
        return nelems;
 }
 
-static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist,
-                               int nelems, int dir)
+int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
+                int dir)
 {
        void *addr;
        int i;
@@ -2107,14 +2113,14 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist,
        if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
                prot |= DMA_PTE_WRITE;
 
-       start_addr = iova->pfn_lo << PAGE_SHIFT_4K;
+       start_addr = iova->pfn_lo << PAGE_SHIFT;
        offset = 0;
        for_each_sg(sglist, sg, nelems, i) {
                addr = SG_ENT_VIRT_ADDRESS(sg);
                addr = (void *)virt_to_phys(addr);
                size = aligned_size((u64)addr, sg->length);
                ret = domain_page_mapping(domain, start_addr + offset,
-                       ((u64)addr) & PAGE_MASK_4K,
+                       ((u64)addr) & PAGE_MASK,
                        size, prot);
                if (ret) {
                        /* clear the page */
@@ -2128,14 +2134,14 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist,
                        return 0;
                }
                sg->dma_address = start_addr + offset +
-                               ((u64)addr & (~PAGE_MASK_4K));
+                               ((u64)addr & (~PAGE_MASK));
                sg->dma_length = sg->length;
                offset += size;
        }
 
        /* it's a non-present to present mapping */
        if (iommu_flush_iotlb_psi(domain->iommu, domain->id,
-                       start_addr, offset >> PAGE_SHIFT_4K, 1))
+                       start_addr, offset >> VTD_PAGE_SHIFT, 1))
                iommu_flush_write_buffer(domain->iommu);
        return nelems;
 }
@@ -2175,7 +2181,6 @@ static inline int iommu_devinfo_cache_init(void)
                                        sizeof(struct device_domain_info),
                                        0,
                                        SLAB_HWCACHE_ALIGN,
-
                                        NULL);
        if (!iommu_devinfo_cache) {
                printk(KERN_ERR "Couldn't create devinfo cache\n");
@@ -2193,7 +2198,6 @@ static inline int iommu_iova_cache_init(void)
                                        sizeof(struct iova),
                                        0,
                                        SLAB_HWCACHE_ALIGN,
-
                                        NULL);
        if (!iommu_iova_cache) {
                printk(KERN_ERR "Couldn't create iova cache\n");
@@ -2322,7 +2326,7 @@ void intel_iommu_domain_exit(struct dmar_domain *domain)
                return;
 
        end = DOMAIN_MAX_ADDR(domain->gaw);
-       end = end & (~PAGE_MASK_4K);
+       end = end & (~VTD_PAGE_MASK);
 
        /* clear ptes */
        dma_pte_clear_range(domain, 0, end);
@@ -2418,6 +2422,6 @@ u64 intel_iommu_iova_to_pfn(struct dmar_domain *domain, u64 iova)
        if (pte)
                pfn = dma_pte_addr(*pte);
 
-       return pfn >> PAGE_SHIFT_4K;
+       return pfn >> VTD_PAGE_SHIFT;
 }
 EXPORT_SYMBOL_GPL(intel_iommu_iova_to_pfn);