author		David Woodhouse <dwmw2@infradead.org>	2009-05-10 18:57:41 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-05-11 10:51:01 -0400
commit		fd18de50b9e7965f93d231e7390436fb8900c0e6 (patch)
tree		dcf09dbd8d07fd55f2777c9224173039c4a1f1fd /drivers/pci
parent		a4d7749be5de4a7261bcbe3c7d96c748792ec455 (diff)
intel-iommu: PAE memory corruption fix
PAGE_MASK is 0xFFFFF000 on i386 -- even with PAE.

So it's not sufficient to ensure that you use phys_addr_t or uint64_t
everywhere you handle physical addresses -- you also have to avoid using
the construct 'addr & PAGE_MASK', because that will strip the high 32 bits
of the address.

This patch avoids that problem by using PHYSICAL_PAGE_MASK instead of
PAGE_MASK where appropriate. It leaves '& PAGE_MASK' in a few instances
that don't matter -- where it's being used on the virtual bus addresses
we're dishing out, which are 32-bit anyway.

Since PHYSICAL_PAGE_MASK is not present on other architectures, we have
to define it (to PAGE_MASK) if it's not already defined. Maybe it would
be better just to fix PAGE_MASK for i386/PAE?

Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
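To make the failure mode concrete, here is a minimal standalone sketch
(not part of the patch; PAGE_MASK_I386, the 12-bit page shift and the
sample address are all illustrative) of how a 32-bit PAGE_MASK silently
truncates a 64-bit PAE physical address, and how sign-extending the mask
to 64 bits -- the effect PHYSICAL_PAGE_MASK is meant to have -- keeps
the high bits intact:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* i386 computes PAGE_MASK in 32-bit arithmetic, so it is 0xFFFFF000
     * even though PAE physical addresses are 64 bits wide. */
    #define PAGE_MASK_I386 ((uint32_t)~((1u << 12) - 1))

    int main(void)
    {
        uint64_t paddr = 0x123456789ULL;  /* a physical address above 4GiB */

        /* Bug: the 32-bit mask is zero-extended when the & operator
         * promotes it to 64 bits, so bits 32 and up are stripped. */
        uint64_t bad = paddr & PAGE_MASK_I386;

        /* Fix: sign-extend the mask to 64 bits first; the high bits of
         * the address survive and only the page offset is cleared. */
        uint64_t good = paddr & (uint64_t)(int64_t)(int32_t)PAGE_MASK_I386;

        printf("bad:  0x%" PRIx64 "\n", bad);   /* 0x23456000  (top bits lost)  */
        printf("good: 0x%" PRIx64 "\n", good);  /* 0x123456000 (offset cleared) */
        return 0;
    }

Note that the fallback added below, '#define PHYSICAL_PAGE_MASK PAGE_MASK',
assumes PAGE_MASK is already as wide as phys_addr_t on the other
architectures -- which is exactly why the message above asks whether
PAGE_MASK itself should instead be fixed for i386/PAE.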
Diffstat (limited to 'drivers/pci')
-rw-r--r--  drivers/pci/intel-iommu.c | 13
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 001b328adf80..a563fbe559d0 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -59,6 +59,10 @@
 #define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
 #define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))
 
+#ifndef PHYSICAL_PAGE_MASK
+#define PHYSICAL_PAGE_MASK PAGE_MASK
+#endif
+
 /* global iommu list, set NULL for ignored DMAR units */
 static struct intel_iommu **g_iommus;
 
@@ -1216,7 +1220,7 @@ static void dmar_init_reserved_ranges(void)
 		if (!r->flags || !(r->flags & IORESOURCE_MEM))
 			continue;
 		addr = r->start;
-		addr &= PAGE_MASK;
+		addr &= PHYSICAL_PAGE_MASK;
 		size = r->end - addr;
 		size = PAGE_ALIGN(size);
 		iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr),
@@ -2173,7 +2177,8 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 	 * is not a big problem
 	 */
 	ret = domain_page_mapping(domain, start_paddr,
-			((u64)paddr) & PAGE_MASK, size, prot);
+			((u64)paddr) & PHYSICAL_PAGE_MASK,
+			size, prot);
 	if (ret)
 		goto error;
 
@@ -2463,8 +2468,8 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
 		addr = page_to_phys(sg_page(sg)) + sg->offset;
 		size = aligned_size((u64)addr, sg->length);
 		ret = domain_page_mapping(domain, start_addr + offset,
-				((u64)addr) & PAGE_MASK,
+				((u64)addr) & PHYSICAL_PAGE_MASK,
 				size, prot);
 		if (ret) {
 			/* clear the page */
 			dma_pte_clear_range(domain, start_addr,