author    David Woodhouse <dwmw2@infradead.org>  2009-02-11 12:23:43 -0500
committer David Woodhouse <David.Woodhouse@intel.com>  2009-03-25 12:00:41 -0400
commit    4cf2e75d0bec15d945972b005056c4a8731b82cf
tree      1d53fce01100e81637fb820f28d9ac0cf551d4db
parent    3199aa6bc8766e17b8f60820c4f78d59c25fce0e
intel-iommu: Enable DMAR on 32-bit kernel.
If we fix a few highmem-related thinkos and a couple of printk format warnings, the Intel IOMMU driver works fine in a 32-bit kernel.

Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
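The highmem thinkos all have the same shape: the scatterlist paths took sg_virt() of an entry and then ran the result through virt_to_phys() or virt_to_bus(). On a 32-bit kernel a highmem page has no permanent kernel virtual mapping, so sg_virt() is only meaningful for lowmem pages; deriving the physical address straight from the struct page needs no virtual mapping at all. A minimal sketch of the before/after, assuming kernel context (the helper names sg_phys_via_virt and sg_phys_direct are illustrative, not from the patch):

#include <linux/io.h>
#include <linux/scatterlist.h>

/* Old pattern: correct only for lowmem pages, because sg_virt()
 * relies on the kernel's direct mapping, which highmem pages lack
 * unless they are explicitly kmap()ed. */
static phys_addr_t sg_phys_via_virt(struct scatterlist *sg)
{
        return virt_to_phys(sg_virt(sg));
}

/* New pattern: compute the physical address from the struct page
 * itself, which works for lowmem and highmem alike. */
static phys_addr_t sg_phys_direct(struct scatterlist *sg)
{
        return page_to_phys(sg_page(sg)) + sg->offset;
}

This is exactly the substitution the patch makes in intel_unmap_sg(), intel_nontranslate_map_sg() and intel_map_sg() below, which also lets the SG_ENT_VIRT_ADDRESS() macro go away.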
Diffstat (limited to 'drivers/pci/intel-iommu.c')
-rw-r--r--  drivers/pci/intel-iommu.c  20
1 file changed, 8 insertions(+), 12 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index e541c3bdbf0d..0c12d06bade6 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -2315,7 +2315,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 error:
 	if (iova)
 		__free_iova(&domain->iovad, iova);
-	printk(KERN_ERR"Device %s request: %lx@%llx dir %d --- failed\n",
+	printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n",
 		pci_name(pdev), size, (unsigned long long)paddr, dir);
 	return 0;
 }
@@ -2411,7 +2411,7 @@ void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
 	start_addr = iova->pfn_lo << PAGE_SHIFT;
 	size = aligned_size((u64)dev_addr, size);
 
-	pr_debug("Device %s unmapping: %lx@%llx\n",
+	pr_debug("Device %s unmapping: %zx@%llx\n",
 		pci_name(pdev), size, (unsigned long long)start_addr);
 
 	/* clear the whole page */
@@ -2469,8 +2469,6 @@ void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 	free_pages((unsigned long)vaddr, order);
 }
 
-#define SG_ENT_VIRT_ADDRESS(sg)	(sg_virt((sg)))
-
 void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
 		    int nelems, int dir)
 {
@@ -2480,7 +2478,7 @@ void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
 	unsigned long start_addr;
 	struct iova *iova;
 	size_t size = 0;
-	void *addr;
+	phys_addr_t addr;
 	struct scatterlist *sg;
 	struct intel_iommu *iommu;
 
@@ -2496,7 +2494,7 @@ void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
 	if (!iova)
 		return;
 	for_each_sg(sglist, sg, nelems, i) {
-		addr = SG_ENT_VIRT_ADDRESS(sg);
+		addr = page_to_phys(sg_page(sg)) + sg->offset;
 		size += aligned_size((u64)addr, sg->length);
 	}
 
@@ -2523,7 +2521,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
 
 	for_each_sg(sglist, sg, nelems, i) {
 		BUG_ON(!sg_page(sg));
-		sg->dma_address = virt_to_bus(SG_ENT_VIRT_ADDRESS(sg));
+		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
 		sg->dma_length = sg->length;
 	}
 	return nelems;
@@ -2532,7 +2530,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
 int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
 		 int dir)
 {
-	void *addr;
+	phys_addr_t addr;
 	int i;
 	struct pci_dev *pdev = to_pci_dev(hwdev);
 	struct dmar_domain *domain;
@@ -2556,8 +2554,7 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
 	iommu = domain_get_iommu(domain);
 
 	for_each_sg(sglist, sg, nelems, i) {
-		addr = SG_ENT_VIRT_ADDRESS(sg);
-		addr = (void *)virt_to_phys(addr);
+		addr = page_to_phys(sg_page(sg)) + sg->offset;
 		size += aligned_size((u64)addr, sg->length);
 	}
 
@@ -2580,8 +2577,7 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
 	start_addr = iova->pfn_lo << PAGE_SHIFT;
 	offset = 0;
 	for_each_sg(sglist, sg, nelems, i) {
-		addr = SG_ENT_VIRT_ADDRESS(sg);
-		addr = (void *)virt_to_phys(addr);
+		addr = page_to_phys(sg_page(sg)) + sg->offset;
 		size = aligned_size((u64)addr, sg->length);
 		ret = domain_page_mapping(domain, start_addr + offset,
 					  ((u64)addr) & PAGE_MASK,
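
For reference, the format-warning half of the patch in isolation: size is a size_t, which is unsigned int on 32-bit and unsigned long on 64-bit, so %lx only compiled cleanly on 64-bit, while %zx matches size_t on every architecture. A standalone sketch, assuming kernel context (the wrapper report_failure is hypothetical, just to frame the printk):

#include <linux/kernel.h>

/* Hypothetical wrapper around the failure printk from
 * __intel_map_single(): %zx is the size_t-sized specifier on
 * both 32-bit and 64-bit builds. */
static void report_failure(const char *name, size_t size, u64 paddr, int dir)
{
        printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
               name, size, (unsigned long long)paddr, dir);
}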