author		FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>	2009-01-05 09:47:26 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-01-06 08:06:55 -0500
commit		ffbbef5c0639dbf31a0511d8f65f574bb3e30758 (patch)
tree		47ab8be07ea12212e35460c56dc140e998892c98 /drivers/pci/intel-iommu.c
parent		51491367c2541c51a9d435eec88b0e846223fb59 (diff)
intel-iommu: add map_page and unmap_page
This is preparation for the struct dma_mapping_ops unification: we use
map_page and unmap_page instead of map_single and unmap_single (the
relationship between the two hooks is sketched after the sign-offs).

This patch uses a temporary workaround, an #ifdef CONFIG_X86_64 guard,
to avoid breaking the IA64 build; the workaround will be removed once
the unification is done. As it stands, changing x86's struct
dma_mapping_ops can break IA64. That is simply wrong, and it is one of
the problems this patchset fixes.

The map_single and unmap_single hooks will be removed in the last patch
of this patchset.
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
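
For context, a minimal sketch (not part of this commit) of how a
map_single-style request can be routed through the new ->map_page hook:
split the physical address into a (page, offset) pair, the inverse of
the page_to_phys() arithmetic in intel_map_page() below. The helper
name example_map_single is hypothetical, and the sketch assumes it
sits next to intel_map_page() in intel-iommu.c:

    #include <linux/dma-mapping.h>	/* dma_addr_t, enum dma_data_direction */
    #include <linux/mm.h>		/* pfn_to_page(), PAGE_SHIFT, PAGE_MASK */

    /* Hypothetical helper, not in this patch: recover the (page, offset)
     * pair that ->map_page expects from a raw physical address. */
    static dma_addr_t example_map_single(struct device *dev, phys_addr_t paddr,
    				     size_t size, enum dma_data_direction dir)
    {
    	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
    	unsigned long offset = paddr & ~PAGE_MASK;

    	return intel_map_page(dev, page, offset, size, dir, NULL);
    }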
Diffstat (limited to 'drivers/pci/intel-iommu.c')
 drivers/pci/intel-iommu.c | 24 ++++++++++++++++++++++--
 1 file changed, 22 insertions(+), 2 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 235fb7a5a8a5..60258ecbcb4c 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -2273,6 +2273,15 @@ error:
 	return 0;
 }
 
+static dma_addr_t intel_map_page(struct device *dev, struct page *page,
+				 unsigned long offset, size_t size,
+				 enum dma_data_direction dir,
+				 struct dma_attrs *attrs)
+{
+	return __intel_map_single(dev, page_to_phys(page) + offset, size,
+				  dir, to_pci_dev(dev)->dma_mask);
+}
+
 dma_addr_t intel_map_single(struct device *hwdev, phys_addr_t paddr,
 			    size_t size, int dir)
 {
@@ -2341,8 +2350,9 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
 	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
 }
 
-void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
-			int dir)
+static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
+			     size_t size, enum dma_data_direction dir,
+			     struct dma_attrs *attrs)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct dmar_domain *domain;
@@ -2386,6 +2396,12 @@ void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
 	}
 }
 
+void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
+			int dir)
+{
+	intel_unmap_page(dev, dev_addr, size, dir, NULL);
+}
+
 void *intel_alloc_coherent(struct device *hwdev, size_t size,
 			   dma_addr_t *dma_handle, gfp_t flags)
 {
@@ -2570,6 +2586,10 @@ static struct dma_mapping_ops intel_dma_ops = {
 	.unmap_single = intel_unmap_single,
 	.map_sg = intel_map_sg,
 	.unmap_sg = intel_unmap_sg,
+#ifdef CONFIG_X86_64
+	.map_page = intel_map_page,
+	.unmap_page = intel_unmap_page,
+#endif
 };
 
 static inline int iommu_domain_cache_init(void)
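
On the #ifdef CONFIG_X86_64 guard in intel_dma_ops above, a minimal
sketch of why it is needed, with abbreviated, hypothetical member lists
(not verbatim per-arch declarations; the IA64 signature shown is
illustrative). At this point in the series only x86-64's struct
dma_mapping_ops carries the page-based hooks, so the designated
initializers .map_page/.unmap_page would not compile on IA64, which
also builds this driver:

    /* Abbreviated sketch, hypothetical member lists: the per-arch
     * headers declare different dma_mapping_ops before the unification. */
    #ifdef CONFIG_X86_64
    struct dma_mapping_ops {
    	dma_addr_t (*map_single)(struct device *hwdev, phys_addr_t paddr,
    				 size_t size, int direction);
    	dma_addr_t (*map_page)(struct device *dev, struct page *page,
    			       unsigned long offset, size_t size,
    			       enum dma_data_direction dir,
    			       struct dma_attrs *attrs);
    	/* ... */
    };
    #else
    /* e.g. IA64: no map_page/unmap_page members yet, so naming
     * .map_page in an initializer is a compile error here. */
    struct dma_mapping_ops {
    	dma_addr_t (*map_single)(struct device *hwdev, void *ptr,
    				 size_t size, int direction);
    	/* ... */
    };
    #endif

Once the ops structures are unified, the guard (and the single-address
hooks) can go away, which is what the rest of the series does.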