author     FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>   2009-01-05 09:47:28 -0500
committer  Ingo Molnar <mingo@elte.hu>                       2009-01-06 08:06:56 -0500
commit     d7dff84053524186b139342ac66a4160ce6bb517 (patch)
tree       504ba7cc335158fed360d32bec3baf834cafbbce
parent     33feffd4525fc2e4dd0a322fb5d07d61f85d791e (diff)
x86: remove map_single and unmap_single in struct dma_mapping_ops
This patch converts dma_map_single and dma_unmap_single to use
map_page and unmap_page respectively, and removes the now-unnecessary
map_single and unmap_single hooks from struct dma_mapping_ops.

intel-iommu's dma_map_single and dma_unmap_single are left in place
because IA64 still uses them; they will be removed after the
unification.
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
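
[Editor's note] The pattern is the same in every backend touched below: each
driver's map_single was only a thin wrapper that split a phys_addr_t into a
struct page plus an in-page offset and forwarded to map_page, which is why the
hook can be dropped and the generic inline wrappers can call ->map_page
directly. A minimal sketch of that equivalence — my_map_single is a
hypothetical name used only for illustration; the hook calls mirror the code
removed in the diff:

	/* Hypothetical helper showing what the removed per-driver
	 * map_single wrappers did: express a phys_addr_t mapping
	 * through the page-based ->map_page hook. */
	static dma_addr_t my_map_single(struct device *dev, phys_addr_t paddr,
					size_t size, int dir)
	{
		struct dma_mapping_ops *ops = get_dma_ops(dev);

		/* Split the physical address into a page and an offset,
		 * then use the page-based hook instead of ->map_single. */
		return ops->map_page(dev, pfn_to_page(paddr >> PAGE_SHIFT),
				     paddr & ~PAGE_MASK, size, dir, NULL);
	}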
 arch/x86/include/asm/dma-mapping.h | 15
 arch/x86/kernel/amd_iommu.c        | 15
 arch/x86/kernel/pci-calgary_64.c   | 16
 arch/x86/kernel/pci-gart_64.c      | 19
 arch/x86/kernel/pci-nommu.c        |  8
 arch/x86/kernel/pci-swiotlb_64.c   |  9
 drivers/pci/intel-iommu.c          |  2
 7 files changed, 8 insertions(+), 76 deletions(-)
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 3fe05046fb31..b81f82268a16 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -24,10 +24,6 @@ struct dma_mapping_ops {
 				dma_addr_t *dma_handle, gfp_t gfp);
 	void (*free_coherent)(struct device *dev, size_t size,
 				void *vaddr, dma_addr_t dma_handle);
-	dma_addr_t (*map_single)(struct device *hwdev, phys_addr_t ptr,
-				size_t size, int direction);
-	void (*unmap_single)(struct device *dev, dma_addr_t addr,
-				size_t size, int direction);
 	void (*sync_single_for_cpu)(struct device *hwdev,
 				dma_addr_t dma_handle, size_t size,
 				int direction);
@@ -103,7 +99,9 @@ dma_map_single(struct device *hwdev, void *ptr, size_t size,
 	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
 
 	BUG_ON(!valid_dma_direction(direction));
-	return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
+	return ops->map_page(hwdev, virt_to_page(ptr),
+			     (unsigned long)ptr & ~PAGE_MASK, size,
+			     direction, NULL);
 }
 
 static inline void
@@ -113,8 +111,8 @@ dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
 	struct dma_mapping_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(direction));
-	if (ops->unmap_single)
-		ops->unmap_single(dev, addr, size, direction);
+	if (ops->unmap_page)
+		ops->unmap_page(dev, addr, size, direction, NULL);
 }
 
 static inline int
@@ -221,8 +219,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 	struct dma_mapping_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(direction));
-	return ops->map_single(dev, page_to_phys(page) + offset,
-			       size, direction);
+	return ops->map_page(dev, page, offset, size, direction, NULL);
 }
 
 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 85704418644a..a5dedb690a9a 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -1341,13 +1341,6 @@ out:
 	return addr;
 }
 
-static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
-			     size_t size, int dir)
-{
-	return map_page(dev, pfn_to_page(paddr >> PAGE_SHIFT),
-			paddr & ~PAGE_MASK, size, dir, NULL);
-}
-
 /*
  * The exported unmap_single function for dma_ops.
  */
@@ -1378,12 +1371,6 @@ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
 	spin_unlock_irqrestore(&domain->lock, flags);
 }
 
-static void unmap_single(struct device *dev, dma_addr_t dma_addr,
-			 size_t size, int dir)
-{
-	return unmap_page(dev, dma_addr, size, dir, NULL);
-}
-
 /*
  * This is a special map_sg function which is used if we should map a
  * device which is not handled by an AMD IOMMU in the system.
@@ -1664,8 +1651,6 @@ static void prealloc_protection_domains(void)
 static struct dma_mapping_ops amd_iommu_dma_ops = {
 	.alloc_coherent = alloc_coherent,
 	.free_coherent = free_coherent,
-	.map_single = map_single,
-	.unmap_single = unmap_single,
 	.map_page = map_page,
 	.unmap_page = unmap_page,
 	.map_sg = map_sg,
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index e33cfcf1af5b..756138b604e1 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -461,14 +461,6 @@ static dma_addr_t calgary_map_page(struct device *dev, struct page *page,
 	return iommu_alloc(dev, tbl, vaddr, npages, dir);
 }
 
-static dma_addr_t calgary_map_single(struct device *dev, phys_addr_t paddr,
-				     size_t size, int direction)
-{
-	return calgary_map_page(dev, pfn_to_page(paddr >> PAGE_SHIFT),
-				paddr & ~PAGE_MASK, size,
-				direction, NULL);
-}
-
 static void calgary_unmap_page(struct device *dev, dma_addr_t dma_addr,
 			       size_t size, enum dma_data_direction dir,
 			       struct dma_attrs *attrs)
@@ -480,12 +472,6 @@ static void calgary_unmap_page(struct device *dev, dma_addr_t dma_addr,
 	iommu_free(tbl, dma_addr, npages);
 }
 
-static void calgary_unmap_single(struct device *dev, dma_addr_t dma_handle,
-				 size_t size, int direction)
-{
-	calgary_unmap_page(dev, dma_handle, size, direction, NULL);
-}
-
 static void* calgary_alloc_coherent(struct device *dev, size_t size,
 				    dma_addr_t *dma_handle, gfp_t flag)
 {
@@ -535,8 +521,6 @@ static void calgary_free_coherent(struct device *dev, size_t size,
 static struct dma_mapping_ops calgary_dma_ops = {
 	.alloc_coherent = calgary_alloc_coherent,
 	.free_coherent = calgary_free_coherent,
-	.map_single = calgary_map_single,
-	.unmap_single = calgary_unmap_single,
 	.map_sg = calgary_map_sg,
 	.unmap_sg = calgary_unmap_sg,
 	.map_page = calgary_map_page,
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index e49c6dd0e8c6..9c557c0c928c 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -275,13 +275,6 @@ static dma_addr_t gart_map_page(struct device *dev, struct page *page,
 	return bus;
 }
 
-static dma_addr_t gart_map_single(struct device *dev, phys_addr_t paddr,
-				  size_t size, int dir)
-{
-	return gart_map_page(dev, pfn_to_page(paddr >> PAGE_SHIFT),
-			     paddr & ~PAGE_MASK, size, dir, NULL);
-}
-
 /*
  * Free a DMA mapping.
  */
@@ -306,12 +299,6 @@ static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
 	free_iommu(iommu_page, npages);
 }
 
-static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
-			      size_t size, int direction)
-{
-	gart_unmap_page(dev, dma_addr, size, direction, NULL);
-}
-
 /*
  * Wrapper for pci_unmap_single working with scatterlists.
  */
@@ -324,7 +311,7 @@ gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
 	for_each_sg(sg, s, nents, i) {
 		if (!s->dma_length || !s->length)
 			break;
-		gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
+		gart_unmap_page(dev, s->dma_address, s->dma_length, dir, NULL);
 	}
 }
 
@@ -538,7 +525,7 @@ static void
 gart_free_coherent(struct device *dev, size_t size, void *vaddr,
 		   dma_addr_t dma_addr)
 {
-	gart_unmap_single(dev, dma_addr, size, DMA_BIDIRECTIONAL);
+	gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, NULL);
 	free_pages((unsigned long)vaddr, get_order(size));
 }
 
@@ -725,8 +712,6 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
 }
 
 static struct dma_mapping_ops gart_dma_ops = {
-	.map_single = gart_map_single,
-	.unmap_single = gart_unmap_single,
 	.map_sg = gart_map_sg,
 	.unmap_sg = gart_unmap_sg,
 	.map_page = gart_map_page,
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index 5a73a824ac1c..d42b69c90b40 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -38,13 +38,6 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
 	return bus;
 }
 
-static dma_addr_t nommu_map_single(struct device *hwdev, phys_addr_t paddr,
-				   size_t size, int direction)
-{
-	return nommu_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT),
-			      paddr & ~PAGE_MASK, size, direction, NULL);
-}
-
 /* Map a set of buffers described by scatterlist in streaming
  * mode for DMA.  This is the scatter-gather version of the
  * above pci_map_single interface.  Here the scatter gather list
@@ -88,7 +81,6 @@ static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
 struct dma_mapping_ops nommu_dma_ops = {
 	.alloc_coherent = dma_generic_alloc_coherent,
 	.free_coherent = nommu_free_coherent,
-	.map_single = nommu_map_single,
 	.map_sg = nommu_map_sg,
 	.map_page = nommu_map_page,
 	.is_phys = 1,
diff --git a/arch/x86/kernel/pci-swiotlb_64.c b/arch/x86/kernel/pci-swiotlb_64.c
index d1c0366886dd..3ae354c0fdef 100644
--- a/arch/x86/kernel/pci-swiotlb_64.c
+++ b/arch/x86/kernel/pci-swiotlb_64.c
@@ -38,13 +38,6 @@ int __weak swiotlb_arch_range_needs_mapping(void *ptr, size_t size)
 	return 0;
 }
 
-static dma_addr_t
-swiotlb_map_single_phys(struct device *hwdev, phys_addr_t paddr, size_t size,
-			int direction)
-{
-	return swiotlb_map_single(hwdev, phys_to_virt(paddr), size, direction);
-}
-
 /* these will be moved to lib/swiotlb.c later on */
 
 static dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
@@ -78,8 +71,6 @@ struct dma_mapping_ops swiotlb_dma_ops = {
 	.mapping_error = swiotlb_dma_mapping_error,
 	.alloc_coherent = x86_swiotlb_alloc_coherent,
 	.free_coherent = swiotlb_free_coherent,
-	.map_single = swiotlb_map_single_phys,
-	.unmap_single = swiotlb_unmap_single,
 	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
 	.sync_single_for_device = swiotlb_sync_single_for_device,
 	.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 60258ecbcb4c..da273e4ef66c 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -2582,8 +2582,6 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
 static struct dma_mapping_ops intel_dma_ops = {
 	.alloc_coherent = intel_alloc_coherent,
 	.free_coherent = intel_free_coherent,
-	.map_single = intel_map_single,
-	.unmap_single = intel_unmap_single,
 	.map_sg = intel_map_sg,
 	.unmap_sg = intel_unmap_sg,
 #ifdef CONFIG_X86_64