author     FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>   2009-01-05 09:59:03 -0500
committer  Ingo Molnar <mingo@elte.hu>                       2009-01-06 08:06:58 -0500
commit     f98eee8ea99fe74ee9c4e867ba178ec3072793be
tree       67843fb8a25630dfdaaac8471423a9682cb0c390
parent     160c1d8e40866edfeae7d68816b7005d70acf391
x86, ia64: remove duplicated swiotlb code
This adds swiotlb_map_page and swiotlb_unmap_page to lib/swiotlb.c and
removes the duplicated IA64 and X86 copies of swiotlb_map_page and
swiotlb_unmap_page.

This also removes the now-unnecessary swiotlb_map_single,
swiotlb_map_single_attrs, swiotlb_unmap_single and swiotlb_unmap_single_attrs.
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
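
[Editor's note, not part of the commit: the removed single-buffer helpers map onto the surviving page-based entry points in a straightforward way. The wrapper names below are hypothetical and exist only to illustrate the equivalence; they are not added anywhere in the tree.]

/* Hypothetical wrappers, shown only to illustrate the API change. */
#include <linux/mm.h>           /* virt_to_page(), offset_in_page() */
#include <linux/dma-mapping.h>  /* enum dma_data_direction, struct dma_attrs */
#include <linux/swiotlb.h>      /* swiotlb_map_page(), swiotlb_unmap_page() */

static inline dma_addr_t map_single_compat(struct device *hwdev, void *ptr,
                                            size_t size,
                                            enum dma_data_direction dir,
                                            struct dma_attrs *attrs)
{
        /* A kernel virtual address is just a page plus an offset into it. */
        return swiotlb_map_page(hwdev, virt_to_page(ptr),
                                offset_in_page(ptr), size, dir, attrs);
}

static inline void unmap_single_compat(struct device *hwdev,
                                       dma_addr_t dev_addr, size_t size,
                                       enum dma_data_direction dir)
{
        swiotlb_unmap_page(hwdev, dev_addr, size, dir, NULL);
}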
-rw-r--r--  arch/ia64/kernel/pci-swiotlb.c    | 16
-rw-r--r--  arch/x86/kernel/pci-swiotlb_64.c  | 17
-rw-r--r--  include/linux/swiotlb.h           | 21
-rw-r--r--  lib/swiotlb.c                     | 48
4 files changed, 25 insertions(+), 77 deletions(-)
diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
index 6bf8f66786bd..e6b2ec9b27da 100644
--- a/arch/ia64/kernel/pci-swiotlb.c
+++ b/arch/ia64/kernel/pci-swiotlb.c
@@ -16,22 +16,6 @@ EXPORT_SYMBOL(swiotlb);
 /* Set this to 1 if there is a HW IOMMU in the system */
 int iommu_detected __read_mostly;
 
-static dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
-                                   unsigned long offset, size_t size,
-                                   enum dma_data_direction dir,
-                                   struct dma_attrs *attrs)
-{
-        return swiotlb_map_single_attrs(dev, page_address(page) + offset, size,
-                                        dir, attrs);
-}
-
-static void swiotlb_unmap_page(struct device *dev, dma_addr_t dma_handle,
-                               size_t size, enum dma_data_direction dir,
-                               struct dma_attrs *attrs)
-{
-        swiotlb_unmap_single_attrs(dev, dma_handle, size, dir, attrs);
-}
-
 struct dma_map_ops swiotlb_dma_ops = {
         .alloc_coherent = swiotlb_alloc_coherent,
         .free_coherent = swiotlb_free_coherent,
diff --git a/arch/x86/kernel/pci-swiotlb_64.c b/arch/x86/kernel/pci-swiotlb_64.c
index 3f0d9924dd1c..5e32c4f6a7ba 100644
--- a/arch/x86/kernel/pci-swiotlb_64.c
+++ b/arch/x86/kernel/pci-swiotlb_64.c
@@ -38,23 +38,6 @@ int __weak swiotlb_arch_range_needs_mapping(void *ptr, size_t size)
         return 0;
 }
 
-/* these will be moved to lib/swiotlb.c later on */
-
-static dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
-                                   unsigned long offset, size_t size,
-                                   enum dma_data_direction dir,
-                                   struct dma_attrs *attrs)
-{
-        return swiotlb_map_single(dev, page_address(page) + offset, size, dir);
-}
-
-static void swiotlb_unmap_page(struct device *dev, dma_addr_t dma_handle,
-                               size_t size, enum dma_data_direction dir,
-                               struct dma_attrs *attrs)
-{
-        swiotlb_unmap_single(dev, dma_handle, size, dir);
-}
-
 static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                                         dma_addr_t *dma_handle, gfp_t flags)
 {
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 0567c3d8633b..493dc17e7c87 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -41,20 +41,13 @@ extern void
 swiotlb_free_coherent(struct device *hwdev, size_t size,
                       void *vaddr, dma_addr_t dma_handle);
 
-extern dma_addr_t
-swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir);
-
-extern void
-swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
-                     size_t size, int dir);
-
-extern dma_addr_t
-swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
-                         int dir, struct dma_attrs *attrs);
-
-extern void
-swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
-                           size_t size, int dir, struct dma_attrs *attrs);
+extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
+                                   unsigned long offset, size_t size,
+                                   enum dma_data_direction dir,
+                                   struct dma_attrs *attrs);
+extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
+                               size_t size, enum dma_data_direction dir,
+                               struct dma_attrs *attrs);
 
 extern int
 swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index d047de990a3f..ec7922bd0d61 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -636,11 +636,14 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
  * Once the device is given the dma address, the device owns this memory until
  * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
  */
-dma_addr_t
-swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
-                         int dir, struct dma_attrs *attrs)
-{
-        dma_addr_t dev_addr = swiotlb_virt_to_bus(hwdev, ptr);
+dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
+                            unsigned long offset, size_t size,
+                            enum dma_data_direction dir,
+                            struct dma_attrs *attrs)
+{
+        phys_addr_t phys = page_to_phys(page) + offset;
+        void *ptr = page_address(page) + offset;
+        dma_addr_t dev_addr = swiotlb_phys_to_bus(dev, phys);
         void *map;
 
         BUG_ON(dir == DMA_NONE);
@@ -649,37 +652,30 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
          * we can safely return the device addr and not worry about bounce
          * buffering it.
          */
-        if (!address_needs_mapping(hwdev, dev_addr, size) &&
+        if (!address_needs_mapping(dev, dev_addr, size) &&
             !range_needs_mapping(ptr, size))
                 return dev_addr;
 
         /*
          * Oh well, have to allocate and map a bounce buffer.
          */
-        map = map_single(hwdev, virt_to_phys(ptr), size, dir);
+        map = map_single(dev, phys, size, dir);
         if (!map) {
-                swiotlb_full(hwdev, size, dir, 1);
+                swiotlb_full(dev, size, dir, 1);
                 map = io_tlb_overflow_buffer;
         }
 
-        dev_addr = swiotlb_virt_to_bus(hwdev, map);
+        dev_addr = swiotlb_virt_to_bus(dev, map);
 
         /*
          * Ensure that the address returned is DMA'ble
          */
-        if (address_needs_mapping(hwdev, dev_addr, size))
+        if (address_needs_mapping(dev, dev_addr, size))
                 panic("map_single: bounce buffer is not DMA'ble");
 
         return dev_addr;
 }
-EXPORT_SYMBOL(swiotlb_map_single_attrs);
-
-dma_addr_t
-swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
-{
-        return swiotlb_map_single_attrs(hwdev, ptr, size, dir, NULL);
-}
-EXPORT_SYMBOL(swiotlb_map_single);
+EXPORT_SYMBOL_GPL(swiotlb_map_page);
 
 /*
  * Unmap a single streaming mode DMA translation. The dma_addr and size must
@@ -689,9 +685,9 @@ EXPORT_SYMBOL(swiotlb_map_single);
  * After this call, reads by the cpu to the buffer are guaranteed to see
  * whatever the device wrote there.
  */
-void
-swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
-                           size_t size, int dir, struct dma_attrs *attrs)
+void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
+                        size_t size, enum dma_data_direction dir,
+                        struct dma_attrs *attrs)
 {
         char *dma_addr = swiotlb_bus_to_virt(dev_addr);
 
@@ -701,15 +697,7 @@ swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
         else if (dir == DMA_FROM_DEVICE)
                 dma_mark_clean(dma_addr, size);
 }
-EXPORT_SYMBOL(swiotlb_unmap_single_attrs);
-
-void
-swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
-                     int dir)
-{
-        return swiotlb_unmap_single_attrs(hwdev, dev_addr, size, dir, NULL);
-}
-EXPORT_SYMBOL(swiotlb_unmap_single);
+EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
 
 /*
  * Make physical memory consistent for a single streaming mode DMA translation
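
[Editor's note, not part of the commit: the comments in lib/swiotlb.c above describe the streaming-DMA ownership rule — the device owns the buffer from mapping until unmapping. A minimal, assumed driver-side sketch of that cycle through the generic DMA API is shown below; on swiotlb-backed configurations these calls reach swiotlb_map_page()/swiotlb_unmap_page() via the arch's dma_map_ops. The function example_dma_rx() is hypothetical.]

#include <linux/dma-mapping.h>

static int example_dma_rx(struct device *dev, struct page *page,
                          unsigned long offset, size_t len)
{
        dma_addr_t dma;

        /* Hand the buffer to the device; it owns the memory from here on. */
        dma = dma_map_page(dev, page, offset, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, dma))
                return -ENOMEM;

        /* ... program the device with 'dma' and wait for completion ... */

        /* Take the buffer back from the device. */
        dma_unmap_page(dev, dma, len, DMA_FROM_DEVICE);

        /* CPU reads are now guaranteed to see what the device wrote. */
        return 0;
}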