author     FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>  2009-07-09 21:04:59 -0400
committer  FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>  2009-07-28 01:19:19 -0400
commit     30945fdf6a08f52370dab3bc162e96c4b4e36082 (patch)
tree       915ac16aedfda98a65d0a06f63be58d1c3215d04 /arch/powerpc
parent     b9394647ac88faad9db0f9e92eac4db434faded6 (diff)
powerpc: remove unnecessary swiotlb_arch_address_needs_mapping
swiotlb doesn't use swiotlb_arch_address_needs_mapping(); it uses dma_capable(), so the unneeded swiotlb_arch_address_needs_mapping() can be removed. swiotlb_addr_needs_map() and the is_buffer_dma_capable() check in swiotlb_pci_addr_needs_map() can be removed as well, since dma_capable() covers what both provide.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Becky Bruce <beckyb@kernel.crashing.org>
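For context, the two helpers the message refers to differ mainly in where the DMA mask comes from. The following is a rough sketch of their shape around this era, for illustration only; it is not part of the patch, and the exact in-tree definitions may differ in detail:

#include <linux/types.h>
#include <linux/device.h>

/*
 * Sketch: is_buffer_dma_capable() takes an explicit mask, so callers such as
 * swiotlb_pci_addr_needs_map() had to fetch it first via dma_get_mask().
 */
static inline int is_buffer_dma_capable(u64 mask, dma_addr_t addr, size_t size)
{
	return addr + size <= mask;
}

/*
 * Sketch: dma_capable() performs the same range check but reads the mask from
 * the device itself, which is why the per-arch addr_needs_map hooks become
 * redundant once the generic swiotlb code calls it directly.
 */
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	return dev->dma_mask && addr + size <= *dev->dma_mask;
}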
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/kernel/dma-swiotlb.c  27
1 file changed, 1 insertion, 26 deletions
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index 41534ae2f589..a3bbe02cda98 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -36,28 +36,11 @@ phys_addr_t swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr)
 }
 
 /*
- * Determine if an address needs bounce buffering via swiotlb.
- * Going forward I expect the swiotlb code to generalize on using
- * a dma_ops->addr_needs_map, and this function will move from here to the
- * generic swiotlb code.
- */
-int
-swiotlb_arch_address_needs_mapping(struct device *hwdev, dma_addr_t addr,
-				   size_t size)
-{
-	struct dma_mapping_ops *dma_ops = get_dma_ops(hwdev);
-
-	BUG_ON(!dma_ops);
-	return dma_ops->addr_needs_map(hwdev, addr, size);
-}
-
-/*
  * Determine if an address is reachable by a pci device, or if we must bounce.
  */
 static int
 swiotlb_pci_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size)
 {
-	u64 mask = dma_get_mask(hwdev);
 	dma_addr_t max;
 	struct pci_controller *hose;
 	struct pci_dev *pdev = to_pci_dev(hwdev);
@@ -69,16 +52,9 @@ swiotlb_pci_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size)
 	if ((addr + size > max) | (addr < hose->dma_window_base_cur))
 		return 1;
 
-	return !is_buffer_dma_capable(mask, addr, size);
-}
-
-static int
-swiotlb_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size)
-{
-	return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
+	return 0;
 }
 
-
 /*
  * At the moment, all platforms that use this code only require
  * swiotlb to be used if we're operating on HIGHMEM. Since
@@ -94,7 +70,6 @@ struct dma_mapping_ops swiotlb_dma_ops = {
 	.dma_supported = swiotlb_dma_supported,
 	.map_page = swiotlb_map_page,
 	.unmap_page = swiotlb_unmap_page,
-	.addr_needs_map = swiotlb_addr_needs_map,
 	.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
 	.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
 	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,