author     Milton Miller <miltonm@bga.com>                    2011-06-24 05:05:24 -0400
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>  2011-09-19 19:19:35 -0400
commit     d24f9c6999eacd3a7bc2b289e49fcb2bf2fafef2 (patch)
tree       66276ee7149e5eab4b7ec9785bad7bdf0564ea3d /arch
parent     3a8f7558e475b68254d8bc3a2211f3f89bf67a71 (diff)
powerpc: Use the newly added get_required_mask dma_map_ops hook
Now that the generic code has dma_map_ops set, push the computation into the dma ops instead of having a messy ifdef & if block in the base dma_get_required_mask hook. If the ops do not set the get_required_mask hook, default to the width of dma_addr_t.

This also corrects the ibmebus bus's ibmebus_dma_supported to require a 64-bit mask. I doubt anything is checking or setting the dma mask on that bus.

Signed-off-by: Milton Miller <miltonm@bga.com>
Signed-off-by: Nishanth Aravamudan <nacc@us.ibm.com>
Cc: linuxppc-dev@lists.ozlabs.org
Cc: linux-kernel@vger.kernel.org
Cc: benh@kernel.crashing.org
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
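For reference, the required-mask arithmetic added below in dma_direct_get_required_mask() and swiotlb_powerpc_get_required() widens the highest reachable DMA address to a full power-of-two window minus one. The following is a minimal userspace sketch of that computation, not kernel code: fls64_sketch(), required_mask() and the 4 GiB sample value are illustrative stand-ins.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's fls64(): 1-based index of the highest set bit. */
static int fls64_sketch(uint64_t x)
{
        return x ? 64 - __builtin_clzll(x) : 0;
}

/* Same arithmetic as the new hooks: turn 'end' (top of DRAM plus the
 * device's DMA offset) into an all-ones mask that covers that address. */
static uint64_t required_mask(uint64_t end)
{
        uint64_t mask = 1ULL << (fls64_sketch(end) - 1);
        mask += mask - 1;
        return mask;
}

int main(void)
{
        /* 4 GiB of DRAM needs a 33-bit mask: prints 0x1ffffffff. */
        printf("0x%llx\n", (unsigned long long)required_mask(0x100000000ULL));
        return 0;
}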
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/include/asm/device.h         2
-rw-r--r--  arch/powerpc/include/asm/dma-mapping.h    3
-rw-r--r--  arch/powerpc/kernel/dma-iommu.c           3
-rw-r--r--  arch/powerpc/kernel/dma-swiotlb.c        16
-rw-r--r--  arch/powerpc/kernel/dma.c                41
-rw-r--r--  arch/powerpc/kernel/ibmebus.c             8
-rw-r--r--  arch/powerpc/kernel/vio.c                 7
-rw-r--r--  arch/powerpc/platforms/cell/iommu.c      13
-rw-r--r--  arch/powerpc/platforms/ps3/system-bus.c   7
-rw-r--r--  arch/powerpc/platforms/pseries/iommu.c    2
10 files changed, 68 insertions(+), 34 deletions(-)
diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
index 16d25c0974be..d57c08acedfc 100644
--- a/arch/powerpc/include/asm/device.h
+++ b/arch/powerpc/include/asm/device.h
@@ -37,4 +37,6 @@ struct pdev_archdata {
         u64 dma_mask;
 };
 
+#define ARCH_HAS_DMA_GET_REQUIRED_MASK
+
 #endif /* _ASM_POWERPC_DEVICE_H */
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 8135e66a4bb9..dd70fac57ec8 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -20,8 +20,6 @@
 
 #define DMA_ERROR_CODE         (~(dma_addr_t)0x0)
 
-#define ARCH_HAS_DMA_GET_REQUIRED_MASK
-
 /* Some dma direct funcs must be visible for use in other dma_ops */
 extern void *dma_direct_alloc_coherent(struct device *dev, size_t size,
                                        dma_addr_t *dma_handle, gfp_t flag);
@@ -71,7 +69,6 @@ static inline unsigned long device_to_mask(struct device *dev)
  */
 #ifdef CONFIG_PPC64
 extern struct dma_map_ops dma_iommu_ops;
-extern u64 dma_iommu_get_required_mask(struct device *dev);
 #endif
 extern struct dma_map_ops dma_direct_ops;
 
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index 1f2a711a261e..c1ad9db934f6 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -90,7 +90,7 @@ static int dma_iommu_dma_supported(struct device *dev, u64 mask)
         return 1;
 }
 
-u64 dma_iommu_get_required_mask(struct device *dev)
+static u64 dma_iommu_get_required_mask(struct device *dev)
 {
         struct iommu_table *tbl = get_iommu_table_base(dev);
         u64 mask;
@@ -111,5 +111,6 @@ struct dma_map_ops dma_iommu_ops = {
         .dma_supported = dma_iommu_dma_supported,
         .map_page = dma_iommu_map_page,
         .unmap_page = dma_iommu_unmap_page,
+        .get_required_mask = dma_iommu_get_required_mask,
 };
 EXPORT_SYMBOL(dma_iommu_ops);
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index 4295e0b94b2d..1ebc9189aada 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -24,6 +24,21 @@
 
 unsigned int ppc_swiotlb_enable;
 
+static u64 swiotlb_powerpc_get_required(struct device *dev)
+{
+        u64 end, mask, max_direct_dma_addr = dev->archdata.max_direct_dma_addr;
+
+        end = memblock_end_of_DRAM();
+        if (max_direct_dma_addr && end > max_direct_dma_addr)
+                end = max_direct_dma_addr;
+        end += get_dma_offset(dev);
+
+        mask = 1ULL << (fls64(end) - 1);
+        mask += mask - 1;
+
+        return mask;
+}
+
 /*
  * At the moment, all platforms that use this code only require
  * swiotlb to be used if we're operating on HIGHMEM. Since
@@ -44,6 +59,7 @@ struct dma_map_ops swiotlb_dma_ops = {
         .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
         .sync_sg_for_device = swiotlb_sync_sg_for_device,
         .mapping_error = swiotlb_dma_mapping_error,
+        .get_required_mask = swiotlb_powerpc_get_required,
 };
 
 void pci_dma_dev_setup_swiotlb(struct pci_dev *pdev)
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 503093efa202..10b136afbf50 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -96,6 +96,18 @@ static int dma_direct_dma_supported(struct device *dev, u64 mask)
 #endif
 }
 
+static u64 dma_direct_get_required_mask(struct device *dev)
+{
+        u64 end, mask;
+
+        end = memblock_end_of_DRAM() + get_dma_offset(dev);
+
+        mask = 1ULL << (fls64(end) - 1);
+        mask += mask - 1;
+
+        return mask;
+}
+
 static inline dma_addr_t dma_direct_map_page(struct device *dev,
                                              struct page *page,
                                              unsigned long offset,
@@ -144,6 +156,7 @@ struct dma_map_ops dma_direct_ops = {
         .dma_supported = dma_direct_dma_supported,
         .map_page = dma_direct_map_page,
         .unmap_page = dma_direct_unmap_page,
+        .get_required_mask = dma_direct_get_required_mask,
 #ifdef CONFIG_NOT_COHERENT_CACHE
         .sync_single_for_cpu = dma_direct_sync_single,
         .sync_single_for_device = dma_direct_sync_single,
@@ -173,7 +186,6 @@ EXPORT_SYMBOL(dma_set_mask);
 u64 dma_get_required_mask(struct device *dev)
 {
         struct dma_map_ops *dma_ops = get_dma_ops(dev);
-        u64 mask, end = 0;
 
         if (ppc_md.dma_get_required_mask)
                 return ppc_md.dma_get_required_mask(dev);
@@ -181,31 +193,10 @@ u64 dma_get_required_mask(struct device *dev)
         if (unlikely(dma_ops == NULL))
                 return 0;
 
-#ifdef CONFIG_PPC64
-        else if (dma_ops == &dma_iommu_ops)
-                return dma_iommu_get_required_mask(dev);
-#endif
-#ifdef CONFIG_SWIOTLB
-        else if (dma_ops == &swiotlb_dma_ops) {
-                u64 max_direct_dma_addr = dev->archdata.max_direct_dma_addr;
-
-                end = memblock_end_of_DRAM();
-                if (max_direct_dma_addr && end > max_direct_dma_addr)
-                        end = max_direct_dma_addr;
-                end += get_dma_offset(dev);
-        }
-#endif
-        else if (dma_ops == &dma_direct_ops)
-                end = memblock_end_of_DRAM() + get_dma_offset(dev);
-        else {
-                WARN_ONCE(1, "%s: unknown ops %p\n", __func__, dma_ops);
-                end = memblock_end_of_DRAM();
-        }
+        if (dma_ops->get_required_mask)
+                return dma_ops->get_required_mask(dev);
 
-        mask = 1ULL << (fls64(end) - 1);
-        mask += mask - 1;
-
-        return mask;
+        return DMA_BIT_MASK(8 * sizeof(dma_addr_t));
 }
 EXPORT_SYMBOL_GPL(dma_get_required_mask);
 
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index 28581f1ad2c0..90ef2a44613b 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -125,7 +125,12 @@ static void ibmebus_unmap_sg(struct device *dev,
 
 static int ibmebus_dma_supported(struct device *dev, u64 mask)
 {
-        return 1;
+        return mask == DMA_BIT_MASK(64);
+}
+
+static u64 ibmebus_dma_get_required_mask(struct device *dev)
+{
+        return DMA_BIT_MASK(64);
 }
 
 static struct dma_map_ops ibmebus_dma_ops = {
@@ -134,6 +139,7 @@ static struct dma_map_ops ibmebus_dma_ops = {
         .map_sg = ibmebus_map_sg,
         .unmap_sg = ibmebus_unmap_sg,
         .dma_supported = ibmebus_dma_supported,
+        .get_required_mask = ibmebus_dma_get_required_mask,
         .map_page = ibmebus_map_page,
         .unmap_page = ibmebus_unmap_page,
 };
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 1b695fdc362b..c0493259d133 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -605,6 +605,11 @@ static int vio_dma_iommu_dma_supported(struct device *dev, u64 mask)
         return dma_iommu_ops.dma_supported(dev, mask);
 }
 
+static u64 vio_dma_get_required_mask(struct device *dev)
+{
+        return dma_iommu_ops.get_required_mask(dev);
+}
+
 struct dma_map_ops vio_dma_mapping_ops = {
         .alloc_coherent = vio_dma_iommu_alloc_coherent,
         .free_coherent = vio_dma_iommu_free_coherent,
@@ -613,7 +618,7 @@ struct dma_map_ops vio_dma_mapping_ops = {
         .map_page = vio_dma_iommu_map_page,
         .unmap_page = vio_dma_iommu_unmap_page,
         .dma_supported = vio_dma_iommu_dma_supported,
-
+        .get_required_mask = vio_dma_get_required_mask,
 };
 
 /**
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 5ef55f3b0987..fc46fcac3921 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -1161,11 +1161,20 @@ __setup("iommu_fixed=", setup_iommu_fixed);
 
 static u64 cell_dma_get_required_mask(struct device *dev)
 {
+        struct dma_map_ops *dma_ops;
+
         if (!dev->dma_mask)
                 return 0;
 
-        if (iommu_fixed_disabled && get_dma_ops(dev) == &dma_iommu_ops)
-                return dma_iommu_get_required_mask(dev);
+        if (!iommu_fixed_disabled &&
+            cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR)
+                return DMA_BIT_MASK(64);
+
+        dma_ops = get_dma_ops(dev);
+        if (dma_ops->get_required_mask)
+                return dma_ops->get_required_mask(dev);
+
+        WARN_ONCE(1, "no get_required_mask in %p ops", dma_ops);
 
         return DMA_BIT_MASK(64);
 }
diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
index 23083c397528..688141c76e03 100644
--- a/arch/powerpc/platforms/ps3/system-bus.c
+++ b/arch/powerpc/platforms/ps3/system-bus.c
@@ -695,12 +695,18 @@ static int ps3_dma_supported(struct device *_dev, u64 mask)
         return mask >= DMA_BIT_MASK(32);
 }
 
+static u64 ps3_dma_get_required_mask(struct device *_dev)
+{
+        return DMA_BIT_MASK(32);
+}
+
 static struct dma_map_ops ps3_sb_dma_ops = {
         .alloc_coherent = ps3_alloc_coherent,
         .free_coherent = ps3_free_coherent,
         .map_sg = ps3_sb_map_sg,
         .unmap_sg = ps3_sb_unmap_sg,
         .dma_supported = ps3_dma_supported,
+        .get_required_mask = ps3_dma_get_required_mask,
         .map_page = ps3_sb_map_page,
         .unmap_page = ps3_unmap_page,
 };
@@ -711,6 +717,7 @@ static struct dma_map_ops ps3_ioc0_dma_ops = {
         .map_sg = ps3_ioc0_map_sg,
         .unmap_sg = ps3_ioc0_unmap_sg,
         .dma_supported = ps3_dma_supported,
+        .get_required_mask = ps3_dma_get_required_mask,
         .map_page = ps3_ioc0_map_page,
         .unmap_page = ps3_unmap_page,
 };
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index fe5ededf0d60..9f121a37eb51 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -1099,7 +1099,7 @@ static u64 dma_get_required_mask_pSeriesLP(struct device *dev)
                         return DMA_BIT_MASK(64);
         }
 
-        return dma_iommu_get_required_mask(dev);
+        return dma_iommu_ops.get_required_mask(dev);
 }
 
 #else /* CONFIG_PCI */