Diffstat (limited to 'arch/powerpc/kernel/dma.c')
-rw-r--r--	arch/powerpc/kernel/dma.c	47
1 file changed, 43 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index ee78f6e49d64..adac9dc54aee 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -15,6 +15,7 @@
 #include <asm/vio.h>
 #include <asm/bug.h>
 #include <asm/machdep.h>
+#include <asm/swiotlb.h>
 
 /*
  * Generic direct DMA implementation
@@ -25,6 +26,18 @@
  * default the offset is PCI_DRAM_OFFSET.
  */
 
+static u64 __maybe_unused get_pfn_limit(struct device *dev)
+{
+	u64 pfn = (dev->coherent_dma_mask >> PAGE_SHIFT) + 1;
+	struct dev_archdata __maybe_unused *sd = &dev->archdata;
+
+#ifdef CONFIG_SWIOTLB
+	if (sd->max_direct_dma_addr && sd->dma_ops == &swiotlb_dma_ops)
+		pfn = min_t(u64, pfn, sd->max_direct_dma_addr >> PAGE_SHIFT);
+#endif
+
+	return pfn;
+}
 
 void *dma_direct_alloc_coherent(struct device *dev, size_t size,
 				dma_addr_t *dma_handle, gfp_t flag,
@@ -40,6 +53,26 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size,
 #else
 	struct page *page;
 	int node = dev_to_node(dev);
+	u64 pfn = get_pfn_limit(dev);
+	int zone;
+
+	zone = dma_pfn_limit_to_zone(pfn);
+	if (zone < 0) {
+		dev_err(dev, "%s: No suitable zone for pfn %#llx\n",
+			__func__, pfn);
+		return NULL;
+	}
+
+	switch (zone) {
+	case ZONE_DMA:
+		flag |= GFP_DMA;
+		break;
+#ifdef CONFIG_ZONE_DMA32
+	case ZONE_DMA32:
+		flag |= GFP_DMA32;
+		break;
+#endif
+	};
 
 	/* ignore region specifiers */
 	flag &= ~(__GFP_HIGHMEM);
@@ -202,6 +235,7 @@ int __dma_set_mask(struct device *dev, u64 dma_mask)
 	*dev->dma_mask = dma_mask;
 	return 0;
 }
+
 int dma_set_mask(struct device *dev, u64 dma_mask)
 {
 	if (ppc_md.dma_set_mask)
@@ -210,13 +244,10 @@ int dma_set_mask(struct device *dev, u64 dma_mask)
 }
 EXPORT_SYMBOL(dma_set_mask);
 
-u64 dma_get_required_mask(struct device *dev)
+u64 __dma_get_required_mask(struct device *dev)
 {
 	struct dma_map_ops *dma_ops = get_dma_ops(dev);
 
-	if (ppc_md.dma_get_required_mask)
-		return ppc_md.dma_get_required_mask(dev);
-
 	if (unlikely(dma_ops == NULL))
 		return 0;
 
@@ -225,6 +256,14 @@ u64 dma_get_required_mask(struct device *dev)
 
 	return DMA_BIT_MASK(8 * sizeof(dma_addr_t));
 }
+
+u64 dma_get_required_mask(struct device *dev)
+{
+	if (ppc_md.dma_get_required_mask)
+		return ppc_md.dma_get_required_mask(dev);
+
+	return __dma_get_required_mask(dev);
+}
 EXPORT_SYMBOL_GPL(dma_get_required_mask);
 
 static int __init dma_init(void)
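
For readers following the new allocation path: get_pfn_limit() turns the device's coherent DMA mask (capped by the SWIOTLB direct-DMA limit when applicable) into a page-frame limit, and dma_pfn_limit_to_zone(), whose implementation is not shown in this diff, maps that limit to a memory zone so dma_direct_alloc_coherent() can set GFP_DMA or GFP_DMA32. A rough, standalone sketch of that flow is below; the zone boundary PFNs, the max_ram_pfn parameter, and the helper names pfn_limit()/pfn_limit_to_zone() are illustrative placeholders, not the kernel's actual values or functions.

/*
 * Illustrative only: mirrors the mask -> PFN limit -> zone selection that
 * get_pfn_limit() and dma_pfn_limit_to_zone() perform in this patch.  The
 * zone boundaries and max_ram_pfn below are made-up placeholders.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define MAX_DMA_PFN	(1ULL << (31 - PAGE_SHIFT))	/* placeholder: 2GB */
#define MAX_DMA32_PFN	(1ULL << (32 - PAGE_SHIFT))	/* placeholder: 4GB */

enum { ZONE_DMA, ZONE_DMA32, ZONE_NORMAL };

/* Analogue of get_pfn_limit(): first PFN the device cannot address. */
static uint64_t pfn_limit(uint64_t coherent_dma_mask)
{
	return (coherent_dma_mask >> PAGE_SHIFT) + 1;
}

/*
 * Rough analogue of dma_pfn_limit_to_zone(): pick the largest zone whose
 * memory lies entirely below the device's PFN limit, or fail (< 0) if
 * even ZONE_DMA does not fit.
 */
static int pfn_limit_to_zone(uint64_t limit, uint64_t max_ram_pfn)
{
	if (limit >= max_ram_pfn)
		return ZONE_NORMAL;
	if (limit >= MAX_DMA32_PFN)
		return ZONE_DMA32;
	if (limit >= MAX_DMA_PFN)
		return ZONE_DMA;
	return -1;
}

int main(void)
{
	uint64_t mask = (1ULL << 31) - 1;			/* a 31-bit capable device */
	uint64_t max_ram_pfn = 1ULL << (34 - PAGE_SHIFT);	/* pretend 16GB of RAM */
	uint64_t limit = pfn_limit(mask);

	/* Same shape as the switch added to dma_direct_alloc_coherent(). */
	switch (pfn_limit_to_zone(limit, max_ram_pfn)) {
	case ZONE_DMA:
		printf("allocate with GFP_DMA (pfn limit %#llx)\n",
		       (unsigned long long)limit);
		break;
	case ZONE_DMA32:
		printf("allocate with GFP_DMA32 (pfn limit %#llx)\n",
		       (unsigned long long)limit);
		break;
	case ZONE_NORMAL:
		printf("no zone restriction needed\n");
		break;
	default:
		printf("no suitable zone for pfn limit %#llx\n",
		       (unsigned long long)limit);
	}
	return 0;
}

The key invariant is that the chosen zone's memory must lie entirely below the device's addressable limit; if no zone fits, the allocation cannot be satisfied, which is why the patched dma_direct_alloc_coherent() returns NULL when the zone lookup comes back negative.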