Diffstat (limited to 'arch/arm/mm/dma-mapping.c')
 arch/arm/mm/dma-mapping.c | 94 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++----------------------
 1 file changed, 72 insertions(+), 22 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 510c179b0ac8..b30925fcbcdc 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -36,7 +36,34 @@
 #define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
 #define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)
 
+static u64 get_coherent_dma_mask(struct device *dev)
+{
+	u64 mask = ISA_DMA_THRESHOLD;
+
+	if (dev) {
+		mask = dev->coherent_dma_mask;
+
+		/*
+		 * Sanity check the DMA mask - it must be non-zero, and
+		 * must be able to be satisfied by a DMA allocation.
+		 */
+		if (mask == 0) {
+			dev_warn(dev, "coherent DMA mask is unset\n");
+			return 0;
+		}
+
+		if ((~mask) & ISA_DMA_THRESHOLD) {
+			dev_warn(dev, "coherent DMA mask %#llx is smaller "
+				"than system GFP_DMA mask %#llx\n",
+				mask, (unsigned long long)ISA_DMA_THRESHOLD);
+			return 0;
+		}
+	}
 
+	return mask;
+}
+
+#ifdef CONFIG_MMU
 /*
  * These are the page tables (2MB each) covering uncached, DMA consistent allocations
  */
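
Note: get_coherent_dma_mask() lifts the per-device mask checks out of __dma_alloc() so that the MMU and the new nommu allocation paths below can share them; it returns 0 when the device's coherent mask is unset or cannot be satisfied from the ARM DMA zone. For context, a minimal caller-side sketch (example_probe() is hypothetical, not part of this patch):

#include <linux/dma-mapping.h>

/*
 * Hypothetical probe fragment: the helper above only inspects
 * dev->coherent_dma_mask, so a driver must set its mask before the
 * first coherent allocation, or the "mask is unset" warning fires.
 */
static int example_probe(struct device *dev)
{
	dma_addr_t dma_handle;
	void *cpu_addr;

	/* This device can address the low 32 bits of the bus. */
	dev->coherent_dma_mask = DMA_BIT_MASK(32);

	cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &dma_handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... program the device with dma_handle, use cpu_addr ... */
	dma_free_coherent(dev, PAGE_SIZE, cpu_addr, dma_handle);
	return 0;
}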
@@ -152,7 +179,8 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 	struct page *page;
 	struct arm_vm_region *c;
 	unsigned long order;
-	u64 mask = ISA_DMA_THRESHOLD, limit;
+	u64 mask = get_coherent_dma_mask(dev);
+	u64 limit;
 
 	if (!consistent_pte[0]) {
 		printk(KERN_ERR "%s: not initialised\n", __func__);
@@ -160,25 +188,8 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 		return NULL;
 	}
 
-	if (dev) {
-		mask = dev->coherent_dma_mask;
-
-		/*
-		 * Sanity check the DMA mask - it must be non-zero, and
-		 * must be able to be satisfied by a DMA allocation.
-		 */
-		if (mask == 0) {
-			dev_warn(dev, "coherent DMA mask is unset\n");
-			goto no_page;
-		}
-
-		if ((~mask) & ISA_DMA_THRESHOLD) {
-			dev_warn(dev, "coherent DMA mask %#llx is smaller "
-				"than system GFP_DMA mask %#llx\n",
-				mask, (unsigned long long)ISA_DMA_THRESHOLD);
-			goto no_page;
-		}
-	}
+	if (!mask)
+		goto no_page;
 
 	/*
 	 * Sanity check the allocation size.
@@ -267,6 +278,31 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 	*handle = ~0;
 	return NULL;
 }
+#else	/* !CONFIG_MMU */
+static void *
+__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
+	    pgprot_t prot)
+{
+	void *virt;
+	u64 mask = get_coherent_dma_mask(dev);
+
+	if (!mask)
+		goto error;
+
+	if (mask != 0xffffffff)
+		gfp |= GFP_DMA;
+	virt = kmalloc(size, gfp);
+	if (!virt)
+		goto error;
+
+	*handle = virt_to_dma(dev, virt);
+	return virt;
+
+error:
+	*handle = ~0;
+	return NULL;
+}
+#endif	/* CONFIG_MMU */
 
 /*
  * Allocate DMA-coherent memory space and return both the kernel remapped
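
On !CONFIG_MMU there is no remap region, so this variant serves the request straight from kmalloc(), steering allocations the mask cannot cover into ZONE_DMA via GFP_DMA, and derives the bus address directly from the returned pointer. A sketch of the invariant being relied on (illustrative only; assumes the platform's flat nommu memory map):

/*
 * Hypothetical check, not in the patch: with no MMU the kmalloc()
 * pointer is already device-visible, so the MMU path's uncached
 * remapping step can be skipped; dma_to_virt() must invert
 * virt_to_dma() for the matching kfree() in dma_free_coherent().
 */
static void example_nommu_roundtrip(struct device *dev, void *virt)
{
	dma_addr_t handle = virt_to_dma(dev, virt);

	BUG_ON(dma_to_virt(dev, handle) != virt);
}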
@@ -311,9 +347,10 @@ EXPORT_SYMBOL(dma_alloc_writecombine);
 static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
 		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
 {
+	int ret = -ENXIO;
+#ifdef CONFIG_MMU
 	unsigned long flags, user_size, kern_size;
 	struct arm_vm_region *c;
-	int ret = -ENXIO;
 
 	user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 
@@ -334,6 +371,7 @@ static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
 					      vma->vm_page_prot);
 		}
 	}
+#endif	/* CONFIG_MMU */
 
 	return ret;
 }
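
With ret initialised to -ENXIO and the region lookup compiled out, dma_mmap() now always fails on nommu kernels; that is intentional, since remapping a coherent buffer into userspace requires the MMU. A caller sees an ordinary mmap failure, as in this hypothetical driver fragment (example_dev and example_mmap_handler are illustrative, not from this patch):

struct example_dev {			/* illustrative driver-private state */
	struct device *dev;
	void *cpu_addr;
	dma_addr_t dma_handle;
	size_t size;
};

static int example_mmap_handler(struct example_dev *ed,
				struct vm_area_struct *vma)
{
	/* Returns -ENXIO on nommu ARM; propagated to userspace. */
	return dma_mmap_writecombine(ed->dev, vma, ed->cpu_addr,
				     ed->dma_handle, ed->size);
}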
@@ -358,6 +396,7 @@ EXPORT_SYMBOL(dma_mmap_writecombine);
  * free a page as defined by the above mapping.
  * Must not be called with IRQs disabled.
  */
+#ifdef CONFIG_MMU
 void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
 {
 	struct arm_vm_region *c;
@@ -444,6 +483,14 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
 	       __func__, cpu_addr);
 	dump_stack();
 }
+#else	/* !CONFIG_MMU */
+void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
+{
+	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
+		return;
+	kfree(cpu_addr);
+}
+#endif	/* CONFIG_MMU */
 EXPORT_SYMBOL(dma_free_coherent);
 
 /*
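
The nommu free path mirrors the nommu __dma_alloc() above: the buffer is ordinary kmalloc() memory, so once dma_release_from_coherent() declines it, kfree() is the correct release. dma_release_from_coherent() only claims buffers that came from a per-device coherent region; a sketch of how such a region is declared (EXAMPLE_SRAM_BUS and EXAMPLE_SRAM_SIZE are made-up constants, not from this patch):

#include <linux/dma-mapping.h>

#define EXAMPLE_SRAM_BUS	0x40000000	/* made-up bus address */
#define EXAMPLE_SRAM_SIZE	0x10000		/* made-up region size */

/*
 * Hypothetical platform hook: hand the device a dedicated coherent
 * region. Buffers served from it are later reclaimed by
 * dma_release_from_coherent() rather than by kfree().
 */
static int example_declare_region(struct device *dev)
{
	if (!dma_declare_coherent_memory(dev, EXAMPLE_SRAM_BUS,
					 EXAMPLE_SRAM_BUS,
					 EXAMPLE_SRAM_SIZE,
					 DMA_MEMORY_MAP))
		return -ENODEV;
	return 0;
}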
@@ -451,10 +498,12 @@ EXPORT_SYMBOL(dma_free_coherent);
  */
 static int __init consistent_init(void)
 {
+	int ret = 0;
+#ifdef CONFIG_MMU
 	pgd_t *pgd;
 	pmd_t *pmd;
 	pte_t *pte;
-	int ret = 0, i = 0;
+	int i = 0;
 	u32 base = CONSISTENT_BASE;
 
 	do {
@@ -477,6 +526,7 @@ static int __init consistent_init(void)
 		consistent_pte[i++] = pte;
 		base += (1 << PGDIR_SHIFT);
 	} while (base < CONSISTENT_END);
+#endif	/* !CONFIG_MMU */
 
 	return ret;
 }
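
On nommu builds consistent_init() is now effectively a no-op that still returns 0: there is no consistent remap region, so no page tables need populating. For reference, the function is registered in this same file as an early initcall (this line is unchanged by the patch, shown for context):

core_initcall(consistent_init);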