author		Russell King <rmk+kernel@arm.linux.org.uk>	2009-11-19 10:07:04 -0500
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2009-11-24 12:41:34 -0500
commit		13ccf3ad99a45052664f2c1a6c64899f9d778152 (patch)
tree		6e8f43fcb8e7f1c266b8c13a670c206bbf34bc96 /arch/arm/mm/dma-mapping.c
parent		ac50e950784cae1c26ad9e09ebd8f8c706131eb3 (diff)
ARM: dma-mapping: split out vmregion code from dma coherent mapping code
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Acked-by: Greg Ungerer <gerg@uclinux.org>
Diffstat (limited to 'arch/arm/mm/dma-mapping.c')
-rw-r--r--	arch/arm/mm/dma-mapping.c	132
1 file changed, 13 insertions(+), 119 deletions(-)
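
The split-out vmregion.h is not shown in this diff. As a reading aid, here is
a hedged sketch of the interface it presumably declares, inferred only from
the call sites and the consistent_head initialiser below: the arm_vmregion
layout mirrors the deleted arm_vm_region, but the exact header contents are an
assumption, not part of this commit.

/* Sketch of vmregion.h, inferred from this diff -- not the real header. */
#include <linux/list.h>
#include <linux/spinlock.h>

struct arm_vmregion_head {
	spinlock_t		vm_lock;	/* replaces the file-local consistent_lock */
	struct list_head	vm_list;	/* address-sorted list of regions */
	unsigned long		vm_start;	/* bottom of the arena */
	unsigned long		vm_end;		/* top of the arena */
};

struct arm_vmregion {
	struct list_head	vm_list;
	unsigned long		vm_start;
	unsigned long		vm_end;
	struct page		*vm_pages;
	int			vm_active;
};

/* First-fit allocation of size bytes inside [vm_start, vm_end). */
struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *head,
					size_t size, gfp_t gfp);
/* Find the active region starting at addr, or return NULL. */
struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *head,
				       unsigned long addr);
/* As find(), but atomically mark the region inactive under vm_lock. */
struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *head,
					      unsigned long addr);
/* Unlink and kfree() a region obtained from alloc()/find_remove(). */
void arm_vmregion_free(struct arm_vmregion_head *head,
		       struct arm_vmregion *c);
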
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index b9590a7085ca..c54f1acf92c8 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -68,106 +68,16 @@ static u64 get_coherent_dma_mask(struct device *dev)
  * These are the page tables (2MB each) covering uncached, DMA consistent allocations
  */
 static pte_t *consistent_pte[NUM_CONSISTENT_PTES];
-static DEFINE_SPINLOCK(consistent_lock);
 
-/*
- * VM region handling support.
- *
- * This should become something generic, handling VM region allocations for
- * vmalloc and similar (ioremap, module space, etc).
- *
- * I envisage vmalloc()'s supporting vm_struct becoming:
- *
- *  struct vm_struct {
- *    struct vm_region	region;
- *    unsigned long	flags;
- *    struct page	**pages;
- *    unsigned int	nr_pages;
- *    unsigned long	phys_addr;
- *  };
- *
- * get_vm_area() would then call vm_region_alloc with an appropriate
- * struct vm_region head (eg):
- *
- *  struct vm_region vmalloc_head = {
- *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
- *	.vm_start	= VMALLOC_START,
- *	.vm_end		= VMALLOC_END,
- *  };
- *
- * However, vmalloc_head.vm_start is variable (typically, it is dependent on
- * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
- * would have to initialise this each time prior to calling vm_region_alloc().
- */
-struct arm_vm_region {
-	struct list_head	vm_list;
-	unsigned long		vm_start;
-	unsigned long		vm_end;
-	struct page		*vm_pages;
-	int			vm_active;
-};
+#include "vmregion.h"
 
-static struct arm_vm_region consistent_head = {
+static struct arm_vmregion_head consistent_head = {
+	.vm_lock	= __SPIN_LOCK_UNLOCKED(&consistent_head.vm_lock),
 	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
 	.vm_start	= CONSISTENT_BASE,
 	.vm_end		= CONSISTENT_END,
 };
 
-static struct arm_vm_region *
-arm_vm_region_alloc(struct arm_vm_region *head, size_t size, gfp_t gfp)
-{
-	unsigned long addr = head->vm_start, end = head->vm_end - size;
-	unsigned long flags;
-	struct arm_vm_region *c, *new;
-
-	new = kmalloc(sizeof(struct arm_vm_region), gfp);
-	if (!new)
-		goto out;
-
-	spin_lock_irqsave(&consistent_lock, flags);
-
-	list_for_each_entry(c, &head->vm_list, vm_list) {
-		if ((addr + size) < addr)
-			goto nospc;
-		if ((addr + size) <= c->vm_start)
-			goto found;
-		addr = c->vm_end;
-		if (addr > end)
-			goto nospc;
-	}
-
- found:
-	/*
-	 * Insert this entry _before_ the one we found.
-	 */
-	list_add_tail(&new->vm_list, &c->vm_list);
-	new->vm_start = addr;
-	new->vm_end = addr + size;
-	new->vm_active = 1;
-
-	spin_unlock_irqrestore(&consistent_lock, flags);
-	return new;
-
- nospc:
-	spin_unlock_irqrestore(&consistent_lock, flags);
-	kfree(new);
- out:
-	return NULL;
-}
-
-static struct arm_vm_region *arm_vm_region_find(struct arm_vm_region *head, unsigned long addr)
-{
-	struct arm_vm_region *c;
-
-	list_for_each_entry(c, &head->vm_list, vm_list) {
-		if (c->vm_active && c->vm_start == addr)
-			goto out;
-	}
-	c = NULL;
- out:
-	return c;
-}
-
 #ifdef CONFIG_HUGETLB_PAGE
 #error ARM Coherent DMA allocator does not (yet) support huge TLB
 #endif
@@ -177,7 +87,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 	    pgprot_t prot)
 {
 	struct page *page;
-	struct arm_vm_region *c;
+	struct arm_vmregion *c;
 	unsigned long order;
 	u64 mask = get_coherent_dma_mask(dev);
 	u64 limit;
@@ -191,13 +101,9 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 	if (!mask)
 		goto no_page;
 
-	/*
-	 * Sanity check the allocation size.
-	 */
 	size = PAGE_ALIGN(size);
 	limit = (mask + 1) & ~mask;
-	if ((limit && size >= limit) ||
-	    size >= (CONSISTENT_END - CONSISTENT_BASE)) {
+	if (limit && size >= limit) {
 		printk(KERN_WARNING "coherent allocation too big "
 		       "(requested %#x mask %#llx)\n", size, mask);
 		goto no_page;
@@ -226,7 +132,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 	/*
 	 * Allocate a virtual address in the consistent mapping region.
 	 */
-	c = arm_vm_region_alloc(&consistent_head, size,
+	c = arm_vmregion_alloc(&consistent_head, size,
 			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
 	if (c) {
 		pte_t *pte;
@@ -349,15 +255,12 @@ static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
 {
 	int ret = -ENXIO;
 #ifdef CONFIG_MMU
-	unsigned long flags, user_size, kern_size;
-	struct arm_vm_region *c;
+	unsigned long user_size, kern_size;
+	struct arm_vmregion *c;
 
 	user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 
-	spin_lock_irqsave(&consistent_lock, flags);
-	c = arm_vm_region_find(&consistent_head, (unsigned long)cpu_addr);
-	spin_unlock_irqrestore(&consistent_lock, flags);
-
+	c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
 	if (c) {
 		unsigned long off = vma->vm_pgoff;
 
@@ -399,8 +302,8 @@ EXPORT_SYMBOL(dma_mmap_writecombine);
 #ifdef CONFIG_MMU
 void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
 {
-	struct arm_vm_region *c;
-	unsigned long flags, addr;
+	struct arm_vmregion *c;
+	unsigned long addr;
 	pte_t *ptep;
 	int idx;
 	u32 off;
@@ -417,14 +320,10 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
 
 	size = PAGE_ALIGN(size);
 
-	spin_lock_irqsave(&consistent_lock, flags);
-	c = arm_vm_region_find(&consistent_head, (unsigned long)cpu_addr);
+	c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr);
 	if (!c)
 		goto no_area;
 
-	c->vm_active = 0;
-	spin_unlock_irqrestore(&consistent_lock, flags);
-
 	if ((c->vm_end - c->vm_start) != size) {
 		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
 		       __func__, c->vm_end - c->vm_start, size);
@@ -470,15 +369,10 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
 
 	flush_tlb_kernel_range(c->vm_start, c->vm_end);
 
-	spin_lock_irqsave(&consistent_lock, flags);
-	list_del(&c->vm_list);
-	spin_unlock_irqrestore(&consistent_lock, flags);
-
-	kfree(c);
+	arm_vmregion_free(&consistent_head, c);
 	return;
 
  no_area:
-	spin_unlock_irqrestore(&consistent_lock, flags);
 	printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
 	       __func__, cpu_addr);
 	dump_stack();
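
The companion vmregion.c is likewise absent from this diff, but the removed
code above shows what moved into it. A hedged sketch of how
arm_vmregion_find_remove() and arm_vmregion_free() could reproduce the
deleted locking sequence, assuming the open-coded logic was transplanted
unchanged behind head->vm_lock (a reconstruction, not the committed file):

/* Hypothetical reconstruction -- vmregion.c itself is not in this diff. */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include "vmregion.h"

struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *head,
					      unsigned long addr)
{
	unsigned long flags;
	struct arm_vmregion *c;

	spin_lock_irqsave(&head->vm_lock, flags);
	list_for_each_entry(c, &head->vm_list, vm_list) {
		if (c->vm_active && c->vm_start == addr) {
			/*
			 * Deactivate under the lock, as the old free path
			 * did, so a racing free cannot find it again.
			 */
			c->vm_active = 0;
			goto out;
		}
	}
	c = NULL;
 out:
	spin_unlock_irqrestore(&head->vm_lock, flags);
	return c;
}

void arm_vmregion_free(struct arm_vmregion_head *head, struct arm_vmregion *c)
{
	unsigned long flags;

	/* Equivalent of the deleted list_del()+kfree() under consistent_lock. */
	spin_lock_irqsave(&head->vm_lock, flags);
	list_del(&c->vm_list);
	spin_unlock_irqrestore(&head->vm_lock, flags);

	kfree(c);
}

Folding the lock into arm_vmregion_head is what lets dma_free_coherent() and
dma_mmap() drop their 'flags' locals: each former consistent_lock critical
section is now a single self-locking vmregion call.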