Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/mm/Makefile       |   2
-rw-r--r--  arch/arm/mm/dma-mapping.c  | 132
-rw-r--r--  arch/arm/mm/vmregion.c     | 131
-rw-r--r--  arch/arm/mm/vmregion.h     |  29
4 files changed, 174 insertions, 120 deletions
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 055cb2aa8134..42352e75742b 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -6,7 +6,7 @@ obj-y := dma-mapping.o extable.o fault.o init.o \
 				   iomap.o
 
 obj-$(CONFIG_MMU)		+= fault-armv.o flush.o ioremap.o mmap.o \
-				   pgd.o mmu.o
+				   pgd.o mmu.o vmregion.o
 
 ifneq ($(CONFIG_MMU),y)
 obj-y				+= nommu.o
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index b9590a7085ca..c54f1acf92c8 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -68,106 +68,16 @@ static u64 get_coherent_dma_mask(struct device *dev)
  * These are the page tables (2MB each) covering uncached, DMA consistent allocations
  */
 static pte_t *consistent_pte[NUM_CONSISTENT_PTES];
-static DEFINE_SPINLOCK(consistent_lock);
 
-/*
- * VM region handling support.
- *
- * This should become something generic, handling VM region allocations for
- * vmalloc and similar (ioremap, module space, etc).
- *
- * I envisage vmalloc()'s supporting vm_struct becoming:
- *
- *  struct vm_struct {
- *    struct vm_region	region;
- *    unsigned long	flags;
- *    struct page	**pages;
- *    unsigned int	nr_pages;
- *    unsigned long	phys_addr;
- *  };
- *
- * get_vm_area() would then call vm_region_alloc with an appropriate
- * struct vm_region head (eg):
- *
- *  struct vm_region vmalloc_head = {
- *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
- *	.vm_start	= VMALLOC_START,
- *	.vm_end		= VMALLOC_END,
- *  };
- *
- * However, vmalloc_head.vm_start is variable (typically, it is dependent on
- * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
- * would have to initialise this each time prior to calling vm_region_alloc().
- */
-struct arm_vm_region {
-	struct list_head	vm_list;
-	unsigned long		vm_start;
-	unsigned long		vm_end;
-	struct page		*vm_pages;
-	int			vm_active;
-};
+#include "vmregion.h"
 
-static struct arm_vm_region consistent_head = {
+static struct arm_vmregion_head consistent_head = {
+	.vm_lock	= __SPIN_LOCK_UNLOCKED(&consistent_head.vm_lock),
 	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
 	.vm_start	= CONSISTENT_BASE,
 	.vm_end		= CONSISTENT_END,
 };
 
-static struct arm_vm_region *
-arm_vm_region_alloc(struct arm_vm_region *head, size_t size, gfp_t gfp)
-{
-	unsigned long addr = head->vm_start, end = head->vm_end - size;
-	unsigned long flags;
-	struct arm_vm_region *c, *new;
-
-	new = kmalloc(sizeof(struct arm_vm_region), gfp);
-	if (!new)
-		goto out;
-
-	spin_lock_irqsave(&consistent_lock, flags);
-
-	list_for_each_entry(c, &head->vm_list, vm_list) {
-		if ((addr + size) < addr)
-			goto nospc;
-		if ((addr + size) <= c->vm_start)
-			goto found;
-		addr = c->vm_end;
-		if (addr > end)
-			goto nospc;
-	}
-
- found:
-	/*
-	 * Insert this entry _before_ the one we found.
-	 */
-	list_add_tail(&new->vm_list, &c->vm_list);
-	new->vm_start = addr;
-	new->vm_end = addr + size;
-	new->vm_active = 1;
-
-	spin_unlock_irqrestore(&consistent_lock, flags);
-	return new;
-
- nospc:
-	spin_unlock_irqrestore(&consistent_lock, flags);
-	kfree(new);
- out:
-	return NULL;
-}
-
-static struct arm_vm_region *arm_vm_region_find(struct arm_vm_region *head, unsigned long addr)
-{
-	struct arm_vm_region *c;
-
-	list_for_each_entry(c, &head->vm_list, vm_list) {
-		if (c->vm_active && c->vm_start == addr)
-			goto out;
-	}
-	c = NULL;
- out:
-	return c;
-}
-
 #ifdef CONFIG_HUGETLB_PAGE
 #error ARM Coherent DMA allocator does not (yet) support huge TLB
 #endif
@@ -177,7 +87,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 	    pgprot_t prot)
 {
 	struct page *page;
-	struct arm_vm_region *c;
+	struct arm_vmregion *c;
 	unsigned long order;
 	u64 mask = get_coherent_dma_mask(dev);
 	u64 limit;
@@ -191,13 +101,9 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 	if (!mask)
 		goto no_page;
 
-	/*
-	 * Sanity check the allocation size.
-	 */
 	size = PAGE_ALIGN(size);
 	limit = (mask + 1) & ~mask;
-	if ((limit && size >= limit) ||
-	     size >= (CONSISTENT_END - CONSISTENT_BASE)) {
+	if (limit && size >= limit) {
 		printk(KERN_WARNING "coherent allocation too big "
 		       "(requested %#x mask %#llx)\n", size, mask);
 		goto no_page;
@@ -226,7 +132,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 	/*
 	 * Allocate a virtual address in the consistent mapping region.
 	 */
-	c = arm_vm_region_alloc(&consistent_head, size,
+	c = arm_vmregion_alloc(&consistent_head, size,
 			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
 	if (c) {
 		pte_t *pte;
@@ -349,15 +255,12 @@ static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
 {
 	int ret = -ENXIO;
 #ifdef CONFIG_MMU
-	unsigned long flags, user_size, kern_size;
-	struct arm_vm_region *c;
+	unsigned long user_size, kern_size;
+	struct arm_vmregion *c;
 
 	user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 
-	spin_lock_irqsave(&consistent_lock, flags);
-	c = arm_vm_region_find(&consistent_head, (unsigned long)cpu_addr);
-	spin_unlock_irqrestore(&consistent_lock, flags);
-
+	c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
 	if (c) {
 		unsigned long off = vma->vm_pgoff;
 
@@ -399,8 +302,8 @@ EXPORT_SYMBOL(dma_mmap_writecombine);
 #ifdef CONFIG_MMU
 void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
 {
-	struct arm_vm_region *c;
-	unsigned long flags, addr;
+	struct arm_vmregion *c;
+	unsigned long addr;
 	pte_t *ptep;
 	int idx;
 	u32 off;
@@ -417,14 +320,10 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
 
 	size = PAGE_ALIGN(size);
 
-	spin_lock_irqsave(&consistent_lock, flags);
-	c = arm_vm_region_find(&consistent_head, (unsigned long)cpu_addr);
+	c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr);
 	if (!c)
 		goto no_area;
 
-	c->vm_active = 0;
-	spin_unlock_irqrestore(&consistent_lock, flags);
-
 	if ((c->vm_end - c->vm_start) != size) {
 		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
 		       __func__, c->vm_end - c->vm_start, size);
@@ -470,15 +369,10 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
 
 	flush_tlb_kernel_range(c->vm_start, c->vm_end);
 
-	spin_lock_irqsave(&consistent_lock, flags);
-	list_del(&c->vm_list);
-	spin_unlock_irqrestore(&consistent_lock, flags);
-
-	kfree(c);
+	arm_vmregion_free(&consistent_head, c);
 	return;
 
  no_area:
-	spin_unlock_irqrestore(&consistent_lock, flags);
 	printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
 	       __func__, cpu_addr);
 	dump_stack();
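
The conversion above relies on a simple teardown contract. A minimal sketch of it, not part of the patch itself: arm_vmregion_find_remove() clears vm_active under the head's vm_lock, so a concurrent arm_vmregion_find() for the same address (e.g. from dma_mmap) returns NULL while the caller tears the mapping down; arm_vmregion_free() then unlinks the descriptor and kfrees it.

	struct arm_vmregion *c;

	c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr);
	if (c) {
		/* ... unmap the ptes and flush the TLB for [c->vm_start, c->vm_end) ... */
		arm_vmregion_free(&consistent_head, c);
	}
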
diff --git a/arch/arm/mm/vmregion.c b/arch/arm/mm/vmregion.c
new file mode 100644
index 000000000000..19e09bdb1b8a
--- /dev/null
+++ b/arch/arm/mm/vmregion.c
@@ -0,0 +1,131 @@
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include "vmregion.h"
+
+/*
+ * VM region handling support.
+ *
+ * This should become something generic, handling VM region allocations for
+ * vmalloc and similar (ioremap, module space, etc).
+ *
+ * I envisage vmalloc()'s supporting vm_struct becoming:
+ *
+ *  struct vm_struct {
+ *    struct vmregion	region;
+ *    unsigned long	flags;
+ *    struct page	**pages;
+ *    unsigned int	nr_pages;
+ *    unsigned long	phys_addr;
+ *  };
+ *
+ * get_vm_area() would then call vmregion_alloc with an appropriate
+ * struct vmregion head (eg):
+ *
+ *  struct vmregion vmalloc_head = {
+ *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
+ *	.vm_start	= VMALLOC_START,
+ *	.vm_end		= VMALLOC_END,
+ *  };
+ *
+ * However, vmalloc_head.vm_start is variable (typically, it is dependent on
+ * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
+ * would have to initialise this each time prior to calling vmregion_alloc().
+ */
+
+struct arm_vmregion *
+arm_vmregion_alloc(struct arm_vmregion_head *head, size_t size, gfp_t gfp)
+{
+	unsigned long addr = head->vm_start, end = head->vm_end - size;
+	unsigned long flags;
+	struct arm_vmregion *c, *new;
+
+	if (head->vm_end - head->vm_start < size) {
+		printk(KERN_WARNING "%s: allocation too big (requested %#x)\n",
+			__func__, size);
+		goto out;
+	}
+
+	new = kmalloc(sizeof(struct arm_vmregion), gfp);
+	if (!new)
+		goto out;
+
+	spin_lock_irqsave(&head->vm_lock, flags);
+
+	list_for_each_entry(c, &head->vm_list, vm_list) {
+		if ((addr + size) < addr)
+			goto nospc;
+		if ((addr + size) <= c->vm_start)
+			goto found;
+		addr = c->vm_end;
+		if (addr > end)
+			goto nospc;
+	}
+
+ found:
+	/*
+	 * Insert this entry _before_ the one we found.
+	 */
+	list_add_tail(&new->vm_list, &c->vm_list);
+	new->vm_start = addr;
+	new->vm_end = addr + size;
+	new->vm_active = 1;
+
+	spin_unlock_irqrestore(&head->vm_lock, flags);
+	return new;
+
+ nospc:
+	spin_unlock_irqrestore(&head->vm_lock, flags);
+	kfree(new);
+ out:
+	return NULL;
+}
+
+static struct arm_vmregion *__arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr)
+{
+	struct arm_vmregion *c;
+
+	list_for_each_entry(c, &head->vm_list, vm_list) {
+		if (c->vm_active && c->vm_start == addr)
+			goto out;
+	}
+	c = NULL;
+ out:
+	return c;
+}
+
+struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr)
+{
+	struct arm_vmregion *c;
+	unsigned long flags;
+
+	spin_lock_irqsave(&head->vm_lock, flags);
+	c = __arm_vmregion_find(head, addr);
+	spin_unlock_irqrestore(&head->vm_lock, flags);
+	return c;
+}
+
+struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *head, unsigned long addr)
+{
+	struct arm_vmregion *c;
+	unsigned long flags;
+
+	spin_lock_irqsave(&head->vm_lock, flags);
+	c = __arm_vmregion_find(head, addr);
+	if (c)
+		c->vm_active = 0;
+	spin_unlock_irqrestore(&head->vm_lock, flags);
+	return c;
+}
+
+void arm_vmregion_free(struct arm_vmregion_head *head, struct arm_vmregion *c)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&head->vm_lock, flags);
+	list_del(&c->vm_list);
+	spin_unlock_irqrestore(&head->vm_lock, flags);
+
+	kfree(c);
+}
diff --git a/arch/arm/mm/vmregion.h b/arch/arm/mm/vmregion.h
new file mode 100644
index 000000000000..6b2cdbdf3a85
--- /dev/null
+++ b/arch/arm/mm/vmregion.h
@@ -0,0 +1,29 @@
+#ifndef VMREGION_H
+#define VMREGION_H
+
+#include <linux/spinlock.h>
+#include <linux/list.h>
+
+struct page;
+
+struct arm_vmregion_head {
+	spinlock_t		vm_lock;
+	struct list_head	vm_list;
+	unsigned long		vm_start;
+	unsigned long		vm_end;
+};
+
+struct arm_vmregion {
+	struct list_head	vm_list;
+	unsigned long		vm_start;
+	unsigned long		vm_end;
+	struct page		*vm_pages;
+	int			vm_active;
+};
+
+struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, gfp_t);
+struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *, unsigned long);
+struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *, unsigned long);
+void arm_vmregion_free(struct arm_vmregion_head *, struct arm_vmregion *);
+
+#endif
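
For illustration only, a hedged sketch of how a client of the new interface would be written: set up an arm_vmregion_head with its own lock and window bounds, then allocate and release ranges through the exported helpers. The EXAMPLE_* window and the two wrapper functions below are made-up placeholders, not part of this patch; dma-mapping.c's consistent_head uses CONSISTENT_BASE/CONSISTENT_END in exactly the same way.

	/* Hypothetical client of the arm_vmregion API (example names only). */
	#include <linux/mm.h>
	#include <linux/gfp.h>

	#include "vmregion.h"

	#define EXAMPLE_BASE	0xffc00000UL	/* placeholder window start */
	#define EXAMPLE_END	0xffe00000UL	/* placeholder window end */

	static struct arm_vmregion_head example_head = {
		.vm_lock	= __SPIN_LOCK_UNLOCKED(&example_head.vm_lock),
		.vm_list	= LIST_HEAD_INIT(example_head.vm_list),
		.vm_start	= EXAMPLE_BASE,
		.vm_end		= EXAMPLE_END,
	};

	static unsigned long example_get_region(size_t size)
	{
		/* First-fit search of the window; returns 0 if no space is left. */
		struct arm_vmregion *c;

		c = arm_vmregion_alloc(&example_head, PAGE_ALIGN(size), GFP_KERNEL);
		return c ? c->vm_start : 0;
	}

	static void example_put_region(unsigned long addr)
	{
		/* Mark the region inactive under vm_lock, then unlink and kfree it. */
		struct arm_vmregion *c;

		c = arm_vmregion_find_remove(&example_head, addr);
		if (c)
			arm_vmregion_free(&example_head, c);
	}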