author     Benjamin Herrenschmidt <benh@kernel.crashing.org>   2009-05-26 23:33:14 -0400
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>   2009-05-26 23:33:14 -0400
commit     84532a0fc3d5811dca8e3726fe4d372ea87bd7c6
tree       f76d521d22d662dd1dccf22fd42ee57c71446e4f  /arch/powerpc/lib
parent     cd86a536c81e9300d984327517548ca0652eebf9
Revert "powerpc: Rework dma-noncoherent to use generic vmalloc layer"
This reverts commit 33f00dcedb0e22cdb156a23632814fc580fcfcf8.
While it was a good idea to try to use the mm/vmalloc.c allocator instead
of our own (in fact, ours is itself a dup of an old variant of the vmalloc
one), unfortunately the approach is terminally busted, since
dma_alloc_coherent() can be called at interrupt time or in atomic contexts,
and there is little chance we'll make the code in mm/vmalloc.c cope with
that :-(

Until we can get the generic code to forbid that idiocy and fix all
drivers abusing it, we pretty much have no choice but to revert to
our custom virtual space allocator.

There's also a problem with SMP safety, since freeing such a mapping
would require an IPI, which cannot be done at interrupt time.
However, right now I don't think we support any platform that is
both SMP and has non-coherent DMA (don't laugh, I know such things
do exist!), so we can sort that out later.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
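
To make the constraint in the message concrete: the DMA API allows allocation
from atomic context, which is precisely what a sleeping vmalloc-based
implementation cannot serve. A minimal hypothetical sketch (illustration only,
not part of this commit; the driver names are invented):

```c
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>

static irqreturn_t demo_refill_irq(int irq, void *data)
{
	struct device *dev = data;	/* assumed: handler data is the device */
	dma_addr_t bus;
	void *cpu;

	/*
	 * Legal per the DMA API (note GFP_ATOMIC), but the vmalloc-based
	 * __dma_alloc_coherent() being reverted here may sleep while
	 * allocating page tables, so a call like this can deadlock.
	 */
	cpu = dma_alloc_coherent(dev, PAGE_SIZE, &bus, GFP_ATOMIC);
	if (!cpu)
		return IRQ_HANDLED;	/* drop the event; allocation failed */

	/* ... hand the buffer to the device ... */
	return IRQ_HANDLED;
}
```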
Diffstat (limited to 'arch/powerpc/lib')
-rw-r--r--  arch/powerpc/lib/dma-noncoherent.c | 303
1 file changed, 246 insertions(+), 57 deletions(-)
```diff
diff --git a/arch/powerpc/lib/dma-noncoherent.c b/arch/powerpc/lib/dma-noncoherent.c
index 005a28d380af..b7dc4c19f582 100644
--- a/arch/powerpc/lib/dma-noncoherent.c
+++ b/arch/powerpc/lib/dma-noncoherent.c
@@ -29,11 +29,121 @@
 #include <linux/types.h>
 #include <linux/highmem.h>
 #include <linux/dma-mapping.h>
-#include <linux/vmalloc.h>
 
 #include <asm/tlbflush.h>
 
 /*
+ * This address range defaults to a value that is safe for all
+ * platforms which currently set CONFIG_NOT_COHERENT_CACHE. It
+ * can be further configured for specific applications under
+ * the "Advanced Setup" menu. -Matt
+ */
+#define CONSISTENT_BASE		(CONFIG_CONSISTENT_START)
+#define CONSISTENT_END		(CONFIG_CONSISTENT_START + CONFIG_CONSISTENT_SIZE)
+#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
+
+/*
+ * This is the page table (2MB) covering uncached, DMA consistent allocations
+ */
+static pte_t *consistent_pte;
+static DEFINE_SPINLOCK(consistent_lock);
+
+/*
+ * VM region handling support.
+ *
+ * This should become something generic, handling VM region allocations for
+ * vmalloc and similar (ioremap, module space, etc).
+ *
+ * I envisage vmalloc()'s supporting vm_struct becoming:
+ *
+ *  struct vm_struct {
+ *    struct vm_region	region;
+ *    unsigned long	flags;
+ *    struct page	**pages;
+ *    unsigned int	nr_pages;
+ *    unsigned long	phys_addr;
+ *  };
+ *
+ * get_vm_area() would then call vm_region_alloc with an appropriate
+ * struct vm_region head (eg):
+ *
+ *  struct vm_region vmalloc_head = {
+ *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
+ *	.vm_start	= VMALLOC_START,
+ *	.vm_end		= VMALLOC_END,
+ *  };
+ *
+ * However, vmalloc_head.vm_start is variable (typically, it is dependent on
+ * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
+ * would have to initialise this each time prior to calling vm_region_alloc().
+ */
+struct ppc_vm_region {
+	struct list_head	vm_list;
+	unsigned long		vm_start;
+	unsigned long		vm_end;
+};
+
+static struct ppc_vm_region consistent_head = {
+	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
+	.vm_start	= CONSISTENT_BASE,
+	.vm_end		= CONSISTENT_END,
+};
+
+static struct ppc_vm_region *
+ppc_vm_region_alloc(struct ppc_vm_region *head, size_t size, gfp_t gfp)
+{
+	unsigned long addr = head->vm_start, end = head->vm_end - size;
+	unsigned long flags;
+	struct ppc_vm_region *c, *new;
+
+	new = kmalloc(sizeof(struct ppc_vm_region), gfp);
+	if (!new)
+		goto out;
+
+	spin_lock_irqsave(&consistent_lock, flags);
+
+	list_for_each_entry(c, &head->vm_list, vm_list) {
+		if ((addr + size) < addr)
+			goto nospc;
+		if ((addr + size) <= c->vm_start)
+			goto found;
+		addr = c->vm_end;
+		if (addr > end)
+			goto nospc;
+	}
+
+found:
+	/*
+	 * Insert this entry _before_ the one we found.
+	 */
+	list_add_tail(&new->vm_list, &c->vm_list);
+	new->vm_start = addr;
+	new->vm_end = addr + size;
+
+	spin_unlock_irqrestore(&consistent_lock, flags);
+	return new;
+
+nospc:
+	spin_unlock_irqrestore(&consistent_lock, flags);
+	kfree(new);
+out:
+	return NULL;
+}
+
+static struct ppc_vm_region *ppc_vm_region_find(struct ppc_vm_region *head, unsigned long addr)
+{
+	struct ppc_vm_region *c;
+
+	list_for_each_entry(c, &head->vm_list, vm_list) {
+		if (c->vm_start == addr)
+			goto out;
+	}
+	c = NULL;
+out:
+	return c;
+}
+
+/*
  * Allocate DMA-coherent memory space and return both the kernel remapped
  * virtual and bus address for that space.
  */
```
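
The hunk above restores the custom allocator: a first-fit search over an
address-sorted region list under a spinlock, which is what makes it safe in
atomic context. A user-space model of the same walk (simplified sketch, my
own names: plain pointers instead of list_head, no locking, no kmalloc):

```c
#include <stdio.h>

/* Regions are kept sorted by address; the cursor jumps over each existing
 * allocation until a gap of 'size' bytes opens before the next one. */
struct region { unsigned long start, end; struct region *next; };

static unsigned long first_fit(struct region *head, unsigned long base,
			       unsigned long limit, unsigned long size)
{
	unsigned long addr = base;
	struct region *c;

	for (c = head; c; c = c->next) {
		if (addr + size <= c->start)	/* gap before this region */
			return addr;
		addr = c->end;			/* skip past it */
		if (addr + size > limit)
			return 0;		/* out of space */
	}
	return addr + size <= limit ? addr : 0;	/* tail of the window */
}

int main(void)
{
	/* two live allocations: [0x0000,0x2000) and [0x3000,0x4000) */
	struct region b = { 0x3000, 0x4000, NULL };
	struct region a = { 0x0000, 0x2000, &b };

	printf("%#lx\n", first_fit(&a, 0, 0x200000, 0x1000)); /* 0x2000: fits the hole */
	printf("%#lx\n", first_fit(&a, 0, 0x200000, 0x2000)); /* 0x4000: past the last region */
	return 0;
}
```

This mirrors how ppc_vm_region_alloc() inserts the new entry before the first
region it could not squeeze past. The next hunk adapts the entry checks of
__dma_alloc_coherent().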
```diff
@@ -41,21 +151,21 @@ void *
 __dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
 {
 	struct page *page;
+	struct ppc_vm_region *c;
 	unsigned long order;
-	int i;
-	unsigned int nr_pages = PAGE_ALIGN(size)>>PAGE_SHIFT;
-	unsigned int array_size = nr_pages * sizeof(struct page *);
-	struct page **pages;
-	struct page *end;
 	u64 mask = 0x00ffffff, limit; /* ISA default */
-	struct vm_struct *area;
 
-	BUG_ON(!mem_init_done);
+	if (!consistent_pte) {
+		printk(KERN_ERR "%s: not initialised\n", __func__);
+		dump_stack();
+		return NULL;
+	}
+
 	size = PAGE_ALIGN(size);
 	limit = (mask + 1) & ~mask;
-	if (limit && size >= limit) {
-		printk(KERN_WARNING "coherent allocation too big (requested "
-		       "%#x mask %#Lx)\n", size, mask);
+	if ((limit && size >= limit) || size >= (CONSISTENT_END - CONSISTENT_BASE)) {
+		printk(KERN_WARNING "coherent allocation too big (requested %#x mask %#Lx)\n",
+		       size, mask);
 		return NULL;
 	}
 
```
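
Note the tightened rejection test: the window bound, not the ISA mask, is now
usually the operative limit. Worked numbers (a quick sketch; the 2 MB window
size matches the "(2MB)" comment above, but the exact Kconfig values are
board-configurable):

```c
#include <stdio.h>

int main(void)
{
	unsigned long long mask = 0x00ffffff;		/* ISA default */
	unsigned long long limit = (mask + 1) & ~mask;	/* lowest power of two above the mask */
	unsigned long window = 0x00200000;		/* assumed CONFIG_CONSISTENT_SIZE: 2 MB */

	printf("limit  = %#llx (16 MB)\n", limit);
	printf("window = %#lx (2 MB) -- this test rejects first\n", window);
	return 0;
}
```

The next two hunks rewrite the allocation body itself.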
```diff
@@ -68,8 +178,6 @@ __dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
 	if (!page)
 		goto no_page;
 
-	end = page + (1 << order);
-
 	/*
 	 * Invalidate any data that might be lurking in the
 	 * kernel direct-mapped region for device DMA.
@@ -80,59 +188,48 @@ __dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
 		flush_dcache_range(kaddr, kaddr + size);
 	}
 
-	split_page(page, order);
-
 	/*
-	 * Set the "dma handle"
+	 * Allocate a virtual address in the consistent mapping region.
 	 */
-	*handle = page_to_phys(page);
-
-	area = get_vm_area_caller(size, VM_IOREMAP,
-			__builtin_return_address(1));
-	if (!area)
-		goto out_free_pages;
-
-	if (array_size > PAGE_SIZE) {
-		pages = vmalloc(array_size);
-		area->flags |= VM_VPAGES;
-	} else {
-		pages = kmalloc(array_size, GFP_KERNEL);
-	}
-	if (!pages)
-		goto out_free_area;
+	c = ppc_vm_region_alloc(&consistent_head, size,
+				gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
+	if (c) {
+		unsigned long vaddr = c->vm_start;
+		pte_t *pte = consistent_pte + CONSISTENT_OFFSET(vaddr);
+		struct page *end = page + (1 << order);
 
-	area->pages = pages;
-	area->nr_pages = nr_pages;
+		split_page(page, order);
 
-	for (i = 0; i < nr_pages; i++)
-		pages[i] = page + i;
+		/*
+		 * Set the "dma handle"
+		 */
+		*handle = page_to_phys(page);
 
-	if (map_vm_area(area, pgprot_noncached(PAGE_KERNEL), &pages))
-		goto out_unmap;
+		do {
+			BUG_ON(!pte_none(*pte));
 
-	/*
-	 * Free the otherwise unused pages.
-	 */
-	page += nr_pages;
-	while (page < end) {
-		__free_page(page);
-		page++;
+			SetPageReserved(page);
+			set_pte_at(&init_mm, vaddr,
+				   pte, mk_pte(page, pgprot_noncached(PAGE_KERNEL)));
+			page++;
+			pte++;
+			vaddr += PAGE_SIZE;
+		} while (size -= PAGE_SIZE);
+
+		/*
+		 * Free the otherwise unused pages.
+		 */
+		while (page < end) {
+			__free_page(page);
+			page++;
+		}
+
+		return (void *)c->vm_start;
 	}
 
-	return area->addr;
-out_unmap:
-	vunmap(area->addr);
-	if (array_size > PAGE_SIZE)
-		vfree(pages);
-	else
-		kfree(pages);
-	goto out_free_pages;
-out_free_area:
-	free_vm_area(area);
-out_free_pages:
 	if (page)
 		__free_pages(page, order);
 no_page:
 	return NULL;
 }
 EXPORT_SYMBOL(__dma_alloc_coherent);
```
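
In the rewritten allocation path above, CONSISTENT_OFFSET() is plain index
arithmetic into the static consistent_pte array. A user-space model (the base
address and window size are assumed Kconfig defaults for illustration;
PAGE_SHIFT 12 assumes 4 KB pages):

```c
#include <stdio.h>

#define PAGE_SHIFT		12
#define CONSISTENT_BASE		0xfd000000UL	/* assumed CONFIG_CONSISTENT_START */
#define CONSISTENT_SIZE		0x00200000UL	/* assumed CONFIG_CONSISTENT_SIZE */
#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)

int main(void)
{
	/* third page of the window -> PTE slot 2 */
	printf("%lu\n", CONSISTENT_OFFSET(0xfd002000UL));
	/* the whole window needs 512 slots, so one page table suffices --
	 * hence the single pte_alloc_kernel() in dma_alloc_init() below */
	printf("%lu\n", CONSISTENT_SIZE >> PAGE_SHIFT);
	return 0;
}
```

The matching free path, and the initcall that populates consistent_pte, are
restored by the final hunk.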
```diff
@@ -142,12 +239,104 @@ EXPORT_SYMBOL(__dma_alloc_coherent);
  */
 void __dma_free_coherent(size_t size, void *vaddr)
 {
-	vfree(vaddr);
+	struct ppc_vm_region *c;
+	unsigned long flags, addr;
+	pte_t *ptep;
+
+	size = PAGE_ALIGN(size);
+
+	spin_lock_irqsave(&consistent_lock, flags);
+
+	c = ppc_vm_region_find(&consistent_head, (unsigned long)vaddr);
+	if (!c)
+		goto no_area;
+
+	if ((c->vm_end - c->vm_start) != size) {
+		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
+		       __func__, c->vm_end - c->vm_start, size);
+		dump_stack();
+		size = c->vm_end - c->vm_start;
+	}
+
+	ptep = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
+	addr = c->vm_start;
+	do {
+		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
+		unsigned long pfn;
+
+		ptep++;
+		addr += PAGE_SIZE;
 
+		if (!pte_none(pte) && pte_present(pte)) {
+			pfn = pte_pfn(pte);
+
+			if (pfn_valid(pfn)) {
+				struct page *page = pfn_to_page(pfn);
+				ClearPageReserved(page);
+
+				__free_page(page);
+				continue;
+			}
+		}
+
+		printk(KERN_CRIT "%s: bad page in kernel page table\n",
+		       __func__);
+	} while (size -= PAGE_SIZE);
+
+	flush_tlb_kernel_range(c->vm_start, c->vm_end);
+
+	list_del(&c->vm_list);
+
+	spin_unlock_irqrestore(&consistent_lock, flags);
+
+	kfree(c);
+	return;
+
+no_area:
+	spin_unlock_irqrestore(&consistent_lock, flags);
+	printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
+	       __func__, vaddr);
+	dump_stack();
 }
 EXPORT_SYMBOL(__dma_free_coherent);
 
 /*
+ * Initialise the consistent memory allocation.
+ */
+static int __init dma_alloc_init(void)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+	int ret = 0;
+
+	do {
+		pgd = pgd_offset(&init_mm, CONSISTENT_BASE);
+		pud = pud_alloc(&init_mm, pgd, CONSISTENT_BASE);
+		pmd = pmd_alloc(&init_mm, pud, CONSISTENT_BASE);
+		if (!pmd) {
+			printk(KERN_ERR "%s: no pmd tables\n", __func__);
+			ret = -ENOMEM;
+			break;
+		}
+
+		pte = pte_alloc_kernel(pmd, CONSISTENT_BASE);
+		if (!pte) {
+			printk(KERN_ERR "%s: no pte tables\n", __func__);
+			ret = -ENOMEM;
+			break;
+		}
+
+		consistent_pte = pte;
+	} while (0);
+
+	return ret;
+}
+
+core_initcall(dma_alloc_init);
+
+/*
  * make an area consistent.
  */
 void __dma_sync(void *vaddr, size_t size, int direction)
```
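
For completeness, a hypothetical consumer sketch (illustration only; all names
are invented): allocation and free must quote the same size, since
__dma_free_coherent() looks the region up by address and complains when the
recorded extent disagrees with PAGE_ALIGN(size).

```c
#include <linux/dma-mapping.h>
#include <linux/errno.h>

#define DEMO_RING_BYTES	8192	/* two pages; arbitrary for the example */

static void *demo_ring;
static dma_addr_t demo_ring_bus;

/* Hypothetical probe/remove pair for a device behind a non-coherent cache. */
static int demo_probe(struct device *dev)
{
	demo_ring = dma_alloc_coherent(dev, DEMO_RING_BYTES,
				       &demo_ring_bus, GFP_KERNEL);
	return demo_ring ? 0 : -ENOMEM;
}

static void demo_remove(struct device *dev)
{
	/* Must pass the size used at allocation time. */
	dma_free_coherent(dev, DEMO_RING_BYTES, demo_ring, demo_ring_bus);
}
```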