author		Ilya Yanok <yanok@emcraft.com>	2009-02-12 08:20:53 -0500
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2009-02-22 18:48:57 -0500
commit		33f00dcedb0e22cdb156a23632814fc580fcfcf8 (patch)
tree		ca3258932cf3ea50783a309f66e897f75fdae557 /arch/powerpc/lib
parent		812d904e3975450cbf96aea5c1fec4d0a176ab50 (diff)
powerpc: Rework dma-noncoherent to use generic vmalloc layer
This patch rewrites the consistent DMA allocation support to use the generic
vmalloc layer to allocate virtual memory space from the vmalloc pool, getting
rid of CONFIG_CONSISTENT_{START,SIZE}. This greatly simplifies the code by
effectively removing a custom allocator we had for virtual space.

Signed-off-by: Ilya Yanok <yanok@emcraft.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
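In essence, the physical pages are still allocated and cache-flushed as before;
only the management of kernel virtual space changes. Below is a condensed,
hypothetical sketch of the reworked allocation path (the helper name is
invented here and error paths are abbreviated; the vmalloc-layer calls mirror
the 2.6.29-era API used in the patch that follows):

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Hypothetical condensed helper; the real code is in the patch below. */
static void *consistent_map_sketch(struct page *page, size_t size,
				   unsigned int nr_pages, dma_addr_t *handle)
{
	struct vm_struct *area;
	struct page **pages;
	unsigned int i;

	/*
	 * Take a virtual range from the generic vmalloc pool instead of
	 * the old CONFIG_CONSISTENT_{START,SIZE} window.
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;

	/* Describe the already-allocated physical pages ... */
	pages = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		free_vm_area(area);
		return NULL;
	}
	for (i = 0; i < nr_pages; i++)
		pages[i] = page + i;
	area->pages = pages;
	area->nr_pages = nr_pages;

	/* ... and map them non-cacheable for coherent DMA. */
	if (map_vm_area(area, pgprot_noncached(PAGE_KERNEL), &pages))
		return NULL;

	*handle = page_to_phys(page);
	return area->addr;	/* freeing now reduces to vfree(vaddr) */
}

Because the vm_struct tracks its backing pages (area->pages/area->nr_pages,
with VM_VPAGES set when the array itself came from vmalloc), the generic
vfree() can unmap the range and release the pages, which is why
__dma_free_coherent() below becomes a single vfree() call.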
Diffstat (limited to 'arch/powerpc/lib')
-rw-r--r--	arch/powerpc/lib/dma-noncoherent.c	303
1 file changed, 57 insertions(+), 246 deletions(-)
diff --git a/arch/powerpc/lib/dma-noncoherent.c b/arch/powerpc/lib/dma-noncoherent.c
index b7dc4c19f582..005a28d380af 100644
--- a/arch/powerpc/lib/dma-noncoherent.c
+++ b/arch/powerpc/lib/dma-noncoherent.c
@@ -29,121 +29,11 @@
 #include <linux/types.h>
 #include <linux/highmem.h>
 #include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
 
 #include <asm/tlbflush.h>
 
 /*
- * This address range defaults to a value that is safe for all
- * platforms which currently set CONFIG_NOT_COHERENT_CACHE. It
- * can be further configured for specific applications under
- * the "Advanced Setup" menu. -Matt
- */
-#define CONSISTENT_BASE		(CONFIG_CONSISTENT_START)
-#define CONSISTENT_END		(CONFIG_CONSISTENT_START + CONFIG_CONSISTENT_SIZE)
-#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
-
-/*
- * This is the page table (2MB) covering uncached, DMA consistent allocations
- */
-static pte_t *consistent_pte;
-static DEFINE_SPINLOCK(consistent_lock);
-
-/*
- * VM region handling support.
- *
- * This should become something generic, handling VM region allocations for
- * vmalloc and similar (ioremap, module space, etc).
- *
- * I envisage vmalloc()'s supporting vm_struct becoming:
- *
- *  struct vm_struct {
- *    struct vm_region	region;
- *    unsigned long	flags;
- *    struct page	**pages;
- *    unsigned int	nr_pages;
- *    unsigned long	phys_addr;
- *  };
- *
- * get_vm_area() would then call vm_region_alloc with an appropriate
- * struct vm_region head (eg):
- *
- *  struct vm_region vmalloc_head = {
- *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
- *	.vm_start	= VMALLOC_START,
- *	.vm_end		= VMALLOC_END,
- *  };
- *
- * However, vmalloc_head.vm_start is variable (typically, it is dependent on
- * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
- * would have to initialise this each time prior to calling vm_region_alloc().
- */
-struct ppc_vm_region {
-	struct list_head	vm_list;
-	unsigned long		vm_start;
-	unsigned long		vm_end;
-};
-
-static struct ppc_vm_region consistent_head = {
-	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
-	.vm_start	= CONSISTENT_BASE,
-	.vm_end		= CONSISTENT_END,
-};
-
-static struct ppc_vm_region *
-ppc_vm_region_alloc(struct ppc_vm_region *head, size_t size, gfp_t gfp)
-{
-	unsigned long addr = head->vm_start, end = head->vm_end - size;
-	unsigned long flags;
-	struct ppc_vm_region *c, *new;
-
-	new = kmalloc(sizeof(struct ppc_vm_region), gfp);
-	if (!new)
-		goto out;
-
-	spin_lock_irqsave(&consistent_lock, flags);
-
-	list_for_each_entry(c, &head->vm_list, vm_list) {
-		if ((addr + size) < addr)
-			goto nospc;
-		if ((addr + size) <= c->vm_start)
-			goto found;
-		addr = c->vm_end;
-		if (addr > end)
-			goto nospc;
-	}
-
- found:
-	/*
-	 * Insert this entry _before_ the one we found.
-	 */
-	list_add_tail(&new->vm_list, &c->vm_list);
-	new->vm_start = addr;
-	new->vm_end = addr + size;
-
-	spin_unlock_irqrestore(&consistent_lock, flags);
-	return new;
-
- nospc:
-	spin_unlock_irqrestore(&consistent_lock, flags);
-	kfree(new);
- out:
-	return NULL;
-}
-
-static struct ppc_vm_region *ppc_vm_region_find(struct ppc_vm_region *head, unsigned long addr)
-{
-	struct ppc_vm_region *c;
-
-	list_for_each_entry(c, &head->vm_list, vm_list) {
-		if (c->vm_start == addr)
-			goto out;
-	}
-	c = NULL;
- out:
-	return c;
-}
-
-/*
  * Allocate DMA-coherent memory space and return both the kernel remapped
  * virtual and bus address for that space.
  */
@@ -151,21 +41,21 @@ void *
 __dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
 {
 	struct page *page;
-	struct ppc_vm_region *c;
 	unsigned long order;
+	int i;
+	unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	unsigned int array_size = nr_pages * sizeof(struct page *);
+	struct page **pages;
+	struct page *end;
 	u64 mask = 0x00ffffff, limit; /* ISA default */
+	struct vm_struct *area;
 
-	if (!consistent_pte) {
-		printk(KERN_ERR "%s: not initialised\n", __func__);
-		dump_stack();
-		return NULL;
-	}
-
+	BUG_ON(!mem_init_done);
 	size = PAGE_ALIGN(size);
 	limit = (mask + 1) & ~mask;
-	if ((limit && size >= limit) || size >= (CONSISTENT_END - CONSISTENT_BASE)) {
-		printk(KERN_WARNING "coherent allocation too big (requested %#x mask %#Lx)\n",
-		       size, mask);
+	if (limit && size >= limit) {
+		printk(KERN_WARNING "coherent allocation too big (requested "
+				"%#x mask %#Lx)\n", size, mask);
 		return NULL;
 	}
 
@@ -178,6 +68,8 @@ __dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
 	if (!page)
 		goto no_page;
 
+	end = page + (1 << order);
+
 	/*
 	 * Invalidate any data that might be lurking in the
 	 * kernel direct-mapped region for device DMA.
@@ -188,48 +80,59 @@ __dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
 		flush_dcache_range(kaddr, kaddr + size);
 	}
 
+	split_page(page, order);
+
 	/*
-	 * Allocate a virtual address in the consistent mapping region.
+	 * Set the "dma handle"
 	 */
-	c = ppc_vm_region_alloc(&consistent_head, size,
-			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
-	if (c) {
-		unsigned long vaddr = c->vm_start;
-		pte_t *pte = consistent_pte + CONSISTENT_OFFSET(vaddr);
-		struct page *end = page + (1 << order);
-
-		split_page(page, order);
-
-		/*
-		 * Set the "dma handle"
-		 */
-		*handle = page_to_phys(page);
-
-		do {
-			BUG_ON(!pte_none(*pte));
-
-			SetPageReserved(page);
-			set_pte_at(&init_mm, vaddr,
-				   pte, mk_pte(page, pgprot_noncached(PAGE_KERNEL)));
-			page++;
-			pte++;
-			vaddr += PAGE_SIZE;
-		} while (size -= PAGE_SIZE);
-
-		/*
-		 * Free the otherwise unused pages.
-		 */
-		while (page < end) {
-			__free_page(page);
-			page++;
-		}
-
-		return (void *)c->vm_start;
+	*handle = page_to_phys(page);
+
+	area = get_vm_area_caller(size, VM_IOREMAP,
+			__builtin_return_address(1));
+	if (!area)
+		goto out_free_pages;
+
+	if (array_size > PAGE_SIZE) {
+		pages = vmalloc(array_size);
+		area->flags |= VM_VPAGES;
+	} else {
+		pages = kmalloc(array_size, GFP_KERNEL);
+	}
+	if (!pages)
+		goto out_free_area;
+
+	area->pages = pages;
+	area->nr_pages = nr_pages;
+
+	for (i = 0; i < nr_pages; i++)
+		pages[i] = page + i;
+
+	if (map_vm_area(area, pgprot_noncached(PAGE_KERNEL), &pages))
+		goto out_unmap;
+
+	/*
+	 * Free the otherwise unused pages.
+	 */
+	page += nr_pages;
+	while (page < end) {
+		__free_page(page);
+		page++;
 	}
 
+	return area->addr;
+out_unmap:
+	vunmap(area->addr);
+	if (array_size > PAGE_SIZE)
+		vfree(pages);
+	else
+		kfree(pages);
+	goto out_free_pages;
+out_free_area:
+	free_vm_area(area);
+out_free_pages:
 	if (page)
 		__free_pages(page, order);
- no_page:
+no_page:
 	return NULL;
 }
 EXPORT_SYMBOL(__dma_alloc_coherent);
@@ -239,104 +142,12 @@ EXPORT_SYMBOL(__dma_alloc_coherent);
  */
 void __dma_free_coherent(size_t size, void *vaddr)
 {
-	struct ppc_vm_region *c;
-	unsigned long flags, addr;
-	pte_t *ptep;
-
-	size = PAGE_ALIGN(size);
-
-	spin_lock_irqsave(&consistent_lock, flags);
-
-	c = ppc_vm_region_find(&consistent_head, (unsigned long)vaddr);
-	if (!c)
-		goto no_area;
-
-	if ((c->vm_end - c->vm_start) != size) {
-		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
-		       __func__, c->vm_end - c->vm_start, size);
-		dump_stack();
-		size = c->vm_end - c->vm_start;
-	}
-
-	ptep = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
-	addr = c->vm_start;
-	do {
-		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
-		unsigned long pfn;
-
-		ptep++;
-		addr += PAGE_SIZE;
+	vfree(vaddr);
 
-		if (!pte_none(pte) && pte_present(pte)) {
-			pfn = pte_pfn(pte);
-
-			if (pfn_valid(pfn)) {
-				struct page *page = pfn_to_page(pfn);
-				ClearPageReserved(page);
-
-				__free_page(page);
-				continue;
-			}
-		}
-
-		printk(KERN_CRIT "%s: bad page in kernel page table\n",
-		       __func__);
-	} while (size -= PAGE_SIZE);
-
-	flush_tlb_kernel_range(c->vm_start, c->vm_end);
-
-	list_del(&c->vm_list);
-
-	spin_unlock_irqrestore(&consistent_lock, flags);
-
-	kfree(c);
-	return;
-
- no_area:
-	spin_unlock_irqrestore(&consistent_lock, flags);
-	printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
-		__func__, vaddr);
-	dump_stack();
 }
 EXPORT_SYMBOL(__dma_free_coherent);
 
 /*
- * Initialise the consistent memory allocation.
- */
-static int __init dma_alloc_init(void)
-{
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
-	int ret = 0;
-
-	do {
-		pgd = pgd_offset(&init_mm, CONSISTENT_BASE);
-		pud = pud_alloc(&init_mm, pgd, CONSISTENT_BASE);
-		pmd = pmd_alloc(&init_mm, pud, CONSISTENT_BASE);
-		if (!pmd) {
-			printk(KERN_ERR "%s: no pmd tables\n", __func__);
-			ret = -ENOMEM;
-			break;
-		}
-
-		pte = pte_alloc_kernel(pmd, CONSISTENT_BASE);
-		if (!pte) {
-			printk(KERN_ERR "%s: no pte tables\n", __func__);
-			ret = -ENOMEM;
-			break;
-		}
-
-		consistent_pte = pte;
-	} while (0);
-
-	return ret;
-}
-
-core_initcall(dma_alloc_init);
-
-/*
  * make an area consistent.
  */
 void __dma_sync(void *vaddr, size_t size, int direction)