author    Benjamin Herrenschmidt <benh@kernel.crashing.org>  2009-05-26 23:50:33 -0400
committer Benjamin Herrenschmidt <benh@kernel.crashing.org>  2009-05-27 02:33:59 -0400
commit    8b31e49d1d75729c1da9009664ba52abd1adc628
tree      046095a0cce051a471c7af93784c024c29a4f99b
parent    f637a49e507c88354ab32b5d914e06acfb7ee00d
powerpc: Fix up dma_alloc_coherent() on platforms without cache coherency.
The implementation we just revived has issues, such as using a Kconfig-defined
virtual address area in kernel space that nothing actually carves out (and
thus will overlap whatever is there), or having some dependencies on being
self contained in a single PTE page, which adds unnecessary constraints on
the kernel virtual address space.

This fixes it by using more classic PTE accessors and automatically locating
the area for consistent memory, carving an appropriate hole in the kernel
virtual address space and leaving only the size of that area as a Kconfig
option. It also brings some dma-mask related fixes from the ARM
implementation, which was almost identical initially but grew its own fixes.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
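The resulting 32-bit kernel virtual layout is roughly the following (a sketch
for orientation, not part of the patch; exact addresses depend on the
platform's KVIRT_TOP and on CONFIG_CONSISTENT_SIZE):

	+---------------------------+  KVIRT_TOP
	|  consistent (uncached)    |
	|  DMA pool                 |  CONFIG_CONSISTENT_SIZE bytes
	+---------------------------+  IOREMAP_TOP == CONSISTENT_BASE
	|  ioremap, growing down    |
	|  vmalloc, growing up      |
	+---------------------------+  VMALLOC_START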
Diffstat (limited to 'arch/powerpc')
 arch/powerpc/Kconfig                     |  13
 arch/powerpc/include/asm/dma-mapping.h   |   6
 arch/powerpc/include/asm/pgtable-ppc32.h |   4
 arch/powerpc/kernel/dma.c                |   2
 arch/powerpc/mm/dma-noncoherent.c        | 108
 arch/powerpc/mm/mem.c                    |   4
 6 files changed, 54 insertions(+), 83 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 3bb43adce44d..cdc9a6ff4be8 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -868,19 +868,6 @@ config TASK_SIZE
 	default "0x80000000" if PPC_PREP || PPC_8xx
 	default "0xc0000000"
 
-config CONSISTENT_START_BOOL
-	bool "Set custom consistent memory pool address"
-	depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE
-	help
-	  This option allows you to set the base virtual address
-	  of the consistent memory pool. This pool of virtual
-	  memory is used to make consistent memory allocations.
-
-config CONSISTENT_START
-	hex "Base virtual address of consistent memory pool" if CONSISTENT_START_BOOL
-	default "0xfd000000" if (NOT_COHERENT_CACHE && 8xx)
-	default "0xff100000" if NOT_COHERENT_CACHE
-
 config CONSISTENT_SIZE_BOOL
 	bool "Set custom consistent memory pool size"
 	depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index c69f2b5f0cc4..cb448d68452c 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -26,7 +26,9 @@
  * allocate the space "normally" and use the cache management functions
  * to ensure it is consistent.
  */
-extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp);
+struct device;
+extern void *__dma_alloc_coherent(struct device *dev, size_t size,
+				  dma_addr_t *handle, gfp_t gfp);
 extern void __dma_free_coherent(size_t size, void *vaddr);
 extern void __dma_sync(void *vaddr, size_t size, int direction);
 extern void __dma_sync_page(struct page *page, unsigned long offset,
@@ -37,7 +39,7 @@ extern void __dma_sync_page(struct page *page, unsigned long offset,
  * Cache coherent cores.
  */
 
-#define __dma_alloc_coherent(gfp, size, handle)		NULL
+#define __dma_alloc_coherent(dev, gfp, size, handle)	NULL
 #define __dma_free_coherent(size, addr)		((void)0)
 #define __dma_sync(addr, size, rw)		((void)0)
 #define __dma_sync_page(pg, off, sz, rw)	((void)0)
diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index 28fe9d4bae35..c9ff9d75990e 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -71,7 +71,11 @@ extern int icache_44x_need_flush;
  * until mem_init() at which point this becomes the top of the vmalloc
  * and ioremap space
  */
+#ifdef CONFIG_NOT_COHERENT_CACHE
+#define IOREMAP_TOP	((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK)
+#else
 #define IOREMAP_TOP	KVIRT_TOP
+#endif
 
 /*
  * Just any arbitrary offset to the start of the vmalloc VM area: the
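A worked example of the new definition (both inputs are assumptions, since
neither appears in this hunk): on a non-highmem ppc32 platform where
KVIRT_TOP is 0xfe000000 and CONSISTENT_SIZE keeps a 2MB default of
0x00200000,

	IOREMAP_TOP = (0xfe000000 - 0x00200000) & PAGE_MASK
	            = 0xfde00000

so the vmalloc/ioremap window simply ends 2MB lower and the consistent pool
occupies 0xfde00000..0xfe000000, with no independently chosen Kconfig address
left to collide with anything.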
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 53c7788cba78..6b02793dc75b 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -32,7 +32,7 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size,
 {
 	void *ret;
 #ifdef CONFIG_NOT_COHERENT_CACHE
-	ret = __dma_alloc_coherent(size, dma_handle, flag);
+	ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
 	if (ret == NULL)
 		return NULL;
 	*dma_handle += get_dma_direct_offset(dev);
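With the device threaded through here, its coherent_dma_mask is what
__dma_alloc_coherent() validates below. A minimal, hypothetical call site
(sketch only; the names and the 32-bit mask are illustrative, not from this
patch):

	/* in some driver's probe(), sketch */
	void *buf;
	dma_addr_t dma;

	dev->coherent_dma_mask = DMA_BIT_MASK(32);
	buf = dma_alloc_coherent(dev, PAGE_SIZE, &dma, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	/* ... hand 'dma' to the device, access 'buf' from the CPU ... */
	dma_free_coherent(dev, PAGE_SIZE, buf, dma);

On a CONFIG_NOT_COHERENT_CACHE platform, buf now lands in the carved-out
uncached window below KVIRT_TOP.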
diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c
index b7dc4c19f582..36692f5c9a76 100644
--- a/arch/powerpc/mm/dma-noncoherent.c
+++ b/arch/powerpc/mm/dma-noncoherent.c
@@ -32,20 +32,21 @@
 
 #include <asm/tlbflush.h>
 
+#include "mmu_decl.h"
+
 /*
  * This address range defaults to a value that is safe for all
  * platforms which currently set CONFIG_NOT_COHERENT_CACHE. It
  * can be further configured for specific applications under
  * the "Advanced Setup" menu. -Matt
  */
-#define CONSISTENT_BASE		(CONFIG_CONSISTENT_START)
-#define CONSISTENT_END		(CONFIG_CONSISTENT_START + CONFIG_CONSISTENT_SIZE)
+#define CONSISTENT_BASE		(IOREMAP_TOP)
+#define CONSISTENT_END		(CONSISTENT_BASE + CONFIG_CONSISTENT_SIZE)
 #define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
 
 /*
  * This is the page table (2MB) covering uncached, DMA consistent allocations
  */
-static pte_t *consistent_pte;
 static DEFINE_SPINLOCK(consistent_lock);
 
 /*
@@ -148,22 +149,38 @@ static struct ppc_vm_region *ppc_vm_region_find(struct ppc_vm_region *head, unsi
  * virtual and bus address for that space.
  */
 void *
-__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
+__dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
 {
 	struct page *page;
 	struct ppc_vm_region *c;
 	unsigned long order;
-	u64 mask = 0x00ffffff, limit; /* ISA default */
+	u64 mask = ISA_DMA_THRESHOLD, limit;
 
-	if (!consistent_pte) {
-		printk(KERN_ERR "%s: not initialised\n", __func__);
-		dump_stack();
-		return NULL;
+	if (dev) {
+		mask = dev->coherent_dma_mask;
+
+		/*
+		 * Sanity check the DMA mask - it must be non-zero, and
+		 * must be able to be satisfied by a DMA allocation.
+		 */
+		if (mask == 0) {
+			dev_warn(dev, "coherent DMA mask is unset\n");
+			goto no_page;
+		}
+
+		if ((~mask) & ISA_DMA_THRESHOLD) {
+			dev_warn(dev, "coherent DMA mask %#llx is smaller "
+				 "than system GFP_DMA mask %#llx\n",
+				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
+			goto no_page;
+		}
 	}
 
+
 	size = PAGE_ALIGN(size);
 	limit = (mask + 1) & ~mask;
-	if ((limit && size >= limit) || size >= (CONSISTENT_END - CONSISTENT_BASE)) {
+	if ((limit && size >= limit) ||
+	    size >= (CONSISTENT_END - CONSISTENT_BASE)) {
 		printk(KERN_WARNING "coherent allocation too big (requested %#x mask %#Lx)\n",
 		       size, mask);
 		return NULL;
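The limit computed above only caps the size for masks that are a contiguous
run of low bits. Worked through (assuming ISA_DMA_THRESHOLD carries the
24-bit 0x00ffffff value of the literal it replaces, which this diff does not
show):

	mask = 0x00ffffff:  limit = (mask + 1) & ~mask
	                          = 0x01000000 & ~0x00ffffff
	                          = 0x01000000          /* 16MB cap */
	mask = 0xffffffff:  limit = 0x100000000 & ~0xffffffff
	                          = 0x100000000         /* 4GB cap  */
	mask = ~0ULL:       limit = 0                   /* mask + 1 wraps */

An all-ones 64-bit mask wraps limit to zero, which the "limit &&"
short-circuit in the size check deliberately treats as "no cap".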
@@ -171,6 +188,7 @@ __dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
 
 	order = get_order(size);
 
+	/* Might be useful if we ever have a real legacy DMA zone... */
 	if (mask != 0xffffffff)
 		gfp |= GFP_DMA;
 
@@ -195,7 +213,6 @@ __dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
 		 gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
 	if (c) {
 		unsigned long vaddr = c->vm_start;
-		pte_t *pte = consistent_pte + CONSISTENT_OFFSET(vaddr);
 		struct page *end = page + (1 << order);
 
 		split_page(page, order);
@@ -206,13 +223,10 @@ __dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
 		*handle = page_to_phys(page);
 
 		do {
-			BUG_ON(!pte_none(*pte));
-
 			SetPageReserved(page);
-			set_pte_at(&init_mm, vaddr,
-				   pte, mk_pte(page, pgprot_noncached(PAGE_KERNEL)));
+			map_page(vaddr, page_to_phys(page),
+				 pgprot_noncached(PAGE_KERNEL));
 			page++;
-			pte++;
 			vaddr += PAGE_SIZE;
 		} while (size -= PAGE_SIZE);
 
@@ -241,8 +255,7 @@ void __dma_free_coherent(size_t size, void *vaddr)
 {
 	struct ppc_vm_region *c;
 	unsigned long flags, addr;
-	pte_t *ptep;
-
+
 	size = PAGE_ALIGN(size);
 
 	spin_lock_irqsave(&consistent_lock, flags);
@@ -258,29 +271,26 @@ void __dma_free_coherent(size_t size, void *vaddr)
 		size = c->vm_end - c->vm_start;
 	}
 
-	ptep = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
 	addr = c->vm_start;
 	do {
-		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
+		pte_t *ptep;
 		unsigned long pfn;
 
-		ptep++;
-		addr += PAGE_SIZE;
-
-		if (!pte_none(pte) && pte_present(pte)) {
-			pfn = pte_pfn(pte);
-
+		ptep = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(addr),
+							       addr),
+						    addr),
+					 addr);
+		if (!pte_none(*ptep) && pte_present(*ptep)) {
+			pfn = pte_pfn(*ptep);
+			pte_clear(&init_mm, addr, ptep);
 			if (pfn_valid(pfn)) {
 				struct page *page = pfn_to_page(pfn);
-				ClearPageReserved(page);
 
+				ClearPageReserved(page);
 				__free_page(page);
-				continue;
 			}
 		}
-
-		printk(KERN_CRIT "%s: bad page in kernel page table\n",
-		       __func__);
+		addr += PAGE_SIZE;
 	} while (size -= PAGE_SIZE);
 
 	flush_tlb_kernel_range(c->vm_start, c->vm_end);
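The open-coded lookup above replaces pointer arithmetic on the removed
consistent_pte table with a generic walk of the kernel page tables; it cannot
fail to find the tables because map_page() instantiated them at allocation
time. Unrolled one level per line, it is equivalent to (a readability sketch,
not part of the patch; ppc32 folds the pud level away):

	pgd_t *pgd  = pgd_offset_k(addr);          /* top-level kernel entry */
	pud_t *pud  = pud_offset(pgd, addr);       /* folded on 32-bit       */
	pmd_t *pmd  = pmd_offset(pud, addr);
	pte_t *ptep = pte_offset_kernel(pmd, addr);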
@@ -301,42 +311,6 @@ void __dma_free_coherent(size_t size, void *vaddr)
 EXPORT_SYMBOL(__dma_free_coherent);
 
 /*
- * Initialise the consistent memory allocation.
- */
-static int __init dma_alloc_init(void)
-{
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
-	int ret = 0;
-
-	do {
-		pgd = pgd_offset(&init_mm, CONSISTENT_BASE);
-		pud = pud_alloc(&init_mm, pgd, CONSISTENT_BASE);
-		pmd = pmd_alloc(&init_mm, pud, CONSISTENT_BASE);
-		if (!pmd) {
-			printk(KERN_ERR "%s: no pmd tables\n", __func__);
-			ret = -ENOMEM;
-			break;
-		}
-
-		pte = pte_alloc_kernel(pmd, CONSISTENT_BASE);
-		if (!pte) {
-			printk(KERN_ERR "%s: no pte tables\n", __func__);
-			ret = -ENOMEM;
-			break;
-		}
-
-		consistent_pte = pte;
-	} while (0);
-
-	return ret;
-}
-
-core_initcall(dma_alloc_init);
-
-/*
  * make an area consistent.
  */
 void __dma_sync(void *vaddr, size_t size, int direction)
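Deleting dma_alloc_init() is what lifts the changelog's single-PTE-page
constraint: instead of one preallocated PTE page covering the entire pool,
map_page(), declared in the newly included mmu_decl.h, allocates page-table
pages on demand. Its shape in this era's arch/powerpc/mm/pgtable_32.c was
roughly the following (reproduced from memory as a sketch, not part of this
patch; 'flags' here is the raw pgprot value):

	int map_page(unsigned long va, phys_addr_t pa, int flags)
	{
		pmd_t *pd;
		pte_t *pg;
		int err = -ENOMEM;

		/* index the first-level map */
		pd = pmd_offset(pud_offset(pgd_offset_k(va), va), va);
		/* allocate the second-level table on demand */
		pg = pte_alloc_kernel(pd, va);
		if (pg != NULL) {
			err = 0;
			set_pte_at(&init_mm, va, pg,
				   pfn_pte(pa >> PAGE_SHIFT, __pgprot(flags)));
		}
		return err;
	}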
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index d3a4e67561fa..579382c163a9 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -387,6 +387,10 @@ void __init mem_init(void)
 	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
 		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
 #endif /* CONFIG_HIGHMEM */
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	pr_info("  * 0x%08lx..0x%08lx  : consistent mem\n",
+		IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE);
+#endif /* CONFIG_NOT_COHERENT_CACHE */
 	pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
 		ioremap_bot, IOREMAP_TOP);
 	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",