author	Linus Torvalds <torvalds@linux-foundation.org>	2009-05-27 13:58:23 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-05-27 13:58:23 -0400
commit	b14f3bd90d2202a83f36eac85bcb3db0fba7d6a6 (patch)
tree	bad35a2937e40ca211a70dbfbb5432b864eab724
parent	911e690e70540f009125bacd16c017eb1a7b1916 (diff)
parent	8b31e49d1d75729c1da9009664ba52abd1adc628 (diff)
Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc:
  powerpc: Fix up dma_alloc_coherent() on platforms without cache coherency.
  powerpc: Minor cleanups of kernel virt address space definitions
  powerpc: Move dma-noncoherent.c from arch/powerpc/lib to arch/powerpc/mm
  Revert "powerpc: Rework dma-noncoherent to use generic vmalloc layer"
-rw-r--r--  arch/powerpc/Kconfig                      |  12
-rw-r--r--  arch/powerpc/include/asm/dma-mapping.h    |   6
-rw-r--r--  arch/powerpc/include/asm/fixmap.h         |   4
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc32.h  |  26
-rw-r--r--  arch/powerpc/kernel/dma.c                 |   2
-rw-r--r--  arch/powerpc/lib/Makefile                 |   1
-rw-r--r--  arch/powerpc/lib/dma-noncoherent.c        | 237
-rw-r--r--  arch/powerpc/mm/Makefile                  |   1
-rw-r--r--  arch/powerpc/mm/dma-noncoherent.c         | 400
-rw-r--r--  arch/powerpc/mm/init_32.c                 |   8
-rw-r--r--  arch/powerpc/mm/mem.c                     |  17
-rw-r--r--  arch/powerpc/mm/pgtable_32.c              |   2
12 files changed, 463 insertions(+), 253 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index a0d1146a0578..cdc9a6ff4be8 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -868,6 +868,18 @@ config TASK_SIZE
 	default "0x80000000" if PPC_PREP || PPC_8xx
 	default "0xc0000000"
 
+config CONSISTENT_SIZE_BOOL
+	bool "Set custom consistent memory pool size"
+	depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE
+	help
+	  This option allows you to set the size of the
+	  consistent memory pool. This pool of virtual memory
+	  is used to make consistent memory allocations.
+
+config CONSISTENT_SIZE
+	hex "Size of consistent memory pool" if CONSISTENT_SIZE_BOOL
+	default "0x00200000" if NOT_COHERENT_CACHE
+
 config PIN_TLB
 	bool "Pinned Kernel TLBs (860 ONLY)"
 	depends on ADVANCED_OPTIONS && 8xx
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index c69f2b5f0cc4..cb448d68452c 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -26,7 +26,9 @@
  * allocate the space "normally" and use the cache management functions
  * to ensure it is consistent.
  */
-extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp);
+struct device;
+extern void *__dma_alloc_coherent(struct device *dev, size_t size,
+				  dma_addr_t *handle, gfp_t gfp);
 extern void __dma_free_coherent(size_t size, void *vaddr);
 extern void __dma_sync(void *vaddr, size_t size, int direction);
 extern void __dma_sync_page(struct page *page, unsigned long offset,
@@ -37,7 +39,7 @@ extern void __dma_sync_page(struct page *page, unsigned long offset,
  * Cache coherent cores.
  */
 
-#define __dma_alloc_coherent(gfp, size, handle)	NULL
+#define __dma_alloc_coherent(dev, gfp, size, handle)	NULL
 #define __dma_free_coherent(size, addr)		((void)0)
 #define __dma_sync(addr, size, rw)		((void)0)
 #define __dma_sync_page(pg, off, sz, rw)	((void)0)
diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
index d60fd18f428c..f1f4e23a84e9 100644
--- a/arch/powerpc/include/asm/fixmap.h
+++ b/arch/powerpc/include/asm/fixmap.h
@@ -14,8 +14,6 @@
 #ifndef _ASM_FIXMAP_H
 #define _ASM_FIXMAP_H
 
-extern unsigned long FIXADDR_TOP;
-
 #ifndef __ASSEMBLY__
 #include <linux/kernel.h>
 #include <asm/page.h>
@@ -24,6 +22,8 @@ extern unsigned long FIXADDR_TOP;
 #include <asm/kmap_types.h>
 #endif
 
+#define FIXADDR_TOP	((unsigned long)(-PAGE_SIZE))
+
 /*
  * Here we define all the compile-time 'special' virtual
  * addresses. The point is to have a constant address at
diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index ba45c997830f..c9ff9d75990e 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -10,7 +10,7 @@
 
 extern unsigned long va_to_phys(unsigned long address);
 extern pte_t *va_to_pte(unsigned long address);
-extern unsigned long ioremap_bot, ioremap_base;
+extern unsigned long ioremap_bot;
 
 #ifdef CONFIG_44x
 extern int icache_44x_need_flush;
@@ -56,8 +56,30 @@ extern int icache_44x_need_flush;
 	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
 
 /*
+ * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
+ * value (for now) on others, from where we can start layout kernel
+ * virtual space that goes below PKMAP and FIXMAP
+ */
+#ifdef CONFIG_HIGHMEM
+#define KVIRT_TOP	PKMAP_BASE
+#else
+#define KVIRT_TOP	(0xfe000000UL)	/* for now, could be FIXMAP_BASE ? */
+#endif
+
+/*
+ * ioremap_bot starts at that address. Early ioremaps move down from there,
+ * until mem_init() at which point this becomes the top of the vmalloc
+ * and ioremap space
+ */
+#ifdef CONFIG_NOT_COHERENT_CACHE
+#define IOREMAP_TOP	((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK)
+#else
+#define IOREMAP_TOP	KVIRT_TOP
+#endif
+
+/*
  * Just any arbitrary offset to the start of the vmalloc VM area: the
- * current 64MB value just means that there will be a 64MB "hole" after the
+ * current 16MB value just means that there will be a 64MB "hole" after the
  * physical memory until the kernel virtual memory starts. That means that
  * any out-of-bounds memory accesses will hopefully be caught.
  * The vmalloc() routines leaves a hole of 4kB between each vmalloced
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 53c7788cba78..6b02793dc75b 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -32,7 +32,7 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size,
 {
 	void *ret;
 #ifdef CONFIG_NOT_COHERENT_CACHE
-	ret = __dma_alloc_coherent(size, dma_handle, flag);
+	ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
 	if (ret == NULL)
 		return NULL;
 	*dma_handle += get_dma_direct_offset(dev);
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 8db35278a4b4..29b742b90f1f 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -18,7 +18,6 @@ obj-$(CONFIG_PPC64) += copypage_64.o copyuser_64.o \
 			   memcpy_64.o usercopy_64.o mem_64.o string.o
 obj-$(CONFIG_XMON)	+= sstep.o
 obj-$(CONFIG_KPROBES)	+= sstep.o
-obj-$(CONFIG_NOT_COHERENT_CACHE)	+= dma-noncoherent.o
 
 ifeq ($(CONFIG_PPC64),y)
 obj-$(CONFIG_SMP)	+= locks.o
diff --git a/arch/powerpc/lib/dma-noncoherent.c b/arch/powerpc/lib/dma-noncoherent.c
deleted file mode 100644
index 005a28d380af..000000000000
--- a/arch/powerpc/lib/dma-noncoherent.c
+++ /dev/null
@@ -1,237 +0,0 @@
-/*
- * PowerPC version derived from arch/arm/mm/consistent.c
- * Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
- *
- * Copyright (C) 2000 Russell King
- *
- * Consistent memory allocators. Used for DMA devices that want to
- * share uncached memory with the processor core. The function return
- * is the virtual address and 'dma_handle' is the physical address.
- * Mostly stolen from the ARM port, with some changes for PowerPC.
- * -- Dan
- *
- * Reorganized to get rid of the arch-specific consistent_* functions
- * and provide non-coherent implementations for the DMA API. -Matt
- *
- * Added in_interrupt() safe dma_alloc_coherent()/dma_free_coherent()
- * implementation. This is pulled straight from ARM and barely
- * modified. -Matt
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/highmem.h>
-#include <linux/dma-mapping.h>
-#include <linux/vmalloc.h>
-
-#include <asm/tlbflush.h>
-
-/*
- * Allocate DMA-coherent memory space and return both the kernel remapped
- * virtual and bus address for that space.
- */
-void *
-__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
-{
-	struct page *page;
-	unsigned long order;
-	int i;
-	unsigned int nr_pages = PAGE_ALIGN(size)>>PAGE_SHIFT;
-	unsigned int array_size = nr_pages * sizeof(struct page *);
-	struct page **pages;
-	struct page *end;
-	u64 mask = 0x00ffffff, limit; /* ISA default */
-	struct vm_struct *area;
-
-	BUG_ON(!mem_init_done);
-	size = PAGE_ALIGN(size);
-	limit = (mask + 1) & ~mask;
-	if (limit && size >= limit) {
-		printk(KERN_WARNING "coherent allocation too big (requested "
-		       "%#x mask %#Lx)\n", size, mask);
-		return NULL;
-	}
-
-	order = get_order(size);
-
-	if (mask != 0xffffffff)
-		gfp |= GFP_DMA;
-
-	page = alloc_pages(gfp, order);
-	if (!page)
-		goto no_page;
-
-	end = page + (1 << order);
-
-	/*
-	 * Invalidate any data that might be lurking in the
-	 * kernel direct-mapped region for device DMA.
-	 */
-	{
-		unsigned long kaddr = (unsigned long)page_address(page);
-		memset(page_address(page), 0, size);
-		flush_dcache_range(kaddr, kaddr + size);
-	}
-
-	split_page(page, order);
-
-	/*
-	 * Set the "dma handle"
-	 */
-	*handle = page_to_phys(page);
-
-	area = get_vm_area_caller(size, VM_IOREMAP,
-			__builtin_return_address(1));
-	if (!area)
-		goto out_free_pages;
-
-	if (array_size > PAGE_SIZE) {
-		pages = vmalloc(array_size);
-		area->flags |= VM_VPAGES;
-	} else {
-		pages = kmalloc(array_size, GFP_KERNEL);
-	}
-	if (!pages)
-		goto out_free_area;
-
-	area->pages = pages;
-	area->nr_pages = nr_pages;
-
-	for (i = 0; i < nr_pages; i++)
-		pages[i] = page + i;
-
-	if (map_vm_area(area, pgprot_noncached(PAGE_KERNEL), &pages))
-		goto out_unmap;
-
-	/*
-	 * Free the otherwise unused pages.
-	 */
-	page += nr_pages;
-	while (page < end) {
-		__free_page(page);
-		page++;
-	}
-
-	return area->addr;
-out_unmap:
-	vunmap(area->addr);
-	if (array_size > PAGE_SIZE)
-		vfree(pages);
-	else
-		kfree(pages);
-	goto out_free_pages;
-out_free_area:
-	free_vm_area(area);
-out_free_pages:
-	if (page)
-		__free_pages(page, order);
-no_page:
-	return NULL;
-}
-EXPORT_SYMBOL(__dma_alloc_coherent);
-
-/*
- * free a page as defined by the above mapping.
- */
-void __dma_free_coherent(size_t size, void *vaddr)
-{
-	vfree(vaddr);
-
-}
-EXPORT_SYMBOL(__dma_free_coherent);
-
-/*
- * make an area consistent.
- */
-void __dma_sync(void *vaddr, size_t size, int direction)
-{
-	unsigned long start = (unsigned long)vaddr;
-	unsigned long end = start + size;
-
-	switch (direction) {
-	case DMA_NONE:
-		BUG();
-	case DMA_FROM_DEVICE:
-		/*
-		 * invalidate only when cache-line aligned otherwise there is
-		 * the potential for discarding uncommitted data from the cache
-		 */
-		if ((start & (L1_CACHE_BYTES - 1)) || (size & (L1_CACHE_BYTES - 1)))
-			flush_dcache_range(start, end);
-		else
-			invalidate_dcache_range(start, end);
-		break;
-	case DMA_TO_DEVICE:		/* writeback only */
-		clean_dcache_range(start, end);
-		break;
-	case DMA_BIDIRECTIONAL:	/* writeback and invalidate */
-		flush_dcache_range(start, end);
-		break;
-	}
-}
-EXPORT_SYMBOL(__dma_sync);
-
-#ifdef CONFIG_HIGHMEM
-/*
- * __dma_sync_page() implementation for systems using highmem.
- * In this case, each page of a buffer must be kmapped/kunmapped
- * in order to have a virtual address for __dma_sync(). This must
- * not sleep so kmap_atomic()/kunmap_atomic() are used.
- *
- * Note: yes, it is possible and correct to have a buffer extend
- * beyond the first page.
- */
-static inline void __dma_sync_page_highmem(struct page *page,
-		unsigned long offset, size_t size, int direction)
-{
-	size_t seg_size = min((size_t)(PAGE_SIZE - offset), size);
-	size_t cur_size = seg_size;
-	unsigned long flags, start, seg_offset = offset;
-	int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE;
-	int seg_nr = 0;
-
-	local_irq_save(flags);
-
-	do {
-		start = (unsigned long)kmap_atomic(page + seg_nr,
-				KM_PPC_SYNC_PAGE) + seg_offset;
-
-		/* Sync this buffer segment */
-		__dma_sync((void *)start, seg_size, direction);
-		kunmap_atomic((void *)start, KM_PPC_SYNC_PAGE);
-		seg_nr++;
-
-		/* Calculate next buffer segment size */
-		seg_size = min((size_t)PAGE_SIZE, size - cur_size);
-
-		/* Add the segment size to our running total */
-		cur_size += seg_size;
-		seg_offset = 0;
-	} while (seg_nr < nr_segs);
-
-	local_irq_restore(flags);
-}
-#endif /* CONFIG_HIGHMEM */
-
-/*
- * __dma_sync_page makes memory consistent. identical to __dma_sync, but
- * takes a struct page instead of a virtual address
- */
-void __dma_sync_page(struct page *page, unsigned long offset,
-	size_t size, int direction)
-{
-#ifdef CONFIG_HIGHMEM
-	__dma_sync_page_highmem(page, offset, size, direction);
-#else
-	unsigned long start = (unsigned long)page_address(page) + offset;
-	__dma_sync((void *)start, size, direction);
-#endif
-}
-EXPORT_SYMBOL(__dma_sync_page);
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 17290bcedc5e..b746f4ca4209 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -26,3 +26,4 @@ obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
 obj-$(CONFIG_PPC_MM_SLICES)	+= slice.o
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
 obj-$(CONFIG_PPC_SUBPAGE_PROT)	+= subpage-prot.o
+obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o
diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c
new file mode 100644
index 000000000000..36692f5c9a76
--- /dev/null
+++ b/arch/powerpc/mm/dma-noncoherent.c
@@ -0,0 +1,400 @@
+/*
+ * PowerPC version derived from arch/arm/mm/consistent.c
+ * Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
+ *
+ * Copyright (C) 2000 Russell King
+ *
+ * Consistent memory allocators. Used for DMA devices that want to
+ * share uncached memory with the processor core. The function return
+ * is the virtual address and 'dma_handle' is the physical address.
+ * Mostly stolen from the ARM port, with some changes for PowerPC.
+ * -- Dan
+ *
+ * Reorganized to get rid of the arch-specific consistent_* functions
+ * and provide non-coherent implementations for the DMA API. -Matt
+ *
+ * Added in_interrupt() safe dma_alloc_coherent()/dma_free_coherent()
+ * implementation. This is pulled straight from ARM and barely
+ * modified. -Matt
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/highmem.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/tlbflush.h>
+
+#include "mmu_decl.h"
+
+/*
+ * This address range defaults to a value that is safe for all
+ * platforms which currently set CONFIG_NOT_COHERENT_CACHE. It
+ * can be further configured for specific applications under
+ * the "Advanced Setup" menu. -Matt
+ */
+#define CONSISTENT_BASE		(IOREMAP_TOP)
+#define CONSISTENT_END		(CONSISTENT_BASE + CONFIG_CONSISTENT_SIZE)
+#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
+
+/*
+ * This is the page table (2MB) covering uncached, DMA consistent allocations
+ */
+static DEFINE_SPINLOCK(consistent_lock);
+
+/*
+ * VM region handling support.
+ *
+ * This should become something generic, handling VM region allocations for
+ * vmalloc and similar (ioremap, module space, etc).
+ *
+ * I envisage vmalloc()'s supporting vm_struct becoming:
+ *
+ *  struct vm_struct {
+ *    struct vm_region	region;
+ *    unsigned long	flags;
+ *    struct page	**pages;
+ *    unsigned int	nr_pages;
+ *    unsigned long	phys_addr;
+ *  };
+ *
+ * get_vm_area() would then call vm_region_alloc with an appropriate
+ * struct vm_region head (eg):
+ *
+ *  struct vm_region vmalloc_head = {
+ *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
+ *	.vm_start	= VMALLOC_START,
+ *	.vm_end		= VMALLOC_END,
+ *  };
+ *
+ * However, vmalloc_head.vm_start is variable (typically, it is dependent on
+ * the amount of RAM found at boot time.) I would imagine that get_vm_area()
+ * would have to initialise this each time prior to calling vm_region_alloc().
+ */
+struct ppc_vm_region {
+	struct list_head	vm_list;
+	unsigned long		vm_start;
+	unsigned long		vm_end;
+};
+
+static struct ppc_vm_region consistent_head = {
+	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
+	.vm_start	= CONSISTENT_BASE,
+	.vm_end		= CONSISTENT_END,
+};
+
+static struct ppc_vm_region *
+ppc_vm_region_alloc(struct ppc_vm_region *head, size_t size, gfp_t gfp)
+{
+	unsigned long addr = head->vm_start, end = head->vm_end - size;
+	unsigned long flags;
+	struct ppc_vm_region *c, *new;
+
+	new = kmalloc(sizeof(struct ppc_vm_region), gfp);
+	if (!new)
+		goto out;
+
+	spin_lock_irqsave(&consistent_lock, flags);
+
+	list_for_each_entry(c, &head->vm_list, vm_list) {
+		if ((addr + size) < addr)
+			goto nospc;
+		if ((addr + size) <= c->vm_start)
+			goto found;
+		addr = c->vm_end;
+		if (addr > end)
+			goto nospc;
+	}
+
+ found:
+	/*
+	 * Insert this entry _before_ the one we found.
+	 */
+	list_add_tail(&new->vm_list, &c->vm_list);
+	new->vm_start = addr;
+	new->vm_end = addr + size;
+
+	spin_unlock_irqrestore(&consistent_lock, flags);
+	return new;
+
+ nospc:
+	spin_unlock_irqrestore(&consistent_lock, flags);
+	kfree(new);
+ out:
+	return NULL;
+}
+
+static struct ppc_vm_region *ppc_vm_region_find(struct ppc_vm_region *head, unsigned long addr)
+{
+	struct ppc_vm_region *c;
+
+	list_for_each_entry(c, &head->vm_list, vm_list) {
+		if (c->vm_start == addr)
+			goto out;
+	}
+	c = NULL;
+ out:
+	return c;
+}
+
+/*
+ * Allocate DMA-coherent memory space and return both the kernel remapped
+ * virtual and bus address for that space.
+ */
+void *
+__dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
+{
+	struct page *page;
+	struct ppc_vm_region *c;
+	unsigned long order;
+	u64 mask = ISA_DMA_THRESHOLD, limit;
+
+	if (dev) {
+		mask = dev->coherent_dma_mask;
+
+		/*
+		 * Sanity check the DMA mask - it must be non-zero, and
+		 * must be able to be satisfied by a DMA allocation.
+		 */
+		if (mask == 0) {
+			dev_warn(dev, "coherent DMA mask is unset\n");
+			goto no_page;
+		}
+
+		if ((~mask) & ISA_DMA_THRESHOLD) {
+			dev_warn(dev, "coherent DMA mask %#llx is smaller "
+				 "than system GFP_DMA mask %#llx\n",
+				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
+			goto no_page;
+		}
+	}
+
+
+	size = PAGE_ALIGN(size);
+	limit = (mask + 1) & ~mask;
+	if ((limit && size >= limit) ||
+	    size >= (CONSISTENT_END - CONSISTENT_BASE)) {
+		printk(KERN_WARNING "coherent allocation too big (requested %#x mask %#Lx)\n",
+		       size, mask);
+		return NULL;
+	}
+
+	order = get_order(size);
+
+	/* Might be useful if we ever have a real legacy DMA zone... */
+	if (mask != 0xffffffff)
+		gfp |= GFP_DMA;
+
+	page = alloc_pages(gfp, order);
+	if (!page)
+		goto no_page;
+
+	/*
+	 * Invalidate any data that might be lurking in the
+	 * kernel direct-mapped region for device DMA.
+	 */
+	{
+		unsigned long kaddr = (unsigned long)page_address(page);
+		memset(page_address(page), 0, size);
+		flush_dcache_range(kaddr, kaddr + size);
+	}
+
+	/*
+	 * Allocate a virtual address in the consistent mapping region.
+	 */
+	c = ppc_vm_region_alloc(&consistent_head, size,
+				gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
+	if (c) {
+		unsigned long vaddr = c->vm_start;
+		struct page *end = page + (1 << order);
+
+		split_page(page, order);
+
+		/*
+		 * Set the "dma handle"
+		 */
+		*handle = page_to_phys(page);
+
+		do {
+			SetPageReserved(page);
+			map_page(vaddr, page_to_phys(page),
+				 pgprot_noncached(PAGE_KERNEL));
+			page++;
+			vaddr += PAGE_SIZE;
+		} while (size -= PAGE_SIZE);
+
+		/*
+		 * Free the otherwise unused pages.
+		 */
+		while (page < end) {
+			__free_page(page);
+			page++;
+		}
+
+		return (void *)c->vm_start;
+	}
+
+	if (page)
+		__free_pages(page, order);
+ no_page:
+	return NULL;
+}
+EXPORT_SYMBOL(__dma_alloc_coherent);
+
+/*
+ * free a page as defined by the above mapping.
+ */
+void __dma_free_coherent(size_t size, void *vaddr)
+{
+	struct ppc_vm_region *c;
+	unsigned long flags, addr;
+
+	size = PAGE_ALIGN(size);
+
+	spin_lock_irqsave(&consistent_lock, flags);
+
+	c = ppc_vm_region_find(&consistent_head, (unsigned long)vaddr);
+	if (!c)
+		goto no_area;
+
+	if ((c->vm_end - c->vm_start) != size) {
+		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
+		       __func__, c->vm_end - c->vm_start, size);
+		dump_stack();
+		size = c->vm_end - c->vm_start;
+	}
+
+	addr = c->vm_start;
+	do {
+		pte_t *ptep;
+		unsigned long pfn;
+
+		ptep = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(addr),
+							       addr),
+						    addr),
+					 addr);
+		if (!pte_none(*ptep) && pte_present(*ptep)) {
+			pfn = pte_pfn(*ptep);
+			pte_clear(&init_mm, addr, ptep);
+			if (pfn_valid(pfn)) {
+				struct page *page = pfn_to_page(pfn);
+
+				ClearPageReserved(page);
+				__free_page(page);
+			}
+		}
+		addr += PAGE_SIZE;
+	} while (size -= PAGE_SIZE);
+
+	flush_tlb_kernel_range(c->vm_start, c->vm_end);
+
+	list_del(&c->vm_list);
+
+	spin_unlock_irqrestore(&consistent_lock, flags);
+
+	kfree(c);
+	return;
+
+ no_area:
+	spin_unlock_irqrestore(&consistent_lock, flags);
+	printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
+	       __func__, vaddr);
+	dump_stack();
+}
+EXPORT_SYMBOL(__dma_free_coherent);
+
+/*
+ * make an area consistent.
+ */
+void __dma_sync(void *vaddr, size_t size, int direction)
+{
+	unsigned long start = (unsigned long)vaddr;
+	unsigned long end = start + size;
+
+	switch (direction) {
+	case DMA_NONE:
+		BUG();
+	case DMA_FROM_DEVICE:
+		/*
+		 * invalidate only when cache-line aligned otherwise there is
+		 * the potential for discarding uncommitted data from the cache
+		 */
+		if ((start & (L1_CACHE_BYTES - 1)) || (size & (L1_CACHE_BYTES - 1)))
+			flush_dcache_range(start, end);
+		else
+			invalidate_dcache_range(start, end);
+		break;
+	case DMA_TO_DEVICE:		/* writeback only */
+		clean_dcache_range(start, end);
+		break;
+	case DMA_BIDIRECTIONAL:	/* writeback and invalidate */
+		flush_dcache_range(start, end);
+		break;
+	}
+}
+EXPORT_SYMBOL(__dma_sync);
+
+#ifdef CONFIG_HIGHMEM
+/*
+ * __dma_sync_page() implementation for systems using highmem.
+ * In this case, each page of a buffer must be kmapped/kunmapped
+ * in order to have a virtual address for __dma_sync(). This must
+ * not sleep so kmap_atomic()/kunmap_atomic() are used.
+ *
+ * Note: yes, it is possible and correct to have a buffer extend
+ * beyond the first page.
+ */
+static inline void __dma_sync_page_highmem(struct page *page,
+		unsigned long offset, size_t size, int direction)
+{
+	size_t seg_size = min((size_t)(PAGE_SIZE - offset), size);
+	size_t cur_size = seg_size;
+	unsigned long flags, start, seg_offset = offset;
+	int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE;
+	int seg_nr = 0;
+
+	local_irq_save(flags);
+
+	do {
+		start = (unsigned long)kmap_atomic(page + seg_nr,
+				KM_PPC_SYNC_PAGE) + seg_offset;
+
+		/* Sync this buffer segment */
+		__dma_sync((void *)start, seg_size, direction);
+		kunmap_atomic((void *)start, KM_PPC_SYNC_PAGE);
+		seg_nr++;
+
+		/* Calculate next buffer segment size */
+		seg_size = min((size_t)PAGE_SIZE, size - cur_size);
+
+		/* Add the segment size to our running total */
+		cur_size += seg_size;
+		seg_offset = 0;
+	} while (seg_nr < nr_segs);
+
+	local_irq_restore(flags);
+}
+#endif /* CONFIG_HIGHMEM */
+
+/*
+ * __dma_sync_page makes memory consistent. identical to __dma_sync, but
+ * takes a struct page instead of a virtual address
+ */
+void __dma_sync_page(struct page *page, unsigned long offset,
+	size_t size, int direction)
+{
+#ifdef CONFIG_HIGHMEM
+	__dma_sync_page_highmem(page, offset, size, direction);
+#else
+	unsigned long start = (unsigned long)page_address(page) + offset;
+	__dma_sync((void *)start, size, direction);
+#endif
+}
+EXPORT_SYMBOL(__dma_sync_page);
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 666a5e8a5be1..3de6a0d93824 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -168,12 +168,8 @@ void __init MMU_init(void)
 		ppc_md.progress("MMU:mapin", 0x301);
 	mapin_ram();
 
-#ifdef CONFIG_HIGHMEM
-	ioremap_base = PKMAP_BASE;
-#else
-	ioremap_base = 0xfe000000UL;	/* for now, could be 0xfffff000 */
-#endif /* CONFIG_HIGHMEM */
-	ioremap_bot = ioremap_base;
+	/* Initialize early top-down ioremap allocator */
+	ioremap_bot = IOREMAP_TOP;
 
 	/* Map in I/O resources */
 	if (ppc_md.progress)
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index d0602a76bf7f..579382c163a9 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -380,6 +380,23 @@ void __init mem_init(void)
 			bsssize >> 10,
 			initsize >> 10);
 
+#ifdef CONFIG_PPC32
+	pr_info("Kernel virtual memory layout:\n");
+	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
+#ifdef CONFIG_HIGHMEM
+	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
+		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
+#endif /* CONFIG_HIGHMEM */
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	pr_info("  * 0x%08lx..0x%08lx  : consistent mem\n",
+		IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE);
+#endif /* CONFIG_NOT_COHERENT_CACHE */
+	pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
+		ioremap_bot, IOREMAP_TOP);
+	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
+		VMALLOC_START, VMALLOC_END);
+#endif /* CONFIG_PPC32 */
+
 	mem_init_done = 1;
 }
 
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 430d0908fa50..5422169626ba 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -399,8 +399,6 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
 #endif /* CONFIG_DEBUG_PAGEALLOC */
 
 static int fixmaps;
-unsigned long FIXADDR_TOP = (-PAGE_SIZE);
-EXPORT_SYMBOL(FIXADDR_TOP);
 
 void __set_fixmap (enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
 {
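
For context only (not part of this commit): a minimal, hypothetical sketch of how a driver consumes the interface the merge reworks. dma_alloc_coherent()/dma_free_coherent() are the generic DMA API entry points; on CONFIG_NOT_COHERENT_CACHE platforms they now reach the reworked __dma_alloc_coherent(dev, ...), which can honour dev->coherent_dma_mask. The example_ring structure and function names below are illustrative assumptions, not code from the kernel tree.

/*
 * Illustrative sketch only: a hypothetical driver allocating and freeing
 * a coherent buffer through the generic DMA API backed by this patch.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

struct example_ring {			/* hypothetical driver state */
	void		*cpu_addr;
	dma_addr_t	dma_addr;
	size_t		len;
};

static int example_ring_alloc(struct device *dev, struct example_ring *ring,
			      size_t len)
{
	/* The device pointer lets the non-coherent allocator check
	 * dev->coherent_dma_mask before picking pages. */
	ring->cpu_addr = dma_alloc_coherent(dev, len, &ring->dma_addr,
					    GFP_KERNEL);
	if (!ring->cpu_addr)
		return -ENOMEM;
	ring->len = len;
	return 0;
}

static void example_ring_free(struct device *dev, struct example_ring *ring)
{
	dma_free_coherent(dev, ring->len, ring->cpu_addr, ring->dma_addr);
}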