Diffstat (limited to 'arch/powerpc/mm/pgtable_64.c')
-rw-r--r--	arch/powerpc/mm/pgtable_64.c	206
1 file changed, 49 insertions, 157 deletions
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index ad6e135bf212..3dfd10db931a 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -7,7 +7,6 @@
  * Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
  * and Cort Dougan (PReP) (cort@cs.nmt.edu)
  * Copyright (C) 1996 Paul Mackerras
- * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
  *
  * Derived from "arch/i386/mm/init.c"
  * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
@@ -34,41 +33,27 @@
 #include <linux/stddef.h>
 #include <linux/vmalloc.h>
 #include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/bootmem.h>
-#include <linux/highmem.h>
-#include <linux/idr.h>
-#include <linux/nodemask.h>
-#include <linux/module.h>
 
 #include <asm/pgalloc.h>
 #include <asm/page.h>
 #include <asm/prom.h>
-#include <asm/lmb.h>
-#include <asm/rtas.h>
 #include <asm/io.h>
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 #include <asm/mmu.h>
-#include <asm/uaccess.h>
 #include <asm/smp.h>
 #include <asm/machdep.h>
 #include <asm/tlb.h>
-#include <asm/eeh.h>
 #include <asm/processor.h>
-#include <asm/mmzone.h>
 #include <asm/cputable.h>
 #include <asm/sections.h>
 #include <asm/system.h>
-#include <asm/iommu.h>
 #include <asm/abs_addr.h>
-#include <asm/vdso.h>
 #include <asm/firmware.h>
 
 #include "mmu_decl.h"
 
-unsigned long ioremap_bot = IMALLOC_BASE;
-static unsigned long phbs_io_bot = PHBS_IO_BASE;
+unsigned long ioremap_bot = IOREMAP_BASE;
 
 /*
  * map_io_page currently only called by __ioremap
@@ -102,8 +87,8 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags)
 		 * entry in the hardware page table.
 		 *
 		 */
-		if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
-				      mmu_io_psize)) {
+		if (htab_bolt_mapping(ea, (unsigned long)ea + PAGE_SIZE,
+				      pa, flags, mmu_io_psize)) {
 			printk(KERN_ERR "Failed to do bolted mapping IO "
 			       "memory at %016lx !\n", pa);
 			return -ENOMEM;
@@ -113,8 +98,11 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags)
 }
 
 
-static void __iomem * __ioremap_com(phys_addr_t addr, unsigned long pa,
-				    unsigned long ea, unsigned long size,
+/**
+ * __ioremap_at - Low level function to establish the page tables
+ *                for an IO mapping
+ */
+void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
 			    unsigned long flags)
 {
 	unsigned long i;
@@ -122,17 +110,35 @@ static void __iomem * __ioremap_com(phys_addr_t addr, unsigned long pa,
 	if ((flags & _PAGE_PRESENT) == 0)
 		flags |= pgprot_val(PAGE_KERNEL);
 
+	WARN_ON(pa & ~PAGE_MASK);
+	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
+	WARN_ON(size & ~PAGE_MASK);
+
 	for (i = 0; i < size; i += PAGE_SIZE)
-		if (map_io_page(ea+i, pa+i, flags))
+		if (map_io_page((unsigned long)ea+i, pa+i, flags))
 			return NULL;
 
-	return (void __iomem *) (ea + (addr & ~PAGE_MASK));
+	return (void __iomem *)ea;
+}
+
+/**
+ * __iounmap_at - Low level function to tear down the page tables
+ *                for an IO mapping. This is used for mappings that
+ *                are manipulated manually, like partial unmapping of
+ *                PCI IOs or ISA space.
+ */
+void __iounmap_at(void *ea, unsigned long size)
+{
+	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
+	WARN_ON(size & ~PAGE_MASK);
+
+	unmap_kernel_range((unsigned long)ea, size);
 }
 
 void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
 			 unsigned long flags)
 {
-	unsigned long pa, ea;
+	phys_addr_t paligned;
 	void __iomem *ret;
 
 	/*
@@ -144,27 +150,30 @@ void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
 	 * IMALLOC_END
 	 *
 	 */
-	pa = addr & PAGE_MASK;
-	size = PAGE_ALIGN(addr + size) - pa;
+	paligned = addr & PAGE_MASK;
+	size = PAGE_ALIGN(addr + size) - paligned;
 
-	if ((size == 0) || (pa == 0))
+	if ((size == 0) || (paligned == 0))
 		return NULL;
 
 	if (mem_init_done) {
 		struct vm_struct *area;
-		area = im_get_free_area(size);
+
+		area = __get_vm_area(size, VM_IOREMAP,
+				     ioremap_bot, IOREMAP_END);
 		if (area == NULL)
 			return NULL;
-		ea = (unsigned long)(area->addr);
-		ret = __ioremap_com(addr, pa, ea, size, flags);
+		ret = __ioremap_at(paligned, area->addr, size, flags);
 		if (!ret)
-			im_free(area->addr);
+			vunmap(area->addr);
 	} else {
-		ea = ioremap_bot;
-		ret = __ioremap_com(addr, pa, ea, size, flags);
+		ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags);
 		if (ret)
 			ioremap_bot += size;
 	}
+
+	if (ret)
+		ret += addr & ~PAGE_MASK;
 	return ret;
 }
 
@@ -187,62 +196,9 @@ void __iomem * ioremap_flags(phys_addr_t addr, unsigned long size,
 }
 
 
-#define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))
-
-int __ioremap_explicit(phys_addr_t pa, unsigned long ea,
-		       unsigned long size, unsigned long flags)
-{
-	struct vm_struct *area;
-	void __iomem *ret;
-
-	/* For now, require page-aligned values for pa, ea, and size */
-	if (!IS_PAGE_ALIGNED(pa) || !IS_PAGE_ALIGNED(ea) ||
-	    !IS_PAGE_ALIGNED(size)) {
-		printk(KERN_ERR "unaligned value in %s\n", __FUNCTION__);
-		return 1;
-	}
-
-	if (!mem_init_done) {
-		/* Two things to consider in this case:
-		 * 1) No records will be kept (imalloc, etc) that the region
-		 *    has been remapped
-		 * 2) It won't be easy to iounmap() the region later (because
-		 *    of 1)
-		 */
-		;
-	} else {
-		area = im_get_area(ea, size,
-			IM_REGION_UNUSED|IM_REGION_SUBSET|IM_REGION_EXISTS);
-		if (area == NULL) {
-			/* Expected when PHB-dlpar is in play */
-			return 1;
-		}
-		if (ea != (unsigned long) area->addr) {
-			printk(KERN_ERR "unexpected addr return from "
-			       "im_get_area\n");
-			return 1;
-		}
-	}
-
-	ret = __ioremap_com(pa, pa, ea, size, flags);
-	if (ret == NULL) {
-		printk(KERN_ERR "ioremap_explicit() allocation failure !\n");
-		return 1;
-	}
-	if (ret != (void *) ea) {
-		printk(KERN_ERR "__ioremap_com() returned unexpected addr\n");
-		return 1;
-	}
-
-	return 0;
-}
-
 /*
  * Unmap an IO region and remove it from imalloc'd list.
  * Access to IO memory should be serialized by driver.
- * This code is modeled after vmalloc code - unmap_vm_area()
- *
- * XXX what about calls before mem_init_done (ie python_countermeasures())
  */
 void __iounmap(volatile void __iomem *token)
 {
@@ -251,9 +207,14 @@ void __iounmap(volatile void __iomem *token)
 	if (!mem_init_done)
 		return;
 
-	addr = (void *) ((unsigned long __force) token & PAGE_MASK);
-
-	im_free(addr);
+	addr = (void *) ((unsigned long __force)
+			 PCI_FIX_ADDR(token) & PAGE_MASK);
+	if ((unsigned long)addr < ioremap_bot) {
+		printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
+		       " at 0x%p\n", addr);
+		return;
+	}
+	vunmap(addr);
 }
 
 void iounmap(volatile void __iomem *token)
@@ -264,77 +225,8 @@ void iounmap(volatile void __iomem *token)
 	__iounmap(token);
 }
 
-static int iounmap_subset_regions(unsigned long addr, unsigned long size)
-{
-	struct vm_struct *area;
-
-	/* Check whether subsets of this region exist */
-	area = im_get_area(addr, size, IM_REGION_SUPERSET);
-	if (area == NULL)
-		return 1;
-
-	while (area) {
-		iounmap((void __iomem *) area->addr);
-		area = im_get_area(addr, size,
-				IM_REGION_SUPERSET);
-	}
-
-	return 0;
-}
-
-int __iounmap_explicit(volatile void __iomem *start, unsigned long size)
-{
-	struct vm_struct *area;
-	unsigned long addr;
-	int rc;
-
-	addr = (unsigned long __force) start & PAGE_MASK;
-
-	/* Verify that the region either exists or is a subset of an existing
-	 * region. In the latter case, split the parent region to create
-	 * the exact region
-	 */
-	area = im_get_area(addr, size,
-			   IM_REGION_EXISTS | IM_REGION_SUBSET);
-	if (area == NULL) {
-		/* Determine whether subset regions exist. If so, unmap */
-		rc = iounmap_subset_regions(addr, size);
-		if (rc) {
-			printk(KERN_ERR
-			       "%s() cannot unmap nonexistent range 0x%lx\n",
-			       __FUNCTION__, addr);
-			return 1;
-		}
-	} else {
-		iounmap((void __iomem *) area->addr);
-	}
-	/*
-	 * FIXME! This can't be right:
-	iounmap(area->addr);
-	 * Maybe it should be "iounmap(area);"
-	 */
-	return 0;
-}
-
 EXPORT_SYMBOL(ioremap);
 EXPORT_SYMBOL(ioremap_flags);
 EXPORT_SYMBOL(__ioremap);
 EXPORT_SYMBOL(iounmap);
 EXPORT_SYMBOL(__iounmap);
-
-static DEFINE_SPINLOCK(phb_io_lock);
-
-void __iomem * reserve_phb_iospace(unsigned long size)
-{
-	void __iomem *virt_addr;
-
-	if (phbs_io_bot >= IMALLOC_BASE)
-		panic("reserve_phb_iospace(): phb io space overflow\n");
-
-	spin_lock(&phb_io_lock);
-	virt_addr = (void __iomem *) phbs_io_bot;
-	phbs_io_bot += size;
-	spin_unlock(&phb_io_lock);
-
-	return virt_addr;
-}
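
Usage note (not part of the commit): a minimal sketch of how a caller such as a PCI host-bridge setup path might drive the two new low-level entry points, __ioremap_at() and __iounmap_at(). The example_* names and the 64K size are hypothetical; the flags assume the usual powerpc non-cacheable, guarded IO page attributes.

#include <linux/vmalloc.h>	/* __get_vm_area(), vunmap(), VM_IOREMAP */
#include <asm/io.h>
#include <asm/pgtable.h>	/* _PAGE_NO_CACHE, _PAGE_GUARDED */

extern unsigned long ioremap_bot;	/* defined in pgtable_64.c */

/* Hypothetical helper: map 64K of bridge IO space. Mirrors what
 * __ioremap() itself now does: reserve virtual space in the ioremap
 * window, then populate the page tables for it. */
static void __iomem *example_map_phb_io(phys_addr_t io_phys)
{
	struct vm_struct *area;
	void __iomem *va;

	area = __get_vm_area(0x10000, VM_IOREMAP, ioremap_bot, IOREMAP_END);
	if (area == NULL)
		return NULL;

	va = __ioremap_at(io_phys, area->addr, 0x10000,
			  _PAGE_NO_CACHE | _PAGE_GUARDED);
	if (va == NULL)
		vunmap(area->addr);	/* release the virtual space on failure */
	return va;
}

/* Hypothetical helper: tear down one page of such a mapping, e.g. a
 * partially unmapped PCI IO range. __iounmap_at() only removes the
 * page tables; the virtual space reservation stays with the caller. */
static void example_unmap_one_page(void __iomem *va)
{
	__iounmap_at((void __force *)va, PAGE_SIZE);
}

On success, __ioremap_at() returns the page-aligned ea it was given, so a caller that needs a non-page-aligned result adds the offset itself, exactly as __ioremap() now does with "ret += addr & ~PAGE_MASK".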