author		Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-06 16:20:10 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-06 16:20:10 -0400
commit		c6799ade4ae04b53a5f677e5289116155ff01574 (patch)
tree		3601b5e2387e39d62c207e4268c6cc5c68f2a364 /arch/arm/mm
parent		b7405e16435f710edfae6ba32bef4ca20d3de145 (diff)
parent		5cd47155155a32e5b944ac9fc3f3dc578e429aa0 (diff)
Merge branch 'for-linus' of master.kernel.org:/home/rmk/linux-2.6-arm
* 'for-linus' of master.kernel.org:/home/rmk/linux-2.6-arm: (82 commits)
  [ARM] Add comments marking in-use ptrace numbers
  [ARM] Move syscall saving out of the way of utrace
  [ARM] 4360/1: S3C24XX: regs-udc.h remove unused macro
  [ARM] 4358/1: S3C24XX: mach-qt2410.c: remove linux/mmc/protocol.h header
  [ARM] mm 10: allow memory type to be specified with ioremap
  [ARM] mm 9: add additional device memory types
  [ARM] mm 8: define mem_types table L1 bit 4 to be for ARMv6
  [ARM] iop: add missing parens in macro
  [ARM] mm 7: remove duplicated __ioremap() prototypes
  ARM: OMAP: fix OMAP1 mpuio suspend/resume oops
  ARM: OMAP: MPUIO wake updates
  ARM: OMAP: speed up gpio irq handling
  ARM: OMAP: plat-omap changes for 2430 SDP
  ARM: OMAP: gpio object shrinkage, cleanup
  ARM: OMAP: /sys/kernel/debug/omap_gpio
  ARM: OMAP: Implement workaround for GPIO wakeup bug in OMAP2420 silicon
  ARM: OMAP: Enable 24xx GPIO autoidling
  [ARM] 4318/2: DSM-G600 Board Support
  [ARM] 4227/1: minor head.S fixups
  [ARM] 4328/1: Move i.MX UART regs to driver
  ...
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--	arch/arm/mm/alignment.c		|   1
-rw-r--r--	arch/arm/mm/fault.c		|   5
-rw-r--r--	arch/arm/mm/init.c		|   1
-rw-r--r--	arch/arm/mm/ioremap.c		|  80
-rw-r--r--	arch/arm/mm/mm.h		|  10
-rw-r--r--	arch/arm/mm/mmu.c		| 349
-rw-r--r--	arch/arm/mm/nommu.c		|  12
-rw-r--r--	arch/arm/mm/proc-xscale.S	|  28
8 files changed, 251 insertions(+), 235 deletions(-)
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index aa109f074dd9..19ca333240ec 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -15,7 +15,6 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/string.h>
-#include <linux/ptrace.h>
 #include <linux/proc_fs.h>
 #include <linux/init.h>
 
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 9fd6d2eafb40..5d9ce7deb4a7 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -10,7 +10,6 @@
  */
 #include <linux/module.h>
 #include <linux/signal.h>
-#include <linux/ptrace.h>
 #include <linux/mm.h>
 #include <linux/init.h>
 
@@ -438,7 +437,7 @@ hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *)
 /*
  * Dispatch a data abort to the relevant handler.
  */
-asmlinkage void
+asmlinkage void __exception
 do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 {
 	const struct fsr_info *inf = fsr_info + (fsr & 15) + ((fsr & (1 << 10)) >> 6);
@@ -457,7 +456,7 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	notify_die("", regs, &info, fsr, 0);
 }
 
-asmlinkage void
+asmlinkage void __exception
 do_PrefetchAbort(unsigned long addr, struct pt_regs *regs)
 {
 	do_translation_fault(addr, 0, regs);
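
Note on the new annotation: __exception moves these abort handlers into a dedicated text section so the backtrace code can recognise exception entry frames. A hedged sketch of the presumed definition (assumed to live in include/asm-arm/system.h of this era; it is not part of this diff):

    /* presumed definition, not part of this diff: tag the function so it
     * lands in .exception.text, which the ARM unwinder treats as an
     * exception entry point when walking stack frames */
    #define __exception	__attribute__((section(".exception.text")))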
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 7760193e74cc..c0ad7c0fbae0 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -9,7 +9,6 @@
  */
 #include <linux/kernel.h>
 #include <linux/errno.h>
-#include <linux/ptrace.h>
 #include <linux/swap.h>
 #include <linux/init.h>
 #include <linux/bootmem.h>
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 0ac615c0f798..d6167ad4e011 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -32,6 +32,9 @@
 #include <asm/tlbflush.h>
 #include <asm/sizes.h>
 
+#include <asm/mach/map.h>
+#include "mm.h"
+
 /*
  * Used by ioremap() and iounmap() code to mark (super)section-mapped
  * I/O regions in vm_struct->flags field.
@@ -39,8 +42,9 @@
 #define VM_ARM_SECTION_MAPPING	0x80000000
 
 static int remap_area_pte(pmd_t *pmd, unsigned long addr, unsigned long end,
-			  unsigned long phys_addr, pgprot_t prot)
+			  unsigned long phys_addr, const struct mem_type *type)
 {
+	pgprot_t prot = __pgprot(type->prot_pte);
 	pte_t *pte;
 
 	pte = pte_alloc_kernel(pmd, addr);
@@ -51,7 +55,8 @@ static int remap_area_pte(pmd_t *pmd, unsigned long addr, unsigned long end,
 		if (!pte_none(*pte))
 			goto bad;
 
-		set_pte_ext(pte, pfn_pte(phys_addr >> PAGE_SHIFT, prot), 0);
+		set_pte_ext(pte, pfn_pte(phys_addr >> PAGE_SHIFT, prot),
+			    type->prot_pte_ext);
 		phys_addr += PAGE_SIZE;
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 	return 0;
@@ -63,7 +68,7 @@ static int remap_area_pte(pmd_t *pmd, unsigned long addr, unsigned long end,
 
 static inline int remap_area_pmd(pgd_t *pgd, unsigned long addr,
 				 unsigned long end, unsigned long phys_addr,
-				 pgprot_t prot)
+				 const struct mem_type *type)
 {
 	unsigned long next;
 	pmd_t *pmd;
@@ -75,7 +80,7 @@ static inline int remap_area_pmd(pgd_t *pgd, unsigned long addr,
 
 	do {
 		next = pmd_addr_end(addr, end);
-		ret = remap_area_pte(pmd, addr, next, phys_addr, prot);
+		ret = remap_area_pte(pmd, addr, next, phys_addr, type);
 		if (ret)
 			return ret;
 		phys_addr += next - addr;
@@ -84,13 +89,11 @@ static inline int remap_area_pmd(pgd_t *pgd, unsigned long addr,
 }
 
 static int remap_area_pages(unsigned long start, unsigned long pfn,
-			    unsigned long size, unsigned long flags)
+			    size_t size, const struct mem_type *type)
 {
 	unsigned long addr = start;
 	unsigned long next, end = start + size;
 	unsigned long phys_addr = __pfn_to_phys(pfn);
-	pgprot_t prot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
-				 L_PTE_DIRTY | L_PTE_WRITE | flags);
 	pgd_t *pgd;
 	int err = 0;
 
@@ -98,7 +101,7 @@ static int remap_area_pages(unsigned long start, unsigned long pfn,
 	pgd = pgd_offset_k(addr);
 	do {
 		next = pgd_addr_end(addr, end);
-		err = remap_area_pmd(pgd, addr, next, phys_addr, prot);
+		err = remap_area_pmd(pgd, addr, next, phys_addr, type);
 		if (err)
 			break;
 		phys_addr += next - addr;
@@ -178,9 +181,9 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
 
 static int
 remap_area_sections(unsigned long virt, unsigned long pfn,
-		    unsigned long size, unsigned long flags)
+		    size_t size, const struct mem_type *type)
 {
-	unsigned long prot, addr = virt, end = virt + size;
+	unsigned long addr = virt, end = virt + size;
 	pgd_t *pgd;
 
 	/*
@@ -189,23 +192,13 @@ remap_area_sections(unsigned long virt, unsigned long pfn,
 	 */
 	unmap_area_sections(virt, size);
 
-	prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_DOMAIN(DOMAIN_IO) |
-	       (flags & (L_PTE_CACHEABLE | L_PTE_BUFFERABLE));
-
-	/*
-	 * ARMv6 and above need XN set to prevent speculative prefetches
-	 * hitting IO.
-	 */
-	if (cpu_architecture() >= CPU_ARCH_ARMv6)
-		prot |= PMD_SECT_XN;
-
 	pgd = pgd_offset_k(addr);
 	do {
 		pmd_t *pmd = pmd_offset(pgd, addr);
 
-		pmd[0] = __pmd(__pfn_to_phys(pfn) | prot);
+		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
 		pfn += SZ_1M >> PAGE_SHIFT;
-		pmd[1] = __pmd(__pfn_to_phys(pfn) | prot);
+		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
 		pfn += SZ_1M >> PAGE_SHIFT;
 		flush_pmd_entry(pmd);
 
@@ -218,9 +211,9 @@ remap_area_sections(unsigned long virt, unsigned long pfn,
 
 static int
 remap_area_supersections(unsigned long virt, unsigned long pfn,
-			 unsigned long size, unsigned long flags)
+			 size_t size, const struct mem_type *type)
 {
-	unsigned long prot, addr = virt, end = virt + size;
+	unsigned long addr = virt, end = virt + size;
 	pgd_t *pgd;
 
 	/*
@@ -229,22 +222,12 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
 	 */
 	unmap_area_sections(virt, size);
 
-	prot = PMD_TYPE_SECT | PMD_SECT_SUPER | PMD_SECT_AP_WRITE |
-	       PMD_DOMAIN(DOMAIN_IO) |
-	       (flags & (L_PTE_CACHEABLE | L_PTE_BUFFERABLE));
-
-	/*
-	 * ARMv6 and above need XN set to prevent speculative prefetches
-	 * hitting IO.
-	 */
-	if (cpu_architecture() >= CPU_ARCH_ARMv6)
-		prot |= PMD_SECT_XN;
-
 	pgd = pgd_offset_k(virt);
 	do {
 		unsigned long super_pmd_val, i;
 
-		super_pmd_val = __pfn_to_phys(pfn) | prot;
+		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
+				PMD_SECT_SUPER;
 		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;
 
 		for (i = 0; i < 8; i++) {
@@ -279,9 +262,10 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
  * mapping. See include/asm-arm/proc-armv/pgtable.h for more information.
  */
 void __iomem *
-__ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
-	      unsigned long flags)
+__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
+		  unsigned int mtype)
 {
+	const struct mem_type *type;
 	int err;
 	unsigned long addr;
 	struct vm_struct * area;
@@ -292,6 +276,10 @@ __ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
 	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
 		return NULL;
 
+	type = get_mem_type(mtype);
+	if (!type)
+		return NULL;
+
 	size = PAGE_ALIGN(size);
 
 	area = get_vm_area(size, VM_IOREMAP);
@@ -302,16 +290,16 @@ __ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
 #ifndef CONFIG_SMP
 	if (DOMAIN_IO == 0 &&
 	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
-	       cpu_is_xsc3()) &&
+	       cpu_is_xsc3()) && pfn >= 0x100000 &&
 	    !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
 		area->flags |= VM_ARM_SECTION_MAPPING;
-		err = remap_area_supersections(addr, pfn, size, flags);
+		err = remap_area_supersections(addr, pfn, size, type);
 	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
 		area->flags |= VM_ARM_SECTION_MAPPING;
-		err = remap_area_sections(addr, pfn, size, flags);
+		err = remap_area_sections(addr, pfn, size, type);
 	} else
 #endif
-		err = remap_area_pages(addr, pfn, size, flags);
+		err = remap_area_pages(addr, pfn, size, type);
 
 	if (err) {
 		vunmap((void *)addr);
@@ -321,10 +309,10 @@ __ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
 	flush_cache_vmap(addr, addr + size);
 	return (void __iomem *) (offset + addr);
 }
-EXPORT_SYMBOL(__ioremap_pfn);
+EXPORT_SYMBOL(__arm_ioremap_pfn);
 
 void __iomem *
-__ioremap(unsigned long phys_addr, size_t size, unsigned long flags)
+__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
 {
 	unsigned long last_addr;
 	unsigned long offset = phys_addr & ~PAGE_MASK;
@@ -342,9 +330,9 @@ __ioremap(unsigned long phys_addr, size_t size, unsigned long flags)
 	 */
 	size = PAGE_ALIGN(last_addr + 1) - phys_addr;
 
-	return __ioremap_pfn(pfn, offset, size, flags);
+	return __arm_ioremap_pfn(pfn, offset, size, mtype);
 }
-EXPORT_SYMBOL(__ioremap);
+EXPORT_SYMBOL(__arm_ioremap);
 
 void __iounmap(volatile void __iomem *addr)
 {
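
With this rework, ioremap callers select a memory type index (MT_DEVICE and friends, resolved through get_mem_type()) instead of passing raw L_PTE_* flag bits. A minimal usage sketch of the new interface, assuming a hypothetical device register block at physical address 0x48000000 (address and register offset are illustrative only):

    #include <asm/io.h>

    void __iomem *regs;

    /* strongly ordered device mapping via the reworked interface */
    regs = __arm_ioremap(0x48000000, PAGE_SIZE, MT_DEVICE);
    if (regs) {
            writel(1, regs + 0x10);    /* poke an illustrative register */
            __iounmap(regs);
    }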
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index a44e30970635..7647c597fc59 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -16,6 +16,16 @@ static inline pmd_t *pmd_off_k(unsigned long virt)
 	return pmd_off(pgd_offset_k(virt), virt);
 }
 
+struct mem_type {
+	unsigned int prot_pte;
+	unsigned int prot_pte_ext;
+	unsigned int prot_l1;
+	unsigned int prot_sect;
+	unsigned int domain;
+};
+
+const struct mem_type *get_mem_type(unsigned int type);
+
 #endif
 
 struct map_desc;
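
get_mem_type(), defined in mmu.c below, bounds-checks the index and returns a read-only view of the corresponding mem_types[] entry. A simplified sketch of the lookup pattern the ioremap path above now follows:

    const struct mem_type *type = get_mem_type(mtype);
    if (!type)
            return NULL;    /* unknown memory type index */

    /* section-sized mappings take prot_sect; page mappings take
     * prot_pte plus prot_pte_ext for the extended PTE bits */
    pmd_t section = __pmd(__pfn_to_phys(pfn) | type->prot_sect);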
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 94fd4bf5cb9e..2ba1530d1ce1 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -176,28 +176,42 @@ void adjust_cr(unsigned long mask, unsigned long set)
 }
 #endif
 
-struct mem_types {
-	unsigned int prot_pte;
-	unsigned int prot_l1;
-	unsigned int prot_sect;
-	unsigned int domain;
-};
-
-static struct mem_types mem_types[] __initdata = {
-	[MT_DEVICE] = {
-		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				L_PTE_WRITE,
-		.prot_l1   = PMD_TYPE_TABLE,
-		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_UNCACHED |
-				PMD_SECT_AP_WRITE,
-		.domain    = DOMAIN_IO,
+#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_WRITE
+#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_XN|PMD_SECT_AP_WRITE
+
+static struct mem_type mem_types[] = {
+	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
+		.prot_pte	= PROT_PTE_DEVICE,
+		.prot_l1	= PMD_TYPE_TABLE,
+		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_UNCACHED,
+		.domain		= DOMAIN_IO,
+	},
+	[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
+		.prot_pte	= PROT_PTE_DEVICE,
+		.prot_pte_ext	= PTE_EXT_TEX(2),
+		.prot_l1	= PMD_TYPE_TABLE,
+		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_TEX(2),
+		.domain		= DOMAIN_IO,
+	},
+	[MT_DEVICE_CACHED] = {	  /* ioremap_cached */
+		.prot_pte	= PROT_PTE_DEVICE | L_PTE_CACHEABLE | L_PTE_BUFFERABLE,
+		.prot_l1	= PMD_TYPE_TABLE,
+		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
+		.domain		= DOMAIN_IO,
+	},
+	[MT_DEVICE_IXP2000] = {	  /* IXP2400 requires XCB=101 for on-chip I/O */
+		.prot_pte	= PROT_PTE_DEVICE,
+		.prot_l1	= PMD_TYPE_TABLE,
+		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_BUFFERABLE |
+				  PMD_SECT_TEX(1),
+		.domain		= DOMAIN_IO,
 	},
 	[MT_CACHECLEAN] = {
-		.prot_sect = PMD_TYPE_SECT | PMD_BIT4,
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
 		.domain    = DOMAIN_KERNEL,
 	},
 	[MT_MINICLEAN] = {
-		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_MINICACHE,
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
 		.domain    = DOMAIN_KERNEL,
 	},
 	[MT_LOW_VECTORS] = {
@@ -213,30 +227,20 @@ static struct mem_types mem_types[] __initdata = {
 		.domain    = DOMAIN_USER,
 	},
 	[MT_MEMORY] = {
-		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_AP_WRITE,
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 		.domain    = DOMAIN_KERNEL,
 	},
 	[MT_ROM] = {
-		.prot_sect = PMD_TYPE_SECT | PMD_BIT4,
+		.prot_sect = PMD_TYPE_SECT,
 		.domain    = DOMAIN_KERNEL,
 	},
-	[MT_IXP2000_DEVICE] = { /* IXP2400 requires XCB=101 for on-chip I/O */
-		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				L_PTE_WRITE,
-		.prot_l1   = PMD_TYPE_TABLE,
-		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_UNCACHED |
-				PMD_SECT_AP_WRITE | PMD_SECT_BUFFERABLE |
-				PMD_SECT_TEX(1),
-		.domain    = DOMAIN_IO,
-	},
-	[MT_NONSHARED_DEVICE] = {
-		.prot_l1   = PMD_TYPE_TABLE,
-		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_NONSHARED_DEV |
-				PMD_SECT_AP_WRITE,
-		.domain    = DOMAIN_IO,
-	}
 };
 
+const struct mem_type *get_mem_type(unsigned int type)
+{
+	return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
+}
+
 /*
  * Adjust the PMD section entries according to the CPU in use.
  */
@@ -262,20 +266,23 @@ static void __init build_mem_type_table(void)
 	}
 
 	/*
-	 * Xscale must not have PMD bit 4 set for section mappings.
+	 * ARMv5 and lower, bit 4 must be set for page tables.
+	 * (was: cache "update-able on write" bit on ARM610)
+	 * However, Xscale cores require this bit to be cleared.
 	 */
-	if (cpu_is_xscale())
-		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
+	if (cpu_is_xscale()) {
+		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
 			mem_types[i].prot_sect &= ~PMD_BIT4;
-
-	/*
-	 * ARMv5 and lower, excluding Xscale, bit 4 must be set for
-	 * page tables.
-	 */
-	if (cpu_arch < CPU_ARCH_ARMv6 && !cpu_is_xscale())
-		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
+			mem_types[i].prot_l1 &= ~PMD_BIT4;
+		}
+	} else if (cpu_arch < CPU_ARCH_ARMv6) {
+		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
 			if (mem_types[i].prot_l1)
 				mem_types[i].prot_l1 |= PMD_BIT4;
+			if (mem_types[i].prot_sect)
+				mem_types[i].prot_sect |= PMD_BIT4;
+		}
+	}
 
 	cp = &cache_policies[cachepolicy];
 	kern_pgprot = user_pgprot = cp->pte;
@@ -296,13 +303,6 @@ static void __init build_mem_type_table(void)
 	 */
 	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
 		/*
-		 * bit 4 becomes XN which we must clear for the
-		 * kernel memory mapping.
-		 */
-		mem_types[MT_MEMORY].prot_sect &= ~PMD_SECT_XN;
-		mem_types[MT_ROM].prot_sect &= ~PMD_SECT_XN;
-
-		/*
 		 * Mark cache clean areas and XIP ROM read only
 		 * from SVC mode and no access from userspace.
 		 */
@@ -368,64 +368,126 @@ static void __init build_mem_type_table(void)
 	}
 	printk("Memory policy: ECC %sabled, Data cache %s\n",
 		ecc_mask ? "en" : "dis", cp->policy);
+
+	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
+		struct mem_type *t = &mem_types[i];
+		if (t->prot_l1)
+			t->prot_l1 |= PMD_DOMAIN(t->domain);
+		if (t->prot_sect)
+			t->prot_sect |= PMD_DOMAIN(t->domain);
+	}
 }
 
 #define vectors_base()	(vectors_high() ? 0xffff0000 : 0)
 
-/*
- * Create a SECTION PGD between VIRT and PHYS in domain
- * DOMAIN with protection PROT. This operates on half-
- * pgdir entry increments.
- */
-static inline void
-alloc_init_section(unsigned long virt, unsigned long phys, int prot)
+static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
+				  unsigned long end, unsigned long pfn,
+				  const struct mem_type *type)
 {
-	pmd_t *pmdp = pmd_off_k(virt);
+	pte_t *pte;
 
-	if (virt & (1 << 20))
-		pmdp++;
+	if (pmd_none(*pmd)) {
+		pte = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t));
+		__pmd_populate(pmd, __pa(pte) | type->prot_l1);
+	}
 
-	*pmdp = __pmd(phys | prot);
-	flush_pmd_entry(pmdp);
+	pte = pte_offset_kernel(pmd, addr);
+	do {
+		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)),
+			    type->prot_pte_ext);
+		pfn++;
+	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
-/*
- * Create a SUPER SECTION PGD between VIRT and PHYS with protection PROT
- */
-static inline void
-alloc_init_supersection(unsigned long virt, unsigned long phys, int prot)
+static void __init alloc_init_section(pgd_t *pgd, unsigned long addr,
+				      unsigned long end, unsigned long phys,
+				      const struct mem_type *type)
 {
-	int i;
+	pmd_t *pmd = pmd_offset(pgd, addr);
 
-	for (i = 0; i < 16; i += 1) {
-		alloc_init_section(virt, phys, prot | PMD_SECT_SUPER);
+	/*
+	 * Try a section mapping - end, addr and phys must all be aligned
+	 * to a section boundary.  Note that PMDs refer to the individual
+	 * L1 entries, whereas PGDs refer to a group of L1 entries making
+	 * up one logical pointer to an L2 table.
+	 */
+	if (((addr | end | phys) & ~SECTION_MASK) == 0) {
+		pmd_t *p = pmd;
+
+		if (addr & SECTION_SIZE)
+			pmd++;
+
+		do {
+			*pmd = __pmd(phys | type->prot_sect);
+			phys += SECTION_SIZE;
+		} while (pmd++, addr += SECTION_SIZE, addr != end);
 
-		virt += (PGDIR_SIZE / 2);
+		flush_pmd_entry(p);
+	} else {
+		/*
+		 * No need to loop; pte's aren't interested in the
+		 * individual L1 entries.
+		 */
+		alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
 	}
 }
 
-/*
- * Add a PAGE mapping between VIRT and PHYS in domain
- * DOMAIN with protection PROT.  Note that due to the
- * way we map the PTEs, we must allocate two PTE_SIZE'd
- * blocks - one for the Linux pte table, and one for
- * the hardware pte table.
- */
-static inline void
-alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pgprot_t prot)
+static void __init create_36bit_mapping(struct map_desc *md,
+					const struct mem_type *type)
 {
-	pmd_t *pmdp = pmd_off_k(virt);
-	pte_t *ptep;
+	unsigned long phys, addr, length, end;
+	pgd_t *pgd;
+
+	addr = md->virtual;
+	phys = (unsigned long)__pfn_to_phys(md->pfn);
+	length = PAGE_ALIGN(md->length);
+
+	if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
+		printk(KERN_ERR "MM: CPU does not support supersection "
+		       "mapping for 0x%08llx at 0x%08lx\n",
+		       __pfn_to_phys((u64)md->pfn), addr);
+		return;
+	}
 
-	if (pmd_none(*pmdp)) {
-		ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
-					       sizeof(pte_t));
+	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
+	 *	Since domain assignments can in fact be arbitrary, the
+	 *	'domain == 0' check below is required to insure that ARMv6
+	 *	supersections are only allocated for domain 0 regardless
+	 *	of the actual domain assignments in use.
+	 */
+	if (type->domain) {
+		printk(KERN_ERR "MM: invalid domain in supersection "
+		       "mapping for 0x%08llx at 0x%08lx\n",
+		       __pfn_to_phys((u64)md->pfn), addr);
+		return;
+	}
 
-		__pmd_populate(pmdp, __pa(ptep) | prot_l1);
+	if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
+		printk(KERN_ERR "MM: cannot create mapping for "
+		       "0x%08llx at 0x%08lx invalid alignment\n",
+		       __pfn_to_phys((u64)md->pfn), addr);
+		return;
 	}
-	ptep = pte_offset_kernel(pmdp, virt);
 
-	set_pte_ext(ptep, pfn_pte(phys >> PAGE_SHIFT, prot), 0);
+	/*
+	 * Shift bits [35:32] of address into bits [23:20] of PMD
+	 * (See ARMv6 spec).
+	 */
+	phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
+
+	pgd = pgd_offset_k(addr);
+	end = addr + length;
+	do {
+		pmd_t *pmd = pmd_offset(pgd, addr);
+		int i;
+
+		for (i = 0; i < 16; i++)
+			*pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);
+
+		addr += SUPERSECTION_SIZE;
+		phys += SUPERSECTION_SIZE;
+		pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
+	} while (addr != end);
 }
 
 /*
@@ -437,10 +499,9 @@ alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pg
  */
 void __init create_mapping(struct map_desc *md)
 {
-	unsigned long virt, length;
-	int prot_sect, prot_l1, domain;
-	pgprot_t prot_pte;
-	unsigned long off = (u32)__pfn_to_phys(md->pfn);
+	unsigned long phys, addr, length, end;
+	const struct mem_type *type;
+	pgd_t *pgd;
 
 	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
 		printk(KERN_WARNING "BUG: not creating mapping for "
@@ -456,105 +517,37 @@ void __init create_mapping(struct map_desc *md)
 			__pfn_to_phys((u64)md->pfn), md->virtual);
 	}
 
-	domain	  = mem_types[md->type].domain;
-	prot_pte  = __pgprot(mem_types[md->type].prot_pte);
-	prot_l1   = mem_types[md->type].prot_l1 | PMD_DOMAIN(domain);
-	prot_sect = mem_types[md->type].prot_sect | PMD_DOMAIN(domain);
+	type = &mem_types[md->type];
 
 	/*
 	 * Catch 36-bit addresses
 	 */
-	if(md->pfn >= 0x100000) {
-		if(domain) {
-			printk(KERN_ERR "MM: invalid domain in supersection "
-				"mapping for 0x%08llx at 0x%08lx\n",
-				__pfn_to_phys((u64)md->pfn), md->virtual);
-			return;
-		}
-		if((md->virtual | md->length | __pfn_to_phys(md->pfn))
-			& ~SUPERSECTION_MASK) {
-			printk(KERN_ERR "MM: cannot create mapping for "
-				"0x%08llx at 0x%08lx invalid alignment\n",
-				__pfn_to_phys((u64)md->pfn), md->virtual);
-			return;
-		}
-
-		/*
-		 * Shift bits [35:32] of address into bits [23:20] of PMD
-		 * (See ARMv6 spec).
-		 */
-		off |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
+	if (md->pfn >= 0x100000) {
+		create_36bit_mapping(md, type);
+		return;
 	}
 
-	virt   = md->virtual;
-	off   -= virt;
-	length = md->length;
+	addr = md->virtual;
+	phys = (unsigned long)__pfn_to_phys(md->pfn);
+	length = PAGE_ALIGN(md->length);
 
-	if (mem_types[md->type].prot_l1 == 0 &&
-	    (virt & 0xfffff || (virt + off) & 0xfffff || (virt + length) & 0xfffff)) {
+	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
 		printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
 		       "be mapped using pages, ignoring.\n",
-		       __pfn_to_phys(md->pfn), md->virtual);
+		       __pfn_to_phys(md->pfn), addr);
 		return;
 	}
 
-	while ((virt & 0xfffff || (virt + off) & 0xfffff) && length >= PAGE_SIZE) {
-		alloc_init_page(virt, virt + off, prot_l1, prot_pte);
-
-		virt   += PAGE_SIZE;
-		length -= PAGE_SIZE;
-	}
-
-	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
-	 *	Since domain assignments can in fact be arbitrary, the
-	 *	'domain == 0' check below is required to insure that ARMv6
-	 *	supersections are only allocated for domain 0 regardless
-	 *	of the actual domain assignments in use.
-	 */
-	if ((cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())
-		&& domain == 0) {
-		/*
-		 * Align to supersection boundary if !high pages.
-		 * High pages have already been checked for proper
-		 * alignment above and they will fail the SUPSERSECTION_MASK
-		 * check because of the way the address is encoded into
-		 * offset.
-		 */
-		if (md->pfn <= 0x100000) {
-			while ((virt & ~SUPERSECTION_MASK ||
-			        (virt + off) & ~SUPERSECTION_MASK) &&
-				length >= (PGDIR_SIZE / 2)) {
-				alloc_init_section(virt, virt + off, prot_sect);
-
-				virt   += (PGDIR_SIZE / 2);
-				length -= (PGDIR_SIZE / 2);
-			}
-		}
-
-		while (length >= SUPERSECTION_SIZE) {
-			alloc_init_supersection(virt, virt + off, prot_sect);
-
-			virt   += SUPERSECTION_SIZE;
-			length -= SUPERSECTION_SIZE;
-		}
-	}
-
-	/*
-	 * A section mapping covers half a "pgdir" entry.
-	 */
-	while (length >= (PGDIR_SIZE / 2)) {
-		alloc_init_section(virt, virt + off, prot_sect);
-
-		virt   += (PGDIR_SIZE / 2);
-		length -= (PGDIR_SIZE / 2);
-	}
+	pgd = pgd_offset_k(addr);
+	end = addr + length;
+	do {
+		unsigned long next = pgd_addr_end(addr, end);
 
-	while (length >= PAGE_SIZE) {
-		alloc_init_page(virt, virt + off, prot_l1, prot_pte);
+		alloc_init_section(pgd, addr, next, phys, type);
 
-		virt   += PAGE_SIZE;
-		length -= PAGE_SIZE;
-	}
+		phys += next - addr;
+		addr = next;
+	} while (pgd++, addr != end);
 }
 
 /*
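
The supersection address encoding in create_36bit_mapping() is easiest to verify with a concrete value. A worked example, assuming 4 KiB pages (PAGE_SHIFT == 12) and a hypothetical pfn of 0x140000, i.e. the 36-bit physical address 0x1_4000_0000:

    unsigned long pfn  = 0x140000;          /* phys 0x1_4000_0000 */
    unsigned long phys = (unsigned long)__pfn_to_phys(pfn);
                                            /* truncated to 0x40000000 */

    /* bits [35:32] of the address (here 0x1) land in PMD bits [23:20] */
    phys |= ((pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20;
                                            /* phys == 0x40100000 */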
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 05818fc0c705..8cd3a60954f0 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -62,21 +62,21 @@ void flush_dcache_page(struct page *page)
 }
 EXPORT_SYMBOL(flush_dcache_page);
 
-void __iomem *__ioremap_pfn(unsigned long pfn, unsigned long offset,
-			    size_t size, unsigned long flags)
+void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset,
+				size_t size, unsigned int mtype)
 {
 	if (pfn >= (0x100000000ULL >> PAGE_SHIFT))
 		return NULL;
 	return (void __iomem *) (offset + (pfn << PAGE_SHIFT));
 }
-EXPORT_SYMBOL(__ioremap_pfn);
+EXPORT_SYMBOL(__arm_ioremap_pfn);
 
-void __iomem *__ioremap(unsigned long phys_addr, size_t size,
-			unsigned long flags)
+void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size,
+			    unsigned int mtype)
 {
 	return (void __iomem *)phys_addr;
 }
-EXPORT_SYMBOL(__ioremap);
+EXPORT_SYMBOL(__arm_ioremap);
 
 void __iounmap(volatile void __iomem *addr)
 {
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index d29fe927ee9e..c156ddab9a2d 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -584,6 +584,11 @@ cpu_ixp42x_name:
 	.asciz	"XScale-IXP42x Family"
 	.size	cpu_ixp42x_name, . - cpu_ixp42x_name
 
+	.type	cpu_ixp43x_name, #object
+cpu_ixp43x_name:
+	.asciz	"XScale-IXP43x Family"
+	.size	cpu_ixp43x_name, . - cpu_ixp43x_name
+
 	.type	cpu_ixp46x_name, #object
 cpu_ixp46x_name:
 	.asciz	"XScale-IXP46x Family"
@@ -843,6 +848,29 @@ __ixp42x_proc_info:
 	.long	xscale_cache_fns
 	.size	__ixp42x_proc_info, . - __ixp42x_proc_info
 
+	.type	__ixp43x_proc_info, #object
+__ixp43x_proc_info:
+	.long	0x69054040
+	.long	0xfffffff0
+	.long	PMD_TYPE_SECT | \
+		PMD_SECT_BUFFERABLE | \
+		PMD_SECT_CACHEABLE | \
+		PMD_SECT_AP_WRITE | \
+		PMD_SECT_AP_READ
+	.long	PMD_TYPE_SECT | \
+		PMD_SECT_AP_WRITE | \
+		PMD_SECT_AP_READ
+	b	__xscale_setup
+	.long	cpu_arch_name
+	.long	cpu_elf_name
+	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
+	.long	cpu_ixp43x_name
+	.long	xscale_processor_functions
+	.long	v4wbi_tlb_fns
+	.long	xscale_mc_user_fns
+	.long	xscale_cache_fns
+	.size	__ixp43x_proc_info, . - __ixp43x_proc_info
+
 	.type	__ixp46x_proc_info, #object
 __ixp46x_proc_info:
 	.long	0x69054200
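
For context on the two magic words at the top of the new record: early CPU detection walks the proc_info list and accepts an entry when (MIDR & mask) == value; the real lookup is assembly in arch/arm/kernel/head-common.S. An illustrative C rendering of the match the __ixp43x_proc_info entry performs (read_cpuid() usage is a sketch):

    unsigned int midr = read_cpuid(CPUID_ID);  /* CP15 c0, Main ID register */

    /* mask 0xfffffff0 ignores the low revision nibble, so every
     * IXP43x stepping matches the single 0x69054040 entry */
    int is_ixp43x = (midr & 0xfffffff0) == 0x69054040;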