author    Russell King <rmk@dyn-67.arm.linux.org.uk>    2008-11-04 05:52:28 -0500
committer Russell King <rmk+kernel@arm.linux.org.uk>    2008-11-06 12:45:32 -0500
commit    b1cce6b1b2785fd61454b47ceacb461815407662 (patch)
tree      90a877fcab8defd4a6770aaab4dcb3de6e69ba04 /arch
parent    ab4f2ee130d5ffcf35616e1f5c6ab75af5b463b6 (diff)
[ARM] mm: fix page table initialization
As a result of the ptebits changes, we ended up marking device mappings
as normal memory on ARMv7 CPUs, resulting in undesirable behaviour with
serial ports and the like.  While reviewing the section mapping table
entries, other errors in the memory type settings for devices were
detected and confirmed to prevent Xscale3 platforms booting.

Tested on:
  OMAP34xx (ARMv7), OMAP24xx (ARMv6), OMAP16xx (ARM926T, ARMv5),
  PXA311 (Xscale3), PXA272 (Xscale), PXA255 (Xscale), IXP42x (Xscale),
  S3C2410 (ARM920T, ARMv4T), ARM720T (ARMv4T), StrongARM-110 (ARMv4)

Acked-by: Tony Lindgren <tony@atomide.com>
Tested-by: Robert Jarzmik <robert.jarzmik@free.fr>
Tested-by: Mike Rapoport <mike@compulab.co.il>
Tested-by: Ben Dooks <ben-linux@fluff.org>
Tested-by: Anders Grafström <grfstrm@users.sourceforge.net>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
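
Why this matters in practice: drivers rely on ioremap() returning a
Device (strongly-ordered or shared-device) mapping, where accesses reach
the peripheral in program order, exactly once.  If the page tables
instead mark the region as Normal memory, the CPU may merge, repeat, or
speculatively issue those accesses.  A minimal sketch of the kind of
driver code that breaks; the UART register offsets here are hypothetical,
for illustration only:

  #include <linux/io.h>

  #define UART_THR	0x00		/* hypothetical TX holding register */
  #define UART_LSR	0x14		/* hypothetical line status register */
  #define LSR_THRE	(1 << 5)	/* TX holding register empty */

  static void __iomem *uart;		/* set up elsewhere via ioremap() */

  static void uart_putc(char c)
  {
  	/*
  	 * Correct only if the mapping has the Device memory type:
  	 * the status poll and the data write must not be reordered,
  	 * merged or speculated by the CPU.
  	 */
  	while (!(readl(uart + UART_LSR) & LSR_THRE))
  		cpu_relax();
  	writel(c, uart + UART_THR);
  }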
Diffstat (limited to 'arch')
 arch/arm/include/asm/system.h |   4 +
 arch/arm/mm/mmu.c             | 107 ++++++++++++++++++++++++-----------
 arch/arm/mm/proc-v7.S         |  12 ++--
 3 files changed, 87 insertions(+), 36 deletions(-)
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 7aad78420f1..568020b34e3 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -42,6 +42,10 @@
 #define CR_U	(1 << 22)	/* Unaligned access operation		*/
 #define CR_XP	(1 << 23)	/* Extended page tables			*/
 #define CR_VE	(1 << 24)	/* Vectored interrupts			*/
+#define CR_EE	(1 << 25)	/* Exception (Big) Endian		*/
+#define CR_TRE	(1 << 28)	/* TEX remap enable			*/
+#define CR_AFE	(1 << 29)	/* Access flag enable			*/
+#define CR_TE	(1 << 30)	/* Thumb exception enable		*/
 
 /*
  * This is used to ensure the compiler did actually allocate the register we
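
build_mem_type_table() below keys off these new bits (CR_XP, CR_TRE)
when choosing page table encodings.  A minimal sketch of such a test,
assuming only the get_cr() helper already provided by this header:

  #include <asm/system.h>

  static int tex_remap_enabled(void)
  {
  	unsigned int cr = get_cr();	/* read cp15 c1 control register */

  	/*
  	 * With TEX remap enabled, TEX[0]/C/B index the remap registers
  	 * instead of encoding the memory type directly.
  	 */
  	return (cr & CR_TRE) != 0;
  }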
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 34e53596ff1..e63db11f16a 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -180,20 +180,20 @@ void adjust_cr(unsigned long mask, unsigned long set)
 #endif
 
 #define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_WRITE
-#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_XN|PMD_SECT_AP_WRITE
+#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE
 
 static struct mem_type mem_types[] = {
 	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
 		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
 				  L_PTE_SHARED,
 		.prot_l1	= PMD_TYPE_TABLE,
-		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_UNCACHED,
+		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_S,
 		.domain		= DOMAIN_IO,
 	},
 	[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
 		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
 		.prot_l1	= PMD_TYPE_TABLE,
-		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_TEX(2),
+		.prot_sect	= PROT_SECT_DEVICE,
 		.domain		= DOMAIN_IO,
 	},
 	[MT_DEVICE_CACHED] = {	  /* ioremap_cached */
@@ -205,7 +205,7 @@ static struct mem_type mem_types[] = {
 	[MT_DEVICE_WC] = {	/* ioremap_wc */
 		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
 		.prot_l1	= PMD_TYPE_TABLE,
-		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_BUFFERABLE,
+		.prot_sect	= PROT_SECT_DEVICE,
 		.domain		= DOMAIN_IO,
 	},
 	[MT_CACHECLEAN] = {
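
With PMD_SECT_XN and the memory-type bits removed from the static table,
PROT_SECT_DEVICE now carries only what every CPU needs; the rest is ORed
in at boot by build_mem_type_table() below.  As a sketch of the net
result, tracing this patch for an ARMv6 CPU with extended page tables
enabled and no TEX remap, the MT_DEVICE section flags compose as:

  /* Sketch only: value traced through this patch, not a new API. */
  unsigned long mt_device_sect =
  	PMD_TYPE_SECT | PMD_SECT_AP_WRITE	/* PROT_SECT_DEVICE */
  	| PMD_SECT_S				/* from the initialiser above */
  	| PMD_SECT_XN				/* ARMv6+: no speculative ifetch */
  	| PMD_SECT_BUFFERED;			/* TEXCB=00001: shared device */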
@@ -273,22 +273,23 @@ static void __init build_mem_type_table(void)
 #endif
 
 	/*
-	 * On non-Xscale3 ARMv5-and-older systems, use CB=01
-	 * (Uncached/Buffered) for ioremap_wc() mappings.  On XScale3
-	 * and ARMv6+, use TEXCB=00100 mappings (Inner/Outer Uncacheable
-	 * in xsc3 parlance, Uncached Normal in ARMv6 parlance).
+	 * Strip out features not present on earlier architectures.
+	 * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
+	 * without extended page tables don't have the 'Shared' bit.
 	 */
-	if (cpu_is_xsc3() || cpu_arch >= CPU_ARCH_ARMv6) {
-		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
-		mem_types[MT_DEVICE_WC].prot_sect &= ~PMD_SECT_BUFFERABLE;
-	}
+	if (cpu_arch < CPU_ARCH_ARMv5)
+		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
+			mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
+	if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
+		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
+			mem_types[i].prot_sect &= ~PMD_SECT_S;
 
 	/*
-	 * ARMv5 and lower, bit 4 must be set for page tables.
-	 * (was: cache "update-able on write" bit on ARM610)
-	 * However, Xscale cores require this bit to be cleared.
+	 * ARMv5 and lower, bit 4 must be set for page tables (was: cache
+	 * "update-able on write" bit on ARM610).  However, Xscale and
+	 * Xscale3 require this bit to be cleared.
 	 */
-	if (cpu_is_xscale()) {
+	if (cpu_is_xscale() || cpu_is_xsc3()) {
 		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
 			mem_types[i].prot_sect &= ~PMD_BIT4;
 			mem_types[i].prot_l1 &= ~PMD_BIT4;
@@ -302,6 +303,64 @@ static void __init build_mem_type_table(void)
 		}
 	}
 
+	/*
+	 * Mark the device areas according to the CPU/architecture.
+	 */
+	if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
+		if (!cpu_is_xsc3()) {
+			/*
+			 * Mark device regions on ARMv6+ as execute-never
+			 * to prevent speculative instruction fetches.
+			 */
+			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
+			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
+			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
+			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
+		}
+		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
+			/*
+			 * For ARMv7 with TEX remapping,
+			 * - shared device is SXCB=1100
+			 * - nonshared device is SXCB=0100
+			 * - write combine device mem is SXCB=0001
+			 * (Uncached Normal memory)
+			 */
+			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
+			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
+			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
+		} else if (cpu_is_xsc3()) {
+			/*
+			 * For Xscale3,
+			 * - shared device is TEXCB=00101
+			 * - nonshared device is TEXCB=01000
+			 * - write combine device mem is TEXCB=00100
+			 * (Inner/Outer Uncacheable in xsc3 parlance)
+			 */
+			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
+			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
+			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
+		} else {
+			/*
+			 * For ARMv6 and ARMv7 without TEX remapping,
+			 * - shared device is TEXCB=00001
+			 * - nonshared device is TEXCB=01000
+			 * - write combine device mem is TEXCB=00100
+			 * (Uncached Normal in ARMv6 parlance)
+			 */
+			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
+			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
+			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
+		}
+	} else {
+		/*
+		 * On others, write combining is "Uncached/Buffered"
+		 */
+		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
+	}
+
+	/*
+	 * Now deal with the memory-type mappings
+	 */
 	cp = &cache_policies[cachepolicy];
 	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
 
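
The SXCB/TEXCB numbers in the comments above name architecture-defined
fields of a short-descriptor section entry: TEX in bits 14:12 (hence
PMD_SECT_TEX(x) == (x) << 12), C in bit 3, B in bit 2, and S in bit 16.
A purely illustrative helper that recovers the TEXCB value those
comments use:

  /* Illustrative only; not part of this patch or the kernel API. */
  static unsigned int sect_texcb(unsigned long prot_sect)
  {
  	unsigned int tex = (prot_sect >> 12) & 7;	/* TEX[2:0] */
  	unsigned int c   = (prot_sect >> 3) & 1;	/* PMD_SECT_CACHEABLE */
  	unsigned int b   = (prot_sect >> 2) & 1;	/* PMD_SECT_BUFFERABLE */

  	return (tex << 2) | (c << 1) | b;	/* e.g. TEX(2) -> 01000 */
  }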
@@ -317,12 +376,8 @@ static void __init build_mem_type_table(void)
 	 * Enable CPU-specific coherency if supported.
 	 * (Only available on XSC3 at the moment.)
 	 */
-	if (arch_is_coherent()) {
-		if (cpu_is_xsc3()) {
-			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
-			mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
-		}
-	}
+	if (arch_is_coherent() && cpu_is_xsc3())
+		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
 
 	/*
 	 * ARMv6 and above have extended page tables.
@@ -336,11 +391,6 @@ static void __init build_mem_type_table(void)
 		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 
-		/*
-		 * Mark the device area as "shared device"
-		 */
-		mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
-
 #ifdef CONFIG_SMP
 		/*
 		 * Mark memory with the "shared" attribute for SMP systems
@@ -360,9 +410,6 @@ static void __init build_mem_type_table(void)
 	mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
 	mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;
 
-	if (cpu_arch < CPU_ARCH_ARMv5)
-		mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
-
 	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
 	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
 				 L_PTE_DIRTY | L_PTE_WRITE |
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 07f82db7094..4d3c0a73e7f 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -115,7 +115,7 @@ ENTRY(cpu_v7_set_pte_ext)
 	orr	r3, r3, r2
 	orr	r3, r3, #PTE_EXT_AP0 | 2
 
-	tst	r2, #1 << 4
+	tst	r1, #1 << 4
 	orrne	r3, r3, #PTE_EXT_TEX(1)
 
 	tst	r1, #L_PTE_WRITE
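
The register in the tst is the whole fix here: r1 holds the Linux PTE
value, while r2 holds the 'ext' argument.  Bit 4 of the PTE belongs to
the L_PTE_MT_* memory-type field (L_PTE_MT_DEV_SHARED is exactly 1 << 4)
and selects TEX(1) in the hardware descriptor, so testing r2 meant the
TEX bit was set or missed essentially at random.  In rough C terms
(linux_pte and hw_pte are illustrative names):

  /* Sketch of the corrected test in cpu_v7_set_pte_ext. */
  if (linux_pte & (1 << 4))	/* memory-type bit; previously the ext
  				   argument was tested by mistake */
  	hw_pte |= PTE_EXT_TEX(1);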
@@ -192,11 +192,11 @@ __v7_setup:
 	mov	pc, lr				@ return to head.S:__ret
 ENDPROC(__v7_setup)
 
-	/*
-	 *         V X F   I D LR
-	 * .... ...E PUI. .T.T 4RVI ZFRS BLDP WCAM
-	 * rrrr rrrx xxx0 0101 xxxx xxxx x111 xxxx < forced
-	 *         0 110       0011 1.00 .111 1101 < we want
-	 */
+	/*   AT
+	 *  TFR   EV X F   I D LR
+	 * .EEE ..EE PUI. .T.T 4RVI ZFRS BLDP WCAM
+	 * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced
+	 *    1    0 110       0011 1.00 .111 1101 < we want
+	 */
 	.type	v7_crval, #object
 v7_crval:
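
For reference, the crval object that follows (truncated in this listing)
is a clear/set pair consumed at the end of __v7_setup: the first word is
masked out of the cp15 control register and the second ORed in, matching
the 'forced' and 'we want' rows of the table above.  Roughly, in C
(names illustrative):

  /* Sketch: how a crval pair is applied to the control register. */
  new_cr = (old_cr & ~crval_clear) | crval_mmuset;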