author     Russell King <rmk@dyn-67.arm.linux.org.uk>      2006-06-29 13:24:21 -0400
committer  Russell King <rmk+kernel@arm.linux.org.uk>      2006-06-29 13:24:21 -0400
commit     8799ee9f49f6171fd58f4d64f8c067ca49006a5d
tree       b746b8800bc99633f31505d151624c8ccd75cd47  /arch/arm/mm/mm-armv.c
parent     326764a85b7676388db3ebad6488f312631d7661
[ARM] Set bit 4 on section mappings correctly depending on CPU
On some CPUs, bit 4 of section mappings means "update the cache when written to". On others, this bit is required to be one, and on others it's required to be zero. Finally, on ARMv6 and above, setting it turns on "no execute" and prevents speculative prefetches. With all these combinations, no one value fits all CPUs, so we have to pick a value depending on the CPU type and the area we're mapping.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
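
As a rough, standalone illustration of the policy this change ends up applying to a normal-memory section descriptor (not kernel code: the cpu_class enum, the memory_section_flags() helper and the main() driver below are assumptions made for the sketch; in the kernel the choice is made inside build_mem_type_table() using cpu_is_xscale() and the detected architecture version):

/*
 * Standalone sketch of the bit-4 policy for a kernel RAM section
 * (the MT_MEMORY case in the patch).  Flag values mirror the kernel's
 * PMD_* constants but are redefined here so the sketch compiles alone.
 */
#include <stdio.h>

#define PMD_TYPE_SECT     (2 << 0)
#define PMD_BIT4          (1 << 4)   /* descriptor bit 4                    */
#define PMD_SECT_XN       (1 << 4)   /* same bit, ARMv6 meaning: no-execute */
#define PMD_SECT_AP_WRITE (1 << 10)

enum cpu_class { CPU_XSCALE, CPU_PRE_V6, CPU_V6_OR_LATER };

static unsigned int memory_section_flags(enum cpu_class cpu)
{
	/* Start with bit 4 set, as the mem_types[] table now does. */
	unsigned int prot = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_AP_WRITE;

	if (cpu == CPU_XSCALE)
		prot &= ~PMD_BIT4;     /* Xscale: bit 4 must be zero */

	if (cpu == CPU_V6_OR_LATER)
		prot &= ~PMD_SECT_XN;  /* v6+: bit 4 is XN; keep kernel RAM executable */

	return prot;
}

int main(void)
{
	printf("Xscale : %#x\n", memory_section_flags(CPU_XSCALE));
	printf("pre-v6 : %#x\n", memory_section_flags(CPU_PRE_V6));
	printf("ARMv6+ : %#x\n", memory_section_flags(CPU_V6_OR_LATER));
	return 0;
}

The same physical bit is thus written with a different intent per CPU: forced clear on Xscale, left set (and also forced into prot_l1 for page tables) on pre-v6 parts, and cleared again for ARMv6+ kernel mappings where it has become the no-execute bit.
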
Diffstat (limited to 'arch/arm/mm/mm-armv.c')
-rw-r--r--  arch/arm/mm/mm-armv.c | 37
1 file changed, 22 insertions(+), 15 deletions(-)
diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index 95273de4f772..d06440cc4e8f 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mm-armv.c
@@ -303,16 +303,16 @@ static struct mem_types mem_types[] __initdata = {
 		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 				  L_PTE_WRITE,
 		.prot_l1	= PMD_TYPE_TABLE,
-		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_UNCACHED |
+		.prot_sect	= PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_UNCACHED |
 				  PMD_SECT_AP_WRITE,
 		.domain		= DOMAIN_IO,
 	},
 	[MT_CACHECLEAN] = {
-		.prot_sect	= PMD_TYPE_SECT,
+		.prot_sect	= PMD_TYPE_SECT | PMD_BIT4,
 		.domain		= DOMAIN_KERNEL,
 	},
 	[MT_MINICLEAN] = {
-		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_MINICACHE,
+		.prot_sect	= PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_MINICACHE,
 		.domain		= DOMAIN_KERNEL,
 	},
 	[MT_LOW_VECTORS] = {
@@ -328,25 +328,25 @@ static struct mem_types mem_types[] __initdata = {
 		.domain		= DOMAIN_USER,
 	},
 	[MT_MEMORY] = {
-		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
+		.prot_sect	= PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_AP_WRITE,
 		.domain		= DOMAIN_KERNEL,
 	},
 	[MT_ROM] = {
-		.prot_sect	= PMD_TYPE_SECT,
+		.prot_sect	= PMD_TYPE_SECT | PMD_BIT4,
 		.domain		= DOMAIN_KERNEL,
 	},
 	[MT_IXP2000_DEVICE] = { /* IXP2400 requires XCB=101 for on-chip I/O */
 		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 				  L_PTE_WRITE,
 		.prot_l1	= PMD_TYPE_TABLE,
-		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_UNCACHED |
+		.prot_sect	= PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_UNCACHED |
 				  PMD_SECT_AP_WRITE | PMD_SECT_BUFFERABLE |
 				  PMD_SECT_TEX(1),
 		.domain		= DOMAIN_IO,
 	},
 	[MT_NONSHARED_DEVICE] = {
 		.prot_l1	= PMD_TYPE_TABLE,
-		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_NONSHARED_DEV |
+		.prot_sect	= PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_NONSHARED_DEV |
 				  PMD_SECT_AP_WRITE,
 		.domain		= DOMAIN_IO,
 	}
@@ -376,14 +376,21 @@ void __init build_mem_type_table(void)
 		ecc_mask = 0;
 	}
 
-	if (cpu_arch <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale()) {
-		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
+	/*
+	 * Xscale must not have PMD bit 4 set for section mappings.
+	 */
+	if (cpu_is_xscale())
+		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
+			mem_types[i].prot_sect &= ~PMD_BIT4;
+
+	/*
+	 * ARMv5 and lower, excluding Xscale, bit 4 must be set for
+	 * page tables.
+	 */
+	if (cpu_arch < CPU_ARCH_ARMv6 && !cpu_is_xscale())
+		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
 			if (mem_types[i].prot_l1)
 				mem_types[i].prot_l1 |= PMD_BIT4;
-			if (mem_types[i].prot_sect)
-				mem_types[i].prot_sect |= PMD_BIT4;
-		}
-	}
 
 	cp = &cache_policies[cachepolicy];
 	kern_pgprot = user_pgprot = cp->pte;
@@ -407,8 +414,8 @@ void __init build_mem_type_table(void)
 		 * bit 4 becomes XN which we must clear for the
 		 * kernel memory mapping.
 		 */
-		mem_types[MT_MEMORY].prot_sect &= ~PMD_BIT4;
-		mem_types[MT_ROM].prot_sect &= ~PMD_BIT4;
+		mem_types[MT_MEMORY].prot_sect &= ~PMD_SECT_XN;
+		mem_types[MT_ROM].prot_sect &= ~PMD_SECT_XN;
 
 		/*
 		 * Mark cache clean areas and XIP ROM read only