about summary refs log tree commit diff stats
path: root/arch/arm/mm/mm-armv.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@g5.osdl.org>2006-07-02 18:04:12 -0400
committerLinus Torvalds <torvalds@g5.osdl.org>2006-07-02 18:04:12 -0400
commita8c4c20dfa8b28a3c99e33c639d9c2ea5657741e (patch)
tree887b64d29b5a46d9ab2ca1267d8a2f05b5845561 /arch/arm/mm/mm-armv.c
parent168d04b3b4de7723eb73b3cffc9cb75224e0f393 (diff)
parent2dc7667b9d0674db6572723356fe3857031101a4 (diff)
Merge branch 'devel' of master.kernel.org:/home/rmk/linux-2.6-arm
* 'devel' of master.kernel.org:/home/rmk/linux-2.6-arm: (44 commits) [ARM] 3541/2: workaround for PXA27x erratum E7 [ARM] nommu: provide a way for correct control register value selection [ARM] 3705/1: add supersection support to ioremap() [ARM] 3707/1: iwmmxt: use the generic thread notifier infrastructure [ARM] 3706/2: ep93xx: add cirrus logic edb9315a support [ARM] 3704/1: format IOP Kconfig with tabs, create more consistency [ARM] 3703/1: Add help description for ARCH_EP80219 [ARM] 3678/1: MMC: Make OMAP MMC work [ARM] 3677/1: OMAP: Update H2 defconfig [ARM] 3676/1: ARM: OMAP: Fix dmtimers and timer32k to compile on OMAP1 [ARM] Add section support to ioremap [ARM] Fix sa11x0 SDRAM selection [ARM] Set bit 4 on section mappings correctly depending on CPU [ARM] 3666/1: TRIZEPS4 [1/5] core ARM: OMAP: Multiplexing for 24xx GPMC wait pin monitoring ARM: OMAP: Fix SRAM to use MT_MEMORY instead of MT_DEVICE ARM: OMAP: Update dmtimers ARM: OMAP: Make clock variables static ARM: OMAP: Fix GPMC compilation when DEBUG is defined ARM: OMAP: Mux updates for external DMA and GPIO ...
Diffstat (limited to 'arch/arm/mm/mm-armv.c')
-rw-r--r--arch/arm/mm/mm-armv.c37
1 file changed, 22 insertions, 15 deletions
diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index b0242c6ea066..38769f5862bc 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mm-armv.c
@@ -302,16 +302,16 @@ static struct mem_types mem_types[] __initdata = {
302 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | 302 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
303 L_PTE_WRITE, 303 L_PTE_WRITE,
304 .prot_l1 = PMD_TYPE_TABLE, 304 .prot_l1 = PMD_TYPE_TABLE,
305 .prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED | 305 .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_UNCACHED |
306 PMD_SECT_AP_WRITE, 306 PMD_SECT_AP_WRITE,
307 .domain = DOMAIN_IO, 307 .domain = DOMAIN_IO,
308 }, 308 },
309 [MT_CACHECLEAN] = { 309 [MT_CACHECLEAN] = {
310 .prot_sect = PMD_TYPE_SECT, 310 .prot_sect = PMD_TYPE_SECT | PMD_BIT4,
311 .domain = DOMAIN_KERNEL, 311 .domain = DOMAIN_KERNEL,
312 }, 312 },
313 [MT_MINICLEAN] = { 313 [MT_MINICLEAN] = {
314 .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE, 314 .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_MINICACHE,
315 .domain = DOMAIN_KERNEL, 315 .domain = DOMAIN_KERNEL,
316 }, 316 },
317 [MT_LOW_VECTORS] = { 317 [MT_LOW_VECTORS] = {
@@ -327,25 +327,25 @@ static struct mem_types mem_types[] __initdata = {
327 .domain = DOMAIN_USER, 327 .domain = DOMAIN_USER,
328 }, 328 },
329 [MT_MEMORY] = { 329 [MT_MEMORY] = {
330 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, 330 .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_AP_WRITE,
331 .domain = DOMAIN_KERNEL, 331 .domain = DOMAIN_KERNEL,
332 }, 332 },
333 [MT_ROM] = { 333 [MT_ROM] = {
334 .prot_sect = PMD_TYPE_SECT, 334 .prot_sect = PMD_TYPE_SECT | PMD_BIT4,
335 .domain = DOMAIN_KERNEL, 335 .domain = DOMAIN_KERNEL,
336 }, 336 },
337 [MT_IXP2000_DEVICE] = { /* IXP2400 requires XCB=101 for on-chip I/O */ 337 [MT_IXP2000_DEVICE] = { /* IXP2400 requires XCB=101 for on-chip I/O */
338 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | 338 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
339 L_PTE_WRITE, 339 L_PTE_WRITE,
340 .prot_l1 = PMD_TYPE_TABLE, 340 .prot_l1 = PMD_TYPE_TABLE,
341 .prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED | 341 .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_UNCACHED |
342 PMD_SECT_AP_WRITE | PMD_SECT_BUFFERABLE | 342 PMD_SECT_AP_WRITE | PMD_SECT_BUFFERABLE |
343 PMD_SECT_TEX(1), 343 PMD_SECT_TEX(1),
344 .domain = DOMAIN_IO, 344 .domain = DOMAIN_IO,
345 }, 345 },
346 [MT_NONSHARED_DEVICE] = { 346 [MT_NONSHARED_DEVICE] = {
347 .prot_l1 = PMD_TYPE_TABLE, 347 .prot_l1 = PMD_TYPE_TABLE,
348 .prot_sect = PMD_TYPE_SECT | PMD_SECT_NONSHARED_DEV | 348 .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_NONSHARED_DEV |
349 PMD_SECT_AP_WRITE, 349 PMD_SECT_AP_WRITE,
350 .domain = DOMAIN_IO, 350 .domain = DOMAIN_IO,
351 } 351 }
@@ -375,14 +375,21 @@ void __init build_mem_type_table(void)
375 ecc_mask = 0; 375 ecc_mask = 0;
376 } 376 }
377 377
378 if (cpu_arch <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale()) { 378 /*
379 for (i = 0; i < ARRAY_SIZE(mem_types); i++) { 379 * Xscale must not have PMD bit 4 set for section mappings.
380 */
381 if (cpu_is_xscale())
382 for (i = 0; i < ARRAY_SIZE(mem_types); i++)
383 mem_types[i].prot_sect &= ~PMD_BIT4;
384
385 /*
386 * ARMv5 and lower, excluding Xscale, bit 4 must be set for
387 * page tables.
388 */
389 if (cpu_arch < CPU_ARCH_ARMv6 && !cpu_is_xscale())
390 for (i = 0; i < ARRAY_SIZE(mem_types); i++)
380 if (mem_types[i].prot_l1) 391 if (mem_types[i].prot_l1)
381 mem_types[i].prot_l1 |= PMD_BIT4; 392 mem_types[i].prot_l1 |= PMD_BIT4;
382 if (mem_types[i].prot_sect)
383 mem_types[i].prot_sect |= PMD_BIT4;
384 }
385 }
386 393
387 cp = &cache_policies[cachepolicy]; 394 cp = &cache_policies[cachepolicy];
388 kern_pgprot = user_pgprot = cp->pte; 395 kern_pgprot = user_pgprot = cp->pte;
@@ -406,8 +413,8 @@ void __init build_mem_type_table(void)
406 * bit 4 becomes XN which we must clear for the 413 * bit 4 becomes XN which we must clear for the
407 * kernel memory mapping. 414 * kernel memory mapping.
408 */ 415 */
409 mem_types[MT_MEMORY].prot_sect &= ~PMD_BIT4; 416 mem_types[MT_MEMORY].prot_sect &= ~PMD_SECT_XN;
410 mem_types[MT_ROM].prot_sect &= ~PMD_BIT4; 417 mem_types[MT_ROM].prot_sect &= ~PMD_SECT_XN;
411 418
412 /* 419 /*
413 * Mark cache clean areas and XIP ROM read only 420 * Mark cache clean areas and XIP ROM read only