author    Paul Walmsley <paul@pwsan.com>  2009-03-12 15:11:43 -0400
committer Russell King <rmk+kernel@arm.linux.org.uk>  2009-03-12 15:25:02 -0400
commit    e4707dd3e9d0cb57597b6568a5e51fea5d6fca41 (patch)
tree      c76a6cb54e4f90a8deebf380bf8fe8a5f6eb2750 /arch/arm/mm/mmu.c
parent    6dc4a47a0cf423879b505af0e29997fca4088630 (diff)
[ARM] 5422/1: ARM: MMU: add a Non-cacheable Normal executable memory type
This patch adds a Non-cacheable Normal ARM executable memory type,
MT_MEMORY_NONCACHED.

On OMAP3, this is used for rapid dynamic voltage/frequency scaling in
the VDD2 voltage domain. OMAP3's SDRAM controller (SDRC) is in the
VDD2 voltage domain, and its clock frequency must change along with
voltage. The SDRC clock change code cannot run from SDRAM itself,
since SDRAM accesses are paused during the clock change. So the
current implementation of the DVFS code executes from OMAP on-chip
SRAM, aka "OCM RAM."

If the OCM RAM pages are marked as Cacheable, the ARM cache controller
will attempt to flush dirty cache lines to the SDRC, so it can fill
those lines with OCM RAM instruction code. The problem is that the
SDRC is paused during DVFS, and so any SDRAM access causes the ARM MPU
subsystem to hang.

TI's original solution to this problem was to mark the OCM RAM
sections as Strongly Ordered memory, thus preventing caching. This is
overkill: since the memory is marked as non-bufferable, OCM RAM writes
become needlessly slow. The idea of "Strongly Ordered SRAM" is also
conceptually disturbing. Previous LAKML list discussion is here:

http://www.spinics.net/lists/arm-kernel/msg54312.html

This memory type MT_MEMORY_NONCACHED is used for OCM RAM by a future
patch.

Cc: Richard Woodruff <r-woodruff2@ti.com>
Signed-off-by: Paul Walmsley <paul@pwsan.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
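As background for that future patch, here is a minimal sketch (not
part of this series) of how a platform might map its OCM RAM with the
new type, assuming the standard struct map_desc / iotable_init()
static-mapping interface. The virtual address, length, and helper
names below are illustrative placeholders; the physical address is
the OMAP3 on-chip SRAM base, but verify it against your part's TRM.

#include <linux/init.h>
#include <asm/memory.h>
#include <asm/sizes.h>
#include <asm/mach/map.h>

/* Hypothetical OCM RAM mapping; VA and length are placeholders. */
static struct map_desc omap3_ocmram_desc __initdata = {
	.virtual	= 0xfe400000,
	.pfn		= __phys_to_pfn(0x40200000),
	.length		= SZ_64K,
	.type		= MT_MEMORY_NONCACHED,
};

static void __init omap3_map_ocmram(void)
{
	iotable_init(&omap3_ocmram_desc, 1);
}

Because the mapping is Normal memory rather than Strongly Ordered,
writes to the SRAM may still be buffered, while instruction fetches
from it can no longer trigger dirty-line writebacks toward the paused
SDRC.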
Diffstat (limited to 'arch/arm/mm/mmu.c')
-rw-r--r--  arch/arm/mm/mmu.c | 23 +++++++++++++++++++++++
1 file changed, 23 insertions(+), 0 deletions(-)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 9b36c5cb5e9f..aa424e1da8a1 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -243,6 +243,10 @@ static struct mem_type mem_types[] = {
 		.prot_sect = PMD_TYPE_SECT,
 		.domain    = DOMAIN_KERNEL,
 	},
+	[MT_MEMORY_NONCACHED] = {
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
+		.domain    = DOMAIN_KERNEL,
+	},
 };
 
 const struct mem_type *get_mem_type(unsigned int type)
@@ -406,9 +410,28 @@ static void __init build_mem_type_table(void)
 		kern_pgprot |= L_PTE_SHARED;
 		vecs_pgprot |= L_PTE_SHARED;
 		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
+		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
 #endif
 	}
 
+	/*
+	 * Non-cacheable Normal - intended for memory areas that must
+	 * not cause dirty cache line writebacks when used
+	 */
+	if (cpu_arch >= CPU_ARCH_ARMv6) {
+		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
+			/* Non-cacheable Normal is XCB = 001 */
+			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
+				PMD_SECT_BUFFERED;
+		} else {
+			/* For both ARMv6 and non-TEX-remapping ARMv7 */
+			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
+				PMD_SECT_TEX(1);
+		}
+	} else {
+		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
+	}
+
 	for (i = 0; i < 16; i++) {
 		unsigned long v = pgprot_val(protection_map[i]);
 		protection_map[i] = __pgprot(v | user_pgprot);
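For reference, a sketch of what the attribute bits chosen above
encode, plus one illustrative consumer of the new mem_types[] entry.
None of this is part of the patch: the helper name is hypothetical,
and it assumes the get_mem_type() and flush_pmd_entry() interfaces
plus the struct mem_type definition (arch/arm/mm/mm.h) available to
code living alongside mmu.c.

/*
 * Attribute encodings selected in the hunk above (definitions in
 * arch/arm/include/asm/pgtable-hwdef.h):
 *
 *   PMD_SECT_BUFFERED   - TEX[0]=0 C=0 B=1, i.e. XCB = 001, which the
 *                         TEX-remap tables map to Normal Non-cacheable
 *                         when SCTLR.TRE is set
 *   PMD_SECT_TEX(1)     - TEX=0b001 C=0 B=0, the ARMv6 / non-TEX-remap
 *                         ARMv7 encoding for Normal Non-cacheable
 *   PMD_SECT_BUFFERABLE - C=0 B=1, the pre-v6 "uncached, bufferable"
 *                         encoding
 */
#include <linux/init.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mach/map.h>
#include "mm.h"			/* struct mem_type */

/* Sketch only: install one 1MB section with the new memory type.
 * 'phys' must be section-aligned; error handling omitted. */
static void __init map_noncached_section(pmd_t *pmd, unsigned long phys)
{
	const struct mem_type *mt = get_mem_type(MT_MEMORY_NONCACHED);

	*pmd = __pmd(phys | mt->prot_sect);
	flush_pmd_entry(pmd);
}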