-rw-r--r--	arch/arm/include/asm/mach/map.h |  1 +
-rw-r--r--	arch/arm/mm/mmu.c               | 23 +++++++++++++++++++++++
2 files changed, 24 insertions, 0 deletions
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index 39d949b63e80..58cf91f38e6f 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -26,6 +26,7 @@ struct map_desc {
 #define MT_HIGH_VECTORS		8
 #define MT_MEMORY		9
 #define MT_ROM			10
+#define MT_MEMORY_NONCACHED	11
 
 #ifdef CONFIG_MMU
 extern void iotable_init(struct map_desc *, int);
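As a hedged illustration of how the new type is meant to be consumed (not part of the patch), a platform's map_io hook could hand iotable_init() a map_desc entry requesting a non-cached Normal mapping of on-chip SRAM; the addresses and the board_* names below are hypothetical placeholders.

/*
 * Hedged sketch only: the virtual/physical addresses and the
 * board_sram_* / board_map_io names are illustrative placeholders.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/memory.h>
#include <asm/sizes.h>
#include <asm/mach/map.h>

static struct map_desc board_sram_desc[] __initdata = {
	{
		.virtual = 0xfe400000,			/* placeholder VA */
		.pfn     = __phys_to_pfn(0x40200000),	/* placeholder PA */
		.length  = SZ_1M,
		.type    = MT_MEMORY_NONCACHED,		/* new memory type */
	},
};

static void __init board_map_io(void)
{
	iotable_init(board_sram_desc, ARRAY_SIZE(board_sram_desc));
}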
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 9b36c5cb5e9f..aa424e1da8a1 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -243,6 +243,10 @@ static struct mem_type mem_types[] = {
 		.prot_sect = PMD_TYPE_SECT,
 		.domain    = DOMAIN_KERNEL,
 	},
+	[MT_MEMORY_NONCACHED] = {
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
+		.domain    = DOMAIN_KERNEL,
+	},
 };
 
 const struct mem_type *get_mem_type(unsigned int type)
@@ -406,9 +410,28 @@ static void __init build_mem_type_table(void)
 		kern_pgprot |= L_PTE_SHARED;
 		vecs_pgprot |= L_PTE_SHARED;
 		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
+		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
 #endif
 	}
 
+	/*
+	 * Non-cacheable Normal - intended for memory areas that must
+	 * not cause dirty cache line writebacks when used
+	 */
+	if (cpu_arch >= CPU_ARCH_ARMv6) {
+		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
+			/* Non-cacheable Normal is XCB = 001 */
+			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
+				PMD_SECT_BUFFERED;
+		} else {
+			/* For both ARMv6 and non-TEX-remapping ARMv7 */
+			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
+				PMD_SECT_TEX(1);
+		}
+	} else {
+		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
+	}
+
 	for (i = 0; i < 16; i++) {
 		unsigned long v = pgprot_val(protection_map[i]);
 		protection_map[i] = __pgprot(v | user_pgprot);
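The attribute selection above can be read as follows: with ARMv7 TEX remap enabled (CR_TRE set) the non-cacheable Normal encoding is XCB = 001, i.e. PMD_SECT_BUFFERED; on ARMv6 and on ARMv7 without TEX remap it is TEX = 001 with C = B = 0, i.e. PMD_SECT_TEX(1); pre-v6 cores fall back to the uncached, bufferable encoding PMD_SECT_BUFFERABLE. A minimal sketch restating that decision as a standalone helper (the noncached_prot_bits() name is illustrative; the patch applies the same logic inline in build_mem_type_table()):

/*
 * Hedged restatement of the selection logic above; noncached_prot_bits()
 * is a hypothetical helper, not something the patch adds.
 */
static unsigned long __init noncached_prot_bits(int cpu_arch, unsigned int cr)
{
	if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE))
		return PMD_SECT_BUFFERED;	/* TEX remap: XCB = 001 */
	if (cpu_arch >= CPU_ARCH_ARMv6)
		return PMD_SECT_TEX(1);		/* TEX = 001, C = B = 0 */
	return PMD_SECT_BUFFERABLE;		/* pre-v6: uncached, bufferable */
}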