path: root/arch/arm/mm/mmu.c
Diffstat (limited to 'arch/arm/mm/mmu.c')
-rw-r--r--  arch/arm/mm/mmu.c | 54 +++++++++++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 53 insertions(+), 1 deletion(-)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index d4d082c5c2d4..e6344ece00ce 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -18,9 +18,11 @@
 #include <asm/cputype.h>
 #include <asm/mach-types.h>
 #include <asm/sections.h>
+#include <asm/cachetype.h>
 #include <asm/setup.h>
 #include <asm/sizes.h>
 #include <asm/tlb.h>
+#include <asm/highmem.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
@@ -243,6 +245,10 @@ static struct mem_type mem_types[] = {
 		.prot_sect = PMD_TYPE_SECT,
 		.domain    = DOMAIN_KERNEL,
 	},
+	[MT_MEMORY_NONCACHED] = {
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
+		.domain    = DOMAIN_KERNEL,
+	},
 };
 
 const struct mem_type *get_mem_type(unsigned int type)
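
The new MT_MEMORY_NONCACHED entry is reachable through the same machinery as the existing types, for example via a machine port's static I/O table. A minimal sketch of such a use, assuming a hypothetical SRAM bank and virtual address (neither appears in this patch):

	#include <linux/kernel.h>
	#include <asm/mach/map.h>
	#include <asm/sizes.h>

	/* Hypothetical static mapping: 1MiB of on-chip SRAM mapped as
	 * non-cacheable Normal memory, so stores never linger in dirty
	 * cache lines. Physical and virtual addresses are illustrative. */
	static struct map_desc sram_desc[] __initdata = {
		{
			.virtual	= 0xfe000000,
			.pfn		= __phys_to_pfn(0x40000000),
			.length		= SZ_1M,
			.type		= MT_MEMORY_NONCACHED,
		},
	};

	static void __init mymachine_map_io(void)
	{
		iotable_init(sram_desc, ARRAY_SIZE(sram_desc));
	}
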
@@ -406,9 +412,28 @@ static void __init build_mem_type_table(void)
 		kern_pgprot |= L_PTE_SHARED;
 		vecs_pgprot |= L_PTE_SHARED;
 		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
+		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
 #endif
 	}
 
+	/*
+	 * Non-cacheable Normal - intended for memory areas that must
+	 * not cause dirty cache line writebacks when used
+	 */
+	if (cpu_arch >= CPU_ARCH_ARMv6) {
+		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
+			/* Non-cacheable Normal is XCB = 001 */
+			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
+				PMD_SECT_BUFFERED;
+		} else {
+			/* For both ARMv6 and non-TEX-remapping ARMv7 */
+			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
+				PMD_SECT_TEX(1);
+		}
+	} else {
+		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
+	}
+
 	for (i = 0; i < 16; i++) {
 		unsigned long v = pgprot_val(protection_map[i]);
 		protection_map[i] = __pgprot(v | user_pgprot);
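
For reference, the attribute bits OR-ed into prot_sect above are short-descriptor section bits. A sketch of the relevant definitions, matching asm/pgtable-hwdef.h of this era (worth double-checking against the tree):

	#define PMD_SECT_BUFFERABLE	(1 << 2)		/* B */
	#define PMD_SECT_CACHEABLE	(1 << 3)		/* C */
	#define PMD_SECT_TEX(x)		((x) << 12)		/* TEX[2:0] */
	#define PMD_SECT_BUFFERED	(PMD_SECT_BUFFERABLE)	/* TEX[0]:C:B = 001 */

With TEX remap active (ARMv7, SCTLR.TRE set) the MMU treats TEX[0]:C:B as a 3-bit index into the PRRR/NMRR remap registers, where Linux programs entry 001 as Normal non-cacheable; without remap, TEX=001 with C=B=0 encodes non-cacheable Normal memory directly, hence PMD_SECT_TEX(1). Pre-ARMv6 CPUs have no TEX bits, so plain bufferable (B=1) is the closest available attribute.
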
@@ -677,6 +702,10 @@ static void __init sanity_check_meminfo(void)
 			if (meminfo.nr_banks >= NR_BANKS) {
 				printk(KERN_CRIT "NR_BANKS too low, "
 					"ignoring high memory\n");
+			} else if (cache_is_vipt_aliasing()) {
+				printk(KERN_CRIT "HIGHMEM is not yet supported "
+					"with VIPT aliasing cache, "
+					"ignoring high memory\n");
 			} else {
 				memmove(bank + 1, bank,
 					(meminfo.nr_banks - i) * sizeof(*bank));
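
The new else-if exists because kmap() hands out kernel virtual addresses for highmem pages that change over a page's lifetime; on a VIPT cache whose way size exceeds the page size, two mappings of the same physical page can then occupy different cache lines ("colours") at once, so such caches are refused for now. A toy expression of the aliasing condition, as a hypothetical helper rather than anything in asm/cachetype.h:

	/* A VIPT data cache aliases when virtual-index bits extend above
	 * PAGE_SHIFT, i.e. when the way size (total size / associativity)
	 * exceeds the page size. Illustrative helper only. */
	static inline int vipt_aliases(unsigned long size, unsigned long assoc,
				       unsigned long page_size)
	{
		return (size / assoc) > page_size;
	}
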
@@ -694,7 +723,7 @@ static void __init sanity_check_meminfo(void)
 		 * the vmalloc area.
 		 */
 		if (__va(bank->start) >= VMALLOC_MIN ||
-		    __va(bank->start) < PAGE_OFFSET) {
+		    __va(bank->start) < (void *)PAGE_OFFSET) {
 			printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx "
 				"(vmalloc region overlap).\n",
 				bank->start, bank->start + bank->size - 1);
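
The single changed line in this hunk is a type fix rather than a behaviour change: __va() yields a pointer while PAGE_OFFSET is an integer constant, and C does not permit an ordered comparison between the two. A reduced user-space illustration, with the PAGE_OFFSET value assumed:

	#include <stdio.h>

	#define PAGE_OFFSET 0xc0000000UL  /* integer constant, as in the kernel */

	int main(void)
	{
		static char buf[16];
		void *va = buf;

		/* "va < PAGE_OFFSET" compares a pointer against an integer,
		 * which compilers reject; the cast restores a well-formed
		 * pointer-to-pointer comparison, mirroring the fix above. */
		if (va < (void *)PAGE_OFFSET)
			printf("va is below the assumed PAGE_OFFSET\n");
		return 0;
	}
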
@@ -799,6 +828,17 @@ void __init reserve_node_zero(pg_data_t *pgdat)
 				BOOTMEM_DEFAULT);
 	}
 
+	if (machine_is_palmld() || machine_is_palmtx()) {
+		reserve_bootmem_node(pgdat, 0xa0000000, 0x1000,
+				BOOTMEM_EXCLUSIVE);
+		reserve_bootmem_node(pgdat, 0xa0200000, 0x1000,
+				BOOTMEM_EXCLUSIVE);
+	}
+
+	if (machine_is_palmt5())
+		reserve_bootmem_node(pgdat, 0xa0200000, 0x1000,
+				BOOTMEM_EXCLUSIVE);
+
 #ifdef CONFIG_SA1111
 	/*
 	 * Because of the SA1111 DMA bug, we want to preserve our
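
The Palm additions follow the standard reserve_bootmem_node() pattern: pin pages at fixed physical addresses before the bootmem allocator can hand them out. Unlike the BOOTMEM_DEFAULT call earlier in this function, BOOTMEM_EXCLUSIVE makes the reservation fail if the range was already reserved. The generic shape of such a call, with a made-up address:

	/* Reserve one 4KiB page at a fixed physical address; under
	 * BOOTMEM_EXCLUSIVE this fails if the range overlaps a prior
	 * reservation. The address here is illustrative only. */
	reserve_bootmem_node(pgdat, 0xa0100000, 0x1000, BOOTMEM_EXCLUSIVE);
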
@@ -895,6 +935,17 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	flush_cache_all();
 }
 
+static void __init kmap_init(void)
+{
+#ifdef CONFIG_HIGHMEM
+	pmd_t *pmd = pmd_off_k(PKMAP_BASE);
+	pte_t *pte = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t));
+	BUG_ON(!pmd_none(*pmd) || !pte);
+	__pmd_populate(pmd, __pa(pte) | _PAGE_KERNEL_TABLE);
+	pkmap_page_table = pte + PTRS_PER_PTE;
+#endif
+}
+
 /*
  * paging_init() sets up the page tables, initialises the zone memory
  * maps, and sets up the zero page, bad page and bad page tables.
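
kmap_init() reserves one pmd's worth of PTEs for the pkmap window: the allocation covers 2 * PTRS_PER_PTE entries because each ARM PTE page carries a Linux shadow table alongside the hardware table the pmd points at, and pkmap_page_table is set to the Linux half. Once it has run, highmem pages can be mapped through the window in the usual way; a minimal usage sketch with a hypothetical helper, not part of this patch:

	#include <linux/highmem.h>
	#include <linux/string.h>

	/* Temporarily map a (possibly highmem) page and zero it.
	 * kmap() may sleep, so this must not run in atomic context;
	 * kmap_atomic() would be used there instead. */
	static void zero_page_contents(struct page *page)
	{
		void *vaddr = kmap(page);

		memset(vaddr, 0, PAGE_SIZE);
		kunmap(page);
	}
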
@@ -908,6 +959,7 @@ void __init paging_init(struct machine_desc *mdesc)
 	prepare_page_table();
 	bootmem_init();
 	devicemaps_init(mdesc);
+	kmap_init();
 
 	top_pmd = pmd_off_k(0xffff0000);
 