Diffstat (limited to 'arch/arm/mm/mmu.c')
 arch/arm/mm/mmu.c | 64 ++++++++++++++++++++++++++++++++++++----------------------------
 1 file changed, 36 insertions(+), 28 deletions(-)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index ea67be0223ac..241c24a1c18f 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -100,18 +100,17 @@ static struct cachepolicy cache_policies[] __initdata = {
  * writebuffer to be turned off. (Note: the write
  * buffer should not be on and the cache off).
  */
-static void __init early_cachepolicy(char **p)
+static int __init early_cachepolicy(char *p)
 {
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
 		int len = strlen(cache_policies[i].policy);
 
-		if (memcmp(*p, cache_policies[i].policy, len) == 0) {
+		if (memcmp(p, cache_policies[i].policy, len) == 0) {
 			cachepolicy = i;
 			cr_alignment &= ~cache_policies[i].cr_mask;
 			cr_no_alignment &= ~cache_policies[i].cr_mask;
-			*p += len;
 			break;
 		}
 	}
@@ -130,36 +129,37 @@ static void __init early_cachepolicy(char **p)
 	}
 	flush_cache_all();
 	set_cr(cr_alignment);
+	return 0;
 }
-__early_param("cachepolicy=", early_cachepolicy);
+early_param("cachepolicy", early_cachepolicy);
 
-static void __init early_nocache(char **__unused)
+static int __init early_nocache(char *__unused)
 {
 	char *p = "buffered";
 	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
-	early_cachepolicy(&p);
+	early_cachepolicy(p);
+	return 0;
 }
-__early_param("nocache", early_nocache);
+early_param("nocache", early_nocache);
 
-static void __init early_nowrite(char **__unused)
+static int __init early_nowrite(char *__unused)
 {
 	char *p = "uncached";
 	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
-	early_cachepolicy(&p);
+	early_cachepolicy(p);
+	return 0;
 }
-__early_param("nowb", early_nowrite);
+early_param("nowb", early_nowrite);
 
-static void __init early_ecc(char **p)
+static int __init early_ecc(char *p)
 {
-	if (memcmp(*p, "on", 2) == 0) {
+	if (memcmp(p, "on", 2) == 0)
 		ecc_mask = PMD_PROTECTION;
-		*p += 2;
-	} else if (memcmp(*p, "off", 3) == 0) {
+	else if (memcmp(p, "off", 3) == 0)
 		ecc_mask = 0;
-		*p += 3;
-	}
+	return 0;
 }
-__early_param("ecc=", early_ecc);
+early_param("ecc", early_ecc);
 
 static int __init noalign_setup(char *__unused)
 {
@@ -420,6 +420,10 @@ static void __init build_mem_type_table(void)
 		user_pgprot |= L_PTE_SHARED;
 		kern_pgprot |= L_PTE_SHARED;
 		vecs_pgprot |= L_PTE_SHARED;
+		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
+		mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
+		mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
+		mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
 		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
 		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
 #endif
@@ -453,8 +457,7 @@ static void __init build_mem_type_table(void)
 
 	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
 	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
-				 L_PTE_DIRTY | L_PTE_WRITE |
-				 L_PTE_EXEC | kern_pgprot);
+				 L_PTE_DIRTY | L_PTE_WRITE | kern_pgprot);
 
 	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
 	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
@@ -671,9 +674,9 @@ static unsigned long __initdata vmalloc_reserve = SZ_128M;
  * bytes. This can be used to increase (or decrease) the vmalloc
  * area - the default is 128m.
  */
-static void __init early_vmalloc(char **arg)
+static int __init early_vmalloc(char *arg)
 {
-	vmalloc_reserve = memparse(*arg, arg);
+	vmalloc_reserve = memparse(arg, NULL);
 
 	if (vmalloc_reserve < SZ_16M) {
 		vmalloc_reserve = SZ_16M;
@@ -688,8 +691,9 @@ static void __init early_vmalloc(char **arg)
688 "vmalloc area is too big, limiting to %luMB\n", 691 "vmalloc area is too big, limiting to %luMB\n",
689 vmalloc_reserve >> 20); 692 vmalloc_reserve >> 20);
690 } 693 }
694 return 0;
691} 695}
692__early_param("vmalloc=", early_vmalloc); 696early_param("vmalloc", early_vmalloc);
693 697
694#define VMALLOC_MIN (void *)(VMALLOC_END - vmalloc_reserve) 698#define VMALLOC_MIN (void *)(VMALLOC_END - vmalloc_reserve)
695 699
@@ -881,7 +885,7 @@ void __init reserve_node_zero(pg_data_t *pgdat)
 				     BOOTMEM_EXCLUSIVE);
 	}
 
-	if (machine_is_treo680()) {
+	if (machine_is_treo680() || machine_is_centro()) {
 		reserve_bootmem_node(pgdat, 0xa0000000, 0x1000,
 				     BOOTMEM_EXCLUSIVE);
 		reserve_bootmem_node(pgdat, 0xa2000000, 0x1000,
@@ -1036,7 +1040,7 @@ void __init paging_init(struct machine_desc *mdesc)
 	 */
 	zero_page = alloc_bootmem_low_pages(PAGE_SIZE);
 	empty_zero_page = virt_to_page(zero_page);
-	flush_dcache_page(empty_zero_page);
+	__flush_dcache_page(NULL, empty_zero_page);
 }
 
 /*
@@ -1050,10 +1054,12 @@ void setup_mm_for_reboot(char mode)
 	pgd_t *pgd;
 	int i;
 
-	if (current->mm && current->mm->pgd)
-		pgd = current->mm->pgd;
-	else
-		pgd = init_mm.pgd;
+	/*
+	 * We need access to the user-mode page tables here. Kernel threads
+	 * don't have any user-mode mappings, so we use the context that we
+	 * "borrowed".
+	 */
+	pgd = current->active_mm->pgd;
 
 	base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
 	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
@@ -1068,4 +1074,6 @@ void setup_mm_for_reboot(char mode)
 		pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
 		flush_pmd_entry(pmd);
 	}
+
+	local_flush_tlb_all();
 }