author    Linus Torvalds <torvalds@linux-foundation.org>  2014-06-05 18:57:04 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-06-05 18:57:04 -0400
commit    eb3d3ec567e868c8a3bfbfdfc9465ffd52983d11 (patch)
tree      75acf38b8d73cd281e5ce4dcc941faf48e244b98 /arch/arm/mm/mmu.c
parent    c3c55a07203947f72afa50a3218460b27307c47d (diff)
parent    bd63ce27d9d62bc40a962b991cbbbe4f0dc913d2 (diff)
Merge branch 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm into next
Pull ARM updates from Russell King:

 - Major clean-up of the L2 cache support code.  The existing mess was
   becoming rather unmaintainable through all the additions that others
   have done over time.  This turns it into a much nicer structure, and
   implements a few performance improvements as well.

 - Clean up some of the CP15 control register tweaks for alignment
   support, moving some code and data into alignment.c

 - DMA properties for ARM, from Santosh and reviewed by DT people.  This
   adds DT properties to specify bus translations we can't discover
   automatically, and to indicate whether devices are coherent.

 - Hibernation support for ARM

 - Make ftrace work with read-only text in modules

 - add suspend support for PJ4B CPUs

 - rework interrupt masking for undefined instruction handling, which
   allows us to enable interrupts earlier in the handling of these
   exceptions.

 - support for big endian page tables

 - fix stacktrace support to exclude stacktrace functions from the
   trace, and add save_stack_trace_regs() implementation so that kprobes
   can record stack traces.

 - Add support for the Cortex-A17 CPU.

 - Remove last vestiges of ARM710 support.

 - Removal of ARM "meminfo" structure, finally converting us solely to
   memblock to handle the early memory initialisation.

* 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm: (142 commits)
  ARM: ensure C page table setup code follows assembly code (part II)
  ARM: ensure C page table setup code follows assembly code
  ARM: consolidate last remaining open-coded alignment trap enable
  ARM: remove global cr_no_alignment
  ARM: remove CPU_CP15 conditional from alignment.c
  ARM: remove unused adjust_cr() function
  ARM: move "noalign" command line option to alignment.c
  ARM: provide common method to clear bits in CPU control register
  ARM: 8025/1: Get rid of meminfo
  ARM: 8060/1: mm: allow sub-architectures to override PCI I/O memory type
  ARM: 8066/1: correction for ARM patch 8031/2
  ARM: 8049/1: ftrace/add save_stack_trace_regs() implementation
  ARM: 8065/1: remove last use of CONFIG_CPU_ARM710
  ARM: 8062/1: Modify ldrt fixup handler to re-execute the userspace instruction
  ARM: 8047/1: rwsem: use asm-generic rwsem implementation
  ARM: l2c: trial at enabling some Cortex-A9 optimisations
  ARM: l2c: add warnings for stuff modifying aux_ctrl register values
  ARM: l2c: print a warning with L2C-310 caches if the cache size is modified
  ARM: l2c: remove old .set_debug method
  ARM: l2c: kill L2X0_AUX_CTRL_MASK before anyone else makes use of this
  ...
Diffstat (limited to 'arch/arm/mm/mmu.c')
-rw-r--r--  arch/arm/mm/mmu.c  |  238
1 file changed, 104 insertions(+), 134 deletions(-)
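
As background for the "DMA properties" item in the pull message: the series adds device tree properties (such as "dma-coherent") so coherence no longer has to be hard-coded per platform. Below is a hedged sketch of how a driver might consume that information, assuming the of_dma_is_coherent() helper the DT series describes; foo_probe() and its device are invented for illustration, not part of this diff.

	/*
	 * Hypothetical consumer of the "dma-coherent" DT property.
	 * Assumes of_dma_is_coherent(); foo_probe() is illustrative only.
	 */
	#include <linux/of_address.h>
	#include <linux/platform_device.h>

	static int foo_probe(struct platform_device *pdev)
	{
		/* True when the device node (or an ancestor) has "dma-coherent". */
		if (of_dma_is_coherent(pdev->dev.of_node))
			dev_info(&pdev->dev, "coherent DMA, no cache maintenance needed\n");
		else
			dev_info(&pdev->dev, "non-coherent DMA, cache maintenance required\n");

		return 0;
	}
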
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index b68c6b22e1c8..ab14b79b03f0 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -35,6 +35,7 @@
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 #include <asm/mach/pci.h>
+#include <asm/fixmap.h>
 
 #include "mm.h"
 #include "tcm.h"
@@ -117,28 +118,54 @@ static struct cachepolicy cache_policies[] __initdata = {
 };
 
 #ifdef CONFIG_CPU_CP15
+static unsigned long initial_pmd_value __initdata = 0;
+
 /*
- * These are useful for identifying cache coherency
- * problems by allowing the cache or the cache and
- * writebuffer to be turned off.  (Note: the write
- * buffer should not be on and the cache off).
+ * Initialise the cache_policy variable with the initial state specified
+ * via the "pmd" value.  This is used to ensure that on ARMv6 and later,
+ * the C code sets the page tables up with the same policy as the head
+ * assembly code, which avoids an illegal state where the TLBs can get
+ * confused.  See comments in early_cachepolicy() for more information.
  */
-static int __init early_cachepolicy(char *p)
+void __init init_default_cache_policy(unsigned long pmd)
 {
 	int i;
 
+	initial_pmd_value = pmd;
+
+	pmd &= PMD_SECT_TEX(1) | PMD_SECT_BUFFERABLE | PMD_SECT_CACHEABLE;
+
+	for (i = 0; i < ARRAY_SIZE(cache_policies); i++)
+		if (cache_policies[i].pmd == pmd) {
+			cachepolicy = i;
+			break;
+		}
+
+	if (i == ARRAY_SIZE(cache_policies))
+		pr_err("ERROR: could not find cache policy\n");
+}
+
+/*
+ * These are useful for identifying cache coherency problems by allowing
+ * the cache or the cache and writebuffer to be turned off.  (Note: the
+ * write buffer should not be on and the cache off).
+ */
+static int __init early_cachepolicy(char *p)
+{
+	int i, selected = -1;
+
 	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
 		int len = strlen(cache_policies[i].policy);
 
 		if (memcmp(p, cache_policies[i].policy, len) == 0) {
-			cachepolicy = i;
-			cr_alignment &= ~cache_policies[i].cr_mask;
-			cr_no_alignment &= ~cache_policies[i].cr_mask;
+			selected = i;
 			break;
 		}
 	}
-	if (i == ARRAY_SIZE(cache_policies))
-		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
+
+	if (selected == -1)
+		pr_err("ERROR: unknown or unsupported cache policy\n");
+
 	/*
 	 * This restriction is partly to do with the way we boot; it is
 	 * unpredictable to have memory mapped using two different sets of
@@ -146,12 +173,18 @@ static int __init early_cachepolicy(char *p)
 	 * change these attributes once the initial assembly has setup the
 	 * page tables.
 	 */
-	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
-		printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n");
-		cachepolicy = CPOLICY_WRITEBACK;
+	if (cpu_architecture() >= CPU_ARCH_ARMv6 && selected != cachepolicy) {
+		pr_warn("Only cachepolicy=%s supported on ARMv6 and later\n",
+			cache_policies[cachepolicy].policy);
+		return 0;
+	}
+
+	if (selected != cachepolicy) {
+		unsigned long cr = __clear_cr(cache_policies[selected].cr_mask);
+		cachepolicy = selected;
+		flush_cache_all();
+		set_cr(cr);
 	}
-	flush_cache_all();
-	set_cr(cr_alignment);
 	return 0;
 }
 early_param("cachepolicy", early_cachepolicy);
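
The new init_default_cache_policy() above is a mask-and-lookup: keep only the TEX/B/C bits of the pmd value handed over by the head assembly, then find the matching entry in cache_policies[]. A standalone sketch of the same technique follows; the mask and pmd encodings are invented stand-ins for the real PMD_SECT_* constants.

	#include <stdio.h>

	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

	struct policy { const char *name; unsigned long pmd; };

	/* Illustrative table; the real encodings live in cache_policies[]. */
	static const struct policy policies[] = {
		{ "uncached",     0x0 },
		{ "writethrough", 0x8 },
		{ "writeback",    0xc },
	};

	int main(void)
	{
		unsigned long pmd = 0x40c;	/* as if handed over by head assembly */
		const unsigned long mask = 0xc;	/* keep only the cacheability bits */
		size_t i;

		pmd &= mask;
		for (i = 0; i < ARRAY_SIZE(policies); i++)
			if (policies[i].pmd == pmd) {
				printf("default cache policy: %s\n", policies[i].name);
				return 0;
			}

		fprintf(stderr, "ERROR: could not find cache policy\n");
		return 1;
	}
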
@@ -186,35 +219,6 @@ static int __init early_ecc(char *p)
 early_param("ecc", early_ecc);
 #endif
 
-static int __init noalign_setup(char *__unused)
-{
-	cr_alignment &= ~CR_A;
-	cr_no_alignment &= ~CR_A;
-	set_cr(cr_alignment);
-	return 1;
-}
-__setup("noalign", noalign_setup);
-
-#ifndef CONFIG_SMP
-void adjust_cr(unsigned long mask, unsigned long set)
-{
-	unsigned long flags;
-
-	mask &= ~CR_A;
-
-	set &= mask;
-
-	local_irq_save(flags);
-
-	cr_no_alignment = (cr_no_alignment & ~mask) | set;
-	cr_alignment = (cr_alignment & ~mask) | set;
-
-	set_cr((get_cr() & ~mask) | set);
-
-	local_irq_restore(flags);
-}
-#endif
-
 #else	/* ifdef CONFIG_CPU_CP15 */
 
 static int __init early_cachepolicy(char *p)
@@ -414,8 +418,17 @@ static void __init build_mem_type_table(void)
 		cachepolicy = CPOLICY_WRITEBACK;
 		ecc_mask = 0;
 	}
-	if (is_smp())
-		cachepolicy = CPOLICY_WRITEALLOC;
+
+	if (is_smp()) {
+		if (cachepolicy != CPOLICY_WRITEALLOC) {
+			pr_warn("Forcing write-allocate cache policy for SMP\n");
+			cachepolicy = CPOLICY_WRITEALLOC;
+		}
+		if (!(initial_pmd_value & PMD_SECT_S)) {
+			pr_warn("Forcing shared mappings for SMP\n");
+			initial_pmd_value |= PMD_SECT_S;
+		}
+	}
 
 	/*
 	 * Strip out features not present on earlier architectures.
@@ -539,11 +552,12 @@ static void __init build_mem_type_table(void)
 	mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 #endif
 
-	if (is_smp()) {
-		/*
-		 * Mark memory with the "shared" attribute
-		 * for SMP systems
-		 */
+	/*
+	 * If the initial page tables were created with the S bit
+	 * set, then we need to do the same here for the same
+	 * reasons given in early_cachepolicy().
+	 */
+	if (initial_pmd_value & PMD_SECT_S) {
 		user_pgprot |= L_PTE_SHARED;
 		kern_pgprot |= L_PTE_SHARED;
 		vecs_pgprot |= L_PTE_SHARED;
@@ -1061,74 +1075,47 @@ phys_addr_t arm_lowmem_limit __initdata = 0;
 void __init sanity_check_meminfo(void)
 {
 	phys_addr_t memblock_limit = 0;
-	int i, j, highmem = 0;
+	int highmem = 0;
 	phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1;
+	struct memblock_region *reg;
 
-	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
-		struct membank *bank = &meminfo.bank[j];
-		phys_addr_t size_limit;
-
-		*bank = meminfo.bank[i];
-		size_limit = bank->size;
+	for_each_memblock(memory, reg) {
+		phys_addr_t block_start = reg->base;
+		phys_addr_t block_end = reg->base + reg->size;
+		phys_addr_t size_limit = reg->size;
 
-		if (bank->start >= vmalloc_limit)
+		if (reg->base >= vmalloc_limit)
 			highmem = 1;
 		else
-			size_limit = vmalloc_limit - bank->start;
-
-		bank->highmem = highmem;
+			size_limit = vmalloc_limit - reg->base;
 
-#ifdef CONFIG_HIGHMEM
-		/*
-		 * Split those memory banks which are partially overlapping
-		 * the vmalloc area greatly simplifying things later.
-		 */
-		if (!highmem && bank->size > size_limit) {
-			if (meminfo.nr_banks >= NR_BANKS) {
-				printk(KERN_CRIT "NR_BANKS too low, "
-						 "ignoring high memory\n");
-			} else {
-				memmove(bank + 1, bank,
-					(meminfo.nr_banks - i) * sizeof(*bank));
-				meminfo.nr_banks++;
-				i++;
-				bank[1].size -= size_limit;
-				bank[1].start = vmalloc_limit;
-				bank[1].highmem = highmem = 1;
-				j++;
+		if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) {
+
+			if (highmem) {
+				pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n",
+					  &block_start, &block_end);
+				memblock_remove(reg->base, reg->size);
+				continue;
 			}
-			bank->size = size_limit;
-		}
-#else
-		/*
-		 * Highmem banks not allowed with !CONFIG_HIGHMEM.
-		 */
-		if (highmem) {
-			printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
-			       "(!CONFIG_HIGHMEM).\n",
-			       (unsigned long long)bank->start,
-			       (unsigned long long)bank->start + bank->size - 1);
-			continue;
-		}
 
-		/*
-		 * Check whether this memory bank would partially overlap
-		 * the vmalloc area.
-		 */
-		if (bank->size > size_limit) {
-			printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
-			       "to -%.8llx (vmalloc region overlap).\n",
-			       (unsigned long long)bank->start,
-			       (unsigned long long)bank->start + bank->size - 1,
-			       (unsigned long long)bank->start + size_limit - 1);
-			bank->size = size_limit;
+			if (reg->size > size_limit) {
+				phys_addr_t overlap_size = reg->size - size_limit;
+
+				pr_notice("Truncating RAM at %pa-%pa to -%pa",
+					  &block_start, &block_end, &vmalloc_limit);
+				memblock_remove(vmalloc_limit, overlap_size);
+				block_end = vmalloc_limit;
+			}
 		}
-#endif
-		if (!bank->highmem) {
-			phys_addr_t bank_end = bank->start + bank->size;
 
-			if (bank_end > arm_lowmem_limit)
-				arm_lowmem_limit = bank_end;
+		if (!highmem) {
+			if (block_end > arm_lowmem_limit) {
+				if (reg->size > size_limit)
+					arm_lowmem_limit = vmalloc_limit;
+				else
+					arm_lowmem_limit = block_end;
+			}
 
 			/*
 			 * Find the first non-section-aligned page, and point
@@ -1144,35 +1131,15 @@ void __init sanity_check_meminfo(void)
 			 * occurs before any free memory is mapped.
 			 */
 			if (!memblock_limit) {
-				if (!IS_ALIGNED(bank->start, SECTION_SIZE))
-					memblock_limit = bank->start;
-				else if (!IS_ALIGNED(bank_end, SECTION_SIZE))
-					memblock_limit = bank_end;
+				if (!IS_ALIGNED(block_start, SECTION_SIZE))
+					memblock_limit = block_start;
+				else if (!IS_ALIGNED(block_end, SECTION_SIZE))
+					memblock_limit = arm_lowmem_limit;
 			}
-		}
-		j++;
-	}
-#ifdef CONFIG_HIGHMEM
-	if (highmem) {
-		const char *reason = NULL;
 
-		if (cache_is_vipt_aliasing()) {
-			/*
-			 * Interactions between kmap and other mappings
-			 * make highmem support with aliasing VIPT caches
-			 * rather difficult.
-			 */
-			reason = "with VIPT aliasing cache";
-		}
-		if (reason) {
-			printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
-				reason);
-			while (j > 0 && meminfo.bank[j - 1].highmem)
-				j--;
-		}
+		}
 	}
-#endif
-	meminfo.nr_banks = j;
+
 	high_memory = __va(arm_lowmem_limit - 1) + 1;
 
 	/*
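
The reworked sanity_check_meminfo() above walks memblock directly: highmem regions are dropped when highmem is unavailable or unusable, regions straddling the vmalloc limit are trimmed with memblock_remove(), and the lowmem limit is raised as the walk proceeds. A standalone model of that clamping logic, with invented addresses; the real code iterates with for_each_memblock() and edits the memblock tables in place.

	#include <stdio.h>
	#include <stdint.h>

	struct region { uint64_t base, size; };

	int main(void)
	{
		const uint64_t vmalloc_limit = 0x30000000;	/* illustrative */
		struct region regs[] = {
			{ 0x00000000, 0x20000000 },	/* entirely lowmem */
			{ 0x20000000, 0x20000000 },	/* straddles the limit */
			{ 0x40000000, 0x10000000 },	/* entirely highmem */
		};
		uint64_t lowmem_limit = 0;
		size_t i;

		for (i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
			uint64_t end = regs[i].base + regs[i].size;

			if (regs[i].base >= vmalloc_limit) {
				/* models memblock_remove() of a highmem region */
				printf("drop %#llx-%#llx (highmem)\n",
				       (unsigned long long)regs[i].base,
				       (unsigned long long)end);
				continue;
			}
			if (end > vmalloc_limit) {
				/* models trimming the overlap past the limit */
				printf("trim %#llx-%#llx to -%#llx\n",
				       (unsigned long long)regs[i].base,
				       (unsigned long long)end,
				       (unsigned long long)vmalloc_limit);
				end = vmalloc_limit;
			}
			if (end > lowmem_limit)
				lowmem_limit = end;
		}
		printf("arm_lowmem_limit would be %#llx\n",
		       (unsigned long long)lowmem_limit);
		return 0;
	}
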
@@ -1359,6 +1326,9 @@ static void __init kmap_init(void)
 #ifdef CONFIG_HIGHMEM
 	pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
 		PKMAP_BASE, _PAGE_KERNEL_TABLE);
+
+	fixmap_page_table = early_pte_alloc(pmd_off_k(FIXADDR_START),
+		FIXADDR_START, _PAGE_KERNEL_TABLE);
 #endif
 }
 
@@ -1461,7 +1431,7 @@ void __init early_paging_init(const struct machine_desc *mdesc,
 	 * just complicate the code.
 	 */
 	flush_cache_louis();
-	dsb();
+	dsb(ishst);
 	isb();
 
 	/* remap level 1 table */