Diffstat (limited to 'arch/arm/mm/mmu.c')
-rw-r--r--  arch/arm/mm/mmu.c  117
1 files changed, 85 insertions, 32 deletions
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 8ba754064559..7f36c825718d 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -180,20 +180,20 @@ void adjust_cr(unsigned long mask, unsigned long set)
 #endif
 
 #define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_WRITE
-#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_XN|PMD_SECT_AP_WRITE
+#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE
 
 static struct mem_type mem_types[] = {
 	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
 		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
 				  L_PTE_SHARED,
 		.prot_l1	= PMD_TYPE_TABLE,
-		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_UNCACHED,
+		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_S,
 		.domain		= DOMAIN_IO,
 	},
 	[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
 		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
 		.prot_l1	= PMD_TYPE_TABLE,
-		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_TEX(2),
+		.prot_sect	= PROT_SECT_DEVICE,
 		.domain		= DOMAIN_IO,
 	},
 	[MT_DEVICE_CACHED] = {	  /* ioremap_cached */
@@ -205,7 +205,13 @@ static struct mem_type mem_types[] = {
 	[MT_DEVICE_WC] = {	/* ioremap_wc */
 		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
 		.prot_l1	= PMD_TYPE_TABLE,
-		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_BUFFERABLE,
+		.prot_sect	= PROT_SECT_DEVICE,
+		.domain		= DOMAIN_IO,
+	},
+	[MT_UNCACHED] = {
+		.prot_pte	= PROT_PTE_DEVICE,
+		.prot_l1	= PMD_TYPE_TABLE,
+		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
 		.domain		= DOMAIN_IO,
 	},
 	[MT_CACHECLEAN] = {
@@ -273,22 +279,23 @@ static void __init build_mem_type_table(void)
 #endif
 
 	/*
-	 * On non-Xscale3 ARMv5-and-older systems, use CB=01
-	 * (Uncached/Buffered) for ioremap_wc() mappings.  On XScale3
-	 * and ARMv6+, use TEXCB=00100 mappings (Inner/Outer Uncacheable
-	 * in xsc3 parlance, Uncached Normal in ARMv6 parlance).
+	 * Strip out features not present on earlier architectures.
+	 * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
+	 * without extended page tables don't have the 'Shared' bit.
 	 */
-	if (cpu_is_xsc3() || cpu_arch >= CPU_ARCH_ARMv6) {
-		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
-		mem_types[MT_DEVICE_WC].prot_sect &= ~PMD_SECT_BUFFERABLE;
-	}
+	if (cpu_arch < CPU_ARCH_ARMv5)
+		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
+			mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
+	if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
+		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
+			mem_types[i].prot_sect &= ~PMD_SECT_S;
 
 	/*
-	 * ARMv5 and lower, bit 4 must be set for page tables.
-	 * (was: cache "update-able on write" bit on ARM610)
-	 * However, Xscale cores require this bit to be cleared.
+	 * ARMv5 and lower, bit 4 must be set for page tables (was: cache
+	 * "update-able on write" bit on ARM610).  However, Xscale and
+	 * Xscale3 require this bit to be cleared.
 	 */
-	if (cpu_is_xscale()) {
+	if (cpu_is_xscale() || cpu_is_xsc3()) {
 		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
 			mem_types[i].prot_sect &= ~PMD_BIT4;
 			mem_types[i].prot_l1 &= ~PMD_BIT4;
@@ -302,6 +309,64 @@ static void __init build_mem_type_table(void)
 		}
 	}
 
+	/*
+	 * Mark the device areas according to the CPU/architecture.
+	 */
+	if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
+		if (!cpu_is_xsc3()) {
+			/*
+			 * Mark device regions on ARMv6+ as execute-never
+			 * to prevent speculative instruction fetches.
+			 */
+			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
+			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
+			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
+			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
+		}
+		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
+			/*
+			 * For ARMv7 with TEX remapping,
+			 * - shared device is SXCB=1100
+			 * - nonshared device is SXCB=0100
+			 * - write combine device mem is SXCB=0001
+			 * (Uncached Normal memory)
+			 */
+			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
+			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
+			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
+		} else if (cpu_is_xsc3()) {
+			/*
+			 * For Xscale3,
+			 * - shared device is TEXCB=00101
+			 * - nonshared device is TEXCB=01000
+			 * - write combine device mem is TEXCB=00100
+			 * (Inner/Outer Uncacheable in xsc3 parlance)
+			 */
+			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
+			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
+			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
+		} else {
+			/*
+			 * For ARMv6 and ARMv7 without TEX remapping,
+			 * - shared device is TEXCB=00001
+			 * - nonshared device is TEXCB=01000
+			 * - write combine device mem is TEXCB=00100
+			 * (Uncached Normal in ARMv6 parlance).
+			 */
+			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
+			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
+			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
+		}
+	} else {
+		/*
+		 * On others, write combining is "Uncached/Buffered"
+		 */
+		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
+	}
+
+	/*
+	 * Now deal with the memory-type mappings
+	 */
 	cp = &cache_policies[cachepolicy];
 	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
 
@@ -317,12 +382,8 @@ static void __init build_mem_type_table(void)
 	 * Enable CPU-specific coherency if supported.
 	 * (Only available on XSC3 at the moment.)
 	 */
-	if (arch_is_coherent()) {
-		if (cpu_is_xsc3()) {
-			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
-			mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
-		}
-	}
+	if (arch_is_coherent() && cpu_is_xsc3())
+		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
 
 	/*
 	 * ARMv6 and above have extended page tables.
@@ -336,11 +397,6 @@ static void __init build_mem_type_table(void)
 		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 
-		/*
-		 * Mark the device area as "shared device"
-		 */
-		mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
-
 #ifdef CONFIG_SMP
 		/*
 		 * Mark memory with the "shared" attribute for SMP systems
@@ -360,9 +416,6 @@ static void __init build_mem_type_table(void)
 	mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
 	mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;
 
-	if (cpu_arch < CPU_ARCH_ARMv5)
-		mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
-
 	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
 	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
 				 L_PTE_DIRTY | L_PTE_WRITE |
@@ -654,7 +707,7 @@ static inline void prepare_page_table(struct meminfo *mi)
 	/*
 	 * Clear out all the mappings below the kernel image.
 	 */
-	for (addr = 0; addr < MODULE_START; addr += PGDIR_SIZE)
+	for (addr = 0; addr < MODULES_VADDR; addr += PGDIR_SIZE)
 		pmd_clear(pmd_off_k(addr));
 
 #ifdef CONFIG_XIP_KERNEL
@@ -766,7 +819,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	 */
 #ifdef CONFIG_XIP_KERNEL
 	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
-	map.virtual = MODULE_START;
+	map.virtual = MODULES_VADDR;
 	map.length = ((unsigned long)&_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
 	map.type = MT_ROM;
 	create_mapping(&map);
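
As a worked example of the TEXCB encodings quoted in the comments this patch adds (a standalone sketch, not part of the patch itself): the bit positions below are taken from arch/arm/include/asm/pgtable-hwdef.h, where B is bit 2, C is bit 3 and TEX[2:0] occupies bits 14:12 of a level-1 section descriptor, and the helper texcb() is a hypothetical name introduced here for illustration.

#include <stdio.h>

/* Bit positions as in arch/arm/include/asm/pgtable-hwdef.h */
#define PMD_SECT_BUFFERABLE	(1u << 2)		/* B bit */
#define PMD_SECT_CACHEABLE	(1u << 3)		/* C bit */
#define PMD_SECT_TEX(x)		((unsigned)(x) << 12)	/* TEX[2:0] */
#define PMD_SECT_BUFFERED	PMD_SECT_BUFFERABLE	/* C=0, B=1 */

/* Pack TEX[2:0], C and B from a section descriptor into a 5-bit TEXCB value */
static unsigned texcb(unsigned prot_sect)
{
	unsigned tex = (prot_sect >> 12) & 7;
	unsigned c = (prot_sect >> 3) & 1;
	unsigned b = (prot_sect >> 2) & 1;
	return (tex << 2) | (c << 1) | b;
}

int main(void)
{
	/* xsc3 shared device: the comment says TEXCB=00101, i.e. 0x05 */
	printf("xsc3 shared device: TEXCB=%#04x\n",
	       texcb(PMD_SECT_TEX(1) | PMD_SECT_BUFFERED));
	/* nonshared device (xsc3 and v6/v7 w/o remap): TEXCB=01000, i.e. 0x08 */
	printf("nonshared device:   TEXCB=%#04x\n", texcb(PMD_SECT_TEX(2)));
	/* write combine: TEXCB=00100, i.e. 0x04 */
	printf("write combine:      TEXCB=%#04x\n", texcb(PMD_SECT_TEX(1)));
	return 0;
}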