Diffstat (limited to 'arch/arm/mm/mmu.c')
 arch/arm/mm/mmu.c | 127 ++++++++++++++++++++++++++++----------------------------
 1 file changed, 64 insertions(+), 63 deletions(-)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 9f98cec7fe1e..cda7c40999b6 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -22,6 +22,7 @@
 #include <asm/cputype.h>
 #include <asm/sections.h>
 #include <asm/cachetype.h>
+#include <asm/fixmap.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/smp_plat.h>
@@ -52,6 +53,8 @@ EXPORT_SYMBOL(empty_zero_page);
  */
 pmd_t *top_pmd;
 
+pmdval_t user_pmd_table = _PAGE_USER_TABLE;
+
 #define CPOLICY_UNCACHED	0
 #define CPOLICY_BUFFERED	1
 #define CPOLICY_WRITETHROUGH	2
@@ -192,7 +195,7 @@ early_param("cachepolicy", early_cachepolicy);
 static int __init early_nocache(char *__unused)
 {
 	char *p = "buffered";
-	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
+	pr_warn("nocache is deprecated; use cachepolicy=%s\n", p);
 	early_cachepolicy(p);
 	return 0;
 }
@@ -201,7 +204,7 @@ early_param("nocache", early_nocache);
 static int __init early_nowrite(char *__unused)
 {
 	char *p = "uncached";
-	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
+	pr_warn("nowb is deprecated; use cachepolicy=%s\n", p);
 	early_cachepolicy(p);
 	return 0;
 }
@@ -354,43 +357,28 @@ const struct mem_type *get_mem_type(unsigned int type)
 }
 EXPORT_SYMBOL(get_mem_type);
 
-#define PTE_SET_FN(_name, pteop) \
-static int pte_set_##_name(pte_t *ptep, pgtable_t token, unsigned long addr, \
-			void *data) \
-{ \
-	pte_t pte = pteop(*ptep); \
-\
-	set_pte_ext(ptep, pte, 0); \
-	return 0; \
-} \
-
-#define SET_MEMORY_FN(_name, callback) \
-int set_memory_##_name(unsigned long addr, int numpages) \
-{ \
-	unsigned long start = addr; \
-	unsigned long size = PAGE_SIZE*numpages; \
-	unsigned end = start + size; \
-\
-	if (start < MODULES_VADDR || start >= MODULES_END) \
-		return -EINVAL;\
-\
-	if (end < MODULES_VADDR || end >= MODULES_END) \
-		return -EINVAL; \
-\
-	apply_to_page_range(&init_mm, start, size, callback, NULL); \
-	flush_tlb_kernel_range(start, end); \
-	return 0;\
-}
+/*
+ * To avoid TLB flush broadcasts, this uses local_flush_tlb_kernel_range().
+ * As a result, this can only be called with preemption disabled, as under
+ * stop_machine().
+ */
+void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
+{
+	unsigned long vaddr = __fix_to_virt(idx);
+	pte_t *pte = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
 
-PTE_SET_FN(ro, pte_wrprotect)
-PTE_SET_FN(rw, pte_mkwrite)
-PTE_SET_FN(x, pte_mkexec)
-PTE_SET_FN(nx, pte_mknexec)
+	/* Make sure fixmap region does not exceed available allocation. */
+	BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) >
+		     FIXADDR_END);
+	BUG_ON(idx >= __end_of_fixed_addresses);
 
-SET_MEMORY_FN(ro, pte_set_ro)
-SET_MEMORY_FN(rw, pte_set_rw)
-SET_MEMORY_FN(x, pte_set_x)
-SET_MEMORY_FN(nx, pte_set_nx)
+	if (pgprot_val(prot))
+		set_pte_at(NULL, vaddr, pte,
+			   pfn_pte(phys >> PAGE_SHIFT, prot));
+	else
+		pte_clear(NULL, vaddr, pte);
+	local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
+}
 
 /*
  * Adjust the PMD section entries according to the CPU in use.
@@ -528,14 +516,23 @@ static void __init build_mem_type_table(void)
 	hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte;
 	s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2;
 
+#ifndef CONFIG_ARM_LPAE
 	/*
 	 * We don't use domains on ARMv6 (since this causes problems with
 	 * v6/v7 kernels), so we must use a separate memory type for user
 	 * r/o, kernel r/w to map the vectors page.
 	 */
-#ifndef CONFIG_ARM_LPAE
 	if (cpu_arch == CPU_ARCH_ARMv6)
 		vecs_pgprot |= L_PTE_MT_VECTORS;
+
+	/*
+	 * Check whether the CPU supports the PXN bit in the
+	 * Short-descriptor translation table format descriptors.
+	 */
+	if (cpu_arch == CPU_ARCH_ARMv7 &&
+		(read_cpuid_ext(CPUID_EXT_MMFR0) & 0xF) == 4) {
+		user_pmd_table |= PMD_PXNTABLE;
+	}
 #endif
 
 	/*
@@ -605,6 +602,11 @@ static void __init build_mem_type_table(void)
 	}
 	kern_pgprot |= PTE_EXT_AF;
 	vecs_pgprot |= PTE_EXT_AF;
+
+	/*
+	 * Set PXN for user mappings
+	 */
+	user_pgprot |= PTE_EXT_PXN;
 #endif
 
 	for (i = 0; i < 16; i++) {
@@ -786,8 +788,7 @@ static void __init create_36bit_mapping(struct map_desc *md,
 	length = PAGE_ALIGN(md->length);
 
 	if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
-		printk(KERN_ERR "MM: CPU does not support supersection "
-		       "mapping for 0x%08llx at 0x%08lx\n",
+		pr_err("MM: CPU does not support supersection mapping for 0x%08llx at 0x%08lx\n",
 		       (long long)__pfn_to_phys((u64)md->pfn), addr);
 		return;
 	}
@@ -799,15 +800,13 @@ static void __init create_36bit_mapping(struct map_desc *md,
 	 * of the actual domain assignments in use.
 	 */
 	if (type->domain) {
-		printk(KERN_ERR "MM: invalid domain in supersection "
-		       "mapping for 0x%08llx at 0x%08lx\n",
+		pr_err("MM: invalid domain in supersection mapping for 0x%08llx at 0x%08lx\n",
 		       (long long)__pfn_to_phys((u64)md->pfn), addr);
 		return;
 	}
 
 	if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
-		printk(KERN_ERR "MM: cannot create mapping for 0x%08llx"
-		       " at 0x%08lx invalid alignment\n",
+		pr_err("MM: cannot create mapping for 0x%08llx at 0x%08lx invalid alignment\n",
 		       (long long)__pfn_to_phys((u64)md->pfn), addr);
 		return;
 	}
@@ -850,18 +849,16 @@ static void __init create_mapping(struct map_desc *md)
 	pgd_t *pgd;
 
 	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
-		printk(KERN_WARNING "BUG: not creating mapping for 0x%08llx"
-		       " at 0x%08lx in user region\n",
-		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
+		pr_warn("BUG: not creating mapping for 0x%08llx at 0x%08lx in user region\n",
+			(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
 		return;
 	}
 
 	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
 	    md->virtual >= PAGE_OFFSET &&
 	    (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
-		printk(KERN_WARNING "BUG: mapping for 0x%08llx"
-		       " at 0x%08lx out of vmalloc space\n",
-		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
+		pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
+			(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
 	}
 
 	type = &mem_types[md->type];
@@ -881,9 +878,8 @@ static void __init create_mapping(struct map_desc *md)
 	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
 
 	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
-		printk(KERN_WARNING "BUG: map for 0x%08llx at 0x%08lx can not "
-		       "be mapped using pages, ignoring.\n",
-		       (long long)__pfn_to_phys(md->pfn), addr);
+		pr_warn("BUG: map for 0x%08llx at 0x%08lx can not be mapped using pages, ignoring.\n",
+			(long long)__pfn_to_phys(md->pfn), addr);
 		return;
 	}
 
@@ -1053,15 +1049,13 @@ static int __init early_vmalloc(char *arg)
 
 	if (vmalloc_reserve < SZ_16M) {
 		vmalloc_reserve = SZ_16M;
-		printk(KERN_WARNING
-			"vmalloc area too small, limiting to %luMB\n",
+		pr_warn("vmalloc area too small, limiting to %luMB\n",
 			vmalloc_reserve >> 20);
 	}
 
 	if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
 		vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
-		printk(KERN_WARNING
-			"vmalloc area is too big, limiting to %luMB\n",
+		pr_warn("vmalloc area is too big, limiting to %luMB\n",
 			vmalloc_reserve >> 20);
 	}
 
@@ -1094,7 +1088,7 @@ void __init sanity_check_meminfo(void)
 
 		if (highmem) {
 			pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n",
-				&block_start, &block_end);
+				  &block_start, &block_end);
 			memblock_remove(reg->base, reg->size);
 			continue;
 		}
@@ -1103,7 +1097,7 @@ void __init sanity_check_meminfo(void)
 			phys_addr_t overlap_size = reg->size - size_limit;
 
 			pr_notice("Truncating RAM at %pa-%pa to -%pa",
-				&block_start, &block_end, &vmalloc_limit);
+				  &block_start, &block_end, &vmalloc_limit);
 			memblock_remove(vmalloc_limit, overlap_size);
 			block_end = vmalloc_limit;
 		}
@@ -1326,10 +1320,10 @@ static void __init kmap_init(void)
 #ifdef CONFIG_HIGHMEM
 	pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
 		PKMAP_BASE, _PAGE_KERNEL_TABLE);
-
-	fixmap_page_table = early_pte_alloc(pmd_off_k(FIXADDR_START),
-		FIXADDR_START, _PAGE_KERNEL_TABLE);
 #endif
+
+	early_pte_alloc(pmd_off_k(FIXADDR_START), FIXADDR_START,
+		_PAGE_KERNEL_TABLE);
 }
 
 static void __init map_lowmem(void)
@@ -1349,13 +1343,20 @@ static void __init map_lowmem(void)
 		if (start >= end)
 			break;
 
-		if (end < kernel_x_start || start >= kernel_x_end) {
+		if (end < kernel_x_start) {
 			map.pfn = __phys_to_pfn(start);
 			map.virtual = __phys_to_virt(start);
 			map.length = end - start;
 			map.type = MT_MEMORY_RWX;
 
 			create_mapping(&map);
+		} else if (start >= kernel_x_end) {
+			map.pfn = __phys_to_pfn(start);
+			map.virtual = __phys_to_virt(start);
+			map.length = end - start;
+			map.type = MT_MEMORY_RW;
+
+			create_mapping(&map);
 		} else {
 			/* This better cover the entire kernel */
 			if (start < kernel_x_start) {