Diffstat (limited to 'arch/arm/mm')
 -rw-r--r--  arch/arm/mm/Kconfig           37
 -rw-r--r--  arch/arm/mm/context.c          3
 -rw-r--r--  arch/arm/mm/mmu.c             57
 -rw-r--r--  arch/arm/mm/proc-v7-2level.S   2
 -rw-r--r--  arch/arm/mm/proc-v7-3level.S   2
 -rw-r--r--  arch/arm/mm/proc-v7.S         11
 6 files changed, 98 insertions, 14 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 6cacdc8dd654..cd2c88e7a8f7 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -421,24 +421,28 @@ config CPU_32v3
 	select CPU_USE_DOMAINS if MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 	select TLS_REG_EMUL if SMP || !MMU
+	select NEED_KUSER_HELPERS
 
 config CPU_32v4
 	bool
 	select CPU_USE_DOMAINS if MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 	select TLS_REG_EMUL if SMP || !MMU
+	select NEED_KUSER_HELPERS
 
 config CPU_32v4T
 	bool
 	select CPU_USE_DOMAINS if MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 	select TLS_REG_EMUL if SMP || !MMU
+	select NEED_KUSER_HELPERS
 
 config CPU_32v5
 	bool
 	select CPU_USE_DOMAINS if MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 	select TLS_REG_EMUL if SMP || !MMU
+	select NEED_KUSER_HELPERS
 
 config CPU_32v6
 	bool
@@ -776,6 +780,7 @@ config CPU_BPREDICT_DISABLE
 
 config TLS_REG_EMUL
 	bool
+	select NEED_KUSER_HELPERS
 	help
 	  An SMP system using a pre-ARMv6 processor (there are apparently
 	  a few prototypes like that in existence) and therefore access to
@@ -783,11 +788,43 @@ config TLS_REG_EMUL
 
 config NEEDS_SYSCALL_FOR_CMPXCHG
 	bool
+	select NEED_KUSER_HELPERS
 	help
 	  SMP on a pre-ARMv6 processor? Well OK then.
 	  Forget about fast user space cmpxchg support.
 	  It is just not possible.
 
+config NEED_KUSER_HELPERS
+	bool
+
+config KUSER_HELPERS
+	bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
+	default y
+	help
+	  Warning: disabling this option may break user programs.
+
+	  Provide kuser helpers in the vector page. The kernel provides
+	  helper code to userspace in read only form at a fixed location
+	  in the high vector page to allow userspace to be independent of
+	  the CPU type fitted to the system. This permits binaries to be
+	  run on ARMv4 through to ARMv7 without modification.
+
+	  See Documentation/arm/kernel_user_helpers.txt for details.
+
+	  However, the fixed address nature of these helpers can be used
+	  by ROP (return orientated programming) authors when creating
+	  exploits.
+
+	  If all of the binaries and libraries which run on your platform
+	  are built specifically for your platform, and make no use of
+	  these helpers, then you can turn this option off to hinder
+	  such exploits. However, in that case, if a binary or library
+	  relying on those helpers is run, it will receive a SIGILL signal,
+	  which will terminate the program.
+
+	  Say N here only if you are absolutely certain that you do not
+	  need these helpers; otherwise, the safe option is to say Y.
+
 config DMA_CACHE_RWFO
 	bool "Enable read/write for ownership DMA cache maintenance"
 	depends on CPU_V6K && SMP
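The KUSER_HELPERS help text above is clearer with an example of how userspace actually reaches these helpers. The following sketch is illustrative and not part of the patch: the entry address and calling convention come from Documentation/arm/kernel_user_helpers.txt (__kuser_cmpxchg at 0xffff0fc0, returning zero on success), while the wrapper function is invented for the example. A binary doing this on a kernel built with KUSER_HELPERS=n takes the SIGILL described in the help text.

/* Hypothetical userspace code, for illustration only. */
typedef int (kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);

/* Fixed entry point documented in Documentation/arm/kernel_user_helpers.txt. */
#define __kuser_cmpxchg (*(kuser_cmpxchg_t *)0xffff0fc0)

/*
 * Atomic increment that works on ARMv4 through ARMv7 without knowing
 * whether the CPU has ldrex/strex; the kernel helper behind the fixed
 * address provides the appropriate implementation.
 */
static int atomic_increment(volatile int *counter)
{
	int old, new;

	do {
		old = *counter;
		new = old + 1;
	} while (__kuser_cmpxchg(old, new, counter) != 0);	/* 0 means success */

	return new;
}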
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index b55b1015724b..4a0544492f10 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -245,7 +245,8 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
 	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
 		local_flush_bp_all();
 		local_flush_tlb_all();
-		dummy_flush_tlb_a15_erratum();
+		if (erratum_a15_798181())
+			dummy_flush_tlb_a15_erratum();
 	}
 
 	atomic64_set(&per_cpu(active_asids, cpu), asid);
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4f56617a2392..53cdbd39ec8e 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -989,6 +989,7 @@ phys_addr_t arm_lowmem_limit __initdata = 0;
 
 void __init sanity_check_meminfo(void)
 {
+	phys_addr_t memblock_limit = 0;
 	int i, j, highmem = 0;
 	phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1;
 
@@ -1052,9 +1053,32 @@ void __init sanity_check_meminfo(void)
 			bank->size = size_limit;
 		}
 #endif
-		if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit)
-			arm_lowmem_limit = bank->start + bank->size;
+		if (!bank->highmem) {
+			phys_addr_t bank_end = bank->start + bank->size;
 
+			if (bank_end > arm_lowmem_limit)
+				arm_lowmem_limit = bank_end;
+
+			/*
+			 * Find the first non-section-aligned page, and point
+			 * memblock_limit at it. This relies on rounding the
+			 * limit down to be section-aligned, which happens at
+			 * the end of this function.
+			 *
+			 * With this algorithm, the start or end of almost any
+			 * bank can be non-section-aligned. The only exception
+			 * is that the start of the bank 0 must be section-
+			 * aligned, since otherwise memory would need to be
+			 * allocated when mapping the start of bank 0, which
+			 * occurs before any free memory is mapped.
+			 */
+			if (!memblock_limit) {
+				if (!IS_ALIGNED(bank->start, SECTION_SIZE))
+					memblock_limit = bank->start;
+				else if (!IS_ALIGNED(bank_end, SECTION_SIZE))
+					memblock_limit = bank_end;
+			}
+		}
 		j++;
 	}
 #ifdef CONFIG_HIGHMEM
@@ -1079,7 +1103,18 @@ void __init sanity_check_meminfo(void)
 #endif
 	meminfo.nr_banks = j;
 	high_memory = __va(arm_lowmem_limit - 1) + 1;
-	memblock_set_current_limit(arm_lowmem_limit);
+
+	/*
+	 * Round the memblock limit down to a section size. This
+	 * helps to ensure that we will allocate memory from the
+	 * last full section, which should be mapped.
+	 */
+	if (memblock_limit)
+		memblock_limit = round_down(memblock_limit, SECTION_SIZE);
+	if (!memblock_limit)
+		memblock_limit = arm_lowmem_limit;
+
+	memblock_set_current_limit(memblock_limit);
 }
 
 static inline void prepare_page_table(void)
@@ -1160,7 +1195,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	/*
 	 * Allocate the vector page early.
 	 */
-	vectors = early_alloc(PAGE_SIZE);
+	vectors = early_alloc(PAGE_SIZE * 2);
 
 	early_trap_init(vectors);
 
@@ -1205,15 +1240,27 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
 	map.virtual = 0xffff0000;
 	map.length = PAGE_SIZE;
+#ifdef CONFIG_KUSER_HELPERS
 	map.type = MT_HIGH_VECTORS;
+#else
+	map.type = MT_LOW_VECTORS;
+#endif
 	create_mapping(&map);
 
 	if (!vectors_high()) {
 		map.virtual = 0;
+		map.length = PAGE_SIZE * 2;
 		map.type = MT_LOW_VECTORS;
 		create_mapping(&map);
 	}
 
+	/* Now create a kernel read-only mapping */
+	map.pfn += 1;
+	map.virtual = 0xffff0000 + PAGE_SIZE;
+	map.length = PAGE_SIZE;
+	map.type = MT_LOW_VECTORS;
+	create_mapping(&map);
+
 	/*
 	 * Ask the machine support to map in the statically mapped devices.
 	 */
@@ -1276,8 +1323,6 @@ void __init paging_init(struct machine_desc *mdesc)
 {
 	void *zero_page;
 
-	memblock_set_current_limit(arm_lowmem_limit);
-
 	build_mem_type_table();
 	prepare_page_table();
 	map_lowmem();
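As a sanity check on the new memblock_limit handling in sanity_check_meminfo(), the arithmetic can be tried in isolation. The sketch below is not kernel code: SECTION_SIZE and the bank boundary are assumed example values (1MiB sections, as with the classic 2-level page tables; LPAE uses 2MiB), and IS_ALIGNED/round_down are re-implemented locally to mirror the kernel macros.

/* Standalone illustration, not part of this patch. */
#include <stdio.h>

#define SECTION_SIZE		(1UL << 20)		  /* assumed: 1MiB sections */
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)  /* mirrors the kernel macro */
#define round_down(x, a)	((x) & ~((a) - 1))	  /* mirrors the kernel macro */

int main(void)
{
	unsigned long bank_end = 0x2f7ff000;	/* example bank end, not section-aligned */
	unsigned long memblock_limit = 0;

	/* loop body in sanity_check_meminfo(): remember the first unaligned boundary */
	if (!IS_ALIGNED(bank_end, SECTION_SIZE))
		memblock_limit = bank_end;

	/* end of the function: clamp early allocations to the last full section */
	if (memblock_limit)
		memblock_limit = round_down(memblock_limit, SECTION_SIZE);

	printf("memblock limit = 0x%08lx\n", memblock_limit);	/* prints 0x2f700000 */
	return 0;
}

With these example values the limit lands on the section boundary below the unaligned bank end, so early memblock allocations stay within memory that can be mapped with full sections.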
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index f64afb9f1bd5..bdd3be4be77a 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -110,7 +110,7 @@ ENTRY(cpu_v7_set_pte_ext)
 ARM(	str	r3, [r0, #2048]! )
 THUMB(	add	r0, r0, #2048 )
 THUMB(	str	r3, [r0] )
-	ALT_SMP(mov	pc,lr)
+	ALT_SMP(W(nop))
 	ALT_UP (mcr	p15, 0, r0, c7, c10, 1)		@ flush_pte
 #endif
 	mov	pc, lr
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index c36ac69488c8..01a719e18bb0 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -81,7 +81,7 @@ ENTRY(cpu_v7_set_pte_ext)
 	tst	r3, #1 << (55 - 32)		@ L_PTE_DIRTY
 	orreq	r2, #L_PTE_RDONLY
 1:	strd	r2, r3, [r0]
-	ALT_SMP(mov	pc, lr)
+	ALT_SMP(W(nop))
 	ALT_UP (mcr	p15, 0, r0, c7, c10, 1)		@ flush_pte
 #endif
 	mov	pc, lr
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 5c6d5a3050ea..73398bcf9bd8 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -75,13 +75,14 @@ ENTRY(cpu_v7_do_idle)
 ENDPROC(cpu_v7_do_idle)
 
 ENTRY(cpu_v7_dcache_clean_area)
-	ALT_SMP(mov	pc, lr)			@ MP extensions imply L1 PTW
-	ALT_UP(W(nop))
-	dcache_line_size r2, r3
-1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
+	ALT_SMP(W(nop))				@ MP extensions imply L1 PTW
+	ALT_UP_B(1f)
+	mov	pc, lr
+1:	dcache_line_size r2, r3
+2:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
 	add	r0, r0, r2
 	subs	r1, r1, r2
-	bhi	1b
+	bhi	2b
 	dsb
 	mov	pc, lr
 ENDPROC(cpu_v7_dcache_clean_area)