Diffstat (limited to 'arch/arm/mm')
 arch/arm/mm/Kconfig             |  21
 arch/arm/mm/Makefile            |   2
 arch/arm/mm/alignment.c         |  10
 arch/arm/mm/cache-feroceon-l2.c |   6
 arch/arm/mm/cache-tauros2.c     |  12
 arch/arm/mm/context.c           |  58
 arch/arm/mm/copypage-v6.c       |   2
 arch/arm/mm/fault-armv.c        |   6
 arch/arm/mm/fault.c             |  31
 arch/arm/mm/flush.c             |   2
 arch/arm/mm/highmem.c           |  15
 arch/arm/mm/init.c              | 153
 arch/arm/mm/mmu.c               | 127
 arch/arm/mm/pageattr.c          |  91
 arch/arm/mm/proc-v7.S           |   5
 15 files changed, 405 insertions(+), 136 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index ab906b801047..03823e784f63 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -1009,3 +1009,24 @@ config ARCH_SUPPORTS_BIG_ENDIAN
 	help
 	  This option specifies the architecture can support big endian
 	  operation.
+
+config ARM_KERNMEM_PERMS
+	bool "Restrict kernel memory permissions"
+	help
+	  If this is set, kernel memory other than kernel text (and rodata)
+	  will be made non-executable. The tradeoff is that each region is
+	  padded to section-size (1MiB) boundaries (because their permissions
+	  are different and splitting the 1M pages into 4K ones causes TLB
+	  performance problems), wasting memory.
+
+config DEBUG_RODATA
+	bool "Make kernel text and rodata read-only"
+	depends on ARM_KERNMEM_PERMS
+	default y
+	help
+	  If this is set, kernel text and rodata will be made read-only. This
+	  is to help catch accidental or malicious attempts to change the
+	  kernel's executable code. Additionally splits rodata from kernel
+	  text so it can be made explicitly non-executable. This creates
+	  another section-size padded region, so it can waste more memory
+	  space while gaining the read-only protections.
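
The 1MiB padding the help text refers to comes from section-aligning the boundaries between differently-permissioned regions in the linker script. A sketch of the idea (an assumption for illustration only; the vmlinux.lds.S change itself is not part of this diff):

	#ifdef CONFIG_ARM_KERNMEM_PERMS
		/* Hypothetical: pad each permission region to a full 1MiB section. */
		. = ALIGN(1 << SECTION_SHIFT);
	#endif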
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 91da64de440f..d3afdf9eb65a 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -6,7 +6,7 @@ obj-y := dma-mapping.o extable.o fault.o init.o \
 				   iomap.o
 
 obj-$(CONFIG_MMU)		+= fault-armv.o flush.o idmap.o ioremap.o \
-				   mmap.o pgd.o mmu.o
+				   mmap.o pgd.o mmu.o pageattr.o
 
 ifneq ($(CONFIG_MMU),y)
 obj-y				+= nommu.o
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 83792f4324ea..2c0c541c60ca 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -113,7 +113,7 @@ static int safe_usermode(int new_usermode, bool warn)
 		new_usermode |= UM_FIXUP;
 
 		if (warn)
-			printk(KERN_WARNING "alignment: ignoring faults is unsafe on this CPU. Defaulting to fixup mode.\n");
+			pr_warn("alignment: ignoring faults is unsafe on this CPU. Defaulting to fixup mode.\n");
 	}
 
 	return new_usermode;
@@ -523,7 +523,7 @@ do_alignment_ldmstm(unsigned long addr, unsigned long instr, struct pt_regs *regs)
 	 * processor for us.
 	 */
 	if (addr != eaddr) {
-		printk(KERN_ERR "LDMSTM: PC = %08lx, instr = %08lx, "
+		pr_err("LDMSTM: PC = %08lx, instr = %08lx, "
 			"addr = %08lx, eaddr = %08lx\n",
 			instruction_pointer(regs), instr, addr, eaddr);
 		show_regs(regs);
@@ -567,7 +567,7 @@ fault:
 	return TYPE_FAULT;
 
 bad:
-	printk(KERN_ERR "Alignment trap: not handling ldm with s-bit set\n");
+	pr_err("Alignment trap: not handling ldm with s-bit set\n");
 	return TYPE_ERROR;
 }
 
@@ -899,13 +899,13 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	return 0;
 
  swp:
-	printk(KERN_ERR "Alignment trap: not handling swp instruction\n");
+	pr_err("Alignment trap: not handling swp instruction\n");
 
  bad:
 	/*
 	 * Oops, we didn't handle the instruction.
 	 */
-	printk(KERN_ERR "Alignment trap: not handling instruction "
+	pr_err("Alignment trap: not handling instruction "
 		"%0*lx at [<%08lx>]\n",
 		isize << 1,
 		isize == 2 ? tinstr : instr, instrptr);
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
index e028a7f2ebcc..097181e08c25 100644
--- a/arch/arm/mm/cache-feroceon-l2.c
+++ b/arch/arm/mm/cache-feroceon-l2.c
@@ -313,7 +313,7 @@ static void __init disable_l2_prefetch(void)
 	 */
 	u = read_extra_features();
 	if (!(u & 0x01000000)) {
-		printk(KERN_INFO "Feroceon L2: Disabling L2 prefetch.\n");
+		pr_info("Feroceon L2: Disabling L2 prefetch.\n");
 		write_extra_features(u | 0x01000000);
 	}
 }
@@ -326,7 +326,7 @@ static void __init enable_l2(void)
 	if (!(u & 0x00400000)) {
 		int i, d;
 
-		printk(KERN_INFO "Feroceon L2: Enabling L2\n");
+		pr_info("Feroceon L2: Enabling L2\n");
 
 		d = flush_and_disable_dcache();
 		i = invalidate_and_disable_icache();
@@ -353,7 +353,7 @@ void __init feroceon_l2_init(int __l2_wt_override)
 
 	enable_l2();
 
-	printk(KERN_INFO "Feroceon L2: Cache support initialised%s.\n",
+	pr_info("Feroceon L2: Cache support initialised%s.\n",
 		l2_wt_override ? ", in WT override mode" : "");
 }
 #ifdef CONFIG_OF
diff --git a/arch/arm/mm/cache-tauros2.c b/arch/arm/mm/cache-tauros2.c
index b273739e6359..1e373d268c04 100644
--- a/arch/arm/mm/cache-tauros2.c
+++ b/arch/arm/mm/cache-tauros2.c
@@ -185,7 +185,7 @@ static void enable_extra_feature(unsigned int features)
 		u &= ~0x01000000;
 	else
 		u |= 0x01000000;
-	printk(KERN_INFO "Tauros2: %s L2 prefetch.\n",
+	pr_info("Tauros2: %s L2 prefetch.\n",
 			(features & CACHE_TAUROS2_PREFETCH_ON)
 			? "Enabling" : "Disabling");
 
@@ -193,7 +193,7 @@ static void enable_extra_feature(unsigned int features)
 		u |= 0x00100000;
 	else
 		u &= ~0x00100000;
-	printk(KERN_INFO "Tauros2: %s line fill burt8.\n",
+	pr_info("Tauros2: %s line fill burt8.\n",
 			(features & CACHE_TAUROS2_LINEFILL_BURST8)
 			? "Enabling" : "Disabling");
 
@@ -216,7 +216,7 @@ static void __init tauros2_internal_init(unsigned int features)
 	 */
 	feat = read_extra_features();
 	if (!(feat & 0x00400000)) {
-		printk(KERN_INFO "Tauros2: Enabling L2 cache.\n");
+		pr_info("Tauros2: Enabling L2 cache.\n");
 		write_extra_features(feat | 0x00400000);
 	}
 
@@ -253,7 +253,7 @@ static void __init tauros2_internal_init(unsigned int features)
 	 */
 	actlr = read_actlr();
 	if (!(actlr & 0x00000002)) {
-		printk(KERN_INFO "Tauros2: Enabling L2 cache.\n");
+		pr_info("Tauros2: Enabling L2 cache.\n");
 		write_actlr(actlr | 0x00000002);
 	}
 
@@ -262,11 +262,11 @@ static void __init tauros2_internal_init(unsigned int features)
 #endif
 
 	if (mode == NULL) {
-		printk(KERN_CRIT "Tauros2: Unable to detect CPU mode.\n");
+		pr_crit("Tauros2: Unable to detect CPU mode.\n");
 		return;
 	}
 
-	printk(KERN_INFO "Tauros2: L2 cache support initialised "
+	pr_info("Tauros2: L2 cache support initialised "
 	       "in %s mode.\n", mode);
 }
 
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 6eb97b3a7481..91892569710f 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -184,36 +184,46 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 	u64 asid = atomic64_read(&mm->context.id);
 	u64 generation = atomic64_read(&asid_generation);
 
-	if (asid != 0 && is_reserved_asid(asid)) {
-		/*
-		 * Our current ASID was active during a rollover, we can
-		 * continue to use it and this was just a false alarm.
-		 */
-		asid = generation | (asid & ~ASID_MASK);
-	} else {
-		/*
-		 * Allocate a free ASID. If we can't find one, take a
-		 * note of the currently active ASIDs and mark the TLBs
-		 * as requiring flushes. We always count from ASID #1,
-		 * as we reserve ASID #0 to switch via TTBR0 and to
-		 * avoid speculative page table walks from hitting in
-		 * any partial walk caches, which could be populated
-		 * from overlapping level-1 descriptors used to map both
-		 * the module area and the userspace stack.
-		 */
-		asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
-		if (asid == NUM_USER_ASIDS) {
-			generation = atomic64_add_return(ASID_FIRST_VERSION,
-							 &asid_generation);
-			flush_context(cpu);
-			asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
-		}
-		__set_bit(asid, asid_map);
-		cur_idx = asid;
-		asid |= generation;
-		cpumask_clear(mm_cpumask(mm));
+	if (asid != 0) {
+		/*
+		 * If our current ASID was active during a rollover, we
+		 * can continue to use it and this was just a false alarm.
+		 */
+		if (is_reserved_asid(asid))
+			return generation | (asid & ~ASID_MASK);
+
+		/*
+		 * We had a valid ASID in a previous life, so try to re-use
+		 * it if possible.
+		 */
+		asid &= ~ASID_MASK;
+		if (!__test_and_set_bit(asid, asid_map))
+			goto bump_gen;
 	}
 
+	/*
+	 * Allocate a free ASID. If we can't find one, take a note of the
+	 * currently active ASIDs and mark the TLBs as requiring flushes.
+	 * We always count from ASID #1, as we reserve ASID #0 to switch
+	 * via TTBR0 and to avoid speculative page table walks from hitting
+	 * in any partial walk caches, which could be populated from
+	 * overlapping level-1 descriptors used to map both the module
+	 * area and the userspace stack.
+	 */
+	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
+	if (asid == NUM_USER_ASIDS) {
+		generation = atomic64_add_return(ASID_FIRST_VERSION,
+						 &asid_generation);
+		flush_context(cpu);
+		asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
+	}
+
+	__set_bit(asid, asid_map);
+	cur_idx = asid;
+
+bump_gen:
+	asid |= generation;
+	cpumask_clear(mm_cpumask(mm));
 	return asid;
 }
 
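
For reference, the generation/index split this function relies on, with the 8-bit hardware ASIDs used on these CPUs (definitions reproduced from the same file as a sketch; treat as illustrative):

	#define ASID_BITS		8
	#define ASID_MASK		((~0ULL) << ASID_BITS)
	#define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
	#define NUM_USER_ASIDS		ASID_FIRST_VERSION

	/*
	 * Bits 7..0 of mm->context.id hold the hardware ASID (the index into
	 * asid_map); bits 63..8 hold the generation. For example, generation
	 * 0x300 combined with hardware ASID 0x2a gives an id of 0x32a.
	 */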
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index b9bcc9d79176..70423345da26 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -62,7 +62,7 @@ static void discard_old_kernel_data(void *kto)
 	__asm__("mcrr	p15, 0, %1, %0, c6	@ 0xec401f06"
 	   :
 	   : "r" (kto),
-	     "r" ((unsigned long)kto + PAGE_SIZE - L1_CACHE_BYTES)
+	     "r" ((unsigned long)kto + PAGE_SIZE - 1)
 	   : "cc");
 }
 
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index ff379ac115df..d9e0d00a6699 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -235,7 +235,7 @@ void __init check_writebuffer_bugs(void)
 	const char *reason;
 	unsigned long v = 1;
 
-	printk(KERN_INFO "CPU: Testing write buffer coherency: ");
+	pr_info("CPU: Testing write buffer coherency: ");
 
 	page = alloc_page(GFP_KERNEL);
 	if (page) {
@@ -261,9 +261,9 @@ void __init check_writebuffer_bugs(void)
 	}
 
 	if (v) {
-		printk("failed, %s\n", reason);
+		pr_cont("failed, %s\n", reason);
 		shared_pte_mask = L_PTE_MT_UNCACHED;
 	} else {
-		printk("ok\n");
+		pr_cont("ok\n");
 	}
 }
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index eb8830a4c5ed..a982dc3190df 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -63,9 +63,9 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
 	if (!mm)
 		mm = &init_mm;
 
-	printk(KERN_ALERT "pgd = %p\n", mm->pgd);
+	pr_alert("pgd = %p\n", mm->pgd);
 	pgd = pgd_offset(mm, addr);
-	printk(KERN_ALERT "[%08lx] *pgd=%08llx",
+	pr_alert("[%08lx] *pgd=%08llx",
 		addr, (long long)pgd_val(*pgd));
 
 	do {
@@ -77,31 +77,31 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
 			break;
 
 		if (pgd_bad(*pgd)) {
-			printk("(bad)");
+			pr_cont("(bad)");
 			break;
 		}
 
 		pud = pud_offset(pgd, addr);
 		if (PTRS_PER_PUD != 1)
-			printk(", *pud=%08llx", (long long)pud_val(*pud));
+			pr_cont(", *pud=%08llx", (long long)pud_val(*pud));
 
 		if (pud_none(*pud))
 			break;
 
 		if (pud_bad(*pud)) {
-			printk("(bad)");
+			pr_cont("(bad)");
 			break;
 		}
 
 		pmd = pmd_offset(pud, addr);
 		if (PTRS_PER_PMD != 1)
-			printk(", *pmd=%08llx", (long long)pmd_val(*pmd));
+			pr_cont(", *pmd=%08llx", (long long)pmd_val(*pmd));
 
 		if (pmd_none(*pmd))
 			break;
 
 		if (pmd_bad(*pmd)) {
-			printk("(bad)");
+			pr_cont("(bad)");
 			break;
 		}
 
@@ -110,15 +110,15 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
 			break;
 
 		pte = pte_offset_map(pmd, addr);
-		printk(", *pte=%08llx", (long long)pte_val(*pte));
+		pr_cont(", *pte=%08llx", (long long)pte_val(*pte));
 #ifndef CONFIG_ARM_LPAE
-		printk(", *ppte=%08llx",
+		pr_cont(", *ppte=%08llx",
 			(long long)pte_val(pte[PTE_HWTABLE_PTRS]));
 #endif
 		pte_unmap(pte);
 	} while(0);
 
-	printk("\n");
+	pr_cont("\n");
 }
 #else					/* CONFIG_MMU */
 void show_pte(struct mm_struct *mm, unsigned long addr)
@@ -142,10 +142,9 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
 	 * No handler, we'll have to terminate things with extreme prejudice.
 	 */
 	bust_spinlocks(1);
-	printk(KERN_ALERT
-		"Unable to handle kernel %s at virtual address %08lx\n",
-		(addr < PAGE_SIZE) ? "NULL pointer dereference" :
-		"paging request", addr);
+	pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
+		 (addr < PAGE_SIZE) ? "NULL pointer dereference" :
+		 "paging request", addr);
 
 	show_pte(mm, addr);
 	die("Oops", regs, fsr);
@@ -551,7 +550,7 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
 		return;
 
-	printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
+	pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
 		inf->name, fsr, addr);
 
 	info.si_signo = inf->sig;
@@ -583,7 +582,7 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
 	if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
 		return;
 
-	printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
+	pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
 		inf->name, ifsr, addr);
 
 	info.si_signo = inf->sig;
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 265b836b3bd1..34b66af516ea 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -33,7 +33,7 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
 	asm(	"mcrr	p15, 0, %1, %0, c14\n"
 	"	mcr	p15, 0, %2, c7, c10, 4"
 	    :
-	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
+	    : "r" (to), "r" (to + PAGE_SIZE - 1), "r" (zero)
 	    : "cc");
 }
 
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index e17ed00828d7..b98895d9fe57 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -18,19 +18,20 @@
 #include <asm/tlbflush.h>
 #include "mm.h"
 
-pte_t *fixmap_page_table;
-
 static inline void set_fixmap_pte(int idx, pte_t pte)
 {
 	unsigned long vaddr = __fix_to_virt(idx);
-	set_pte_ext(fixmap_page_table + idx, pte, 0);
+	pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+
+	set_pte_ext(ptep, pte, 0);
 	local_flush_tlb_kernel_page(vaddr);
 }
 
 static inline pte_t get_fixmap_pte(unsigned long vaddr)
 {
-	unsigned long idx = __virt_to_fix(vaddr);
-	return *(fixmap_page_table + idx);
+	pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+
+	return *ptep;
 }
 
 void *kmap(struct page *page)
@@ -84,7 +85,7 @@ void *kmap_atomic(struct page *page)
 	 * With debugging enabled, kunmap_atomic forces that entry to 0.
 	 * Make sure it was indeed properly unmapped.
 	 */
-	BUG_ON(!pte_none(*(fixmap_page_table + idx)));
+	BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
 #endif
 	/*
 	 * When debugging is off, kunmap_atomic leaves the previous mapping
@@ -137,7 +138,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
 	idx = type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
-	BUG_ON(!pte_none(*(fixmap_page_table + idx)));
+	BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
 #endif
 	set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot));
 
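
The fixmap ptes are now reached through an ordinary page-table walk rather than a cached fixmap_page_table pointer. pmd_off_k() is the usual ARM helper for that walk; for reference, its shape is roughly the following (a sketch of the helper in arch/arm/mm/mm.h, reproduced here as an assumption):

	static inline pmd_t *pmd_off_k(unsigned long virt)
	{
		/* Walk init_mm's tables down to the pmd covering virt. */
		return pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
	}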
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 9481f85c56e6..98ad9c79ea0e 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -29,6 +29,7 @@
 #include <asm/prom.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
+#include <asm/system_info.h>
 #include <asm/tlb.h>
 #include <asm/fixmap.h>
 
@@ -67,7 +68,7 @@ early_param("initrd", early_initrd);
 
 static int __init parse_tag_initrd(const struct tag *tag)
 {
-	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
+	pr_warn("ATAG_INITRD is deprecated; "
 		"please update your bootloader.\n");
 	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
 	phys_initrd_size = tag->u.initrd.size;
@@ -544,7 +545,7 @@ void __init mem_init(void)
 #define MLM(b, t) b, t, ((t) - (b)) >> 20
 #define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
 
-	printk(KERN_NOTICE "Virtual kernel memory layout:\n"
+	pr_notice("Virtual kernel memory layout:\n"
 			"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
 #ifdef CONFIG_HAVE_TCM
 			"    DTCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
@@ -570,7 +571,7 @@ void __init mem_init(void)
 			MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
 			MLK(ITCM_OFFSET, (unsigned long) itcm_end),
 #endif
-			MLK(FIXADDR_START, FIXADDR_TOP),
+			MLK(FIXADDR_START, FIXADDR_END),
 			MLM(VMALLOC_START, VMALLOC_END),
 			MLM(PAGE_OFFSET, (unsigned long)high_memory),
 #ifdef CONFIG_HIGHMEM
@@ -615,7 +616,145 @@ void __init mem_init(void)
 	}
 }
 
-void free_initmem(void)
+#ifdef CONFIG_ARM_KERNMEM_PERMS
+struct section_perm {
+	unsigned long start;
+	unsigned long end;
+	pmdval_t mask;
+	pmdval_t prot;
+	pmdval_t clear;
+};
+
+static struct section_perm nx_perms[] = {
+	/* Make page tables, etc. before _stext RW (set NX). */
+	{
+		.start	= PAGE_OFFSET,
+		.end	= (unsigned long)_stext,
+		.mask	= ~PMD_SECT_XN,
+		.prot	= PMD_SECT_XN,
+	},
+	/* Make init RW (set NX). */
+	{
+		.start	= (unsigned long)__init_begin,
+		.end	= (unsigned long)_sdata,
+		.mask	= ~PMD_SECT_XN,
+		.prot	= PMD_SECT_XN,
+	},
+#ifdef CONFIG_DEBUG_RODATA
+	/* Make rodata NX (set RO in ro_perms below). */
+	{
+		.start	= (unsigned long)__start_rodata,
+		.end	= (unsigned long)__init_begin,
+		.mask	= ~PMD_SECT_XN,
+		.prot	= PMD_SECT_XN,
+	},
+#endif
+};
+
+#ifdef CONFIG_DEBUG_RODATA
+static struct section_perm ro_perms[] = {
+	/* Make kernel code and rodata RX (set RO). */
+	{
+		.start	= (unsigned long)_stext,
+		.end	= (unsigned long)__init_begin,
+#ifdef CONFIG_ARM_LPAE
+		.mask	= ~PMD_SECT_RDONLY,
+		.prot	= PMD_SECT_RDONLY,
+#else
+		.mask	= ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
+		.prot	= PMD_SECT_APX | PMD_SECT_AP_WRITE,
+		.clear	= PMD_SECT_AP_WRITE,
+#endif
+	},
+};
+#endif
+
+/*
+ * Updates section permissions only for the current mm (sections are
+ * copied into each mm). During startup, this is the init_mm. It is only
+ * safe to be called with preemption disabled, as under stop_machine().
+ */
+static inline void section_update(unsigned long addr, pmdval_t mask,
+				  pmdval_t prot)
+{
+	struct mm_struct *mm;
+	pmd_t *pmd;
+
+	mm = current->active_mm;
+	pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);
+
+#ifdef CONFIG_ARM_LPAE
+	pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
+#else
+	if (addr & SECTION_SIZE)
+		pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
+	else
+		pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
+#endif
+	flush_pmd_entry(pmd);
+	local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
+}
+
+/* Make sure extended page tables are in use. */
+static inline bool arch_has_strict_perms(void)
+{
+	if (cpu_architecture() < CPU_ARCH_ARMv6)
+		return false;
+
+	return !!(get_cr() & CR_XP);
+}
+
+#define set_section_perms(perms, field)	{				\
+	size_t i;							\
+	unsigned long addr;						\
+									\
+	if (!arch_has_strict_perms())					\
+		return;							\
+									\
+	for (i = 0; i < ARRAY_SIZE(perms); i++) {			\
+		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||	\
+		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {		\
+			pr_err("BUG: section %lx-%lx not aligned to %lx\n", \
+				perms[i].start, perms[i].end,		\
+				SECTION_SIZE);				\
+			continue;					\
+		}							\
+									\
+		for (addr = perms[i].start;				\
+		     addr < perms[i].end;				\
+		     addr += SECTION_SIZE)				\
+			section_update(addr, perms[i].mask,		\
+				       perms[i].field);			\
+	}								\
+}
+
+static inline void fix_kernmem_perms(void)
+{
+	set_section_perms(nx_perms, prot);
+}
+
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void)
+{
+	set_section_perms(ro_perms, prot);
+}
+
+void set_kernel_text_rw(void)
+{
+	set_section_perms(ro_perms, clear);
+}
+
+void set_kernel_text_ro(void)
+{
+	set_section_perms(ro_perms, prot);
+}
+#endif /* CONFIG_DEBUG_RODATA */
+
+#else
+static inline void fix_kernmem_perms(void) { }
+#endif /* CONFIG_ARM_KERNMEM_PERMS */
+
+void free_tcmmem(void)
 {
 #ifdef CONFIG_HAVE_TCM
 	extern char	__tcm_start, __tcm_end;
@@ -623,6 +762,12 @@ void free_initmem(void)
 	poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
 	free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
 #endif
+}
+
+void free_initmem(void)
+{
+	fix_kernmem_perms();
+	free_tcmmem();
 
 	poison_init_mem(__init_begin, __init_end - __init_begin);
 	if (!machine_is_integrator() && !machine_is_cintegrator())
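
Because section_update() only touches the current mm and requires preemption to be disabled, a caller that needs kernel text temporarily writable would be expected to pair the new helpers under stop_machine(). A hypothetical sketch (names invented; not part of this diff):

	#include <linux/stop_machine.h>

	static int do_patch(void *arg)
	{
		set_kernel_text_rw();	/* lift RO on the active mm's section entries */
		/* ... modify kernel text here ... */
		set_kernel_text_ro();	/* restore RO before other CPUs resume */
		return 0;
	}

	/* All other CPUs are held in a known state while do_patch() runs. */
	stop_machine(do_patch, NULL, NULL);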
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 9f98cec7fe1e..cda7c40999b6 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -22,6 +22,7 @@
 #include <asm/cputype.h>
 #include <asm/sections.h>
 #include <asm/cachetype.h>
+#include <asm/fixmap.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/smp_plat.h>
@@ -52,6 +53,8 @@ EXPORT_SYMBOL(empty_zero_page);
  */
 pmd_t *top_pmd;
 
+pmdval_t user_pmd_table = _PAGE_USER_TABLE;
+
 #define CPOLICY_UNCACHED	0
 #define CPOLICY_BUFFERED	1
 #define CPOLICY_WRITETHROUGH	2
@@ -192,7 +195,7 @@ early_param("cachepolicy", early_cachepolicy);
 static int __init early_nocache(char *__unused)
 {
 	char *p = "buffered";
-	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
+	pr_warn("nocache is deprecated; use cachepolicy=%s\n", p);
 	early_cachepolicy(p);
 	return 0;
 }
@@ -201,7 +204,7 @@ early_param("nocache", early_nocache);
 static int __init early_nowrite(char *__unused)
 {
 	char *p = "uncached";
-	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
+	pr_warn("nowb is deprecated; use cachepolicy=%s\n", p);
 	early_cachepolicy(p);
 	return 0;
 }
@@ -354,43 +357,28 @@ const struct mem_type *get_mem_type(unsigned int type)
 }
 EXPORT_SYMBOL(get_mem_type);
 
-#define PTE_SET_FN(_name, pteop) \
-static int pte_set_##_name(pte_t *ptep, pgtable_t token, unsigned long addr, \
-			void *data) \
-{ \
-	pte_t pte = pteop(*ptep); \
-\
-	set_pte_ext(ptep, pte, 0); \
-	return 0; \
-} \
-
-#define SET_MEMORY_FN(_name, callback) \
-int set_memory_##_name(unsigned long addr, int numpages) \
-{ \
-	unsigned long start = addr; \
-	unsigned long size = PAGE_SIZE*numpages; \
-	unsigned end = start + size; \
-\
-	if (start < MODULES_VADDR || start >= MODULES_END) \
-		return -EINVAL;\
-\
-	if (end < MODULES_VADDR || end >= MODULES_END) \
-		return -EINVAL; \
-\
-	apply_to_page_range(&init_mm, start, size, callback, NULL); \
-	flush_tlb_kernel_range(start, end); \
-	return 0;\
-}
-
-PTE_SET_FN(ro, pte_wrprotect)
-PTE_SET_FN(rw, pte_mkwrite)
-PTE_SET_FN(x, pte_mkexec)
-PTE_SET_FN(nx, pte_mknexec)
-
-SET_MEMORY_FN(ro, pte_set_ro)
-SET_MEMORY_FN(rw, pte_set_rw)
-SET_MEMORY_FN(x, pte_set_x)
-SET_MEMORY_FN(nx, pte_set_nx)
+/*
+ * To avoid TLB flush broadcasts, this uses local_flush_tlb_kernel_range().
+ * As a result, this can only be called with preemption disabled, as under
+ * stop_machine().
+ */
+void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
+{
+	unsigned long vaddr = __fix_to_virt(idx);
+	pte_t *pte = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+
+	/* Make sure fixmap region does not exceed available allocation. */
+	BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) >
+		     FIXADDR_END);
+	BUG_ON(idx >= __end_of_fixed_addresses);
+
+	if (pgprot_val(prot))
+		set_pte_at(NULL, vaddr, pte,
+			   pfn_pte(phys >> PAGE_SHIFT, prot));
+	else
+		pte_clear(NULL, vaddr, pte);
+	local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
+}
 
 /*
  * Adjust the PMD section entries according to the CPU in use.
@@ -528,14 +516,23 @@ static void __init build_mem_type_table(void)
 	hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte;
 	s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2;
 
+#ifndef CONFIG_ARM_LPAE
 	/*
 	 * We don't use domains on ARMv6 (since this causes problems with
 	 * v6/v7 kernels), so we must use a separate memory type for user
 	 * r/o, kernel r/w to map the vectors page.
 	 */
-#ifndef CONFIG_ARM_LPAE
 	if (cpu_arch == CPU_ARCH_ARMv6)
 		vecs_pgprot |= L_PTE_MT_VECTORS;
+
+	/*
+	 * Check whether the PXN bit is supported in the Short-descriptor
+	 * translation table format descriptors.
+	 */
+	if (cpu_arch == CPU_ARCH_ARMv7 &&
+		(read_cpuid_ext(CPUID_EXT_MMFR0) & 0xF) == 4) {
+		user_pmd_table |= PMD_PXNTABLE;
+	}
 #endif
 
 	/*
@@ -605,6 +602,11 @@ static void __init build_mem_type_table(void)
 	}
 	kern_pgprot |= PTE_EXT_AF;
 	vecs_pgprot |= PTE_EXT_AF;
+
+	/*
+	 * Set PXN for user mappings
+	 */
+	user_pgprot |= PTE_EXT_PXN;
 #endif
 
 	for (i = 0; i < 16; i++) {
@@ -786,8 +788,7 @@ static void __init create_36bit_mapping(struct map_desc *md,
 	length = PAGE_ALIGN(md->length);
 
 	if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
-		printk(KERN_ERR "MM: CPU does not support supersection "
-		       "mapping for 0x%08llx at 0x%08lx\n",
+		pr_err("MM: CPU does not support supersection mapping for 0x%08llx at 0x%08lx\n",
 		       (long long)__pfn_to_phys((u64)md->pfn), addr);
 		return;
 	}
@@ -799,15 +800,13 @@ static void __init create_36bit_mapping(struct map_desc *md,
 	 * of the actual domain assignments in use.
 	 */
 	if (type->domain) {
-		printk(KERN_ERR "MM: invalid domain in supersection "
-		       "mapping for 0x%08llx at 0x%08lx\n",
+		pr_err("MM: invalid domain in supersection mapping for 0x%08llx at 0x%08lx\n",
 		       (long long)__pfn_to_phys((u64)md->pfn), addr);
 		return;
 	}
 
 	if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
-		printk(KERN_ERR "MM: cannot create mapping for 0x%08llx"
-		       " at 0x%08lx invalid alignment\n",
+		pr_err("MM: cannot create mapping for 0x%08llx at 0x%08lx invalid alignment\n",
 		       (long long)__pfn_to_phys((u64)md->pfn), addr);
 		return;
 	}
@@ -850,18 +849,16 @@ static void __init create_mapping(struct map_desc *md)
 	pgd_t *pgd;
 
 	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
-		printk(KERN_WARNING "BUG: not creating mapping for 0x%08llx"
-		       " at 0x%08lx in user region\n",
-		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
+		pr_warn("BUG: not creating mapping for 0x%08llx at 0x%08lx in user region\n",
+			(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
 		return;
 	}
 
 	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
 	    md->virtual >= PAGE_OFFSET &&
 	    (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
-		printk(KERN_WARNING "BUG: mapping for 0x%08llx"
-		       " at 0x%08lx out of vmalloc space\n",
-		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
+		pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
+			(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
 	}
 
 	type = &mem_types[md->type];
@@ -881,9 +878,8 @@ static void __init create_mapping(struct map_desc *md)
 	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
 
 	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
-		printk(KERN_WARNING "BUG: map for 0x%08llx at 0x%08lx can not "
-		       "be mapped using pages, ignoring.\n",
-		       (long long)__pfn_to_phys(md->pfn), addr);
+		pr_warn("BUG: map for 0x%08llx at 0x%08lx can not be mapped using pages, ignoring.\n",
+			(long long)__pfn_to_phys(md->pfn), addr);
 		return;
 	}
 
@@ -1053,15 +1049,13 @@ static int __init early_vmalloc(char *arg)
 
 	if (vmalloc_reserve < SZ_16M) {
 		vmalloc_reserve = SZ_16M;
-		printk(KERN_WARNING
-			"vmalloc area too small, limiting to %luMB\n",
+		pr_warn("vmalloc area too small, limiting to %luMB\n",
 			vmalloc_reserve >> 20);
 	}
 
 	if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
 		vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
-		printk(KERN_WARNING
-			"vmalloc area is too big, limiting to %luMB\n",
+		pr_warn("vmalloc area is too big, limiting to %luMB\n",
 			vmalloc_reserve >> 20);
 	}
 
@@ -1094,7 +1088,7 @@ void __init sanity_check_meminfo(void)
 
 		if (highmem) {
 			pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n",
-				  &block_start, &block_end);
+					&block_start, &block_end);
 			memblock_remove(reg->base, reg->size);
 			continue;
 		}
@@ -1103,7 +1097,7 @@ void __init sanity_check_meminfo(void)
 			phys_addr_t overlap_size = reg->size - size_limit;
 
 			pr_notice("Truncating RAM at %pa-%pa to -%pa",
-				  &block_start, &block_end, &vmalloc_limit);
+					&block_start, &block_end, &vmalloc_limit);
 			memblock_remove(vmalloc_limit, overlap_size);
 			block_end = vmalloc_limit;
 		}
@@ -1326,10 +1320,10 @@ static void __init kmap_init(void)
 #ifdef CONFIG_HIGHMEM
 	pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
 		PKMAP_BASE, _PAGE_KERNEL_TABLE);
-
-	fixmap_page_table = early_pte_alloc(pmd_off_k(FIXADDR_START),
-		FIXADDR_START, _PAGE_KERNEL_TABLE);
 #endif
+
+	early_pte_alloc(pmd_off_k(FIXADDR_START), FIXADDR_START,
+			_PAGE_KERNEL_TABLE);
 }
 
 static void __init map_lowmem(void)
@@ -1349,13 +1343,20 @@ static void __init map_lowmem(void)
 		if (start >= end)
 			break;
 
-		if (end < kernel_x_start || start >= kernel_x_end) {
+		if (end < kernel_x_start) {
 			map.pfn = __phys_to_pfn(start);
 			map.virtual = __phys_to_virt(start);
 			map.length = end - start;
 			map.type = MT_MEMORY_RWX;
 
 			create_mapping(&map);
+		} else if (start >= kernel_x_end) {
+			map.pfn = __phys_to_pfn(start);
+			map.virtual = __phys_to_virt(start);
+			map.length = end - start;
+			map.type = MT_MEMORY_RW;
+
+			create_mapping(&map);
 		} else {
 			/* This better cover the entire kernel */
 			if (start < kernel_x_start) {
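
A hypothetical caller of the new __set_fixmap() (names assumed for illustration): a non-zero pgprot installs the mapping, an empty one tears it down via pte_clear():

	/* Map one physical page at a fixed virtual slot, use it, unmap it. */
	static void fixmap_demo(enum fixed_addresses idx, phys_addr_t phys)
	{
		__set_fixmap(idx, phys, PAGE_KERNEL);	/* install the pte */
		/* ... access the page via (void *)__fix_to_virt(idx) ... */
		__set_fixmap(idx, 0, __pgprot(0));	/* pgprot_val == 0 => pte_clear */
	}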
diff --git a/arch/arm/mm/pageattr.c b/arch/arm/mm/pageattr.c
new file mode 100644
index 000000000000..004e35cdcfff
--- /dev/null
+++ b/arch/arm/mm/pageattr.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/mm.h>
+#include <linux/module.h>
+
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+struct page_change_data {
+	pgprot_t set_mask;
+	pgprot_t clear_mask;
+};
+
+static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
+			void *data)
+{
+	struct page_change_data *cdata = data;
+	pte_t pte = *ptep;
+
+	pte = clear_pte_bit(pte, cdata->clear_mask);
+	pte = set_pte_bit(pte, cdata->set_mask);
+
+	set_pte_ext(ptep, pte, 0);
+	return 0;
+}
+
+static int change_memory_common(unsigned long addr, int numpages,
+				pgprot_t set_mask, pgprot_t clear_mask)
+{
+	unsigned long start = addr;
+	unsigned long size = PAGE_SIZE*numpages;
+	unsigned long end = start + size;
+	int ret;
+	struct page_change_data data;
+
+	if (!IS_ALIGNED(addr, PAGE_SIZE)) {
+		start &= PAGE_MASK;
+		end = start + size;
+		WARN_ON_ONCE(1);
+	}
+
+	if (!is_module_address(start) || !is_module_address(end - 1))
+		return -EINVAL;
+
+	data.set_mask = set_mask;
+	data.clear_mask = clear_mask;
+
+	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
+					&data);
+
+	flush_tlb_kernel_range(start, end);
+	return ret;
+}
+
+int set_memory_ro(unsigned long addr, int numpages)
+{
+	return change_memory_common(addr, numpages,
+					__pgprot(L_PTE_RDONLY),
+					__pgprot(0));
+}
+
+int set_memory_rw(unsigned long addr, int numpages)
+{
+	return change_memory_common(addr, numpages,
+					__pgprot(0),
+					__pgprot(L_PTE_RDONLY));
+}
+
+int set_memory_nx(unsigned long addr, int numpages)
+{
+	return change_memory_common(addr, numpages,
+					__pgprot(L_PTE_XN),
+					__pgprot(0));
+}
+
+int set_memory_x(unsigned long addr, int numpages)
+{
+	return change_memory_common(addr, numpages,
+					__pgprot(0),
+					__pgprot(L_PTE_XN));
+}
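
A minimal usage sketch for the new interface (hypothetical caller, not part of this diff): change_memory_common() rejects anything that is not module-space memory, so a natural user is a page-aligned allocation from module_alloc():

	#include <linux/moduleloader.h>	/* module_alloc() */

	static int protect_buffer(void)
	{
		/* Page-aligned memory inside MODULES_VADDR..MODULES_END. */
		void *buf = module_alloc(PAGE_SIZE);

		if (!buf)
			return -ENOMEM;

		/* ... fill buf ... */

		/* One page; addresses outside module space return -EINVAL. */
		return set_memory_ro((unsigned long)buf, 1);
	}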
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 22ac2a6fbfe3..8b4ee5e81c14 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -591,9 +591,10 @@ __krait_proc_info:
 	/*
 	 * Some Krait processors don't indicate support for SDIV and UDIV
 	 * instructions in the ARM instruction set, even though they actually
-	 * do support them.
+	 * do support them. They also don't indicate support for fused multiply
+	 * instructions even though they actually do support them.
 	 */
-	__v7_proc __v7_setup, hwcaps = HWCAP_IDIV
+	__v7_proc __v7_setup, hwcaps = HWCAP_IDIV | HWCAP_VFPv4
 	.size	__krait_proc_info, . - __krait_proc_info
 
 	/*
599 /* 600 /*