author		Russell King <rmk+kernel@arm.linux.org.uk>	2013-10-24 03:12:39 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2013-12-11 04:53:16 -0500
commit		ebd4922ecc38e31e662cd641dd04099a9652c9b3 (patch)
tree		9bdeb148dca0324f7c16bf33feee00fe013dcd0f /arch/arm/mm/mmu.c
parent		2e2c9de207be043ee80161971c814d740759d3bc (diff)
ARM: implement basic NX support for kernel lowmem mappings
Add basic NX support for kernel lowmem mappings. We mark any section
which does not overlap kernel text as non-executable, preventing it
from being used to write code and then execute it directly from there.

This does not change the alignment of the sections, so the kernel image
does not grow significantly as a result, and we can therefore do this
without needing a config option.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
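The reason no alignment change is needed: the executable window is simply
rounded outward to the existing SECTION_SIZE boundaries, so the RWX region
may carry up to a section of slack on either side of the image instead of
forcing the image itself to grow. The arithmetic can be reproduced with a
minimal userspace sketch; SECTION_SIZE is assumed here to be 1 MiB (classic
ARM short-descriptor tables; LPAE uses 2 MiB), and the two physical
addresses are hypothetical:

/* nx_window.c: compute the section-aligned executable window. */
#include <stdio.h>

#define SECTION_SIZE	0x100000UL		/* 1 MiB, assumed */
#define round_down(x, y)	((x) & ~((y) - 1))	/* y must be a power of two */
#define round_up(x, y)		round_down((x) + (y) - 1, (y))

int main(void)
{
	/* hypothetical __pa(_stext) and __pa(__init_end) */
	unsigned long pa_stext    = 0x60008000UL;
	unsigned long pa_init_end = 0x60632140UL;

	unsigned long kernel_x_start = round_down(pa_stext, SECTION_SIZE);
	unsigned long kernel_x_end   = round_up(pa_init_end, SECTION_SIZE);

	/* prints 0x60000000..0x60700000 for the values above */
	printf("executable window: %#lx..%#lx\n", kernel_x_start, kernel_x_end);
	return 0;
}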
Diffstat (limited to 'arch/arm/mm/mmu.c')
-rw-r--r--	arch/arm/mm/mmu.c	55
1 file changed, 50 insertions(+), 5 deletions(-)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index fce2e7388098..9ec715f12224 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -22,6 +22,7 @@
 #include <asm/cputype.h>
 #include <asm/sections.h>
 #include <asm/cachetype.h>
+#include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/smp_plat.h>
 #include <asm/tlb.h>
@@ -293,6 +294,13 @@ static struct mem_type mem_types[] = {
293 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, 294 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
294 .domain = DOMAIN_KERNEL, 295 .domain = DOMAIN_KERNEL,
295 }, 296 },
297 [MT_MEMORY_RW] = {
298 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
299 L_PTE_XN,
300 .prot_l1 = PMD_TYPE_TABLE,
301 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
302 .domain = DOMAIN_KERNEL,
303 },
296 [MT_ROM] = { 304 [MT_ROM] = {
297 .prot_sect = PMD_TYPE_SECT, 305 .prot_sect = PMD_TYPE_SECT,
298 .domain = DOMAIN_KERNEL, 306 .domain = DOMAIN_KERNEL,
@@ -410,6 +418,9 @@ static void __init build_mem_type_table(void)
 			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
 			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
 			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
+
+			/* Also setup NX memory mapping */
+			mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
 		}
 		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
 			/*
@@ -489,6 +500,8 @@ static void __init build_mem_type_table(void)
489 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED; 500 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
490 mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S; 501 mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
491 mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED; 502 mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
503 mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
504 mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
492 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED; 505 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
493 mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S; 506 mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
494 mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED; 507 mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
@@ -545,6 +558,8 @@ static void __init build_mem_type_table(void)
 	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
 	mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
 	mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
+	mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
+	mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
 	mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
 	mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
 	mem_types[MT_ROM].prot_sect |= cp->pmd;
@@ -1296,6 +1311,8 @@ static void __init kmap_init(void)
 static void __init map_lowmem(void)
 {
 	struct memblock_region *reg;
+	unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
+	unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
 
 	/* Map all the lowmem memory banks. */
 	for_each_memblock(memory, reg) {
@@ -1308,12 +1325,40 @@ static void __init map_lowmem(void)
 		if (start >= end)
 			break;
 
-		map.pfn = __phys_to_pfn(start);
-		map.virtual = __phys_to_virt(start);
-		map.length = end - start;
-		map.type = MT_MEMORY;
+		if (end < kernel_x_start || start >= kernel_x_end) {
+			map.pfn = __phys_to_pfn(start);
+			map.virtual = __phys_to_virt(start);
+			map.length = end - start;
+			map.type = MT_MEMORY_RWX;
 
-		create_mapping(&map);
+			create_mapping(&map);
+		} else {
+			/* This better cover the entire kernel */
+			if (start < kernel_x_start) {
+				map.pfn = __phys_to_pfn(start);
+				map.virtual = __phys_to_virt(start);
+				map.length = kernel_x_start - start;
+				map.type = MT_MEMORY_RW;
+
+				create_mapping(&map);
+			}
+
+			map.pfn = __phys_to_pfn(kernel_x_start);
+			map.virtual = __phys_to_virt(kernel_x_start);
+			map.length = kernel_x_end - kernel_x_start;
+			map.type = MT_MEMORY_RWX;
+
+			create_mapping(&map);
+
+			if (kernel_x_end < end) {
+				map.pfn = __phys_to_pfn(kernel_x_end);
+				map.virtual = __phys_to_virt(kernel_x_end);
+				map.length = end - kernel_x_end;
+				map.type = MT_MEMORY_RW;
+
+				create_mapping(&map);
+			}
+		}
 	}
 }
 
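To make the control flow of the final hunk easier to follow, here is a
minimal userspace sketch of the same decision logic. The carve() and emit()
helpers are hypothetical stand-ins for the loop body and create_mapping();
only the branch structure mirrors the patch. Note that a bank which does not
intersect the executable window keeps a single RWX mapping, exactly as
before this change; only the bank containing the kernel image is split into
RW / RWX / RW pieces.

/* carve.c: sketch of the map_lowmem() splitting logic (hypothetical names). */
#include <stdio.h>

enum mt { MT_MEMORY_RW, MT_MEMORY_RWX };

/* stand-in for create_mapping(): just report what would be mapped */
static void emit(unsigned long start, unsigned long end, enum mt type)
{
	printf("%#010lx-%#010lx %s\n", start, end,
	       type == MT_MEMORY_RWX ? "RWX" : "RW (XN)");
}

static void carve(unsigned long start, unsigned long end,
		  unsigned long kx_start, unsigned long kx_end)
{
	if (end < kx_start || start >= kx_end) {
		/* bank does not intersect the executable window */
		emit(start, end, MT_MEMORY_RWX);
	} else {
		/* this bank had better cover the entire kernel */
		if (start < kx_start)
			emit(start, kx_start, MT_MEMORY_RW);
		emit(kx_start, kx_end, MT_MEMORY_RWX);
		if (kx_end < end)
			emit(kx_end, end, MT_MEMORY_RW);
	}
}

int main(void)
{
	/* hypothetical 256 MiB bank containing a 7 MiB executable window */
	carve(0x60000000UL, 0x70000000UL, 0x60000000UL, 0x60700000UL);
	return 0;
}

For this example bank the sketch prints the RWX window followed by one
large RW (XN) remainder; there is no leading RW piece because the bank
starts exactly at the window.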