path: root/arch/arm64/mm
author    Ard Biesheuvel <ard.biesheuvel@linaro.org>  2016-06-23 09:53:17 -0400
committer Catalin Marinas <catalin.marinas@arm.com>   2016-06-27 13:21:27 -0400
commit    9fdc14c55cd6579d619ccd9d40982e0805e62b6d
tree      39c75ed542eccac983b74f9297b2c562f06a6bd8
parent    ea2cbee3bc671390139802dd0d50b08db024b03c
arm64: mm: fix location of _etext
As Kees Cook notes in the ARM counterpart of this patch [0]:

  The _etext position is defined to be the end of the kernel text code,
  and should not include any part of the data segments. This interferes
  with things that might check memory ranges and expect executable code
  up to _etext.

In particular, Kees is referring to the HARDENED_USERCOPY patch set [1],
which rejects attempts to call copy_to_user() on kernel ranges containing
executable code, but does allow access to the .rodata segment. Regardless
of whether one may or may not agree with the distinction, it makes sense
for _etext to have the same meaning across architectures.

So let's put _etext where it belongs, between .text and .rodata, and fix
up existing references to use __init_begin instead, which unlike
__end_rodata includes the exception and notes sections as well.

The _etext references in kaslr.c are left untouched, since its references
to [_stext, _etext) are meant to capture potential jump instruction
targets, and so disregarding .rodata is actually an improvement here.

[0] http://article.gmane.org/gmane.linux.kernel/2245084
[1] http://thread.gmane.org/gmane.linux.kernel.hardened.devel/2502

Reported-by: Kees Cook <keescook@chromium.org>
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
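To make the rationale concrete, here is a minimal, hypothetical sketch of the kind of range check the HARDENED_USERCOPY work performs; the function name and shape below are illustrative only, not the actual kernel implementation. The check only behaves as intended if _etext excludes .rodata; otherwise copies out of read-only data would be rejected as if they overlapped executable text:

	#include <asm/sections.h>	/* _text, _etext */

	/*
	 * Illustrative only -- not the real HARDENED_USERCOPY code.
	 * Reject user copies that overlap kernel executable text,
	 * while still permitting .rodata. This is only correct when
	 * _etext marks the true end of executable code, which is
	 * exactly what this patch guarantees.
	 */
	static bool overlaps_kernel_text(unsigned long addr, unsigned long len)
	{
		unsigned long text_start = (unsigned long)_text;
		unsigned long text_end   = (unsigned long)_etext;

		return addr < text_end && addr + len > text_start;
	}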
Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--  arch/arm64/mm/init.c |  4 ++--
-rw-r--r--  arch/arm64/mm/mmu.c  | 20 ++++++++++----------
2 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 64ea28306661..2ade7a6a10a7 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -429,9 +429,9 @@ void __init mem_init(void)
 	pr_cont("    vmalloc : 0x%16lx - 0x%16lx   (%6ld GB)\n",
 		MLG(VMALLOC_START, VMALLOC_END));
 	pr_cont("      .text : 0x%p" " - 0x%p" "   (%6ld KB)\n",
-		MLK_ROUNDUP(_text, __start_rodata));
+		MLK_ROUNDUP(_text, _etext));
 	pr_cont("    .rodata : 0x%p" " - 0x%p" "   (%6ld KB)\n",
-		MLK_ROUNDUP(__start_rodata, _etext));
+		MLK_ROUNDUP(__start_rodata, __init_begin));
 	pr_cont("      .init : 0x%p" " - 0x%p" "   (%6ld KB)\n",
 		MLK_ROUNDUP(__init_begin, __init_end));
 	pr_cont("      .data : 0x%p" " - 0x%p" "   (%6ld KB)\n",
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 0f85a46c3e18..c356f7b84d4d 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -386,14 +386,14 @@ static void create_mapping_late(phys_addr_t phys, unsigned long virt,
 static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end)
 {
 	unsigned long kernel_start = __pa(_text);
-	unsigned long kernel_end = __pa(_etext);
+	unsigned long kernel_end = __pa(__init_begin);
 
 	/*
 	 * Take care not to create a writable alias for the
 	 * read-only text and rodata sections of the kernel image.
 	 */
 
-	/* No overlap with the kernel text */
+	/* No overlap with the kernel text/rodata */
 	if (end < kernel_start || start >= kernel_end) {
 		__create_pgd_mapping(pgd, start, __phys_to_virt(start),
 				     end - start, PAGE_KERNEL,
@@ -402,7 +402,7 @@ static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end
 	}
 
 	/*
-	 * This block overlaps the kernel text mapping.
+	 * This block overlaps the kernel text/rodata mappings.
 	 * Map the portion(s) which don't overlap.
 	 */
 	if (start < kernel_start)
@@ -417,7 +417,7 @@ static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end
 			     early_pgtable_alloc);
 
 	/*
-	 * Map the linear alias of the [_text, _etext) interval as
+	 * Map the linear alias of the [_text, __init_begin) interval as
 	 * read-only/non-executable. This makes the contents of the
 	 * region accessible to subsystems such as hibernate, but
 	 * protects it from inadvertent modification or execution.
@@ -449,14 +449,14 @@ void mark_rodata_ro(void)
 {
 	unsigned long section_size;
 
-	section_size = (unsigned long)__start_rodata - (unsigned long)_text;
+	section_size = (unsigned long)_etext - (unsigned long)_text;
 	create_mapping_late(__pa(_text), (unsigned long)_text,
 			    section_size, PAGE_KERNEL_ROX);
 	/*
-	 * mark .rodata as read only. Use _etext rather than __end_rodata to
-	 * cover NOTES and EXCEPTION_TABLE.
+	 * mark .rodata as read only. Use __init_begin rather than __end_rodata
+	 * to cover NOTES and EXCEPTION_TABLE.
 	 */
-	section_size = (unsigned long)_etext - (unsigned long)__start_rodata;
+	section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
 	create_mapping_late(__pa(__start_rodata), (unsigned long)__start_rodata,
 			    section_size, PAGE_KERNEL_RO);
 }
@@ -499,8 +499,8 @@ static void __init map_kernel(pgd_t *pgd)
 {
 	static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_init, vmlinux_data;
 
-	map_kernel_segment(pgd, _text, __start_rodata, PAGE_KERNEL_EXEC, &vmlinux_text);
-	map_kernel_segment(pgd, __start_rodata, _etext, PAGE_KERNEL, &vmlinux_rodata);
+	map_kernel_segment(pgd, _text, _etext, PAGE_KERNEL_EXEC, &vmlinux_text);
+	map_kernel_segment(pgd, __start_rodata, __init_begin, PAGE_KERNEL, &vmlinux_rodata);
 	map_kernel_segment(pgd, __init_begin, __init_end, PAGE_KERNEL_EXEC,
 			   &vmlinux_init);
 	map_kernel_segment(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data);
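For orientation, the symbol ordering these hunks rely on can be sketched roughly as below. This is a sketch assuming the standard arm64 layout, not the literal linker script; the companion vmlinux.lds.S change falls outside this diffstat, which is limited to arch/arm64/mm:

	/*
	 * Rough arm64 image layout after this patch (illustrative):
	 *
	 *   _text ............ start of executable text
	 *   _etext ........... end of .text, now placed before .rodata
	 *   __start_rodata ... read-only data
	 *   __end_rodata ..... end of .rodata proper; NOTES and the
	 *                      exception table still follow it
	 *   __init_begin ..... first address past the whole read-only
	 *                      region, hence the bound now used by
	 *                      mark_rodata_ro() and __map_memblock()
	 */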