author     Linus Torvalds <torvalds@linux-foundation.org>  2011-01-06 14:07:33 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-01-06 14:07:33 -0500
commit     4f00b901d4233a78e6ca4d44c8c6fc5d38a3ee9e (patch)
tree       9b9da4230d33c47298d4fa3e93a4a5f0cb047ee8 /arch/x86/mm
parent     b4c6e2ea5e46b03c764a918f4999a77a3149979f (diff)
parent     94462ad3b14739d158a1ab87bb30008c1e5a6bc1 (diff)
Merge branch 'x86-security-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-security-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
module: Move RO/NX module protection to after ftrace module update
x86: Resume trampoline must be executable
x86: Add RO/NX protection for loadable kernel modules
x86: Add NX protection for kernel data
x86: Fix improper large page preservation
Diffstat (limited to 'arch/x86/mm')

 arch/x86/mm/init.c     |  3
 arch/x86/mm/init_32.c  | 20
 arch/x86/mm/pageattr.c | 33

3 files changed, 43 insertions(+), 13 deletions(-)
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index c0e28a13de7d..947f42abe820 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -364,8 +364,9 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
 	/*
 	 * We just marked the kernel text read only above, now that
 	 * we are going to free part of that, we need to make that
-	 * writeable first.
+	 * writeable and non-executable first.
 	 */
+	set_memory_nx(begin, (end - begin) >> PAGE_SHIFT);
 	set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);
 
 	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
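The set_memory_*() helpers take a page count rather than a byte count, while the printk reports KiB. A minimal userspace sketch of the same unit conversions, using hypothetical addresses as stand-ins for the kernel's __init_begin/__init_end symbols:

/*
 * Sketch, not kernel code: the conversions in free_init_pages().
 */
#include <stdio.h>

#define PAGE_SHIFT 12			/* 4 KiB pages on x86 */

int main(void)
{
	unsigned long begin = 0xc1600000UL;	/* hypothetical __init_begin */
	unsigned long end   = 0xc1680000UL;	/* hypothetical __init_end */

	unsigned long npages = (end - begin) >> PAGE_SHIFT;	/* 128 pages */
	unsigned long kib    = (end - begin) >> 10;		/* 512k */

	/* set_memory_nx()/set_memory_rw() would receive npages here */
	printf("Freeing initmem: %luk freed (%lu pages)\n", kib, npages);
	return 0;
}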
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 0e969f9f401b..f89b5bb4e93f 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -226,7 +226,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
 
 static inline int is_kernel_text(unsigned long addr)
 {
-	if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
+	if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
 		return 1;
 	return 0;
 }
@@ -912,6 +912,23 @@ void set_kernel_text_ro(void)
 	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
 }
 
+static void mark_nxdata_nx(void)
+{
+	/*
+	 * When this is called, init has already been executed and released,
+	 * so everything past _etext should be NX.
+	 */
+	unsigned long start = PFN_ALIGN(_etext);
+	/*
+	 * This comes from the is_kernel_text() upper limit, rounded up to HPAGE where large pages are used:
+	 */
+	unsigned long size = (((unsigned long)__init_end + HPAGE_SIZE) & HPAGE_MASK) - start;
+
+	if (__supported_pte_mask & _PAGE_NX)
+		printk(KERN_INFO "NX-protecting the kernel data: %luk\n", size >> 10);
+	set_pages_nx(virt_to_page(start), size >> PAGE_SHIFT);
+}
+
 void mark_rodata_ro(void)
 {
 	unsigned long start = PFN_ALIGN(_text);
@@ -946,6 +963,7 @@ void mark_rodata_ro(void)
 	printk(KERN_INFO "Testing CPA: write protecting again\n");
 	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
 #endif
+	mark_nxdata_nx();
 }
 #endif
 
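The size computation in mark_nxdata_nx() deliberately extends past __init_end to the next large-page boundary, matching the is_kernel_text() upper limit when the region is mapped with large pages. A standalone sketch of that arithmetic, with assumed (hypothetical) addresses rather than real linker symbols:

/*
 * Sketch, not kernel code: the HPAGE rounding in mark_nxdata_nx().
 */
#include <stdio.h>

#define HPAGE_SHIFT 22			/* 4 MiB large pages on 32-bit non-PAE */
#define HPAGE_SIZE  (1UL << HPAGE_SHIFT)
#define HPAGE_MASK  (~(HPAGE_SIZE - 1))

int main(void)
{
	unsigned long etext    = 0xc1400000UL;	/* hypothetical PFN_ALIGN(_etext) */
	unsigned long init_end = 0xc1680000UL;	/* hypothetical __init_end */

	unsigned long start = etext;
	unsigned long size  = ((init_end + HPAGE_SIZE) & HPAGE_MASK) - start;

	/* 0xc1680000 + 4 MiB = 0xc1a80000, masked down to 0xc1800000 */
	printf("NX range: %#lx..%#lx (%luk)\n", start, start + size, size >> 10);
	return 0;
}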
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 532e7933d606..8b830ca14ac4 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -13,6 +13,7 @@
 #include <linux/pfn.h>
 #include <linux/percpu.h>
 #include <linux/gfp.h>
+#include <linux/pci.h>
 
 #include <asm/e820.h>
 #include <asm/processor.h>
@@ -255,13 +256,16 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
 				   unsigned long pfn)
 {
 	pgprot_t forbidden = __pgprot(0);
+	pgprot_t required = __pgprot(0);
 
 	/*
 	 * The BIOS area between 640k and 1Mb needs to be executable for
 	 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
 	 */
-	if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
+#ifdef CONFIG_PCI_BIOS
+	if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
 		pgprot_val(forbidden) |= _PAGE_NX;
+#endif
 
 	/*
 	 * The kernel text needs to be executable for obvious reasons
@@ -278,6 +282,12 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
 	if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
 		   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
 		pgprot_val(forbidden) |= _PAGE_RW;
+	/*
+	 * .data and .bss should always be writable.
+	 */
+	if (within(address, (unsigned long)_sdata, (unsigned long)_edata) ||
+	    within(address, (unsigned long)__bss_start, (unsigned long)__bss_stop))
+		pgprot_val(required) |= _PAGE_RW;
 
 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
 	/*
@@ -317,6 +327,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
 #endif
 
 	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
+	prot = __pgprot(pgprot_val(prot) | pgprot_val(required));
 
 	return prot;
 }
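The net effect of the new required mask: forbidden bits are stripped first, then required bits are OR-ed back in afterwards, so a .data/.bss page keeps _PAGE_RW even when a caller asks to clear it. A simplified standalone sketch of that composition (bit values abbreviated, not taken from the kernel's headers):

/*
 * Sketch, not kernel code: mask composition in static_protections().
 */
#include <stdio.h>

#define MY_PAGE_RW (1UL << 1)	/* simplified stand-in for _PAGE_RW */

int main(void)
{
	unsigned long prot = 0;			/* caller asked: RW cleared */
	unsigned long forbidden = 0;		/* nothing forbidden here */
	unsigned long required = MY_PAGE_RW;	/* address falls in .data/.bss */

	prot &= ~forbidden;	/* forbidden bits removed first */
	prot |= required;	/* required bits win, restoring write access */

	printf("RW in final prot: %s\n", (prot & MY_PAGE_RW) ? "yes" : "no");
	return 0;
}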
@@ -393,7 +404,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 {
 	unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn;
 	pte_t new_pte, old_pte, *tmp;
-	pgprot_t old_prot, new_prot;
+	pgprot_t old_prot, new_prot, req_prot;
 	int i, do_split = 1;
 	unsigned int level;
 
@@ -438,10 +449,10 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 	 * We are safe now. Check whether the new pgprot is the same:
 	 */
 	old_pte = *kpte;
-	old_prot = new_prot = pte_pgprot(old_pte);
+	old_prot = new_prot = req_prot = pte_pgprot(old_pte);
 
-	pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
-	pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);
+	pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
+	pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);
 
 	/*
	 * old_pte points to the large page base address. So we need
@@ -450,17 +461,17 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 	pfn = pte_pfn(old_pte) + ((address & (psize - 1)) >> PAGE_SHIFT);
 	cpa->pfn = pfn;
 
-	new_prot = static_protections(new_prot, address, pfn);
+	new_prot = static_protections(req_prot, address, pfn);
 
 	/*
 	 * We need to check the full range, whether
 	 * static_protection() requires a different pgprot for one of
 	 * the pages in the range we try to preserve:
 	 */
-	addr = address + PAGE_SIZE;
-	pfn++;
-	for (i = 1; i < cpa->numpages; i++, addr += PAGE_SIZE, pfn++) {
-		pgprot_t chk_prot = static_protections(new_prot, addr, pfn);
+	addr = address & pmask;
+	pfn = pte_pfn(old_pte);
+	for (i = 0; i < (psize >> PAGE_SHIFT); i++, addr += PAGE_SIZE, pfn++) {
+		pgprot_t chk_prot = static_protections(req_prot, addr, pfn);
 
 		if (pgprot_val(chk_prot) != pgprot_val(new_prot))
 			goto out_unlock;
@@ -483,7 +494,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 	 * that we limited the number of possible pages already to
 	 * the number of pages in the large page.
 	 */
-	if (address == (nextpage_addr - psize) && cpa->numpages == numpages) {
+	if (address == (address & pmask) && cpa->numpages == (psize >> PAGE_SHIFT)) {
 		/*
 		 * The address is aligned and the number of pages
 		 * covers the full page.
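These last two hunks are the "Fix improper large page preservation" change: the check loop now walks every 4k page under the large mapping (psize >> PAGE_SHIFT of them, starting from the aligned base address & pmask) instead of only the cpa->numpages subrange, and the in-place update is taken only when the request is aligned and covers the whole large page. A standalone sketch of the quantities involved, with hypothetical values:

/*
 * Sketch, not kernel code: the aligned-base and page-count arithmetic
 * behind the fixed large-page preservation checks.
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PMD_SHIFT  21			/* 2 MiB large pages (PAE/64-bit) */

int main(void)
{
	unsigned long psize = 1UL << PMD_SHIFT;
	unsigned long pmask = ~(psize - 1);
	unsigned long address = 0xc1234000UL;	/* hypothetical request address */
	unsigned long cpa_numpages = 16;	/* hypothetical request size */

	unsigned long base   = address & pmask;		/* large-page base */
	unsigned long npages = psize >> PAGE_SHIFT;	/* 512 x 4k pages */

	printf("check %lu pages starting at %#lx\n", npages, base);
	printf("preserve whole mapping in place? %s\n",
	       (address == base && cpa_numpages == npages) ? "yes" : "no");
	return 0;
}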