 arch/i386/kernel/alternative.c | 14 ++------------
 arch/i386/mm/init.c            | 14 +++++++++++---
 arch/x86_64/mm/init.c          | 10 ++++++++++
 3 files changed, 23 insertions(+), 15 deletions(-)
diff --git a/arch/i386/kernel/alternative.c b/arch/i386/kernel/alternative.c
index c3750c2c4113..c85598acb8fd 100644
--- a/arch/i386/kernel/alternative.c
+++ b/arch/i386/kernel/alternative.c
@@ -430,22 +430,12 @@ void __init alternative_instructions(void)
  * And on the local CPU you need to be protected again NMI or MCE handlers
  * seeing an inconsistent instruction while you patch.
  */
-void __kprobes text_poke(void *oaddr, unsigned char *opcode, int len)
+void __kprobes text_poke(void *addr, unsigned char *opcode, int len)
 {
-        u8 *addr = oaddr;
-        if (!pte_write(*lookup_address((unsigned long)addr))) {
-                struct page *p[2] = { virt_to_page(addr), virt_to_page(addr+PAGE_SIZE) };
-                addr = vmap(p, 2, VM_MAP, PAGE_KERNEL);
-                if (!addr)
-                        return;
-                addr += ((unsigned long)oaddr) % PAGE_SIZE;
-        }
         memcpy(addr, opcode, len);
         sync_core();
         /* Not strictly needed, but can speed CPU recovery up. Ignore cross cacheline
            case. */
         if (cpu_has_clflush)
-                asm("clflush (%0) " :: "r" (oaddr) : "memory");
-        if (addr != oaddr)
-                vunmap(addr);
+                asm("clflush (%0) " :: "r" (addr) : "memory");
 }
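
Illustrative note, not part of the patch: with the vmap() fallback gone, text_poke() writes straight through the normal kernel mapping, so it assumes the target text page is still writable, which the mark_rodata_ro() changes below arrange for in the configurations that still need runtime patching. A minimal hypothetical caller, using only the signature shown above; example_plant_int3() is a made-up name and 0xcc is just the x86 breakpoint opcode used as sample data:

/* Sketch only: patch one byte of kernel text via text_poke().
 * Assumes the text_poke() prototype from the hunk above and that the
 * caller handles any cross-CPU serialization itself. */
static void example_plant_int3(void *ip)
{
        unsigned char int3 = 0xcc;      /* x86 int3 (breakpoint) opcode */

        text_poke(ip, &int3, 1);
}
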
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index 1b1a1e66d099..4c4809f13cb1 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -800,9 +800,17 @@ void mark_rodata_ro(void)
         unsigned long start = PFN_ALIGN(_text);
         unsigned long size = PFN_ALIGN(_etext) - start;
 
-        change_page_attr(virt_to_page(start),
-                         size >> PAGE_SHIFT, PAGE_KERNEL_RX);
-        printk("Write protecting the kernel text: %luk\n", size >> 10);
+#ifndef CONFIG_KPROBES
+#ifdef CONFIG_HOTPLUG_CPU
+        /* It must still be possible to apply SMP alternatives. */
+        if (num_possible_cpus() <= 1)
+#endif
+        {
+                change_page_attr(virt_to_page(start),
+                                 size >> PAGE_SHIFT, PAGE_KERNEL_RX);
+                printk("Write protecting the kernel text: %luk\n", size >> 10);
+        }
+#endif
         start += size;
         size = (unsigned long)__end_rodata - start;
         change_page_attr(virt_to_page(start),
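
Illustrative note, not part of the patch: the new guards mean i386 only write-protects kernel text when neither kprobes nor a possibly hotplugged second CPU might still need to patch it. A rough sketch of how the preprocessed logic resolves; should_write_protect_text() is a made-up helper, not a kernel function:

/* Sketch only: when does the hunk above actually call change_page_attr()
 * on the text region? */
static int should_write_protect_text(void)
{
#ifdef CONFIG_KPROBES
        return 0;                       /* kprobes patches text at runtime */
#elif defined(CONFIG_HOTPLUG_CPU)
        /* a CPU brought up later may still need SMP alternatives applied */
        return num_possible_cpus() <= 1;
#else
        return 1;
#endif
}
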
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index 38f5d6368006..458893b376f8 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -600,6 +600,16 @@ void mark_rodata_ro(void)
 {
         unsigned long start = (unsigned long)_stext, end;
 
+#ifdef CONFIG_HOTPLUG_CPU
+        /* It must still be possible to apply SMP alternatives. */
+        if (num_possible_cpus() > 1)
+                start = (unsigned long)_etext;
+#endif
+
+#ifdef CONFIG_KPROBES
+        start = (unsigned long)__start_rodata;
+#endif
+
         end = (unsigned long)__end_rodata;
         start = (start + PAGE_SIZE - 1) & PAGE_MASK;
         end &= PAGE_MASK;
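
Illustrative note, not part of the patch: unlike the i386 change, which skips the text write-protection entirely, x86_64 keeps its later write-protection call (not shown in this hunk) but moves start forward so that kernel text, and with kprobes everything below __start_rodata, stays writable. A sketch of the effective lower bound per configuration; ro_start() is a made-up helper:

/* Sketch only: where the read-only range begins after this hunk. */
static unsigned long ro_start(void)
{
#ifdef CONFIG_KPROBES
        return (unsigned long)__start_rodata;   /* keep all text writable */
#elif defined(CONFIG_HOTPLUG_CPU)
        return num_possible_cpus() > 1 ?
               (unsigned long)_etext : (unsigned long)_stext;
#else
        return (unsigned long)_stext;
#endif
}
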