about summary refs log tree commit diff stats
path: root/arch/i386/kernel/alternative.c
diff options
context:
space:
mode:
authorAndi Kleen <ak@suse.de>2007-07-22 05:12:31 -0400
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2007-07-22 14:03:37 -0400
commit19d36ccdc34f5ed444f8a6af0cbfdb6790eb1177 (patch)
tree39942efb826f2793692da60b04fc0e7b015fa23d /arch/i386/kernel/alternative.c
parentf51c94528a9bc73504928926ca4d791a2b7ddd7c (diff)
x86: Fix alternatives and kprobes to remap write-protected kernel text
Reenable kprobes and alternative patching when the kernel text is write
protected by DEBUG_RODATA.

Add a general utility function to change write protected text. The new
function remaps the code using vmap to write it and takes care of CPU
synchronization. It also does CLFLUSH to make icache recovery faster.

There are some limitations on when the function can be used, see the
comment.

This is a newer version that also changes the paravirt_ops code.

text_poke also supports multi byte patching now.

Contains bug fixes from Zach Amsden and suggestions from Mathieu
Desnoyers.

Cc: Jan Beulich <jbeulich@novell.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Mathieu Desnoyers <compudj@krystal.dyndns.org>
Cc: Zach Amsden <zach@vmware.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/i386/kernel/alternative.c')
-rw-r--r--arch/i386/kernel/alternative.c40
1 file changed, 34 insertions(+), 6 deletions(-)
diff --git a/arch/i386/kernel/alternative.c b/arch/i386/kernel/alternative.c
index 0695be538de5..206ea2ca63cc 100644
--- a/arch/i386/kernel/alternative.c
+++ b/arch/i386/kernel/alternative.c
@@ -2,8 +2,12 @@
 #include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <linux/list.h>
+#include <linux/kprobes.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
 #include <asm/alternative.h>
 #include <asm/sections.h>
+#include <asm/pgtable.h>
 
 #ifdef CONFIG_HOTPLUG_CPU
 static int smp_alt_once;
@@ -150,7 +154,7 @@ static void nop_out(void *insns, unsigned int len)
 		unsigned int noplen = len;
 		if (noplen > ASM_NOP_MAX)
 			noplen = ASM_NOP_MAX;
-		memcpy(insns, noptable[noplen], noplen);
+		text_poke(insns, noptable[noplen], noplen);
 		insns += noplen;
 		len -= noplen;
 	}
@@ -202,7 +206,7 @@ static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
 			continue;
 		if (*ptr > text_end)
 			continue;
-		**ptr = 0xf0;	/* lock prefix */
+		text_poke(*ptr, ((unsigned char []){0xf0}), 1); /* add lock prefix */
 	};
 }
 
@@ -360,10 +364,6 @@ void apply_paravirt(struct paravirt_patch_site *start,
 		/* Pad the rest with nops */
 		nop_out(p->instr + used, p->len - used);
 	}
-
-	/* Sync to be conservative, in case we patched following
-	 * instructions */
-	sync_core();
 }
 extern struct paravirt_patch_site __start_parainstructions[],
 	__stop_parainstructions[];
@@ -406,3 +406,31 @@ void __init alternative_instructions(void)
 	apply_paravirt(__parainstructions, __parainstructions_end);
 	local_irq_restore(flags);
 }
+
+/*
+ * Warning:
+ * When you use this code to patch more than one byte of an instruction
+ * you need to make sure that other CPUs cannot execute this code in parallel.
+ * Also no thread must be currently preempted in the middle of these instructions.
+ * And on the local CPU you need to be protected again NMI or MCE handlers
+ * seeing an inconsistent instruction while you patch.
+ */
+void __kprobes text_poke(void *oaddr, unsigned char *opcode, int len)
+{
+	u8 *addr = oaddr;
+	if (!pte_write(*lookup_address((unsigned long)addr))) {
+		struct page *p[2] = { virt_to_page(addr), virt_to_page(addr+PAGE_SIZE) };
+		addr = vmap(p, 2, VM_MAP, PAGE_KERNEL);
+		if (!addr)
+			return;
+		addr += ((unsigned long)oaddr) % PAGE_SIZE;
+	}
+	memcpy(addr, opcode, len);
+	sync_core();
+	/* Not strictly needed, but can speed CPU recovery up. Ignore cross cacheline
+	   case. */
+	if (cpu_has_clflush)
+		asm("clflush (%0) " :: "r" (oaddr) : "memory");
+	if (addr != oaddr)
+		vunmap(addr);
+}