author	Andi Kleen <ak@suse.de>	2007-08-10 16:31:03 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-08-11 18:58:13 -0400
commit	ab144f5ec64c42218a555ec1dbde6b60cf2982d6 (patch)
tree	e3a4532e1db116e87060c9b18f4cfbf6258fdba3 /arch/i386/kernel/vmi.c
parent	d3f3c9346979bfa074c64eac5fc3ed5bba4f40ed (diff)
i386: Make patching more robust, fix paravirt issue
Commit 19d36ccdc34f5ed444f8a6af0cbfdb6790eb1177 "x86: Fix alternatives
and kprobes to remap write-protected kernel text" uses code that is
itself being patched to do the patching.
In particular, paravirt_ops does patching in two stages: first it
calls paravirt_ops.patch, then it fills any remaining instructions
with nop_out(). nop_out() calls text_poke(), which calls
lookup_address(), which calls pgd_val() (aka paravirt_ops.pgd_val):
that call site is one of the places we patch.
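As an illustration of that cycle, here is a minimal userspace model; the
names follow the commit message, but every body below is a hypothetical
simplification, not the actual kernel source:

#include <stddef.h>

/* stand-in for the native pgd_val implementation */
static unsigned long native_pgd_val(unsigned long pgd) { return pgd; }

struct pv_ops {
	unsigned long (*pgd_val)(unsigned long);	/* a patched call site */
};
static struct pv_ops paravirt_ops = { .pgd_val = native_pgd_val };

static void *lookup_address(unsigned long addr)
{
	/* a real page-table walk goes through pgd_val() -- a patch target */
	(void)paravirt_ops.pgd_val(addr);
	return (void *)addr;			/* rest of the walk elided */
}

static void text_poke(void *addr, const void *opcode, size_t len)
{
	(void)opcode; (void)len;
	lookup_address((unsigned long)addr);	/* find the page to remap */
	/* the copy into a write-enabled alias would happen here */
}

static void nop_out(void *insns, unsigned int len)
{
	static const unsigned char nop = 0x90;
	text_poke(insns, &nop, len);	/* re-enters a half-patched call site */
}

int main(void)
{
	unsigned char insns[5];
	nop_out(insns, sizeof(insns));	/* patching executes patched code */
	return 0;
}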
If we always do patching as one single call to text_poke(), we only
need to make sure we're not patching the memcpy in text_poke itself.
This means the prototype of paravirt_ops.patch needs to change, so
that it marshals the new code into a buffer rather than patching in
place as it does now. It also means all patching goes through
text_poke(), which
is known to be safe (apply_alternatives is also changed to make a
single patch).
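A rough userspace sketch of the buffered flow, under the same caveat
(patch_site and the main() harness are made-up stand-ins, not kernel
code): the patch callback fills a scratch buffer, nop padding is applied
to the buffer, and one final text_poke() publishes the finished bytes.

#include <stdio.h>
#include <string.h>

#define MNEM_CALL 0xe8	/* 5-byte CALL rel32, as in the diff below */

/* stand-in for the new-style paravirt_ops.patch: write into insnbuf
 * and return the number of bytes produced */
static unsigned patch_site(void *insnbuf, unsigned long eip, unsigned len)
{
	unsigned char *buf = insnbuf;
	(void)eip;
	if (len < 5)
		return 0;
	buf[0] = MNEM_CALL;
	memset(buf + 1, 0, 4);		/* displacement filled elsewhere */
	return 5;
}

static void text_poke(void *addr, const void *opcode, size_t len)
{
	memcpy(addr, opcode, len);	/* the kernel remaps RO text first */
}

int main(void)
{
	unsigned char text[8] = { 0 };	/* the "live" instruction bytes */
	unsigned char insnbuf[8];
	unsigned used = patch_site(insnbuf, (unsigned long)text,
				   sizeof(insnbuf));

	memset(insnbuf + used, 0x90, sizeof(insnbuf) - used);	/* nop pad */
	text_poke(text, insnbuf, sizeof(insnbuf));	/* one safe copy */
	printf("patched %u bytes, padded to %zu\n", used, sizeof(text));
	return 0;
}

The point of the design is that nothing between filling the buffer and
the final copy needs to execute any code that is itself a patch target.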
AK: fix compilation on x86-64 (bad rusty!)
AK: fix boot on x86-64 (sigh)
AK: merged with other patches
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/i386/kernel/vmi.c')
-rw-r--r--	arch/i386/kernel/vmi.c	35
1 file changed, 21 insertions(+), 14 deletions(-)
diff --git a/arch/i386/kernel/vmi.c b/arch/i386/kernel/vmi.c
index 72042bb7ec94..18673e0f193b 100644
--- a/arch/i386/kernel/vmi.c
+++ b/arch/i386/kernel/vmi.c
@@ -87,12 +87,14 @@ struct vmi_timer_ops vmi_timer_ops;
 #define IRQ_PATCH_INT_MASK 0
 #define IRQ_PATCH_DISABLE 5
 
-static inline void patch_offset(unsigned char *eip, unsigned char *dest)
+static inline void patch_offset(void *insnbuf,
+				unsigned long eip, unsigned long dest)
 {
-	*(unsigned long *)(eip+1) = dest-eip-5;
+	*(unsigned long *)(insnbuf+1) = dest-eip-5;
 }
 
-static unsigned patch_internal(int call, unsigned len, void *insns)
+static unsigned patch_internal(int call, unsigned len, void *insnbuf,
+			       unsigned long eip)
 {
 	u64 reloc;
 	struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
@@ -100,14 +102,14 @@ static unsigned patch_internal(int call, unsigned len, void *insns)
 	switch(rel->type) {
 	case VMI_RELOCATION_CALL_REL:
 		BUG_ON(len < 5);
-		*(char *)insns = MNEM_CALL;
-		patch_offset(insns, rel->eip);
+		*(char *)insnbuf = MNEM_CALL;
+		patch_offset(insnbuf, eip, (unsigned long)rel->eip);
 		return 5;
 
 	case VMI_RELOCATION_JUMP_REL:
 		BUG_ON(len < 5);
-		*(char *)insns = MNEM_JMP;
-		patch_offset(insns, rel->eip);
+		*(char *)insnbuf = MNEM_JMP;
+		patch_offset(insnbuf, eip, (unsigned long)rel->eip);
 		return 5;
 
 	case VMI_RELOCATION_NOP:
@@ -128,21 +130,26 @@ static unsigned patch_internal(int call, unsigned len, void *insns)
  * Apply patch if appropriate, return length of new instruction
  * sequence. The callee does nop padding for us.
  */
-static unsigned vmi_patch(u8 type, u16 clobbers, void *insns, unsigned len)
+static unsigned vmi_patch(u8 type, u16 clobbers, void *insns,
+			  unsigned long eip, unsigned len)
 {
 	switch (type) {
 	case PARAVIRT_PATCH(irq_disable):
-		return patch_internal(VMI_CALL_DisableInterrupts, len, insns);
+		return patch_internal(VMI_CALL_DisableInterrupts, len,
+				      insns, eip);
 	case PARAVIRT_PATCH(irq_enable):
-		return patch_internal(VMI_CALL_EnableInterrupts, len, insns);
+		return patch_internal(VMI_CALL_EnableInterrupts, len,
+				      insns, eip);
 	case PARAVIRT_PATCH(restore_fl):
-		return patch_internal(VMI_CALL_SetInterruptMask, len, insns);
+		return patch_internal(VMI_CALL_SetInterruptMask, len,
+				      insns, eip);
 	case PARAVIRT_PATCH(save_fl):
-		return patch_internal(VMI_CALL_GetInterruptMask, len, insns);
+		return patch_internal(VMI_CALL_GetInterruptMask, len,
+				      insns, eip);
 	case PARAVIRT_PATCH(iret):
-		return patch_internal(VMI_CALL_IRET, len, insns);
+		return patch_internal(VMI_CALL_IRET, len, insns, eip);
 	case PARAVIRT_PATCH(irq_enable_sysexit):
-		return patch_internal(VMI_CALL_SYSEXIT, len, insns);
+		return patch_internal(VMI_CALL_SYSEXIT, len, insns, eip);
 	default:
 		break;
 	}
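The subtle point in the new patch_offset() is that the rel32 operand of
a 5-byte CALL/JMP must be computed relative to the address where the
code will eventually run (eip), even though the bytes are written into
the scratch buffer (insnbuf). A standalone illustration of the
arithmetic; the main() harness and the addresses are hypothetical:

#include <stdio.h>

static inline void patch_offset(void *insnbuf,
				unsigned long eip, unsigned long dest)
{
	/* displacement is relative to where the code will RUN (eip),
	 * but the bytes land in the scratch buffer (insnbuf) */
	*(unsigned long *)((char *)insnbuf + 1) = dest - eip - 5;
}

int main(void)
{
	/* on i386, unsigned long is 32 bits, exactly the rel32 width;
	 * the oversized buffer keeps this demo safe on 64-bit hosts */
	unsigned char buf[16] = { 0xe8 };	/* CALL opcode */

	patch_offset(buf, 0x1000, 0x2000);	/* site 0x1000 -> target 0x2000 */
	printf("rel32 = %#lx\n", *(unsigned long *)(buf + 1));	/* 0xffb */
	return 0;
}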