path: root/arch/x86/kernel/alternative.c
author		Andy Lutomirski <luto@mit.edu>	2011-07-13 09:24:10 -0400
committer	H. Peter Anvin <hpa@linux.intel.com>	2011-07-13 14:22:56 -0400
commit		59e97e4d6fbcd5b74a94cb48bcbfc6f8478a5e93 (patch)
tree		015fd8e63e1fcd8fdf7a066bd2e09a5636a14449 /arch/x86/kernel/alternative.c
parent		c9712944b2a12373cb6ff8059afcfb7e826a6c54 (diff)
x86: Make alternative instruction pointers relative
This saves a few bytes on x86-64 and means that future patches can apply
alternatives to unrelocated code.

Signed-off-by: Andy Lutomirski <luto@mit.edu>
Link: http://lkml.kernel.org/r/ff64a6b9a1a3860ca4a7b8b6dc7b4754f9491cd7.1310563276.git.luto@mit.edu
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
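[Editor's note] The change stores the alternative's original and replacement addresses as 32-bit offsets relative to the offset field itself, recovered in apply_alternatives() as "(u8 *)&a->instr_offset + a->instr_offset". The user-space sketch below illustrates that self-relative pointer pattern in isolation; the names (my_alt_entry, target_code, entry) are invented for the example and are not kernel code.

/*
 * Illustrative only: a self-relative 32-bit offset instead of an absolute
 * pointer.  Storing "target - &field" needs 4 bytes instead of 8 on
 * x86-64, and the encoding stays valid even if the whole image is moved,
 * because the field and the target move together.
 */
#include <stdint.h>
#include <stdio.h>

struct my_alt_entry {
	int32_t instr_offset;		/* target address minus &instr_offset */
};

static unsigned char target_code[4];	/* stand-in for the code to patch */
static struct my_alt_entry entry;	/* stand-in for one table record */

int main(void)
{
	/* Encode: store the target as an offset relative to the field itself. */
	entry.instr_offset = (int32_t)((uintptr_t)target_code -
				       (uintptr_t)&entry.instr_offset);

	/* Decode: the same arithmetic the patched apply_alternatives() performs. */
	uint8_t *instr = (uint8_t *)&entry.instr_offset + entry.instr_offset;

	printf("recovered pointer matches: %d\n", instr == target_code);
	return 0;
}

Because only a relative distance is recorded, the table entries do not need the image to sit at its final (relocated) address before they can be applied.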
Diffstat (limited to 'arch/x86/kernel/alternative.c')
-rw-r--r--	arch/x86/kernel/alternative.c	21
1 file changed, 13 insertions, 8 deletions
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index a81f2d52f869..ddb207bb5f91 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -263,6 +263,7 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
 					 struct alt_instr *end)
 {
 	struct alt_instr *a;
+	u8 *instr, *replacement;
 	u8 insnbuf[MAX_PATCH_LEN];
 
 	DPRINTK("%s: alt table %p -> %p\n", __func__, start, end);
@@ -276,25 +277,29 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
 	 * order.
 	 */
 	for (a = start; a < end; a++) {
-		u8 *instr = a->instr;
+		instr = (u8 *)&a->instr_offset + a->instr_offset;
+		replacement = (u8 *)&a->repl_offset + a->repl_offset;
 		BUG_ON(a->replacementlen > a->instrlen);
 		BUG_ON(a->instrlen > sizeof(insnbuf));
 		BUG_ON(a->cpuid >= NCAPINTS*32);
 		if (!boot_cpu_has(a->cpuid))
 			continue;
+
+		memcpy(insnbuf, replacement, a->replacementlen);
+
+		/* 0xe8 is a relative jump; fix the offset. */
+		if (*insnbuf == 0xe8 && a->replacementlen == 5)
+			*(s32 *)(insnbuf + 1) += replacement - instr;
+
+		add_nops(insnbuf + a->replacementlen,
+			 a->instrlen - a->replacementlen);
+
 #ifdef CONFIG_X86_64
 		/* vsyscall code is not mapped yet. resolve it manually. */
 		if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) {
 			instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
-			DPRINTK("%s: vsyscall fixup: %p => %p\n",
-				__func__, a->instr, instr);
 		}
 #endif
-		memcpy(insnbuf, a->replacement, a->replacementlen);
-		if (*insnbuf == 0xe8 && a->replacementlen == 5)
-			*(s32 *)(insnbuf + 1) += a->replacement - a->instr;
-		add_nops(insnbuf + a->replacementlen,
-			 a->instrlen - a->replacementlen);
 		text_poke_early(instr, insnbuf, a->instrlen);
 	}
 }
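[Editor's note] The displacement fixup kept in the new ordering deserves a word: 0xe8 encodes a 5-byte call whose 32-bit displacement is relative to the end of the instruction, so once the bytes are copied from the replacement site to the original site, the displacement has to grow by "replacement - instr" for the call to keep reaching the same target. A small stand-alone check of that arithmetic (plain user-space C, arbitrary stand-in addresses, nothing kernel-specific):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t replacement[5], instr[5];	/* two stand-in code locations */
	uintptr_t repl_site = (uintptr_t)replacement;
	uintptr_t instr_site = (uintptr_t)instr;
	int32_t rel32 = 0x1234;			/* arbitrary original displacement */

	/* A rel32 call reaches: address just past the 5-byte insn + rel32. */
	uintptr_t target = repl_site + 5 + rel32;

	/* The fixup from the patch: shift rel32 by how far the insn moved. */
	int32_t fixed = rel32 + (int32_t)(repl_site - instr_site);

	printf("call still lands on target: %d\n",
	       instr_site + 5 + fixed == target);
	return 0;
}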