about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
authorDenys Vlasenko <dvlasenk@redhat.com>2014-05-01 10:52:46 -0400
committerOleg Nesterov <oleg@redhat.com>2014-05-14 07:57:25 -0400
commit50204c6f6dd01b5bce1b53e0b003d01849455512 (patch)
tree2ed16e91199c0d7fac54f06bb843300615915030 /arch
parent29dedee0e693aa113164c820395ce51446a71ace (diff)
uprobes/x86: Simplify rip-relative handling
It is possible to replace rip-relative addressing mode with addressing mode of the same length: (reg+disp32). This eliminates the need to fix up immediate and correct for changing instruction length. And we can kill arch_uprobe->def.riprel_target. Signed-off-by: Denys Vlasenko <dvlasenk@redhat.com> Reviewed-by: Jim Keniston <jkenisto@us.ibm.com> Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Diffstat (limited to 'arch')
-rw-r--r--arch/x86/include/asm/uprobes.h3
-rw-r--r--arch/x86/kernel/uprobes.c71
2 files changed, 30 insertions(+), 44 deletions(-)
diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h
index a040d493a4f9..7be3c079e389 100644
--- a/arch/x86/include/asm/uprobes.h
+++ b/arch/x86/include/asm/uprobes.h
@@ -50,9 +50,6 @@ struct arch_uprobe {
 		u8	opc1;
 	} branch;
 	struct {
-#ifdef CONFIG_X86_64
-		long	riprel_target;
-#endif
 		u8	fixups;
 		u8	ilen;
 	} def;
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 2ebadb252093..31dcb4d5ea46 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -251,9 +251,9 @@ static inline bool is_64bit_mm(struct mm_struct *mm)
  * If arch_uprobe->insn doesn't use rip-relative addressing, return
  * immediately. Otherwise, rewrite the instruction so that it accesses
  * its memory operand indirectly through a scratch register. Set
- * def->fixups and def->riprel_target accordingly. (The contents of the
- * scratch register will be saved before we single-step the modified
- * instruction, and restored afterward).
+ * def->fixups accordingly. (The contents of the scratch register
+ * will be saved before we single-step the modified instruction,
+ * and restored afterward).
  *
  * We do this because a rip-relative instruction can access only a
  * relatively small area (+/- 2 GB from the instruction), and the XOL
@@ -264,9 +264,12 @@ static inline bool is_64bit_mm(struct mm_struct *mm)
  *
  * Some useful facts about rip-relative instructions:
  *
- * - There's always a modrm byte.
+ * - There's always a modrm byte with bit layout "00 reg 101".
  * - There's never a SIB byte.
  * - The displacement is always 4 bytes.
+ * - REX.B=1 bit in REX prefix, which normally extends r/m field,
+ *   has no effect on rip-relative mode. It doesn't make modrm byte
+ *   with r/m=101 refer to register 1101 = R13.
  */
 static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn)
 {
@@ -293,9 +296,8 @@ static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn)
 	 */
 	cursor = auprobe->insn + insn_offset_modrm(insn);
 	/*
-	 * Convert from rip-relative addressing to indirect addressing
-	 * via a scratch register. Change the r/m field from 0x5 (%rip)
-	 * to 0x0 (%rax) or 0x1 (%rcx), and squeeze out the offset field.
+	 * Convert from rip-relative addressing
+	 * to register-relative addressing via a scratch register.
 	 */
 	reg = MODRM_REG(insn);
 	if (reg == 0) {
@@ -307,22 +309,21 @@ static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn)
 		 * #1) for the scratch register.
 		 */
 		auprobe->def.fixups |= UPROBE_FIX_RIP_CX;
-		/* Change modrm from 00 000 101 to 00 000 001. */
-		*cursor = 0x1;
+		/*
+		 * Change modrm from "00 000 101" to "10 000 001". Example:
+		 * 89 05 disp32  mov %eax,disp32(%rip) becomes
+		 * 89 81 disp32  mov %eax,disp32(%rcx)
+		 */
+		*cursor = 0x81;
 	} else {
 		/* Use %rax (register #0) for the scratch register. */
 		auprobe->def.fixups |= UPROBE_FIX_RIP_AX;
-		/* Change modrm from 00 xxx 101 to 00 xxx 000 */
-		*cursor = (reg << 3);
-	}
-
-	/* Target address = address of next instruction + (signed) offset */
-	auprobe->def.riprel_target = (long)insn->length + insn->displacement.value;
-
-	/* Displacement field is gone; slide immediate field (if any) over. */
-	if (insn->immediate.nbytes) {
-		cursor++;
-		memmove(cursor, cursor + insn->displacement.nbytes, insn->immediate.nbytes);
+		/*
+		 * Change modrm from "00 reg 101" to "10 reg 000". Example:
+		 * 89 1d disp32  mov %edx,disp32(%rip) becomes
+		 * 89 98 disp32  mov %edx,disp32(%rax)
+		 */
+		*cursor = (reg << 3) | 0x80;
 	}
 }
 
@@ -343,26 +344,17 @@ static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
 		unsigned long *sr = scratch_reg(auprobe, regs);
 
 		utask->autask.saved_scratch_register = *sr;
-		*sr = utask->vaddr + auprobe->def.riprel_target;
+		*sr = utask->vaddr + auprobe->def.ilen;
 	}
 }
 
-static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs,
-				long *correction)
+static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
 {
 	if (auprobe->def.fixups & (UPROBE_FIX_RIP_AX | UPROBE_FIX_RIP_CX)) {
 		struct uprobe_task *utask = current->utask;
 		unsigned long *sr = scratch_reg(auprobe, regs);
 
 		*sr = utask->autask.saved_scratch_register;
-		/*
-		 * The original instruction includes a displacement, and so
-		 * is 4 bytes longer than what we've just single-stepped.
-		 * Caller may need to apply other fixups to handle stuff
-		 * like "jmpq *...(%rip)" and "callq *...(%rip)".
-		 */
-		if (correction)
-			*correction += 4;
 	}
 }
 #else /* 32-bit: */
@@ -379,8 +371,7 @@ static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn)
 static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
 {
 }
-static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs,
-				long *correction)
+static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
 {
 }
 #endif /* CONFIG_X86_64 */
@@ -417,10 +408,10 @@ static int push_ret_address(struct pt_regs *regs, unsigned long ip)
 static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
 {
 	struct uprobe_task *utask = current->utask;
-	long correction = (long)(utask->vaddr - utask->xol_vaddr);
 
-	riprel_post_xol(auprobe, regs, &correction);
+	riprel_post_xol(auprobe, regs);
 	if (auprobe->def.fixups & UPROBE_FIX_IP) {
+		long correction = utask->vaddr - utask->xol_vaddr;
 		regs->ip += correction;
 	} else if (auprobe->def.fixups & UPROBE_FIX_CALL) {
 		regs->sp += sizeof_long();
@@ -436,7 +427,7 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs
 
 static void default_abort_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
 {
-	riprel_post_xol(auprobe, regs, NULL);
+	riprel_post_xol(auprobe, regs);
 }
 
 static struct uprobe_xol_ops default_xol_ops = {
@@ -732,11 +723,9 @@ bool arch_uprobe_xol_was_trapped(struct task_struct *t)
  *
  * If the original instruction was a rip-relative instruction such as
  * "movl %edx,0xnnnn(%rip)", we have instead executed an equivalent
- * instruction using a scratch register -- e.g., "movl %edx,(%rax)".
- * We need to restore the contents of the scratch register and adjust
- * the ip, keeping in mind that the instruction we executed is 4 bytes
- * shorter than the original instruction (since we squeezed out the offset
- * field). (FIX_RIP_AX or FIX_RIP_CX)
+ * instruction using a scratch register -- e.g., "movl %edx,0xnnnn(%rax)".
+ * We need to restore the contents of the scratch register
+ * (FIX_RIP_AX or FIX_RIP_CX).
  */
 int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
 {