author     H. Peter Anvin <hpa@zytor.com>   2008-01-30 07:30:56 -0500
committer  Ingo Molnar <mingo@elte.hu>      2008-01-30 07:30:56 -0500
commit     65ea5b0349903585bfed9720fa06f5edb4f1cd25
tree       6c252228c34416b7e2077f23475de34500c2ab8a
parent     53756d3722172815f52272b28c6d5d5e9639adde
x86: rename the struct pt_regs members for 32/64-bit consistency
We have a lot of code which differs only by the naming of specific
members of structures that contain registers. In order to enable
additional unifications, this patch drops the e- or r- size prefix
from the register names in struct pt_regs, and drops the x- prefixes
for segment registers on the 32-bit side.
This patch also performs the equivalent renames in some additional
places that might be candidates for unification in the future.
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/kernel/kprobes_32.c')
 -rw-r--r--  arch/x86/kernel/kprobes_32.c  92
 1 file changed, 46 insertions(+), 46 deletions(-)
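The rename is easiest to see on the structure itself before reading the mechanical substitutions below. The following sketch contrasts the old and new 32-bit member names; it is abridged and illustrative only (the authoritative definitions live in include/asm-x86/ptrace.h, outside this diffstat's scope, and both versions are of course really named struct pt_regs):

        /* Illustrative, abridged sketch -- not part of this patch. */
        struct pt_regs_old {    /* 32-bit names before the rename */
                long ebx, ecx, edx, esi, edi, ebp, eax; /* e- size prefix */
                int  xds, xes, xfs;                     /* x- segment prefix */
                long orig_eax, eip;
                int  xcs;
                long eflags, esp;
                int  xss;
        };

        struct pt_regs_new {    /* after: prefixes dropped, matching 64-bit */
                long bx, cx, dx, si, di, bp, ax;
                int  ds, es, fs;
                long orig_ax, ip;
                int  cs;
                long flags, sp;
                int  ss;
        };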
diff --git a/arch/x86/kernel/kprobes_32.c b/arch/x86/kernel/kprobes_32.c
index bc4a68367cd..d708cd4f956 100644
--- a/arch/x86/kernel/kprobes_32.c
+++ b/arch/x86/kernel/kprobes_32.c
@@ -212,7 +212,7 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
 {
         __get_cpu_var(current_kprobe) = p;
         kcb->kprobe_saved_eflags = kcb->kprobe_old_eflags
-                = (regs->eflags & (TF_MASK | IF_MASK));
+                = (regs->flags & (TF_MASK | IF_MASK));
         if (is_IF_modifier(p->opcode))
                 kcb->kprobe_saved_eflags &= ~IF_MASK;
 }
@@ -232,20 +232,20 @@ static __always_inline void restore_btf(void)
 static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
 {
         clear_btf();
-        regs->eflags |= TF_MASK;
-        regs->eflags &= ~IF_MASK;
+        regs->flags |= TF_MASK;
+        regs->flags &= ~IF_MASK;
         /*single step inline if the instruction is an int3*/
         if (p->opcode == BREAKPOINT_INSTRUCTION)
-                regs->eip = (unsigned long)p->addr;
+                regs->ip = (unsigned long)p->addr;
         else
-                regs->eip = (unsigned long)p->ainsn.insn;
+                regs->ip = (unsigned long)p->ainsn.insn;
 }
 
 /* Called with kretprobe_lock held */
 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
                                       struct pt_regs *regs)
 {
-        unsigned long *sara = (unsigned long *)&regs->esp;
+        unsigned long *sara = (unsigned long *)&regs->sp;
 
         ri->ret_addr = (kprobe_opcode_t *) *sara;
 
@@ -264,7 +264,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
         kprobe_opcode_t *addr;
         struct kprobe_ctlblk *kcb;
 
-        addr = (kprobe_opcode_t *)(regs->eip - sizeof(kprobe_opcode_t));
+        addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
 
         /*
          * We don't want to be preempted for the entire
@@ -279,8 +279,8 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
         if (p) {
                 if (kcb->kprobe_status == KPROBE_HIT_SS &&
                         *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
-                        regs->eflags &= ~TF_MASK;
-                        regs->eflags |= kcb->kprobe_saved_eflags;
+                        regs->flags &= ~TF_MASK;
+                        regs->flags |= kcb->kprobe_saved_eflags;
                         goto no_kprobe;
                 }
                 /* We have reentered the kprobe_handler(), since
@@ -301,7 +301,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
                          * another cpu right after we hit, no further
                          * handling of this interrupt is appropriate
                          */
-                        regs->eip -= sizeof(kprobe_opcode_t);
+                        regs->ip -= sizeof(kprobe_opcode_t);
                         ret = 1;
                         goto no_kprobe;
                 }
@@ -325,7 +325,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
                          * Back up over the (now missing) int3 and run
                          * the original instruction.
                          */
-                        regs->eip -= sizeof(kprobe_opcode_t);
+                        regs->ip -= sizeof(kprobe_opcode_t);
                         ret = 1;
                 }
                 /* Not one of ours: let kernel handle it */
@@ -344,7 +344,7 @@ ss_probe:
         if (p->ainsn.boostable == 1 && !p->post_handler){
                 /* Boost up -- we can execute copied instructions directly */
                 reset_current_kprobe();
-                regs->eip = (unsigned long)p->ainsn.insn;
+                regs->ip = (unsigned long)p->ainsn.insn;
                 preempt_enable_no_resched();
                 return 1;
         }
@@ -368,7 +368,7 @@ no_kprobe:
         asm volatile ( ".global kretprobe_trampoline\n"
                         "kretprobe_trampoline: \n"
                         "       pushf\n"
-                        /* skip cs, eip, orig_eax */
+                        /* skip cs, ip, orig_ax */
                         "       subl $12, %esp\n"
                         "       pushl %fs\n"
                         "       pushl %ds\n"
@@ -382,10 +382,10 @@ no_kprobe:
                         "       pushl %ebx\n"
                         "       movl %esp, %eax\n"
                         "       call trampoline_handler\n"
-                        /* move eflags to cs */
+                        /* move flags to cs */
                         "       movl 52(%esp), %edx\n"
                         "       movl %edx, 48(%esp)\n"
-                        /* save true return address on eflags */
+                        /* save true return address on flags */
                         "       movl %eax, 52(%esp)\n"
                         "       popl %ebx\n"
                         "       popl %ecx\n"
@@ -394,7 +394,7 @@ no_kprobe:
                         "       popl %edi\n"
                         "       popl %ebp\n"
                         "       popl %eax\n"
-                        /* skip eip, orig_eax, es, ds, fs */
+                        /* skip ip, orig_ax, es, ds, fs */
                         "       addl $20, %esp\n"
                         "       popf\n"
                         "       ret\n");
@@ -415,9 +415,9 @@ fastcall void *__kprobes trampoline_handler(struct pt_regs *regs)
         spin_lock_irqsave(&kretprobe_lock, flags);
         head = kretprobe_inst_table_head(current);
         /* fixup registers */
-        regs->xcs = __KERNEL_CS | get_kernel_rpl();
-        regs->eip = trampoline_address;
-        regs->orig_eax = 0xffffffff;
+        regs->cs = __KERNEL_CS | get_kernel_rpl();
+        regs->ip = trampoline_address;
+        regs->orig_ax = 0xffffffff;
 
         /*
          * It is possible to have multiple instances associated with a given
@@ -478,11 +478,11 @@ fastcall void *__kprobes trampoline_handler(struct pt_regs *regs)
  * interrupt. We have to fix up the stack as follows:
  *
  * 0) Except in the case of absolute or indirect jump or call instructions,
- * the new eip is relative to the copied instruction. We need to make
+ * the new ip is relative to the copied instruction. We need to make
  * it relative to the original instruction.
  *
  * 1) If the single-stepped instruction was pushfl, then the TF and IF
- * flags are set in the just-pushed eflags, and may need to be cleared.
+ * flags are set in the just-pushed flags, and may need to be cleared.
  *
  * 2) If the single-stepped instruction was a call, the return address
  * that is atop the stack is the address following the copied instruction.
@@ -493,11 +493,11 @@ fastcall void *__kprobes trampoline_handler(struct pt_regs *regs)
 static void __kprobes resume_execution(struct kprobe *p,
                 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
 {
-        unsigned long *tos = (unsigned long *)&regs->esp;
+        unsigned long *tos = (unsigned long *)&regs->sp;
         unsigned long copy_eip = (unsigned long)p->ainsn.insn;
         unsigned long orig_eip = (unsigned long)p->addr;
 
-        regs->eflags &= ~TF_MASK;
+        regs->flags &= ~TF_MASK;
         switch (p->ainsn.insn[0]) {
         case 0x9c:              /* pushfl */
                 *tos &= ~(TF_MASK | IF_MASK);
@@ -508,8 +508,8 @@ static void __kprobes resume_execution(struct kprobe *p,
         case 0xca:
         case 0xcb:
         case 0xcf:
-        case 0xea:              /* jmp absolute -- eip is correct */
-                /* eip is already adjusted, no more changes required */
+        case 0xea:              /* jmp absolute -- ip is correct */
+                /* ip is already adjusted, no more changes required */
                 p->ainsn.boostable = 1;
                 goto no_change;
         case 0xe8:              /* call relative - Fix return addr */
@@ -522,14 +522,14 @@ static void __kprobes resume_execution(struct kprobe *p,
                 if ((p->ainsn.insn[1] & 0x30) == 0x10) {
                         /*
                          * call absolute, indirect
-                         * Fix return addr; eip is correct.
+                         * Fix return addr; ip is correct.
                          * But this is not boostable
                          */
                         *tos = orig_eip + (*tos - copy_eip);
                         goto no_change;
                 } else if (((p->ainsn.insn[1] & 0x31) == 0x20) ||       /* jmp near, absolute indirect */
                            ((p->ainsn.insn[1] & 0x31) == 0x21)) {       /* jmp far, absolute indirect */
-                        /* eip is correct. And this is boostable */
+                        /* ip is correct. And this is boostable */
                         p->ainsn.boostable = 1;
                         goto no_change;
                 }
@@ -538,21 +538,21 @@ static void __kprobes resume_execution(struct kprobe *p,
         }
 
         if (p->ainsn.boostable == 0) {
-                if ((regs->eip > copy_eip) &&
-                    (regs->eip - copy_eip) + 5 < MAX_INSN_SIZE) {
+                if ((regs->ip > copy_eip) &&
+                    (regs->ip - copy_eip) + 5 < MAX_INSN_SIZE) {
                         /*
                          * These instructions can be executed directly if it
                          * jumps back to correct address.
                          */
-                        set_jmp_op((void *)regs->eip,
-                                   (void *)orig_eip + (regs->eip - copy_eip));
+                        set_jmp_op((void *)regs->ip,
+                                   (void *)orig_eip + (regs->ip - copy_eip));
                         p->ainsn.boostable = 1;
                 } else {
                         p->ainsn.boostable = -1;
                 }
         }
 
-        regs->eip = orig_eip + (regs->eip - copy_eip);
+        regs->ip = orig_eip + (regs->ip - copy_eip);
 
 no_change:
         restore_btf();
@@ -578,8 +578,8 @@ static int __kprobes post_kprobe_handler(struct pt_regs *regs)
         }
 
         resume_execution(cur, regs, kcb);
-        regs->eflags |= kcb->kprobe_saved_eflags;
-        trace_hardirqs_fixup_flags(regs->eflags);
+        regs->flags |= kcb->kprobe_saved_eflags;
+        trace_hardirqs_fixup_flags(regs->flags);
 
         /*Restore back the original saved kprobes variables and continue. */
         if (kcb->kprobe_status == KPROBE_REENTER) {
@@ -591,11 +591,11 @@ out:
         preempt_enable_no_resched();
 
         /*
-         * if somebody else is singlestepping across a probe point, eflags
+         * if somebody else is singlestepping across a probe point, flags
          * will have TF set, in which case, continue the remaining processing
          * of do_debug, as if this is not a probe hit.
          */
-        if (regs->eflags & TF_MASK)
+        if (regs->flags & TF_MASK)
                 return 0;
 
         return 1;
@@ -612,12 +612,12 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
                 /*
                  * We are here because the instruction being single
                  * stepped caused a page fault. We reset the current
-                 * kprobe and the eip points back to the probe address
+                 * kprobe and the ip points back to the probe address
                  * and allow the page fault handler to continue as a
                  * normal page fault.
                  */
-                regs->eip = (unsigned long)cur->addr;
-                regs->eflags |= kcb->kprobe_old_eflags;
+                regs->ip = (unsigned long)cur->addr;
+                regs->flags |= kcb->kprobe_old_eflags;
                 if (kcb->kprobe_status == KPROBE_REENTER)
                         restore_previous_kprobe(kcb);
                 else
@@ -703,7 +703,7 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
         struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
 
         kcb->jprobe_saved_regs = *regs;
-        kcb->jprobe_saved_esp = &regs->esp;
+        kcb->jprobe_saved_esp = &regs->sp;
         addr = (unsigned long)(kcb->jprobe_saved_esp);
 
         /*
@@ -715,9 +715,9 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
          */
         memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
                MIN_STACK_SIZE(addr));
-        regs->eflags &= ~IF_MASK;
+        regs->flags &= ~IF_MASK;
         trace_hardirqs_off();
-        regs->eip = (unsigned long)(jp->entry);
+        regs->ip = (unsigned long)(jp->entry);
         return 1;
 }
 
@@ -736,15 +736,15 @@ void __kprobes jprobe_return(void)
 int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 {
         struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-        u8 *addr = (u8 *) (regs->eip - 1);
+        u8 *addr = (u8 *) (regs->ip - 1);
         unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_esp);
         struct jprobe *jp = container_of(p, struct jprobe, kp);
 
         if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end)) {
-                if (&regs->esp != kcb->jprobe_saved_esp) {
+                if (&regs->sp != kcb->jprobe_saved_esp) {
                         struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
-                        printk("current esp %p does not match saved esp %p\n",
-                               &regs->esp, kcb->jprobe_saved_esp);
+                        printk("current sp %p does not match saved sp %p\n",
+                               &regs->sp, kcb->jprobe_saved_esp);
                         printk("Saved registers for jprobe %p\n", jp);
                         show_registers(saved_regs);
                         printk("Current registers\n");
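
With the renames in place, register fix-ups like those in the handlers above can be written once for both architectures: 32-bit code formerly spelled the instruction pointer regs->eip and 64-bit code regs->rip, so shared helpers needed per-arch spellings. A hypothetical sketch of the kind of unification the commit message anticipates (the helper name is not from this patch):

        /* Hypothetical example, not part of this patch: compiles unchanged
         * on 32- and 64-bit x86 once the unified member names exist. */
        static void rewind_to_probe(struct pt_regs *regs, struct kprobe *p)
        {
                regs->ip = (unsigned long)p->addr; /* was eip (32-bit) / rip (64-bit) */
                regs->flags &= ~TF_MASK;           /* was eflags on both sides */
        }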