author		Heiko Carstens <heiko.carstens@de.ibm.com>	2014-10-15 06:17:38 -0400
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2014-10-27 08:27:27 -0400
commit		c933146a5e41e42ea3eb4f34fa02e201da3f068e (patch)
tree		b5c108df5c2e4756e2c4fc2014a83663bdaba549 /arch/s390/kernel/kprobes.c
parent		f7f242ff004499e0904d3664713dfba01f24c408 (diff)
s390/ftrace,kprobes: allow to patch first instruction
If the function tracer is enabled, allow setting kprobes on the first
instruction of a function (which is the function trace caller): if no
kprobe is set, enabling and disabling function tracing of a function
simply patches the first instruction. It is either a nop (currently an
unconditional branch that skips the mcount block) or a branch to the
ftrace_caller() function.

If a kprobe is placed on a function trace caller instruction, we encode
whether the site actually held the nop or the branch in the remaining
bytes after the breakpoint instruction (an illegal opcode). This is
possible since the instruction used for the nop and the branch is six
bytes long, while the breakpoint is only two bytes. The first two bytes
therefore contain the illegal opcode, and the last four bytes contain
either "0" for nop or "1" for branch. The kprobes code will then
execute/simulate the correct instruction.

Instruction patching for kprobes and the function tracer is always done
with stop_machine(), so there are no races where an instruction is
patched concurrently on a different cpu. Beyond that, the program check
handler, which executes the function trace caller instruction, is never
executed concurrently with any stop_machine() execution.

This allows keeping the fully fault-based kprobes handling, which
generates correct pt_regs contents automatically.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
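As a rough sketch of the layout this encoding relies on, consider the six-byte instruction as a two-field struct (field and constant names follow the companion asm/ftrace.h change in this series; the comments are assumptions drawn from this commit message, not a verbatim copy of the header):

#include <linux/types.h>

struct ftrace_insn {
	u16 opc;	/* first 2 bytes: real opcode, or the 2-byte
			 * breakpoint (illegal opcode) once armed */
	s32 disp;	/* last 4 bytes: branch displacement, or a
			 * marker while the breakpoint is in place */
} __packed;

/* markers stored in disp underneath an armed breakpoint: */
#define KPROBE_ON_FTRACE_NOP	0	/* site held the nop */
#define KPROBE_ON_FTRACE_CALL	1	/* site held the branch to ftrace_caller() */

With this layout, swap_instruction() in the diff below writes either just the two opc bytes (ordinary kprobe) or all six bytes (kprobe on an ftrace call site), and resume_execution() reads disp back to decide which instruction to simulate.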
Diffstat (limited to 'arch/s390/kernel/kprobes.c')
-rw-r--r--	arch/s390/kernel/kprobes.c	92
1 file changed, 65 insertions(+), 27 deletions(-)
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 014d4729b134..d6716c29b7f8 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -29,6 +29,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/hardirq.h>
+#include <linux/ftrace.h>
 #include <asm/cacheflush.h>
 #include <asm/sections.h>
 #include <asm/dis.h>
@@ -60,10 +61,21 @@ struct kprobe_insn_cache kprobe_dmainsn_slots = {
 
 static void __kprobes copy_instruction(struct kprobe *p)
 {
+	unsigned long ip = (unsigned long) p->addr;
 	s64 disp, new_disp;
 	u64 addr, new_addr;
 
-	memcpy(p->ainsn.insn, p->addr, insn_length(p->opcode >> 8));
+	if (ftrace_location(ip) == ip) {
+		/*
+		 * If kprobes patches the instruction that is morphed by
+		 * ftrace make sure that kprobes always sees the branch
+		 * "jg .+24" that skips the mcount block
+		 */
+		ftrace_generate_nop_insn((struct ftrace_insn *)p->ainsn.insn);
+		p->ainsn.is_ftrace_insn = 1;
+	} else
+		memcpy(p->ainsn.insn, p->addr, insn_length(p->opcode >> 8));
+	p->opcode = p->ainsn.insn[0];
 	if (!probe_is_insn_relative_long(p->ainsn.insn))
 		return;
 	/*
@@ -85,18 +97,6 @@ static inline int is_kernel_addr(void *addr)
 	return addr < (void *)_end;
 }
 
-static inline int is_module_addr(void *addr)
-{
-#ifdef CONFIG_64BIT
-	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
-	if (addr < (void *)MODULES_VADDR)
-		return 0;
-	if (addr > (void *)MODULES_END)
-		return 0;
-#endif
-	return 1;
-}
-
 static int __kprobes s390_get_insn_slot(struct kprobe *p)
 {
 	/*
@@ -132,43 +132,63 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 		return -EINVAL;
 	if (s390_get_insn_slot(p))
 		return -ENOMEM;
-	p->opcode = *p->addr;
 	copy_instruction(p);
 	return 0;
 }
 
-struct ins_replace_args {
-	kprobe_opcode_t *ptr;
-	kprobe_opcode_t opcode;
+int arch_check_ftrace_location(struct kprobe *p)
+{
+	return 0;
+}
+
+struct swap_insn_args {
+	struct kprobe *p;
+	unsigned int arm_kprobe : 1;
 };
 
-static int __kprobes swap_instruction(void *aref)
+static int __kprobes swap_instruction(void *data)
 {
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
 	unsigned long status = kcb->kprobe_status;
-	struct ins_replace_args *args = aref;
+	struct swap_insn_args *args = data;
+	struct ftrace_insn new_insn, *insn;
+	struct kprobe *p = args->p;
+	size_t len;
 
+	new_insn.opc = args->arm_kprobe ? BREAKPOINT_INSTRUCTION : p->opcode;
+	len = sizeof(new_insn.opc);
+	if (!p->ainsn.is_ftrace_insn)
+		goto skip_ftrace;
+	len = sizeof(new_insn);
+	insn = (struct ftrace_insn *) p->addr;
+	if (args->arm_kprobe) {
+		if (is_ftrace_nop(insn))
+			new_insn.disp = KPROBE_ON_FTRACE_NOP;
+		else
+			new_insn.disp = KPROBE_ON_FTRACE_CALL;
+	} else {
+		ftrace_generate_call_insn(&new_insn, (unsigned long)p->addr);
+		if (insn->disp == KPROBE_ON_FTRACE_NOP)
+			ftrace_generate_nop_insn(&new_insn);
+	}
+skip_ftrace:
 	kcb->kprobe_status = KPROBE_SWAP_INST;
-	probe_kernel_write(args->ptr, &args->opcode, sizeof(args->opcode));
+	probe_kernel_write(p->addr, &new_insn, len);
 	kcb->kprobe_status = status;
 	return 0;
 }
 
 void __kprobes arch_arm_kprobe(struct kprobe *p)
 {
-	struct ins_replace_args args;
+	struct swap_insn_args args = {.p = p, .arm_kprobe = 1};
 
-	args.ptr = p->addr;
-	args.opcode = BREAKPOINT_INSTRUCTION;
 	stop_machine(swap_instruction, &args, NULL);
 }
 
 void __kprobes arch_disarm_kprobe(struct kprobe *p)
 {
-	struct ins_replace_args args;
+	struct swap_insn_args args = {.p = p, .arm_kprobe = 0};
 
-	args.ptr = p->addr;
-	args.opcode = p->opcode;
 	stop_machine(swap_instruction, &args, NULL);
 }
 
@@ -459,6 +479,24 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
 	unsigned long ip = regs->psw.addr & PSW_ADDR_INSN;
 	int fixup = probe_get_fixup_type(p->ainsn.insn);
 
+	/* Check if the kprobes location is an enabled ftrace caller */
+	if (p->ainsn.is_ftrace_insn) {
+		struct ftrace_insn *insn = (struct ftrace_insn *) p->addr;
+		struct ftrace_insn call_insn;
+
+		ftrace_generate_call_insn(&call_insn, (unsigned long) p->addr);
+		/*
+		 * A kprobe on an enabled ftrace call site actually single
+		 * stepped an unconditional branch (ftrace nop equivalent).
+		 * Now we need to fixup things and pretend that a brasl r0,...
+		 * was executed instead.
+		 */
+		if (insn->disp == KPROBE_ON_FTRACE_CALL) {
+			ip += call_insn.disp * 2 - MCOUNT_INSN_SIZE;
+			regs->gprs[0] = (unsigned long)p->addr + sizeof(*insn);
+		}
+	}
+
 	if (fixup & FIXUP_PSW_NORMAL)
 		ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;
 
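To see why this fixup arithmetic lands on ftrace_caller(), recall that the probe single-stepped the copied "jg .+24" nop out of line, so ip ends up MCOUNT_INSN_SIZE past the start of the insn slot. A self-contained walk-through with made-up addresses (the 24-byte MCOUNT_INSN_SIZE and all symbol values are illustrative assumptions; the halfword disp encoding follows ftrace_generate_call_insn()):

#include <assert.h>
#include <stdint.h>

#define MCOUNT_INSN_SIZE 24UL		/* assumed mcount block size */

int main(void)
{
	uintptr_t probe_addr  = 0x1000;	/* stands in for p->addr       */
	uintptr_t insn_slot   = 0x9000;	/* stands in for p->ainsn.insn */
	uintptr_t ftrace_addr = 0x5000;	/* stands in for ftrace_caller */

	/* halfword displacement a brasl at probe_addr would carry */
	int32_t disp = (int32_t)((ftrace_addr - probe_addr) / 2);

	/* after single-stepping the copied "jg .+24" nop: */
	uintptr_t ip = insn_slot + MCOUNT_INSN_SIZE;

	ip += disp * 2 - MCOUNT_INSN_SIZE;	/* the ftrace fixup above */
	ip += probe_addr - insn_slot;		/* FIXUP_PSW_NORMAL       */
	assert(ip == ftrace_addr);		/* PSW now at ftrace_caller() */

	/* and r0 holds the return address a real brasl would have set: */
	uintptr_t gpr0 = probe_addr + 6;	/* sizeof(struct ftrace_insn) */
	(void)gpr0;
	return 0;
}

The net effect is that the probed site behaves as if "brasl %r0,ftrace_caller" had executed: the PSW points at ftrace_caller() and r0 points just past the six-byte call site, which is exactly the pt_regs content the fault-based kprobes handling expects.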