author    Heiko Carstens <heiko.carstens@de.ibm.com>  2014-10-15 06:17:38 -0400
committer Martin Schwidefsky <schwidefsky@de.ibm.com>  2014-10-27 08:27:27 -0400
commit    c933146a5e41e42ea3eb4f34fa02e201da3f068e (patch)
tree      b5c108df5c2e4756e2c4fc2014a83663bdaba549 /arch
parent    f7f242ff004499e0904d3664713dfba01f24c408 (diff)
s390/ftrace,kprobes: allow to patch first instruction
If the function tracer is enabled, allow setting kprobes on the first instruction of a function (which is the function trace caller): if no kprobe is set, enabling and disabling function tracing of a function simply patches the first instruction. Either it is a nop (right now an unconditional branch, which skips the mcount block), or it is a branch to the ftrace_caller() function.

If a kprobe is placed on a function trace calling instruction, we encode in the remaining bytes after the breakpoint instruction (an illegal opcode) whether the site actually held a nop or a branch. This is possible since the instruction used for the nop and the branch is six bytes long, while the breakpoint is only two bytes. Therefore the first two bytes contain the illegal opcode and the last four bytes contain either "0" for nop or "1" for branch. The kprobes code will then execute/simulate the correct instruction.

Instruction patching for kprobes and the function tracer is always done with stop_machine(), so there are no races where an instruction is patched concurrently on a different cpu. Besides that, the program check handler which executes the function trace caller instruction also won't run concurrently with any stop_machine() execution. This allows us to keep the full fault-based kprobes handling, which generates correct pt_regs contents automatically.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
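The two-byte/six-byte size asymmetry described above is the core of the trick. A minimal sketch of the resulting encoding, using the struct ftrace_insn type and the KPROBE_ON_FTRACE_* constants this patch introduces (the helper function itself is illustrative only, not part of the patch):

#include <linux/ftrace.h>	/* struct ftrace_insn, KPROBE_ON_FTRACE_* */
#include <linux/types.h>

/*
 * A patched ftrace call site is a single six-byte instruction:
 *   bytes 0-1: opcode ("jg" 0xc0f4 for the nop, "brasl" 0xc005 for the call)
 *   bytes 2-5: signed displacement, counted in halfwords
 * Arming a kprobe overwrites bytes 0-1 with the two-byte breakpoint
 * (illegal opcode), which frees bytes 2-5 to carry a marker instead:
 *   KPROBE_ON_FTRACE_NOP  (0) - the site held the nop
 *   KPROBE_ON_FTRACE_CALL (1) - the site held the branch to ftrace_caller
 */
static bool probed_site_was_ftrace_call(struct ftrace_insn *site)
{
	/* illustrative helper; site->opc holds the breakpoint here */
	return site->disp == KPROBE_ON_FTRACE_CALL;
}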
Diffstat (limited to 'arch')
-rw-r--r--  arch/s390/include/asm/ftrace.h   |  52
-rw-r--r--  arch/s390/include/asm/kprobes.h  |   1
-rw-r--r--  arch/s390/include/asm/lowcore.h  |   4
-rw-r--r--  arch/s390/include/asm/pgtable.h  |  12
-rw-r--r--  arch/s390/kernel/asm-offsets.c   |   1
-rw-r--r--  arch/s390/kernel/early.c         |   4
-rw-r--r--  arch/s390/kernel/ftrace.c        | 132
-rw-r--r--  arch/s390/kernel/kprobes.c       |  92
-rw-r--r--  arch/s390/kernel/mcount.S        |   1
-rw-r--r--  arch/s390/kernel/setup.c         |   2
-rw-r--r--  arch/s390/kernel/smp.c           |   1
11 files changed, 212 insertions(+), 90 deletions(-)
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index 3aef8afec336..785041f1dc77 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -1,25 +1,67 @@
 #ifndef _ASM_S390_FTRACE_H
 #define _ASM_S390_FTRACE_H
 
+#define ARCH_SUPPORTS_FTRACE_OPS 1
+
+#define MCOUNT_INSN_SIZE	24
+#define MCOUNT_RETURN_FIXUP	18
+
 #ifndef __ASSEMBLY__
 
-extern void _mcount(void);
+void _mcount(void);
+void ftrace_caller(void);
+
 extern char ftrace_graph_caller_end;
+extern unsigned long ftrace_plt;
 
 struct dyn_arch_ftrace { };
 
-#define MCOUNT_ADDR ((long)_mcount)
+#define MCOUNT_ADDR ((unsigned long)_mcount)
+#define FTRACE_ADDR ((unsigned long)ftrace_caller)
 
+#define KPROBE_ON_FTRACE_NOP	0
+#define KPROBE_ON_FTRACE_CALL	1
 
 static inline unsigned long ftrace_call_adjust(unsigned long addr)
 {
 	return addr;
 }
 
-#endif /* __ASSEMBLY__ */
+struct ftrace_insn {
+	u16 opc;
+	s32 disp;
+} __packed;
 
-#define MCOUNT_INSN_SIZE  18
+static inline void ftrace_generate_nop_insn(struct ftrace_insn *insn)
+{
+#ifdef CONFIG_FUNCTION_TRACER
+	/* jg .+24 */
+	insn->opc = 0xc0f4;
+	insn->disp = MCOUNT_INSN_SIZE / 2;
+#endif
+}
 
-#define ARCH_SUPPORTS_FTRACE_OPS 1
+static inline int is_ftrace_nop(struct ftrace_insn *insn)
+{
+#ifdef CONFIG_FUNCTION_TRACER
+	if (insn->disp == MCOUNT_INSN_SIZE / 2)
+		return 1;
+#endif
+	return 0;
+}
+
+static inline void ftrace_generate_call_insn(struct ftrace_insn *insn,
+					     unsigned long ip)
+{
+#ifdef CONFIG_FUNCTION_TRACER
+	unsigned long target;
 
+	/* brasl r0,ftrace_caller */
+	target = is_module_addr((void *) ip) ? ftrace_plt : FTRACE_ADDR;
+	insn->opc = 0xc005;
+	insn->disp = (target - ip) / 2;
+#endif
+}
+
+#endif /* __ASSEMBLY__ */
 #endif /* _ASM_S390_FTRACE_H */
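Taken together, these inline helpers give ftrace and kprobes one shared view of a patch site. A rough sketch of how they combine (site_is_nop() is a hypothetical helper; the real users are ftrace_make_nop()/ftrace_make_call() further below):

#include <linux/ftrace.h>
#include <linux/uaccess.h>

/* Returns 1 if the site currently holds the nop, 0 if it holds the
 * brasl, or a negative error if the read faults. */
static int site_is_nop(unsigned long ip)
{
	struct ftrace_insn cur;

	if (probe_kernel_read(&cur, (void *) ip, sizeof(cur)))
		return -EFAULT;
	/* disp == MCOUNT_INSN_SIZE / 2 identifies the "jg .+24" nop */
	return is_ftrace_nop(&cur);
}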
diff --git a/arch/s390/include/asm/kprobes.h b/arch/s390/include/asm/kprobes.h
index 98629173ce3b..b47ad3b642cc 100644
--- a/arch/s390/include/asm/kprobes.h
+++ b/arch/s390/include/asm/kprobes.h
@@ -60,6 +60,7 @@ typedef u16 kprobe_opcode_t;
 struct arch_specific_insn {
 	/* copy of original instruction */
 	kprobe_opcode_t *insn;
+	unsigned int is_ftrace_insn : 1;
 };
 
 struct prev_kprobe {
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 6cc51fe84410..34fbcac61133 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -147,7 +147,7 @@ struct _lowcore {
 	__u32	softirq_pending;		/* 0x02ec */
 	__u32	percpu_offset;			/* 0x02f0 */
 	__u32	machine_flags;			/* 0x02f4 */
-	__u32	ftrace_func;			/* 0x02f8 */
+	__u8	pad_0x02f8[0x02fc-0x02f8];	/* 0x02f8 */
 	__u32	spinlock_lockval;		/* 0x02fc */
 
 	__u8	pad_0x0300[0x0e00-0x0300];	/* 0x0300 */
@@ -297,7 +297,7 @@ struct _lowcore {
 	__u64	percpu_offset;			/* 0x0378 */
 	__u64	vdso_per_cpu_data;		/* 0x0380 */
 	__u64	machine_flags;			/* 0x0388 */
-	__u64	ftrace_func;			/* 0x0390 */
+	__u8	pad_0x0390[0x0398-0x0390];	/* 0x0390 */
 	__u64	gmap;				/* 0x0398 */
 	__u32	spinlock_lockval;		/* 0x03a0 */
 	__u8	pad_0x03a0[0x0400-0x03a4];	/* 0x03a4 */
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 00d460742e1e..5ef1a266936a 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -133,6 +133,18 @@ extern unsigned long MODULES_END;
 #define MODULES_LEN	(1UL << 31)
 #endif
 
+static inline int is_module_addr(void *addr)
+{
+#ifdef CONFIG_64BIT
+	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
+	if (addr < (void *)MODULES_VADDR)
+		return 0;
+	if (addr > (void *)MODULES_END)
+		return 0;
+#endif
+	return 1;
+}
+
 /*
  * A 31 bit pagetable entry of S390 has following format:
  *  |   PFRA          |    |  OS  |
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index ef279a136801..f3a78337ca86 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -156,7 +156,6 @@ int main(void)
 	DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock));
 	DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock));
 	DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags));
-	DEFINE(__LC_FTRACE_FUNC, offsetof(struct _lowcore, ftrace_func));
 	DEFINE(__LC_DUMP_REIPL, offsetof(struct _lowcore, ipib));
 	BLANK();
 	DEFINE(__LC_CPU_TIMER_SAVE_AREA, offsetof(struct _lowcore, cpu_timer_save_area));
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index cef2879edff3..302ac1f7f8e7 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -12,7 +12,6 @@
 #include <linux/errno.h>
 #include <linux/string.h>
 #include <linux/ctype.h>
-#include <linux/ftrace.h>
 #include <linux/lockdep.h>
 #include <linux/module.h>
 #include <linux/pfn.h>
@@ -490,8 +489,5 @@ void __init startup_init(void)
 	detect_machine_facilities();
 	setup_topology();
 	sclp_early_detect();
-#ifdef CONFIG_DYNAMIC_FTRACE
-	S390_lowcore.ftrace_func = (unsigned long)ftrace_caller;
-#endif
 	lockdep_on();
 }
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 51d14fe5eb9a..5744d25c1d33 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -7,6 +7,7 @@
  *		Martin Schwidefsky <schwidefsky@de.ibm.com>
  */
 
+#include <linux/moduleloader.h>
 #include <linux/hardirq.h>
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
@@ -15,60 +16,39 @@
 #include <linux/kprobes.h>
 #include <trace/syscall.h>
 #include <asm/asm-offsets.h>
+#include <asm/cacheflush.h>
 #include "entry.h"
 
-void mcount_replace_code(void);
-void ftrace_disable_code(void);
-void ftrace_enable_insn(void);
-
 /*
  * The mcount code looks like this:
  *	stg	%r14,8(%r15)		# offset 0
  *	larl	%r1,<&counter>		# offset 6
  *	brasl	%r14,_mcount		# offset 12
  *	lg	%r14,8(%r15)		# offset 18
- * Total length is 24 bytes. The complete mcount block initially gets replaced
- * by ftrace_make_nop. Subsequent calls to ftrace_make_call / ftrace_make_nop
- * only patch the jg/lg instruction within the block.
- * Note: we do not patch the first instruction to an unconditional branch,
- * since that would break kprobes/jprobes. It is easier to leave the larl
- * instruction in and only modify the second instruction.
+ * Total length is 24 bytes. Only the first instruction will be patched
+ * by ftrace_make_call / ftrace_make_nop.
  * The enabled ftrace code block looks like this:
- *	larl	%r0,.+24		# offset 0
- * >	lg	%r1,__LC_FTRACE_FUNC	# offset 6
- *	br	%r1			# offset 12
- *	brcl	0,0			# offset 14
- *	brc	0,0			# offset 20
+ * >	brasl	%r0,ftrace_caller	# offset 0
+ *	larl	%r1,<&counter>		# offset 6
+ *	brasl	%r14,_mcount		# offset 12
+ *	lg	%r14,8(%r15)		# offset 18
  * The ftrace function gets called with a non-standard C function call ABI
  * where r0 contains the return address. It is also expected that the called
  * function only clobbers r0 and r1, but restores r2-r15.
+ * For module code we can't directly jump to ftrace caller, but need a
+ * trampoline (ftrace_plt), which clobbers also r1.
  * The return point of the ftrace function has offset 24, so execution
  * continues behind the mcount block.
- *	larl	%r0,.+24		# offset 0
- * >	jg	.+18			# offset 6
- *	br	%r1			# offset 12
- *	brcl	0,0			# offset 14
- *	brc	0,0			# offset 20
+ * The disabled ftrace code block looks like this:
+ * >	jg	.+24			# offset 0
+ *	larl	%r1,<&counter>		# offset 6
+ *	brasl	%r14,_mcount		# offset 12
+ *	lg	%r14,8(%r15)		# offset 18
  * The jg instruction branches to offset 24 to skip as many instructions
  * as possible.
  */
-asm(
-	"	.align	4\n"
-	"mcount_replace_code:\n"
-	"	larl	%r0,0f\n"
-	"ftrace_disable_code:\n"
-	"	jg	0f\n"
-	"	br	%r1\n"
-	"	brcl	0,0\n"
-	"	brc	0,0\n"
-	"0:\n"
-	"	.align	4\n"
-	"ftrace_enable_insn:\n"
-	"	lg	%r1,"__stringify(__LC_FTRACE_FUNC)"\n");
-
-#define MCOUNT_BLOCK_SIZE	24
-#define MCOUNT_INSN_OFFSET	6
-#define FTRACE_INSN_SIZE	6
+
+unsigned long ftrace_plt;
 
 int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
 		       unsigned long addr)
@@ -79,24 +59,62 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
 int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
 		    unsigned long addr)
 {
-	/* Initial replacement of the whole mcount block */
-	if (addr == MCOUNT_ADDR) {
-		if (probe_kernel_write((void *) rec->ip - MCOUNT_INSN_OFFSET,
-				       mcount_replace_code,
-				       MCOUNT_BLOCK_SIZE))
-			return -EPERM;
-		return 0;
+	struct ftrace_insn insn;
+	unsigned short op;
+	void *from, *to;
+	size_t size;
+
+	ftrace_generate_nop_insn(&insn);
+	size = sizeof(insn);
+	from = &insn;
+	to = (void *) rec->ip;
+	if (probe_kernel_read(&op, (void *) rec->ip, sizeof(op)))
+		return -EFAULT;
+	/*
+	 * If we find a breakpoint instruction, a kprobe has been placed
+	 * at the beginning of the function. We write the constant
+	 * KPROBE_ON_FTRACE_NOP into the remaining four bytes of the original
+	 * instruction so that the kprobes handler can execute a nop, if it
+	 * reaches this breakpoint.
+	 */
+	if (op == BREAKPOINT_INSTRUCTION) {
+		size -= 2;
+		from += 2;
+		to += 2;
+		insn.disp = KPROBE_ON_FTRACE_NOP;
 	}
-	if (probe_kernel_write((void *) rec->ip, ftrace_disable_code,
-			       MCOUNT_INSN_SIZE))
+	if (probe_kernel_write(to, from, size))
 		return -EPERM;
 	return 0;
 }
 
 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 {
-	if (probe_kernel_write((void *) rec->ip, ftrace_enable_insn,
-			       FTRACE_INSN_SIZE))
+	struct ftrace_insn insn;
+	unsigned short op;
+	void *from, *to;
+	size_t size;
+
+	ftrace_generate_call_insn(&insn, rec->ip);
+	size = sizeof(insn);
+	from = &insn;
+	to = (void *) rec->ip;
+	if (probe_kernel_read(&op, (void *) rec->ip, sizeof(op)))
+		return -EFAULT;
+	/*
+	 * If we find a breakpoint instruction, a kprobe has been placed
+	 * at the beginning of the function. We write the constant
+	 * KPROBE_ON_FTRACE_CALL into the remaining four bytes of the original
+	 * instruction so that the kprobes handler can execute a brasl if it
+	 * reaches this breakpoint.
+	 */
+	if (op == BREAKPOINT_INSTRUCTION) {
+		size -= 2;
+		from += 2;
+		to += 2;
+		insn.disp = KPROBE_ON_FTRACE_CALL;
+	}
+	if (probe_kernel_write(to, from, size))
 		return -EPERM;
 	return 0;
 }
@@ -111,6 +129,24 @@ int __init ftrace_dyn_arch_init(void)
 	return 0;
 }
 
+static int __init ftrace_plt_init(void)
+{
+	unsigned int *ip;
+
+	ftrace_plt = (unsigned long) module_alloc(PAGE_SIZE);
+	if (!ftrace_plt)
+		panic("cannot allocate ftrace plt\n");
+	ip = (unsigned int *) ftrace_plt;
+	ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
+	ip[1] = 0x100a0004;
+	ip[2] = 0x07f10000;
+	ip[3] = FTRACE_ADDR >> 32;
+	ip[4] = FTRACE_ADDR & 0xffffffff;
+	set_memory_ro(ftrace_plt, 1);
+	return 0;
+}
+device_initcall(ftrace_plt_init);
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 /*
  * Hook the return address and push it in the stack of return addresses
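The three magic words written into the plt above are easier to follow as a byte layout. A decoding of the trampoline (a reading aid derived from the inline comment in the code, not additional code):

/*
 * ftrace_plt byte layout (16 bytes used of the allocated page):
 *
 *   0:  0d 10              basr %r1,0        # %r1 = plt address + 2
 *   2:  e3 10 10 0a 00 04  lg   %r1,10(%r1)  # load 8 bytes from plt + 12
 *   8:  07 f1              br   %r1          # branch to ftrace_caller
 *  10:  00 00              (padding)
 *  12:  FTRACE_ADDR        # ip[3] = high word, ip[4] = low word
 *
 * A brasl from module code can always reach the plt (signed 32-bit halfword
 * displacement, i.e. +-4 GB); the trampoline then performs the full 64-bit
 * jump to ftrace_caller.
 */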
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 014d4729b134..d6716c29b7f8 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -29,6 +29,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/hardirq.h>
+#include <linux/ftrace.h>
 #include <asm/cacheflush.h>
 #include <asm/sections.h>
 #include <asm/dis.h>
@@ -60,10 +61,21 @@ struct kprobe_insn_cache kprobe_dmainsn_slots = {
 
 static void __kprobes copy_instruction(struct kprobe *p)
 {
+	unsigned long ip = (unsigned long) p->addr;
 	s64 disp, new_disp;
 	u64 addr, new_addr;
 
-	memcpy(p->ainsn.insn, p->addr, insn_length(p->opcode >> 8));
+	if (ftrace_location(ip) == ip) {
+		/*
+		 * If kprobes patches the instruction that is morphed by
+		 * ftrace make sure that kprobes always sees the branch
+		 * "jg .+24" that skips the mcount block
+		 */
+		ftrace_generate_nop_insn((struct ftrace_insn *)p->ainsn.insn);
+		p->ainsn.is_ftrace_insn = 1;
+	} else
+		memcpy(p->ainsn.insn, p->addr, insn_length(p->opcode >> 8));
+	p->opcode = p->ainsn.insn[0];
 	if (!probe_is_insn_relative_long(p->ainsn.insn))
 		return;
 	/*
@@ -85,18 +97,6 @@ static inline int is_kernel_addr(void *addr)
 	return addr < (void *)_end;
 }
 
-static inline int is_module_addr(void *addr)
-{
-#ifdef CONFIG_64BIT
-	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
-	if (addr < (void *)MODULES_VADDR)
-		return 0;
-	if (addr > (void *)MODULES_END)
-		return 0;
-#endif
-	return 1;
-}
-
 static int __kprobes s390_get_insn_slot(struct kprobe *p)
 {
 	/*
@@ -132,43 +132,63 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 		return -EINVAL;
 	if (s390_get_insn_slot(p))
 		return -ENOMEM;
-	p->opcode = *p->addr;
 	copy_instruction(p);
 	return 0;
 }
 
-struct ins_replace_args {
-	kprobe_opcode_t *ptr;
-	kprobe_opcode_t opcode;
+int arch_check_ftrace_location(struct kprobe *p)
+{
+	return 0;
+}
+
+struct swap_insn_args {
+	struct kprobe *p;
+	unsigned int arm_kprobe : 1;
 };
 
-static int __kprobes swap_instruction(void *aref)
+static int __kprobes swap_instruction(void *data)
 {
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
 	unsigned long status = kcb->kprobe_status;
-	struct ins_replace_args *args = aref;
-
+	struct swap_insn_args *args = data;
+	struct ftrace_insn new_insn, *insn;
+	struct kprobe *p = args->p;
+	size_t len;
+
+	new_insn.opc = args->arm_kprobe ? BREAKPOINT_INSTRUCTION : p->opcode;
+	len = sizeof(new_insn.opc);
+	if (!p->ainsn.is_ftrace_insn)
+		goto skip_ftrace;
+	len = sizeof(new_insn);
+	insn = (struct ftrace_insn *) p->addr;
+	if (args->arm_kprobe) {
+		if (is_ftrace_nop(insn))
+			new_insn.disp = KPROBE_ON_FTRACE_NOP;
+		else
+			new_insn.disp = KPROBE_ON_FTRACE_CALL;
+	} else {
+		ftrace_generate_call_insn(&new_insn, (unsigned long)p->addr);
+		if (insn->disp == KPROBE_ON_FTRACE_NOP)
+			ftrace_generate_nop_insn(&new_insn);
+	}
+skip_ftrace:
 	kcb->kprobe_status = KPROBE_SWAP_INST;
-	probe_kernel_write(args->ptr, &args->opcode, sizeof(args->opcode));
+	probe_kernel_write(p->addr, &new_insn, len);
 	kcb->kprobe_status = status;
 	return 0;
 }
 
 void __kprobes arch_arm_kprobe(struct kprobe *p)
 {
-	struct ins_replace_args args;
+	struct swap_insn_args args = {.p = p, .arm_kprobe = 1};
 
-	args.ptr = p->addr;
-	args.opcode = BREAKPOINT_INSTRUCTION;
 	stop_machine(swap_instruction, &args, NULL);
 }
 
 void __kprobes arch_disarm_kprobe(struct kprobe *p)
 {
-	struct ins_replace_args args;
+	struct swap_insn_args args = {.p = p, .arm_kprobe = 0};
 
-	args.ptr = p->addr;
-	args.opcode = p->opcode;
 	stop_machine(swap_instruction, &args, NULL);
 }
 
@@ -459,6 +479,24 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
 	unsigned long ip = regs->psw.addr & PSW_ADDR_INSN;
 	int fixup = probe_get_fixup_type(p->ainsn.insn);
 
+	/* Check if the kprobes location is an enabled ftrace caller */
+	if (p->ainsn.is_ftrace_insn) {
+		struct ftrace_insn *insn = (struct ftrace_insn *) p->addr;
+		struct ftrace_insn call_insn;
+
+		ftrace_generate_call_insn(&call_insn, (unsigned long) p->addr);
+		/*
+		 * A kprobe on an enabled ftrace call site actually single
+		 * stepped an unconditional branch (ftrace nop equivalent).
+		 * Now we need to fixup things and pretend that a brasl r0,...
+		 * was executed instead.
+		 */
+		if (insn->disp == KPROBE_ON_FTRACE_CALL) {
+			ip += call_insn.disp * 2 - MCOUNT_INSN_SIZE;
+			regs->gprs[0] = (unsigned long)p->addr + sizeof(*insn);
+		}
+	}
+
 	if (fixup & FIXUP_PSW_NORMAL)
 		ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;
 
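The fixup arithmetic in resume_execution() above is compact; spelled out as a worked example (assuming the single-stepped "jg .+24" copy left the PSW at p->addr + 24):

/*
 * If the live site holds the enabled brasl, pretend it was executed:
 *
 *   brasl target = p->addr + call_insn.disp * 2
 *   ip           = (p->addr + 24) + call_insn.disp * 2 - MCOUNT_INSN_SIZE
 *                = p->addr + call_insn.disp * 2          (the brasl target)
 *   gprs[0]      = p->addr + sizeof(struct ftrace_insn)  (p->addr + 6, the
 *                  return address brasl %r0,... would have set)
 *
 * ftrace_caller later adds MCOUNT_RETURN_FIXUP (18) to %r0, so the traced
 * function eventually returns to p->addr + 24, right behind the mcount block.
 */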
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index 4300ea374826..b6dfc5bfcb89 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -27,6 +27,7 @@ ENTRY(ftrace_caller)
 	.globl ftrace_regs_caller
 	.set ftrace_regs_caller,ftrace_caller
 	lgr	%r1,%r15
+	aghi	%r0,MCOUNT_RETURN_FIXUP
 	aghi	%r15,-STACK_FRAME_SIZE
 	stg	%r1,__SF_BACKCHAIN(%r15)
 	stg	%r1,(STACK_PTREGS_GPRS+15*8)(%r15)
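The added aghi is the counterpart of the brasl now patched in at each call site: %r0 arrives holding the brasl return address (function entry + 6), and MCOUNT_RETURN_FIXUP (18) turns that into the return point behind the 24-byte mcount block. As arithmetic (illustrative, values from the patch):

/*
 * on entry:    %r0 = function + 6      (set by "brasl %r0,ftrace_caller")
 * after aghi:  %r0 = function + 6 + 18 = function + 24
 *                  = first instruction behind the mcount block
 */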
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index e80d9ff9a56d..4e532c67832f 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -41,7 +41,6 @@
 #include <linux/ctype.h>
 #include <linux/reboot.h>
 #include <linux/topology.h>
-#include <linux/ftrace.h>
 #include <linux/kexec.h>
 #include <linux/crash_dump.h>
 #include <linux/memory.h>
@@ -356,7 +355,6 @@ static void __init setup_lowcore(void)
 	lc->steal_timer = S390_lowcore.steal_timer;
 	lc->last_update_timer = S390_lowcore.last_update_timer;
 	lc->last_update_clock = S390_lowcore.last_update_clock;
-	lc->ftrace_func = S390_lowcore.ftrace_func;
 
 	restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0);
 	restart_stack += ASYNC_SIZE;
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 6fd9e60101f1..0b499f5cbe19 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -236,7 +236,6 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
 	lc->percpu_offset = __per_cpu_offset[cpu];
 	lc->kernel_asce = S390_lowcore.kernel_asce;
 	lc->machine_flags = S390_lowcore.machine_flags;
-	lc->ftrace_func = S390_lowcore.ftrace_func;
 	lc->user_timer = lc->system_timer = lc->steal_timer = 0;
 	__ctl_store(lc->cregs_save_area, 0, 15);
 	save_access_regs((unsigned int *) lc->access_regs_save_area);