author		Linus Torvalds <torvalds@linux-foundation.org>	2015-02-11 20:42:32 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-02-11 20:42:32 -0500
commit		b3d6524ff7956c5a898d51a18eaecb62a60a2b84 (patch)
tree		cc049e7ec9edd9f5a76f286e04d8db9a1caa516a /arch/s390/kernel/ftrace.c
parent		07f80d41cf24b7e6e76cd97d420167932c9a7f82 (diff)
parent		6a039eab53c01a58bfff95c78fc800ca7de27c77 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Martin Schwidefsky:

 - The remaining patches for the z13 machine support: kernel build
   option for z13, the cache synonym avoidance, SMT support,
   compare-and-delay for spinloops and the CEX5S crypto adapter.

 - The ftrace support for function tracing with the gcc hotpatch option.
   This touches common code Makefiles; Steven is ok with the changes.

 - The hypfs file system gets an extension to access diagnose 0x0c data
   in user space for performance analysis for Linux running under z/VM.

 - The iucv hvc console gets wildcard support for the user id filtering.

 - The cacheinfo code is converted to use the generic infrastructure.

 - Cleanup and bug fixes.

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (42 commits)
  s390/process: free vx save area when releasing tasks
  s390/hypfs: Eliminate hypfs interval
  s390/hypfs: Add diagnose 0c support
  s390/cacheinfo: don't use smp_processor_id() in preemptible context
  s390/zcrypt: fixed domain scanning problem (again)
  s390/smp: increase maximum value of NR_CPUS to 512
  s390/jump label: use different nop instruction
  s390/jump label: add sanity checks
  s390/mm: correct missing space when reporting user process faults
  s390/dasd: cleanup profiling
  s390/dasd: add locking for global_profile access
  s390/ftrace: hotpatch support for function tracing
  ftrace: let notrace function attribute disable hotpatching if necessary
  ftrace: allow architectures to specify ftrace compile options
  s390: reintroduce diag 44 calls for cpu_relax()
  s390/zcrypt: Add support for new crypto express (CEX5S) adapter.
  s390/zcrypt: Number of supported ap domains is not retrievable.
  s390/spinlock: add compare-and-delay to lock wait loops
  s390/tape: remove redundant if statement
  s390/hvc_iucv: add simple wildcard matches to the iucv allow filter
  ...
Diffstat (limited to 'arch/s390/kernel/ftrace.c')
-rw-r--r--	arch/s390/kernel/ftrace.c	108
1 file changed, 62 insertions(+), 46 deletions(-)
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index b86bb8823f15..82c19899574f 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -46,6 +46,13 @@
  *	lg	%r14,8(%r15)		# offset 18
  * The jg instruction branches to offset 24 to skip as many instructions
  * as possible.
+ * In case we use gcc's hotpatch feature the original and also the disabled
+ * function prologue contains only a single six byte instruction and looks
+ * like this:
+ * >	brcl	0,0			# offset 0
+ * To enable ftrace the code gets patched like above and afterwards looks
+ * like this:
+ * >	brasl	%r0,ftrace_caller	# offset 0
  */
 
 unsigned long ftrace_plt;
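The comment added in the hunk above describes the two six-byte prologue forms used with gcc's hotpatch option: brcl 0,0 as the disabled (nop) state and brasl %r0,ftrace_caller as the enabled state. The following stand-alone C sketch models those two encodings; the struct layout, helper names and addresses are assumptions made for this illustration, not code taken from the patch or the kernel headers.

#include <stdint.h>
#include <stdio.h>

/* Assumed 6-byte layout implied by the patch's opc/disp accesses. */
struct insn6 {
	uint16_t opc;	/* opcode halfword */
	int32_t disp;	/* 32-bit immediate, counted in halfwords */
} __attribute__((packed));

/* "brcl 0,0": a branch-relative-on-condition-long with an all-zero mask
 * never branches, so it works as the six byte hotpatch nop. */
static void gen_hotpatch_nop(struct insn6 *insn)
{
	insn->opc = 0xc004;
	insn->disp = 0;
}

/* "brasl %r0,target": branch relative and save long; the return address
 * is saved in %r0 and the immediate is (target - ip) / 2. */
static void gen_ftrace_call(struct insn6 *insn, uint64_t ip, uint64_t target)
{
	insn->opc = 0xc005;
	insn->disp = (int32_t)(((int64_t)target - (int64_t)ip) / 2);
}

int main(void)
{
	struct insn6 nop, call;

	gen_hotpatch_nop(&nop);
	gen_ftrace_call(&call, 0x10000, 0x2a000);	/* made-up addresses */
	printf("nop : opc=%04x disp=%d\n", (unsigned int)nop.opc, (int)nop.disp);
	printf("call: opc=%04x disp=%d halfwords\n", (unsigned int)call.opc, (int)call.disp);
	return 0;
}

Because the masked branch never fires, gcc can emit it as a patchable six-byte filler at every function entry; enabling the tracer only has to swap the opcode halfword and fill in the relative offset, as the hunk below does.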
@@ -59,62 +66,71 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
 int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
 		    unsigned long addr)
 {
-	struct ftrace_insn insn;
-	unsigned short op;
-	void *from, *to;
-	size_t size;
-
-	ftrace_generate_nop_insn(&insn);
-	size = sizeof(insn);
-	from = &insn;
-	to = (void *) rec->ip;
-	if (probe_kernel_read(&op, (void *) rec->ip, sizeof(op)))
+	struct ftrace_insn orig, new, old;
+
+	if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
 		return -EFAULT;
-	/*
-	 * If we find a breakpoint instruction, a kprobe has been placed
-	 * at the beginning of the function. We write the constant
-	 * KPROBE_ON_FTRACE_NOP into the remaining four bytes of the original
-	 * instruction so that the kprobes handler can execute a nop, if it
-	 * reaches this breakpoint.
-	 */
-	if (op == BREAKPOINT_INSTRUCTION) {
-		size -= 2;
-		from += 2;
-		to += 2;
-		insn.disp = KPROBE_ON_FTRACE_NOP;
+	if (addr == MCOUNT_ADDR) {
+		/* Initial code replacement */
+#ifdef CC_USING_HOTPATCH
+		/* We expect to see brcl 0,0 */
+		ftrace_generate_nop_insn(&orig);
+#else
+		/* We expect to see stg r14,8(r15) */
+		orig.opc = 0xe3e0;
+		orig.disp = 0xf0080024;
+#endif
+		ftrace_generate_nop_insn(&new);
+	} else if (old.opc == BREAKPOINT_INSTRUCTION) {
+		/*
+		 * If we find a breakpoint instruction, a kprobe has been
+		 * placed at the beginning of the function. We write the
+		 * constant KPROBE_ON_FTRACE_NOP into the remaining four
+		 * bytes of the original instruction so that the kprobes
+		 * handler can execute a nop, if it reaches this breakpoint.
+		 */
+		new.opc = orig.opc = BREAKPOINT_INSTRUCTION;
+		orig.disp = KPROBE_ON_FTRACE_CALL;
+		new.disp = KPROBE_ON_FTRACE_NOP;
+	} else {
+		/* Replace ftrace call with a nop. */
+		ftrace_generate_call_insn(&orig, rec->ip);
+		ftrace_generate_nop_insn(&new);
 	}
-	if (probe_kernel_write(to, from, size))
+	/* Verify that the to be replaced code matches what we expect. */
+	if (memcmp(&orig, &old, sizeof(old)))
+		return -EINVAL;
+	if (probe_kernel_write((void *) rec->ip, &new, sizeof(new)))
 		return -EPERM;
 	return 0;
 }
 
 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 {
-	struct ftrace_insn insn;
-	unsigned short op;
-	void *from, *to;
-	size_t size;
-
-	ftrace_generate_call_insn(&insn, rec->ip);
-	size = sizeof(insn);
-	from = &insn;
-	to = (void *) rec->ip;
-	if (probe_kernel_read(&op, (void *) rec->ip, sizeof(op)))
+	struct ftrace_insn orig, new, old;
+
+	if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
 		return -EFAULT;
-	/*
-	 * If we find a breakpoint instruction, a kprobe has been placed
-	 * at the beginning of the function. We write the constant
-	 * KPROBE_ON_FTRACE_CALL into the remaining four bytes of the original
-	 * instruction so that the kprobes handler can execute a brasl if it
-	 * reaches this breakpoint.
-	 */
-	if (op == BREAKPOINT_INSTRUCTION) {
-		size -= 2;
-		from += 2;
-		to += 2;
-		insn.disp = KPROBE_ON_FTRACE_CALL;
+	if (old.opc == BREAKPOINT_INSTRUCTION) {
+		/*
+		 * If we find a breakpoint instruction, a kprobe has been
+		 * placed at the beginning of the function. We write the
+		 * constant KPROBE_ON_FTRACE_CALL into the remaining four
+		 * bytes of the original instruction so that the kprobes
+		 * handler can execute a brasl if it reaches this breakpoint.
+		 */
+		new.opc = orig.opc = BREAKPOINT_INSTRUCTION;
+		orig.disp = KPROBE_ON_FTRACE_NOP;
+		new.disp = KPROBE_ON_FTRACE_CALL;
+	} else {
+		/* Replace nop with an ftrace call. */
+		ftrace_generate_nop_insn(&orig);
+		ftrace_generate_call_insn(&new, rec->ip);
 	}
-	if (probe_kernel_write(to, from, size))
+	/* Verify that the to be replaced code matches what we expect. */
+	if (memcmp(&orig, &old, sizeof(old)))
+		return -EINVAL;
+	if (probe_kernel_write((void *) rec->ip, &new, sizeof(new)))
 		return -EPERM;
 	return 0;
 }
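Both converted functions now share the same read/verify/patch flow: read the six bytes at rec->ip, construct the instruction that is expected to be there (initial prologue, kprobe breakpoint, or the previously patched nop/call), bail out with -EINVAL on a mismatch, and only then write the replacement. A minimal user-space sketch of that flow follows, with made-up values, a byte buffer standing in for kernel text, and plain memcpy()/memcmp() in place of probe_kernel_read()/probe_kernel_write(); it is an illustration of the pattern, not kernel code.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Assumed 6-byte instruction layout, as in the sketch above. */
struct insn6 {
	uint16_t opc;
	int32_t disp;
} __attribute__((packed));

/*
 * Read what currently sits at the patch site, compare it against the
 * instruction we expect to find there, and only write the replacement
 * when the two match.
 */
static int patch_insn(uint8_t *code, const struct insn6 *expected,
		      const struct insn6 *replacement)
{
	struct insn6 old;

	memcpy(&old, code, sizeof(old));		/* probe_kernel_read() */
	if (memcmp(expected, &old, sizeof(old)))
		return -EINVAL;				/* unexpected code: refuse to patch */
	memcpy(code, replacement, sizeof(*replacement));/* probe_kernel_write() */
	return 0;
}

int main(void)
{
	struct insn6 nop = { .opc = 0xc004, .disp = 0 };	/* brcl 0,0 */
	struct insn6 call = { .opc = 0xc005, .disp = 0x1234 };	/* brasl %r0,... */
	uint8_t text[6];

	memcpy(text, &nop, sizeof(text));	/* prologue starts out disabled */
	printf("enable : %d\n", patch_insn(text, &nop, &call));	/* 0 */
	printf("enable : %d\n", patch_insn(text, &nop, &call));	/* -EINVAL: already a call */
	printf("disable: %d\n", patch_insn(text, &call, &nop));	/* 0 */
	return 0;
}

Compared with the old code, which only inspected the first halfword for a kprobe breakpoint and then wrote unconditionally, verifying the full six bytes first turns unexpected text at the patch site into an -EINVAL instead of silently overwriting it.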