author		Michael Ellerman <mpe@ellerman.id.au>	2016-07-19 00:48:30 -0400
committer	Michael Ellerman <mpe@ellerman.id.au>	2016-07-21 06:10:37 -0400
commit		9d636109511a000882f8dff4eaafa874eec5ece8 (patch)
tree		321f23e7a84c48b008f5996238d67e6db1dc07fd /arch/powerpc
parent		b1923caa6e641f3d0a93b5d045aef67ded5aef67 (diff)
powerpc/ftrace: Separate the heuristics for checking call sites
In __ftrace_make_nop() (the 64-bit version), we have code to deal with two ftrace ABIs. There is the original ABI, which looks mostly like a function call, and then the mprofile-kernel ABI, which is just a branch.

The code tries to handle both cases by looking for the presence of a load to restore the TOC pointer (PPC_INST_LD_TOC). If we detect the TOC load, we assume the call site is for an mcount() call using the old ABI. That means we patch the mcount() call with a b +8, to branch over the TOC load.

However, if the kernel was built with mprofile-kernel, there will never be a call site using the original ftrace ABI. If for some reason we do see a TOC load, then it's there for a good reason, and we should not jump over it.

So split the code, using the existing CC_USING_MPROFILE_KERNEL define. Kernels built with mprofile-kernel will only look for, and expect, the new ABI; kernels built without it will only handle the original ABI.

Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
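For illustration, here is a minimal userspace sketch of the two-ABI decision described above. The PPC_INST_* constants mirror the kernel's instruction encodings (the TOC-restore offset shown is the ELFv1 one); check_site() and all the scaffolding around it are invented for this example and are not the kernel's API — the real logic lives in __ftrace_make_nop().

/*
 * Userspace sketch only: check_site() is a hypothetical helper.
 * Opcode constants follow the kernel's PPC_INST_* encodings.
 */
#include <stdint.h>
#include <stdio.h>

#define PPC_INST_NOP		0x60000000u	/* nop */
#define PPC_INST_MFLR		0x7c0802a6u	/* mflr r0 */
#define PPC_INST_STD_LR		0xf8010010u	/* std r0,16(r1) */
#define PPC_INST_LD_TOC		0xe8410018u	/* ld r2,24(r1), ELFv1 offset */
#define PPC_INST_BRANCH_8	0x48000008u	/* b +8 */

/* Pick the instruction to patch over "bl _mcount"; returns -1 on an
 * unexpected call site. */
static int check_site(int mprofile_kernel, uint32_t prev, uint32_t next,
		      uint32_t *pop)
{
	if (mprofile_kernel) {
		/* New ABI: a mflr r0 or std r0,LRSAVE(r1) precedes the
		 * bl, and there is no TOC load to jump over, so a plain
		 * nop is enough. */
		if (prev != PPC_INST_MFLR && prev != PPC_INST_STD_LR)
			return -1;
		*pop = PPC_INST_NOP;
	} else {
		/* Old ABI: the TOC restore follows the bl. Branch over
		 * it with b +8 instead of nopping, so a preempted task
		 * can still restore r2. */
		if (next != PPC_INST_LD_TOC)
			return -1;
		*pop = PPC_INST_BRANCH_8;
	}
	return 0;
}

int main(void)
{
	uint32_t pop;

	if (check_site(1, PPC_INST_MFLR, 0, &pop) == 0)
		printf("mprofile-kernel site: patch with %08x\n", pop);
	if (check_site(0, 0, PPC_INST_LD_TOC, &pop) == 0)
		printf("original ABI site:    patch with %08x\n", pop);
	return 0;
}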
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/kernel/ftrace.c	35
1 file changed, 18 insertions(+), 17 deletions(-)
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
index 7af6c4de044b..cc52d9795f88 100644
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -144,6 +144,21 @@ __ftrace_make_nop(struct module *mod,
 		return -EINVAL;
 	}
 
+#ifdef CC_USING_MPROFILE_KERNEL
+	/* When using -mkernel_profile there is no load to jump over */
+	pop = PPC_INST_NOP;
+
+	if (probe_kernel_read(&op, (void *)(ip - 4), 4)) {
+		pr_err("Fetching instruction at %lx failed.\n", ip - 4);
+		return -EFAULT;
+	}
+
+	/* We expect either a mflr r0, or a std r0, LRSAVE(r1) */
+	if (op != PPC_INST_MFLR && op != PPC_INST_STD_LR) {
+		pr_err("Unexpected instruction %08x around bl _mcount\n", op);
+		return -EINVAL;
+	}
+#else
 	/*
 	 * Our original call site looks like:
 	 *
@@ -170,24 +185,10 @@ __ftrace_make_nop(struct module *mod,
 	}
 
 	if (op != PPC_INST_LD_TOC) {
-		unsigned int inst;
-
-		if (probe_kernel_read(&inst, (void *)(ip - 4), 4)) {
-			pr_err("Fetching instruction at %lx failed.\n", ip - 4);
-			return -EFAULT;
-		}
-
-		/* We expect either a mlfr r0, or a std r0, LRSAVE(r1) */
-		if (inst != PPC_INST_MFLR && inst != PPC_INST_STD_LR) {
-			pr_err("Unexpected instructions around bl _mcount\n"
-			       "when enabling dynamic ftrace!\t"
-			       "(%08x,bl,%08x)\n", inst, op);
-			return -EINVAL;
-		}
-
-		/* When using -mkernel_profile there is no load to jump over */
-		pop = PPC_INST_NOP;
+		pr_err("Expected %08x found %08x\n", PPC_INST_LD_TOC, op);
+		return -EINVAL;
 	}
+#endif /* CC_USING_MPROFILE_KERNEL */
 
 	if (patch_instruction((unsigned int *)ip, pop)) {
 		pr_err("Patching NOP failed.\n");