 Documentation/markers.txt            |  15
 Documentation/tracepoints.txt        |   4
 arch/powerpc/kernel/Makefile         |   1
 arch/powerpc/kernel/entry_32.S       |  40
 arch/powerpc/kernel/entry_64.S       |  12
 arch/powerpc/kernel/ftrace.c         | 182
 arch/powerpc/lib/Makefile            |   3
 fs/seq_file.c                        |   2
 include/linux/marker.h               |   6
 kernel/trace/trace.c                 |   2
 kernel/trace/trace_branch.c          |   1
 kernel/trace/trace_functions_graph.c | 267
 12 files changed, 364 insertions(+), 171 deletions(-)
diff --git a/Documentation/markers.txt b/Documentation/markers.txt
index 6d275e4ef385..d2b3d0e91b26 100644
--- a/Documentation/markers.txt
+++ b/Documentation/markers.txt
@@ -51,11 +51,16 @@ to call) for the specific marker through marker_probe_register() and can be
 activated by calling marker_arm(). Marker deactivation can be done by calling
 marker_disarm() as many times as marker_arm() has been called. Removing a probe
 is done through marker_probe_unregister(); it will disarm the probe.
-marker_synchronize_unregister() must be called before the end of the module exit
-function to make sure there is no caller left using the probe. This, and the
-fact that preemption is disabled around the probe call, make sure that probe
-removal and module unload are safe. See the "Probe example" section below for a
-sample probe module.
+
+marker_synchronize_unregister() must be called between probe unregistration and
+the first occurrence of
+- the end of the module exit function,
+  to make sure there is no caller left using the probe;
+- the free of any resource used by the probes,
+  to make sure the probes won't be accessing invalid data.
+This, and the fact that preemption is disabled around the probe call, make sure
+that probe removal and module unload are safe. See the "Probe example" section
+below for a sample probe module.
 
 The marker mechanism supports inserting multiple instances of the same marker.
 Markers can be put in inline functions, inlined static functions, and
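The ordering rule documented above is easiest to see in a module exit path. A minimal sketch, assuming a probe was registered earlier with marker_probe_register("subsystem_event", ...) and that my_probe_data is a hypothetical resource the probe dereferences:

	static void __exit probe_module_exit(void)
	{
		marker_probe_unregister("subsystem_event",
					probe_subsystem_event, my_probe_data);
		/*
		 * Wait for every caller that may still be executing the
		 * probe: only after this is it safe for the module text to
		 * go away and for the probe's data to be freed.
		 */
		marker_synchronize_unregister();
		kfree(my_probe_data);
	}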
diff --git a/Documentation/tracepoints.txt b/Documentation/tracepoints.txt
index 2d42241a25c3..6f0a044f5b5e 100644
--- a/Documentation/tracepoints.txt
+++ b/Documentation/tracepoints.txt
@@ -45,7 +45,7 @@ In include/trace/subsys.h :
 #include <linux/tracepoint.h>
 
 DECLARE_TRACE(subsys_eventname,
-	TPPTOTO(int firstarg, struct task_struct *p),
+	TPPROTO(int firstarg, struct task_struct *p),
 	TPARGS(firstarg, p));
 
 In subsys/file.c (where the tracing statement must be added) :
@@ -66,7 +66,7 @@ Where :
 - subsys is the name of your subsystem.
 - eventname is the name of the event to trace.
 
-- TPPTOTO(int firstarg, struct task_struct *p) is the prototype of the
+- TPPROTO(int firstarg, struct task_struct *p) is the prototype of the
   function called by this tracepoint.
 
 - TPARGS(firstarg, p) are the parameters names, same as found in the
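For comparison with the markers API, a hedged sketch of a probe module for the tracepoint declared above (the register_/unregister_ helpers are generated by DECLARE_TRACE; tracepoint_synchronize_unregister() is assumed to be the tracepoint analogue of marker_synchronize_unregister()):

	static void probe_subsys_eventname(int firstarg, struct task_struct *p)
	{
		/* runs with preemption disabled each time the event fires */
	}

	static int __init tp_sample_init(void)
	{
		return register_trace_subsys_eventname(probe_subsys_eventname);
	}

	static void __exit tp_sample_exit(void)
	{
		unregister_trace_subsys_eventname(probe_subsys_eventname);
		tracepoint_synchronize_unregister();
	}

	module_init(tp_sample_init);
	module_exit(tp_sample_exit);
	MODULE_LICENSE("GPL");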
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 92673b43858d..d17edb4a2f9d 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -17,6 +17,7 @@ ifdef CONFIG_FUNCTION_TRACER
 CFLAGS_REMOVE_cputable.o = -pg -mno-sched-epilog
 CFLAGS_REMOVE_prom_init.o = -pg -mno-sched-epilog
 CFLAGS_REMOVE_btext.o = -pg -mno-sched-epilog
+CFLAGS_REMOVE_prom.o = -pg -mno-sched-epilog
 
 ifdef CONFIG_DYNAMIC_FTRACE
 # dynamic ftrace setup.
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 7ecc0d1855c3..6f7eb7e00c79 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -1162,39 +1162,17 @@ machine_check_in_rtas:
 #ifdef CONFIG_DYNAMIC_FTRACE
 _GLOBAL(mcount)
 _GLOBAL(_mcount)
-	stwu	r1,-48(r1)
-	stw	r3, 12(r1)
-	stw	r4, 16(r1)
-	stw	r5, 20(r1)
-	stw	r6, 24(r1)
-	mflr	r3
-	stw	r7, 28(r1)
-	mfcr	r5
-	stw	r8, 32(r1)
-	stw	r9, 36(r1)
-	stw	r10,40(r1)
-	stw	r3, 44(r1)
-	stw	r5, 8(r1)
-	subi	r3, r3, MCOUNT_INSN_SIZE
-	.globl mcount_call
-mcount_call:
-	bl	ftrace_stub
-	nop
-	lwz	r6, 8(r1)
-	lwz	r0, 44(r1)
-	lwz	r3, 12(r1)
+	/*
+	 * It is required that _mcount on PPC32 must preserve the
+	 * link register. But we have r0 to play with. We use r0
+	 * to push the return address back to the caller of mcount
+	 * into the ctr register, restore the link register and
+	 * then jump back using the ctr register.
+	 */
+	mflr	r0
 	mtctr	r0
-	lwz	r4, 16(r1)
-	mtcr	r6
-	lwz	r5, 20(r1)
-	lwz	r6, 24(r1)
-	lwz	r0, 52(r1)
-	lwz	r7, 28(r1)
-	lwz	r8, 32(r1)
+	lwz	r0, 4(r1)
 	mtlr	r0
-	lwz	r9, 36(r1)
-	lwz	r10,40(r1)
-	addi	r1, r1, 48
 	bctr
 
 _GLOBAL(ftrace_caller)
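The reason the stripped-down _mcount above can reload the caller's return address from 4(r1): with -pg, gcc on ppc32 emits roughly the following at function entry (a sketch, not part of this patch), saving the function's own LR into its stack frame before calling _mcount:

	func:
		mflr	r0
		stw	r0, 4(r1)	# func saves its return address at 4(r1)
		bl	_mcount		# LR now points back into func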
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index e6d52845854f..b00982e0d1ee 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -888,18 +888,6 @@ _GLOBAL(enter_prom)
 #ifdef CONFIG_DYNAMIC_FTRACE
 _GLOBAL(mcount)
 _GLOBAL(_mcount)
-	/* Taken from output of objdump from lib64/glibc */
-	mflr	r3
-	stdu	r1, -112(r1)
-	std	r3, 128(r1)
-	subi	r3, r3, MCOUNT_INSN_SIZE
-	.globl mcount_call
-mcount_call:
-	bl	ftrace_stub
-	nop
-	ld	r0, 128(r1)
-	mtlr	r0
-	addi	r1, r1, 112
 	blr
 
 _GLOBAL(ftrace_caller)
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
index 3271cd698e4c..5355244c99ff 100644
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -114,19 +114,9 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
  */
 static int test_24bit_addr(unsigned long ip, unsigned long addr)
 {
-	long diff;
 
-	/*
-	 * Can we get to addr from ip in 24 bits?
-	 * (26 really, since we multiply by 4 for 4 byte alignment)
-	 */
-	diff = addr - ip;
-
-	/*
-	 * Return true if diff is less than 1 << 25
-	 * and greater than -1 << 26.
-	 */
-	return (diff < (1 << 25)) && (diff > (-1 << 26));
+	/* use the create_branch to verify that this offset can be branched */
+	return create_branch((unsigned int *)ip, addr, 0);
 }
 
 static int is_bl_op(unsigned int op)
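create_branch() comes from the powerpc code-patching helpers (<asm/code-patching.h>). It returns the encoded branch instruction, or 0 when the target is unreachable from the given address, so the return value doubles as the range check that the open-coded arithmetic used to do. A hedged sketch of that idea, with a hypothetical wrapper name:

	#include <asm/code-patching.h>

	/* non-zero iff a relative branch at 'ip' can reach 'addr' */
	static int can_branch_to(unsigned long ip, unsigned long addr)
	{
		/* flags 0: relative branch, no link bit; 0 means out of range */
		return create_branch((unsigned int *)ip, addr, 0) != 0;
	}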
@@ -134,11 +124,6 @@ static int is_bl_op(unsigned int op)
 	return (op & 0xfc000003) == 0x48000001;
 }
 
-static int test_offset(unsigned long offset)
-{
-	return (offset + 0x2000000 > 0x3ffffff) || ((offset & 3) != 0);
-}
-
 static unsigned long find_bl_target(unsigned long ip, unsigned int op)
 {
 	static int offset;
@@ -151,37 +136,30 @@ static unsigned long find_bl_target(unsigned long ip, unsigned int op)
 	return ip + (long)offset;
 }
 
-static unsigned int branch_offset(unsigned long offset)
-{
-	/* return "bl ip+offset" */
-	return 0x48000001 | (offset & 0x03fffffc);
-}
-
 #ifdef CONFIG_PPC64
 static int
 __ftrace_make_nop(struct module *mod,
 		  struct dyn_ftrace *rec, unsigned long addr)
 {
-	unsigned char replaced[MCOUNT_INSN_SIZE * 2];
-	unsigned int *op = (unsigned *)&replaced;
-	unsigned char jmp[8];
-	unsigned long *ptr = (unsigned long *)&jmp;
+	unsigned int op;
+	unsigned int jmp[5];
+	unsigned long ptr;
 	unsigned long ip = rec->ip;
 	unsigned long tramp;
 	int offset;
 
 	/* read where this goes */
-	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
+	if (probe_kernel_read(&op, (void *)ip, sizeof(int)))
 		return -EFAULT;
 
 	/* Make sure that this is still a 24bit jump */
-	if (!is_bl_op(*op)) {
-		printk(KERN_ERR "Not expected bl: opcode is %x\n", *op);
+	if (!is_bl_op(op)) {
+		printk(KERN_ERR "Not expected bl: opcode is %x\n", op);
 		return -EINVAL;
 	}
 
 	/* lets find where the pointer goes */
-	tramp = find_bl_target(ip, *op);
+	tramp = find_bl_target(ip, op);
 
 	/*
 	 * On PPC64 the trampoline looks like:
@@ -200,19 +178,25 @@ __ftrace_make_nop(struct module *mod,
 	DEBUGP("ip:%lx jumps to %lx r2: %lx", ip, tramp, mod->arch.toc);
 
 	/* Find where the trampoline jumps to */
-	if (probe_kernel_read(jmp, (void *)tramp, 8)) {
+	if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
 		printk(KERN_ERR "Failed to read %lx\n", tramp);
 		return -EFAULT;
 	}
 
-	DEBUGP(" %08x %08x",
-	       (unsigned)(*ptr >> 32),
-	       (unsigned)*ptr);
+	DEBUGP(" %08x %08x", jmp[0], jmp[1]);
+
+	/* verify that this is what we expect it to be */
+	if (((jmp[0] & 0xffff0000) != 0x3d820000) ||
+	    ((jmp[1] & 0xffff0000) != 0x398c0000) ||
+	    (jmp[2] != 0xf8410028) ||
+	    (jmp[3] != 0xe96c0020) ||
+	    (jmp[4] != 0xe84c0028)) {
+		printk(KERN_ERR "Not a trampoline\n");
+		return -EINVAL;
+	}
 
-	offset = (unsigned)jmp[2] << 24 |
-		(unsigned)jmp[3] << 16 |
-		(unsigned)jmp[6] << 8 |
-		(unsigned)jmp[7];
+	offset = (unsigned)((unsigned short)jmp[0]) << 16 |
+		 (unsigned)((unsigned short)jmp[1]);
 
 	DEBUGP(" %x ", offset);
 
@@ -225,13 +209,13 @@ __ftrace_make_nop(struct module *mod,
 		return -EFAULT;
 	}
 
-	DEBUGP(" %08x %08x\n",
-	       (unsigned)(*ptr >> 32),
-	       (unsigned)*ptr);
+	DEBUGP(" %08x %08x\n", jmp[0], jmp[1]);
+
+	ptr = ((unsigned long)jmp[0] << 32) + jmp[1];
 
 	/* This should match what was called */
-	if (*ptr != GET_ADDR(addr)) {
-		printk(KERN_ERR "addr does not match %lx\n", *ptr);
+	if (ptr != GET_ADDR(addr)) {
+		printk(KERN_ERR "addr does not match %lx\n", ptr);
 		return -EINVAL;
 	}
 
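The GET_ADDR() comparison exists because a ppc64 (ELFv1) function symbol points at a function descriptor rather than at code, and the trampoline stores the descriptor's contents. Roughly, with illustrative field names:

	/* sketch of a ppc64 ELFv1 function descriptor */
	struct func_desc {
		unsigned long funcaddr;	/* actual entry point in .text */
		unsigned long toc;	/* r2 value the callee expects */
		unsigned long env;	/* unused by C code */
	};

GET_ADDR(addr) dereferences addr to pull the real text address out of the descriptor before the comparison.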
@@ -240,11 +224,11 @@ __ftrace_make_nop(struct module *mod,
 	 * 0xe8, 0x41, 0x00, 0x28	ld r2,40(r1)
 	 * This needs to be turned to a nop too.
 	 */
-	if (probe_kernel_read(replaced, (void *)(ip+4), MCOUNT_INSN_SIZE))
+	if (probe_kernel_read(&op, (void *)(ip+4), MCOUNT_INSN_SIZE))
 		return -EFAULT;
 
-	if (*op != 0xe8410028) {
-		printk(KERN_ERR "Next line is not ld! (%08x)\n", *op);
+	if (op != 0xe8410028) {
+		printk(KERN_ERR "Next line is not ld! (%08x)\n", op);
 		return -EINVAL;
 	}
 
@@ -261,11 +245,14 @@ __ftrace_make_nop(struct module *mod,
 	 * ld r2,40(r1)
 	 *  1:
 	 */
-	op[0] = 0x48000008;	/* b +8 */
+	op = 0x48000008;	/* b +8 */
 
-	if (probe_kernel_write((void *)ip, replaced, MCOUNT_INSN_SIZE))
+	if (probe_kernel_write((void *)ip, &op, MCOUNT_INSN_SIZE))
 		return -EPERM;
 
+
+	flush_icache_range(ip, ip + 8);
+
 	return 0;
 }
 
@@ -274,46 +261,52 @@ static int
 __ftrace_make_nop(struct module *mod,
 		  struct dyn_ftrace *rec, unsigned long addr)
 {
-	unsigned char replaced[MCOUNT_INSN_SIZE];
-	unsigned int *op = (unsigned *)&replaced;
-	unsigned char jmp[8];
-	unsigned int *ptr = (unsigned int *)&jmp;
+	unsigned int op;
+	unsigned int jmp[4];
 	unsigned long ip = rec->ip;
 	unsigned long tramp;
-	int offset;
 
-	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
+	if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
 		return -EFAULT;
 
 	/* Make sure that this is still a 24bit jump */
-	if (!is_bl_op(*op)) {
-		printk(KERN_ERR "Not expected bl: opcode is %x\n", *op);
+	if (!is_bl_op(op)) {
+		printk(KERN_ERR "Not expected bl: opcode is %x\n", op);
 		return -EINVAL;
 	}
 
 	/* lets find where the pointer goes */
-	tramp = find_bl_target(ip, *op);
+	tramp = find_bl_target(ip, op);
 
 	/*
 	 * On PPC32 the trampoline looks like:
-	 *  lis r11,sym@ha
-	 *  addi r11,r11,sym@l
-	 *  mtctr r11
-	 *  bctr
+	 * 0x3d, 0x60, 0x00, 0x00  lis r11,sym@ha
+	 * 0x39, 0x6b, 0x00, 0x00  addi r11,r11,sym@l
+	 * 0x7d, 0x69, 0x03, 0xa6  mtctr r11
+	 * 0x4e, 0x80, 0x04, 0x20  bctr
 	 */
 
 	DEBUGP("ip:%lx jumps to %lx", ip, tramp);
 
 	/* Find where the trampoline jumps to */
-	if (probe_kernel_read(jmp, (void *)tramp, 8)) {
+	if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
 		printk(KERN_ERR "Failed to read %lx\n", tramp);
 		return -EFAULT;
 	}
 
-	DEBUGP(" %08x %08x ", ptr[0], ptr[1]);
+	DEBUGP(" %08x %08x ", jmp[0], jmp[1]);
+
+	/* verify that this is what we expect it to be */
+	if (((jmp[0] & 0xffff0000) != 0x3d600000) ||
+	    ((jmp[1] & 0xffff0000) != 0x396b0000) ||
+	    (jmp[2] != 0x7d6903a6) ||
+	    (jmp[3] != 0x4e800420)) {
+		printk(KERN_ERR "Not a trampoline\n");
+		return -EINVAL;
+	}
 
-	tramp = (ptr[1] & 0xffff) |
-		((ptr[0] & 0xffff) << 16);
+	tramp = (jmp[1] & 0xffff) |
+		((jmp[0] & 0xffff) << 16);
 	if (tramp & 0x8000)
 		tramp -= 0x10000;
 
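The tramp -= 0x10000 adjustment undoes @ha relocation semantics: addi sign-extends its 16-bit immediate, so the linker pre-increments the high half whenever bit 15 of the low half is set. A self-contained sketch of the same decode with hypothetical immediates:

	#include <stdio.h>

	int main(void)
	{
		unsigned int lis  = 0x3d60c000;	/* lis r11,0xc000 */
		unsigned int addi = 0x396b8000;	/* addi r11,r11,-32768 */
		unsigned long target;

		target = (addi & 0xffff) | ((lis & 0xffff) << 16);
		if (target & 0x8000)	/* low half is negative when sign-extended */
			target -= 0x10000;

		printf("%#lx\n", target);	/* prints 0xbfff8000 */
		return 0;
	}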
@@ -326,11 +319,13 @@ __ftrace_make_nop(struct module *mod,
 		return -EINVAL;
 	}
 
-	op[0] = PPC_NOP_INSTR;
+	op = PPC_NOP_INSTR;
 
-	if (probe_kernel_write((void *)ip, replaced, MCOUNT_INSN_SIZE))
+	if (probe_kernel_write((void *)ip, &op, MCOUNT_INSN_SIZE))
 		return -EPERM;
 
+	flush_icache_range(ip, ip + 8);
+
 	return 0;
 }
 #endif /* PPC64 */
@@ -384,13 +379,11 @@ int ftrace_make_nop(struct module *mod,
 static int
 __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 {
-	unsigned char replaced[MCOUNT_INSN_SIZE * 2];
-	unsigned int *op = (unsigned *)&replaced;
+	unsigned int op[2];
 	unsigned long ip = rec->ip;
-	unsigned long offset;
 
 	/* read where this goes */
-	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE * 2))
+	if (probe_kernel_read(op, (void *)ip, MCOUNT_INSN_SIZE * 2))
 		return -EFAULT;
 
 	/*
@@ -409,43 +402,40 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 		return -EINVAL;
 	}
 
-	/* now calculate a jump to the ftrace caller trampoline */
-	offset = rec->arch.mod->arch.tramp - ip;
-
-	if (test_offset(offset)) {
-		printk(KERN_ERR "REL24 %li out of range!\n",
-		       (long int)offset);
+	/* create the branch to the trampoline */
+	op[0] = create_branch((unsigned int *)ip,
+			      rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
+	if (!op[0]) {
+		printk(KERN_ERR "REL24 out of range!\n");
 		return -EINVAL;
 	}
 
-	/* Set to "bl addr" */
-	op[0] = branch_offset(offset);
 	/* ld r2,40(r1) */
 	op[1] = 0xe8410028;
 
 	DEBUGP("write to %lx\n", rec->ip);
 
-	if (probe_kernel_write((void *)ip, replaced, MCOUNT_INSN_SIZE * 2))
+	if (probe_kernel_write((void *)ip, op, MCOUNT_INSN_SIZE * 2))
 		return -EPERM;
 
+	flush_icache_range(ip, ip + 8);
+
 	return 0;
 }
 #else
 static int
 __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 {
-	unsigned char replaced[MCOUNT_INSN_SIZE];
-	unsigned int *op = (unsigned *)&replaced;
+	unsigned int op;
 	unsigned long ip = rec->ip;
-	unsigned long offset;
 
 	/* read where this goes */
-	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
+	if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
 		return -EFAULT;
 
 	/* It should be pointing to a nop */
-	if (op[0] != PPC_NOP_INSTR) {
-		printk(KERN_ERR "Expected NOP but have %x\n", op[0]);
+	if (op != PPC_NOP_INSTR) {
+		printk(KERN_ERR "Expected NOP but have %x\n", op);
 		return -EINVAL;
 	}
 
@@ -455,23 +445,21 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 		return -EINVAL;
 	}
 
-	/* now calculate a jump to the ftrace caller trampoline */
-	offset = rec->arch.mod->arch.tramp - ip;
-
-	if (test_offset(offset)) {
-		printk(KERN_ERR "REL24 %li out of range!\n",
-		       (long int)offset);
+	/* create the branch to the trampoline */
+	op = create_branch((unsigned int *)ip,
+			   rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
+	if (!op) {
+		printk(KERN_ERR "REL24 out of range!\n");
 		return -EINVAL;
 	}
 
-	/* Set to "bl addr" */
-	op[0] = branch_offset(offset);
-
 	DEBUGP("write to %lx\n", rec->ip);
 
-	if (probe_kernel_write((void *)ip, replaced, MCOUNT_INSN_SIZE))
+	if (probe_kernel_write((void *)ip, &op, MCOUNT_INSN_SIZE))
 		return -EPERM;
 
+	flush_icache_range(ip, ip + 8);
+
 	return 0;
 }
 #endif /* CONFIG_PPC64 */
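Note that all four patching paths now share one sequence: build the replacement instruction (via create_branch() or a literal), write it with probe_kernel_write() so a bad address faults cleanly, then flush_icache_range() so the CPU cannot keep executing the stale copy from the instruction cache. Schematically (a sketch, not a helper that exists in this file):

	static int patch_site(unsigned long ip, unsigned int new_insn)
	{
		if (probe_kernel_write((void *)ip, &new_insn, MCOUNT_INSN_SIZE))
			return -EPERM;	/* unmapped or write-protected text */

		/* make the store visible to instruction fetch */
		flush_icache_range(ip, ip + MCOUNT_INSN_SIZE);
		return 0;
	}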
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index d69912c07ce7..8db35278a4b4 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -6,6 +6,9 @@ ifeq ($(CONFIG_PPC64),y)
 EXTRA_CFLAGS += -mno-minimal-toc
 endif
 
+CFLAGS_REMOVE_code-patching.o = -pg
+CFLAGS_REMOVE_feature-fixups.o = -pg
+
 obj-y			:= string.o alloc.o \
 			   checksum_$(CONFIG_WORD_SIZE).o
 obj-$(CONFIG_PPC32)	+= div64.o copy_32.o crtsavres.o
diff --git a/fs/seq_file.c b/fs/seq_file.c
index f03220d7891b..16c211558c22 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -387,7 +387,7 @@ char *mangle_path(char *s, char *p, char *esc)
 	}
 	return NULL;
 }
-EXPORT_SYMBOL_GPL(mangle_path);
+EXPORT_SYMBOL(mangle_path);
 
 /*
  * return the absolute path of 'dentry' residing in mount 'mnt'.
diff --git a/include/linux/marker.h b/include/linux/marker.h
index 34c14bc957f5..b85e74ca782f 100644
--- a/include/linux/marker.h
+++ b/include/linux/marker.h
@@ -211,8 +211,10 @@ extern void *marker_get_private_data(const char *name, marker_probe_func *probe,
 
 /*
  * marker_synchronize_unregister must be called between the last marker probe
- * unregistration and the end of module exit to make sure there is no caller
- * executing a probe when it is freed.
+ * unregistration and the first one of
+ * - the end of the module exit function
+ * - the free of any resource used by the probes
+ * to ensure the code and data are valid for any possibly running probes.
  */
 #define marker_synchronize_unregister() synchronize_sched()
 
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 5811e0a5f732..91887a280ab9 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -983,6 +983,7 @@ static void ftrace_trace_userstack(struct trace_array *tr,
 				       struct trace_array_cpu *data,
 				       unsigned long flags, int pc)
 {
+#ifdef CONFIG_STACKTRACE
 	struct ring_buffer_event *event;
 	struct userstack_entry *entry;
 	struct stack_trace trace;
@@ -1008,6 +1009,7 @@ static void ftrace_trace_userstack(struct trace_array *tr,
 
 	save_stack_trace_user(&trace);
 	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+#endif
 }
 
 void __trace_userstack(struct trace_array *tr,
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 877ee88e6a74..bc972753568d 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -6,6 +6,7 @@
 #include <linux/kallsyms.h>
 #include <linux/seq_file.h>
 #include <linux/spinlock.h>
+#include <linux/irqflags.h>
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/module.h>
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index d31d695174aa..894b50bca313 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -15,15 +15,24 @@
 
 #define TRACE_GRAPH_INDENT	2
 
+/* Flag options */
 #define TRACE_GRAPH_PRINT_OVERRUN	0x1
+#define TRACE_GRAPH_PRINT_CPU		0x2
+#define TRACE_GRAPH_PRINT_OVERHEAD	0x4
+
 static struct tracer_opt trace_opts[] = {
-	/* Display overruns or not */
-	{ TRACER_OPT(overrun, TRACE_GRAPH_PRINT_OVERRUN) },
+	/* Display overruns ? */
+	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
+	/* Display CPU ? */
+	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
+	/* Display Overhead ? */
+	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
 	{ } /* Empty entry */
 };
 
 static struct tracer_flags tracer_flags = {
-	.val = 0, /* Don't display overruns by default */
+	/* Don't display overruns by default */
+	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD,
 	.opts = trace_opts
 };
 
@@ -52,37 +61,208 @@ static void graph_trace_reset(struct trace_array *tr)
 	unregister_ftrace_graph();
 }
 
+static inline int log10_cpu(int nb)
+{
+	if (nb / 100)
+		return 3;
+	if (nb / 10)
+		return 2;
+	return 1;
+}
+
+static enum print_line_t
+print_graph_cpu(struct trace_seq *s, int cpu)
+{
+	int i;
+	int ret;
+	int log10_this = log10_cpu(cpu);
+	int log10_all = log10_cpu(cpus_weight_nr(cpu_online_map));
+
+
+	/*
+	 * Start with a space character - to make it stand out
+	 * to the right a bit when trace output is pasted into
+	 * email:
+	 */
+	ret = trace_seq_printf(s, " ");
+
+	/*
+	 * Tricky - we space the CPU field according to the max
+	 * number of online CPUs. On a 2-cpu system it would take
+	 * a maximum of 1 digit - on a 128 cpu system it would
+	 * take up to 3 digits:
+	 */
+	for (i = 0; i < log10_all - log10_this; i++) {
+		ret = trace_seq_printf(s, " ");
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+	}
+	ret = trace_seq_printf(s, "%d) ", cpu);
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	return TRACE_TYPE_HANDLED;
+}
+
+
 /* If the pid changed since the last trace, output this event */
 static int verif_pid(struct trace_seq *s, pid_t pid, int cpu)
 {
-	char *comm;
+	char *comm, *prev_comm;
+	pid_t prev_pid;
+	int ret;
 
 	if (last_pid[cpu] != -1 && last_pid[cpu] == pid)
 		return 1;
 
+	prev_pid = last_pid[cpu];
 	last_pid[cpu] = pid;
+
 	comm = trace_find_cmdline(pid);
+	prev_comm = trace_find_cmdline(prev_pid);
 
-	return trace_seq_printf(s, "\nCPU[%03d]"
-				    " ------------8<---------- thread %s-%d"
-				    " ------------8<----------\n\n",
-				    cpu, comm, pid);
+/*
+ * Context-switch trace line:
+
+ ------------------------------------------
+ | 1)  migration/0--1  =>  sshd-1755
+ ------------------------------------------
+
+ */
+	ret = trace_seq_printf(s,
+		" ------------------------------------------\n");
+	ret += trace_seq_printf(s, " | %d)  %s-%d  =>  %s-%d\n",
+				  cpu, prev_comm, prev_pid, comm, pid);
+	ret += trace_seq_printf(s,
+		" ------------------------------------------\n\n");
+	return ret;
 }
 
+static bool
+trace_branch_is_leaf(struct trace_iterator *iter,
+		struct ftrace_graph_ent_entry *curr)
+{
+	struct ring_buffer_iter *ring_iter;
+	struct ring_buffer_event *event;
+	struct ftrace_graph_ret_entry *next;
+
+	ring_iter = iter->buffer_iter[iter->cpu];
+
+	if (!ring_iter)
+		return false;
+
+	event = ring_buffer_iter_peek(ring_iter, NULL);
+
+	if (!event)
+		return false;
+
+	next = ring_buffer_event_data(event);
+
+	if (next->ent.type != TRACE_GRAPH_RET)
+		return false;
+
+	if (curr->ent.pid != next->ent.pid ||
+			curr->graph_ent.func != next->ret.func)
+		return false;
+
+	return true;
+}
+
+
+static inline int
+print_graph_duration(unsigned long long duration, struct trace_seq *s)
+{
+	unsigned long nsecs_rem = do_div(duration, 1000);
+	return trace_seq_printf(s, "%4llu.%3lu us |  ", duration, nsecs_rem);
+}
+
+/* Signal an overhead of time execution to the output */
+static int
+print_graph_overhead(unsigned long long duration, struct trace_seq *s)
+{
+	/* Duration exceeded 100 msecs */
+	if (duration > 100000ULL)
+		return trace_seq_printf(s, "! ");
+
+	/* Duration exceeded 10 msecs */
+	if (duration > 10000ULL)
+		return trace_seq_printf(s, "+ ");
+
+	return trace_seq_printf(s, "  ");
+}
+
+/* Case of a leaf function on its call entry */
 static enum print_line_t
-print_graph_entry(struct ftrace_graph_ent *call, struct trace_seq *s,
-		  struct trace_entry *ent, int cpu)
+print_graph_entry_leaf(struct trace_iterator *iter,
+		struct ftrace_graph_ent_entry *entry, struct trace_seq *s)
 {
-	int i;
+	struct ftrace_graph_ret_entry *ret_entry;
+	struct ftrace_graph_ret *graph_ret;
+	struct ring_buffer_event *event;
+	struct ftrace_graph_ent *call;
+	unsigned long long duration;
 	int ret;
+	int i;
 
-	if (!verif_pid(s, ent->pid, cpu))
+	event = ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
+	ret_entry = ring_buffer_event_data(event);
+	graph_ret = &ret_entry->ret;
+	call = &entry->graph_ent;
+	duration = graph_ret->rettime - graph_ret->calltime;
+
+	/* Must not exceed 8 characters: 9999.999 us */
+	if (duration > 10000000ULL)
+		duration = 9999999ULL;
+
+	/* Overhead */
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
+		ret = print_graph_overhead(duration, s);
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+	}
+
+	/* Duration */
+	ret = print_graph_duration(duration, s);
+	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
 
-	ret = trace_seq_printf(s, "CPU[%03d] ", cpu);
+	/* Function */
+	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
+		ret = trace_seq_printf(s, " ");
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+	}
+
+	ret = seq_print_ip_sym(s, call->func, 0);
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	ret = trace_seq_printf(s, "();\n");
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
 
+	return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t
+print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
+			struct trace_seq *s)
+{
+	int i;
+	int ret;
+	struct ftrace_graph_ent *call = &entry->graph_ent;
+
+	/* No overhead */
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
+		ret = trace_seq_printf(s, "  ");
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+	}
+
+	/* No time */
+	ret = trace_seq_printf(s, "            |  ");
+
+	/* Function */
 	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
 		ret = trace_seq_printf(s, " ");
 		if (!ret)
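Two details of the code above are worth spelling out. trace_branch_is_leaf() peeks at the next ring-buffer event: if it is the matching TRACE_GRAPH_RET for the same pid and function, the pair is printed as a single "func();" line carrying its duration; otherwise the entry opens a "func() {" block and the duration appears later on the closing brace. And print_graph_duration() uses do_div(), which divides its argument in place and returns the remainder. A userspace sketch of the same microsecond formatting:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long duration = 1234567;	/* ns, hypothetical */
		unsigned long rem = duration % 1000;	/* what do_div() returns */

		duration /= 1000;	/* what do_div() leaves in 'duration' */
		printf("%4llu.%3lu us |  \n", duration, rem);	/* "1234.567 us |" */
		return 0;
	}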
@@ -96,37 +276,82 @@ print_graph_entry(struct ftrace_graph_ent *call, struct trace_seq *s,
 	ret = trace_seq_printf(s, "() {\n");
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
+
 	return TRACE_TYPE_HANDLED;
 }
 
 static enum print_line_t
+print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
+			struct trace_iterator *iter, int cpu)
+{
+	int ret;
+	struct trace_entry *ent = iter->ent;
+
+	/* Pid */
+	if (!verif_pid(s, ent->pid, cpu))
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	/* Cpu */
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
+		ret = print_graph_cpu(s, cpu);
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+	}
+
+	if (trace_branch_is_leaf(iter, field))
+		return print_graph_entry_leaf(iter, field, s);
+	else
+		return print_graph_entry_nested(field, s);
+
+}
+
+static enum print_line_t
 print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
 		   struct trace_entry *ent, int cpu)
 {
 	int i;
 	int ret;
+	unsigned long long duration = trace->rettime - trace->calltime;
+
+	/* Must not exceed 8 characters: xxxx.yyy us */
+	if (duration > 10000000ULL)
+		duration = 9999999ULL;
 
+	/* Pid */
 	if (!verif_pid(s, ent->pid, cpu))
 		return TRACE_TYPE_PARTIAL_LINE;
 
-	ret = trace_seq_printf(s, "CPU[%03d] ", cpu);
+	/* Cpu */
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
+		ret = print_graph_cpu(s, cpu);
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+	}
+
+	/* Overhead */
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
+		ret = print_graph_overhead(duration, s);
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+	}
+
+	/* Duration */
+	ret = print_graph_duration(duration, s);
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
 
+	/* Closing brace */
 	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
 		ret = trace_seq_printf(s, " ");
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
 	}
 
-	ret = trace_seq_printf(s, "} ");
-	if (!ret)
-		return TRACE_TYPE_PARTIAL_LINE;
-
-	ret = trace_seq_printf(s, "%llu\n", trace->rettime - trace->calltime);
+	ret = trace_seq_printf(s, "}\n");
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
 
+	/* Overrun */
 	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
 		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
 					trace->overrun);
@@ -146,7 +371,7 @@ print_graph_function(struct trace_iterator *iter)
 	case TRACE_GRAPH_ENT: {
 		struct ftrace_graph_ent_entry *field;
 		trace_assign_type(field, entry);
-		return print_graph_entry(&field->graph_ent, s, entry,
+		return print_graph_entry(field, s, iter,
 					 iter->cpu);
 	}
 	case TRACE_GRAPH_RET: {
@@ -160,7 +385,7 @@ print_graph_function(struct trace_iterator *iter)
 }
 
 static struct tracer graph_trace __read_mostly = {
-	.name		= "function-graph",
+	.name		= "function_graph",
 	.init		= graph_trace_init,
 	.reset		= graph_trace_reset,
 	.print_line	= print_graph_function,
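With the new defaults (CPU column and overhead markers enabled, overruns off), the renamed tracer is selected with "echo function_graph > current_tracer" and its output looks roughly like this (illustrative, not captured from a real run; individual options toggle via trace_options, e.g. "echo nofuncgraph-cpu > trace_options"):

	 0)               |  sys_read() {
	 0)               |    fget_light() {
	 0)      0.810 us |      fcheck_files();
	 0)      1.888 us |    }
	 0) + 10.522 us   |  }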