author     Michal Simek <monstr@monstr.eu>  2009-11-16 03:40:14 -0500
committer  Michal Simek <monstr@monstr.eu>  2009-12-14 02:40:09 -0500
commit     2fd7c761a24c28e83d7194b4b4a099451126a503 (patch)
tree       c8647ae1bc4519649e35e46b231f3f2af77f9a76 /arch/microblaze/kernel/mcount.S
parent     a3cd613b2e775eb59816c2c7c49c038d54917208 (diff)
microblaze: ftrace: add static function tracer
If gcc's -pg option is enabled together with CONFIG_FUNCTION_TRACER=y, a call to _mcount is inserted into each kernel function, which makes it possible to trace kernel functions from _mcount. This patch adds the MicroBlaze-specific _mcount support for static function tracing. By default, ftrace_trace_function is initialized to ftrace_stub (an empty function), so the default _mcount introduces very little overhead. Once ftrace is enabled from user space, _mcount jumps to the real tracing function and performs static function tracing for us.

Commit message from Wu Zhangjin <wuzhangjin@gmail.com>

Signed-off-by: Michal Simek <monstr@monstr.eu>
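For context, the dispatch that the assembly _mcount below performs can be sketched in C. This is only an illustration, assuming the conventional ftrace_func_t signature of (ip, parent_ip) and a global ftrace_trace_function pointer that defaults to ftrace_stub; the helper name mcount_dispatch is made up here and is not part of this patch or of the kernel.

    /* Illustrative sketch only; the real hook lives in the kernel's ftrace core. */
    typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);

    /* Empty default handler: with tracing disabled, _mcount costs almost nothing. */
    void ftrace_stub(unsigned long ip, unsigned long parent_ip) { }

    /* Repointed to a real tracer once tracing is enabled from user space. */
    ftrace_func_t ftrace_trace_function = ftrace_stub;

    /* Roughly what the assembly _mcount does on every function entry. */
    static void mcount_dispatch(unsigned long ip, unsigned long parent_ip)
    {
            if (ftrace_trace_function != ftrace_stub)
                    ftrace_trace_function(ip, parent_ip);
    }

The assembly version performs the same comparison against ftrace_stub so that the common case (tracing off) falls straight through to the register restore and return.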
Diffstat (limited to 'arch/microblaze/kernel/mcount.S')
-rw-r--r--   arch/microblaze/kernel/mcount.S   105
1 file changed, 105 insertions(+), 0 deletions(-)
diff --git a/arch/microblaze/kernel/mcount.S b/arch/microblaze/kernel/mcount.S
new file mode 100644
index 000000000000..a257a1b75ed2
--- /dev/null
+++ b/arch/microblaze/kernel/mcount.S
@@ -0,0 +1,105 @@
/*
 * Low-level ftrace handling
 *
 * Copyright (C) 2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2009 PetaLogix
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 */

#include <linux/linkage.h>

#define NOALIGN_ENTRY(name)	.globl name; name:

/* FIXME MS: I think that I don't need to save all regs */
#define SAVE_REGS		\
	addik	r1, r1, -120;	\
	swi	r2, r1, 4;	\
	swi	r3, r1, 8;	\
	swi	r4, r1, 12;	\
	swi	r5, r1, 116;	\
	swi	r6, r1, 16;	\
	swi	r7, r1, 20;	\
	swi	r8, r1, 24;	\
	swi	r9, r1, 28;	\
	swi	r10, r1, 32;	\
	swi	r11, r1, 36;	\
	swi	r12, r1, 40;	\
	swi	r13, r1, 44;	\
	swi	r14, r1, 48;	\
	swi	r16, r1, 52;	\
	swi	r17, r1, 56;	\
	swi	r18, r1, 60;	\
	swi	r19, r1, 64;	\
	swi	r20, r1, 68;	\
	swi	r21, r1, 72;	\
	swi	r22, r1, 76;	\
	swi	r23, r1, 80;	\
	swi	r24, r1, 84;	\
	swi	r25, r1, 88;	\
	swi	r26, r1, 92;	\
	swi	r27, r1, 96;	\
	swi	r28, r1, 100;	\
	swi	r29, r1, 104;	\
	swi	r30, r1, 108;	\
	swi	r31, r1, 112;

#define RESTORE_REGS		\
	lwi	r2, r1, 4;	\
	lwi	r3, r1, 8;	\
	lwi	r4, r1, 12;	\
	lwi	r5, r1, 116;	\
	lwi	r6, r1, 16;	\
	lwi	r7, r1, 20;	\
	lwi	r8, r1, 24;	\
	lwi	r9, r1, 28;	\
	lwi	r10, r1, 32;	\
	lwi	r11, r1, 36;	\
	lwi	r12, r1, 40;	\
	lwi	r13, r1, 44;	\
	lwi	r14, r1, 48;	\
	lwi	r16, r1, 52;	\
	lwi	r17, r1, 56;	\
	lwi	r18, r1, 60;	\
	lwi	r19, r1, 64;	\
	lwi	r20, r1, 68;	\
	lwi	r21, r1, 72;	\
	lwi	r22, r1, 76;	\
	lwi	r23, r1, 80;	\
	lwi	r24, r1, 84;	\
	lwi	r25, r1, 88;	\
	lwi	r26, r1, 92;	\
	lwi	r27, r1, 96;	\
	lwi	r28, r1, 100;	\
	lwi	r29, r1, 104;	\
	lwi	r30, r1, 108;	\
	lwi	r31, r1, 112;	\
	addik	r1, r1, 120;

ENTRY(ftrace_stub)
	rtsd	r15, 8;
	nop;

ENTRY(_mcount)
	SAVE_REGS
	swi	r15, r1, 0;
	/* MS: test function trace if is taken or not */
	lwi	r20, r0, ftrace_trace_function;
	addik	r6, r0, ftrace_stub;
	cmpu	r5, r20, r6; /* ftrace_trace_function != ftrace_stub */
	beqid	r5, end; /* MS: not taken -> jump over */
	nop;
/* static normal trace */
	lwi	r6, r1, 120; /* MS: load parent addr */
	addik	r5, r15, 0; /* MS: load current function addr */
	/* MS: here is dependency on previous code */
	brald	r15, r20; /* MS: jump to ftrace handler */
	nop;
end:
	lwi	r15, r1, 0;
	RESTORE_REGS

	rtsd	r15, 8; /* MS: jump back */
	nop;
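As a usage note (not part of this patch): with CONFIG_FUNCTION_TRACER enabled, the static function tracer is typically exercised from user space by writing "function" to current_tracer under the tracing debugfs directory (commonly /sys/kernel/debug/tracing) and reading the trace file back; exact paths depend on how the kernel and debugfs are configured.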