author     Shaohua Li <shaohua.li@intel.com>    2009-01-08 22:29:49 -0500
committer  Ingo Molnar <mingo@elte.hu>          2009-01-14 06:11:31 -0500
commit     a14a07b8018b714e03a39ff2180c66e307ef4238
tree       7c4b9249f862969b80b74e5e11e56480b0e3d15c /arch/ia64/kernel
parent     d3e75ff14bc1453c4762428395aac9953a023efc
ftrace, ia64: IA64 dynamic ftrace support
IA64 dynamic ftrace support.
The original _mcount stub emitted for each function looks like this:
alloc r40=ar.pfs,12,8,0
mov r43=r0;;
mov r42=b0
mov r41=r1
nop.i 0x0
br.call.sptk.many b0 = _mcount;;
The patch converts it to the following for a nop:
[MII] nop.m 0x0
mov r3=ip
nop.i 0x0
[MLX] nop.m 0x0
nop.x 0x0;;
This isn't a pure nop, as one real instruction, 'mov r3=ip', remains, but it
is lightweight and harmless for the code that follows it.
And below is the sequence for a call:
[MII] nop.m 0x0
mov r3=ip
nop.i 0x0
[MLX] nop.m 0x0
brl.many .;;
In this way, only one instruction has to be changed to convert a call site
between nop and call, which meets dyn-ftrace's requirement that the patching
take effect atomically.
This does require the CPU to support the brl instruction, so dyn-ftrace isn't
supported on old Itanium systems. We assume very few such old systems are
still running.
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
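For reference, the brl.many slot above carries a 16-byte-scaled, IP-relative
displacement split across three fields of the MLX bundle (imm20, imm39 and a
sign bit i). Below is a minimal sketch of how a call target would be packed
into those fields, following the same shifts as the ftrace_call_replace()
routine added by this patch; the struct and helper names here are
illustrative only, not part of the patch.

#include <stdint.h>

/* Sketch only: pack a brl.many target the way ftrace_call_replace() in the
 * new arch/ia64/kernel/ftrace.c does.  Field names mirror the
 * ftrace_call_insn bit-field struct; the masks make explicit the truncation
 * that the bit-fields perform implicitly. */
struct brl_imm_sketch {
	uint64_t imm20;		/* offset bits  4..23 (bundles are 16-byte aligned) */
	uint64_t imm39_l;	/* offset bits 24..39 */
	uint64_t imm39_h;	/* offset bits 40..62 */
	uint64_t i;		/* offset bit 63, the sign */
};

static struct brl_imm_sketch split_brl_offset(uint64_t call_site, uint64_t target)
{
	/* the displacement is relative to the bundle holding the brl,
	 * i.e. the second bundle of the patched pair (call site + 0x10) */
	uint64_t offset = target - (call_site + 0x10);

	struct brl_imm_sketch s = {
		.imm20   = (offset >> 4)  & ((1ULL << 20) - 1),
		.imm39_l = (offset >> 24) & ((1ULL << 16) - 1),
		.imm39_h = (offset >> 40) & ((1ULL << 23) - 1),
		.i       =  offset >> 63,
	};
	return s;
}

Because bit 63 of the offset acts as a sign bit, both forward and backward
long branches can be encoded, which is why a single brl can reach
ftrace_caller from anywhere in the kernel or a module.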
Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--  arch/ia64/kernel/Makefile |   5
-rw-r--r--  arch/ia64/kernel/entry.S  |  51
-rw-r--r--  arch/ia64/kernel/ftrace.c | 206
3 files changed, 262 insertions, 0 deletions
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index c381ea954892..ab6e7ec0bba3 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -2,6 +2,10 @@
 # Makefile for the linux kernel.
 #
 
+ifdef CONFIG_DYNAMIC_FTRACE
+CFLAGS_REMOVE_ftrace.o = -pg
+endif
+
 extra-y := head.o init_task.o vmlinux.lds
 
 obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \
@@ -28,6 +32,7 @@ obj-$(CONFIG_IA64_CYCLONE) += cyclone.o
 obj-$(CONFIG_CPU_FREQ) += cpufreq/
 obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o
 obj-$(CONFIG_KPROBES) += kprobes.o jprobes.o
+obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
 obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o
 obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
 obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index c2f7d798e2a5..e0be92a6abb0 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1406,6 +1406,56 @@ GLOBAL_ENTRY(unw_init_running)
 END(unw_init_running)
 
 #ifdef CONFIG_FUNCTION_TRACER
+#ifdef CONFIG_DYNAMIC_FTRACE
+GLOBAL_ENTRY(_mcount)
+	br ftrace_stub
+END(_mcount)
+
+.here:
+	br.ret.sptk.many b0
+
+GLOBAL_ENTRY(ftrace_caller)
+	alloc out0 = ar.pfs, 8, 0, 4, 0
+	mov out3 = r0
+	;;
+	mov out2 = b0
+	add r3 = 0x20, r3
+	mov out1 = r1;
+	br.call.sptk.many b0 = ftrace_patch_gp
+	//this might be called from module, so we must patch gp
+ftrace_patch_gp:
+	movl gp=__gp
+	mov b0 = r3
+	;;
+.global ftrace_call;
+ftrace_call:
+{
+	.mlx
+	nop.m 0x0
+	movl r3 = .here;;
+}
+	alloc loc0 = ar.pfs, 4, 4, 2, 0
+	;;
+	mov loc1 = b0
+	mov out0 = b0
+	mov loc2 = r8
+	mov loc3 = r15
+	;;
+	adds out0 = -MCOUNT_INSN_SIZE, out0
+	mov out1 = in2
+	mov b6 = r3
+
+	br.call.sptk.many b0 = b6
+	;;
+	mov ar.pfs = loc0
+	mov b0 = loc1
+	mov r8 = loc2
+	mov r15 = loc3
+	br ftrace_stub
+	;;
+END(ftrace_caller)
+
+#else
 GLOBAL_ENTRY(_mcount)
 	movl r2 = ftrace_stub
 	movl r3 = ftrace_trace_function;;
@@ -1435,6 +1485,7 @@ GLOBAL_ENTRY(_mcount)
 	br ftrace_stub
 	;;
 END(_mcount)
+#endif
 
 GLOBAL_ENTRY(ftrace_stub)
 	mov r3 = b0
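Roughly, the ftrace_caller trampoline added above preserves the state the
traced function still needs (b0, r8, r15), rebuilds the traced function's
address from its return branch register, and indirect-calls whatever tracer
the patched movl at ftrace_call currently points to. The following is a loose
C-level shadow of that flow, assuming the (ip, parent_ip) tracer callback
signature ftrace used at the time; all names are illustrative, and the real
work is done by the hand-written assembly, not by this sketch.

#define MCOUNT_INSN_SIZE	32	/* the two patched bundles, per asm/ftrace.h */

typedef void (*tracer_fn)(unsigned long ip, unsigned long parent_ip);

/* Sketch only: what ftrace_caller effectively does, in C terms. */
static void ftrace_caller_sketch(unsigned long return_b0,   /* b0 on entry */
				 unsigned long parent_ip,   /* caller of the traced function */
				 tracer_fn current_tracer)  /* target patched into ftrace_call */
{
	/* b0 returns just past the two patched bundles at the start of the
	 * traced function, so back up MCOUNT_INSN_SIZE to name the site */
	unsigned long self_addr = return_b0 - MCOUNT_INSN_SIZE;

	current_tracer(self_addr, parent_ip);
	/* ...then b0, r8 and r15 are restored and control branches to
	 * ftrace_stub, which returns into the traced function */
}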
diff --git a/arch/ia64/kernel/ftrace.c b/arch/ia64/kernel/ftrace.c
new file mode 100644
index 000000000000..7fc8c961b1f7
--- /dev/null
+++ b/arch/ia64/kernel/ftrace.c
@@ -0,0 +1,206 @@
+/*
+ * Dynamic function tracing support.
+ *
+ * Copyright (C) 2008 Shaohua Li <shaohua.li@intel.com>
+ *
+ * For licencing details, see COPYING.
+ *
+ * Defines low-level handling of mcount calls when the kernel
+ * is compiled with the -pg flag. When using dynamic ftrace, the
+ * mcount call-sites get patched lazily with NOP till they are
+ * enabled. All code mutation routines here take effect atomically.
+ */
+
+#include <linux/uaccess.h>
+#include <linux/ftrace.h>
+
+#include <asm/cacheflush.h>
+#include <asm/patch.h>
+
+/* In IA64, each function will be added below two bundles with -pg option */
+static unsigned char __attribute__((aligned(8)))
+ftrace_orig_code[MCOUNT_INSN_SIZE] = {
+	0x02, 0x40, 0x31, 0x10, 0x80, 0x05, /* alloc r40=ar.pfs,12,8,0 */
+	0xb0, 0x02, 0x00, 0x00, 0x42, 0x40, /* mov r43=r0;; */
+	0x05, 0x00, 0xc4, 0x00,             /* mov r42=b0 */
+	0x11, 0x48, 0x01, 0x02, 0x00, 0x21, /* mov r41=r1 */
+	0x00, 0x00, 0x00, 0x02, 0x00, 0x00, /* nop.i 0x0 */
+	0x08, 0x00, 0x00, 0x50              /* br.call.sptk.many b0 = _mcount;; */
+};
+
+struct ftrace_orig_insn {
+	u64 dummy1, dummy2, dummy3;
+	u64 dummy4:64-41+13;
+	u64 imm20:20;
+	u64 dummy5:3;
+	u64 sign:1;
+	u64 dummy6:4;
+};
+
+/* mcount stub will be converted below for nop */
+static unsigned char ftrace_nop_code[MCOUNT_INSN_SIZE] = {
+	0x00, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MII] nop.m 0x0 */
+	0x30, 0x00, 0x00, 0x60, 0x00, 0x00, /* mov r3=ip */
+	0x00, 0x00, 0x04, 0x00,             /* nop.i 0x0 */
+	0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0x0 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* nop.x 0x0;; */
+	0x00, 0x00, 0x04, 0x00
+};
+
+static unsigned char *ftrace_nop_replace(void)
+{
+	return ftrace_nop_code;
+}
+
+/*
+ * mcount stub will be converted below for call
+ * Note: Just the last instruction is changed against nop
+ * */
+static unsigned char __attribute__((aligned(8)))
+ftrace_call_code[MCOUNT_INSN_SIZE] = {
+	0x00, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MII] nop.m 0x0 */
+	0x30, 0x00, 0x00, 0x60, 0x00, 0x00, /* mov r3=ip */
+	0x00, 0x00, 0x04, 0x00,             /* nop.i 0x0 */
+	0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0x0 */
+	0xff, 0xff, 0xff, 0xff, 0x7f, 0x00, /* brl.many .;; */
+	0xf8, 0xff, 0xff, 0xc8
+};
+
+struct ftrace_call_insn {
+	u64 dummy1, dummy2;
+	u64 dummy3:48;
+	u64 imm39_l:16;
+	u64 imm39_h:23;
+	u64 dummy4:13;
+	u64 imm20:20;
+	u64 dummy5:3;
+	u64 i:1;
+	u64 dummy6:4;
+};
+
+static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
+{
+	struct ftrace_call_insn *code = (void *)ftrace_call_code;
+	unsigned long offset = addr - (ip + 0x10);
+
+	code->imm39_l = offset >> 24;
+	code->imm39_h = offset >> 40;
+	code->imm20 = offset >> 4;
+	code->i = offset >> 63;
+	return ftrace_call_code;
+}
+
+static int
+ftrace_modify_code(unsigned long ip, unsigned char *old_code,
+		   unsigned char *new_code, int do_check)
+{
+	unsigned char replaced[MCOUNT_INSN_SIZE];
+
+	/*
+	 * Note: Due to modules and __init, code can
+	 * disappear and change, we need to protect against faulting
+	 * as well as code changing. We do this by using the
+	 * probe_kernel_* functions.
+	 *
+	 * No real locking needed, this code is run through
+	 * kstop_machine, or before SMP starts.
+	 */
+
+	if (!do_check)
+		goto skip_check;
+
+	/* read the text we want to modify */
+	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
+		return -EFAULT;
+
+	/* Make sure it is what we expect it to be */
+	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
+		return -EINVAL;
+
+skip_check:
+	/* replace the text with the new text */
+	if (probe_kernel_write(((void *)ip), new_code, MCOUNT_INSN_SIZE))
+		return -EPERM;
+	flush_icache_range(ip, ip + MCOUNT_INSN_SIZE);
+
+	return 0;
+}
+
+static int ftrace_make_nop_check(struct dyn_ftrace *rec, unsigned long addr)
+{
+	unsigned char __attribute__((aligned(8))) replaced[MCOUNT_INSN_SIZE];
+	unsigned long ip = rec->ip;
+
+	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
+		return -EFAULT;
+	if (rec->flags & FTRACE_FL_CONVERTED) {
+		struct ftrace_call_insn *call_insn, *tmp_call;
+
+		call_insn = (void *)ftrace_call_code;
+		tmp_call = (void *)replaced;
+		call_insn->imm39_l = tmp_call->imm39_l;
+		call_insn->imm39_h = tmp_call->imm39_h;
+		call_insn->imm20 = tmp_call->imm20;
+		call_insn->i = tmp_call->i;
+		if (memcmp(replaced, ftrace_call_code, MCOUNT_INSN_SIZE) != 0)
+			return -EINVAL;
+		return 0;
+	} else {
+		struct ftrace_orig_insn *call_insn, *tmp_call;
+
+		call_insn = (void *)ftrace_orig_code;
+		tmp_call = (void *)replaced;
+		call_insn->sign = tmp_call->sign;
+		call_insn->imm20 = tmp_call->imm20;
+		if (memcmp(replaced, ftrace_orig_code, MCOUNT_INSN_SIZE) != 0)
+			return -EINVAL;
+		return 0;
+	}
+}
+
+int ftrace_make_nop(struct module *mod,
+		    struct dyn_ftrace *rec, unsigned long addr)
+{
+	int ret;
+	char *new;
+
+	ret = ftrace_make_nop_check(rec, addr);
+	if (ret)
+		return ret;
+	new = ftrace_nop_replace();
+	return ftrace_modify_code(rec->ip, NULL, new, 0);
+}
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+	unsigned long ip = rec->ip;
+	unsigned char *old, *new;
+
+	old = ftrace_nop_replace();
+	new = ftrace_call_replace(ip, addr);
+	return ftrace_modify_code(ip, old, new, 1);
+}
+
+/* in IA64, _mcount can't directly call ftrace_stub. Only jump is ok */
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+	unsigned long ip;
+	unsigned long addr = ((struct fnptr *)ftrace_call)->ip;
+
+	if (func == ftrace_stub)
+		return 0;
+	ip = ((struct fnptr *)func)->ip;
+
+	ia64_patch_imm64(addr + 2, ip);
+
+	flush_icache_range(addr, addr + 16);
+	return 0;
+}
+
+/* run from kstop_machine */
+int __init ftrace_dyn_arch_init(void *data)
+{
+	*(unsigned long *)data = 0;
+
+	return 0;
+}
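One ia64 detail worth spelling out for ftrace_update_ftrace_func() above: C
function pointers on ia64 are function descriptors, pairs of (entry ip, gp),
so both ftrace_call and the tracer func have to be dereferenced through
struct fnptr before a raw entry address is available. Below is a small sketch
of that layout and lookup; the struct mirrors the fnptr definition in
asm/types.h, and the helper name is made up for illustration.

/* Sketch only: the ia64 function-descriptor layout behind the casts above. */
struct fnptr_sketch {
	unsigned long ip;	/* entry address of the code */
	unsigned long gp;	/* global pointer that code expects */
};

static unsigned long descriptor_entry(const void *func)
{
	/* what ((struct fnptr *)func)->ip does in ftrace_update_ftrace_func() */
	return ((const struct fnptr_sketch *)func)->ip;
}

ia64_patch_imm64() then rewrites the 64-bit immediate of the movl in the
ftrace_call bundle with that entry address, so the trampoline's indirect call
starts landing on the new tracer.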