aboutsummaryrefslogtreecommitdiffstats
path: root/arch/sh
diff options
context:
space:
mode:
author		Matt Fleming <mjf@gentoo.org>	2008-11-12 06:11:47 -0500
committer	Paul Mundt <lethal@linux-sh.org>	2008-12-22 04:42:52 -0500
commit		fad57feba77d2e5b183e068cb6b90693e4567b40 (patch)
tree		478788d5a05a30f638540d345e9d09c5733687da /arch/sh
parent		ef6aff6884408db95ceb0f678f583536e0bd48f8 (diff)
sh: dynamic ftrace support.
First cut at dynamic ftrace support.

Signed-off-by: Matt Fleming <mjf@gentoo.org>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/sh')
-rw-r--r--	arch/sh/Kconfig			|   2
-rw-r--r--	arch/sh/include/asm/ftrace.h	|  21
-rw-r--r--	arch/sh/kernel/Makefile_32	|   6
-rw-r--r--	arch/sh/kernel/entry-common.S	|  44
-rw-r--r--	arch/sh/kernel/ftrace.c		| 109
-rw-r--r--	arch/sh/lib/Makefile		|   1
-rw-r--r--	arch/sh/lib/mcount.S		|  90
7 files changed, 229 insertions(+), 44 deletions(-)
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 5c9cbfc14c4d..fd2c02d614ba 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -25,6 +25,8 @@ config SUPERH32
 	select HAVE_KRETPROBES
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_FUNCTION_TRACER
+	select HAVE_FTRACE_MCOUNT_RECORD
+	select HAVE_DYNAMIC_FTRACE
 
 config SUPERH64
 	def_bool y if CPU_SH5
diff --git a/arch/sh/include/asm/ftrace.h b/arch/sh/include/asm/ftrace.h
index 3aed362c9463..4cb5dbfc404a 100644
--- a/arch/sh/include/asm/ftrace.h
+++ b/arch/sh/include/asm/ftrace.h
@@ -1,8 +1,29 @@
 #ifndef __ASM_SH_FTRACE_H
 #define __ASM_SH_FTRACE_H
 
+#ifdef CONFIG_FUNCTION_TRACER
+
+#define MCOUNT_INSN_SIZE	4 /* sizeof mcount call */
+
 #ifndef __ASSEMBLY__
 extern void mcount(void);
+
+#define MCOUNT_ADDR		((long)(mcount))
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+#define CALLER_ADDR		((long)(ftrace_caller))
+#define STUB_ADDR		((long)(ftrace_stub))
+
+#define MCOUNT_INSN_OFFSET	((STUB_ADDR - CALLER_ADDR) >> 1)
+#endif
+
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+	/* 'addr' is the memory table address. */
+	return addr;
+}
 #endif
 
+#endif /* CONFIG_FUNCTION_TRACER */
+
 #endif /* __ASM_SH_FTRACE_H */
diff --git a/arch/sh/kernel/Makefile_32 b/arch/sh/kernel/Makefile_32
index 48edfb145fb4..76fcac1596ce 100644
--- a/arch/sh/kernel/Makefile_32
+++ b/arch/sh/kernel/Makefile_32
@@ -4,6 +4,11 @@
 
 extra-y	:= head_32.o init_task.o vmlinux.lds
 
+ifdef CONFIG_FUNCTION_TRACER
+# Do not profile debug and lowlevel utilities
+CFLAGS_REMOVE_ftrace.o = -pg
+endif
+
 obj-y	:= debugtraps.o io.o io_generic.o irq.o machvec.o process_32.o \
 	   ptrace_32.o setup.o signal_32.o sys_sh.o sys_sh32.o \
 	   syscalls_32.o time_32.o topology.o traps.o traps_32.o
@@ -24,5 +29,6 @@ obj-$(CONFIG_STACKTRACE) += stacktrace.o
 obj-$(CONFIG_IO_TRAPPED)	+= io_trapped.o
 obj-$(CONFIG_KPROBES)		+= kprobes.o
 obj-$(CONFIG_GENERIC_GPIO)	+= gpio.o
+obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
 
 EXTRA_CFLAGS += -Werror
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
index 5b7efc4016fa..efbb4268875e 100644
--- a/arch/sh/kernel/entry-common.S
+++ b/arch/sh/kernel/entry-common.S
@@ -371,47 +371,3 @@ syscall_exit:
 #endif
 7:	.long	do_syscall_trace_enter
 8:	.long	do_syscall_trace_leave
-
-#ifdef CONFIG_FUNCTION_TRACER
-	.align 2
-	.globl	_mcount
-	.type	_mcount,@function
-	.globl	mcount
-	.type	mcount,@function
-_mcount:
-mcount:
-	mov.l	r4, @-r15
-	mov.l	r5, @-r15
-	mov.l	r6, @-r15
-	mov.l	r7, @-r15
-	sts.l	pr, @-r15
-
-	mov.l	@(20,r15),r4
-	sts	pr, r5
-
-	mov.l	1f, r6
-	mov.l	ftrace_stub, r7
-	cmp/eq	r6, r7
-	bt	skip_trace
-
-	mov.l	@r6, r6
-	jsr	@r6
-	 nop
-
-skip_trace:
-
-	lds.l	@r15+, pr
-	mov.l	@r15+, r7
-	mov.l	@r15+, r6
-	mov.l	@r15+, r5
-	rts
-	 mov.l	@r15+, r4
-
-	.align 2
-1:	.long	ftrace_trace_function
-
-	.globl	ftrace_stub
-ftrace_stub:
-	rts
-	 nop
-#endif /* CONFIG_FUNCTION_TRACER */
diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c
new file mode 100644
index 000000000000..6c193d56c2e7
--- /dev/null
+++ b/arch/sh/kernel/ftrace.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2008 Matt Fleming <mjf@gentoo.org>
+ *
+ * Code for replacing ftrace calls with jumps.
+ *
+ * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
+ *
+ * Thanks goes to Ingo Molnar, for suggesting the idea.
+ * Mathieu Desnoyers, for suggesting postponing the modifications.
+ * Arjan van de Ven, for keeping me straight, and explaining to me
+ * the dangers of modifying code on the run.
+ */
+#include <linux/uaccess.h>
+#include <linux/ftrace.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <asm/ftrace.h>
+#include <asm/cacheflush.h>
+
+static unsigned char ftrace_nop[] = {
+	0x09, 0x00,		/* nop */
+	0x09, 0x00,		/* nop */
+};
+
+static unsigned char ftrace_replaced_code[MCOUNT_INSN_SIZE];
+
+unsigned char *ftrace_nop_replace(void)
+{
+	return ftrace_nop;
+}
+
+static int is_sh_nop(unsigned char *ip)
+{
+	return strncmp(ip, ftrace_nop, sizeof(ftrace_nop));
+}
+
+unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
+{
+	/* Place the address in the memory table. */
+	if (addr == CALLER_ADDR)
+		__raw_writel(addr + MCOUNT_INSN_OFFSET, ftrace_replaced_code);
+	else
+		__raw_writel(addr, ftrace_replaced_code);
+
+	/*
+	 * No locking needed, this must be called via kstop_machine
+	 * which in essence is like running on a uniprocessor machine.
+	 */
+	return ftrace_replaced_code;
+}
+
+int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
+		       unsigned char *new_code)
+{
+	unsigned char replaced[MCOUNT_INSN_SIZE];
+
+	/*
+	 * Note: Due to modules and __init, code can
+	 * disappear and change, we need to protect against faulting
+	 * as well as code changing. We do this by using the
+	 * probe_kernel_* functions.
+	 *
+	 * No real locking needed, this code is run through
+	 * kstop_machine, or before SMP starts.
+	 */
+
+	/*
+	 * If we're trying to nop out a call to a function, we instead
+	 * place a call to the address after the memory table.
+	 */
+	if (is_sh_nop(new_code) == 0)
+		__raw_writel(ip + MCOUNT_INSN_SIZE, (unsigned long)new_code);
+
+	/* read the text we want to modify */
+	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
+		return -EFAULT;
+
+	/* Make sure it is what we expect it to be */
+	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
+		return -EINVAL;
+
+	/* replace the text with the new text */
+	if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
+		return -EPERM;
+
+	flush_icache_range(ip, ip + MCOUNT_INSN_SIZE);
+
+	return 0;
+}
+
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+	unsigned long ip = (unsigned long)(&ftrace_call);
+	unsigned char old[MCOUNT_INSN_SIZE], *new;
+
+	memcpy(old, (unsigned char *)(ip + MCOUNT_INSN_OFFSET), MCOUNT_INSN_SIZE);
+	new = ftrace_call_replace(ip, (unsigned long)func);
+
+	return ftrace_modify_code(ip + MCOUNT_INSN_OFFSET, old, new);
+}
+
+int __init ftrace_dyn_arch_init(void *data)
+{
+	/* The return code is returned via data */
+	__raw_writel(0, (unsigned long)data);
+
+	return 0;
+}
diff --git a/arch/sh/lib/Makefile b/arch/sh/lib/Makefile
index 8596cc78e18d..596421821d08 100644
--- a/arch/sh/lib/Makefile
+++ b/arch/sh/lib/Makefile
@@ -11,6 +11,7 @@ memcpy-y := memcpy.o
 memcpy-$(CONFIG_CPU_SH4)	:= memcpy-sh4.o
 
 lib-$(CONFIG_MMU)	+= copy_page.o clear_page.o
+lib-$(CONFIG_FUNCTION_TRACER)	+= mcount.o
 lib-y	+= $(memcpy-y)
 
 EXTRA_CFLAGS += -Werror
diff --git a/arch/sh/lib/mcount.S b/arch/sh/lib/mcount.S
new file mode 100644
index 000000000000..110fbfe1831f
--- /dev/null
+++ b/arch/sh/lib/mcount.S
@@ -0,0 +1,90 @@
+/*
+ * arch/sh/lib/mcount.S
+ *
+ * Copyright (C) 2008  Paul Mundt
+ * Copyright (C) 2008  Matt Fleming
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <asm/ftrace.h>
+
+#define MCOUNT_ENTER()		\
+	mov.l	r4, @-r15;	\
+	mov.l	r5, @-r15;	\
+	mov.l	r6, @-r15;	\
+	mov.l	r7, @-r15;	\
+	sts.l	pr, @-r15;	\
+				\
+	mov.l	@(20,r15),r4;	\
+	sts	pr, r5
+
+#define MCOUNT_LEAVE()		\
+	lds.l	@r15+, pr;	\
+	mov.l	@r15+, r7;	\
+	mov.l	@r15+, r6;	\
+	mov.l	@r15+, r5;	\
+	rts;			\
+	 mov.l	@r15+, r4
+
+	.align 2
+	.globl	_mcount
+	.type	_mcount,@function
+	.globl	mcount
+	.type	mcount,@function
+_mcount:
+mcount:
+	MCOUNT_ENTER()
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+	.globl	mcount_call
+mcount_call:
+	mov.l	.Lftrace_stub, r6
+#else
+	mov.l	.Lftrace_trace_function, r6
+	mov.l	ftrace_stub, r7
+	cmp/eq	r6, r7
+	bt	skip_trace
+	mov.l	@r6, r6
+#endif
+
+	jsr	@r6
+	 nop
+
+skip_trace:
+	MCOUNT_LEAVE()
+
+	.align 2
+.Lftrace_trace_function:
+	.long	ftrace_trace_function
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+	.globl	ftrace_caller
+ftrace_caller:
+	MCOUNT_ENTER()
+
+	.globl	ftrace_call
+ftrace_call:
+	mov.l	.Lftrace_stub, r6
+	jsr	@r6
+	 nop
+
+	MCOUNT_LEAVE()
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+/*
+ * NOTE: From here on the locations of the .Lftrace_stub label and
+ * ftrace_stub itself are fixed. Adding additional data here will skew
+ * the displacement for the memory table and break the block replacement.
+ * Place new labels either after the ftrace_stub body, or before
+ * ftrace_caller. You have been warned.
+ */
+	.align 2
+.Lftrace_stub:
+	.long	ftrace_stub
+
+	.globl	ftrace_stub
+ftrace_stub:
+	rts
+	 nop