 arch/x86/kernel/Makefile |   1
 arch/x86/kernel/ftrace.c | 237
 include/linux/ftrace.h   |  18
 kernel/trace/Kconfig     |  17
 kernel/trace/ftrace.c    | 356
 5 files changed, 597 insertions(+), 32 deletions(-)
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 5e618c3b4720..e142091524b0 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -56,6 +56,7 @@ obj-$(CONFIG_X86_MPPARSE) += mpparse.o
 obj-$(CONFIG_X86_LOCAL_APIC)	+= apic_$(BITS).o nmi_$(BITS).o
 obj-$(CONFIG_X86_IO_APIC)	+= io_apic_$(BITS).o
 obj-$(CONFIG_X86_REBOOTFIXUPS)	+= reboot_fixups_32.o
+obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
 obj-$(CONFIG_KEXEC)		+= machine_kexec_$(BITS).o
 obj-$(CONFIG_KEXEC)		+= relocate_kernel_$(BITS).o crash.o
 obj-$(CONFIG_CRASH_DUMP)	+= crash_dump_$(BITS).o
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
new file mode 100644
index 000000000000..5dd58136ef02
--- /dev/null
+++ b/arch/x86/kernel/ftrace.c
@@ -0,0 +1,237 @@
+/*
+ * Code for replacing ftrace calls with jumps.
+ *
+ * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
+ *
+ * Thanks go to Ingo Molnar, for suggesting the idea.
+ * Mathieu Desnoyers, for suggesting postponing the modifications.
+ * Arjan van de Ven, for keeping me straight, and explaining to me
+ * the dangers of modifying code on the run.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/hardirq.h>
+#include <linux/ftrace.h>
+#include <linux/percpu.h>
+#include <linux/init.h>
+#include <linux/list.h>
+
+#define CALL_BACK 5
+
+#define JMPFWD 0x03eb
+
+static unsigned short ftrace_jmp = JMPFWD;
+
+struct ftrace_record {
+	struct dyn_ftrace	rec;
+	int			failed;
+} __attribute__((packed));
+
+struct ftrace_page {
+	struct ftrace_page	*next;
+	int			index;
+	struct ftrace_record	records[];
+} __attribute__((packed));
+
+#define ENTRIES_PER_PAGE \
+	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct ftrace_record))
+
+/* estimate from running different kernels */
+#define NR_TO_INIT 10000
+
+#define MCOUNT_ADDR ((long)(&mcount))
+
+union ftrace_code_union {
+	char code[5];
+	struct {
+		char e8;
+		int offset;
+	} __attribute__((packed));
+};
+
+static struct ftrace_page *ftrace_pages_start;
+static struct ftrace_page *ftrace_pages;
+
+notrace struct dyn_ftrace *ftrace_alloc_shutdown_node(unsigned long ip)
+{
+	struct ftrace_record *rec;
+	unsigned short save;
+
+	ip -= CALL_BACK;
+	save = *(short *)ip;
+
+	/* If this was already converted, skip it */
+	if (save == JMPFWD)
+		return NULL;
+
+	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
+		if (!ftrace_pages->next)
+			return NULL;
+		ftrace_pages = ftrace_pages->next;
+	}
+
+	rec = &ftrace_pages->records[ftrace_pages->index++];
+
+	return &rec->rec;
+}
+
+static int notrace
+ftrace_modify_code(unsigned long ip, unsigned char *old_code,
+		   unsigned char *new_code)
+{
+	unsigned short old = *(unsigned short *)old_code;
+	unsigned short new = *(unsigned short *)new_code;
+	unsigned short replaced;
+	int faulted = 0;
+
+	/*
+	 * Note: Due to modules and __init, code can
+	 * disappear and change, we need to protect against faulting
+	 * as well as code changing.
+	 *
+	 * No real locking needed, this code is run through
+	 * kstop_machine.
+	 */
+	asm volatile (
+		"1: lock\n"
+		"   cmpxchg %w3, (%2)\n"
+		"2:\n"
+		".section .fixup, \"ax\"\n"
+		"3: movl $1, %0\n"
+		"   jmp 2b\n"
+		".previous\n"
+		_ASM_EXTABLE(1b, 3b)
+		: "=r"(faulted), "=a"(replaced)
+		: "r"(ip), "r"(new), "0"(faulted), "a"(old)
+		: "memory");
+	sync_core();
+
+	if (replaced != old)
+		faulted = 2;
+
+	return faulted;
+}
+
+static int notrace ftrace_calc_offset(long ip)
+{
+	return (int)(MCOUNT_ADDR - ip);
+}
+
+notrace void ftrace_code_disable(struct dyn_ftrace *rec)
+{
+	unsigned long ip;
+	union ftrace_code_union save;
+	struct ftrace_record *r =
+		container_of(rec, struct ftrace_record, rec);
+
+	ip = rec->ip;
+
+	save.e8 = 0xe8;
+	save.offset = ftrace_calc_offset(ip);
+
+	/* move the IP back to the start of the call */
+	ip -= CALL_BACK;
+
+	r->failed = ftrace_modify_code(ip, save.code, (char *)&ftrace_jmp);
+}
+
+static void notrace ftrace_replace_code(int saved)
+{
+	unsigned char *new = NULL, *old = NULL;
+	struct ftrace_record *rec;
+	struct ftrace_page *pg;
+	unsigned long ip;
+	int i;
+
+	if (saved)
+		old = (char *)&ftrace_jmp;
+	else
+		new = (char *)&ftrace_jmp;
+
+	for (pg = ftrace_pages_start; pg; pg = pg->next) {
+		for (i = 0; i < pg->index; i++) {
+			union ftrace_code_union calc;
+			rec = &pg->records[i];
+
+			/* don't modify code that has already faulted */
+			if (rec->failed)
+				continue;
+
+			ip = rec->rec.ip;
+
+			calc.e8 = 0xe8;
+			calc.offset = ftrace_calc_offset(ip);
+
+			if (saved)
+				new = calc.code;
+			else
+				old = calc.code;
+
+			ip -= CALL_BACK;
+
+			rec->failed = ftrace_modify_code(ip, old, new);
+		}
+	}
+
+}
+
+notrace void ftrace_startup_code(void)
+{
+	ftrace_replace_code(1);
+}
+
+notrace void ftrace_shutdown_code(void)
+{
+	ftrace_replace_code(0);
+}
+
+notrace void ftrace_shutdown_replenish(void)
+{
+	if (ftrace_pages->next)
+		return;
+
+	/* allocate another page */
+	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
+}
+
+notrace int ftrace_shutdown_arch_init(void)
+{
+	struct ftrace_page *pg;
+	int cnt;
+	int i;
+
+	/* allocate a few pages */
+	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
+	if (!ftrace_pages_start)
+		return -1;
+
+	/*
+	 * Allocate a few more pages.
+	 *
+	 * TODO: have some parser search vmlinux before
+	 * final linking to find all calls to ftrace.
+	 * Then we can:
+	 * a) know how many pages to allocate.
+	 * and/or
+	 * b) set up the table then.
+	 *
+	 * The dynamic code is still necessary for
+	 * modules.
+	 */
+
+	pg = ftrace_pages = ftrace_pages_start;
+
+	cnt = NR_TO_INIT / ENTRIES_PER_PAGE;
+
+	for (i = 0; i < cnt; i++) {
+		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
+
+		/* If we fail, we'll try later anyway */
+		if (!pg->next)
+			break;
+
+		pg = pg->next;
+	}
+
+	return 0;
+}
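
A note on the two magic constants in this file: a compiled-in call to mcount is the 5-byte sequence e8 <rel32>, and disabling it rewrites only the first two bytes with eb 03, a short jump that hops over the three stale displacement bytes; 0x03eb is just those two bytes read as a little-endian short. Re-enabling is the reverse, and the displacement math works out because rec->ip is the call's return address (call site + CALL_BACK), so MCOUNT_ADDR - ip is exactly the rel32 a near call wants. A minimal userspace sketch of the two-byte swap (not part of the patch; the buffer and its displacement bytes are made up, and little-endian x86 is assumed):

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* a pretend 5-byte call site: e8 <rel32>, displacement arbitrary */
	unsigned char site[5] = { 0xe8, 0x12, 0x34, 0x56, 0x78 };
	/* "eb 03" (jmp .+3) stored as a little-endian short, like JMPFWD */
	unsigned short jmpfwd = 0x03eb;
	int i;

	/* the same two-byte overwrite ftrace_modify_code() performs */
	memcpy(site, &jmpfwd, sizeof(jmpfwd));

	for (i = 0; i < 5; i++)
		printf("%02x ", site[i]);	/* eb 03 34 56 78 */
	printf("\n");

	return 0;
}
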
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 740c97dcf9cb..90dbc0ee2046 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -32,6 +32,24 @@ extern void mcount(void);
 # define clear_ftrace_function(ops) do { } while (0)
 #endif /* CONFIG_FTRACE */
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+# define FTRACE_HASHBITS	10
+# define FTRACE_HASHSIZE	(1<<FTRACE_HASHBITS)
+
+struct dyn_ftrace {
+	struct hlist_node node;
+	unsigned long ip;
+};
+
+/* defined in arch */
+extern struct dyn_ftrace *
+ftrace_alloc_shutdown_node(unsigned long ip);
+extern int ftrace_shutdown_arch_init(void);
+extern void ftrace_code_disable(struct dyn_ftrace *rec);
+extern void ftrace_startup_code(void);
+extern void ftrace_shutdown_code(void);
+extern void ftrace_shutdown_replenish(void);
+#endif
 
 #ifdef CONFIG_FRAME_POINTER
 /* TODO: need to fix this for ARM */
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 6430016b98e8..cad9db1dee02 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -88,3 +88,20 @@ config CONTEXT_SWITCH_TRACER
 	  This tracer gets called from the context switch and records
 	  all switching of tasks.
 
+config DYNAMIC_FTRACE
+	bool "enable/disable ftrace tracepoints dynamically"
+	depends on FTRACE
+	default y
+	help
+	  This option will modify all the calls to ftrace dynamically
+	  (will patch them out of the binary image and replace them
+	  with a No-Op instruction) as they are called. A table is
+	  created to dynamically enable them again.
+
+	  This way a CONFIG_FTRACE kernel is slightly larger, but otherwise
+	  has native performance as long as no tracing is active.
+
+	  The changes to the code are done by a kernel thread that
+	  wakes up once a second and checks to see if any ftrace calls
+	  were made. If so, it runs stop_machine (stops all CPUs)
+	  and modifies the code to jump over the call to ftrace.
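
The once-a-second flow this help text describes is implemented by the ftraced() thread in the next file of this patch; condensed to its skeleton (names taken from that implementation; error handling and the suspend/trigger bookkeeping are elided):

static int ftraced_sketch(void *ignore)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);		/* wake once a second */

		if (ftraced_trigger) {		/* new call sites recorded? */
			/* park every CPU, then patch the call sites in peace */
			stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
			ftraced_trigger = 0;
		}
	}
	return 0;
}
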
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index b6a80b98a3fb..d1ae2ba25274 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -13,10 +13,19 @@
  * Copyright (C) 2004 William Lee Irwin III
  */
 
-#include <linux/module.h>
+#include <linux/stop_machine.h>
+#include <linux/clocksource.h>
+#include <linux/kallsyms.h>
+#include <linux/kthread.h>
+#include <linux/hardirq.h>
 #include <linux/ftrace.h>
+#include <linux/module.h>
+#include <linux/hash.h>
+#include <linux/list.h>
+
+#include "trace.h"
 
-static DEFINE_SPINLOCK(ftrace_func_lock);
+static DEFINE_SPINLOCK(ftrace_lock);
 static struct ftrace_ops ftrace_list_end __read_mostly =
 {
 	.func = ftrace_stub,
@@ -44,21 +53,21 @@ notrace void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
 }
 
 /**
- * register_ftrace_function - register a function for profiling
- * @ops - ops structure that holds the function for profiling.
- *
- * Register a function to be called by all functions in the
- * kernel.
+ * clear_ftrace_function - reset the ftrace function
  *
- * Note: @ops->func and all the functions it calls must be labeled
- *       with "notrace", otherwise it will go into a
- *       recursive loop.
+ * This NULLs the ftrace function and in essence stops
+ * tracing. There may be lag
  */
-int register_ftrace_function(struct ftrace_ops *ops)
+void clear_ftrace_function(void)
 {
-	unsigned long flags;
+	ftrace_trace_function = ftrace_stub;
+}
+
+static int notrace __register_ftrace_function(struct ftrace_ops *ops)
+{
+	/* Should never be called by interrupts */
+	spin_lock(&ftrace_lock);
 
-	spin_lock_irqsave(&ftrace_func_lock, flags);
 	ops->next = ftrace_list;
 	/*
 	 * We are entering ops into the ftrace_list but another
@@ -68,6 +77,7 @@ int register_ftrace_function(struct ftrace_ops *ops)
 	 */
 	smp_wmb();
 	ftrace_list = ops;
+
 	/*
 	 * For one func, simply call it directly.
 	 * For more than one func, call the chain.
@@ -76,28 +86,22 @@ int register_ftrace_function(struct ftrace_ops *ops)
 		ftrace_trace_function = ops->func;
 	else
 		ftrace_trace_function = ftrace_list_func;
-	spin_unlock_irqrestore(&ftrace_func_lock, flags);
+
+	spin_unlock(&ftrace_lock);
 
 	return 0;
 }
 
-/**
- * unregister_ftrace_function - unresgister a function for profiling.
- * @ops - ops structure that holds the function to unregister
- *
- * Unregister a function that was added to be called by ftrace profiling.
- */
-int unregister_ftrace_function(struct ftrace_ops *ops)
+static int notrace __unregister_ftrace_function(struct ftrace_ops *ops)
 {
-	unsigned long flags;
 	struct ftrace_ops **p;
 	int ret = 0;
 
-	spin_lock_irqsave(&ftrace_func_lock, flags);
+	spin_lock(&ftrace_lock);
 
 	/*
-	 * If we are the only function, then the ftrace pointer is
-	 * pointing directly to that function.
+	 * If we are removing the last function, then simply point
+	 * to the ftrace_stub.
 	 */
 	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
 		ftrace_trace_function = ftrace_stub;
@@ -117,22 +121,310 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
 	*p = (*p)->next;
 
 	/* If we only have one func left, then call that directly */
-	if (ftrace_list->next == &ftrace_list_end)
+	if (ftrace_list == &ftrace_list_end ||
+	    ftrace_list->next == &ftrace_list_end)
 		ftrace_trace_function = ftrace_list->func;
 
  out:
-	spin_unlock_irqrestore(&ftrace_func_lock, flags);
+	spin_unlock(&ftrace_lock);
+
+	return ret;
+}
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];
+
+static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);
+
+static DEFINE_SPINLOCK(ftrace_shutdown_lock);
+static DEFINE_MUTEX(ftraced_lock);
+
+static int ftraced_trigger;
+static int ftraced_suspend;
+
+static int ftrace_record_suspend;
+
+static inline int
+notrace ftrace_ip_in_hash(unsigned long ip, unsigned long key)
+{
+	struct dyn_ftrace *p;
+	struct hlist_node *t;
+	int found = 0;
+
+	hlist_for_each_entry(p, t, &ftrace_hash[key], node) {
+		if (p->ip == ip) {
+			found = 1;
+			break;
+		}
+	}
+
+	return found;
+}
+
+static inline void notrace
+ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
+{
+	hlist_add_head(&node->node, &ftrace_hash[key]);
+}
+
+static void notrace
+ftrace_record_ip(unsigned long ip, unsigned long parent_ip)
+{
+	struct dyn_ftrace *node;
+	unsigned long flags;
+	unsigned long key;
+	int resched;
+	int atomic;
+
+	resched = need_resched();
+	preempt_disable_notrace();
+
+	/* We simply need to protect against recursion */
+	__get_cpu_var(ftrace_shutdown_disable_cpu)++;
+	if (__get_cpu_var(ftrace_shutdown_disable_cpu) != 1)
+		goto out;
+
+	if (unlikely(ftrace_record_suspend))
+		goto out;
+
+	key = hash_long(ip, FTRACE_HASHBITS);
+
+	WARN_ON_ONCE(key >= FTRACE_HASHSIZE);
+
+	if (ftrace_ip_in_hash(ip, key))
+		goto out;
+
+	atomic = irqs_disabled();
+
+	spin_lock_irqsave(&ftrace_shutdown_lock, flags);
+
+	/* This ip may have hit the hash before the lock */
+	if (ftrace_ip_in_hash(ip, key))
+		goto out_unlock;
+
+	/*
+	 * There's a slight race that the ftraced will update the
+	 * hash and reset here. The arch alloc is responsible
+	 * for seeing if the IP has already changed, and if
+	 * it has, the alloc will fail.
+	 */
+	node = ftrace_alloc_shutdown_node(ip);
+	if (!node)
+		goto out_unlock;
+
+	node->ip = ip;
+
+	ftrace_add_hash(node, key);
+
+	ftraced_trigger = 1;
+
+ out_unlock:
+	spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
+ out:
+	__get_cpu_var(ftrace_shutdown_disable_cpu)--;
+
+	/* prevent recursion with scheduler */
+	if (resched)
+		preempt_enable_no_resched_notrace();
+	else
+		preempt_enable_notrace();
+}
+
+static struct ftrace_ops ftrace_shutdown_ops __read_mostly =
+{
+	.func = ftrace_record_ip,
+};
+
+
+static int notrace __ftrace_modify_code(void *data)
+{
+	void (*func)(void) = data;
+
+	func();
+	return 0;
+}
+
+static void notrace ftrace_run_startup_code(void)
+{
+	stop_machine_run(__ftrace_modify_code, ftrace_startup_code, NR_CPUS);
+}
+
+static void notrace ftrace_run_shutdown_code(void)
+{
+	stop_machine_run(__ftrace_modify_code, ftrace_shutdown_code, NR_CPUS);
+}
+
+static void notrace ftrace_startup(void)
+{
+	mutex_lock(&ftraced_lock);
+	ftraced_suspend++;
+	if (ftraced_suspend != 1)
+		goto out;
+	__unregister_ftrace_function(&ftrace_shutdown_ops);
+
+	ftrace_run_startup_code();
+ out:
+	mutex_unlock(&ftraced_lock);
+}
+
+static void notrace ftrace_shutdown(void)
+{
+	mutex_lock(&ftraced_lock);
+	ftraced_suspend--;
+	if (ftraced_suspend)
+		goto out;
+
+	ftrace_run_shutdown_code();
+
+	__register_ftrace_function(&ftrace_shutdown_ops);
+ out:
+	mutex_unlock(&ftraced_lock);
+}
+
+static cycle_t		ftrace_update_time;
+static unsigned long	ftrace_update_cnt;
+unsigned long		ftrace_update_tot_cnt;
+
+static int notrace __ftrace_update_code(void *ignore)
+{
+	struct dyn_ftrace *p;
+	struct hlist_head head;
+	struct hlist_node *t;
+	cycle_t start, stop;
+	int i;
+
+	/* Don't be calling ftrace ops now */
+	__unregister_ftrace_function(&ftrace_shutdown_ops);
+
+	start = now(raw_smp_processor_id());
+	ftrace_update_cnt = 0;
+
+	/* No locks needed, the machine is stopped! */
+	for (i = 0; i < FTRACE_HASHSIZE; i++) {
+		if (hlist_empty(&ftrace_hash[i]))
+			continue;
+
+		head = ftrace_hash[i];
+		INIT_HLIST_HEAD(&ftrace_hash[i]);
+
+		/* all CPUs are stopped, we are safe to modify code */
+		hlist_for_each_entry(p, t, &head, node) {
+			ftrace_code_disable(p);
+			ftrace_update_cnt++;
+		}
+
+	}
+
+	stop = now(raw_smp_processor_id());
+	ftrace_update_time = stop - start;
+	ftrace_update_tot_cnt += ftrace_update_cnt;
+
+	__register_ftrace_function(&ftrace_shutdown_ops);
 
 	return 0;
 }
 
+static void notrace ftrace_update_code(void)
+{
+	stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
+}
+
+static int notrace ftraced(void *ignore)
+{
+	unsigned long usecs;
+
+	set_current_state(TASK_INTERRUPTIBLE);
+
+	while (!kthread_should_stop()) {
+
+		/* check once a second */
+		schedule_timeout(HZ);
+
+		mutex_lock(&ftraced_lock);
+		if (ftraced_trigger && !ftraced_suspend) {
+			ftrace_record_suspend++;
+			ftrace_update_code();
+			usecs = nsecs_to_usecs(ftrace_update_time);
+			if (ftrace_update_tot_cnt > 100000) {
+				pr_info("hm, dftrace overflow: %lu change%s"
+					" (%lu total) in %lu usec%s\n",
+					ftrace_update_cnt,
+					ftrace_update_cnt != 1 ? "s" : "",
+					ftrace_update_tot_cnt,
+					usecs, usecs != 1 ? "s" : "");
+				ftrace_update_tot_cnt = 0;
+				WARN_ON_ONCE(1);
+			}
+			ftraced_trigger = 0;
+			ftrace_record_suspend--;
+		}
+		mutex_unlock(&ftraced_lock);
+
+		ftrace_shutdown_replenish();
+
+		set_current_state(TASK_INTERRUPTIBLE);
+	}
+	__set_current_state(TASK_RUNNING);
+	return 0;
+}
+
+static int __init notrace ftrace_shutdown_init(void)
+{
+	struct task_struct *p;
+	int ret;
+
+	ret = ftrace_shutdown_arch_init();
+	if (ret)
+		return ret;
+
+	p = kthread_run(ftraced, NULL, "ftraced");
+	if (IS_ERR(p))
+		return -1;
+
+	__register_ftrace_function(&ftrace_shutdown_ops);
+
+	return 0;
+}
+
+core_initcall(ftrace_shutdown_init);
+#else
+# define ftrace_startup()	  do { } while (0)
+# define ftrace_shutdown()	  do { } while (0)
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
 /**
- * clear_ftrace_function - reset the ftrace function
+ * register_ftrace_function - register a function for profiling
+ * @ops - ops structure that holds the function for profiling.
  *
- * This NULLs the ftrace function and in essence stops
- * tracing. There may be lag
+ * Register a function to be called by all functions in the
+ * kernel.
+ *
+ * Note: @ops->func and all the functions it calls must be labeled
+ *       with "notrace", otherwise it will go into a
+ *       recursive loop.
  */
-void clear_ftrace_function(void)
+int register_ftrace_function(struct ftrace_ops *ops)
 {
-	ftrace_trace_function = ftrace_stub;
+	ftrace_startup();
+
+	return __register_ftrace_function(ops);
+}
+
+/**
+ * unregister_ftrace_function - unregister a function for profiling.
+ * @ops - ops structure that holds the function to unregister
+ *
+ * Unregister a function that was added to be called by ftrace profiling.
+ */
+int unregister_ftrace_function(struct ftrace_ops *ops)
+{
+	int ret;
+
+	ret = __unregister_ftrace_function(ops);
+
+	if (ftrace_list == &ftrace_list_end)
+		ftrace_shutdown();
+
+	return ret;
 }
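
For reference, a minimal (hypothetical) client of the API wrapped above; my_tracer and the initcall boilerplate are made up, but the notrace requirement comes straight from the comment on register_ftrace_function():

#include <linux/ftrace.h>
#include <linux/init.h>

/* the callback and everything it calls must be notrace */
static void notrace my_tracer(unsigned long ip, unsigned long parent_ip)
{
	/* ip = traced function, parent_ip = its caller */
}

static struct ftrace_ops my_ops __read_mostly =
{
	.func = my_tracer,
};

static int __init my_tracer_init(void)
{
	/* with DYNAMIC_FTRACE this triggers ftrace_startup(),
	 * which patches the recorded call sites back in */
	return register_ftrace_function(&my_ops);
}
core_initcall(my_tracer_init);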