Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r--  kernel/trace/ftrace.c | 691
1 file changed, 210 insertions, 481 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index f6e3af31b403..4a39d24568c8 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -25,13 +25,24 @@ | |||
25 | #include <linux/ftrace.h> | 25 | #include <linux/ftrace.h> |
26 | #include <linux/sysctl.h> | 26 | #include <linux/sysctl.h> |
27 | #include <linux/ctype.h> | 27 | #include <linux/ctype.h> |
28 | #include <linux/hash.h> | ||
29 | #include <linux/list.h> | 28 | #include <linux/list.h> |
30 | 29 | ||
31 | #include <asm/ftrace.h> | 30 | #include <asm/ftrace.h> |
32 | 31 | ||
33 | #include "trace.h" | 32 | #include "trace.h" |
34 | 33 | ||
34 | #define FTRACE_WARN_ON(cond) \ | ||
35 | do { \ | ||
36 | if (WARN_ON(cond)) \ | ||
37 | ftrace_kill(); \ | ||
38 | } while (0) | ||
39 | |||
40 | #define FTRACE_WARN_ON_ONCE(cond) \ | ||
41 | do { \ | ||
42 | if (WARN_ON_ONCE(cond)) \ | ||
43 | ftrace_kill(); \ | ||
44 | } while (0) | ||
45 | |||
35 | /* ftrace_enabled is a method to turn ftrace on or off */ | 46 | /* ftrace_enabled is a method to turn ftrace on or off */ |
36 | int ftrace_enabled __read_mostly; | 47 | int ftrace_enabled __read_mostly; |
37 | static int last_ftrace_enabled; | 48 | static int last_ftrace_enabled; |
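Note on the new FTRACE_WARN_ON()/FTRACE_WARN_ON_ONCE() macros above: they combine a warning with ftrace_kill(), so any failed sanity check also shuts tracing down, and the do { ... } while (0) wrapper keeps each macro a single statement. A minimal userspace sketch of the same composition (warn_on(), tracer_kill() and the other names here are stand-ins, not kernel APIs):

    #include <stdbool.h>
    #include <stdio.h>

    static bool tracer_disabled;                 /* models ftrace_disabled */

    static void tracer_kill(void)                /* models ftrace_kill() */
    {
        tracer_disabled = true;
    }

    static bool warn_on(bool cond, const char *what)   /* models WARN_ON() */
    {
        if (cond)
            fprintf(stderr, "WARNING: %s\n", what);
        return cond;                             /* hand the condition back to the caller */
    }

    /*
     * Like FTRACE_WARN_ON(): warn *and* disable the tracer in one statement.
     * The do { } while (0) wrapper makes the macro expand to a single
     * statement, so it is safe inside an unbraced if/else.
     */
    #define TRACER_WARN_ON(cond)            \
        do {                                \
            if (warn_on((cond), #cond))     \
                tracer_kill();              \
        } while (0)

    int main(void)
    {
        int free_flag = 0;

        TRACER_WARN_ON(free_flag != 1);      /* fires: warns, then kills tracing */
        printf("tracer_disabled = %d\n", tracer_disabled);
        return 0;
    }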
@@ -81,7 +92,7 @@ void clear_ftrace_function(void) | |||
81 | 92 | ||
82 | static int __register_ftrace_function(struct ftrace_ops *ops) | 93 | static int __register_ftrace_function(struct ftrace_ops *ops) |
83 | { | 94 | { |
84 | /* Should never be called by interrupts */ | 95 | /* should not be called from interrupt context */ |
85 | spin_lock(&ftrace_lock); | 96 | spin_lock(&ftrace_lock); |
86 | 97 | ||
87 | ops->next = ftrace_list; | 98 | ops->next = ftrace_list; |
@@ -115,6 +126,7 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) | |||
115 | struct ftrace_ops **p; | 126 | struct ftrace_ops **p; |
116 | int ret = 0; | 127 | int ret = 0; |
117 | 128 | ||
129 | /* should not be called from interrupt context */ | ||
118 | spin_lock(&ftrace_lock); | 130 | spin_lock(&ftrace_lock); |
119 | 131 | ||
120 | /* | 132 | /* |
@@ -152,8 +164,17 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) | |||
152 | } | 164 | } |
153 | 165 | ||
154 | #ifdef CONFIG_DYNAMIC_FTRACE | 166 | #ifdef CONFIG_DYNAMIC_FTRACE |
167 | #ifndef CONFIG_FTRACE_MCOUNT_RECORD | ||
168 | # error Dynamic ftrace depends on MCOUNT_RECORD | ||
169 | #endif | ||
155 | 170 | ||
156 | static struct task_struct *ftraced_task; | 171 | /* |
172 | * Since MCOUNT_ADDR may point to mcount itself, we do not want | ||
173 | * to get it confused by reading a reference in the code as we | ||
174 | * are parsing on objcopy output of text. Use a variable for | ||
175 | * it instead. | ||
176 | */ | ||
177 | static unsigned long mcount_addr = MCOUNT_ADDR; | ||
157 | 178 | ||
158 | enum { | 179 | enum { |
159 | FTRACE_ENABLE_CALLS = (1 << 0), | 180 | FTRACE_ENABLE_CALLS = (1 << 0), |
@@ -165,14 +186,9 @@ enum { | |||
165 | 186 | ||
166 | static int ftrace_filtered; | 187 | static int ftrace_filtered; |
167 | static int tracing_on; | 188 | static int tracing_on; |
168 | static int frozen_record_count; | ||
169 | 189 | ||
170 | static struct hlist_head ftrace_hash[FTRACE_HASHSIZE]; | 190 | static LIST_HEAD(ftrace_new_addrs); |
171 | 191 | ||
172 | static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu); | ||
173 | |||
174 | static DEFINE_SPINLOCK(ftrace_shutdown_lock); | ||
175 | static DEFINE_MUTEX(ftraced_lock); | ||
176 | static DEFINE_MUTEX(ftrace_regex_lock); | 192 | static DEFINE_MUTEX(ftrace_regex_lock); |
177 | 193 | ||
178 | struct ftrace_page { | 194 | struct ftrace_page { |
@@ -190,16 +206,13 @@ struct ftrace_page { | |||
190 | static struct ftrace_page *ftrace_pages_start; | 206 | static struct ftrace_page *ftrace_pages_start; |
191 | static struct ftrace_page *ftrace_pages; | 207 | static struct ftrace_page *ftrace_pages; |
192 | 208 | ||
193 | static int ftraced_trigger; | ||
194 | static int ftraced_suspend; | ||
195 | static int ftraced_stop; | ||
196 | |||
197 | static int ftrace_record_suspend; | ||
198 | |||
199 | static struct dyn_ftrace *ftrace_free_records; | 209 | static struct dyn_ftrace *ftrace_free_records; |
200 | 210 | ||
201 | 211 | ||
202 | #ifdef CONFIG_KPROBES | 212 | #ifdef CONFIG_KPROBES |
213 | |||
214 | static int frozen_record_count; | ||
215 | |||
203 | static inline void freeze_record(struct dyn_ftrace *rec) | 216 | static inline void freeze_record(struct dyn_ftrace *rec) |
204 | { | 217 | { |
205 | if (!(rec->flags & FTRACE_FL_FROZEN)) { | 218 | if (!(rec->flags & FTRACE_FL_FROZEN)) { |
@@ -226,79 +239,36 @@ static inline int record_frozen(struct dyn_ftrace *rec) | |||
226 | # define record_frozen(rec) ({ 0; }) | 239 | # define record_frozen(rec) ({ 0; }) |
227 | #endif /* CONFIG_KPROBES */ | 240 | #endif /* CONFIG_KPROBES */ |
228 | 241 | ||
229 | int skip_trace(unsigned long ip) | 242 | static void ftrace_free_rec(struct dyn_ftrace *rec) |
230 | { | 243 | { |
231 | unsigned long fl; | 244 | rec->ip = (unsigned long)ftrace_free_records; |
232 | struct dyn_ftrace *rec; | 245 | ftrace_free_records = rec; |
233 | struct hlist_node *t; | 246 | rec->flags |= FTRACE_FL_FREE; |
234 | struct hlist_head *head; | ||
235 | |||
236 | if (frozen_record_count == 0) | ||
237 | return 0; | ||
238 | |||
239 | head = &ftrace_hash[hash_long(ip, FTRACE_HASHBITS)]; | ||
240 | hlist_for_each_entry_rcu(rec, t, head, node) { | ||
241 | if (rec->ip == ip) { | ||
242 | if (record_frozen(rec)) { | ||
243 | if (rec->flags & FTRACE_FL_FAILED) | ||
244 | return 1; | ||
245 | |||
246 | if (!(rec->flags & FTRACE_FL_CONVERTED)) | ||
247 | return 1; | ||
248 | |||
249 | if (!tracing_on || !ftrace_enabled) | ||
250 | return 1; | ||
251 | |||
252 | if (ftrace_filtered) { | ||
253 | fl = rec->flags & (FTRACE_FL_FILTER | | ||
254 | FTRACE_FL_NOTRACE); | ||
255 | if (!fl || (fl & FTRACE_FL_NOTRACE)) | ||
256 | return 1; | ||
257 | } | ||
258 | } | ||
259 | break; | ||
260 | } | ||
261 | } | ||
262 | |||
263 | return 0; | ||
264 | } | 247 | } |
265 | 248 | ||
266 | static inline int | 249 | void ftrace_release(void *start, unsigned long size) |
267 | ftrace_ip_in_hash(unsigned long ip, unsigned long key) | ||
268 | { | 250 | { |
269 | struct dyn_ftrace *p; | 251 | struct dyn_ftrace *rec; |
270 | struct hlist_node *t; | 252 | struct ftrace_page *pg; |
271 | int found = 0; | 253 | unsigned long s = (unsigned long)start; |
272 | 254 | unsigned long e = s + size; | |
273 | hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) { | 255 | int i; |
274 | if (p->ip == ip) { | ||
275 | found = 1; | ||
276 | break; | ||
277 | } | ||
278 | } | ||
279 | |||
280 | return found; | ||
281 | } | ||
282 | 256 | ||
283 | static inline void | 257 | if (ftrace_disabled || !start) |
284 | ftrace_add_hash(struct dyn_ftrace *node, unsigned long key) | 258 | return; |
285 | { | ||
286 | hlist_add_head_rcu(&node->node, &ftrace_hash[key]); | ||
287 | } | ||
288 | 259 | ||
289 | /* called from kstop_machine */ | 260 | /* should not be called from interrupt context */ |
290 | static inline void ftrace_del_hash(struct dyn_ftrace *node) | 261 | spin_lock(&ftrace_lock); |
291 | { | ||
292 | hlist_del(&node->node); | ||
293 | } | ||
294 | 262 | ||
295 | static void ftrace_free_rec(struct dyn_ftrace *rec) | 263 | for (pg = ftrace_pages_start; pg; pg = pg->next) { |
296 | { | 264 | for (i = 0; i < pg->index; i++) { |
297 | /* no locking, only called from kstop_machine */ | 265 | rec = &pg->records[i]; |
298 | 266 | ||
299 | rec->ip = (unsigned long)ftrace_free_records; | 267 | if ((rec->ip >= s) && (rec->ip < e)) |
300 | ftrace_free_records = rec; | 268 | ftrace_free_rec(rec); |
301 | rec->flags |= FTRACE_FL_FREE; | 269 | } |
270 | } | ||
271 | spin_unlock(&ftrace_lock); | ||
302 | } | 272 | } |
303 | 273 | ||
304 | static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip) | 274 | static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip) |
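ftrace_release()/ftrace_free_rec() above return records to ftrace_free_records by reusing the record's ip field as the next-free link and setting FTRACE_FL_FREE, so no extra list node is needed and ftrace_alloc_dyn_node() can recycle the slot later. A small userspace model of that intrusive free-list trick (struct and function names here are illustrative only):

    #include <stdio.h>

    #define FL_FREE 0x1

    struct rec {
        unsigned long ip;     /* call-site address, or next-free link once freed */
        unsigned long flags;
    };

    static struct rec *free_list;              /* models ftrace_free_records */

    /* Push a record on the free list, reusing ->ip as the link (ftrace_free_rec). */
    static void rec_free(struct rec *r)
    {
        r->ip = (unsigned long)free_list;
        free_list = r;
        r->flags |= FL_FREE;
    }

    /* Pop a recycled record, the first half of ftrace_alloc_dyn_node(). */
    static struct rec *rec_alloc(unsigned long ip)
    {
        struct rec *r = free_list;

        if (!r)
            return NULL;                       /* nothing recycled; take a fresh slot */
        free_list = (struct rec *)r->ip;       /* follow the hidden link */
        r->flags &= ~FL_FREE;
        r->ip = ip;
        return r;
    }

    int main(void)
    {
        struct rec pool[2] = { { 0x1000, 0 }, { 0x2000, 0 } };
        struct rec *r;

        rec_free(&pool[0]);
        rec_free(&pool[1]);
        r = rec_alloc(0x3000);                 /* reuses pool[1], the last one freed */
        printf("reused slot %ld with ip %#lx\n", (long)(r - pool), r->ip);
        return 0;
    }

The trick works because a freed record's ip is never used as a code address again until the slot is handed back out by the allocator.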
@@ -310,10 +280,8 @@ static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip) | |||
310 | rec = ftrace_free_records; | 280 | rec = ftrace_free_records; |
311 | 281 | ||
312 | if (unlikely(!(rec->flags & FTRACE_FL_FREE))) { | 282 | if (unlikely(!(rec->flags & FTRACE_FL_FREE))) { |
313 | WARN_ON_ONCE(1); | 283 | FTRACE_WARN_ON_ONCE(1); |
314 | ftrace_free_records = NULL; | 284 | ftrace_free_records = NULL; |
315 | ftrace_disabled = 1; | ||
316 | ftrace_enabled = 0; | ||
317 | return NULL; | 285 | return NULL; |
318 | } | 286 | } |
319 | 287 | ||
@@ -323,79 +291,36 @@ static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip) | |||
323 | } | 291 | } |
324 | 292 | ||
325 | if (ftrace_pages->index == ENTRIES_PER_PAGE) { | 293 | if (ftrace_pages->index == ENTRIES_PER_PAGE) { |
326 | if (!ftrace_pages->next) | 294 | if (!ftrace_pages->next) { |
327 | return NULL; | 295 | /* allocate another page */ |
296 | ftrace_pages->next = | ||
297 | (void *)get_zeroed_page(GFP_KERNEL); | ||
298 | if (!ftrace_pages->next) | ||
299 | return NULL; | ||
300 | } | ||
328 | ftrace_pages = ftrace_pages->next; | 301 | ftrace_pages = ftrace_pages->next; |
329 | } | 302 | } |
330 | 303 | ||
331 | return &ftrace_pages->records[ftrace_pages->index++]; | 304 | return &ftrace_pages->records[ftrace_pages->index++]; |
332 | } | 305 | } |
333 | 306 | ||
334 | static void | 307 | static struct dyn_ftrace * |
335 | ftrace_record_ip(unsigned long ip) | 308 | ftrace_record_ip(unsigned long ip) |
336 | { | 309 | { |
337 | struct dyn_ftrace *node; | 310 | struct dyn_ftrace *rec; |
338 | unsigned long flags; | ||
339 | unsigned long key; | ||
340 | int resched; | ||
341 | int atomic; | ||
342 | int cpu; | ||
343 | 311 | ||
344 | if (!ftrace_enabled || ftrace_disabled) | 312 | if (!ftrace_enabled || ftrace_disabled) |
345 | return; | 313 | return NULL; |
346 | |||
347 | resched = need_resched(); | ||
348 | preempt_disable_notrace(); | ||
349 | |||
350 | /* | ||
351 | * We simply need to protect against recursion. | ||
352 | * Use the the raw version of smp_processor_id and not | ||
353 | * __get_cpu_var which can call debug hooks that can | ||
354 | * cause a recursive crash here. | ||
355 | */ | ||
356 | cpu = raw_smp_processor_id(); | ||
357 | per_cpu(ftrace_shutdown_disable_cpu, cpu)++; | ||
358 | if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1) | ||
359 | goto out; | ||
360 | |||
361 | if (unlikely(ftrace_record_suspend)) | ||
362 | goto out; | ||
363 | |||
364 | key = hash_long(ip, FTRACE_HASHBITS); | ||
365 | |||
366 | WARN_ON_ONCE(key >= FTRACE_HASHSIZE); | ||
367 | |||
368 | if (ftrace_ip_in_hash(ip, key)) | ||
369 | goto out; | ||
370 | |||
371 | atomic = irqs_disabled(); | ||
372 | |||
373 | spin_lock_irqsave(&ftrace_shutdown_lock, flags); | ||
374 | |||
375 | /* This ip may have hit the hash before the lock */ | ||
376 | if (ftrace_ip_in_hash(ip, key)) | ||
377 | goto out_unlock; | ||
378 | |||
379 | node = ftrace_alloc_dyn_node(ip); | ||
380 | if (!node) | ||
381 | goto out_unlock; | ||
382 | 314 | ||
383 | node->ip = ip; | 315 | rec = ftrace_alloc_dyn_node(ip); |
384 | 316 | if (!rec) | |
385 | ftrace_add_hash(node, key); | 317 | return NULL; |
386 | 318 | ||
387 | ftraced_trigger = 1; | 319 | rec->ip = ip; |
388 | 320 | ||
389 | out_unlock: | 321 | list_add(&rec->list, &ftrace_new_addrs); |
390 | spin_unlock_irqrestore(&ftrace_shutdown_lock, flags); | ||
391 | out: | ||
392 | per_cpu(ftrace_shutdown_disable_cpu, cpu)--; | ||
393 | 322 | ||
394 | /* prevent recursion with scheduler */ | 323 | return rec; |
395 | if (resched) | ||
396 | preempt_enable_no_resched_notrace(); | ||
397 | else | ||
398 | preempt_enable_notrace(); | ||
399 | } | 324 | } |
400 | 325 | ||
401 | #define FTRACE_ADDR ((long)(ftrace_caller)) | 326 | #define FTRACE_ADDR ((long)(ftrace_caller)) |
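With the ftraced daemon gone, ftrace_record_ip() above no longer hashes addresses or guards against recursion: it allocates a dyn_ftrace record (growing the record pages on demand with get_zeroed_page()) and pushes it onto the ftrace_new_addrs list for a later batch conversion. A userspace sketch of that producer side, using a plain singly linked pending list instead of the kernel's list_head (names are hypothetical):

    #include <stdio.h>
    #include <stdlib.h>

    struct site {
        unsigned long ip;
        struct site *next;
    };

    static struct site *pending;     /* models the ftrace_new_addrs list */

    /* Record one mcount call site for later conversion (models ftrace_record_ip). */
    static struct site *record_ip(unsigned long ip)
    {
        struct site *s = malloc(sizeof(*s));

        if (!s)
            return NULL;             /* caller simply loses this site; no global kill */
        s->ip = ip;
        s->next = pending;           /* list_add(): push onto the pending list */
        pending = s;
        return s;
    }

    int main(void)
    {
        record_ip(0xc0100000UL);
        record_ip(0xc0100040UL);

        for (struct site *s = pending; s; s = s->next)
            printf("queued call site %#lx\n", s->ip);
        return 0;
    }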
@@ -514,7 +439,6 @@ static void ftrace_replace_code(int enable) | |||
514 | rec->flags |= FTRACE_FL_FAILED; | 439 | rec->flags |= FTRACE_FL_FAILED; |
515 | if ((system_state == SYSTEM_BOOTING) || | 440 | if ((system_state == SYSTEM_BOOTING) || |
516 | !core_kernel_text(rec->ip)) { | 441 | !core_kernel_text(rec->ip)) { |
517 | ftrace_del_hash(rec); | ||
518 | ftrace_free_rec(rec); | 442 | ftrace_free_rec(rec); |
519 | } | 443 | } |
520 | } | 444 | } |
@@ -522,13 +446,14 @@ static void ftrace_replace_code(int enable) | |||
522 | } | 446 | } |
523 | } | 447 | } |
524 | 448 | ||
525 | static void ftrace_shutdown_replenish(void) | 449 | static void print_ip_ins(const char *fmt, unsigned char *p) |
526 | { | 450 | { |
527 | if (ftrace_pages->next) | 451 | int i; |
528 | return; | 452 | |
453 | printk(KERN_CONT "%s", fmt); | ||
529 | 454 | ||
530 | /* allocate another page */ | 455 | for (i = 0; i < MCOUNT_INSN_SIZE; i++) |
531 | ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL); | 456 | printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]); |
532 | } | 457 | } |
533 | 458 | ||
534 | static int | 459 | static int |
@@ -536,34 +461,52 @@ ftrace_code_disable(struct dyn_ftrace *rec) | |||
536 | { | 461 | { |
537 | unsigned long ip; | 462 | unsigned long ip; |
538 | unsigned char *nop, *call; | 463 | unsigned char *nop, *call; |
539 | int failed; | 464 | int ret; |
540 | 465 | ||
541 | ip = rec->ip; | 466 | ip = rec->ip; |
542 | 467 | ||
543 | nop = ftrace_nop_replace(); | 468 | nop = ftrace_nop_replace(); |
544 | call = ftrace_call_replace(ip, MCOUNT_ADDR); | 469 | call = ftrace_call_replace(ip, mcount_addr); |
470 | |||
471 | ret = ftrace_modify_code(ip, call, nop); | ||
472 | if (ret) { | ||
473 | switch (ret) { | ||
474 | case -EFAULT: | ||
475 | FTRACE_WARN_ON_ONCE(1); | ||
476 | pr_info("ftrace faulted on modifying "); | ||
477 | print_ip_sym(ip); | ||
478 | break; | ||
479 | case -EINVAL: | ||
480 | FTRACE_WARN_ON_ONCE(1); | ||
481 | pr_info("ftrace failed to modify "); | ||
482 | print_ip_sym(ip); | ||
483 | print_ip_ins(" expected: ", call); | ||
484 | print_ip_ins(" actual: ", (unsigned char *)ip); | ||
485 | print_ip_ins(" replace: ", nop); | ||
486 | printk(KERN_CONT "\n"); | ||
487 | break; | ||
488 | case -EPERM: | ||
489 | FTRACE_WARN_ON_ONCE(1); | ||
490 | pr_info("ftrace faulted on writing "); | ||
491 | print_ip_sym(ip); | ||
492 | break; | ||
493 | default: | ||
494 | FTRACE_WARN_ON_ONCE(1); | ||
495 | pr_info("ftrace faulted on unknown error "); | ||
496 | print_ip_sym(ip); | ||
497 | } | ||
545 | 498 | ||
546 | failed = ftrace_modify_code(ip, call, nop); | ||
547 | if (failed) { | ||
548 | rec->flags |= FTRACE_FL_FAILED; | 499 | rec->flags |= FTRACE_FL_FAILED; |
549 | return 0; | 500 | return 0; |
550 | } | 501 | } |
551 | return 1; | 502 | return 1; |
552 | } | 503 | } |
553 | 504 | ||
554 | static int __ftrace_update_code(void *ignore); | ||
555 | |||
556 | static int __ftrace_modify_code(void *data) | 505 | static int __ftrace_modify_code(void *data) |
557 | { | 506 | { |
558 | unsigned long addr; | ||
559 | int *command = data; | 507 | int *command = data; |
560 | 508 | ||
561 | if (*command & FTRACE_ENABLE_CALLS) { | 509 | if (*command & FTRACE_ENABLE_CALLS) { |
562 | /* | ||
563 | * Update any recorded ips now that we have the | ||
564 | * machine stopped | ||
565 | */ | ||
566 | __ftrace_update_code(NULL); | ||
567 | ftrace_replace_code(1); | 510 | ftrace_replace_code(1); |
568 | tracing_on = 1; | 511 | tracing_on = 1; |
569 | } else if (*command & FTRACE_DISABLE_CALLS) { | 512 | } else if (*command & FTRACE_DISABLE_CALLS) { |
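ftrace_code_disable() now explains failures instead of silently flagging them: -EFAULT means the call site could not be read, -EINVAL means the bytes there were not the expected call, -EPERM means the nop could not be written, and print_ip_ins() dumps the expected/actual/replacement instruction bytes. A userspace sketch of that style of diagnostic (the 5-byte size and the byte values are made up for illustration, not accurate x86 patching):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    #define INSN_SIZE 5                     /* models MCOUNT_INSN_SIZE */

    /* Print a label followed by the instruction bytes, colon separated. */
    static void print_ins(const char *label, const unsigned char *p)
    {
        printf("%s", label);
        for (int i = 0; i < INSN_SIZE; i++)
            printf("%s%02x", i ? ":" : "", p[i]);
        printf("\n");
    }

    /* Patch "text" from old_code to new_code, verifying the old bytes first. */
    static int modify_code(unsigned char *text, const unsigned char *old_code,
                           const unsigned char *new_code)
    {
        if (memcmp(text, old_code, INSN_SIZE) != 0)
            return -EINVAL;                 /* the site does not contain what we expect */
        memcpy(text, new_code, INSN_SIZE);
        return 0;
    }

    int main(void)
    {
        unsigned char site[INSN_SIZE] = { 0x90, 0x90, 0x90, 0x90, 0x90 };
        unsigned char call[INSN_SIZE] = { 0xe8, 0x01, 0x02, 0x03, 0x04 };
        unsigned char nop[INSN_SIZE]  = { 0x90, 0x90, 0x90, 0x90, 0x90 };
        int ret = modify_code(site, call, nop);

        if (ret == -EINVAL) {
            printf("failed to modify call site\n");
            print_ins(" expected: ", call);
            print_ins(" actual:   ", site);
            print_ins(" replace:  ", nop);
        }
        return 0;
    }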
@@ -574,14 +517,6 @@ static int __ftrace_modify_code(void *data) | |||
574 | if (*command & FTRACE_UPDATE_TRACE_FUNC) | 517 | if (*command & FTRACE_UPDATE_TRACE_FUNC) |
575 | ftrace_update_ftrace_func(ftrace_trace_function); | 518 | ftrace_update_ftrace_func(ftrace_trace_function); |
576 | 519 | ||
577 | if (*command & FTRACE_ENABLE_MCOUNT) { | ||
578 | addr = (unsigned long)ftrace_record_ip; | ||
579 | ftrace_mcount_set(&addr); | ||
580 | } else if (*command & FTRACE_DISABLE_MCOUNT) { | ||
581 | addr = (unsigned long)ftrace_stub; | ||
582 | ftrace_mcount_set(&addr); | ||
583 | } | ||
584 | |||
585 | return 0; | 520 | return 0; |
586 | } | 521 | } |
587 | 522 | ||
@@ -590,26 +525,9 @@ static void ftrace_run_update_code(int command) | |||
590 | stop_machine(__ftrace_modify_code, &command, NULL); | 525 | stop_machine(__ftrace_modify_code, &command, NULL); |
591 | } | 526 | } |
592 | 527 | ||
593 | void ftrace_disable_daemon(void) | ||
594 | { | ||
595 | /* Stop the daemon from calling kstop_machine */ | ||
596 | mutex_lock(&ftraced_lock); | ||
597 | ftraced_stop = 1; | ||
598 | mutex_unlock(&ftraced_lock); | ||
599 | |||
600 | ftrace_force_update(); | ||
601 | } | ||
602 | |||
603 | void ftrace_enable_daemon(void) | ||
604 | { | ||
605 | mutex_lock(&ftraced_lock); | ||
606 | ftraced_stop = 0; | ||
607 | mutex_unlock(&ftraced_lock); | ||
608 | |||
609 | ftrace_force_update(); | ||
610 | } | ||
611 | |||
612 | static ftrace_func_t saved_ftrace_func; | 528 | static ftrace_func_t saved_ftrace_func; |
529 | static int ftrace_start; | ||
530 | static DEFINE_MUTEX(ftrace_start_lock); | ||
613 | 531 | ||
614 | static void ftrace_startup(void) | 532 | static void ftrace_startup(void) |
615 | { | 533 | { |
@@ -618,9 +536,9 @@ static void ftrace_startup(void) | |||
618 | if (unlikely(ftrace_disabled)) | 536 | if (unlikely(ftrace_disabled)) |
619 | return; | 537 | return; |
620 | 538 | ||
621 | mutex_lock(&ftraced_lock); | 539 | mutex_lock(&ftrace_start_lock); |
622 | ftraced_suspend++; | 540 | ftrace_start++; |
623 | if (ftraced_suspend == 1) | 541 | if (ftrace_start == 1) |
624 | command |= FTRACE_ENABLE_CALLS; | 542 | command |= FTRACE_ENABLE_CALLS; |
625 | 543 | ||
626 | if (saved_ftrace_func != ftrace_trace_function) { | 544 | if (saved_ftrace_func != ftrace_trace_function) { |
@@ -633,7 +551,7 @@ static void ftrace_startup(void) | |||
633 | 551 | ||
634 | ftrace_run_update_code(command); | 552 | ftrace_run_update_code(command); |
635 | out: | 553 | out: |
636 | mutex_unlock(&ftraced_lock); | 554 | mutex_unlock(&ftrace_start_lock); |
637 | } | 555 | } |
638 | 556 | ||
639 | static void ftrace_shutdown(void) | 557 | static void ftrace_shutdown(void) |
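ftraced_suspend/ftraced_lock become ftrace_start/ftrace_start_lock, but the pattern is unchanged reference counting: only the 0-to-1 transition in ftrace_startup() enables the call sites and only the 1-to-0 transition in ftrace_shutdown() disables them, so nested users stack cleanly. A userspace model of that pattern (function names are placeholders):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t start_lock = PTHREAD_MUTEX_INITIALIZER;
    static int start_count;                 /* models ftrace_start */

    static void enable_calls(void)  { puts("patching call sites in"); }
    static void disable_calls(void) { puts("patching call sites out"); }

    /* models ftrace_startup(): only the first user flips the global state */
    static void tracer_startup(void)
    {
        pthread_mutex_lock(&start_lock);
        if (++start_count == 1)
            enable_calls();
        pthread_mutex_unlock(&start_lock);
    }

    /* models ftrace_shutdown(): only the last user flips it back */
    static void tracer_shutdown(void)
    {
        pthread_mutex_lock(&start_lock);
        if (--start_count == 0)
            disable_calls();
        pthread_mutex_unlock(&start_lock);
    }

    int main(void)
    {
        tracer_startup();      /* enables */
        tracer_startup();      /* nested: no-op */
        tracer_shutdown();     /* still one user: no-op */
        tracer_shutdown();     /* last user: disables */
        return 0;
    }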
@@ -643,9 +561,9 @@ static void ftrace_shutdown(void) | |||
643 | if (unlikely(ftrace_disabled)) | 561 | if (unlikely(ftrace_disabled)) |
644 | return; | 562 | return; |
645 | 563 | ||
646 | mutex_lock(&ftraced_lock); | 564 | mutex_lock(&ftrace_start_lock); |
647 | ftraced_suspend--; | 565 | ftrace_start--; |
648 | if (!ftraced_suspend) | 566 | if (!ftrace_start) |
649 | command |= FTRACE_DISABLE_CALLS; | 567 | command |= FTRACE_DISABLE_CALLS; |
650 | 568 | ||
651 | if (saved_ftrace_func != ftrace_trace_function) { | 569 | if (saved_ftrace_func != ftrace_trace_function) { |
@@ -658,7 +576,7 @@ static void ftrace_shutdown(void) | |||
658 | 576 | ||
659 | ftrace_run_update_code(command); | 577 | ftrace_run_update_code(command); |
660 | out: | 578 | out: |
661 | mutex_unlock(&ftraced_lock); | 579 | mutex_unlock(&ftrace_start_lock); |
662 | } | 580 | } |
663 | 581 | ||
664 | static void ftrace_startup_sysctl(void) | 582 | static void ftrace_startup_sysctl(void) |
@@ -668,15 +586,15 @@ static void ftrace_startup_sysctl(void) | |||
668 | if (unlikely(ftrace_disabled)) | 586 | if (unlikely(ftrace_disabled)) |
669 | return; | 587 | return; |
670 | 588 | ||
671 | mutex_lock(&ftraced_lock); | 589 | mutex_lock(&ftrace_start_lock); |
672 | /* Force update next time */ | 590 | /* Force update next time */ |
673 | saved_ftrace_func = NULL; | 591 | saved_ftrace_func = NULL; |
674 | /* ftraced_suspend is true if we want ftrace running */ | 592 | /* ftrace_start is true if we want ftrace running */ |
675 | if (ftraced_suspend) | 593 | if (ftrace_start) |
676 | command |= FTRACE_ENABLE_CALLS; | 594 | command |= FTRACE_ENABLE_CALLS; |
677 | 595 | ||
678 | ftrace_run_update_code(command); | 596 | ftrace_run_update_code(command); |
679 | mutex_unlock(&ftraced_lock); | 597 | mutex_unlock(&ftrace_start_lock); |
680 | } | 598 | } |
681 | 599 | ||
682 | static void ftrace_shutdown_sysctl(void) | 600 | static void ftrace_shutdown_sysctl(void) |
@@ -686,153 +604,51 @@ static void ftrace_shutdown_sysctl(void) | |||
686 | if (unlikely(ftrace_disabled)) | 604 | if (unlikely(ftrace_disabled)) |
687 | return; | 605 | return; |
688 | 606 | ||
689 | mutex_lock(&ftraced_lock); | 607 | mutex_lock(&ftrace_start_lock); |
690 | /* ftraced_suspend is true if ftrace is running */ | 608 | /* ftrace_start is true if ftrace is running */ |
691 | if (ftraced_suspend) | 609 | if (ftrace_start) |
692 | command |= FTRACE_DISABLE_CALLS; | 610 | command |= FTRACE_DISABLE_CALLS; |
693 | 611 | ||
694 | ftrace_run_update_code(command); | 612 | ftrace_run_update_code(command); |
695 | mutex_unlock(&ftraced_lock); | 613 | mutex_unlock(&ftrace_start_lock); |
696 | } | 614 | } |
697 | 615 | ||
698 | static cycle_t ftrace_update_time; | 616 | static cycle_t ftrace_update_time; |
699 | static unsigned long ftrace_update_cnt; | 617 | static unsigned long ftrace_update_cnt; |
700 | unsigned long ftrace_update_tot_cnt; | 618 | unsigned long ftrace_update_tot_cnt; |
701 | 619 | ||
702 | static int __ftrace_update_code(void *ignore) | 620 | static int ftrace_update_code(void) |
703 | { | 621 | { |
704 | int i, save_ftrace_enabled; | 622 | struct dyn_ftrace *p, *t; |
705 | cycle_t start, stop; | 623 | cycle_t start, stop; |
706 | struct dyn_ftrace *p; | ||
707 | struct hlist_node *t, *n; | ||
708 | struct hlist_head *head, temp_list; | ||
709 | |||
710 | /* Don't be recording funcs now */ | ||
711 | ftrace_record_suspend++; | ||
712 | save_ftrace_enabled = ftrace_enabled; | ||
713 | ftrace_enabled = 0; | ||
714 | 624 | ||
715 | start = ftrace_now(raw_smp_processor_id()); | 625 | start = ftrace_now(raw_smp_processor_id()); |
716 | ftrace_update_cnt = 0; | 626 | ftrace_update_cnt = 0; |
717 | 627 | ||
718 | /* No locks needed, the machine is stopped! */ | 628 | list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) { |
719 | for (i = 0; i < FTRACE_HASHSIZE; i++) { | ||
720 | INIT_HLIST_HEAD(&temp_list); | ||
721 | head = &ftrace_hash[i]; | ||
722 | 629 | ||
723 | /* all CPUS are stopped, we are safe to modify code */ | 630 | /* If something went wrong, bail without enabling anything */ |
724 | hlist_for_each_entry_safe(p, t, n, head, node) { | 631 | if (unlikely(ftrace_disabled)) |
725 | /* Skip over failed records which have not been | 632 | return -1; |
726 | * freed. */ | ||
727 | if (p->flags & FTRACE_FL_FAILED) | ||
728 | continue; | ||
729 | |||
730 | /* Unconverted records are always at the head of the | ||
731 | * hash bucket. Once we encounter a converted record, | ||
732 | * simply skip over to the next bucket. Saves ftraced | ||
733 | * some processor cycles (ftrace does its bid for | ||
734 | * global warming :-p ). */ | ||
735 | if (p->flags & (FTRACE_FL_CONVERTED)) | ||
736 | break; | ||
737 | 633 | ||
738 | /* Ignore updates to this record's mcount site. | 634 | list_del_init(&p->list); |
739 | * Reintroduce this record at the head of this | ||
740 | * bucket to attempt to "convert" it again if | ||
741 | * the kprobe on it is unregistered before the | ||
742 | * next run. */ | ||
743 | if (get_kprobe((void *)p->ip)) { | ||
744 | ftrace_del_hash(p); | ||
745 | INIT_HLIST_NODE(&p->node); | ||
746 | hlist_add_head(&p->node, &temp_list); | ||
747 | freeze_record(p); | ||
748 | continue; | ||
749 | } else { | ||
750 | unfreeze_record(p); | ||
751 | } | ||
752 | 635 | ||
753 | /* convert record (i.e, patch mcount-call with NOP) */ | 636 | /* convert record (i.e, patch mcount-call with NOP) */ |
754 | if (ftrace_code_disable(p)) { | 637 | if (ftrace_code_disable(p)) { |
755 | p->flags |= FTRACE_FL_CONVERTED; | 638 | p->flags |= FTRACE_FL_CONVERTED; |
756 | ftrace_update_cnt++; | 639 | ftrace_update_cnt++; |
757 | } else { | 640 | } else |
758 | if ((system_state == SYSTEM_BOOTING) || | 641 | ftrace_free_rec(p); |
759 | !core_kernel_text(p->ip)) { | ||
760 | ftrace_del_hash(p); | ||
761 | ftrace_free_rec(p); | ||
762 | } | ||
763 | } | ||
764 | } | ||
765 | |||
766 | hlist_for_each_entry_safe(p, t, n, &temp_list, node) { | ||
767 | hlist_del(&p->node); | ||
768 | INIT_HLIST_NODE(&p->node); | ||
769 | hlist_add_head(&p->node, head); | ||
770 | } | ||
771 | } | 642 | } |
772 | 643 | ||
773 | stop = ftrace_now(raw_smp_processor_id()); | 644 | stop = ftrace_now(raw_smp_processor_id()); |
774 | ftrace_update_time = stop - start; | 645 | ftrace_update_time = stop - start; |
775 | ftrace_update_tot_cnt += ftrace_update_cnt; | 646 | ftrace_update_tot_cnt += ftrace_update_cnt; |
776 | ftraced_trigger = 0; | ||
777 | |||
778 | ftrace_enabled = save_ftrace_enabled; | ||
779 | ftrace_record_suspend--; | ||
780 | 647 | ||
781 | return 0; | 648 | return 0; |
782 | } | 649 | } |
783 | 650 | ||
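The rewritten ftrace_update_code() above simply drains ftrace_new_addrs: it walks the pending list with list_for_each_entry_safe() — safe because each record is unlinked inside the loop — converts each site to a nop via ftrace_code_disable(), and frees records that could not be converted. A userspace model of that drain loop over a plain singly linked list (convert_site() is a made-up stand-in for ftrace_code_disable()):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct site { unsigned long ip; struct site *next; };

    static struct site *pending;       /* filled by record_ip(), as in ftrace_record_ip() */
    static struct site *converted;     /* the kernel keeps these in its record pages instead */
    static unsigned long update_cnt;   /* models ftrace_update_cnt */

    static bool convert_site(struct site *s)   /* stand-in for ftrace_code_disable() */
    {
        return (s->ip & 0xf) == 0;             /* pretend "unaligned" sites fail to patch */
    }

    static void record_ip(unsigned long ip)
    {
        struct site *s = malloc(sizeof(*s));

        if (!s)
            return;
        s->ip = ip;
        s->next = pending;
        pending = s;
    }

    /* Drain the pending list: convert what we can, return the rest to the allocator. */
    static void update_code(void)
    {
        struct site *s, *next;

        for (s = pending; s; s = next) {
            next = s->next;                    /* "safe" iteration: save the link first */
            if (convert_site(s)) {
                s->next = converted;           /* kernel: set FTRACE_FL_CONVERTED instead */
                converted = s;
                update_cnt++;
            } else {
                free(s);                       /* kernel: ftrace_free_rec() */
            }
        }
        pending = NULL;
    }

    int main(void)
    {
        record_ip(0xc0100000UL);
        record_ip(0xc0100044UL);               /* fails the fake conversion above */
        update_code();
        printf("converted %lu call site(s)\n", update_cnt);
        return 0;
    }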
784 | static int ftrace_update_code(void) | 651 | static int __init ftrace_dyn_table_alloc(unsigned long num_to_init) |
785 | { | ||
786 | if (unlikely(ftrace_disabled) || | ||
787 | !ftrace_enabled || !ftraced_trigger) | ||
788 | return 0; | ||
789 | |||
790 | stop_machine(__ftrace_update_code, NULL, NULL); | ||
791 | |||
792 | return 1; | ||
793 | } | ||
794 | |||
795 | static int ftraced(void *ignore) | ||
796 | { | ||
797 | unsigned long usecs; | ||
798 | |||
799 | while (!kthread_should_stop()) { | ||
800 | |||
801 | set_current_state(TASK_INTERRUPTIBLE); | ||
802 | |||
803 | /* check once a second */ | ||
804 | schedule_timeout(HZ); | ||
805 | |||
806 | if (unlikely(ftrace_disabled)) | ||
807 | continue; | ||
808 | |||
809 | mutex_lock(&ftrace_sysctl_lock); | ||
810 | mutex_lock(&ftraced_lock); | ||
811 | if (!ftraced_suspend && !ftraced_stop && | ||
812 | ftrace_update_code()) { | ||
813 | usecs = nsecs_to_usecs(ftrace_update_time); | ||
814 | if (ftrace_update_tot_cnt > 100000) { | ||
815 | ftrace_update_tot_cnt = 0; | ||
816 | pr_info("hm, dftrace overflow: %lu change%s" | ||
817 | " (%lu total) in %lu usec%s\n", | ||
818 | ftrace_update_cnt, | ||
819 | ftrace_update_cnt != 1 ? "s" : "", | ||
820 | ftrace_update_tot_cnt, | ||
821 | usecs, usecs != 1 ? "s" : ""); | ||
822 | ftrace_disabled = 1; | ||
823 | WARN_ON_ONCE(1); | ||
824 | } | ||
825 | } | ||
826 | mutex_unlock(&ftraced_lock); | ||
827 | mutex_unlock(&ftrace_sysctl_lock); | ||
828 | |||
829 | ftrace_shutdown_replenish(); | ||
830 | } | ||
831 | __set_current_state(TASK_RUNNING); | ||
832 | return 0; | ||
833 | } | ||
834 | |||
835 | static int __init ftrace_dyn_table_alloc(void) | ||
836 | { | 652 | { |
837 | struct ftrace_page *pg; | 653 | struct ftrace_page *pg; |
838 | int cnt; | 654 | int cnt; |
@@ -859,7 +675,9 @@ static int __init ftrace_dyn_table_alloc(void) | |||
859 | 675 | ||
860 | pg = ftrace_pages = ftrace_pages_start; | 676 | pg = ftrace_pages = ftrace_pages_start; |
861 | 677 | ||
862 | cnt = NR_TO_INIT / ENTRIES_PER_PAGE; | 678 | cnt = num_to_init / ENTRIES_PER_PAGE; |
679 | pr_info("ftrace: allocating %ld entries in %d pages\n", | ||
680 | num_to_init, cnt); | ||
863 | 681 | ||
864 | for (i = 0; i < cnt; i++) { | 682 | for (i = 0; i < cnt; i++) { |
865 | pg->next = (void *)get_zeroed_page(GFP_KERNEL); | 683 | pg->next = (void *)get_zeroed_page(GFP_KERNEL); |
@@ -901,6 +719,8 @@ t_next(struct seq_file *m, void *v, loff_t *pos) | |||
901 | 719 | ||
902 | (*pos)++; | 720 | (*pos)++; |
903 | 721 | ||
722 | /* should not be called from interrupt context */ | ||
723 | spin_lock(&ftrace_lock); | ||
904 | retry: | 724 | retry: |
905 | if (iter->idx >= iter->pg->index) { | 725 | if (iter->idx >= iter->pg->index) { |
906 | if (iter->pg->next) { | 726 | if (iter->pg->next) { |
@@ -910,15 +730,13 @@ t_next(struct seq_file *m, void *v, loff_t *pos) | |||
910 | } | 730 | } |
911 | } else { | 731 | } else { |
912 | rec = &iter->pg->records[iter->idx++]; | 732 | rec = &iter->pg->records[iter->idx++]; |
913 | if ((!(iter->flags & FTRACE_ITER_FAILURES) && | 733 | if ((rec->flags & FTRACE_FL_FREE) || |
734 | |||
735 | (!(iter->flags & FTRACE_ITER_FAILURES) && | ||
914 | (rec->flags & FTRACE_FL_FAILED)) || | 736 | (rec->flags & FTRACE_FL_FAILED)) || |
915 | 737 | ||
916 | ((iter->flags & FTRACE_ITER_FAILURES) && | 738 | ((iter->flags & FTRACE_ITER_FAILURES) && |
917 | (!(rec->flags & FTRACE_FL_FAILED) || | 739 | !(rec->flags & FTRACE_FL_FAILED)) || |
918 | (rec->flags & FTRACE_FL_FREE))) || | ||
919 | |||
920 | ((iter->flags & FTRACE_ITER_FILTER) && | ||
921 | !(rec->flags & FTRACE_FL_FILTER)) || | ||
922 | 740 | ||
923 | ((iter->flags & FTRACE_ITER_NOTRACE) && | 741 | ((iter->flags & FTRACE_ITER_NOTRACE) && |
924 | !(rec->flags & FTRACE_FL_NOTRACE))) { | 742 | !(rec->flags & FTRACE_FL_NOTRACE))) { |
@@ -926,6 +744,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos) | |||
926 | goto retry; | 744 | goto retry; |
927 | } | 745 | } |
928 | } | 746 | } |
747 | spin_unlock(&ftrace_lock); | ||
929 | 748 | ||
930 | iter->pos = *pos; | 749 | iter->pos = *pos; |
931 | 750 | ||
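t_next() now advances under ftrace_lock and additionally skips records carrying FTRACE_FL_FREE, on top of the existing failed/notrace filtering; the retry label just means "advance the cursor and hide records the current view does not want". A userspace model of such a skipping cursor over an array of flagged records (the flag names are borrowed, everything else is illustrative):

    #include <stdio.h>

    #define FL_FREE   0x1
    #define FL_FAILED 0x2

    struct rec { unsigned long ip; unsigned flags; };

    static struct rec records[] = {
        { 0xc0100000UL, 0 },
        { 0xc0100040UL, FL_FREE },      /* returned to the free list: always hide */
        { 0xc0100080UL, FL_FAILED },    /* failed to patch: hidden by default */
        { 0xc01000c0UL, 0 },
    };

    /* Advance the cursor, skipping records the caller does not want to see. */
    static struct rec *rec_next(int *idx, int show_failures)
    {
        while (*idx < (int)(sizeof(records) / sizeof(records[0]))) {
            struct rec *r = &records[(*idx)++];

            if (r->flags & FL_FREE)
                continue;                          /* never show freed slots */
            if (!show_failures && (r->flags & FL_FAILED))
                continue;
            if (show_failures && !(r->flags & FL_FAILED))
                continue;                          /* failures view shows only failures */
            return r;
        }
        return NULL;                               /* end of sequence */
    }

    int main(void)
    {
        int idx = 0;
        struct rec *r;

        while ((r = rec_next(&idx, 0)))
            printf("available: %#lx\n", r->ip);
        return 0;
    }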
@@ -1039,8 +858,8 @@ static void ftrace_filter_reset(int enable) | |||
1039 | unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; | 858 | unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; |
1040 | unsigned i; | 859 | unsigned i; |
1041 | 860 | ||
1042 | /* keep kstop machine from running */ | 861 | /* should not be called from interrupt context */ |
1043 | preempt_disable(); | 862 | spin_lock(&ftrace_lock); |
1044 | if (enable) | 863 | if (enable) |
1045 | ftrace_filtered = 0; | 864 | ftrace_filtered = 0; |
1046 | pg = ftrace_pages_start; | 865 | pg = ftrace_pages_start; |
@@ -1053,7 +872,7 @@ static void ftrace_filter_reset(int enable) | |||
1053 | } | 872 | } |
1054 | pg = pg->next; | 873 | pg = pg->next; |
1055 | } | 874 | } |
1056 | preempt_enable(); | 875 | spin_unlock(&ftrace_lock); |
1057 | } | 876 | } |
1058 | 877 | ||
1059 | static int | 878 | static int |
@@ -1165,8 +984,8 @@ ftrace_match(unsigned char *buff, int len, int enable) | |||
1165 | } | 984 | } |
1166 | } | 985 | } |
1167 | 986 | ||
1168 | /* keep kstop machine from running */ | 987 | /* should not be called from interrupt context */ |
1169 | preempt_disable(); | 988 | spin_lock(&ftrace_lock); |
1170 | if (enable) | 989 | if (enable) |
1171 | ftrace_filtered = 1; | 990 | ftrace_filtered = 1; |
1172 | pg = ftrace_pages_start; | 991 | pg = ftrace_pages_start; |
@@ -1203,7 +1022,7 @@ ftrace_match(unsigned char *buff, int len, int enable) | |||
1203 | } | 1022 | } |
1204 | pg = pg->next; | 1023 | pg = pg->next; |
1205 | } | 1024 | } |
1206 | preempt_enable(); | 1025 | spin_unlock(&ftrace_lock); |
1207 | } | 1026 | } |
1208 | 1027 | ||
1209 | static ssize_t | 1028 | static ssize_t |
@@ -1366,10 +1185,10 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable) | |||
1366 | } | 1185 | } |
1367 | 1186 | ||
1368 | mutex_lock(&ftrace_sysctl_lock); | 1187 | mutex_lock(&ftrace_sysctl_lock); |
1369 | mutex_lock(&ftraced_lock); | 1188 | mutex_lock(&ftrace_start_lock); |
1370 | if (iter->filtered && ftraced_suspend && ftrace_enabled) | 1189 | if (iter->filtered && ftrace_start && ftrace_enabled) |
1371 | ftrace_run_update_code(FTRACE_ENABLE_CALLS); | 1190 | ftrace_run_update_code(FTRACE_ENABLE_CALLS); |
1372 | mutex_unlock(&ftraced_lock); | 1191 | mutex_unlock(&ftrace_start_lock); |
1373 | mutex_unlock(&ftrace_sysctl_lock); | 1192 | mutex_unlock(&ftrace_sysctl_lock); |
1374 | 1193 | ||
1375 | kfree(iter); | 1194 | kfree(iter); |
@@ -1389,55 +1208,6 @@ ftrace_notrace_release(struct inode *inode, struct file *file) | |||
1389 | return ftrace_regex_release(inode, file, 0); | 1208 | return ftrace_regex_release(inode, file, 0); |
1390 | } | 1209 | } |
1391 | 1210 | ||
1392 | static ssize_t | ||
1393 | ftraced_read(struct file *filp, char __user *ubuf, | ||
1394 | size_t cnt, loff_t *ppos) | ||
1395 | { | ||
1396 | /* don't worry about races */ | ||
1397 | char *buf = ftraced_stop ? "disabled\n" : "enabled\n"; | ||
1398 | int r = strlen(buf); | ||
1399 | |||
1400 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | ||
1401 | } | ||
1402 | |||
1403 | static ssize_t | ||
1404 | ftraced_write(struct file *filp, const char __user *ubuf, | ||
1405 | size_t cnt, loff_t *ppos) | ||
1406 | { | ||
1407 | char buf[64]; | ||
1408 | long val; | ||
1409 | int ret; | ||
1410 | |||
1411 | if (cnt >= sizeof(buf)) | ||
1412 | return -EINVAL; | ||
1413 | |||
1414 | if (copy_from_user(&buf, ubuf, cnt)) | ||
1415 | return -EFAULT; | ||
1416 | |||
1417 | if (strncmp(buf, "enable", 6) == 0) | ||
1418 | val = 1; | ||
1419 | else if (strncmp(buf, "disable", 7) == 0) | ||
1420 | val = 0; | ||
1421 | else { | ||
1422 | buf[cnt] = 0; | ||
1423 | |||
1424 | ret = strict_strtoul(buf, 10, &val); | ||
1425 | if (ret < 0) | ||
1426 | return ret; | ||
1427 | |||
1428 | val = !!val; | ||
1429 | } | ||
1430 | |||
1431 | if (val) | ||
1432 | ftrace_enable_daemon(); | ||
1433 | else | ||
1434 | ftrace_disable_daemon(); | ||
1435 | |||
1436 | filp->f_pos += cnt; | ||
1437 | |||
1438 | return cnt; | ||
1439 | } | ||
1440 | |||
1441 | static struct file_operations ftrace_avail_fops = { | 1211 | static struct file_operations ftrace_avail_fops = { |
1442 | .open = ftrace_avail_open, | 1212 | .open = ftrace_avail_open, |
1443 | .read = seq_read, | 1213 | .read = seq_read, |
@@ -1468,54 +1238,6 @@ static struct file_operations ftrace_notrace_fops = { | |||
1468 | .release = ftrace_notrace_release, | 1238 | .release = ftrace_notrace_release, |
1469 | }; | 1239 | }; |
1470 | 1240 | ||
1471 | static struct file_operations ftraced_fops = { | ||
1472 | .open = tracing_open_generic, | ||
1473 | .read = ftraced_read, | ||
1474 | .write = ftraced_write, | ||
1475 | }; | ||
1476 | |||
1477 | /** | ||
1478 | * ftrace_force_update - force an update to all recording ftrace functions | ||
1479 | */ | ||
1480 | int ftrace_force_update(void) | ||
1481 | { | ||
1482 | int ret = 0; | ||
1483 | |||
1484 | if (unlikely(ftrace_disabled)) | ||
1485 | return -ENODEV; | ||
1486 | |||
1487 | mutex_lock(&ftrace_sysctl_lock); | ||
1488 | mutex_lock(&ftraced_lock); | ||
1489 | |||
1490 | /* | ||
1491 | * If ftraced_trigger is not set, then there is nothing | ||
1492 | * to update. | ||
1493 | */ | ||
1494 | if (ftraced_trigger && !ftrace_update_code()) | ||
1495 | ret = -EBUSY; | ||
1496 | |||
1497 | mutex_unlock(&ftraced_lock); | ||
1498 | mutex_unlock(&ftrace_sysctl_lock); | ||
1499 | |||
1500 | return ret; | ||
1501 | } | ||
1502 | |||
1503 | static void ftrace_force_shutdown(void) | ||
1504 | { | ||
1505 | struct task_struct *task; | ||
1506 | int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC; | ||
1507 | |||
1508 | mutex_lock(&ftraced_lock); | ||
1509 | task = ftraced_task; | ||
1510 | ftraced_task = NULL; | ||
1511 | ftraced_suspend = -1; | ||
1512 | ftrace_run_update_code(command); | ||
1513 | mutex_unlock(&ftraced_lock); | ||
1514 | |||
1515 | if (task) | ||
1516 | kthread_stop(task); | ||
1517 | } | ||
1518 | |||
1519 | static __init int ftrace_init_debugfs(void) | 1241 | static __init int ftrace_init_debugfs(void) |
1520 | { | 1242 | { |
1521 | struct dentry *d_tracer; | 1243 | struct dentry *d_tracer; |
@@ -1546,97 +1268,103 @@ static __init int ftrace_init_debugfs(void) | |||
1546 | pr_warning("Could not create debugfs " | 1268 | pr_warning("Could not create debugfs " |
1547 | "'set_ftrace_notrace' entry\n"); | 1269 | "'set_ftrace_notrace' entry\n"); |
1548 | 1270 | ||
1549 | entry = debugfs_create_file("ftraced_enabled", 0644, d_tracer, | ||
1550 | NULL, &ftraced_fops); | ||
1551 | if (!entry) | ||
1552 | pr_warning("Could not create debugfs " | ||
1553 | "'ftraced_enabled' entry\n"); | ||
1554 | return 0; | 1271 | return 0; |
1555 | } | 1272 | } |
1556 | 1273 | ||
1557 | fs_initcall(ftrace_init_debugfs); | 1274 | fs_initcall(ftrace_init_debugfs); |
1558 | 1275 | ||
1559 | static int __init ftrace_dynamic_init(void) | 1276 | static int ftrace_convert_nops(unsigned long *start, |
1277 | unsigned long *end) | ||
1560 | { | 1278 | { |
1561 | struct task_struct *p; | 1279 | unsigned long *p; |
1562 | unsigned long addr; | 1280 | unsigned long addr; |
1281 | unsigned long flags; | ||
1282 | |||
1283 | mutex_lock(&ftrace_start_lock); | ||
1284 | p = start; | ||
1285 | while (p < end) { | ||
1286 | addr = ftrace_call_adjust(*p++); | ||
1287 | ftrace_record_ip(addr); | ||
1288 | } | ||
1289 | |||
1290 | /* disable interrupts to prevent kstop machine */ | ||
1291 | local_irq_save(flags); | ||
1292 | ftrace_update_code(); | ||
1293 | local_irq_restore(flags); | ||
1294 | mutex_unlock(&ftrace_start_lock); | ||
1295 | |||
1296 | return 0; | ||
1297 | } | ||
1298 | |||
1299 | void ftrace_init_module(unsigned long *start, unsigned long *end) | ||
1300 | { | ||
1301 | if (ftrace_disabled || start == end) | ||
1302 | return; | ||
1303 | ftrace_convert_nops(start, end); | ||
1304 | } | ||
1305 | |||
1306 | extern unsigned long __start_mcount_loc[]; | ||
1307 | extern unsigned long __stop_mcount_loc[]; | ||
1308 | |||
1309 | void __init ftrace_init(void) | ||
1310 | { | ||
1311 | unsigned long count, addr, flags; | ||
1563 | int ret; | 1312 | int ret; |
1564 | 1313 | ||
1565 | addr = (unsigned long)ftrace_record_ip; | 1314 | /* Keep the ftrace pointer to the stub */ |
1315 | addr = (unsigned long)ftrace_stub; | ||
1566 | 1316 | ||
1567 | stop_machine(ftrace_dyn_arch_init, &addr, NULL); | 1317 | local_irq_save(flags); |
1318 | ftrace_dyn_arch_init(&addr); | ||
1319 | local_irq_restore(flags); | ||
1568 | 1320 | ||
1569 | /* ftrace_dyn_arch_init places the return code in addr */ | 1321 | /* ftrace_dyn_arch_init places the return code in addr */ |
1570 | if (addr) { | 1322 | if (addr) |
1571 | ret = (int)addr; | ||
1572 | goto failed; | 1323 | goto failed; |
1573 | } | ||
1574 | 1324 | ||
1575 | ret = ftrace_dyn_table_alloc(); | 1325 | count = __stop_mcount_loc - __start_mcount_loc; |
1576 | if (ret) | ||
1577 | goto failed; | ||
1578 | 1326 | ||
1579 | p = kthread_run(ftraced, NULL, "ftraced"); | 1327 | ret = ftrace_dyn_table_alloc(count); |
1580 | if (IS_ERR(p)) { | 1328 | if (ret) |
1581 | ret = -1; | ||
1582 | goto failed; | 1329 | goto failed; |
1583 | } | ||
1584 | 1330 | ||
1585 | last_ftrace_enabled = ftrace_enabled = 1; | 1331 | last_ftrace_enabled = ftrace_enabled = 1; |
1586 | ftraced_task = p; | ||
1587 | 1332 | ||
1588 | return 0; | 1333 | ret = ftrace_convert_nops(__start_mcount_loc, |
1334 | __stop_mcount_loc); | ||
1589 | 1335 | ||
1336 | return; | ||
1590 | failed: | 1337 | failed: |
1591 | ftrace_disabled = 1; | 1338 | ftrace_disabled = 1; |
1592 | return ret; | ||
1593 | } | 1339 | } |
1594 | 1340 | ||
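ftrace_init() above replaces the old core_initcall()/ftraced setup: the linker-collected table bounded by __start_mcount_loc and __stop_mcount_loc gives the number of call sites up front, ftrace_dyn_table_alloc() sizes the record pages from that count, and ftrace_convert_nops() walks the table recording (and then nopping) every entry. A userspace sketch of walking such an address table, with an ordinary array standing in for the linker-provided section:

    #include <stdio.h>

    /*
     * In the kernel these bounds come from the linker-script section that
     * collects every mcount call-site address. An ordinary array stands in
     * for it here.
     */
    static unsigned long mcount_loc[] = {
        0xc0100000UL, 0xc0100040UL, 0xc0100080UL,
    };
    #define start_mcount_loc (&mcount_loc[0])
    #define stop_mcount_loc  (&mcount_loc[sizeof(mcount_loc) / sizeof(mcount_loc[0])])

    static void record_ip(unsigned long ip)    /* stand-in for ftrace_record_ip() */
    {
        printf("recording call site %#lx\n", ip);
    }

    /* models ftrace_convert_nops(): walk [start, end) and record every entry */
    static void convert_nops(unsigned long *start, unsigned long *end)
    {
        unsigned long *p = start;

        while (p < end)
            record_ip(*p++);          /* the kernel also applies ftrace_call_adjust() */
    }

    int main(void)
    {
        unsigned long count = stop_mcount_loc - start_mcount_loc;

        printf("found %lu call sites\n", count);   /* __stop - __start, as in ftrace_init() */
        convert_nops(start_mcount_loc, stop_mcount_loc);
        return 0;
    }

Collecting the call-site list at build time is what lets this patch delete the runtime hash and the ftraced daemon entirely.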
1595 | core_initcall(ftrace_dynamic_init); | ||
1596 | #else | 1341 | #else |
1342 | |||
1343 | static int __init ftrace_nodyn_init(void) | ||
1344 | { | ||
1345 | ftrace_enabled = 1; | ||
1346 | return 0; | ||
1347 | } | ||
1348 | device_initcall(ftrace_nodyn_init); | ||
1349 | |||
1597 | # define ftrace_startup() do { } while (0) | 1350 | # define ftrace_startup() do { } while (0) |
1598 | # define ftrace_shutdown() do { } while (0) | 1351 | # define ftrace_shutdown() do { } while (0) |
1599 | # define ftrace_startup_sysctl() do { } while (0) | 1352 | # define ftrace_startup_sysctl() do { } while (0) |
1600 | # define ftrace_shutdown_sysctl() do { } while (0) | 1353 | # define ftrace_shutdown_sysctl() do { } while (0) |
1601 | # define ftrace_force_shutdown() do { } while (0) | ||
1602 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 1354 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
1603 | 1355 | ||
1604 | /** | 1356 | /** |
1605 | * ftrace_kill_atomic - kill ftrace from critical sections | 1357 | * ftrace_kill - kill ftrace |
1606 | * | 1358 | * |
1607 | * This function should be used by panic code. It stops ftrace | 1359 | * This function should be used by panic code. It stops ftrace |
1608 | * but in a not so nice way. If you need to simply kill ftrace | 1360 | * but in a not so nice way. If you need to simply kill ftrace |
1609 | * from a non-atomic section, use ftrace_kill. | 1361 | * from a non-atomic section, use ftrace_kill. |
1610 | */ | 1362 | */ |
1611 | void ftrace_kill_atomic(void) | ||
1612 | { | ||
1613 | ftrace_disabled = 1; | ||
1614 | ftrace_enabled = 0; | ||
1615 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
1616 | ftraced_suspend = -1; | ||
1617 | #endif | ||
1618 | clear_ftrace_function(); | ||
1619 | } | ||
1620 | |||
1621 | /** | ||
1622 | * ftrace_kill - totally shutdown ftrace | ||
1623 | * | ||
1624 | * This is a safety measure. If something was detected that seems | ||
1625 | * wrong, calling this function will keep ftrace from doing | ||
1626 | * any more modifications, and updates. | ||
1627 | * used when something went wrong. | ||
1628 | */ | ||
1629 | void ftrace_kill(void) | 1363 | void ftrace_kill(void) |
1630 | { | 1364 | { |
1631 | mutex_lock(&ftrace_sysctl_lock); | ||
1632 | ftrace_disabled = 1; | 1365 | ftrace_disabled = 1; |
1633 | ftrace_enabled = 0; | 1366 | ftrace_enabled = 0; |
1634 | |||
1635 | clear_ftrace_function(); | 1367 | clear_ftrace_function(); |
1636 | mutex_unlock(&ftrace_sysctl_lock); | ||
1637 | |||
1638 | /* Try to totally disable ftrace */ | ||
1639 | ftrace_force_shutdown(); | ||
1640 | } | 1368 | } |
1641 | 1369 | ||
1642 | /** | 1370 | /** |
@@ -1725,3 +1453,4 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, | |||
1725 | mutex_unlock(&ftrace_sysctl_lock); | 1453 | mutex_unlock(&ftrace_sysctl_lock); |
1726 | return ret; | 1454 | return ret; |
1727 | } | 1455 | } |
1456 | |||