Diffstat (limited to 'kernel/trace')
-rw-r--r--	kernel/trace/ftrace.c	183
1 file changed, 105 insertions, 78 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index f6d9af3bf66b..88544f9bc0ed 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -26,14 +26,8 @@
 
 #include "trace.h"
 
-#ifdef CONFIG_DYNAMIC_FTRACE
-# define FTRACE_ENABLED_INIT 1
-#else
-# define FTRACE_ENABLED_INIT 0
-#endif
-
-int ftrace_enabled = FTRACE_ENABLED_INIT;
-static int last_ftrace_enabled = FTRACE_ENABLED_INIT;
+int ftrace_enabled;
+static int last_ftrace_enabled;
 
 static DEFINE_SPINLOCK(ftrace_lock);
 static DEFINE_MUTEX(ftrace_sysctl_lock);
@@ -149,6 +143,14 @@ static int notrace __unregister_ftrace_function(struct ftrace_ops *ops)
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 
+enum {
+	FTRACE_ENABLE_CALLS		= (1 << 0),
+	FTRACE_DISABLE_CALLS		= (1 << 1),
+	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
+	FTRACE_ENABLE_MCOUNT		= (1 << 3),
+	FTRACE_DISABLE_MCOUNT		= (1 << 4),
+};
+
 static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];
 
 static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);
@@ -199,12 +201,8 @@ ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
 	hlist_add_head(&node->node, &ftrace_hash[key]);
 }
 
-static notrace struct dyn_ftrace *ftrace_alloc_shutdown_node(unsigned long ip)
+static notrace struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
 {
-	/* If this was already converted, skip it */
-	if (ftrace_ip_converted(ip))
-		return NULL;
-
 	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
 		if (!ftrace_pages->next)
 			return NULL;
@@ -215,7 +213,7 @@ static notrace struct dyn_ftrace *ftrace_alloc_shutdown_node(unsigned long ip)
 }
 
 static void notrace
-ftrace_record_ip(unsigned long ip, unsigned long parent_ip)
+ftrace_record_ip(unsigned long ip)
 {
 	struct dyn_ftrace *node;
 	unsigned long flags;
@@ -223,6 +221,9 @@ ftrace_record_ip(unsigned long ip, unsigned long parent_ip)
 	int resched;
 	int atomic;
 
+	if (!ftrace_enabled)
+		return;
+
 	resched = need_resched();
 	preempt_disable_notrace();
 
@@ -251,11 +252,12 @@ ftrace_record_ip(unsigned long ip, unsigned long parent_ip)
 
 	/*
 	 * There's a slight race that the ftraced will update the
-	 * hash and reset here. The arch alloc is responsible
-	 * for seeing if the IP has already changed, and if
-	 * it has, the alloc will fail.
+	 * hash and reset here. If it is already converted, skip it.
 	 */
-	node = ftrace_alloc_shutdown_node(ip);
+	if (ftrace_ip_converted(ip))
+		goto out_unlock;
+
+	node = ftrace_alloc_dyn_node(ip);
 	if (!node)
 		goto out_unlock;
 
@@ -277,11 +279,7 @@ ftrace_record_ip(unsigned long ip, unsigned long parent_ip)
277 preempt_enable_notrace(); 279 preempt_enable_notrace();
278} 280}
279 281
280static struct ftrace_ops ftrace_shutdown_ops __read_mostly = 282#define FTRACE_ADDR ((long)(&ftrace_caller))
281{
282 .func = ftrace_record_ip,
283};
284
285#define MCOUNT_ADDR ((long)(&mcount)) 283#define MCOUNT_ADDR ((long)(&mcount))
286 284
287static void notrace ftrace_replace_code(int saved) 285static void notrace ftrace_replace_code(int saved)
@@ -309,9 +307,9 @@ static void notrace ftrace_replace_code(int saved)
 		ip = rec->ip;
 
 		if (saved)
-			new = ftrace_call_replace(ip, MCOUNT_ADDR);
+			new = ftrace_call_replace(ip, FTRACE_ADDR);
 		else
-			old = ftrace_call_replace(ip, MCOUNT_ADDR);
+			old = ftrace_call_replace(ip, FTRACE_ADDR);
 
 		failed = ftrace_modify_code(ip, old, new);
 		if (failed)
@@ -320,16 +318,6 @@ static void notrace ftrace_replace_code(int saved)
 	}
 }
 
-static notrace void ftrace_startup_code(void)
-{
-	ftrace_replace_code(1);
-}
-
-static notrace void ftrace_shutdown_code(void)
-{
-	ftrace_replace_code(0);
-}
-
 static notrace void ftrace_shutdown_replenish(void)
 {
 	if (ftrace_pages->next)
@@ -339,16 +327,8 @@ static notrace void ftrace_shutdown_replenish(void)
339 ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL); 327 ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
340} 328}
341 329
342static int notrace __ftrace_modify_code(void *data)
343{
344 void (*func)(void) = data;
345
346 func();
347 return 0;
348}
349
350static notrace void 330static notrace void
351ftrace_code_disable(struct dyn_ftrace *rec, unsigned long addr) 331ftrace_code_disable(struct dyn_ftrace *rec)
352{ 332{
353 unsigned long ip; 333 unsigned long ip;
354 unsigned char *nop, *call; 334 unsigned char *nop, *call;
@@ -357,67 +337,113 @@ ftrace_code_disable(struct dyn_ftrace *rec, unsigned long addr)
 	ip = rec->ip;
 
 	nop = ftrace_nop_replace();
-	call = ftrace_call_replace(ip, addr);
+	call = ftrace_call_replace(ip, MCOUNT_ADDR);
 
 	failed = ftrace_modify_code(ip, call, nop);
 	if (failed)
 		rec->flags |= FTRACE_FL_FAILED;
 }
 
-static void notrace ftrace_run_startup_code(void)
+static int notrace __ftrace_modify_code(void *data)
 {
-	stop_machine_run(__ftrace_modify_code, ftrace_startup_code, NR_CPUS);
+	unsigned long addr;
+	int *command = data;
+
+	if (*command & FTRACE_ENABLE_CALLS)
+		ftrace_replace_code(1);
+	else if (*command & FTRACE_DISABLE_CALLS)
+		ftrace_replace_code(0);
+
+	if (*command & FTRACE_UPDATE_TRACE_FUNC)
+		ftrace_update_ftrace_func(ftrace_trace_function);
+
+	if (*command & FTRACE_ENABLE_MCOUNT) {
+		addr = (unsigned long)ftrace_record_ip;
+		ftrace_mcount_set(&addr);
+	} else if (*command & FTRACE_DISABLE_MCOUNT) {
+		addr = (unsigned long)ftrace_stub;
+		ftrace_mcount_set(&addr);
+	}
+
+	return 0;
 }
 
-static void notrace ftrace_run_shutdown_code(void)
+static void notrace ftrace_run_update_code(int command)
 {
-	stop_machine_run(__ftrace_modify_code, ftrace_shutdown_code, NR_CPUS);
+	stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
 }
 
+static ftrace_func_t saved_ftrace_func;
+
 static void notrace ftrace_startup(void)
 {
+	int command = 0;
+
 	mutex_lock(&ftraced_lock);
 	ftraced_suspend++;
-	if (ftraced_suspend != 1)
+	if (ftraced_suspend == 1)
+		command |= FTRACE_ENABLE_CALLS;
+
+	if (saved_ftrace_func != ftrace_trace_function) {
+		saved_ftrace_func = ftrace_trace_function;
+		command |= FTRACE_UPDATE_TRACE_FUNC;
+	}
+
+	if (!command || !ftrace_enabled)
 		goto out;
-	__unregister_ftrace_function(&ftrace_shutdown_ops);
 
-	if (ftrace_enabled)
-		ftrace_run_startup_code();
+	ftrace_run_update_code(command);
  out:
 	mutex_unlock(&ftraced_lock);
 }
 
 static void notrace ftrace_shutdown(void)
 {
+	int command = 0;
+
 	mutex_lock(&ftraced_lock);
 	ftraced_suspend--;
-	if (ftraced_suspend)
-		goto out;
+	if (!ftraced_suspend)
+		command |= FTRACE_DISABLE_CALLS;
 
-	if (ftrace_enabled)
-		ftrace_run_shutdown_code();
+	if (saved_ftrace_func != ftrace_trace_function) {
+		saved_ftrace_func = ftrace_trace_function;
+		command |= FTRACE_UPDATE_TRACE_FUNC;
+	}
 
-	__register_ftrace_function(&ftrace_shutdown_ops);
+	if (!command || !ftrace_enabled)
+		goto out;
+
+	ftrace_run_update_code(command);
  out:
 	mutex_unlock(&ftraced_lock);
 }
 
 static void notrace ftrace_startup_sysctl(void)
 {
+	int command = FTRACE_ENABLE_MCOUNT;
+
 	mutex_lock(&ftraced_lock);
+	/* Force update next time */
+	saved_ftrace_func = NULL;
 	/* ftraced_suspend is true if we want ftrace running */
 	if (ftraced_suspend)
-		ftrace_run_startup_code();
+		command |= FTRACE_ENABLE_CALLS;
+
+	ftrace_run_update_code(command);
 	mutex_unlock(&ftraced_lock);
 }
 
 static void notrace ftrace_shutdown_sysctl(void)
 {
+	int command = FTRACE_DISABLE_MCOUNT;
+
 	mutex_lock(&ftraced_lock);
 	/* ftraced_suspend is true if ftrace is running */
 	if (ftraced_suspend)
-		ftrace_run_shutdown_code();
+		command |= FTRACE_DISABLE_CALLS;
+
+	ftrace_run_update_code(command);
 	mutex_unlock(&ftraced_lock);
 }
 
@@ -430,11 +456,13 @@ static int notrace __ftrace_update_code(void *ignore)
 	struct dyn_ftrace *p;
 	struct hlist_head head;
 	struct hlist_node *t;
+	int save_ftrace_enabled;
 	cycle_t start, stop;
 	int i;
 
-	/* Don't be calling ftrace ops now */
-	__unregister_ftrace_function(&ftrace_shutdown_ops);
+	/* Don't be recording funcs now */
+	save_ftrace_enabled = ftrace_enabled;
+	ftrace_enabled = 0;
 
 	start = now(raw_smp_processor_id());
 	ftrace_update_cnt = 0;
@@ -449,7 +477,7 @@ static int notrace __ftrace_update_code(void *ignore)
 
 		/* all CPUS are stopped, we are safe to modify code */
 		hlist_for_each_entry(p, t, &head, node) {
-			ftrace_code_disable(p, MCOUNT_ADDR);
+			ftrace_code_disable(p);
 			ftrace_update_cnt++;
 		}
 
@@ -459,7 +487,7 @@ static int notrace __ftrace_update_code(void *ignore)
 	ftrace_update_time = stop - start;
 	ftrace_update_tot_cnt += ftrace_update_cnt;
 
-	__register_ftrace_function(&ftrace_shutdown_ops);
+	ftrace_enabled = save_ftrace_enabled;
 
 	return 0;
 }
@@ -515,11 +543,6 @@ static int __init ftrace_dyn_table_alloc(void)
 	struct ftrace_page *pg;
 	int cnt;
 	int i;
-	int ret;
-
-	ret = ftrace_dyn_arch_init();
-	if (ret)
-		return ret;
 
 	/* allocate a few pages */
 	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
@@ -557,11 +580,19 @@ static int __init ftrace_dyn_table_alloc(void)
 	return 0;
 }
 
-static int __init notrace ftrace_shutdown_init(void)
+static int __init notrace ftrace_dynamic_init(void)
 {
 	struct task_struct *p;
+	unsigned long addr;
 	int ret;
 
+	addr = (unsigned long)ftrace_record_ip;
+	stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS);
+
+	/* ftrace_dyn_arch_init places the return code in addr */
+	if (addr)
+		return addr;
+
 	ret = ftrace_dyn_table_alloc();
 	if (ret)
 		return ret;
@@ -570,12 +601,12 @@ static int __init notrace ftrace_shutdown_init(void)
 	if (IS_ERR(p))
 		return -1;
 
-	__register_ftrace_function(&ftrace_shutdown_ops);
+	last_ftrace_enabled = ftrace_enabled = 1;
 
 	return 0;
 }
 
-core_initcall(ftrace_shutdown_init);
+core_initcall(ftrace_dynamic_init);
 #else
 # define ftrace_startup() do { } while (0)
 # define ftrace_shutdown() do { } while (0)
@@ -599,9 +630,8 @@ int register_ftrace_function(struct ftrace_ops *ops)
 	int ret;
 
 	mutex_lock(&ftrace_sysctl_lock);
-	ftrace_startup();
-
 	ret = __register_ftrace_function(ops);
+	ftrace_startup();
 	mutex_unlock(&ftrace_sysctl_lock);
 
 	return ret;
@@ -619,10 +649,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
 
 	mutex_lock(&ftrace_sysctl_lock);
 	ret = __unregister_ftrace_function(ops);
-
-	if (ftrace_list == &ftrace_list_end)
-		ftrace_shutdown();
-
+	ftrace_shutdown();
 	mutex_unlock(&ftrace_sysctl_lock);
 
 	return ret;
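
The hunks above replace the old start/stop helper pair with a single stop_machine callback driven by a bitmask of commands. The snippet below is a minimal, self-contained user-space sketch of that command-bitmask pattern, assuming nothing beyond what the diff shows; it is not kernel code, and the CMD_* names, modify_code() and the printf placeholders are invented for illustration only.

/*
 * Illustrative sketch: accumulate command flags, dispatch them once,
 * the way ftrace_startup()/ftrace_shutdown() feed __ftrace_modify_code()
 * through ftrace_run_update_code() in the patch above.
 */
#include <stdio.h>

enum {
	CMD_ENABLE_CALLS	= (1 << 0),
	CMD_DISABLE_CALLS	= (1 << 1),
	CMD_UPDATE_TRACE_FUNC	= (1 << 2),
};

/* stand-in for __ftrace_modify_code(): act on whichever flags are set */
static int modify_code(void *data)
{
	int *command = data;

	if (*command & CMD_ENABLE_CALLS)
		printf("patch mcount call sites in\n");
	else if (*command & CMD_DISABLE_CALLS)
		printf("patch mcount call sites out\n");

	if (*command & CMD_UPDATE_TRACE_FUNC)
		printf("swap the active trace callback\n");

	return 0;
}

int main(void)
{
	int command = 0;

	/* like ftrace_startup(): collect the needed work, dispatch once */
	command |= CMD_ENABLE_CALLS;
	command |= CMD_UPDATE_TRACE_FUNC;

	if (command)
		modify_code(&command);	/* the kernel runs this under stop_machine_run() */

	return 0;
}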