Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/Kconfig                  |   1
-rw-r--r--  kernel/trace/ftrace.c                 |  15
-rw-r--r--  kernel/trace/ring_buffer.c            | 311
-rw-r--r--  kernel/trace/trace.c                  |  16
-rw-r--r--  kernel/trace/trace.h                  |   2
-rw-r--r--  kernel/trace/trace_branch.c           |   4
-rw-r--r--  kernel/trace/trace_functions_graph.c  | 168
-rw-r--r--  kernel/trace/trace_stack.c            |  13
8 files changed, 418 insertions, 112 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 8b6b673b4d6c..bde6f03512d5 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -67,6 +67,7 @@ config FUNCTION_GRAPH_TRACER | |||
67 | bool "Kernel Function Graph Tracer" | 67 | bool "Kernel Function Graph Tracer" |
68 | depends on HAVE_FUNCTION_GRAPH_TRACER | 68 | depends on HAVE_FUNCTION_GRAPH_TRACER |
69 | depends on FUNCTION_TRACER | 69 | depends on FUNCTION_TRACER |
70 | default y | ||
70 | help | 71 | help |
71 | Enable the kernel to trace a function at both its return | 72 | Enable the kernel to trace a function at both its return |
72 | and its entry. | 73 | and its entry. |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 2e78628443e8..65b9e863056b 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1636,11 +1636,15 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, | |||
1636 | 1636 | ||
1637 | static atomic_t ftrace_graph_active; | 1637 | static atomic_t ftrace_graph_active; |
1638 | 1638 | ||
1639 | int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) | ||
1640 | { | ||
1641 | return 0; | ||
1642 | } | ||
1643 | |||
1639 | /* The callbacks that hook a function */ | 1644 | /* The callbacks that hook a function */ |
1640 | trace_func_graph_ret_t ftrace_graph_return = | 1645 | trace_func_graph_ret_t ftrace_graph_return = |
1641 | (trace_func_graph_ret_t)ftrace_stub; | 1646 | (trace_func_graph_ret_t)ftrace_stub; |
1642 | trace_func_graph_ent_t ftrace_graph_entry = | 1647 | trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub; |
1643 | (trace_func_graph_ent_t)ftrace_stub; | ||
1644 | 1648 | ||
1645 | /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */ | 1649 | /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */ |
1646 | static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list) | 1650 | static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list) |
@@ -1738,7 +1742,7 @@ void unregister_ftrace_graph(void) | |||
1738 | 1742 | ||
1739 | atomic_dec(&ftrace_graph_active); | 1743 | atomic_dec(&ftrace_graph_active); |
1740 | ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; | 1744 | ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; |
1741 | ftrace_graph_entry = (trace_func_graph_ent_t)ftrace_stub; | 1745 | ftrace_graph_entry = ftrace_graph_entry_stub; |
1742 | ftrace_shutdown(FTRACE_STOP_FUNC_RET); | 1746 | ftrace_shutdown(FTRACE_STOP_FUNC_RET); |
1743 | 1747 | ||
1744 | mutex_unlock(&ftrace_sysctl_lock); | 1748 | mutex_unlock(&ftrace_sysctl_lock); |
@@ -1769,5 +1773,10 @@ void ftrace_graph_exit_task(struct task_struct *t) | |||
1769 | 1773 | ||
1770 | kfree(ret_stack); | 1774 | kfree(ret_stack); |
1771 | } | 1775 | } |
1776 | |||
1777 | void ftrace_graph_stop(void) | ||
1778 | { | ||
1779 | ftrace_stop(); | ||
1780 | } | ||
1772 | #endif | 1781 | #endif |
1773 | 1782 | ||
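For context on the signature change above: the graph-entry hook now returns an int, so a dedicated ftrace_graph_entry_stub() returning 0 replaces the old (trace_func_graph_ent_t)ftrace_stub cast. A minimal sketch of a callback pair under the new typedef follows; my_graph_entry/my_graph_return are hypothetical names, and the meaning of the return value (nonzero = trace this entry) is inferred from trace_graph_entry() returning 1 while the stub returns 0.

#include <linux/ftrace.h>

/* Hypothetical entry handler: the typedef now expects an int return. */
static int my_graph_entry(struct ftrace_graph_ent *trace)
{
	/* presumably: return 0 to skip tracing this entry, nonzero to trace it */
	return 1;
}

/* The return handler keeps its void signature. */
static void my_graph_return(struct ftrace_graph_ret *trace)
{
}

/*
 * Registration sketch, assuming the usual retfunc/entryfunc argument order
 * of this tree's register_ftrace_graph():
 *
 *	register_ftrace_graph(my_graph_return, my_graph_entry);
 */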
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index e206951603c1..7f69cfeaadf7 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -195,20 +195,24 @@ void *ring_buffer_event_data(struct ring_buffer_event *event) | |||
195 | #define TS_MASK ((1ULL << TS_SHIFT) - 1) | 195 | #define TS_MASK ((1ULL << TS_SHIFT) - 1) |
196 | #define TS_DELTA_TEST (~TS_MASK) | 196 | #define TS_DELTA_TEST (~TS_MASK) |
197 | 197 | ||
198 | /* | 198 | struct buffer_data_page { |
199 | * This hack stolen from mm/slob.c. | ||
200 | * We can store per page timing information in the page frame of the page. | ||
201 | * Thanks to Peter Zijlstra for suggesting this idea. | ||
202 | */ | ||
203 | struct buffer_page { | ||
204 | u64 time_stamp; /* page time stamp */ | 199 | u64 time_stamp; /* page time stamp */ |
205 | local_t write; /* index for next write */ | ||
206 | local_t commit; /* write commited index */ | 200 | local_t commit; /* write commited index */ |
201 | unsigned char data[]; /* data of buffer page */ | ||
202 | }; | ||
203 | |||
204 | struct buffer_page { | ||
205 | local_t write; /* index for next write */ | ||
207 | unsigned read; /* index for next read */ | 206 | unsigned read; /* index for next read */ |
208 | struct list_head list; /* list of free pages */ | 207 | struct list_head list; /* list of free pages */ |
209 | void *page; /* Actual data page */ | 208 | struct buffer_data_page *page; /* Actual data page */ |
210 | }; | 209 | }; |
211 | 210 | ||
211 | static void rb_init_page(struct buffer_data_page *bpage) | ||
212 | { | ||
213 | local_set(&bpage->commit, 0); | ||
214 | } | ||
215 | |||
212 | /* | 216 | /* |
213 | * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing | 217 | * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing |
214 | * this issue out. | 218 | * this issue out. |
@@ -230,7 +234,7 @@ static inline int test_time_stamp(u64 delta) | |||
230 | return 0; | 234 | return 0; |
231 | } | 235 | } |
232 | 236 | ||
233 | #define BUF_PAGE_SIZE PAGE_SIZE | 237 | #define BUF_PAGE_SIZE (PAGE_SIZE - sizeof(struct buffer_data_page)) |
234 | 238 | ||
235 | /* | 239 | /* |
236 | * head_page == tail_page && head == tail then buffer is empty. | 240 | * head_page == tail_page && head == tail then buffer is empty. |
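Orientation sketch for the two hunks above (not part of the diff): the per-page time stamp and commit counter now live at the head of the data page itself in struct buffer_data_page, so a whole page can later be handed off to a reader with its bookkeeping attached (see ring_buffer_read_page() further down), and BUF_PAGE_SIZE shrinks by the header size.

/*
 * Layout of one data page, assuming only the definitions shown above:
 *
 *  |<-------------------------- PAGE_SIZE -------------------------->|
 *  +------------+--------+------------------------------------------+
 *  | time_stamp | commit |           data[BUF_PAGE_SIZE]            |
 *  +------------+--------+------------------------------------------+
 *  |<-- struct buffer_data_page -->|
 *
 *  BUF_PAGE_SIZE == PAGE_SIZE - sizeof(struct buffer_data_page)
 */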
@@ -294,19 +298,19 @@ struct ring_buffer_iter { | |||
294 | static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) | 298 | static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) |
295 | { | 299 | { |
296 | struct list_head *head = &cpu_buffer->pages; | 300 | struct list_head *head = &cpu_buffer->pages; |
297 | struct buffer_page *page, *tmp; | 301 | struct buffer_page *bpage, *tmp; |
298 | 302 | ||
299 | if (RB_WARN_ON(cpu_buffer, head->next->prev != head)) | 303 | if (RB_WARN_ON(cpu_buffer, head->next->prev != head)) |
300 | return -1; | 304 | return -1; |
301 | if (RB_WARN_ON(cpu_buffer, head->prev->next != head)) | 305 | if (RB_WARN_ON(cpu_buffer, head->prev->next != head)) |
302 | return -1; | 306 | return -1; |
303 | 307 | ||
304 | list_for_each_entry_safe(page, tmp, head, list) { | 308 | list_for_each_entry_safe(bpage, tmp, head, list) { |
305 | if (RB_WARN_ON(cpu_buffer, | 309 | if (RB_WARN_ON(cpu_buffer, |
306 | page->list.next->prev != &page->list)) | 310 | bpage->list.next->prev != &bpage->list)) |
307 | return -1; | 311 | return -1; |
308 | if (RB_WARN_ON(cpu_buffer, | 312 | if (RB_WARN_ON(cpu_buffer, |
309 | page->list.prev->next != &page->list)) | 313 | bpage->list.prev->next != &bpage->list)) |
310 | return -1; | 314 | return -1; |
311 | } | 315 | } |
312 | 316 | ||
@@ -317,22 +321,23 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, | |||
317 | unsigned nr_pages) | 321 | unsigned nr_pages) |
318 | { | 322 | { |
319 | struct list_head *head = &cpu_buffer->pages; | 323 | struct list_head *head = &cpu_buffer->pages; |
320 | struct buffer_page *page, *tmp; | 324 | struct buffer_page *bpage, *tmp; |
321 | unsigned long addr; | 325 | unsigned long addr; |
322 | LIST_HEAD(pages); | 326 | LIST_HEAD(pages); |
323 | unsigned i; | 327 | unsigned i; |
324 | 328 | ||
325 | for (i = 0; i < nr_pages; i++) { | 329 | for (i = 0; i < nr_pages; i++) { |
326 | page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()), | 330 | bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), |
327 | GFP_KERNEL, cpu_to_node(cpu_buffer->cpu)); | 331 | GFP_KERNEL, cpu_to_node(cpu_buffer->cpu)); |
328 | if (!page) | 332 | if (!bpage) |
329 | goto free_pages; | 333 | goto free_pages; |
330 | list_add(&page->list, &pages); | 334 | list_add(&bpage->list, &pages); |
331 | 335 | ||
332 | addr = __get_free_page(GFP_KERNEL); | 336 | addr = __get_free_page(GFP_KERNEL); |
333 | if (!addr) | 337 | if (!addr) |
334 | goto free_pages; | 338 | goto free_pages; |
335 | page->page = (void *)addr; | 339 | bpage->page = (void *)addr; |
340 | rb_init_page(bpage->page); | ||
336 | } | 341 | } |
337 | 342 | ||
338 | list_splice(&pages, head); | 343 | list_splice(&pages, head); |
@@ -342,9 +347,9 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, | |||
342 | return 0; | 347 | return 0; |
343 | 348 | ||
344 | free_pages: | 349 | free_pages: |
345 | list_for_each_entry_safe(page, tmp, &pages, list) { | 350 | list_for_each_entry_safe(bpage, tmp, &pages, list) { |
346 | list_del_init(&page->list); | 351 | list_del_init(&bpage->list); |
347 | free_buffer_page(page); | 352 | free_buffer_page(bpage); |
348 | } | 353 | } |
349 | return -ENOMEM; | 354 | return -ENOMEM; |
350 | } | 355 | } |
@@ -353,7 +358,7 @@ static struct ring_buffer_per_cpu * | |||
353 | rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) | 358 | rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) |
354 | { | 359 | { |
355 | struct ring_buffer_per_cpu *cpu_buffer; | 360 | struct ring_buffer_per_cpu *cpu_buffer; |
356 | struct buffer_page *page; | 361 | struct buffer_page *bpage; |
357 | unsigned long addr; | 362 | unsigned long addr; |
358 | int ret; | 363 | int ret; |
359 | 364 | ||
@@ -368,16 +373,17 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) | |||
368 | cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; | 373 | cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; |
369 | INIT_LIST_HEAD(&cpu_buffer->pages); | 374 | INIT_LIST_HEAD(&cpu_buffer->pages); |
370 | 375 | ||
371 | page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()), | 376 | bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), |
372 | GFP_KERNEL, cpu_to_node(cpu)); | 377 | GFP_KERNEL, cpu_to_node(cpu)); |
373 | if (!page) | 378 | if (!bpage) |
374 | goto fail_free_buffer; | 379 | goto fail_free_buffer; |
375 | 380 | ||
376 | cpu_buffer->reader_page = page; | 381 | cpu_buffer->reader_page = bpage; |
377 | addr = __get_free_page(GFP_KERNEL); | 382 | addr = __get_free_page(GFP_KERNEL); |
378 | if (!addr) | 383 | if (!addr) |
379 | goto fail_free_reader; | 384 | goto fail_free_reader; |
380 | page->page = (void *)addr; | 385 | bpage->page = (void *)addr; |
386 | rb_init_page(bpage->page); | ||
381 | 387 | ||
382 | INIT_LIST_HEAD(&cpu_buffer->reader_page->list); | 388 | INIT_LIST_HEAD(&cpu_buffer->reader_page->list); |
383 | 389 | ||
@@ -402,14 +408,14 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) | |||
402 | static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) | 408 | static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) |
403 | { | 409 | { |
404 | struct list_head *head = &cpu_buffer->pages; | 410 | struct list_head *head = &cpu_buffer->pages; |
405 | struct buffer_page *page, *tmp; | 411 | struct buffer_page *bpage, *tmp; |
406 | 412 | ||
407 | list_del_init(&cpu_buffer->reader_page->list); | 413 | list_del_init(&cpu_buffer->reader_page->list); |
408 | free_buffer_page(cpu_buffer->reader_page); | 414 | free_buffer_page(cpu_buffer->reader_page); |
409 | 415 | ||
410 | list_for_each_entry_safe(page, tmp, head, list) { | 416 | list_for_each_entry_safe(bpage, tmp, head, list) { |
411 | list_del_init(&page->list); | 417 | list_del_init(&bpage->list); |
412 | free_buffer_page(page); | 418 | free_buffer_page(bpage); |
413 | } | 419 | } |
414 | kfree(cpu_buffer); | 420 | kfree(cpu_buffer); |
415 | } | 421 | } |
@@ -506,7 +512,7 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer); | |||
506 | static void | 512 | static void |
507 | rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages) | 513 | rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages) |
508 | { | 514 | { |
509 | struct buffer_page *page; | 515 | struct buffer_page *bpage; |
510 | struct list_head *p; | 516 | struct list_head *p; |
511 | unsigned i; | 517 | unsigned i; |
512 | 518 | ||
@@ -517,9 +523,9 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages) | |||
517 | if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages))) | 523 | if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages))) |
518 | return; | 524 | return; |
519 | p = cpu_buffer->pages.next; | 525 | p = cpu_buffer->pages.next; |
520 | page = list_entry(p, struct buffer_page, list); | 526 | bpage = list_entry(p, struct buffer_page, list); |
521 | list_del_init(&page->list); | 527 | list_del_init(&bpage->list); |
522 | free_buffer_page(page); | 528 | free_buffer_page(bpage); |
523 | } | 529 | } |
524 | if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages))) | 530 | if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages))) |
525 | return; | 531 | return; |
@@ -536,7 +542,7 @@ static void | |||
536 | rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer, | 542 | rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer, |
537 | struct list_head *pages, unsigned nr_pages) | 543 | struct list_head *pages, unsigned nr_pages) |
538 | { | 544 | { |
539 | struct buffer_page *page; | 545 | struct buffer_page *bpage; |
540 | struct list_head *p; | 546 | struct list_head *p; |
541 | unsigned i; | 547 | unsigned i; |
542 | 548 | ||
@@ -547,9 +553,9 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer, | |||
547 | if (RB_WARN_ON(cpu_buffer, list_empty(pages))) | 553 | if (RB_WARN_ON(cpu_buffer, list_empty(pages))) |
548 | return; | 554 | return; |
549 | p = pages->next; | 555 | p = pages->next; |
550 | page = list_entry(p, struct buffer_page, list); | 556 | bpage = list_entry(p, struct buffer_page, list); |
551 | list_del_init(&page->list); | 557 | list_del_init(&bpage->list); |
552 | list_add_tail(&page->list, &cpu_buffer->pages); | 558 | list_add_tail(&bpage->list, &cpu_buffer->pages); |
553 | } | 559 | } |
554 | rb_reset_cpu(cpu_buffer); | 560 | rb_reset_cpu(cpu_buffer); |
555 | 561 | ||
@@ -576,7 +582,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size) | |||
576 | { | 582 | { |
577 | struct ring_buffer_per_cpu *cpu_buffer; | 583 | struct ring_buffer_per_cpu *cpu_buffer; |
578 | unsigned nr_pages, rm_pages, new_pages; | 584 | unsigned nr_pages, rm_pages, new_pages; |
579 | struct buffer_page *page, *tmp; | 585 | struct buffer_page *bpage, *tmp; |
580 | unsigned long buffer_size; | 586 | unsigned long buffer_size; |
581 | unsigned long addr; | 587 | unsigned long addr; |
582 | LIST_HEAD(pages); | 588 | LIST_HEAD(pages); |
@@ -637,16 +643,17 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size) | |||
637 | 643 | ||
638 | for_each_buffer_cpu(buffer, cpu) { | 644 | for_each_buffer_cpu(buffer, cpu) { |
639 | for (i = 0; i < new_pages; i++) { | 645 | for (i = 0; i < new_pages; i++) { |
640 | page = kzalloc_node(ALIGN(sizeof(*page), | 646 | bpage = kzalloc_node(ALIGN(sizeof(*bpage), |
641 | cache_line_size()), | 647 | cache_line_size()), |
642 | GFP_KERNEL, cpu_to_node(cpu)); | 648 | GFP_KERNEL, cpu_to_node(cpu)); |
643 | if (!page) | 649 | if (!bpage) |
644 | goto free_pages; | 650 | goto free_pages; |
645 | list_add(&page->list, &pages); | 651 | list_add(&bpage->list, &pages); |
646 | addr = __get_free_page(GFP_KERNEL); | 652 | addr = __get_free_page(GFP_KERNEL); |
647 | if (!addr) | 653 | if (!addr) |
648 | goto free_pages; | 654 | goto free_pages; |
649 | page->page = (void *)addr; | 655 | bpage->page = (void *)addr; |
656 | rb_init_page(bpage->page); | ||
650 | } | 657 | } |
651 | } | 658 | } |
652 | 659 | ||
@@ -667,9 +674,9 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size) | |||
667 | return size; | 674 | return size; |
668 | 675 | ||
669 | free_pages: | 676 | free_pages: |
670 | list_for_each_entry_safe(page, tmp, &pages, list) { | 677 | list_for_each_entry_safe(bpage, tmp, &pages, list) { |
671 | list_del_init(&page->list); | 678 | list_del_init(&bpage->list); |
672 | free_buffer_page(page); | 679 | free_buffer_page(bpage); |
673 | } | 680 | } |
674 | mutex_unlock(&buffer->mutex); | 681 | mutex_unlock(&buffer->mutex); |
675 | return -ENOMEM; | 682 | return -ENOMEM; |
@@ -680,9 +687,15 @@ static inline int rb_null_event(struct ring_buffer_event *event) | |||
680 | return event->type == RINGBUF_TYPE_PADDING; | 687 | return event->type == RINGBUF_TYPE_PADDING; |
681 | } | 688 | } |
682 | 689 | ||
683 | static inline void *__rb_page_index(struct buffer_page *page, unsigned index) | 690 | static inline void * |
691 | __rb_data_page_index(struct buffer_data_page *bpage, unsigned index) | ||
684 | { | 692 | { |
685 | return page->page + index; | 693 | return bpage->data + index; |
694 | } | ||
695 | |||
696 | static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index) | ||
697 | { | ||
698 | return bpage->page->data + index; | ||
686 | } | 699 | } |
687 | 700 | ||
688 | static inline struct ring_buffer_event * | 701 | static inline struct ring_buffer_event * |
@@ -712,7 +725,7 @@ static inline unsigned rb_page_write(struct buffer_page *bpage) | |||
712 | 725 | ||
713 | static inline unsigned rb_page_commit(struct buffer_page *bpage) | 726 | static inline unsigned rb_page_commit(struct buffer_page *bpage) |
714 | { | 727 | { |
715 | return local_read(&bpage->commit); | 728 | return local_read(&bpage->page->commit); |
716 | } | 729 | } |
717 | 730 | ||
718 | /* Size is determined by what has been commited */ | 731 | /* Size is determined by what has been commited */ |
@@ -758,14 +771,14 @@ static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer) | |||
758 | } | 771 | } |
759 | 772 | ||
760 | static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer, | 773 | static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer, |
761 | struct buffer_page **page) | 774 | struct buffer_page **bpage) |
762 | { | 775 | { |
763 | struct list_head *p = (*page)->list.next; | 776 | struct list_head *p = (*bpage)->list.next; |
764 | 777 | ||
765 | if (p == &cpu_buffer->pages) | 778 | if (p == &cpu_buffer->pages) |
766 | p = p->next; | 779 | p = p->next; |
767 | 780 | ||
768 | *page = list_entry(p, struct buffer_page, list); | 781 | *bpage = list_entry(p, struct buffer_page, list); |
769 | } | 782 | } |
770 | 783 | ||
771 | static inline unsigned | 784 | static inline unsigned |
@@ -804,14 +817,15 @@ rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer, | |||
804 | if (RB_WARN_ON(cpu_buffer, | 817 | if (RB_WARN_ON(cpu_buffer, |
805 | cpu_buffer->commit_page == cpu_buffer->tail_page)) | 818 | cpu_buffer->commit_page == cpu_buffer->tail_page)) |
806 | return; | 819 | return; |
807 | cpu_buffer->commit_page->commit = | 820 | cpu_buffer->commit_page->page->commit = |
808 | cpu_buffer->commit_page->write; | 821 | cpu_buffer->commit_page->write; |
809 | rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); | 822 | rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); |
810 | cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp; | 823 | cpu_buffer->write_stamp = |
824 | cpu_buffer->commit_page->page->time_stamp; | ||
811 | } | 825 | } |
812 | 826 | ||
813 | /* Now set the commit to the event's index */ | 827 | /* Now set the commit to the event's index */ |
814 | local_set(&cpu_buffer->commit_page->commit, index); | 828 | local_set(&cpu_buffer->commit_page->page->commit, index); |
815 | } | 829 | } |
816 | 830 | ||
817 | static inline void | 831 | static inline void |
@@ -826,16 +840,17 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) | |||
826 | * assign the commit to the tail. | 840 | * assign the commit to the tail. |
827 | */ | 841 | */ |
828 | while (cpu_buffer->commit_page != cpu_buffer->tail_page) { | 842 | while (cpu_buffer->commit_page != cpu_buffer->tail_page) { |
829 | cpu_buffer->commit_page->commit = | 843 | cpu_buffer->commit_page->page->commit = |
830 | cpu_buffer->commit_page->write; | 844 | cpu_buffer->commit_page->write; |
831 | rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); | 845 | rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); |
832 | cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp; | 846 | cpu_buffer->write_stamp = |
847 | cpu_buffer->commit_page->page->time_stamp; | ||
833 | /* add barrier to keep gcc from optimizing too much */ | 848 | /* add barrier to keep gcc from optimizing too much */ |
834 | barrier(); | 849 | barrier(); |
835 | } | 850 | } |
836 | while (rb_commit_index(cpu_buffer) != | 851 | while (rb_commit_index(cpu_buffer) != |
837 | rb_page_write(cpu_buffer->commit_page)) { | 852 | rb_page_write(cpu_buffer->commit_page)) { |
838 | cpu_buffer->commit_page->commit = | 853 | cpu_buffer->commit_page->page->commit = |
839 | cpu_buffer->commit_page->write; | 854 | cpu_buffer->commit_page->write; |
840 | barrier(); | 855 | barrier(); |
841 | } | 856 | } |
@@ -843,7 +858,7 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) | |||
843 | 858 | ||
844 | static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | 859 | static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer) |
845 | { | 860 | { |
846 | cpu_buffer->read_stamp = cpu_buffer->reader_page->time_stamp; | 861 | cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp; |
847 | cpu_buffer->reader_page->read = 0; | 862 | cpu_buffer->reader_page->read = 0; |
848 | } | 863 | } |
849 | 864 | ||
@@ -862,7 +877,7 @@ static inline void rb_inc_iter(struct ring_buffer_iter *iter) | |||
862 | else | 877 | else |
863 | rb_inc_page(cpu_buffer, &iter->head_page); | 878 | rb_inc_page(cpu_buffer, &iter->head_page); |
864 | 879 | ||
865 | iter->read_stamp = iter->head_page->time_stamp; | 880 | iter->read_stamp = iter->head_page->page->time_stamp; |
866 | iter->head = 0; | 881 | iter->head = 0; |
867 | } | 882 | } |
868 | 883 | ||
@@ -998,12 +1013,12 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, | |||
998 | */ | 1013 | */ |
999 | if (tail_page == cpu_buffer->tail_page) { | 1014 | if (tail_page == cpu_buffer->tail_page) { |
1000 | local_set(&next_page->write, 0); | 1015 | local_set(&next_page->write, 0); |
1001 | local_set(&next_page->commit, 0); | 1016 | local_set(&next_page->page->commit, 0); |
1002 | cpu_buffer->tail_page = next_page; | 1017 | cpu_buffer->tail_page = next_page; |
1003 | 1018 | ||
1004 | /* reread the time stamp */ | 1019 | /* reread the time stamp */ |
1005 | *ts = ring_buffer_time_stamp(cpu_buffer->cpu); | 1020 | *ts = ring_buffer_time_stamp(cpu_buffer->cpu); |
1006 | cpu_buffer->tail_page->time_stamp = *ts; | 1021 | cpu_buffer->tail_page->page->time_stamp = *ts; |
1007 | } | 1022 | } |
1008 | 1023 | ||
1009 | /* | 1024 | /* |
@@ -1048,7 +1063,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, | |||
1048 | * this page's time stamp. | 1063 | * this page's time stamp. |
1049 | */ | 1064 | */ |
1050 | if (!tail && rb_is_commit(cpu_buffer, event)) | 1065 | if (!tail && rb_is_commit(cpu_buffer, event)) |
1051 | cpu_buffer->commit_page->time_stamp = *ts; | 1066 | cpu_buffer->commit_page->page->time_stamp = *ts; |
1052 | 1067 | ||
1053 | return event; | 1068 | return event; |
1054 | 1069 | ||
@@ -1099,7 +1114,7 @@ rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer, | |||
1099 | event->time_delta = *delta & TS_MASK; | 1114 | event->time_delta = *delta & TS_MASK; |
1100 | event->array[0] = *delta >> TS_SHIFT; | 1115 | event->array[0] = *delta >> TS_SHIFT; |
1101 | } else { | 1116 | } else { |
1102 | cpu_buffer->commit_page->time_stamp = *ts; | 1117 | cpu_buffer->commit_page->page->time_stamp = *ts; |
1103 | event->time_delta = 0; | 1118 | event->time_delta = 0; |
1104 | event->array[0] = 0; | 1119 | event->array[0] = 0; |
1105 | } | 1120 | } |
@@ -1552,7 +1567,7 @@ static void rb_iter_reset(struct ring_buffer_iter *iter) | |||
1552 | if (iter->head) | 1567 | if (iter->head) |
1553 | iter->read_stamp = cpu_buffer->read_stamp; | 1568 | iter->read_stamp = cpu_buffer->read_stamp; |
1554 | else | 1569 | else |
1555 | iter->read_stamp = iter->head_page->time_stamp; | 1570 | iter->read_stamp = iter->head_page->page->time_stamp; |
1556 | } | 1571 | } |
1557 | 1572 | ||
1558 | /** | 1573 | /** |
@@ -1696,7 +1711,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | |||
1696 | cpu_buffer->reader_page->list.prev = reader->list.prev; | 1711 | cpu_buffer->reader_page->list.prev = reader->list.prev; |
1697 | 1712 | ||
1698 | local_set(&cpu_buffer->reader_page->write, 0); | 1713 | local_set(&cpu_buffer->reader_page->write, 0); |
1699 | local_set(&cpu_buffer->reader_page->commit, 0); | 1714 | local_set(&cpu_buffer->reader_page->page->commit, 0); |
1700 | 1715 | ||
1701 | /* Make the reader page now replace the head */ | 1716 | /* Make the reader page now replace the head */ |
1702 | reader->list.prev->next = &cpu_buffer->reader_page->list; | 1717 | reader->list.prev->next = &cpu_buffer->reader_page->list; |
@@ -2088,7 +2103,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) | |||
2088 | cpu_buffer->head_page | 2103 | cpu_buffer->head_page |
2089 | = list_entry(cpu_buffer->pages.next, struct buffer_page, list); | 2104 | = list_entry(cpu_buffer->pages.next, struct buffer_page, list); |
2090 | local_set(&cpu_buffer->head_page->write, 0); | 2105 | local_set(&cpu_buffer->head_page->write, 0); |
2091 | local_set(&cpu_buffer->head_page->commit, 0); | 2106 | local_set(&cpu_buffer->head_page->page->commit, 0); |
2092 | 2107 | ||
2093 | cpu_buffer->head_page->read = 0; | 2108 | cpu_buffer->head_page->read = 0; |
2094 | 2109 | ||
@@ -2097,7 +2112,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) | |||
2097 | 2112 | ||
2098 | INIT_LIST_HEAD(&cpu_buffer->reader_page->list); | 2113 | INIT_LIST_HEAD(&cpu_buffer->reader_page->list); |
2099 | local_set(&cpu_buffer->reader_page->write, 0); | 2114 | local_set(&cpu_buffer->reader_page->write, 0); |
2100 | local_set(&cpu_buffer->reader_page->commit, 0); | 2115 | local_set(&cpu_buffer->reader_page->page->commit, 0); |
2101 | cpu_buffer->reader_page->read = 0; | 2116 | cpu_buffer->reader_page->read = 0; |
2102 | 2117 | ||
2103 | cpu_buffer->overrun = 0; | 2118 | cpu_buffer->overrun = 0; |
@@ -2223,6 +2238,166 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a, | |||
2223 | return 0; | 2238 | return 0; |
2224 | } | 2239 | } |
2225 | 2240 | ||
2241 | static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer, | ||
2242 | struct buffer_data_page *bpage) | ||
2243 | { | ||
2244 | struct ring_buffer_event *event; | ||
2245 | unsigned long head; | ||
2246 | |||
2247 | __raw_spin_lock(&cpu_buffer->lock); | ||
2248 | for (head = 0; head < local_read(&bpage->commit); | ||
2249 | head += rb_event_length(event)) { | ||
2250 | |||
2251 | event = __rb_data_page_index(bpage, head); | ||
2252 | if (RB_WARN_ON(cpu_buffer, rb_null_event(event))) | ||
2253 | return; | ||
2254 | /* Only count data entries */ | ||
2255 | if (event->type != RINGBUF_TYPE_DATA) | ||
2256 | continue; | ||
2257 | cpu_buffer->entries--; | ||
2258 | } | ||
2259 | __raw_spin_unlock(&cpu_buffer->lock); | ||
2260 | } | ||
2261 | |||
2262 | /** | ||
2263 | * ring_buffer_alloc_read_page - allocate a page to read from buffer | ||
2264 | * @buffer: the buffer to allocate for. | ||
2265 | * | ||
2266 | * This function is used in conjunction with ring_buffer_read_page. | ||
2267 | * When reading a full page from the ring buffer, these functions | ||
2268 | * can be used to speed up the process. The calling function should | ||
2269 | * allocate a few pages first with this function. Then when it | ||
2270 | * needs to get pages from the ring buffer, it passes the result | ||
2271 | * of this function into ring_buffer_read_page, which will swap | ||
2272 | * the page that was allocated, with the read page of the buffer. | ||
2273 | * | ||
2274 | * Returns: | ||
2275 | * The page allocated, or NULL on error. | ||
2276 | */ | ||
2277 | void *ring_buffer_alloc_read_page(struct ring_buffer *buffer) | ||
2278 | { | ||
2279 | unsigned long addr; | ||
2280 | struct buffer_data_page *bpage; | ||
2281 | |||
2282 | addr = __get_free_page(GFP_KERNEL); | ||
2283 | if (!addr) | ||
2284 | return NULL; | ||
2285 | |||
2286 | bpage = (void *)addr; | ||
2287 | |||
2288 | return bpage; | ||
2289 | } | ||
2290 | |||
2291 | /** | ||
2292 | * ring_buffer_free_read_page - free an allocated read page | ||
2293 | * @buffer: the buffer the page was allocate for | ||
2294 | * @data: the page to free | ||
2295 | * | ||
2296 | * Free a page allocated from ring_buffer_alloc_read_page. | ||
2297 | */ | ||
2298 | void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data) | ||
2299 | { | ||
2300 | free_page((unsigned long)data); | ||
2301 | } | ||
2302 | |||
2303 | /** | ||
2304 | * ring_buffer_read_page - extract a page from the ring buffer | ||
2305 | * @buffer: buffer to extract from | ||
2306 | * @data_page: the page to use allocated from ring_buffer_alloc_read_page | ||
2307 | * @cpu: the cpu of the buffer to extract | ||
2308 | * @full: should the extraction only happen when the page is full. | ||
2309 | * | ||
2310 | * This function will pull out a page from the ring buffer and consume it. | ||
2311 | * @data_page must be the address of the variable that was returned | ||
2312 | * from ring_buffer_alloc_read_page. This is because the page might be used | ||
2313 | * to swap with a page in the ring buffer. | ||
2314 | * | ||
2315 | * for example: | ||
2316 | * rpage = ring_buffer_alloc_page(buffer); | ||
2317 | * if (!rpage) | ||
2318 | * return error; | ||
2319 | * ret = ring_buffer_read_page(buffer, &rpage, cpu, 0); | ||
2320 | * if (ret) | ||
2321 | * process_page(rpage); | ||
2322 | * | ||
2323 | * When @full is set, the function will not return true unless | ||
2324 | * the writer is off the reader page. | ||
2325 | * | ||
2326 | * Note: it is up to the calling functions to handle sleeps and wakeups. | ||
2327 | * The ring buffer can be used anywhere in the kernel and can not | ||
2328 | * blindly call wake_up. The layer that uses the ring buffer must be | ||
2329 | * responsible for that. | ||
2330 | * | ||
2331 | * Returns: | ||
2332 | * 1 if data has been transferred | ||
2333 | * 0 if no data has been transferred. | ||
2334 | */ | ||
2335 | int ring_buffer_read_page(struct ring_buffer *buffer, | ||
2336 | void **data_page, int cpu, int full) | ||
2337 | { | ||
2338 | struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; | ||
2339 | struct ring_buffer_event *event; | ||
2340 | struct buffer_data_page *bpage; | ||
2341 | unsigned long flags; | ||
2342 | int ret = 0; | ||
2343 | |||
2344 | if (!data_page) | ||
2345 | return 0; | ||
2346 | |||
2347 | bpage = *data_page; | ||
2348 | if (!bpage) | ||
2349 | return 0; | ||
2350 | |||
2351 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | ||
2352 | |||
2353 | /* | ||
2354 | * rb_buffer_peek will get the next ring buffer if | ||
2355 | * the current reader page is empty. | ||
2356 | */ | ||
2357 | event = rb_buffer_peek(buffer, cpu, NULL); | ||
2358 | if (!event) | ||
2359 | goto out; | ||
2360 | |||
2361 | /* check for data */ | ||
2362 | if (!local_read(&cpu_buffer->reader_page->page->commit)) | ||
2363 | goto out; | ||
2364 | /* | ||
2365 | * If the writer is already off of the read page, then simply | ||
2366 | * switch the read page with the given page. Otherwise | ||
2367 | * we need to copy the data from the reader to the writer. | ||
2368 | */ | ||
2369 | if (cpu_buffer->reader_page == cpu_buffer->commit_page) { | ||
2370 | unsigned int read = cpu_buffer->reader_page->read; | ||
2371 | |||
2372 | if (full) | ||
2373 | goto out; | ||
2374 | /* The writer is still on the reader page, we must copy */ | ||
2375 | bpage = cpu_buffer->reader_page->page; | ||
2376 | memcpy(bpage->data, | ||
2377 | cpu_buffer->reader_page->page->data + read, | ||
2378 | local_read(&bpage->commit) - read); | ||
2379 | |||
2380 | /* consume what was read */ | ||
2381 | cpu_buffer->reader_page += read; | ||
2382 | |||
2383 | } else { | ||
2384 | /* swap the pages */ | ||
2385 | rb_init_page(bpage); | ||
2386 | bpage = cpu_buffer->reader_page->page; | ||
2387 | cpu_buffer->reader_page->page = *data_page; | ||
2388 | cpu_buffer->reader_page->read = 0; | ||
2389 | *data_page = bpage; | ||
2390 | } | ||
2391 | ret = 1; | ||
2392 | |||
2393 | /* update the entry counter */ | ||
2394 | rb_remove_entries(cpu_buffer, bpage); | ||
2395 | out: | ||
2396 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | ||
2397 | |||
2398 | return ret; | ||
2399 | } | ||
2400 | |||
2226 | static ssize_t | 2401 | static ssize_t |
2227 | rb_simple_read(struct file *filp, char __user *ubuf, | 2402 | rb_simple_read(struct file *filp, char __user *ubuf, |
2228 | size_t cnt, loff_t *ppos) | 2403 | size_t cnt, loff_t *ppos) |
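A minimal usage sketch for the read-page interface added above, expanding on the example in the ring_buffer_read_page() kerneldoc; dump_one_page() and process_page() are hypothetical, and the return-value handling assumes only what the comments in this diff state (1 = data transferred, 0 = none).

/* Hypothetical caller of the interface introduced in this diff. */
static int dump_one_page(struct ring_buffer *buffer, int cpu)
{
	void *rpage;
	int ret;

	rpage = ring_buffer_alloc_read_page(buffer);
	if (!rpage)
		return -ENOMEM;

	/* full == 0: also accept a partially filled reader page */
	ret = ring_buffer_read_page(buffer, &rpage, cpu, 0);
	if (ret)
		process_page(rpage);	/* hypothetical consumer; rpage may now
					 * be the page swapped out of the buffer */

	ring_buffer_free_read_page(buffer, rpage);
	return ret;
}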
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 91887a280ab9..8b6409a62b54 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1200,7 +1200,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip) | |||
1200 | } | 1200 | } |
1201 | 1201 | ||
1202 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 1202 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
1203 | void trace_graph_entry(struct ftrace_graph_ent *trace) | 1203 | int trace_graph_entry(struct ftrace_graph_ent *trace) |
1204 | { | 1204 | { |
1205 | struct trace_array *tr = &global_trace; | 1205 | struct trace_array *tr = &global_trace; |
1206 | struct trace_array_cpu *data; | 1206 | struct trace_array_cpu *data; |
@@ -1209,7 +1209,7 @@ void trace_graph_entry(struct ftrace_graph_ent *trace) | |||
1209 | int cpu; | 1209 | int cpu; |
1210 | int pc; | 1210 | int pc; |
1211 | 1211 | ||
1212 | raw_local_irq_save(flags); | 1212 | local_irq_save(flags); |
1213 | cpu = raw_smp_processor_id(); | 1213 | cpu = raw_smp_processor_id(); |
1214 | data = tr->data[cpu]; | 1214 | data = tr->data[cpu]; |
1215 | disabled = atomic_inc_return(&data->disabled); | 1215 | disabled = atomic_inc_return(&data->disabled); |
@@ -1218,7 +1218,9 @@ void trace_graph_entry(struct ftrace_graph_ent *trace) | |||
1218 | __trace_graph_entry(tr, data, trace, flags, pc); | 1218 | __trace_graph_entry(tr, data, trace, flags, pc); |
1219 | } | 1219 | } |
1220 | atomic_dec(&data->disabled); | 1220 | atomic_dec(&data->disabled); |
1221 | raw_local_irq_restore(flags); | 1221 | local_irq_restore(flags); |
1222 | |||
1223 | return 1; | ||
1222 | } | 1224 | } |
1223 | 1225 | ||
1224 | void trace_graph_return(struct ftrace_graph_ret *trace) | 1226 | void trace_graph_return(struct ftrace_graph_ret *trace) |
@@ -1230,7 +1232,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace) | |||
1230 | int cpu; | 1232 | int cpu; |
1231 | int pc; | 1233 | int pc; |
1232 | 1234 | ||
1233 | raw_local_irq_save(flags); | 1235 | local_irq_save(flags); |
1234 | cpu = raw_smp_processor_id(); | 1236 | cpu = raw_smp_processor_id(); |
1235 | data = tr->data[cpu]; | 1237 | data = tr->data[cpu]; |
1236 | disabled = atomic_inc_return(&data->disabled); | 1238 | disabled = atomic_inc_return(&data->disabled); |
@@ -1239,7 +1241,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace) | |||
1239 | __trace_graph_return(tr, data, trace, flags, pc); | 1241 | __trace_graph_return(tr, data, trace, flags, pc); |
1240 | } | 1242 | } |
1241 | atomic_dec(&data->disabled); | 1243 | atomic_dec(&data->disabled); |
1242 | raw_local_irq_restore(flags); | 1244 | local_irq_restore(flags); |
1243 | } | 1245 | } |
1244 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 1246 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
1245 | 1247 | ||
@@ -2645,7 +2647,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf, | |||
2645 | if (err) | 2647 | if (err) |
2646 | goto err_unlock; | 2648 | goto err_unlock; |
2647 | 2649 | ||
2648 | raw_local_irq_disable(); | 2650 | local_irq_disable(); |
2649 | __raw_spin_lock(&ftrace_max_lock); | 2651 | __raw_spin_lock(&ftrace_max_lock); |
2650 | for_each_tracing_cpu(cpu) { | 2652 | for_each_tracing_cpu(cpu) { |
2651 | /* | 2653 | /* |
@@ -2662,7 +2664,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf, | |||
2662 | } | 2664 | } |
2663 | } | 2665 | } |
2664 | __raw_spin_unlock(&ftrace_max_lock); | 2666 | __raw_spin_unlock(&ftrace_max_lock); |
2665 | raw_local_irq_enable(); | 2667 | local_irq_enable(); |
2666 | 2668 | ||
2667 | tracing_cpumask = tracing_cpumask_new; | 2669 | tracing_cpumask = tracing_cpumask_new; |
2668 | 2670 | ||
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index f96f4e787ff3..0565ae9a2210 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -412,7 +412,7 @@ void trace_function(struct trace_array *tr, | |||
412 | unsigned long flags, int pc); | 412 | unsigned long flags, int pc); |
413 | 413 | ||
414 | void trace_graph_return(struct ftrace_graph_ret *trace); | 414 | void trace_graph_return(struct ftrace_graph_ret *trace); |
415 | void trace_graph_entry(struct ftrace_graph_ent *trace); | 415 | int trace_graph_entry(struct ftrace_graph_ent *trace); |
416 | void trace_bts(struct trace_array *tr, | 416 | void trace_bts(struct trace_array *tr, |
417 | unsigned long from, | 417 | unsigned long from, |
418 | unsigned long to); | 418 | unsigned long to); |
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index bc972753568d..6c00feb3bac7 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -42,7 +42,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect) | |||
42 | if (unlikely(!tr)) | 42 | if (unlikely(!tr)) |
43 | return; | 43 | return; |
44 | 44 | ||
45 | raw_local_irq_save(flags); | 45 | local_irq_save(flags); |
46 | cpu = raw_smp_processor_id(); | 46 | cpu = raw_smp_processor_id(); |
47 | if (atomic_inc_return(&tr->data[cpu]->disabled) != 1) | 47 | if (atomic_inc_return(&tr->data[cpu]->disabled) != 1) |
48 | goto out; | 48 | goto out; |
@@ -74,7 +74,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect) | |||
74 | 74 | ||
75 | out: | 75 | out: |
76 | atomic_dec(&tr->data[cpu]->disabled); | 76 | atomic_dec(&tr->data[cpu]->disabled); |
77 | raw_local_irq_restore(flags); | 77 | local_irq_restore(flags); |
78 | } | 78 | } |
79 | 79 | ||
80 | static inline | 80 | static inline |
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 894b50bca313..c66578f2fdc2 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -19,6 +19,7 @@ | |||
19 | #define TRACE_GRAPH_PRINT_OVERRUN 0x1 | 19 | #define TRACE_GRAPH_PRINT_OVERRUN 0x1 |
20 | #define TRACE_GRAPH_PRINT_CPU 0x2 | 20 | #define TRACE_GRAPH_PRINT_CPU 0x2 |
21 | #define TRACE_GRAPH_PRINT_OVERHEAD 0x4 | 21 | #define TRACE_GRAPH_PRINT_OVERHEAD 0x4 |
22 | #define TRACE_GRAPH_PRINT_PROC 0x8 | ||
22 | 23 | ||
23 | static struct tracer_opt trace_opts[] = { | 24 | static struct tracer_opt trace_opts[] = { |
24 | /* Display overruns ? */ | 25 | /* Display overruns ? */ |
@@ -27,11 +28,13 @@ static struct tracer_opt trace_opts[] = { | |||
27 | { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) }, | 28 | { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) }, |
28 | /* Display Overhead ? */ | 29 | /* Display Overhead ? */ |
29 | { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) }, | 30 | { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) }, |
31 | /* Display proc name/pid */ | ||
32 | { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) }, | ||
30 | { } /* Empty entry */ | 33 | { } /* Empty entry */ |
31 | }; | 34 | }; |
32 | 35 | ||
33 | static struct tracer_flags tracer_flags = { | 36 | static struct tracer_flags tracer_flags = { |
34 | /* Don't display overruns by default */ | 37 | /* Don't display overruns and proc by default */ |
35 | .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD, | 38 | .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD, |
36 | .opts = trace_opts | 39 | .opts = trace_opts |
37 | }; | 40 | }; |
@@ -104,23 +107,63 @@ print_graph_cpu(struct trace_seq *s, int cpu) | |||
104 | return TRACE_TYPE_HANDLED; | 107 | return TRACE_TYPE_HANDLED; |
105 | } | 108 | } |
106 | 109 | ||
110 | #define TRACE_GRAPH_PROCINFO_LENGTH 14 | ||
111 | |||
112 | static enum print_line_t | ||
113 | print_graph_proc(struct trace_seq *s, pid_t pid) | ||
114 | { | ||
115 | int i; | ||
116 | int ret; | ||
117 | int len; | ||
118 | char comm[8]; | ||
119 | int spaces = 0; | ||
120 | /* sign + log10(MAX_INT) + '\0' */ | ||
121 | char pid_str[11]; | ||
122 | |||
123 | strncpy(comm, trace_find_cmdline(pid), 7); | ||
124 | comm[7] = '\0'; | ||
125 | sprintf(pid_str, "%d", pid); | ||
126 | |||
127 | /* 1 stands for the "-" character */ | ||
128 | len = strlen(comm) + strlen(pid_str) + 1; | ||
129 | |||
130 | if (len < TRACE_GRAPH_PROCINFO_LENGTH) | ||
131 | spaces = TRACE_GRAPH_PROCINFO_LENGTH - len; | ||
132 | |||
133 | /* First spaces to align center */ | ||
134 | for (i = 0; i < spaces / 2; i++) { | ||
135 | ret = trace_seq_printf(s, " "); | ||
136 | if (!ret) | ||
137 | return TRACE_TYPE_PARTIAL_LINE; | ||
138 | } | ||
139 | |||
140 | ret = trace_seq_printf(s, "%s-%s", comm, pid_str); | ||
141 | if (!ret) | ||
142 | return TRACE_TYPE_PARTIAL_LINE; | ||
143 | |||
144 | /* Last spaces to align center */ | ||
145 | for (i = 0; i < spaces - (spaces / 2); i++) { | ||
146 | ret = trace_seq_printf(s, " "); | ||
147 | if (!ret) | ||
148 | return TRACE_TYPE_PARTIAL_LINE; | ||
149 | } | ||
150 | return TRACE_TYPE_HANDLED; | ||
151 | } | ||
152 | |||
107 | 153 | ||
108 | /* If the pid changed since the last trace, output this event */ | 154 | /* If the pid changed since the last trace, output this event */ |
109 | static int verif_pid(struct trace_seq *s, pid_t pid, int cpu) | 155 | static enum print_line_t |
156 | verif_pid(struct trace_seq *s, pid_t pid, int cpu) | ||
110 | { | 157 | { |
111 | char *comm, *prev_comm; | ||
112 | pid_t prev_pid; | 158 | pid_t prev_pid; |
113 | int ret; | 159 | int ret; |
114 | 160 | ||
115 | if (last_pid[cpu] != -1 && last_pid[cpu] == pid) | 161 | if (last_pid[cpu] != -1 && last_pid[cpu] == pid) |
116 | return 1; | 162 | return TRACE_TYPE_HANDLED; |
117 | 163 | ||
118 | prev_pid = last_pid[cpu]; | 164 | prev_pid = last_pid[cpu]; |
119 | last_pid[cpu] = pid; | 165 | last_pid[cpu] = pid; |
120 | 166 | ||
121 | comm = trace_find_cmdline(pid); | ||
122 | prev_comm = trace_find_cmdline(prev_pid); | ||
123 | |||
124 | /* | 167 | /* |
125 | * Context-switch trace line: | 168 | * Context-switch trace line: |
126 | 169 | ||
@@ -130,11 +173,31 @@ static int verif_pid(struct trace_seq *s, pid_t pid, int cpu) | |||
130 | 173 | ||
131 | */ | 174 | */ |
132 | ret = trace_seq_printf(s, | 175 | ret = trace_seq_printf(s, |
133 | " ------------------------------------------\n"); | 176 | "\n ------------------------------------------\n |"); |
134 | ret += trace_seq_printf(s, " | %d) %s-%d => %s-%d\n", | 177 | if (!ret) |
135 | cpu, prev_comm, prev_pid, comm, pid); | 178 | TRACE_TYPE_PARTIAL_LINE; |
136 | ret += trace_seq_printf(s, | 179 | |
137 | " ------------------------------------------\n\n"); | 180 | ret = print_graph_cpu(s, cpu); |
181 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
182 | TRACE_TYPE_PARTIAL_LINE; | ||
183 | |||
184 | ret = print_graph_proc(s, prev_pid); | ||
185 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
186 | TRACE_TYPE_PARTIAL_LINE; | ||
187 | |||
188 | ret = trace_seq_printf(s, " => "); | ||
189 | if (!ret) | ||
190 | TRACE_TYPE_PARTIAL_LINE; | ||
191 | |||
192 | ret = print_graph_proc(s, pid); | ||
193 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
194 | TRACE_TYPE_PARTIAL_LINE; | ||
195 | |||
196 | ret = trace_seq_printf(s, | ||
197 | "\n ------------------------------------------\n\n"); | ||
198 | if (!ret) | ||
199 | TRACE_TYPE_PARTIAL_LINE; | ||
200 | |||
138 | return ret; | 201 | return ret; |
139 | } | 202 | } |
140 | 203 | ||
@@ -169,11 +232,50 @@ trace_branch_is_leaf(struct trace_iterator *iter, | |||
169 | } | 232 | } |
170 | 233 | ||
171 | 234 | ||
172 | static inline int | 235 | static enum print_line_t |
173 | print_graph_duration(unsigned long long duration, struct trace_seq *s) | 236 | print_graph_duration(unsigned long long duration, struct trace_seq *s) |
174 | { | 237 | { |
175 | unsigned long nsecs_rem = do_div(duration, 1000); | 238 | unsigned long nsecs_rem = do_div(duration, 1000); |
176 | return trace_seq_printf(s, "%4llu.%3lu us | ", duration, nsecs_rem); | 239 | /* log10(ULONG_MAX) + '\0' */ |
240 | char msecs_str[21]; | ||
241 | char nsecs_str[5]; | ||
242 | int ret, len; | ||
243 | int i; | ||
244 | |||
245 | sprintf(msecs_str, "%lu", (unsigned long) duration); | ||
246 | |||
247 | /* Print msecs */ | ||
248 | ret = trace_seq_printf(s, msecs_str); | ||
249 | if (!ret) | ||
250 | return TRACE_TYPE_PARTIAL_LINE; | ||
251 | |||
252 | len = strlen(msecs_str); | ||
253 | |||
254 | /* Print nsecs (we don't want to exceed 7 numbers) */ | ||
255 | if (len < 7) { | ||
256 | snprintf(nsecs_str, 8 - len, "%03lu", nsecs_rem); | ||
257 | ret = trace_seq_printf(s, ".%s", nsecs_str); | ||
258 | if (!ret) | ||
259 | return TRACE_TYPE_PARTIAL_LINE; | ||
260 | len += strlen(nsecs_str); | ||
261 | } | ||
262 | |||
263 | ret = trace_seq_printf(s, " us "); | ||
264 | if (!ret) | ||
265 | return TRACE_TYPE_PARTIAL_LINE; | ||
266 | |||
267 | /* Print remaining spaces to fit the row's width */ | ||
268 | for (i = len; i < 7; i++) { | ||
269 | ret = trace_seq_printf(s, " "); | ||
270 | if (!ret) | ||
271 | return TRACE_TYPE_PARTIAL_LINE; | ||
272 | } | ||
273 | |||
274 | ret = trace_seq_printf(s, "| "); | ||
275 | if (!ret) | ||
276 | return TRACE_TYPE_PARTIAL_LINE; | ||
277 | return TRACE_TYPE_HANDLED; | ||
278 | |||
177 | } | 279 | } |
178 | 280 | ||
179 | /* Signal a overhead of time execution to the output */ | 281 | /* Signal a overhead of time execution to the output */ |
@@ -210,10 +312,6 @@ print_graph_entry_leaf(struct trace_iterator *iter, | |||
210 | call = &entry->graph_ent; | 312 | call = &entry->graph_ent; |
211 | duration = graph_ret->rettime - graph_ret->calltime; | 313 | duration = graph_ret->rettime - graph_ret->calltime; |
212 | 314 | ||
213 | /* Must not exceed 8 characters: 9999.999 us */ | ||
214 | if (duration > 10000000ULL) | ||
215 | duration = 9999999ULL; | ||
216 | |||
217 | /* Overhead */ | 315 | /* Overhead */ |
218 | if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) { | 316 | if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) { |
219 | ret = print_graph_overhead(duration, s); | 317 | ret = print_graph_overhead(duration, s); |
@@ -223,7 +321,7 @@ print_graph_entry_leaf(struct trace_iterator *iter, | |||
223 | 321 | ||
224 | /* Duration */ | 322 | /* Duration */ |
225 | ret = print_graph_duration(duration, s); | 323 | ret = print_graph_duration(duration, s); |
226 | if (!ret) | 324 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
227 | return TRACE_TYPE_PARTIAL_LINE; | 325 | return TRACE_TYPE_PARTIAL_LINE; |
228 | 326 | ||
229 | /* Function */ | 327 | /* Function */ |
@@ -288,12 +386,23 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, | |||
288 | struct trace_entry *ent = iter->ent; | 386 | struct trace_entry *ent = iter->ent; |
289 | 387 | ||
290 | /* Pid */ | 388 | /* Pid */ |
291 | if (!verif_pid(s, ent->pid, cpu)) | 389 | if (verif_pid(s, ent->pid, cpu) == TRACE_TYPE_PARTIAL_LINE) |
292 | return TRACE_TYPE_PARTIAL_LINE; | 390 | return TRACE_TYPE_PARTIAL_LINE; |
293 | 391 | ||
294 | /* Cpu */ | 392 | /* Cpu */ |
295 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { | 393 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { |
296 | ret = print_graph_cpu(s, cpu); | 394 | ret = print_graph_cpu(s, cpu); |
395 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
396 | return TRACE_TYPE_PARTIAL_LINE; | ||
397 | } | ||
398 | |||
399 | /* Proc */ | ||
400 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) { | ||
401 | ret = print_graph_proc(s, ent->pid); | ||
402 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
403 | return TRACE_TYPE_PARTIAL_LINE; | ||
404 | |||
405 | ret = trace_seq_printf(s, " | "); | ||
297 | if (!ret) | 406 | if (!ret) |
298 | return TRACE_TYPE_PARTIAL_LINE; | 407 | return TRACE_TYPE_PARTIAL_LINE; |
299 | } | 408 | } |
@@ -313,17 +422,24 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, | |||
313 | int ret; | 422 | int ret; |
314 | unsigned long long duration = trace->rettime - trace->calltime; | 423 | unsigned long long duration = trace->rettime - trace->calltime; |
315 | 424 | ||
316 | /* Must not exceed 8 characters: xxxx.yyy us */ | ||
317 | if (duration > 10000000ULL) | ||
318 | duration = 9999999ULL; | ||
319 | |||
320 | /* Pid */ | 425 | /* Pid */ |
321 | if (!verif_pid(s, ent->pid, cpu)) | 426 | if (verif_pid(s, ent->pid, cpu) == TRACE_TYPE_PARTIAL_LINE) |
322 | return TRACE_TYPE_PARTIAL_LINE; | 427 | return TRACE_TYPE_PARTIAL_LINE; |
323 | 428 | ||
324 | /* Cpu */ | 429 | /* Cpu */ |
325 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { | 430 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { |
326 | ret = print_graph_cpu(s, cpu); | 431 | ret = print_graph_cpu(s, cpu); |
432 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
433 | return TRACE_TYPE_PARTIAL_LINE; | ||
434 | } | ||
435 | |||
436 | /* Proc */ | ||
437 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) { | ||
438 | ret = print_graph_proc(s, ent->pid); | ||
439 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
440 | return TRACE_TYPE_PARTIAL_LINE; | ||
441 | |||
442 | ret = trace_seq_printf(s, " | "); | ||
327 | if (!ret) | 443 | if (!ret) |
328 | return TRACE_TYPE_PARTIAL_LINE; | 444 | return TRACE_TYPE_PARTIAL_LINE; |
329 | } | 445 | } |
@@ -337,7 +453,7 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, | |||
337 | 453 | ||
338 | /* Duration */ | 454 | /* Duration */ |
339 | ret = print_graph_duration(duration, s); | 455 | ret = print_graph_duration(duration, s); |
340 | if (!ret) | 456 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
341 | return TRACE_TYPE_PARTIAL_LINE; | 457 | return TRACE_TYPE_PARTIAL_LINE; |
342 | 458 | ||
343 | /* Closing brace */ | 459 | /* Closing brace */ |
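To make the new funcgraph-proc field concrete, here is a small stand-alone illustration of the centering that print_graph_proc() performs: "comm-pid" (comm truncated to 7 characters) is padded to TRACE_GRAPH_PROCINFO_LENGTH columns with the extra spaces split before and after. Userspace sketch only; the kernel code above is the authoritative version.

#include <stdio.h>
#include <string.h>

#define TRACE_GRAPH_PROCINFO_LENGTH 14

static void print_procinfo(const char *comm, int pid)
{
	char buf[32];
	int len, spaces, i;

	snprintf(buf, sizeof(buf), "%.7s-%d", comm, pid);
	len = strlen(buf);
	spaces = len < TRACE_GRAPH_PROCINFO_LENGTH ?
		 TRACE_GRAPH_PROCINFO_LENGTH - len : 0;

	for (i = 0; i < spaces / 2; i++)		/* leading half */
		putchar(' ');
	fputs(buf, stdout);
	for (i = 0; i < spaces - spaces / 2; i++)	/* trailing half */
		putchar(' ');
	puts("|");
}

int main(void)
{
	print_procinfo("bash", 2794);		/* "  bash-2794   |" */
	print_procinfo("kworker", 12345);	/* "kworker-12345 |" */
	return 0;
}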
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index fde3be15c642..0b863f2cbc8e 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -48,7 +48,7 @@ static inline void check_stack(void) | |||
48 | if (!object_is_on_stack(&this_size)) | 48 | if (!object_is_on_stack(&this_size)) |
49 | return; | 49 | return; |
50 | 50 | ||
51 | raw_local_irq_save(flags); | 51 | local_irq_save(flags); |
52 | __raw_spin_lock(&max_stack_lock); | 52 | __raw_spin_lock(&max_stack_lock); |
53 | 53 | ||
54 | /* a race could have already updated it */ | 54 | /* a race could have already updated it */ |
@@ -78,6 +78,7 @@ static inline void check_stack(void) | |||
78 | * on a new max, so it is far from a fast path. | 78 | * on a new max, so it is far from a fast path. |
79 | */ | 79 | */ |
80 | while (i < max_stack_trace.nr_entries) { | 80 | while (i < max_stack_trace.nr_entries) { |
81 | int found = 0; | ||
81 | 82 | ||
82 | stack_dump_index[i] = this_size; | 83 | stack_dump_index[i] = this_size; |
83 | p = start; | 84 | p = start; |
@@ -86,17 +87,19 @@ static inline void check_stack(void) | |||
86 | if (*p == stack_dump_trace[i]) { | 87 | if (*p == stack_dump_trace[i]) { |
87 | this_size = stack_dump_index[i++] = | 88 | this_size = stack_dump_index[i++] = |
88 | (top - p) * sizeof(unsigned long); | 89 | (top - p) * sizeof(unsigned long); |
90 | found = 1; | ||
89 | /* Start the search from here */ | 91 | /* Start the search from here */ |
90 | start = p + 1; | 92 | start = p + 1; |
91 | } | 93 | } |
92 | } | 94 | } |
93 | 95 | ||
94 | i++; | 96 | if (!found) |
97 | i++; | ||
95 | } | 98 | } |
96 | 99 | ||
97 | out: | 100 | out: |
98 | __raw_spin_unlock(&max_stack_lock); | 101 | __raw_spin_unlock(&max_stack_lock); |
99 | raw_local_irq_restore(flags); | 102 | local_irq_restore(flags); |
100 | } | 103 | } |
101 | 104 | ||
102 | static void | 105 | static void |
@@ -162,11 +165,11 @@ stack_max_size_write(struct file *filp, const char __user *ubuf, | |||
162 | if (ret < 0) | 165 | if (ret < 0) |
163 | return ret; | 166 | return ret; |
164 | 167 | ||
165 | raw_local_irq_save(flags); | 168 | local_irq_save(flags); |
166 | __raw_spin_lock(&max_stack_lock); | 169 | __raw_spin_lock(&max_stack_lock); |
167 | *ptr = val; | 170 | *ptr = val; |
168 | __raw_spin_unlock(&max_stack_lock); | 171 | __raw_spin_unlock(&max_stack_lock); |
169 | raw_local_irq_restore(flags); | 172 | local_irq_restore(flags); |
170 | 173 | ||
171 | return count; | 174 | return count; |
172 | } | 175 | } |