Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/trace/trace.c              | 123
-rw-r--r-- | kernel/trace/trace.h              |  13
-rw-r--r-- | kernel/trace/trace_boot.c         |   2
-rw-r--r-- | kernel/trace/trace_irqsoff.c      |  13
-rw-r--r-- | kernel/trace/trace_mmiotrace.c    |   4
-rw-r--r-- | kernel/trace/trace_sched_switch.c |   9
-rw-r--r-- | kernel/trace/trace_sched_wakeup.c |  13
7 files changed, 97 insertions, 80 deletions
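The change running through all seven files is the same: tracing_generic_entry_update() no longer derives the preempt count itself, so every tracer samples preempt_count() in its own context and passes it down as a new 'pc' argument, and several call sites switch from local_irq_save()/local_irq_restore() to preempt_disable_notrace()/preempt_enable_notrace() around the ring buffer access. A minimal sketch of the resulting calling convention (example_trace_event() is illustrative only and not part of the patch; the helpers it calls are the ones touched here):

        /* Illustrative only: not a function from this patch. */
        static void example_trace_event(struct trace_array *tr,
                                        struct trace_array_cpu *data,
                                        unsigned long ip, unsigned long parent_ip,
                                        unsigned long flags)
        {
                int pc = preempt_count();       /* state of the traced context */

                preempt_disable_notrace();      /* instead of local_irq_save() */
                if (likely(atomic_inc_return(&data->disabled) == 1))
                        trace_function(tr, data, ip, parent_ip, flags, pc);
                atomic_dec(&data->disabled);
                preempt_enable_notrace();
        }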
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 948f7d821c62..1cd2e8143bb4 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -652,12 +652,10 @@ void tracing_record_cmdline(struct task_struct *tsk)
652 | } | 652 | } |
653 | 653 | ||
654 | void | 654 | void |
655 | tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags) | 655 | tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, |
656 | int pc) | ||
656 | { | 657 | { |
657 | struct task_struct *tsk = current; | 658 | struct task_struct *tsk = current; |
658 | unsigned long pc; | ||
659 | |||
660 | pc = preempt_count(); | ||
661 | 659 | ||
662 | entry->preempt_count = pc & 0xff; | 660 | entry->preempt_count = pc & 0xff; |
663 | entry->pid = (tsk) ? tsk->pid : 0; | 661 | entry->pid = (tsk) ? tsk->pid : 0; |
@@ -670,7 +668,8 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)
670 | 668 | ||
671 | void | 669 | void |
672 | trace_function(struct trace_array *tr, struct trace_array_cpu *data, | 670 | trace_function(struct trace_array *tr, struct trace_array_cpu *data, |
673 | unsigned long ip, unsigned long parent_ip, unsigned long flags) | 671 | unsigned long ip, unsigned long parent_ip, unsigned long flags, |
672 | int pc) | ||
674 | { | 673 | { |
675 | struct ring_buffer_event *event; | 674 | struct ring_buffer_event *event; |
676 | struct ftrace_entry *entry; | 675 | struct ftrace_entry *entry; |
@@ -685,7 +684,7 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data,
685 | if (!event) | 684 | if (!event) |
686 | return; | 685 | return; |
687 | entry = ring_buffer_event_data(event); | 686 | entry = ring_buffer_event_data(event); |
688 | tracing_generic_entry_update(&entry->ent, flags); | 687 | tracing_generic_entry_update(&entry->ent, flags, pc); |
689 | entry->ent.type = TRACE_FN; | 688 | entry->ent.type = TRACE_FN; |
690 | entry->ip = ip; | 689 | entry->ip = ip; |
691 | entry->parent_ip = parent_ip; | 690 | entry->parent_ip = parent_ip; |
@@ -694,16 +693,17 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data,
694 | 693 | ||
695 | void | 694 | void |
696 | ftrace(struct trace_array *tr, struct trace_array_cpu *data, | 695 | ftrace(struct trace_array *tr, struct trace_array_cpu *data, |
697 | unsigned long ip, unsigned long parent_ip, unsigned long flags) | 696 | unsigned long ip, unsigned long parent_ip, unsigned long flags, |
697 | int pc) | ||
698 | { | 698 | { |
699 | if (likely(!atomic_read(&data->disabled))) | 699 | if (likely(!atomic_read(&data->disabled))) |
700 | trace_function(tr, data, ip, parent_ip, flags); | 700 | trace_function(tr, data, ip, parent_ip, flags, pc); |
701 | } | 701 | } |
702 | 702 | ||
703 | void __trace_stack(struct trace_array *tr, | 703 | static void ftrace_trace_stack(struct trace_array *tr, |
704 | struct trace_array_cpu *data, | 704 | struct trace_array_cpu *data, |
705 | unsigned long flags, | 705 | unsigned long flags, |
706 | int skip) | 706 | int skip, int pc) |
707 | { | 707 | { |
708 | struct ring_buffer_event *event; | 708 | struct ring_buffer_event *event; |
709 | struct stack_entry *entry; | 709 | struct stack_entry *entry; |
@@ -718,7 +718,7 @@ void __trace_stack(struct trace_array *tr,
718 | if (!event) | 718 | if (!event) |
719 | return; | 719 | return; |
720 | entry = ring_buffer_event_data(event); | 720 | entry = ring_buffer_event_data(event); |
721 | tracing_generic_entry_update(&entry->ent, flags); | 721 | tracing_generic_entry_update(&entry->ent, flags, pc); |
722 | entry->ent.type = TRACE_STACK; | 722 | entry->ent.type = TRACE_STACK; |
723 | 723 | ||
724 | memset(&entry->caller, 0, sizeof(entry->caller)); | 724 | memset(&entry->caller, 0, sizeof(entry->caller)); |
@@ -732,9 +732,18 @@ void __trace_stack(struct trace_array *tr,
732 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 732 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); |
733 | } | 733 | } |
734 | 734 | ||
735 | void | 735 | void __trace_stack(struct trace_array *tr, |
736 | __trace_special(void *__tr, void *__data, | 736 | struct trace_array_cpu *data, |
737 | unsigned long arg1, unsigned long arg2, unsigned long arg3) | 737 | unsigned long flags, |
738 | int skip) | ||
739 | { | ||
740 | ftrace_trace_stack(tr, data, flags, skip, preempt_count()); | ||
741 | } | ||
742 | |||
743 | static void | ||
744 | ftrace_trace_special(void *__tr, void *__data, | ||
745 | unsigned long arg1, unsigned long arg2, unsigned long arg3, | ||
746 | int pc) | ||
738 | { | 747 | { |
739 | struct ring_buffer_event *event; | 748 | struct ring_buffer_event *event; |
740 | struct trace_array_cpu *data = __data; | 749 | struct trace_array_cpu *data = __data; |
@@ -747,23 +756,30 @@ __trace_special(void *__tr, void *__data,
747 | if (!event) | 756 | if (!event) |
748 | return; | 757 | return; |
749 | entry = ring_buffer_event_data(event); | 758 | entry = ring_buffer_event_data(event); |
750 | tracing_generic_entry_update(&entry->ent, 0); | 759 | tracing_generic_entry_update(&entry->ent, 0, pc); |
751 | entry->ent.type = TRACE_SPECIAL; | 760 | entry->ent.type = TRACE_SPECIAL; |
752 | entry->arg1 = arg1; | 761 | entry->arg1 = arg1; |
753 | entry->arg2 = arg2; | 762 | entry->arg2 = arg2; |
754 | entry->arg3 = arg3; | 763 | entry->arg3 = arg3; |
755 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 764 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); |
756 | __trace_stack(tr, data, irq_flags, 4); | 765 | ftrace_trace_stack(tr, data, irq_flags, 4, pc); |
757 | 766 | ||
758 | trace_wake_up(); | 767 | trace_wake_up(); |
759 | } | 768 | } |
760 | 769 | ||
761 | void | 770 | void |
771 | __trace_special(void *__tr, void *__data, | ||
772 | unsigned long arg1, unsigned long arg2, unsigned long arg3) | ||
773 | { | ||
774 | ftrace_trace_special(__tr, __data, arg1, arg2, arg3, preempt_count()); | ||
775 | } | ||
776 | |||
777 | void | ||
762 | tracing_sched_switch_trace(struct trace_array *tr, | 778 | tracing_sched_switch_trace(struct trace_array *tr, |
763 | struct trace_array_cpu *data, | 779 | struct trace_array_cpu *data, |
764 | struct task_struct *prev, | 780 | struct task_struct *prev, |
765 | struct task_struct *next, | 781 | struct task_struct *next, |
766 | unsigned long flags) | 782 | unsigned long flags, int pc) |
767 | { | 783 | { |
768 | struct ring_buffer_event *event; | 784 | struct ring_buffer_event *event; |
769 | struct ctx_switch_entry *entry; | 785 | struct ctx_switch_entry *entry; |
@@ -774,7 +790,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
774 | if (!event) | 790 | if (!event) |
775 | return; | 791 | return; |
776 | entry = ring_buffer_event_data(event); | 792 | entry = ring_buffer_event_data(event); |
777 | tracing_generic_entry_update(&entry->ent, flags); | 793 | tracing_generic_entry_update(&entry->ent, flags, pc); |
778 | entry->ent.type = TRACE_CTX; | 794 | entry->ent.type = TRACE_CTX; |
779 | entry->prev_pid = prev->pid; | 795 | entry->prev_pid = prev->pid; |
780 | entry->prev_prio = prev->prio; | 796 | entry->prev_prio = prev->prio; |
@@ -784,7 +800,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
784 | entry->next_state = next->state; | 800 | entry->next_state = next->state; |
785 | entry->next_cpu = task_cpu(next); | 801 | entry->next_cpu = task_cpu(next); |
786 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 802 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); |
787 | __trace_stack(tr, data, flags, 5); | 803 | ftrace_trace_stack(tr, data, flags, 5, pc); |
788 | } | 804 | } |
789 | 805 | ||
790 | void | 806 | void |
@@ -792,7 +808,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
792 | struct trace_array_cpu *data, | 808 | struct trace_array_cpu *data, |
793 | struct task_struct *wakee, | 809 | struct task_struct *wakee, |
794 | struct task_struct *curr, | 810 | struct task_struct *curr, |
795 | unsigned long flags) | 811 | unsigned long flags, int pc) |
796 | { | 812 | { |
797 | struct ring_buffer_event *event; | 813 | struct ring_buffer_event *event; |
798 | struct ctx_switch_entry *entry; | 814 | struct ctx_switch_entry *entry; |
@@ -803,7 +819,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
803 | if (!event) | 819 | if (!event) |
804 | return; | 820 | return; |
805 | entry = ring_buffer_event_data(event); | 821 | entry = ring_buffer_event_data(event); |
806 | tracing_generic_entry_update(&entry->ent, flags); | 822 | tracing_generic_entry_update(&entry->ent, flags, pc); |
807 | entry->ent.type = TRACE_WAKE; | 823 | entry->ent.type = TRACE_WAKE; |
808 | entry->prev_pid = curr->pid; | 824 | entry->prev_pid = curr->pid; |
809 | entry->prev_prio = curr->prio; | 825 | entry->prev_prio = curr->prio; |
@@ -813,7 +829,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
813 | entry->next_state = wakee->state; | 829 | entry->next_state = wakee->state; |
814 | entry->next_cpu = task_cpu(wakee); | 830 | entry->next_cpu = task_cpu(wakee); |
815 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 831 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); |
816 | __trace_stack(tr, data, flags, 6); | 832 | ftrace_trace_stack(tr, data, flags, 6, pc); |
817 | 833 | ||
818 | trace_wake_up(); | 834 | trace_wake_up(); |
819 | } | 835 | } |
@@ -823,23 +839,24 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
823 | { | 839 | { |
824 | struct trace_array *tr = &global_trace; | 840 | struct trace_array *tr = &global_trace; |
825 | struct trace_array_cpu *data; | 841 | struct trace_array_cpu *data; |
826 | unsigned long flags; | ||
827 | long disabled; | 842 | long disabled; |
828 | int cpu; | 843 | int cpu; |
844 | int pc; | ||
829 | 845 | ||
830 | if (tracing_disabled || !tr->ctrl) | 846 | if (tracing_disabled || !tr->ctrl) |
831 | return; | 847 | return; |
832 | 848 | ||
833 | local_irq_save(flags); | 849 | pc = preempt_count(); |
850 | preempt_disable_notrace(); | ||
834 | cpu = raw_smp_processor_id(); | 851 | cpu = raw_smp_processor_id(); |
835 | data = tr->data[cpu]; | 852 | data = tr->data[cpu]; |
836 | disabled = atomic_inc_return(&data->disabled); | 853 | disabled = atomic_inc_return(&data->disabled); |
837 | 854 | ||
838 | if (likely(disabled == 1)) | 855 | if (likely(disabled == 1)) |
839 | __trace_special(tr, data, arg1, arg2, arg3); | 856 | ftrace_trace_special(tr, data, arg1, arg2, arg3, pc); |
840 | 857 | ||
841 | atomic_dec(&data->disabled); | 858 | atomic_dec(&data->disabled); |
842 | local_irq_restore(flags); | 859 | preempt_enable_notrace(); |
843 | } | 860 | } |
844 | 861 | ||
845 | #ifdef CONFIG_FTRACE | 862 | #ifdef CONFIG_FTRACE |
@@ -850,7 +867,8 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
850 | struct trace_array_cpu *data; | 867 | struct trace_array_cpu *data; |
851 | unsigned long flags; | 868 | unsigned long flags; |
852 | long disabled; | 869 | long disabled; |
853 | int cpu; | 870 | int cpu, resched; |
871 | int pc; | ||
854 | 872 | ||
855 | if (unlikely(!ftrace_function_enabled)) | 873 | if (unlikely(!ftrace_function_enabled)) |
856 | return; | 874 | return; |
@@ -858,16 +876,22 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
858 | if (skip_trace(ip)) | 876 | if (skip_trace(ip)) |
859 | return; | 877 | return; |
860 | 878 | ||
861 | local_irq_save(flags); | 879 | pc = preempt_count(); |
880 | resched = need_resched(); | ||
881 | preempt_disable_notrace(); | ||
882 | local_save_flags(flags); | ||
862 | cpu = raw_smp_processor_id(); | 883 | cpu = raw_smp_processor_id(); |
863 | data = tr->data[cpu]; | 884 | data = tr->data[cpu]; |
864 | disabled = atomic_inc_return(&data->disabled); | 885 | disabled = atomic_inc_return(&data->disabled); |
865 | 886 | ||
866 | if (likely(disabled == 1)) | 887 | if (likely(disabled == 1)) |
867 | trace_function(tr, data, ip, parent_ip, flags); | 888 | trace_function(tr, data, ip, parent_ip, flags, pc); |
868 | 889 | ||
869 | atomic_dec(&data->disabled); | 890 | atomic_dec(&data->disabled); |
870 | local_irq_restore(flags); | 891 | if (resched) |
892 | preempt_enable_no_resched_notrace(); | ||
893 | else | ||
894 | preempt_enable_notrace(); | ||
871 | } | 895 | } |
872 | 896 | ||
873 | static struct ftrace_ops trace_ops __read_mostly = | 897 | static struct ftrace_ops trace_ops __read_mostly = |
@@ -2508,9 +2532,6 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
2508 | size_t cnt, loff_t *ppos) | 2532 | size_t cnt, loff_t *ppos) |
2509 | { | 2533 | { |
2510 | struct trace_iterator *iter = filp->private_data; | 2534 | struct trace_iterator *iter = filp->private_data; |
2511 | #ifdef CONFIG_FTRACE | ||
2512 | int ftrace_save; | ||
2513 | #endif | ||
2514 | ssize_t sret; | 2535 | ssize_t sret; |
2515 | 2536 | ||
2516 | /* return any leftover data */ | 2537 | /* return any leftover data */ |
@@ -2593,20 +2614,6 @@ waitagain:
2593 | offsetof(struct trace_iterator, seq)); | 2614 | offsetof(struct trace_iterator, seq)); |
2594 | iter->pos = -1; | 2615 | iter->pos = -1; |
2595 | 2616 | ||
2596 | /* | ||
2597 | * We need to stop all tracing on all CPUS to read the | ||
2598 | * the next buffer. This is a bit expensive, but is | ||
2599 | * not done often. We fill all what we can read, | ||
2600 | * and then release the locks again. | ||
2601 | */ | ||
2602 | |||
2603 | local_irq_disable(); | ||
2604 | #ifdef CONFIG_FTRACE | ||
2605 | ftrace_save = ftrace_enabled; | ||
2606 | ftrace_enabled = 0; | ||
2607 | #endif | ||
2608 | smp_wmb(); | ||
2609 | |||
2610 | while (find_next_entry_inc(iter) != NULL) { | 2617 | while (find_next_entry_inc(iter) != NULL) { |
2611 | enum print_line_t ret; | 2618 | enum print_line_t ret; |
2612 | int len = iter->seq.len; | 2619 | int len = iter->seq.len; |
@@ -2624,11 +2631,6 @@ waitagain:
2624 | break; | 2631 | break; |
2625 | } | 2632 | } |
2626 | 2633 | ||
2627 | #ifdef CONFIG_FTRACE | ||
2628 | ftrace_enabled = ftrace_save; | ||
2629 | #endif | ||
2630 | local_irq_enable(); | ||
2631 | |||
2632 | /* Now copy what we have to the user */ | 2634 | /* Now copy what we have to the user */ |
2633 | sret = trace_seq_to_user(&iter->seq, ubuf, cnt); | 2635 | sret = trace_seq_to_user(&iter->seq, ubuf, cnt); |
2634 | if (iter->seq.readpos >= iter->seq.len) | 2636 | if (iter->seq.readpos >= iter->seq.len) |
@@ -2960,12 +2962,13 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2960 | struct print_entry *entry; | 2962 | struct print_entry *entry; |
2961 | unsigned long flags, irq_flags; | 2963 | unsigned long flags, irq_flags; |
2962 | long disabled; | 2964 | long disabled; |
2963 | int cpu, len = 0, size; | 2965 | int cpu, len = 0, size, pc; |
2964 | 2966 | ||
2965 | if (!tr->ctrl || tracing_disabled) | 2967 | if (!tr->ctrl || tracing_disabled) |
2966 | return 0; | 2968 | return 0; |
2967 | 2969 | ||
2968 | local_irq_save(flags); | 2970 | pc = preempt_count(); |
2971 | preempt_disable_notrace(); | ||
2969 | cpu = raw_smp_processor_id(); | 2972 | cpu = raw_smp_processor_id(); |
2970 | data = tr->data[cpu]; | 2973 | data = tr->data[cpu]; |
2971 | disabled = atomic_inc_return(&data->disabled); | 2974 | disabled = atomic_inc_return(&data->disabled); |
@@ -2973,7 +2976,7 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2973 | if (unlikely(disabled != 1)) | 2976 | if (unlikely(disabled != 1)) |
2974 | goto out; | 2977 | goto out; |
2975 | 2978 | ||
2976 | spin_lock(&trace_buf_lock); | 2979 | spin_lock_irqsave(&trace_buf_lock, flags); |
2977 | len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); | 2980 | len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); |
2978 | 2981 | ||
2979 | len = min(len, TRACE_BUF_SIZE-1); | 2982 | len = min(len, TRACE_BUF_SIZE-1); |
@@ -2984,7 +2987,7 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2984 | if (!event) | 2987 | if (!event) |
2985 | goto out_unlock; | 2988 | goto out_unlock; |
2986 | entry = ring_buffer_event_data(event); | 2989 | entry = ring_buffer_event_data(event); |
2987 | tracing_generic_entry_update(&entry->ent, flags); | 2990 | tracing_generic_entry_update(&entry->ent, flags, pc); |
2988 | entry->ent.type = TRACE_PRINT; | 2991 | entry->ent.type = TRACE_PRINT; |
2989 | entry->ip = ip; | 2992 | entry->ip = ip; |
2990 | 2993 | ||
@@ -2993,11 +2996,11 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2993 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 2996 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); |
2994 | 2997 | ||
2995 | out_unlock: | 2998 | out_unlock: |
2996 | spin_unlock(&trace_buf_lock); | 2999 | spin_unlock_irqrestore(&trace_buf_lock, flags); |
2997 | 3000 | ||
2998 | out: | 3001 | out: |
2999 | atomic_dec(&data->disabled); | 3002 | atomic_dec(&data->disabled); |
3000 | local_irq_restore(flags); | 3003 | preempt_enable_notrace(); |
3001 | 3004 | ||
3002 | return len; | 3005 | return len; |
3003 | } | 3006 | } |
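One detail worth noting in the function_trace_call() hunks above: the handler samples need_resched() before disabling preemption and then re-enables with preempt_enable_no_resched_notrace() when a reschedule was already pending, falling back to the ordinary preempt_enable_notrace() otherwise, presumably so the tracer itself never becomes the point that triggers the reschedule. Reduced to its skeleton (the middle section is elided):

        int resched, pc;

        pc = preempt_count();
        resched = need_resched();       /* reschedule already requested? */
        preempt_disable_notrace();

        /* ... record the function entry using 'pc' ... */

        if (resched)
                preempt_enable_no_resched_notrace();
        else
                preempt_enable_notrace();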
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index f02042d0d828..f1f99572cde7 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -288,35 +288,36 @@ void init_tracer_sysprof_debugfs(struct dentry *d_tracer);
288 | struct trace_entry *tracing_get_trace_entry(struct trace_array *tr, | 288 | struct trace_entry *tracing_get_trace_entry(struct trace_array *tr, |
289 | struct trace_array_cpu *data); | 289 | struct trace_array_cpu *data); |
290 | void tracing_generic_entry_update(struct trace_entry *entry, | 290 | void tracing_generic_entry_update(struct trace_entry *entry, |
291 | unsigned long flags); | 291 | unsigned long flags, |
292 | int pc); | ||
292 | 293 | ||
293 | void ftrace(struct trace_array *tr, | 294 | void ftrace(struct trace_array *tr, |
294 | struct trace_array_cpu *data, | 295 | struct trace_array_cpu *data, |
295 | unsigned long ip, | 296 | unsigned long ip, |
296 | unsigned long parent_ip, | 297 | unsigned long parent_ip, |
297 | unsigned long flags); | 298 | unsigned long flags, int pc); |
298 | void tracing_sched_switch_trace(struct trace_array *tr, | 299 | void tracing_sched_switch_trace(struct trace_array *tr, |
299 | struct trace_array_cpu *data, | 300 | struct trace_array_cpu *data, |
300 | struct task_struct *prev, | 301 | struct task_struct *prev, |
301 | struct task_struct *next, | 302 | struct task_struct *next, |
302 | unsigned long flags); | 303 | unsigned long flags, int pc); |
303 | void tracing_record_cmdline(struct task_struct *tsk); | 304 | void tracing_record_cmdline(struct task_struct *tsk); |
304 | 305 | ||
305 | void tracing_sched_wakeup_trace(struct trace_array *tr, | 306 | void tracing_sched_wakeup_trace(struct trace_array *tr, |
306 | struct trace_array_cpu *data, | 307 | struct trace_array_cpu *data, |
307 | struct task_struct *wakee, | 308 | struct task_struct *wakee, |
308 | struct task_struct *cur, | 309 | struct task_struct *cur, |
309 | unsigned long flags); | 310 | unsigned long flags, int pc); |
310 | void trace_special(struct trace_array *tr, | 311 | void trace_special(struct trace_array *tr, |
311 | struct trace_array_cpu *data, | 312 | struct trace_array_cpu *data, |
312 | unsigned long arg1, | 313 | unsigned long arg1, |
313 | unsigned long arg2, | 314 | unsigned long arg2, |
314 | unsigned long arg3); | 315 | unsigned long arg3, int pc); |
315 | void trace_function(struct trace_array *tr, | 316 | void trace_function(struct trace_array *tr, |
316 | struct trace_array_cpu *data, | 317 | struct trace_array_cpu *data, |
317 | unsigned long ip, | 318 | unsigned long ip, |
318 | unsigned long parent_ip, | 319 | unsigned long parent_ip, |
319 | unsigned long flags); | 320 | unsigned long flags, int pc); |
320 | 321 | ||
321 | void tracing_start_cmdline_record(void); | 322 | void tracing_start_cmdline_record(void); |
322 | void tracing_stop_cmdline_record(void); | 323 | void tracing_stop_cmdline_record(void); |
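For existing tracers the conversion implied by these prototypes is mechanical: each call site supplies the preempt count it wants recorded, typically preempt_count() taken in its own context, as the irqsoff and wakeup tracers below do. Before and after at a representative call site:

        /* before */
        trace_function(tr, data, ip, parent_ip, flags);

        /* after: sample the preempt count at the call site */
        trace_function(tr, data, ip, parent_ip, flags, preempt_count());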
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
index 43bde20b95bd..f2dac6f1cf06 100644
--- a/kernel/trace/trace_boot.c
+++ b/kernel/trace/trace_boot.c
@@ -95,7 +95,7 @@ void trace_boot(struct boot_trace *it)
95 | if (!event) | 95 | if (!event) |
96 | goto out; | 96 | goto out; |
97 | entry = ring_buffer_event_data(event); | 97 | entry = ring_buffer_event_data(event); |
98 | tracing_generic_entry_update(&entry->ent, 0); | 98 | tracing_generic_entry_update(&entry->ent, 0, 0); |
99 | entry->ent.type = TRACE_BOOT; | 99 | entry->ent.type = TRACE_BOOT; |
100 | entry->initcall = *it; | 100 | entry->initcall = *it; |
101 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 101 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); |
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 37ad49407f27..f925dbbff2a6 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -95,7 +95,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
95 | disabled = atomic_inc_return(&data->disabled); | 95 | disabled = atomic_inc_return(&data->disabled); |
96 | 96 | ||
97 | if (likely(disabled == 1)) | 97 | if (likely(disabled == 1)) |
98 | trace_function(tr, data, ip, parent_ip, flags); | 98 | trace_function(tr, data, ip, parent_ip, flags, preempt_count()); |
99 | 99 | ||
100 | atomic_dec(&data->disabled); | 100 | atomic_dec(&data->disabled); |
101 | } | 101 | } |
@@ -130,6 +130,7 @@ check_critical_timing(struct trace_array *tr,
130 | unsigned long latency, t0, t1; | 130 | unsigned long latency, t0, t1; |
131 | cycle_t T0, T1, delta; | 131 | cycle_t T0, T1, delta; |
132 | unsigned long flags; | 132 | unsigned long flags; |
133 | int pc; | ||
133 | 134 | ||
134 | /* | 135 | /* |
135 | * usecs conversion is slow so we try to delay the conversion | 136 | * usecs conversion is slow so we try to delay the conversion |
@@ -144,13 +145,15 @@ check_critical_timing(struct trace_array *tr,
144 | if (!report_latency(delta)) | 145 | if (!report_latency(delta)) |
145 | goto out; | 146 | goto out; |
146 | 147 | ||
148 | pc = preempt_count(); | ||
149 | |||
147 | spin_lock_irqsave(&max_trace_lock, flags); | 150 | spin_lock_irqsave(&max_trace_lock, flags); |
148 | 151 | ||
149 | /* check if we are still the max latency */ | 152 | /* check if we are still the max latency */ |
150 | if (!report_latency(delta)) | 153 | if (!report_latency(delta)) |
151 | goto out_unlock; | 154 | goto out_unlock; |
152 | 155 | ||
153 | trace_function(tr, data, CALLER_ADDR0, parent_ip, flags); | 156 | trace_function(tr, data, CALLER_ADDR0, parent_ip, flags, pc); |
154 | 157 | ||
155 | latency = nsecs_to_usecs(delta); | 158 | latency = nsecs_to_usecs(delta); |
156 | 159 | ||
@@ -174,7 +177,7 @@ out:
174 | data->critical_sequence = max_sequence; | 177 | data->critical_sequence = max_sequence; |
175 | data->preempt_timestamp = ftrace_now(cpu); | 178 | data->preempt_timestamp = ftrace_now(cpu); |
176 | tracing_reset(tr, cpu); | 179 | tracing_reset(tr, cpu); |
177 | trace_function(tr, data, CALLER_ADDR0, parent_ip, flags); | 180 | trace_function(tr, data, CALLER_ADDR0, parent_ip, flags, pc); |
178 | } | 181 | } |
179 | 182 | ||
180 | static inline void | 183 | static inline void |
@@ -207,7 +210,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
207 | 210 | ||
208 | local_save_flags(flags); | 211 | local_save_flags(flags); |
209 | 212 | ||
210 | trace_function(tr, data, ip, parent_ip, flags); | 213 | trace_function(tr, data, ip, parent_ip, flags, preempt_count()); |
211 | 214 | ||
212 | per_cpu(tracing_cpu, cpu) = 1; | 215 | per_cpu(tracing_cpu, cpu) = 1; |
213 | 216 | ||
@@ -241,7 +244,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
241 | atomic_inc(&data->disabled); | 244 | atomic_inc(&data->disabled); |
242 | 245 | ||
243 | local_save_flags(flags); | 246 | local_save_flags(flags); |
244 | trace_function(tr, data, ip, parent_ip, flags); | 247 | trace_function(tr, data, ip, parent_ip, flags, preempt_count()); |
245 | check_critical_timing(tr, data, parent_ip ? : ip, cpu); | 248 | check_critical_timing(tr, data, parent_ip ? : ip, cpu); |
246 | data->critical_start = 0; | 249 | data->critical_start = 0; |
247 | atomic_dec(&data->disabled); | 250 | atomic_dec(&data->disabled); |
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index 0e819f47bb7a..f28484618ff0 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -324,7 +324,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
324 | if (!event) | 324 | if (!event) |
325 | return; | 325 | return; |
326 | entry = ring_buffer_event_data(event); | 326 | entry = ring_buffer_event_data(event); |
327 | tracing_generic_entry_update(&entry->ent, 0); | 327 | tracing_generic_entry_update(&entry->ent, 0, preempt_count()); |
328 | entry->ent.type = TRACE_MMIO_RW; | 328 | entry->ent.type = TRACE_MMIO_RW; |
329 | entry->rw = *rw; | 329 | entry->rw = *rw; |
330 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 330 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); |
@@ -352,7 +352,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
352 | if (!event) | 352 | if (!event) |
353 | return; | 353 | return; |
354 | entry = ring_buffer_event_data(event); | 354 | entry = ring_buffer_event_data(event); |
355 | tracing_generic_entry_update(&entry->ent, 0); | 355 | tracing_generic_entry_update(&entry->ent, 0, preempt_count()); |
356 | entry->ent.type = TRACE_MMIO_MAP; | 356 | entry->ent.type = TRACE_MMIO_MAP; |
357 | entry->map = *map; | 357 | entry->map = *map; |
358 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 358 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); |
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index e0b06db0f7af..c7fa08a5b7f4 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -26,6 +26,7 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
26 | unsigned long flags; | 26 | unsigned long flags; |
27 | long disabled; | 27 | long disabled; |
28 | int cpu; | 28 | int cpu; |
29 | int pc; | ||
29 | 30 | ||
30 | if (!atomic_read(&sched_ref)) | 31 | if (!atomic_read(&sched_ref)) |
31 | return; | 32 | return; |
@@ -36,13 +37,14 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
36 | if (!tracer_enabled) | 37 | if (!tracer_enabled) |
37 | return; | 38 | return; |
38 | 39 | ||
40 | pc = preempt_count(); | ||
39 | local_irq_save(flags); | 41 | local_irq_save(flags); |
40 | cpu = raw_smp_processor_id(); | 42 | cpu = raw_smp_processor_id(); |
41 | data = ctx_trace->data[cpu]; | 43 | data = ctx_trace->data[cpu]; |
42 | disabled = atomic_inc_return(&data->disabled); | 44 | disabled = atomic_inc_return(&data->disabled); |
43 | 45 | ||
44 | if (likely(disabled == 1)) | 46 | if (likely(disabled == 1)) |
45 | tracing_sched_switch_trace(ctx_trace, data, prev, next, flags); | 47 | tracing_sched_switch_trace(ctx_trace, data, prev, next, flags, pc); |
46 | 48 | ||
47 | atomic_dec(&data->disabled); | 49 | atomic_dec(&data->disabled); |
48 | local_irq_restore(flags); | 50 | local_irq_restore(flags); |
@@ -54,11 +56,12 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
54 | struct trace_array_cpu *data; | 56 | struct trace_array_cpu *data; |
55 | unsigned long flags; | 57 | unsigned long flags; |
56 | long disabled; | 58 | long disabled; |
57 | int cpu; | 59 | int cpu, pc; |
58 | 60 | ||
59 | if (!likely(tracer_enabled)) | 61 | if (!likely(tracer_enabled)) |
60 | return; | 62 | return; |
61 | 63 | ||
64 | pc = preempt_count(); | ||
62 | tracing_record_cmdline(current); | 65 | tracing_record_cmdline(current); |
63 | 66 | ||
64 | local_irq_save(flags); | 67 | local_irq_save(flags); |
@@ -68,7 +71,7 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
68 | 71 | ||
69 | if (likely(disabled == 1)) | 72 | if (likely(disabled == 1)) |
70 | tracing_sched_wakeup_trace(ctx_trace, data, wakee, current, | 73 | tracing_sched_wakeup_trace(ctx_trace, data, wakee, current, |
71 | flags); | 74 | flags, pc); |
72 | 75 | ||
73 | atomic_dec(&data->disabled); | 76 | atomic_dec(&data->disabled); |
74 | local_irq_restore(flags); | 77 | local_irq_restore(flags); |
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 01e75e0639b7..fe4a252c2363 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -44,10 +44,12 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
44 | long disabled; | 44 | long disabled; |
45 | int resched; | 45 | int resched; |
46 | int cpu; | 46 | int cpu; |
47 | int pc; | ||
47 | 48 | ||
48 | if (likely(!wakeup_task)) | 49 | if (likely(!wakeup_task)) |
49 | return; | 50 | return; |
50 | 51 | ||
52 | pc = preempt_count(); | ||
51 | resched = need_resched(); | 53 | resched = need_resched(); |
52 | preempt_disable_notrace(); | 54 | preempt_disable_notrace(); |
53 | 55 | ||
@@ -70,7 +72,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
70 | if (task_cpu(wakeup_task) != cpu) | 72 | if (task_cpu(wakeup_task) != cpu) |
71 | goto unlock; | 73 | goto unlock; |
72 | 74 | ||
73 | trace_function(tr, data, ip, parent_ip, flags); | 75 | trace_function(tr, data, ip, parent_ip, flags, pc); |
74 | 76 | ||
75 | unlock: | 77 | unlock: |
76 | __raw_spin_unlock(&wakeup_lock); | 78 | __raw_spin_unlock(&wakeup_lock); |
@@ -121,6 +123,7 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
121 | unsigned long flags; | 123 | unsigned long flags; |
122 | long disabled; | 124 | long disabled; |
123 | int cpu; | 125 | int cpu; |
126 | int pc; | ||
124 | 127 | ||
125 | tracing_record_cmdline(prev); | 128 | tracing_record_cmdline(prev); |
126 | 129 | ||
@@ -139,6 +142,8 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
139 | if (next != wakeup_task) | 142 | if (next != wakeup_task) |
140 | return; | 143 | return; |
141 | 144 | ||
145 | pc = preempt_count(); | ||
146 | |||
142 | /* The task we are waiting for is waking up */ | 147 | /* The task we are waiting for is waking up */ |
143 | data = wakeup_trace->data[wakeup_cpu]; | 148 | data = wakeup_trace->data[wakeup_cpu]; |
144 | 149 | ||
@@ -155,7 +160,7 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
155 | if (unlikely(!tracer_enabled || next != wakeup_task)) | 160 | if (unlikely(!tracer_enabled || next != wakeup_task)) |
156 | goto out_unlock; | 161 | goto out_unlock; |
157 | 162 | ||
158 | trace_function(wakeup_trace, data, CALLER_ADDR1, CALLER_ADDR2, flags); | 163 | trace_function(wakeup_trace, data, CALLER_ADDR1, CALLER_ADDR2, flags, pc); |
159 | 164 | ||
160 | /* | 165 | /* |
161 | * usecs conversion is slow so we try to delay the conversion | 166 | * usecs conversion is slow so we try to delay the conversion |
@@ -220,6 +225,7 @@ probe_wakeup(struct rq *rq, struct task_struct *p)
220 | int cpu = smp_processor_id(); | 225 | int cpu = smp_processor_id(); |
221 | unsigned long flags; | 226 | unsigned long flags; |
222 | long disabled; | 227 | long disabled; |
228 | int pc; | ||
223 | 229 | ||
224 | if (likely(!tracer_enabled)) | 230 | if (likely(!tracer_enabled)) |
225 | return; | 231 | return; |
@@ -232,6 +238,7 @@ probe_wakeup(struct rq *rq, struct task_struct *p)
232 | p->prio >= current->prio) | 238 | p->prio >= current->prio) |
233 | return; | 239 | return; |
234 | 240 | ||
241 | pc = preempt_count(); | ||
235 | disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled); | 242 | disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled); |
236 | if (unlikely(disabled != 1)) | 243 | if (unlikely(disabled != 1)) |
237 | goto out; | 244 | goto out; |
@@ -256,7 +263,7 @@ probe_wakeup(struct rq *rq, struct task_struct *p)
256 | 263 | ||
257 | wakeup_trace->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu); | 264 | wakeup_trace->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu); |
258 | trace_function(wakeup_trace, wakeup_trace->data[wakeup_cpu], | 265 | trace_function(wakeup_trace, wakeup_trace->data[wakeup_cpu], |
259 | CALLER_ADDR1, CALLER_ADDR2, flags); | 266 | CALLER_ADDR1, CALLER_ADDR2, flags, pc); |
260 | 267 | ||
261 | out_locked: | 268 | out_locked: |
262 | __raw_spin_unlock(&wakeup_lock); | 269 | __raw_spin_unlock(&wakeup_lock); |