Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--  kernel/trace/trace.c | 123 ++++++++++++++++++++++++++++++++-------------------------------
 1 file changed, 63 insertions(+), 60 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 948f7d821c62..1cd2e8143bb4 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -652,12 +652,10 @@ void tracing_record_cmdline(struct task_struct *tsk)
 }
 
 void
-tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)
+tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
+                             int pc)
 {
         struct task_struct *tsk = current;
-        unsigned long pc;
-
-        pc = preempt_count();
 
         entry->preempt_count = pc & 0xff;
         entry->pid = (tsk) ? tsk->pid : 0;
@@ -670,7 +668,8 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)
 
 void
 trace_function(struct trace_array *tr, struct trace_array_cpu *data,
-               unsigned long ip, unsigned long parent_ip, unsigned long flags)
+               unsigned long ip, unsigned long parent_ip, unsigned long flags,
+               int pc)
 {
         struct ring_buffer_event *event;
         struct ftrace_entry *entry;
@@ -685,7 +684,7 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data,
         if (!event)
                 return;
         entry = ring_buffer_event_data(event);
-        tracing_generic_entry_update(&entry->ent, flags);
+        tracing_generic_entry_update(&entry->ent, flags, pc);
         entry->ent.type = TRACE_FN;
         entry->ip = ip;
         entry->parent_ip = parent_ip;
@@ -694,16 +693,17 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data,
 
 void
 ftrace(struct trace_array *tr, struct trace_array_cpu *data,
-       unsigned long ip, unsigned long parent_ip, unsigned long flags)
+       unsigned long ip, unsigned long parent_ip, unsigned long flags,
+       int pc)
 {
         if (likely(!atomic_read(&data->disabled)))
-                trace_function(tr, data, ip, parent_ip, flags);
+                trace_function(tr, data, ip, parent_ip, flags, pc);
 }
 
-void __trace_stack(struct trace_array *tr,
+static void ftrace_trace_stack(struct trace_array *tr,
                    struct trace_array_cpu *data,
                    unsigned long flags,
-                   int skip)
+                   int skip, int pc)
 {
         struct ring_buffer_event *event;
         struct stack_entry *entry;
@@ -718,7 +718,7 @@ void __trace_stack(struct trace_array *tr,
         if (!event)
                 return;
         entry = ring_buffer_event_data(event);
-        tracing_generic_entry_update(&entry->ent, flags);
+        tracing_generic_entry_update(&entry->ent, flags, pc);
         entry->ent.type = TRACE_STACK;
 
         memset(&entry->caller, 0, sizeof(entry->caller));
@@ -732,9 +732,18 @@ void __trace_stack(struct trace_array *tr,
         ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
 }
 
-void
-__trace_special(void *__tr, void *__data,
-                unsigned long arg1, unsigned long arg2, unsigned long arg3)
+void __trace_stack(struct trace_array *tr,
+                   struct trace_array_cpu *data,
+                   unsigned long flags,
+                   int skip)
+{
+        ftrace_trace_stack(tr, data, flags, skip, preempt_count());
+}
+
+static void
+ftrace_trace_special(void *__tr, void *__data,
+                     unsigned long arg1, unsigned long arg2, unsigned long arg3,
+                     int pc)
 {
         struct ring_buffer_event *event;
         struct trace_array_cpu *data = __data;
@@ -747,23 +756,30 @@ __trace_special(void *__tr, void *__data,
         if (!event)
                 return;
         entry = ring_buffer_event_data(event);
-        tracing_generic_entry_update(&entry->ent, 0);
+        tracing_generic_entry_update(&entry->ent, 0, pc);
         entry->ent.type = TRACE_SPECIAL;
         entry->arg1 = arg1;
         entry->arg2 = arg2;
         entry->arg3 = arg3;
         ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-        __trace_stack(tr, data, irq_flags, 4);
+        ftrace_trace_stack(tr, data, irq_flags, 4, pc);
 
         trace_wake_up();
 }
 
 void
+__trace_special(void *__tr, void *__data,
+                unsigned long arg1, unsigned long arg2, unsigned long arg3)
+{
+        ftrace_trace_special(__tr, __data, arg1, arg2, arg3, preempt_count());
+}
+
+void
 tracing_sched_switch_trace(struct trace_array *tr,
                            struct trace_array_cpu *data,
                            struct task_struct *prev,
                            struct task_struct *next,
-                           unsigned long flags)
+                           unsigned long flags, int pc)
 {
         struct ring_buffer_event *event;
         struct ctx_switch_entry *entry;
@@ -774,7 +790,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
         if (!event)
                 return;
         entry = ring_buffer_event_data(event);
-        tracing_generic_entry_update(&entry->ent, flags);
+        tracing_generic_entry_update(&entry->ent, flags, pc);
         entry->ent.type = TRACE_CTX;
         entry->prev_pid = prev->pid;
         entry->prev_prio = prev->prio;
@@ -784,7 +800,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
         entry->next_state = next->state;
         entry->next_cpu = task_cpu(next);
         ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-        __trace_stack(tr, data, flags, 5);
+        ftrace_trace_stack(tr, data, flags, 5, pc);
 }
 
 void
@@ -792,7 +808,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
                            struct trace_array_cpu *data,
                            struct task_struct *wakee,
                            struct task_struct *curr,
-                           unsigned long flags)
+                           unsigned long flags, int pc)
 {
         struct ring_buffer_event *event;
         struct ctx_switch_entry *entry;
@@ -803,7 +819,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
         if (!event)
                 return;
         entry = ring_buffer_event_data(event);
-        tracing_generic_entry_update(&entry->ent, flags);
+        tracing_generic_entry_update(&entry->ent, flags, pc);
         entry->ent.type = TRACE_WAKE;
         entry->prev_pid = curr->pid;
         entry->prev_prio = curr->prio;
@@ -813,7 +829,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
         entry->next_state = wakee->state;
         entry->next_cpu = task_cpu(wakee);
         ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-        __trace_stack(tr, data, flags, 6);
+        ftrace_trace_stack(tr, data, flags, 6, pc);
 
         trace_wake_up();
 }
@@ -823,23 +839,24 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
 {
         struct trace_array *tr = &global_trace;
         struct trace_array_cpu *data;
-        unsigned long flags;
         long disabled;
         int cpu;
+        int pc;
 
         if (tracing_disabled || !tr->ctrl)
                 return;
 
-        local_irq_save(flags);
+        pc = preempt_count();
+        preempt_disable_notrace();
         cpu = raw_smp_processor_id();
         data = tr->data[cpu];
         disabled = atomic_inc_return(&data->disabled);
 
         if (likely(disabled == 1))
-                __trace_special(tr, data, arg1, arg2, arg3);
+                ftrace_trace_special(tr, data, arg1, arg2, arg3, pc);
 
         atomic_dec(&data->disabled);
-        local_irq_restore(flags);
+        preempt_enable_notrace();
 }
 
 #ifdef CONFIG_FTRACE
@@ -850,7 +867,8 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
         struct trace_array_cpu *data;
         unsigned long flags;
         long disabled;
-        int cpu;
+        int cpu, resched;
+        int pc;
 
         if (unlikely(!ftrace_function_enabled))
                 return;
@@ -858,16 +876,22 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
         if (skip_trace(ip))
                 return;
 
-        local_irq_save(flags);
+        pc = preempt_count();
+        resched = need_resched();
+        preempt_disable_notrace();
+        local_save_flags(flags);
         cpu = raw_smp_processor_id();
         data = tr->data[cpu];
         disabled = atomic_inc_return(&data->disabled);
 
         if (likely(disabled == 1))
-                trace_function(tr, data, ip, parent_ip, flags);
+                trace_function(tr, data, ip, parent_ip, flags, pc);
 
         atomic_dec(&data->disabled);
-        local_irq_restore(flags);
+        if (resched)
+                preempt_enable_no_resched_notrace();
+        else
+                preempt_enable_notrace();
 }
 
 static struct ftrace_ops trace_ops __read_mostly =
@@ -2508,9 +2532,6 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
                           size_t cnt, loff_t *ppos)
 {
         struct trace_iterator *iter = filp->private_data;
-#ifdef CONFIG_FTRACE
-        int ftrace_save;
-#endif
         ssize_t sret;
 
         /* return any leftover data */
@@ -2593,20 +2614,6 @@ waitagain:
                     offsetof(struct trace_iterator, seq));
         iter->pos = -1;
 
-        /*
-         * We need to stop all tracing on all CPUS to read the
-         * the next buffer. This is a bit expensive, but is
-         * not done often. We fill all what we can read,
-         * and then release the locks again.
-         */
-
-        local_irq_disable();
-#ifdef CONFIG_FTRACE
-        ftrace_save = ftrace_enabled;
-        ftrace_enabled = 0;
-#endif
-        smp_wmb();
-
         while (find_next_entry_inc(iter) != NULL) {
                 enum print_line_t ret;
                 int len = iter->seq.len;
@@ -2624,11 +2631,6 @@ waitagain:
                         break;
         }
 
-#ifdef CONFIG_FTRACE
-        ftrace_enabled = ftrace_save;
-#endif
-        local_irq_enable();
-
         /* Now copy what we have to the user */
         sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
         if (iter->seq.readpos >= iter->seq.len)
@@ -2960,12 +2962,13 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
         struct print_entry *entry;
         unsigned long flags, irq_flags;
         long disabled;
-        int cpu, len = 0, size;
+        int cpu, len = 0, size, pc;
 
         if (!tr->ctrl || tracing_disabled)
                 return 0;
 
-        local_irq_save(flags);
+        pc = preempt_count();
+        preempt_disable_notrace();
         cpu = raw_smp_processor_id();
         data = tr->data[cpu];
         disabled = atomic_inc_return(&data->disabled);
@@ -2973,7 +2976,7 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
         if (unlikely(disabled != 1))
                 goto out;
 
-        spin_lock(&trace_buf_lock);
+        spin_lock_irqsave(&trace_buf_lock, flags);
         len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
 
         len = min(len, TRACE_BUF_SIZE-1);
@@ -2984,7 +2987,7 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
         if (!event)
                 goto out_unlock;
         entry = ring_buffer_event_data(event);
-        tracing_generic_entry_update(&entry->ent, flags);
+        tracing_generic_entry_update(&entry->ent, flags, pc);
         entry->ent.type = TRACE_PRINT;
         entry->ip = ip;
 
@@ -2993,11 +2996,11 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
         ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
 
  out_unlock:
-        spin_unlock(&trace_buf_lock);
+        spin_unlock_irqrestore(&trace_buf_lock, flags);
 
  out:
         atomic_dec(&data->disabled);
-        local_irq_restore(flags);
+        preempt_enable_notrace();
 
         return len;
 }
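
The pattern repeated throughout this patch is that the outermost hook samples preempt_count() once, before it disables preemption itself, and hands that value down (through trace_function(), ftrace_trace_stack(), tracing_generic_entry_update()) so the recorded entry reflects the traced context rather than the tracer's own nesting. A minimal userspace sketch of that idea follows; all names are made up for illustration and none of this is the kernel code itself.

```c
/* Sketch: capture the caller's state once at the hook and pass it down,
 * so helpers do not re-read a counter the "tracer" has already bumped.
 */
#include <stdio.h>

static int nest_count;                  /* stand-in for preempt_count() */

static void record_entry(const char *what, int pc)
{
        /* pc is the value captured by the outermost hook */
        printf("%-8s pc=%d (count now %d)\n", what, pc, nest_count);
}

static void trace_hook(const char *what)
{
        int pc = nest_count;            /* capture caller state first...   */

        nest_count++;                   /* ...then enter the "tracer"      */
        record_entry(what, pc);         /* helpers tag entries with pc     */
        nest_count--;                   /* leave the "tracer" again        */
}

int main(void)
{
        trace_hook("outer");            /* prints pc=0 even though the     */
        return 0;                       /* counter is 1 inside the hook    */
}
```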