diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-12-10 22:58:13 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-12-10 22:58:13 -0500 |
commit | 1dd7dcb6eaa677b034e7ef63df8320277507ae70 (patch) | |
tree | 3f1592b634d7bdde94e00570925be2dade8433d4 | |
parent | b6da0076bab5a12afb19312ffee41c95490af2a0 (diff) | |
parent | 3558a5ac50dbb2419cc649d5e154af161d661037 (diff) |
Merge tag 'trace-3.19' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:
"There was a lot of clean ups and minor fixes. One of those clean ups
was to the trace_seq code. It also removed the return values from the
trace_seq_*() functions and uses trace_seq_has_overflowed() to see if
the buffer filled up or not. This is similar to work being done to
the seq_file code as well in another tree.
Some of the other goodies include:
- Added some "!" (NOT) logic to the tracing filter.
- Fixed the frame pointer logic in the x86_64 mcount trampolines
- Added the logic for dynamic trampolines on !CONFIG_PREEMPT systems.
That is, the ftrace trampoline can be dynamically allocated and be
called directly by functions that only have a single hook to them"
* tag 'trace-3.19' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (55 commits)
tracing: Truncated output is better than nothing
tracing: Add additional marks to signal very large time deltas
Documentation: describe trace_buf_size parameter more accurately
tracing: Allow NOT to filter AND and OR clauses
tracing: Add NOT to filtering logic
ftrace/fgraph/x86: Have prepare_ftrace_return() take ip as first parameter
ftrace/x86: Get rid of ftrace_caller_setup
ftrace/x86: Have save_mcount_regs macro also save stack frames if needed
ftrace/x86: Add macro MCOUNT_REG_SIZE for amount of stack used to save mcount regs
ftrace/x86: Simplify save_mcount_regs on getting RIP
ftrace/x86: Have save_mcount_regs store RIP in %rdi for first parameter
ftrace/x86: Rename MCOUNT_SAVE_FRAME and add more detailed comments
ftrace/x86: Move MCOUNT_SAVE_FRAME out of header file
ftrace/x86: Have static tracing also use ftrace_caller_setup
ftrace/x86: Have static function tracing always test for function graph
kprobes: Add IPMODIFY flag to kprobe_ftrace_ops
ftrace, kprobes: Support IPMODIFY flag to find IP modify conflict
kprobes/ftrace: Recover original IP if pre_handler doesn't change it
tracing/trivial: Fix typos and make an int into a bool
tracing: Deletion of an unnecessary check before iput()
...
38 files changed, 1680 insertions, 1312 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index d6eb3636fe5a..32ba9ea5934a 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt | |||
@@ -3520,7 +3520,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
3520 | are saved. | 3520 | are saved. |
3521 | 3521 | ||
3522 | trace_buf_size=nn[KMG] | 3522 | trace_buf_size=nn[KMG] |
3523 | [FTRACE] will set tracing buffer size. | 3523 | [FTRACE] will set tracing buffer size on each cpu. |
3524 | 3524 | ||
3525 | trace_event=[event-list] | 3525 | trace_event=[event-list] |
3526 | [FTRACE] Set and start specified trace events in order | 3526 | [FTRACE] Set and start specified trace events in order |
diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt index 4da42616939f..8408e040f06f 100644 --- a/Documentation/trace/ftrace.txt +++ b/Documentation/trace/ftrace.txt | |||
@@ -234,6 +234,11 @@ of ftrace. Here is a list of some of the key files: | |||
234 | will be displayed on the same line as the function that | 234 | will be displayed on the same line as the function that |
235 | is returning registers. | 235 | is returning registers. |
236 | 236 | ||
237 | If the callback registered to be traced by a function with | ||
238 | the "ip modify" attribute (thus the regs->ip can be changed), | ||
239 | an 'I' will be displayed on the same line as the function that | ||
240 | can be overridden. | ||
241 | |||
237 | function_profile_enabled: | 242 | function_profile_enabled: |
238 | 243 | ||
239 | When set it will enable all functions with either the function | 244 | When set it will enable all functions with either the function |
@@ -680,9 +685,11 @@ The above is mostly meaningful for kernel developers. | |||
680 | needs to be fixed to be only relative to the same CPU. | 685 | needs to be fixed to be only relative to the same CPU. |
681 | The marks are determined by the difference between this | 686 | The marks are determined by the difference between this |
682 | current trace and the next trace. | 687 | current trace and the next trace. |
683 | '!' - greater than preempt_mark_thresh (default 100) | 688 | '$' - greater than 1 second |
684 | '+' - greater than 1 microsecond | 689 | '#' - greater than 1000 microsecond |
685 | ' ' - less than or equal to 1 microsecond. | 690 | '!' - greater than 100 microsecond |
691 | '+' - greater than 10 microsecond | ||
692 | ' ' - less than or equal to 10 microsecond. | ||
686 | 693 | ||
687 | The rest is the same as the 'trace' file. | 694 | The rest is the same as the 'trace' file. |
688 | 695 | ||
@@ -1951,6 +1958,8 @@ want, depending on your needs. | |||
1951 | 1958 | ||
1952 | + means that the function exceeded 10 usecs. | 1959 | + means that the function exceeded 10 usecs. |
1953 | ! means that the function exceeded 100 usecs. | 1960 | ! means that the function exceeded 100 usecs. |
1961 | # means that the function exceeded 1000 usecs. | ||
1962 | $ means that the function exceeded 1 sec. | ||
1954 | 1963 | ||
1955 | 1964 | ||
1956 | - The task/pid field displays the thread cmdline and pid which | 1965 | - The task/pid field displays the thread cmdline and pid which |
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c index 390311c0f03d..e66af6d265e8 100644 --- a/arch/powerpc/kernel/ftrace.c +++ b/arch/powerpc/kernel/ftrace.c | |||
@@ -449,7 +449,7 @@ void ftrace_replace_code(int enable) | |||
449 | rec = ftrace_rec_iter_record(iter); | 449 | rec = ftrace_rec_iter_record(iter); |
450 | ret = __ftrace_replace_code(rec, enable); | 450 | ret = __ftrace_replace_code(rec, enable); |
451 | if (ret) { | 451 | if (ret) { |
452 | ftrace_bug(ret, rec->ip); | 452 | ftrace_bug(ret, rec); |
453 | return; | 453 | return; |
454 | } | 454 | } |
455 | } | 455 | } |
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h index e1f7fecaa7d6..f45acad3c4b6 100644 --- a/arch/x86/include/asm/ftrace.h +++ b/arch/x86/include/asm/ftrace.h | |||
@@ -1,39 +1,6 @@ | |||
1 | #ifndef _ASM_X86_FTRACE_H | 1 | #ifndef _ASM_X86_FTRACE_H |
2 | #define _ASM_X86_FTRACE_H | 2 | #define _ASM_X86_FTRACE_H |
3 | 3 | ||
4 | #ifdef __ASSEMBLY__ | ||
5 | |||
6 | /* skip is set if the stack was already partially adjusted */ | ||
7 | .macro MCOUNT_SAVE_FRAME skip=0 | ||
8 | /* | ||
9 | * We add enough stack to save all regs. | ||
10 | */ | ||
11 | subq $(SS+8-\skip), %rsp | ||
12 | movq %rax, RAX(%rsp) | ||
13 | movq %rcx, RCX(%rsp) | ||
14 | movq %rdx, RDX(%rsp) | ||
15 | movq %rsi, RSI(%rsp) | ||
16 | movq %rdi, RDI(%rsp) | ||
17 | movq %r8, R8(%rsp) | ||
18 | movq %r9, R9(%rsp) | ||
19 | /* Move RIP to its proper location */ | ||
20 | movq SS+8(%rsp), %rdx | ||
21 | movq %rdx, RIP(%rsp) | ||
22 | .endm | ||
23 | |||
24 | .macro MCOUNT_RESTORE_FRAME skip=0 | ||
25 | movq R9(%rsp), %r9 | ||
26 | movq R8(%rsp), %r8 | ||
27 | movq RDI(%rsp), %rdi | ||
28 | movq RSI(%rsp), %rsi | ||
29 | movq RDX(%rsp), %rdx | ||
30 | movq RCX(%rsp), %rcx | ||
31 | movq RAX(%rsp), %rax | ||
32 | addq $(SS+8-\skip), %rsp | ||
33 | .endm | ||
34 | |||
35 | #endif | ||
36 | |||
37 | #ifdef CONFIG_FUNCTION_TRACER | 4 | #ifdef CONFIG_FUNCTION_TRACER |
38 | #ifdef CC_USING_FENTRY | 5 | #ifdef CC_USING_FENTRY |
39 | # define MCOUNT_ADDR ((long)(__fentry__)) | 6 | # define MCOUNT_ADDR ((long)(__fentry__)) |
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 3386dc9aa333..2142376dc8c6 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/ftrace.h> | 17 | #include <linux/ftrace.h> |
18 | #include <linux/percpu.h> | 18 | #include <linux/percpu.h> |
19 | #include <linux/sched.h> | 19 | #include <linux/sched.h> |
20 | #include <linux/slab.h> | ||
20 | #include <linux/init.h> | 21 | #include <linux/init.h> |
21 | #include <linux/list.h> | 22 | #include <linux/list.h> |
22 | #include <linux/module.h> | 23 | #include <linux/module.h> |
@@ -47,7 +48,7 @@ int ftrace_arch_code_modify_post_process(void) | |||
47 | union ftrace_code_union { | 48 | union ftrace_code_union { |
48 | char code[MCOUNT_INSN_SIZE]; | 49 | char code[MCOUNT_INSN_SIZE]; |
49 | struct { | 50 | struct { |
50 | char e8; | 51 | unsigned char e8; |
51 | int offset; | 52 | int offset; |
52 | } __attribute__((packed)); | 53 | } __attribute__((packed)); |
53 | }; | 54 | }; |
@@ -582,7 +583,7 @@ void ftrace_replace_code(int enable) | |||
582 | 583 | ||
583 | remove_breakpoints: | 584 | remove_breakpoints: |
584 | pr_warn("Failed on %s (%d):\n", report, count); | 585 | pr_warn("Failed on %s (%d):\n", report, count); |
585 | ftrace_bug(ret, rec ? rec->ip : 0); | 586 | ftrace_bug(ret, rec); |
586 | for_ftrace_rec_iter(iter) { | 587 | for_ftrace_rec_iter(iter) { |
587 | rec = ftrace_rec_iter_record(iter); | 588 | rec = ftrace_rec_iter_record(iter); |
588 | /* | 589 | /* |
@@ -644,13 +645,8 @@ int __init ftrace_dyn_arch_init(void) | |||
644 | { | 645 | { |
645 | return 0; | 646 | return 0; |
646 | } | 647 | } |
647 | #endif | ||
648 | |||
649 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
650 | |||
651 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
652 | extern void ftrace_graph_call(void); | ||
653 | 648 | ||
649 | #if defined(CONFIG_X86_64) || defined(CONFIG_FUNCTION_GRAPH_TRACER) | ||
654 | static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr) | 650 | static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr) |
655 | { | 651 | { |
656 | static union ftrace_code_union calc; | 652 | static union ftrace_code_union calc; |
@@ -664,6 +660,280 @@ static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr) | |||
664 | */ | 660 | */ |
665 | return calc.code; | 661 | return calc.code; |
666 | } | 662 | } |
663 | #endif | ||
664 | |||
665 | /* Currently only x86_64 supports dynamic trampolines */ | ||
666 | #ifdef CONFIG_X86_64 | ||
667 | |||
668 | #ifdef CONFIG_MODULES | ||
669 | #include <linux/moduleloader.h> | ||
670 | /* Module allocation simplifies allocating memory for code */ | ||
671 | static inline void *alloc_tramp(unsigned long size) | ||
672 | { | ||
673 | return module_alloc(size); | ||
674 | } | ||
675 | static inline void tramp_free(void *tramp) | ||
676 | { | ||
677 | module_free(NULL, tramp); | ||
678 | } | ||
679 | #else | ||
680 | /* Trampolines can only be created if modules are supported */ | ||
681 | static inline void *alloc_tramp(unsigned long size) | ||
682 | { | ||
683 | return NULL; | ||
684 | } | ||
685 | static inline void tramp_free(void *tramp) { } | ||
686 | #endif | ||
687 | |||
688 | /* Defined as markers to the end of the ftrace default trampolines */ | ||
689 | extern void ftrace_caller_end(void); | ||
690 | extern void ftrace_regs_caller_end(void); | ||
691 | extern void ftrace_return(void); | ||
692 | extern void ftrace_caller_op_ptr(void); | ||
693 | extern void ftrace_regs_caller_op_ptr(void); | ||
694 | |||
695 | /* movq function_trace_op(%rip), %rdx */ | ||
696 | /* 0x48 0x8b 0x15 <offset-to-ftrace_trace_op (4 bytes)> */ | ||
697 | #define OP_REF_SIZE 7 | ||
698 | |||
699 | /* | ||
700 | * The ftrace_ops is passed to the function callback. Since the | ||
701 | * trampoline only services a single ftrace_ops, we can pass in | ||
702 | * that ops directly. | ||
703 | * | ||
704 | * The ftrace_op_code_union is used to create a pointer to the | ||
705 | * ftrace_ops that will be passed to the callback function. | ||
706 | */ | ||
707 | union ftrace_op_code_union { | ||
708 | char code[OP_REF_SIZE]; | ||
709 | struct { | ||
710 | char op[3]; | ||
711 | int offset; | ||
712 | } __attribute__((packed)); | ||
713 | }; | ||
714 | |||
715 | static unsigned long | ||
716 | create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size) | ||
717 | { | ||
718 | unsigned const char *jmp; | ||
719 | unsigned long start_offset; | ||
720 | unsigned long end_offset; | ||
721 | unsigned long op_offset; | ||
722 | unsigned long offset; | ||
723 | unsigned long size; | ||
724 | unsigned long ip; | ||
725 | unsigned long *ptr; | ||
726 | void *trampoline; | ||
727 | /* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */ | ||
728 | unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 }; | ||
729 | union ftrace_op_code_union op_ptr; | ||
730 | int ret; | ||
731 | |||
732 | if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) { | ||
733 | start_offset = (unsigned long)ftrace_regs_caller; | ||
734 | end_offset = (unsigned long)ftrace_regs_caller_end; | ||
735 | op_offset = (unsigned long)ftrace_regs_caller_op_ptr; | ||
736 | } else { | ||
737 | start_offset = (unsigned long)ftrace_caller; | ||
738 | end_offset = (unsigned long)ftrace_caller_end; | ||
739 | op_offset = (unsigned long)ftrace_caller_op_ptr; | ||
740 | } | ||
741 | |||
742 | size = end_offset - start_offset; | ||
743 | |||
744 | /* | ||
745 | * Allocate enough size to store the ftrace_caller code, | ||
746 | * the jmp to ftrace_return, as well as the address of | ||
747 | * the ftrace_ops this trampoline is used for. | ||
748 | */ | ||
749 | trampoline = alloc_tramp(size + MCOUNT_INSN_SIZE + sizeof(void *)); | ||
750 | if (!trampoline) | ||
751 | return 0; | ||
752 | |||
753 | *tramp_size = size + MCOUNT_INSN_SIZE + sizeof(void *); | ||
754 | |||
755 | /* Copy ftrace_caller onto the trampoline memory */ | ||
756 | ret = probe_kernel_read(trampoline, (void *)start_offset, size); | ||
757 | if (WARN_ON(ret < 0)) { | ||
758 | tramp_free(trampoline); | ||
759 | return 0; | ||
760 | } | ||
761 | |||
762 | ip = (unsigned long)trampoline + size; | ||
763 | |||
764 | /* The trampoline ends with a jmp to ftrace_return */ | ||
765 | jmp = ftrace_jmp_replace(ip, (unsigned long)ftrace_return); | ||
766 | memcpy(trampoline + size, jmp, MCOUNT_INSN_SIZE); | ||
767 | |||
768 | /* | ||
769 | * The address of the ftrace_ops that is used for this trampoline | ||
770 | * is stored at the end of the trampoline. This will be used to | ||
771 | * load the third parameter for the callback. Basically, that | ||
772 | * location at the end of the trampoline takes the place of | ||
773 | * the global function_trace_op variable. | ||
774 | */ | ||
775 | |||
776 | ptr = (unsigned long *)(trampoline + size + MCOUNT_INSN_SIZE); | ||
777 | *ptr = (unsigned long)ops; | ||
778 | |||
779 | op_offset -= start_offset; | ||
780 | memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE); | ||
781 | |||
782 | /* Are we pointing to the reference? */ | ||
783 | if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0)) { | ||
784 | tramp_free(trampoline); | ||
785 | return 0; | ||
786 | } | ||
787 | |||
788 | /* Load the contents of ptr into the callback parameter */ | ||
789 | offset = (unsigned long)ptr; | ||
790 | offset -= (unsigned long)trampoline + op_offset + OP_REF_SIZE; | ||
791 | |||
792 | op_ptr.offset = offset; | ||
793 | |||
794 | /* put in the new offset to the ftrace_ops */ | ||
795 | memcpy(trampoline + op_offset, &op_ptr, OP_REF_SIZE); | ||
796 | |||
797 | /* ALLOC_TRAMP flags lets us know we created it */ | ||
798 | ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP; | ||
799 | |||
800 | return (unsigned long)trampoline; | ||
801 | } | ||
802 | |||
803 | static unsigned long calc_trampoline_call_offset(bool save_regs) | ||
804 | { | ||
805 | unsigned long start_offset; | ||
806 | unsigned long call_offset; | ||
807 | |||
808 | if (save_regs) { | ||
809 | start_offset = (unsigned long)ftrace_regs_caller; | ||
810 | call_offset = (unsigned long)ftrace_regs_call; | ||
811 | } else { | ||
812 | start_offset = (unsigned long)ftrace_caller; | ||
813 | call_offset = (unsigned long)ftrace_call; | ||
814 | } | ||
815 | |||
816 | return call_offset - start_offset; | ||
817 | } | ||
818 | |||
819 | void arch_ftrace_update_trampoline(struct ftrace_ops *ops) | ||
820 | { | ||
821 | ftrace_func_t func; | ||
822 | unsigned char *new; | ||
823 | unsigned long offset; | ||
824 | unsigned long ip; | ||
825 | unsigned int size; | ||
826 | int ret; | ||
827 | |||
828 | if (ops->trampoline) { | ||
829 | /* | ||
830 | * The ftrace_ops caller may set up its own trampoline. | ||
831 | * In such a case, this code must not modify it. | ||
832 | */ | ||
833 | if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) | ||
834 | return; | ||
835 | } else { | ||
836 | ops->trampoline = create_trampoline(ops, &size); | ||
837 | if (!ops->trampoline) | ||
838 | return; | ||
839 | ops->trampoline_size = size; | ||
840 | } | ||
841 | |||
842 | offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS); | ||
843 | ip = ops->trampoline + offset; | ||
844 | |||
845 | func = ftrace_ops_get_func(ops); | ||
846 | |||
847 | /* Do a safe modify in case the trampoline is executing */ | ||
848 | new = ftrace_call_replace(ip, (unsigned long)func); | ||
849 | ret = update_ftrace_func(ip, new); | ||
850 | |||
851 | /* The update should never fail */ | ||
852 | WARN_ON(ret); | ||
853 | } | ||
854 | |||
855 | /* Return the address of the function the trampoline calls */ | ||
856 | static void *addr_from_call(void *ptr) | ||
857 | { | ||
858 | union ftrace_code_union calc; | ||
859 | int ret; | ||
860 | |||
861 | ret = probe_kernel_read(&calc, ptr, MCOUNT_INSN_SIZE); | ||
862 | if (WARN_ON_ONCE(ret < 0)) | ||
863 | return NULL; | ||
864 | |||
865 | /* Make sure this is a call */ | ||
866 | if (WARN_ON_ONCE(calc.e8 != 0xe8)) { | ||
867 | pr_warn("Expected e8, got %x\n", calc.e8); | ||
868 | return NULL; | ||
869 | } | ||
870 | |||
871 | return ptr + MCOUNT_INSN_SIZE + calc.offset; | ||
872 | } | ||
873 | |||
874 | void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent, | ||
875 | unsigned long frame_pointer); | ||
876 | |||
877 | /* | ||
878 | * If the ops->trampoline was not allocated, then it probably | ||
879 | * has a static trampoline func, or is the ftrace caller itself. | ||
880 | */ | ||
881 | static void *static_tramp_func(struct ftrace_ops *ops, struct dyn_ftrace *rec) | ||
882 | { | ||
883 | unsigned long offset; | ||
884 | bool save_regs = rec->flags & FTRACE_FL_REGS_EN; | ||
885 | void *ptr; | ||
886 | |||
887 | if (ops && ops->trampoline) { | ||
888 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
889 | /* | ||
890 | * We only know about function graph tracer setting as static | ||
891 | * trampoline. | ||
892 | */ | ||
893 | if (ops->trampoline == FTRACE_GRAPH_ADDR) | ||
894 | return (void *)prepare_ftrace_return; | ||
895 | #endif | ||
896 | return NULL; | ||
897 | } | ||
898 | |||
899 | offset = calc_trampoline_call_offset(save_regs); | ||
900 | |||
901 | if (save_regs) | ||
902 | ptr = (void *)FTRACE_REGS_ADDR + offset; | ||
903 | else | ||
904 | ptr = (void *)FTRACE_ADDR + offset; | ||
905 | |||
906 | return addr_from_call(ptr); | ||
907 | } | ||
908 | |||
909 | void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec) | ||
910 | { | ||
911 | unsigned long offset; | ||
912 | |||
913 | /* If we didn't allocate this trampoline, consider it static */ | ||
914 | if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) | ||
915 | return static_tramp_func(ops, rec); | ||
916 | |||
917 | offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS); | ||
918 | return addr_from_call((void *)ops->trampoline + offset); | ||
919 | } | ||
920 | |||
921 | void arch_ftrace_trampoline_free(struct ftrace_ops *ops) | ||
922 | { | ||
923 | if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) | ||
924 | return; | ||
925 | |||
926 | tramp_free((void *)ops->trampoline); | ||
927 | ops->trampoline = 0; | ||
928 | } | ||
929 | |||
930 | #endif /* CONFIG_X86_64 */ | ||
931 | #endif /* CONFIG_DYNAMIC_FTRACE */ | ||
932 | |||
933 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
934 | |||
935 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
936 | extern void ftrace_graph_call(void); | ||
667 | 937 | ||
668 | static int ftrace_mod_jmp(unsigned long ip, void *func) | 938 | static int ftrace_mod_jmp(unsigned long ip, void *func) |
669 | { | 939 | { |
@@ -694,7 +964,7 @@ int ftrace_disable_ftrace_graph_caller(void) | |||
694 | * Hook the return address and push it in the stack of return addrs | 964 | * Hook the return address and push it in the stack of return addrs |
695 | * in current thread info. | 965 | * in current thread info. |
696 | */ | 966 | */ |
697 | void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, | 967 | void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent, |
698 | unsigned long frame_pointer) | 968 | unsigned long frame_pointer) |
699 | { | 969 | { |
700 | unsigned long old; | 970 | unsigned long old; |
diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c index 717b02a22e67..5f8f0b3cc674 100644 --- a/arch/x86/kernel/kprobes/ftrace.c +++ b/arch/x86/kernel/kprobes/ftrace.c | |||
@@ -27,7 +27,7 @@ | |||
27 | 27 | ||
28 | static nokprobe_inline | 28 | static nokprobe_inline |
29 | int __skip_singlestep(struct kprobe *p, struct pt_regs *regs, | 29 | int __skip_singlestep(struct kprobe *p, struct pt_regs *regs, |
30 | struct kprobe_ctlblk *kcb) | 30 | struct kprobe_ctlblk *kcb, unsigned long orig_ip) |
31 | { | 31 | { |
32 | /* | 32 | /* |
33 | * Emulate singlestep (and also recover regs->ip) | 33 | * Emulate singlestep (and also recover regs->ip) |
@@ -39,6 +39,8 @@ int __skip_singlestep(struct kprobe *p, struct pt_regs *regs, | |||
39 | p->post_handler(p, regs, 0); | 39 | p->post_handler(p, regs, 0); |
40 | } | 40 | } |
41 | __this_cpu_write(current_kprobe, NULL); | 41 | __this_cpu_write(current_kprobe, NULL); |
42 | if (orig_ip) | ||
43 | regs->ip = orig_ip; | ||
42 | return 1; | 44 | return 1; |
43 | } | 45 | } |
44 | 46 | ||
@@ -46,7 +48,7 @@ int skip_singlestep(struct kprobe *p, struct pt_regs *regs, | |||
46 | struct kprobe_ctlblk *kcb) | 48 | struct kprobe_ctlblk *kcb) |
47 | { | 49 | { |
48 | if (kprobe_ftrace(p)) | 50 | if (kprobe_ftrace(p)) |
49 | return __skip_singlestep(p, regs, kcb); | 51 | return __skip_singlestep(p, regs, kcb, 0); |
50 | else | 52 | else |
51 | return 0; | 53 | return 0; |
52 | } | 54 | } |
@@ -71,13 +73,14 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, | |||
71 | if (kprobe_running()) { | 73 | if (kprobe_running()) { |
72 | kprobes_inc_nmissed_count(p); | 74 | kprobes_inc_nmissed_count(p); |
73 | } else { | 75 | } else { |
76 | unsigned long orig_ip = regs->ip; | ||
74 | /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */ | 77 | /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */ |
75 | regs->ip = ip + sizeof(kprobe_opcode_t); | 78 | regs->ip = ip + sizeof(kprobe_opcode_t); |
76 | 79 | ||
77 | __this_cpu_write(current_kprobe, p); | 80 | __this_cpu_write(current_kprobe, p); |
78 | kcb->kprobe_status = KPROBE_HIT_ACTIVE; | 81 | kcb->kprobe_status = KPROBE_HIT_ACTIVE; |
79 | if (!p->pre_handler || !p->pre_handler(p, regs)) | 82 | if (!p->pre_handler || !p->pre_handler(p, regs)) |
80 | __skip_singlestep(p, regs, kcb); | 83 | __skip_singlestep(p, regs, kcb, orig_ip); |
81 | /* | 84 | /* |
82 | * If pre_handler returns !0, it sets regs->ip and | 85 | * If pre_handler returns !0, it sets regs->ip and |
83 | * resets current kprobe. | 86 | * resets current kprobe. |
diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S index c73aecf10d34..94ea120fa21f 100644 --- a/arch/x86/kernel/mcount_64.S +++ b/arch/x86/kernel/mcount_64.S | |||
@@ -21,40 +21,159 @@ | |||
21 | # define function_hook mcount | 21 | # define function_hook mcount |
22 | #endif | 22 | #endif |
23 | 23 | ||
24 | /* All cases save the original rbp (8 bytes) */ | ||
25 | #ifdef CONFIG_FRAME_POINTER | ||
26 | # ifdef CC_USING_FENTRY | ||
27 | /* Save parent and function stack frames (rip and rbp) */ | ||
28 | # define MCOUNT_FRAME_SIZE (8+16*2) | ||
29 | # else | ||
30 | /* Save just function stack frame (rip and rbp) */ | ||
31 | # define MCOUNT_FRAME_SIZE (8+16) | ||
32 | # endif | ||
33 | #else | ||
34 | /* No need to save a stack frame */ | ||
35 | # define MCOUNT_FRAME_SIZE 8 | ||
36 | #endif /* CONFIG_FRAME_POINTER */ | ||
37 | |||
38 | /* Size of stack used to save mcount regs in save_mcount_regs */ | ||
39 | #define MCOUNT_REG_SIZE (SS+8 + MCOUNT_FRAME_SIZE) | ||
40 | |||
41 | /* | ||
42 | * gcc -pg option adds a call to 'mcount' in most functions. | ||
43 | * When -mfentry is used, the call is to 'fentry' and not 'mcount' | ||
44 | * and is done before the function's stack frame is set up. | ||
45 | * They both require a set of regs to be saved before calling | ||
46 | * any C code and restored before returning back to the function. | ||
47 | * | ||
48 | * On boot up, all these calls are converted into nops. When tracing | ||
49 | * is enabled, the call can jump to either ftrace_caller or | ||
50 | * ftrace_regs_caller. Callbacks (tracing functions) that require | ||
51 | * ftrace_regs_caller (like kprobes) need to have pt_regs passed to | ||
52 | * it. For this reason, the size of the pt_regs structure will be | ||
53 | * allocated on the stack and the required mcount registers will | ||
54 | * be saved in the locations that pt_regs has them in. | ||
55 | */ | ||
56 | |||
57 | /* | ||
58 | * @added: the amount of stack added before calling this | ||
59 | * | ||
60 | * After this is called, the following registers contain: | ||
61 | * | ||
62 | * %rdi - holds the address that called the trampoline | ||
63 | * %rsi - holds the parent function (traced function's return address) | ||
64 | * %rdx - holds the original %rbp | ||
65 | */ | ||
66 | .macro save_mcount_regs added=0 | ||
67 | |||
68 | /* Always save the original rbp */ | ||
69 | pushq %rbp | ||
70 | |||
71 | #ifdef CONFIG_FRAME_POINTER | ||
72 | /* | ||
73 | * Stack traces will stop at the ftrace trampoline if the frame pointer | ||
74 | * is not set up properly. If fentry is used, we need to save a frame | ||
75 | * pointer for the parent as well as the function traced, because the | ||
76 | * fentry is called before the stack frame is set up, where as mcount | ||
77 | * is called afterward. | ||
78 | */ | ||
79 | #ifdef CC_USING_FENTRY | ||
80 | /* Save the parent pointer (skip orig rbp and our return address) */ | ||
81 | pushq \added+8*2(%rsp) | ||
82 | pushq %rbp | ||
83 | movq %rsp, %rbp | ||
84 | /* Save the return address (now skip orig rbp, rbp and parent) */ | ||
85 | pushq \added+8*3(%rsp) | ||
86 | #else | ||
87 | /* Can't assume that rip is before this (unless added was zero) */ | ||
88 | pushq \added+8(%rsp) | ||
89 | #endif | ||
90 | pushq %rbp | ||
91 | movq %rsp, %rbp | ||
92 | #endif /* CONFIG_FRAME_POINTER */ | ||
93 | |||
94 | /* | ||
95 | * We add enough stack to save all regs. | ||
96 | */ | ||
97 | subq $(MCOUNT_REG_SIZE - MCOUNT_FRAME_SIZE), %rsp | ||
98 | movq %rax, RAX(%rsp) | ||
99 | movq %rcx, RCX(%rsp) | ||
100 | movq %rdx, RDX(%rsp) | ||
101 | movq %rsi, RSI(%rsp) | ||
102 | movq %rdi, RDI(%rsp) | ||
103 | movq %r8, R8(%rsp) | ||
104 | movq %r9, R9(%rsp) | ||
105 | /* | ||
106 | * Save the original RBP. Even though the mcount ABI does not | ||
107 | * require this, it helps out callers. | ||
108 | */ | ||
109 | movq MCOUNT_REG_SIZE-8(%rsp), %rdx | ||
110 | movq %rdx, RBP(%rsp) | ||
111 | |||
112 | /* Copy the parent address into %rsi (second parameter) */ | ||
113 | #ifdef CC_USING_FENTRY | ||
114 | movq MCOUNT_REG_SIZE+8+\added(%rsp), %rsi | ||
115 | #else | ||
116 | /* %rdx contains original %rbp */ | ||
117 | movq 8(%rdx), %rsi | ||
118 | #endif | ||
119 | |||
120 | /* Move RIP to its proper location */ | ||
121 | movq MCOUNT_REG_SIZE+\added(%rsp), %rdi | ||
122 | movq %rdi, RIP(%rsp) | ||
123 | |||
124 | /* | ||
125 | * Now %rdi (the first parameter) has the return address of | ||
126 | * where ftrace_call returns. But the callbacks expect the | ||
127 | * address of the call itself. | ||
128 | */ | ||
129 | subq $MCOUNT_INSN_SIZE, %rdi | ||
130 | .endm | ||
131 | |||
132 | .macro restore_mcount_regs | ||
133 | movq R9(%rsp), %r9 | ||
134 | movq R8(%rsp), %r8 | ||
135 | movq RDI(%rsp), %rdi | ||
136 | movq RSI(%rsp), %rsi | ||
137 | movq RDX(%rsp), %rdx | ||
138 | movq RCX(%rsp), %rcx | ||
139 | movq RAX(%rsp), %rax | ||
140 | |||
141 | /* ftrace_regs_caller can modify %rbp */ | ||
142 | movq RBP(%rsp), %rbp | ||
143 | |||
144 | addq $MCOUNT_REG_SIZE, %rsp | ||
145 | |||
146 | .endm | ||
147 | |||
24 | #ifdef CONFIG_DYNAMIC_FTRACE | 148 | #ifdef CONFIG_DYNAMIC_FTRACE |
25 | 149 | ||
26 | ENTRY(function_hook) | 150 | ENTRY(function_hook) |
27 | retq | 151 | retq |
28 | END(function_hook) | 152 | END(function_hook) |
29 | 153 | ||
30 | /* skip is set if stack has been adjusted */ | 154 | ENTRY(ftrace_caller) |
31 | .macro ftrace_caller_setup skip=0 | 155 | /* save_mcount_regs fills in first two parameters */ |
32 | MCOUNT_SAVE_FRAME \skip | 156 | save_mcount_regs |
33 | 157 | ||
158 | GLOBAL(ftrace_caller_op_ptr) | ||
34 | /* Load the ftrace_ops into the 3rd parameter */ | 159 | /* Load the ftrace_ops into the 3rd parameter */ |
35 | movq function_trace_op(%rip), %rdx | 160 | movq function_trace_op(%rip), %rdx |
36 | 161 | ||
37 | /* Load ip into the first parameter */ | ||
38 | movq RIP(%rsp), %rdi | ||
39 | subq $MCOUNT_INSN_SIZE, %rdi | ||
40 | /* Load the parent_ip into the second parameter */ | ||
41 | #ifdef CC_USING_FENTRY | ||
42 | movq SS+16(%rsp), %rsi | ||
43 | #else | ||
44 | movq 8(%rbp), %rsi | ||
45 | #endif | ||
46 | .endm | ||
47 | |||
48 | ENTRY(ftrace_caller) | ||
49 | ftrace_caller_setup | ||
50 | /* regs go into 4th parameter (but make it NULL) */ | 162 | /* regs go into 4th parameter (but make it NULL) */ |
51 | movq $0, %rcx | 163 | movq $0, %rcx |
52 | 164 | ||
53 | GLOBAL(ftrace_call) | 165 | GLOBAL(ftrace_call) |
54 | call ftrace_stub | 166 | call ftrace_stub |
55 | 167 | ||
56 | MCOUNT_RESTORE_FRAME | 168 | restore_mcount_regs |
57 | ftrace_return: | 169 | |
170 | /* | ||
171 | * The copied trampoline must call ftrace_return as it | ||
172 | * still may need to call the function graph tracer. | ||
173 | */ | ||
174 | GLOBAL(ftrace_caller_end) | ||
175 | |||
176 | GLOBAL(ftrace_return) | ||
58 | 177 | ||
59 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 178 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
60 | GLOBAL(ftrace_graph_call) | 179 | GLOBAL(ftrace_graph_call) |
@@ -66,11 +185,16 @@ GLOBAL(ftrace_stub) | |||
66 | END(ftrace_caller) | 185 | END(ftrace_caller) |
67 | 186 | ||
68 | ENTRY(ftrace_regs_caller) | 187 | ENTRY(ftrace_regs_caller) |
69 | /* Save the current flags before compare (in SS location)*/ | 188 | /* Save the current flags before any operations that can change them */ |
70 | pushfq | 189 | pushfq |
71 | 190 | ||
72 | /* skip=8 to skip flags saved in SS */ | 191 | /* added 8 bytes to save flags */ |
73 | ftrace_caller_setup 8 | 192 | save_mcount_regs 8 |
193 | /* save_mcount_regs fills in first two parameters */ | ||
194 | |||
195 | GLOBAL(ftrace_regs_caller_op_ptr) | ||
196 | /* Load the ftrace_ops into the 3rd parameter */ | ||
197 | movq function_trace_op(%rip), %rdx | ||
74 | 198 | ||
75 | /* Save the rest of pt_regs */ | 199 | /* Save the rest of pt_regs */ |
76 | movq %r15, R15(%rsp) | 200 | movq %r15, R15(%rsp) |
@@ -79,18 +203,17 @@ ENTRY(ftrace_regs_caller) | |||
79 | movq %r12, R12(%rsp) | 203 | movq %r12, R12(%rsp) |
80 | movq %r11, R11(%rsp) | 204 | movq %r11, R11(%rsp) |
81 | movq %r10, R10(%rsp) | 205 | movq %r10, R10(%rsp) |
82 | movq %rbp, RBP(%rsp) | ||
83 | movq %rbx, RBX(%rsp) | 206 | movq %rbx, RBX(%rsp) |
84 | /* Copy saved flags */ | 207 | /* Copy saved flags */ |
85 | movq SS(%rsp), %rcx | 208 | movq MCOUNT_REG_SIZE(%rsp), %rcx |
86 | movq %rcx, EFLAGS(%rsp) | 209 | movq %rcx, EFLAGS(%rsp) |
87 | /* Kernel segments */ | 210 | /* Kernel segments */ |
88 | movq $__KERNEL_DS, %rcx | 211 | movq $__KERNEL_DS, %rcx |
89 | movq %rcx, SS(%rsp) | 212 | movq %rcx, SS(%rsp) |
90 | movq $__KERNEL_CS, %rcx | 213 | movq $__KERNEL_CS, %rcx |
91 | movq %rcx, CS(%rsp) | 214 | movq %rcx, CS(%rsp) |
92 | /* Stack - skipping return address */ | 215 | /* Stack - skipping return address and flags */ |
93 | leaq SS+16(%rsp), %rcx | 216 | leaq MCOUNT_REG_SIZE+8*2(%rsp), %rcx |
94 | movq %rcx, RSP(%rsp) | 217 | movq %rcx, RSP(%rsp) |
95 | 218 | ||
96 | /* regs go into 4th parameter */ | 219 | /* regs go into 4th parameter */ |
@@ -101,11 +224,11 @@ GLOBAL(ftrace_regs_call) | |||
101 | 224 | ||
102 | /* Copy flags back to SS, to restore them */ | 225 | /* Copy flags back to SS, to restore them */ |
103 | movq EFLAGS(%rsp), %rax | 226 | movq EFLAGS(%rsp), %rax |
104 | movq %rax, SS(%rsp) | 227 | movq %rax, MCOUNT_REG_SIZE(%rsp) |
105 | 228 | ||
106 | /* Handlers can change the RIP */ | 229 | /* Handlers can change the RIP */ |
107 | movq RIP(%rsp), %rax | 230 | movq RIP(%rsp), %rax |
108 | movq %rax, SS+8(%rsp) | 231 | movq %rax, MCOUNT_REG_SIZE+8(%rsp) |
109 | 232 | ||
110 | /* restore the rest of pt_regs */ | 233 | /* restore the rest of pt_regs */ |
111 | movq R15(%rsp), %r15 | 234 | movq R15(%rsp), %r15 |
@@ -113,19 +236,22 @@ GLOBAL(ftrace_regs_call) | |||
113 | movq R13(%rsp), %r13 | 236 | movq R13(%rsp), %r13 |
114 | movq R12(%rsp), %r12 | 237 | movq R12(%rsp), %r12 |
115 | movq R10(%rsp), %r10 | 238 | movq R10(%rsp), %r10 |
116 | movq RBP(%rsp), %rbp | ||
117 | movq RBX(%rsp), %rbx | 239 | movq RBX(%rsp), %rbx |
118 | 240 | ||
119 | /* skip=8 to skip flags saved in SS */ | 241 | restore_mcount_regs |
120 | MCOUNT_RESTORE_FRAME 8 | ||
121 | 242 | ||
122 | /* Restore flags */ | 243 | /* Restore flags */ |
123 | popfq | 244 | popfq |
124 | 245 | ||
125 | jmp ftrace_return | 246 | /* |
247 | * As this jmp to ftrace_return can be a short jump | ||
248 | * it must not be copied into the trampoline. | ||
249 | * The trampoline will add the code to jump | ||
250 | * to the return. | ||
251 | */ | ||
252 | GLOBAL(ftrace_regs_caller_end) | ||
126 | 253 | ||
127 | popfq | 254 | jmp ftrace_return |
128 | jmp ftrace_stub | ||
129 | 255 | ||
130 | END(ftrace_regs_caller) | 256 | END(ftrace_regs_caller) |
131 | 257 | ||
@@ -136,6 +262,7 @@ ENTRY(function_hook) | |||
136 | cmpq $ftrace_stub, ftrace_trace_function | 262 | cmpq $ftrace_stub, ftrace_trace_function |
137 | jnz trace | 263 | jnz trace |
138 | 264 | ||
265 | fgraph_trace: | ||
139 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 266 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
140 | cmpq $ftrace_stub, ftrace_graph_return | 267 | cmpq $ftrace_stub, ftrace_graph_return |
141 | jnz ftrace_graph_caller | 268 | jnz ftrace_graph_caller |
@@ -148,42 +275,35 @@ GLOBAL(ftrace_stub) | |||
148 | retq | 275 | retq |
149 | 276 | ||
150 | trace: | 277 | trace: |
151 | MCOUNT_SAVE_FRAME | 278 | /* save_mcount_regs fills in first two parameters */ |
152 | 279 | save_mcount_regs | |
153 | movq RIP(%rsp), %rdi | ||
154 | #ifdef CC_USING_FENTRY | ||
155 | movq SS+16(%rsp), %rsi | ||
156 | #else | ||
157 | movq 8(%rbp), %rsi | ||
158 | #endif | ||
159 | subq $MCOUNT_INSN_SIZE, %rdi | ||
160 | 280 | ||
161 | call *ftrace_trace_function | 281 | call *ftrace_trace_function |
162 | 282 | ||
163 | MCOUNT_RESTORE_FRAME | 283 | restore_mcount_regs |
164 | 284 | ||
165 | jmp ftrace_stub | 285 | jmp fgraph_trace |
166 | END(function_hook) | 286 | END(function_hook) |
167 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 287 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
168 | #endif /* CONFIG_FUNCTION_TRACER */ | 288 | #endif /* CONFIG_FUNCTION_TRACER */ |
169 | 289 | ||
170 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 290 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
171 | ENTRY(ftrace_graph_caller) | 291 | ENTRY(ftrace_graph_caller) |
172 | MCOUNT_SAVE_FRAME | 292 | /* Saves rbp into %rdx and fills first parameter */ |
293 | save_mcount_regs | ||
173 | 294 | ||
174 | #ifdef CC_USING_FENTRY | 295 | #ifdef CC_USING_FENTRY |
175 | leaq SS+16(%rsp), %rdi | 296 | leaq MCOUNT_REG_SIZE+8(%rsp), %rsi |
176 | movq $0, %rdx /* No framepointers needed */ | 297 | movq $0, %rdx /* No framepointers needed */ |
177 | #else | 298 | #else |
178 | leaq 8(%rbp), %rdi | 299 | /* Save address of the return address of traced function */ |
179 | movq (%rbp), %rdx | 300 | leaq 8(%rdx), %rsi |
301 | /* ftrace does sanity checks against frame pointers */ | ||
302 | movq (%rdx), %rdx | ||
180 | #endif | 303 | #endif |
181 | movq RIP(%rsp), %rsi | ||
182 | subq $MCOUNT_INSN_SIZE, %rsi | ||
183 | |||
184 | call prepare_ftrace_return | 304 | call prepare_ftrace_return |
185 | 305 | ||
186 | MCOUNT_RESTORE_FRAME | 306 | restore_mcount_regs |
187 | 307 | ||
188 | retq | 308 | retq |
189 | END(ftrace_graph_caller) | 309 | END(ftrace_graph_caller) |
diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h index 5aaf35641768..ce463a9cc8fb 100644 --- a/arch/x86/kvm/mmutrace.h +++ b/arch/x86/kvm/mmutrace.h | |||
@@ -22,7 +22,7 @@ | |||
22 | __entry->unsync = sp->unsync; | 22 | __entry->unsync = sp->unsync; |
23 | 23 | ||
24 | #define KVM_MMU_PAGE_PRINTK() ({ \ | 24 | #define KVM_MMU_PAGE_PRINTK() ({ \ |
25 | const u32 saved_len = p->len; \ | 25 | const char *saved_ptr = trace_seq_buffer_ptr(p); \ |
26 | static const char *access_str[] = { \ | 26 | static const char *access_str[] = { \ |
27 | "---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux" \ | 27 | "---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux" \ |
28 | }; \ | 28 | }; \ |
@@ -41,7 +41,7 @@ | |||
41 | role.nxe ? "" : "!", \ | 41 | role.nxe ? "" : "!", \ |
42 | __entry->root_count, \ | 42 | __entry->root_count, \ |
43 | __entry->unsync ? "unsync" : "sync", 0); \ | 43 | __entry->unsync ? "unsync" : "sync", 0); \ |
44 | p->buffer + saved_len; \ | 44 | saved_ptr; \ |
45 | }) | 45 | }) |
46 | 46 | ||
47 | #define kvm_mmu_trace_pferr_flags \ | 47 | #define kvm_mmu_trace_pferr_flags \ |
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c index 5b53d6183b6b..4fd9961d552e 100644 --- a/drivers/firmware/efi/cper.c +++ b/drivers/firmware/efi/cper.c | |||
@@ -294,7 +294,7 @@ void cper_mem_err_pack(const struct cper_sec_mem_err *mem, | |||
294 | const char *cper_mem_err_unpack(struct trace_seq *p, | 294 | const char *cper_mem_err_unpack(struct trace_seq *p, |
295 | struct cper_mem_err_compact *cmem) | 295 | struct cper_mem_err_compact *cmem) |
296 | { | 296 | { |
297 | const char *ret = p->buffer + p->len; | 297 | const char *ret = trace_seq_buffer_ptr(p); |
298 | 298 | ||
299 | if (cper_mem_err_location(cmem, rcd_decode_str)) | 299 | if (cper_mem_err_location(cmem, rcd_decode_str)) |
300 | trace_seq_printf(p, "%s", rcd_decode_str); | 300 | trace_seq_printf(p, "%s", rcd_decode_str); |
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 662697babd48..ed501953f0b2 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h | |||
@@ -61,6 +61,11 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops); | |||
61 | /* | 61 | /* |
62 | * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are | 62 | * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are |
63 | * set in the flags member. | 63 | * set in the flags member. |
64 | * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and | ||
65 | * IPMODIFY are a kind of attribute flags which can be set only before | ||
66 | * registering the ftrace_ops, and can not be modified while registered. | ||
67 | * Changing those attribute flags after regsitering ftrace_ops will | ||
68 | * cause unexpected results. | ||
64 | * | 69 | * |
65 | * ENABLED - set/unset when ftrace_ops is registered/unregistered | 70 | * ENABLED - set/unset when ftrace_ops is registered/unregistered |
66 | * DYNAMIC - set when ftrace_ops is registered to denote dynamically | 71 | * DYNAMIC - set when ftrace_ops is registered to denote dynamically |
@@ -94,6 +99,17 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops); | |||
94 | * ADDING - The ops is in the process of being added. | 99 | * ADDING - The ops is in the process of being added. |
95 | * REMOVING - The ops is in the process of being removed. | 100 | * REMOVING - The ops is in the process of being removed. |
96 | * MODIFYING - The ops is in the process of changing its filter functions. | 101 | * MODIFYING - The ops is in the process of changing its filter functions. |
102 | * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code. | ||
103 | * The arch specific code sets this flag when it allocated a | ||
104 | * trampoline. This lets the arch know that it can update the | ||
105 | * trampoline in case the callback function changes. | ||
106 | * The ftrace_ops trampoline can be set by the ftrace users, and | ||
107 | * in such cases the arch must not modify it. Only the arch ftrace | ||
108 | * core code should set this flag. | ||
109 | * IPMODIFY - The ops can modify the IP register. This can only be set with | ||
110 | * SAVE_REGS. If another ops with this flag set is already registered | ||
111 | * for any of the functions that this ops will be registered for, then | ||
112 | * this ops will fail to register or set_filter_ip. | ||
97 | */ | 113 | */ |
98 | enum { | 114 | enum { |
99 | FTRACE_OPS_FL_ENABLED = 1 << 0, | 115 | FTRACE_OPS_FL_ENABLED = 1 << 0, |
@@ -108,6 +124,8 @@ enum { | |||
108 | FTRACE_OPS_FL_ADDING = 1 << 9, | 124 | FTRACE_OPS_FL_ADDING = 1 << 9, |
109 | FTRACE_OPS_FL_REMOVING = 1 << 10, | 125 | FTRACE_OPS_FL_REMOVING = 1 << 10, |
110 | FTRACE_OPS_FL_MODIFYING = 1 << 11, | 126 | FTRACE_OPS_FL_MODIFYING = 1 << 11, |
127 | FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12, | ||
128 | FTRACE_OPS_FL_IPMODIFY = 1 << 13, | ||
111 | }; | 129 | }; |
112 | 130 | ||
113 | #ifdef CONFIG_DYNAMIC_FTRACE | 131 | #ifdef CONFIG_DYNAMIC_FTRACE |
@@ -142,6 +160,7 @@ struct ftrace_ops { | |||
142 | struct ftrace_ops_hash *func_hash; | 160 | struct ftrace_ops_hash *func_hash; |
143 | struct ftrace_ops_hash old_hash; | 161 | struct ftrace_ops_hash old_hash; |
144 | unsigned long trampoline; | 162 | unsigned long trampoline; |
163 | unsigned long trampoline_size; | ||
145 | #endif | 164 | #endif |
146 | }; | 165 | }; |
147 | 166 | ||
@@ -255,7 +274,9 @@ struct ftrace_func_command { | |||
255 | int ftrace_arch_code_modify_prepare(void); | 274 | int ftrace_arch_code_modify_prepare(void); |
256 | int ftrace_arch_code_modify_post_process(void); | 275 | int ftrace_arch_code_modify_post_process(void); |
257 | 276 | ||
258 | void ftrace_bug(int err, unsigned long ip); | 277 | struct dyn_ftrace; |
278 | |||
279 | void ftrace_bug(int err, struct dyn_ftrace *rec); | ||
259 | 280 | ||
260 | struct seq_file; | 281 | struct seq_file; |
261 | 282 | ||
@@ -287,6 +308,8 @@ extern int ftrace_text_reserved(const void *start, const void *end); | |||
287 | 308 | ||
288 | extern int ftrace_nr_registered_ops(void); | 309 | extern int ftrace_nr_registered_ops(void); |
289 | 310 | ||
311 | bool is_ftrace_trampoline(unsigned long addr); | ||
312 | |||
290 | /* | 313 | /* |
291 | * The dyn_ftrace record's flags field is split into two parts. | 314 | * The dyn_ftrace record's flags field is split into two parts. |
292 | * the first part which is '0-FTRACE_REF_MAX' is a counter of | 315 | * the first part which is '0-FTRACE_REF_MAX' is a counter of |
@@ -297,6 +320,7 @@ extern int ftrace_nr_registered_ops(void); | |||
297 | * ENABLED - the function is being traced | 320 | * ENABLED - the function is being traced |
298 | * REGS - the record wants the function to save regs | 321 | * REGS - the record wants the function to save regs |
299 | * REGS_EN - the function is set up to save regs. | 322 | * REGS_EN - the function is set up to save regs. |
323 | * IPMODIFY - the record allows for the IP address to be changed. | ||
300 | * | 324 | * |
301 | * When a new ftrace_ops is registered and wants a function to save | 325 | * When a new ftrace_ops is registered and wants a function to save |
302 | * pt_regs, the rec->flag REGS is set. When the function has been | 326 | * pt_regs, the rec->flag REGS is set. When the function has been |
@@ -310,10 +334,11 @@ enum { | |||
310 | FTRACE_FL_REGS_EN = (1UL << 29), | 334 | FTRACE_FL_REGS_EN = (1UL << 29), |
311 | FTRACE_FL_TRAMP = (1UL << 28), | 335 | FTRACE_FL_TRAMP = (1UL << 28), |
312 | FTRACE_FL_TRAMP_EN = (1UL << 27), | 336 | FTRACE_FL_TRAMP_EN = (1UL << 27), |
337 | FTRACE_FL_IPMODIFY = (1UL << 26), | ||
313 | }; | 338 | }; |
314 | 339 | ||
315 | #define FTRACE_REF_MAX_SHIFT 27 | 340 | #define FTRACE_REF_MAX_SHIFT 26 |
316 | #define FTRACE_FL_BITS 5 | 341 | #define FTRACE_FL_BITS 6 |
317 | #define FTRACE_FL_MASKED_BITS ((1UL << FTRACE_FL_BITS) - 1) | 342 | #define FTRACE_FL_MASKED_BITS ((1UL << FTRACE_FL_BITS) - 1) |
318 | #define FTRACE_FL_MASK (FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT) | 343 | #define FTRACE_FL_MASK (FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT) |
319 | #define FTRACE_REF_MAX ((1UL << FTRACE_REF_MAX_SHIFT) - 1) | 344 | #define FTRACE_REF_MAX ((1UL << FTRACE_REF_MAX_SHIFT) - 1) |
@@ -586,6 +611,11 @@ static inline ssize_t ftrace_notrace_write(struct file *file, const char __user | |||
586 | size_t cnt, loff_t *ppos) { return -ENODEV; } | 611 | size_t cnt, loff_t *ppos) { return -ENODEV; } |
587 | static inline int | 612 | static inline int |
588 | ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; } | 613 | ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; } |
614 | |||
615 | static inline bool is_ftrace_trampoline(unsigned long addr) | ||
616 | { | ||
617 | return false; | ||
618 | } | ||
589 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 619 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
590 | 620 | ||
591 | /* totally disable ftrace - can not re-enable after this */ | 621 | /* totally disable ftrace - can not re-enable after this */ |
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 28672e87e910..0bebb5c348b8 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h | |||
@@ -138,6 +138,17 @@ enum print_line_t { | |||
138 | TRACE_TYPE_NO_CONSUME = 3 /* Handled but ask to not consume */ | 138 | TRACE_TYPE_NO_CONSUME = 3 /* Handled but ask to not consume */ |
139 | }; | 139 | }; |
140 | 140 | ||
141 | /* | ||
142 | * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq | ||
143 | * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function | ||
144 | * simplifies those functions and keeps them in sync. | ||
145 | */ | ||
146 | static inline enum print_line_t trace_handle_return(struct trace_seq *s) | ||
147 | { | ||
148 | return trace_seq_has_overflowed(s) ? | ||
149 | TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED; | ||
150 | } | ||
151 | |||
141 | void tracing_generic_entry_update(struct trace_entry *entry, | 152 | void tracing_generic_entry_update(struct trace_entry *entry, |
142 | unsigned long flags, | 153 | unsigned long flags, |
143 | int pc); | 154 | int pc); |
diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h index ea6c9dea79e3..db8a73224f1a 100644 --- a/include/linux/trace_seq.h +++ b/include/linux/trace_seq.h | |||
@@ -40,45 +40,54 @@ trace_seq_buffer_ptr(struct trace_seq *s) | |||
40 | return s->buffer + s->len; | 40 | return s->buffer + s->len; |
41 | } | 41 | } |
42 | 42 | ||
43 | /** | ||
44 | * trace_seq_has_overflowed - return true if the trace_seq took too much | ||
45 | * @s: trace sequence descriptor | ||
46 | * | ||
47 | * Returns true if too much data was added to the trace_seq and it is | ||
48 | * now full and will not take anymore. | ||
49 | */ | ||
50 | static inline bool trace_seq_has_overflowed(struct trace_seq *s) | ||
51 | { | ||
52 | return s->full || s->len > PAGE_SIZE - 1; | ||
53 | } | ||
54 | |||
43 | /* | 55 | /* |
44 | * Currently only defined when tracing is enabled. | 56 | * Currently only defined when tracing is enabled. |
45 | */ | 57 | */ |
46 | #ifdef CONFIG_TRACING | 58 | #ifdef CONFIG_TRACING |
47 | extern __printf(2, 3) | 59 | extern __printf(2, 3) |
48 | int trace_seq_printf(struct trace_seq *s, const char *fmt, ...); | 60 | void trace_seq_printf(struct trace_seq *s, const char *fmt, ...); |
49 | extern __printf(2, 0) | 61 | extern __printf(2, 0) |
50 | int trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args); | 62 | void trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args); |
51 | extern int | 63 | extern void |
52 | trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary); | 64 | trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary); |
53 | extern int trace_print_seq(struct seq_file *m, struct trace_seq *s); | 65 | extern int trace_print_seq(struct seq_file *m, struct trace_seq *s); |
54 | extern int trace_seq_to_user(struct trace_seq *s, char __user *ubuf, | 66 | extern int trace_seq_to_user(struct trace_seq *s, char __user *ubuf, |
55 | int cnt); | 67 | int cnt); |
56 | extern int trace_seq_puts(struct trace_seq *s, const char *str); | 68 | extern void trace_seq_puts(struct trace_seq *s, const char *str); |
57 | extern int trace_seq_putc(struct trace_seq *s, unsigned char c); | 69 | extern void trace_seq_putc(struct trace_seq *s, unsigned char c); |
58 | extern int trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len); | 70 | extern void trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len); |
59 | extern int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, | 71 | extern void trace_seq_putmem_hex(struct trace_seq *s, const void *mem, |
60 | unsigned int len); | 72 | unsigned int len); |
61 | extern int trace_seq_path(struct trace_seq *s, const struct path *path); | 73 | extern int trace_seq_path(struct trace_seq *s, const struct path *path); |
62 | 74 | ||
63 | extern int trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp, | 75 | extern void trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp, |
64 | int nmaskbits); | 76 | int nmaskbits); |
65 | 77 | ||
66 | #else /* CONFIG_TRACING */ | 78 | #else /* CONFIG_TRACING */ |
67 | static inline int trace_seq_printf(struct trace_seq *s, const char *fmt, ...) | 79 | static inline void trace_seq_printf(struct trace_seq *s, const char *fmt, ...) |
68 | { | 80 | { |
69 | return 0; | ||
70 | } | 81 | } |
71 | static inline int | 82 | static inline void |
72 | trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary) | 83 | trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary) |
73 | { | 84 | { |
74 | return 0; | ||
75 | } | 85 | } |
76 | 86 | ||
77 | static inline int | 87 | static inline void |
78 | trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp, | 88 | trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp, |
79 | int nmaskbits) | 89 | int nmaskbits) |
80 | { | 90 | { |
81 | return 0; | ||
82 | } | 91 | } |
83 | 92 | ||
84 | static inline int trace_print_seq(struct seq_file *m, struct trace_seq *s) | 93 | static inline int trace_print_seq(struct seq_file *m, struct trace_seq *s) |
@@ -90,23 +99,19 @@ static inline int trace_seq_to_user(struct trace_seq *s, char __user *ubuf, | |||
90 | { | 99 | { |
91 | return 0; | 100 | return 0; |
92 | } | 101 | } |
93 | static inline int trace_seq_puts(struct trace_seq *s, const char *str) | 102 | static inline void trace_seq_puts(struct trace_seq *s, const char *str) |
94 | { | 103 | { |
95 | return 0; | ||
96 | } | 104 | } |
97 | static inline int trace_seq_putc(struct trace_seq *s, unsigned char c) | 105 | static inline void trace_seq_putc(struct trace_seq *s, unsigned char c) |
98 | { | 106 | { |
99 | return 0; | ||
100 | } | 107 | } |
101 | static inline int | 108 | static inline void |
102 | trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len) | 109 | trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len) |
103 | { | 110 | { |
104 | return 0; | ||
105 | } | 111 | } |
106 | static inline int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, | 112 | static inline void trace_seq_putmem_hex(struct trace_seq *s, const void *mem, |
107 | unsigned int len) | 113 | unsigned int len) |
108 | { | 114 | { |
109 | return 0; | ||
110 | } | 115 | } |
111 | static inline int trace_seq_path(struct trace_seq *s, const struct path *path) | 116 | static inline int trace_seq_path(struct trace_seq *s, const struct path *path) |
112 | { | 117 | { |
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index 26b4f2e13275..139b5067345b 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h | |||
@@ -277,14 +277,12 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \ | |||
277 | field = (typeof(field))iter->ent; \ | 277 | field = (typeof(field))iter->ent; \ |
278 | \ | 278 | \ |
279 | ret = ftrace_raw_output_prep(iter, trace_event); \ | 279 | ret = ftrace_raw_output_prep(iter, trace_event); \ |
280 | if (ret) \ | 280 | if (ret != TRACE_TYPE_HANDLED) \ |
281 | return ret; \ | 281 | return ret; \ |
282 | \ | 282 | \ |
283 | ret = trace_seq_printf(s, print); \ | 283 | trace_seq_printf(s, print); \ |
284 | if (!ret) \ | ||
285 | return TRACE_TYPE_PARTIAL_LINE; \ | ||
286 | \ | 284 | \ |
287 | return TRACE_TYPE_HANDLED; \ | 285 | return trace_handle_return(s); \ |
288 | } \ | 286 | } \ |
289 | static struct trace_event_functions ftrace_event_type_funcs_##call = { \ | 287 | static struct trace_event_functions ftrace_event_type_funcs_##call = { \ |
290 | .trace = ftrace_raw_output_##call, \ | 288 | .trace = ftrace_raw_output_##call, \ |
diff --git a/kernel/extable.c b/kernel/extable.c index d8a6446adbcb..c98f926277a8 100644 --- a/kernel/extable.c +++ b/kernel/extable.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/ftrace.h> | 18 | #include <linux/ftrace.h> |
19 | #include <linux/memory.h> | 19 | #include <linux/memory.h> |
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/ftrace.h> | ||
21 | #include <linux/mutex.h> | 22 | #include <linux/mutex.h> |
22 | #include <linux/init.h> | 23 | #include <linux/init.h> |
23 | 24 | ||
@@ -102,6 +103,8 @@ int __kernel_text_address(unsigned long addr) | |||
102 | return 1; | 103 | return 1; |
103 | if (is_module_text_address(addr)) | 104 | if (is_module_text_address(addr)) |
104 | return 1; | 105 | return 1; |
106 | if (is_ftrace_trampoline(addr)) | ||
107 | return 1; | ||
105 | /* | 108 | /* |
106 | * There might be init symbols in saved stacktraces. | 109 | * There might be init symbols in saved stacktraces. |
107 | * Give those symbols a chance to be printed in | 110 | * Give those symbols a chance to be printed in |
@@ -119,7 +122,9 @@ int kernel_text_address(unsigned long addr) | |||
119 | { | 122 | { |
120 | if (core_kernel_text(addr)) | 123 | if (core_kernel_text(addr)) |
121 | return 1; | 124 | return 1; |
122 | return is_module_text_address(addr); | 125 | if (is_module_text_address(addr)) |
126 | return 1; | ||
127 | return is_ftrace_trampoline(addr); | ||
123 | } | 128 | } |
124 | 129 | ||
125 | /* | 130 | /* |
diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 3995f546d0f3..831978cebf1d 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c | |||
@@ -915,7 +915,7 @@ static struct kprobe *alloc_aggr_kprobe(struct kprobe *p) | |||
915 | #ifdef CONFIG_KPROBES_ON_FTRACE | 915 | #ifdef CONFIG_KPROBES_ON_FTRACE |
916 | static struct ftrace_ops kprobe_ftrace_ops __read_mostly = { | 916 | static struct ftrace_ops kprobe_ftrace_ops __read_mostly = { |
917 | .func = kprobe_ftrace_handler, | 917 | .func = kprobe_ftrace_handler, |
918 | .flags = FTRACE_OPS_FL_SAVE_REGS, | 918 | .flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY, |
919 | }; | 919 | }; |
920 | static int kprobe_ftrace_enabled; | 920 | static int kprobe_ftrace_enabled; |
921 | 921 | ||
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index c1bd4ada2a04..11b9cb36092b 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c | |||
@@ -1142,9 +1142,9 @@ static void get_pdu_remap(const struct trace_entry *ent, | |||
1142 | r->sector_from = be64_to_cpu(sector_from); | 1142 | r->sector_from = be64_to_cpu(sector_from); |
1143 | } | 1143 | } |
1144 | 1144 | ||
1145 | typedef int (blk_log_action_t) (struct trace_iterator *iter, const char *act); | 1145 | typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act); |
1146 | 1146 | ||
1147 | static int blk_log_action_classic(struct trace_iterator *iter, const char *act) | 1147 | static void blk_log_action_classic(struct trace_iterator *iter, const char *act) |
1148 | { | 1148 | { |
1149 | char rwbs[RWBS_LEN]; | 1149 | char rwbs[RWBS_LEN]; |
1150 | unsigned long long ts = iter->ts; | 1150 | unsigned long long ts = iter->ts; |
@@ -1154,33 +1154,33 @@ static int blk_log_action_classic(struct trace_iterator *iter, const char *act) | |||
1154 | 1154 | ||
1155 | fill_rwbs(rwbs, t); | 1155 | fill_rwbs(rwbs, t); |
1156 | 1156 | ||
1157 | return trace_seq_printf(&iter->seq, | 1157 | trace_seq_printf(&iter->seq, |
1158 | "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ", | 1158 | "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ", |
1159 | MAJOR(t->device), MINOR(t->device), iter->cpu, | 1159 | MAJOR(t->device), MINOR(t->device), iter->cpu, |
1160 | secs, nsec_rem, iter->ent->pid, act, rwbs); | 1160 | secs, nsec_rem, iter->ent->pid, act, rwbs); |
1161 | } | 1161 | } |
1162 | 1162 | ||
1163 | static int blk_log_action(struct trace_iterator *iter, const char *act) | 1163 | static void blk_log_action(struct trace_iterator *iter, const char *act) |
1164 | { | 1164 | { |
1165 | char rwbs[RWBS_LEN]; | 1165 | char rwbs[RWBS_LEN]; |
1166 | const struct blk_io_trace *t = te_blk_io_trace(iter->ent); | 1166 | const struct blk_io_trace *t = te_blk_io_trace(iter->ent); |
1167 | 1167 | ||
1168 | fill_rwbs(rwbs, t); | 1168 | fill_rwbs(rwbs, t); |
1169 | return trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ", | 1169 | trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ", |
1170 | MAJOR(t->device), MINOR(t->device), act, rwbs); | 1170 | MAJOR(t->device), MINOR(t->device), act, rwbs); |
1171 | } | 1171 | } |
1172 | 1172 | ||
1173 | static int blk_log_dump_pdu(struct trace_seq *s, const struct trace_entry *ent) | 1173 | static void blk_log_dump_pdu(struct trace_seq *s, const struct trace_entry *ent) |
1174 | { | 1174 | { |
1175 | const unsigned char *pdu_buf; | 1175 | const unsigned char *pdu_buf; |
1176 | int pdu_len; | 1176 | int pdu_len; |
1177 | int i, end, ret; | 1177 | int i, end; |
1178 | 1178 | ||
1179 | pdu_buf = pdu_start(ent); | 1179 | pdu_buf = pdu_start(ent); |
1180 | pdu_len = te_blk_io_trace(ent)->pdu_len; | 1180 | pdu_len = te_blk_io_trace(ent)->pdu_len; |
1181 | 1181 | ||
1182 | if (!pdu_len) | 1182 | if (!pdu_len) |
1183 | return 1; | 1183 | return; |
1184 | 1184 | ||
1185 | /* find the last zero that needs to be printed */ | 1185 | /* find the last zero that needs to be printed */ |
1186 | for (end = pdu_len - 1; end >= 0; end--) | 1186 | for (end = pdu_len - 1; end >= 0; end--) |
@@ -1188,119 +1188,107 @@ static int blk_log_dump_pdu(struct trace_seq *s, const struct trace_entry *ent) | |||
1188 | break; | 1188 | break; |
1189 | end++; | 1189 | end++; |
1190 | 1190 | ||
1191 | if (!trace_seq_putc(s, '(')) | 1191 | trace_seq_putc(s, '('); |
1192 | return 0; | ||
1193 | 1192 | ||
1194 | for (i = 0; i < pdu_len; i++) { | 1193 | for (i = 0; i < pdu_len; i++) { |
1195 | 1194 | ||
1196 | ret = trace_seq_printf(s, "%s%02x", | 1195 | trace_seq_printf(s, "%s%02x", |
1197 | i == 0 ? "" : " ", pdu_buf[i]); | 1196 | i == 0 ? "" : " ", pdu_buf[i]); |
1198 | if (!ret) | ||
1199 | return ret; | ||
1200 | 1197 | ||
1201 | /* | 1198 | /* |
1202 | * stop when the rest is just zeroes and indicate so | 1199 | * stop when the rest is just zeroes and indicate so |
1203 | * with a ".." appended | 1200 | * with a ".." appended |
1204 | */ | 1201 | */ |
1205 | if (i == end && end != pdu_len - 1) | 1202 | if (i == end && end != pdu_len - 1) { |
1206 | return trace_seq_puts(s, " ..) "); | 1203 | trace_seq_puts(s, " ..) "); |
1204 | return; | ||
1205 | } | ||
1207 | } | 1206 | } |
1208 | 1207 | ||
1209 | return trace_seq_puts(s, ") "); | 1208 | trace_seq_puts(s, ") "); |
1210 | } | 1209 | } |
1211 | 1210 | ||
1212 | static int blk_log_generic(struct trace_seq *s, const struct trace_entry *ent) | 1211 | static void blk_log_generic(struct trace_seq *s, const struct trace_entry *ent) |
1213 | { | 1212 | { |
1214 | char cmd[TASK_COMM_LEN]; | 1213 | char cmd[TASK_COMM_LEN]; |
1215 | 1214 | ||
1216 | trace_find_cmdline(ent->pid, cmd); | 1215 | trace_find_cmdline(ent->pid, cmd); |
1217 | 1216 | ||
1218 | if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) { | 1217 | if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) { |
1219 | int ret; | 1218 | trace_seq_printf(s, "%u ", t_bytes(ent)); |
1220 | 1219 | blk_log_dump_pdu(s, ent); | |
1221 | ret = trace_seq_printf(s, "%u ", t_bytes(ent)); | 1220 | trace_seq_printf(s, "[%s]\n", cmd); |
1222 | if (!ret) | ||
1223 | return 0; | ||
1224 | ret = blk_log_dump_pdu(s, ent); | ||
1225 | if (!ret) | ||
1226 | return 0; | ||
1227 | return trace_seq_printf(s, "[%s]\n", cmd); | ||
1228 | } else { | 1221 | } else { |
1229 | if (t_sec(ent)) | 1222 | if (t_sec(ent)) |
1230 | return trace_seq_printf(s, "%llu + %u [%s]\n", | 1223 | trace_seq_printf(s, "%llu + %u [%s]\n", |
1231 | t_sector(ent), t_sec(ent), cmd); | 1224 | t_sector(ent), t_sec(ent), cmd); |
1232 | return trace_seq_printf(s, "[%s]\n", cmd); | 1225 | else |
1226 | trace_seq_printf(s, "[%s]\n", cmd); | ||
1233 | } | 1227 | } |
1234 | } | 1228 | } |
1235 | 1229 | ||
1236 | static int blk_log_with_error(struct trace_seq *s, | 1230 | static void blk_log_with_error(struct trace_seq *s, |
1237 | const struct trace_entry *ent) | 1231 | const struct trace_entry *ent) |
1238 | { | 1232 | { |
1239 | if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) { | 1233 | if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) { |
1240 | int ret; | 1234 | blk_log_dump_pdu(s, ent); |
1241 | 1235 | trace_seq_printf(s, "[%d]\n", t_error(ent)); | |
1242 | ret = blk_log_dump_pdu(s, ent); | ||
1243 | if (ret) | ||
1244 | return trace_seq_printf(s, "[%d]\n", t_error(ent)); | ||
1245 | return 0; | ||
1246 | } else { | 1236 | } else { |
1247 | if (t_sec(ent)) | 1237 | if (t_sec(ent)) |
1248 | return trace_seq_printf(s, "%llu + %u [%d]\n", | 1238 | trace_seq_printf(s, "%llu + %u [%d]\n", |
1249 | t_sector(ent), | 1239 | t_sector(ent), |
1250 | t_sec(ent), t_error(ent)); | 1240 | t_sec(ent), t_error(ent)); |
1251 | return trace_seq_printf(s, "%llu [%d]\n", | 1241 | else |
1252 | t_sector(ent), t_error(ent)); | 1242 | trace_seq_printf(s, "%llu [%d]\n", |
1243 | t_sector(ent), t_error(ent)); | ||
1253 | } | 1244 | } |
1254 | } | 1245 | } |
1255 | 1246 | ||
1256 | static int blk_log_remap(struct trace_seq *s, const struct trace_entry *ent) | 1247 | static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent) |
1257 | { | 1248 | { |
1258 | struct blk_io_trace_remap r = { .device_from = 0, }; | 1249 | struct blk_io_trace_remap r = { .device_from = 0, }; |
1259 | 1250 | ||
1260 | get_pdu_remap(ent, &r); | 1251 | get_pdu_remap(ent, &r); |
1261 | return trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n", | 1252 | trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n", |
1262 | t_sector(ent), t_sec(ent), | 1253 | t_sector(ent), t_sec(ent), |
1263 | MAJOR(r.device_from), MINOR(r.device_from), | 1254 | MAJOR(r.device_from), MINOR(r.device_from), |
1264 | (unsigned long long)r.sector_from); | 1255 | (unsigned long long)r.sector_from); |
1265 | } | 1256 | } |
1266 | 1257 | ||
1267 | static int blk_log_plug(struct trace_seq *s, const struct trace_entry *ent) | 1258 | static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent) |
1268 | { | 1259 | { |
1269 | char cmd[TASK_COMM_LEN]; | 1260 | char cmd[TASK_COMM_LEN]; |
1270 | 1261 | ||
1271 | trace_find_cmdline(ent->pid, cmd); | 1262 | trace_find_cmdline(ent->pid, cmd); |
1272 | 1263 | ||
1273 | return trace_seq_printf(s, "[%s]\n", cmd); | 1264 | trace_seq_printf(s, "[%s]\n", cmd); |
1274 | } | 1265 | } |
1275 | 1266 | ||
1276 | static int blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent) | 1267 | static void blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent) |
1277 | { | 1268 | { |
1278 | char cmd[TASK_COMM_LEN]; | 1269 | char cmd[TASK_COMM_LEN]; |
1279 | 1270 | ||
1280 | trace_find_cmdline(ent->pid, cmd); | 1271 | trace_find_cmdline(ent->pid, cmd); |
1281 | 1272 | ||
1282 | return trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent)); | 1273 | trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent)); |
1283 | } | 1274 | } |
1284 | 1275 | ||
1285 | static int blk_log_split(struct trace_seq *s, const struct trace_entry *ent) | 1276 | static void blk_log_split(struct trace_seq *s, const struct trace_entry *ent) |
1286 | { | 1277 | { |
1287 | char cmd[TASK_COMM_LEN]; | 1278 | char cmd[TASK_COMM_LEN]; |
1288 | 1279 | ||
1289 | trace_find_cmdline(ent->pid, cmd); | 1280 | trace_find_cmdline(ent->pid, cmd); |
1290 | 1281 | ||
1291 | return trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent), | 1282 | trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent), |
1292 | get_pdu_int(ent), cmd); | 1283 | get_pdu_int(ent), cmd); |
1293 | } | 1284 | } |
1294 | 1285 | ||
1295 | static int blk_log_msg(struct trace_seq *s, const struct trace_entry *ent) | 1286 | static void blk_log_msg(struct trace_seq *s, const struct trace_entry *ent) |
1296 | { | 1287 | { |
1297 | int ret; | ||
1298 | const struct blk_io_trace *t = te_blk_io_trace(ent); | 1288 | const struct blk_io_trace *t = te_blk_io_trace(ent); |
1299 | 1289 | ||
1300 | ret = trace_seq_putmem(s, t + 1, t->pdu_len); | 1290 | trace_seq_putmem(s, t + 1, t->pdu_len); |
1301 | if (ret) | 1291 | trace_seq_putc(s, '\n'); |
1302 | return trace_seq_putc(s, '\n'); | ||
1303 | return ret; | ||
1304 | } | 1292 | } |
1305 | 1293 | ||
1306 | /* | 1294 | /* |
@@ -1339,7 +1327,7 @@ static void blk_tracer_reset(struct trace_array *tr) | |||
1339 | 1327 | ||
1340 | static const struct { | 1328 | static const struct { |
1341 | const char *act[2]; | 1329 | const char *act[2]; |
1342 | int (*print)(struct trace_seq *s, const struct trace_entry *ent); | 1330 | void (*print)(struct trace_seq *s, const struct trace_entry *ent); |
1343 | } what2act[] = { | 1331 | } what2act[] = { |
1344 | [__BLK_TA_QUEUE] = {{ "Q", "queue" }, blk_log_generic }, | 1332 | [__BLK_TA_QUEUE] = {{ "Q", "queue" }, blk_log_generic }, |
1345 | [__BLK_TA_BACKMERGE] = {{ "M", "backmerge" }, blk_log_generic }, | 1333 | [__BLK_TA_BACKMERGE] = {{ "M", "backmerge" }, blk_log_generic }, |
@@ -1364,7 +1352,6 @@ static enum print_line_t print_one_line(struct trace_iterator *iter, | |||
1364 | struct trace_seq *s = &iter->seq; | 1352 | struct trace_seq *s = &iter->seq; |
1365 | const struct blk_io_trace *t; | 1353 | const struct blk_io_trace *t; |
1366 | u16 what; | 1354 | u16 what; |
1367 | int ret; | ||
1368 | bool long_act; | 1355 | bool long_act; |
1369 | blk_log_action_t *log_action; | 1356 | blk_log_action_t *log_action; |
1370 | 1357 | ||
@@ -1374,21 +1361,18 @@ static enum print_line_t print_one_line(struct trace_iterator *iter, | |||
1374 | log_action = classic ? &blk_log_action_classic : &blk_log_action; | 1361 | log_action = classic ? &blk_log_action_classic : &blk_log_action; |
1375 | 1362 | ||
1376 | if (t->action == BLK_TN_MESSAGE) { | 1363 | if (t->action == BLK_TN_MESSAGE) { |
1377 | ret = log_action(iter, long_act ? "message" : "m"); | 1364 | log_action(iter, long_act ? "message" : "m"); |
1378 | if (ret) | 1365 | blk_log_msg(s, iter->ent); |
1379 | ret = blk_log_msg(s, iter->ent); | ||
1380 | goto out; | ||
1381 | } | 1366 | } |
1382 | 1367 | ||
1383 | if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act))) | 1368 | if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act))) |
1384 | ret = trace_seq_printf(s, "Unknown action %x\n", what); | 1369 | trace_seq_printf(s, "Unknown action %x\n", what); |
1385 | else { | 1370 | else { |
1386 | ret = log_action(iter, what2act[what].act[long_act]); | 1371 | log_action(iter, what2act[what].act[long_act]); |
1387 | if (ret) | 1372 | what2act[what].print(s, iter->ent); |
1388 | ret = what2act[what].print(s, iter->ent); | ||
1389 | } | 1373 | } |
1390 | out: | 1374 | |
1391 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; | 1375 | return trace_handle_return(s); |
1392 | } | 1376 | } |
1393 | 1377 | ||
1394 | static enum print_line_t blk_trace_event_print(struct trace_iterator *iter, | 1378 | static enum print_line_t blk_trace_event_print(struct trace_iterator *iter, |
@@ -1397,7 +1381,7 @@ static enum print_line_t blk_trace_event_print(struct trace_iterator *iter, | |||
1397 | return print_one_line(iter, false); | 1381 | return print_one_line(iter, false); |
1398 | } | 1382 | } |
1399 | 1383 | ||
1400 | static int blk_trace_synthesize_old_trace(struct trace_iterator *iter) | 1384 | static void blk_trace_synthesize_old_trace(struct trace_iterator *iter) |
1401 | { | 1385 | { |
1402 | struct trace_seq *s = &iter->seq; | 1386 | struct trace_seq *s = &iter->seq; |
1403 | struct blk_io_trace *t = (struct blk_io_trace *)iter->ent; | 1387 | struct blk_io_trace *t = (struct blk_io_trace *)iter->ent; |
@@ -1407,18 +1391,18 @@ static int blk_trace_synthesize_old_trace(struct trace_iterator *iter) | |||
1407 | .time = iter->ts, | 1391 | .time = iter->ts, |
1408 | }; | 1392 | }; |
1409 | 1393 | ||
1410 | if (!trace_seq_putmem(s, &old, offset)) | 1394 | trace_seq_putmem(s, &old, offset); |
1411 | return 0; | 1395 | trace_seq_putmem(s, &t->sector, |
1412 | return trace_seq_putmem(s, &t->sector, | 1396 | sizeof(old) - offset + t->pdu_len); |
1413 | sizeof(old) - offset + t->pdu_len); | ||
1414 | } | 1397 | } |
1415 | 1398 | ||
1416 | static enum print_line_t | 1399 | static enum print_line_t |
1417 | blk_trace_event_print_binary(struct trace_iterator *iter, int flags, | 1400 | blk_trace_event_print_binary(struct trace_iterator *iter, int flags, |
1418 | struct trace_event *event) | 1401 | struct trace_event *event) |
1419 | { | 1402 | { |
1420 | return blk_trace_synthesize_old_trace(iter) ? | 1403 | blk_trace_synthesize_old_trace(iter); |
1421 | TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; | 1404 | |
1405 | return trace_handle_return(&iter->seq); | ||
1422 | } | 1406 | } |
1423 | 1407 | ||
1424 | static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter) | 1408 | static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter) |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 31c90fec4158..929a733d302e 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -387,6 +387,8 @@ static int remove_ftrace_list_ops(struct ftrace_ops **list, | |||
387 | return ret; | 387 | return ret; |
388 | } | 388 | } |
389 | 389 | ||
390 | static void ftrace_update_trampoline(struct ftrace_ops *ops); | ||
391 | |||
390 | static int __register_ftrace_function(struct ftrace_ops *ops) | 392 | static int __register_ftrace_function(struct ftrace_ops *ops) |
391 | { | 393 | { |
392 | if (ops->flags & FTRACE_OPS_FL_DELETED) | 394 | if (ops->flags & FTRACE_OPS_FL_DELETED) |
@@ -416,9 +418,13 @@ static int __register_ftrace_function(struct ftrace_ops *ops) | |||
416 | if (control_ops_alloc(ops)) | 418 | if (control_ops_alloc(ops)) |
417 | return -ENOMEM; | 419 | return -ENOMEM; |
418 | add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops); | 420 | add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops); |
421 | /* The control_ops needs the trampoline update */ | ||
422 | ops = &control_ops; | ||
419 | } else | 423 | } else |
420 | add_ftrace_ops(&ftrace_ops_list, ops); | 424 | add_ftrace_ops(&ftrace_ops_list, ops); |
421 | 425 | ||
426 | ftrace_update_trampoline(ops); | ||
427 | |||
422 | if (ftrace_enabled) | 428 | if (ftrace_enabled) |
423 | update_ftrace_function(); | 429 | update_ftrace_function(); |
424 | 430 | ||
@@ -565,13 +571,13 @@ static int function_stat_cmp(void *p1, void *p2) | |||
565 | static int function_stat_headers(struct seq_file *m) | 571 | static int function_stat_headers(struct seq_file *m) |
566 | { | 572 | { |
567 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 573 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
568 | seq_printf(m, " Function " | 574 | seq_puts(m, " Function " |
569 | "Hit Time Avg s^2\n" | 575 | "Hit Time Avg s^2\n" |
570 | " -------- " | 576 | " -------- " |
571 | "--- ---- --- ---\n"); | 577 | "--- ---- --- ---\n"); |
572 | #else | 578 | #else |
573 | seq_printf(m, " Function Hit\n" | 579 | seq_puts(m, " Function Hit\n" |
574 | " -------- ---\n"); | 580 | " -------- ---\n"); |
575 | #endif | 581 | #endif |
576 | return 0; | 582 | return 0; |
577 | } | 583 | } |
@@ -598,7 +604,7 @@ static int function_stat_show(struct seq_file *m, void *v) | |||
598 | seq_printf(m, " %-30.30s %10lu", str, rec->counter); | 604 | seq_printf(m, " %-30.30s %10lu", str, rec->counter); |
599 | 605 | ||
600 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 606 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
601 | seq_printf(m, " "); | 607 | seq_puts(m, " "); |
602 | avg = rec->time; | 608 | avg = rec->time; |
603 | do_div(avg, rec->counter); | 609 | do_div(avg, rec->counter); |
604 | 610 | ||
@@ -1111,6 +1117,43 @@ static struct ftrace_ops global_ops = { | |||
1111 | FTRACE_OPS_FL_INITIALIZED, | 1117 | FTRACE_OPS_FL_INITIALIZED, |
1112 | }; | 1118 | }; |
1113 | 1119 | ||
1120 | /* | ||
1121 | * This is used by __kernel_text_address() to return true if the | ||
1122 | * address is on a dynamically allocated trampoline that would | ||
1123 | * not return true for either core_kernel_text() or | ||
1124 | * is_module_text_address(). | ||
1125 | */ | ||
1126 | bool is_ftrace_trampoline(unsigned long addr) | ||
1127 | { | ||
1128 | struct ftrace_ops *op; | ||
1129 | bool ret = false; | ||
1130 | |||
1131 | /* | ||
1132 | * Some of the ops may be dynamically allocated, | ||
1133 | * they are freed after a synchronize_sched(). | ||
1134 | */ | ||
1135 | preempt_disable_notrace(); | ||
1136 | |||
1137 | do_for_each_ftrace_op(op, ftrace_ops_list) { | ||
1138 | /* | ||
1139 | * This is to check for dynamically allocated trampolines. | ||
1140 | * Trampolines that are in kernel text will have | ||
1141 | * core_kernel_text() return true. | ||
1142 | */ | ||
1143 | if (op->trampoline && op->trampoline_size) | ||
1144 | if (addr >= op->trampoline && | ||
1145 | addr < op->trampoline + op->trampoline_size) { | ||
1146 | ret = true; | ||
1147 | goto out; | ||
1148 | } | ||
1149 | } while_for_each_ftrace_op(op); | ||
1150 | |||
1151 | out: | ||
1152 | preempt_enable_notrace(); | ||
1153 | |||
1154 | return ret; | ||
1155 | } | ||
1156 | |||
1114 | struct ftrace_page { | 1157 | struct ftrace_page { |
1115 | struct ftrace_page *next; | 1158 | struct ftrace_page *next; |
1116 | struct dyn_ftrace *records; | 1159 | struct dyn_ftrace *records; |
@@ -1315,6 +1358,9 @@ ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash); | |||
1315 | static void | 1358 | static void |
1316 | ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash); | 1359 | ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash); |
1317 | 1360 | ||
1361 | static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops, | ||
1362 | struct ftrace_hash *new_hash); | ||
1363 | |||
1318 | static int | 1364 | static int |
1319 | ftrace_hash_move(struct ftrace_ops *ops, int enable, | 1365 | ftrace_hash_move(struct ftrace_ops *ops, int enable, |
1320 | struct ftrace_hash **dst, struct ftrace_hash *src) | 1366 | struct ftrace_hash **dst, struct ftrace_hash *src) |
@@ -1325,8 +1371,13 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable, | |||
1325 | struct ftrace_hash *new_hash; | 1371 | struct ftrace_hash *new_hash; |
1326 | int size = src->count; | 1372 | int size = src->count; |
1327 | int bits = 0; | 1373 | int bits = 0; |
1374 | int ret; | ||
1328 | int i; | 1375 | int i; |
1329 | 1376 | ||
1377 | /* Reject setting notrace hash on IPMODIFY ftrace_ops */ | ||
1378 | if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable) | ||
1379 | return -EINVAL; | ||
1380 | |||
1330 | /* | 1381 | /* |
1331 | * If the new source is empty, just free dst and assign it | 1382 | * If the new source is empty, just free dst and assign it |
1332 | * the empty_hash. | 1383 | * the empty_hash. |
@@ -1360,6 +1411,16 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable, | |||
1360 | } | 1411 | } |
1361 | 1412 | ||
1362 | update: | 1413 | update: |
1414 | /* Make sure this can be applied if it is IPMODIFY ftrace_ops */ | ||
1415 | if (enable) { | ||
1416 | /* IPMODIFY should be updated only when filter_hash updating */ | ||
1417 | ret = ftrace_hash_ipmodify_update(ops, new_hash); | ||
1418 | if (ret < 0) { | ||
1419 | free_ftrace_hash(new_hash); | ||
1420 | return ret; | ||
1421 | } | ||
1422 | } | ||
1423 | |||
1363 | /* | 1424 | /* |
1364 | * Remove the current set, update the hash and add | 1425 | * Remove the current set, update the hash and add |
1365 | * them back. | 1426 | * them back. |
@@ -1724,6 +1785,114 @@ static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, | |||
1724 | ftrace_hash_rec_update_modify(ops, filter_hash, 1); | 1785 | ftrace_hash_rec_update_modify(ops, filter_hash, 1); |
1725 | } | 1786 | } |
1726 | 1787 | ||
1788 | /* | ||
1789 | * Try to update IPMODIFY flag on each ftrace_rec. Return 0 if it is OK | ||
1790 | * or no-needed to update, -EBUSY if it detects a conflict of the flag | ||
1791 | * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs. | ||
1792 | * Note that old_hash and new_hash has below meanings | ||
1793 | * - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected) | ||
1794 | * - If the hash is EMPTY_HASH, it hits nothing | ||
1795 | * - Anything else hits the recs which match the hash entries. | ||
1796 | */ | ||
1797 | static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops, | ||
1798 | struct ftrace_hash *old_hash, | ||
1799 | struct ftrace_hash *new_hash) | ||
1800 | { | ||
1801 | struct ftrace_page *pg; | ||
1802 | struct dyn_ftrace *rec, *end = NULL; | ||
1803 | int in_old, in_new; | ||
1804 | |||
1805 | /* Only update if the ops has been registered */ | ||
1806 | if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) | ||
1807 | return 0; | ||
1808 | |||
1809 | if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY)) | ||
1810 | return 0; | ||
1811 | |||
1812 | /* | ||
1813 | * Since the IPMODIFY is a very address sensitive action, we do not | ||
1814 | * allow ftrace_ops to set all functions to new hash. | ||
1815 | */ | ||
1816 | if (!new_hash || !old_hash) | ||
1817 | return -EINVAL; | ||
1818 | |||
1819 | /* Update rec->flags */ | ||
1820 | do_for_each_ftrace_rec(pg, rec) { | ||
1821 | /* We need to update only differences of filter_hash */ | ||
1822 | in_old = !!ftrace_lookup_ip(old_hash, rec->ip); | ||
1823 | in_new = !!ftrace_lookup_ip(new_hash, rec->ip); | ||
1824 | if (in_old == in_new) | ||
1825 | continue; | ||
1826 | |||
1827 | if (in_new) { | ||
1828 | /* New entries must ensure no others are using it */ | ||
1829 | if (rec->flags & FTRACE_FL_IPMODIFY) | ||
1830 | goto rollback; | ||
1831 | rec->flags |= FTRACE_FL_IPMODIFY; | ||
1832 | } else /* Removed entry */ | ||
1833 | rec->flags &= ~FTRACE_FL_IPMODIFY; | ||
1834 | } while_for_each_ftrace_rec(); | ||
1835 | |||
1836 | return 0; | ||
1837 | |||
1838 | rollback: | ||
1839 | end = rec; | ||
1840 | |||
1841 | /* Roll back what we did above */ | ||
1842 | do_for_each_ftrace_rec(pg, rec) { | ||
1843 | if (rec == end) | ||
1844 | goto err_out; | ||
1845 | |||
1846 | in_old = !!ftrace_lookup_ip(old_hash, rec->ip); | ||
1847 | in_new = !!ftrace_lookup_ip(new_hash, rec->ip); | ||
1848 | if (in_old == in_new) | ||
1849 | continue; | ||
1850 | |||
1851 | if (in_new) | ||
1852 | rec->flags &= ~FTRACE_FL_IPMODIFY; | ||
1853 | else | ||
1854 | rec->flags |= FTRACE_FL_IPMODIFY; | ||
1855 | } while_for_each_ftrace_rec(); | ||
1856 | |||
1857 | err_out: | ||
1858 | return -EBUSY; | ||
1859 | } | ||
1860 | |||
1861 | static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops) | ||
1862 | { | ||
1863 | struct ftrace_hash *hash = ops->func_hash->filter_hash; | ||
1864 | |||
1865 | if (ftrace_hash_empty(hash)) | ||
1866 | hash = NULL; | ||
1867 | |||
1868 | return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash); | ||
1869 | } | ||
1870 | |||
1871 | /* Disabling always succeeds */ | ||
1872 | static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops) | ||
1873 | { | ||
1874 | struct ftrace_hash *hash = ops->func_hash->filter_hash; | ||
1875 | |||
1876 | if (ftrace_hash_empty(hash)) | ||
1877 | hash = NULL; | ||
1878 | |||
1879 | __ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH); | ||
1880 | } | ||
1881 | |||
1882 | static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops, | ||
1883 | struct ftrace_hash *new_hash) | ||
1884 | { | ||
1885 | struct ftrace_hash *old_hash = ops->func_hash->filter_hash; | ||
1886 | |||
1887 | if (ftrace_hash_empty(old_hash)) | ||
1888 | old_hash = NULL; | ||
1889 | |||
1890 | if (ftrace_hash_empty(new_hash)) | ||
1891 | new_hash = NULL; | ||
1892 | |||
1893 | return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash); | ||
1894 | } | ||
1895 | |||
1727 | static void print_ip_ins(const char *fmt, unsigned char *p) | 1896 | static void print_ip_ins(const char *fmt, unsigned char *p) |
1728 | { | 1897 | { |
1729 | int i; | 1898 | int i; |
@@ -1734,10 +1903,13 @@ static void print_ip_ins(const char *fmt, unsigned char *p) | |||
1734 | printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]); | 1903 | printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]); |
1735 | } | 1904 | } |
1736 | 1905 | ||
1906 | static struct ftrace_ops * | ||
1907 | ftrace_find_tramp_ops_any(struct dyn_ftrace *rec); | ||
1908 | |||
1737 | /** | 1909 | /** |
1738 | * ftrace_bug - report and shutdown function tracer | 1910 | * ftrace_bug - report and shutdown function tracer |
1739 | * @failed: The failed type (EFAULT, EINVAL, EPERM) | 1911 | * @failed: The failed type (EFAULT, EINVAL, EPERM) |
1740 | * @ip: The address that failed | 1912 | * @rec: The record that failed |
1741 | * | 1913 | * |
1742 | * The arch code that enables or disables the function tracing | 1914 | * The arch code that enables or disables the function tracing |
1743 | * can call ftrace_bug() when it has detected a problem in | 1915 | * can call ftrace_bug() when it has detected a problem in |
@@ -1746,8 +1918,10 @@ static void print_ip_ins(const char *fmt, unsigned char *p) | |||
1746 | * EINVAL - if what is read at @ip is not what was expected | 1918 | * EINVAL - if what is read at @ip is not what was expected |
1747 | * EPERM - if the problem happens on writting to the @ip address | 1919 | * EPERM - if the problem happens on writting to the @ip address |
1748 | */ | 1920 | */ |
1749 | void ftrace_bug(int failed, unsigned long ip) | 1921 | void ftrace_bug(int failed, struct dyn_ftrace *rec) |
1750 | { | 1922 | { |
1923 | unsigned long ip = rec ? rec->ip : 0; | ||
1924 | |||
1751 | switch (failed) { | 1925 | switch (failed) { |
1752 | case -EFAULT: | 1926 | case -EFAULT: |
1753 | FTRACE_WARN_ON_ONCE(1); | 1927 | FTRACE_WARN_ON_ONCE(1); |
@@ -1759,7 +1933,7 @@ void ftrace_bug(int failed, unsigned long ip) | |||
1759 | pr_info("ftrace failed to modify "); | 1933 | pr_info("ftrace failed to modify "); |
1760 | print_ip_sym(ip); | 1934 | print_ip_sym(ip); |
1761 | print_ip_ins(" actual: ", (unsigned char *)ip); | 1935 | print_ip_ins(" actual: ", (unsigned char *)ip); |
1762 | printk(KERN_CONT "\n"); | 1936 | pr_cont("\n"); |
1763 | break; | 1937 | break; |
1764 | case -EPERM: | 1938 | case -EPERM: |
1765 | FTRACE_WARN_ON_ONCE(1); | 1939 | FTRACE_WARN_ON_ONCE(1); |
@@ -1771,6 +1945,24 @@ void ftrace_bug(int failed, unsigned long ip) | |||
1771 | pr_info("ftrace faulted on unknown error "); | 1945 | pr_info("ftrace faulted on unknown error "); |
1772 | print_ip_sym(ip); | 1946 | print_ip_sym(ip); |
1773 | } | 1947 | } |
1948 | if (rec) { | ||
1949 | struct ftrace_ops *ops = NULL; | ||
1950 | |||
1951 | pr_info("ftrace record flags: %lx\n", rec->flags); | ||
1952 | pr_cont(" (%ld)%s", ftrace_rec_count(rec), | ||
1953 | rec->flags & FTRACE_FL_REGS ? " R" : " "); | ||
1954 | if (rec->flags & FTRACE_FL_TRAMP_EN) { | ||
1955 | ops = ftrace_find_tramp_ops_any(rec); | ||
1956 | if (ops) | ||
1957 | pr_cont("\ttramp: %pS", | ||
1958 | (void *)ops->trampoline); | ||
1959 | else | ||
1960 | pr_cont("\ttramp: ERROR!"); | ||
1961 | |||
1962 | } | ||
1963 | ip = ftrace_get_addr_curr(rec); | ||
1964 | pr_cont(" expected tramp: %lx\n", ip); | ||
1965 | } | ||
1774 | } | 1966 | } |
1775 | 1967 | ||
1776 | static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update) | 1968 | static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update) |
@@ -2093,7 +2285,7 @@ void __weak ftrace_replace_code(int enable) | |||
2093 | do_for_each_ftrace_rec(pg, rec) { | 2285 | do_for_each_ftrace_rec(pg, rec) { |
2094 | failed = __ftrace_replace_code(rec, enable); | 2286 | failed = __ftrace_replace_code(rec, enable); |
2095 | if (failed) { | 2287 | if (failed) { |
2096 | ftrace_bug(failed, rec->ip); | 2288 | ftrace_bug(failed, rec); |
2097 | /* Stop processing */ | 2289 | /* Stop processing */ |
2098 | return; | 2290 | return; |
2099 | } | 2291 | } |
@@ -2175,17 +2367,14 @@ struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter) | |||
2175 | static int | 2367 | static int |
2176 | ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec) | 2368 | ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec) |
2177 | { | 2369 | { |
2178 | unsigned long ip; | ||
2179 | int ret; | 2370 | int ret; |
2180 | 2371 | ||
2181 | ip = rec->ip; | ||
2182 | |||
2183 | if (unlikely(ftrace_disabled)) | 2372 | if (unlikely(ftrace_disabled)) |
2184 | return 0; | 2373 | return 0; |
2185 | 2374 | ||
2186 | ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR); | 2375 | ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR); |
2187 | if (ret) { | 2376 | if (ret) { |
2188 | ftrace_bug(ret, ip); | 2377 | ftrace_bug(ret, rec); |
2189 | return 0; | 2378 | return 0; |
2190 | } | 2379 | } |
2191 | return 1; | 2380 | return 1; |
@@ -2320,6 +2509,10 @@ static void ftrace_run_modify_code(struct ftrace_ops *ops, int command, | |||
2320 | static ftrace_func_t saved_ftrace_func; | 2509 | static ftrace_func_t saved_ftrace_func; |
2321 | static int ftrace_start_up; | 2510 | static int ftrace_start_up; |
2322 | 2511 | ||
2512 | void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops) | ||
2513 | { | ||
2514 | } | ||
2515 | |||
2323 | static void control_ops_free(struct ftrace_ops *ops) | 2516 | static void control_ops_free(struct ftrace_ops *ops) |
2324 | { | 2517 | { |
2325 | free_percpu(ops->disabled); | 2518 | free_percpu(ops->disabled); |
@@ -2369,6 +2562,15 @@ static int ftrace_startup(struct ftrace_ops *ops, int command) | |||
2369 | */ | 2562 | */ |
2370 | ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING; | 2563 | ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING; |
2371 | 2564 | ||
2565 | ret = ftrace_hash_ipmodify_enable(ops); | ||
2566 | if (ret < 0) { | ||
2567 | /* Rollback registration process */ | ||
2568 | __unregister_ftrace_function(ops); | ||
2569 | ftrace_start_up--; | ||
2570 | ops->flags &= ~FTRACE_OPS_FL_ENABLED; | ||
2571 | return ret; | ||
2572 | } | ||
2573 | |||
2372 | ftrace_hash_rec_enable(ops, 1); | 2574 | ftrace_hash_rec_enable(ops, 1); |
2373 | 2575 | ||
2374 | ftrace_startup_enable(command); | 2576 | ftrace_startup_enable(command); |
@@ -2397,6 +2599,8 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command) | |||
2397 | */ | 2599 | */ |
2398 | WARN_ON_ONCE(ftrace_start_up < 0); | 2600 | WARN_ON_ONCE(ftrace_start_up < 0); |
2399 | 2601 | ||
2602 | /* Disabling ipmodify never fails */ | ||
2603 | ftrace_hash_ipmodify_disable(ops); | ||
2400 | ftrace_hash_rec_disable(ops, 1); | 2604 | ftrace_hash_rec_disable(ops, 1); |
2401 | 2605 | ||
2402 | ops->flags &= ~FTRACE_OPS_FL_ENABLED; | 2606 | ops->flags &= ~FTRACE_OPS_FL_ENABLED; |
@@ -2471,6 +2675,8 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command) | |||
2471 | if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) { | 2675 | if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) { |
2472 | schedule_on_each_cpu(ftrace_sync); | 2676 | schedule_on_each_cpu(ftrace_sync); |
2473 | 2677 | ||
2678 | arch_ftrace_trampoline_free(ops); | ||
2679 | |||
2474 | if (ops->flags & FTRACE_OPS_FL_CONTROL) | 2680 | if (ops->flags & FTRACE_OPS_FL_CONTROL) |
2475 | control_ops_free(ops); | 2681 | control_ops_free(ops); |
2476 | } | 2682 | } |
@@ -2623,7 +2829,7 @@ static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs) | |||
2623 | if (ftrace_start_up && cnt) { | 2829 | if (ftrace_start_up && cnt) { |
2624 | int failed = __ftrace_replace_code(p, 1); | 2830 | int failed = __ftrace_replace_code(p, 1); |
2625 | if (failed) | 2831 | if (failed) |
2626 | ftrace_bug(failed, p->ip); | 2832 | ftrace_bug(failed, p); |
2627 | } | 2833 | } |
2628 | } | 2834 | } |
2629 | } | 2835 | } |
@@ -2948,6 +3154,22 @@ static void t_stop(struct seq_file *m, void *p) | |||
2948 | mutex_unlock(&ftrace_lock); | 3154 | mutex_unlock(&ftrace_lock); |
2949 | } | 3155 | } |
2950 | 3156 | ||
3157 | void * __weak | ||
3158 | arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec) | ||
3159 | { | ||
3160 | return NULL; | ||
3161 | } | ||
3162 | |||
3163 | static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops, | ||
3164 | struct dyn_ftrace *rec) | ||
3165 | { | ||
3166 | void *ptr; | ||
3167 | |||
3168 | ptr = arch_ftrace_trampoline_func(ops, rec); | ||
3169 | if (ptr) | ||
3170 | seq_printf(m, " ->%pS", ptr); | ||
3171 | } | ||
3172 | |||
2951 | static int t_show(struct seq_file *m, void *v) | 3173 | static int t_show(struct seq_file *m, void *v) |
2952 | { | 3174 | { |
2953 | struct ftrace_iterator *iter = m->private; | 3175 | struct ftrace_iterator *iter = m->private; |
@@ -2958,9 +3180,9 @@ static int t_show(struct seq_file *m, void *v) | |||
2958 | 3180 | ||
2959 | if (iter->flags & FTRACE_ITER_PRINTALL) { | 3181 | if (iter->flags & FTRACE_ITER_PRINTALL) { |
2960 | if (iter->flags & FTRACE_ITER_NOTRACE) | 3182 | if (iter->flags & FTRACE_ITER_NOTRACE) |
2961 | seq_printf(m, "#### no functions disabled ####\n"); | 3183 | seq_puts(m, "#### no functions disabled ####\n"); |
2962 | else | 3184 | else |
2963 | seq_printf(m, "#### all functions enabled ####\n"); | 3185 | seq_puts(m, "#### all functions enabled ####\n"); |
2964 | return 0; | 3186 | return 0; |
2965 | } | 3187 | } |
2966 | 3188 | ||
@@ -2971,22 +3193,25 @@ static int t_show(struct seq_file *m, void *v) | |||
2971 | 3193 | ||
2972 | seq_printf(m, "%ps", (void *)rec->ip); | 3194 | seq_printf(m, "%ps", (void *)rec->ip); |
2973 | if (iter->flags & FTRACE_ITER_ENABLED) { | 3195 | if (iter->flags & FTRACE_ITER_ENABLED) { |
2974 | seq_printf(m, " (%ld)%s", | 3196 | struct ftrace_ops *ops = NULL; |
3197 | |||
3198 | seq_printf(m, " (%ld)%s%s", | ||
2975 | ftrace_rec_count(rec), | 3199 | ftrace_rec_count(rec), |
2976 | rec->flags & FTRACE_FL_REGS ? " R" : " "); | 3200 | rec->flags & FTRACE_FL_REGS ? " R" : " ", |
3201 | rec->flags & FTRACE_FL_IPMODIFY ? " I" : " "); | ||
2977 | if (rec->flags & FTRACE_FL_TRAMP_EN) { | 3202 | if (rec->flags & FTRACE_FL_TRAMP_EN) { |
2978 | struct ftrace_ops *ops; | ||
2979 | |||
2980 | ops = ftrace_find_tramp_ops_any(rec); | 3203 | ops = ftrace_find_tramp_ops_any(rec); |
2981 | if (ops) | 3204 | if (ops) |
2982 | seq_printf(m, "\ttramp: %pS", | 3205 | seq_printf(m, "\ttramp: %pS", |
2983 | (void *)ops->trampoline); | 3206 | (void *)ops->trampoline); |
2984 | else | 3207 | else |
2985 | seq_printf(m, "\ttramp: ERROR!"); | 3208 | seq_puts(m, "\ttramp: ERROR!"); |
3209 | |||
2986 | } | 3210 | } |
3211 | add_trampoline_func(m, ops, rec); | ||
2987 | } | 3212 | } |
2988 | 3213 | ||
2989 | seq_printf(m, "\n"); | 3214 | seq_putc(m, '\n'); |
2990 | 3215 | ||
2991 | return 0; | 3216 | return 0; |
2992 | } | 3217 | } |
@@ -3020,9 +3245,6 @@ ftrace_enabled_open(struct inode *inode, struct file *file) | |||
3020 | { | 3245 | { |
3021 | struct ftrace_iterator *iter; | 3246 | struct ftrace_iterator *iter; |
3022 | 3247 | ||
3023 | if (unlikely(ftrace_disabled)) | ||
3024 | return -ENODEV; | ||
3025 | |||
3026 | iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); | 3248 | iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); |
3027 | if (iter) { | 3249 | if (iter) { |
3028 | iter->pg = ftrace_pages_start; | 3250 | iter->pg = ftrace_pages_start; |
@@ -3975,6 +4197,9 @@ static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata; | |||
3975 | static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata; | 4197 | static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata; |
3976 | static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer); | 4198 | static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer); |
3977 | 4199 | ||
4200 | static unsigned long save_global_trampoline; | ||
4201 | static unsigned long save_global_flags; | ||
4202 | |||
3978 | static int __init set_graph_function(char *str) | 4203 | static int __init set_graph_function(char *str) |
3979 | { | 4204 | { |
3980 | strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE); | 4205 | strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE); |
@@ -4183,9 +4408,9 @@ static int g_show(struct seq_file *m, void *v) | |||
4183 | struct ftrace_graph_data *fgd = m->private; | 4408 | struct ftrace_graph_data *fgd = m->private; |
4184 | 4409 | ||
4185 | if (fgd->table == ftrace_graph_funcs) | 4410 | if (fgd->table == ftrace_graph_funcs) |
4186 | seq_printf(m, "#### all functions enabled ####\n"); | 4411 | seq_puts(m, "#### all functions enabled ####\n"); |
4187 | else | 4412 | else |
4188 | seq_printf(m, "#### no functions disabled ####\n"); | 4413 | seq_puts(m, "#### no functions disabled ####\n"); |
4189 | return 0; | 4414 | return 0; |
4190 | } | 4415 | } |
4191 | 4416 | ||
@@ -4696,6 +4921,32 @@ void __init ftrace_init(void) | |||
4696 | ftrace_disabled = 1; | 4921 | ftrace_disabled = 1; |
4697 | } | 4922 | } |
4698 | 4923 | ||
4924 | /* Do nothing if arch does not support this */ | ||
4925 | void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops) | ||
4926 | { | ||
4927 | } | ||
4928 | |||
4929 | static void ftrace_update_trampoline(struct ftrace_ops *ops) | ||
4930 | { | ||
4931 | |||
4932 | /* | ||
4933 | * Currently there's no safe way to free a trampoline when the kernel | ||
4934 | * is configured with PREEMPT. That is because a task could be preempted | ||
4935 | * when it jumped to the trampoline, it may be preempted for a long time | ||
4936 | * depending on the system load, and currently there's no way to know | ||
4937 | * when it will be off the trampoline. If the trampoline is freed | ||
4938 | * too early, when the task runs again, it will be executing on freed | ||
4939 | * memory and crash. | ||
4940 | */ | ||
4941 | #ifdef CONFIG_PREEMPT | ||
4942 | /* Currently, only non dynamic ops can have a trampoline */ | ||
4943 | if (ops->flags & FTRACE_OPS_FL_DYNAMIC) | ||
4944 | return; | ||
4945 | #endif | ||
4946 | |||
4947 | arch_ftrace_update_trampoline(ops); | ||
4948 | } | ||
4949 | |||
4699 | #else | 4950 | #else |
4700 | 4951 | ||
4701 | static struct ftrace_ops global_ops = { | 4952 | static struct ftrace_ops global_ops = { |
@@ -4738,6 +4989,10 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs) | |||
4738 | return 1; | 4989 | return 1; |
4739 | } | 4990 | } |
4740 | 4991 | ||
4992 | static void ftrace_update_trampoline(struct ftrace_ops *ops) | ||
4993 | { | ||
4994 | } | ||
4995 | |||
4741 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 4996 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
4742 | 4997 | ||
4743 | __init void ftrace_init_global_array_ops(struct trace_array *tr) | 4998 | __init void ftrace_init_global_array_ops(struct trace_array *tr) |
@@ -5075,12 +5330,12 @@ static int fpid_show(struct seq_file *m, void *v) | |||
5075 | const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list); | 5330 | const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list); |
5076 | 5331 | ||
5077 | if (v == (void *)1) { | 5332 | if (v == (void *)1) { |
5078 | seq_printf(m, "no pid\n"); | 5333 | seq_puts(m, "no pid\n"); |
5079 | return 0; | 5334 | return 0; |
5080 | } | 5335 | } |
5081 | 5336 | ||
5082 | if (fpid->pid == ftrace_swapper_pid) | 5337 | if (fpid->pid == ftrace_swapper_pid) |
5083 | seq_printf(m, "swapper tasks\n"); | 5338 | seq_puts(m, "swapper tasks\n"); |
5084 | else | 5339 | else |
5085 | seq_printf(m, "%u\n", pid_vnr(fpid->pid)); | 5340 | seq_printf(m, "%u\n", pid_vnr(fpid->pid)); |
5086 | 5341 | ||
@@ -5293,6 +5548,7 @@ static struct ftrace_ops graph_ops = { | |||
5293 | FTRACE_OPS_FL_STUB, | 5548 | FTRACE_OPS_FL_STUB, |
5294 | #ifdef FTRACE_GRAPH_TRAMP_ADDR | 5549 | #ifdef FTRACE_GRAPH_TRAMP_ADDR |
5295 | .trampoline = FTRACE_GRAPH_TRAMP_ADDR, | 5550 | .trampoline = FTRACE_GRAPH_TRAMP_ADDR, |
5551 | /* trampoline_size is only needed for dynamically allocated tramps */ | ||
5296 | #endif | 5552 | #endif |
5297 | ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash) | 5553 | ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash) |
5298 | }; | 5554 | }; |
@@ -5522,7 +5778,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc, | |||
5522 | update_function_graph_func(); | 5778 | update_function_graph_func(); |
5523 | 5779 | ||
5524 | ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET); | 5780 | ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET); |
5525 | |||
5526 | out: | 5781 | out: |
5527 | mutex_unlock(&ftrace_lock); | 5782 | mutex_unlock(&ftrace_lock); |
5528 | return ret; | 5783 | return ret; |
@@ -5543,6 +5798,17 @@ void unregister_ftrace_graph(void) | |||
5543 | unregister_pm_notifier(&ftrace_suspend_notifier); | 5798 | unregister_pm_notifier(&ftrace_suspend_notifier); |
5544 | unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); | 5799 | unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); |
5545 | 5800 | ||
5801 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
5802 | /* | ||
5803 | * Function graph does not allocate the trampoline, but | ||
5804 | * other global_ops do. We need to reset the ALLOC_TRAMP flag | ||
5805 | * if one was used. | ||
5806 | */ | ||
5807 | global_ops.trampoline = save_global_trampoline; | ||
5808 | if (save_global_flags & FTRACE_OPS_FL_ALLOC_TRAMP) | ||
5809 | global_ops.flags |= FTRACE_OPS_FL_ALLOC_TRAMP; | ||
5810 | #endif | ||
5811 | |||
5546 | out: | 5812 | out: |
5547 | mutex_unlock(&ftrace_lock); | 5813 | mutex_unlock(&ftrace_lock); |
5548 | } | 5814 | } |
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index a56e07c8d15b..7a4104cb95cb 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -34,21 +34,19 @@ static void update_pages_handler(struct work_struct *work); | |||
34 | */ | 34 | */ |
35 | int ring_buffer_print_entry_header(struct trace_seq *s) | 35 | int ring_buffer_print_entry_header(struct trace_seq *s) |
36 | { | 36 | { |
37 | int ret; | 37 | trace_seq_puts(s, "# compressed entry header\n"); |
38 | 38 | trace_seq_puts(s, "\ttype_len : 5 bits\n"); | |
39 | ret = trace_seq_puts(s, "# compressed entry header\n"); | 39 | trace_seq_puts(s, "\ttime_delta : 27 bits\n"); |
40 | ret = trace_seq_puts(s, "\ttype_len : 5 bits\n"); | 40 | trace_seq_puts(s, "\tarray : 32 bits\n"); |
41 | ret = trace_seq_puts(s, "\ttime_delta : 27 bits\n"); | 41 | trace_seq_putc(s, '\n'); |
42 | ret = trace_seq_puts(s, "\tarray : 32 bits\n"); | 42 | trace_seq_printf(s, "\tpadding : type == %d\n", |
43 | ret = trace_seq_putc(s, '\n'); | 43 | RINGBUF_TYPE_PADDING); |
44 | ret = trace_seq_printf(s, "\tpadding : type == %d\n", | 44 | trace_seq_printf(s, "\ttime_extend : type == %d\n", |
45 | RINGBUF_TYPE_PADDING); | 45 | RINGBUF_TYPE_TIME_EXTEND); |
46 | ret = trace_seq_printf(s, "\ttime_extend : type == %d\n", | 46 | trace_seq_printf(s, "\tdata max type_len == %d\n", |
47 | RINGBUF_TYPE_TIME_EXTEND); | 47 | RINGBUF_TYPE_DATA_TYPE_LEN_MAX); |
48 | ret = trace_seq_printf(s, "\tdata max type_len == %d\n", | ||
49 | RINGBUF_TYPE_DATA_TYPE_LEN_MAX); | ||
50 | 48 | ||
51 | return ret; | 49 | return !trace_seq_has_overflowed(s); |
52 | } | 50 | } |
53 | 51 | ||
54 | /* | 52 | /* |
@@ -419,32 +417,31 @@ static inline int test_time_stamp(u64 delta) | |||
419 | int ring_buffer_print_page_header(struct trace_seq *s) | 417 | int ring_buffer_print_page_header(struct trace_seq *s) |
420 | { | 418 | { |
421 | struct buffer_data_page field; | 419 | struct buffer_data_page field; |
422 | int ret; | ||
423 | |||
424 | ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t" | ||
425 | "offset:0;\tsize:%u;\tsigned:%u;\n", | ||
426 | (unsigned int)sizeof(field.time_stamp), | ||
427 | (unsigned int)is_signed_type(u64)); | ||
428 | |||
429 | ret = trace_seq_printf(s, "\tfield: local_t commit;\t" | ||
430 | "offset:%u;\tsize:%u;\tsigned:%u;\n", | ||
431 | (unsigned int)offsetof(typeof(field), commit), | ||
432 | (unsigned int)sizeof(field.commit), | ||
433 | (unsigned int)is_signed_type(long)); | ||
434 | |||
435 | ret = trace_seq_printf(s, "\tfield: int overwrite;\t" | ||
436 | "offset:%u;\tsize:%u;\tsigned:%u;\n", | ||
437 | (unsigned int)offsetof(typeof(field), commit), | ||
438 | 1, | ||
439 | (unsigned int)is_signed_type(long)); | ||
440 | |||
441 | ret = trace_seq_printf(s, "\tfield: char data;\t" | ||
442 | "offset:%u;\tsize:%u;\tsigned:%u;\n", | ||
443 | (unsigned int)offsetof(typeof(field), data), | ||
444 | (unsigned int)BUF_PAGE_SIZE, | ||
445 | (unsigned int)is_signed_type(char)); | ||
446 | 420 | ||
447 | return ret; | 421 | trace_seq_printf(s, "\tfield: u64 timestamp;\t" |
422 | "offset:0;\tsize:%u;\tsigned:%u;\n", | ||
423 | (unsigned int)sizeof(field.time_stamp), | ||
424 | (unsigned int)is_signed_type(u64)); | ||
425 | |||
426 | trace_seq_printf(s, "\tfield: local_t commit;\t" | ||
427 | "offset:%u;\tsize:%u;\tsigned:%u;\n", | ||
428 | (unsigned int)offsetof(typeof(field), commit), | ||
429 | (unsigned int)sizeof(field.commit), | ||
430 | (unsigned int)is_signed_type(long)); | ||
431 | |||
432 | trace_seq_printf(s, "\tfield: int overwrite;\t" | ||
433 | "offset:%u;\tsize:%u;\tsigned:%u;\n", | ||
434 | (unsigned int)offsetof(typeof(field), commit), | ||
435 | 1, | ||
436 | (unsigned int)is_signed_type(long)); | ||
437 | |||
438 | trace_seq_printf(s, "\tfield: char data;\t" | ||
439 | "offset:%u;\tsize:%u;\tsigned:%u;\n", | ||
440 | (unsigned int)offsetof(typeof(field), data), | ||
441 | (unsigned int)BUF_PAGE_SIZE, | ||
442 | (unsigned int)is_signed_type(char)); | ||
443 | |||
444 | return !trace_seq_has_overflowed(s); | ||
448 | } | 445 | } |
449 | 446 | ||
450 | struct rb_irq_work { | 447 | struct rb_irq_work { |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 426962b04183..ce11fa50a2f0 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -155,10 +155,11 @@ __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops); | |||
155 | 155 | ||
156 | static int __init stop_trace_on_warning(char *str) | 156 | static int __init stop_trace_on_warning(char *str) |
157 | { | 157 | { |
158 | __disable_trace_on_warning = 1; | 158 | if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0)) |
159 | __disable_trace_on_warning = 1; | ||
159 | return 1; | 160 | return 1; |
160 | } | 161 | } |
161 | __setup("traceoff_on_warning=", stop_trace_on_warning); | 162 | __setup("traceoff_on_warning", stop_trace_on_warning); |
162 | 163 | ||
163 | static int __init boot_alloc_snapshot(char *str) | 164 | static int __init boot_alloc_snapshot(char *str) |
164 | { | 165 | { |
@@ -2158,9 +2159,7 @@ __trace_array_vprintk(struct ring_buffer *buffer, | |||
2158 | goto out; | 2159 | goto out; |
2159 | } | 2160 | } |
2160 | 2161 | ||
2161 | len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args); | 2162 | len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args); |
2162 | if (len > TRACE_BUF_SIZE) | ||
2163 | goto out; | ||
2164 | 2163 | ||
2165 | local_save_flags(flags); | 2164 | local_save_flags(flags); |
2166 | size = sizeof(*entry) + len + 1; | 2165 | size = sizeof(*entry) + len + 1; |
@@ -2171,8 +2170,7 @@ __trace_array_vprintk(struct ring_buffer *buffer, | |||
2171 | entry = ring_buffer_event_data(event); | 2170 | entry = ring_buffer_event_data(event); |
2172 | entry->ip = ip; | 2171 | entry->ip = ip; |
2173 | 2172 | ||
2174 | memcpy(&entry->buf, tbuffer, len); | 2173 | memcpy(&entry->buf, tbuffer, len + 1); |
2175 | entry->buf[len] = '\0'; | ||
2176 | if (!call_filter_check_discard(call, entry, buffer, event)) { | 2174 | if (!call_filter_check_discard(call, entry, buffer, event)) { |
2177 | __buffer_unlock_commit(buffer, event); | 2175 | __buffer_unlock_commit(buffer, event); |
2178 | ftrace_trace_stack(buffer, flags, 6, pc); | 2176 | ftrace_trace_stack(buffer, flags, 6, pc); |
@@ -2509,14 +2507,14 @@ get_total_entries(struct trace_buffer *buf, | |||
2509 | 2507 | ||
2510 | static void print_lat_help_header(struct seq_file *m) | 2508 | static void print_lat_help_header(struct seq_file *m) |
2511 | { | 2509 | { |
2512 | seq_puts(m, "# _------=> CPU# \n"); | 2510 | seq_puts(m, "# _------=> CPU# \n" |
2513 | seq_puts(m, "# / _-----=> irqs-off \n"); | 2511 | "# / _-----=> irqs-off \n" |
2514 | seq_puts(m, "# | / _----=> need-resched \n"); | 2512 | "# | / _----=> need-resched \n" |
2515 | seq_puts(m, "# || / _---=> hardirq/softirq \n"); | 2513 | "# || / _---=> hardirq/softirq \n" |
2516 | seq_puts(m, "# ||| / _--=> preempt-depth \n"); | 2514 | "# ||| / _--=> preempt-depth \n" |
2517 | seq_puts(m, "# |||| / delay \n"); | 2515 | "# |||| / delay \n" |
2518 | seq_puts(m, "# cmd pid ||||| time | caller \n"); | 2516 | "# cmd pid ||||| time | caller \n" |
2519 | seq_puts(m, "# \\ / ||||| \\ | / \n"); | 2517 | "# \\ / ||||| \\ | / \n"); |
2520 | } | 2518 | } |
2521 | 2519 | ||
2522 | static void print_event_info(struct trace_buffer *buf, struct seq_file *m) | 2520 | static void print_event_info(struct trace_buffer *buf, struct seq_file *m) |
@@ -2533,20 +2531,20 @@ static void print_event_info(struct trace_buffer *buf, struct seq_file *m) | |||
2533 | static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m) | 2531 | static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m) |
2534 | { | 2532 | { |
2535 | print_event_info(buf, m); | 2533 | print_event_info(buf, m); |
2536 | seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"); | 2534 | seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n" |
2537 | seq_puts(m, "# | | | | |\n"); | 2535 | "# | | | | |\n"); |
2538 | } | 2536 | } |
2539 | 2537 | ||
2540 | static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m) | 2538 | static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m) |
2541 | { | 2539 | { |
2542 | print_event_info(buf, m); | 2540 | print_event_info(buf, m); |
2543 | seq_puts(m, "# _-----=> irqs-off\n"); | 2541 | seq_puts(m, "# _-----=> irqs-off\n" |
2544 | seq_puts(m, "# / _----=> need-resched\n"); | 2542 | "# / _----=> need-resched\n" |
2545 | seq_puts(m, "# | / _---=> hardirq/softirq\n"); | 2543 | "# | / _---=> hardirq/softirq\n" |
2546 | seq_puts(m, "# || / _--=> preempt-depth\n"); | 2544 | "# || / _--=> preempt-depth\n" |
2547 | seq_puts(m, "# ||| / delay\n"); | 2545 | "# ||| / delay\n" |
2548 | seq_puts(m, "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"); | 2546 | "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n" |
2549 | seq_puts(m, "# | | | |||| | |\n"); | 2547 | "# | | | |||| | |\n"); |
2550 | } | 2548 | } |
2551 | 2549 | ||
2552 | void | 2550 | void |
@@ -2649,24 +2647,21 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter) | |||
2649 | event = ftrace_find_event(entry->type); | 2647 | event = ftrace_find_event(entry->type); |
2650 | 2648 | ||
2651 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { | 2649 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { |
2652 | if (iter->iter_flags & TRACE_FILE_LAT_FMT) { | 2650 | if (iter->iter_flags & TRACE_FILE_LAT_FMT) |
2653 | if (!trace_print_lat_context(iter)) | 2651 | trace_print_lat_context(iter); |
2654 | goto partial; | 2652 | else |
2655 | } else { | 2653 | trace_print_context(iter); |
2656 | if (!trace_print_context(iter)) | ||
2657 | goto partial; | ||
2658 | } | ||
2659 | } | 2654 | } |
2660 | 2655 | ||
2656 | if (trace_seq_has_overflowed(s)) | ||
2657 | return TRACE_TYPE_PARTIAL_LINE; | ||
2658 | |||
2661 | if (event) | 2659 | if (event) |
2662 | return event->funcs->trace(iter, sym_flags, event); | 2660 | return event->funcs->trace(iter, sym_flags, event); |
2663 | 2661 | ||
2664 | if (!trace_seq_printf(s, "Unknown type %d\n", entry->type)) | 2662 | trace_seq_printf(s, "Unknown type %d\n", entry->type); |
2665 | goto partial; | ||
2666 | 2663 | ||
2667 | return TRACE_TYPE_HANDLED; | 2664 | return trace_handle_return(s); |
2668 | partial: | ||
2669 | return TRACE_TYPE_PARTIAL_LINE; | ||
2670 | } | 2665 | } |
2671 | 2666 | ||
2672 | static enum print_line_t print_raw_fmt(struct trace_iterator *iter) | 2667 | static enum print_line_t print_raw_fmt(struct trace_iterator *iter) |
@@ -2677,22 +2672,20 @@ static enum print_line_t print_raw_fmt(struct trace_iterator *iter) | |||
2677 | 2672 | ||
2678 | entry = iter->ent; | 2673 | entry = iter->ent; |
2679 | 2674 | ||
2680 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { | 2675 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) |
2681 | if (!trace_seq_printf(s, "%d %d %llu ", | 2676 | trace_seq_printf(s, "%d %d %llu ", |
2682 | entry->pid, iter->cpu, iter->ts)) | 2677 | entry->pid, iter->cpu, iter->ts); |
2683 | goto partial; | 2678 | |
2684 | } | 2679 | if (trace_seq_has_overflowed(s)) |
2680 | return TRACE_TYPE_PARTIAL_LINE; | ||
2685 | 2681 | ||
2686 | event = ftrace_find_event(entry->type); | 2682 | event = ftrace_find_event(entry->type); |
2687 | if (event) | 2683 | if (event) |
2688 | return event->funcs->raw(iter, 0, event); | 2684 | return event->funcs->raw(iter, 0, event); |
2689 | 2685 | ||
2690 | if (!trace_seq_printf(s, "%d ?\n", entry->type)) | 2686 | trace_seq_printf(s, "%d ?\n", entry->type); |
2691 | goto partial; | ||
2692 | 2687 | ||
2693 | return TRACE_TYPE_HANDLED; | 2688 | return trace_handle_return(s); |
2694 | partial: | ||
2695 | return TRACE_TYPE_PARTIAL_LINE; | ||
2696 | } | 2689 | } |
2697 | 2690 | ||
2698 | static enum print_line_t print_hex_fmt(struct trace_iterator *iter) | 2691 | static enum print_line_t print_hex_fmt(struct trace_iterator *iter) |
@@ -2705,9 +2698,11 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter) | |||
2705 | entry = iter->ent; | 2698 | entry = iter->ent; |
2706 | 2699 | ||
2707 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { | 2700 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { |
2708 | SEQ_PUT_HEX_FIELD_RET(s, entry->pid); | 2701 | SEQ_PUT_HEX_FIELD(s, entry->pid); |
2709 | SEQ_PUT_HEX_FIELD_RET(s, iter->cpu); | 2702 | SEQ_PUT_HEX_FIELD(s, iter->cpu); |
2710 | SEQ_PUT_HEX_FIELD_RET(s, iter->ts); | 2703 | SEQ_PUT_HEX_FIELD(s, iter->ts); |
2704 | if (trace_seq_has_overflowed(s)) | ||
2705 | return TRACE_TYPE_PARTIAL_LINE; | ||
2711 | } | 2706 | } |
2712 | 2707 | ||
2713 | event = ftrace_find_event(entry->type); | 2708 | event = ftrace_find_event(entry->type); |
@@ -2717,9 +2712,9 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter) | |||
2717 | return ret; | 2712 | return ret; |
2718 | } | 2713 | } |
2719 | 2714 | ||
2720 | SEQ_PUT_FIELD_RET(s, newline); | 2715 | SEQ_PUT_FIELD(s, newline); |
2721 | 2716 | ||
2722 | return TRACE_TYPE_HANDLED; | 2717 | return trace_handle_return(s); |
2723 | } | 2718 | } |
2724 | 2719 | ||
2725 | static enum print_line_t print_bin_fmt(struct trace_iterator *iter) | 2720 | static enum print_line_t print_bin_fmt(struct trace_iterator *iter) |
@@ -2731,9 +2726,11 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter) | |||
2731 | entry = iter->ent; | 2726 | entry = iter->ent; |
2732 | 2727 | ||
2733 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { | 2728 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { |
2734 | SEQ_PUT_FIELD_RET(s, entry->pid); | 2729 | SEQ_PUT_FIELD(s, entry->pid); |
2735 | SEQ_PUT_FIELD_RET(s, iter->cpu); | 2730 | SEQ_PUT_FIELD(s, iter->cpu); |
2736 | SEQ_PUT_FIELD_RET(s, iter->ts); | 2731 | SEQ_PUT_FIELD(s, iter->ts); |
2732 | if (trace_seq_has_overflowed(s)) | ||
2733 | return TRACE_TYPE_PARTIAL_LINE; | ||
2737 | } | 2734 | } |
2738 | 2735 | ||
2739 | event = ftrace_find_event(entry->type); | 2736 | event = ftrace_find_event(entry->type); |
@@ -2779,10 +2776,12 @@ enum print_line_t print_trace_line(struct trace_iterator *iter) | |||
2779 | { | 2776 | { |
2780 | enum print_line_t ret; | 2777 | enum print_line_t ret; |
2781 | 2778 | ||
2782 | if (iter->lost_events && | 2779 | if (iter->lost_events) { |
2783 | !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", | 2780 | trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", |
2784 | iter->cpu, iter->lost_events)) | 2781 | iter->cpu, iter->lost_events); |
2785 | return TRACE_TYPE_PARTIAL_LINE; | 2782 | if (trace_seq_has_overflowed(&iter->seq)) |
2783 | return TRACE_TYPE_PARTIAL_LINE; | ||
2784 | } | ||
2786 | 2785 | ||
2787 | if (iter->trace && iter->trace->print_line) { | 2786 | if (iter->trace && iter->trace->print_line) { |
2788 | ret = iter->trace->print_line(iter); | 2787 | ret = iter->trace->print_line(iter); |
@@ -2860,44 +2859,44 @@ static void test_ftrace_alive(struct seq_file *m) | |||
2860 | { | 2859 | { |
2861 | if (!ftrace_is_dead()) | 2860 | if (!ftrace_is_dead()) |
2862 | return; | 2861 | return; |
2863 | seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"); | 2862 | seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n" |
2864 | seq_printf(m, "# MAY BE MISSING FUNCTION EVENTS\n"); | 2863 | "# MAY BE MISSING FUNCTION EVENTS\n"); |
2865 | } | 2864 | } |
2866 | 2865 | ||
2867 | #ifdef CONFIG_TRACER_MAX_TRACE | 2866 | #ifdef CONFIG_TRACER_MAX_TRACE |
2868 | static void show_snapshot_main_help(struct seq_file *m) | 2867 | static void show_snapshot_main_help(struct seq_file *m) |
2869 | { | 2868 | { |
2870 | seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"); | 2869 | seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n" |
2871 | seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"); | 2870 | "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n" |
2872 | seq_printf(m, "# Takes a snapshot of the main buffer.\n"); | 2871 | "# Takes a snapshot of the main buffer.\n" |
2873 | seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"); | 2872 | "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n" |
2874 | seq_printf(m, "# (Doesn't have to be '2' works with any number that\n"); | 2873 | "# (Doesn't have to be '2' works with any number that\n" |
2875 | seq_printf(m, "# is not a '0' or '1')\n"); | 2874 | "# is not a '0' or '1')\n"); |
2876 | } | 2875 | } |
2877 | 2876 | ||
2878 | static void show_snapshot_percpu_help(struct seq_file *m) | 2877 | static void show_snapshot_percpu_help(struct seq_file *m) |
2879 | { | 2878 | { |
2880 | seq_printf(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n"); | 2879 | seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n"); |
2881 | #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP | 2880 | #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP |
2882 | seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"); | 2881 | seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n" |
2883 | seq_printf(m, "# Takes a snapshot of the main buffer for this cpu.\n"); | 2882 | "# Takes a snapshot of the main buffer for this cpu.\n"); |
2884 | #else | 2883 | #else |
2885 | seq_printf(m, "# echo 1 > snapshot : Not supported with this kernel.\n"); | 2884 | seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n" |
2886 | seq_printf(m, "# Must use main snapshot file to allocate.\n"); | 2885 | "# Must use main snapshot file to allocate.\n"); |
2887 | #endif | 2886 | #endif |
2888 | seq_printf(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"); | 2887 | seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n" |
2889 | seq_printf(m, "# (Doesn't have to be '2' works with any number that\n"); | 2888 | "# (Doesn't have to be '2' works with any number that\n" |
2890 | seq_printf(m, "# is not a '0' or '1')\n"); | 2889 | "# is not a '0' or '1')\n"); |
2891 | } | 2890 | } |
2892 | 2891 | ||
2893 | static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) | 2892 | static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) |
2894 | { | 2893 | { |
2895 | if (iter->tr->allocated_snapshot) | 2894 | if (iter->tr->allocated_snapshot) |
2896 | seq_printf(m, "#\n# * Snapshot is allocated *\n#\n"); | 2895 | seq_puts(m, "#\n# * Snapshot is allocated *\n#\n"); |
2897 | else | 2896 | else |
2898 | seq_printf(m, "#\n# * Snapshot is freed *\n#\n"); | 2897 | seq_puts(m, "#\n# * Snapshot is freed *\n#\n"); |
2899 | 2898 | ||
2900 | seq_printf(m, "# Snapshot commands:\n"); | 2899 | seq_puts(m, "# Snapshot commands:\n"); |
2901 | if (iter->cpu_file == RING_BUFFER_ALL_CPUS) | 2900 | if (iter->cpu_file == RING_BUFFER_ALL_CPUS) |
2902 | show_snapshot_main_help(m); | 2901 | show_snapshot_main_help(m); |
2903 | else | 2902 | else |
@@ -3251,7 +3250,7 @@ static int t_show(struct seq_file *m, void *v) | |||
3251 | if (!t) | 3250 | if (!t) |
3252 | return 0; | 3251 | return 0; |
3253 | 3252 | ||
3254 | seq_printf(m, "%s", t->name); | 3253 | seq_puts(m, t->name); |
3255 | if (t->next) | 3254 | if (t->next) |
3256 | seq_putc(m, ' '); | 3255 | seq_putc(m, ' '); |
3257 | else | 3256 | else |
@@ -5749,10 +5748,10 @@ ftrace_snapshot_print(struct seq_file *m, unsigned long ip, | |||
5749 | 5748 | ||
5750 | seq_printf(m, "%ps:", (void *)ip); | 5749 | seq_printf(m, "%ps:", (void *)ip); |
5751 | 5750 | ||
5752 | seq_printf(m, "snapshot"); | 5751 | seq_puts(m, "snapshot"); |
5753 | 5752 | ||
5754 | if (count == -1) | 5753 | if (count == -1) |
5755 | seq_printf(m, ":unlimited\n"); | 5754 | seq_puts(m, ":unlimited\n"); |
5756 | else | 5755 | else |
5757 | seq_printf(m, ":count=%ld\n", count); | 5756 | seq_printf(m, ":count=%ld\n", count); |
5758 | 5757 | ||
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 385391fb1d3b..3255dfb054a0 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/trace_seq.h> | 14 | #include <linux/trace_seq.h> |
15 | #include <linux/ftrace_event.h> | 15 | #include <linux/ftrace_event.h> |
16 | #include <linux/compiler.h> | 16 | #include <linux/compiler.h> |
17 | #include <linux/trace_seq.h> | ||
17 | 18 | ||
18 | #ifdef CONFIG_FTRACE_SYSCALLS | 19 | #ifdef CONFIG_FTRACE_SYSCALLS |
19 | #include <asm/unistd.h> /* For NR_SYSCALLS */ | 20 | #include <asm/unistd.h> /* For NR_SYSCALLS */ |
@@ -569,15 +570,6 @@ void trace_init_global_iter(struct trace_iterator *iter); | |||
569 | 570 | ||
570 | void tracing_iter_reset(struct trace_iterator *iter, int cpu); | 571 | void tracing_iter_reset(struct trace_iterator *iter, int cpu); |
571 | 572 | ||
572 | void tracing_sched_switch_trace(struct trace_array *tr, | ||
573 | struct task_struct *prev, | ||
574 | struct task_struct *next, | ||
575 | unsigned long flags, int pc); | ||
576 | |||
577 | void tracing_sched_wakeup_trace(struct trace_array *tr, | ||
578 | struct task_struct *wakee, | ||
579 | struct task_struct *cur, | ||
580 | unsigned long flags, int pc); | ||
581 | void trace_function(struct trace_array *tr, | 573 | void trace_function(struct trace_array *tr, |
582 | unsigned long ip, | 574 | unsigned long ip, |
583 | unsigned long parent_ip, | 575 | unsigned long parent_ip, |
@@ -597,9 +589,6 @@ void set_graph_array(struct trace_array *tr); | |||
597 | 589 | ||
598 | void tracing_start_cmdline_record(void); | 590 | void tracing_start_cmdline_record(void); |
599 | void tracing_stop_cmdline_record(void); | 591 | void tracing_stop_cmdline_record(void); |
600 | void tracing_sched_switch_assign_trace(struct trace_array *tr); | ||
601 | void tracing_stop_sched_switch_record(void); | ||
602 | void tracing_start_sched_switch_record(void); | ||
603 | int register_tracer(struct tracer *type); | 592 | int register_tracer(struct tracer *type); |
604 | int is_tracing_stopped(void); | 593 | int is_tracing_stopped(void); |
605 | 594 | ||
@@ -719,6 +708,8 @@ enum print_line_t print_trace_line(struct trace_iterator *iter); | |||
719 | 708 | ||
720 | extern unsigned long trace_flags; | 709 | extern unsigned long trace_flags; |
721 | 710 | ||
711 | extern char trace_find_mark(unsigned long long duration); | ||
712 | |||
722 | /* Standard output formatting function used for function return traces */ | 713 | /* Standard output formatting function used for function return traces */ |
723 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 714 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
724 | 715 | ||
@@ -737,7 +728,7 @@ extern unsigned long trace_flags; | |||
737 | extern enum print_line_t | 728 | extern enum print_line_t |
738 | print_graph_function_flags(struct trace_iterator *iter, u32 flags); | 729 | print_graph_function_flags(struct trace_iterator *iter, u32 flags); |
739 | extern void print_graph_headers_flags(struct seq_file *s, u32 flags); | 730 | extern void print_graph_headers_flags(struct seq_file *s, u32 flags); |
740 | extern enum print_line_t | 731 | extern void |
741 | trace_print_graph_duration(unsigned long long duration, struct trace_seq *s); | 732 | trace_print_graph_duration(unsigned long long duration, struct trace_seq *s); |
742 | extern void graph_trace_open(struct trace_iterator *iter); | 733 | extern void graph_trace_open(struct trace_iterator *iter); |
743 | extern void graph_trace_close(struct trace_iterator *iter); | 734 | extern void graph_trace_close(struct trace_iterator *iter); |
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c index 697fb9bac8f0..7d6e2afde669 100644 --- a/kernel/trace/trace_branch.c +++ b/kernel/trace/trace_branch.c | |||
@@ -151,22 +151,21 @@ static enum print_line_t trace_branch_print(struct trace_iterator *iter, | |||
151 | 151 | ||
152 | trace_assign_type(field, iter->ent); | 152 | trace_assign_type(field, iter->ent); |
153 | 153 | ||
154 | if (trace_seq_printf(&iter->seq, "[%s] %s:%s:%d\n", | 154 | trace_seq_printf(&iter->seq, "[%s] %s:%s:%d\n", |
155 | field->correct ? " ok " : " MISS ", | 155 | field->correct ? " ok " : " MISS ", |
156 | field->func, | 156 | field->func, |
157 | field->file, | 157 | field->file, |
158 | field->line)) | 158 | field->line); |
159 | return TRACE_TYPE_PARTIAL_LINE; | 159 | |
160 | 160 | return trace_handle_return(&iter->seq); | |
161 | return TRACE_TYPE_HANDLED; | ||
162 | } | 161 | } |
163 | 162 | ||
164 | static void branch_print_header(struct seq_file *s) | 163 | static void branch_print_header(struct seq_file *s) |
165 | { | 164 | { |
166 | seq_puts(s, "# TASK-PID CPU# TIMESTAMP CORRECT" | 165 | seq_puts(s, "# TASK-PID CPU# TIMESTAMP CORRECT" |
167 | " FUNC:FILE:LINE\n"); | 166 | " FUNC:FILE:LINE\n" |
168 | seq_puts(s, "# | | | | | " | 167 | "# | | | | | " |
169 | " |\n"); | 168 | " |\n"); |
170 | } | 169 | } |
171 | 170 | ||
172 | static struct trace_event_functions trace_branch_funcs = { | 171 | static struct trace_event_functions trace_branch_funcs = { |
@@ -233,12 +232,12 @@ extern unsigned long __stop_annotated_branch_profile[]; | |||
233 | 232 | ||
234 | static int annotated_branch_stat_headers(struct seq_file *m) | 233 | static int annotated_branch_stat_headers(struct seq_file *m) |
235 | { | 234 | { |
236 | seq_printf(m, " correct incorrect %% "); | 235 | seq_puts(m, " correct incorrect % " |
237 | seq_printf(m, " Function " | 236 | " Function " |
238 | " File Line\n" | 237 | " File Line\n" |
239 | " ------- --------- - " | 238 | " ------- --------- - " |
240 | " -------- " | 239 | " -------- " |
241 | " ---- ----\n"); | 240 | " ---- ----\n"); |
242 | return 0; | 241 | return 0; |
243 | } | 242 | } |
244 | 243 | ||
@@ -274,7 +273,7 @@ static int branch_stat_show(struct seq_file *m, void *v) | |||
274 | 273 | ||
275 | seq_printf(m, "%8lu %8lu ", p->correct, p->incorrect); | 274 | seq_printf(m, "%8lu %8lu ", p->correct, p->incorrect); |
276 | if (percent < 0) | 275 | if (percent < 0) |
277 | seq_printf(m, " X "); | 276 | seq_puts(m, " X "); |
278 | else | 277 | else |
279 | seq_printf(m, "%3ld ", percent); | 278 | seq_printf(m, "%3ld ", percent); |
280 | seq_printf(m, "%-30.30s %-20.20s %d\n", p->func, f, p->line); | 279 | seq_printf(m, "%-30.30s %-20.20s %d\n", p->func, f, p->line); |
@@ -362,12 +361,12 @@ extern unsigned long __stop_branch_profile[]; | |||
362 | 361 | ||
363 | static int all_branch_stat_headers(struct seq_file *m) | 362 | static int all_branch_stat_headers(struct seq_file *m) |
364 | { | 363 | { |
365 | seq_printf(m, " miss hit %% "); | 364 | seq_puts(m, " miss hit % " |
366 | seq_printf(m, " Function " | 365 | " Function " |
367 | " File Line\n" | 366 | " File Line\n" |
368 | " ------- --------- - " | 367 | " ------- --------- - " |
369 | " -------- " | 368 | " -------- " |
370 | " ---- ----\n"); | 369 | " ---- ----\n"); |
371 | return 0; | 370 | return 0; |
372 | } | 371 | } |
373 | 372 | ||
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 1b0df1e504f0..139716bcef7a 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c | |||
@@ -918,7 +918,7 @@ static int f_show(struct seq_file *m, void *v) | |||
918 | case FORMAT_HEADER: | 918 | case FORMAT_HEADER: |
919 | seq_printf(m, "name: %s\n", ftrace_event_name(call)); | 919 | seq_printf(m, "name: %s\n", ftrace_event_name(call)); |
920 | seq_printf(m, "ID: %d\n", call->event.type); | 920 | seq_printf(m, "ID: %d\n", call->event.type); |
921 | seq_printf(m, "format:\n"); | 921 | seq_puts(m, "format:\n"); |
922 | return 0; | 922 | return 0; |
923 | 923 | ||
924 | case FORMAT_FIELD_SEPERATOR: | 924 | case FORMAT_FIELD_SEPERATOR: |
@@ -1988,7 +1988,7 @@ event_enable_print(struct seq_file *m, unsigned long ip, | |||
1988 | ftrace_event_name(data->file->event_call)); | 1988 | ftrace_event_name(data->file->event_call)); |
1989 | 1989 | ||
1990 | if (data->count == -1) | 1990 | if (data->count == -1) |
1991 | seq_printf(m, ":unlimited\n"); | 1991 | seq_puts(m, ":unlimited\n"); |
1992 | else | 1992 | else |
1993 | seq_printf(m, ":count=%ld\n", data->count); | 1993 | seq_printf(m, ":count=%ld\n", data->count); |
1994 | 1994 | ||
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 7a8c1528e141..ced69da0ff55 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c | |||
@@ -45,6 +45,7 @@ enum filter_op_ids | |||
45 | OP_GT, | 45 | OP_GT, |
46 | OP_GE, | 46 | OP_GE, |
47 | OP_BAND, | 47 | OP_BAND, |
48 | OP_NOT, | ||
48 | OP_NONE, | 49 | OP_NONE, |
49 | OP_OPEN_PAREN, | 50 | OP_OPEN_PAREN, |
50 | }; | 51 | }; |
@@ -67,6 +68,7 @@ static struct filter_op filter_ops[] = { | |||
67 | { OP_GT, ">", 5 }, | 68 | { OP_GT, ">", 5 }, |
68 | { OP_GE, ">=", 5 }, | 69 | { OP_GE, ">=", 5 }, |
69 | { OP_BAND, "&", 6 }, | 70 | { OP_BAND, "&", 6 }, |
71 | { OP_NOT, "!", 6 }, | ||
70 | { OP_NONE, "OP_NONE", 0 }, | 72 | { OP_NONE, "OP_NONE", 0 }, |
71 | { OP_OPEN_PAREN, "(", 0 }, | 73 | { OP_OPEN_PAREN, "(", 0 }, |
72 | }; | 74 | }; |
@@ -85,6 +87,7 @@ enum { | |||
85 | FILT_ERR_MISSING_FIELD, | 87 | FILT_ERR_MISSING_FIELD, |
86 | FILT_ERR_INVALID_FILTER, | 88 | FILT_ERR_INVALID_FILTER, |
87 | FILT_ERR_IP_FIELD_ONLY, | 89 | FILT_ERR_IP_FIELD_ONLY, |
90 | FILT_ERR_ILLEGAL_NOT_OP, | ||
88 | }; | 91 | }; |
89 | 92 | ||
90 | static char *err_text[] = { | 93 | static char *err_text[] = { |
@@ -101,6 +104,7 @@ static char *err_text[] = { | |||
101 | "Missing field name and/or value", | 104 | "Missing field name and/or value", |
102 | "Meaningless filter expression", | 105 | "Meaningless filter expression", |
103 | "Only 'ip' field is supported for function trace", | 106 | "Only 'ip' field is supported for function trace", |
107 | "Illegal use of '!'", | ||
104 | }; | 108 | }; |
105 | 109 | ||
106 | struct opstack_op { | 110 | struct opstack_op { |
@@ -139,6 +143,7 @@ struct pred_stack { | |||
139 | int index; | 143 | int index; |
140 | }; | 144 | }; |
141 | 145 | ||
146 | /* If not of not match is equal to not of not, then it is a match */ | ||
142 | #define DEFINE_COMPARISON_PRED(type) \ | 147 | #define DEFINE_COMPARISON_PRED(type) \ |
143 | static int filter_pred_##type(struct filter_pred *pred, void *event) \ | 148 | static int filter_pred_##type(struct filter_pred *pred, void *event) \ |
144 | { \ | 149 | { \ |
@@ -166,7 +171,7 @@ static int filter_pred_##type(struct filter_pred *pred, void *event) \ | |||
166 | break; \ | 171 | break; \ |
167 | } \ | 172 | } \ |
168 | \ | 173 | \ |
169 | return match; \ | 174 | return !!match == !pred->not; \ |
170 | } | 175 | } |
171 | 176 | ||
172 | #define DEFINE_EQUALITY_PRED(size) \ | 177 | #define DEFINE_EQUALITY_PRED(size) \ |
@@ -484,9 +489,10 @@ static int process_ops(struct filter_pred *preds, | |||
484 | if (!WARN_ON_ONCE(!pred->fn)) | 489 | if (!WARN_ON_ONCE(!pred->fn)) |
485 | match = pred->fn(pred, rec); | 490 | match = pred->fn(pred, rec); |
486 | if (!!match == type) | 491 | if (!!match == type) |
487 | return match; | 492 | break; |
488 | } | 493 | } |
489 | return match; | 494 | /* If not of not match is equal to not of not, then it is a match */ |
495 | return !!match == !op->not; | ||
490 | } | 496 | } |
491 | 497 | ||
492 | struct filter_match_preds_data { | 498 | struct filter_match_preds_data { |
@@ -735,10 +741,10 @@ static int filter_set_pred(struct event_filter *filter, | |||
735 | * then this op can be folded. | 741 | * then this op can be folded. |
736 | */ | 742 | */ |
737 | if (left->index & FILTER_PRED_FOLD && | 743 | if (left->index & FILTER_PRED_FOLD && |
738 | (left->op == dest->op || | 744 | ((left->op == dest->op && !left->not) || |
739 | left->left == FILTER_PRED_INVALID) && | 745 | left->left == FILTER_PRED_INVALID) && |
740 | right->index & FILTER_PRED_FOLD && | 746 | right->index & FILTER_PRED_FOLD && |
741 | (right->op == dest->op || | 747 | ((right->op == dest->op && !right->not) || |
742 | right->left == FILTER_PRED_INVALID)) | 748 | right->left == FILTER_PRED_INVALID)) |
743 | dest->index |= FILTER_PRED_FOLD; | 749 | dest->index |= FILTER_PRED_FOLD; |
744 | 750 | ||
@@ -1028,7 +1034,7 @@ static int init_pred(struct filter_parse_state *ps, | |||
1028 | } | 1034 | } |
1029 | 1035 | ||
1030 | if (pred->op == OP_NE) | 1036 | if (pred->op == OP_NE) |
1031 | pred->not = 1; | 1037 | pred->not ^= 1; |
1032 | 1038 | ||
1033 | pred->fn = fn; | 1039 | pred->fn = fn; |
1034 | return 0; | 1040 | return 0; |
@@ -1590,6 +1596,17 @@ static int replace_preds(struct ftrace_event_call *call, | |||
1590 | continue; | 1596 | continue; |
1591 | } | 1597 | } |
1592 | 1598 | ||
1599 | if (elt->op == OP_NOT) { | ||
1600 | if (!n_preds || operand1 || operand2) { | ||
1601 | parse_error(ps, FILT_ERR_ILLEGAL_NOT_OP, 0); | ||
1602 | err = -EINVAL; | ||
1603 | goto fail; | ||
1604 | } | ||
1605 | if (!dry_run) | ||
1606 | filter->preds[n_preds - 1].not ^= 1; | ||
1607 | continue; | ||
1608 | } | ||
1609 | |||
1593 | if (WARN_ON(n_preds++ == MAX_FILTER_PRED)) { | 1610 | if (WARN_ON(n_preds++ == MAX_FILTER_PRED)) { |
1594 | parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0); | 1611 | parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0); |
1595 | err = -ENOSPC; | 1612 | err = -ENOSPC; |
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c index 4747b476a030..8712df9decb4 100644 --- a/kernel/trace/trace_events_trigger.c +++ b/kernel/trace/trace_events_trigger.c | |||
@@ -373,7 +373,7 @@ event_trigger_print(const char *name, struct seq_file *m, | |||
373 | { | 373 | { |
374 | long count = (long)data; | 374 | long count = (long)data; |
375 | 375 | ||
376 | seq_printf(m, "%s", name); | 376 | seq_puts(m, name); |
377 | 377 | ||
378 | if (count == -1) | 378 | if (count == -1) |
379 | seq_puts(m, ":unlimited"); | 379 | seq_puts(m, ":unlimited"); |
@@ -383,7 +383,7 @@ event_trigger_print(const char *name, struct seq_file *m, | |||
383 | if (filter_str) | 383 | if (filter_str) |
384 | seq_printf(m, " if %s\n", filter_str); | 384 | seq_printf(m, " if %s\n", filter_str); |
385 | else | 385 | else |
386 | seq_puts(m, "\n"); | 386 | seq_putc(m, '\n'); |
387 | 387 | ||
388 | return 0; | 388 | return 0; |
389 | } | 389 | } |
@@ -1105,7 +1105,7 @@ event_enable_trigger_print(struct seq_file *m, struct event_trigger_ops *ops, | |||
1105 | if (data->filter_str) | 1105 | if (data->filter_str) |
1106 | seq_printf(m, " if %s\n", data->filter_str); | 1106 | seq_printf(m, " if %s\n", data->filter_str); |
1107 | else | 1107 | else |
1108 | seq_puts(m, "\n"); | 1108 | seq_putc(m, '\n'); |
1109 | 1109 | ||
1110 | return 0; | 1110 | return 0; |
1111 | } | 1111 | } |
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index 57f0ec962d2c..fcd41a166405 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c | |||
@@ -261,37 +261,74 @@ static struct tracer function_trace __tracer_data = | |||
261 | }; | 261 | }; |
262 | 262 | ||
263 | #ifdef CONFIG_DYNAMIC_FTRACE | 263 | #ifdef CONFIG_DYNAMIC_FTRACE |
264 | static int update_count(void **data) | 264 | static void update_traceon_count(void **data, bool on) |
265 | { | 265 | { |
266 | unsigned long *count = (long *)data; | 266 | long *count = (long *)data; |
267 | long old_count = *count; | ||
267 | 268 | ||
268 | if (!*count) | 269 | /* |
269 | return 0; | 270 | * Tracing gets disabled (or enabled) once per count. |
271 | * This function can be called at the same time on multiple CPUs. | ||
272 | * It is fine if both disable (or enable) tracing, as disabling | ||
273 | * (or enabling) the second time doesn't do anything as the | ||
274 | * state of the tracer is already disabled (or enabled). | ||
275 | * What needs to be synchronized in this case is that the count | ||
276 | * only gets decremented once, even if the tracer is disabled | ||
277 | * (or enabled) twice, as the second one is really a nop. | ||
278 | * | ||
279 | * The memory barriers guarantee that we only decrement the | ||
280 | * counter once. First the count is read to a local variable | ||
281 | * and a read barrier is used to make sure that it is loaded | ||
282 | * before checking if the tracer is in the state we want. | ||
283 | * If the tracer is not in the state we want, then the count | ||
284 | * is guaranteed to be the old count. | ||
285 | * | ||
286 | * Next the tracer is set to the state we want (disabled or enabled) | ||
287 | * then a write memory barrier is used to make sure that | ||
288 | * the new state is visible before changing the counter by | ||
289 | * one minus the old counter. This guarantees that another CPU | ||
290 | * executing this code will see the new state before seeing | ||
291 | * the new counter value, and would not do anything if the new | ||
292 | * counter is seen. | ||
293 | * | ||
294 | * Note, there is no synchronization between this and a user | ||
295 | * setting the tracing_on file. But we currently don't care | ||
296 | * about that. | ||
297 | */ | ||
298 | if (!old_count) | ||
299 | return; | ||
270 | 300 | ||
271 | if (*count != -1) | 301 | /* Make sure we see count before checking tracing state */ |
272 | (*count)--; | 302 | smp_rmb(); |
273 | 303 | ||
274 | return 1; | 304 | if (on == !!tracing_is_on()) |
305 | return; | ||
306 | |||
307 | if (on) | ||
308 | tracing_on(); | ||
309 | else | ||
310 | tracing_off(); | ||
311 | |||
312 | /* unlimited? */ | ||
313 | if (old_count == -1) | ||
314 | return; | ||
315 | |||
316 | /* Make sure tracing state is visible before updating count */ | ||
317 | smp_wmb(); | ||
318 | |||
319 | *count = old_count - 1; | ||
275 | } | 320 | } |
276 | 321 | ||
277 | static void | 322 | static void |
278 | ftrace_traceon_count(unsigned long ip, unsigned long parent_ip, void **data) | 323 | ftrace_traceon_count(unsigned long ip, unsigned long parent_ip, void **data) |
279 | { | 324 | { |
280 | if (tracing_is_on()) | 325 | update_traceon_count(data, 1); |
281 | return; | ||
282 | |||
283 | if (update_count(data)) | ||
284 | tracing_on(); | ||
285 | } | 326 | } |
286 | 327 | ||
287 | static void | 328 | static void |
288 | ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip, void **data) | 329 | ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip, void **data) |
289 | { | 330 | { |
290 | if (!tracing_is_on()) | 331 | update_traceon_count(data, 0); |
291 | return; | ||
292 | |||
293 | if (update_count(data)) | ||
294 | tracing_off(); | ||
295 | } | 332 | } |
296 | 333 | ||
297 | static void | 334 | static void |
@@ -330,11 +367,49 @@ ftrace_stacktrace(unsigned long ip, unsigned long parent_ip, void **data) | |||
330 | static void | 367 | static void |
331 | ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, void **data) | 368 | ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, void **data) |
332 | { | 369 | { |
333 | if (!tracing_is_on()) | 370 | long *count = (long *)data; |
334 | return; | 371 | long old_count; |
372 | long new_count; | ||
335 | 373 | ||
336 | if (update_count(data)) | 374 | /* |
337 | trace_dump_stack(STACK_SKIP); | 375 | * Stack traces should only execute the number of times the |
376 | * user specified in the counter. | ||
377 | */ | ||
378 | do { | ||
379 | |||
380 | if (!tracing_is_on()) | ||
381 | return; | ||
382 | |||
383 | old_count = *count; | ||
384 | |||
385 | if (!old_count) | ||
386 | return; | ||
387 | |||
388 | /* unlimited? */ | ||
389 | if (old_count == -1) { | ||
390 | trace_dump_stack(STACK_SKIP); | ||
391 | return; | ||
392 | } | ||
393 | |||
394 | new_count = old_count - 1; | ||
395 | new_count = cmpxchg(count, old_count, new_count); | ||
396 | if (new_count == old_count) | ||
397 | trace_dump_stack(STACK_SKIP); | ||
398 | |||
399 | } while (new_count != old_count); | ||
400 | } | ||
401 | |||
402 | static int update_count(void **data) | ||
403 | { | ||
404 | unsigned long *count = (long *)data; | ||
405 | |||
406 | if (!*count) | ||
407 | return 0; | ||
408 | |||
409 | if (*count != -1) | ||
410 | (*count)--; | ||
411 | |||
412 | return 1; | ||
338 | } | 413 | } |
339 | 414 | ||
340 | static void | 415 | static void |
@@ -361,7 +436,7 @@ ftrace_probe_print(const char *name, struct seq_file *m, | |||
361 | seq_printf(m, "%ps:%s", (void *)ip, name); | 436 | seq_printf(m, "%ps:%s", (void *)ip, name); |
362 | 437 | ||
363 | if (count == -1) | 438 | if (count == -1) |
364 | seq_printf(m, ":unlimited\n"); | 439 | seq_puts(m, ":unlimited\n"); |
365 | else | 440 | else |
366 | seq_printf(m, ":count=%ld\n", count); | 441 | seq_printf(m, ":count=%ld\n", count); |
367 | 442 | ||
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index f0a0c982cde3..6c2ab955018c 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c | |||
@@ -107,7 +107,7 @@ enum { | |||
107 | FLAGS_FILL_END = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT, | 107 | FLAGS_FILL_END = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT, |
108 | }; | 108 | }; |
109 | 109 | ||
110 | static enum print_line_t | 110 | static void |
111 | print_graph_duration(unsigned long long duration, struct trace_seq *s, | 111 | print_graph_duration(unsigned long long duration, struct trace_seq *s, |
112 | u32 flags); | 112 | u32 flags); |
113 | 113 | ||
@@ -483,33 +483,24 @@ static int graph_trace_update_thresh(struct trace_array *tr) | |||
483 | 483 | ||
484 | static int max_bytes_for_cpu; | 484 | static int max_bytes_for_cpu; |
485 | 485 | ||
486 | static enum print_line_t | 486 | static void print_graph_cpu(struct trace_seq *s, int cpu) |
487 | print_graph_cpu(struct trace_seq *s, int cpu) | ||
488 | { | 487 | { |
489 | int ret; | ||
490 | |||
491 | /* | 488 | /* |
492 | * Start with a space character - to make it stand out | 489 | * Start with a space character - to make it stand out |
493 | * to the right a bit when trace output is pasted into | 490 | * to the right a bit when trace output is pasted into |
494 | * email: | 491 | * email: |
495 | */ | 492 | */ |
496 | ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu); | 493 | trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu); |
497 | if (!ret) | ||
498 | return TRACE_TYPE_PARTIAL_LINE; | ||
499 | |||
500 | return TRACE_TYPE_HANDLED; | ||
501 | } | 494 | } |
502 | 495 | ||
503 | #define TRACE_GRAPH_PROCINFO_LENGTH 14 | 496 | #define TRACE_GRAPH_PROCINFO_LENGTH 14 |
504 | 497 | ||
505 | static enum print_line_t | 498 | static void print_graph_proc(struct trace_seq *s, pid_t pid) |
506 | print_graph_proc(struct trace_seq *s, pid_t pid) | ||
507 | { | 499 | { |
508 | char comm[TASK_COMM_LEN]; | 500 | char comm[TASK_COMM_LEN]; |
509 | /* sign + log10(MAX_INT) + '\0' */ | 501 | /* sign + log10(MAX_INT) + '\0' */ |
510 | char pid_str[11]; | 502 | char pid_str[11]; |
511 | int spaces = 0; | 503 | int spaces = 0; |
512 | int ret; | ||
513 | int len; | 504 | int len; |
514 | int i; | 505 | int i; |
515 | 506 | ||
@@ -524,56 +515,43 @@ print_graph_proc(struct trace_seq *s, pid_t pid) | |||
524 | spaces = TRACE_GRAPH_PROCINFO_LENGTH - len; | 515 | spaces = TRACE_GRAPH_PROCINFO_LENGTH - len; |
525 | 516 | ||
526 | /* First spaces to align center */ | 517 | /* First spaces to align center */ |
527 | for (i = 0; i < spaces / 2; i++) { | 518 | for (i = 0; i < spaces / 2; i++) |
528 | ret = trace_seq_putc(s, ' '); | 519 | trace_seq_putc(s, ' '); |
529 | if (!ret) | ||
530 | return TRACE_TYPE_PARTIAL_LINE; | ||
531 | } | ||
532 | 520 | ||
533 | ret = trace_seq_printf(s, "%s-%s", comm, pid_str); | 521 | trace_seq_printf(s, "%s-%s", comm, pid_str); |
534 | if (!ret) | ||
535 | return TRACE_TYPE_PARTIAL_LINE; | ||
536 | 522 | ||
537 | /* Last spaces to align center */ | 523 | /* Last spaces to align center */ |
538 | for (i = 0; i < spaces - (spaces / 2); i++) { | 524 | for (i = 0; i < spaces - (spaces / 2); i++) |
539 | ret = trace_seq_putc(s, ' '); | 525 | trace_seq_putc(s, ' '); |
540 | if (!ret) | ||
541 | return TRACE_TYPE_PARTIAL_LINE; | ||
542 | } | ||
543 | return TRACE_TYPE_HANDLED; | ||
544 | } | 526 | } |
545 | 527 | ||
546 | 528 | ||
547 | static enum print_line_t | 529 | static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry) |
548 | print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry) | ||
549 | { | 530 | { |
550 | if (!trace_seq_putc(s, ' ')) | 531 | trace_seq_putc(s, ' '); |
551 | return 0; | 532 | trace_print_lat_fmt(s, entry); |
552 | |||
553 | return trace_print_lat_fmt(s, entry); | ||
554 | } | 533 | } |
555 | 534 | ||
556 | /* If the pid changed since the last trace, output this event */ | 535 | /* If the pid changed since the last trace, output this event */ |
557 | static enum print_line_t | 536 | static void |
558 | verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data) | 537 | verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data) |
559 | { | 538 | { |
560 | pid_t prev_pid; | 539 | pid_t prev_pid; |
561 | pid_t *last_pid; | 540 | pid_t *last_pid; |
562 | int ret; | ||
563 | 541 | ||
564 | if (!data) | 542 | if (!data) |
565 | return TRACE_TYPE_HANDLED; | 543 | return; |
566 | 544 | ||
567 | last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid); | 545 | last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid); |
568 | 546 | ||
569 | if (*last_pid == pid) | 547 | if (*last_pid == pid) |
570 | return TRACE_TYPE_HANDLED; | 548 | return; |
571 | 549 | ||
572 | prev_pid = *last_pid; | 550 | prev_pid = *last_pid; |
573 | *last_pid = pid; | 551 | *last_pid = pid; |
574 | 552 | ||
575 | if (prev_pid == -1) | 553 | if (prev_pid == -1) |
576 | return TRACE_TYPE_HANDLED; | 554 | return; |
577 | /* | 555 | /* |
578 | * Context-switch trace line: | 556 | * Context-switch trace line: |
579 | 557 | ||
@@ -582,33 +560,12 @@ verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data) | |||
582 | ------------------------------------------ | 560 | ------------------------------------------ |
583 | 561 | ||
584 | */ | 562 | */ |
585 | ret = trace_seq_puts(s, | 563 | trace_seq_puts(s, " ------------------------------------------\n"); |
586 | " ------------------------------------------\n"); | 564 | print_graph_cpu(s, cpu); |
587 | if (!ret) | 565 | print_graph_proc(s, prev_pid); |
588 | return TRACE_TYPE_PARTIAL_LINE; | 566 | trace_seq_puts(s, " => "); |
589 | 567 | print_graph_proc(s, pid); | |
590 | ret = print_graph_cpu(s, cpu); | 568 | trace_seq_puts(s, "\n ------------------------------------------\n\n"); |
591 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
592 | return TRACE_TYPE_PARTIAL_LINE; | ||
593 | |||
594 | ret = print_graph_proc(s, prev_pid); | ||
595 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
596 | return TRACE_TYPE_PARTIAL_LINE; | ||
597 | |||
598 | ret = trace_seq_puts(s, " => "); | ||
599 | if (!ret) | ||
600 | return TRACE_TYPE_PARTIAL_LINE; | ||
601 | |||
602 | ret = print_graph_proc(s, pid); | ||
603 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
604 | return TRACE_TYPE_PARTIAL_LINE; | ||
605 | |||
606 | ret = trace_seq_puts(s, | ||
607 | "\n ------------------------------------------\n\n"); | ||
608 | if (!ret) | ||
609 | return TRACE_TYPE_PARTIAL_LINE; | ||
610 | |||
611 | return TRACE_TYPE_HANDLED; | ||
612 | } | 569 | } |
613 | 570 | ||
614 | static struct ftrace_graph_ret_entry * | 571 | static struct ftrace_graph_ret_entry * |
@@ -682,175 +639,122 @@ get_return_for_leaf(struct trace_iterator *iter, | |||
682 | return next; | 639 | return next; |
683 | } | 640 | } |
684 | 641 | ||
685 | static int print_graph_abs_time(u64 t, struct trace_seq *s) | 642 | static void print_graph_abs_time(u64 t, struct trace_seq *s) |
686 | { | 643 | { |
687 | unsigned long usecs_rem; | 644 | unsigned long usecs_rem; |
688 | 645 | ||
689 | usecs_rem = do_div(t, NSEC_PER_SEC); | 646 | usecs_rem = do_div(t, NSEC_PER_SEC); |
690 | usecs_rem /= 1000; | 647 | usecs_rem /= 1000; |
691 | 648 | ||
692 | return trace_seq_printf(s, "%5lu.%06lu | ", | 649 | trace_seq_printf(s, "%5lu.%06lu | ", |
693 | (unsigned long)t, usecs_rem); | 650 | (unsigned long)t, usecs_rem); |
694 | } | 651 | } |
695 | 652 | ||
696 | static enum print_line_t | 653 | static void |
697 | print_graph_irq(struct trace_iterator *iter, unsigned long addr, | 654 | print_graph_irq(struct trace_iterator *iter, unsigned long addr, |
698 | enum trace_type type, int cpu, pid_t pid, u32 flags) | 655 | enum trace_type type, int cpu, pid_t pid, u32 flags) |
699 | { | 656 | { |
700 | int ret; | ||
701 | struct trace_seq *s = &iter->seq; | 657 | struct trace_seq *s = &iter->seq; |
658 | struct trace_entry *ent = iter->ent; | ||
702 | 659 | ||
703 | if (addr < (unsigned long)__irqentry_text_start || | 660 | if (addr < (unsigned long)__irqentry_text_start || |
704 | addr >= (unsigned long)__irqentry_text_end) | 661 | addr >= (unsigned long)__irqentry_text_end) |
705 | return TRACE_TYPE_UNHANDLED; | 662 | return; |
706 | 663 | ||
707 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { | 664 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { |
708 | /* Absolute time */ | 665 | /* Absolute time */ |
709 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) { | 666 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) |
710 | ret = print_graph_abs_time(iter->ts, s); | 667 | print_graph_abs_time(iter->ts, s); |
711 | if (!ret) | ||
712 | return TRACE_TYPE_PARTIAL_LINE; | ||
713 | } | ||
714 | 668 | ||
715 | /* Cpu */ | 669 | /* Cpu */ |
716 | if (flags & TRACE_GRAPH_PRINT_CPU) { | 670 | if (flags & TRACE_GRAPH_PRINT_CPU) |
717 | ret = print_graph_cpu(s, cpu); | 671 | print_graph_cpu(s, cpu); |
718 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
719 | return TRACE_TYPE_PARTIAL_LINE; | ||
720 | } | ||
721 | 672 | ||
722 | /* Proc */ | 673 | /* Proc */ |
723 | if (flags & TRACE_GRAPH_PRINT_PROC) { | 674 | if (flags & TRACE_GRAPH_PRINT_PROC) { |
724 | ret = print_graph_proc(s, pid); | 675 | print_graph_proc(s, pid); |
725 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 676 | trace_seq_puts(s, " | "); |
726 | return TRACE_TYPE_PARTIAL_LINE; | ||
727 | ret = trace_seq_puts(s, " | "); | ||
728 | if (!ret) | ||
729 | return TRACE_TYPE_PARTIAL_LINE; | ||
730 | } | 677 | } |
678 | |||
679 | /* Latency format */ | ||
680 | if (trace_flags & TRACE_ITER_LATENCY_FMT) | ||
681 | print_graph_lat_fmt(s, ent); | ||
731 | } | 682 | } |
732 | 683 | ||
733 | /* No overhead */ | 684 | /* No overhead */ |
734 | ret = print_graph_duration(0, s, flags | FLAGS_FILL_START); | 685 | print_graph_duration(0, s, flags | FLAGS_FILL_START); |
735 | if (ret != TRACE_TYPE_HANDLED) | ||
736 | return ret; | ||
737 | 686 | ||
738 | if (type == TRACE_GRAPH_ENT) | 687 | if (type == TRACE_GRAPH_ENT) |
739 | ret = trace_seq_puts(s, "==========>"); | 688 | trace_seq_puts(s, "==========>"); |
740 | else | 689 | else |
741 | ret = trace_seq_puts(s, "<=========="); | 690 | trace_seq_puts(s, "<=========="); |
742 | |||
743 | if (!ret) | ||
744 | return TRACE_TYPE_PARTIAL_LINE; | ||
745 | 691 | ||
746 | ret = print_graph_duration(0, s, flags | FLAGS_FILL_END); | 692 | print_graph_duration(0, s, flags | FLAGS_FILL_END); |
747 | if (ret != TRACE_TYPE_HANDLED) | 693 | trace_seq_putc(s, '\n'); |
748 | return ret; | ||
749 | |||
750 | ret = trace_seq_putc(s, '\n'); | ||
751 | |||
752 | if (!ret) | ||
753 | return TRACE_TYPE_PARTIAL_LINE; | ||
754 | return TRACE_TYPE_HANDLED; | ||
755 | } | 694 | } |
756 | 695 | ||
757 | enum print_line_t | 696 | void |
758 | trace_print_graph_duration(unsigned long long duration, struct trace_seq *s) | 697 | trace_print_graph_duration(unsigned long long duration, struct trace_seq *s) |
759 | { | 698 | { |
760 | unsigned long nsecs_rem = do_div(duration, 1000); | 699 | unsigned long nsecs_rem = do_div(duration, 1000); |
761 | /* log10(ULONG_MAX) + '\0' */ | 700 | /* log10(ULONG_MAX) + '\0' */ |
762 | char msecs_str[21]; | 701 | char usecs_str[21]; |
763 | char nsecs_str[5]; | 702 | char nsecs_str[5]; |
764 | int ret, len; | 703 | int len; |
765 | int i; | 704 | int i; |
766 | 705 | ||
767 | sprintf(msecs_str, "%lu", (unsigned long) duration); | 706 | sprintf(usecs_str, "%lu", (unsigned long) duration); |
768 | 707 | ||
769 | /* Print msecs */ | 708 | /* Print msecs */ |
770 | ret = trace_seq_printf(s, "%s", msecs_str); | 709 | trace_seq_printf(s, "%s", usecs_str); |
771 | if (!ret) | ||
772 | return TRACE_TYPE_PARTIAL_LINE; | ||
773 | 710 | ||
774 | len = strlen(msecs_str); | 711 | len = strlen(usecs_str); |
775 | 712 | ||
776 | /* Print nsecs (we don't want to exceed 7 numbers) */ | 713 | /* Print nsecs (we don't want to exceed 7 numbers) */ |
777 | if (len < 7) { | 714 | if (len < 7) { |
778 | size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len); | 715 | size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len); |
779 | 716 | ||
780 | snprintf(nsecs_str, slen, "%03lu", nsecs_rem); | 717 | snprintf(nsecs_str, slen, "%03lu", nsecs_rem); |
781 | ret = trace_seq_printf(s, ".%s", nsecs_str); | 718 | trace_seq_printf(s, ".%s", nsecs_str); |
782 | if (!ret) | ||
783 | return TRACE_TYPE_PARTIAL_LINE; | ||
784 | len += strlen(nsecs_str); | 719 | len += strlen(nsecs_str); |
785 | } | 720 | } |
786 | 721 | ||
787 | ret = trace_seq_puts(s, " us "); | 722 | trace_seq_puts(s, " us "); |
788 | if (!ret) | ||
789 | return TRACE_TYPE_PARTIAL_LINE; | ||
790 | 723 | ||
791 | /* Print remaining spaces to fit the row's width */ | 724 | /* Print remaining spaces to fit the row's width */ |
792 | for (i = len; i < 7; i++) { | 725 | for (i = len; i < 7; i++) |
793 | ret = trace_seq_putc(s, ' '); | 726 | trace_seq_putc(s, ' '); |
794 | if (!ret) | ||
795 | return TRACE_TYPE_PARTIAL_LINE; | ||
796 | } | ||
797 | return TRACE_TYPE_HANDLED; | ||
798 | } | 727 | } |
799 | 728 | ||
800 | static enum print_line_t | 729 | static void |
801 | print_graph_duration(unsigned long long duration, struct trace_seq *s, | 730 | print_graph_duration(unsigned long long duration, struct trace_seq *s, |
802 | u32 flags) | 731 | u32 flags) |
803 | { | 732 | { |
804 | int ret = -1; | ||
805 | |||
806 | if (!(flags & TRACE_GRAPH_PRINT_DURATION) || | 733 | if (!(flags & TRACE_GRAPH_PRINT_DURATION) || |
807 | !(trace_flags & TRACE_ITER_CONTEXT_INFO)) | 734 | !(trace_flags & TRACE_ITER_CONTEXT_INFO)) |
808 | return TRACE_TYPE_HANDLED; | 735 | return; |
809 | 736 | ||
810 | /* No real adata, just filling the column with spaces */ | 737 | /* No real adata, just filling the column with spaces */ |
811 | switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) { | 738 | switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) { |
812 | case FLAGS_FILL_FULL: | 739 | case FLAGS_FILL_FULL: |
813 | ret = trace_seq_puts(s, " | "); | 740 | trace_seq_puts(s, " | "); |
814 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; | 741 | return; |
815 | case FLAGS_FILL_START: | 742 | case FLAGS_FILL_START: |
816 | ret = trace_seq_puts(s, " "); | 743 | trace_seq_puts(s, " "); |
817 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; | 744 | return; |
818 | case FLAGS_FILL_END: | 745 | case FLAGS_FILL_END: |
819 | ret = trace_seq_puts(s, " |"); | 746 | trace_seq_puts(s, " |"); |
820 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; | 747 | return; |
821 | } | 748 | } |
822 | 749 | ||
823 | /* Signal a overhead of time execution to the output */ | 750 | /* Signal a overhead of time execution to the output */ |
824 | if (flags & TRACE_GRAPH_PRINT_OVERHEAD) { | 751 | if (flags & TRACE_GRAPH_PRINT_OVERHEAD) |
825 | /* Duration exceeded 100 msecs */ | 752 | trace_seq_printf(s, "%c ", trace_find_mark(duration)); |
826 | if (duration > 100000ULL) | 753 | else |
827 | ret = trace_seq_puts(s, "! "); | 754 | trace_seq_puts(s, " "); |
828 | /* Duration exceeded 10 msecs */ | ||
829 | else if (duration > 10000ULL) | ||
830 | ret = trace_seq_puts(s, "+ "); | ||
831 | } | ||
832 | |||
833 | /* | ||
834 | * The -1 means we either did not exceed the duration tresholds | ||
835 | * or we dont want to print out the overhead. Either way we need | ||
836 | * to fill out the space. | ||
837 | */ | ||
838 | if (ret == -1) | ||
839 | ret = trace_seq_puts(s, " "); | ||
840 | |||
841 | /* Catching here any failure happenned above */ | ||
842 | if (!ret) | ||
843 | return TRACE_TYPE_PARTIAL_LINE; | ||
844 | |||
845 | ret = trace_print_graph_duration(duration, s); | ||
846 | if (ret != TRACE_TYPE_HANDLED) | ||
847 | return ret; | ||
848 | |||
849 | ret = trace_seq_puts(s, "| "); | ||
850 | if (!ret) | ||
851 | return TRACE_TYPE_PARTIAL_LINE; | ||
852 | 755 | ||
853 | return TRACE_TYPE_HANDLED; | 756 | trace_print_graph_duration(duration, s); |
757 | trace_seq_puts(s, "| "); | ||
854 | } | 758 | } |
855 | 759 | ||
856 | /* Case of a leaf function on its call entry */ | 760 | /* Case of a leaf function on its call entry */ |
@@ -864,7 +768,6 @@ print_graph_entry_leaf(struct trace_iterator *iter, | |||
864 | struct ftrace_graph_ret *graph_ret; | 768 | struct ftrace_graph_ret *graph_ret; |
865 | struct ftrace_graph_ent *call; | 769 | struct ftrace_graph_ent *call; |
866 | unsigned long long duration; | 770 | unsigned long long duration; |
867 | int ret; | ||
868 | int i; | 771 | int i; |
869 | 772 | ||
870 | graph_ret = &ret_entry->ret; | 773 | graph_ret = &ret_entry->ret; |
@@ -890,22 +793,15 @@ print_graph_entry_leaf(struct trace_iterator *iter, | |||
890 | } | 793 | } |
891 | 794 | ||
892 | /* Overhead and duration */ | 795 | /* Overhead and duration */ |
893 | ret = print_graph_duration(duration, s, flags); | 796 | print_graph_duration(duration, s, flags); |
894 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
895 | return TRACE_TYPE_PARTIAL_LINE; | ||
896 | 797 | ||
897 | /* Function */ | 798 | /* Function */ |
898 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { | 799 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) |
899 | ret = trace_seq_putc(s, ' '); | 800 | trace_seq_putc(s, ' '); |
900 | if (!ret) | ||
901 | return TRACE_TYPE_PARTIAL_LINE; | ||
902 | } | ||
903 | 801 | ||
904 | ret = trace_seq_printf(s, "%ps();\n", (void *)call->func); | 802 | trace_seq_printf(s, "%ps();\n", (void *)call->func); |
905 | if (!ret) | ||
906 | return TRACE_TYPE_PARTIAL_LINE; | ||
907 | 803 | ||
908 | return TRACE_TYPE_HANDLED; | 804 | return trace_handle_return(s); |
909 | } | 805 | } |
910 | 806 | ||
911 | static enum print_line_t | 807 | static enum print_line_t |
@@ -915,7 +811,6 @@ print_graph_entry_nested(struct trace_iterator *iter, | |||
915 | { | 811 | { |
916 | struct ftrace_graph_ent *call = &entry->graph_ent; | 812 | struct ftrace_graph_ent *call = &entry->graph_ent; |
917 | struct fgraph_data *data = iter->private; | 813 | struct fgraph_data *data = iter->private; |
918 | int ret; | ||
919 | int i; | 814 | int i; |
920 | 815 | ||
921 | if (data) { | 816 | if (data) { |
@@ -931,19 +826,15 @@ print_graph_entry_nested(struct trace_iterator *iter, | |||
931 | } | 826 | } |
932 | 827 | ||
933 | /* No time */ | 828 | /* No time */ |
934 | ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL); | 829 | print_graph_duration(0, s, flags | FLAGS_FILL_FULL); |
935 | if (ret != TRACE_TYPE_HANDLED) | ||
936 | return ret; | ||
937 | 830 | ||
938 | /* Function */ | 831 | /* Function */ |
939 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { | 832 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) |
940 | ret = trace_seq_putc(s, ' '); | 833 | trace_seq_putc(s, ' '); |
941 | if (!ret) | ||
942 | return TRACE_TYPE_PARTIAL_LINE; | ||
943 | } | ||
944 | 834 | ||
945 | ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func); | 835 | trace_seq_printf(s, "%ps() {\n", (void *)call->func); |
946 | if (!ret) | 836 | |
837 | if (trace_seq_has_overflowed(s)) | ||
947 | return TRACE_TYPE_PARTIAL_LINE; | 838 | return TRACE_TYPE_PARTIAL_LINE; |
948 | 839 | ||
949 | /* | 840 | /* |
@@ -953,62 +844,43 @@ print_graph_entry_nested(struct trace_iterator *iter, | |||
953 | return TRACE_TYPE_NO_CONSUME; | 844 | return TRACE_TYPE_NO_CONSUME; |
954 | } | 845 | } |
955 | 846 | ||
956 | static enum print_line_t | 847 | static void |
957 | print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s, | 848 | print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s, |
958 | int type, unsigned long addr, u32 flags) | 849 | int type, unsigned long addr, u32 flags) |
959 | { | 850 | { |
960 | struct fgraph_data *data = iter->private; | 851 | struct fgraph_data *data = iter->private; |
961 | struct trace_entry *ent = iter->ent; | 852 | struct trace_entry *ent = iter->ent; |
962 | int cpu = iter->cpu; | 853 | int cpu = iter->cpu; |
963 | int ret; | ||
964 | 854 | ||
965 | /* Pid */ | 855 | /* Pid */ |
966 | if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE) | 856 | verif_pid(s, ent->pid, cpu, data); |
967 | return TRACE_TYPE_PARTIAL_LINE; | ||
968 | 857 | ||
969 | if (type) { | 858 | if (type) |
970 | /* Interrupt */ | 859 | /* Interrupt */ |
971 | ret = print_graph_irq(iter, addr, type, cpu, ent->pid, flags); | 860 | print_graph_irq(iter, addr, type, cpu, ent->pid, flags); |
972 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
973 | return TRACE_TYPE_PARTIAL_LINE; | ||
974 | } | ||
975 | 861 | ||
976 | if (!(trace_flags & TRACE_ITER_CONTEXT_INFO)) | 862 | if (!(trace_flags & TRACE_ITER_CONTEXT_INFO)) |
977 | return 0; | 863 | return; |
978 | 864 | ||
979 | /* Absolute time */ | 865 | /* Absolute time */ |
980 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) { | 866 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) |
981 | ret = print_graph_abs_time(iter->ts, s); | 867 | print_graph_abs_time(iter->ts, s); |
982 | if (!ret) | ||
983 | return TRACE_TYPE_PARTIAL_LINE; | ||
984 | } | ||
985 | 868 | ||
986 | /* Cpu */ | 869 | /* Cpu */ |
987 | if (flags & TRACE_GRAPH_PRINT_CPU) { | 870 | if (flags & TRACE_GRAPH_PRINT_CPU) |
988 | ret = print_graph_cpu(s, cpu); | 871 | print_graph_cpu(s, cpu); |
989 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
990 | return TRACE_TYPE_PARTIAL_LINE; | ||
991 | } | ||
992 | 872 | ||
993 | /* Proc */ | 873 | /* Proc */ |
994 | if (flags & TRACE_GRAPH_PRINT_PROC) { | 874 | if (flags & TRACE_GRAPH_PRINT_PROC) { |
995 | ret = print_graph_proc(s, ent->pid); | 875 | print_graph_proc(s, ent->pid); |
996 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 876 | trace_seq_puts(s, " | "); |
997 | return TRACE_TYPE_PARTIAL_LINE; | ||
998 | |||
999 | ret = trace_seq_puts(s, " | "); | ||
1000 | if (!ret) | ||
1001 | return TRACE_TYPE_PARTIAL_LINE; | ||
1002 | } | 877 | } |
1003 | 878 | ||
1004 | /* Latency format */ | 879 | /* Latency format */ |
1005 | if (trace_flags & TRACE_ITER_LATENCY_FMT) { | 880 | if (trace_flags & TRACE_ITER_LATENCY_FMT) |
1006 | ret = print_graph_lat_fmt(s, ent); | 881 | print_graph_lat_fmt(s, ent); |
1007 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
1008 | return TRACE_TYPE_PARTIAL_LINE; | ||
1009 | } | ||
1010 | 882 | ||
1011 | return 0; | 883 | return; |
1012 | } | 884 | } |
1013 | 885 | ||
1014 | /* | 886 | /* |
@@ -1126,8 +998,7 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, | |||
1126 | if (check_irq_entry(iter, flags, call->func, call->depth)) | 998 | if (check_irq_entry(iter, flags, call->func, call->depth)) |
1127 | return TRACE_TYPE_HANDLED; | 999 | return TRACE_TYPE_HANDLED; |
1128 | 1000 | ||
1129 | if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags)) | 1001 | print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags); |
1130 | return TRACE_TYPE_PARTIAL_LINE; | ||
1131 | 1002 | ||
1132 | leaf_ret = get_return_for_leaf(iter, field); | 1003 | leaf_ret = get_return_for_leaf(iter, field); |
1133 | if (leaf_ret) | 1004 | if (leaf_ret) |
@@ -1160,7 +1031,6 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, | |||
1160 | pid_t pid = ent->pid; | 1031 | pid_t pid = ent->pid; |
1161 | int cpu = iter->cpu; | 1032 | int cpu = iter->cpu; |
1162 | int func_match = 1; | 1033 | int func_match = 1; |
1163 | int ret; | ||
1164 | int i; | 1034 | int i; |
1165 | 1035 | ||
1166 | if (check_irq_return(iter, flags, trace->depth)) | 1036 | if (check_irq_return(iter, flags, trace->depth)) |
@@ -1186,20 +1056,14 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, | |||
1186 | } | 1056 | } |
1187 | } | 1057 | } |
1188 | 1058 | ||
1189 | if (print_graph_prologue(iter, s, 0, 0, flags)) | 1059 | print_graph_prologue(iter, s, 0, 0, flags); |
1190 | return TRACE_TYPE_PARTIAL_LINE; | ||
1191 | 1060 | ||
1192 | /* Overhead and duration */ | 1061 | /* Overhead and duration */ |
1193 | ret = print_graph_duration(duration, s, flags); | 1062 | print_graph_duration(duration, s, flags); |
1194 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
1195 | return TRACE_TYPE_PARTIAL_LINE; | ||
1196 | 1063 | ||
1197 | /* Closing brace */ | 1064 | /* Closing brace */ |
1198 | for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) { | 1065 | for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) |
1199 | ret = trace_seq_putc(s, ' '); | 1066 | trace_seq_putc(s, ' '); |
1200 | if (!ret) | ||
1201 | return TRACE_TYPE_PARTIAL_LINE; | ||
1202 | } | ||
1203 | 1067 | ||
1204 | /* | 1068 | /* |
1205 | * If the return function does not have a matching entry, | 1069 | * If the return function does not have a matching entry, |
@@ -1208,30 +1072,20 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, | |||
1208 | * belongs to, write out the function name. Always do | 1072 | * belongs to, write out the function name. Always do |
1209 | * that if the funcgraph-tail option is enabled. | 1073 | * that if the funcgraph-tail option is enabled. |
1210 | */ | 1074 | */ |
1211 | if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL)) { | 1075 | if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL)) |
1212 | ret = trace_seq_puts(s, "}\n"); | 1076 | trace_seq_puts(s, "}\n"); |
1213 | if (!ret) | 1077 | else |
1214 | return TRACE_TYPE_PARTIAL_LINE; | 1078 | trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func); |
1215 | } else { | ||
1216 | ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func); | ||
1217 | if (!ret) | ||
1218 | return TRACE_TYPE_PARTIAL_LINE; | ||
1219 | } | ||
1220 | 1079 | ||
1221 | /* Overrun */ | 1080 | /* Overrun */ |
1222 | if (flags & TRACE_GRAPH_PRINT_OVERRUN) { | 1081 | if (flags & TRACE_GRAPH_PRINT_OVERRUN) |
1223 | ret = trace_seq_printf(s, " (Overruns: %lu)\n", | 1082 | trace_seq_printf(s, " (Overruns: %lu)\n", |
1224 | trace->overrun); | 1083 | trace->overrun); |
1225 | if (!ret) | ||
1226 | return TRACE_TYPE_PARTIAL_LINE; | ||
1227 | } | ||
1228 | 1084 | ||
1229 | ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, | 1085 | print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, |
1230 | cpu, pid, flags); | 1086 | cpu, pid, flags); |
1231 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
1232 | return TRACE_TYPE_PARTIAL_LINE; | ||
1233 | 1087 | ||
1234 | return TRACE_TYPE_HANDLED; | 1088 | return trace_handle_return(s); |
1235 | } | 1089 | } |
1236 | 1090 | ||
1237 | static enum print_line_t | 1091 | static enum print_line_t |
@@ -1248,26 +1102,18 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent, | |||
1248 | if (data) | 1102 | if (data) |
1249 | depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth; | 1103 | depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth; |
1250 | 1104 | ||
1251 | if (print_graph_prologue(iter, s, 0, 0, flags)) | 1105 | print_graph_prologue(iter, s, 0, 0, flags); |
1252 | return TRACE_TYPE_PARTIAL_LINE; | ||
1253 | 1106 | ||
1254 | /* No time */ | 1107 | /* No time */ |
1255 | ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL); | 1108 | print_graph_duration(0, s, flags | FLAGS_FILL_FULL); |
1256 | if (ret != TRACE_TYPE_HANDLED) | ||
1257 | return ret; | ||
1258 | 1109 | ||
1259 | /* Indentation */ | 1110 | /* Indentation */ |
1260 | if (depth > 0) | 1111 | if (depth > 0) |
1261 | for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) { | 1112 | for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) |
1262 | ret = trace_seq_putc(s, ' '); | 1113 | trace_seq_putc(s, ' '); |
1263 | if (!ret) | ||
1264 | return TRACE_TYPE_PARTIAL_LINE; | ||
1265 | } | ||
1266 | 1114 | ||
1267 | /* The comment */ | 1115 | /* The comment */ |
1268 | ret = trace_seq_puts(s, "/* "); | 1116 | trace_seq_puts(s, "/* "); |
1269 | if (!ret) | ||
1270 | return TRACE_TYPE_PARTIAL_LINE; | ||
1271 | 1117 | ||
1272 | switch (iter->ent->type) { | 1118 | switch (iter->ent->type) { |
1273 | case TRACE_BPRINT: | 1119 | case TRACE_BPRINT: |
@@ -1296,11 +1142,9 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent, | |||
1296 | s->len--; | 1142 | s->len--; |
1297 | } | 1143 | } |
1298 | 1144 | ||
1299 | ret = trace_seq_puts(s, " */\n"); | 1145 | trace_seq_puts(s, " */\n"); |
1300 | if (!ret) | ||
1301 | return TRACE_TYPE_PARTIAL_LINE; | ||
1302 | 1146 | ||
1303 | return TRACE_TYPE_HANDLED; | 1147 | return trace_handle_return(s); |
1304 | } | 1148 | } |
1305 | 1149 | ||
1306 | 1150 | ||
@@ -1407,32 +1251,32 @@ static void __print_graph_headers_flags(struct seq_file *s, u32 flags) | |||
1407 | print_lat_header(s, flags); | 1251 | print_lat_header(s, flags); |
1408 | 1252 | ||
1409 | /* 1st line */ | 1253 | /* 1st line */ |
1410 | seq_printf(s, "#"); | 1254 | seq_putc(s, '#'); |
1411 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) | 1255 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) |
1412 | seq_printf(s, " TIME "); | 1256 | seq_puts(s, " TIME "); |
1413 | if (flags & TRACE_GRAPH_PRINT_CPU) | 1257 | if (flags & TRACE_GRAPH_PRINT_CPU) |
1414 | seq_printf(s, " CPU"); | 1258 | seq_puts(s, " CPU"); |
1415 | if (flags & TRACE_GRAPH_PRINT_PROC) | 1259 | if (flags & TRACE_GRAPH_PRINT_PROC) |
1416 | seq_printf(s, " TASK/PID "); | 1260 | seq_puts(s, " TASK/PID "); |
1417 | if (lat) | 1261 | if (lat) |
1418 | seq_printf(s, "||||"); | 1262 | seq_puts(s, "||||"); |
1419 | if (flags & TRACE_GRAPH_PRINT_DURATION) | 1263 | if (flags & TRACE_GRAPH_PRINT_DURATION) |
1420 | seq_printf(s, " DURATION "); | 1264 | seq_puts(s, " DURATION "); |
1421 | seq_printf(s, " FUNCTION CALLS\n"); | 1265 | seq_puts(s, " FUNCTION CALLS\n"); |
1422 | 1266 | ||
1423 | /* 2nd line */ | 1267 | /* 2nd line */ |
1424 | seq_printf(s, "#"); | 1268 | seq_putc(s, '#'); |
1425 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) | 1269 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) |
1426 | seq_printf(s, " | "); | 1270 | seq_puts(s, " | "); |
1427 | if (flags & TRACE_GRAPH_PRINT_CPU) | 1271 | if (flags & TRACE_GRAPH_PRINT_CPU) |
1428 | seq_printf(s, " | "); | 1272 | seq_puts(s, " | "); |
1429 | if (flags & TRACE_GRAPH_PRINT_PROC) | 1273 | if (flags & TRACE_GRAPH_PRINT_PROC) |
1430 | seq_printf(s, " | | "); | 1274 | seq_puts(s, " | | "); |
1431 | if (lat) | 1275 | if (lat) |
1432 | seq_printf(s, "||||"); | 1276 | seq_puts(s, "||||"); |
1433 | if (flags & TRACE_GRAPH_PRINT_DURATION) | 1277 | if (flags & TRACE_GRAPH_PRINT_DURATION) |
1434 | seq_printf(s, " | | "); | 1278 | seq_puts(s, " | | "); |
1435 | seq_printf(s, " | | | |\n"); | 1279 | seq_puts(s, " | | | |\n"); |
1436 | } | 1280 | } |
1437 | 1281 | ||
1438 | static void print_graph_headers(struct seq_file *s) | 1282 | static void print_graph_headers(struct seq_file *s) |
diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c index bd90e1b06088..b0b1c44e923a 100644 --- a/kernel/trace/trace_kdb.c +++ b/kernel/trace/trace_kdb.c | |||
@@ -20,10 +20,12 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file) | |||
20 | { | 20 | { |
21 | /* use static because iter can be a bit big for the stack */ | 21 | /* use static because iter can be a bit big for the stack */ |
22 | static struct trace_iterator iter; | 22 | static struct trace_iterator iter; |
23 | static struct ring_buffer_iter *buffer_iter[CONFIG_NR_CPUS]; | ||
23 | unsigned int old_userobj; | 24 | unsigned int old_userobj; |
24 | int cnt = 0, cpu; | 25 | int cnt = 0, cpu; |
25 | 26 | ||
26 | trace_init_global_iter(&iter); | 27 | trace_init_global_iter(&iter); |
28 | iter.buffer_iter = buffer_iter; | ||
27 | 29 | ||
28 | for_each_tracing_cpu(cpu) { | 30 | for_each_tracing_cpu(cpu) { |
29 | atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); | 31 | atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); |
@@ -57,19 +59,19 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file) | |||
57 | ring_buffer_read_start(iter.buffer_iter[cpu_file]); | 59 | ring_buffer_read_start(iter.buffer_iter[cpu_file]); |
58 | tracing_iter_reset(&iter, cpu_file); | 60 | tracing_iter_reset(&iter, cpu_file); |
59 | } | 61 | } |
60 | if (!trace_empty(&iter)) | 62 | |
61 | trace_find_next_entry_inc(&iter); | 63 | while (trace_find_next_entry_inc(&iter)) { |
62 | while (!trace_empty(&iter)) { | ||
63 | if (!cnt) | 64 | if (!cnt) |
64 | kdb_printf("---------------------------------\n"); | 65 | kdb_printf("---------------------------------\n"); |
65 | cnt++; | 66 | cnt++; |
66 | 67 | ||
67 | if (trace_find_next_entry_inc(&iter) != NULL && !skip_lines) | 68 | if (!skip_lines) { |
68 | print_trace_line(&iter); | 69 | print_trace_line(&iter); |
69 | if (!skip_lines) | ||
70 | trace_printk_seq(&iter.seq); | 70 | trace_printk_seq(&iter.seq); |
71 | else | 71 | } else { |
72 | skip_lines--; | 72 | skip_lines--; |
73 | } | ||
74 | |||
73 | if (KDB_FLAG(CMD_INTERRUPT)) | 75 | if (KDB_FLAG(CMD_INTERRUPT)) |
74 | goto out; | 76 | goto out; |
75 | } | 77 | } |
@@ -86,9 +88,12 @@ out: | |||
86 | atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); | 88 | atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); |
87 | } | 89 | } |
88 | 90 | ||
89 | for_each_tracing_cpu(cpu) | 91 | for_each_tracing_cpu(cpu) { |
90 | if (iter.buffer_iter[cpu]) | 92 | if (iter.buffer_iter[cpu]) { |
91 | ring_buffer_read_finish(iter.buffer_iter[cpu]); | 93 | ring_buffer_read_finish(iter.buffer_iter[cpu]); |
94 | iter.buffer_iter[cpu] = NULL; | ||
95 | } | ||
96 | } | ||
92 | } | 97 | } |
93 | 98 | ||
94 | /* | 99 | /* |
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 282f6e4e5539..5edb518be345 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c | |||
@@ -826,7 +826,7 @@ static int probes_seq_show(struct seq_file *m, void *v) | |||
826 | struct trace_kprobe *tk = v; | 826 | struct trace_kprobe *tk = v; |
827 | int i; | 827 | int i; |
828 | 828 | ||
829 | seq_printf(m, "%c", trace_kprobe_is_return(tk) ? 'r' : 'p'); | 829 | seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p'); |
830 | seq_printf(m, ":%s/%s", tk->tp.call.class->system, | 830 | seq_printf(m, ":%s/%s", tk->tp.call.class->system, |
831 | ftrace_event_name(&tk->tp.call)); | 831 | ftrace_event_name(&tk->tp.call)); |
832 | 832 | ||
@@ -840,7 +840,7 @@ static int probes_seq_show(struct seq_file *m, void *v) | |||
840 | 840 | ||
841 | for (i = 0; i < tk->tp.nr_args; i++) | 841 | for (i = 0; i < tk->tp.nr_args; i++) |
842 | seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm); | 842 | seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm); |
843 | seq_printf(m, "\n"); | 843 | seq_putc(m, '\n'); |
844 | 844 | ||
845 | return 0; | 845 | return 0; |
846 | } | 846 | } |
@@ -1024,27 +1024,22 @@ print_kprobe_event(struct trace_iterator *iter, int flags, | |||
1024 | field = (struct kprobe_trace_entry_head *)iter->ent; | 1024 | field = (struct kprobe_trace_entry_head *)iter->ent; |
1025 | tp = container_of(event, struct trace_probe, call.event); | 1025 | tp = container_of(event, struct trace_probe, call.event); |
1026 | 1026 | ||
1027 | if (!trace_seq_printf(s, "%s: (", ftrace_event_name(&tp->call))) | 1027 | trace_seq_printf(s, "%s: (", ftrace_event_name(&tp->call)); |
1028 | goto partial; | ||
1029 | 1028 | ||
1030 | if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET)) | 1029 | if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET)) |
1031 | goto partial; | 1030 | goto out; |
1032 | 1031 | ||
1033 | if (!trace_seq_puts(s, ")")) | 1032 | trace_seq_putc(s, ')'); |
1034 | goto partial; | ||
1035 | 1033 | ||
1036 | data = (u8 *)&field[1]; | 1034 | data = (u8 *)&field[1]; |
1037 | for (i = 0; i < tp->nr_args; i++) | 1035 | for (i = 0; i < tp->nr_args; i++) |
1038 | if (!tp->args[i].type->print(s, tp->args[i].name, | 1036 | if (!tp->args[i].type->print(s, tp->args[i].name, |
1039 | data + tp->args[i].offset, field)) | 1037 | data + tp->args[i].offset, field)) |
1040 | goto partial; | 1038 | goto out; |
1041 | |||
1042 | if (!trace_seq_puts(s, "\n")) | ||
1043 | goto partial; | ||
1044 | 1039 | ||
1045 | return TRACE_TYPE_HANDLED; | 1040 | trace_seq_putc(s, '\n'); |
1046 | partial: | 1041 | out: |
1047 | return TRACE_TYPE_PARTIAL_LINE; | 1042 | return trace_handle_return(s); |
1048 | } | 1043 | } |
1049 | 1044 | ||
1050 | static enum print_line_t | 1045 | static enum print_line_t |
@@ -1060,33 +1055,28 @@ print_kretprobe_event(struct trace_iterator *iter, int flags, | |||
1060 | field = (struct kretprobe_trace_entry_head *)iter->ent; | 1055 | field = (struct kretprobe_trace_entry_head *)iter->ent; |
1061 | tp = container_of(event, struct trace_probe, call.event); | 1056 | tp = container_of(event, struct trace_probe, call.event); |
1062 | 1057 | ||
1063 | if (!trace_seq_printf(s, "%s: (", ftrace_event_name(&tp->call))) | 1058 | trace_seq_printf(s, "%s: (", ftrace_event_name(&tp->call)); |
1064 | goto partial; | ||
1065 | 1059 | ||
1066 | if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET)) | 1060 | if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET)) |
1067 | goto partial; | 1061 | goto out; |
1068 | 1062 | ||
1069 | if (!trace_seq_puts(s, " <- ")) | 1063 | trace_seq_puts(s, " <- "); |
1070 | goto partial; | ||
1071 | 1064 | ||
1072 | if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET)) | 1065 | if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET)) |
1073 | goto partial; | 1066 | goto out; |
1074 | 1067 | ||
1075 | if (!trace_seq_puts(s, ")")) | 1068 | trace_seq_putc(s, ')'); |
1076 | goto partial; | ||
1077 | 1069 | ||
1078 | data = (u8 *)&field[1]; | 1070 | data = (u8 *)&field[1]; |
1079 | for (i = 0; i < tp->nr_args; i++) | 1071 | for (i = 0; i < tp->nr_args; i++) |
1080 | if (!tp->args[i].type->print(s, tp->args[i].name, | 1072 | if (!tp->args[i].type->print(s, tp->args[i].name, |
1081 | data + tp->args[i].offset, field)) | 1073 | data + tp->args[i].offset, field)) |
1082 | goto partial; | 1074 | goto out; |
1083 | 1075 | ||
1084 | if (!trace_seq_puts(s, "\n")) | 1076 | trace_seq_putc(s, '\n'); |
1085 | goto partial; | ||
1086 | 1077 | ||
1087 | return TRACE_TYPE_HANDLED; | 1078 | out: |
1088 | partial: | 1079 | return trace_handle_return(s); |
1089 | return TRACE_TYPE_PARTIAL_LINE; | ||
1090 | } | 1080 | } |
1091 | 1081 | ||
1092 | 1082 | ||
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c index 0abd9b863474..7a9ba62e9fef 100644 --- a/kernel/trace/trace_mmiotrace.c +++ b/kernel/trace/trace_mmiotrace.c | |||
@@ -59,17 +59,15 @@ static void mmio_trace_start(struct trace_array *tr) | |||
59 | mmio_reset_data(tr); | 59 | mmio_reset_data(tr); |
60 | } | 60 | } |
61 | 61 | ||
62 | static int mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev) | 62 | static void mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev) |
63 | { | 63 | { |
64 | int ret = 0; | ||
65 | int i; | 64 | int i; |
66 | resource_size_t start, end; | 65 | resource_size_t start, end; |
67 | const struct pci_driver *drv = pci_dev_driver(dev); | 66 | const struct pci_driver *drv = pci_dev_driver(dev); |
68 | 67 | ||
69 | /* XXX: incomplete checks for trace_seq_printf() return value */ | 68 | trace_seq_printf(s, "PCIDEV %02x%02x %04x%04x %x", |
70 | ret += trace_seq_printf(s, "PCIDEV %02x%02x %04x%04x %x", | 69 | dev->bus->number, dev->devfn, |
71 | dev->bus->number, dev->devfn, | 70 | dev->vendor, dev->device, dev->irq); |
72 | dev->vendor, dev->device, dev->irq); | ||
73 | /* | 71 | /* |
74 | * XXX: is pci_resource_to_user() appropriate, since we are | 72 | * XXX: is pci_resource_to_user() appropriate, since we are |
75 | * supposed to interpret the __ioremap() phys_addr argument based on | 73 | * supposed to interpret the __ioremap() phys_addr argument based on |
@@ -77,21 +75,20 @@ static int mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev) | |||
77 | */ | 75 | */ |
78 | for (i = 0; i < 7; i++) { | 76 | for (i = 0; i < 7; i++) { |
79 | pci_resource_to_user(dev, i, &dev->resource[i], &start, &end); | 77 | pci_resource_to_user(dev, i, &dev->resource[i], &start, &end); |
80 | ret += trace_seq_printf(s, " %llx", | 78 | trace_seq_printf(s, " %llx", |
81 | (unsigned long long)(start | | 79 | (unsigned long long)(start | |
82 | (dev->resource[i].flags & PCI_REGION_FLAG_MASK))); | 80 | (dev->resource[i].flags & PCI_REGION_FLAG_MASK))); |
83 | } | 81 | } |
84 | for (i = 0; i < 7; i++) { | 82 | for (i = 0; i < 7; i++) { |
85 | pci_resource_to_user(dev, i, &dev->resource[i], &start, &end); | 83 | pci_resource_to_user(dev, i, &dev->resource[i], &start, &end); |
86 | ret += trace_seq_printf(s, " %llx", | 84 | trace_seq_printf(s, " %llx", |
87 | dev->resource[i].start < dev->resource[i].end ? | 85 | dev->resource[i].start < dev->resource[i].end ? |
88 | (unsigned long long)(end - start) + 1 : 0); | 86 | (unsigned long long)(end - start) + 1 : 0); |
89 | } | 87 | } |
90 | if (drv) | 88 | if (drv) |
91 | ret += trace_seq_printf(s, " %s\n", drv->name); | 89 | trace_seq_printf(s, " %s\n", drv->name); |
92 | else | 90 | else |
93 | ret += trace_seq_puts(s, " \n"); | 91 | trace_seq_puts(s, " \n"); |
94 | return ret; | ||
95 | } | 92 | } |
96 | 93 | ||
97 | static void destroy_header_iter(struct header_iter *hiter) | 94 | static void destroy_header_iter(struct header_iter *hiter) |
@@ -179,28 +176,27 @@ static enum print_line_t mmio_print_rw(struct trace_iterator *iter) | |||
179 | unsigned long long t = ns2usecs(iter->ts); | 176 | unsigned long long t = ns2usecs(iter->ts); |
180 | unsigned long usec_rem = do_div(t, USEC_PER_SEC); | 177 | unsigned long usec_rem = do_div(t, USEC_PER_SEC); |
181 | unsigned secs = (unsigned long)t; | 178 | unsigned secs = (unsigned long)t; |
182 | int ret = 1; | ||
183 | 179 | ||
184 | trace_assign_type(field, entry); | 180 | trace_assign_type(field, entry); |
185 | rw = &field->rw; | 181 | rw = &field->rw; |
186 | 182 | ||
187 | switch (rw->opcode) { | 183 | switch (rw->opcode) { |
188 | case MMIO_READ: | 184 | case MMIO_READ: |
189 | ret = trace_seq_printf(s, | 185 | trace_seq_printf(s, |
190 | "R %d %u.%06lu %d 0x%llx 0x%lx 0x%lx %d\n", | 186 | "R %d %u.%06lu %d 0x%llx 0x%lx 0x%lx %d\n", |
191 | rw->width, secs, usec_rem, rw->map_id, | 187 | rw->width, secs, usec_rem, rw->map_id, |
192 | (unsigned long long)rw->phys, | 188 | (unsigned long long)rw->phys, |
193 | rw->value, rw->pc, 0); | 189 | rw->value, rw->pc, 0); |
194 | break; | 190 | break; |
195 | case MMIO_WRITE: | 191 | case MMIO_WRITE: |
196 | ret = trace_seq_printf(s, | 192 | trace_seq_printf(s, |
197 | "W %d %u.%06lu %d 0x%llx 0x%lx 0x%lx %d\n", | 193 | "W %d %u.%06lu %d 0x%llx 0x%lx 0x%lx %d\n", |
198 | rw->width, secs, usec_rem, rw->map_id, | 194 | rw->width, secs, usec_rem, rw->map_id, |
199 | (unsigned long long)rw->phys, | 195 | (unsigned long long)rw->phys, |
200 | rw->value, rw->pc, 0); | 196 | rw->value, rw->pc, 0); |
201 | break; | 197 | break; |
202 | case MMIO_UNKNOWN_OP: | 198 | case MMIO_UNKNOWN_OP: |
203 | ret = trace_seq_printf(s, | 199 | trace_seq_printf(s, |
204 | "UNKNOWN %u.%06lu %d 0x%llx %02lx,%02lx," | 200 | "UNKNOWN %u.%06lu %d 0x%llx %02lx,%02lx," |
205 | "%02lx 0x%lx %d\n", | 201 | "%02lx 0x%lx %d\n", |
206 | secs, usec_rem, rw->map_id, | 202 | secs, usec_rem, rw->map_id, |
@@ -209,12 +205,11 @@ static enum print_line_t mmio_print_rw(struct trace_iterator *iter) | |||
209 | (rw->value >> 0) & 0xff, rw->pc, 0); | 205 | (rw->value >> 0) & 0xff, rw->pc, 0); |
210 | break; | 206 | break; |
211 | default: | 207 | default: |
212 | ret = trace_seq_puts(s, "rw what?\n"); | 208 | trace_seq_puts(s, "rw what?\n"); |
213 | break; | 209 | break; |
214 | } | 210 | } |
215 | if (ret) | 211 | |
216 | return TRACE_TYPE_HANDLED; | 212 | return trace_handle_return(s); |
217 | return TRACE_TYPE_PARTIAL_LINE; | ||
218 | } | 213 | } |
219 | 214 | ||
220 | static enum print_line_t mmio_print_map(struct trace_iterator *iter) | 215 | static enum print_line_t mmio_print_map(struct trace_iterator *iter) |
@@ -226,31 +221,29 @@ static enum print_line_t mmio_print_map(struct trace_iterator *iter) | |||
226 | unsigned long long t = ns2usecs(iter->ts); | 221 | unsigned long long t = ns2usecs(iter->ts); |
227 | unsigned long usec_rem = do_div(t, USEC_PER_SEC); | 222 | unsigned long usec_rem = do_div(t, USEC_PER_SEC); |
228 | unsigned secs = (unsigned long)t; | 223 | unsigned secs = (unsigned long)t; |
229 | int ret; | ||
230 | 224 | ||
231 | trace_assign_type(field, entry); | 225 | trace_assign_type(field, entry); |
232 | m = &field->map; | 226 | m = &field->map; |
233 | 227 | ||
234 | switch (m->opcode) { | 228 | switch (m->opcode) { |
235 | case MMIO_PROBE: | 229 | case MMIO_PROBE: |
236 | ret = trace_seq_printf(s, | 230 | trace_seq_printf(s, |
237 | "MAP %u.%06lu %d 0x%llx 0x%lx 0x%lx 0x%lx %d\n", | 231 | "MAP %u.%06lu %d 0x%llx 0x%lx 0x%lx 0x%lx %d\n", |
238 | secs, usec_rem, m->map_id, | 232 | secs, usec_rem, m->map_id, |
239 | (unsigned long long)m->phys, m->virt, m->len, | 233 | (unsigned long long)m->phys, m->virt, m->len, |
240 | 0UL, 0); | 234 | 0UL, 0); |
241 | break; | 235 | break; |
242 | case MMIO_UNPROBE: | 236 | case MMIO_UNPROBE: |
243 | ret = trace_seq_printf(s, | 237 | trace_seq_printf(s, |
244 | "UNMAP %u.%06lu %d 0x%lx %d\n", | 238 | "UNMAP %u.%06lu %d 0x%lx %d\n", |
245 | secs, usec_rem, m->map_id, 0UL, 0); | 239 | secs, usec_rem, m->map_id, 0UL, 0); |
246 | break; | 240 | break; |
247 | default: | 241 | default: |
248 | ret = trace_seq_puts(s, "map what?\n"); | 242 | trace_seq_puts(s, "map what?\n"); |
249 | break; | 243 | break; |
250 | } | 244 | } |
251 | if (ret) | 245 | |
252 | return TRACE_TYPE_HANDLED; | 246 | return trace_handle_return(s); |
253 | return TRACE_TYPE_PARTIAL_LINE; | ||
254 | } | 247 | } |
255 | 248 | ||
256 | static enum print_line_t mmio_print_mark(struct trace_iterator *iter) | 249 | static enum print_line_t mmio_print_mark(struct trace_iterator *iter) |
@@ -262,14 +255,11 @@ static enum print_line_t mmio_print_mark(struct trace_iterator *iter) | |||
262 | unsigned long long t = ns2usecs(iter->ts); | 255 | unsigned long long t = ns2usecs(iter->ts); |
263 | unsigned long usec_rem = do_div(t, USEC_PER_SEC); | 256 | unsigned long usec_rem = do_div(t, USEC_PER_SEC); |
264 | unsigned secs = (unsigned long)t; | 257 | unsigned secs = (unsigned long)t; |
265 | int ret; | ||
266 | 258 | ||
267 | /* The trailing newline must be in the message. */ | 259 | /* The trailing newline must be in the message. */ |
268 | ret = trace_seq_printf(s, "MARK %u.%06lu %s", secs, usec_rem, msg); | 260 | trace_seq_printf(s, "MARK %u.%06lu %s", secs, usec_rem, msg); |
269 | if (!ret) | ||
270 | return TRACE_TYPE_PARTIAL_LINE; | ||
271 | 261 | ||
272 | return TRACE_TYPE_HANDLED; | 262 | return trace_handle_return(s); |
273 | } | 263 | } |
274 | 264 | ||
275 | static enum print_line_t mmio_print_line(struct trace_iterator *iter) | 265 | static enum print_line_t mmio_print_line(struct trace_iterator *iter) |
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index c6977d5a9b12..b77b9a697619 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c | |||
@@ -25,15 +25,12 @@ enum print_line_t trace_print_bputs_msg_only(struct trace_iterator *iter) | |||
25 | struct trace_seq *s = &iter->seq; | 25 | struct trace_seq *s = &iter->seq; |
26 | struct trace_entry *entry = iter->ent; | 26 | struct trace_entry *entry = iter->ent; |
27 | struct bputs_entry *field; | 27 | struct bputs_entry *field; |
28 | int ret; | ||
29 | 28 | ||
30 | trace_assign_type(field, entry); | 29 | trace_assign_type(field, entry); |
31 | 30 | ||
32 | ret = trace_seq_puts(s, field->str); | 31 | trace_seq_puts(s, field->str); |
33 | if (!ret) | ||
34 | return TRACE_TYPE_PARTIAL_LINE; | ||
35 | 32 | ||
36 | return TRACE_TYPE_HANDLED; | 33 | return trace_handle_return(s); |
37 | } | 34 | } |
38 | 35 | ||
39 | enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter) | 36 | enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter) |
@@ -41,15 +38,12 @@ enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter) | |||
41 | struct trace_seq *s = &iter->seq; | 38 | struct trace_seq *s = &iter->seq; |
42 | struct trace_entry *entry = iter->ent; | 39 | struct trace_entry *entry = iter->ent; |
43 | struct bprint_entry *field; | 40 | struct bprint_entry *field; |
44 | int ret; | ||
45 | 41 | ||
46 | trace_assign_type(field, entry); | 42 | trace_assign_type(field, entry); |
47 | 43 | ||
48 | ret = trace_seq_bprintf(s, field->fmt, field->buf); | 44 | trace_seq_bprintf(s, field->fmt, field->buf); |
49 | if (!ret) | ||
50 | return TRACE_TYPE_PARTIAL_LINE; | ||
51 | 45 | ||
52 | return TRACE_TYPE_HANDLED; | 46 | return trace_handle_return(s); |
53 | } | 47 | } |
54 | 48 | ||
55 | enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter) | 49 | enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter) |
@@ -57,15 +51,12 @@ enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter) | |||
57 | struct trace_seq *s = &iter->seq; | 51 | struct trace_seq *s = &iter->seq; |
58 | struct trace_entry *entry = iter->ent; | 52 | struct trace_entry *entry = iter->ent; |
59 | struct print_entry *field; | 53 | struct print_entry *field; |
60 | int ret; | ||
61 | 54 | ||
62 | trace_assign_type(field, entry); | 55 | trace_assign_type(field, entry); |
63 | 56 | ||
64 | ret = trace_seq_puts(s, field->buf); | 57 | trace_seq_puts(s, field->buf); |
65 | if (!ret) | ||
66 | return TRACE_TYPE_PARTIAL_LINE; | ||
67 | 58 | ||
68 | return TRACE_TYPE_HANDLED; | 59 | return trace_handle_return(s); |
69 | } | 60 | } |
70 | 61 | ||
71 | const char * | 62 | const char * |
@@ -124,7 +115,7 @@ ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val, | |||
124 | 115 | ||
125 | if (ret == (const char *)(trace_seq_buffer_ptr(p))) | 116 | if (ret == (const char *)(trace_seq_buffer_ptr(p))) |
126 | trace_seq_printf(p, "0x%lx", val); | 117 | trace_seq_printf(p, "0x%lx", val); |
127 | 118 | ||
128 | trace_seq_putc(p, 0); | 119 | trace_seq_putc(p, 0); |
129 | 120 | ||
130 | return ret; | 121 | return ret; |
@@ -193,7 +184,6 @@ int ftrace_raw_output_prep(struct trace_iterator *iter, | |||
193 | struct trace_seq *s = &iter->seq; | 184 | struct trace_seq *s = &iter->seq; |
194 | struct trace_seq *p = &iter->tmp_seq; | 185 | struct trace_seq *p = &iter->tmp_seq; |
195 | struct trace_entry *entry; | 186 | struct trace_entry *entry; |
196 | int ret; | ||
197 | 187 | ||
198 | event = container_of(trace_event, struct ftrace_event_call, event); | 188 | event = container_of(trace_event, struct ftrace_event_call, event); |
199 | entry = iter->ent; | 189 | entry = iter->ent; |
@@ -204,11 +194,9 @@ int ftrace_raw_output_prep(struct trace_iterator *iter, | |||
204 | } | 194 | } |
205 | 195 | ||
206 | trace_seq_init(p); | 196 | trace_seq_init(p); |
207 | ret = trace_seq_printf(s, "%s: ", ftrace_event_name(event)); | 197 | trace_seq_printf(s, "%s: ", ftrace_event_name(event)); |
208 | if (!ret) | ||
209 | return TRACE_TYPE_PARTIAL_LINE; | ||
210 | 198 | ||
211 | return 0; | 199 | return trace_handle_return(s); |
212 | } | 200 | } |
213 | EXPORT_SYMBOL(ftrace_raw_output_prep); | 201 | EXPORT_SYMBOL(ftrace_raw_output_prep); |
214 | 202 | ||
@@ -216,18 +204,11 @@ static int ftrace_output_raw(struct trace_iterator *iter, char *name, | |||
216 | char *fmt, va_list ap) | 204 | char *fmt, va_list ap) |
217 | { | 205 | { |
218 | struct trace_seq *s = &iter->seq; | 206 | struct trace_seq *s = &iter->seq; |
219 | int ret; | ||
220 | |||
221 | ret = trace_seq_printf(s, "%s: ", name); | ||
222 | if (!ret) | ||
223 | return TRACE_TYPE_PARTIAL_LINE; | ||
224 | |||
225 | ret = trace_seq_vprintf(s, fmt, ap); | ||
226 | 207 | ||
227 | if (!ret) | 208 | trace_seq_printf(s, "%s: ", name); |
228 | return TRACE_TYPE_PARTIAL_LINE; | 209 | trace_seq_vprintf(s, fmt, ap); |
229 | 210 | ||
230 | return TRACE_TYPE_HANDLED; | 211 | return trace_handle_return(s); |
231 | } | 212 | } |
232 | 213 | ||
233 | int ftrace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...) | 214 | int ftrace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...) |
@@ -260,7 +241,7 @@ static inline const char *kretprobed(const char *name) | |||
260 | } | 241 | } |
261 | #endif /* CONFIG_KRETPROBES */ | 242 | #endif /* CONFIG_KRETPROBES */ |
262 | 243 | ||
263 | static int | 244 | static void |
264 | seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address) | 245 | seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address) |
265 | { | 246 | { |
266 | #ifdef CONFIG_KALLSYMS | 247 | #ifdef CONFIG_KALLSYMS |
@@ -271,12 +252,11 @@ seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address) | |||
271 | 252 | ||
272 | name = kretprobed(str); | 253 | name = kretprobed(str); |
273 | 254 | ||
274 | return trace_seq_printf(s, fmt, name); | 255 | trace_seq_printf(s, fmt, name); |
275 | #endif | 256 | #endif |
276 | return 1; | ||
277 | } | 257 | } |
278 | 258 | ||
279 | static int | 259 | static void |
280 | seq_print_sym_offset(struct trace_seq *s, const char *fmt, | 260 | seq_print_sym_offset(struct trace_seq *s, const char *fmt, |
281 | unsigned long address) | 261 | unsigned long address) |
282 | { | 262 | { |
@@ -287,9 +267,8 @@ seq_print_sym_offset(struct trace_seq *s, const char *fmt, | |||
287 | sprint_symbol(str, address); | 267 | sprint_symbol(str, address); |
288 | name = kretprobed(str); | 268 | name = kretprobed(str); |
289 | 269 | ||
290 | return trace_seq_printf(s, fmt, name); | 270 | trace_seq_printf(s, fmt, name); |
291 | #endif | 271 | #endif |
292 | return 1; | ||
293 | } | 272 | } |
294 | 273 | ||
295 | #ifndef CONFIG_64BIT | 274 | #ifndef CONFIG_64BIT |
@@ -320,14 +299,14 @@ int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm, | |||
320 | if (file) { | 299 | if (file) { |
321 | ret = trace_seq_path(s, &file->f_path); | 300 | ret = trace_seq_path(s, &file->f_path); |
322 | if (ret) | 301 | if (ret) |
323 | ret = trace_seq_printf(s, "[+0x%lx]", | 302 | trace_seq_printf(s, "[+0x%lx]", |
324 | ip - vmstart); | 303 | ip - vmstart); |
325 | } | 304 | } |
326 | up_read(&mm->mmap_sem); | 305 | up_read(&mm->mmap_sem); |
327 | } | 306 | } |
328 | if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file)) | 307 | if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file)) |
329 | ret = trace_seq_printf(s, " <" IP_FMT ">", ip); | 308 | trace_seq_printf(s, " <" IP_FMT ">", ip); |
330 | return ret; | 309 | return !trace_seq_has_overflowed(s); |
331 | } | 310 | } |
332 | 311 | ||
333 | int | 312 | int |
@@ -335,7 +314,6 @@ seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s, | |||
335 | unsigned long sym_flags) | 314 | unsigned long sym_flags) |
336 | { | 315 | { |
337 | struct mm_struct *mm = NULL; | 316 | struct mm_struct *mm = NULL; |
338 | int ret = 1; | ||
339 | unsigned int i; | 317 | unsigned int i; |
340 | 318 | ||
341 | if (trace_flags & TRACE_ITER_SYM_USEROBJ) { | 319 | if (trace_flags & TRACE_ITER_SYM_USEROBJ) { |
@@ -354,48 +332,45 @@ seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s, | |||
354 | for (i = 0; i < FTRACE_STACK_ENTRIES; i++) { | 332 | for (i = 0; i < FTRACE_STACK_ENTRIES; i++) { |
355 | unsigned long ip = entry->caller[i]; | 333 | unsigned long ip = entry->caller[i]; |
356 | 334 | ||
357 | if (ip == ULONG_MAX || !ret) | 335 | if (ip == ULONG_MAX || trace_seq_has_overflowed(s)) |
358 | break; | 336 | break; |
359 | if (ret) | 337 | |
360 | ret = trace_seq_puts(s, " => "); | 338 | trace_seq_puts(s, " => "); |
339 | |||
361 | if (!ip) { | 340 | if (!ip) { |
362 | if (ret) | 341 | trace_seq_puts(s, "??"); |
363 | ret = trace_seq_puts(s, "??"); | 342 | trace_seq_putc(s, '\n'); |
364 | if (ret) | ||
365 | ret = trace_seq_putc(s, '\n'); | ||
366 | continue; | 343 | continue; |
367 | } | 344 | } |
368 | if (!ret) | 345 | |
369 | break; | 346 | seq_print_user_ip(s, mm, ip, sym_flags); |
370 | if (ret) | 347 | trace_seq_putc(s, '\n'); |
371 | ret = seq_print_user_ip(s, mm, ip, sym_flags); | ||
372 | ret = trace_seq_putc(s, '\n'); | ||
373 | } | 348 | } |
374 | 349 | ||
375 | if (mm) | 350 | if (mm) |
376 | mmput(mm); | 351 | mmput(mm); |
377 | return ret; | 352 | |
353 | return !trace_seq_has_overflowed(s); | ||
378 | } | 354 | } |
379 | 355 | ||
380 | int | 356 | int |
381 | seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags) | 357 | seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags) |
382 | { | 358 | { |
383 | int ret; | 359 | if (!ip) { |
384 | 360 | trace_seq_putc(s, '0'); | |
385 | if (!ip) | 361 | goto out; |
386 | return trace_seq_putc(s, '0'); | 362 | } |
387 | 363 | ||
388 | if (sym_flags & TRACE_ITER_SYM_OFFSET) | 364 | if (sym_flags & TRACE_ITER_SYM_OFFSET) |
389 | ret = seq_print_sym_offset(s, "%s", ip); | 365 | seq_print_sym_offset(s, "%s", ip); |
390 | else | 366 | else |
391 | ret = seq_print_sym_short(s, "%s", ip); | 367 | seq_print_sym_short(s, "%s", ip); |
392 | |||
393 | if (!ret) | ||
394 | return 0; | ||
395 | 368 | ||
396 | if (sym_flags & TRACE_ITER_SYM_ADDR) | 369 | if (sym_flags & TRACE_ITER_SYM_ADDR) |
397 | ret = trace_seq_printf(s, " <" IP_FMT ">", ip); | 370 | trace_seq_printf(s, " <" IP_FMT ">", ip); |
398 | return ret; | 371 | |
372 | out: | ||
373 | return !trace_seq_has_overflowed(s); | ||
399 | } | 374 | } |
400 | 375 | ||
401 | /** | 376 | /** |
@@ -413,7 +388,6 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry) | |||
413 | char irqs_off; | 388 | char irqs_off; |
414 | int hardirq; | 389 | int hardirq; |
415 | int softirq; | 390 | int softirq; |
416 | int ret; | ||
417 | 391 | ||
418 | hardirq = entry->flags & TRACE_FLAG_HARDIRQ; | 392 | hardirq = entry->flags & TRACE_FLAG_HARDIRQ; |
419 | softirq = entry->flags & TRACE_FLAG_SOFTIRQ; | 393 | softirq = entry->flags & TRACE_FLAG_SOFTIRQ; |
@@ -445,16 +419,15 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry) | |||
445 | softirq ? 's' : | 419 | softirq ? 's' : |
446 | '.'; | 420 | '.'; |
447 | 421 | ||
448 | if (!trace_seq_printf(s, "%c%c%c", | 422 | trace_seq_printf(s, "%c%c%c", |
449 | irqs_off, need_resched, hardsoft_irq)) | 423 | irqs_off, need_resched, hardsoft_irq); |
450 | return 0; | ||
451 | 424 | ||
452 | if (entry->preempt_count) | 425 | if (entry->preempt_count) |
453 | ret = trace_seq_printf(s, "%x", entry->preempt_count); | 426 | trace_seq_printf(s, "%x", entry->preempt_count); |
454 | else | 427 | else |
455 | ret = trace_seq_putc(s, '.'); | 428 | trace_seq_putc(s, '.'); |
456 | 429 | ||
457 | return ret; | 430 | return !trace_seq_has_overflowed(s); |
458 | } | 431 | } |
459 | 432 | ||
460 | static int | 433 | static int |
@@ -464,14 +437,38 @@ lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu) | |||
464 | 437 | ||
465 | trace_find_cmdline(entry->pid, comm); | 438 | trace_find_cmdline(entry->pid, comm); |
466 | 439 | ||
467 | if (!trace_seq_printf(s, "%8.8s-%-5d %3d", | 440 | trace_seq_printf(s, "%8.8s-%-5d %3d", |
468 | comm, entry->pid, cpu)) | 441 | comm, entry->pid, cpu); |
469 | return 0; | ||
470 | 442 | ||
471 | return trace_print_lat_fmt(s, entry); | 443 | return trace_print_lat_fmt(s, entry); |
472 | } | 444 | } |
473 | 445 | ||
474 | static unsigned long preempt_mark_thresh_us = 100; | 446 | #undef MARK |
447 | #define MARK(v, s) {.val = v, .sym = s} | ||
448 | /* trace overhead mark */ | ||
449 | static const struct trace_mark { | ||
450 | unsigned long long val; /* unit: nsec */ | ||
451 | char sym; | ||
452 | } mark[] = { | ||
453 | MARK(1000000000ULL , '$'), /* 1 sec */ | ||
454 | MARK(1000000ULL , '#'), /* 1000 usecs */ | ||
455 | MARK(100000ULL , '!'), /* 100 usecs */ | ||
456 | MARK(10000ULL , '+'), /* 10 usecs */ | ||
457 | }; | ||
458 | #undef MARK | ||
459 | |||
460 | char trace_find_mark(unsigned long long d) | ||
461 | { | ||
462 | int i; | ||
463 | int size = ARRAY_SIZE(mark); | ||
464 | |||
465 | for (i = 0; i < size; i++) { | ||
466 | if (d >= mark[i].val) | ||
467 | break; | ||
468 | } | ||
469 | |||
470 | return (i == size) ? ' ' : mark[i].sym; | ||
471 | } | ||
475 | 472 | ||
476 | static int | 473 | static int |
477 | lat_print_timestamp(struct trace_iterator *iter, u64 next_ts) | 474 | lat_print_timestamp(struct trace_iterator *iter, u64 next_ts) |
@@ -493,24 +490,28 @@ lat_print_timestamp(struct trace_iterator *iter, u64 next_ts) | |||
493 | unsigned long rel_usec = do_div(rel_ts, USEC_PER_MSEC); | 490 | unsigned long rel_usec = do_div(rel_ts, USEC_PER_MSEC); |
494 | unsigned long rel_msec = (unsigned long)rel_ts; | 491 | unsigned long rel_msec = (unsigned long)rel_ts; |
495 | 492 | ||
496 | return trace_seq_printf( | 493 | trace_seq_printf( |
497 | s, "[%08llx] %ld.%03ldms (+%ld.%03ldms): ", | 494 | s, "[%08llx] %ld.%03ldms (+%ld.%03ldms): ", |
498 | ns2usecs(iter->ts), | 495 | ns2usecs(iter->ts), |
499 | abs_msec, abs_usec, | 496 | abs_msec, abs_usec, |
500 | rel_msec, rel_usec); | 497 | rel_msec, rel_usec); |
498 | |||
501 | } else if (verbose && !in_ns) { | 499 | } else if (verbose && !in_ns) { |
502 | return trace_seq_printf( | 500 | trace_seq_printf( |
503 | s, "[%016llx] %lld (+%lld): ", | 501 | s, "[%016llx] %lld (+%lld): ", |
504 | iter->ts, abs_ts, rel_ts); | 502 | iter->ts, abs_ts, rel_ts); |
503 | |||
505 | } else if (!verbose && in_ns) { | 504 | } else if (!verbose && in_ns) { |
506 | return trace_seq_printf( | 505 | trace_seq_printf( |
507 | s, " %4lldus%c: ", | 506 | s, " %4lldus%c: ", |
508 | abs_ts, | 507 | abs_ts, |
509 | rel_ts > preempt_mark_thresh_us ? '!' : | 508 | trace_find_mark(rel_ts * NSEC_PER_USEC)); |
510 | rel_ts > 1 ? '+' : ' '); | 509 | |
511 | } else { /* !verbose && !in_ns */ | 510 | } else { /* !verbose && !in_ns */ |
512 | return trace_seq_printf(s, " %4lld: ", abs_ts); | 511 | trace_seq_printf(s, " %4lld: ", abs_ts); |
513 | } | 512 | } |
513 | |||
514 | return !trace_seq_has_overflowed(s); | ||
514 | } | 515 | } |
515 | 516 | ||
516 | int trace_print_context(struct trace_iterator *iter) | 517 | int trace_print_context(struct trace_iterator *iter) |
@@ -520,34 +521,29 @@ int trace_print_context(struct trace_iterator *iter) | |||
520 | unsigned long long t; | 521 | unsigned long long t; |
521 | unsigned long secs, usec_rem; | 522 | unsigned long secs, usec_rem; |
522 | char comm[TASK_COMM_LEN]; | 523 | char comm[TASK_COMM_LEN]; |
523 | int ret; | ||
524 | 524 | ||
525 | trace_find_cmdline(entry->pid, comm); | 525 | trace_find_cmdline(entry->pid, comm); |
526 | 526 | ||
527 | ret = trace_seq_printf(s, "%16s-%-5d [%03d] ", | 527 | trace_seq_printf(s, "%16s-%-5d [%03d] ", |
528 | comm, entry->pid, iter->cpu); | 528 | comm, entry->pid, iter->cpu); |
529 | if (!ret) | ||
530 | return 0; | ||
531 | 529 | ||
532 | if (trace_flags & TRACE_ITER_IRQ_INFO) { | 530 | if (trace_flags & TRACE_ITER_IRQ_INFO) |
533 | ret = trace_print_lat_fmt(s, entry); | 531 | trace_print_lat_fmt(s, entry); |
534 | if (!ret) | ||
535 | return 0; | ||
536 | } | ||
537 | 532 | ||
538 | if (iter->iter_flags & TRACE_FILE_TIME_IN_NS) { | 533 | if (iter->iter_flags & TRACE_FILE_TIME_IN_NS) { |
539 | t = ns2usecs(iter->ts); | 534 | t = ns2usecs(iter->ts); |
540 | usec_rem = do_div(t, USEC_PER_SEC); | 535 | usec_rem = do_div(t, USEC_PER_SEC); |
541 | secs = (unsigned long)t; | 536 | secs = (unsigned long)t; |
542 | return trace_seq_printf(s, " %5lu.%06lu: ", secs, usec_rem); | 537 | trace_seq_printf(s, " %5lu.%06lu: ", secs, usec_rem); |
543 | } else | 538 | } else |
544 | return trace_seq_printf(s, " %12llu: ", iter->ts); | 539 | trace_seq_printf(s, " %12llu: ", iter->ts); |
540 | |||
541 | return !trace_seq_has_overflowed(s); | ||
545 | } | 542 | } |
546 | 543 | ||
547 | int trace_print_lat_context(struct trace_iterator *iter) | 544 | int trace_print_lat_context(struct trace_iterator *iter) |
548 | { | 545 | { |
549 | u64 next_ts; | 546 | u64 next_ts; |
550 | int ret; | ||
551 | /* trace_find_next_entry will reset ent_size */ | 547 | /* trace_find_next_entry will reset ent_size */ |
552 | int ent_size = iter->ent_size; | 548 | int ent_size = iter->ent_size; |
553 | struct trace_seq *s = &iter->seq; | 549 | struct trace_seq *s = &iter->seq; |
@@ -567,18 +563,17 @@ int trace_print_lat_context(struct trace_iterator *iter) | |||
567 | 563 | ||
568 | trace_find_cmdline(entry->pid, comm); | 564 | trace_find_cmdline(entry->pid, comm); |
569 | 565 | ||
570 | ret = trace_seq_printf( | 566 | trace_seq_printf( |
571 | s, "%16s %5d %3d %d %08x %08lx ", | 567 | s, "%16s %5d %3d %d %08x %08lx ", |
572 | comm, entry->pid, iter->cpu, entry->flags, | 568 | comm, entry->pid, iter->cpu, entry->flags, |
573 | entry->preempt_count, iter->idx); | 569 | entry->preempt_count, iter->idx); |
574 | } else { | 570 | } else { |
575 | ret = lat_print_generic(s, entry, iter->cpu); | 571 | lat_print_generic(s, entry, iter->cpu); |
576 | } | 572 | } |
577 | 573 | ||
578 | if (ret) | 574 | lat_print_timestamp(iter, next_ts); |
579 | ret = lat_print_timestamp(iter, next_ts); | ||
580 | 575 | ||
581 | return ret; | 576 | return !trace_seq_has_overflowed(s); |
582 | } | 577 | } |
583 | 578 | ||
584 | static const char state_to_char[] = TASK_STATE_TO_CHAR_STR; | 579 | static const char state_to_char[] = TASK_STATE_TO_CHAR_STR; |
@@ -692,7 +687,7 @@ int register_ftrace_event(struct trace_event *event) | |||
692 | goto out; | 687 | goto out; |
693 | 688 | ||
694 | } else { | 689 | } else { |
695 | 690 | ||
696 | event->type = next_event_type++; | 691 | event->type = next_event_type++; |
697 | list = &ftrace_event_list; | 692 | list = &ftrace_event_list; |
698 | } | 693 | } |
@@ -764,10 +759,9 @@ EXPORT_SYMBOL_GPL(unregister_ftrace_event); | |||
764 | enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags, | 759 | enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags, |
765 | struct trace_event *event) | 760 | struct trace_event *event) |
766 | { | 761 | { |
767 | if (!trace_seq_printf(&iter->seq, "type: %d\n", iter->ent->type)) | 762 | trace_seq_printf(&iter->seq, "type: %d\n", iter->ent->type); |
768 | return TRACE_TYPE_PARTIAL_LINE; | ||
769 | 763 | ||
770 | return TRACE_TYPE_HANDLED; | 764 | return trace_handle_return(&iter->seq); |
771 | } | 765 | } |
772 | 766 | ||
773 | /* TRACE_FN */ | 767 | /* TRACE_FN */ |
@@ -779,24 +773,16 @@ static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags, | |||
779 | 773 | ||
780 | trace_assign_type(field, iter->ent); | 774 | trace_assign_type(field, iter->ent); |
781 | 775 | ||
782 | if (!seq_print_ip_sym(s, field->ip, flags)) | 776 | seq_print_ip_sym(s, field->ip, flags); |
783 | goto partial; | ||
784 | 777 | ||
785 | if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) { | 778 | if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) { |
786 | if (!trace_seq_puts(s, " <-")) | 779 | trace_seq_puts(s, " <-"); |
787 | goto partial; | 780 | seq_print_ip_sym(s, field->parent_ip, flags); |
788 | if (!seq_print_ip_sym(s, | ||
789 | field->parent_ip, | ||
790 | flags)) | ||
791 | goto partial; | ||
792 | } | 781 | } |
793 | if (!trace_seq_putc(s, '\n')) | ||
794 | goto partial; | ||
795 | 782 | ||
796 | return TRACE_TYPE_HANDLED; | 783 | trace_seq_putc(s, '\n'); |
797 | 784 | ||
798 | partial: | 785 | return trace_handle_return(s); |
799 | return TRACE_TYPE_PARTIAL_LINE; | ||
800 | } | 786 | } |
801 | 787 | ||
802 | static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags, | 788 | static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags, |
@@ -806,12 +792,11 @@ static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags, | |||
806 | 792 | ||
807 | trace_assign_type(field, iter->ent); | 793 | trace_assign_type(field, iter->ent); |
808 | 794 | ||
809 | if (!trace_seq_printf(&iter->seq, "%lx %lx\n", | 795 | trace_seq_printf(&iter->seq, "%lx %lx\n", |
810 | field->ip, | 796 | field->ip, |
811 | field->parent_ip)) | 797 | field->parent_ip); |
812 | return TRACE_TYPE_PARTIAL_LINE; | ||
813 | 798 | ||
814 | return TRACE_TYPE_HANDLED; | 799 | return trace_handle_return(&iter->seq); |
815 | } | 800 | } |
816 | 801 | ||
817 | static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags, | 802 | static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags, |
@@ -822,10 +807,10 @@ static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags, | |||
822 | 807 | ||
823 | trace_assign_type(field, iter->ent); | 808 | trace_assign_type(field, iter->ent); |
824 | 809 | ||
825 | SEQ_PUT_HEX_FIELD_RET(s, field->ip); | 810 | SEQ_PUT_HEX_FIELD(s, field->ip); |
826 | SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip); | 811 | SEQ_PUT_HEX_FIELD(s, field->parent_ip); |
827 | 812 | ||
828 | return TRACE_TYPE_HANDLED; | 813 | return trace_handle_return(s); |
829 | } | 814 | } |
830 | 815 | ||
831 | static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags, | 816 | static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags, |
@@ -836,10 +821,10 @@ static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags, | |||
836 | 821 | ||
837 | trace_assign_type(field, iter->ent); | 822 | trace_assign_type(field, iter->ent); |
838 | 823 | ||
839 | SEQ_PUT_FIELD_RET(s, field->ip); | 824 | SEQ_PUT_FIELD(s, field->ip); |
840 | SEQ_PUT_FIELD_RET(s, field->parent_ip); | 825 | SEQ_PUT_FIELD(s, field->parent_ip); |
841 | 826 | ||
842 | return TRACE_TYPE_HANDLED; | 827 | return trace_handle_return(s); |
843 | } | 828 | } |
844 | 829 | ||
845 | static struct trace_event_functions trace_fn_funcs = { | 830 | static struct trace_event_functions trace_fn_funcs = { |
@@ -868,18 +853,17 @@ static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter, | |||
868 | T = task_state_char(field->next_state); | 853 | T = task_state_char(field->next_state); |
869 | S = task_state_char(field->prev_state); | 854 | S = task_state_char(field->prev_state); |
870 | trace_find_cmdline(field->next_pid, comm); | 855 | trace_find_cmdline(field->next_pid, comm); |
871 | if (!trace_seq_printf(&iter->seq, | 856 | trace_seq_printf(&iter->seq, |
872 | " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n", | 857 | " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n", |
873 | field->prev_pid, | 858 | field->prev_pid, |
874 | field->prev_prio, | 859 | field->prev_prio, |
875 | S, delim, | 860 | S, delim, |
876 | field->next_cpu, | 861 | field->next_cpu, |
877 | field->next_pid, | 862 | field->next_pid, |
878 | field->next_prio, | 863 | field->next_prio, |
879 | T, comm)) | 864 | T, comm); |
880 | return TRACE_TYPE_PARTIAL_LINE; | 865 | |
881 | 866 | return trace_handle_return(&iter->seq); | |
882 | return TRACE_TYPE_HANDLED; | ||
883 | } | 867 | } |
884 | 868 | ||
885 | static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags, | 869 | static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags, |
@@ -904,17 +888,16 @@ static int trace_ctxwake_raw(struct trace_iterator *iter, char S) | |||
904 | if (!S) | 888 | if (!S) |
905 | S = task_state_char(field->prev_state); | 889 | S = task_state_char(field->prev_state); |
906 | T = task_state_char(field->next_state); | 890 | T = task_state_char(field->next_state); |
907 | if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n", | 891 | trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n", |
908 | field->prev_pid, | 892 | field->prev_pid, |
909 | field->prev_prio, | 893 | field->prev_prio, |
910 | S, | 894 | S, |
911 | field->next_cpu, | 895 | field->next_cpu, |
912 | field->next_pid, | 896 | field->next_pid, |
913 | field->next_prio, | 897 | field->next_prio, |
914 | T)) | 898 | T); |
915 | return TRACE_TYPE_PARTIAL_LINE; | 899 | |
916 | 900 | return trace_handle_return(&iter->seq); | |
917 | return TRACE_TYPE_HANDLED; | ||
918 | } | 901 | } |
919 | 902 | ||
920 | static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags, | 903 | static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags, |
@@ -942,15 +925,15 @@ static int trace_ctxwake_hex(struct trace_iterator *iter, char S) | |||
942 | S = task_state_char(field->prev_state); | 925 | S = task_state_char(field->prev_state); |
943 | T = task_state_char(field->next_state); | 926 | T = task_state_char(field->next_state); |
944 | 927 | ||
945 | SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid); | 928 | SEQ_PUT_HEX_FIELD(s, field->prev_pid); |
946 | SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio); | 929 | SEQ_PUT_HEX_FIELD(s, field->prev_prio); |
947 | SEQ_PUT_HEX_FIELD_RET(s, S); | 930 | SEQ_PUT_HEX_FIELD(s, S); |
948 | SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu); | 931 | SEQ_PUT_HEX_FIELD(s, field->next_cpu); |
949 | SEQ_PUT_HEX_FIELD_RET(s, field->next_pid); | 932 | SEQ_PUT_HEX_FIELD(s, field->next_pid); |
950 | SEQ_PUT_HEX_FIELD_RET(s, field->next_prio); | 933 | SEQ_PUT_HEX_FIELD(s, field->next_prio); |
951 | SEQ_PUT_HEX_FIELD_RET(s, T); | 934 | SEQ_PUT_HEX_FIELD(s, T); |
952 | 935 | ||
953 | return TRACE_TYPE_HANDLED; | 936 | return trace_handle_return(s); |
954 | } | 937 | } |
955 | 938 | ||
956 | static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags, | 939 | static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags, |
@@ -973,14 +956,15 @@ static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter, | |||
973 | 956 | ||
974 | trace_assign_type(field, iter->ent); | 957 | trace_assign_type(field, iter->ent); |
975 | 958 | ||
976 | SEQ_PUT_FIELD_RET(s, field->prev_pid); | 959 | SEQ_PUT_FIELD(s, field->prev_pid); |
977 | SEQ_PUT_FIELD_RET(s, field->prev_prio); | 960 | SEQ_PUT_FIELD(s, field->prev_prio); |
978 | SEQ_PUT_FIELD_RET(s, field->prev_state); | 961 | SEQ_PUT_FIELD(s, field->prev_state); |
979 | SEQ_PUT_FIELD_RET(s, field->next_pid); | 962 | SEQ_PUT_FIELD(s, field->next_cpu); |
980 | SEQ_PUT_FIELD_RET(s, field->next_prio); | 963 | SEQ_PUT_FIELD(s, field->next_pid); |
981 | SEQ_PUT_FIELD_RET(s, field->next_state); | 964 | SEQ_PUT_FIELD(s, field->next_prio); |
965 | SEQ_PUT_FIELD(s, field->next_state); | ||
982 | 966 | ||
983 | return TRACE_TYPE_HANDLED; | 967 | return trace_handle_return(s); |
984 | } | 968 | } |
985 | 969 | ||
986 | static struct trace_event_functions trace_ctx_funcs = { | 970 | static struct trace_event_functions trace_ctx_funcs = { |
@@ -1020,23 +1004,19 @@ static enum print_line_t trace_stack_print(struct trace_iterator *iter, | |||
1020 | trace_assign_type(field, iter->ent); | 1004 | trace_assign_type(field, iter->ent); |
1021 | end = (unsigned long *)((long)iter->ent + iter->ent_size); | 1005 | end = (unsigned long *)((long)iter->ent + iter->ent_size); |
1022 | 1006 | ||
1023 | if (!trace_seq_puts(s, "<stack trace>\n")) | 1007 | trace_seq_puts(s, "<stack trace>\n"); |
1024 | goto partial; | ||
1025 | 1008 | ||
1026 | for (p = field->caller; p && *p != ULONG_MAX && p < end; p++) { | 1009 | for (p = field->caller; p && *p != ULONG_MAX && p < end; p++) { |
1027 | if (!trace_seq_puts(s, " => ")) | ||
1028 | goto partial; | ||
1029 | 1010 | ||
1030 | if (!seq_print_ip_sym(s, *p, flags)) | 1011 | if (trace_seq_has_overflowed(s)) |
1031 | goto partial; | 1012 | break; |
1032 | if (!trace_seq_putc(s, '\n')) | ||
1033 | goto partial; | ||
1034 | } | ||
1035 | 1013 | ||
1036 | return TRACE_TYPE_HANDLED; | 1014 | trace_seq_puts(s, " => "); |
1015 | seq_print_ip_sym(s, *p, flags); | ||
1016 | trace_seq_putc(s, '\n'); | ||
1017 | } | ||
1037 | 1018 | ||
1038 | partial: | 1019 | return trace_handle_return(s); |
1039 | return TRACE_TYPE_PARTIAL_LINE; | ||
1040 | } | 1020 | } |
1041 | 1021 | ||
1042 | static struct trace_event_functions trace_stack_funcs = { | 1022 | static struct trace_event_functions trace_stack_funcs = { |
@@ -1057,16 +1037,10 @@ static enum print_line_t trace_user_stack_print(struct trace_iterator *iter, | |||
1057 | 1037 | ||
1058 | trace_assign_type(field, iter->ent); | 1038 | trace_assign_type(field, iter->ent); |
1059 | 1039 | ||
1060 | if (!trace_seq_puts(s, "<user stack trace>\n")) | 1040 | trace_seq_puts(s, "<user stack trace>\n"); |
1061 | goto partial; | 1041 | seq_print_userip_objs(field, s, flags); |
1062 | |||
1063 | if (!seq_print_userip_objs(field, s, flags)) | ||
1064 | goto partial; | ||
1065 | |||
1066 | return TRACE_TYPE_HANDLED; | ||
1067 | 1042 | ||
1068 | partial: | 1043 | return trace_handle_return(s); |
1069 | return TRACE_TYPE_PARTIAL_LINE; | ||
1070 | } | 1044 | } |
1071 | 1045 | ||
1072 | static struct trace_event_functions trace_user_stack_funcs = { | 1046 | static struct trace_event_functions trace_user_stack_funcs = { |
@@ -1089,19 +1063,11 @@ trace_bputs_print(struct trace_iterator *iter, int flags, | |||
1089 | 1063 | ||
1090 | trace_assign_type(field, entry); | 1064 | trace_assign_type(field, entry); |
1091 | 1065 | ||
1092 | if (!seq_print_ip_sym(s, field->ip, flags)) | 1066 | seq_print_ip_sym(s, field->ip, flags); |
1093 | goto partial; | 1067 | trace_seq_puts(s, ": "); |
1068 | trace_seq_puts(s, field->str); | ||
1094 | 1069 | ||
1095 | if (!trace_seq_puts(s, ": ")) | 1070 | return trace_handle_return(s); |
1096 | goto partial; | ||
1097 | |||
1098 | if (!trace_seq_puts(s, field->str)) | ||
1099 | goto partial; | ||
1100 | |||
1101 | return TRACE_TYPE_HANDLED; | ||
1102 | |||
1103 | partial: | ||
1104 | return TRACE_TYPE_PARTIAL_LINE; | ||
1105 | } | 1071 | } |
1106 | 1072 | ||
1107 | 1073 | ||
@@ -1114,16 +1080,10 @@ trace_bputs_raw(struct trace_iterator *iter, int flags, | |||
1114 | 1080 | ||
1115 | trace_assign_type(field, iter->ent); | 1081 | trace_assign_type(field, iter->ent); |
1116 | 1082 | ||
1117 | if (!trace_seq_printf(s, ": %lx : ", field->ip)) | 1083 | trace_seq_printf(s, ": %lx : ", field->ip); |
1118 | goto partial; | 1084 | trace_seq_puts(s, field->str); |
1119 | |||
1120 | if (!trace_seq_puts(s, field->str)) | ||
1121 | goto partial; | ||
1122 | 1085 | ||
1123 | return TRACE_TYPE_HANDLED; | 1086 | return trace_handle_return(s); |
1124 | |||
1125 | partial: | ||
1126 | return TRACE_TYPE_PARTIAL_LINE; | ||
1127 | } | 1087 | } |
1128 | 1088 | ||
1129 | static struct trace_event_functions trace_bputs_funcs = { | 1089 | static struct trace_event_functions trace_bputs_funcs = { |
@@ -1147,19 +1107,11 @@ trace_bprint_print(struct trace_iterator *iter, int flags, | |||
1147 | 1107 | ||
1148 | trace_assign_type(field, entry); | 1108 | trace_assign_type(field, entry); |
1149 | 1109 | ||
1150 | if (!seq_print_ip_sym(s, field->ip, flags)) | 1110 | seq_print_ip_sym(s, field->ip, flags); |
1151 | goto partial; | 1111 | trace_seq_puts(s, ": "); |
1152 | 1112 | trace_seq_bprintf(s, field->fmt, field->buf); | |
1153 | if (!trace_seq_puts(s, ": ")) | ||
1154 | goto partial; | ||
1155 | |||
1156 | if (!trace_seq_bprintf(s, field->fmt, field->buf)) | ||
1157 | goto partial; | ||
1158 | 1113 | ||
1159 | return TRACE_TYPE_HANDLED; | 1114 | return trace_handle_return(s); |
1160 | |||
1161 | partial: | ||
1162 | return TRACE_TYPE_PARTIAL_LINE; | ||
1163 | } | 1115 | } |
1164 | 1116 | ||
1165 | 1117 | ||
@@ -1172,16 +1124,10 @@ trace_bprint_raw(struct trace_iterator *iter, int flags, | |||
1172 | 1124 | ||
1173 | trace_assign_type(field, iter->ent); | 1125 | trace_assign_type(field, iter->ent); |
1174 | 1126 | ||
1175 | if (!trace_seq_printf(s, ": %lx : ", field->ip)) | 1127 | trace_seq_printf(s, ": %lx : ", field->ip); |
1176 | goto partial; | 1128 | trace_seq_bprintf(s, field->fmt, field->buf); |
1177 | |||
1178 | if (!trace_seq_bprintf(s, field->fmt, field->buf)) | ||
1179 | goto partial; | ||
1180 | 1129 | ||
1181 | return TRACE_TYPE_HANDLED; | 1130 | return trace_handle_return(s); |
1182 | |||
1183 | partial: | ||
1184 | return TRACE_TYPE_PARTIAL_LINE; | ||
1185 | } | 1131 | } |
1186 | 1132 | ||
1187 | static struct trace_event_functions trace_bprint_funcs = { | 1133 | static struct trace_event_functions trace_bprint_funcs = { |
@@ -1203,16 +1149,10 @@ static enum print_line_t trace_print_print(struct trace_iterator *iter, | |||
1203 | 1149 | ||
1204 | trace_assign_type(field, iter->ent); | 1150 | trace_assign_type(field, iter->ent); |
1205 | 1151 | ||
1206 | if (!seq_print_ip_sym(s, field->ip, flags)) | 1152 | seq_print_ip_sym(s, field->ip, flags); |
1207 | goto partial; | 1153 | trace_seq_printf(s, ": %s", field->buf); |
1208 | |||
1209 | if (!trace_seq_printf(s, ": %s", field->buf)) | ||
1210 | goto partial; | ||
1211 | 1154 | ||
1212 | return TRACE_TYPE_HANDLED; | 1155 | return trace_handle_return(s); |
1213 | |||
1214 | partial: | ||
1215 | return TRACE_TYPE_PARTIAL_LINE; | ||
1216 | } | 1156 | } |
1217 | 1157 | ||
1218 | static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags, | 1158 | static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags, |
@@ -1222,13 +1162,9 @@ static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags, | |||
1222 | 1162 | ||
1223 | trace_assign_type(field, iter->ent); | 1163 | trace_assign_type(field, iter->ent); |
1224 | 1164 | ||
1225 | if (!trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf)) | 1165 | trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf); |
1226 | goto partial; | ||
1227 | |||
1228 | return TRACE_TYPE_HANDLED; | ||
1229 | 1166 | ||
1230 | partial: | 1167 | return trace_handle_return(&iter->seq); |
1231 | return TRACE_TYPE_PARTIAL_LINE; | ||
1232 | } | 1168 | } |
1233 | 1169 | ||
1234 | static struct trace_event_functions trace_print_funcs = { | 1170 | static struct trace_event_functions trace_print_funcs = { |
diff --git a/kernel/trace/trace_output.h b/kernel/trace/trace_output.h index 80b25b585a70..8ef2c40efb3c 100644 --- a/kernel/trace/trace_output.h +++ b/kernel/trace/trace_output.h | |||
@@ -35,17 +35,11 @@ trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry); | |||
35 | extern int __unregister_ftrace_event(struct trace_event *event); | 35 | extern int __unregister_ftrace_event(struct trace_event *event); |
36 | extern struct rw_semaphore trace_event_sem; | 36 | extern struct rw_semaphore trace_event_sem; |
37 | 37 | ||
38 | #define SEQ_PUT_FIELD_RET(s, x) \ | 38 | #define SEQ_PUT_FIELD(s, x) \ |
39 | do { \ | 39 | trace_seq_putmem(s, &(x), sizeof(x)) |
40 | if (!trace_seq_putmem(s, &(x), sizeof(x))) \ | 40 | |
41 | return TRACE_TYPE_PARTIAL_LINE; \ | 41 | #define SEQ_PUT_HEX_FIELD(s, x) \ |
42 | } while (0) | 42 | trace_seq_putmem_hex(s, &(x), sizeof(x)) |
43 | |||
44 | #define SEQ_PUT_HEX_FIELD_RET(s, x) \ | ||
45 | do { \ | ||
46 | if (!trace_seq_putmem_hex(s, &(x), sizeof(x))) \ | ||
47 | return TRACE_TYPE_PARTIAL_LINE; \ | ||
48 | } while (0) | ||
49 | 43 | ||
50 | #endif | 44 | #endif |
51 | 45 | ||
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c index 2900817ba65c..c4e70b6bd7fa 100644 --- a/kernel/trace/trace_printk.c +++ b/kernel/trace/trace_printk.c | |||
@@ -305,7 +305,7 @@ static int t_show(struct seq_file *m, void *v) | |||
305 | seq_puts(m, "\\t"); | 305 | seq_puts(m, "\\t"); |
306 | break; | 306 | break; |
307 | case '\\': | 307 | case '\\': |
308 | seq_puts(m, "\\"); | 308 | seq_putc(m, '\\'); |
309 | break; | 309 | break; |
310 | case '"': | 310 | case '"': |
311 | seq_puts(m, "\\\""); | 311 | seq_puts(m, "\\\""); |
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c index d4b9fc22cd27..b983b2fd2ca1 100644 --- a/kernel/trace/trace_probe.c +++ b/kernel/trace/trace_probe.c | |||
@@ -40,7 +40,8 @@ const char *reserved_field_names[] = { | |||
40 | int PRINT_TYPE_FUNC_NAME(type)(struct trace_seq *s, const char *name, \ | 40 | int PRINT_TYPE_FUNC_NAME(type)(struct trace_seq *s, const char *name, \ |
41 | void *data, void *ent) \ | 41 | void *data, void *ent) \ |
42 | { \ | 42 | { \ |
43 | return trace_seq_printf(s, " %s=" fmt, name, *(type *)data); \ | 43 | trace_seq_printf(s, " %s=" fmt, name, *(type *)data); \ |
44 | return !trace_seq_has_overflowed(s); \ | ||
44 | } \ | 45 | } \ |
45 | const char PRINT_TYPE_FMT_NAME(type)[] = fmt; \ | 46 | const char PRINT_TYPE_FMT_NAME(type)[] = fmt; \ |
46 | NOKPROBE_SYMBOL(PRINT_TYPE_FUNC_NAME(type)); | 47 | NOKPROBE_SYMBOL(PRINT_TYPE_FUNC_NAME(type)); |
@@ -61,10 +62,11 @@ int PRINT_TYPE_FUNC_NAME(string)(struct trace_seq *s, const char *name, | |||
61 | int len = *(u32 *)data >> 16; | 62 | int len = *(u32 *)data >> 16; |
62 | 63 | ||
63 | if (!len) | 64 | if (!len) |
64 | return trace_seq_printf(s, " %s=(fault)", name); | 65 | trace_seq_printf(s, " %s=(fault)", name); |
65 | else | 66 | else |
66 | return trace_seq_printf(s, " %s=\"%s\"", name, | 67 | trace_seq_printf(s, " %s=\"%s\"", name, |
67 | (const char *)get_loc_data(data, ent)); | 68 | (const char *)get_loc_data(data, ent)); |
69 | return !trace_seq_has_overflowed(s); | ||
68 | } | 70 | } |
69 | NOKPROBE_SYMBOL(PRINT_TYPE_FUNC_NAME(string)); | 71 | NOKPROBE_SYMBOL(PRINT_TYPE_FUNC_NAME(string)); |
70 | 72 | ||
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c index 3f34dc9b40f3..2e293beb186e 100644 --- a/kernel/trace/trace_sched_switch.c +++ b/kernel/trace/trace_sched_switch.c | |||
@@ -14,122 +14,26 @@ | |||
14 | 14 | ||
15 | #include "trace.h" | 15 | #include "trace.h" |
16 | 16 | ||
17 | static struct trace_array *ctx_trace; | ||
18 | static int __read_mostly tracer_enabled; | ||
19 | static int sched_ref; | 17 | static int sched_ref; |
20 | static DEFINE_MUTEX(sched_register_mutex); | 18 | static DEFINE_MUTEX(sched_register_mutex); |
21 | static int sched_stopped; | ||
22 | |||
23 | |||
24 | void | ||
25 | tracing_sched_switch_trace(struct trace_array *tr, | ||
26 | struct task_struct *prev, | ||
27 | struct task_struct *next, | ||
28 | unsigned long flags, int pc) | ||
29 | { | ||
30 | struct ftrace_event_call *call = &event_context_switch; | ||
31 | struct ring_buffer *buffer = tr->trace_buffer.buffer; | ||
32 | struct ring_buffer_event *event; | ||
33 | struct ctx_switch_entry *entry; | ||
34 | |||
35 | event = trace_buffer_lock_reserve(buffer, TRACE_CTX, | ||
36 | sizeof(*entry), flags, pc); | ||
37 | if (!event) | ||
38 | return; | ||
39 | entry = ring_buffer_event_data(event); | ||
40 | entry->prev_pid = prev->pid; | ||
41 | entry->prev_prio = prev->prio; | ||
42 | entry->prev_state = prev->state; | ||
43 | entry->next_pid = next->pid; | ||
44 | entry->next_prio = next->prio; | ||
45 | entry->next_state = next->state; | ||
46 | entry->next_cpu = task_cpu(next); | ||
47 | |||
48 | if (!call_filter_check_discard(call, entry, buffer, event)) | ||
49 | trace_buffer_unlock_commit(buffer, event, flags, pc); | ||
50 | } | ||
51 | 19 | ||
52 | static void | 20 | static void |
53 | probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *next) | 21 | probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *next) |
54 | { | 22 | { |
55 | struct trace_array_cpu *data; | ||
56 | unsigned long flags; | ||
57 | int cpu; | ||
58 | int pc; | ||
59 | |||
60 | if (unlikely(!sched_ref)) | 23 | if (unlikely(!sched_ref)) |
61 | return; | 24 | return; |
62 | 25 | ||
63 | tracing_record_cmdline(prev); | 26 | tracing_record_cmdline(prev); |
64 | tracing_record_cmdline(next); | 27 | tracing_record_cmdline(next); |
65 | |||
66 | if (!tracer_enabled || sched_stopped) | ||
67 | return; | ||
68 | |||
69 | pc = preempt_count(); | ||
70 | local_irq_save(flags); | ||
71 | cpu = raw_smp_processor_id(); | ||
72 | data = per_cpu_ptr(ctx_trace->trace_buffer.data, cpu); | ||
73 | |||
74 | if (likely(!atomic_read(&data->disabled))) | ||
75 | tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc); | ||
76 | |||
77 | local_irq_restore(flags); | ||
78 | } | ||
79 | |||
80 | void | ||
81 | tracing_sched_wakeup_trace(struct trace_array *tr, | ||
82 | struct task_struct *wakee, | ||
83 | struct task_struct *curr, | ||
84 | unsigned long flags, int pc) | ||
85 | { | ||
86 | struct ftrace_event_call *call = &event_wakeup; | ||
87 | struct ring_buffer_event *event; | ||
88 | struct ctx_switch_entry *entry; | ||
89 | struct ring_buffer *buffer = tr->trace_buffer.buffer; | ||
90 | |||
91 | event = trace_buffer_lock_reserve(buffer, TRACE_WAKE, | ||
92 | sizeof(*entry), flags, pc); | ||
93 | if (!event) | ||
94 | return; | ||
95 | entry = ring_buffer_event_data(event); | ||
96 | entry->prev_pid = curr->pid; | ||
97 | entry->prev_prio = curr->prio; | ||
98 | entry->prev_state = curr->state; | ||
99 | entry->next_pid = wakee->pid; | ||
100 | entry->next_prio = wakee->prio; | ||
101 | entry->next_state = wakee->state; | ||
102 | entry->next_cpu = task_cpu(wakee); | ||
103 | |||
104 | if (!call_filter_check_discard(call, entry, buffer, event)) | ||
105 | trace_buffer_unlock_commit(buffer, event, flags, pc); | ||
106 | } | 28 | } |
107 | 29 | ||
108 | static void | 30 | static void |
109 | probe_sched_wakeup(void *ignore, struct task_struct *wakee, int success) | 31 | probe_sched_wakeup(void *ignore, struct task_struct *wakee, int success) |
110 | { | 32 | { |
111 | struct trace_array_cpu *data; | ||
112 | unsigned long flags; | ||
113 | int cpu, pc; | ||
114 | |||
115 | if (unlikely(!sched_ref)) | 33 | if (unlikely(!sched_ref)) |
116 | return; | 34 | return; |
117 | 35 | ||
118 | tracing_record_cmdline(current); | 36 | tracing_record_cmdline(current); |
119 | |||
120 | if (!tracer_enabled || sched_stopped) | ||
121 | return; | ||
122 | |||
123 | pc = preempt_count(); | ||
124 | local_irq_save(flags); | ||
125 | cpu = raw_smp_processor_id(); | ||
126 | data = per_cpu_ptr(ctx_trace->trace_buffer.data, cpu); | ||
127 | |||
128 | if (likely(!atomic_read(&data->disabled))) | ||
129 | tracing_sched_wakeup_trace(ctx_trace, wakee, current, | ||
130 | flags, pc); | ||
131 | |||
132 | local_irq_restore(flags); | ||
133 | } | 37 | } |
134 | 38 | ||
135 | static int tracing_sched_register(void) | 39 | static int tracing_sched_register(void) |
@@ -197,51 +101,3 @@ void tracing_stop_cmdline_record(void) | |||
197 | { | 101 | { |
198 | tracing_stop_sched_switch(); | 102 | tracing_stop_sched_switch(); |
199 | } | 103 | } |
200 | |||
201 | /** | ||
202 | * tracing_start_sched_switch_record - start tracing context switches | ||
203 | * | ||
204 | * Turns on context switch tracing for a tracer. | ||
205 | */ | ||
206 | void tracing_start_sched_switch_record(void) | ||
207 | { | ||
208 | if (unlikely(!ctx_trace)) { | ||
209 | WARN_ON(1); | ||
210 | return; | ||
211 | } | ||
212 | |||
213 | tracing_start_sched_switch(); | ||
214 | |||
215 | mutex_lock(&sched_register_mutex); | ||
216 | tracer_enabled++; | ||
217 | mutex_unlock(&sched_register_mutex); | ||
218 | } | ||
219 | |||
220 | /** | ||
221 | * tracing_stop_sched_switch_record - start tracing context switches | ||
222 | * | ||
223 | * Turns off context switch tracing for a tracer. | ||
224 | */ | ||
225 | void tracing_stop_sched_switch_record(void) | ||
226 | { | ||
227 | mutex_lock(&sched_register_mutex); | ||
228 | tracer_enabled--; | ||
229 | WARN_ON(tracer_enabled < 0); | ||
230 | mutex_unlock(&sched_register_mutex); | ||
231 | |||
232 | tracing_stop_sched_switch(); | ||
233 | } | ||
234 | |||
235 | /** | ||
236 | * tracing_sched_switch_assign_trace - assign a trace array for ctx switch | ||
237 | * @tr: trace array pointer to assign | ||
238 | * | ||
239 | * Some tracers might want to record the context switches in their | ||
240 | * trace. This function lets those tracers assign the trace array | ||
241 | * to use. | ||
242 | */ | ||
243 | void tracing_sched_switch_assign_trace(struct trace_array *tr) | ||
244 | { | ||
245 | ctx_trace = tr; | ||
246 | } | ||
247 | |||
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 19bd8928ce94..8fb84b362816 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c | |||
@@ -365,6 +365,62 @@ probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu) | |||
365 | wakeup_current_cpu = cpu; | 365 | wakeup_current_cpu = cpu; |
366 | } | 366 | } |
367 | 367 | ||
368 | static void | ||
369 | tracing_sched_switch_trace(struct trace_array *tr, | ||
370 | struct task_struct *prev, | ||
371 | struct task_struct *next, | ||
372 | unsigned long flags, int pc) | ||
373 | { | ||
374 | struct ftrace_event_call *call = &event_context_switch; | ||
375 | struct ring_buffer *buffer = tr->trace_buffer.buffer; | ||
376 | struct ring_buffer_event *event; | ||
377 | struct ctx_switch_entry *entry; | ||
378 | |||
379 | event = trace_buffer_lock_reserve(buffer, TRACE_CTX, | ||
380 | sizeof(*entry), flags, pc); | ||
381 | if (!event) | ||
382 | return; | ||
383 | entry = ring_buffer_event_data(event); | ||
384 | entry->prev_pid = prev->pid; | ||
385 | entry->prev_prio = prev->prio; | ||
386 | entry->prev_state = prev->state; | ||
387 | entry->next_pid = next->pid; | ||
388 | entry->next_prio = next->prio; | ||
389 | entry->next_state = next->state; | ||
390 | entry->next_cpu = task_cpu(next); | ||
391 | |||
392 | if (!call_filter_check_discard(call, entry, buffer, event)) | ||
393 | trace_buffer_unlock_commit(buffer, event, flags, pc); | ||
394 | } | ||
395 | |||
396 | static void | ||
397 | tracing_sched_wakeup_trace(struct trace_array *tr, | ||
398 | struct task_struct *wakee, | ||
399 | struct task_struct *curr, | ||
400 | unsigned long flags, int pc) | ||
401 | { | ||
402 | struct ftrace_event_call *call = &event_wakeup; | ||
403 | struct ring_buffer_event *event; | ||
404 | struct ctx_switch_entry *entry; | ||
405 | struct ring_buffer *buffer = tr->trace_buffer.buffer; | ||
406 | |||
407 | event = trace_buffer_lock_reserve(buffer, TRACE_WAKE, | ||
408 | sizeof(*entry), flags, pc); | ||
409 | if (!event) | ||
410 | return; | ||
411 | entry = ring_buffer_event_data(event); | ||
412 | entry->prev_pid = curr->pid; | ||
413 | entry->prev_prio = curr->prio; | ||
414 | entry->prev_state = curr->state; | ||
415 | entry->next_pid = wakee->pid; | ||
416 | entry->next_prio = wakee->prio; | ||
417 | entry->next_state = wakee->state; | ||
418 | entry->next_cpu = task_cpu(wakee); | ||
419 | |||
420 | if (!call_filter_check_discard(call, entry, buffer, event)) | ||
421 | trace_buffer_unlock_commit(buffer, event, flags, pc); | ||
422 | } | ||
423 | |||
368 | static void notrace | 424 | static void notrace |
369 | probe_wakeup_sched_switch(void *ignore, | 425 | probe_wakeup_sched_switch(void *ignore, |
370 | struct task_struct *prev, struct task_struct *next) | 426 | struct task_struct *prev, struct task_struct *next) |
diff --git a/kernel/trace/trace_seq.c b/kernel/trace/trace_seq.c index 1f24ed99dca2..fabfa0f190a3 100644 --- a/kernel/trace/trace_seq.c +++ b/kernel/trace/trace_seq.c | |||
@@ -69,20 +69,15 @@ int trace_print_seq(struct seq_file *m, struct trace_seq *s) | |||
69 | * trace_seq_printf() is used to store strings into a special | 69 | * trace_seq_printf() is used to store strings into a special |
70 | * buffer (@s). Then the output may be either used by | 70 | * buffer (@s). Then the output may be either used by |
71 | * the sequencer or pulled into another buffer. | 71 | * the sequencer or pulled into another buffer. |
72 | * | ||
73 | * Returns 1 if we successfully written all the contents to | ||
74 | * the buffer. | ||
75 | * Returns 0 if we the length to write is bigger than the | ||
76 | * reserved buffer space. In this case, nothing gets written. | ||
77 | */ | 72 | */ |
78 | int trace_seq_printf(struct trace_seq *s, const char *fmt, ...) | 73 | void trace_seq_printf(struct trace_seq *s, const char *fmt, ...) |
79 | { | 74 | { |
80 | unsigned int len = TRACE_SEQ_BUF_LEFT(s); | 75 | unsigned int len = TRACE_SEQ_BUF_LEFT(s); |
81 | va_list ap; | 76 | va_list ap; |
82 | int ret; | 77 | int ret; |
83 | 78 | ||
84 | if (s->full || !len) | 79 | if (s->full || !len) |
85 | return 0; | 80 | return; |
86 | 81 | ||
87 | va_start(ap, fmt); | 82 | va_start(ap, fmt); |
88 | ret = vsnprintf(s->buffer + s->len, len, fmt, ap); | 83 | ret = vsnprintf(s->buffer + s->len, len, fmt, ap); |
@@ -91,12 +86,10 @@ int trace_seq_printf(struct trace_seq *s, const char *fmt, ...) | |||
91 | /* If we can't write it all, don't bother writing anything */ | 86 | /* If we can't write it all, don't bother writing anything */ |
92 | if (ret >= len) { | 87 | if (ret >= len) { |
93 | s->full = 1; | 88 | s->full = 1; |
94 | return 0; | 89 | return; |
95 | } | 90 | } |
96 | 91 | ||
97 | s->len += ret; | 92 | s->len += ret; |
98 | |||
99 | return 1; | ||
100 | } | 93 | } |
101 | EXPORT_SYMBOL_GPL(trace_seq_printf); | 94 | EXPORT_SYMBOL_GPL(trace_seq_printf); |
102 | 95 | ||
@@ -107,25 +100,18 @@ EXPORT_SYMBOL_GPL(trace_seq_printf); | |||
107 | * @nmaskbits: The number of bits that are valid in @maskp | 100 | * @nmaskbits: The number of bits that are valid in @maskp |
108 | * | 101 | * |
109 | * Writes a ASCII representation of a bitmask string into @s. | 102 | * Writes a ASCII representation of a bitmask string into @s. |
110 | * | ||
111 | * Returns 1 if we successfully written all the contents to | ||
112 | * the buffer. | ||
113 | * Returns 0 if we the length to write is bigger than the | ||
114 | * reserved buffer space. In this case, nothing gets written. | ||
115 | */ | 103 | */ |
116 | int trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp, | 104 | void trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp, |
117 | int nmaskbits) | 105 | int nmaskbits) |
118 | { | 106 | { |
119 | unsigned int len = TRACE_SEQ_BUF_LEFT(s); | 107 | unsigned int len = TRACE_SEQ_BUF_LEFT(s); |
120 | int ret; | 108 | int ret; |
121 | 109 | ||
122 | if (s->full || !len) | 110 | if (s->full || !len) |
123 | return 0; | 111 | return; |
124 | 112 | ||
125 | ret = bitmap_scnprintf(s->buffer, len, maskp, nmaskbits); | 113 | ret = bitmap_scnprintf(s->buffer + s->len, len, maskp, nmaskbits); |
126 | s->len += ret; | 114 | s->len += ret; |
127 | |||
128 | return 1; | ||
129 | } | 115 | } |
130 | EXPORT_SYMBOL_GPL(trace_seq_bitmask); | 116 | EXPORT_SYMBOL_GPL(trace_seq_bitmask); |
131 | 117 | ||
@@ -139,28 +125,24 @@ EXPORT_SYMBOL_GPL(trace_seq_bitmask); | |||
139 | * trace_seq_printf is used to store strings into a special | 125 | * trace_seq_printf is used to store strings into a special |
140 | * buffer (@s). Then the output may be either used by | 126 | * buffer (@s). Then the output may be either used by |
141 | * the sequencer or pulled into another buffer. | 127 | * the sequencer or pulled into another buffer. |
142 | * | ||
143 | * Returns how much it wrote to the buffer. | ||
144 | */ | 128 | */ |
145 | int trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args) | 129 | void trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args) |
146 | { | 130 | { |
147 | unsigned int len = TRACE_SEQ_BUF_LEFT(s); | 131 | unsigned int len = TRACE_SEQ_BUF_LEFT(s); |
148 | int ret; | 132 | int ret; |
149 | 133 | ||
150 | if (s->full || !len) | 134 | if (s->full || !len) |
151 | return 0; | 135 | return; |
152 | 136 | ||
153 | ret = vsnprintf(s->buffer + s->len, len, fmt, args); | 137 | ret = vsnprintf(s->buffer + s->len, len, fmt, args); |
154 | 138 | ||
155 | /* If we can't write it all, don't bother writing anything */ | 139 | /* If we can't write it all, don't bother writing anything */ |
156 | if (ret >= len) { | 140 | if (ret >= len) { |
157 | s->full = 1; | 141 | s->full = 1; |
158 | return 0; | 142 | return; |
159 | } | 143 | } |
160 | 144 | ||
161 | s->len += ret; | 145 | s->len += ret; |
162 | |||
163 | return len; | ||
164 | } | 146 | } |
165 | EXPORT_SYMBOL_GPL(trace_seq_vprintf); | 147 | EXPORT_SYMBOL_GPL(trace_seq_vprintf); |
166 | 148 | ||
@@ -178,28 +160,24 @@ EXPORT_SYMBOL_GPL(trace_seq_vprintf); | |||
178 | * | 160 | * |
179 | * This function will take the format and the binary array and finish | 161 | * This function will take the format and the binary array and finish |
180 | * the conversion into the ASCII string within the buffer. | 162 | * the conversion into the ASCII string within the buffer. |
181 | * | ||
182 | * Returns how much it wrote to the buffer. | ||
183 | */ | 163 | */ |
184 | int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary) | 164 | void trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary) |
185 | { | 165 | { |
186 | unsigned int len = TRACE_SEQ_BUF_LEFT(s); | 166 | unsigned int len = TRACE_SEQ_BUF_LEFT(s); |
187 | int ret; | 167 | int ret; |
188 | 168 | ||
189 | if (s->full || !len) | 169 | if (s->full || !len) |
190 | return 0; | 170 | return; |
191 | 171 | ||
192 | ret = bstr_printf(s->buffer + s->len, len, fmt, binary); | 172 | ret = bstr_printf(s->buffer + s->len, len, fmt, binary); |
193 | 173 | ||
194 | /* If we can't write it all, don't bother writing anything */ | 174 | /* If we can't write it all, don't bother writing anything */ |
195 | if (ret >= len) { | 175 | if (ret >= len) { |
196 | s->full = 1; | 176 | s->full = 1; |
197 | return 0; | 177 | return; |
198 | } | 178 | } |
199 | 179 | ||
200 | s->len += ret; | 180 | s->len += ret; |
201 | |||
202 | return len; | ||
203 | } | 181 | } |
204 | EXPORT_SYMBOL_GPL(trace_seq_bprintf); | 182 | EXPORT_SYMBOL_GPL(trace_seq_bprintf); |
205 | 183 | ||
@@ -212,25 +190,21 @@ EXPORT_SYMBOL_GPL(trace_seq_bprintf); | |||
212 | * copy to user routines. This function records a simple string | 190 | * copy to user routines. This function records a simple string |
213 | * into a special buffer (@s) for later retrieval by a sequencer | 191 | * into a special buffer (@s) for later retrieval by a sequencer |
214 | * or other mechanism. | 192 | * or other mechanism. |
215 | * | ||
216 | * Returns how much it wrote to the buffer. | ||
217 | */ | 193 | */ |
218 | int trace_seq_puts(struct trace_seq *s, const char *str) | 194 | void trace_seq_puts(struct trace_seq *s, const char *str) |
219 | { | 195 | { |
220 | unsigned int len = strlen(str); | 196 | unsigned int len = strlen(str); |
221 | 197 | ||
222 | if (s->full) | 198 | if (s->full) |
223 | return 0; | 199 | return; |
224 | 200 | ||
225 | if (len > TRACE_SEQ_BUF_LEFT(s)) { | 201 | if (len > TRACE_SEQ_BUF_LEFT(s)) { |
226 | s->full = 1; | 202 | s->full = 1; |
227 | return 0; | 203 | return; |
228 | } | 204 | } |
229 | 205 | ||
230 | memcpy(s->buffer + s->len, str, len); | 206 | memcpy(s->buffer + s->len, str, len); |
231 | s->len += len; | 207 | s->len += len; |
232 | |||
233 | return len; | ||
234 | } | 208 | } |
235 | EXPORT_SYMBOL_GPL(trace_seq_puts); | 209 | EXPORT_SYMBOL_GPL(trace_seq_puts); |
236 | 210 | ||
@@ -243,22 +217,18 @@ EXPORT_SYMBOL_GPL(trace_seq_puts); | |||
243 | * copy to user routines. This function records a simple charater | 217 | * copy to user routines. This function records a simple charater |
244 | * into a special buffer (@s) for later retrieval by a sequencer | 218 | * into a special buffer (@s) for later retrieval by a sequencer |
245 | * or other mechanism. | 219 | * or other mechanism. |
246 | * | ||
247 | * Returns how much it wrote to the buffer. | ||
248 | */ | 220 | */ |
249 | int trace_seq_putc(struct trace_seq *s, unsigned char c) | 221 | void trace_seq_putc(struct trace_seq *s, unsigned char c) |
250 | { | 222 | { |
251 | if (s->full) | 223 | if (s->full) |
252 | return 0; | 224 | return; |
253 | 225 | ||
254 | if (TRACE_SEQ_BUF_LEFT(s) < 1) { | 226 | if (TRACE_SEQ_BUF_LEFT(s) < 1) { |
255 | s->full = 1; | 227 | s->full = 1; |
256 | return 0; | 228 | return; |
257 | } | 229 | } |
258 | 230 | ||
259 | s->buffer[s->len++] = c; | 231 | s->buffer[s->len++] = c; |
260 | |||
261 | return 1; | ||
262 | } | 232 | } |
263 | EXPORT_SYMBOL_GPL(trace_seq_putc); | 233 | EXPORT_SYMBOL_GPL(trace_seq_putc); |
264 | 234 | ||
@@ -271,23 +241,19 @@ EXPORT_SYMBOL_GPL(trace_seq_putc); | |||
271 | * There may be cases where raw memory needs to be written into the | 241 | * There may be cases where raw memory needs to be written into the |
272 | * buffer and a strcpy() would not work. Using this function allows | 242 | * buffer and a strcpy() would not work. Using this function allows |
273 | * for such cases. | 243 | * for such cases. |
274 | * | ||
275 | * Returns how much it wrote to the buffer. | ||
276 | */ | 244 | */ |
277 | int trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len) | 245 | void trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len) |
278 | { | 246 | { |
279 | if (s->full) | 247 | if (s->full) |
280 | return 0; | 248 | return; |
281 | 249 | ||
282 | if (len > TRACE_SEQ_BUF_LEFT(s)) { | 250 | if (len > TRACE_SEQ_BUF_LEFT(s)) { |
283 | s->full = 1; | 251 | s->full = 1; |
284 | return 0; | 252 | return; |
285 | } | 253 | } |
286 | 254 | ||
287 | memcpy(s->buffer + s->len, mem, len); | 255 | memcpy(s->buffer + s->len, mem, len); |
288 | s->len += len; | 256 | s->len += len; |
289 | |||
290 | return len; | ||
291 | } | 257 | } |
292 | EXPORT_SYMBOL_GPL(trace_seq_putmem); | 258 | EXPORT_SYMBOL_GPL(trace_seq_putmem); |
293 | 259 | ||
@@ -303,20 +269,17 @@ EXPORT_SYMBOL_GPL(trace_seq_putmem); | |||
303 | * This is similar to trace_seq_putmem() except instead of just copying the | 269 | * This is similar to trace_seq_putmem() except instead of just copying the |
304 | * raw memory into the buffer it writes its ASCII representation of it | 270 | * raw memory into the buffer it writes its ASCII representation of it |
305 | * in hex characters. | 271 | * in hex characters. |
306 | * | ||
307 | * Returns how much it wrote to the buffer. | ||
308 | */ | 272 | */ |
309 | int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, | 273 | void trace_seq_putmem_hex(struct trace_seq *s, const void *mem, |
310 | unsigned int len) | 274 | unsigned int len) |
311 | { | 275 | { |
312 | unsigned char hex[HEX_CHARS]; | 276 | unsigned char hex[HEX_CHARS]; |
313 | const unsigned char *data = mem; | 277 | const unsigned char *data = mem; |
314 | unsigned int start_len; | 278 | unsigned int start_len; |
315 | int i, j; | 279 | int i, j; |
316 | int cnt = 0; | ||
317 | 280 | ||
318 | if (s->full) | 281 | if (s->full) |
319 | return 0; | 282 | return; |
320 | 283 | ||
321 | while (len) { | 284 | while (len) { |
322 | start_len = min(len, HEX_CHARS - 1); | 285 | start_len = min(len, HEX_CHARS - 1); |
@@ -335,9 +298,8 @@ int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, | |||
335 | len -= j / 2; | 298 | len -= j / 2; |
336 | hex[j++] = ' '; | 299 | hex[j++] = ' '; |
337 | 300 | ||
338 | cnt += trace_seq_putmem(s, hex, j); | 301 | trace_seq_putmem(s, hex, j); |
339 | } | 302 | } |
340 | return cnt; | ||
341 | } | 303 | } |
342 | EXPORT_SYMBOL_GPL(trace_seq_putmem_hex); | 304 | EXPORT_SYMBOL_GPL(trace_seq_putmem_hex); |
343 | 305 | ||
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 29228c4d5696..dfe00a4f3f3e 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c | |||
@@ -114,7 +114,7 @@ print_syscall_enter(struct trace_iterator *iter, int flags, | |||
114 | struct trace_entry *ent = iter->ent; | 114 | struct trace_entry *ent = iter->ent; |
115 | struct syscall_trace_enter *trace; | 115 | struct syscall_trace_enter *trace; |
116 | struct syscall_metadata *entry; | 116 | struct syscall_metadata *entry; |
117 | int i, ret, syscall; | 117 | int i, syscall; |
118 | 118 | ||
119 | trace = (typeof(trace))ent; | 119 | trace = (typeof(trace))ent; |
120 | syscall = trace->nr; | 120 | syscall = trace->nr; |
@@ -128,35 +128,28 @@ print_syscall_enter(struct trace_iterator *iter, int flags, | |||
128 | goto end; | 128 | goto end; |
129 | } | 129 | } |
130 | 130 | ||
131 | ret = trace_seq_printf(s, "%s(", entry->name); | 131 | trace_seq_printf(s, "%s(", entry->name); |
132 | if (!ret) | ||
133 | return TRACE_TYPE_PARTIAL_LINE; | ||
134 | 132 | ||
135 | for (i = 0; i < entry->nb_args; i++) { | 133 | for (i = 0; i < entry->nb_args; i++) { |
134 | |||
135 | if (trace_seq_has_overflowed(s)) | ||
136 | goto end; | ||
137 | |||
136 | /* parameter types */ | 138 | /* parameter types */ |
137 | if (trace_flags & TRACE_ITER_VERBOSE) { | 139 | if (trace_flags & TRACE_ITER_VERBOSE) |
138 | ret = trace_seq_printf(s, "%s ", entry->types[i]); | 140 | trace_seq_printf(s, "%s ", entry->types[i]); |
139 | if (!ret) | 141 | |
140 | return TRACE_TYPE_PARTIAL_LINE; | ||
141 | } | ||
142 | /* parameter values */ | 142 | /* parameter values */ |
143 | ret = trace_seq_printf(s, "%s: %lx%s", entry->args[i], | 143 | trace_seq_printf(s, "%s: %lx%s", entry->args[i], |
144 | trace->args[i], | 144 | trace->args[i], |
145 | i == entry->nb_args - 1 ? "" : ", "); | 145 | i == entry->nb_args - 1 ? "" : ", "); |
146 | if (!ret) | ||
147 | return TRACE_TYPE_PARTIAL_LINE; | ||
148 | } | 146 | } |
149 | 147 | ||
150 | ret = trace_seq_putc(s, ')'); | 148 | trace_seq_putc(s, ')'); |
151 | if (!ret) | ||
152 | return TRACE_TYPE_PARTIAL_LINE; | ||
153 | |||
154 | end: | 149 | end: |
155 | ret = trace_seq_putc(s, '\n'); | 150 | trace_seq_putc(s, '\n'); |
156 | if (!ret) | ||
157 | return TRACE_TYPE_PARTIAL_LINE; | ||
158 | 151 | ||
159 | return TRACE_TYPE_HANDLED; | 152 | return trace_handle_return(s); |
160 | } | 153 | } |
161 | 154 | ||
162 | static enum print_line_t | 155 | static enum print_line_t |
@@ -168,7 +161,6 @@ print_syscall_exit(struct trace_iterator *iter, int flags, | |||
168 | struct syscall_trace_exit *trace; | 161 | struct syscall_trace_exit *trace; |
169 | int syscall; | 162 | int syscall; |
170 | struct syscall_metadata *entry; | 163 | struct syscall_metadata *entry; |
171 | int ret; | ||
172 | 164 | ||
173 | trace = (typeof(trace))ent; | 165 | trace = (typeof(trace))ent; |
174 | syscall = trace->nr; | 166 | syscall = trace->nr; |
@@ -176,7 +168,7 @@ print_syscall_exit(struct trace_iterator *iter, int flags, | |||
176 | 168 | ||
177 | if (!entry) { | 169 | if (!entry) { |
178 | trace_seq_putc(s, '\n'); | 170 | trace_seq_putc(s, '\n'); |
179 | return TRACE_TYPE_HANDLED; | 171 | goto out; |
180 | } | 172 | } |
181 | 173 | ||
182 | if (entry->exit_event->event.type != ent->type) { | 174 | if (entry->exit_event->event.type != ent->type) { |
@@ -184,12 +176,11 @@ print_syscall_exit(struct trace_iterator *iter, int flags, | |||
184 | return TRACE_TYPE_UNHANDLED; | 176 | return TRACE_TYPE_UNHANDLED; |
185 | } | 177 | } |
186 | 178 | ||
187 | ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name, | 179 | trace_seq_printf(s, "%s -> 0x%lx\n", entry->name, |
188 | trace->ret); | 180 | trace->ret); |
189 | if (!ret) | ||
190 | return TRACE_TYPE_PARTIAL_LINE; | ||
191 | 181 | ||
192 | return TRACE_TYPE_HANDLED; | 182 | out: |
183 | return trace_handle_return(s); | ||
193 | } | 184 | } |
194 | 185 | ||
195 | extern char *__bad_type_size(void); | 186 | extern char *__bad_type_size(void); |
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 33ff6a24b802..8520acc34b18 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c | |||
@@ -552,8 +552,7 @@ error: | |||
552 | return ret; | 552 | return ret; |
553 | 553 | ||
554 | fail_address_parse: | 554 | fail_address_parse: |
555 | if (inode) | 555 | iput(inode); |
556 | iput(inode); | ||
557 | 556 | ||
558 | pr_info("Failed to parse address or file.\n"); | 557 | pr_info("Failed to parse address or file.\n"); |
559 | 558 | ||
@@ -606,7 +605,7 @@ static int probes_seq_show(struct seq_file *m, void *v) | |||
606 | for (i = 0; i < tu->tp.nr_args; i++) | 605 | for (i = 0; i < tu->tp.nr_args; i++) |
607 | seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm); | 606 | seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm); |
608 | 607 | ||
609 | seq_printf(m, "\n"); | 608 | seq_putc(m, '\n'); |
610 | return 0; | 609 | return 0; |
611 | } | 610 | } |
612 | 611 | ||
@@ -852,16 +851,14 @@ print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *e | |||
852 | tu = container_of(event, struct trace_uprobe, tp.call.event); | 851 | tu = container_of(event, struct trace_uprobe, tp.call.event); |
853 | 852 | ||
854 | if (is_ret_probe(tu)) { | 853 | if (is_ret_probe(tu)) { |
855 | if (!trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)", | 854 | trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)", |
856 | ftrace_event_name(&tu->tp.call), | 855 | ftrace_event_name(&tu->tp.call), |
857 | entry->vaddr[1], entry->vaddr[0])) | 856 | entry->vaddr[1], entry->vaddr[0]); |
858 | goto partial; | ||
859 | data = DATAOF_TRACE_ENTRY(entry, true); | 857 | data = DATAOF_TRACE_ENTRY(entry, true); |
860 | } else { | 858 | } else { |
861 | if (!trace_seq_printf(s, "%s: (0x%lx)", | 859 | trace_seq_printf(s, "%s: (0x%lx)", |
862 | ftrace_event_name(&tu->tp.call), | 860 | ftrace_event_name(&tu->tp.call), |
863 | entry->vaddr[0])) | 861 | entry->vaddr[0]); |
864 | goto partial; | ||
865 | data = DATAOF_TRACE_ENTRY(entry, false); | 862 | data = DATAOF_TRACE_ENTRY(entry, false); |
866 | } | 863 | } |
867 | 864 | ||
@@ -869,14 +866,13 @@ print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *e | |||
869 | struct probe_arg *parg = &tu->tp.args[i]; | 866 | struct probe_arg *parg = &tu->tp.args[i]; |
870 | 867 | ||
871 | if (!parg->type->print(s, parg->name, data + parg->offset, entry)) | 868 | if (!parg->type->print(s, parg->name, data + parg->offset, entry)) |
872 | goto partial; | 869 | goto out; |
873 | } | 870 | } |
874 | 871 | ||
875 | if (trace_seq_puts(s, "\n")) | 872 | trace_seq_putc(s, '\n'); |
876 | return TRACE_TYPE_HANDLED; | ||
877 | 873 | ||
878 | partial: | 874 | out: |
879 | return TRACE_TYPE_PARTIAL_LINE; | 875 | return trace_handle_return(s); |
880 | } | 876 | } |
881 | 877 | ||
882 | typedef bool (*filter_func_t)(struct uprobe_consumer *self, | 878 | typedef bool (*filter_func_t)(struct uprobe_consumer *self, |