-rw-r--r--  Documentation/trace/ftrace-design.txt   26
-rw-r--r--  Documentation/trace/ftrace.txt            2
-rw-r--r--  kernel/trace/Kconfig                      4
-rw-r--r--  kernel/trace/ring_buffer.c               24
-rw-r--r--  kernel/trace/trace.c                      5
5 files changed, 51 insertions(+), 10 deletions(-)
diff --git a/Documentation/trace/ftrace-design.txt b/Documentation/trace/ftrace-design.txt
index 239f14b2b55a..6a5a579126b0 100644
--- a/Documentation/trace/ftrace-design.txt
+++ b/Documentation/trace/ftrace-design.txt
@@ -1,5 +1,6 @@
 function tracer guts
 ====================
+By Mike Frysinger
 
 Introduction
 ------------
@@ -173,14 +174,16 @@ void ftrace_graph_caller(void)
 
 	unsigned long *frompc = &...;
 	unsigned long selfpc = <return address> - MCOUNT_INSN_SIZE;
-	prepare_ftrace_return(frompc, selfpc);
+	/* passing frame pointer up is optional -- see below */
+	prepare_ftrace_return(frompc, selfpc, frame_pointer);
 
 	/* restore all state needed by the ABI */
 }
 #endif
 
-For information on how to implement prepare_ftrace_return(), simply look at
-the x86 version. The only architecture-specific piece in it is the setup of
+For information on how to implement prepare_ftrace_return(), simply look at the
+x86 version (the frame pointer passing is optional; see the next section for
+more information). The only architecture-specific piece in it is the setup of
 the fault recovery table (the asm(...) code). The rest should be the same
 across architectures.
 
@@ -205,6 +208,23 @@ void return_to_handler(void)
 #endif
 
 
+HAVE_FUNCTION_GRAPH_FP_TEST
+---------------------------
+
+An arch may pass in a unique value (frame pointer) to both the entering and
+exiting of a function. On exit, the value is compared and if it does not
+match, then it will panic the kernel. This is largely a sanity check for bad
+code generation with gcc. If gcc for your port sanely updates the frame
+pointer under different optimization levels, then ignore this option.
+
+However, adding support for it isn't terribly difficult. In your assembly code
+that calls prepare_ftrace_return(), pass the frame pointer as the 3rd argument.
+Then in the C version of that function, do what the x86 port does and pass it
+along to ftrace_push_return_trace() instead of a stub value of 0.
+
+Similarly, when you call ftrace_return_to_handler(), pass it the frame pointer.
+
+
 HAVE_FTRACE_NMI_ENTER
 ---------------------
 
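[Editor's note: the sketch below is not part of the patch. It is a rough,
hedged illustration of what the new HAVE_FUNCTION_GRAPH_FP_TEST text describes:
prepare_ftrace_return() receiving the frame pointer as its third argument and
forwarding it to ftrace_push_return_trace(). It is modeled loosely on the x86
port mentioned above, assumes the 2.6.3x-era graph-tracer helpers
(ftrace_push_return_trace(), ftrace_graph_entry, curr_ret_stack), and replaces
the arch-specific fault-recovery asm with plain assignments.]

    #include <linux/ftrace.h>
    #include <linux/sched.h>

    extern void return_to_handler(void);

    void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
                               unsigned long frame_pointer)
    {
            unsigned long return_hooker = (unsigned long)&return_to_handler;
            struct ftrace_graph_ent trace;
            unsigned long old;
            int depth;

            if (unlikely(atomic_read(&current->tracing_graph_pause)))
                    return;

            /*
             * Arch code: read the saved return address and redirect it to
             * return_to_handler(); a real port wraps both accesses in the
             * fault-recovery asm the documentation calls arch-specific.
             */
            old = *parent;
            *parent = return_hooker;

            /*
             * Pass the real frame pointer instead of a stub value of 0 so
             * the exit path can verify it when HAVE_FUNCTION_GRAPH_FP_TEST
             * is selected.
             */
            if (ftrace_push_return_trace(old, self_addr, &depth,
                                         frame_pointer) == -EBUSY) {
                    *parent = old;
                    return;
            }

            trace.func = self_addr;
            trace.depth = depth;

            /* Let the tracer veto this function; undo the redirect if so. */
            if (!ftrace_graph_entry(&trace)) {
                    current->curr_ret_stack--;
                    *parent = old;
            }
    }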
diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt
index 8179692fbb90..bab3040da548 100644
--- a/Documentation/trace/ftrace.txt
+++ b/Documentation/trace/ftrace.txt
@@ -1625,7 +1625,7 @@ If I am only interested in sys_nanosleep and hrtimer_interrupt:
 
  # echo sys_nanosleep hrtimer_interrupt \
 	> set_ftrace_filter
- # echo ftrace > current_tracer
+ # echo function > current_tracer
  # echo 1 > tracing_enabled
  # usleep 1
  # echo 0 > tracing_enabled
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 6c22d8a2f289..60e2ce0181ee 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -27,9 +27,7 @@ config HAVE_FUNCTION_GRAPH_TRACER
 config HAVE_FUNCTION_GRAPH_FP_TEST
 	bool
 	help
-	  An arch may pass in a unique value (frame pointer) to both the
-	  entering and exiting of a function. On exit, the value is compared
-	  and if it does not match, then it will panic the kernel.
+	  See Documentation/trace/ftrace-design.txt
 
 config HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	bool
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index edefe3b2801b..8c1b2d290718 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -464,6 +464,8 @@ struct ring_buffer_iter {
 	struct ring_buffer_per_cpu	*cpu_buffer;
 	unsigned long			head;
 	struct buffer_page		*head_page;
+	struct buffer_page		*cache_reader_page;
+	unsigned long			cache_read;
 	u64				read_stamp;
 };
 
@@ -2716,6 +2718,8 @@ static void rb_iter_reset(struct ring_buffer_iter *iter)
 		iter->read_stamp = cpu_buffer->read_stamp;
 	else
 		iter->read_stamp = iter->head_page->page->time_stamp;
+	iter->cache_reader_page = cpu_buffer->reader_page;
+	iter->cache_read = cpu_buffer->read;
 }
 
 /**
@@ -3060,13 +3064,22 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 	struct ring_buffer_event *event;
 	int nr_loops = 0;
 
-	if (ring_buffer_iter_empty(iter))
-		return NULL;
-
 	cpu_buffer = iter->cpu_buffer;
 	buffer = cpu_buffer->buffer;
 
+	/*
+	 * Check if someone performed a consuming read to
+	 * the buffer. A consuming read invalidates the iterator
+	 * and we need to reset the iterator in this case.
+	 */
+	if (unlikely(iter->cache_read != cpu_buffer->read ||
+		     iter->cache_reader_page != cpu_buffer->reader_page))
+		rb_iter_reset(iter);
+
  again:
+	if (ring_buffer_iter_empty(iter))
+		return NULL;
+
 	/*
 	 * We repeat when a timestamp is encountered.
 	 * We can get multiple timestamps by nested interrupts or also
@@ -3081,6 +3094,11 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 	if (rb_per_cpu_empty(cpu_buffer))
 		return NULL;
 
+	if (iter->head >= local_read(&iter->head_page->page->commit)) {
+		rb_inc_iter(iter);
+		goto again;
+	}
+
 	event = rb_iter_head_event(iter);
 
 	switch (event->type_len) {
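[Editor's note: hypothetical caller, for illustration only -- not part of this
patch. It shows the interleaving the new cache_read/cache_reader_page check
guards against: a consuming read advances cpu_buffer->read and the reader page
while a non-consuming iterator is still walking the buffer. The function name
is made up; the ring-buffer calls assume the 3-argument ring_buffer_consume()
of this era.]

    #include <linux/ring_buffer.h>

    static void iterate_with_concurrent_consumer(struct ring_buffer *buffer,
                                                 int cpu)
    {
            struct ring_buffer_iter *iter;
            struct ring_buffer_event *event;
            u64 ts;

            iter = ring_buffer_read_start(buffer, cpu);
            if (!iter)
                    return;

            /*
             * A consuming read (e.g. a trace_pipe reader) moves the reader
             * page and cpu_buffer->read behind the iterator's back.
             */
            ring_buffer_consume(buffer, cpu, &ts);

            /*
             * Before this patch the iterator could keep walking its stale
             * head page here; with it, rb_iter_peek() sees that the cached
             * read count no longer matches and calls rb_iter_reset() first.
             */
            while ((event = ring_buffer_read(iter, &ts)))
                    ;       /* walk remaining events non-destructively */

            ring_buffer_read_finish(iter);
    }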
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 0df1b0f2cb9e..eac6875cb990 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -951,6 +951,11 @@ void trace_find_cmdline(int pid, char comm[])
 		return;
 	}
 
+	if (WARN_ON_ONCE(pid < 0)) {
+		strcpy(comm, "<XXX>");
+		return;
+	}
+
 	if (pid > PID_MAX_DEFAULT) {
 		strcpy(comm, "<...>");
 		return;