| author | Ingo Molnar <mingo@elte.hu> | 2009-03-18 01:59:56 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2009-03-18 01:59:56 -0400 |
| commit | 327019b01e068d66dada6a8b2571180ab3674d20 (patch) | |
| tree | c81354a509d6962f6878145fcf3cdbe50a000a89 | |
| parent | 03418c7efaa429dc7647ac93e3862e3fe1816873 (diff) | |
| parent | 62524d55e5b9ffe36e3bf3dd7a594114f150b449 (diff) | |
Merge branch 'tip/tracing/ftrace' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into tracing/ftrace
-rw-r--r--   include/linux/compiler.h          |  6
-rw-r--r--   include/linux/ring_buffer.h       |  7
-rw-r--r--   kernel/trace/ring_buffer.c        | 65
-rw-r--r--   kernel/trace/trace.c              | 55
-rw-r--r--   kernel/trace/trace.h              |  1
-rw-r--r--   kernel/trace/trace_power.c        |  8
-rw-r--r--   kernel/trace/trace_sched_switch.c |  9
7 files changed, 101 insertions(+), 50 deletions(-)
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index d95da1020f1c..6faa7e549de4 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -68,6 +68,7 @@ struct ftrace_branch_data {
 			unsigned long miss;
 			unsigned long hit;
 		};
+		unsigned long miss_hit[2];
 	};
 };
 
@@ -125,10 +126,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
 				.line = __LINE__,		\
 			};					\
 		______r = !!(cond);				\
-		if (______r)					\
-			______f.hit++;				\
-		else						\
-			______f.miss++;				\
+		______f.miss_hit[______r]++;			\
 		______r;					\
 	}))
 #endif /* CONFIG_PROFILE_ALL_BRANCHES */
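The compiler.h hunk makes the branch profiler itself branchless: instead of testing the result and bumping either `hit` or `miss`, the profiled `if()` macro indexes a two-element array with the condition's 0/1 value, and the new `miss_hit[2]` union member aliases the old counters. A minimal user-space sketch of the same counting idiom (the names and the GCC statement-expression macro below are illustrative, not the kernel's):

```c
#include <stdio.h>

struct branch_stats {
	unsigned long miss_hit[2];	/* [0] = condition false, [1] = condition true */
};

static struct branch_stats stats;

/* evaluate cond once, record the outcome without branching in the bookkeeping */
#define PROFILE_COND(cond) ({			\
	int ____r = !!(cond);			\
	stats.miss_hit[____r]++;		\
	____r;					\
})

int main(void)
{
	for (int i = 0; i < 10; i++)
		if (PROFILE_COND(i % 3 == 0))
			;	/* "hit" path */

	printf("miss=%lu hit=%lu\n", stats.miss_hit[0], stats.miss_hit[1]);
	return 0;
}
```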
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index b1a0068a5557..9e6052bd1a1c 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -118,8 +118,11 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer);
 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
 
-u64 ring_buffer_time_stamp(int cpu);
-void ring_buffer_normalize_time_stamp(int cpu, u64 *ts);
+u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu);
+void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
+				      int cpu, u64 *ts);
+void ring_buffer_set_clock(struct ring_buffer *buffer,
+			   u64 (*clock)(void));
 
 size_t ring_buffer_page_len(void *page);
 
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 58128ad2fde0..bbf51922a8ca 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -180,29 +180,6 @@ EXPORT_SYMBOL_GPL(tracing_is_on);
 
 #include "trace.h"
 
-/* Up this if you want to test the TIME_EXTENTS and normalization */
-#define DEBUG_SHIFT 0
-
-u64 ring_buffer_time_stamp(int cpu)
-{
-	u64 time;
-
-	preempt_disable_notrace();
-	/* shift to debug/test normalization and TIME_EXTENTS */
-	time = trace_clock_local() << DEBUG_SHIFT;
-	preempt_enable_no_resched_notrace();
-
-	return time;
-}
-EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
-
-void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
-{
-	/* Just stupid testing the normalize function and deltas */
-	*ts >>= DEBUG_SHIFT;
-}
-EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
-
 #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
 #define RB_ALIGNMENT 4U
 #define RB_MAX_SMALL_DATA 28
@@ -374,6 +351,7 @@ struct ring_buffer {
 #ifdef CONFIG_HOTPLUG_CPU
 	struct notifier_block cpu_notify;
 #endif
+	u64 (*clock)(void);
 };
 
 struct ring_buffer_iter {
@@ -394,6 +372,30 @@ struct ring_buffer_iter {
 		_____ret;			\
 	})
 
+/* Up this if you want to test the TIME_EXTENTS and normalization */
+#define DEBUG_SHIFT 0
+
+u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
+{
+	u64 time;
+
+	preempt_disable_notrace();
+	/* shift to debug/test normalization and TIME_EXTENTS */
+	time = buffer->clock() << DEBUG_SHIFT;
+	preempt_enable_no_resched_notrace();
+
+	return time;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
+
+void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
+				      int cpu, u64 *ts)
+{
+	/* Just stupid testing the normalize function and deltas */
+	*ts >>= DEBUG_SHIFT;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
+
 /**
  * check_pages - integrity check of buffer pages
  * @cpu_buffer: CPU buffer with pages to test
@@ -569,6 +571,7 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
 
 	buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
 	buffer->flags = flags;
+	buffer->clock = trace_clock_local;
 
 	/* need at least two pages */
 	if (buffer->pages == 1)
@@ -645,6 +648,12 @@ ring_buffer_free(struct ring_buffer *buffer)
 }
 EXPORT_SYMBOL_GPL(ring_buffer_free);
 
+void ring_buffer_set_clock(struct ring_buffer *buffer,
+			   u64 (*clock)(void))
+{
+	buffer->clock = clock;
+}
+
 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
 
 static void
@@ -1191,7 +1200,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 		cpu_buffer->tail_page = next_page;
 
 		/* reread the time stamp */
-		*ts = ring_buffer_time_stamp(cpu_buffer->cpu);
+		*ts = ring_buffer_time_stamp(buffer, cpu_buffer->cpu);
 		cpu_buffer->tail_page->page->time_stamp = *ts;
 	}
 
@@ -1334,7 +1343,7 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
 		return NULL;
 
-	ts = ring_buffer_time_stamp(cpu_buffer->cpu);
+	ts = ring_buffer_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu);
 
 	/*
 	 * Only the first commit can update the timestamp.
@@ -2051,7 +2060,8 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 	case RINGBUF_TYPE_DATA:
 		if (ts) {
 			*ts = cpu_buffer->read_stamp + event->time_delta;
-			ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
+			ring_buffer_normalize_time_stamp(buffer,
+							 cpu_buffer->cpu, ts);
 		}
 		return event;
 
@@ -2112,7 +2122,8 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 	case RINGBUF_TYPE_DATA:
 		if (ts) {
 			*ts = iter->read_stamp + event->time_delta;
-			ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
+			ring_buffer_normalize_time_stamp(buffer,
+							 cpu_buffer->cpu, ts);
 		}
 		return event;
 
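With the time source now stored per buffer (`ring_buffer_alloc()` defaults it to `trace_clock_local`), a tracer can swap in a different clock without patching the ring-buffer code itself. A rough sketch of how a caller might use the new hooks; the buffer size, the `RB_FL_OVERWRITE` flag, and the choice of `trace_clock_global` are illustrative assumptions, not part of this patch:

```c
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/errno.h>

static struct ring_buffer *example_buffer;	/* hypothetical buffer */

static int example_buffer_setup(void)
{
	/* the freshly allocated buffer starts out on trace_clock_local */
	example_buffer = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
	if (!example_buffer)
		return -ENOMEM;

	/* switch this buffer to the globally ordered clock */
	ring_buffer_set_clock(example_buffer, trace_clock_global);
	return 0;
}

static u64 example_buffer_now(int cpu)
{
	u64 ts = ring_buffer_time_stamp(example_buffer, cpu);

	/* undo any DEBUG_SHIFT scaling before handing the value out */
	ring_buffer_normalize_time_stamp(example_buffer, cpu, &ts);
	return ts;
}
```

This mirrors what `ftrace_now()` and `set_tracer_flags()` do with the global trace buffers in the trace.c changes below.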
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 1ce6208fd727..a2d13e8c8fd8 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -155,13 +155,6 @@ ns2usecs(cycle_t nsec)
 	return nsec;
 }
 
-cycle_t ftrace_now(int cpu)
-{
-	u64 ts = ring_buffer_time_stamp(cpu);
-	ring_buffer_normalize_time_stamp(cpu, &ts);
-	return ts;
-}
-
 /*
  * The global_trace is the descriptor that holds the tracing
  * buffers for the live tracing. For each CPU, it contains
@@ -178,6 +171,20 @@ static struct trace_array global_trace;
 
 static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
 
+cycle_t ftrace_now(int cpu)
+{
+	u64 ts;
+
+	/* Early boot up does not have a buffer yet */
+	if (!global_trace.buffer)
+		return trace_clock_local();
+
+	ts = ring_buffer_time_stamp(global_trace.buffer, cpu);
+	ring_buffer_normalize_time_stamp(global_trace.buffer, cpu, &ts);
+
+	return ts;
+}
+
 /*
  * The max_tr is used to snapshot the global_trace when a maximum
  * latency is reached. Some tracers will use this to store a maximum
@@ -308,6 +315,7 @@ static const char *trace_options[] = {
 	"printk-msg-only",
 	"context-info",
 	"latency-format",
+	"global-clock",
 	NULL
 };
 
@@ -2244,6 +2252,34 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
 	return 0;
 }
 
+static void set_tracer_flags(unsigned int mask, int enabled)
+{
+	/* do nothing if flag is already set */
+	if (!!(trace_flags & mask) == !!enabled)
+		return;
+
+	if (enabled)
+		trace_flags |= mask;
+	else
+		trace_flags &= ~mask;
+
+	if (mask == TRACE_ITER_GLOBAL_CLK) {
+		u64 (*func)(void);
+
+		if (enabled)
+			func = trace_clock_global;
+		else
+			func = trace_clock_local;
+
+		mutex_lock(&trace_types_lock);
+		ring_buffer_set_clock(global_trace.buffer, func);
+
+		if (max_tr.buffer)
+			ring_buffer_set_clock(max_tr.buffer, func);
+		mutex_unlock(&trace_types_lock);
+	}
+}
+
 static ssize_t
 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 			    size_t cnt, loff_t *ppos)
@@ -2271,10 +2307,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 		int len = strlen(trace_options[i]);
 
 		if (strncmp(cmp, trace_options[i], len) == 0) {
-			if (neg)
-				trace_flags &= ~(1 << i);
-			else
-				trace_flags |= (1 << i);
+			set_tracer_flags(1 << i, !neg);
 			break;
 		}
 	}
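The new "global-clock" string in `trace_options[]` is what exposes `TRACE_ITER_GLOBAL_CLK` to user space: writing the option name (or its "no"-prefixed form) to ftrace's `trace_options` file now lands in `set_tracer_flags()`, which swaps the ring-buffer clock between `trace_clock_local` and `trace_clock_global`. A small user-space sketch of that parse-and-toggle pattern, with an illustrative option table and flag word rather than the kernel's:

```c
#include <stdio.h>
#include <string.h>

static const char *options[] = { "print-parent", "global-clock", NULL };
static unsigned int flags;

static void set_flags(unsigned int mask, int enabled)
{
	/* do nothing if the flag already has the requested state */
	if (!!(flags & mask) == !!enabled)
		return;

	if (enabled)
		flags |= mask;
	else
		flags &= ~mask;
	/* a real implementation would also react here, e.g. swap clocks */
}

static void write_option(const char *buf)
{
	const char *cmp = buf;
	int neg = 0;

	if (strncmp(cmp, "no", 2) == 0) {
		neg = 1;
		cmp += 2;
	}

	for (int i = 0; options[i]; i++) {
		if (strcmp(cmp, options[i]) == 0) {
			set_flags(1U << i, !neg);
			break;
		}
	}
}

int main(void)
{
	write_option("global-clock");	/* sets bit 1 */
	write_option("noglobal-clock");	/* clears it again */
	printf("flags=%#x\n", flags);
	return 0;
}
```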
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 546bcbd92a0c..38276d1638e3 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -684,6 +684,7 @@ enum trace_iterator_flags {
 	TRACE_ITER_PRINTK_MSGONLY = 0x10000,
 	TRACE_ITER_CONTEXT_INFO = 0x20000, /* Print pid/cpu/time */
 	TRACE_ITER_LATENCY_FMT = 0x40000,
+	TRACE_ITER_GLOBAL_CLK = 0x80000,
 };
 
 /*
diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c
index 91ce672fb037..bae791ebcc51 100644
--- a/kernel/trace/trace_power.c
+++ b/kernel/trace/trace_power.c
@@ -122,12 +122,16 @@ fail_start:
 static void start_power_trace(struct trace_array *tr)
 {
 	trace_power_enabled = 1;
-	tracing_power_register();
 }
 
 static void stop_power_trace(struct trace_array *tr)
 {
 	trace_power_enabled = 0;
+}
+
+static void power_trace_reset(struct trace_array *tr)
+{
+	trace_power_enabled = 0;
 	unregister_trace_power_start(probe_power_start);
 	unregister_trace_power_end(probe_power_end);
 	unregister_trace_power_mark(probe_power_mark);
@@ -188,7 +192,7 @@ static struct tracer power_tracer __read_mostly =
 	.init = power_trace_init,
 	.start = start_power_trace,
 	.stop = stop_power_trace,
-	.reset = stop_power_trace,
+	.reset = power_trace_reset,
 	.print_line = power_print_line,
 };
 
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 77132c2cf3d9..de35f200abd3 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -18,6 +18,7 @@ static struct trace_array *ctx_trace;
 static int __read_mostly tracer_enabled;
 static int sched_ref;
 static DEFINE_MUTEX(sched_register_mutex);
+static int sched_stopped;
 
 static void
 probe_sched_switch(struct rq *__rq, struct task_struct *prev,
@@ -28,7 +29,7 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
 	int cpu;
 	int pc;
 
-	if (!sched_ref)
+	if (!sched_ref || sched_stopped)
 		return;
 
 	tracing_record_cmdline(prev);
@@ -193,6 +194,7 @@ static void stop_sched_trace(struct trace_array *tr)
 static int sched_switch_trace_init(struct trace_array *tr)
 {
 	ctx_trace = tr;
+	tracing_reset_online_cpus(tr);
 	tracing_start_sched_switch_record();
 	return 0;
 }
@@ -205,13 +207,12 @@ static void sched_switch_trace_reset(struct trace_array *tr)
 
 static void sched_switch_trace_start(struct trace_array *tr)
 {
-	tracing_reset_online_cpus(tr);
-	tracing_start_sched_switch();
+	sched_stopped = 0;
 }
 
 static void sched_switch_trace_stop(struct trace_array *tr)
 {
-	tracing_stop_sched_switch();
+	sched_stopped = 1;
 }
 
 static struct tracer sched_switch_trace __read_mostly =