author     Steven Rostedt <rostedt@goodmis.org>      2008-09-29 23:02:42 -0400
committer  Ingo Molnar <mingo@elte.hu>               2008-10-14 04:38:59 -0400
commit     777e208d40d0953efc6fb4ab58590da3f7d8f02d (patch)
tree       1e5940ccafd26c958b358f7ce85926659f12c37d /kernel/trace/trace_mmiotrace.c
parent     3928a8a2d98081d1bc3c0a84a2d70e29b90ecf1c (diff)
ftrace: take advantage of variable length entries
Now that the underlying ring buffer for ftrace holds variable length
entries, we can take advantage of this by storing only the actual size of
each event in the buffer. This dramatically increases the number of
entries that fit in the buffer.

We can also get rid of the "trace_cont" operation, but I'm keeping it
until it has no more users. Some of the ftrace tracers can now adapt
their code to this new feature.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
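The mmiotrace conversion below follows the pattern the message describes: instead of writing one fixed-size struct trace_entry that carries a union of every tracer's fields, each event type becomes its own struct whose first member is the common struct trace_entry header, and the tracer reserves only sizeof(*entry) bytes in the ring buffer. The following standalone sketch illustrates the space saving; the field layouts here are simplified assumptions for illustration, not the real kernel definitions.

#include <stdio.h>

/* Common header every event starts with (simplified layout, assumed). */
struct trace_entry {
	unsigned char	type;
	unsigned char	flags;
	unsigned char	preempt_count;
	int		pid;
};

/* Simplified stand-in for the real struct mmiotrace_rw. */
struct mmiotrace_rw {
	unsigned long long	phys;
	unsigned long		value;
	unsigned char		opcode;
};

/* Per-tracer event, shaped like the one this diff switches to. */
struct trace_mmiotrace_rw {
	struct trace_entry	ent;	/* common header must come first */
	struct mmiotrace_rw	rw;
};

/* Hypothetical "large" event standing in for the biggest union member. */
struct print_entry {
	struct trace_entry	ent;
	char			buf[64];
};

int main(void)
{
	size_t buffer = 64 * 1024;
	/* Old scheme: every slot is as big as the largest possible event. */
	size_t slot = sizeof(struct print_entry) > sizeof(struct trace_mmiotrace_rw) ?
			sizeof(struct print_entry) : sizeof(struct trace_mmiotrace_rw);

	printf("fixed-size slots:      %zu entries\n", buffer / slot);
	/* New scheme: an MMIO read/write event reserves only its own size. */
	printf("variable-size entries: %zu mmio rw entries\n",
	       buffer / sizeof(struct trace_mmiotrace_rw));
	return 0;
}

With the largest event no longer dictating the slot size, small events such as the MMIO read/write records pack far more densely, which is the dramatic increase in entry count the message refers to.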
Diffstat (limited to 'kernel/trace/trace_mmiotrace.c')
-rw-r--r--   kernel/trace/trace_mmiotrace.c | 31
1 file changed, 17 insertions, 14 deletions
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index bdbf09d8413c..3df441ea2749 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -178,14 +178,16 @@ print_out:
 static int mmio_print_rw(struct trace_iterator *iter)
 {
 	struct trace_entry *entry = iter->ent;
-	struct mmiotrace_rw *rw = &entry->field.mmiorw;
+	struct trace_mmiotrace_rw *field =
+		(struct trace_mmiotrace_rw *)entry;
+	struct mmiotrace_rw *rw = &field->rw;
 	struct trace_seq *s = &iter->seq;
 	unsigned long long t = ns2usecs(iter->ts);
 	unsigned long usec_rem = do_div(t, 1000000ULL);
 	unsigned secs = (unsigned long)t;
 	int ret = 1;
 
-	switch (entry->field.mmiorw.opcode) {
+	switch (rw->opcode) {
 	case MMIO_READ:
 		ret = trace_seq_printf(s,
 			"R %d %lu.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
@@ -220,14 +222,14 @@ static int mmio_print_rw(struct trace_iterator *iter)
 static int mmio_print_map(struct trace_iterator *iter)
 {
 	struct trace_entry *entry = iter->ent;
-	struct mmiotrace_map *m = &entry->field.mmiomap;
+	struct mmiotrace_map *m = (struct mmiotrace_map *)entry;
 	struct trace_seq *s = &iter->seq;
 	unsigned long long t = ns2usecs(iter->ts);
 	unsigned long usec_rem = do_div(t, 1000000ULL);
 	unsigned secs = (unsigned long)t;
 	int ret = 1;
 
-	switch (entry->field.mmiorw.opcode) {
+	switch (m->opcode) {
 	case MMIO_PROBE:
 		ret = trace_seq_printf(s,
 			"MAP %lu.%06lu %d 0x%llx 0x%lx 0x%lx 0x%lx %d\n",
@@ -252,7 +254,8 @@ static int mmio_print_map(struct trace_iterator *iter)
 static int mmio_print_mark(struct trace_iterator *iter)
 {
 	struct trace_entry *entry = iter->ent;
-	const char *msg = entry->field.print.buf;
+	struct print_entry *print = (struct print_entry *)entry;
+	const char *msg = print->buf;
 	struct trace_seq *s = &iter->seq;
 	unsigned long long t = ns2usecs(iter->ts);
 	unsigned long usec_rem = do_div(t, 1000000ULL);
@@ -264,7 +267,7 @@ static int mmio_print_mark(struct trace_iterator *iter)
 	if (!ret)
 		return 0;
 
-	if (entry->field.flags & TRACE_FLAG_CONT)
+	if (entry->flags & TRACE_FLAG_CONT)
 		trace_seq_print_cont(s, iter);
 
 	return 1;
@@ -308,7 +311,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
 				struct mmiotrace_rw *rw)
 {
 	struct ring_buffer_event *event;
-	struct trace_entry *entry;
+	struct trace_mmiotrace_rw *entry;
 	unsigned long irq_flags;
 
 	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
@@ -316,9 +319,9 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
-	tracing_generic_entry_update(entry, 0);
-	entry->type = TRACE_MMIO_RW;
-	entry->field.mmiorw = *rw;
+	tracing_generic_entry_update(&entry->ent, 0);
+	entry->ent.type = TRACE_MMIO_RW;
+	entry->rw = *rw;
 	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
 
 	trace_wake_up();
@@ -336,7 +339,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
 				struct mmiotrace_map *map)
 {
 	struct ring_buffer_event *event;
-	struct trace_entry *entry;
+	struct trace_mmiotrace_map *entry;
 	unsigned long irq_flags;
 
 	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
@@ -344,9 +347,9 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
-	tracing_generic_entry_update(entry, 0);
-	entry->type = TRACE_MMIO_MAP;
-	entry->field.mmiomap = *map;
+	tracing_generic_entry_update(&entry->ent, 0);
+	entry->ent.type = TRACE_MMIO_MAP;
+	entry->map = *map;
 	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
 
 	trace_wake_up();