author     Pekka Paalanen <pq@iki.fi>                 2009-01-06 06:57:11 -0500
committer  Ingo Molnar <mingo@elte.hu>                2009-01-10 22:01:30 -0500
commit     173ed24ee2d64f5de28654eb456ec1ee18a142e5 (patch)
tree       0e47a59eca31c6d09f1bfe5496c3f7b03e15f16a /kernel/trace
parent     fe6f90e57fd31af8daca534ea01db2e5666c15da (diff)
mmiotrace: count events lost due to not recording
Impact: enhances lost events counting in mmiotrace

The tracing framework, or the ring buffer facility it uses, has a switch
to stop recording data. When recording is off, the trace events will be
lost. The framework does not count these, so mmiotrace has to count them
itself.

Signed-off-by: Pekka Paalanen <pq@iki.fi>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
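To make the scheme easier to follow, here is a minimal, self-contained C sketch of the counting pattern the patch uses, written against C11 <stdatomic.h> rather than the kernel's atomic API. reserve_event(), count_lost() and the recording flag are illustrative stand-ins, not kernel functions; only the idea (bump an atomic counter when an event cannot be reserved, drain it with an exchange when reporting) mirrors the patch.

/*
 * Userspace sketch of the dropped-event accounting (not kernel code):
 * a failed reservation increments an atomic counter, and the reporting
 * path drains it with an exchange so each loss is reported only once.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_ulong dropped_count;   /* events lost while recording was off */
static unsigned long prev_overruns;  /* ring-buffer overruns already reported */

/* Stand-in for ring_buffer_lock_reserve() failing when recording is off. */
static bool reserve_event(bool recording)
{
	if (!recording) {
		atomic_fetch_add(&dropped_count, 1);
		return false;
	}
	return true;
}

/* Mirrors count_overruns(): drained drops plus newly seen overruns. */
static unsigned long count_lost(unsigned long overruns)
{
	unsigned long cnt = atomic_exchange(&dropped_count, 0);

	if (overruns > prev_overruns)
		cnt += overruns - prev_overruns;
	prev_overruns = overruns;
	return cnt;
}

int main(void)
{
	reserve_event(false);  /* recording off: event dropped and counted */
	reserve_event(false);
	reserve_event(true);   /* recording on: event would be stored */

	printf("lost %lu events\n", count_lost(0));  /* prints: lost 2 events */
	return 0;
}

The exchange-to-zero is the key design choice: it lets the counter keep accumulating concurrently with reporting while guaranteeing each dropped event is added to the reported total exactly once, which is how the patch folds dropped_count into count_overruns() below.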
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/trace_mmiotrace.c | 14
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index fcec59ff2355..621c8c3f3139 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -9,6 +9,7 @@
 #include <linux/kernel.h>
 #include <linux/mmiotrace.h>
 #include <linux/pci.h>
+#include <asm/atomic.h>
 
 #include "trace.h"
 #include "trace_output.h"
@@ -20,6 +21,7 @@ struct header_iter {
 static struct trace_array *mmio_trace_array;
 static bool overrun_detected;
 static unsigned long prev_overruns;
+static atomic_t dropped_count;
 
 static void mmio_reset_data(struct trace_array *tr)
 {
@@ -122,11 +124,11 @@ static void mmio_close(struct trace_iterator *iter)
 
 static unsigned long count_overruns(struct trace_iterator *iter)
 {
-	unsigned long cnt = 0;
+	unsigned long cnt = atomic_xchg(&dropped_count, 0);
 	unsigned long over = ring_buffer_overruns(iter->tr->buffer);
 
 	if (over > prev_overruns)
-		cnt = over - prev_overruns;
+		cnt += over - prev_overruns;
 	prev_overruns = over;
 	return cnt;
 }
@@ -308,8 +310,10 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
 
 	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
 					 &irq_flags);
-	if (!event)
+	if (!event) {
+		atomic_inc(&dropped_count);
 		return;
+	}
 	entry = ring_buffer_event_data(event);
 	tracing_generic_entry_update(&entry->ent, 0, preempt_count());
 	entry->ent.type = TRACE_MMIO_RW;
@@ -336,8 +340,10 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
 
 	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
 					 &irq_flags);
-	if (!event)
+	if (!event) {
+		atomic_inc(&dropped_count);
 		return;
+	}
 	entry = ring_buffer_event_data(event);
 	tracing_generic_entry_update(&entry->ent, 0, preempt_count());
 	entry->ent.type = TRACE_MMIO_MAP;