 kernel/trace/trace_mmiotrace.c | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index fffcb069f1dc..80e503ef6136 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -9,6 +9,7 @@
 #include <linux/kernel.h>
 #include <linux/mmiotrace.h>
 #include <linux/pci.h>
+#include <asm/atomic.h>
 
 #include "trace.h"
 
@@ -19,6 +20,7 @@ struct header_iter {
 static struct trace_array *mmio_trace_array;
 static bool overrun_detected;
 static unsigned long prev_overruns;
+static atomic_t dropped_count;
 
 static void mmio_reset_data(struct trace_array *tr)
 {
@@ -121,11 +123,11 @@ static void mmio_close(struct trace_iterator *iter)
 
 static unsigned long count_overruns(struct trace_iterator *iter)
 {
-	unsigned long cnt = 0;
+	unsigned long cnt = atomic_xchg(&dropped_count, 0);
 	unsigned long over = ring_buffer_overruns(iter->tr->buffer);
 
 	if (over > prev_overruns)
-		cnt = over - prev_overruns;
+		cnt += over - prev_overruns;
 	prev_overruns = over;
 	return cnt;
 }
@@ -310,8 +312,10 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
 
 	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
 					   &irq_flags);
-	if (!event)
+	if (!event) {
+		atomic_inc(&dropped_count);
 		return;
+	}
 	entry = ring_buffer_event_data(event);
 	tracing_generic_entry_update(&entry->ent, 0, preempt_count());
 	entry->ent.type = TRACE_MMIO_RW;
@@ -338,8 +342,10 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
 
 	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
 					   &irq_flags);
-	if (!event)
+	if (!event) {
+		atomic_inc(&dropped_count);
 		return;
+	}
 	entry = ring_buffer_event_data(event);
 	tracing_generic_entry_update(&entry->ent, 0, preempt_count());
 	entry->ent.type = TRACE_MMIO_MAP;
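
The pattern the patch uses is worth noting: writers bump an atomic counter whenever ring_buffer_lock_reserve() fails, and the reader drains that counter with atomic_xchg() in count_overruns(), so each dropped event is reported exactly once and the count resets atomically. Below is a minimal userspace sketch of the same counting pattern, not kernel code; it uses C11 atomics in place of the kernel's atomic_t, and record_event()/drain_dropped() are hypothetical stand-ins for the traced write path and count_overruns().

	/* Userspace sketch of the drop-counting pattern, using C11 atomics. */
	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_ulong dropped_count;

	/* Stand-in for a write path whose buffer reservation may fail. */
	static void record_event(int reserve_ok)
	{
		if (!reserve_ok) {
			/* Reservation failed: count the dropped event. */
			atomic_fetch_add(&dropped_count, 1);
			return;
		}
		/* ... fill in and commit the event ... */
	}

	/* Stand-in for count_overruns(): report and reset in one step. */
	static unsigned long drain_dropped(void)
	{
		return atomic_exchange(&dropped_count, 0);
	}

	int main(void)
	{
		record_event(0);	/* simulate two failed reservations */
		record_event(0);
		printf("dropped: %lu\n", drain_dropped());	/* prints 2 */
		printf("dropped: %lu\n", drain_dropped());	/* prints 0 */
		return 0;
	}

Draining with an exchange rather than a read-then-store avoids losing increments that land between the read and the reset, which matches why the patch uses atomic_xchg() instead of atomic_read() plus atomic_set().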