Diffstat (limited to 'kernel/trace/trace_mmiotrace.c')
-rw-r--r--  kernel/trace/trace_mmiotrace.c | 16 ++++++++++++----
1 file changed, 12 insertions(+), 4 deletions(-)
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index d53b45ed0806..0acd834659ed 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -307,11 +307,13 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
 				 struct trace_array_cpu *data,
 				 struct mmiotrace_rw *rw)
 {
+	struct ftrace_event_call *call = &event_mmiotrace_rw;
+	struct ring_buffer *buffer = tr->buffer;
 	struct ring_buffer_event *event;
 	struct trace_mmiotrace_rw *entry;
 	int pc = preempt_count();
 
-	event = trace_buffer_lock_reserve(tr, TRACE_MMIO_RW,
+	event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
 					  sizeof(*entry), 0, pc);
 	if (!event) {
 		atomic_inc(&dropped_count);
@@ -319,7 +321,9 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
 	}
 	entry = ring_buffer_event_data(event);
 	entry->rw = *rw;
-	trace_buffer_unlock_commit(tr, event, 0, pc);
+
+	if (!filter_check_discard(call, entry, buffer, event))
+		trace_buffer_unlock_commit(buffer, event, 0, pc);
 }
 
 void mmio_trace_rw(struct mmiotrace_rw *rw)
@@ -333,11 +337,13 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
 				 struct trace_array_cpu *data,
 				 struct mmiotrace_map *map)
 {
+	struct ftrace_event_call *call = &event_mmiotrace_map;
+	struct ring_buffer *buffer = tr->buffer;
 	struct ring_buffer_event *event;
 	struct trace_mmiotrace_map *entry;
 	int pc = preempt_count();
 
-	event = trace_buffer_lock_reserve(tr, TRACE_MMIO_MAP,
+	event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
 					  sizeof(*entry), 0, pc);
 	if (!event) {
 		atomic_inc(&dropped_count);
@@ -345,7 +351,9 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
 	}
 	entry = ring_buffer_event_data(event);
 	entry->map = *map;
-	trace_buffer_unlock_commit(tr, event, 0, pc);
+
+	if (!filter_check_discard(call, entry, buffer, event))
+		trace_buffer_unlock_commit(buffer, event, 0, pc);
 }
 
 void mmio_trace_mapping(struct mmiotrace_map *map)
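For context, both hunks apply the same pattern: resolve the ring buffer from the trace_array up front, reserve the event on that buffer, fill the entry, and commit it only if the event filter does not discard it. The sketch below shows how __trace_mmiotrace_rw() reads after the patch; it is reconstructed from the hunks above, and the early return in the dropped-event branch is assumed from the surrounding context rather than shown in the diff.

	/* Post-patch shape of __trace_mmiotrace_rw(), reconstructed from the diff. */
	static void __trace_mmiotrace_rw(struct trace_array *tr,
					 struct trace_array_cpu *data,
					 struct mmiotrace_rw *rw)
	{
		struct ftrace_event_call *call = &event_mmiotrace_rw;
		struct ring_buffer *buffer = tr->buffer;	/* reserve/commit now target the buffer, not the trace_array */
		struct ring_buffer_event *event;
		struct trace_mmiotrace_rw *entry;
		int pc = preempt_count();

		event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
						  sizeof(*entry), 0, pc);
		if (!event) {
			atomic_inc(&dropped_count);
			return;		/* assumed: this context line is not part of the diff */
		}
		entry = ring_buffer_event_data(event);
		entry->rw = *rw;

		/* Commit only if the event filter does not discard the entry. */
		if (!filter_check_discard(call, entry, buffer, event))
			trace_buffer_unlock_commit(buffer, event, 0, pc);
	}

__trace_mmiotrace_map() follows the identical pattern with event_mmiotrace_map, TRACE_MMIO_MAP and entry->map.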
