Diffstat (limited to 'kernel/trace/ring_buffer.c')
 -rw-r--r--  kernel/trace/ring_buffer.c | 179
 1 file changed, 154 insertions(+), 25 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 41ca394feb22..7f6059c5aa94 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -319,6 +319,11 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_data);
 #define TS_MASK         ((1ULL << TS_SHIFT) - 1)
 #define TS_DELTA_TEST   (~TS_MASK)
 
+/* Flag when events were overwritten */
+#define RB_MISSED_EVENTS        (1 << 31)
+/* Missed count stored at end */
+#define RB_MISSED_STORED        (1 << 30)
+
 struct buffer_data_page {
         u64             time_stamp;     /* page time stamp */
         local_t         commit;         /* write committed index */
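The two new flags claim the top bits of the 32-bit commit word in buffer_data_page, leaving the lower bits for the committed data length. A minimal sketch of how a reader of an exported page could split such a packed word apart (illustration only; the helper is not part of this patch):

/*
 * Illustrative layout of the exported commit word once the flags are used:
 *   bit 31: RB_MISSED_EVENTS - events were overwritten behind this page
 *   bit 30: RB_MISSED_STORED - the missed count was appended after the data
 *   bits 0-29: committed length of the data on the page
 */
static inline unsigned long rb_page_data_len(unsigned long commit)
{
        return commit & ~((1UL << 31) | (1UL << 30));   /* strip flag bits */
}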
@@ -338,6 +343,7 @@ struct buffer_page {
         local_t         write;          /* index for next write */
         unsigned        read;           /* index for next read */
         local_t         entries;        /* entries on this page */
+        unsigned long   real_end;       /* real end of data */
         struct buffer_data_page *page;  /* Actual data page */
 };
 
@@ -417,6 +423,12 @@ int ring_buffer_print_page_header(struct trace_seq *s)
                                (unsigned int)sizeof(field.commit),
                                (unsigned int)is_signed_type(long));
 
+        ret = trace_seq_printf(s, "\tfield: int overwrite;\t"
+                               "offset:%u;\tsize:%u;\tsigned:%u;\n",
+                               (unsigned int)offsetof(typeof(field), commit),
+                               1,
+                               (unsigned int)is_signed_type(long));
+
         ret = trace_seq_printf(s, "\tfield: char data;\t"
                                "offset:%u;\tsize:%u;\tsigned:%u;\n",
                                (unsigned int)offsetof(typeof(field), data),
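The new trace_seq_printf() call advertises a one-byte "overwrite" pseudo-field that overlays the commit word, so user-space parsers of the page-header description (for example the tracing events/header_page file) can locate the missed-events flags. On a typical 64-bit layout of buffer_data_page the emitted line would look roughly like the following; the offset value is an assumption about that layout, not something stated by the patch:

        field: int overwrite;   offset:8;       size:1; signed:1;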
@@ -440,6 +452,8 @@ struct ring_buffer_per_cpu {
         struct buffer_page              *tail_page;     /* write to tail */
         struct buffer_page              *commit_page;   /* committed pages */
         struct buffer_page              *reader_page;
+        unsigned long                   lost_events;
+        unsigned long                   last_overrun;
         local_t                         commit_overrun;
         local_t                         overrun;
         local_t                         entries;
@@ -1762,6 +1776,13 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
         kmemcheck_annotate_bitfield(event, bitfield);
 
         /*
+         * Save the original length to the meta data.
+         * This will be used by the reader to add lost event
+         * counter.
+         */
+        tail_page->real_end = tail;
+
+        /*
          * If this event is bigger than the minimum size, then
          * we need to be careful that we don't subtract the
          * write counter enough to allow another writer to slip
@@ -1979,17 +2000,13 @@ rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
                   u64 *ts, u64 *delta)
 {
         struct ring_buffer_event *event;
-        static int once;
         int ret;
 
-        if (unlikely(*delta > (1ULL << 59) && !once++)) {
-                printk(KERN_WARNING "Delta way too big! %llu"
-                       " ts=%llu write stamp = %llu\n",
-                       (unsigned long long)*delta,
-                       (unsigned long long)*ts,
-                       (unsigned long long)cpu_buffer->write_stamp);
-                WARN_ON(1);
-        }
+        WARN_ONCE(*delta > (1ULL << 59),
+                  KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n",
+                  (unsigned long long)*delta,
+                  (unsigned long long)*ts,
+                  (unsigned long long)cpu_buffer->write_stamp);
 
         /*
          * The delta is too big, we to add a
@@ -2838,6 +2855,7 @@ static struct buffer_page *
 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 {
         struct buffer_page *reader = NULL;
+        unsigned long overwrite;
         unsigned long flags;
         int nr_loops = 0;
         int ret;
@@ -2879,6 +2897,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
         local_set(&cpu_buffer->reader_page->write, 0);
         local_set(&cpu_buffer->reader_page->entries, 0);
         local_set(&cpu_buffer->reader_page->page->commit, 0);
+        cpu_buffer->reader_page->real_end = 0;
 
  spin:
         /*
@@ -2899,6 +2918,18 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
         rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
 
         /*
+         * We want to make sure we read the overruns after we set up our
+         * pointers to the next object. The writer side does a
+         * cmpxchg to cross pages which acts as the mb on the writer
+         * side. Note, the reader will constantly fail the swap
+         * while the writer is updating the pointers, so this
+         * guarantees that the overwrite recorded here is the one we
+         * want to compare with the last_overrun.
+         */
+        smp_mb();
+        overwrite = local_read(&(cpu_buffer->overrun));
+
+        /*
          * Here's the tricky part.
          *
          * We need to move the pointer past the header page.
@@ -2929,6 +2960,11 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
         cpu_buffer->reader_page = reader;
         rb_reset_reader_page(cpu_buffer);
 
+        if (overwrite != cpu_buffer->last_overrun) {
+                cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
+                cpu_buffer->last_overrun = overwrite;
+        }
+
         goto again;
 
  out:
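Because overrun only ever grows, two snapshots of it are enough to tell how many events were dropped since the reader's previous page swap. A standalone sketch of that accounting (hypothetical helper, not part of the patch):

/*
 * Sketch: events lost between two reader-page swaps.
 * 'overrun' is the writer's running count of overwritten events;
 * 'last_overrun' is its value at the previous swap.
 * Example: 130 - 100 = 30 events were dropped in between.
 */
static unsigned long rb_lost_since_last_swap(unsigned long overrun,
                                             unsigned long last_overrun)
{
        return overrun - last_overrun;
}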
@@ -3005,8 +3041,14 @@ static void rb_advance_iter(struct ring_buffer_iter *iter)
         rb_advance_iter(iter);
 }
 
+static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
+{
+        return cpu_buffer->lost_events;
+}
+
 static struct ring_buffer_event *
-rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts)
+rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
+               unsigned long *lost_events)
 {
         struct ring_buffer_event *event;
         struct buffer_page *reader;
@@ -3058,6 +3100,8 @@ rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts)
                         ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
                                                          cpu_buffer->cpu, ts);
                 }
+                if (lost_events)
+                        *lost_events = rb_lost_events(cpu_buffer);
                 return event;
 
         default:
@@ -3168,12 +3212,14 @@ static inline int rb_ok_to_lock(void)
  * @buffer: The ring buffer to read
  * @cpu: The cpu to peak at
  * @ts: The timestamp counter of this event.
+ * @lost_events: a variable to store if events were lost (may be NULL)
  *
  * This will return the event that will be read next, but does
  * not consume the data.
  */
 struct ring_buffer_event *
-ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
+ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
+                 unsigned long *lost_events)
 {
         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
         struct ring_buffer_event *event;
@@ -3188,7 +3234,7 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
         local_irq_save(flags);
         if (dolock)
                 spin_lock(&cpu_buffer->reader_lock);
-        event = rb_buffer_peek(cpu_buffer, ts);
+        event = rb_buffer_peek(cpu_buffer, ts, lost_events);
         if (event && event->type_len == RINGBUF_TYPE_PADDING)
                 rb_advance_reader(cpu_buffer);
         if (dolock)
@@ -3230,13 +3276,17 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 /**
  * ring_buffer_consume - return an event and consume it
  * @buffer: The ring buffer to get the next event from
+ * @cpu: the cpu to read the buffer from
+ * @ts: a variable to store the timestamp (may be NULL)
+ * @lost_events: a variable to store if events were lost (may be NULL)
  *
  * Returns the next event in the ring buffer, and that event is consumed.
  * Meaning, that sequential reads will keep returning a different event,
  * and eventually empty the ring buffer if the producer is slower.
  */
 struct ring_buffer_event *
-ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
+ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
+                    unsigned long *lost_events)
 {
         struct ring_buffer_per_cpu *cpu_buffer;
         struct ring_buffer_event *event = NULL;
@@ -3257,9 +3307,11 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
         if (dolock)
                 spin_lock(&cpu_buffer->reader_lock);
 
-        event = rb_buffer_peek(cpu_buffer, ts);
-        if (event)
+        event = rb_buffer_peek(cpu_buffer, ts, lost_events);
+        if (event) {
+                cpu_buffer->lost_events = 0;
                 rb_advance_reader(cpu_buffer);
+        }
 
         if (dolock)
                 spin_unlock(&cpu_buffer->reader_lock);
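With the extra parameter, a consuming reader can see how many events were dropped ahead of the event it is about to handle. A minimal usage sketch against the new ring_buffer_consume() signature (the helper and its pr_info() reporting are illustrative, not part of this patch):

static void drain_cpu_buffer(struct ring_buffer *buffer, int cpu)
{
        struct ring_buffer_event *event;
        unsigned long lost_events;
        u64 ts;

        while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost_events))) {
                if (lost_events)
                        pr_info("cpu%d: %lu events lost before this one\n",
                                cpu, lost_events);
                /* process ring_buffer_event_data(event) here */
        }
}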
@@ -3276,23 +3328,30 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 EXPORT_SYMBOL_GPL(ring_buffer_consume);
 
 /**
- * ring_buffer_read_start - start a non consuming read of the buffer
+ * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
  * @buffer: The ring buffer to read from
  * @cpu: The cpu buffer to iterate over
  *
- * This starts up an iteration through the buffer. It also disables
- * the recording to the buffer until the reading is finished.
- * This prevents the reading from being corrupted. This is not
- * a consuming read, so a producer is not expected.
+ * This performs the initial preparations necessary to iterate
+ * through the buffer. Memory is allocated, buffer recording
+ * is disabled, and the iterator pointer is returned to the caller.
  *
- * Must be paired with ring_buffer_finish.
+ * Disabling buffer recordng prevents the reading from being
+ * corrupted. This is not a consuming read, so a producer is not
+ * expected.
+ *
+ * After a sequence of ring_buffer_read_prepare calls, the user is
+ * expected to make at least one call to ring_buffer_prepare_sync.
+ * Afterwards, ring_buffer_read_start is invoked to get things going
+ * for real.
+ *
+ * This overall must be paired with ring_buffer_finish.
  */
 struct ring_buffer_iter *
-ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
+ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
 {
         struct ring_buffer_per_cpu *cpu_buffer;
         struct ring_buffer_iter *iter;
-        unsigned long flags;
 
         if (!cpumask_test_cpu(cpu, buffer->cpumask))
                 return NULL;
@@ -3306,15 +3365,52 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
         iter->cpu_buffer = cpu_buffer;
 
         atomic_inc(&cpu_buffer->record_disabled);
+
+        return iter;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
+
+/**
+ * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
+ *
+ * All previously invoked ring_buffer_read_prepare calls to prepare
+ * iterators will be synchronized. Afterwards, read_buffer_read_start
+ * calls on those iterators are allowed.
+ */
+void
+ring_buffer_read_prepare_sync(void)
+{
         synchronize_sched();
+}
+EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
+
+/**
+ * ring_buffer_read_start - start a non consuming read of the buffer
+ * @iter: The iterator returned by ring_buffer_read_prepare
+ *
+ * This finalizes the startup of an iteration through the buffer.
+ * The iterator comes from a call to ring_buffer_read_prepare and
+ * an intervening ring_buffer_read_prepare_sync must have been
+ * performed.
+ *
+ * Must be paired with ring_buffer_finish.
+ */
+void
+ring_buffer_read_start(struct ring_buffer_iter *iter)
+{
+        struct ring_buffer_per_cpu *cpu_buffer;
+        unsigned long flags;
+
+        if (!iter)
+                return;
+
+        cpu_buffer = iter->cpu_buffer;
 
         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
         arch_spin_lock(&cpu_buffer->lock);
         rb_iter_reset(iter);
         arch_spin_unlock(&cpu_buffer->lock);
         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
-
-        return iter;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_read_start);
 
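Splitting the old ring_buffer_read_start() into prepare/sync/start lets a caller pay the synchronize_sched() grace period once for all CPUs instead of once per CPU. A sketch of the intended calling sequence (the helper and its iters array are hypothetical, not taken from this patch):

static void start_iters_on_all_cpus(struct ring_buffer *buffer,
                                    struct ring_buffer_iter **iters)
{
        int cpu;

        /* Allocate iterators and disable recording on each cpu buffer. */
        for_each_online_cpu(cpu)
                iters[cpu] = ring_buffer_read_prepare(buffer, cpu);

        /* A single grace period covers every prepare call above. */
        ring_buffer_read_prepare_sync();

        /* Reset each iterator now that in-flight writers have drained. */
        for_each_online_cpu(cpu)
                ring_buffer_read_start(iters[cpu]);
}

Teardown is unchanged: each prepared iterator is still released with the finish call named in the kernel-doc above.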
@@ -3408,6 +3504,9 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
         cpu_buffer->write_stamp = 0;
         cpu_buffer->read_stamp = 0;
 
+        cpu_buffer->lost_events = 0;
+        cpu_buffer->last_overrun = 0;
+
         rb_head_page_activate(cpu_buffer);
 }
 
@@ -3683,6 +3782,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
         struct ring_buffer_event *event;
         struct buffer_data_page *bpage;
         struct buffer_page *reader;
+        unsigned long missed_events;
         unsigned long flags;
         unsigned int commit;
         unsigned int read;
@@ -3719,6 +3819,9 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
         read = reader->read;
         commit = rb_page_commit(reader);
 
+        /* Check if any events were dropped */
+        missed_events = cpu_buffer->lost_events;
+
         /*
          * If this page has been partially read or
          * if len is not big enough to read the rest of the page or
@@ -3779,9 +3882,35 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
                 local_set(&reader->entries, 0);
                 reader->read = 0;
                 *data_page = bpage;
+
+                /*
+                 * Use the real_end for the data size,
+                 * This gives us a chance to store the lost events
+                 * on the page.
+                 */
+                if (reader->real_end)
+                        local_set(&bpage->commit, reader->real_end);
         }
         ret = read;
 
+        cpu_buffer->lost_events = 0;
+        /*
+         * Set a flag in the commit field if we lost events
+         */
+        if (missed_events) {
+                commit = local_read(&bpage->commit);
+
+                /* If there is room at the end of the page to save the
+                 * missed events, then record it there.
+                 */
+                if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
+                        memcpy(&bpage->data[commit], &missed_events,
+                               sizeof(missed_events));
+                        local_add(RB_MISSED_STORED, &bpage->commit);
+                }
+                local_add(RB_MISSED_EVENTS, &bpage->commit);
+        }
+
  out_unlock:
         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
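On the consuming side of ring_buffer_read_page(), the returned page now carries the dropped-event information in its commit word: one flag says events were missed, the other says the count was appended after the committed data. A hedged sketch of the decoding a reader could do (the helper and the -1UL "count unknown" convention are illustrative, not part of this patch):

static unsigned long bpage_missed_events(struct buffer_data_page *bpage)
{
        unsigned long commit = local_read(&bpage->commit);
        unsigned long missed = 0;

        if (commit & RB_MISSED_EVENTS) {
                if (commit & RB_MISSED_STORED) {
                        /* The count sits right after the committed data. */
                        unsigned long len = commit &
                                ~(RB_MISSED_EVENTS | RB_MISSED_STORED);

                        memcpy(&missed, &bpage->data[len], sizeof(missed));
                } else {
                        /* Events were lost, but there was no room for the count. */
                        missed = -1UL;
                }
        }
        return missed;
}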
