Diffstat (limited to 'kernel/trace/ring_buffer.c')
 -rw-r--r--  kernel/trace/ring_buffer.c | 154
 1 file changed, 89 insertions(+), 65 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 2d75c94ae87d..7a4104cb95cb 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -34,21 +34,19 @@ static void update_pages_handler(struct work_struct *work);
  */
 int ring_buffer_print_entry_header(struct trace_seq *s)
 {
-        int ret;
-
-        ret = trace_seq_puts(s, "# compressed entry header\n");
-        ret = trace_seq_puts(s, "\ttype_len    :    5 bits\n");
-        ret = trace_seq_puts(s, "\ttime_delta  :   27 bits\n");
-        ret = trace_seq_puts(s, "\tarray       :   32 bits\n");
-        ret = trace_seq_putc(s, '\n');
-        ret = trace_seq_printf(s, "\tpadding     : type == %d\n",
-                               RINGBUF_TYPE_PADDING);
-        ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
-                               RINGBUF_TYPE_TIME_EXTEND);
-        ret = trace_seq_printf(s, "\tdata max type_len  == %d\n",
-                               RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
+        trace_seq_puts(s, "# compressed entry header\n");
+        trace_seq_puts(s, "\ttype_len    :    5 bits\n");
+        trace_seq_puts(s, "\ttime_delta  :   27 bits\n");
+        trace_seq_puts(s, "\tarray       :   32 bits\n");
+        trace_seq_putc(s, '\n');
+        trace_seq_printf(s, "\tpadding     : type == %d\n",
+                         RINGBUF_TYPE_PADDING);
+        trace_seq_printf(s, "\ttime_extend : type == %d\n",
+                         RINGBUF_TYPE_TIME_EXTEND);
+        trace_seq_printf(s, "\tdata max type_len  == %d\n",
+                         RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
 
-        return ret;
+        return !trace_seq_has_overflowed(s);
 }
 
 /*
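Note: the hunk above can drop every per-call return-value check because trace_seq state is sticky: once the internal buffer overflows, later writes are silently dropped, so a single trace_seq_has_overflowed() query at the end tells the caller the same thing the old chain of assignments did (and more reliably, since each old ret overwrote the previous one). Below is a minimal userspace sketch of this sticky-overflow pattern; demo_seq and its helpers are hypothetical stand-ins, not the kernel's trace_seq API.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for struct trace_seq: a fixed buffer plus a
 * sticky overflow flag. */
struct demo_seq {
        char buf[32];
        size_t len;
        bool overflow;
};

static void demo_seq_puts(struct demo_seq *s, const char *str)
{
        size_t n = strlen(str);

        /* Once set, the flag stays set and later writes are dropped,
         * so callers only need a single check at the end. */
        if (s->overflow || s->len + n + 1 > sizeof(s->buf)) {
                s->overflow = true;
                return;
        }
        memcpy(s->buf + s->len, str, n + 1);    /* copy the NUL too */
        s->len += n;
}

static bool demo_seq_has_overflowed(const struct demo_seq *s)
{
        return s->overflow;
}

int main(void)
{
        struct demo_seq s = { .len = 0, .overflow = false };

        /* Write several lines without checking each call... */
        demo_seq_puts(&s, "# header\n");
        demo_seq_puts(&s, "\tfield a\n");
        demo_seq_puts(&s, "\tfield b\n");

        /* ...then report success exactly once, as the hunk above does. */
        printf("%sok=%d\n", s.buf, !demo_seq_has_overflowed(&s));
        return 0;
}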
@@ -419,32 +417,31 @@ static inline int test_time_stamp(u64 delta)
 int ring_buffer_print_page_header(struct trace_seq *s)
 {
         struct buffer_data_page field;
-        int ret;
 
-        ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
-                               "offset:0;\tsize:%u;\tsigned:%u;\n",
-                               (unsigned int)sizeof(field.time_stamp),
-                               (unsigned int)is_signed_type(u64));
-
-        ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
-                               "offset:%u;\tsize:%u;\tsigned:%u;\n",
-                               (unsigned int)offsetof(typeof(field), commit),
-                               (unsigned int)sizeof(field.commit),
-                               (unsigned int)is_signed_type(long));
-
-        ret = trace_seq_printf(s, "\tfield: int overwrite;\t"
-                               "offset:%u;\tsize:%u;\tsigned:%u;\n",
-                               (unsigned int)offsetof(typeof(field), commit),
-                               1,
-                               (unsigned int)is_signed_type(long));
-
-        ret = trace_seq_printf(s, "\tfield: char data;\t"
-                               "offset:%u;\tsize:%u;\tsigned:%u;\n",
-                               (unsigned int)offsetof(typeof(field), data),
-                               (unsigned int)BUF_PAGE_SIZE,
-                               (unsigned int)is_signed_type(char));
-
-        return ret;
+        trace_seq_printf(s, "\tfield: u64 timestamp;\t"
+                         "offset:0;\tsize:%u;\tsigned:%u;\n",
+                         (unsigned int)sizeof(field.time_stamp),
+                         (unsigned int)is_signed_type(u64));
+
+        trace_seq_printf(s, "\tfield: local_t commit;\t"
+                         "offset:%u;\tsize:%u;\tsigned:%u;\n",
+                         (unsigned int)offsetof(typeof(field), commit),
+                         (unsigned int)sizeof(field.commit),
+                         (unsigned int)is_signed_type(long));
+
+        trace_seq_printf(s, "\tfield: int overwrite;\t"
+                         "offset:%u;\tsize:%u;\tsigned:%u;\n",
+                         (unsigned int)offsetof(typeof(field), commit),
+                         1,
+                         (unsigned int)is_signed_type(long));
+
+        trace_seq_printf(s, "\tfield: char data;\t"
+                         "offset:%u;\tsize:%u;\tsigned:%u;\n",
+                         (unsigned int)offsetof(typeof(field), data),
+                         (unsigned int)BUF_PAGE_SIZE,
+                         (unsigned int)is_signed_type(char));
+
+        return !trace_seq_has_overflowed(s);
 }
 
 struct rb_irq_work {
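Each "field:" line above advertises a field's offset, size, and signedness so userspace parsers of binary ring-buffer pages can decode the layout without hardcoding it. The is_signed_type() macro works because (type)(-1) compares below (type)1 only for signed types. Here is a standalone sketch of the same formatting; sample_page is an illustrative layout, not the kernel's buffer_data_page.

#include <stddef.h>
#include <stdio.h>

/* Same trick as the kernel's is_signed_type(): casting -1 to the type
 * yields a value less than 1 only when the type is signed. */
#define is_signed_type(type)    (((type)(-1)) < (type)1)

/* Illustrative page layout for the demo. */
struct sample_page {
        unsigned long long time_stamp;
        long commit;
        char data[];
};

int main(void)
{
        /* Print "field:" lines in the same offset/size/signed format
         * the page header above emits. */
        printf("\tfield: u64 timestamp;\toffset:0;\tsize:%u;\tsigned:%u;\n",
               (unsigned int)sizeof(unsigned long long),
               (unsigned int)is_signed_type(unsigned long long));
        printf("\tfield: local_t commit;\toffset:%u;\tsize:%u;\tsigned:%u;\n",
               (unsigned int)offsetof(struct sample_page, commit),
               (unsigned int)sizeof(long),
               (unsigned int)is_signed_type(long));
        return 0;
}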
@@ -538,16 +535,18 @@ static void rb_wake_up_waiters(struct irq_work *work)
  * ring_buffer_wait - wait for input to the ring buffer
  * @buffer: buffer to wait on
  * @cpu: the cpu buffer to wait on
+ * @full: wait until a full page is available, if @cpu != RING_BUFFER_ALL_CPUS
  *
  * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
  * as data is added to any of the @buffer's cpu buffers. Otherwise
  * it will wait for data to be added to a specific cpu buffer.
  */
-int ring_buffer_wait(struct ring_buffer *buffer, int cpu)
+int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
 {
-        struct ring_buffer_per_cpu *cpu_buffer;
+        struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer);
         DEFINE_WAIT(wait);
         struct rb_irq_work *work;
+        int ret = 0;
 
         /*
          * Depending on what the caller is waiting for, either any
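With the new @full flag, a reader of a specific CPU buffer can sleep until the reader page has actually filled instead of waking on every partial write, and a pending signal now ends the wait with -EINTR rather than a silent 0. A hedged caller sketch follows; wait_for_full_page is hypothetical, not part of this patch, and assumes <linux/ring_buffer.h> kernel context.

/* Hypothetical caller sketch: block until @cpu's buffer holds a full
 * page of data, bailing out if a signal arrives. */
static int wait_for_full_page(struct ring_buffer *buffer, int cpu)
{
        int ret;

        /* full=true: don't wake for partial data on a specific CPU */
        ret = ring_buffer_wait(buffer, cpu, true);
        if (ret)        /* -EINTR if interrupted while waiting */
                return ret;

        /* at least one full page should now be available to splice */
        return 0;
}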
@@ -564,36 +563,61 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu)
         }
 
 
-        prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
+        while (true) {
+                prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
 
-        /*
-         * The events can happen in critical sections where
-         * checking a work queue can cause deadlocks.
-         * After adding a task to the queue, this flag is set
-         * only to notify events to try to wake up the queue
-         * using irq_work.
-         *
-         * We don't clear it even if the buffer is no longer
-         * empty. The flag only causes the next event to run
-         * irq_work to do the work queue wake up. The worse
-         * that can happen if we race with !trace_empty() is that
-         * an event will cause an irq_work to try to wake up
-         * an empty queue.
-         *
-         * There's no reason to protect this flag either, as
-         * the work queue and irq_work logic will do the necessary
-         * synchronization for the wake ups. The only thing
-         * that is necessary is that the wake up happens after
-         * a task has been queued. It's OK for spurious wake ups.
-         */
-        work->waiters_pending = true;
+                /*
+                 * The events can happen in critical sections where
+                 * checking a work queue can cause deadlocks.
+                 * After adding a task to the queue, this flag is set
+                 * only to notify events to try to wake up the queue
+                 * using irq_work.
+                 *
+                 * We don't clear it even if the buffer is no longer
+                 * empty. The flag only causes the next event to run
+                 * irq_work to do the work queue wake up. The worse
+                 * that can happen if we race with !trace_empty() is that
+                 * an event will cause an irq_work to try to wake up
+                 * an empty queue.
+                 *
+                 * There's no reason to protect this flag either, as
+                 * the work queue and irq_work logic will do the necessary
+                 * synchronization for the wake ups. The only thing
+                 * that is necessary is that the wake up happens after
+                 * a task has been queued. It's OK for spurious wake ups.
+                 */
+                work->waiters_pending = true;
+
+                if (signal_pending(current)) {
+                        ret = -EINTR;
+                        break;
+                }
+
+                if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer))
+                        break;
+
+                if (cpu != RING_BUFFER_ALL_CPUS &&
+                    !ring_buffer_empty_cpu(buffer, cpu)) {
+                        unsigned long flags;
+                        bool pagebusy;
+
+                        if (!full)
+                                break;
+
+                        raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+                        pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
+                        raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
+                        if (!pagebusy)
+                                break;
+                }
 
-        if ((cpu == RING_BUFFER_ALL_CPUS && ring_buffer_empty(buffer)) ||
-            (cpu != RING_BUFFER_ALL_CPUS && ring_buffer_empty_cpu(buffer, cpu)))
-                schedule();
+                schedule();
+        }
 
         finish_wait(&work->waiters, &wait);
-        return 0;
+
+        return ret;
 }
 
 /**
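The rewritten body is the classic wait-loop shape: mark the task as waiting (prepare_to_wait), re-test the wake condition, sleep via schedule() only while the condition still fails, and clean up with finish_wait(). Because the condition is re-checked on every iteration, spurious wakeups are harmless, exactly as the comment in the hunk says. A userspace analogue using POSIX condition variables; data_ready and both helpers are hypothetical, not kernel code.

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool data_ready;

static void wait_for_data(void)
{
        pthread_mutex_lock(&lock);
        /* Loop: never assume one wakeup means the condition holds. */
        while (!data_ready)
                pthread_cond_wait(&cond, &lock);  /* the "schedule()" step */
        pthread_mutex_unlock(&lock);
}

static void signal_data(void)
{
        pthread_mutex_lock(&lock);
        data_ready = true;              /* publish the condition first... */
        pthread_cond_signal(&cond);     /* ...then wake, so the waiter's
                                         * re-check observes it */
        pthread_mutex_unlock(&lock);
}

Ordering the state update before the wake-up mirrors the requirement the kernel comment spells out: the only hard rule is that the wake-up happens after the waiter is queued; everything else tolerates races and spurious wake-ups.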
