diff options
Diffstat (limited to 'kernel/trace/ring_buffer.c')
 kernel/trace/ring_buffer.c | 19 +++++++++++++++++--
 1 file changed, 17 insertions(+), 2 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index bb6922a931b1..76f34c0ef29c 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -838,6 +838,7 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
 	 * back to us). This allows us to do a simple loop to
 	 * assign the commit to the tail.
 	 */
+ again:
 	while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
 		cpu_buffer->commit_page->page->commit =
 			cpu_buffer->commit_page->write;
@@ -853,6 +854,17 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
 			cpu_buffer->commit_page->write;
 		barrier();
 	}
+
+	/* again, keep gcc from optimizing */
+	barrier();
+
+	/*
+	 * If an interrupt came in just after the first while loop
+	 * and pushed the tail page forward, we will be left with
+	 * a dangling commit that will never go forward.
+	 */
+	if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
+		goto again;
 }
 
 static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
@@ -950,12 +962,15 @@ static struct ring_buffer_event *
 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 		  unsigned type, unsigned long length, u64 *ts)
 {
-	struct buffer_page *tail_page, *head_page, *reader_page;
+	struct buffer_page *tail_page, *head_page, *reader_page, *commit_page;
 	unsigned long tail, write;
 	struct ring_buffer *buffer = cpu_buffer->buffer;
 	struct ring_buffer_event *event;
 	unsigned long flags;
 
+	commit_page = cpu_buffer->commit_page;
+	/* we just need to protect against interrupts */
+	barrier();
 	tail_page = cpu_buffer->tail_page;
 	write = local_add_return(length, &tail_page->write);
 	tail = write - length;
@@ -981,7 +996,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	 * it all the way around the buffer, bail, and warn
 	 * about it.
 	 */
-	if (unlikely(next_page == cpu_buffer->commit_page)) {
+	if (unlikely(next_page == commit_page)) {
 		WARN_ON_ONCE(1);
 		goto out_unlock;
 	}
