about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--  kernel/events/ring_buffer.c  24
1 file changed, 20 insertions, 4 deletions
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 674b35383491..009467a60578 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -51,11 +51,18 @@ again:
 	head = local_read(&rb->head);
 
 	/*
-	 * IRQ/NMI can happen here, which means we can miss a head update.
+	 * IRQ/NMI can happen here and advance @rb->head, causing our
+	 * load above to be stale.
 	 */
 
-	if (!local_dec_and_test(&rb->nest))
+	/*
+	 * If this isn't the outermost nesting, we don't have to update
+	 * @rb->user_page->data_head.
+	 */
+	if (local_read(&rb->nest) > 1) {
+		local_dec(&rb->nest);
 		goto out;
+	}
 
 	/*
 	 * Since the mmap() consumer (userspace) can run on a different CPU:
@@ -87,9 +94,18 @@ again:
 	rb->user_page->data_head = head;
 
 	/*
-	 * Now check if we missed an update -- rely on previous implied
-	 * compiler barriers to force a re-read.
+	 * We must publish the head before decrementing the nest count,
+	 * otherwise an IRQ/NMI can publish a more recent head value and our
+	 * write will (temporarily) publish a stale value.
+	 */
+	barrier();
+	local_set(&rb->nest, 0);
+
+	/*
+	 * Ensure we decrement @rb->nest before we validate the @rb->head.
+	 * Otherwise we cannot be sure we caught the 'last' nested update.
 	 */
+	barrier();
 	if (unlikely(head != local_read(&rb->head))) {
 		local_inc(&rb->nest);
 		goto again;