Diffstat (limited to 'Documentation/circular-buffers.txt')

 Documentation/circular-buffers.txt | 45 +++++++++++++++++++++++++++------------------
 1 file changed, 27 insertions(+), 18 deletions(-)
diff --git a/Documentation/circular-buffers.txt b/Documentation/circular-buffers.txt
index 8117e5bf6065..88951b179262 100644
--- a/Documentation/circular-buffers.txt
+++ b/Documentation/circular-buffers.txt
@@ -160,6 +160,7 @@ The producer will look something like this:
 	spin_lock(&producer_lock);
 
 	unsigned long head = buffer->head;
+	/* The spin_unlock() and next spin_lock() provide needed ordering. */
 	unsigned long tail = ACCESS_ONCE(buffer->tail);
 
 	if (CIRC_SPACE(head, tail, buffer->size) >= 1) {
@@ -168,9 +169,8 @@ The producer will look something like this:
 
 		produce_item(item);
 
-		smp_wmb(); /* commit the item before incrementing the head */
-
-		buffer->head = (head + 1) & (buffer->size - 1);
+		smp_store_release(&buffer->head,
+				  (head + 1) & (buffer->size - 1));
 
 		/* wake_up() will make sure that the head is committed before
 		 * waking anyone up */
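
The smp_store_release() used here is a kernel-internal primitive, but the
pattern it implements can be experimented with in userspace via C11
<stdatomic.h>: write the item first, then publish the head index with a
release store. The sketch below is illustrative only and is not part of the
patch; struct ring, ring_produce() and RING_SIZE are invented names, with
atomic_store_explicit(..., memory_order_release) standing in for
smp_store_release().

    /* Userspace analogue of the producer above; illustrative only.
     * struct ring, ring_produce() and RING_SIZE are invented names.
     * The item is written first, then the head index is published
     * with a release store, mirroring smp_store_release(). */
    #include <stdatomic.h>
    #include <stdbool.h>

    #define RING_SIZE 16                /* must be a power of two */

    struct ring {
            void *items[RING_SIZE];
            atomic_ulong head;          /* written only by the producer */
            atomic_ulong tail;          /* written only by the consumer */
    };

    static bool ring_produce(struct ring *r, void *item)
    {
            unsigned long head = atomic_load_explicit(&r->head,
                            memory_order_relaxed);
            /* Pairs with the consumer's release store of tail. */
            unsigned long tail = atomic_load_explicit(&r->tail,
                            memory_order_acquire);

            if (((head + 1) & (RING_SIZE - 1)) == tail)
                    return false;       /* full: one slot is kept empty */

            r->items[head] = item;      /* write the item first... */

            /* ...then publish it to the consumer. */
            atomic_store_explicit(&r->head,
                            (head + 1) & (RING_SIZE - 1),
                            memory_order_release);
            return true;
    }

As in the kernel code, this assumes exactly one producer and one consumer;
the spinlocks in the document exist to serialize multiple producers or
multiple consumers against each other.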
@@ -183,9 +183,14 @@ This will instruct the CPU that the contents of the new item must be written
 before the head index makes it available to the consumer and then instructs the
 CPU that the revised head index must be written before the consumer is woken.
 
-Note that wake_up() doesn't have to be the exact mechanism used, but whatever
-is used must guarantee a (write) memory barrier between the update of the head
-index and the change of state of the consumer, if a change of state occurs.
+Note that wake_up() does not guarantee any sort of barrier unless something
+is actually awakened. We therefore cannot rely on it for ordering. However,
+there is always one element of the array left empty, so the producer must
+produce two elements before it could possibly corrupt the element currently
+being read by the consumer. The unlock-lock pair between consecutive
+invocations of the consumer therefore provides the necessary ordering between
+the read of the index indicating that the consumer has vacated a given element
+and the write by the producer to that same element.
 
 
 THE CONSUMER
@@ -195,21 +200,20 @@ The consumer will look something like this:
 
 	spin_lock(&consumer_lock);
 
-	unsigned long head = ACCESS_ONCE(buffer->head);
+	/* Read index before reading contents at that index. */
+	unsigned long head = smp_load_acquire(&buffer->head);
 	unsigned long tail = buffer->tail;
 
 	if (CIRC_CNT(head, tail, buffer->size) >= 1) {
-		/* read index before reading contents at that index */
-		smp_read_barrier_depends();
 
 		/* extract one item from the buffer */
 		struct item *item = buffer[tail];
 
 		consume_item(item);
 
-		smp_mb(); /* finish reading descriptor before incrementing tail */
-
-		buffer->tail = (tail + 1) & (buffer->size - 1);
+		/* Finish reading descriptor before incrementing tail. */
+		smp_store_release(&buffer->tail,
+				  (tail + 1) & (buffer->size - 1));
 	}
 
 	spin_unlock(&consumer_lock);
@@ -218,12 +222,17 @@ This will instruct the CPU to make sure the index is up to date before reading
 the new item, and then it shall make sure the CPU has finished reading the item
 before it writes the new tail pointer, which will erase the item.
 
-
-Note the use of ACCESS_ONCE() in both algorithms to read the opposition index.
-This prevents the compiler from discarding and reloading its cached value -
-which some compilers will do across smp_read_barrier_depends(). This isn't
-strictly needed if you can be sure that the opposition index will _only_ be
-used the once.
+Note the use of ACCESS_ONCE() and smp_load_acquire() to read the
+opposition index. This prevents the compiler from discarding and
+reloading its cached value - which some compilers will do across
+smp_read_barrier_depends(). This isn't strictly needed if you can
+be sure that the opposition index will _only_ be used the once.
+The smp_load_acquire() additionally forces the CPU to order against
+subsequent memory references. Similarly, smp_store_release() is used
+in both algorithms to write the thread's index. This documents the
+fact that we are writing to something that can be read concurrently,
+prevents the compiler from tearing the store, and enforces ordering
+against previous accesses.
 
 
 ===============
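
Under the same caveats as the producer sketch earlier, a matching userspace
consumer sketch follows: the acquire load of head corresponds to
smp_load_acquire(), and the release store of tail corresponds to
smp_store_release(). It reuses the hypothetical struct ring and RING_SIZE
from the producer sketch above.

    /* Userspace analogue of the consumer above; illustrative only.
     * Uses the hypothetical struct ring from the producer sketch.
     * The head index is read with an acquire load before the item is
     * read, and tail is published with a release store only after the
     * item has been fully consumed. */
    #include <stdatomic.h>
    #include <stdbool.h>

    static bool ring_consume(struct ring *r, void **item)
    {
            /* Read index before reading contents at that index. */
            unsigned long head = atomic_load_explicit(&r->head,
                            memory_order_acquire);
            unsigned long tail = atomic_load_explicit(&r->tail,
                            memory_order_relaxed);

            if (head == tail)
                    return false;       /* empty */

            *item = r->items[tail];     /* read the item first... */

            /* ...then free the slot for the producer. */
            atomic_store_explicit(&r->tail,
                            (tail + 1) & (RING_SIZE - 1),
                            memory_order_release);
            return true;
    }

The release store on each side pairs with the acquire load on the other,
which is why neither side needs a full barrier such as smp_mb().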