 Documentation/atomic_ops.txt | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/Documentation/atomic_ops.txt b/Documentation/atomic_ops.txt
index 23a1c2402bcc..2a63d5662a93 100644
--- a/Documentation/atomic_ops.txt
+++ b/Documentation/atomic_ops.txt
@@ -157,13 +157,13 @@ For example, smp_mb__before_atomic_dec() can be used like so:
 	smp_mb__before_atomic_dec();
 	atomic_dec(&obj->ref_count);
 
-It makes sure that all memory operations preceeding the atomic_dec()
+It makes sure that all memory operations preceding the atomic_dec()
 call are strongly ordered with respect to the atomic counter
-operation. In the above example, it guarentees that the assignment of
+operation. In the above example, it guarantees that the assignment of
 "1" to obj->dead will be globally visible to other cpus before the
 atomic counter decrement.
 
-Without the explicitl smp_mb__before_atomic_dec() call, the
+Without the explicit smp_mb__before_atomic_dec() call, the
 implementation could legally allow the atomic counter update visible
 to other cpus before the "obj->dead = 1;" assignment.
 
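
As a self-contained sketch of the pattern this hunk describes (struct obj, its fields, and obj_kill() are illustrative, built from the names used in the surrounding text; the include assumes the <linux/atomic.h> umbrella header):

	#include <linux/atomic.h>

	struct obj {
		int dead;		/* flag read by other cpus */
		atomic_t ref_count;	/* reference counter */
	};

	static void obj_kill(struct obj *obj)
	{
		obj->dead = 1;			/* plain store ...           */
		smp_mb__before_atomic_dec();	/* ... ordered globally      */
		atomic_dec(&obj->ref_count);	/* ... before the decrement  */
	}
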
@@ -173,11 +173,11 @@ ordering with respect to memory operations after an atomic_dec() call
 (smp_mb__{before,after}_atomic_inc()).
 
 A missing memory barrier in the cases where they are required by the
-atomic_t implementation above can have disasterous results. Here is
-an example, which follows a pattern occuring frequently in the Linux
+atomic_t implementation above can have disastrous results. Here is
+an example, which follows a pattern occurring frequently in the Linux
 kernel. It is the use of atomic counters to implement reference
 counting, and it works such that once the counter falls to zero it can
-be guarenteed that no other entity can be accessing the object:
+be guaranteed that no other entity can be accessing the object:
 
 static void obj_list_add(struct obj *obj)
 {
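
The hunk cuts off at the top of the reference-counting example. As a rough sketch of where the corrected text is heading, not the file's actual continuation, the zero-means-exclusive release side typically looks like this (obj_put() and the struct layout are illustrative):

	#include <linux/atomic.h>
	#include <linux/slab.h>

	struct obj {
		atomic_t refcnt;
		/* ... payload ... */
	};

	static void obj_put(struct obj *obj)
	{
		/* The caller that drops the counter to zero is, by the
		 * guarantee quoted above, the last entity that can be
		 * accessing the object, so it may safely free it. */
		if (atomic_dec_and_test(&obj->refcnt))
			kfree(obj);
	}
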
@@ -291,9 +291,9 @@ to the size of an "unsigned long" C data type, and are least of that
 size. The endianness of the bits within each "unsigned long" are the
 native endianness of the cpu.
 
-	void set_bit(unsigned long nr, volatils unsigned long *addr);
-	void clear_bit(unsigned long nr, volatils unsigned long *addr);
-	void change_bit(unsigned long nr, volatils unsigned long *addr);
+	void set_bit(unsigned long nr, volatile unsigned long *addr);
+	void clear_bit(unsigned long nr, volatile unsigned long *addr);
+	void change_bit(unsigned long nr, volatile unsigned long *addr);
 
 These routines set, clear, and change, respectively, the bit number
 indicated by "nr" on the bit mask pointed to by "ADDR".
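
A short usage sketch of these three routines, with a made-up two-word bitmap and made-up bit numbers. Note that "nr" may index past the first word; the routines locate the right "unsigned long" themselves:

	#include <linux/bitops.h>

	static unsigned long mask[2];	/* 2 * BITS_PER_LONG bits, zeroed */

	static void mask_demo(void)
	{
		set_bit(0, mask);			/* bit 0 of mask[0]  */
		set_bit(BITS_PER_LONG + 3, mask);	/* bit 3 of mask[1]  */
		change_bit(0, mask);			/* toggle bit 0 off  */
		clear_bit(BITS_PER_LONG + 3, mask);	/* clear the other   */
	}
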
@@ -301,9 +301,9 @@ indicated by "nr" on the bit mask pointed to by "ADDR".
 They must execute atomically, yet there are no implicit memory barrier
 semantics required of these interfaces.
 
-	int test_and_set_bit(unsigned long nr, volatils unsigned long *addr);
-	int test_and_clear_bit(unsigned long nr, volatils unsigned long *addr);
-	int test_and_change_bit(unsigned long nr, volatils unsigned long *addr);
+	int test_and_set_bit(unsigned long nr, volatile unsigned long *addr);
+	int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr);
+	int test_and_change_bit(unsigned long nr, volatile unsigned long *addr);
 
 Like the above, except that these routines return a boolean which
 indicates whether the changed bit was set _BEFORE_ the atomic bit
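
A common use of that returned old value is a one-shot claim, sketched below with a hypothetical flag word and bit name; whichever cpu sees the bit clear before its atomic update wins:

	#include <linux/bitops.h>

	#define OBJ_SETUP_DONE	0	/* illustrative bit number */

	static unsigned long obj_flags;

	static void obj_setup_once(void)
	{
		/* The return value is the bit as it was _BEFORE_ the
		 * atomic set, so exactly one caller sees 0 and runs
		 * the one-time setup. */
		if (!test_and_set_bit(OBJ_SETUP_DONE, &obj_flags)) {
			/* one-time setup goes here */
		}
	}
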
@@ -335,7 +335,7 @@ subsequent memory operation is made visible. For example:
 		/* ... */;
 	obj->killed = 1;
 
-The implementation of test_and_set_bit() must guarentee that
+The implementation of test_and_set_bit() must guarantee that
 "obj->dead = 1;" is visible to cpus before the atomic memory operation
 done by test_and_set_bit() becomes visible. Likewise, the atomic
 memory operation done by test_and_set_bit() must become visible before
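
Reassembling the hunk's context lines, the example around the corrected sentence follows this shape (a sketch; struct obj and obj_mark() are illustrative):

	#include <linux/bitops.h>

	struct obj {
		int dead, killed;
		unsigned long flags;
	};

	static void obj_mark(struct obj *obj)
	{
		obj->dead = 1;		/* must be visible before the     */
					/* atomic bit operation below ... */
		if (test_and_set_bit(0, &obj->flags))
			/* ... */;
		obj->killed = 1;	/* ... which in turn must be      */
					/* visible before this store      */
	}
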
@@ -474,7 +474,7 @@ Now, as far as memory barriers go, as long as spin_lock()
 strictly orders all subsequent memory operations (including
 the cas()) with respect to itself, things will be fine.
 
-Said another way, _atomic_dec_and_lock() must guarentee that
+Said another way, _atomic_dec_and_lock() must guarantee that
 a counter dropping to zero is never made visible before the
 spinlock being acquired.
 
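
From the caller's side, that guarantee is what makes the familiar pattern below safe; this sketch uses the public atomic_dec_and_lock() wrapper rather than the internal _atomic_dec_and_lock(), and the object, list, and lock names are illustrative:

	#include <linux/atomic.h>
	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct obj {
		atomic_t refcnt;
		struct list_head list;
	};

	static LIST_HEAD(obj_list);
	static DEFINE_SPINLOCK(obj_list_lock);

	static void obj_put(struct obj *obj)
	{
		/* Returns true with the lock already held, and only for
		 * the caller that took the counter to zero, so no other
		 * cpu can find the object on the list once the zero
		 * count is visible. */
		if (atomic_dec_and_lock(&obj->refcnt, &obj_list_lock)) {
			list_del(&obj->list);
			spin_unlock(&obj_list_lock);
			kfree(obj);
		}
	}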