diff options
Diffstat (limited to 'Documentation/atomic_ops.txt')
-rw-r--r--	Documentation/atomic_ops.txt	55
1 file changed, 50 insertions(+), 5 deletions(-)
diff --git a/Documentation/atomic_ops.txt b/Documentation/atomic_ops.txt
index 938f99957052..d46306fea230 100644
--- a/Documentation/atomic_ops.txt
+++ b/Documentation/atomic_ops.txt
@@ -14,12 +14,15 @@ suffice:
 
 	typedef struct { volatile int counter; } atomic_t;
 
+Historically, counter has been declared volatile. This is now discouraged.
+See Documentation/volatile-considered-harmful.txt for the complete rationale.
+
 local_t is very similar to atomic_t. If the counter is per CPU and only
 updated by one CPU, local_t is probably more appropriate. Please see
 Documentation/local_ops.txt for the semantics of local_t.
 
-The first operations to implement for atomic_t's are the
-initializers and plain reads.
+The first operations to implement for atomic_t's are the initializers and
+plain reads.
 
 	#define ATOMIC_INIT(i) { (i) }
 	#define atomic_set(v, i) ((v)->counter = (i))
@@ -28,6 +31,12 @@ The first macro is used in definitions, such as:
 
 static atomic_t my_counter = ATOMIC_INIT(1);
 
+The initializer is atomic in that the return values of the atomic operations
+are guaranteed to be correct reflecting the initialized value if the
+initializer is used before runtime. If the initializer is used at runtime, a
+proper implicit or explicit read memory barrier is needed before reading the
+value with atomic_read from another thread.
+
 The second interface can be used at runtime, as in:
 
 	struct foo { atomic_t counter; };
@@ -40,13 +49,43 @@ The second interface can be used at runtime, as in:
 		return -ENOMEM;
 	atomic_set(&k->counter, 0);
 
+The setting is atomic in that the return values of the atomic operations by
+all threads are guaranteed to be correct reflecting either the value that has
+been set with this operation or set with another operation. A proper implicit
+or explicit memory barrier is needed before the value set with the operation
+is guaranteed to be readable with atomic_read from another thread.
+
 Next, we have:
 
 	#define atomic_read(v) ((v)->counter)
 
-which simply reads the current value of the counter.
-
-Now, we move onto the actual atomic operation interfaces.
+which simply reads the counter value currently visible to the calling thread.
+The read is atomic in that the return value is guaranteed to be one of the
+values initialized or modified with the interface operations if a proper
+implicit or explicit memory barrier is used after possible runtime
+initialization by any other thread and the value is modified only with the
+interface operations. atomic_read does not guarantee that the runtime
+initialization by any other thread is visible yet, so the user of the
+interface must take care of that with a proper implicit or explicit memory
+barrier.
+
+*** WARNING: atomic_read() and atomic_set() DO NOT IMPLY BARRIERS! ***
+
+Some architectures may choose to use the volatile keyword, barriers, or inline
+assembly to guarantee some degree of immediacy for atomic_read() and
+atomic_set(). This is not uniformly guaranteed, and may change in the future,
+so all users of atomic_t should treat atomic_read() and atomic_set() as simple
+C statements that may be reordered or optimized away entirely by the compiler
+or processor, and explicitly invoke the appropriate compiler and/or memory
+barrier for each use case. Failure to do so will result in code that may
+suddenly break when used with different architectures or compiler
+optimizations, or even changes in unrelated code which changes how the
+compiler optimizes the section accessing atomic_t variables.
+
+*** YOU HAVE BEEN WARNED! ***
+
+Now, we move onto the atomic operation interfaces typically implemented with
+the help of assembly code.
 
 	void atomic_add(int i, atomic_t *v);
 	void atomic_sub(int i, atomic_t *v);
@@ -121,6 +160,12 @@ operation.
 
 Then:
 
+	int atomic_xchg(atomic_t *v, int new);
+
+This performs an atomic exchange operation on the atomic variable v, setting
+the given new value. It returns the old value that the atomic variable v had
+just before the operation.
+
 	int atomic_cmpxchg(atomic_t *v, int old, int new);
 
 This performs an atomic compare exchange operation on the atomic value v,