author     Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d  /Documentation/RCU/listRCU.txt

Linux-2.6.12-rc2  (tag: v2.6.12-rc2)

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git archive
of that later if we want to, and in the meantime it's about 3.2GB when
imported into git - space that would just make the early git days
unnecessarily complicated, when we don't have a lot of good
infrastructure for it. Let it rip!

Diffstat (limited to 'Documentation/RCU/listRCU.txt')
 -rw-r--r--  Documentation/RCU/listRCU.txt  307
 1 file changed, 307 insertions, 0 deletions
diff --git a/Documentation/RCU/listRCU.txt b/Documentation/RCU/listRCU.txt
new file mode 100644
index 000000000000..bda6ead69bd0
--- /dev/null
+++ b/Documentation/RCU/listRCU.txt
@@ -0,0 +1,307 @@
Using RCU to Protect Read-Mostly Linked Lists


One of the best applications of RCU is to protect read-mostly linked lists
("struct list_head" in list.h). One big advantage of this approach
is that all of the required memory barriers are included for you in
the list macros. This document describes several applications of RCU,
with the best fits first.

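The examples below all operate on list elements that embed both a
struct list_head (for the list linkage) and a struct rcu_head (so that
the element can be handed to call_rcu() when it is removed). As a rough
sketch of what such an element might look like (the layout here is
illustrative, not the actual kernel/auditsc.c definition):

        struct audit_entry {
                struct list_head  list;  /* linkage in audit_tsklist */
                struct rcu_head   rcu;   /* for deferred freeing via call_rcu() */
                struct audit_rule rule;  /* the rule itself */
        };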

Example 1: Read-Side Action Taken Outside of Lock, No In-Place Updates

The best applications are cases where, if reader-writer locking were
used, the read-side lock would be dropped before taking any action
based on the results of the search. The most celebrated example is
the routing table. Because the routing table is tracking the state of
equipment outside of the computer, it will at times contain stale data.
Therefore, once the route has been computed, there is no need to hold
the routing table static during transmission of the packet. After all,
you can hold the routing table static all you want, but that won't keep
the external Internet from changing, and it is the state of the external
Internet that really matters. In addition, routing entries are typically
added or deleted, rather than being modified in place.

A straightforward example of this use of RCU may be found in the
system-call auditing support. For example, a reader-writer locked
implementation of audit_filter_task() might be as follows:

        static enum audit_state audit_filter_task(struct task_struct *tsk)
        {
                struct audit_entry *e;
                enum audit_state state;

                read_lock(&auditsc_lock);
                list_for_each_entry(e, &audit_tsklist, list) {
                        if (audit_filter_rules(tsk, &e->rule, NULL, &state)) {
                                read_unlock(&auditsc_lock);
                                return state;
                        }
                }
                read_unlock(&auditsc_lock);
                return AUDIT_BUILD_CONTEXT;
        }

Here the list is searched under the lock, but the lock is dropped before
the corresponding value is returned. By the time that this value is acted
on, the list may well have been modified. This makes sense, since if
you are turning auditing off, it is OK to audit a few extra system calls.

This means that RCU can be easily applied to the read side, as follows:

        static enum audit_state audit_filter_task(struct task_struct *tsk)
        {
                struct audit_entry *e;
                enum audit_state state;

                rcu_read_lock();
                list_for_each_entry_rcu(e, &audit_tsklist, list) {
                        if (audit_filter_rules(tsk, &e->rule, NULL, &state)) {
                                rcu_read_unlock();
                                return state;
                        }
                }
                rcu_read_unlock();
                return AUDIT_BUILD_CONTEXT;
        }

The read_lock() and read_unlock() calls have become rcu_read_lock()
and rcu_read_unlock(), respectively, and the list_for_each_entry() has
become list_for_each_entry_rcu(). The _rcu() list-traversal primitives
insert the read-side memory barriers that are required on DEC Alpha CPUs.

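For a feel for where those barriers come from, here is a conceptual
sketch (not the actual macro in include/linux/list.h) of what
list_for_each_entry_rcu() does differently from list_for_each_entry():
each ->next pointer is fetched through rcu_dereference(), which supplies
the data-dependency barrier that DEC Alpha requires:

        /* Conceptual sketch only -- the real macro is more heavily optimized. */
        #define list_for_each_entry_rcu_sketch(pos, head, member)            \
                for (pos = list_entry(rcu_dereference((head)->next),         \
                                      typeof(*pos), member);                 \
                     &pos->member != (head);                                 \
                     pos = list_entry(rcu_dereference(pos->member.next),     \
                                      typeof(*pos), member))
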
The changes to the update side are also straightforward. A reader-writer
lock might be used as follows for deletion and insertion:

        static inline int audit_del_rule(struct audit_rule *rule,
                                         struct list_head *list)
        {
                struct audit_entry *e;

                write_lock(&auditsc_lock);
                list_for_each_entry(e, list, list) {
                        if (!audit_compare_rule(rule, &e->rule)) {
                                list_del(&e->list);
                                write_unlock(&auditsc_lock);
                                return 0;
                        }
                }
                write_unlock(&auditsc_lock);
                return -EFAULT; /* No matching rule */
        }

        static inline int audit_add_rule(struct audit_entry *entry,
                                         struct list_head *list)
        {
                write_lock(&auditsc_lock);
                if (entry->rule.flags & AUDIT_PREPEND) {
                        entry->rule.flags &= ~AUDIT_PREPEND;
                        list_add(&entry->list, list);
                } else {
                        list_add_tail(&entry->list, list);
                }
                write_unlock(&auditsc_lock);
                return 0;
        }

Following are the RCU equivalents for these two functions:

        static inline int audit_del_rule(struct audit_rule *rule,
                                         struct list_head *list)
        {
                struct audit_entry *e;

                /* Do not use the _rcu iterator here, since this is the only
                 * deletion routine. */
                list_for_each_entry(e, list, list) {
                        if (!audit_compare_rule(rule, &e->rule)) {
                                list_del_rcu(&e->list);
                                call_rcu(&e->rcu, audit_free_rule);
                                return 0;
                        }
                }
                return -EFAULT; /* No matching rule */
        }

        static inline int audit_add_rule(struct audit_entry *entry,
                                         struct list_head *list)
        {
                if (entry->rule.flags & AUDIT_PREPEND) {
                        entry->rule.flags &= ~AUDIT_PREPEND;
                        list_add_rcu(&entry->list, list);
                } else {
                        list_add_tail_rcu(&entry->list, list);
                }
                return 0;
        }

Normally, the write_lock() and write_unlock() would be replaced by
a spin_lock() and a spin_unlock(), but in this case, all callers hold
audit_netlink_sem, so no additional locking is required. The auditsc_lock
can therefore be eliminated, since use of RCU eliminates the need for
writers to exclude readers.

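If the callers did not already serialize themselves with
audit_netlink_sem, the deletion path might look something like the
following sketch, with an ordinary spinlock standing in for the old
reader-writer lock (the audit_update_lock name here is hypothetical):

        static spinlock_t audit_update_lock = SPIN_LOCK_UNLOCKED; /* hypothetical */

        static inline int audit_del_rule(struct audit_rule *rule,
                                         struct list_head *list)
        {
                struct audit_entry *e;

                spin_lock(&audit_update_lock);
                list_for_each_entry(e, list, list) {
                        if (!audit_compare_rule(rule, &e->rule)) {
                                list_del_rcu(&e->list);
                                spin_unlock(&audit_update_lock);
                                call_rcu(&e->rcu, audit_free_rule);
                                return 0;
                        }
                }
                spin_unlock(&audit_update_lock);
                return -EFAULT; /* No matching rule */
        }

Readers never take this spinlock; it serves only to keep concurrent
updaters from corrupting the list.
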
The list_del(), list_add(), and list_add_tail() primitives have been
replaced by list_del_rcu(), list_add_rcu(), and list_add_tail_rcu().
The _rcu() list-manipulation primitives add memory barriers that are
needed on weakly ordered CPUs (most of them!).

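To see where that barrier lives, here is a simplified sketch of the
helper behind list_add_rcu() (paraphrased from include/linux/list.h of
this era): the new element's fields are filled in first, then a write
memory barrier executes before the element is linked in, so that readers
never see a partially initialized entry.

        /* Simplified sketch of list_add_rcu()'s helper. */
        static inline void __list_add_rcu(struct list_head *new,
                                          struct list_head *prev,
                                          struct list_head *next)
        {
                new->next = next;
                new->prev = prev;
                smp_wmb();      /* commit new's fields before publishing it */
                next->prev = new;
                prev->next = new;
        }
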
So, when readers can tolerate stale data and when entries are either added
or deleted, without in-place modification, it is very easy to use RCU!


Example 2: Handling In-Place Updates

The system-call auditing code does not update auditing rules in place.
However, if it did, reader-writer-locked code to do so might look as
follows (presumably, the field_count is only permitted to decrease;
otherwise, the added fields would need to be filled in):

        static inline int audit_upd_rule(struct audit_rule *rule,
                                         struct list_head *list,
                                         __u32 newaction,
                                         __u32 newfield_count)
        {
                struct audit_entry *e;

                write_lock(&auditsc_lock);
                list_for_each_entry(e, list, list) {
                        if (!audit_compare_rule(rule, &e->rule)) {
                                e->rule.action = newaction;
                                e->rule.field_count = newfield_count;
                                write_unlock(&auditsc_lock);
                                return 0;
                        }
                }
                write_unlock(&auditsc_lock);
                return -EFAULT; /* No matching rule */
        }

The RCU version creates a copy, updates the copy, then replaces the old
entry with the newly updated entry. This sequence of actions, allowing
concurrent reads while doing a copy to perform an update, is what gives
RCU ("read-copy update") its name. The RCU code is as follows:

        static inline int audit_upd_rule(struct audit_rule *rule,
                                         struct list_head *list,
                                         __u32 newaction,
                                         __u32 newfield_count)
        {
                struct audit_entry *e;
                struct audit_entry *ne;

                list_for_each_entry(e, list, list) {
                        if (!audit_compare_rule(rule, &e->rule)) {
                                ne = kmalloc(sizeof(*ne), GFP_ATOMIC);
                                if (ne == NULL)
                                        return -ENOMEM;
                                audit_copy_rule(&ne->rule, &e->rule);
                                ne->rule.action = newaction;
                                ne->rule.field_count = newfield_count;
                                list_add_rcu(&ne->list, &e->list);
                                list_del_rcu(&e->list);
                                call_rcu(&e->rcu, audit_free_rule);
                                return 0;
                        }
                }
                return -EFAULT; /* No matching rule */
        }

Again, this assumes that the caller holds audit_netlink_sem. Normally,
the reader-writer lock would become a spinlock in this sort of code.


Example 3: Eliminating Stale Data

The auditing examples above tolerate stale data, as do most algorithms
that are tracking external state. Because there is a delay between the
time the external state changes and the time Linux becomes aware of the
change, additional RCU-induced staleness is normally not a problem.

However, there are many examples where stale data cannot be tolerated.
One example in the Linux kernel is the System V IPC (see the ipc_lock()
function in ipc/util.c). This code checks a "deleted" flag under a
per-entry spinlock, and, if the "deleted" flag is set, pretends that the
entry does not exist. For this to be helpful, the search function must
return holding the per-entry spinlock, as ipc_lock() does in fact do.

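In outline, ipc_lock() follows the pattern sketched below (a simplified
paraphrase, not the exact ipc/util.c code; the ipc_find() helper is a
hypothetical stand-in for the real lookup): find the entry under
rcu_read_lock(), take its per-entry lock, and only then trust it after
checking the "deleted" flag.

        /* Simplified paraphrase of the ipc_lock() pattern. */
        struct kern_ipc_perm *ipc_lock_sketch(struct ipc_ids *ids, int id)
        {
                struct kern_ipc_perm *perm;

                rcu_read_lock();
                perm = ipc_find(ids, id);       /* hypothetical lookup helper */
                if (perm == NULL) {
                        rcu_read_unlock();
                        return NULL;
                }
                spin_lock(&perm->lock);
                if (perm->deleted) {
                        /* Removed after the lookup, before we got the lock. */
                        spin_unlock(&perm->lock);
                        rcu_read_unlock();
                        return NULL;
                }
                return perm;    /* returns with perm->lock held */
        }
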
Quick Quiz: Why does the search function need to return holding the
per-entry lock for this deleted-flag technique to be helpful?

If the system-call audit module were to ever need to reject stale data,
one way to accomplish this would be to add a "deleted" flag and a "lock"
spinlock to the audit_entry structure, and modify audit_filter_task()
as follows:

        static enum audit_state audit_filter_task(struct task_struct *tsk)
        {
                struct audit_entry *e;
                enum audit_state state;

                rcu_read_lock();
                list_for_each_entry_rcu(e, &audit_tsklist, list) {
                        if (audit_filter_rules(tsk, &e->rule, NULL, &state)) {
                                spin_lock(&e->lock);
                                if (e->deleted) {
                                        spin_unlock(&e->lock);
                                        rcu_read_unlock();
                                        return AUDIT_BUILD_CONTEXT;
                                }
                                rcu_read_unlock();
                                return state; /* with e->lock still held */
                        }
                }
                rcu_read_unlock();
                return AUDIT_BUILD_CONTEXT;
        }

Note that this example assumes that entries are only added and deleted.
Additional mechanism is required to deal correctly with the
update-in-place performed by audit_upd_rule(). For one thing,
audit_upd_rule() would need additional memory barriers to ensure
that the list_add_rcu() was really executed before the list_del_rcu().

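One way to express that ordering (a sketch of the sort of barrier the
previous paragraph is describing, not tested code) would be an explicit
write memory barrier between the two list operations in audit_upd_rule():

                /* Sketch: make the insertion of the new entry visible to
                 * readers before the old entry is unlinked. */
                list_add_rcu(&ne->list, &e->list);
                smp_wmb();
                list_del_rcu(&e->list);
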
The audit_del_rule() function would need to set the "deleted"
flag under the spinlock as follows:

        static inline int audit_del_rule(struct audit_rule *rule,
                                         struct list_head *list)
        {
                struct audit_entry *e;

                /* Do not use the _rcu iterator here, since this is the only
                 * deletion routine. */
                list_for_each_entry(e, list, list) {
                        if (!audit_compare_rule(rule, &e->rule)) {
                                spin_lock(&e->lock);
                                list_del_rcu(&e->list);
                                e->deleted = 1;
                                spin_unlock(&e->lock);
                                call_rcu(&e->rcu, audit_free_rule);
                                return 0;
                        }
                }
                return -EFAULT; /* No matching rule */
        }


Summary

Read-mostly list-based data structures that can tolerate stale data are
the most amenable to use of RCU. The simplest case is where entries are
either added to or deleted from the data structure (or atomically modified
in place), but non-atomic in-place modifications can be handled by making
a copy, updating the copy, then replacing the original with the copy.
If stale data cannot be tolerated, then a "deleted" flag may be used
in conjunction with a per-entry spinlock in order to allow the search
function to reject newly deleted data.


Answer to Quick Quiz

If the search function drops the per-entry lock before returning, then
the caller will be processing stale data in any case. If it is really
OK to be processing stale data, then you don't need a "deleted" flag.
If processing stale data really is a problem, then you need to hold the
per-entry lock across all of the code that uses the value looked up.