Diffstat (limited to 'Documentation/DocBook/kernel-locking.tmpl')
-rw-r--r-- | Documentation/DocBook/kernel-locking.tmpl | 2088 |
1 files changed, 2088 insertions, 0 deletions
diff --git a/Documentation/DocBook/kernel-locking.tmpl b/Documentation/DocBook/kernel-locking.tmpl
new file mode 100644
index 000000000000..90dc2de8e0af
--- /dev/null
+++ b/Documentation/DocBook/kernel-locking.tmpl
@@ -0,0 +1,2088 @@ | |||
1 | <?xml version="1.0" encoding="UTF-8"?> | ||
2 | <!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN" | ||
3 | "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []> | ||
4 | |||
5 | <book id="LKLockingGuide"> | ||
6 | <bookinfo> | ||
7 | <title>Unreliable Guide To Locking</title> | ||
8 | |||
9 | <authorgroup> | ||
10 | <author> | ||
11 | <firstname>Rusty</firstname> | ||
12 | <surname>Russell</surname> | ||
13 | <affiliation> | ||
14 | <address> | ||
15 | <email>rusty@rustcorp.com.au</email> | ||
16 | </address> | ||
17 | </affiliation> | ||
18 | </author> | ||
19 | </authorgroup> | ||
20 | |||
21 | <copyright> | ||
22 | <year>2003</year> | ||
23 | <holder>Rusty Russell</holder> | ||
24 | </copyright> | ||
25 | |||
26 | <legalnotice> | ||
27 | <para> | ||
28 | This documentation is free software; you can redistribute | ||
29 | it and/or modify it under the terms of the GNU General Public | ||
30 | License as published by the Free Software Foundation; either | ||
31 | version 2 of the License, or (at your option) any later | ||
32 | version. | ||
33 | </para> | ||
34 | |||
35 | <para> | ||
36 | This program is distributed in the hope that it will be | ||
37 | useful, but WITHOUT ANY WARRANTY; without even the implied | ||
38 | warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
39 | See the GNU General Public License for more details. | ||
40 | </para> | ||
41 | |||
42 | <para> | ||
43 | You should have received a copy of the GNU General Public | ||
44 | License along with this program; if not, write to the Free | ||
45 | Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, | ||
46 | MA 02111-1307 USA | ||
47 | </para> | ||
48 | |||
49 | <para> | ||
50 | For more details see the file COPYING in the source | ||
51 | distribution of Linux. | ||
52 | </para> | ||
53 | </legalnotice> | ||
54 | </bookinfo> | ||
55 | |||
56 | <toc></toc> | ||
57 | <chapter id="intro"> | ||
58 | <title>Introduction</title> | ||
59 | <para> | ||
60 | Welcome to Rusty's Remarkably Unreliable Guide to Kernel | ||
61 | Locking issues. This document describes the locking systems in | ||
62 | the Linux 2.6 Kernel. | ||
63 | </para> | ||
64 | <para> | ||
65 | With the wide availability of HyperThreading, and <firstterm | ||
66 | linkend="gloss-preemption">preemption</firstterm> in the Linux | ||
67 | Kernel, everyone hacking on the kernel needs to know the | ||
68 | fundamentals of concurrency and locking for | ||
69 | <firstterm linkend="gloss-smp"><acronym>SMP</acronym></firstterm>. | ||
70 | </para> | ||
71 | </chapter> | ||
72 | |||
73 | <chapter id="races"> | ||
74 | <title>The Problem With Concurrency</title> | ||
75 | <para> | ||
76 | (Skip this if you know what a Race Condition is). | ||
77 | </para> | ||
78 | <para> | ||
79 | In a normal program, you can increment a counter like so: | ||
80 | </para> | ||
81 | <programlisting> | ||
82 | very_important_count++; | ||
83 | </programlisting> | ||
84 | |||
85 | <para> | ||
86 | This is what you would expect to happen: | ||
87 | </para> | ||
88 | |||
89 | <table> | ||
90 | <title>Expected Results</title> | ||
91 | |||
92 | <tgroup cols="2" align="left"> | ||
93 | |||
94 | <thead> | ||
95 | <row> | ||
96 | <entry>Instance 1</entry> | ||
97 | <entry>Instance 2</entry> | ||
98 | </row> | ||
99 | </thead> | ||
100 | |||
101 | <tbody> | ||
102 | <row> | ||
103 | <entry>read very_important_count (5)</entry> | ||
104 | <entry></entry> | ||
105 | </row> | ||
106 | <row> | ||
107 | <entry>add 1 (6)</entry> | ||
108 | <entry></entry> | ||
109 | </row> | ||
110 | <row> | ||
111 | <entry>write very_important_count (6)</entry> | ||
112 | <entry></entry> | ||
113 | </row> | ||
114 | <row> | ||
115 | <entry></entry> | ||
116 | <entry>read very_important_count (6)</entry> | ||
117 | </row> | ||
118 | <row> | ||
119 | <entry></entry> | ||
120 | <entry>add 1 (7)</entry> | ||
121 | </row> | ||
122 | <row> | ||
123 | <entry></entry> | ||
124 | <entry>write very_important_count (7)</entry> | ||
125 | </row> | ||
126 | </tbody> | ||
127 | |||
128 | </tgroup> | ||
129 | </table> | ||
130 | |||
131 | <para> | ||
132 | This is what might happen: | ||
133 | </para> | ||
134 | |||
135 | <table> | ||
136 | <title>Possible Results</title> | ||
137 | |||
138 | <tgroup cols="2" align="left"> | ||
139 | <thead> | ||
140 | <row> | ||
141 | <entry>Instance 1</entry> | ||
142 | <entry>Instance 2</entry> | ||
143 | </row> | ||
144 | </thead> | ||
145 | |||
146 | <tbody> | ||
147 | <row> | ||
148 | <entry>read very_important_count (5)</entry> | ||
149 | <entry></entry> | ||
150 | </row> | ||
151 | <row> | ||
152 | <entry></entry> | ||
153 | <entry>read very_important_count (5)</entry> | ||
154 | </row> | ||
155 | <row> | ||
156 | <entry>add 1 (6)</entry> | ||
157 | <entry></entry> | ||
158 | </row> | ||
159 | <row> | ||
160 | <entry></entry> | ||
161 | <entry>add 1 (6)</entry> | ||
162 | </row> | ||
163 | <row> | ||
164 | <entry>write very_important_count (6)</entry> | ||
165 | <entry></entry> | ||
166 | </row> | ||
167 | <row> | ||
168 | <entry></entry> | ||
169 | <entry>write very_important_count (6)</entry> | ||
170 | </row> | ||
171 | </tbody> | ||
172 | </tgroup> | ||
173 | </table> | ||
174 | |||
175 | <sect1 id="race-condition"> | ||
176 | <title>Race Conditions and Critical Regions</title> | ||
177 | <para> | ||
178 | This overlap, where the result depends on the | ||
179 | relative timing of multiple tasks, is called a <firstterm>race condition</firstterm>. | ||
180 | The piece of code containing the concurrency issue is called a | ||
181 | <firstterm>critical region</firstterm>. Especially since Linux started running | ||
182 | on SMP machines, race conditions have become one of the major issues in kernel | ||
183 | design and implementation. | ||
184 | </para> | ||
185 | <para> | ||
186 | Preemption can have the same effect, even if there is only one | ||
187 | CPU: by preempting one task during the critical region, we have | ||
188 | exactly the same race condition. In this case the thread which | ||
189 | preempts might run the critical region itself. | ||
190 | </para> | ||
191 | <para> | ||
192 | The solution is to recognize when these simultaneous accesses | ||
193 | occur, and use locks to make sure that only one instance can | ||
194 | enter the critical region at any time. There are many | ||
195 | friendly primitives in the Linux kernel to help you do this. | ||
196 | And then there are the unfriendly primitives, but I'll pretend | ||
197 | they don't exist. | ||
198 | </para> | ||
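<para>
  For instance, the increment above becomes safe once it is bracketed by a
  lock (the actual primitives are introduced in the next chapter). A minimal
  sketch, using a hypothetical spinlock:
</para>
<programlisting>
spin_lock(&very_important_lock);
very_important_count++;
spin_unlock(&very_important_lock);
</programlisting>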
199 | </sect1> | ||
200 | </chapter> | ||
201 | |||
202 | <chapter id="locks"> | ||
203 | <title>Locking in the Linux Kernel</title> | ||
204 | |||
205 | <para> | ||
206 | If I could give you one piece of advice: never sleep with anyone | ||
207 | crazier than yourself. But if I had to give you advice on | ||
208 | locking: <emphasis>keep it simple</emphasis>. | ||
209 | </para> | ||
210 | |||
211 | <para> | ||
212 | Be reluctant to introduce new locks. | ||
213 | </para> | ||
214 | |||
215 | <para> | ||
216 | Strangely enough, this last one is the exact reverse of my advice when | ||
217 | you <emphasis>have</emphasis> slept with someone crazier than yourself. | ||
218 | And you should think about getting a big dog. | ||
219 | </para> | ||
220 | |||
221 | <sect1 id="lock-intro"> | ||
222 | <title>Two Main Types of Kernel Locks: Spinlocks and Semaphores</title> | ||
223 | |||
224 | <para> | ||
225 | There are two main types of kernel locks. The fundamental type | ||
226 | is the spinlock | ||
227 | (<filename class="headerfile">include/asm/spinlock.h</filename>), | ||
228 | which is a very simple single-holder lock: if you can't get the | ||
229 | spinlock, you keep trying (spinning) until you can. Spinlocks are | ||
230 | very small and fast, and can be used anywhere. | ||
231 | </para> | ||
232 | <para> | ||
233 | The second type is a semaphore | ||
234 | (<filename class="headerfile">include/asm/semaphore.h</filename>): it | ||
235 | can have more than one holder at any time (the number decided at | ||
236 | initialization time), although it is most commonly used as a | ||
237 | single-holder lock (a mutex). If you can't get a semaphore, | ||
238 | your task will put itself on the queue, and be woken up when the | ||
239 | semaphore is released. This means the CPU will do something | ||
240 | else while you are waiting, but there are many cases when you | ||
241 | simply can't sleep (see <xref linkend="sleeping-things"/>), and so | ||
242 | have to use a spinlock instead. | ||
243 | </para> | ||
244 | <para> | ||
245 | Neither type of lock is recursive: see | ||
246 | <xref linkend="deadlock"/>. | ||
247 | </para> | ||
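<para>
  As a quick sketch of how each type is declared and used (the names here
  are made up; the initializers are the 2.6-era ones used throughout this
  document):
</para>
<programlisting>
#include <linux/spinlock.h>
#include <asm/semaphore.h>

static spinlock_t fast_lock = SPIN_LOCK_UNLOCKED; /* never sleep while held */
static DECLARE_MUTEX(slow_sem);                   /* semaphore initialized to 1 */

void fast_path(void)
{
        spin_lock(&fast_lock);
        /* ... short, non-sleeping critical region ... */
        spin_unlock(&fast_lock);
}

void slow_path(void)
{
        down(&slow_sem);
        /* ... may sleep while doing the work ... */
        up(&slow_sem);
}
</programlisting>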
248 | </sect1> | ||
249 | |||
250 | <sect1 id="uniprocessor"> | ||
251 | <title>Locks and Uniprocessor Kernels</title> | ||
252 | |||
253 | <para> | ||
254 | For kernels compiled without <symbol>CONFIG_SMP</symbol>, and | ||
255 | without <symbol>CONFIG_PREEMPT</symbol>, spinlocks do not exist at | ||
256 | all. This is an excellent design decision: when no-one else can | ||
257 | run at the same time, there is no reason to have a lock. | ||
258 | </para> | ||
259 | |||
260 | <para> | ||
261 | If the kernel is compiled without <symbol>CONFIG_SMP</symbol>, | ||
262 | but <symbol>CONFIG_PREEMPT</symbol> is set, then spinlocks | ||
263 | simply disable preemption, which is sufficient to prevent any | ||
264 | races. For most purposes, we can think of preemption as | ||
265 | equivalent to SMP, and not worry about it separately. | ||
266 | </para> | ||
267 | |||
268 | <para> | ||
269 | You should always test your locking code with <symbol>CONFIG_SMP</symbol> | ||
270 | and <symbol>CONFIG_PREEMPT</symbol> enabled, even if you don't have an SMP test box, because it | ||
271 | will still catch some kinds of locking bugs. | ||
272 | </para> | ||
273 | |||
274 | <para> | ||
275 | Semaphores still exist, because they are required for | ||
276 | synchronization between <firstterm linkend="gloss-usercontext">user | ||
277 | contexts</firstterm>, as we will see below. | ||
278 | </para> | ||
279 | </sect1> | ||
280 | |||
281 | <sect1 id="usercontextlocking"> | ||
282 | <title>Locking Only In User Context</title> | ||
283 | |||
284 | <para> | ||
285 | If you have a data structure which is only ever accessed from | ||
286 | user context, then you can use a simple semaphore | ||
287 | (<filename class="headerfile">include/asm/semaphore.h</filename>) to protect it. This | ||
288 | is the most trivial case: you initialize the semaphore to the number | ||
289 | of resources available (usually 1), and call | ||
290 | <function>down_interruptible()</function> to grab the semaphore, and | ||
291 | <function>up()</function> to release it. There is also a | ||
292 | <function>down()</function>, which should be avoided, because it | ||
293 | will not return if a signal is received. | ||
294 | </para> | ||
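<para>
  A minimal sketch of this pattern (the data and semaphore names here are
  hypothetical, not from a real driver):
</para>
<programlisting>
#include <asm/semaphore.h>
#include <linux/errno.h>

static DECLARE_MUTEX(config_sem);       /* protects config_value */
static int config_value;

int set_config(int val)
{
        if (down_interruptible(&config_sem))
                return -ERESTARTSYS;    /* interrupted by a signal */
        config_value = val;
        up(&config_sem);
        return 0;
}
</programlisting>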
295 | |||
296 | <para> | ||
297 | Example: <filename>linux/net/core/netfilter.c</filename> allows | ||
298 | registration of new <function>setsockopt()</function> and | ||
299 | <function>getsockopt()</function> calls, with | ||
300 | <function>nf_register_sockopt()</function>. Registration and | ||
301 | de-registration are only done on module load and unload (and boot | ||
302 | time, where there is no concurrency), and the list of registrations | ||
303 | is only consulted for an unknown <function>setsockopt()</function> | ||
304 | or <function>getsockopt()</function> system call. The | ||
305 | <varname>nf_sockopt_mutex</varname> is perfect to protect this, | ||
306 | especially since the setsockopt and getsockopt calls may well | ||
307 | sleep. | ||
308 | </para> | ||
309 | </sect1> | ||
310 | |||
311 | <sect1 id="lock-user-bh"> | ||
312 | <title>Locking Between User Context and Softirqs</title> | ||
313 | |||
314 | <para> | ||
315 | If a <firstterm linkend="gloss-softirq">softirq</firstterm> shares | ||
316 | data with user context, you have two problems. Firstly, the current | ||
317 | user context can be interrupted by a softirq, and secondly, the | ||
318 | critical region could be entered from another CPU. This is where | ||
319 | <function>spin_lock_bh()</function> | ||
320 | (<filename class="headerfile">include/linux/spinlock.h</filename>) is | ||
321 | used. It disables softirqs on that CPU, then grabs the lock. | ||
322 | <function>spin_unlock_bh()</function> does the reverse. (The | ||
323 | '_bh' suffix is a historical reference to "Bottom Halves", the | ||
324 | old name for software interrupts. It should really be | ||
325 | called 'spin_lock_softirq()' in a perfect world). | ||
326 | </para> | ||
327 | |||
328 | <para> | ||
329 | Note that you can also use <function>spin_lock_irq()</function> | ||
330 | or <function>spin_lock_irqsave()</function> here, which stop | ||
331 | hardware interrupts as well: see <xref linkend="hardirq-context"/>. | ||
332 | </para> | ||
333 | |||
334 | <para> | ||
335 | This works perfectly for <firstterm linkend="gloss-up"><acronym>UP | ||
336 | </acronym></firstterm> as well: the spin lock vanishes, and this macro | ||
337 | simply becomes <function>local_bh_disable()</function> | ||
338 | (<filename class="headerfile">include/linux/interrupt.h</filename>), which | ||
339 | protects you from the softirq being run. | ||
340 | </para> | ||
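<para>
  A sketch of the pattern (all names are hypothetical; the softirq side only
  needs <function>spin_lock()</function>, since a softirq is never interrupted
  by another softirq on the same CPU, and the lock covers the other CPUs):
</para>
<programlisting>
#include <linux/spinlock.h>

static spinlock_t counter_lock = SPIN_LOCK_UNLOCKED;
static unsigned long packet_count;

/* User context (eg. a system call). */
unsigned long read_packet_count(void)
{
        unsigned long count;

        spin_lock_bh(&counter_lock);    /* softirqs off on this CPU, lock held */
        count = packet_count;
        spin_unlock_bh(&counter_lock);
        return count;
}

/* Softirq (or tasklet/timer) side. */
void count_packet(void)
{
        spin_lock(&counter_lock);
        packet_count++;
        spin_unlock(&counter_lock);
}
</programlisting>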
341 | </sect1> | ||
342 | |||
343 | <sect1 id="lock-user-tasklet"> | ||
344 | <title>Locking Between User Context and Tasklets</title> | ||
345 | |||
346 | <para> | ||
347 | This is exactly the same as above, because <firstterm | ||
348 | linkend="gloss-tasklet">tasklets</firstterm> are actually run | ||
349 | from a softirq. | ||
350 | </para> | ||
351 | </sect1> | ||
352 | |||
353 | <sect1 id="lock-user-timers"> | ||
354 | <title>Locking Between User Context and Timers</title> | ||
355 | |||
356 | <para> | ||
357 | This, too, is exactly the same as above, because <firstterm | ||
358 | linkend="gloss-timers">timers</firstterm> are actually run from | ||
359 | a softirq. From a locking point of view, tasklets and timers | ||
360 | are identical. | ||
361 | </para> | ||
362 | </sect1> | ||
363 | |||
364 | <sect1 id="lock-tasklets"> | ||
365 | <title>Locking Between Tasklets/Timers</title> | ||
366 | |||
367 | <para> | ||
368 | Sometimes a tasklet or timer might want to share data with | ||
369 | another tasklet or timer. | ||
370 | </para> | ||
371 | |||
372 | <sect2 id="lock-tasklets-same"> | ||
373 | <title>The Same Tasklet/Timer</title> | ||
374 | <para> | ||
375 | Since a tasklet is never run on two CPUs at once, you don't | ||
376 | need to worry about your tasklet being reentrant (running | ||
377 | twice at once), even on SMP. | ||
378 | </para> | ||
379 | </sect2> | ||
380 | |||
381 | <sect2 id="lock-tasklets-different"> | ||
382 | <title>Different Tasklets/Timers</title> | ||
383 | <para> | ||
384 | If another tasklet/timer wants | ||
385 | to share data with your tasklet or timer, you will both need to use | ||
386 | <function>spin_lock()</function> and | ||
387 | <function>spin_unlock()</function> calls. | ||
388 | <function>spin_lock_bh()</function> is | ||
389 | unnecessary here, as you are already in a tasklet, and | ||
390 | none will be run on the same CPU. | ||
391 | </para> | ||
392 | </sect2> | ||
393 | </sect1> | ||
394 | |||
395 | <sect1 id="lock-softirqs"> | ||
396 | <title>Locking Between Softirqs</title> | ||
397 | |||
398 | <para> | ||
399 | Often a softirq might | ||
400 | want to share data with itself or a tasklet/timer. | ||
401 | </para> | ||
402 | |||
403 | <sect2 id="lock-softirqs-same"> | ||
404 | <title>The Same Softirq</title> | ||
405 | |||
406 | <para> | ||
407 | The same softirq can run on the other CPUs: you can use a | ||
408 | per-CPU array (see <xref linkend="per-cpu"/>) for better | ||
409 | performance. If you're going so far as to use a softirq, | ||
410 | you probably care about scalable performance enough | ||
411 | to justify the extra complexity. | ||
412 | </para> | ||
413 | |||
414 | <para> | ||
415 | You'll need to use <function>spin_lock()</function> and | ||
416 | <function>spin_unlock()</function> for shared data. | ||
417 | </para> | ||
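<para>
  Where the data does not really need to be shared, a per-CPU counter avoids
  the lock entirely. A sketch assuming the 2.6 per-cpu helpers (the counter
  name is made up); inside the softirq, preemption is already disabled, so
  this CPU's copy can be used directly:
</para>
<programlisting>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, softirq_events);

/* Called from inside the softirq itself. */
static void note_event(void)
{
        __get_cpu_var(softirq_events)++;
}
</programlisting>
<para>
  Reporting a total then means summing all the per-CPU copies, which is
  fine for statistics that are written often and read rarely.
</para>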
418 | </sect2> | ||
419 | |||
420 | <sect2 id="lock-softirqs-different"> | ||
421 | <title>Different Softirqs</title> | ||
422 | |||
423 | <para> | ||
424 | You'll need to use <function>spin_lock()</function> and | ||
425 | <function>spin_unlock()</function> for shared data, whether it | ||
426 | be shared with a timer, a tasklet, the same softirq or another | ||
427 | softirq: any of them could be running on a different CPU. | ||
428 | </para> | ||
429 | </sect2> | ||
430 | </sect1> | ||
431 | </chapter> | ||
432 | |||
433 | <chapter id="hardirq-context"> | ||
434 | <title>Hard IRQ Context</title> | ||
435 | |||
436 | <para> | ||
437 | Hardware interrupts usually communicate with a | ||
438 | tasklet or softirq. Frequently this involves putting work in a | ||
439 | queue, which the softirq will take out. | ||
440 | </para> | ||
441 | |||
442 | <sect1 id="hardirq-softirq"> | ||
443 | <title>Locking Between Hard IRQ and Softirqs/Tasklets</title> | ||
444 | |||
445 | <para> | ||
446 | If a hardware irq handler shares data with a softirq, you have | ||
447 | two concerns. Firstly, the softirq processing can be | ||
448 | interrupted by a hardware interrupt, and secondly, the | ||
449 | critical region could be entered by a hardware interrupt on | ||
450 | another CPU. This is where <function>spin_lock_irq()</function> is | ||
451 | used. It is defined to disable interrupts on that cpu, then grab | ||
452 | the lock. <function>spin_unlock_irq()</function> does the reverse. | ||
453 | </para> | ||
454 | |||
455 | <para> | ||
456 | The irq handler does not need to use | ||
457 | <function>spin_lock_irq()</function>, because the softirq cannot | ||
458 | run while the irq handler is running: it can use | ||
459 | <function>spin_lock()</function>, which is slightly faster. The | ||
460 | only exception would be if a different hardware irq handler uses | ||
461 | the same lock: <function>spin_lock_irq()</function> will stop | ||
462 | that from interrupting us. | ||
463 | </para> | ||
464 | |||
465 | <para> | ||
466 | This works perfectly for UP as well: the spin lock vanishes, | ||
467 | and this macro simply becomes <function>local_irq_disable()</function> | ||
468 | (<filename class="headerfile">include/asm/smp.h</filename>), which | ||
469 | protects you from the softirq/tasklet/BH being run. | ||
470 | </para> | ||
471 | |||
472 | <para> | ||
473 | <function>spin_lock_irqsave()</function> | ||
474 | (<filename>include/linux/spinlock.h</filename>) is a variant | ||
475 | which saves whether interrupts were on or off in a flags word, | ||
476 | which is passed to <function>spin_unlock_irqrestore()</function>. This | ||
477 | means that the same code can be used inside a hard irq handler (where | ||
478 | interrupts are already off) and in softirqs (where the irq | ||
479 | disabling is required). | ||
480 | </para> | ||
481 | |||
482 | <para> | ||
483 | Note that softirqs (and hence tasklets and timers) are run on | ||
484 | return from hardware interrupts, so | ||
485 | <function>spin_lock_irq()</function> also stops these. In that | ||
486 | sense, <function>spin_lock_irqsave()</function> is the most | ||
487 | general and powerful locking function. | ||
488 | </para> | ||
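<para>
  Putting the rules above together, here is a sketch of a counter shared
  between a hardware irq handler and everyone else (the handler prototype is
  the 2.6-era one; all names are hypothetical):
</para>
<programlisting>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

static spinlock_t dev_lock = SPIN_LOCK_UNLOCKED;
static unsigned int pending;

/* Hardware irq handler: the softirq cannot run while we do, so
   plain spin_lock() is enough here. */
static irqreturn_t my_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
{
        spin_lock(&dev_lock);
        pending++;
        spin_unlock(&dev_lock);
        return IRQ_HANDLED;
}

/* Softirq or user context: must keep the irq handler out. */
static unsigned int take_pending(void)
{
        unsigned long flags;
        unsigned int ret;

        spin_lock_irqsave(&dev_lock, flags);
        ret = pending;
        pending = 0;
        spin_unlock_irqrestore(&dev_lock, flags);
        return ret;
}
</programlisting>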
489 | |||
490 | </sect1> | ||
491 | <sect1 id="hardirq-hardirq"> | ||
492 | <title>Locking Between Two Hard IRQ Handlers</title> | ||
493 | <para> | ||
494 | It is rare to have to share data between two IRQ handlers, but | ||
495 | if you do, <function>spin_lock_irqsave()</function> should be | ||
496 | used: it is architecture-specific whether all interrupts are | ||
497 | disabled inside irq handlers themselves. | ||
498 | </para> | ||
499 | </sect1> | ||
500 | |||
501 | </chapter> | ||
502 | |||
503 | <chapter id="cheatsheet"> | ||
504 | <title>Cheat Sheet For Locking</title> | ||
505 | <para> | ||
506 | Pete Zaitcev gives the following summary: | ||
507 | </para> | ||
508 | <itemizedlist> | ||
509 | <listitem> | ||
510 | <para> | ||
511 | If you are in a process context (any syscall) and want to | ||
512 | lock other processes out, use a semaphore. You can take a semaphore | ||
513 | and sleep (<function>copy_from_user*()</function> or | ||
514 | <function>kmalloc(x,GFP_KERNEL)</function>). | ||
515 | </para> | ||
516 | </listitem> | ||
517 | <listitem> | ||
518 | <para> | ||
519 | Otherwise (== data can be touched in an interrupt), use | ||
520 | <function>spin_lock_irqsave()</function> and | ||
521 | <function>spin_unlock_irqrestore()</function>. | ||
522 | </para> | ||
523 | </listitem> | ||
524 | <listitem> | ||
525 | <para> | ||
526 | Avoid holding a spinlock for more than 5 lines of code and | ||
527 | across any function call (except accessors like | ||
528 | <function>readb</function>). | ||
529 | </para> | ||
530 | </listitem> | ||
531 | </itemizedlist> | ||
532 | |||
533 | <sect1 id="minimum-lock-reqirements"> | ||
534 | <title>Table of Minimum Requirements</title> | ||
535 | |||
536 | <para> The following table lists the <emphasis>minimum</emphasis> | ||
537 | locking requirements between various contexts. In some cases, | ||
538 | the same context can only be running on one CPU at a time, so | ||
539 | no locking is required for that context (eg. a particular | ||
540 | thread can only run on one CPU at a time, but if it needs to | ||
541 | share data with another thread, locking is required). | ||
542 | </para> | ||
543 | <para> | ||
544 | Remember the advice above: you can always use | ||
545 | <function>spin_lock_irqsave()</function>, which is a superset | ||
546 | of all other spinlock primitives. | ||
547 | </para> | ||
548 | <table> | ||
549 | <title>Table of Locking Requirements</title> | ||
550 | <tgroup cols="11"> | ||
551 | <tbody> | ||
552 | <row> | ||
553 | <entry></entry> | ||
554 | <entry>IRQ Handler A</entry> | ||
555 | <entry>IRQ Handler B</entry> | ||
556 | <entry>Softirq A</entry> | ||
557 | <entry>Softirq B</entry> | ||
558 | <entry>Tasklet A</entry> | ||
559 | <entry>Tasklet B</entry> | ||
560 | <entry>Timer A</entry> | ||
561 | <entry>Timer B</entry> | ||
562 | <entry>User Context A</entry> | ||
563 | <entry>User Context B</entry> | ||
564 | </row> | ||
565 | |||
566 | <row> | ||
567 | <entry>IRQ Handler A</entry> | ||
568 | <entry>None</entry> | ||
569 | </row> | ||
570 | |||
571 | <row> | ||
572 | <entry>IRQ Handler B</entry> | ||
573 | <entry>spin_lock_irqsave</entry> | ||
574 | <entry>None</entry> | ||
575 | </row> | ||
576 | |||
577 | <row> | ||
578 | <entry>Softirq A</entry> | ||
579 | <entry>spin_lock_irq</entry> | ||
580 | <entry>spin_lock_irq</entry> | ||
581 | <entry>spin_lock</entry> | ||
582 | </row> | ||
583 | |||
584 | <row> | ||
585 | <entry>Softirq B</entry> | ||
586 | <entry>spin_lock_irq</entry> | ||
587 | <entry>spin_lock_irq</entry> | ||
588 | <entry>spin_lock</entry> | ||
589 | <entry>spin_lock</entry> | ||
590 | </row> | ||
591 | |||
592 | <row> | ||
593 | <entry>Tasklet A</entry> | ||
594 | <entry>spin_lock_irq</entry> | ||
595 | <entry>spin_lock_irq</entry> | ||
596 | <entry>spin_lock</entry> | ||
597 | <entry>spin_lock</entry> | ||
598 | <entry>None</entry> | ||
599 | </row> | ||
600 | |||
601 | <row> | ||
602 | <entry>Tasklet B</entry> | ||
603 | <entry>spin_lock_irq</entry> | ||
604 | <entry>spin_lock_irq</entry> | ||
605 | <entry>spin_lock</entry> | ||
606 | <entry>spin_lock</entry> | ||
607 | <entry>spin_lock</entry> | ||
608 | <entry>None</entry> | ||
609 | </row> | ||
610 | |||
611 | <row> | ||
612 | <entry>Timer A</entry> | ||
613 | <entry>spin_lock_irq</entry> | ||
614 | <entry>spin_lock_irq</entry> | ||
615 | <entry>spin_lock</entry> | ||
616 | <entry>spin_lock</entry> | ||
617 | <entry>spin_lock</entry> | ||
618 | <entry>spin_lock</entry> | ||
619 | <entry>None</entry> | ||
620 | </row> | ||
621 | |||
622 | <row> | ||
623 | <entry>Timer B</entry> | ||
624 | <entry>spin_lock_irq</entry> | ||
625 | <entry>spin_lock_irq</entry> | ||
626 | <entry>spin_lock</entry> | ||
627 | <entry>spin_lock</entry> | ||
628 | <entry>spin_lock</entry> | ||
629 | <entry>spin_lock</entry> | ||
630 | <entry>spin_lock</entry> | ||
631 | <entry>None</entry> | ||
632 | </row> | ||
633 | |||
634 | <row> | ||
635 | <entry>User Context A</entry> | ||
636 | <entry>spin_lock_irq</entry> | ||
637 | <entry>spin_lock_irq</entry> | ||
638 | <entry>spin_lock_bh</entry> | ||
639 | <entry>spin_lock_bh</entry> | ||
640 | <entry>spin_lock_bh</entry> | ||
641 | <entry>spin_lock_bh</entry> | ||
642 | <entry>spin_lock_bh</entry> | ||
643 | <entry>spin_lock_bh</entry> | ||
644 | <entry>None</entry> | ||
645 | </row> | ||
646 | |||
647 | <row> | ||
648 | <entry>User Context B</entry> | ||
649 | <entry>spin_lock_irq</entry> | ||
650 | <entry>spin_lock_irq</entry> | ||
651 | <entry>spin_lock_bh</entry> | ||
652 | <entry>spin_lock_bh</entry> | ||
653 | <entry>spin_lock_bh</entry> | ||
654 | <entry>spin_lock_bh</entry> | ||
655 | <entry>spin_lock_bh</entry> | ||
656 | <entry>spin_lock_bh</entry> | ||
657 | <entry>down_interruptible</entry> | ||
658 | <entry>None</entry> | ||
659 | </row> | ||
660 | |||
661 | </tbody> | ||
662 | </tgroup> | ||
663 | </table> | ||
664 | </sect1> | ||
665 | </chapter> | ||
666 | |||
667 | <chapter id="Examples"> | ||
668 | <title>Common Examples</title> | ||
669 | <para> | ||
670 | Let's step through a simple example: a cache of number to name | ||
671 | mappings. The cache keeps a count of how often each of the objects is | ||
672 | used, and when it gets full, throws out the least used one. | ||
673 | |||
674 | </para> | ||
675 | |||
676 | <sect1 id="examples-usercontext"> | ||
677 | <title>All In User Context</title> | ||
678 | <para> | ||
679 | For our first example, we assume that all operations are in user | ||
680 | context (ie. from system calls), so we can sleep. This means we can | ||
681 | use a semaphore to protect the cache and all the objects within | ||
682 | it. Here's the code: | ||
683 | </para> | ||
684 | |||
685 | <programlisting> | ||
686 | #include <linux/list.h> | ||
687 | #include <linux/slab.h> | ||
688 | #include <linux/string.h> | ||
689 | #include <asm/semaphore.h> | ||
690 | #include <asm/errno.h> | ||
691 | |||
692 | struct object | ||
693 | { | ||
694 | struct list_head list; | ||
695 | int id; | ||
696 | char name[32]; | ||
697 | int popularity; | ||
698 | }; | ||
699 | |||
700 | /* Protects the cache, cache_num, and the objects within it */ | ||
701 | static DECLARE_MUTEX(cache_lock); | ||
702 | static LIST_HEAD(cache); | ||
703 | static unsigned int cache_num = 0; | ||
704 | #define MAX_CACHE_SIZE 10 | ||
705 | |||
706 | /* Must be holding cache_lock */ | ||
707 | static struct object *__cache_find(int id) | ||
708 | { | ||
709 | struct object *i; | ||
710 | |||
711 | list_for_each_entry(i, &cache, list) | ||
712 | if (i->id == id) { | ||
713 | i->popularity++; | ||
714 | return i; | ||
715 | } | ||
716 | return NULL; | ||
717 | } | ||
718 | |||
719 | /* Must be holding cache_lock */ | ||
720 | static void __cache_delete(struct object *obj) | ||
721 | { | ||
722 | BUG_ON(!obj); | ||
723 | list_del(&obj->list); | ||
724 | kfree(obj); | ||
725 | cache_num--; | ||
726 | } | ||
727 | |||
728 | /* Must be holding cache_lock */ | ||
729 | static void __cache_add(struct object *obj) | ||
730 | { | ||
731 | list_add(&obj->list, &cache); | ||
732 | if (++cache_num > MAX_CACHE_SIZE) { | ||
733 | struct object *i, *outcast = NULL; | ||
734 | list_for_each_entry(i, &cache, list) { | ||
735 | if (!outcast || i->popularity < outcast->popularity) | ||
736 | outcast = i; | ||
737 | } | ||
738 | __cache_delete(outcast); | ||
739 | } | ||
740 | } | ||
741 | |||
742 | int cache_add(int id, const char *name) | ||
743 | { | ||
744 | struct object *obj; | ||
745 | |||
746 | if ((obj = kmalloc(sizeof(*obj), GFP_KERNEL)) == NULL) | ||
747 | return -ENOMEM; | ||
748 | |||
749 | strlcpy(obj->name, name, sizeof(obj->name)); | ||
750 | obj->id = id; | ||
751 | obj->popularity = 0; | ||
752 | |||
753 | down(&cache_lock); | ||
754 | __cache_add(obj); | ||
755 | up(&cache_lock); | ||
756 | return 0; | ||
757 | } | ||
758 | |||
759 | void cache_delete(int id) | ||
760 | { | ||
761 | down(&cache_lock); | ||
762 | __cache_delete(__cache_find(id)); | ||
763 | up(&cache_lock); | ||
764 | } | ||
765 | |||
766 | int cache_find(int id, char *name) | ||
767 | { | ||
768 | struct object *obj; | ||
769 | int ret = -ENOENT; | ||
770 | |||
771 | down(&cache_lock); | ||
772 | obj = __cache_find(id); | ||
773 | if (obj) { | ||
774 | ret = 0; | ||
775 | strcpy(name, obj->name); | ||
776 | } | ||
777 | up(&cache_lock); | ||
778 | return ret; | ||
779 | } | ||
780 | </programlisting> | ||
781 | |||
782 | <para> | ||
783 | Note that we always make sure we have the cache_lock when we add, | ||
784 | delete, or look up the cache: both the cache infrastructure itself and | ||
785 | the contents of the objects are protected by the lock. In this case | ||
786 | it's easy, since we copy the data for the user, and never let them | ||
787 | access the objects directly. | ||
788 | </para> | ||
789 | <para> | ||
790 | There is a slight (and common) optimization here: in | ||
791 | <function>cache_add</function> we set up the fields of the object | ||
792 | before grabbing the lock. This is safe, as no-one else can access it | ||
793 | until we put it in the cache. | ||
794 | </para> | ||
795 | </sect1> | ||
796 | |||
797 | <sect1 id="examples-interrupt"> | ||
798 | <title>Accessing From Interrupt Context</title> | ||
799 | <para> | ||
800 | Now consider the case where <function>cache_find</function> can be | ||
801 | called from interrupt context: either a hardware interrupt or a | ||
802 | softirq. An example would be a timer which deletes objects from the | ||
803 | cache. | ||
804 | </para> | ||
805 | <para> | ||
806 | The change is shown below, in standard patch format: the | ||
807 | <symbol>-</symbol> are lines which are taken away, and the | ||
808 | <symbol>+</symbol> are lines which are added. | ||
809 | </para> | ||
810 | <programlisting> | ||
811 | --- cache.c.usercontext 2003-12-09 13:58:54.000000000 +1100 | ||
812 | +++ cache.c.interrupt 2003-12-09 14:07:49.000000000 +1100 | ||
813 | @@ -12,7 +12,7 @@ | ||
814 | int popularity; | ||
815 | }; | ||
816 | |||
817 | -static DECLARE_MUTEX(cache_lock); | ||
818 | +static spinlock_t cache_lock = SPIN_LOCK_UNLOCKED; | ||
819 | static LIST_HEAD(cache); | ||
820 | static unsigned int cache_num = 0; | ||
821 | #define MAX_CACHE_SIZE 10 | ||
822 | @@ -55,6 +55,7 @@ | ||
823 | int cache_add(int id, const char *name) | ||
824 | { | ||
825 | struct object *obj; | ||
826 | + unsigned long flags; | ||
827 | |||
828 | if ((obj = kmalloc(sizeof(*obj), GFP_KERNEL)) == NULL) | ||
829 | return -ENOMEM; | ||
830 | @@ -63,30 +64,33 @@ | ||
831 | obj->id = id; | ||
832 | obj->popularity = 0; | ||
833 | |||
834 | - down(&cache_lock); | ||
835 | + spin_lock_irqsave(&cache_lock, flags); | ||
836 | __cache_add(obj); | ||
837 | - up(&cache_lock); | ||
838 | + spin_unlock_irqrestore(&cache_lock, flags); | ||
839 | return 0; | ||
840 | } | ||
841 | |||
842 | void cache_delete(int id) | ||
843 | { | ||
844 | - down(&cache_lock); | ||
845 | + unsigned long flags; | ||
846 | + | ||
847 | + spin_lock_irqsave(&cache_lock, flags); | ||
848 | __cache_delete(__cache_find(id)); | ||
849 | - up(&cache_lock); | ||
850 | + spin_unlock_irqrestore(&cache_lock, flags); | ||
851 | } | ||
852 | |||
853 | int cache_find(int id, char *name) | ||
854 | { | ||
855 | struct object *obj; | ||
856 | int ret = -ENOENT; | ||
857 | + unsigned long flags; | ||
858 | |||
859 | - down(&cache_lock); | ||
860 | + spin_lock_irqsave(&cache_lock, flags); | ||
861 | obj = __cache_find(id); | ||
862 | if (obj) { | ||
863 | ret = 0; | ||
864 | strcpy(name, obj->name); | ||
865 | } | ||
866 | - up(&cache_lock); | ||
867 | + spin_unlock_irqrestore(&cache_lock, flags); | ||
868 | return ret; | ||
869 | } | ||
870 | </programlisting> | ||
871 | |||
872 | <para> | ||
873 | Note that <function>spin_lock_irqsave()</function> will turn off | ||
874 | interrupts if they are on, and otherwise do nothing (if we are | ||
875 | already in an interrupt handler); hence these functions are safe | ||
876 | to call from any context. | ||
877 | </para> | ||
878 | <para> | ||
879 | Unfortunately, <function>cache_add</function> calls | ||
880 | <function>kmalloc</function> with the <symbol>GFP_KERNEL</symbol> | ||
881 | flag, which is only legal in user context. I have assumed that | ||
882 | <function>cache_add</function> is still only called in user context, | ||
883 | otherwise this should become a parameter to | ||
884 | <function>cache_add</function>. | ||
885 | </para> | ||
886 | </sect1> | ||
887 | <sect1 id="examples-refcnt"> | ||
888 | <title>Exposing Objects Outside This File</title> | ||
889 | <para> | ||
890 | If our objects contained more information, it might not be sufficient | ||
891 | to copy the information in and out: other parts of the code might want | ||
892 | to keep pointers to these objects, for example, rather than looking up | ||
893 | the id every time. This produces two problems. | ||
894 | </para> | ||
895 | <para> | ||
896 | The first problem is that we use the <symbol>cache_lock</symbol> to | ||
897 | protect objects: we'd need to make this non-static so the rest of the | ||
898 | code can use it. This makes locking trickier, as it is no longer all | ||
899 | in one place. | ||
900 | </para> | ||
901 | <para> | ||
902 | The second problem is the lifetime problem: if another structure keeps | ||
903 | a pointer to an object, it presumably expects that pointer to remain | ||
904 | valid. Unfortunately, this is only guaranteed while you hold the | ||
905 | lock, otherwise someone might call <function>cache_delete</function> | ||
906 | and even worse, add another object, re-using the same address. | ||
907 | </para> | ||
908 | <para> | ||
909 | As there is only one lock, you can't hold it forever: no-one else would | ||
910 | get any work done. | ||
911 | </para> | ||
912 | <para> | ||
913 | The solution to this problem is to use a reference count: everyone who | ||
914 | has a pointer to the object increases it when they first get the | ||
915 | object, and drops the reference count when they're finished with it. | ||
916 | Whoever drops it to zero knows it is unused, and can actually delete it. | ||
917 | </para> | ||
918 | <para> | ||
919 | Here is the code: | ||
920 | </para> | ||
921 | |||
922 | <programlisting> | ||
923 | --- cache.c.interrupt 2003-12-09 14:25:43.000000000 +1100 | ||
924 | +++ cache.c.refcnt 2003-12-09 14:33:05.000000000 +1100 | ||
925 | @@ -7,6 +7,7 @@ | ||
926 | struct object | ||
927 | { | ||
928 | struct list_head list; | ||
929 | + unsigned int refcnt; | ||
930 | int id; | ||
931 | char name[32]; | ||
932 | int popularity; | ||
933 | @@ -17,6 +18,35 @@ | ||
934 | static unsigned int cache_num = 0; | ||
935 | #define MAX_CACHE_SIZE 10 | ||
936 | |||
937 | +static void __object_put(struct object *obj) | ||
938 | +{ | ||
939 | + if (--obj->refcnt == 0) | ||
940 | + kfree(obj); | ||
941 | +} | ||
942 | + | ||
943 | +static void __object_get(struct object *obj) | ||
944 | +{ | ||
945 | + obj->refcnt++; | ||
946 | +} | ||
947 | + | ||
948 | +void object_put(struct object *obj) | ||
949 | +{ | ||
950 | + unsigned long flags; | ||
951 | + | ||
952 | + spin_lock_irqsave(&cache_lock, flags); | ||
953 | + __object_put(obj); | ||
954 | + spin_unlock_irqrestore(&cache_lock, flags); | ||
955 | +} | ||
956 | + | ||
957 | +void object_get(struct object *obj) | ||
958 | +{ | ||
959 | + unsigned long flags; | ||
960 | + | ||
961 | + spin_lock_irqsave(&cache_lock, flags); | ||
962 | + __object_get(obj); | ||
963 | + spin_unlock_irqrestore(&cache_lock, flags); | ||
964 | +} | ||
965 | + | ||
966 | /* Must be holding cache_lock */ | ||
967 | static struct object *__cache_find(int id) | ||
968 | { | ||
969 | @@ -35,6 +65,7 @@ | ||
970 | { | ||
971 | BUG_ON(!obj); | ||
972 | list_del(&obj->list); | ||
973 | + __object_put(obj); | ||
974 | cache_num--; | ||
975 | } | ||
976 | |||
977 | @@ -63,6 +94,7 @@ | ||
978 | strlcpy(obj->name, name, sizeof(obj->name)); | ||
979 | obj->id = id; | ||
980 | obj->popularity = 0; | ||
981 | + obj->refcnt = 1; /* The cache holds a reference */ | ||
982 | |||
983 | spin_lock_irqsave(&cache_lock, flags); | ||
984 | __cache_add(obj); | ||
985 | @@ -79,18 +111,15 @@ | ||
986 | spin_unlock_irqrestore(&cache_lock, flags); | ||
987 | } | ||
988 | |||
989 | -int cache_find(int id, char *name) | ||
990 | +struct object *cache_find(int id) | ||
991 | { | ||
992 | struct object *obj; | ||
993 | - int ret = -ENOENT; | ||
994 | unsigned long flags; | ||
995 | |||
996 | spin_lock_irqsave(&cache_lock, flags); | ||
997 | obj = __cache_find(id); | ||
998 | - if (obj) { | ||
999 | - ret = 0; | ||
1000 | - strcpy(name, obj->name); | ||
1001 | - } | ||
1002 | + if (obj) | ||
1003 | + __object_get(obj); | ||
1004 | spin_unlock_irqrestore(&cache_lock, flags); | ||
1005 | - return ret; | ||
1006 | + return obj; | ||
1007 | } | ||
1008 | </programlisting> | ||
1009 | |||
1010 | <para> | ||
1011 | We encapsulate the reference counting in the standard 'get' and 'put' | ||
1012 | functions. Now we can return the object itself from | ||
1013 | <function>cache_find</function> which has the advantage that the user | ||
1014 | can now sleep holding the object (eg. to | ||
1015 | <function>copy_to_user()</function> the name to userspace). | ||
1016 | </para> | ||
1017 | <para> | ||
1018 | The other point to note is that I said a reference should be held for | ||
1019 | every pointer to the object: thus the reference count is 1 when first | ||
1020 | inserted into the cache. In some versions the framework does not hold | ||
1021 | a reference of its own, but those versions are more complicated. | ||
1022 | </para> | ||
1023 | |||
1024 | <sect2 id="examples-refcnt-atomic"> | ||
1025 | <title>Using Atomic Operations For The Reference Count</title> | ||
1026 | <para> | ||
1027 | In practice, <type>atomic_t</type> would usually be used for | ||
1028 | <structfield>refcnt</structfield>. There are a number of atomic | ||
1029 | operations defined in | ||
1030 | |||
1031 | <filename class="headerfile">include/asm/atomic.h</filename>: these are | ||
1032 | guaranteed to be seen atomically from all CPUs in the system, so no | ||
1033 | lock is required. In this case, it is simpler than using spinlocks, | ||
1034 | although for anything non-trivial using spinlocks is clearer. The | ||
1035 | <function>atomic_inc</function> and | ||
1036 | <function>atomic_dec_and_test</function> are used instead of the | ||
1037 | standard increment and decrement operators, and the lock is no longer | ||
1038 | used to protect the reference count itself. | ||
1039 | </para> | ||
1040 | |||
1041 | <programlisting> | ||
1042 | --- cache.c.refcnt 2003-12-09 15:00:35.000000000 +1100 | ||
1043 | +++ cache.c.refcnt-atomic 2003-12-11 15:49:42.000000000 +1100 | ||
1044 | @@ -7,7 +7,7 @@ | ||
1045 | struct object | ||
1046 | { | ||
1047 | struct list_head list; | ||
1048 | - unsigned int refcnt; | ||
1049 | + atomic_t refcnt; | ||
1050 | int id; | ||
1051 | char name[32]; | ||
1052 | int popularity; | ||
1053 | @@ -18,33 +18,15 @@ | ||
1054 | static unsigned int cache_num = 0; | ||
1055 | #define MAX_CACHE_SIZE 10 | ||
1056 | |||
1057 | -static void __object_put(struct object *obj) | ||
1058 | -{ | ||
1059 | - if (--obj->refcnt == 0) | ||
1060 | - kfree(obj); | ||
1061 | -} | ||
1062 | - | ||
1063 | -static void __object_get(struct object *obj) | ||
1064 | -{ | ||
1065 | - obj->refcnt++; | ||
1066 | -} | ||
1067 | - | ||
1068 | void object_put(struct object *obj) | ||
1069 | { | ||
1070 | - unsigned long flags; | ||
1071 | - | ||
1072 | - spin_lock_irqsave(&cache_lock, flags); | ||
1073 | - __object_put(obj); | ||
1074 | - spin_unlock_irqrestore(&cache_lock, flags); | ||
1075 | + if (atomic_dec_and_test(&obj->refcnt)) | ||
1076 | + kfree(obj); | ||
1077 | } | ||
1078 | |||
1079 | void object_get(struct object *obj) | ||
1080 | { | ||
1081 | - unsigned long flags; | ||
1082 | - | ||
1083 | - spin_lock_irqsave(&cache_lock, flags); | ||
1084 | - __object_get(obj); | ||
1085 | - spin_unlock_irqrestore(&cache_lock, flags); | ||
1086 | + atomic_inc(&obj->refcnt); | ||
1087 | } | ||
1088 | |||
1089 | /* Must be holding cache_lock */ | ||
1090 | @@ -65,7 +47,7 @@ | ||
1091 | { | ||
1092 | BUG_ON(!obj); | ||
1093 | list_del(&obj->list); | ||
1094 | - __object_put(obj); | ||
1095 | + object_put(obj); | ||
1096 | cache_num--; | ||
1097 | } | ||
1098 | |||
1099 | @@ -94,7 +76,7 @@ | ||
1100 | strlcpy(obj->name, name, sizeof(obj->name)); | ||
1101 | obj->id = id; | ||
1102 | obj->popularity = 0; | ||
1103 | - obj->refcnt = 1; /* The cache holds a reference */ | ||
1104 | + atomic_set(&obj->refcnt, 1); /* The cache holds a reference */ | ||
1105 | |||
1106 | spin_lock_irqsave(&cache_lock, flags); | ||
1107 | __cache_add(obj); | ||
1108 | @@ -119,7 +101,7 @@ | ||
1109 | spin_lock_irqsave(&cache_lock, flags); | ||
1110 | obj = __cache_find(id); | ||
1111 | if (obj) | ||
1112 | - __object_get(obj); | ||
1113 | + object_get(obj); | ||
1114 | spin_unlock_irqrestore(&cache_lock, flags); | ||
1115 | return obj; | ||
1116 | } | ||
1117 | </programlisting> | ||
1118 | </sect2> | ||
1119 | </sect1> | ||
1120 | |||
1121 | <sect1 id="examples-lock-per-obj"> | ||
1122 | <title>Protecting The Objects Themselves</title> | ||
1123 | <para> | ||
1124 | In these examples, we assumed that the objects (except the reference | ||
1125 | counts) never changed once they are created. If we wanted to allow | ||
1126 | the name to change, there are three possibilities: | ||
1127 | </para> | ||
1128 | <itemizedlist> | ||
1129 | <listitem> | ||
1130 | <para> | ||
1131 | You can make <symbol>cache_lock</symbol> non-static, and tell people | ||
1132 | to grab that lock before changing the name in any object. | ||
1133 | </para> | ||
1134 | </listitem> | ||
1135 | <listitem> | ||
1136 | <para> | ||
1137 | You can provide a <function>cache_obj_rename</function> which grabs | ||
1138 | this lock and changes the name for the caller, and tell everyone to | ||
1139 | use that function. | ||
1140 | </para> | ||
1141 | </listitem> | ||
1142 | <listitem> | ||
1143 | <para> | ||
1144 | You can make the <symbol>cache_lock</symbol> protect only the cache | ||
1145 | itself, and use another lock to protect the name. | ||
1146 | </para> | ||
1147 | </listitem> | ||
1148 | </itemizedlist> | ||
1149 | |||
1150 | <para> | ||
1151 | Theoretically, you can make the locks as fine-grained as one lock for | ||
1152 | every field, for every object. In practice, the most common variants | ||
1153 | are: | ||
1154 | </para> | ||
1155 | <itemizedlist> | ||
1156 | <listitem> | ||
1157 | <para> | ||
1158 | One lock which protects the infrastructure (the <symbol>cache</symbol> | ||
1159 | list in this example) and all the objects. This is what we have done | ||
1160 | so far. | ||
1161 | </para> | ||
1162 | </listitem> | ||
1163 | <listitem> | ||
1164 | <para> | ||
1165 | One lock which protects the infrastructure (including the list | ||
1166 | pointers inside the objects), and one lock inside the object which | ||
1167 | protects the rest of that object. | ||
1168 | </para> | ||
1169 | </listitem> | ||
1170 | <listitem> | ||
1171 | <para> | ||
1172 | Multiple locks to protect the infrastructure (eg. one lock per hash | ||
1173 | chain), possibly with a separate per-object lock. | ||
1174 | </para> | ||
1175 | </listitem> | ||
1176 | </itemizedlist> | ||
1177 | |||
1178 | <para> | ||
1179 | Here is the "lock-per-object" implementation: | ||
1180 | </para> | ||
1181 | <programlisting> | ||
1182 | --- cache.c.refcnt-atomic 2003-12-11 15:50:54.000000000 +1100 | ||
1183 | +++ cache.c.perobjectlock 2003-12-11 17:15:03.000000000 +1100 | ||
1184 | @@ -6,11 +6,17 @@ | ||
1185 | |||
1186 | struct object | ||
1187 | { | ||
1188 | + /* These two protected by cache_lock. */ | ||
1189 | struct list_head list; | ||
1190 | + int popularity; | ||
1191 | + | ||
1192 | atomic_t refcnt; | ||
1193 | + | ||
1194 | + /* Doesn't change once created. */ | ||
1195 | int id; | ||
1196 | + | ||
1197 | + spinlock_t lock; /* Protects the name */ | ||
1198 | char name[32]; | ||
1199 | - int popularity; | ||
1200 | }; | ||
1201 | |||
1202 | static spinlock_t cache_lock = SPIN_LOCK_UNLOCKED; | ||
1203 | @@ -77,6 +84,7 @@ | ||
1204 | obj->id = id; | ||
1205 | obj->popularity = 0; | ||
1206 | atomic_set(&obj->refcnt, 1); /* The cache holds a reference */ | ||
1207 | + spin_lock_init(&obj->lock); | ||
1208 | |||
1209 | spin_lock_irqsave(&cache_lock, flags); | ||
1210 | __cache_add(obj); | ||
1211 | </programlisting> | ||
1212 | |||
1213 | <para> | ||
1214 | Note that I decided that the <structfield>popularity</structfield> | ||
1215 | count should be protected by the <symbol>cache_lock</symbol> rather | ||
1216 | than the per-object lock: this is because it (like the | ||
1217 | <structname>struct list_head</structname> inside the object) is | ||
1218 | logically part of the infrastructure. This way, I don't need to grab | ||
1219 | the lock of every object in <function>__cache_add</function> when | ||
1220 | seeking the least popular. | ||
1221 | </para> | ||
1222 | |||
1223 | <para> | ||
1224 | I also decided that the <structfield>id</structfield> member is | ||
1225 | unchangeable, so I don't need to grab each object lock in | ||
1226 | <function>__cache_find()</function> to examine the | ||
1227 | <structfield>id</structfield>: the object lock is only used by a | ||
1228 | caller who wants to read or write the <structfield>name</structfield> | ||
1229 | field. | ||
1230 | </para> | ||
1231 | |||
1232 | <para> | ||
1233 | Note also that I added a comment describing what data was protected by | ||
1234 | which locks. This is extremely important, as it describes the runtime | ||
1235 | behavior of the code, and can be hard to deduce from just reading. And | ||
1236 | as Alan Cox says, <quote>Lock data, not code</quote>. | ||
1237 | </para> | ||
1238 | </sect1> | ||
1239 | </chapter> | ||
1240 | |||
1241 | <chapter id="common-problems"> | ||
1242 | <title>Common Problems</title> | ||
1243 | <sect1 id="deadlock"> | ||
1244 | <title>Deadlock: Simple and Advanced</title> | ||
1245 | |||
1246 | <para> | ||
1247 | There is a coding bug where a piece of code tries to grab a | ||
1248 | spinlock twice: it will spin forever, waiting for the lock to | ||
1249 | be released (spinlocks, rwlocks and semaphores are not | ||
1250 | recursive in Linux). This is trivial to diagnose: not a | ||
1251 | stay-up-five-nights-talk-to-fluffy-code-bunnies kind of | ||
1252 | problem. | ||
1253 | </para> | ||
1254 | |||
1255 | <para> | ||
1256 | For a slightly more complex case, imagine you have a region | ||
1257 | shared by a softirq and user context. If you use a | ||
1258 | <function>spin_lock()</function> call to protect it, it is | ||
1259 | possible that the user context will be interrupted by the softirq | ||
1260 | while it holds the lock, and the softirq will then spin | ||
1261 | forever trying to get the same lock. | ||
1262 | </para> | ||
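<para>
  A sketch of that second mistake (everything here is hypothetical; the fix
  is simply to use <function>spin_lock_bh()</function> on the user-context
  side, as described earlier):
</para>
<programlisting>
static spinlock_t count_lock = SPIN_LOCK_UNLOCKED;
static int count;

/* User context. BAD: should use spin_lock_bh()/spin_unlock_bh(). */
void user_bump(void)
{
        spin_lock(&count_lock);
        count++;
        spin_unlock(&count_lock);
}

/* Tasklet: if it fires on this CPU while user_bump() holds the lock,
   it spins here forever. */
void my_tasklet(unsigned long data)
{
        spin_lock(&count_lock);
        count--;
        spin_unlock(&count_lock);
}
</programlisting>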
1263 | |||
1264 | <para> | ||
1265 | Both of these are called deadlock, and as shown above, they can | ||
1266 | occur even with a single CPU (although not on UP compiles, | ||
1267 | since spinlocks vanish on kernels compiled with | ||
1268 | <symbol>CONFIG_SMP</symbol>=n; you'll still get data corruption | ||
1269 | in the second example, though). | ||
1270 | </para> | ||
1271 | |||
1272 | <para> | ||
1273 | This complete lockup is easy to diagnose: on SMP boxes the | ||
1274 | watchdog timer or compiling with <symbol>DEBUG_SPINLOCKS</symbol> set | ||
1275 | (<filename>include/linux/spinlock.h</filename>) will show this up | ||
1276 | immediately when it happens. | ||
1277 | </para> | ||
1278 | |||
1279 | <para> | ||
1280 | A more complex problem is the so-called 'deadly embrace', | ||
1281 | involving two or more locks. Say you have a hash table: each | ||
1282 | entry in the table is a spinlock, and a chain of hashed | ||
1283 | objects. Inside a softirq handler, you sometimes want to | ||
1284 | alter an object from one place in the hash to another: you | ||
1285 | grab the spinlock of the old hash chain and the spinlock of | ||
1286 | the new hash chain, and delete the object from the old one, | ||
1287 | and insert it in the new one. | ||
1288 | </para> | ||
1289 | |||
1290 | <para> | ||
1291 | There are two problems here. First, if your code ever | ||
1292 | tries to move the object to the same chain, it will deadlock | ||
1293 | with itself as it tries to lock it twice. Secondly, if the | ||
1294 | same softirq on another CPU is trying to move another object | ||
1295 | in the reverse direction, the following could happen: | ||
1296 | </para> | ||
1297 | |||
1298 | <table> | ||
1299 | <title>Consequences</title> | ||
1300 | |||
1301 | <tgroup cols="2" align="left"> | ||
1302 | |||
1303 | <thead> | ||
1304 | <row> | ||
1305 | <entry>CPU 1</entry> | ||
1306 | <entry>CPU 2</entry> | ||
1307 | </row> | ||
1308 | </thead> | ||
1309 | |||
1310 | <tbody> | ||
1311 | <row> | ||
1312 | <entry>Grab lock A -> OK</entry> | ||
1313 | <entry>Grab lock B -> OK</entry> | ||
1314 | </row> | ||
1315 | <row> | ||
1316 | <entry>Grab lock B -> spin</entry> | ||
1317 | <entry>Grab lock A -> spin</entry> | ||
1318 | </row> | ||
1319 | </tbody> | ||
1320 | </tgroup> | ||
1321 | </table> | ||
1322 | |||
1323 | <para> | ||
1324 | The two CPUs will spin forever, waiting for the other to give up | ||
1325 | their lock. It will look, smell, and feel like a crash. | ||
1326 | </para> | ||
1327 | </sect1> | ||
1328 | |||
1329 | <sect1 id="techs-deadlock-prevent"> | ||
1330 | <title>Preventing Deadlock</title> | ||
1331 | |||
1332 | <para> | ||
1333 | Textbooks will tell you that if you always lock in the same | ||
1334 | order, you will never get this kind of deadlock. Practice | ||
1335 | will tell you that this approach doesn't scale: when I | ||
1336 | create a new lock, I don't understand enough of the kernel | ||
1337 | to figure out where in the 5000 lock hierarchy it will fit. | ||
1338 | </para> | ||
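<para>
  If you really must take two similar locks at once, as in the hash-chain
  example above, one workable convention is to always take them in address
  order. A sketch (the bucket structure is hypothetical, and this is called
  from the softirq, so plain <function>spin_lock()</function> is enough):
</para>
<programlisting>
#include <linux/list.h>
#include <linux/spinlock.h>

struct bucket
{
        spinlock_t lock;                /* protects chain */
        struct list_head chain;
};

static void move_entry(struct bucket *old, struct bucket *new,
                       struct list_head *entry)
{
        struct bucket *first = old < new ? old : new;
        struct bucket *second = old < new ? new : old;

        if (old == new)                 /* same chain: nothing to do, and  */
                return;                 /* locking it twice would deadlock */

        spin_lock(&first->lock);
        spin_lock(&second->lock);
        list_del(entry);
        list_add(entry, &new->chain);
        spin_unlock(&second->lock);
        spin_unlock(&first->lock);
}
</programlisting>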
1339 | |||
1340 | <para> | ||
1341 | The best locks are encapsulated: they never get exposed in | ||
1342 | headers, and are never held around calls to non-trivial | ||
1343 | functions outside the same file. You can read through this | ||
1344 | code and see that it will never deadlock, because it never | ||
1345 | tries to grab another lock while it has that one. People | ||
1346 | using your code don't even need to know you are using a | ||
1347 | lock. | ||
1348 | </para> | ||
1349 | |||
1350 | <para> | ||
1351 | A classic problem here is when you provide callbacks or | ||
1352 | hooks: if you call these with the lock held, you risk simple | ||
1353 | deadlock, or a deadly embrace (who knows what the callback | ||
1354 | will do?). Remember, the other programmers are out to get | ||
1355 | you, so don't do this. | ||
1356 | </para> | ||
1357 | |||
1358 | <sect2 id="techs-deadlock-overprevent"> | ||
1359 | <title>Overzealous Prevention Of Deadlocks</title> | ||
1360 | |||
1361 | <para> | ||
1362 | Deadlocks are problematic, but not as bad as data | ||
1363 | corruption. Code which grabs a read lock, searches a list, | ||
1364 | fails to find what it wants, drops the read lock, grabs a | ||
1365 | write lock and inserts the object has a race condition. | ||
1366 | </para> | ||
1367 | |||
1368 | <para> | ||
1369 | If you don't see why, please stay the fuck away from my code. | ||
1370 | </para> | ||
1371 | </sect2> | ||
1372 | </sect1> | ||
1373 | |||
1374 | <sect1 id="racing-timers"> | ||
1375 | <title>Racing Timers: A Kernel Pastime</title> | ||
1376 | |||
1377 | <para> | ||
1378 | Timers can produce their own special problems with races. | ||
1379 | Consider a collection of objects (list, hash, etc) where each | ||
1380 | object has a timer which is due to destroy it. | ||
1381 | </para> | ||
1382 | |||
1383 | <para> | ||
1384 | If you want to destroy the entire collection (say on module | ||
1385 | removal), you might do the following: | ||
1386 | </para> | ||
1387 | |||
1388 | <programlisting> | ||
1389 | /* THIS CODE BAD BAD BAD BAD: IF IT WAS ANY WORSE IT WOULD USE | ||
1390 | HUNGARIAN NOTATION */ | ||
1391 | spin_lock_bh(&list_lock); | ||
1392 | |||
1393 | while (list) { | ||
1394 | struct foo *next = list->next; | ||
1395 | del_timer(&list->timer); | ||
1396 | kfree(list); | ||
1397 | list = next; | ||
1398 | } | ||
1399 | |||
1400 | spin_unlock_bh(&list_lock); | ||
1401 | </programlisting> | ||
1402 | |||
1403 | <para> | ||
1404 | Sooner or later, this will crash on SMP, because a timer can | ||
1405 | have just gone off before the <function>spin_lock_bh()</function>, | ||
1406 | and it will only get the lock after we | ||
1407 | <function>spin_unlock_bh()</function>, and then try to free | ||
1408 | the element (which has already been freed!). | ||
1409 | </para> | ||
1410 | |||
1411 | <para> | ||
1412 | This can be avoided by checking the result of | ||
1413 | <function>del_timer()</function>: if it returns | ||
1414 | <returnvalue>1</returnvalue>, the timer has been deleted. | ||
1415 | If <returnvalue>0</returnvalue>, it means (in this | ||
1416 | case) that it is currently running, so we can do: | ||
1417 | </para> | ||
1418 | |||
1419 | <programlisting> | ||
1420 | retry: | ||
1421 | spin_lock_bh(&list_lock); | ||
1422 | |||
1423 | while (list) { | ||
1424 | struct foo *next = list->next; | ||
1425 | if (!del_timer(&list->timer)) { | ||
1426 | /* Give timer a chance to delete this */ | ||
1427 | spin_unlock_bh(&list_lock); | ||
1428 | goto retry; | ||
1429 | } | ||
1430 | kfree(list); | ||
1431 | list = next; | ||
1432 | } | ||
1433 | |||
1434 | spin_unlock_bh(&list_lock); | ||
1435 | </programlisting> | ||
1436 | |||
1437 | <para> | ||
1438 | Another common problem is deleting timers which restart | ||
1439 | themselves (by calling <function>add_timer()</function> at the end | ||
1440 | of their timer function). Because this is a fairly common case | ||
1441 | which is prone to races, you should use <function>del_timer_sync()</function> | ||
1442 | (<filename class="headerfile">include/linux/timer.h</filename>) | ||
1443 | to handle this case. It returns the number of times the timer | ||
1444 | had to be deleted before we finally stopped it from adding itself back | ||
1445 | in. | ||
1446 | </para> | ||
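<para>
  A sketch of such a self-restarting timer and its shutdown (the names and
  the one-second period are made up; the timer API is the 2.6-era one):
</para>
<programlisting>
#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list poll_timer;

static void poll_handler(unsigned long data)
{
        /* ... do the periodic work ... */

        /* Restart ourselves: this is what plain del_timer() can't cope with. */
        poll_timer.expires = jiffies + HZ;
        add_timer(&poll_timer);
}

static void start_polling(void)
{
        init_timer(&poll_timer);
        poll_timer.function = poll_handler;
        poll_timer.data = 0;
        poll_timer.expires = jiffies + HZ;
        add_timer(&poll_timer);
}

static void stop_polling(void)
{
        /* Waits for a running handler to finish, and keeps deleting until
           the timer stays dead. */
        del_timer_sync(&poll_timer);
}
</programlisting>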
1447 | </sect1> | ||
1448 | |||
1449 | </chapter> | ||
1450 | |||
1451 | <chapter id="Efficiency"> | ||
1452 | <title>Locking Speed</title> | ||
1453 | |||
1454 | <para> | ||
1455 | There are three main things to worry about when considering speed of | ||
1456 | some code which does locking. First is concurrency: how many things | ||
1457 | are going to be waiting while someone else is holding a lock. Second | ||
1458 | is the time taken to actually acquire and release an uncontended lock. | ||
1459 | Third is using fewer, or smarter locks. I'm assuming that the lock is | ||
1460 | used fairly often: otherwise, you wouldn't be concerned about | ||
1461 | efficiency. | ||
1462 | </para> | ||
1463 | <para> | ||
1464 | Concurrency depends on how long the lock is usually held: you should | ||
1465 | hold the lock for as long as needed, but no longer. In the cache | ||
1466 | example, we always create the object without the lock held, and then | ||
1467 | grab the lock only when we are ready to insert it in the list. | ||
1468 | </para> | ||
1469 | <para> | ||
1470 | Acquisition times depend on how much damage the lock operations do to | ||
1471 | the pipeline (pipeline stalls) and how likely it is that this CPU was | ||
1472 | the last one to grab the lock (ie. is the lock cache-hot for this | ||
1473 | CPU): on a machine with more CPUs, this likelihood drops fast. | ||
1474 | Consider a 700MHz Intel Pentium III: an instruction takes about 0.7ns, | ||
1475 | an atomic increment takes about 58ns, a lock which is cache-hot on | ||
1476 | this CPU takes 160ns, and a cacheline transfer from another CPU takes | ||
1477 | an additional 170 to 360ns. (These figures from Paul McKenney's | ||
1478 | <ulink url="http://www.linuxjournal.com/article.php?sid=6993"> Linux | ||
1479 | Journal RCU article</ulink>). | ||
1480 | </para> | ||
1481 | <para> | ||
1482 | These two aims conflict: holding a lock for a short time might be done | ||
1483 | by splitting locks into parts (such as in our final per-object-lock | ||
1484 | example), but this increases the number of lock acquisitions, and the | ||
1485 | results are often slower than having a single lock. This is another | ||
1486 | reason to advocate locking simplicity. | ||
1487 | </para> | ||
1488 | <para> | ||
1489 | The third concern is addressed below: there are some methods to reduce | ||
1490 | the amount of locking which needs to be done. | ||
1491 | </para> | ||
1492 | |||
1493 | <sect1 id="efficiency-rwlocks"> | ||
1494 | <title>Read/Write Lock Variants</title> | ||
1495 | |||
1496 | <para> | ||
1497 | Both spinlocks and semaphores have read/write variants: | ||
1498 | <type>rwlock_t</type> and <structname>struct rw_semaphore</structname>. | ||
1499 | These divide users into two classes: the readers and the writers. If | ||
1500 | you are only reading the data, you can get a read lock, but to write to | ||
1501 | the data you need the write lock. Many people can hold a read lock, | ||
1502 | but a writer must be the sole holder. | ||
1503 | </para> | ||
1504 | |||
1505 | <para> | ||
1506 | If your code divides neatly along reader/writer lines (as our | ||
1507 | cache code does), and the lock is held by readers for | ||
1508 | significant lengths of time, using these locks can help. They | ||
1509 | are slightly slower than the normal locks though, so in practice | ||
1510 | <type>rwlock_t</type> is not usually worthwhile. | ||
1511 | </para> | ||
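<para>
If you do use them, the spinlock variant looks like this (a minimal
sketch: the lock, data and function names are invented, and you would
add the usual <function>_irq</function>/<function>_bh</function>
suffixes if interrupts or softirqs also touch the data):
</para>

<programlisting>
static rwlock_t foo_lock = RW_LOCK_UNLOCKED;
static struct foo_config foo_config;

void foo_get_config(struct foo_config *copy)
{
        read_lock(&foo_lock);          /* many readers may hold this at once */
        *copy = foo_config;
        read_unlock(&foo_lock);
}

void foo_set_config(const struct foo_config *new)
{
        write_lock(&foo_lock);         /* excludes readers and other writers */
        foo_config = *new;
        write_unlock(&foo_lock);
}
</programlisting>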
1512 | </sect1> | ||
1513 | |||
1514 | <sect1 id="efficiency-read-copy-update"> | ||
1515 | <title>Avoiding Locks: Read Copy Update</title> | ||
1516 | |||
1517 | <para> | ||
1518 | There is a special method of read/write locking called Read Copy | ||
1519 | Update. Using RCU, the readers can avoid taking a lock | ||
1520 | altogether: as we expect our cache to be read more often than | ||
1521 | updated (otherwise the cache is a waste of time), it is a | ||
1522 | candidate for this optimization. | ||
1523 | </para> | ||
1524 | |||
1525 | <para> | ||
1526 | How do we get rid of read locks? Getting rid of read locks | ||
1527 | means that writers may be changing the list underneath the | ||
1528 | readers. That is actually quite simple: we can read a linked | ||
1529 | list while an element is being added if the writer adds the | ||
1530 | element very carefully. For example, adding | ||
1531 | <symbol>new</symbol> to a single linked list called | ||
1532 | <symbol>list</symbol>: | ||
1533 | </para> | ||
1534 | |||
1535 | <programlisting> | ||
1536 | new->next = list->next;      /* new's next points into the old list */ | ||
1537 | wmb();                       /* write barrier: order the two stores */ | ||
1538 | list->next = new;            /* publish: readers may now see new */ | ||
1539 | </programlisting> | ||
1540 | |||
1541 | <para> | ||
1542 | The <function>wmb()</function> is a write memory barrier. It | ||
1543 | ensures that the first operation (setting the new element's | ||
1544 | <symbol>next</symbol> pointer) is complete and will be seen by | ||
1545 | all CPUs, before the second operation (putting the new | ||
1546 | element into the list) happens. This is important, since modern | ||
1547 | compilers and modern CPUs can both reorder instructions unless | ||
1548 | told otherwise: we want a reader to either not see the new | ||
1549 | element at all, or see the new element with the | ||
1550 | <symbol>next</symbol> pointer correctly pointing at the rest of | ||
1551 | the list. | ||
1552 | </para> | ||
1553 | <para> | ||
1554 | Fortunately, there is a function to do this for standard | ||
1555 | <structname>struct list_head</structname> lists: | ||
1556 | <function>list_add_rcu()</function> | ||
1557 | (<filename>include/linux/list.h</filename>). | ||
1558 | </para> | ||
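<para>
A writer using it might look like this (a sketch only: the
<symbol>foo_list</symbol>, <symbol>foo_list_lock</symbol> and
<structname>struct foo</structname> names are made up, and writers
still need a lock against each other):
</para>

<programlisting>
struct foo *new = kmalloc(sizeof(*new), GFP_KERNEL);

new->id = id;
spin_lock(&foo_list_lock);              /* serialize against other writers */
list_add_rcu(&new->list, &foo_list);    /* does the wmb() dance for us */
spin_unlock(&foo_list_lock);
</programlisting>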
1559 | <para> | ||
1560 | Removing an element from the list is even simpler: we replace | ||
1561 | the pointer to the old element with a pointer to its successor, | ||
1562 | and readers will either see it, or skip over it. | ||
1563 | </para> | ||
1564 | <programlisting> | ||
1565 | list->next = old->next; | ||
1566 | </programlisting> | ||
1567 | <para> | ||
1568 | There is <function>list_del_rcu()</function> | ||
1569 | (<filename>include/linux/list.h</filename>) which does this (the | ||
1570 | normal version poisons the old object, which we don't want). | ||
1571 | </para> | ||
1572 | <para> | ||
1573 | The reader must also be careful: some CPUs can look through the | ||
1574 | <symbol>next</symbol> pointer to start reading the contents of | ||
1575 | the next element early, but don't realize that the pre-fetched | ||
1576 | contents are wrong when the <symbol>next</symbol> pointer changes | ||
1577 | underneath them. Once again, there is a | ||
1578 | <function>list_for_each_entry_rcu()</function> | ||
1579 | (<filename>include/linux/list.h</filename>) to help you. Of | ||
1580 | course, writers can just use | ||
1581 | <function>list_for_each_entry()</function>, since there cannot | ||
1582 | be two simultaneous writers. | ||
1583 | </para> | ||
1584 | <para> | ||
1585 | Our final dilemma is this: when can we actually destroy the | ||
1586 | removed element? Remember, a reader might be stepping through | ||
1587 | this element in the list right now: if we free this element and | ||
1588 | the <symbol>next</symbol> pointer changes, the reader will jump | ||
1589 | off into garbage and crash. We need to wait until we know that | ||
1590 | all the readers who were traversing the list when we deleted the | ||
1591 | element are finished. We use <function>call_rcu()</function> to | ||
1592 | register a callback which will actually destroy the object once | ||
1593 | the readers are finished. | ||
1594 | </para> | ||
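<para>
Continuing the sketch above, and assuming <structname>struct
foo</structname> has gained a <structname>struct rcu_head</structname>
member called <structfield>rcu</structfield>, deletion might look like
this (the same three-argument <function>call_rcu()</function> form
appears in the patch below):
</para>

<programlisting>
/* Runs once no CPU can still be traversing 'old'. */
static void free_foo_rcu(void *arg)
{
        kfree(arg);
}

/* Called with foo_list_lock held. */
static void foo_del(struct foo *old)
{
        list_del_rcu(&old->list);
        call_rcu(&old->rcu, free_foo_rcu, old);
}
</programlisting>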
1595 | <para> | ||
1596 | But how does Read Copy Update know when the readers are | ||
1597 | finished? The method is this: firstly, the readers always | ||
1598 | traverse the list inside | ||
1599 | <function>rcu_read_lock()</function>/<function>rcu_read_unlock()</function> | ||
1600 | pairs: these simply disable preemption so the reader won't go to | ||
1601 | sleep while reading the list. | ||
1602 | </para> | ||
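<para>
Putting the read side together (again using the invented
<symbol>foo_list</symbol> names from the sketches above), a lookup
becomes the following; a real user would take a reference count before
<function>rcu_read_unlock()</function>, as
<function>cache_find()</function> does in the patch below:
</para>

<programlisting>
struct foo *i, *result = NULL;

rcu_read_lock();                        /* no sleeping from here... */
list_for_each_entry_rcu(i, &foo_list, list) {
        if (i->id == id) {
                result = i;
                break;
        }
}
rcu_read_unlock();                      /* ...to here */
</programlisting>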
1603 | <para> | ||
1604 | RCU then waits until every other CPU has slept at least once: | ||
1605 | since readers cannot sleep, we know that any readers which were | ||
1606 | traversing the list during the deletion are finished, and the | ||
1607 | callback is triggered. The real Read Copy Update code is a | ||
1608 | little more optimized than this, but this is the fundamental | ||
1609 | idea. | ||
1610 | </para> | ||
1611 | |||
1612 | <programlisting> | ||
1613 | --- cache.c.perobjectlock 2003-12-11 17:15:03.000000000 +1100 | ||
1614 | +++ cache.c.rcupdate 2003-12-11 17:55:14.000000000 +1100 | ||
1615 | @@ -1,15 +1,18 @@ | ||
1616 | #include <linux/list.h> | ||
1617 | #include <linux/slab.h> | ||
1618 | #include <linux/string.h> | ||
1619 | +#include <linux/rcupdate.h> | ||
1620 | #include <asm/semaphore.h> | ||
1621 | #include <asm/errno.h> | ||
1622 | |||
1623 | struct object | ||
1624 | { | ||
1625 | - /* These two protected by cache_lock. */ | ||
1626 | + /* This is protected by RCU */ | ||
1627 | struct list_head list; | ||
1628 | int popularity; | ||
1629 | |||
1630 | + struct rcu_head rcu; | ||
1631 | + | ||
1632 | atomic_t refcnt; | ||
1633 | |||
1634 | /* Doesn't change once created. */ | ||
1635 | @@ -40,7 +43,7 @@ | ||
1636 | { | ||
1637 | struct object *i; | ||
1638 | |||
1639 | - list_for_each_entry(i, &cache, list) { | ||
1640 | + list_for_each_entry_rcu(i, &cache, list) { | ||
1641 | if (i->id == id) { | ||
1642 | i->popularity++; | ||
1643 | return i; | ||
1644 | @@ -49,19 +52,25 @@ | ||
1645 | return NULL; | ||
1646 | } | ||
1647 | |||
1648 | +/* Final discard done once we know no readers are looking. */ | ||
1649 | +static void cache_delete_rcu(void *arg) | ||
1650 | +{ | ||
1651 | + object_put(arg); | ||
1652 | +} | ||
1653 | + | ||
1654 | /* Must be holding cache_lock */ | ||
1655 | static void __cache_delete(struct object *obj) | ||
1656 | { | ||
1657 | BUG_ON(!obj); | ||
1658 | - list_del(&obj->list); | ||
1659 | - object_put(obj); | ||
1660 | + list_del_rcu(&obj->list); | ||
1661 | cache_num--; | ||
1662 | + call_rcu(&obj->rcu, cache_delete_rcu, obj); | ||
1663 | } | ||
1664 | |||
1665 | /* Must be holding cache_lock */ | ||
1666 | static void __cache_add(struct object *obj) | ||
1667 | { | ||
1668 | - list_add(&obj->list, &cache); | ||
1669 | + list_add_rcu(&obj->list, &cache); | ||
1670 | if (++cache_num > MAX_CACHE_SIZE) { | ||
1671 | struct object *i, *outcast = NULL; | ||
1672 | list_for_each_entry(i, &cache, list) { | ||
1673 | @@ -85,6 +94,7 @@ | ||
1674 | obj->popularity = 0; | ||
1675 | atomic_set(&obj->refcnt, 1); /* The cache holds a reference */ | ||
1676 | spin_lock_init(&obj->lock); | ||
1677 | + INIT_RCU_HEAD(&obj->rcu); | ||
1678 | |||
1679 | spin_lock_irqsave(&cache_lock, flags); | ||
1680 | __cache_add(obj); | ||
1681 | @@ -104,12 +114,11 @@ | ||
1682 | struct object *cache_find(int id) | ||
1683 | { | ||
1684 | struct object *obj; | ||
1685 | - unsigned long flags; | ||
1686 | |||
1687 | - spin_lock_irqsave(&cache_lock, flags); | ||
1688 | + rcu_read_lock(); | ||
1689 | obj = __cache_find(id); | ||
1690 | if (obj) | ||
1691 | object_get(obj); | ||
1692 | - spin_unlock_irqrestore(&cache_lock, flags); | ||
1693 | + rcu_read_unlock(); | ||
1694 | return obj; | ||
1695 | } | ||
1696 | </programlisting> | ||
1697 | |||
1698 | <para> | ||
1699 | Note that the reader will alter the | ||
1700 | <structfield>popularity</structfield> member in | ||
1701 | <function>__cache_find()</function>, and now it doesn't hold a lock. | ||
1702 | One solution would be to make it an <type>atomic_t</type>, but for | ||
1703 | this usage, we don't really care about races: an approximate result is | ||
1704 | good enough, so I didn't change it. | ||
1705 | </para> | ||
1706 | |||
1707 | <para> | ||
1708 | The result is that <function>cache_find()</function> requires no | ||
1709 | synchronization with any other functions, so it is almost as fast on SMP | ||
1710 | as it would be on UP. | ||
1711 | </para> | ||
1712 | |||
1713 | <para> | ||
1714 | There is a further optimization possible here: remember our original | ||
1715 | cache code, where there were no reference counts and the caller simply | ||
1716 | held the lock whenever using the object? This is still possible: if | ||
1717 | you hold the lock, no one can delete the object, so you don't need to | ||
1718 | get and put the reference count. | ||
1719 | </para> | ||
1720 | |||
1721 | <para> | ||
1722 | Now, because the 'read lock' in RCU is simply disabling preemption, a | ||
1723 | caller which always has preemption disabled between calling | ||
1724 | <function>cache_find()</function> and | ||
1725 | <function>object_put()</function> does not need to actually get and | ||
1726 | put the reference count: we could expose | ||
1727 | <function>__cache_find()</function> by making it non-static, and | ||
1728 | such callers could simply call that. | ||
1729 | </para> | ||
1730 | <para> | ||
1731 | The benefit here is that the reference count is not written to: the | ||
1732 | object is not altered in any way, which is much faster on SMP | ||
1733 | machines due to caching. | ||
1734 | </para> | ||
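<para>
A sketch of such a caller, assuming <function>__cache_find()</function>
were made non-static as described (the
<function>cache_find_popularity()</function> wrapper is invented for
illustration):
</para>

<programlisting>
int cache_find_popularity(int id)
{
        struct object *obj;
        int ret = -1;

        /* Disabling preemption makes us count as an RCU reader: the
           object cannot be freed until we enable preemption again. */
        preempt_disable();
        obj = __cache_find(id);
        if (obj)
                ret = obj->popularity;
        preempt_enable();
        return ret;
}
</programlisting>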
1735 | </sect1> | ||
1736 | |||
1737 | <sect1 id="per-cpu"> | ||
1738 | <title>Per-CPU Data</title> | ||
1739 | |||
1740 | <para> | ||
1741 | Another technique for avoiding locking which is used fairly | ||
1742 | widely is to duplicate information for each CPU. For example, | ||
1743 | if you wanted to keep a count of a common condition, you could | ||
1744 | use a spin lock and a single counter. Nice and simple. | ||
1745 | </para> | ||
1746 | |||
1747 | <para> | ||
1748 | If that was too slow (it's usually not, but if you've got a | ||
1749 | really big machine to test on and can show that it is), you | ||
1750 | could instead use a counter for each CPU, then none of them need | ||
1751 | an exclusive lock. See <function>DEFINE_PER_CPU()</function>, | ||
1752 | <function>get_cpu_var()</function> and | ||
1753 | <function>put_cpu_var()</function> | ||
1754 | (<filename class="headerfile">include/linux/percpu.h</filename>). | ||
1755 | </para> | ||
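<para>
A minimal sketch of such a counter (the <symbol>foo_count</symbol> and
<function>count_foo()</function> names are invented):
</para>

<programlisting>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, foo_count);

static inline void count_foo(void)
{
        /* get_cpu_var() disables preemption, so we can't migrate to
           another CPU halfway through the increment... */
        get_cpu_var(foo_count)++;
        /* ...and put_cpu_var() enables it again. */
        put_cpu_var(foo_count);
}
</programlisting>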
1756 | |||
1757 | <para> | ||
1758 | Of particular use for simple per-cpu counters is the | ||
1759 | <type>local_t</type> type, and the | ||
1760 | <function>cpu_local_inc()</function> and related functions, | ||
1761 | which are more efficient than simple code on some architectures | ||
1762 | (<filename class="headerfile">include/asm/local.h</filename>). | ||
1763 | </para> | ||
1764 | |||
1765 | <para> | ||
1766 | Note that there is no simple, reliable way of getting an exact | ||
1767 | value of such a counter, without introducing more locks. This | ||
1768 | is not a problem for some uses. | ||
1769 | </para> | ||
1770 | </sect1> | ||
1771 | |||
1772 | <sect1 id="mostly-hardirq"> | ||
1773 | <title>Data Which Is Mostly Used By An IRQ Handler</title> | ||
1774 | |||
1775 | <para> | ||
1776 | If data is always accessed from within the same IRQ handler, you | ||
1777 | don't need a lock at all: the kernel already guarantees that the | ||
1778 | irq handler will not run simultaneously on multiple CPUs. | ||
1779 | </para> | ||
1780 | <para> | ||
1781 | Manfred Spraul points out that you can still do this, even if | ||
1782 | the data is very occasionally accessed in user context or | ||
1783 | softirqs/tasklets. The irq handler doesn't use a lock, and | ||
1784 | all other accesses are done as follows: | ||
1785 | </para> | ||
1786 | |||
1787 | <programlisting> | ||
1788 | spin_lock(&lock);         /* keep out the other non-handler users */ | ||
1789 | disable_irq(irq);         /* handler can't run, and isn't running elsewhere */ | ||
1790 | ... | ||
1791 | enable_irq(irq); | ||
1792 | spin_unlock(&lock); | ||
1793 | </programlisting> | ||
1794 | <para> | ||
1795 | The <function>disable_irq()</function> prevents the irq handler | ||
1796 | from running (and waits for it to finish if it's currently | ||
1797 | running on other CPUs). The spinlock prevents any other | ||
1798 | accesses happening at the same time. Naturally, this is slower | ||
1799 | than just a <function>spin_lock_irq()</function> call, so it | ||
1800 | only makes sense if this type of access happens extremely | ||
1801 | rarely. | ||
1802 | </para> | ||
1803 | </sect1> | ||
1804 | </chapter> | ||
1805 | |||
1806 | <chapter id="sleeping-things"> | ||
1807 | <title>What Functions Are Safe To Call From Interrupts?</title> | ||
1808 | |||
1809 | <para> | ||
1810 | Many functions in the kernel sleep (ie. call schedule()) | ||
1811 | directly or indirectly: you can never call them while holding a | ||
1812 | spinlock, or with preemption disabled. This also means you need | ||
1813 | to be in user context: calling them from an interrupt is illegal. | ||
1814 | </para> | ||
1815 | |||
1816 | <sect1 id="sleeping"> | ||
1817 | <title>Some Functions Which Sleep</title> | ||
1818 | |||
1819 | <para> | ||
1820 | The most common ones are listed below, but you usually have to | ||
1821 | read the code to find out if other calls are safe. If everyone | ||
1822 | else who calls it can sleep, you probably need to be able to | ||
1823 | sleep, too. In particular, registration and deregistration | ||
1824 | functions usually expect to be called from user context, and can | ||
1825 | sleep. | ||
1826 | </para> | ||
1827 | |||
1828 | <itemizedlist> | ||
1829 | <listitem> | ||
1830 | <para> | ||
1831 | Accesses to | ||
1832 | <firstterm linkend="gloss-userspace">userspace</firstterm>: | ||
1833 | </para> | ||
1834 | <itemizedlist> | ||
1835 | <listitem> | ||
1836 | <para> | ||
1837 | <function>copy_from_user()</function> | ||
1838 | </para> | ||
1839 | </listitem> | ||
1840 | <listitem> | ||
1841 | <para> | ||
1842 | <function>copy_to_user()</function> | ||
1843 | </para> | ||
1844 | </listitem> | ||
1845 | <listitem> | ||
1846 | <para> | ||
1847 | <function>get_user()</function> | ||
1848 | </para> | ||
1849 | </listitem> | ||
1850 | <listitem> | ||
1851 | <para> | ||
1852 | <function>put_user()</function> | ||
1853 | </para> | ||
1854 | </listitem> | ||
1855 | </itemizedlist> | ||
1856 | </listitem> | ||
1857 | |||
1858 | <listitem> | ||
1859 | <para> | ||
1860 | <function>kmalloc(GFP_KERNEL)</function> (see the example after this list) | ||
1861 | </para> | ||
1862 | </listitem> | ||
1863 | |||
1864 | <listitem> | ||
1865 | <para> | ||
1866 | <function>down_interruptible()</function> and | ||
1867 | <function>down()</function> | ||
1868 | </para> | ||
1869 | <para> | ||
1870 | There is a <function>down_trylock()</function> which can be | ||
1871 | used inside interrupt context, as it will not sleep. | ||
1872 | <function>up()</function> will also never sleep. | ||
1873 | </para> | ||
1874 | </listitem> | ||
1875 | </itemizedlist> | ||
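<para>
For example, the same allocation has to be written differently
depending on the context (an illustrative sketch, not taken from the
cache code above):
</para>

<programlisting>
/* User context: may sleep until memory becomes available. */
buf = kmalloc(size, GFP_KERNEL);

/* Interrupt or atomic context: must not sleep, so it may fail instead. */
buf = kmalloc(size, GFP_ATOMIC);
if (!buf)
        return -ENOMEM;
</programlisting>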
1876 | </sect1> | ||
1877 | |||
1878 | <sect1 id="dont-sleep"> | ||
1879 | <title>Some Functions Which Don't Sleep</title> | ||
1880 | |||
1881 | <para> | ||
1882 | Some functions are safe to call from any context, or holding | ||
1883 | almost any lock. | ||
1884 | </para> | ||
1885 | |||
1886 | <itemizedlist> | ||
1887 | <listitem> | ||
1888 | <para> | ||
1889 | <function>printk()</function> | ||
1890 | </para> | ||
1891 | </listitem> | ||
1892 | <listitem> | ||
1893 | <para> | ||
1894 | <function>kfree()</function> | ||
1895 | </para> | ||
1896 | </listitem> | ||
1897 | <listitem> | ||
1898 | <para> | ||
1899 | <function>add_timer()</function> and <function>del_timer()</function> | ||
1900 | </para> | ||
1901 | </listitem> | ||
1902 | </itemizedlist> | ||
1903 | </sect1> | ||
1904 | </chapter> | ||
1905 | |||
1906 | <chapter id="references"> | ||
1907 | <title>Further reading</title> | ||
1908 | |||
1909 | <itemizedlist> | ||
1910 | <listitem> | ||
1911 | <para> | ||
1912 | <filename>Documentation/spinlocks.txt</filename>: | ||
1913 | Linus Torvalds' spinlocking tutorial in the kernel sources. | ||
1914 | </para> | ||
1915 | </listitem> | ||
1916 | |||
1917 | <listitem> | ||
1918 | <para> | ||
1919 | Unix Systems for Modern Architectures: Symmetric | ||
1920 | Multiprocessing and Caching for Kernel Programmers: | ||
1921 | </para> | ||
1922 | |||
1923 | <para> | ||
1924 | Curt Schimmel's very good introduction to kernel level | ||
1925 | locking (not written for Linux, but nearly everything | ||
1926 | applies). The book is expensive, but really worth every | ||
1927 | penny to understand SMP locking. [ISBN: 0201633388] | ||
1928 | </para> | ||
1929 | </listitem> | ||
1930 | </itemizedlist> | ||
1931 | </chapter> | ||
1932 | |||
1933 | <chapter id="thanks"> | ||
1934 | <title>Thanks</title> | ||
1935 | |||
1936 | <para> | ||
1937 | Thanks to Telsa Gwynne for DocBooking, neatening and adding | ||
1938 | style. | ||
1939 | </para> | ||
1940 | |||
1941 | <para> | ||
1942 | Thanks to Martin Pool, Philipp Rumpf, Stephen Rothwell, Paul | ||
1943 | Mackerras, Ruedi Aschwanden, Alan Cox, Manfred Spraul, Tim | ||
1944 | Waugh, Pete Zaitcev, James Morris, Robert Love, Paul McKenney, | ||
1945 | John Ashby for proofreading, correcting, flaming, commenting. | ||
1946 | </para> | ||
1947 | |||
1948 | <para> | ||
1949 | Thanks to the cabal for having no influence on this document. | ||
1950 | </para> | ||
1951 | </chapter> | ||
1952 | |||
1953 | <glossary id="glossary"> | ||
1954 | <title>Glossary</title> | ||
1955 | |||
1956 | <glossentry id="gloss-preemption"> | ||
1957 | <glossterm>preemption</glossterm> | ||
1958 | <glossdef> | ||
1959 | <para> | ||
1960 | Prior to 2.5, or when <symbol>CONFIG_PREEMPT</symbol> is | ||
1961 | unset, processes in user context inside the kernel would not | ||
1962 | preempt each other (ie. you had that CPU until you gave it up, | ||
1963 | except for interrupts). With the addition of | ||
1964 | <symbol>CONFIG_PREEMPT</symbol> in 2.5.4, this changed: when | ||
1965 | in user context, higher priority tasks can "cut in": spinlocks | ||
1966 | were changed to disable preemption, even on UP. | ||
1967 | </para> | ||
1968 | </glossdef> | ||
1969 | </glossentry> | ||
1970 | |||
1971 | <glossentry id="gloss-bh"> | ||
1972 | <glossterm>bh</glossterm> | ||
1973 | <glossdef> | ||
1974 | <para> | ||
1975 | Bottom Half: for historical reasons, functions with | ||
1976 | '_bh' in them often now refer to any software interrupt, e.g. | ||
1977 | <function>spin_lock_bh()</function> blocks any software interrupt | ||
1978 | on the current CPU. Bottom halves are deprecated, and will | ||
1979 | eventually be replaced by tasklets. Only one bottom half will be | ||
1980 | running at any time. | ||
1981 | </para> | ||
1982 | </glossdef> | ||
1983 | </glossentry> | ||
1984 | |||
1985 | <glossentry id="gloss-hwinterrupt"> | ||
1986 | <glossterm>Hardware Interrupt / Hardware IRQ</glossterm> | ||
1987 | <glossdef> | ||
1988 | <para> | ||
1989 | Hardware interrupt request. <function>in_irq()</function> returns | ||
1990 | <returnvalue>true</returnvalue> in a hardware interrupt handler. | ||
1991 | </para> | ||
1992 | </glossdef> | ||
1993 | </glossentry> | ||
1994 | |||
1995 | <glossentry id="gloss-interruptcontext"> | ||
1996 | <glossterm>Interrupt Context</glossterm> | ||
1997 | <glossdef> | ||
1998 | <para> | ||
1999 | Not user context: processing a hardware irq or software irq. | ||
2000 | Indicated by the <function>in_interrupt()</function> macro | ||
2001 | returning <returnvalue>true</returnvalue>. | ||
2002 | </para> | ||
2003 | </glossdef> | ||
2004 | </glossentry> | ||
2005 | |||
2006 | <glossentry id="gloss-smp"> | ||
2007 | <glossterm><acronym>SMP</acronym></glossterm> | ||
2008 | <glossdef> | ||
2009 | <para> | ||
2010 | Symmetric Multi-Processor: kernels compiled for multiple-CPU | ||
2011 | machines. (CONFIG_SMP=y). | ||
2012 | </para> | ||
2013 | </glossdef> | ||
2014 | </glossentry> | ||
2015 | |||
2016 | <glossentry id="gloss-softirq"> | ||
2017 | <glossterm>Software Interrupt / softirq</glossterm> | ||
2018 | <glossdef> | ||
2019 | <para> | ||
2020 | Software interrupt handler. <function>in_irq()</function> returns | ||
2021 | <returnvalue>false</returnvalue>; <function>in_softirq()</function> | ||
2022 | returns <returnvalue>true</returnvalue>. Tasklets and softirqs | ||
2023 | both fall into the category of 'software interrupts'. | ||
2024 | </para> | ||
2025 | <para> | ||
2026 | Strictly speaking a softirq is one of up to 32 enumerated software | ||
2027 | interrupts which can run on multiple CPUs at once. | ||
2028 | Sometimes used to refer to tasklets as | ||
2029 | well (ie. all software interrupts). | ||
2030 | </para> | ||
2031 | </glossdef> | ||
2032 | </glossentry> | ||
2033 | |||
2034 | <glossentry id="gloss-tasklet"> | ||
2035 | <glossterm>tasklet</glossterm> | ||
2036 | <glossdef> | ||
2037 | <para> | ||
2038 | A dynamically-registrable software interrupt, | ||
2039 | which is guaranteed to only run on one CPU at a time. | ||
2040 | </para> | ||
2041 | </glossdef> | ||
2042 | </glossentry> | ||
2043 | |||
2044 | <glossentry id="gloss-timers"> | ||
2045 | <glossterm>timer</glossterm> | ||
2046 | <glossdef> | ||
2047 | <para> | ||
2048 | A dynamically-registrable software interrupt, which is run at | ||
2049 | (or close to) a given time. When running, it is just like a | ||
2050 | tasklet (in fact, they are called from the TIMER_SOFTIRQ). | ||
2051 | </para> | ||
2052 | </glossdef> | ||
2053 | </glossentry> | ||
2054 | |||
2055 | <glossentry id="gloss-up"> | ||
2056 | <glossterm><acronym>UP</acronym></glossterm> | ||
2057 | <glossdef> | ||
2058 | <para> | ||
2059 | Uni-Processor: Non-SMP. (CONFIG_SMP=n). | ||
2060 | </para> | ||
2061 | </glossdef> | ||
2062 | </glossentry> | ||
2063 | |||
2064 | <glossentry id="gloss-usercontext"> | ||
2065 | <glossterm>User Context</glossterm> | ||
2066 | <glossdef> | ||
2067 | <para> | ||
2068 | The kernel executing on behalf of a particular process (ie. a | ||
2069 | system call or trap) or kernel thread. You can tell which | ||
2070 | process with the <symbol>current</symbol> macro. Not to | ||
2071 | be confused with userspace. Can be interrupted by software or | ||
2072 | hardware interrupts. | ||
2073 | </para> | ||
2074 | </glossdef> | ||
2075 | </glossentry> | ||
2076 | |||
2077 | <glossentry id="gloss-userspace"> | ||
2078 | <glossterm>Userspace</glossterm> | ||
2079 | <glossdef> | ||
2080 | <para> | ||
2081 | A process executing its own code outside the kernel. | ||
2082 | </para> | ||
2083 | </glossdef> | ||
2084 | </glossentry> | ||
2085 | |||
2086 | </glossary> | ||
2087 | </book> | ||
2088 | |||