-rw-r--r--  Documentation/RCU/00-INDEX | 10
-rw-r--r--  Documentation/RCU/RTFP.txt | 61
-rw-r--r--  Documentation/RCU/checklist.txt | 208
-rw-r--r--  Documentation/RCU/lockdep.txt | 67
-rw-r--r--  Documentation/RCU/rcu.txt | 48
-rw-r--r--  Documentation/RCU/stallwarn.txt | 58
-rw-r--r--  Documentation/RCU/torture.txt | 12
-rw-r--r--  Documentation/RCU/whatisRCU.txt | 16
-rw-r--r--  Documentation/filesystems/dentry-locking.txt | 3
-rw-r--r--  MAINTAINERS | 13
-rw-r--r--  fs/file.c | 2
-rw-r--r--  fs/proc/array.c | 2
-rw-r--r--  fs/proc/base.c | 6
-rw-r--r--  include/linux/cgroup.h | 5
-rw-r--r--  include/linux/cpumask.h | 14
-rw-r--r--  include/linux/cred.h | 2
-rw-r--r--  include/linux/fdtable.h | 11
-rw-r--r--  include/linux/lockdep.h | 4
-rw-r--r--  include/linux/rculist.h | 14
-rw-r--r--  include/linux/rculist_nulls.h | 4
-rw-r--r--  include/linux/rcupdate.h | 165
-rw-r--r--  include/linux/rcutiny.h | 16
-rw-r--r--  include/linux/rcutree.h | 4
-rw-r--r--  include/linux/rtnetlink.h | 3
-rw-r--r--  include/linux/srcu.h | 95
-rw-r--r--  include/net/addrconf.h | 4
-rw-r--r--  init/Kconfig | 16
-rw-r--r--  init/main.c | 2
-rw-r--r--  kernel/cgroup.c | 15
-rw-r--r--  kernel/exit.c | 14
-rw-r--r--  kernel/fork.c | 1
-rw-r--r--  kernel/lockdep.c | 18
-rw-r--r--  kernel/notifier.c | 6
-rw-r--r--  kernel/pid.c | 2
-rw-r--r--  kernel/rcupdate.c | 29
-rw-r--r--  kernel/rcutorture.c | 94
-rw-r--r--  kernel/rcutree.c | 268
-rw-r--r--  kernel/rcutree.h | 61
-rw-r--r--  kernel/rcutree_plugin.h | 229
-rw-r--r--  kernel/rcutree_trace.c | 14
-rw-r--r--  kernel/sched.c | 11
-rw-r--r--  kernel/srcu.c | 52
-rw-r--r--  lib/Kconfig.debug | 26
-rw-r--r--  lib/debug_locks.c | 1
-rw-r--r--  lib/idr.c | 8
-rw-r--r--  lib/radix-tree.c | 24
-rw-r--r--  net/core/dev.c | 2
-rw-r--r--  net/core/filter.c | 6
-rw-r--r--  net/core/rtnetlink.c | 8
-rw-r--r--  net/core/sock.c | 3
-rw-r--r--  net/decnet/dn_route.c | 14
-rw-r--r--  net/ipv4/route.c | 14
-rw-r--r--  net/packet/af_packet.c | 2
-rw-r--r--  security/keys/gc.c | 3
-rw-r--r--  security/keys/keyring.c | 4
55 files changed, 1346 insertions(+), 448 deletions(-)
diff --git a/Documentation/RCU/00-INDEX b/Documentation/RCU/00-INDEX
index 9bb62f7b89c3..71b6f500ddb9 100644
--- a/Documentation/RCU/00-INDEX
+++ b/Documentation/RCU/00-INDEX
@@ -6,16 +6,22 @@ checklist.txt
6 - Review Checklist for RCU Patches 6 - Review Checklist for RCU Patches
7listRCU.txt 7listRCU.txt
8 - Using RCU to Protect Read-Mostly Linked Lists 8 - Using RCU to Protect Read-Mostly Linked Lists
9lockdep.txt
10 - RCU and lockdep checking
9NMI-RCU.txt 11NMI-RCU.txt
10 - Using RCU to Protect Dynamic NMI Handlers 12 - Using RCU to Protect Dynamic NMI Handlers
13rcubarrier.txt
14 - RCU and Unloadable Modules
15rculist_nulls.txt
16 - RCU list primitives for use with SLAB_DESTROY_BY_RCU
11rcuref.txt 17rcuref.txt
12 - Reference-count design for elements of lists/arrays protected by RCU 18 - Reference-count design for elements of lists/arrays protected by RCU
13rcu.txt 19rcu.txt
14 - RCU Concepts 20 - RCU Concepts
15rcubarrier.txt
16 - Unloading modules that use RCU callbacks
17RTFP.txt 21RTFP.txt
18 - List of RCU papers (bibliography) going back to 1980. 22 - List of RCU papers (bibliography) going back to 1980.
23stallwarn.txt
24 - RCU CPU stall warnings (CONFIG_RCU_CPU_STALL_DETECTOR)
19torture.txt 25torture.txt
20 - RCU Torture Test Operation (CONFIG_RCU_TORTURE_TEST) 26 - RCU Torture Test Operation (CONFIG_RCU_TORTURE_TEST)
21trace.txt 27trace.txt
diff --git a/Documentation/RCU/RTFP.txt b/Documentation/RCU/RTFP.txt
index d2b85237c76e..5aea459e3dd6 100644
--- a/Documentation/RCU/RTFP.txt
+++ b/Documentation/RCU/RTFP.txt
@@ -25,10 +25,10 @@ to be referencing the data structure. However, this mechanism was not
25optimized for modern computer systems, which is not surprising given 25optimized for modern computer systems, which is not surprising given
26that these overheads were not so expensive in the mid-80s. Nonetheless, 26that these overheads were not so expensive in the mid-80s. Nonetheless,
27passive serialization appears to be the first deferred-destruction 27passive serialization appears to be the first deferred-destruction
28mechanism to be used in production. Furthermore, the relevant patent has 28mechanism to be used in production. Furthermore, the relevant patent
29lapsed, so this approach may be used in non-GPL software, if desired. 29has lapsed, so this approach may be used in non-GPL software, if desired.
30(In contrast, use of RCU is permitted only in software licensed under 30(In contrast, implementation of RCU is permitted only in software licensed
31GPL. Sorry!!!) 31under either GPL or LGPL. Sorry!!!)
32 32
33In 1990, Pugh [Pugh90] noted that explicitly tracking which threads 33In 1990, Pugh [Pugh90] noted that explicitly tracking which threads
34were reading a given data structure permitted deferred free to operate 34were reading a given data structure permitted deferred free to operate
@@ -150,6 +150,18 @@ preemptible RCU [PaulEMcKenney2007PreemptibleRCU], and the three-part
150LWN "What is RCU?" series [PaulEMcKenney2007WhatIsRCUFundamentally, 150LWN "What is RCU?" series [PaulEMcKenney2007WhatIsRCUFundamentally,
151PaulEMcKenney2008WhatIsRCUUsage, and PaulEMcKenney2008WhatIsRCUAPI]. 151PaulEMcKenney2008WhatIsRCUUsage, and PaulEMcKenney2008WhatIsRCUAPI].
152 152
1532008 saw a journal paper on real-time RCU [DinakarGuniguntala2008IBMSysJ],
154a history of how Linux changed RCU more than RCU changed Linux
155[PaulEMcKenney2008RCUOSR], and a design overview of hierarchical RCU
156[PaulEMcKenney2008HierarchicalRCU].
157
1582009 introduced user-level RCU algorithms [PaulEMcKenney2009MaliciousURCU],
159which Mathieu Desnoyers is now maintaining [MathieuDesnoyers2009URCU]
160[MathieuDesnoyersPhD]. TINY_RCU [PaulEMcKenney2009BloatWatchRCU] made
161its appearance, as did expedited RCU [PaulEMcKenney2009expeditedRCU].
162The problem of resizeable RCU-protected hash tables may now be on a path
163to a solution [JoshTriplett2009RPHash].
164
153Bibtex Entries 165Bibtex Entries
154 166
155@article{Kung80 167@article{Kung80
@@ -730,6 +742,11 @@ Revised:
730" 742"
731} 743}
732 744
745#
746# "What is RCU?" LWN series.
747#
748########################################################################
749
733@article{DinakarGuniguntala2008IBMSysJ 750@article{DinakarGuniguntala2008IBMSysJ
734,author="D. Guniguntala and P. E. McKenney and J. Triplett and J. Walpole" 751,author="D. Guniguntala and P. E. McKenney and J. Triplett and J. Walpole"
735,title="The read-copy-update mechanism for supporting real-time applications on shared-memory multiprocessor systems with {Linux}" 752,title="The read-copy-update mechanism for supporting real-time applications on shared-memory multiprocessor systems with {Linux}"
@@ -820,3 +837,39 @@ Revised:
820 Uniprocessor assumptions allow simplified RCU implementation. 837 Uniprocessor assumptions allow simplified RCU implementation.
821" 838"
822} 839}
840
841@unpublished{PaulEMcKenney2009expeditedRCU
842,Author="Paul E. McKenney"
843,Title="[{PATCH} -tip 0/3] expedited 'big hammer' {RCU} grace periods"
844,month="June"
845,day="25"
846,year="2009"
847,note="Available:
848\url{http://lkml.org/lkml/2009/6/25/306}
849[Viewed August 16, 2009]"
850,annotation="
851 First posting of expedited RCU to be accepted into -tip.
852"
853}
854
855@unpublished{JoshTriplett2009RPHash
856,Author="Josh Triplett"
857,Title="Scalable concurrent hash tables via relativistic programming"
858,month="September"
859,year="2009"
860,note="Linux Plumbers Conference presentation"
861,annotation="
862 RP fun with hash tables.
863"
864}
865
866@phdthesis{MathieuDesnoyersPhD
867, title = "Low-Impact Operating System Tracing"
868, author = "Mathieu Desnoyers"
869, school = "Ecole Polytechnique de Montr\'{e}al"
870, month = "December"
871, year = 2009
872,note="Available:
873\url{http://www.lttng.org/pub/thesis/desnoyers-dissertation-2009-12.pdf}
874[Viewed December 9, 2009]"
875}
diff --git a/Documentation/RCU/checklist.txt b/Documentation/RCU/checklist.txt
index 51525a30e8b4..cbc180f90194 100644
--- a/Documentation/RCU/checklist.txt
+++ b/Documentation/RCU/checklist.txt
@@ -8,13 +8,12 @@ would cause. This list is based on experiences reviewing such patches
8over a rather long period of time, but improvements are always welcome! 8over a rather long period of time, but improvements are always welcome!
9 9
100. Is RCU being applied to a read-mostly situation? If the data 100. Is RCU being applied to a read-mostly situation? If the data
11 structure is updated more than about 10% of the time, then 11 structure is updated more than about 10% of the time, then you
12 you should strongly consider some other approach, unless 12 should strongly consider some other approach, unless detailed
13 detailed performance measurements show that RCU is nonetheless 13 performance measurements show that RCU is nonetheless the right
14 the right tool for the job. Yes, you might think of RCU 14 tool for the job. Yes, RCU does reduce read-side overhead by
15 as simply cutting overhead off of the readers and imposing it 15 increasing write-side overhead, which is exactly why normal uses
16 on the writers. That is exactly why normal uses of RCU will 16 of RCU will do much more reading than updating.
17 do much more reading than updating.
18 17
19 Another exception is where performance is not an issue, and RCU 18 Another exception is where performance is not an issue, and RCU
20 provides a simpler implementation. An example of this situation 19 provides a simpler implementation. An example of this situation
@@ -35,13 +34,13 @@ over a rather long period of time, but improvements are always welcome!
35 34
36 If you choose #b, be prepared to describe how you have handled 35 If you choose #b, be prepared to describe how you have handled
37 memory barriers on weakly ordered machines (pretty much all of 36 memory barriers on weakly ordered machines (pretty much all of
38 them -- even x86 allows reads to be reordered), and be prepared 37 them -- even x86 allows later loads to be reordered to precede
39 to explain why this added complexity is worthwhile. If you 38 earlier stores), and be prepared to explain why this added
40 choose #c, be prepared to explain how this single task does not 39 complexity is worthwhile. If you choose #c, be prepared to
41 become a major bottleneck on big multiprocessor machines (for 40 explain how this single task does not become a major bottleneck on
42 example, if the task is updating information relating to itself 41 big multiprocessor machines (for example, if the task is updating
43 that other tasks can read, there by definition can be no 42 information relating to itself that other tasks can read, there
44 bottleneck). 43 by definition can be no bottleneck).
45 44
462. Do the RCU read-side critical sections make proper use of 452. Do the RCU read-side critical sections make proper use of
47 rcu_read_lock() and friends? These primitives are needed 46 rcu_read_lock() and friends? These primitives are needed
@@ -51,8 +50,10 @@ over a rather long period of time, but improvements are always welcome!
51 actuarial risk of your kernel. 50 actuarial risk of your kernel.
52 51
53 As a rough rule of thumb, any dereference of an RCU-protected 52 As a rough rule of thumb, any dereference of an RCU-protected
54 pointer must be covered by rcu_read_lock() or rcu_read_lock_bh() 53 pointer must be covered by rcu_read_lock(), rcu_read_lock_bh(),
55 or by the appropriate update-side lock. 54 rcu_read_lock_sched(), or by the appropriate update-side lock.
55 Disabling of preemption can serve as rcu_read_lock_sched(), but
56 is less readable.
56 57
573. Does the update code tolerate concurrent accesses? 583. Does the update code tolerate concurrent accesses?
58 59
@@ -62,25 +63,27 @@ over a rather long period of time, but improvements are always welcome!
62 of ways to handle this concurrency, depending on the situation: 63 of ways to handle this concurrency, depending on the situation:
63 64
64 a. Use the RCU variants of the list and hlist update 65 a. Use the RCU variants of the list and hlist update
65 primitives to add, remove, and replace elements on an 66 primitives to add, remove, and replace elements on
66 RCU-protected list. Alternatively, use the RCU-protected 67 an RCU-protected list. Alternatively, use the other
67 trees that have been added to the Linux kernel. 68 RCU-protected data structures that have been added to
69 the Linux kernel.
68 70
69 This is almost always the best approach. 71 This is almost always the best approach.
70 72
71 b. Proceed as in (a) above, but also maintain per-element 73 b. Proceed as in (a) above, but also maintain per-element
72 locks (that are acquired by both readers and writers) 74 locks (that are acquired by both readers and writers)
73 that guard per-element state. Of course, fields that 75 that guard per-element state. Of course, fields that
74 the readers refrain from accessing can be guarded by the 76 the readers refrain from accessing can be guarded by
75 update-side lock. 77 some other lock acquired only by updaters, if desired.
76 78
77 This works quite well, also. 79 This works quite well, also.
78 80
79 c. Make updates appear atomic to readers. For example, 81 c. Make updates appear atomic to readers. For example,
80 pointer updates to properly aligned fields will appear 82 pointer updates to properly aligned fields will
81 atomic, as will individual atomic primitives. Operations 83 appear atomic, as will individual atomic primitives.
 82 performed under a lock and sequences of multiple atomic 84 Sequences of operations performed under a lock will -not-
83 primitives will -not- appear to be atomic. 85 appear to be atomic to RCU readers, nor will sequences
86 of multiple atomic primitives.
84 87
85 This can work, but is starting to get a bit tricky. 88 This can work, but is starting to get a bit tricky.
86 89
@@ -98,9 +101,9 @@ over a rather long period of time, but improvements are always welcome!
98 a new structure containing updated values. 101 a new structure containing updated values.
99 102
1004. Weakly ordered CPUs pose special challenges. Almost all CPUs 1034. Weakly ordered CPUs pose special challenges. Almost all CPUs
101 are weakly ordered -- even i386 CPUs allow reads to be reordered. 104 are weakly ordered -- even x86 CPUs allow later loads to be
102 RCU code must take all of the following measures to prevent 105 reordered to precede earlier stores. RCU code must take all of
103 memory-corruption problems: 106 the following measures to prevent memory-corruption problems:
104 107
105 a. Readers must maintain proper ordering of their memory 108 a. Readers must maintain proper ordering of their memory
106 accesses. The rcu_dereference() primitive ensures that 109 accesses. The rcu_dereference() primitive ensures that
@@ -113,14 +116,25 @@ over a rather long period of time, but improvements are always welcome!
113 The rcu_dereference() primitive is also an excellent 116 The rcu_dereference() primitive is also an excellent
114 documentation aid, letting the person reading the code 117 documentation aid, letting the person reading the code
115 know exactly which pointers are protected by RCU. 118 know exactly which pointers are protected by RCU.
116 119 Please note that compilers can also reorder code, and
117 The rcu_dereference() primitive is used by the various 120 they are becoming increasingly aggressive about doing
118 "_rcu()" list-traversal primitives, such as the 121 just that. The rcu_dereference() primitive therefore
119 list_for_each_entry_rcu(). Note that it is perfectly 122 also prevents destructive compiler optimizations.
120 legal (if redundant) for update-side code to use 123
121 rcu_dereference() and the "_rcu()" list-traversal 124 The rcu_dereference() primitive is used by the
122 primitives. This is particularly useful in code 125 various "_rcu()" list-traversal primitives, such
123 that is common to readers and updaters. 126 as the list_for_each_entry_rcu(). Note that it is
127 perfectly legal (if redundant) for update-side code to
128 use rcu_dereference() and the "_rcu()" list-traversal
129 primitives. This is particularly useful in code that
130 is common to readers and updaters. However, lockdep
131 will complain if you access rcu_dereference() outside
132 of an RCU read-side critical section. See lockdep.txt
133 to learn what to do about this.
134
135 Of course, neither rcu_dereference() nor the "_rcu()"
136 list-traversal primitives can substitute for a good
137 concurrency design coordinating among multiple updaters.
124 138
125 b. If the list macros are being used, the list_add_tail_rcu() 139 b. If the list macros are being used, the list_add_tail_rcu()
126 and list_add_rcu() primitives must be used in order 140 and list_add_rcu() primitives must be used in order
@@ -135,11 +149,14 @@ over a rather long period of time, but improvements are always welcome!
135 readers. Similarly, if the hlist macros are being used, 149 readers. Similarly, if the hlist macros are being used,
136 the hlist_del_rcu() primitive is required. 150 the hlist_del_rcu() primitive is required.
137 151
138 The list_replace_rcu() primitive may be used to 152 The list_replace_rcu() and hlist_replace_rcu() primitives
139 replace an old structure with a new one in an 153 may be used to replace an old structure with a new one
140 RCU-protected list. 154 in their respective types of RCU-protected lists.
155
156 d. Rules similar to (4b) and (4c) apply to the "hlist_nulls"
157 type of RCU-protected linked lists.
141 158
142 d. Updates must ensure that initialization of a given 159 e. Updates must ensure that initialization of a given
143 structure happens before pointers to that structure are 160 structure happens before pointers to that structure are
144 publicized. Use the rcu_assign_pointer() primitive 161 publicized. Use the rcu_assign_pointer() primitive
145 when publicizing a pointer to a structure that can 162 when publicizing a pointer to a structure that can
@@ -151,16 +168,31 @@ over a rather long period of time, but improvements are always welcome!
151 it cannot block. 168 it cannot block.
152 169
1536. Since synchronize_rcu() can block, it cannot be called from 1706. Since synchronize_rcu() can block, it cannot be called from
154 any sort of irq context. Ditto for synchronize_sched() and 171 any sort of irq context. The same rule applies for
155 synchronize_srcu(). 172 synchronize_rcu_bh(), synchronize_sched(), synchronize_srcu(),
156 173 synchronize_rcu_expedited(), synchronize_rcu_bh_expedited(),
1577. If the updater uses call_rcu(), then the corresponding readers 174 synchronize_sched_expedited(), and synchronize_srcu_expedited().
158 must use rcu_read_lock() and rcu_read_unlock(). If the updater 175
159 uses call_rcu_bh(), then the corresponding readers must use 176 The expedited forms of these primitives have the same semantics
160 rcu_read_lock_bh() and rcu_read_unlock_bh(). If the updater 177 as the non-expedited forms, but expediting is both expensive
161 uses call_rcu_sched(), then the corresponding readers must 178 and unfriendly to real-time workloads. Use of the expedited
162 disable preemption. Mixing things up will result in confusion 179 primitives should be restricted to rare configuration-change
163 and broken kernels. 180 operations that would not normally be undertaken while a real-time
181 workload is running.
182
1837. If the updater uses call_rcu() or synchronize_rcu(), then the
184 corresponding readers must use rcu_read_lock() and
185 rcu_read_unlock(). If the updater uses call_rcu_bh() or
186 synchronize_rcu_bh(), then the corresponding readers must
187 use rcu_read_lock_bh() and rcu_read_unlock_bh(). If the
188 updater uses call_rcu_sched() or synchronize_sched(), then
189 the corresponding readers must disable preemption, possibly
190 by calling rcu_read_lock_sched() and rcu_read_unlock_sched().
 191 If the updater uses synchronize_srcu(), then the corresponding
192 readers must use srcu_read_lock() and srcu_read_unlock(),
193 and with the same srcu_struct. The rules for the expedited
194 primitives are the same as for their non-expedited counterparts.
195 Mixing things up will result in confusion and broken kernels.
164 196
165 One exception to this rule: rcu_read_lock() and rcu_read_unlock() 197 One exception to this rule: rcu_read_lock() and rcu_read_unlock()
166 may be substituted for rcu_read_lock_bh() and rcu_read_unlock_bh() 198 may be substituted for rcu_read_lock_bh() and rcu_read_unlock_bh()
@@ -212,6 +244,8 @@ over a rather long period of time, but improvements are always welcome!
212 e. Periodically invoke synchronize_rcu(), permitting a limited 244 e. Periodically invoke synchronize_rcu(), permitting a limited
213 number of updates per grace period. 245 number of updates per grace period.
214 246
247 The same cautions apply to call_rcu_bh() and call_rcu_sched().
248
2159. All RCU list-traversal primitives, which include 2499. All RCU list-traversal primitives, which include
216 rcu_dereference(), list_for_each_entry_rcu(), 250 rcu_dereference(), list_for_each_entry_rcu(),
217 list_for_each_continue_rcu(), and list_for_each_safe_rcu(), 251 list_for_each_continue_rcu(), and list_for_each_safe_rcu(),
@@ -219,7 +253,9 @@ over a rather long period of time, but improvements are always welcome!
219 must be protected by appropriate update-side locks. RCU 253 must be protected by appropriate update-side locks. RCU
220 read-side critical sections are delimited by rcu_read_lock() 254 read-side critical sections are delimited by rcu_read_lock()
221 and rcu_read_unlock(), or by similar primitives such as 255 and rcu_read_unlock(), or by similar primitives such as
222 rcu_read_lock_bh() and rcu_read_unlock_bh(). 256 rcu_read_lock_bh() and rcu_read_unlock_bh(), in which case
257 the matching rcu_dereference() primitive must be used in order
258 to keep lockdep happy, in this case, rcu_dereference_bh().
223 259
224 The reason that it is permissible to use RCU list-traversal 260 The reason that it is permissible to use RCU list-traversal
225 primitives when the update-side lock is held is that doing so 261 primitives when the update-side lock is held is that doing so
@@ -229,7 +265,8 @@ over a rather long period of time, but improvements are always welcome!
22910. Conversely, if you are in an RCU read-side critical section, 26510. Conversely, if you are in an RCU read-side critical section,
230 and you don't hold the appropriate update-side lock, you -must- 266 and you don't hold the appropriate update-side lock, you -must-
231 use the "_rcu()" variants of the list macros. Failing to do so 267 use the "_rcu()" variants of the list macros. Failing to do so
232 will break Alpha and confuse people reading your code. 268 will break Alpha, cause aggressive compilers to generate bad code,
269 and confuse people trying to read your code.
233 270
23411. Note that synchronize_rcu() -only- guarantees to wait until 27111. Note that synchronize_rcu() -only- guarantees to wait until
235 all currently executing rcu_read_lock()-protected RCU read-side 272 all currently executing rcu_read_lock()-protected RCU read-side
@@ -239,15 +276,21 @@ over a rather long period of time, but improvements are always welcome!
239 rcu_read_lock()-protected read-side critical sections, do -not- 276 rcu_read_lock()-protected read-side critical sections, do -not-
240 use synchronize_rcu(). 277 use synchronize_rcu().
241 278
242 If you want to wait for some of these other things, you might 279 Similarly, disabling preemption is not an acceptable substitute
243 instead need to use synchronize_irq() or synchronize_sched(). 280 for rcu_read_lock(). Code that attempts to use preemption
281 disabling where it should be using rcu_read_lock() will break
282 in real-time kernel builds.
283
284 If you want to wait for interrupt handlers, NMI handlers, and
285 code under the influence of preempt_disable(), you instead
286 need to use synchronize_irq() or synchronize_sched().
244 287
24512. Any lock acquired by an RCU callback must be acquired elsewhere 28812. Any lock acquired by an RCU callback must be acquired elsewhere
246 with softirq disabled, e.g., via spin_lock_irqsave(), 289 with softirq disabled, e.g., via spin_lock_irqsave(),
247 spin_lock_bh(), etc. Failing to disable irq on a given 290 spin_lock_bh(), etc. Failing to disable irq on a given
248 acquisition of that lock will result in deadlock as soon as the 291 acquisition of that lock will result in deadlock as soon as
249 RCU callback happens to interrupt that acquisition's critical 292 the RCU softirq handler happens to run your RCU callback while
250 section. 293 interrupting that acquisition's critical section.
251 294
25213. RCU callbacks can be and are executed in parallel. In many cases, 29513. RCU callbacks can be and are executed in parallel. In many cases,
253 the callback code simply wrappers around kfree(), so that this 296 the callback code simply wrappers around kfree(), so that this
@@ -265,29 +308,30 @@ over a rather long period of time, but improvements are always welcome!
265 not the case, a self-spawning RCU callback would prevent the 308 not the case, a self-spawning RCU callback would prevent the
266 victim CPU from ever going offline.) 309 victim CPU from ever going offline.)
267 310
26814. SRCU (srcu_read_lock(), srcu_read_unlock(), and synchronize_srcu()) 31114. SRCU (srcu_read_lock(), srcu_read_unlock(), srcu_dereference(),
269 may only be invoked from process context. Unlike other forms of 312 synchronize_srcu(), and synchronize_srcu_expedited()) may only
270 RCU, it -is- permissible to block in an SRCU read-side critical 313 be invoked from process context. Unlike other forms of RCU, it
271 section (demarked by srcu_read_lock() and srcu_read_unlock()), 314 -is- permissible to block in an SRCU read-side critical section
272 hence the "SRCU": "sleepable RCU". Please note that if you 315 (demarked by srcu_read_lock() and srcu_read_unlock()), hence the
273 don't need to sleep in read-side critical sections, you should 316 "SRCU": "sleepable RCU". Please note that if you don't need
274 be using RCU rather than SRCU, because RCU is almost always 317 to sleep in read-side critical sections, you should be using
275 faster and easier to use than is SRCU. 318 RCU rather than SRCU, because RCU is almost always faster and
319 easier to use than is SRCU.
276 320
277 Also unlike other forms of RCU, explicit initialization 321 Also unlike other forms of RCU, explicit initialization
278 and cleanup is required via init_srcu_struct() and 322 and cleanup is required via init_srcu_struct() and
279 cleanup_srcu_struct(). These are passed a "struct srcu_struct" 323 cleanup_srcu_struct(). These are passed a "struct srcu_struct"
280 that defines the scope of a given SRCU domain. Once initialized, 324 that defines the scope of a given SRCU domain. Once initialized,
281 the srcu_struct is passed to srcu_read_lock(), srcu_read_unlock() 325 the srcu_struct is passed to srcu_read_lock(), srcu_read_unlock()
282 and synchronize_srcu(). A given synchronize_srcu() waits only 326 synchronize_srcu(), and synchronize_srcu_expedited(). A given
283 for SRCU read-side critical sections governed by srcu_read_lock() 327 synchronize_srcu() waits only for SRCU read-side critical
284 and srcu_read_unlock() calls that have been passd the same 328 sections governed by srcu_read_lock() and srcu_read_unlock()
285 srcu_struct. This property is what makes sleeping read-side 329 calls that have been passed the same srcu_struct. This property
286 critical sections tolerable -- a given subsystem delays only 330 is what makes sleeping read-side critical sections tolerable --
287 its own updates, not those of other subsystems using SRCU. 331 a given subsystem delays only its own updates, not those of other
288 Therefore, SRCU is less prone to OOM the system than RCU would 332 subsystems using SRCU. Therefore, SRCU is less prone to OOM the
289 be if RCU's read-side critical sections were permitted to 333 system than RCU would be if RCU's read-side critical sections
290 sleep. 334 were permitted to sleep.
291 335
292 The ability to sleep in read-side critical sections does not 336 The ability to sleep in read-side critical sections does not
293 come for free. First, corresponding srcu_read_lock() and 337 come for free. First, corresponding srcu_read_lock() and
@@ -311,12 +355,12 @@ over a rather long period of time, but improvements are always welcome!
311 destructive operation, and -only- -then- invoke call_rcu(), 355 destructive operation, and -only- -then- invoke call_rcu(),
312 synchronize_rcu(), or friends. 356 synchronize_rcu(), or friends.
313 357
314 Because these primitives only wait for pre-existing readers, 358 Because these primitives only wait for pre-existing readers, it
315 it is the caller's responsibility to guarantee safety to 359 is the caller's responsibility to guarantee that any subsequent
316 any subsequent readers. 360 readers will execute safely.
317 361
31816. The various RCU read-side primitives do -not- contain memory 36216. The various RCU read-side primitives do -not- necessarily contain
319 barriers. The CPU (and in some cases, the compiler) is free 363 memory barriers. You should therefore plan for the CPU
320 to reorder code into and out of RCU read-side critical sections. 364 and the compiler to freely reorder code into and out of RCU
321 It is the responsibility of the RCU update-side primitives to 365 read-side critical sections. It is the responsibility of the
322 deal with this. 366 RCU update-side primitives to deal with this.
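
As an illustrative sketch (not part of the patch), checklist item 7's rule that updaters and readers use matching RCU flavors might look as follows in code; the names "struct foo", "gbl_foo", and "gbl_foo_lock" are hypothetical:

	#include <linux/errno.h>
	#include <linux/kernel.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct foo {
		int val;
		struct rcu_head rcu;
	};

	static struct foo *gbl_foo;
	static DEFINE_SPINLOCK(gbl_foo_lock);

	/* Reader: rcu_read_lock()/rcu_dereference() match the call_rcu() below. */
	static int foo_get_val(void)
	{
		struct foo *p;
		int val = -1;

		rcu_read_lock();
		p = rcu_dereference(gbl_foo);
		if (p)
			val = p->val;
		rcu_read_unlock();
		return val;
	}

	static void foo_reclaim(struct rcu_head *head)
	{
		kfree(container_of(head, struct foo, rcu));
	}

	/* Updater: publish the new version, then defer freeing the old one. */
	static int foo_set_val(int val)
	{
		struct foo *newp, *oldp;

		newp = kmalloc(sizeof(*newp), GFP_KERNEL);
		if (!newp)
			return -ENOMEM;
		newp->val = val;

		spin_lock(&gbl_foo_lock);
		oldp = rcu_dereference_check(gbl_foo,
					     lockdep_is_held(&gbl_foo_lock));
		rcu_assign_pointer(gbl_foo, newp);
		spin_unlock(&gbl_foo_lock);

		if (oldp)
			call_rcu(&oldp->rcu, foo_reclaim);
		return 0;
	}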
diff --git a/Documentation/RCU/lockdep.txt b/Documentation/RCU/lockdep.txt
new file mode 100644
index 000000000000..fe24b58627bd
--- /dev/null
+++ b/Documentation/RCU/lockdep.txt
@@ -0,0 +1,67 @@
1RCU and lockdep checking
2
3All flavors of RCU have lockdep checking available, so that lockdep is
4aware of when each task enters and leaves any flavor of RCU read-side
5critical section. Each flavor of RCU is tracked separately (but note
6that this is not the case in 2.6.32 and earlier). This allows lockdep's
7tracking to include RCU state, which can sometimes help when debugging
8deadlocks and the like.
9
10In addition, RCU provides the following primitives that check lockdep's
11state:
12
13 rcu_read_lock_held() for normal RCU.
14 rcu_read_lock_bh_held() for RCU-bh.
15 rcu_read_lock_sched_held() for RCU-sched.
16 srcu_read_lock_held() for SRCU.
17
18These functions are conservative, and will therefore return 1 if they
19aren't certain (for example, if CONFIG_DEBUG_LOCK_ALLOC is not set).
20This prevents things like WARN_ON(!rcu_read_lock_held()) from giving false
21positives when lockdep is disabled.
22
23In addition, a separate kernel config parameter CONFIG_PROVE_RCU enables
24checking of rcu_dereference() primitives:
25
26 rcu_dereference(p):
27 Check for RCU read-side critical section.
28 rcu_dereference_bh(p):
29 Check for RCU-bh read-side critical section.
30 rcu_dereference_sched(p):
31 Check for RCU-sched read-side critical section.
32 srcu_dereference(p, sp):
33 Check for SRCU read-side critical section.
34 rcu_dereference_check(p, c):
35 Use explicit check expression "c".
 36 rcu_dereference_raw(p):
37 Don't check. (Use sparingly, if at all.)
38
39The rcu_dereference_check() check expression can be any boolean
40expression, but would normally include one of the rcu_read_lock_held()
41family of functions and a lockdep expression. However, any boolean
42expression can be used. For a moderately ornate example, consider
43the following:
44
45 file = rcu_dereference_check(fdt->fd[fd],
46 rcu_read_lock_held() ||
47 lockdep_is_held(&files->file_lock) ||
48 atomic_read(&files->count) == 1);
49
50This expression picks up the pointer "fdt->fd[fd]" in an RCU-safe manner,
51and, if CONFIG_PROVE_RCU is configured, verifies that this expression
52is used in:
53
541. An RCU read-side critical section, or
552. with files->file_lock held, or
563. on an unshared files_struct.
57
58In case (1), the pointer is picked up in an RCU-safe manner for vanilla
59RCU read-side critical sections, in case (2) the ->file_lock prevents
60any change from taking place, and finally, in case (3) the current task
61is the only task accessing the file_struct, again preventing any change
62from taking place.
63
64There are currently only "universal" versions of the rcu_assign_pointer()
65and RCU list-/tree-traversal primitives, which do not (yet) check for
66being in an RCU read-side critical section. In the future, separate
67versions of these primitives might be created.
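
A minimal sketch (not part of the patch) of how these checks might be used in code shared by readers and updaters; "my_data", "my_data_ptr", and "my_lock" are hypothetical names:

	#include <linux/kernel.h>
	#include <linux/rcupdate.h>
	#include <linux/spinlock.h>

	struct my_data {
		int field;
	};

	static struct my_data *my_data_ptr;
	static DEFINE_SPINLOCK(my_lock);

	/*
	 * Called both from readers (under rcu_read_lock()) and from updaters
	 * (holding my_lock).  With CONFIG_PROVE_RCU, lockdep verifies that at
	 * least one of the two conditions holds.
	 */
	static int my_data_field(void)
	{
		struct my_data *p;

		p = rcu_dereference_check(my_data_ptr,
					  rcu_read_lock_held() ||
					  lockdep_is_held(&my_lock));
		return p ? p->field : 0;
	}

	/*
	 * The *_held() functions are conservative (returning 1 when lockdep
	 * cannot prove otherwise), so assertions like this one do not give
	 * false positives when lockdep is disabled.
	 */
	static void my_reader_only_helper(void)
	{
		WARN_ON_ONCE(!rcu_read_lock_held());
	}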
diff --git a/Documentation/RCU/rcu.txt b/Documentation/RCU/rcu.txt
index 2a23523ce471..31852705b586 100644
--- a/Documentation/RCU/rcu.txt
+++ b/Documentation/RCU/rcu.txt
@@ -75,6 +75,8 @@ o I hear that RCU is patented? What is with that?
75 search for the string "Patent" in RTFP.txt to find them. 75 search for the string "Patent" in RTFP.txt to find them.
76 Of these, one was allowed to lapse by the assignee, and the 76 Of these, one was allowed to lapse by the assignee, and the
77 others have been contributed to the Linux kernel under GPL. 77 others have been contributed to the Linux kernel under GPL.
78 There are now also LGPL implementations of user-level RCU
79 available (http://lttng.org/?q=node/18).
78 80
79o I hear that RCU needs work in order to support realtime kernels? 81o I hear that RCU needs work in order to support realtime kernels?
80 82
@@ -91,48 +93,4 @@ o Where can I find more information on RCU?
91 93
92o What are all these files in this directory? 94o What are all these files in this directory?
93 95
94 96 See 00-INDEX for the list.
95 NMI-RCU.txt
96
97 Describes how to use RCU to implement dynamic
98 NMI handlers, which can be revectored on the fly,
99 without rebooting.
100
101 RTFP.txt
102
103 List of RCU-related publications and web sites.
104
105 UP.txt
106
107 Discussion of RCU usage in UP kernels.
108
109 arrayRCU.txt
110
111 Describes how to use RCU to protect arrays, with
112 resizeable arrays whose elements reference other
113 data structures being of the most interest.
114
115 checklist.txt
116
117 Lists things to check for when inspecting code that
118 uses RCU.
119
120 listRCU.txt
121
122 Describes how to use RCU to protect linked lists.
123 This is the simplest and most common use of RCU
124 in the Linux kernel.
125
126 rcu.txt
127
128 You are reading it!
129
130 rcuref.txt
131
132 Describes how to combine use of reference counts
133 with RCU.
134
135 whatisRCU.txt
136
137 Overview of how the RCU implementation works. Along
138 the way, presents a conceptual view of RCU.
diff --git a/Documentation/RCU/stallwarn.txt b/Documentation/RCU/stallwarn.txt
new file mode 100644
index 000000000000..1423d2570d78
--- /dev/null
+++ b/Documentation/RCU/stallwarn.txt
@@ -0,0 +1,58 @@
1Using RCU's CPU Stall Detector
2
3The CONFIG_RCU_CPU_STALL_DETECTOR kernel config parameter enables
4RCU's CPU stall detector, which detects conditions that unduly delay
5RCU grace periods. The stall detector's idea of what constitutes
6"unduly delayed" is controlled by a pair of C preprocessor macros:
7
8RCU_SECONDS_TILL_STALL_CHECK
9
10 This macro defines the period of time that RCU will wait from
11 the beginning of a grace period until it issues an RCU CPU
12 stall warning. It is normally ten seconds.
13
14RCU_SECONDS_TILL_STALL_RECHECK
15
16 This macro defines the period of time that RCU will wait after
17 issuing a stall warning until it issues another stall warning.
18 It is normally set to thirty seconds.
19
20RCU_STALL_RAT_DELAY
21
22 The CPU stall detector tries to make the offending CPU rat on itself,
23 as this often gives better-quality stack traces. However, if
24 the offending CPU does not detect its own stall in the number
25 of jiffies specified by RCU_STALL_RAT_DELAY, then other CPUs will
26 complain. This is normally set to two jiffies.
27
28The following problems can result in an RCU CPU stall warning:
29
30o A CPU looping in an RCU read-side critical section.
31
32o A CPU looping with interrupts disabled.
33
34o A CPU looping with preemption disabled.
35
36o For !CONFIG_PREEMPT kernels, a CPU looping anywhere in the kernel
37 without invoking schedule().
38
39o A bug in the RCU implementation.
40
41o A hardware failure. This is quite unlikely, but has occurred
42 at least once in a former life. A CPU failed in a running system,
43 becoming unresponsive, but not causing an immediate crash.
44 This resulted in a series of RCU CPU stall warnings, eventually
 45 leading to the realization that the CPU had failed.
46
47The RCU, RCU-sched, and RCU-bh implementations have CPU stall warning capability.
48SRCU does not do so directly, but its calls to synchronize_sched() will
49result in RCU-sched detecting any CPU stalls that might be occurring.
50
51To diagnose the cause of the stall, inspect the stack traces. The offending
52function will usually be near the top of the stack. If you have a series
53of stall warnings from a single extended stall, comparing the stack traces
54can often help determine where the stall is occurring, which will usually
55be in the function nearest the top of the stack that stays the same from
56trace to trace.
57
58RCU bugs can often be debugged with the help of CONFIG_RCU_TRACE.
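
For illustration only (not part of the patch), the first item on the list above could be produced by a hypothetical reader like the one below, which spins inside an RCU read-side critical section and thereby prevents grace periods from completing:

	#include <linux/compiler.h>
	#include <linux/rcupdate.h>

	static int stop_flag;	/* never set in this buggy example */

	static void buggy_reader(void)
	{
		rcu_read_lock();
		while (!ACCESS_ONCE(stop_flag))
			;	/* spins forever; expect a stall warning after
				 * roughly RCU_SECONDS_TILL_STALL_CHECK seconds */
		rcu_read_unlock();
	}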
diff --git a/Documentation/RCU/torture.txt b/Documentation/RCU/torture.txt
index 9dba3bb90e60..0e50bc2aa1e2 100644
--- a/Documentation/RCU/torture.txt
+++ b/Documentation/RCU/torture.txt
@@ -30,6 +30,18 @@ MODULE PARAMETERS
30 30
31This module has the following parameters: 31This module has the following parameters:
32 32
33fqs_duration Duration (in microseconds) of artificially induced bursts
34 of force_quiescent_state() invocations. In RCU
35 implementations having force_quiescent_state(), these
36 bursts help force races between forcing a given grace
37 period and that grace period ending on its own.
38
39fqs_holdoff Holdoff time (in microseconds) between consecutive calls
40 to force_quiescent_state() within a burst.
41
42fqs_stutter Wait time (in seconds) between consecutive bursts
43 of calls to force_quiescent_state().
44
33irqreaders Says to invoke RCU readers from irq level. This is currently 45irqreaders Says to invoke RCU readers from irq level. This is currently
34 done via timers. Defaults to "1" for variants of RCU that 46 done via timers. Defaults to "1" for variants of RCU that
35 permit this. (Or, more accurately, variants of RCU that do 47 permit this. (Or, more accurately, variants of RCU that do
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
index d542ca243b80..1dc00ee97163 100644
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -323,14 +323,17 @@ used as follows:
323 Defer Protect 323 Defer Protect
324 324
325a. synchronize_rcu() rcu_read_lock() / rcu_read_unlock() 325a. synchronize_rcu() rcu_read_lock() / rcu_read_unlock()
326 call_rcu() 326 call_rcu() rcu_dereference()
327 327
328b. call_rcu_bh() rcu_read_lock_bh() / rcu_read_unlock_bh() 328b. call_rcu_bh() rcu_read_lock_bh() / rcu_read_unlock_bh()
329 rcu_dereference_bh()
329 330
330c. synchronize_sched() preempt_disable() / preempt_enable() 331c. synchronize_sched() rcu_read_lock_sched() / rcu_read_unlock_sched()
332 preempt_disable() / preempt_enable()
331 local_irq_save() / local_irq_restore() 333 local_irq_save() / local_irq_restore()
332 hardirq enter / hardirq exit 334 hardirq enter / hardirq exit
333 NMI enter / NMI exit 335 NMI enter / NMI exit
336 rcu_dereference_sched()
334 337
335These three mechanisms are used as follows: 338These three mechanisms are used as follows:
336 339
@@ -780,9 +783,8 @@ Linux-kernel source code, but it helps to have a full list of the
780APIs, since there does not appear to be a way to categorize them 783APIs, since there does not appear to be a way to categorize them
781in docbook. Here is the list, by category. 784in docbook. Here is the list, by category.
782 785
783RCU pointer/list traversal: 786RCU list traversal:
784 787
785 rcu_dereference
786 list_for_each_entry_rcu 788 list_for_each_entry_rcu
787 hlist_for_each_entry_rcu 789 hlist_for_each_entry_rcu
788 hlist_nulls_for_each_entry_rcu 790 hlist_nulls_for_each_entry_rcu
@@ -808,7 +810,7 @@ RCU: Critical sections Grace period Barrier
808 810
809 rcu_read_lock synchronize_net rcu_barrier 811 rcu_read_lock synchronize_net rcu_barrier
810 rcu_read_unlock synchronize_rcu 812 rcu_read_unlock synchronize_rcu
811 synchronize_rcu_expedited 813 rcu_dereference synchronize_rcu_expedited
812 call_rcu 814 call_rcu
813 815
814 816
@@ -816,7 +818,7 @@ bh: Critical sections Grace period Barrier
816 818
817 rcu_read_lock_bh call_rcu_bh rcu_barrier_bh 819 rcu_read_lock_bh call_rcu_bh rcu_barrier_bh
818 rcu_read_unlock_bh synchronize_rcu_bh 820 rcu_read_unlock_bh synchronize_rcu_bh
819 synchronize_rcu_bh_expedited 821 rcu_dereference_bh synchronize_rcu_bh_expedited
820 822
821 823
822sched: Critical sections Grace period Barrier 824sched: Critical sections Grace period Barrier
@@ -825,12 +827,14 @@ sched: Critical sections Grace period Barrier
825 rcu_read_unlock_sched call_rcu_sched 827 rcu_read_unlock_sched call_rcu_sched
826 [preempt_disable] synchronize_sched_expedited 828 [preempt_disable] synchronize_sched_expedited
827 [and friends] 829 [and friends]
830 rcu_dereference_sched
828 831
829 832
830SRCU: Critical sections Grace period Barrier 833SRCU: Critical sections Grace period Barrier
831 834
832 srcu_read_lock synchronize_srcu N/A 835 srcu_read_lock synchronize_srcu N/A
833 srcu_read_unlock synchronize_srcu_expedited 836 srcu_read_unlock synchronize_srcu_expedited
837 srcu_dereference
834 838
835SRCU: Initialization/cleanup 839SRCU: Initialization/cleanup
836 init_srcu_struct 840 init_srcu_struct
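
The SRCU rows of the tables above might translate into code roughly as follows. This is an illustrative sketch, not part of the patch; "my_srcu", "struct blob", and "global_blob" are hypothetical, and updaters are assumed to be serialized by the caller:

	#include <linux/rcupdate.h>
	#include <linux/slab.h>
	#include <linux/srcu.h>

	struct blob {
		int data;
	};

	static struct srcu_struct my_srcu;	/* needs init_srcu_struct() */
	static struct blob *global_blob;

	static int blob_read(void)
	{
		struct blob *p;
		int idx, val = -1;

		idx = srcu_read_lock(&my_srcu);
		p = srcu_dereference(global_blob, &my_srcu);
		if (p)
			val = p->data;	/* sleeping is legal here, unlike RCU */
		srcu_read_unlock(&my_srcu, idx);
		return val;
	}

	static void blob_replace(struct blob *newp)
	{
		struct blob *oldp = global_blob;

		rcu_assign_pointer(global_blob, newp);
		synchronize_srcu(&my_srcu);	/* waits only for my_srcu readers */
		kfree(oldp);
	}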
diff --git a/Documentation/filesystems/dentry-locking.txt b/Documentation/filesystems/dentry-locking.txt
index 4c0c575a4012..79334ed5daa7 100644
--- a/Documentation/filesystems/dentry-locking.txt
+++ b/Documentation/filesystems/dentry-locking.txt
@@ -62,7 +62,8 @@ changes are :
622. Insertion of a dentry into the hash table is done using 622. Insertion of a dentry into the hash table is done using
63 hlist_add_head_rcu() which take care of ordering the writes - the 63 hlist_add_head_rcu() which take care of ordering the writes - the
64 writes to the dentry must be visible before the dentry is 64 writes to the dentry must be visible before the dentry is
65 inserted. This works in conjunction with hlist_for_each_rcu() while 65 inserted. This works in conjunction with hlist_for_each_rcu(),
66 which has since been replaced by hlist_for_each_entry_rcu(), while
66 walking the hash chain. The only requirement is that all 67 walking the hash chain. The only requirement is that all
67 initialization to the dentry must be done before 68 initialization to the dentry must be done before
68 hlist_add_head_rcu() since we don't have dcache_lock protection 69 hlist_add_head_rcu() since we don't have dcache_lock protection
diff --git a/MAINTAINERS b/MAINTAINERS
index 2653b44f90da..f520dd0862b1 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4544,7 +4544,7 @@ F: drivers/net/wireless/ray*
4544RCUTORTURE MODULE 4544RCUTORTURE MODULE
4545M: Josh Triplett <josh@freedesktop.org> 4545M: Josh Triplett <josh@freedesktop.org>
4546M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> 4546M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
4547S: Maintained 4547S: Supported
4548F: Documentation/RCU/torture.txt 4548F: Documentation/RCU/torture.txt
4549F: kernel/rcutorture.c 4549F: kernel/rcutorture.c
4550 4550
@@ -4569,11 +4569,12 @@ M: Dipankar Sarma <dipankar@in.ibm.com>
4569M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> 4569M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
4570W: http://www.rdrop.com/users/paulmck/rclock/ 4570W: http://www.rdrop.com/users/paulmck/rclock/
4571S: Supported 4571S: Supported
4572F: Documentation/RCU/rcu.txt 4572F: Documentation/RCU/
4573F: Documentation/RCU/rcuref.txt 4573F: include/linux/rcu*
4574F: include/linux/rcupdate.h 4574F: include/linux/srcu*
4575F: include/linux/srcu.h 4575F: kernel/rcu*
4576F: kernel/rcupdate.c 4576F: kernel/srcu*
4577X: kernel/rcutorture.c
4577 4578
4578REAL TIME CLOCK DRIVER 4579REAL TIME CLOCK DRIVER
4579M: Paul Gortmaker <p_gortmaker@yahoo.com> 4580M: Paul Gortmaker <p_gortmaker@yahoo.com>
diff --git a/fs/file.c b/fs/file.c
index 87e129030ab1..38039af67663 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -478,7 +478,7 @@ repeat:
478 error = fd; 478 error = fd;
479#if 1 479#if 1
480 /* Sanity check */ 480 /* Sanity check */
481 if (rcu_dereference(fdt->fd[fd]) != NULL) { 481 if (rcu_dereference_raw(fdt->fd[fd]) != NULL) {
482 printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd); 482 printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
483 rcu_assign_pointer(fdt->fd[fd], NULL); 483 rcu_assign_pointer(fdt->fd[fd], NULL);
484 } 484 }
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 13b5d0708175..18e20feee251 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -270,7 +270,9 @@ static inline void task_sig(struct seq_file *m, struct task_struct *p)
270 blocked = p->blocked; 270 blocked = p->blocked;
271 collect_sigign_sigcatch(p, &ignored, &caught); 271 collect_sigign_sigcatch(p, &ignored, &caught);
272 num_threads = atomic_read(&p->signal->count); 272 num_threads = atomic_read(&p->signal->count);
273 rcu_read_lock(); /* FIXME: is this correct? */
273 qsize = atomic_read(&__task_cred(p)->user->sigpending); 274 qsize = atomic_read(&__task_cred(p)->user->sigpending);
275 rcu_read_unlock();
274 qlim = p->signal->rlim[RLIMIT_SIGPENDING].rlim_cur; 276 qlim = p->signal->rlim[RLIMIT_SIGPENDING].rlim_cur;
275 unlock_task_sighand(p, &flags); 277 unlock_task_sighand(p, &flags);
276 } 278 }
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 58324c299165..623e2ffb5d2b 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1095,8 +1095,12 @@ static ssize_t proc_loginuid_write(struct file * file, const char __user * buf,
1095 if (!capable(CAP_AUDIT_CONTROL)) 1095 if (!capable(CAP_AUDIT_CONTROL))
1096 return -EPERM; 1096 return -EPERM;
1097 1097
1098 if (current != pid_task(proc_pid(inode), PIDTYPE_PID)) 1098 rcu_read_lock();
1099 if (current != pid_task(proc_pid(inode), PIDTYPE_PID)) {
1100 rcu_read_unlock();
1099 return -EPERM; 1101 return -EPERM;
1102 }
1103 rcu_read_unlock();
1100 1104
1101 if (count >= PAGE_SIZE) 1105 if (count >= PAGE_SIZE)
1102 count = PAGE_SIZE - 1; 1106 count = PAGE_SIZE - 1;
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 0008dee66514..c9bbcb2a75ae 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -28,6 +28,7 @@ struct css_id;
28extern int cgroup_init_early(void); 28extern int cgroup_init_early(void);
29extern int cgroup_init(void); 29extern int cgroup_init(void);
30extern void cgroup_lock(void); 30extern void cgroup_lock(void);
31extern int cgroup_lock_is_held(void);
31extern bool cgroup_lock_live_group(struct cgroup *cgrp); 32extern bool cgroup_lock_live_group(struct cgroup *cgrp);
32extern void cgroup_unlock(void); 33extern void cgroup_unlock(void);
33extern void cgroup_fork(struct task_struct *p); 34extern void cgroup_fork(struct task_struct *p);
@@ -486,7 +487,9 @@ static inline struct cgroup_subsys_state *cgroup_subsys_state(
486static inline struct cgroup_subsys_state *task_subsys_state( 487static inline struct cgroup_subsys_state *task_subsys_state(
487 struct task_struct *task, int subsys_id) 488 struct task_struct *task, int subsys_id)
488{ 489{
489 return rcu_dereference(task->cgroups->subsys[subsys_id]); 490 return rcu_dereference_check(task->cgroups->subsys[subsys_id],
491 rcu_read_lock_held() ||
492 cgroup_lock_is_held());
490} 493}
491 494
492static inline struct cgroup* task_cgroup(struct task_struct *task, 495static inline struct cgroup* task_cgroup(struct task_struct *task,
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index d77b54733c5b..dbcee7647d9a 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -143,6 +143,8 @@ static inline unsigned int cpumask_any_but(const struct cpumask *mask,
143 143
144#define for_each_cpu(cpu, mask) \ 144#define for_each_cpu(cpu, mask) \
145 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) 145 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
146#define for_each_cpu_not(cpu, mask) \
147 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
146#define for_each_cpu_and(cpu, mask, and) \ 148#define for_each_cpu_and(cpu, mask, and) \
147 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and) 149 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and)
148#else 150#else
@@ -203,6 +205,18 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
203 (cpu) < nr_cpu_ids;) 205 (cpu) < nr_cpu_ids;)
204 206
205/** 207/**
208 * for_each_cpu_not - iterate over every cpu in a complemented mask
209 * @cpu: the (optionally unsigned) integer iterator
210 * @mask: the cpumask pointer
211 *
212 * After the loop, cpu is >= nr_cpu_ids.
213 */
214#define for_each_cpu_not(cpu, mask) \
215 for ((cpu) = -1; \
216 (cpu) = cpumask_next_zero((cpu), (mask)), \
217 (cpu) < nr_cpu_ids;)
218
219/**
206 * for_each_cpu_and - iterate over every cpu in both masks 220 * for_each_cpu_and - iterate over every cpu in both masks
207 * @cpu: the (optionally unsigned) integer iterator 221 * @cpu: the (optionally unsigned) integer iterator
208 * @mask: the first cpumask pointer 222 * @mask: the first cpumask pointer
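
A short sketch (not part of the patch) of the new iterator in use; the function and mask names are hypothetical:

	#include <linux/cpumask.h>
	#include <linux/kernel.h>

	static void report_cpus_outside(const struct cpumask *active_mask)
	{
		unsigned int cpu;

		/* Visits every CPU number that is clear in active_mask;
		 * after the loop, cpu is >= nr_cpu_ids. */
		for_each_cpu_not(cpu, active_mask)
			pr_info("cpu %u is not in the mask\n", cpu);
	}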
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 4e3387a89cb9..4db09f89b637 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -280,7 +280,7 @@ static inline void put_cred(const struct cred *_cred)
280 * task or by holding tasklist_lock to prevent it from being unlinked. 280 * task or by holding tasklist_lock to prevent it from being unlinked.
281 */ 281 */
282#define __task_cred(task) \ 282#define __task_cred(task) \
283 ((const struct cred *)(rcu_dereference((task)->real_cred))) 283 ((const struct cred *)(rcu_dereference_check((task)->real_cred, rcu_read_lock_held() || lockdep_is_held(&tasklist_lock))))
284 284
285/** 285/**
286 * get_task_cred - Get another task's objective credentials 286 * get_task_cred - Get another task's objective credentials
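
For illustration only (not part of the patch), a hypothetical caller satisfying the new check by holding rcu_read_lock() around __task_cred():

	#include <linux/cred.h>
	#include <linux/rcupdate.h>
	#include <linux/sched.h>

	static uid_t task_uid_of(struct task_struct *task)
	{
		uid_t uid;

		rcu_read_lock();
		/* rcu_dereference_check() is satisfied: rcu_read_lock() held */
		uid = __task_cred(task)->uid;
		rcu_read_unlock();
		return uid;
	}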
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index a2ec74bc4812..013dc529e95f 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -57,7 +57,14 @@ struct files_struct {
57 struct file * fd_array[NR_OPEN_DEFAULT]; 57 struct file * fd_array[NR_OPEN_DEFAULT];
58}; 58};
59 59
60#define files_fdtable(files) (rcu_dereference((files)->fdt)) 60#define rcu_dereference_check_fdtable(files, fdtfd) \
61 (rcu_dereference_check((fdtfd), \
62 rcu_read_lock_held() || \
63 lockdep_is_held(&(files)->file_lock) || \
64 atomic_read(&(files)->count) == 1))
65
66#define files_fdtable(files) \
67 (rcu_dereference_check_fdtable((files), (files)->fdt))
61 68
62struct file_operations; 69struct file_operations;
63struct vfsmount; 70struct vfsmount;
@@ -78,7 +85,7 @@ static inline struct file * fcheck_files(struct files_struct *files, unsigned in
78 struct fdtable *fdt = files_fdtable(files); 85 struct fdtable *fdt = files_fdtable(files);
79 86
80 if (fd < fdt->max_fds) 87 if (fd < fdt->max_fds)
81 file = rcu_dereference(fdt->fd[fd]); 88 file = rcu_dereference_check_fdtable(files, fdt->fd[fd]);
82 return file; 89 return file;
83} 90}
84 91
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 9ccf0e286b2a..10206a87da19 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -534,4 +534,8 @@ do { \
534# define might_lock_read(lock) do { } while (0) 534# define might_lock_read(lock) do { } while (0)
535#endif 535#endif
536 536
537#ifdef CONFIG_PROVE_RCU
538extern void lockdep_rcu_dereference(const char *file, const int line);
539#endif
540
537#endif /* __LINUX_LOCKDEP_H */ 541#endif /* __LINUX_LOCKDEP_H */
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 1bf0f708c4fc..779d70749beb 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -208,7 +208,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
208 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). 208 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
209 */ 209 */
210#define list_entry_rcu(ptr, type, member) \ 210#define list_entry_rcu(ptr, type, member) \
211 container_of(rcu_dereference(ptr), type, member) 211 container_of(rcu_dereference_raw(ptr), type, member)
212 212
213/** 213/**
214 * list_first_entry_rcu - get the first element from a list 214 * list_first_entry_rcu - get the first element from a list
@@ -225,9 +225,9 @@ static inline void list_splice_init_rcu(struct list_head *list,
225 list_entry_rcu((ptr)->next, type, member) 225 list_entry_rcu((ptr)->next, type, member)
226 226
227#define __list_for_each_rcu(pos, head) \ 227#define __list_for_each_rcu(pos, head) \
228 for (pos = rcu_dereference((head)->next); \ 228 for (pos = rcu_dereference_raw((head)->next); \
229 pos != (head); \ 229 pos != (head); \
230 pos = rcu_dereference(pos->next)) 230 pos = rcu_dereference_raw(pos->next))
231 231
232/** 232/**
233 * list_for_each_entry_rcu - iterate over rcu list of given type 233 * list_for_each_entry_rcu - iterate over rcu list of given type
@@ -257,9 +257,9 @@ static inline void list_splice_init_rcu(struct list_head *list,
257 * as long as the traversal is guarded by rcu_read_lock(). 257 * as long as the traversal is guarded by rcu_read_lock().
258 */ 258 */
259#define list_for_each_continue_rcu(pos, head) \ 259#define list_for_each_continue_rcu(pos, head) \
260 for ((pos) = rcu_dereference((pos)->next); \ 260 for ((pos) = rcu_dereference_raw((pos)->next); \
261 prefetch((pos)->next), (pos) != (head); \ 261 prefetch((pos)->next), (pos) != (head); \
262 (pos) = rcu_dereference((pos)->next)) 262 (pos) = rcu_dereference_raw((pos)->next))
263 263
264/** 264/**
265 * list_for_each_entry_continue_rcu - continue iteration over list of given type 265 * list_for_each_entry_continue_rcu - continue iteration over list of given type
@@ -418,10 +418,10 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
418 * as long as the traversal is guarded by rcu_read_lock(). 418 * as long as the traversal is guarded by rcu_read_lock().
419 */ 419 */
420#define hlist_for_each_entry_rcu(tpos, pos, head, member) \ 420#define hlist_for_each_entry_rcu(tpos, pos, head, member) \
421 for (pos = rcu_dereference((head)->first); \ 421 for (pos = rcu_dereference_raw((head)->first); \
422 pos && ({ prefetch(pos->next); 1; }) && \ 422 pos && ({ prefetch(pos->next); 1; }) && \
423 ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ 423 ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
424 pos = rcu_dereference(pos->next)) 424 pos = rcu_dereference_raw(pos->next))
425 425
426#endif /* __KERNEL__ */ 426#endif /* __KERNEL__ */
427#endif 427#endif
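
An illustrative reader (not part of the patch) using list_for_each_entry_rcu(), which now resolves to rcu_dereference_raw() internally; "struct item", "item_list", and "lookup_value" are hypothetical, and updaters are assumed to add entries with list_add_rcu() under their own lock:

	#include <linux/rculist.h>
	#include <linux/rcupdate.h>

	struct item {
		struct list_head link;
		int key;
		int value;
	};

	static LIST_HEAD(item_list);

	static int lookup_value(int key)
	{
		struct item *it;
		int value = -1;

		rcu_read_lock();
		list_for_each_entry_rcu(it, &item_list, link) {
			if (it->key == key) {
				value = it->value;
				break;
			}
		}
		rcu_read_unlock();
		return value;
	}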
diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
index 589a40919f01..b70ffe53cb9f 100644
--- a/include/linux/rculist_nulls.h
+++ b/include/linux/rculist_nulls.h
@@ -101,10 +101,10 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
101 * 101 *
102 */ 102 */
103#define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \ 103#define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \
104 for (pos = rcu_dereference((head)->first); \ 104 for (pos = rcu_dereference_raw((head)->first); \
105 (!is_a_nulls(pos)) && \ 105 (!is_a_nulls(pos)) && \
106 ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \ 106 ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \
107 pos = rcu_dereference(pos->next)) 107 pos = rcu_dereference_raw(pos->next))
108 108
109#endif 109#endif
110#endif 110#endif
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 24440f4bf476..c84373626336 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -62,6 +62,8 @@ extern int sched_expedited_torture_stats(char *page);
62 62
63/* Internal to kernel */ 63/* Internal to kernel */
64extern void rcu_init(void); 64extern void rcu_init(void);
65extern int rcu_scheduler_active;
66extern void rcu_scheduler_starting(void);
65 67
66#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) 68#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
67#include <linux/rcutree.h> 69#include <linux/rcutree.h>
@@ -78,14 +80,120 @@ extern void rcu_init(void);
78} while (0) 80} while (0)
79 81
80#ifdef CONFIG_DEBUG_LOCK_ALLOC 82#ifdef CONFIG_DEBUG_LOCK_ALLOC
83
81extern struct lockdep_map rcu_lock_map; 84extern struct lockdep_map rcu_lock_map;
82# define rcu_read_acquire() \ 85# define rcu_read_acquire() \
83 lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_) 86 lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
84# define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_) 87# define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_)
85#else 88
86# define rcu_read_acquire() do { } while (0) 89extern struct lockdep_map rcu_bh_lock_map;
87# define rcu_read_release() do { } while (0) 90# define rcu_read_acquire_bh() \
88#endif 91 lock_acquire(&rcu_bh_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
92# define rcu_read_release_bh() lock_release(&rcu_bh_lock_map, 1, _THIS_IP_)
93
94extern struct lockdep_map rcu_sched_lock_map;
95# define rcu_read_acquire_sched() \
96 lock_acquire(&rcu_sched_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
97# define rcu_read_release_sched() \
98 lock_release(&rcu_sched_lock_map, 1, _THIS_IP_)
99
100/**
101 * rcu_read_lock_held - might we be in RCU read-side critical section?
102 *
103 * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in
104 * an RCU read-side critical section. In absence of CONFIG_PROVE_LOCKING,
105 * this assumes we are in an RCU read-side critical section unless it can
106 * prove otherwise.
107 */
108static inline int rcu_read_lock_held(void)
109{
110 if (debug_locks)
111 return lock_is_held(&rcu_lock_map);
112 return 1;
113}
114
115/**
116 * rcu_read_lock_bh_held - might we be in RCU-bh read-side critical section?
117 *
118 * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in
119 * an RCU-bh read-side critical section. In absence of CONFIG_PROVE_LOCKING,
120 * this assumes we are in an RCU-bh read-side critical section unless it can
121 * prove otherwise.
122 */
123static inline int rcu_read_lock_bh_held(void)
124{
125 if (debug_locks)
126 return lock_is_held(&rcu_bh_lock_map);
127 return 1;
128}
129
130/**
131 * rcu_read_lock_sched_held - might we be in RCU-sched read-side critical section?
132 *
133 * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in an
134 * RCU-sched read-side critical section. In absence of CONFIG_PROVE_LOCKING,
135 * this assumes we are in an RCU-sched read-side critical section unless it
136 * can prove otherwise. Note that disabling of preemption (including
137 * disabling irqs) counts as an RCU-sched read-side critical section.
138 */
139static inline int rcu_read_lock_sched_held(void)
140{
141 int lockdep_opinion = 0;
142
143 if (debug_locks)
144 lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
145 return lockdep_opinion || preempt_count() != 0 || !rcu_scheduler_active;
146}
147
148#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
149
150# define rcu_read_acquire() do { } while (0)
151# define rcu_read_release() do { } while (0)
152# define rcu_read_acquire_bh() do { } while (0)
153# define rcu_read_release_bh() do { } while (0)
154# define rcu_read_acquire_sched() do { } while (0)
155# define rcu_read_release_sched() do { } while (0)
156
157static inline int rcu_read_lock_held(void)
158{
159 return 1;
160}
161
162static inline int rcu_read_lock_bh_held(void)
163{
164 return 1;
165}
166
167static inline int rcu_read_lock_sched_held(void)
168{
169 return preempt_count() != 0 || !rcu_scheduler_active;
170}
171
172#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
173
174#ifdef CONFIG_PROVE_RCU
175
176/**
177 * rcu_dereference_check - rcu_dereference with debug checking
178 *
179 * Do an rcu_dereference(), but check that the context is correct.
180 * For example, rcu_dereference_check(gp, rcu_read_lock_held()) to
181 * ensure that the rcu_dereference_check() executes within an RCU
182 * read-side critical section. It is also possible to check for
183 * locks being held, for example, by using lockdep_is_held().
184 */
185#define rcu_dereference_check(p, c) \
186 ({ \
187 if (debug_locks && !(c)) \
188 lockdep_rcu_dereference(__FILE__, __LINE__); \
189 rcu_dereference_raw(p); \
190 })
191
192#else /* #ifdef CONFIG_PROVE_RCU */
193
194#define rcu_dereference_check(p, c) rcu_dereference_raw(p)
195
196#endif /* #else #ifdef CONFIG_PROVE_RCU */
89 197
90/** 198/**
91 * rcu_read_lock - mark the beginning of an RCU read-side critical section. 199 * rcu_read_lock - mark the beginning of an RCU read-side critical section.
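
The rcu_dereference_check() primitive added above lets a caller state exactly which protection justifies the access, so that PROVE_RCU can verify it. A minimal sketch of the intended usage (the configuration structure, pointer, and mutex here are invented for illustration): a pointer that may legally be read either inside rcu_read_lock() or while holding the update-side mutex names both conditions in the check.

#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/lockdep.h>

struct my_conf {
	int value;
};

static struct my_conf *global_conf;		/* RCU-protected */
static DEFINE_MUTEX(conf_mutex);		/* taken by updaters of global_conf */

/* Legal either inside rcu_read_lock() or with conf_mutex held; the
 * second argument documents exactly that for PROVE_RCU. */
static int read_conf_value(void)
{
	struct my_conf *c;
	int v = 0;

	rcu_read_lock();
	c = rcu_dereference_check(global_conf,
				  rcu_read_lock_held() ||
				  lockdep_is_held(&conf_mutex));
	if (c)
		v = c->value;
	rcu_read_unlock();
	return v;
}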
@@ -160,7 +268,7 @@ static inline void rcu_read_lock_bh(void)
160{ 268{
161 __rcu_read_lock_bh(); 269 __rcu_read_lock_bh();
162 __acquire(RCU_BH); 270 __acquire(RCU_BH);
163 rcu_read_acquire(); 271 rcu_read_acquire_bh();
164} 272}
165 273
166/* 274/*
@@ -170,7 +278,7 @@ static inline void rcu_read_lock_bh(void)
170 */ 278 */
171static inline void rcu_read_unlock_bh(void) 279static inline void rcu_read_unlock_bh(void)
172{ 280{
173 rcu_read_release(); 281 rcu_read_release_bh();
174 __release(RCU_BH); 282 __release(RCU_BH);
175 __rcu_read_unlock_bh(); 283 __rcu_read_unlock_bh();
176} 284}
@@ -188,7 +296,7 @@ static inline void rcu_read_lock_sched(void)
188{ 296{
189 preempt_disable(); 297 preempt_disable();
190 __acquire(RCU_SCHED); 298 __acquire(RCU_SCHED);
191 rcu_read_acquire(); 299 rcu_read_acquire_sched();
192} 300}
193 301
194/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */ 302/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
@@ -205,7 +313,7 @@ static inline notrace void rcu_read_lock_sched_notrace(void)
205 */ 313 */
206static inline void rcu_read_unlock_sched(void) 314static inline void rcu_read_unlock_sched(void)
207{ 315{
208 rcu_read_release(); 316 rcu_read_release_sched();
209 __release(RCU_SCHED); 317 __release(RCU_SCHED);
210 preempt_enable(); 318 preempt_enable();
211} 319}
@@ -219,22 +327,49 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
219 327
220 328
221/** 329/**
222 * rcu_dereference - fetch an RCU-protected pointer in an 330 * rcu_dereference_raw - fetch an RCU-protected pointer
223 * RCU read-side critical section. This pointer may later 331 *
224 * be safely dereferenced. 332 * The caller must be within some flavor of RCU read-side critical
333 * section, or must be otherwise preventing the pointer from changing,
334 * for example, by holding an appropriate lock. This pointer may later
335 * be safely dereferenced. It is the caller's responsibility to have
336 * done the right thing, as this primitive does no checking of any kind.
225 * 337 *
226 * Inserts memory barriers on architectures that require them 338 * Inserts memory barriers on architectures that require them
227 * (currently only the Alpha), and, more importantly, documents 339 * (currently only the Alpha), and, more importantly, documents
228 * exactly which pointers are protected by RCU. 340 * exactly which pointers are protected by RCU.
229 */ 341 */
230 342#define rcu_dereference_raw(p) ({ \
231#define rcu_dereference(p) ({ \
232 typeof(p) _________p1 = ACCESS_ONCE(p); \ 343 typeof(p) _________p1 = ACCESS_ONCE(p); \
233 smp_read_barrier_depends(); \ 344 smp_read_barrier_depends(); \
234 (_________p1); \ 345 (_________p1); \
235 }) 346 })
236 347
237/** 348/**
349 * rcu_dereference - fetch an RCU-protected pointer, checking for RCU
350 *
351 * Makes rcu_dereference_check() do the dirty work.
352 */
353#define rcu_dereference(p) \
354 rcu_dereference_check(p, rcu_read_lock_held())
355
356/**
357 * rcu_dereference_bh - fetch an RCU-protected pointer, checking for RCU-bh
358 *
359 * Makes rcu_dereference_check() do the dirty work.
360 */
361#define rcu_dereference_bh(p) \
362 rcu_dereference_check(p, rcu_read_lock_bh_held())
363
364/**
365 * rcu_dereference_sched - fetch RCU-protected pointer, checking for RCU-sched
366 *
367 * Makes rcu_dereference_check() do the dirty work.
368 */
369#define rcu_dereference_sched(p) \
370 rcu_dereference_check(p, rcu_read_lock_sched_held())
371
372/**
238 * rcu_assign_pointer - assign (publicize) a pointer to a newly 373 * rcu_assign_pointer - assign (publicize) a pointer to a newly
239 * initialized structure that will be dereferenced by RCU read-side 374 * initialized structure that will be dereferenced by RCU read-side
240 * critical sections. Returns the value assigned. 375 * critical sections. Returns the value assigned.
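
With the per-flavor lockdep maps in place, the checked accessors are also flavor-specific: rcu_dereference_bh() expects rcu_read_lock_bh() (or BH-disabled) protection, and rcu_dereference_sched() expects preemption or irqs to be disabled. An illustrative sketch only, with invented names, showing one reader of each flavor:

#include <linux/rcupdate.h>
#include <linux/preempt.h>

struct softirq_stats {
	unsigned long counter;
};

static struct softirq_stats *si_stats;		/* RCU-bh / RCU-sched protected */

/* RCU-bh reader: rcu_dereference_bh() checks rcu_read_lock_bh_held(). */
static unsigned long read_counter_bh(void)
{
	struct softirq_stats *s;
	unsigned long ret = 0;

	rcu_read_lock_bh();
	s = rcu_dereference_bh(si_stats);
	if (s)
		ret = s->counter;
	rcu_read_unlock_bh();
	return ret;
}

/* RCU-sched reader: disabled preemption satisfies rcu_read_lock_sched_held(). */
static unsigned long read_counter_sched(void)
{
	struct softirq_stats *s;
	unsigned long ret = 0;

	preempt_disable();
	s = rcu_dereference_sched(si_stats);
	if (s)
		ret = s->counter;
	preempt_enable();
	return ret;
}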
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 96cc307ed9f4..a5195875480a 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -62,6 +62,18 @@ static inline long rcu_batches_completed_bh(void)
62 62
63extern int rcu_expedited_torture_stats(char *page); 63extern int rcu_expedited_torture_stats(char *page);
64 64
65static inline void rcu_force_quiescent_state(void)
66{
67}
68
69static inline void rcu_bh_force_quiescent_state(void)
70{
71}
72
73static inline void rcu_sched_force_quiescent_state(void)
74{
75}
76
65#define synchronize_rcu synchronize_sched 77#define synchronize_rcu synchronize_sched
66 78
67static inline void synchronize_rcu_expedited(void) 79static inline void synchronize_rcu_expedited(void)
@@ -93,10 +105,6 @@ static inline void rcu_exit_nohz(void)
93 105
94#endif /* #else #ifdef CONFIG_NO_HZ */ 106#endif /* #else #ifdef CONFIG_NO_HZ */
95 107
96static inline void rcu_scheduler_starting(void)
97{
98}
99
100static inline void exit_rcu(void) 108static inline void exit_rcu(void)
101{ 109{
102} 110}
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 8044b1b94333..42cc3a04779e 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -35,7 +35,6 @@ struct notifier_block;
35extern void rcu_sched_qs(int cpu); 35extern void rcu_sched_qs(int cpu);
36extern void rcu_bh_qs(int cpu); 36extern void rcu_bh_qs(int cpu);
37extern int rcu_needs_cpu(int cpu); 37extern int rcu_needs_cpu(int cpu);
38extern void rcu_scheduler_starting(void);
39extern int rcu_expedited_torture_stats(char *page); 38extern int rcu_expedited_torture_stats(char *page);
40 39
41#ifdef CONFIG_TREE_PREEMPT_RCU 40#ifdef CONFIG_TREE_PREEMPT_RCU
@@ -99,6 +98,9 @@ extern void rcu_check_callbacks(int cpu, int user);
99extern long rcu_batches_completed(void); 98extern long rcu_batches_completed(void);
100extern long rcu_batches_completed_bh(void); 99extern long rcu_batches_completed_bh(void);
101extern long rcu_batches_completed_sched(void); 100extern long rcu_batches_completed_sched(void);
101extern void rcu_force_quiescent_state(void);
102extern void rcu_bh_force_quiescent_state(void);
103extern void rcu_sched_force_quiescent_state(void);
102 104
103#ifdef CONFIG_NO_HZ 105#ifdef CONFIG_NO_HZ
104void rcu_enter_nohz(void); 106void rcu_enter_nohz(void);
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 05330fc5b436..5c52fa43785c 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -735,6 +735,9 @@ extern void rtnl_lock(void);
735extern void rtnl_unlock(void); 735extern void rtnl_unlock(void);
736extern int rtnl_trylock(void); 736extern int rtnl_trylock(void);
737extern int rtnl_is_locked(void); 737extern int rtnl_is_locked(void);
738#ifdef CONFIG_PROVE_LOCKING
739extern int lockdep_rtnl_is_held(void);
740#endif /* #ifdef CONFIG_PROVE_LOCKING */
738 741
739extern void rtnetlink_init(void); 742extern void rtnetlink_init(void);
740extern void __rtnl_unlock(void); 743extern void __rtnl_unlock(void);
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 4765d97dcafb..3084f80909cd 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -35,6 +35,9 @@ struct srcu_struct {
35 int completed; 35 int completed;
36 struct srcu_struct_array *per_cpu_ref; 36 struct srcu_struct_array *per_cpu_ref;
37 struct mutex mutex; 37 struct mutex mutex;
38#ifdef CONFIG_DEBUG_LOCK_ALLOC
39 struct lockdep_map dep_map;
40#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
38}; 41};
39 42
40#ifndef CONFIG_PREEMPT 43#ifndef CONFIG_PREEMPT
@@ -43,12 +46,100 @@ struct srcu_struct {
43#define srcu_barrier() 46#define srcu_barrier()
44#endif /* #else #ifndef CONFIG_PREEMPT */ 47#endif /* #else #ifndef CONFIG_PREEMPT */
45 48
49#ifdef CONFIG_DEBUG_LOCK_ALLOC
50
51int __init_srcu_struct(struct srcu_struct *sp, const char *name,
52 struct lock_class_key *key);
53
54#define init_srcu_struct(sp) \
55({ \
56 static struct lock_class_key __srcu_key; \
57 \
58 __init_srcu_struct((sp), #sp, &__srcu_key); \
59})
60
61# define srcu_read_acquire(sp) \
62 lock_acquire(&(sp)->dep_map, 0, 0, 2, 1, NULL, _THIS_IP_)
63# define srcu_read_release(sp) \
64 lock_release(&(sp)->dep_map, 1, _THIS_IP_)
65
66#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
67
46int init_srcu_struct(struct srcu_struct *sp); 68int init_srcu_struct(struct srcu_struct *sp);
69
70# define srcu_read_acquire(sp) do { } while (0)
71# define srcu_read_release(sp) do { } while (0)
72
73#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
74
47void cleanup_srcu_struct(struct srcu_struct *sp); 75void cleanup_srcu_struct(struct srcu_struct *sp);
48int srcu_read_lock(struct srcu_struct *sp) __acquires(sp); 76int __srcu_read_lock(struct srcu_struct *sp) __acquires(sp);
49void srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp); 77void __srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp);
50void synchronize_srcu(struct srcu_struct *sp); 78void synchronize_srcu(struct srcu_struct *sp);
51void synchronize_srcu_expedited(struct srcu_struct *sp); 79void synchronize_srcu_expedited(struct srcu_struct *sp);
52long srcu_batches_completed(struct srcu_struct *sp); 80long srcu_batches_completed(struct srcu_struct *sp);
53 81
82#ifdef CONFIG_DEBUG_LOCK_ALLOC
83
84/**
85 * srcu_read_lock_held - might we be in SRCU read-side critical section?
86 *
87 * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in
88 * an SRCU read-side critical section. In absence of CONFIG_PROVE_LOCKING,
89 * this assumes we are in an SRCU read-side critical section unless it can
90 * prove otherwise.
91 */
92static inline int srcu_read_lock_held(struct srcu_struct *sp)
93{
94 if (debug_locks)
95 return lock_is_held(&sp->dep_map);
96 return 1;
97}
98
99#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
100
101static inline int srcu_read_lock_held(struct srcu_struct *sp)
102{
103 return 1;
104}
105
106#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
107
108/**
109 * srcu_dereference - fetch SRCU-protected pointer with checking
110 *
111 * Makes rcu_dereference_check() do the dirty work.
112 */
113#define srcu_dereference(p, sp) \
114 rcu_dereference_check(p, srcu_read_lock_held(sp))
115
116/**
117 * srcu_read_lock - register a new reader for an SRCU-protected structure.
118 * @sp: srcu_struct in which to register the new reader.
119 *
120 * Enter an SRCU read-side critical section. Note that SRCU read-side
121 * critical sections may be nested.
122 */
123static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
124{
125 int retval = __srcu_read_lock(sp);
126
127 srcu_read_acquire(sp);
128 return retval;
129}
130
131/**
132 * srcu_read_unlock - unregister an old reader from an SRCU-protected structure.
133 * @sp: srcu_struct in which to unregister the old reader.
134 * @idx: return value from corresponding srcu_read_lock().
135 *
136 * Exit an SRCU read-side critical section.
137 */
138static inline void srcu_read_unlock(struct srcu_struct *sp, int idx)
139 __releases(sp)
140{
141 srcu_read_release(sp);
142 __srcu_read_unlock(sp, idx);
143}
144
54#endif 145#endif
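
With the dep_map added to srcu_struct, srcu_read_lock()/srcu_read_unlock() become lockdep-visible wrappers around __srcu_read_lock()/__srcu_read_unlock(), and srcu_dereference() can verify that the matching srcu_struct really is held. A minimal usage sketch (the device-configuration names are invented for illustration):

#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/init.h>

struct my_dev_config {
	int speed;
};

static struct srcu_struct config_srcu;
static struct my_dev_config *cur_config;	/* protected by config_srcu */

static int read_config_speed(void)
{
	struct my_dev_config *c;
	int idx, speed = 0;

	idx = srcu_read_lock(&config_srcu);	/* records the read side in lockdep */
	c = srcu_dereference(cur_config, &config_srcu);
	if (c)
		speed = c->speed;
	srcu_read_unlock(&config_srcu, idx);
	return speed;
}

static int __init my_config_init(void)
{
	/* Under CONFIG_DEBUG_LOCK_ALLOC this also sets up the dep_map. */
	return init_srcu_struct(&config_srcu);
}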
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 0f7c37825fc1..45375b41a2a0 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -177,7 +177,9 @@ extern int unregister_inet6addr_notifier(struct notifier_block *nb);
177static inline struct inet6_dev * 177static inline struct inet6_dev *
178__in6_dev_get(struct net_device *dev) 178__in6_dev_get(struct net_device *dev)
179{ 179{
180 return rcu_dereference(dev->ip6_ptr); 180 return rcu_dereference_check(dev->ip6_ptr,
181 rcu_read_lock_held() ||
182 lockdep_rtnl_is_held());
181} 183}
182 184
183static inline struct inet6_dev * 185static inline struct inet6_dev *
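
__in6_dev_get() is documented as safe either under rcu_read_lock() or under the RTNL mutex, so its check is the disjunction of the two, using the lockdep_rtnl_is_held() helper declared earlier in this series. For illustration only (the caller below is invented), a path that already holds RTNL needs no additional read-side marking:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <net/addrconf.h>

/* Holding RTNL is one of the two conditions accepted by the check
 * above, so no rcu_read_lock() is required on this path. */
static bool dev_has_inet6(struct net_device *dev)
{
	bool ret;

	rtnl_lock();
	ret = __in6_dev_get(dev) != NULL;
	rtnl_unlock();
	return ret;
}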
diff --git a/init/Kconfig b/init/Kconfig
index 1510e17a2902..d038a57004a2 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -396,6 +396,22 @@ config RCU_FANOUT_EXACT
396 396
397 Say N if unsure. 397 Say N if unsure.
398 398
399config RCU_FAST_NO_HZ
400 bool "Accelerate last non-dyntick-idle CPU's grace periods"
401 depends on TREE_RCU && NO_HZ && SMP
402 default n
403 help
404 This option causes RCU to attempt to accelerate grace periods
405 in order to allow the final CPU to enter dynticks-idle state
406 more quickly. On the other hand, this option increases the
407 overhead of the dynticks-idle checking, particularly on systems
408 with large numbers of CPUs.
409
410 Say Y if energy efficiency is critically important, particularly
411 if you have relatively few CPUs.
412
413 Say N if you are unsure.
414
399config TREE_RCU_TRACE 415config TREE_RCU_TRACE
400 def_bool RCU_TRACE && ( TREE_RCU || TREE_PREEMPT_RCU ) 416 def_bool RCU_TRACE && ( TREE_RCU || TREE_PREEMPT_RCU )
401 select DEBUG_FS 417 select DEBUG_FS
diff --git a/init/main.c b/init/main.c
index 4cb47a159f02..c75dcd6eef09 100644
--- a/init/main.c
+++ b/init/main.c
@@ -416,7 +416,9 @@ static noinline void __init_refok rest_init(void)
416 kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND); 416 kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND);
417 numa_default_policy(); 417 numa_default_policy();
418 pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES); 418 pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES);
419 rcu_read_lock();
419 kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns); 420 kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns);
421 rcu_read_unlock();
420 unlock_kernel(); 422 unlock_kernel();
421 423
422 /* 424 /*
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index aa3bee566446..4fd90e129772 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -23,6 +23,7 @@
23 */ 23 */
24 24
25#include <linux/cgroup.h> 25#include <linux/cgroup.h>
26#include <linux/module.h>
26#include <linux/ctype.h> 27#include <linux/ctype.h>
27#include <linux/errno.h> 28#include <linux/errno.h>
28#include <linux/fs.h> 29#include <linux/fs.h>
@@ -166,6 +167,20 @@ static DEFINE_SPINLOCK(hierarchy_id_lock);
166 */ 167 */
167static int need_forkexit_callback __read_mostly; 168static int need_forkexit_callback __read_mostly;
168 169
170#ifdef CONFIG_PROVE_LOCKING
171int cgroup_lock_is_held(void)
172{
173 return lockdep_is_held(&cgroup_mutex);
174}
175#else /* #ifdef CONFIG_PROVE_LOCKING */
176int cgroup_lock_is_held(void)
177{
178 return mutex_is_locked(&cgroup_mutex);
179}
180#endif /* #else #ifdef CONFIG_PROVE_LOCKING */
181
182EXPORT_SYMBOL_GPL(cgroup_lock_is_held);
183
169/* convenient tests for these bits */ 184/* convenient tests for these bits */
170inline int cgroup_is_removed(const struct cgroup *cgrp) 185inline int cgroup_is_removed(const struct cgroup *cgrp)
171{ 186{
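
cgroup_lock_is_held() gives RCU-lockdep a way to accept dereferences performed under cgroup_mutex as well as under rcu_read_lock(): with PROVE_LOCKING it asks lockdep, otherwise it falls back to mutex_is_locked(). A hedged sketch of the kind of check this enables (the accessor below is illustrative, not a quote of the patch, and assumes the cgroup_lock_is_held() declaration added to cgroup.h by this series):

#include <linux/cgroup.h>
#include <linux/sched.h>
#include <linux/rcupdate.h>

/* Dereference a subsystem-state pointer that is stable either inside
 * rcu_read_lock() or while cgroup_mutex is held. */
static struct cgroup_subsys_state *my_css_peek(struct task_struct *task,
					       int subsys_id)
{
	return rcu_dereference_check(task->cgroups->subsys[subsys_id],
				     rcu_read_lock_held() ||
				     cgroup_lock_is_held());
}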
diff --git a/kernel/exit.c b/kernel/exit.c
index 546774a31a66..45ed043b8bf5 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -85,7 +85,9 @@ static void __exit_signal(struct task_struct *tsk)
85 BUG_ON(!sig); 85 BUG_ON(!sig);
86 BUG_ON(!atomic_read(&sig->count)); 86 BUG_ON(!atomic_read(&sig->count));
87 87
88 sighand = rcu_dereference(tsk->sighand); 88 sighand = rcu_dereference_check(tsk->sighand,
89 rcu_read_lock_held() ||
90 lockdep_is_held(&tasklist_lock));
89 spin_lock(&sighand->siglock); 91 spin_lock(&sighand->siglock);
90 92
91 posix_cpu_timers_exit(tsk); 93 posix_cpu_timers_exit(tsk);
@@ -170,8 +172,10 @@ void release_task(struct task_struct * p)
170repeat: 172repeat:
171 tracehook_prepare_release_task(p); 173 tracehook_prepare_release_task(p);
172 /* don't need to get the RCU readlock here - the process is dead and 174 /* don't need to get the RCU readlock here - the process is dead and
173 * can't be modifying its own credentials */ 175 * can't be modifying its own credentials. But shut RCU-lockdep up */
176 rcu_read_lock();
174 atomic_dec(&__task_cred(p)->user->processes); 177 atomic_dec(&__task_cred(p)->user->processes);
178 rcu_read_unlock();
175 179
176 proc_flush_task(p); 180 proc_flush_task(p);
177 181
@@ -473,9 +477,11 @@ static void close_files(struct files_struct * files)
473 /* 477 /*
474 * It is safe to dereference the fd table without RCU or 478 * It is safe to dereference the fd table without RCU or
475 * ->file_lock because this is the last reference to the 479 * ->file_lock because this is the last reference to the
476 * files structure. 480 * files structure. But use RCU to shut RCU-lockdep up.
477 */ 481 */
482 rcu_read_lock();
478 fdt = files_fdtable(files); 483 fdt = files_fdtable(files);
484 rcu_read_unlock();
479 for (;;) { 485 for (;;) {
480 unsigned long set; 486 unsigned long set;
481 i = j * __NFDBITS; 487 i = j * __NFDBITS;
@@ -521,10 +527,12 @@ void put_files_struct(struct files_struct *files)
521 * at the end of the RCU grace period. Otherwise, 527 * at the end of the RCU grace period. Otherwise,
522 * you can free files immediately. 528 * you can free files immediately.
523 */ 529 */
530 rcu_read_lock();
524 fdt = files_fdtable(files); 531 fdt = files_fdtable(files);
525 if (fdt != &files->fdtab) 532 if (fdt != &files->fdtab)
526 kmem_cache_free(files_cachep, files); 533 kmem_cache_free(files_cachep, files);
527 free_fdtable(fdt); 534 free_fdtable(fdt);
535 rcu_read_unlock();
528 } 536 }
529} 537}
530 538
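
Because files_fdtable() now contains a checked dereference, even call sites that are safe for other reasons (last reference, exiting task) wrap the access in rcu_read_lock()/rcu_read_unlock() purely to keep RCU-lockdep quiet; the read-side section is essentially free here. An illustrative restatement of the idiom (the helper below is invented, not part of the patch):

#include <linux/fdtable.h>
#include <linux/rcupdate.h>

/* Peek at the fd table size while holding the last reference to 'files'.
 * The rcu_read_lock() is not needed for correctness on this path; it
 * only documents the access for RCU-lockdep. */
static unsigned int my_count_fd_slots(struct files_struct *files)
{
	struct fdtable *fdt;
	unsigned int n;

	rcu_read_lock();
	fdt = files_fdtable(files);
	n = fdt->max_fds;
	rcu_read_unlock();
	return n;
}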
diff --git a/kernel/fork.c b/kernel/fork.c
index f88bd984df35..17bbf093356d 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -86,6 +86,7 @@ int max_threads; /* tunable limit on nr_threads */
86DEFINE_PER_CPU(unsigned long, process_counts) = 0; 86DEFINE_PER_CPU(unsigned long, process_counts) = 0;
87 87
88__cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */ 88__cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */
89EXPORT_SYMBOL_GPL(tasklist_lock);
89 90
90int nr_processes(void) 91int nr_processes(void)
91{ 92{
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index c62ec14609b9..0c30d0455de1 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -3809,3 +3809,21 @@ void lockdep_sys_exit(void)
3809 lockdep_print_held_locks(curr); 3809 lockdep_print_held_locks(curr);
3810 } 3810 }
3811} 3811}
3812
3813void lockdep_rcu_dereference(const char *file, const int line)
3814{
3815 struct task_struct *curr = current;
3816
3817 if (!debug_locks_off())
3818 return;
3819 printk("\n===================================================\n");
3820 printk( "[ INFO: suspicious rcu_dereference_check() usage. ]\n");
3821 printk( "---------------------------------------------------\n");
3822 printk("%s:%d invoked rcu_dereference_check() without protection!\n",
3823 file, line);
3824 printk("\nother info that might help us debug this:\n\n");
3825 lockdep_print_held_locks(curr);
3826 printk("\nstack backtrace:\n");
3827 dump_stack();
3828}
3829EXPORT_SYMBOL_GPL(lockdep_rcu_dereference);
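
lockdep_rcu_dereference() is what rcu_dereference_check() invokes when its condition fails under CONFIG_PROVE_RCU. The fragment below (illustrative only, with an invented pointer) shows the kind of unprotected access that would produce the "suspicious rcu_dereference_check() usage" report printed above:

#include <linux/rcupdate.h>

struct my_data {
	int x;
};

static struct my_data *my_ptr;		/* meant to be RCU-protected */

static int buggy_read(void)
{
	/* Deliberately wrong, for illustration: no rcu_read_lock() is
	 * held, so rcu_dereference()'s rcu_read_lock_held() check fails
	 * and lockdep_rcu_dereference() emits the splat. */
	struct my_data *p = rcu_dereference(my_ptr);

	return p ? p->x : 0;
}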
diff --git a/kernel/notifier.c b/kernel/notifier.c
index acd24e7643eb..2488ba7eb568 100644
--- a/kernel/notifier.c
+++ b/kernel/notifier.c
@@ -78,10 +78,10 @@ static int __kprobes notifier_call_chain(struct notifier_block **nl,
78 int ret = NOTIFY_DONE; 78 int ret = NOTIFY_DONE;
79 struct notifier_block *nb, *next_nb; 79 struct notifier_block *nb, *next_nb;
80 80
81 nb = rcu_dereference(*nl); 81 nb = rcu_dereference_raw(*nl);
82 82
83 while (nb && nr_to_call) { 83 while (nb && nr_to_call) {
84 next_nb = rcu_dereference(nb->next); 84 next_nb = rcu_dereference_raw(nb->next);
85 85
86#ifdef CONFIG_DEBUG_NOTIFIERS 86#ifdef CONFIG_DEBUG_NOTIFIERS
87 if (unlikely(!func_ptr_is_kernel_text(nb->notifier_call))) { 87 if (unlikely(!func_ptr_is_kernel_text(nb->notifier_call))) {
@@ -309,7 +309,7 @@ int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
309 * racy then it does not matter what the result of the test 309 * racy then it does not matter what the result of the test
310 * is, we re-check the list after having taken the lock anyway: 310 * is, we re-check the list after having taken the lock anyway:
311 */ 311 */
312 if (rcu_dereference(nh->head)) { 312 if (rcu_dereference_raw(nh->head)) {
313 down_read(&nh->rwsem); 313 down_read(&nh->rwsem);
314 ret = notifier_call_chain(&nh->head, val, v, nr_to_call, 314 ret = notifier_call_chain(&nh->head, val, v, nr_to_call,
315 nr_calls); 315 nr_calls);
diff --git a/kernel/pid.c b/kernel/pid.c
index 2e17c9c92cbe..b08e697cd83f 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -367,7 +367,7 @@ struct task_struct *pid_task(struct pid *pid, enum pid_type type)
367 struct task_struct *result = NULL; 367 struct task_struct *result = NULL;
368 if (pid) { 368 if (pid) {
369 struct hlist_node *first; 369 struct hlist_node *first;
370 first = rcu_dereference(pid->tasks[type].first); 370 first = rcu_dereference_check(pid->tasks[type].first, rcu_read_lock_held() || lockdep_is_held(&tasklist_lock));
371 if (first) 371 if (first)
372 result = hlist_entry(first, struct task_struct, pids[(type)].node); 372 result = hlist_entry(first, struct task_struct, pids[(type)].node);
373 } 373 }
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 9b7fd4723878..f1125c1a6321 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -44,14 +44,43 @@
44#include <linux/cpu.h> 44#include <linux/cpu.h>
45#include <linux/mutex.h> 45#include <linux/mutex.h>
46#include <linux/module.h> 46#include <linux/module.h>
47#include <linux/kernel_stat.h>
47 48
48#ifdef CONFIG_DEBUG_LOCK_ALLOC 49#ifdef CONFIG_DEBUG_LOCK_ALLOC
49static struct lock_class_key rcu_lock_key; 50static struct lock_class_key rcu_lock_key;
50struct lockdep_map rcu_lock_map = 51struct lockdep_map rcu_lock_map =
51 STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key); 52 STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
52EXPORT_SYMBOL_GPL(rcu_lock_map); 53EXPORT_SYMBOL_GPL(rcu_lock_map);
54
55static struct lock_class_key rcu_bh_lock_key;
56struct lockdep_map rcu_bh_lock_map =
57 STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_bh", &rcu_bh_lock_key);
58EXPORT_SYMBOL_GPL(rcu_bh_lock_map);
59
60static struct lock_class_key rcu_sched_lock_key;
61struct lockdep_map rcu_sched_lock_map =
62 STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
63EXPORT_SYMBOL_GPL(rcu_sched_lock_map);
53#endif 64#endif
54 65
66int rcu_scheduler_active __read_mostly;
67EXPORT_SYMBOL_GPL(rcu_scheduler_active);
68
69/*
70 * This function is invoked towards the end of the scheduler's initialization
71 * process. Before this is called, the idle task might contain
72 * RCU read-side critical sections (during which time, this idle
73 * task is booting the system). After this function is called, the
74 * idle tasks are prohibited from containing RCU read-side critical
75 * sections.
76 */
77void rcu_scheduler_starting(void)
78{
79 WARN_ON(num_online_cpus() != 1);
80 WARN_ON(nr_context_switches() > 0);
81 rcu_scheduler_active = 1;
82}
83
55/* 84/*
56 * Awaken the corresponding synchronize_rcu() instance now that a 85 * Awaken the corresponding synchronize_rcu() instance now that a
57 * grace period has elapsed. 86 * grace period has elapsed.
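
rcu_scheduler_starting() moves to common code because every RCU flavor's lockdep checks now consult rcu_scheduler_active: before the scheduler is running there is only one task and no context switches, so rcu_read_lock_sched_held() deliberately reports "held" during early boot. A hedged sketch (the configuration structure and helper are invented) of why that escape hatch matters:

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/rcupdate.h>

struct my_cfg {
	int value;
};

/* Illustrative boot-time peek at an RCU-sched protected pointer with
 * preemption still enabled.  Before rcu_scheduler_starting() sets
 * rcu_scheduler_active there is a single task and no context switches,
 * so the access cannot race; the !rcu_scheduler_active term in
 * rcu_read_lock_sched_held() is what keeps PROVE_RCU quiet about it. */
static void __init early_peek(struct my_cfg **slot)
{
	struct my_cfg *c = rcu_dereference_sched(*slot);

	if (c)
		pr_info("boot-time config value: %d\n", c->value);
}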
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 9bb52177af02..258cdf0a91eb 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -61,6 +61,9 @@ static int test_no_idle_hz; /* Test RCU's support for tickless idle CPUs. */
61static int shuffle_interval = 3; /* Interval between shuffles (in sec)*/ 61static int shuffle_interval = 3; /* Interval between shuffles (in sec)*/
62static int stutter = 5; /* Start/stop testing interval (in sec) */ 62static int stutter = 5; /* Start/stop testing interval (in sec) */
63static int irqreader = 1; /* RCU readers from irq (timers). */ 63static int irqreader = 1; /* RCU readers from irq (timers). */
64static int fqs_duration = 0; /* Duration of bursts (us), 0 to disable. */
65static int fqs_holdoff = 0; /* Hold time within burst (us). */
66static int fqs_stutter = 3; /* Wait time between bursts (s). */
64static char *torture_type = "rcu"; /* What RCU implementation to torture. */ 67static char *torture_type = "rcu"; /* What RCU implementation to torture. */
65 68
66module_param(nreaders, int, 0444); 69module_param(nreaders, int, 0444);
@@ -79,6 +82,12 @@ module_param(stutter, int, 0444);
79MODULE_PARM_DESC(stutter, "Number of seconds to run/halt test"); 82MODULE_PARM_DESC(stutter, "Number of seconds to run/halt test");
80module_param(irqreader, int, 0444); 83module_param(irqreader, int, 0444);
81MODULE_PARM_DESC(irqreader, "Allow RCU readers from irq handlers"); 84MODULE_PARM_DESC(irqreader, "Allow RCU readers from irq handlers");
85module_param(fqs_duration, int, 0444);
86MODULE_PARM_DESC(fqs_duration, "Duration of fqs bursts (us)");
87module_param(fqs_holdoff, int, 0444);
88MODULE_PARM_DESC(fqs_holdoff, "Holdoff time within fqs bursts (us)");
89module_param(fqs_stutter, int, 0444);
90MODULE_PARM_DESC(fqs_stutter, "Wait time between fqs bursts (s)");
82module_param(torture_type, charp, 0444); 91module_param(torture_type, charp, 0444);
83MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)"); 92MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)");
84 93
@@ -99,6 +108,7 @@ static struct task_struct **reader_tasks;
99static struct task_struct *stats_task; 108static struct task_struct *stats_task;
100static struct task_struct *shuffler_task; 109static struct task_struct *shuffler_task;
101static struct task_struct *stutter_task; 110static struct task_struct *stutter_task;
111static struct task_struct *fqs_task;
102 112
103#define RCU_TORTURE_PIPE_LEN 10 113#define RCU_TORTURE_PIPE_LEN 10
104 114
@@ -263,6 +273,7 @@ struct rcu_torture_ops {
263 void (*deferred_free)(struct rcu_torture *p); 273 void (*deferred_free)(struct rcu_torture *p);
264 void (*sync)(void); 274 void (*sync)(void);
265 void (*cb_barrier)(void); 275 void (*cb_barrier)(void);
276 void (*fqs)(void);
266 int (*stats)(char *page); 277 int (*stats)(char *page);
267 int irq_capable; 278 int irq_capable;
268 char *name; 279 char *name;
@@ -347,6 +358,7 @@ static struct rcu_torture_ops rcu_ops = {
347 .deferred_free = rcu_torture_deferred_free, 358 .deferred_free = rcu_torture_deferred_free,
348 .sync = synchronize_rcu, 359 .sync = synchronize_rcu,
349 .cb_barrier = rcu_barrier, 360 .cb_barrier = rcu_barrier,
361 .fqs = rcu_force_quiescent_state,
350 .stats = NULL, 362 .stats = NULL,
351 .irq_capable = 1, 363 .irq_capable = 1,
352 .name = "rcu" 364 .name = "rcu"
@@ -388,6 +400,7 @@ static struct rcu_torture_ops rcu_sync_ops = {
388 .deferred_free = rcu_sync_torture_deferred_free, 400 .deferred_free = rcu_sync_torture_deferred_free,
389 .sync = synchronize_rcu, 401 .sync = synchronize_rcu,
390 .cb_barrier = NULL, 402 .cb_barrier = NULL,
403 .fqs = rcu_force_quiescent_state,
391 .stats = NULL, 404 .stats = NULL,
392 .irq_capable = 1, 405 .irq_capable = 1,
393 .name = "rcu_sync" 406 .name = "rcu_sync"
@@ -403,6 +416,7 @@ static struct rcu_torture_ops rcu_expedited_ops = {
403 .deferred_free = rcu_sync_torture_deferred_free, 416 .deferred_free = rcu_sync_torture_deferred_free,
404 .sync = synchronize_rcu_expedited, 417 .sync = synchronize_rcu_expedited,
405 .cb_barrier = NULL, 418 .cb_barrier = NULL,
419 .fqs = rcu_force_quiescent_state,
406 .stats = NULL, 420 .stats = NULL,
407 .irq_capable = 1, 421 .irq_capable = 1,
408 .name = "rcu_expedited" 422 .name = "rcu_expedited"
@@ -465,6 +479,7 @@ static struct rcu_torture_ops rcu_bh_ops = {
465 .deferred_free = rcu_bh_torture_deferred_free, 479 .deferred_free = rcu_bh_torture_deferred_free,
466 .sync = rcu_bh_torture_synchronize, 480 .sync = rcu_bh_torture_synchronize,
467 .cb_barrier = rcu_barrier_bh, 481 .cb_barrier = rcu_barrier_bh,
482 .fqs = rcu_bh_force_quiescent_state,
468 .stats = NULL, 483 .stats = NULL,
469 .irq_capable = 1, 484 .irq_capable = 1,
470 .name = "rcu_bh" 485 .name = "rcu_bh"
@@ -480,6 +495,7 @@ static struct rcu_torture_ops rcu_bh_sync_ops = {
480 .deferred_free = rcu_sync_torture_deferred_free, 495 .deferred_free = rcu_sync_torture_deferred_free,
481 .sync = rcu_bh_torture_synchronize, 496 .sync = rcu_bh_torture_synchronize,
482 .cb_barrier = NULL, 497 .cb_barrier = NULL,
498 .fqs = rcu_bh_force_quiescent_state,
483 .stats = NULL, 499 .stats = NULL,
484 .irq_capable = 1, 500 .irq_capable = 1,
485 .name = "rcu_bh_sync" 501 .name = "rcu_bh_sync"
@@ -621,6 +637,7 @@ static struct rcu_torture_ops sched_ops = {
621 .deferred_free = rcu_sched_torture_deferred_free, 637 .deferred_free = rcu_sched_torture_deferred_free,
622 .sync = sched_torture_synchronize, 638 .sync = sched_torture_synchronize,
623 .cb_barrier = rcu_barrier_sched, 639 .cb_barrier = rcu_barrier_sched,
640 .fqs = rcu_sched_force_quiescent_state,
624 .stats = NULL, 641 .stats = NULL,
625 .irq_capable = 1, 642 .irq_capable = 1,
626 .name = "sched" 643 .name = "sched"
@@ -636,6 +653,7 @@ static struct rcu_torture_ops sched_sync_ops = {
636 .deferred_free = rcu_sync_torture_deferred_free, 653 .deferred_free = rcu_sync_torture_deferred_free,
637 .sync = sched_torture_synchronize, 654 .sync = sched_torture_synchronize,
638 .cb_barrier = NULL, 655 .cb_barrier = NULL,
656 .fqs = rcu_sched_force_quiescent_state,
639 .stats = NULL, 657 .stats = NULL,
640 .name = "sched_sync" 658 .name = "sched_sync"
641}; 659};
@@ -650,12 +668,45 @@ static struct rcu_torture_ops sched_expedited_ops = {
650 .deferred_free = rcu_sync_torture_deferred_free, 668 .deferred_free = rcu_sync_torture_deferred_free,
651 .sync = synchronize_sched_expedited, 669 .sync = synchronize_sched_expedited,
652 .cb_barrier = NULL, 670 .cb_barrier = NULL,
671 .fqs = rcu_sched_force_quiescent_state,
653 .stats = rcu_expedited_torture_stats, 672 .stats = rcu_expedited_torture_stats,
654 .irq_capable = 1, 673 .irq_capable = 1,
655 .name = "sched_expedited" 674 .name = "sched_expedited"
656}; 675};
657 676
658/* 677/*
678 * RCU torture force-quiescent-state kthread. Repeatedly induces
679 * bursts of calls to force_quiescent_state(), increasing the probability
680 * of occurrence of some important types of race conditions.
681 */
682static int
683rcu_torture_fqs(void *arg)
684{
685 unsigned long fqs_resume_time;
686 int fqs_burst_remaining;
687
688 VERBOSE_PRINTK_STRING("rcu_torture_fqs task started");
689 do {
690 fqs_resume_time = jiffies + fqs_stutter * HZ;
691 while (jiffies - fqs_resume_time > LONG_MAX) {
692 schedule_timeout_interruptible(1);
693 }
694 fqs_burst_remaining = fqs_duration;
695 while (fqs_burst_remaining > 0) {
696 cur_ops->fqs();
697 udelay(fqs_holdoff);
698 fqs_burst_remaining -= fqs_holdoff;
699 }
700 rcu_stutter_wait("rcu_torture_fqs");
701 } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
702 VERBOSE_PRINTK_STRING("rcu_torture_fqs task stopping");
703 rcutorture_shutdown_absorb("rcu_torture_fqs");
704 while (!kthread_should_stop())
705 schedule_timeout_uninterruptible(1);
706 return 0;
707}
708
709/*
659 * RCU torture writer kthread. Repeatedly substitutes a new structure 710 * RCU torture writer kthread. Repeatedly substitutes a new structure
660 * for that pointed to by rcu_torture_current, freeing the old structure 711 * for that pointed to by rcu_torture_current, freeing the old structure
661 * after a series of grace periods (the "pipeline"). 712 * after a series of grace periods (the "pipeline").
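
The wait loop in rcu_torture_fqs() above uses an unsigned-wraparound comparison: once fqs_resume_time lies in the future, jiffies - fqs_resume_time wraps to a value greater than LONG_MAX, so the kthread sleeps a jiffy at a time until the fqs_stutter period expires. For all practical offsets this is the open-coded form of time_before(); an equivalent sketch (not part of the patch):

#include <linux/jiffies.h>
#include <linux/sched.h>

/* "jiffies - fqs_resume_time > LONG_MAX" on unsigned longs is, for any
 * realistic interval, the same test as time_before(jiffies,
 * fqs_resume_time), i.e. "the resume time has not arrived yet". */
static void wait_for_fqs_resume(unsigned long fqs_resume_time)
{
	while (time_before(jiffies, fqs_resume_time))
		schedule_timeout_interruptible(1);
}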
@@ -745,7 +796,11 @@ static void rcu_torture_timer(unsigned long unused)
745 796
746 idx = cur_ops->readlock(); 797 idx = cur_ops->readlock();
747 completed = cur_ops->completed(); 798 completed = cur_ops->completed();
748 p = rcu_dereference(rcu_torture_current); 799 p = rcu_dereference_check(rcu_torture_current,
800 rcu_read_lock_held() ||
801 rcu_read_lock_bh_held() ||
802 rcu_read_lock_sched_held() ||
803 srcu_read_lock_held(&srcu_ctl));
749 if (p == NULL) { 804 if (p == NULL) {
750 /* Leave because rcu_torture_writer is not yet underway */ 805 /* Leave because rcu_torture_writer is not yet underway */
751 cur_ops->readunlock(idx); 806 cur_ops->readunlock(idx);
@@ -798,11 +853,15 @@ rcu_torture_reader(void *arg)
798 do { 853 do {
799 if (irqreader && cur_ops->irq_capable) { 854 if (irqreader && cur_ops->irq_capable) {
800 if (!timer_pending(&t)) 855 if (!timer_pending(&t))
801 mod_timer(&t, 1); 856 mod_timer(&t, jiffies + 1);
802 } 857 }
803 idx = cur_ops->readlock(); 858 idx = cur_ops->readlock();
804 completed = cur_ops->completed(); 859 completed = cur_ops->completed();
805 p = rcu_dereference(rcu_torture_current); 860 p = rcu_dereference_check(rcu_torture_current,
861 rcu_read_lock_held() ||
862 rcu_read_lock_bh_held() ||
863 rcu_read_lock_sched_held() ||
864 srcu_read_lock_held(&srcu_ctl));
806 if (p == NULL) { 865 if (p == NULL) {
807 /* Wait for rcu_torture_writer to get underway */ 866 /* Wait for rcu_torture_writer to get underway */
808 cur_ops->readunlock(idx); 867 cur_ops->readunlock(idx);
@@ -1030,10 +1089,11 @@ rcu_torture_print_module_parms(char *tag)
1030 printk(KERN_ALERT "%s" TORTURE_FLAG 1089 printk(KERN_ALERT "%s" TORTURE_FLAG
1031 "--- %s: nreaders=%d nfakewriters=%d " 1090 "--- %s: nreaders=%d nfakewriters=%d "
1032 "stat_interval=%d verbose=%d test_no_idle_hz=%d " 1091 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
1033 "shuffle_interval=%d stutter=%d irqreader=%d\n", 1092 "shuffle_interval=%d stutter=%d irqreader=%d "
1093 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d\n",
1034 torture_type, tag, nrealreaders, nfakewriters, 1094 torture_type, tag, nrealreaders, nfakewriters,
1035 stat_interval, verbose, test_no_idle_hz, shuffle_interval, 1095 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
1036 stutter, irqreader); 1096 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter);
1037} 1097}
1038 1098
1039static struct notifier_block rcutorture_nb = { 1099static struct notifier_block rcutorture_nb = {
@@ -1109,6 +1169,12 @@ rcu_torture_cleanup(void)
1109 } 1169 }
1110 stats_task = NULL; 1170 stats_task = NULL;
1111 1171
1172 if (fqs_task) {
1173 VERBOSE_PRINTK_STRING("Stopping rcu_torture_fqs task");
1174 kthread_stop(fqs_task);
1175 }
1176 fqs_task = NULL;
1177
1112 /* Wait for all RCU callbacks to fire. */ 1178 /* Wait for all RCU callbacks to fire. */
1113 1179
1114 if (cur_ops->cb_barrier != NULL) 1180 if (cur_ops->cb_barrier != NULL)
@@ -1154,6 +1220,11 @@ rcu_torture_init(void)
1154 mutex_unlock(&fullstop_mutex); 1220 mutex_unlock(&fullstop_mutex);
1155 return -EINVAL; 1221 return -EINVAL;
1156 } 1222 }
1223 if (cur_ops->fqs == NULL && fqs_duration != 0) {
1224 printk(KERN_ALERT "rcu-torture: ->fqs NULL and non-zero "
1225 "fqs_duration, fqs disabled.\n");
1226 fqs_duration = 0;
1227 }
1157 if (cur_ops->init) 1228 if (cur_ops->init)
1158 cur_ops->init(); /* no "goto unwind" prior to this point!!! */ 1229 cur_ops->init(); /* no "goto unwind" prior to this point!!! */
1159 1230
@@ -1282,6 +1353,19 @@ rcu_torture_init(void)
1282 goto unwind; 1353 goto unwind;
1283 } 1354 }
1284 } 1355 }
1356 if (fqs_duration < 0)
1357 fqs_duration = 0;
1358 if (fqs_duration) {
1359 /* Create the fqs thread */
1360 fqs_task = kthread_run(rcu_torture_fqs, NULL,
1361 "rcu_torture_fqs");
1362 if (IS_ERR(fqs_task)) {
1363 firsterr = PTR_ERR(fqs_task);
1364 VERBOSE_PRINTK_ERRSTRING("Failed to create fqs");
1365 fqs_task = NULL;
1366 goto unwind;
1367 }
1368 }
1285 register_reboot_notifier(&rcutorture_nb); 1369 register_reboot_notifier(&rcutorture_nb);
1286 mutex_unlock(&fullstop_mutex); 1370 mutex_unlock(&fullstop_mutex);
1287 return 0; 1371 return 0;
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 53ae9598f798..3ec8160fc75f 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -46,7 +46,6 @@
46#include <linux/cpu.h> 46#include <linux/cpu.h>
47#include <linux/mutex.h> 47#include <linux/mutex.h>
48#include <linux/time.h> 48#include <linux/time.h>
49#include <linux/kernel_stat.h>
50 49
51#include "rcutree.h" 50#include "rcutree.h"
52 51
@@ -66,11 +65,11 @@ static struct lock_class_key rcu_node_class[NUM_RCU_LVLS];
66 .signaled = RCU_GP_IDLE, \ 65 .signaled = RCU_GP_IDLE, \
67 .gpnum = -300, \ 66 .gpnum = -300, \
68 .completed = -300, \ 67 .completed = -300, \
69 .onofflock = __SPIN_LOCK_UNLOCKED(&name.onofflock), \ 68 .onofflock = __RAW_SPIN_LOCK_UNLOCKED(&name.onofflock), \
70 .orphan_cbs_list = NULL, \ 69 .orphan_cbs_list = NULL, \
71 .orphan_cbs_tail = &name.orphan_cbs_list, \ 70 .orphan_cbs_tail = &name.orphan_cbs_list, \
72 .orphan_qlen = 0, \ 71 .orphan_qlen = 0, \
73 .fqslock = __SPIN_LOCK_UNLOCKED(&name.fqslock), \ 72 .fqslock = __RAW_SPIN_LOCK_UNLOCKED(&name.fqslock), \
74 .n_force_qs = 0, \ 73 .n_force_qs = 0, \
75 .n_force_qs_ngp = 0, \ 74 .n_force_qs_ngp = 0, \
76} 75}
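
The rcu_state and rcu_node locks switch from spin_lock*() to raw_spin_lock*() (and their initializers to __RAW_SPIN_LOCK_UNLOCKED), presumably so they remain true spinning locks on configurations where ordinary spinlocks can become sleeping locks; they are taken in contexts that must not sleep. A hedged sketch of the corresponding field change (abbreviated and illustrative, not a quote of rcutree.h):

#include <linux/spinlock.h>

/* Abbreviated, illustrative fragment; not the real rcu_node layout. */
struct rnp_sketch {
	raw_spinlock_t lock;		/* was: spinlock_t lock; */
	unsigned long qsmask;
};

static void sketch_clear_bit(struct rnp_sketch *rnp, unsigned long mask)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&rnp->lock, flags);	/* matches the new call sites */
	rnp->qsmask &= ~mask;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}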
@@ -81,9 +80,6 @@ DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
81struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state); 80struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
82DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); 81DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
83 82
84static int rcu_scheduler_active __read_mostly;
85
86
87/* 83/*
88 * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s 84 * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s
89 * permit this function to be invoked without holding the root rcu_node 85 * permit this function to be invoked without holding the root rcu_node
@@ -157,6 +153,24 @@ long rcu_batches_completed_bh(void)
157EXPORT_SYMBOL_GPL(rcu_batches_completed_bh); 153EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
158 154
159/* 155/*
156 * Force a quiescent state for RCU BH.
157 */
158void rcu_bh_force_quiescent_state(void)
159{
160 force_quiescent_state(&rcu_bh_state, 0);
161}
162EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
163
164/*
165 * Force a quiescent state for RCU-sched.
166 */
167void rcu_sched_force_quiescent_state(void)
168{
169 force_quiescent_state(&rcu_sched_state, 0);
170}
171EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);
172
173/*
160 * Does the CPU have callbacks ready to be invoked? 174 * Does the CPU have callbacks ready to be invoked?
161 */ 175 */
162static int 176static int
@@ -439,10 +453,10 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
439 453
440 /* Only let one CPU complain about others per time interval. */ 454 /* Only let one CPU complain about others per time interval. */
441 455
442 spin_lock_irqsave(&rnp->lock, flags); 456 raw_spin_lock_irqsave(&rnp->lock, flags);
443 delta = jiffies - rsp->jiffies_stall; 457 delta = jiffies - rsp->jiffies_stall;
444 if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) { 458 if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
445 spin_unlock_irqrestore(&rnp->lock, flags); 459 raw_spin_unlock_irqrestore(&rnp->lock, flags);
446 return; 460 return;
447 } 461 }
448 rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK; 462 rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
@@ -452,13 +466,15 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
452 * due to CPU offlining. 466 * due to CPU offlining.
453 */ 467 */
454 rcu_print_task_stall(rnp); 468 rcu_print_task_stall(rnp);
455 spin_unlock_irqrestore(&rnp->lock, flags); 469 raw_spin_unlock_irqrestore(&rnp->lock, flags);
456 470
457 /* OK, time to rat on our buddy... */ 471 /* OK, time to rat on our buddy... */
458 472
459 printk(KERN_ERR "INFO: RCU detected CPU stalls:"); 473 printk(KERN_ERR "INFO: RCU detected CPU stalls:");
460 rcu_for_each_leaf_node(rsp, rnp) { 474 rcu_for_each_leaf_node(rsp, rnp) {
475 raw_spin_lock_irqsave(&rnp->lock, flags);
461 rcu_print_task_stall(rnp); 476 rcu_print_task_stall(rnp);
477 raw_spin_unlock_irqrestore(&rnp->lock, flags);
462 if (rnp->qsmask == 0) 478 if (rnp->qsmask == 0)
463 continue; 479 continue;
464 for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++) 480 for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
@@ -469,6 +485,10 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
469 smp_processor_id(), (long)(jiffies - rsp->gp_start)); 485 smp_processor_id(), (long)(jiffies - rsp->gp_start));
470 trigger_all_cpu_backtrace(); 486 trigger_all_cpu_backtrace();
471 487
488 /* If so configured, complain about tasks blocking the grace period. */
489
490 rcu_print_detail_task_stall(rsp);
491
472 force_quiescent_state(rsp, 0); /* Kick them all. */ 492 force_quiescent_state(rsp, 0); /* Kick them all. */
473} 493}
474 494
@@ -481,11 +501,11 @@ static void print_cpu_stall(struct rcu_state *rsp)
481 smp_processor_id(), jiffies - rsp->gp_start); 501 smp_processor_id(), jiffies - rsp->gp_start);
482 trigger_all_cpu_backtrace(); 502 trigger_all_cpu_backtrace();
483 503
484 spin_lock_irqsave(&rnp->lock, flags); 504 raw_spin_lock_irqsave(&rnp->lock, flags);
485 if ((long)(jiffies - rsp->jiffies_stall) >= 0) 505 if (ULONG_CMP_GE(jiffies, rsp->jiffies_stall))
486 rsp->jiffies_stall = 506 rsp->jiffies_stall =
487 jiffies + RCU_SECONDS_TILL_STALL_RECHECK; 507 jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
488 spin_unlock_irqrestore(&rnp->lock, flags); 508 raw_spin_unlock_irqrestore(&rnp->lock, flags);
489 509
490 set_need_resched(); /* kick ourselves to get things going. */ 510 set_need_resched(); /* kick ourselves to get things going. */
491} 511}
@@ -545,12 +565,12 @@ static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp)
545 local_irq_save(flags); 565 local_irq_save(flags);
546 rnp = rdp->mynode; 566 rnp = rdp->mynode;
547 if (rdp->gpnum == ACCESS_ONCE(rnp->gpnum) || /* outside lock. */ 567 if (rdp->gpnum == ACCESS_ONCE(rnp->gpnum) || /* outside lock. */
548 !spin_trylock(&rnp->lock)) { /* irqs already off, retry later. */ 568 !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
549 local_irq_restore(flags); 569 local_irq_restore(flags);
550 return; 570 return;
551 } 571 }
552 __note_new_gpnum(rsp, rnp, rdp); 572 __note_new_gpnum(rsp, rnp, rdp);
553 spin_unlock_irqrestore(&rnp->lock, flags); 573 raw_spin_unlock_irqrestore(&rnp->lock, flags);
554} 574}
555 575
556/* 576/*
@@ -609,12 +629,12 @@ rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
609 local_irq_save(flags); 629 local_irq_save(flags);
610 rnp = rdp->mynode; 630 rnp = rdp->mynode;
611 if (rdp->completed == ACCESS_ONCE(rnp->completed) || /* outside lock. */ 631 if (rdp->completed == ACCESS_ONCE(rnp->completed) || /* outside lock. */
612 !spin_trylock(&rnp->lock)) { /* irqs already off, retry later. */ 632 !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
613 local_irq_restore(flags); 633 local_irq_restore(flags);
614 return; 634 return;
615 } 635 }
616 __rcu_process_gp_end(rsp, rnp, rdp); 636 __rcu_process_gp_end(rsp, rnp, rdp);
617 spin_unlock_irqrestore(&rnp->lock, flags); 637 raw_spin_unlock_irqrestore(&rnp->lock, flags);
618} 638}
619 639
620/* 640/*
@@ -659,12 +679,14 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
659 struct rcu_data *rdp = rsp->rda[smp_processor_id()]; 679 struct rcu_data *rdp = rsp->rda[smp_processor_id()];
660 struct rcu_node *rnp = rcu_get_root(rsp); 680 struct rcu_node *rnp = rcu_get_root(rsp);
661 681
662 if (!cpu_needs_another_gp(rsp, rdp)) { 682 if (!cpu_needs_another_gp(rsp, rdp) || rsp->fqs_active) {
683 if (cpu_needs_another_gp(rsp, rdp))
684 rsp->fqs_need_gp = 1;
663 if (rnp->completed == rsp->completed) { 685 if (rnp->completed == rsp->completed) {
664 spin_unlock_irqrestore(&rnp->lock, flags); 686 raw_spin_unlock_irqrestore(&rnp->lock, flags);
665 return; 687 return;
666 } 688 }
667 spin_unlock(&rnp->lock); /* irqs remain disabled. */ 689 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
668 690
669 /* 691 /*
670 * Propagate new ->completed value to rcu_node structures 692 * Propagate new ->completed value to rcu_node structures
@@ -672,9 +694,9 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
672 * of the next grace period to process their callbacks. 694 * of the next grace period to process their callbacks.
673 */ 695 */
674 rcu_for_each_node_breadth_first(rsp, rnp) { 696 rcu_for_each_node_breadth_first(rsp, rnp) {
675 spin_lock(&rnp->lock); /* irqs already disabled. */ 697 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
676 rnp->completed = rsp->completed; 698 rnp->completed = rsp->completed;
677 spin_unlock(&rnp->lock); /* irqs remain disabled. */ 699 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
678 } 700 }
679 local_irq_restore(flags); 701 local_irq_restore(flags);
680 return; 702 return;
@@ -695,15 +717,15 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
695 rnp->completed = rsp->completed; 717 rnp->completed = rsp->completed;
696 rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */ 718 rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */
697 rcu_start_gp_per_cpu(rsp, rnp, rdp); 719 rcu_start_gp_per_cpu(rsp, rnp, rdp);
698 spin_unlock_irqrestore(&rnp->lock, flags); 720 raw_spin_unlock_irqrestore(&rnp->lock, flags);
699 return; 721 return;
700 } 722 }
701 723
702 spin_unlock(&rnp->lock); /* leave irqs disabled. */ 724 raw_spin_unlock(&rnp->lock); /* leave irqs disabled. */
703 725
704 726
705 /* Exclude any concurrent CPU-hotplug operations. */ 727 /* Exclude any concurrent CPU-hotplug operations. */
706 spin_lock(&rsp->onofflock); /* irqs already disabled. */ 728 raw_spin_lock(&rsp->onofflock); /* irqs already disabled. */
707 729
708 /* 730 /*
709 * Set the quiescent-state-needed bits in all the rcu_node 731 * Set the quiescent-state-needed bits in all the rcu_node
@@ -723,21 +745,21 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
723 * irqs disabled. 745 * irqs disabled.
724 */ 746 */
725 rcu_for_each_node_breadth_first(rsp, rnp) { 747 rcu_for_each_node_breadth_first(rsp, rnp) {
726 spin_lock(&rnp->lock); /* irqs already disabled. */ 748 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
727 rcu_preempt_check_blocked_tasks(rnp); 749 rcu_preempt_check_blocked_tasks(rnp);
728 rnp->qsmask = rnp->qsmaskinit; 750 rnp->qsmask = rnp->qsmaskinit;
729 rnp->gpnum = rsp->gpnum; 751 rnp->gpnum = rsp->gpnum;
730 rnp->completed = rsp->completed; 752 rnp->completed = rsp->completed;
731 if (rnp == rdp->mynode) 753 if (rnp == rdp->mynode)
732 rcu_start_gp_per_cpu(rsp, rnp, rdp); 754 rcu_start_gp_per_cpu(rsp, rnp, rdp);
733 spin_unlock(&rnp->lock); /* irqs remain disabled. */ 755 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
734 } 756 }
735 757
736 rnp = rcu_get_root(rsp); 758 rnp = rcu_get_root(rsp);
737 spin_lock(&rnp->lock); /* irqs already disabled. */ 759 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
738 rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */ 760 rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
739 spin_unlock(&rnp->lock); /* irqs remain disabled. */ 761 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
740 spin_unlock_irqrestore(&rsp->onofflock, flags); 762 raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
741} 763}
742 764
743/* 765/*
@@ -776,14 +798,14 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
776 if (!(rnp->qsmask & mask)) { 798 if (!(rnp->qsmask & mask)) {
777 799
778 /* Our bit has already been cleared, so done. */ 800 /* Our bit has already been cleared, so done. */
779 spin_unlock_irqrestore(&rnp->lock, flags); 801 raw_spin_unlock_irqrestore(&rnp->lock, flags);
780 return; 802 return;
781 } 803 }
782 rnp->qsmask &= ~mask; 804 rnp->qsmask &= ~mask;
783 if (rnp->qsmask != 0 || rcu_preempted_readers(rnp)) { 805 if (rnp->qsmask != 0 || rcu_preempted_readers(rnp)) {
784 806
785 /* Other bits still set at this level, so done. */ 807 /* Other bits still set at this level, so done. */
786 spin_unlock_irqrestore(&rnp->lock, flags); 808 raw_spin_unlock_irqrestore(&rnp->lock, flags);
787 return; 809 return;
788 } 810 }
789 mask = rnp->grpmask; 811 mask = rnp->grpmask;
@@ -793,10 +815,10 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
793 815
794 break; 816 break;
795 } 817 }
796 spin_unlock_irqrestore(&rnp->lock, flags); 818 raw_spin_unlock_irqrestore(&rnp->lock, flags);
797 rnp_c = rnp; 819 rnp_c = rnp;
798 rnp = rnp->parent; 820 rnp = rnp->parent;
799 spin_lock_irqsave(&rnp->lock, flags); 821 raw_spin_lock_irqsave(&rnp->lock, flags);
800 WARN_ON_ONCE(rnp_c->qsmask); 822 WARN_ON_ONCE(rnp_c->qsmask);
801 } 823 }
802 824
@@ -825,7 +847,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long las
825 struct rcu_node *rnp; 847 struct rcu_node *rnp;
826 848
827 rnp = rdp->mynode; 849 rnp = rdp->mynode;
828 spin_lock_irqsave(&rnp->lock, flags); 850 raw_spin_lock_irqsave(&rnp->lock, flags);
829 if (lastcomp != rnp->completed) { 851 if (lastcomp != rnp->completed) {
830 852
831 /* 853 /*
@@ -837,12 +859,12 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long las
837 * race occurred. 859 * race occurred.
838 */ 860 */
839 rdp->passed_quiesc = 0; /* try again later! */ 861 rdp->passed_quiesc = 0; /* try again later! */
840 spin_unlock_irqrestore(&rnp->lock, flags); 862 raw_spin_unlock_irqrestore(&rnp->lock, flags);
841 return; 863 return;
842 } 864 }
843 mask = rdp->grpmask; 865 mask = rdp->grpmask;
844 if ((rnp->qsmask & mask) == 0) { 866 if ((rnp->qsmask & mask) == 0) {
845 spin_unlock_irqrestore(&rnp->lock, flags); 867 raw_spin_unlock_irqrestore(&rnp->lock, flags);
846 } else { 868 } else {
847 rdp->qs_pending = 0; 869 rdp->qs_pending = 0;
848 870
@@ -906,7 +928,7 @@ static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp)
906 928
907 if (rdp->nxtlist == NULL) 929 if (rdp->nxtlist == NULL)
908 return; /* irqs disabled, so comparison is stable. */ 930 return; /* irqs disabled, so comparison is stable. */
909 spin_lock(&rsp->onofflock); /* irqs already disabled. */ 931 raw_spin_lock(&rsp->onofflock); /* irqs already disabled. */
910 *rsp->orphan_cbs_tail = rdp->nxtlist; 932 *rsp->orphan_cbs_tail = rdp->nxtlist;
911 rsp->orphan_cbs_tail = rdp->nxttail[RCU_NEXT_TAIL]; 933 rsp->orphan_cbs_tail = rdp->nxttail[RCU_NEXT_TAIL];
912 rdp->nxtlist = NULL; 934 rdp->nxtlist = NULL;
@@ -914,7 +936,7 @@ static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp)
914 rdp->nxttail[i] = &rdp->nxtlist; 936 rdp->nxttail[i] = &rdp->nxtlist;
915 rsp->orphan_qlen += rdp->qlen; 937 rsp->orphan_qlen += rdp->qlen;
916 rdp->qlen = 0; 938 rdp->qlen = 0;
917 spin_unlock(&rsp->onofflock); /* irqs remain disabled. */ 939 raw_spin_unlock(&rsp->onofflock); /* irqs remain disabled. */
918} 940}
919 941
920/* 942/*
@@ -925,10 +947,10 @@ static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
925 unsigned long flags; 947 unsigned long flags;
926 struct rcu_data *rdp; 948 struct rcu_data *rdp;
927 949
928 spin_lock_irqsave(&rsp->onofflock, flags); 950 raw_spin_lock_irqsave(&rsp->onofflock, flags);
929 rdp = rsp->rda[smp_processor_id()]; 951 rdp = rsp->rda[smp_processor_id()];
930 if (rsp->orphan_cbs_list == NULL) { 952 if (rsp->orphan_cbs_list == NULL) {
931 spin_unlock_irqrestore(&rsp->onofflock, flags); 953 raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
932 return; 954 return;
933 } 955 }
934 *rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_list; 956 *rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_list;
@@ -937,7 +959,7 @@ static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
937 rsp->orphan_cbs_list = NULL; 959 rsp->orphan_cbs_list = NULL;
938 rsp->orphan_cbs_tail = &rsp->orphan_cbs_list; 960 rsp->orphan_cbs_tail = &rsp->orphan_cbs_list;
939 rsp->orphan_qlen = 0; 961 rsp->orphan_qlen = 0;
940 spin_unlock_irqrestore(&rsp->onofflock, flags); 962 raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
941} 963}
942 964
943/* 965/*
@@ -953,23 +975,23 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
953 struct rcu_node *rnp; 975 struct rcu_node *rnp;
954 976
955 /* Exclude any attempts to start a new grace period. */ 977 /* Exclude any attempts to start a new grace period. */
956 spin_lock_irqsave(&rsp->onofflock, flags); 978 raw_spin_lock_irqsave(&rsp->onofflock, flags);
957 979
958 /* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */ 980 /* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */
959 rnp = rdp->mynode; /* this is the outgoing CPU's rnp. */ 981 rnp = rdp->mynode; /* this is the outgoing CPU's rnp. */
960 mask = rdp->grpmask; /* rnp->grplo is constant. */ 982 mask = rdp->grpmask; /* rnp->grplo is constant. */
961 do { 983 do {
962 spin_lock(&rnp->lock); /* irqs already disabled. */ 984 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
963 rnp->qsmaskinit &= ~mask; 985 rnp->qsmaskinit &= ~mask;
964 if (rnp->qsmaskinit != 0) { 986 if (rnp->qsmaskinit != 0) {
965 if (rnp != rdp->mynode) 987 if (rnp != rdp->mynode)
966 spin_unlock(&rnp->lock); /* irqs remain disabled. */ 988 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
967 break; 989 break;
968 } 990 }
969 if (rnp == rdp->mynode) 991 if (rnp == rdp->mynode)
970 need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp); 992 need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp);
971 else 993 else
972 spin_unlock(&rnp->lock); /* irqs remain disabled. */ 994 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
973 mask = rnp->grpmask; 995 mask = rnp->grpmask;
974 rnp = rnp->parent; 996 rnp = rnp->parent;
975 } while (rnp != NULL); 997 } while (rnp != NULL);
@@ -980,12 +1002,12 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
980 * because invoking rcu_report_unblock_qs_rnp() with ->onofflock 1002 * because invoking rcu_report_unblock_qs_rnp() with ->onofflock
981 * held leads to deadlock. 1003 * held leads to deadlock.
982 */ 1004 */
983 spin_unlock(&rsp->onofflock); /* irqs remain disabled. */ 1005 raw_spin_unlock(&rsp->onofflock); /* irqs remain disabled. */
984 rnp = rdp->mynode; 1006 rnp = rdp->mynode;
985 if (need_report & RCU_OFL_TASKS_NORM_GP) 1007 if (need_report & RCU_OFL_TASKS_NORM_GP)
986 rcu_report_unblock_qs_rnp(rnp, flags); 1008 rcu_report_unblock_qs_rnp(rnp, flags);
987 else 1009 else
988 spin_unlock_irqrestore(&rnp->lock, flags); 1010 raw_spin_unlock_irqrestore(&rnp->lock, flags);
989 if (need_report & RCU_OFL_TASKS_EXP_GP) 1011 if (need_report & RCU_OFL_TASKS_EXP_GP)
990 rcu_report_exp_rnp(rsp, rnp); 1012 rcu_report_exp_rnp(rsp, rnp);
991 1013
@@ -1144,11 +1166,9 @@ void rcu_check_callbacks(int cpu, int user)
1144/* 1166/*
1145 * Scan the leaf rcu_node structures, processing dyntick state for any that 1167 * Scan the leaf rcu_node structures, processing dyntick state for any that
1146 * have not yet encountered a quiescent state, using the function specified. 1168 * have not yet encountered a quiescent state, using the function specified.
1147 * Returns 1 if the current grace period ends while scanning (possibly 1169 * The caller must have suppressed start of new grace periods.
1148 * because we made it end).
1149 */ 1170 */
1150static int rcu_process_dyntick(struct rcu_state *rsp, long lastcomp, 1171static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *))
1151 int (*f)(struct rcu_data *))
1152{ 1172{
1153 unsigned long bit; 1173 unsigned long bit;
1154 int cpu; 1174 int cpu;
@@ -1158,13 +1178,13 @@ static int rcu_process_dyntick(struct rcu_state *rsp, long lastcomp,
1158 1178
1159 rcu_for_each_leaf_node(rsp, rnp) { 1179 rcu_for_each_leaf_node(rsp, rnp) {
1160 mask = 0; 1180 mask = 0;
1161 spin_lock_irqsave(&rnp->lock, flags); 1181 raw_spin_lock_irqsave(&rnp->lock, flags);
1162 if (rnp->completed != lastcomp) { 1182 if (!rcu_gp_in_progress(rsp)) {
1163 spin_unlock_irqrestore(&rnp->lock, flags); 1183 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1164 return 1; 1184 return;
1165 } 1185 }
1166 if (rnp->qsmask == 0) { 1186 if (rnp->qsmask == 0) {
1167 spin_unlock_irqrestore(&rnp->lock, flags); 1187 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1168 continue; 1188 continue;
1169 } 1189 }
1170 cpu = rnp->grplo; 1190 cpu = rnp->grplo;
@@ -1173,15 +1193,14 @@ static int rcu_process_dyntick(struct rcu_state *rsp, long lastcomp,
1173 if ((rnp->qsmask & bit) != 0 && f(rsp->rda[cpu])) 1193 if ((rnp->qsmask & bit) != 0 && f(rsp->rda[cpu]))
1174 mask |= bit; 1194 mask |= bit;
1175 } 1195 }
1176 if (mask != 0 && rnp->completed == lastcomp) { 1196 if (mask != 0) {
1177 1197
1178 /* rcu_report_qs_rnp() releases rnp->lock. */ 1198 /* rcu_report_qs_rnp() releases rnp->lock. */
1179 rcu_report_qs_rnp(mask, rsp, rnp, flags); 1199 rcu_report_qs_rnp(mask, rsp, rnp, flags);
1180 continue; 1200 continue;
1181 } 1201 }
1182 spin_unlock_irqrestore(&rnp->lock, flags); 1202 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1183 } 1203 }
1184 return 0;
1185} 1204}
1186 1205
1187/* 1206/*
@@ -1191,32 +1210,26 @@ static int rcu_process_dyntick(struct rcu_state *rsp, long lastcomp,
1191static void force_quiescent_state(struct rcu_state *rsp, int relaxed) 1210static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
1192{ 1211{
1193 unsigned long flags; 1212 unsigned long flags;
1194 long lastcomp;
1195 struct rcu_node *rnp = rcu_get_root(rsp); 1213 struct rcu_node *rnp = rcu_get_root(rsp);
1196 u8 signaled;
1197 u8 forcenow;
1198 1214
1199 if (!rcu_gp_in_progress(rsp)) 1215 if (!rcu_gp_in_progress(rsp))
1200 return; /* No grace period in progress, nothing to force. */ 1216 return; /* No grace period in progress, nothing to force. */
1201 if (!spin_trylock_irqsave(&rsp->fqslock, flags)) { 1217 if (!raw_spin_trylock_irqsave(&rsp->fqslock, flags)) {
1202 rsp->n_force_qs_lh++; /* Inexact, can lose counts. Tough! */ 1218 rsp->n_force_qs_lh++; /* Inexact, can lose counts. Tough! */
1203 return; /* Someone else is already on the job. */ 1219 return; /* Someone else is already on the job. */
1204 } 1220 }
1205 if (relaxed && 1221 if (relaxed && ULONG_CMP_GE(rsp->jiffies_force_qs, jiffies))
1206 (long)(rsp->jiffies_force_qs - jiffies) >= 0) 1222 goto unlock_fqs_ret; /* no emergency and done recently. */
1207 goto unlock_ret; /* no emergency and done recently. */
1208 rsp->n_force_qs++; 1223 rsp->n_force_qs++;
1209 spin_lock(&rnp->lock); 1224 raw_spin_lock(&rnp->lock); /* irqs already disabled */
1210 lastcomp = rsp->gpnum - 1;
1211 signaled = rsp->signaled;
1212 rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; 1225 rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
1213 if(!rcu_gp_in_progress(rsp)) { 1226 if(!rcu_gp_in_progress(rsp)) {
1214 rsp->n_force_qs_ngp++; 1227 rsp->n_force_qs_ngp++;
1215 spin_unlock(&rnp->lock); 1228 raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
1216 goto unlock_ret; /* no GP in progress, time updated. */ 1229 goto unlock_fqs_ret; /* no GP in progress, time updated. */
1217 } 1230 }
1218 spin_unlock(&rnp->lock); 1231 rsp->fqs_active = 1;
1219 switch (signaled) { 1232 switch (rsp->signaled) {
1220 case RCU_GP_IDLE: 1233 case RCU_GP_IDLE:
1221 case RCU_GP_INIT: 1234 case RCU_GP_INIT:
1222 1235
@@ -1224,45 +1237,38 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
1224 1237
1225 case RCU_SAVE_DYNTICK: 1238 case RCU_SAVE_DYNTICK:
1226 1239
1240 raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
1227 if (RCU_SIGNAL_INIT != RCU_SAVE_DYNTICK) 1241 if (RCU_SIGNAL_INIT != RCU_SAVE_DYNTICK)
1228 break; /* So gcc recognizes the dead code. */ 1242 break; /* So gcc recognizes the dead code. */
1229 1243
1230 /* Record dyntick-idle state. */ 1244 /* Record dyntick-idle state. */
1231 if (rcu_process_dyntick(rsp, lastcomp, 1245 force_qs_rnp(rsp, dyntick_save_progress_counter);
1232 dyntick_save_progress_counter)) 1246 raw_spin_lock(&rnp->lock); /* irqs already disabled */
1233 goto unlock_ret; 1247 if (rcu_gp_in_progress(rsp))
1234 /* fall into next case. */
1235
1236 case RCU_SAVE_COMPLETED:
1237
1238 /* Update state, record completion counter. */
1239 forcenow = 0;
1240 spin_lock(&rnp->lock);
1241 if (lastcomp + 1 == rsp->gpnum &&
1242 lastcomp == rsp->completed &&
1243 rsp->signaled == signaled) {
1244 rsp->signaled = RCU_FORCE_QS; 1248 rsp->signaled = RCU_FORCE_QS;
1245 rsp->completed_fqs = lastcomp; 1249 break;
1246 forcenow = signaled == RCU_SAVE_COMPLETED;
1247 }
1248 spin_unlock(&rnp->lock);
1249 if (!forcenow)
1250 break;
1251 /* fall into next case. */
1252 1250
1253 case RCU_FORCE_QS: 1251 case RCU_FORCE_QS:
1254 1252
1255 /* Check dyntick-idle state, send IPI to laggarts. */ 1253 /* Check dyntick-idle state, send IPI to laggarts. */
1256 if (rcu_process_dyntick(rsp, rsp->completed_fqs, 1254 raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
1257 rcu_implicit_dynticks_qs)) 1255 force_qs_rnp(rsp, rcu_implicit_dynticks_qs);
1258 goto unlock_ret;
1259 1256
1260 /* Leave state in case more forcing is required. */ 1257 /* Leave state in case more forcing is required. */
1261 1258
1259 raw_spin_lock(&rnp->lock); /* irqs already disabled */
1262 break; 1260 break;
1263 } 1261 }
1264unlock_ret: 1262 rsp->fqs_active = 0;
1265 spin_unlock_irqrestore(&rsp->fqslock, flags); 1263 if (rsp->fqs_need_gp) {
1264 raw_spin_unlock(&rsp->fqslock); /* irqs remain disabled */
1265 rsp->fqs_need_gp = 0;
1266 rcu_start_gp(rsp, flags); /* releases rnp->lock */
1267 return;
1268 }
1269 raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
1270unlock_fqs_ret:
1271 raw_spin_unlock_irqrestore(&rsp->fqslock, flags);
1266} 1272}
1267 1273
1268#else /* #ifdef CONFIG_SMP */ 1274#else /* #ifdef CONFIG_SMP */
@@ -1290,7 +1296,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
1290 * If an RCU GP has gone long enough, go check for dyntick 1296 * If an RCU GP has gone long enough, go check for dyntick
1291 * idle CPUs and, if needed, send resched IPIs. 1297 * idle CPUs and, if needed, send resched IPIs.
1292 */ 1298 */
1293 if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0) 1299 if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies))
1294 force_quiescent_state(rsp, 1); 1300 force_quiescent_state(rsp, 1);
1295 1301
1296 /* 1302 /*
@@ -1304,7 +1310,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
1304 1310
1305 /* Does this CPU require a not-yet-started grace period? */ 1311 /* Does this CPU require a not-yet-started grace period? */
1306 if (cpu_needs_another_gp(rsp, rdp)) { 1312 if (cpu_needs_another_gp(rsp, rdp)) {
1307 spin_lock_irqsave(&rcu_get_root(rsp)->lock, flags); 1313 raw_spin_lock_irqsave(&rcu_get_root(rsp)->lock, flags);
1308 rcu_start_gp(rsp, flags); /* releases above lock */ 1314 rcu_start_gp(rsp, flags); /* releases above lock */
1309 } 1315 }
1310 1316
@@ -1335,6 +1341,9 @@ static void rcu_process_callbacks(struct softirq_action *unused)
1335 * grace-period manipulations above. 1341 * grace-period manipulations above.
1336 */ 1342 */
1337 smp_mb(); /* See above block comment. */ 1343 smp_mb(); /* See above block comment. */
1344
1345 /* If we are last CPU on way to dyntick-idle mode, accelerate it. */
1346 rcu_needs_cpu_flush();
1338} 1347}
1339 1348
1340static void 1349static void
@@ -1369,7 +1378,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
1369 unsigned long nestflag; 1378 unsigned long nestflag;
1370 struct rcu_node *rnp_root = rcu_get_root(rsp); 1379 struct rcu_node *rnp_root = rcu_get_root(rsp);
1371 1380
1372 spin_lock_irqsave(&rnp_root->lock, nestflag); 1381 raw_spin_lock_irqsave(&rnp_root->lock, nestflag);
1373 rcu_start_gp(rsp, nestflag); /* releases rnp_root->lock. */ 1382 rcu_start_gp(rsp, nestflag); /* releases rnp_root->lock. */
1374 } 1383 }
1375 1384
@@ -1387,7 +1396,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
1387 force_quiescent_state(rsp, 0); 1396 force_quiescent_state(rsp, 0);
1388 rdp->n_force_qs_snap = rsp->n_force_qs; 1397 rdp->n_force_qs_snap = rsp->n_force_qs;
1389 rdp->qlen_last_fqs_check = rdp->qlen; 1398 rdp->qlen_last_fqs_check = rdp->qlen;
1390 } else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0) 1399 } else if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies))
1391 force_quiescent_state(rsp, 1); 1400 force_quiescent_state(rsp, 1);
1392 local_irq_restore(flags); 1401 local_irq_restore(flags);
1393} 1402}
@@ -1520,7 +1529,7 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
1520 1529
1521 /* Has an RCU GP gone long enough to send resched IPIs &c? */ 1530 /* Has an RCU GP gone long enough to send resched IPIs &c? */
1522 if (rcu_gp_in_progress(rsp) && 1531 if (rcu_gp_in_progress(rsp) &&
1523 ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)) { 1532 ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies)) {
1524 rdp->n_rp_need_fqs++; 1533 rdp->n_rp_need_fqs++;
1525 return 1; 1534 return 1;
1526 } 1535 }
@@ -1545,10 +1554,9 @@ static int rcu_pending(int cpu)
1545/* 1554/*
1546 * Check to see if any future RCU-related work will need to be done 1555 * Check to see if any future RCU-related work will need to be done
1547 * by the current CPU, even if none need be done immediately, returning 1556 * by the current CPU, even if none need be done immediately, returning
1548 * 1 if so. This function is part of the RCU implementation; it is -not- 1557 * 1 if so.
1549 * an exported member of the RCU API.
1550 */ 1558 */
1551int rcu_needs_cpu(int cpu) 1559static int rcu_needs_cpu_quick_check(int cpu)
1552{ 1560{
1553 /* RCU callbacks either ready or pending? */ 1561 /* RCU callbacks either ready or pending? */
1554 return per_cpu(rcu_sched_data, cpu).nxtlist || 1562 return per_cpu(rcu_sched_data, cpu).nxtlist ||
@@ -1556,21 +1564,6 @@ int rcu_needs_cpu(int cpu)
1556 rcu_preempt_needs_cpu(cpu); 1564 rcu_preempt_needs_cpu(cpu);
1557} 1565}
1558 1566
1559/*
1560 * This function is invoked towards the end of the scheduler's initialization
1561 * process. Before this is called, the idle task might contain
1562 * RCU read-side critical sections (during which time, this idle
1563 * task is booting the system). After this function is called, the
1564 * idle tasks are prohibited from containing RCU read-side critical
1565 * sections.
1566 */
1567void rcu_scheduler_starting(void)
1568{
1569 WARN_ON(num_online_cpus() != 1);
1570 WARN_ON(nr_context_switches() > 0);
1571 rcu_scheduler_active = 1;
1572}
1573
1574static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL}; 1567static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
1575static atomic_t rcu_barrier_cpu_count; 1568static atomic_t rcu_barrier_cpu_count;
1576static DEFINE_MUTEX(rcu_barrier_mutex); 1569static DEFINE_MUTEX(rcu_barrier_mutex);
@@ -1659,7 +1652,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
1659 struct rcu_node *rnp = rcu_get_root(rsp); 1652 struct rcu_node *rnp = rcu_get_root(rsp);
1660 1653
1661 /* Set up local state, ensuring consistent view of global state. */ 1654 /* Set up local state, ensuring consistent view of global state. */
1662 spin_lock_irqsave(&rnp->lock, flags); 1655 raw_spin_lock_irqsave(&rnp->lock, flags);
1663 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo); 1656 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
1664 rdp->nxtlist = NULL; 1657 rdp->nxtlist = NULL;
1665 for (i = 0; i < RCU_NEXT_SIZE; i++) 1658 for (i = 0; i < RCU_NEXT_SIZE; i++)
@@ -1669,7 +1662,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
1669 rdp->dynticks = &per_cpu(rcu_dynticks, cpu); 1662 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
1670#endif /* #ifdef CONFIG_NO_HZ */ 1663#endif /* #ifdef CONFIG_NO_HZ */
1671 rdp->cpu = cpu; 1664 rdp->cpu = cpu;
1672 spin_unlock_irqrestore(&rnp->lock, flags); 1665 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1673} 1666}
1674 1667
1675/* 1668/*
@@ -1687,7 +1680,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
1687 struct rcu_node *rnp = rcu_get_root(rsp); 1680 struct rcu_node *rnp = rcu_get_root(rsp);
1688 1681
1689 /* Set up local state, ensuring consistent view of global state. */ 1682 /* Set up local state, ensuring consistent view of global state. */
1690 spin_lock_irqsave(&rnp->lock, flags); 1683 raw_spin_lock_irqsave(&rnp->lock, flags);
1691 rdp->passed_quiesc = 0; /* We could be racing with new GP, */ 1684 rdp->passed_quiesc = 0; /* We could be racing with new GP, */
1692 rdp->qs_pending = 1; /* so set up to respond to current GP. */ 1685 rdp->qs_pending = 1; /* so set up to respond to current GP. */
1693 rdp->beenonline = 1; /* We have now been online. */ 1686 rdp->beenonline = 1; /* We have now been online. */
@@ -1695,7 +1688,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
1695 rdp->qlen_last_fqs_check = 0; 1688 rdp->qlen_last_fqs_check = 0;
1696 rdp->n_force_qs_snap = rsp->n_force_qs; 1689 rdp->n_force_qs_snap = rsp->n_force_qs;
1697 rdp->blimit = blimit; 1690 rdp->blimit = blimit;
1698 spin_unlock(&rnp->lock); /* irqs remain disabled. */ 1691 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
1699 1692
1700 /* 1693 /*
1701 * A new grace period might start here. If so, we won't be part 1694 * A new grace period might start here. If so, we won't be part
@@ -1703,14 +1696,14 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
1703 */ 1696 */
1704 1697
1705 /* Exclude any attempts to start a new GP on large systems. */ 1698 /* Exclude any attempts to start a new GP on large systems. */
1706 spin_lock(&rsp->onofflock); /* irqs already disabled. */ 1699 raw_spin_lock(&rsp->onofflock); /* irqs already disabled. */
1707 1700
1708 /* Add CPU to rcu_node bitmasks. */ 1701 /* Add CPU to rcu_node bitmasks. */
1709 rnp = rdp->mynode; 1702 rnp = rdp->mynode;
1710 mask = rdp->grpmask; 1703 mask = rdp->grpmask;
1711 do { 1704 do {
1712 /* Exclude any attempts to start a new GP on small systems. */ 1705 /* Exclude any attempts to start a new GP on small systems. */
1713 spin_lock(&rnp->lock); /* irqs already disabled. */ 1706 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
1714 rnp->qsmaskinit |= mask; 1707 rnp->qsmaskinit |= mask;
1715 mask = rnp->grpmask; 1708 mask = rnp->grpmask;
1716 if (rnp == rdp->mynode) { 1709 if (rnp == rdp->mynode) {
@@ -1718,11 +1711,11 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
1718 rdp->completed = rnp->completed; 1711 rdp->completed = rnp->completed;
1719 rdp->passed_quiesc_completed = rnp->completed - 1; 1712 rdp->passed_quiesc_completed = rnp->completed - 1;
1720 } 1713 }
1721 spin_unlock(&rnp->lock); /* irqs already disabled. */ 1714 raw_spin_unlock(&rnp->lock); /* irqs already disabled. */
1722 rnp = rnp->parent; 1715 rnp = rnp->parent;
1723 } while (rnp != NULL && !(rnp->qsmaskinit & mask)); 1716 } while (rnp != NULL && !(rnp->qsmaskinit & mask));
1724 1717
1725 spin_unlock_irqrestore(&rsp->onofflock, flags); 1718 raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
1726} 1719}
1727 1720
1728static void __cpuinit rcu_online_cpu(int cpu) 1721static void __cpuinit rcu_online_cpu(int cpu)
@@ -1806,11 +1799,17 @@ static void __init rcu_init_levelspread(struct rcu_state *rsp)
1806 */ 1799 */
1807static void __init rcu_init_one(struct rcu_state *rsp) 1800static void __init rcu_init_one(struct rcu_state *rsp)
1808{ 1801{
1802 static char *buf[] = { "rcu_node_level_0",
1803 "rcu_node_level_1",
1804 "rcu_node_level_2",
1805 "rcu_node_level_3" }; /* Match MAX_RCU_LVLS */
1809 int cpustride = 1; 1806 int cpustride = 1;
1810 int i; 1807 int i;
1811 int j; 1808 int j;
1812 struct rcu_node *rnp; 1809 struct rcu_node *rnp;
1813 1810
1811 BUILD_BUG_ON(MAX_RCU_LVLS > ARRAY_SIZE(buf)); /* Fix buf[] init! */
1812
1814 /* Initialize the level-tracking arrays. */ 1813 /* Initialize the level-tracking arrays. */
1815 1814
1816 for (i = 1; i < NUM_RCU_LVLS; i++) 1815 for (i = 1; i < NUM_RCU_LVLS; i++)
@@ -1823,8 +1822,9 @@ static void __init rcu_init_one(struct rcu_state *rsp)
1823 cpustride *= rsp->levelspread[i]; 1822 cpustride *= rsp->levelspread[i];
1824 rnp = rsp->level[i]; 1823 rnp = rsp->level[i];
1825 for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) { 1824 for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
1826 spin_lock_init(&rnp->lock); 1825 raw_spin_lock_init(&rnp->lock);
1827 lockdep_set_class(&rnp->lock, &rcu_node_class[i]); 1826 lockdep_set_class_and_name(&rnp->lock,
1827 &rcu_node_class[i], buf[i]);
1828 rnp->gpnum = 0; 1828 rnp->gpnum = 0;
1829 rnp->qsmask = 0; 1829 rnp->qsmask = 0;
1830 rnp->qsmaskinit = 0; 1830 rnp->qsmaskinit = 0;
@@ -1876,7 +1876,7 @@ do { \
1876 1876
1877void __init rcu_init(void) 1877void __init rcu_init(void)
1878{ 1878{
1879 int i; 1879 int cpu;
1880 1880
1881 rcu_bootup_announce(); 1881 rcu_bootup_announce();
1882#ifdef CONFIG_RCU_CPU_STALL_DETECTOR 1882#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
@@ -1896,8 +1896,8 @@ void __init rcu_init(void)
1896 * or the scheduler are operational. 1896 * or the scheduler are operational.
1897 */ 1897 */
1898 cpu_notifier(rcu_cpu_notify, 0); 1898 cpu_notifier(rcu_cpu_notify, 0);
1899 for_each_online_cpu(i) 1899 for_each_online_cpu(cpu)
1900 rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)i); 1900 rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
1901} 1901}
1902 1902
1903#include "rcutree_plugin.h" 1903#include "rcutree_plugin.h"
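
The force_quiescent_state() rewrite above drops the old lastcomp/signaled snapshotting in favor of a simpler handshake: the root rcu_node lock is held across the state machine, fqs_active tells would-be grace-period starters to defer, and fqs_need_gp records that a deferred start must be performed once forcing completes. A condensed, hypothetical rendition of that control flow is sketched below; it is not the kernel function itself, and the single force_qs_rnp() call stands in for the full switch on rsp->signaled.

static void fqs_sketch(struct rcu_state *rsp)
{
        unsigned long flags;
        struct rcu_node *rnp = rcu_get_root(rsp);

        if (!raw_spin_trylock_irqsave(&rsp->fqslock, flags))
                return;                         /* another CPU is already forcing */
        raw_spin_lock(&rnp->lock);              /* irqs already disabled */
        rsp->fqs_active = 1;                    /* GP starters must set fqs_need_gp */

        raw_spin_unlock(&rnp->lock);            /* scan leaves, irqs still disabled */
        force_qs_rnp(rsp, rcu_implicit_dynticks_qs);
        raw_spin_lock(&rnp->lock);

        rsp->fqs_active = 0;
        if (rsp->fqs_need_gp) {                 /* a GP start was deferred to us */
                raw_spin_unlock(&rsp->fqslock); /* irqs remain disabled */
                rsp->fqs_need_gp = 0;
                rcu_start_gp(rsp, flags);       /* releases rnp->lock */
                return;
        }
        raw_spin_unlock(&rnp->lock);
        raw_spin_unlock_irqrestore(&rsp->fqslock, flags);
}
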
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index d2a0046f63b2..1439eb504c22 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -90,12 +90,12 @@ struct rcu_dynticks {
90 * Definition for node within the RCU grace-period-detection hierarchy. 90 * Definition for node within the RCU grace-period-detection hierarchy.
91 */ 91 */
92struct rcu_node { 92struct rcu_node {
93 spinlock_t lock; /* Root rcu_node's lock protects some */ 93 raw_spinlock_t lock; /* Root rcu_node's lock protects some */
94 /* rcu_state fields as well as following. */ 94 /* rcu_state fields as well as following. */
95 long gpnum; /* Current grace period for this node. */ 95 unsigned long gpnum; /* Current grace period for this node. */
96 /* This will either be equal to or one */ 96 /* This will either be equal to or one */
97 /* behind the root rcu_node's gpnum. */ 97 /* behind the root rcu_node's gpnum. */
98 long completed; /* Last grace period completed for this node. */ 98 unsigned long completed; /* Last GP completed for this node. */
99 /* This will either be equal to or one */ 99 /* This will either be equal to or one */
100 /* behind the root rcu_node's gpnum. */ 100 /* behind the root rcu_node's gpnum. */
101 unsigned long qsmask; /* CPUs or groups that need to switch in */ 101 unsigned long qsmask; /* CPUs or groups that need to switch in */
@@ -161,11 +161,11 @@ struct rcu_node {
161/* Per-CPU data for read-copy update. */ 161/* Per-CPU data for read-copy update. */
162struct rcu_data { 162struct rcu_data {
163 /* 1) quiescent-state and grace-period handling : */ 163 /* 1) quiescent-state and grace-period handling : */
164 long completed; /* Track rsp->completed gp number */ 164 unsigned long completed; /* Track rsp->completed gp number */
165 /* in order to detect GP end. */ 165 /* in order to detect GP end. */
166 long gpnum; /* Highest gp number that this CPU */ 166 unsigned long gpnum; /* Highest gp number that this CPU */
167 /* is aware of having started. */ 167 /* is aware of having started. */
168 long passed_quiesc_completed; 168 unsigned long passed_quiesc_completed;
169 /* Value of completed at time of qs. */ 169 /* Value of completed at time of qs. */
170 bool passed_quiesc; /* User-mode/idle loop etc. */ 170 bool passed_quiesc; /* User-mode/idle loop etc. */
171 bool qs_pending; /* Core waits for quiesc state. */ 171 bool qs_pending; /* Core waits for quiesc state. */
@@ -221,14 +221,14 @@ struct rcu_data {
221 unsigned long resched_ipi; /* Sent a resched IPI. */ 221 unsigned long resched_ipi; /* Sent a resched IPI. */
222 222
223 /* 5) __rcu_pending() statistics. */ 223 /* 5) __rcu_pending() statistics. */
224 long n_rcu_pending; /* rcu_pending() calls since boot. */ 224 unsigned long n_rcu_pending; /* rcu_pending() calls since boot. */
225 long n_rp_qs_pending; 225 unsigned long n_rp_qs_pending;
226 long n_rp_cb_ready; 226 unsigned long n_rp_cb_ready;
227 long n_rp_cpu_needs_gp; 227 unsigned long n_rp_cpu_needs_gp;
228 long n_rp_gp_completed; 228 unsigned long n_rp_gp_completed;
229 long n_rp_gp_started; 229 unsigned long n_rp_gp_started;
230 long n_rp_need_fqs; 230 unsigned long n_rp_need_fqs;
231 long n_rp_need_nothing; 231 unsigned long n_rp_need_nothing;
232 232
233 int cpu; 233 int cpu;
234}; 234};
@@ -237,12 +237,11 @@ struct rcu_data {
237#define RCU_GP_IDLE 0 /* No grace period in progress. */ 237#define RCU_GP_IDLE 0 /* No grace period in progress. */
238#define RCU_GP_INIT 1 /* Grace period being initialized. */ 238#define RCU_GP_INIT 1 /* Grace period being initialized. */
239#define RCU_SAVE_DYNTICK 2 /* Need to scan dyntick state. */ 239#define RCU_SAVE_DYNTICK 2 /* Need to scan dyntick state. */
240#define RCU_SAVE_COMPLETED 3 /* Need to save rsp->completed. */ 240#define RCU_FORCE_QS 3 /* Need to force quiescent state. */
241#define RCU_FORCE_QS 4 /* Need to force quiescent state. */
242#ifdef CONFIG_NO_HZ 241#ifdef CONFIG_NO_HZ
243#define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK 242#define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK
244#else /* #ifdef CONFIG_NO_HZ */ 243#else /* #ifdef CONFIG_NO_HZ */
245#define RCU_SIGNAL_INIT RCU_SAVE_COMPLETED 244#define RCU_SIGNAL_INIT RCU_FORCE_QS
246#endif /* #else #ifdef CONFIG_NO_HZ */ 245#endif /* #else #ifdef CONFIG_NO_HZ */
247 246
248#define RCU_JIFFIES_TILL_FORCE_QS 3 /* for rsp->jiffies_force_qs */ 247#define RCU_JIFFIES_TILL_FORCE_QS 3 /* for rsp->jiffies_force_qs */
@@ -256,6 +255,9 @@ struct rcu_data {
256 255
257#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ 256#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
258 257
258#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
259#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))
260
259/* 261/*
260 * RCU global state, including node hierarchy. This hierarchy is 262 * RCU global state, including node hierarchy. This hierarchy is
261 * represented in "heap" form in a dense array. The root (first level) 263 * represented in "heap" form in a dense array. The root (first level)
@@ -277,12 +279,19 @@ struct rcu_state {
277 279
278 u8 signaled ____cacheline_internodealigned_in_smp; 280 u8 signaled ____cacheline_internodealigned_in_smp;
279 /* Force QS state. */ 281 /* Force QS state. */
280 long gpnum; /* Current gp number. */ 282 u8 fqs_active; /* force_quiescent_state() */
281 long completed; /* # of last completed gp. */ 283 /* is running. */
284 u8 fqs_need_gp; /* A CPU was prevented from */
285 /* starting a new grace */
286 /* period because */
287 /* force_quiescent_state() */
288 /* was running. */
289 unsigned long gpnum; /* Current gp number. */
290 unsigned long completed; /* # of last completed gp. */
282 291
283 /* End of fields guarded by root rcu_node's lock. */ 292 /* End of fields guarded by root rcu_node's lock. */
284 293
285 spinlock_t onofflock; /* exclude on/offline and */ 294 raw_spinlock_t onofflock; /* exclude on/offline and */
286 /* starting new GP. Also */ 295 /* starting new GP. Also */
287 /* protects the following */ 296 /* protects the following */
288 /* orphan_cbs fields. */ 297 /* orphan_cbs fields. */
@@ -292,10 +301,8 @@ struct rcu_state {
292 /* going offline. */ 301 /* going offline. */
293 struct rcu_head **orphan_cbs_tail; /* And tail pointer. */ 302 struct rcu_head **orphan_cbs_tail; /* And tail pointer. */
294 long orphan_qlen; /* Number of orphaned cbs. */ 303 long orphan_qlen; /* Number of orphaned cbs. */
295 spinlock_t fqslock; /* Only one task forcing */ 304 raw_spinlock_t fqslock; /* Only one task forcing */
296 /* quiescent states. */ 305 /* quiescent states. */
297 long completed_fqs; /* Value of completed @ snap. */
298 /* Protected by fqslock. */
299 unsigned long jiffies_force_qs; /* Time at which to invoke */ 306 unsigned long jiffies_force_qs; /* Time at which to invoke */
300 /* force_quiescent_state(). */ 307 /* force_quiescent_state(). */
301 unsigned long n_force_qs; /* Number of calls to */ 308 unsigned long n_force_qs; /* Number of calls to */
@@ -319,8 +326,6 @@ struct rcu_state {
319#define RCU_OFL_TASKS_EXP_GP 0x2 /* Tasks blocking expedited */ 326#define RCU_OFL_TASKS_EXP_GP 0x2 /* Tasks blocking expedited */
320 /* GP were moved to root. */ 327 /* GP were moved to root. */
321 328
322#ifdef RCU_TREE_NONCORE
323
324/* 329/*
325 * RCU implementation internal declarations: 330 * RCU implementation internal declarations:
326 */ 331 */
@@ -335,7 +340,7 @@ extern struct rcu_state rcu_preempt_state;
335DECLARE_PER_CPU(struct rcu_data, rcu_preempt_data); 340DECLARE_PER_CPU(struct rcu_data, rcu_preempt_data);
336#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ 341#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
337 342
338#else /* #ifdef RCU_TREE_NONCORE */ 343#ifndef RCU_TREE_NONCORE
339 344
340/* Forward declarations for rcutree_plugin.h */ 345/* Forward declarations for rcutree_plugin.h */
341static void rcu_bootup_announce(void); 346static void rcu_bootup_announce(void);
@@ -347,6 +352,7 @@ static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
347 unsigned long flags); 352 unsigned long flags);
348#endif /* #ifdef CONFIG_HOTPLUG_CPU */ 353#endif /* #ifdef CONFIG_HOTPLUG_CPU */
349#ifdef CONFIG_RCU_CPU_STALL_DETECTOR 354#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
355static void rcu_print_detail_task_stall(struct rcu_state *rsp);
350static void rcu_print_task_stall(struct rcu_node *rnp); 356static void rcu_print_task_stall(struct rcu_node *rnp);
351#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ 357#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
352static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp); 358static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
@@ -367,5 +373,6 @@ static int rcu_preempt_needs_cpu(int cpu);
367static void __cpuinit rcu_preempt_init_percpu_data(int cpu); 373static void __cpuinit rcu_preempt_init_percpu_data(int cpu);
368static void rcu_preempt_send_cbs_to_orphanage(void); 374static void rcu_preempt_send_cbs_to_orphanage(void);
369static void __init __rcu_init_preempt(void); 375static void __init __rcu_init_preempt(void);
376static void rcu_needs_cpu_flush(void);
370 377
371#endif /* #else #ifdef RCU_TREE_NONCORE */ 378#endif /* #ifndef RCU_TREE_NONCORE */
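
The ULONG_CMP_GE()/ULONG_CMP_LT() macros introduced above exist because gpnum, completed, and the jiffies deadlines are now unsigned long, so ordering must be decided modulo wraparound rather than by casting a difference to long. The user-space program below is an illustration only, with invented counter values; it shows that the macros keep giving the intuitive answer when the counter wraps.

#include <limits.h>
#include <stdio.h>

/* Same definitions as in kernel/rcutree.h above. */
#define ULONG_CMP_GE(a, b)      (ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b)      (ULONG_MAX / 2 < (a) - (b))

int main(void)
{
        unsigned long deadline = ULONG_MAX - 1; /* jiffies_force_qs about to wrap */
        unsigned long now = deadline + 5;       /* jiffies, wrapped past the deadline */

        /* Deadline already passed, even though now is numerically smaller. */
        printf("deadline passed: %d\n", ULONG_CMP_LT(deadline, now));   /* 1 */
        printf("still waiting:   %d\n", ULONG_CMP_GE(deadline, now));   /* 0 */
        return 0;
}
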
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 37fbccdf41d5..464ad2cdee00 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -62,6 +62,15 @@ long rcu_batches_completed(void)
62EXPORT_SYMBOL_GPL(rcu_batches_completed); 62EXPORT_SYMBOL_GPL(rcu_batches_completed);
63 63
64/* 64/*
65 * Force a quiescent state for preemptible RCU.
66 */
67void rcu_force_quiescent_state(void)
68{
69 force_quiescent_state(&rcu_preempt_state, 0);
70}
71EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
72
73/*
65 * Record a preemptable-RCU quiescent state for the specified CPU. Note 74 * Record a preemptable-RCU quiescent state for the specified CPU. Note
66 * that this just means that the task currently running on the CPU is 75 * that this just means that the task currently running on the CPU is
67 * not in a quiescent state. There might be any number of tasks blocked 76 * not in a quiescent state. There might be any number of tasks blocked
@@ -102,7 +111,7 @@ static void rcu_preempt_note_context_switch(int cpu)
102 /* Possibly blocking in an RCU read-side critical section. */ 111 /* Possibly blocking in an RCU read-side critical section. */
103 rdp = rcu_preempt_state.rda[cpu]; 112 rdp = rcu_preempt_state.rda[cpu];
104 rnp = rdp->mynode; 113 rnp = rdp->mynode;
105 spin_lock_irqsave(&rnp->lock, flags); 114 raw_spin_lock_irqsave(&rnp->lock, flags);
106 t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED; 115 t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
107 t->rcu_blocked_node = rnp; 116 t->rcu_blocked_node = rnp;
108 117
@@ -123,7 +132,7 @@ static void rcu_preempt_note_context_switch(int cpu)
123 WARN_ON_ONCE(!list_empty(&t->rcu_node_entry)); 132 WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
124 phase = (rnp->gpnum + !(rnp->qsmask & rdp->grpmask)) & 0x1; 133 phase = (rnp->gpnum + !(rnp->qsmask & rdp->grpmask)) & 0x1;
125 list_add(&t->rcu_node_entry, &rnp->blocked_tasks[phase]); 134 list_add(&t->rcu_node_entry, &rnp->blocked_tasks[phase]);
126 spin_unlock_irqrestore(&rnp->lock, flags); 135 raw_spin_unlock_irqrestore(&rnp->lock, flags);
127 } 136 }
128 137
129 /* 138 /*
@@ -180,7 +189,7 @@ static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
180 struct rcu_node *rnp_p; 189 struct rcu_node *rnp_p;
181 190
182 if (rnp->qsmask != 0 || rcu_preempted_readers(rnp)) { 191 if (rnp->qsmask != 0 || rcu_preempted_readers(rnp)) {
183 spin_unlock_irqrestore(&rnp->lock, flags); 192 raw_spin_unlock_irqrestore(&rnp->lock, flags);
184 return; /* Still need more quiescent states! */ 193 return; /* Still need more quiescent states! */
185 } 194 }
186 195
@@ -197,8 +206,8 @@ static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
197 206
198 /* Report up the rest of the hierarchy. */ 207 /* Report up the rest of the hierarchy. */
199 mask = rnp->grpmask; 208 mask = rnp->grpmask;
200 spin_unlock(&rnp->lock); /* irqs remain disabled. */ 209 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
201 spin_lock(&rnp_p->lock); /* irqs already disabled. */ 210 raw_spin_lock(&rnp_p->lock); /* irqs already disabled. */
202 rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags); 211 rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
203} 212}
204 213
@@ -248,10 +257,10 @@ static void rcu_read_unlock_special(struct task_struct *t)
248 */ 257 */
249 for (;;) { 258 for (;;) {
250 rnp = t->rcu_blocked_node; 259 rnp = t->rcu_blocked_node;
251 spin_lock(&rnp->lock); /* irqs already disabled. */ 260 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
252 if (rnp == t->rcu_blocked_node) 261 if (rnp == t->rcu_blocked_node)
253 break; 262 break;
254 spin_unlock(&rnp->lock); /* irqs remain disabled. */ 263 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
255 } 264 }
256 empty = !rcu_preempted_readers(rnp); 265 empty = !rcu_preempted_readers(rnp);
257 empty_exp = !rcu_preempted_readers_exp(rnp); 266 empty_exp = !rcu_preempted_readers_exp(rnp);
@@ -265,7 +274,7 @@ static void rcu_read_unlock_special(struct task_struct *t)
265 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock. 274 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock.
266 */ 275 */
267 if (empty) 276 if (empty)
268 spin_unlock_irqrestore(&rnp->lock, flags); 277 raw_spin_unlock_irqrestore(&rnp->lock, flags);
269 else 278 else
270 rcu_report_unblock_qs_rnp(rnp, flags); 279 rcu_report_unblock_qs_rnp(rnp, flags);
271 280
@@ -295,29 +304,73 @@ void __rcu_read_unlock(void)
295 if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 && 304 if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
296 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special))) 305 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
297 rcu_read_unlock_special(t); 306 rcu_read_unlock_special(t);
307#ifdef CONFIG_PROVE_LOCKING
308 WARN_ON_ONCE(ACCESS_ONCE(t->rcu_read_lock_nesting) < 0);
309#endif /* #ifdef CONFIG_PROVE_LOCKING */
298} 310}
299EXPORT_SYMBOL_GPL(__rcu_read_unlock); 311EXPORT_SYMBOL_GPL(__rcu_read_unlock);
300 312
301#ifdef CONFIG_RCU_CPU_STALL_DETECTOR 313#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
302 314
315#ifdef CONFIG_RCU_CPU_STALL_VERBOSE
316
317/*
318 * Dump detailed information for all tasks blocking the current RCU
319 * grace period on the specified rcu_node structure.
320 */
321static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
322{
323 unsigned long flags;
324 struct list_head *lp;
325 int phase;
326 struct task_struct *t;
327
328 if (rcu_preempted_readers(rnp)) {
329 raw_spin_lock_irqsave(&rnp->lock, flags);
330 phase = rnp->gpnum & 0x1;
331 lp = &rnp->blocked_tasks[phase];
332 list_for_each_entry(t, lp, rcu_node_entry)
333 sched_show_task(t);
334 raw_spin_unlock_irqrestore(&rnp->lock, flags);
335 }
336}
337
338/*
339 * Dump detailed information for all tasks blocking the current RCU
340 * grace period.
341 */
342static void rcu_print_detail_task_stall(struct rcu_state *rsp)
343{
344 struct rcu_node *rnp = rcu_get_root(rsp);
345
346 rcu_print_detail_task_stall_rnp(rnp);
347 rcu_for_each_leaf_node(rsp, rnp)
348 rcu_print_detail_task_stall_rnp(rnp);
349}
350
351#else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */
352
353static void rcu_print_detail_task_stall(struct rcu_state *rsp)
354{
355}
356
357#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */
358
303/* 359/*
304 * Scan the current list of tasks blocked within RCU read-side critical 360 * Scan the current list of tasks blocked within RCU read-side critical
305 * sections, printing out the tid of each. 361 * sections, printing out the tid of each.
306 */ 362 */
307static void rcu_print_task_stall(struct rcu_node *rnp) 363static void rcu_print_task_stall(struct rcu_node *rnp)
308{ 364{
309 unsigned long flags;
310 struct list_head *lp; 365 struct list_head *lp;
311 int phase; 366 int phase;
312 struct task_struct *t; 367 struct task_struct *t;
313 368
314 if (rcu_preempted_readers(rnp)) { 369 if (rcu_preempted_readers(rnp)) {
315 spin_lock_irqsave(&rnp->lock, flags);
316 phase = rnp->gpnum & 0x1; 370 phase = rnp->gpnum & 0x1;
317 lp = &rnp->blocked_tasks[phase]; 371 lp = &rnp->blocked_tasks[phase];
318 list_for_each_entry(t, lp, rcu_node_entry) 372 list_for_each_entry(t, lp, rcu_node_entry)
319 printk(" P%d", t->pid); 373 printk(" P%d", t->pid);
320 spin_unlock_irqrestore(&rnp->lock, flags);
321 } 374 }
322} 375}
323 376
@@ -388,11 +441,11 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
388 lp_root = &rnp_root->blocked_tasks[i]; 441 lp_root = &rnp_root->blocked_tasks[i];
389 while (!list_empty(lp)) { 442 while (!list_empty(lp)) {
390 tp = list_entry(lp->next, typeof(*tp), rcu_node_entry); 443 tp = list_entry(lp->next, typeof(*tp), rcu_node_entry);
391 spin_lock(&rnp_root->lock); /* irqs already disabled */ 444 raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
392 list_del(&tp->rcu_node_entry); 445 list_del(&tp->rcu_node_entry);
393 tp->rcu_blocked_node = rnp_root; 446 tp->rcu_blocked_node = rnp_root;
394 list_add(&tp->rcu_node_entry, lp_root); 447 list_add(&tp->rcu_node_entry, lp_root);
395 spin_unlock(&rnp_root->lock); /* irqs remain disabled */ 448 raw_spin_unlock(&rnp_root->lock); /* irqs remain disabled */
396 } 449 }
397 } 450 }
398 return retval; 451 return retval;
@@ -516,7 +569,7 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
516 unsigned long flags; 569 unsigned long flags;
517 unsigned long mask; 570 unsigned long mask;
518 571
519 spin_lock_irqsave(&rnp->lock, flags); 572 raw_spin_lock_irqsave(&rnp->lock, flags);
520 for (;;) { 573 for (;;) {
521 if (!sync_rcu_preempt_exp_done(rnp)) 574 if (!sync_rcu_preempt_exp_done(rnp))
522 break; 575 break;
@@ -525,12 +578,12 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
525 break; 578 break;
526 } 579 }
527 mask = rnp->grpmask; 580 mask = rnp->grpmask;
528 spin_unlock(&rnp->lock); /* irqs remain disabled */ 581 raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
529 rnp = rnp->parent; 582 rnp = rnp->parent;
530 spin_lock(&rnp->lock); /* irqs already disabled */ 583 raw_spin_lock(&rnp->lock); /* irqs already disabled */
531 rnp->expmask &= ~mask; 584 rnp->expmask &= ~mask;
532 } 585 }
533 spin_unlock_irqrestore(&rnp->lock, flags); 586 raw_spin_unlock_irqrestore(&rnp->lock, flags);
534} 587}
535 588
536/* 589/*
@@ -545,11 +598,11 @@ sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
545{ 598{
546 int must_wait; 599 int must_wait;
547 600
548 spin_lock(&rnp->lock); /* irqs already disabled */ 601 raw_spin_lock(&rnp->lock); /* irqs already disabled */
549 list_splice_init(&rnp->blocked_tasks[0], &rnp->blocked_tasks[2]); 602 list_splice_init(&rnp->blocked_tasks[0], &rnp->blocked_tasks[2]);
550 list_splice_init(&rnp->blocked_tasks[1], &rnp->blocked_tasks[3]); 603 list_splice_init(&rnp->blocked_tasks[1], &rnp->blocked_tasks[3]);
551 must_wait = rcu_preempted_readers_exp(rnp); 604 must_wait = rcu_preempted_readers_exp(rnp);
552 spin_unlock(&rnp->lock); /* irqs remain disabled */ 605 raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
553 if (!must_wait) 606 if (!must_wait)
554 rcu_report_exp_rnp(rsp, rnp); 607 rcu_report_exp_rnp(rsp, rnp);
555} 608}
@@ -594,13 +647,13 @@ void synchronize_rcu_expedited(void)
594 /* force all RCU readers onto blocked_tasks[]. */ 647 /* force all RCU readers onto blocked_tasks[]. */
595 synchronize_sched_expedited(); 648 synchronize_sched_expedited();
596 649
597 spin_lock_irqsave(&rsp->onofflock, flags); 650 raw_spin_lock_irqsave(&rsp->onofflock, flags);
598 651
599 /* Initialize ->expmask for all non-leaf rcu_node structures. */ 652 /* Initialize ->expmask for all non-leaf rcu_node structures. */
600 rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) { 653 rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
601 spin_lock(&rnp->lock); /* irqs already disabled. */ 654 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
602 rnp->expmask = rnp->qsmaskinit; 655 rnp->expmask = rnp->qsmaskinit;
603 spin_unlock(&rnp->lock); /* irqs remain disabled. */ 656 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
604 } 657 }
605 658
606 /* Snapshot current state of ->blocked_tasks[] lists. */ 659 /* Snapshot current state of ->blocked_tasks[] lists. */
@@ -609,7 +662,7 @@ void synchronize_rcu_expedited(void)
609 if (NUM_RCU_NODES > 1) 662 if (NUM_RCU_NODES > 1)
610 sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp)); 663 sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));
611 664
612 spin_unlock_irqrestore(&rsp->onofflock, flags); 665 raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
613 666
614 /* Wait for snapshotted ->blocked_tasks[] lists to drain. */ 667 /* Wait for snapshotted ->blocked_tasks[] lists to drain. */
615 rnp = rcu_get_root(rsp); 668 rnp = rcu_get_root(rsp);
@@ -713,6 +766,16 @@ long rcu_batches_completed(void)
713EXPORT_SYMBOL_GPL(rcu_batches_completed); 766EXPORT_SYMBOL_GPL(rcu_batches_completed);
714 767
715/* 768/*
769 * Force a quiescent state for RCU, which, because there is no preemptible
770 * RCU, becomes the same as rcu-sched.
771 */
772void rcu_force_quiescent_state(void)
773{
774 rcu_sched_force_quiescent_state();
775}
776EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
777
778/*
716 * Because preemptable RCU does not exist, we never have to check for 779 * Because preemptable RCU does not exist, we never have to check for
717 * CPUs being in quiescent states. 780 * CPUs being in quiescent states.
718 */ 781 */
@@ -734,7 +797,7 @@ static int rcu_preempted_readers(struct rcu_node *rnp)
734/* Because preemptible RCU does not exist, no quieting of tasks. */ 797/* Because preemptible RCU does not exist, no quieting of tasks. */
735static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags) 798static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
736{ 799{
737 spin_unlock_irqrestore(&rnp->lock, flags); 800 raw_spin_unlock_irqrestore(&rnp->lock, flags);
738} 801}
739 802
740#endif /* #ifdef CONFIG_HOTPLUG_CPU */ 803#endif /* #ifdef CONFIG_HOTPLUG_CPU */
@@ -745,6 +808,14 @@ static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
745 * Because preemptable RCU does not exist, we never have to check for 808 * Because preemptable RCU does not exist, we never have to check for
746 * tasks blocked within RCU read-side critical sections. 809 * tasks blocked within RCU read-side critical sections.
747 */ 810 */
811static void rcu_print_detail_task_stall(struct rcu_state *rsp)
812{
813}
814
815/*
816 * Because preemptable RCU does not exist, we never have to check for
817 * tasks blocked within RCU read-side critical sections.
818 */
748static void rcu_print_task_stall(struct rcu_node *rnp) 819static void rcu_print_task_stall(struct rcu_node *rnp)
749{ 820{
750} 821}
@@ -884,3 +955,113 @@ static void __init __rcu_init_preempt(void)
884} 955}
885 956
886#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */ 957#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
958
959#if !defined(CONFIG_RCU_FAST_NO_HZ)
960
961/*
962 * Check to see if any future RCU-related work will need to be done
963 * by the current CPU, even if none need be done immediately, returning
964 * 1 if so. This function is part of the RCU implementation; it is -not-
965 * an exported member of the RCU API.
966 *
967 * Because we have preemptible RCU, just check whether this CPU needs
968 * any flavor of RCU. Do not chew up lots of CPU cycles with preemption
969 * disabled in a most-likely vain attempt to cause RCU not to need this CPU.
970 */
971int rcu_needs_cpu(int cpu)
972{
973 return rcu_needs_cpu_quick_check(cpu);
974}
975
976/*
977 * Check to see if we need to continue a callback-flush operations to
978 * allow the last CPU to enter dyntick-idle mode. But fast dyntick-idle
979 * entry is not configured, so we never do need to.
980 */
981static void rcu_needs_cpu_flush(void)
982{
983}
984
985#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
986
987#define RCU_NEEDS_CPU_FLUSHES 5
988static DEFINE_PER_CPU(int, rcu_dyntick_drain);
989static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
990
991/*
992 * Check to see if any future RCU-related work will need to be done
993 * by the current CPU, even if none need be done immediately, returning
994 * 1 if so. This function is part of the RCU implementation; it is -not-
995 * an exported member of the RCU API.
996 *
997 * Because we are not supporting preemptible RCU, attempt to accelerate
998 * any current grace periods so that RCU no longer needs this CPU, but
999 * only if all other CPUs are already in dynticks-idle mode. This will
1000 * allow the CPU cores to be powered down immediately, as opposed to after
1001 * waiting many milliseconds for grace periods to elapse.
1002 *
1003 * Because it is not legal to invoke rcu_process_callbacks() with irqs
1004 * disabled, we do one pass of force_quiescent_state(), then do a
1005 * raise_softirq() to cause rcu_process_callbacks() to be invoked later.
1006 * The per-cpu rcu_dyntick_drain variable controls the sequencing.
1007 */
1008int rcu_needs_cpu(int cpu)
1009{
1010 int c = 0;
1011 int thatcpu;
1012
1013 /* Don't bother unless we are the last non-dyntick-idle CPU. */
1014 for_each_cpu_not(thatcpu, nohz_cpu_mask)
1015 if (thatcpu != cpu) {
1016 per_cpu(rcu_dyntick_drain, cpu) = 0;
1017 per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
1018 return rcu_needs_cpu_quick_check(cpu);
1019 }
1020
1021 /* Check and update the rcu_dyntick_drain sequencing. */
1022 if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
1023 /* First time through, initialize the counter. */
1024 per_cpu(rcu_dyntick_drain, cpu) = RCU_NEEDS_CPU_FLUSHES;
1025 } else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
1026 /* We have hit the limit, so time to give up. */
1027 per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
1028 return rcu_needs_cpu_quick_check(cpu);
1029 }
1030
1031 /* Do one step pushing remaining RCU callbacks through. */
1032 if (per_cpu(rcu_sched_data, cpu).nxtlist) {
1033 rcu_sched_qs(cpu);
1034 force_quiescent_state(&rcu_sched_state, 0);
1035 c = c || per_cpu(rcu_sched_data, cpu).nxtlist;
1036 }
1037 if (per_cpu(rcu_bh_data, cpu).nxtlist) {
1038 rcu_bh_qs(cpu);
1039 force_quiescent_state(&rcu_bh_state, 0);
1040 c = c || per_cpu(rcu_bh_data, cpu).nxtlist;
1041 }
1042
1043 /* If RCU callbacks are still pending, RCU still needs this CPU. */
1044 if (c) {
1045 raise_softirq(RCU_SOFTIRQ);
1046 per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
1047 }
1048 return c;
1049}
1050
1051/*
1052 * Check to see if we need to continue a callback-flush operations to
1053 * allow the last CPU to enter dyntick-idle mode.
1054 */
1055static void rcu_needs_cpu_flush(void)
1056{
1057 int cpu = smp_processor_id();
1058 unsigned long flags;
1059
1060 if (per_cpu(rcu_dyntick_drain, cpu) <= 0)
1061 return;
1062 local_irq_save(flags);
1063 (void)rcu_needs_cpu(cpu);
1064 local_irq_restore(flags);
1065}
1066
1067#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
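
Both the preemptible and non-preemptible variants above now export rcu_force_quiescent_state(). A hypothetical caller is sketched below: a subsystem that has just queued a burst of callbacks nudges RCU into scanning for quiescent states instead of waiting out the force-QS holdoff. The burst_item structure and burst_free() callback are invented for the example; only rcu_force_quiescent_state(), call_rcu(), and the rcu_head plumbing come from the kernel.

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct burst_item {
        struct rcu_head rcu;
        /* payload ... */
};

static void burst_free(struct rcu_head *head)
{
        kfree(container_of(head, struct burst_item, rcu));
}

static void retire_burst(struct burst_item **items, int n)
{
        int i;

        for (i = 0; i < n; i++)
                call_rcu(&items[i]->rcu, burst_free);
        rcu_force_quiescent_state();    /* prod the QS scan; does not wait */
}
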
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index 9d2c88423b31..d45db2e35d27 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -50,7 +50,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
50{ 50{
51 if (!rdp->beenonline) 51 if (!rdp->beenonline)
52 return; 52 return;
53 seq_printf(m, "%3d%cc=%ld g=%ld pq=%d pqc=%ld qp=%d", 53 seq_printf(m, "%3d%cc=%lu g=%lu pq=%d pqc=%lu qp=%d",
54 rdp->cpu, 54 rdp->cpu,
55 cpu_is_offline(rdp->cpu) ? '!' : ' ', 55 cpu_is_offline(rdp->cpu) ? '!' : ' ',
56 rdp->completed, rdp->gpnum, 56 rdp->completed, rdp->gpnum,
@@ -105,7 +105,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
105{ 105{
106 if (!rdp->beenonline) 106 if (!rdp->beenonline)
107 return; 107 return;
108 seq_printf(m, "%d,%s,%ld,%ld,%d,%ld,%d", 108 seq_printf(m, "%d,%s,%lu,%lu,%d,%lu,%d",
109 rdp->cpu, 109 rdp->cpu,
110 cpu_is_offline(rdp->cpu) ? "\"N\"" : "\"Y\"", 110 cpu_is_offline(rdp->cpu) ? "\"N\"" : "\"Y\"",
111 rdp->completed, rdp->gpnum, 111 rdp->completed, rdp->gpnum,
@@ -155,13 +155,13 @@ static const struct file_operations rcudata_csv_fops = {
155 155
156static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp) 156static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
157{ 157{
158 long gpnum; 158 unsigned long gpnum;
159 int level = 0; 159 int level = 0;
160 int phase; 160 int phase;
161 struct rcu_node *rnp; 161 struct rcu_node *rnp;
162 162
163 gpnum = rsp->gpnum; 163 gpnum = rsp->gpnum;
164 seq_printf(m, "c=%ld g=%ld s=%d jfq=%ld j=%x " 164 seq_printf(m, "c=%lu g=%lu s=%d jfq=%ld j=%x "
165 "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld\n", 165 "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld\n",
166 rsp->completed, gpnum, rsp->signaled, 166 rsp->completed, gpnum, rsp->signaled,
167 (long)(rsp->jiffies_force_qs - jiffies), 167 (long)(rsp->jiffies_force_qs - jiffies),
@@ -215,12 +215,12 @@ static const struct file_operations rcuhier_fops = {
215static int show_rcugp(struct seq_file *m, void *unused) 215static int show_rcugp(struct seq_file *m, void *unused)
216{ 216{
217#ifdef CONFIG_TREE_PREEMPT_RCU 217#ifdef CONFIG_TREE_PREEMPT_RCU
218 seq_printf(m, "rcu_preempt: completed=%ld gpnum=%ld\n", 218 seq_printf(m, "rcu_preempt: completed=%ld gpnum=%lu\n",
219 rcu_preempt_state.completed, rcu_preempt_state.gpnum); 219 rcu_preempt_state.completed, rcu_preempt_state.gpnum);
220#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ 220#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
221 seq_printf(m, "rcu_sched: completed=%ld gpnum=%ld\n", 221 seq_printf(m, "rcu_sched: completed=%ld gpnum=%lu\n",
222 rcu_sched_state.completed, rcu_sched_state.gpnum); 222 rcu_sched_state.completed, rcu_sched_state.gpnum);
223 seq_printf(m, "rcu_bh: completed=%ld gpnum=%ld\n", 223 seq_printf(m, "rcu_bh: completed=%ld gpnum=%lu\n",
224 rcu_bh_state.completed, rcu_bh_state.gpnum); 224 rcu_bh_state.completed, rcu_bh_state.gpnum);
225 return 0; 225 return 0;
226} 226}
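
The %ld to %lu conversions above track the counters' switch to unsigned long; once gpnum and friends are allowed to use their full unsigned range, printing them as signed would show bogus negative values near wraparound. A trivial user-space illustration, with an invented value and a typical LP64 machine assumed:

#include <limits.h>
#include <stdio.h>

int main(void)
{
        unsigned long gpnum = ULONG_MAX - 2;    /* counter close to wrapping */

        printf("as %%ld: %ld\n", (long)gpnum);  /* prints -3 on LP64 */
        printf("as %%lu: %lu\n", gpnum);        /* prints 18446744073709551613 */
        return 0;
}
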
diff --git a/kernel/sched.c b/kernel/sched.c
index 3a8fb30a91b1..3218f5213717 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -645,6 +645,11 @@ static inline int cpu_of(struct rq *rq)
645#endif 645#endif
646} 646}
647 647
648#define rcu_dereference_check_sched_domain(p) \
649 rcu_dereference_check((p), \
650 rcu_read_lock_sched_held() || \
651 lockdep_is_held(&sched_domains_mutex))
652
648/* 653/*
649 * The domain tree (rq->sd) is protected by RCU's quiescent state transition. 654 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
650 * See detach_destroy_domains: synchronize_sched for details. 655 * See detach_destroy_domains: synchronize_sched for details.
@@ -653,7 +658,7 @@ static inline int cpu_of(struct rq *rq)
653 * preempt-disabled sections. 658 * preempt-disabled sections.
654 */ 659 */
655#define for_each_domain(cpu, __sd) \ 660#define for_each_domain(cpu, __sd) \
656 for (__sd = rcu_dereference(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent) 661 for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
657 662
658#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu))) 663#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
659#define this_rq() (&__get_cpu_var(runqueues)) 664#define this_rq() (&__get_cpu_var(runqueues))
@@ -1531,7 +1536,7 @@ static unsigned long target_load(int cpu, int type)
1531 1536
1532static struct sched_group *group_of(int cpu) 1537static struct sched_group *group_of(int cpu)
1533{ 1538{
1534 struct sched_domain *sd = rcu_dereference(cpu_rq(cpu)->sd); 1539 struct sched_domain *sd = rcu_dereference_sched(cpu_rq(cpu)->sd);
1535 1540
1536 if (!sd) 1541 if (!sd)
1537 return NULL; 1542 return NULL;
@@ -4888,7 +4893,7 @@ static void run_rebalance_domains(struct softirq_action *h)
4888 4893
4889static inline int on_null_domain(int cpu) 4894static inline int on_null_domain(int cpu)
4890{ 4895{
4891 return !rcu_dereference(cpu_rq(cpu)->sd); 4896 return !rcu_dereference_sched(cpu_rq(cpu)->sd);
4892} 4897}
4893 4898
4894/* 4899/*
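
The sched.c hunk above is the canonical use of the new rcu_dereference_check(): encode, as a lockdep expression, every context in which the pointer may legitimately be read. A minimal sketch of the same pattern for an invented subsystem follows; my_dev, cur_dev, and devices_mutex are illustrative names, while rcu_dereference_check(), rcu_read_lock_held(), and lockdep_is_held() are the primitives this series adds or relies on.

#include <linux/rcupdate.h>
#include <linux/mutex.h>

struct my_dev {
        int id;
};

static DEFINE_MUTEX(devices_mutex);
static struct my_dev *cur_dev;          /* updates are made under devices_mutex */

/* Legal to read under rcu_read_lock() or while holding devices_mutex. */
#define cur_dev_dereference() \
        rcu_dereference_check(cur_dev, \
                              rcu_read_lock_held() || \
                              lockdep_is_held(&devices_mutex))

static int cur_dev_id(void)
{
        struct my_dev *d;
        int id = -1;

        rcu_read_lock();
        d = cur_dev_dereference();      /* no PROVE_RCU splat: condition holds */
        if (d)
                id = d->id;
        rcu_read_unlock();
        return id;
}
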
diff --git a/kernel/srcu.c b/kernel/srcu.c
index 818d7d9aa03c..bde4295774c8 100644
--- a/kernel/srcu.c
+++ b/kernel/srcu.c
@@ -34,6 +34,30 @@
34#include <linux/smp.h> 34#include <linux/smp.h>
35#include <linux/srcu.h> 35#include <linux/srcu.h>
36 36
37static int init_srcu_struct_fields(struct srcu_struct *sp)
38{
39 sp->completed = 0;
40 mutex_init(&sp->mutex);
41 sp->per_cpu_ref = alloc_percpu(struct srcu_struct_array);
42 return sp->per_cpu_ref ? 0 : -ENOMEM;
43}
44
45#ifdef CONFIG_DEBUG_LOCK_ALLOC
46
47int __init_srcu_struct(struct srcu_struct *sp, const char *name,
48 struct lock_class_key *key)
49{
50#ifdef CONFIG_DEBUG_LOCK_ALLOC
51 /* Don't re-initialize a lock while it is held. */
52 debug_check_no_locks_freed((void *)sp, sizeof(*sp));
53 lockdep_init_map(&sp->dep_map, name, key, 0);
54#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
55 return init_srcu_struct_fields(sp);
56}
57EXPORT_SYMBOL_GPL(__init_srcu_struct);
58
59#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
60
37/** 61/**
38 * init_srcu_struct - initialize a sleep-RCU structure 62 * init_srcu_struct - initialize a sleep-RCU structure
39 * @sp: structure to initialize. 63 * @sp: structure to initialize.
@@ -44,13 +68,12 @@
44 */ 68 */
45int init_srcu_struct(struct srcu_struct *sp) 69int init_srcu_struct(struct srcu_struct *sp)
46{ 70{
47 sp->completed = 0; 71 return init_srcu_struct_fields(sp);
48 mutex_init(&sp->mutex);
49 sp->per_cpu_ref = alloc_percpu(struct srcu_struct_array);
50 return (sp->per_cpu_ref ? 0 : -ENOMEM);
51} 72}
52EXPORT_SYMBOL_GPL(init_srcu_struct); 73EXPORT_SYMBOL_GPL(init_srcu_struct);
53 74
75#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
76
54/* 77/*
55 * srcu_readers_active_idx -- returns approximate number of readers 78 * srcu_readers_active_idx -- returns approximate number of readers
56 * active on the specified rank of per-CPU counters. 79 * active on the specified rank of per-CPU counters.
@@ -100,15 +123,12 @@ void cleanup_srcu_struct(struct srcu_struct *sp)
100} 123}
101EXPORT_SYMBOL_GPL(cleanup_srcu_struct); 124EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
102 125
103/** 126/*
104 * srcu_read_lock - register a new reader for an SRCU-protected structure.
105 * @sp: srcu_struct in which to register the new reader.
106 *
107 * Counts the new reader in the appropriate per-CPU element of the 127 * Counts the new reader in the appropriate per-CPU element of the
108 * srcu_struct. Must be called from process context. 128 * srcu_struct. Must be called from process context.
109 * Returns an index that must be passed to the matching srcu_read_unlock(). 129 * Returns an index that must be passed to the matching srcu_read_unlock().
110 */ 130 */
111int srcu_read_lock(struct srcu_struct *sp) 131int __srcu_read_lock(struct srcu_struct *sp)
112{ 132{
113 int idx; 133 int idx;
114 134
@@ -120,31 +140,27 @@ int srcu_read_lock(struct srcu_struct *sp)
120 preempt_enable(); 140 preempt_enable();
121 return idx; 141 return idx;
122} 142}
123EXPORT_SYMBOL_GPL(srcu_read_lock); 143EXPORT_SYMBOL_GPL(__srcu_read_lock);
124 144
125/** 145/*
126 * srcu_read_unlock - unregister a old reader from an SRCU-protected structure.
127 * @sp: srcu_struct in which to unregister the old reader.
128 * @idx: return value from corresponding srcu_read_lock().
129 *
130 * Removes the count for the old reader from the appropriate per-CPU 146 * Removes the count for the old reader from the appropriate per-CPU
131 * element of the srcu_struct. Note that this may well be a different 147 * element of the srcu_struct. Note that this may well be a different
132 * CPU than that which was incremented by the corresponding srcu_read_lock(). 148 * CPU than that which was incremented by the corresponding srcu_read_lock().
133 * Must be called from process context. 149 * Must be called from process context.
134 */ 150 */
135void srcu_read_unlock(struct srcu_struct *sp, int idx) 151void __srcu_read_unlock(struct srcu_struct *sp, int idx)
136{ 152{
137 preempt_disable(); 153 preempt_disable();
138 srcu_barrier(); /* ensure compiler won't misorder critical section. */ 154 srcu_barrier(); /* ensure compiler won't misorder critical section. */
139 per_cpu_ptr(sp->per_cpu_ref, smp_processor_id())->c[idx]--; 155 per_cpu_ptr(sp->per_cpu_ref, smp_processor_id())->c[idx]--;
140 preempt_enable(); 156 preempt_enable();
141} 157}
142EXPORT_SYMBOL_GPL(srcu_read_unlock); 158EXPORT_SYMBOL_GPL(__srcu_read_unlock);
143 159
144/* 160/*
145 * Helper function for synchronize_srcu() and synchronize_srcu_expedited(). 161 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
146 */ 162 */
147void __synchronize_srcu(struct srcu_struct *sp, void (*sync_func)(void)) 163static void __synchronize_srcu(struct srcu_struct *sp, void (*sync_func)(void))
148{ 164{
149 int idx; 165 int idx;
150 166
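
The srcu.c changes above split reader entry/exit into __srcu_read_lock()/__srcu_read_unlock() and add a lockdep-aware __init_srcu_struct(); callers keep using the familiar names, which become wrappers in include/linux/srcu.h (not shown here) that supply a lock_class_key when CONFIG_DEBUG_LOCK_ALLOC is set. The usage sketch below is hypothetical: struct cfg, cfg_srcu, and the single-updater assumption are invented, and it assumes the srcu_dereference() helper added elsewhere in this series.

#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/srcu.h>

struct cfg {
        int threshold;
};

static struct srcu_struct cfg_srcu;
static struct cfg *cur_cfg;

static int cfg_read_threshold(void)
{
        struct cfg *c;
        int idx, val = 0;

        idx = srcu_read_lock(&cfg_srcu);        /* readers may sleep under SRCU */
        c = srcu_dereference(cur_cfg, &cfg_srcu);
        if (c)
                val = c->threshold;
        srcu_read_unlock(&cfg_srcu, idx);
        return val;
}

static void cfg_replace(struct cfg *newcfg)
{
        struct cfg *old = cur_cfg;              /* single updater assumed */

        rcu_assign_pointer(cur_cfg, newcfg);
        synchronize_srcu(&cfg_srcu);            /* wait out pre-existing readers */
        kfree(old);
}

static int __init cfg_setup(void)
{
        return init_srcu_struct(&cfg_srcu);     /* lockdep key comes from the macro */
}
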
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index d62e3cdab357..5e3407d997b2 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -499,6 +499,18 @@ config PROVE_LOCKING
499 499
500 For more details, see Documentation/lockdep-design.txt. 500 For more details, see Documentation/lockdep-design.txt.
501 501
502config PROVE_RCU
503 bool "RCU debugging: prove RCU correctness"
504 depends on PROVE_LOCKING
505 default n
506 help
507 This feature enables lockdep extensions that check for correct
508 use of RCU APIs. This is currently under development. Say Y
509 if you want to debug RCU usage or help work on the PROVE_RCU
510 feature.
511
512 Say N if you are unsure.
513
502config LOCKDEP 514config LOCKDEP
503 bool 515 bool
504 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT 516 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
@@ -765,10 +777,22 @@ config RCU_CPU_STALL_DETECTOR
765 CPUs are delaying the current grace period, but only when 777 CPUs are delaying the current grace period, but only when
766 the grace period extends for excessive time periods. 778 the grace period extends for excessive time periods.
767 779
768 Say Y if you want RCU to perform such checks. 780 Say N if you want to disable such checks.
781
782 Say Y if you are unsure.
783
784config RCU_CPU_STALL_VERBOSE
785 bool "Print additional per-task information for RCU_CPU_STALL_DETECTOR"
786 depends on RCU_CPU_STALL_DETECTOR && TREE_PREEMPT_RCU
787 default n
788 help
789 This option causes RCU to printk detailed per-task information
790 for any tasks that are stalling the current RCU grace period.
769 791
770 Say N if you are unsure. 792 Say N if you are unsure.
771 793
794 Say Y if you want to enable such checks.
795
772config KPROBES_SANITY_TEST 796config KPROBES_SANITY_TEST
773 bool "Kprobes sanity tests" 797 bool "Kprobes sanity tests"
774 depends on DEBUG_KERNEL 798 depends on DEBUG_KERNEL
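
Note: the new PROVE_RCU option is what drives most of the conversions in this patch. With it enabled, lockdep checks each rcu_dereference()-family call against the conditions under which the pointer may legally be read; code whose protection is something other than a bare rcu_read_lock() states that condition explicitly via rcu_dereference_check(), as several hunks below do. A hedged sketch of the idiom; struct demo_node, demo_lock and demo_head are illustrative:

    /* Illustrative only: a pointer readable under RCU or under demo_lock. */
    #include <linux/rcupdate.h>
    #include <linux/spinlock.h>

    struct demo_node { int value; };

    static DEFINE_SPINLOCK(demo_lock);
    static struct demo_node *demo_head;

    static int demo_read_value(void)
    {
            struct demo_node *p;
            int val = -1;

            rcu_read_lock();
            /*
             * With CONFIG_PROVE_RCU=y, lockdep complains unless we are in
             * an RCU read-side critical section or demo_lock is held.
             */
            p = rcu_dereference_check(demo_head,
                                      rcu_read_lock_held() ||
                                      lockdep_is_held(&demo_lock));
            if (p)
                    val = p->value;
            rcu_read_unlock();
            return val;
    }
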
diff --git a/lib/debug_locks.c b/lib/debug_locks.c
index bc3b11731b9c..5bf0020b9248 100644
--- a/lib/debug_locks.c
+++ b/lib/debug_locks.c
@@ -23,6 +23,7 @@
23 * shut up after that. 23 * shut up after that.
24 */ 24 */
25int debug_locks = 1; 25int debug_locks = 1;
26EXPORT_SYMBOL_GPL(debug_locks);
26 27
27/* 28/*
28 * The locking-testsuite uses <debug_locks_silent> to get a 29 * The locking-testsuite uses <debug_locks_silent> to get a
diff --git a/lib/idr.c b/lib/idr.c
index 0dc782216d4b..2eb1dca03681 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -504,7 +504,7 @@ void *idr_find(struct idr *idp, int id)
504 int n; 504 int n;
505 struct idr_layer *p; 505 struct idr_layer *p;
506 506
507 p = rcu_dereference(idp->top); 507 p = rcu_dereference_raw(idp->top);
508 if (!p) 508 if (!p)
509 return NULL; 509 return NULL;
510 n = (p->layer+1) * IDR_BITS; 510 n = (p->layer+1) * IDR_BITS;
@@ -519,7 +519,7 @@ void *idr_find(struct idr *idp, int id)
519 while (n > 0 && p) { 519 while (n > 0 && p) {
520 n -= IDR_BITS; 520 n -= IDR_BITS;
521 BUG_ON(n != p->layer*IDR_BITS); 521 BUG_ON(n != p->layer*IDR_BITS);
522 p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]); 522 p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
523 } 523 }
524 return((void *)p); 524 return((void *)p);
525} 525}
@@ -552,7 +552,7 @@ int idr_for_each(struct idr *idp,
552 struct idr_layer **paa = &pa[0]; 552 struct idr_layer **paa = &pa[0];
553 553
554 n = idp->layers * IDR_BITS; 554 n = idp->layers * IDR_BITS;
555 p = rcu_dereference(idp->top); 555 p = rcu_dereference_raw(idp->top);
556 max = 1 << n; 556 max = 1 << n;
557 557
558 id = 0; 558 id = 0;
@@ -560,7 +560,7 @@ int idr_for_each(struct idr *idp,
560 while (n > 0 && p) { 560 while (n > 0 && p) {
561 n -= IDR_BITS; 561 n -= IDR_BITS;
562 *paa++ = p; 562 *paa++ = p;
563 p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]); 563 p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
564 } 564 }
565 565
566 if (p) { 566 if (p) {
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 92cdd9936e3d..6b9670d6bbf9 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -364,7 +364,7 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root,
364 unsigned int height, shift; 364 unsigned int height, shift;
365 struct radix_tree_node *node, **slot; 365 struct radix_tree_node *node, **slot;
366 366
367 node = rcu_dereference(root->rnode); 367 node = rcu_dereference_raw(root->rnode);
368 if (node == NULL) 368 if (node == NULL)
369 return NULL; 369 return NULL;
370 370
@@ -384,7 +384,7 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root,
384 do { 384 do {
385 slot = (struct radix_tree_node **) 385 slot = (struct radix_tree_node **)
386 (node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK)); 386 (node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK));
387 node = rcu_dereference(*slot); 387 node = rcu_dereference_raw(*slot);
388 if (node == NULL) 388 if (node == NULL)
389 return NULL; 389 return NULL;
390 390
@@ -568,7 +568,7 @@ int radix_tree_tag_get(struct radix_tree_root *root,
568 if (!root_tag_get(root, tag)) 568 if (!root_tag_get(root, tag))
569 return 0; 569 return 0;
570 570
571 node = rcu_dereference(root->rnode); 571 node = rcu_dereference_raw(root->rnode);
572 if (node == NULL) 572 if (node == NULL)
573 return 0; 573 return 0;
574 574
@@ -602,7 +602,7 @@ int radix_tree_tag_get(struct radix_tree_root *root,
602 BUG_ON(ret && saw_unset_tag); 602 BUG_ON(ret && saw_unset_tag);
603 return !!ret; 603 return !!ret;
604 } 604 }
605 node = rcu_dereference(node->slots[offset]); 605 node = rcu_dereference_raw(node->slots[offset]);
606 shift -= RADIX_TREE_MAP_SHIFT; 606 shift -= RADIX_TREE_MAP_SHIFT;
607 height--; 607 height--;
608 } 608 }
@@ -711,7 +711,7 @@ __lookup(struct radix_tree_node *slot, void ***results, unsigned long index,
711 } 711 }
712 712
713 shift -= RADIX_TREE_MAP_SHIFT; 713 shift -= RADIX_TREE_MAP_SHIFT;
714 slot = rcu_dereference(slot->slots[i]); 714 slot = rcu_dereference_raw(slot->slots[i]);
715 if (slot == NULL) 715 if (slot == NULL)
716 goto out; 716 goto out;
717 } 717 }
@@ -758,7 +758,7 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
758 unsigned long cur_index = first_index; 758 unsigned long cur_index = first_index;
759 unsigned int ret; 759 unsigned int ret;
760 760
761 node = rcu_dereference(root->rnode); 761 node = rcu_dereference_raw(root->rnode);
762 if (!node) 762 if (!node)
763 return 0; 763 return 0;
764 764
@@ -787,7 +787,7 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
787 slot = *(((void ***)results)[ret + i]); 787 slot = *(((void ***)results)[ret + i]);
788 if (!slot) 788 if (!slot)
789 continue; 789 continue;
790 results[ret + nr_found] = rcu_dereference(slot); 790 results[ret + nr_found] = rcu_dereference_raw(slot);
791 nr_found++; 791 nr_found++;
792 } 792 }
793 ret += nr_found; 793 ret += nr_found;
@@ -826,7 +826,7 @@ radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results,
826 unsigned long cur_index = first_index; 826 unsigned long cur_index = first_index;
827 unsigned int ret; 827 unsigned int ret;
828 828
829 node = rcu_dereference(root->rnode); 829 node = rcu_dereference_raw(root->rnode);
830 if (!node) 830 if (!node)
831 return 0; 831 return 0;
832 832
@@ -915,7 +915,7 @@ __lookup_tag(struct radix_tree_node *slot, void ***results, unsigned long index,
915 } 915 }
916 } 916 }
917 shift -= RADIX_TREE_MAP_SHIFT; 917 shift -= RADIX_TREE_MAP_SHIFT;
918 slot = rcu_dereference(slot->slots[i]); 918 slot = rcu_dereference_raw(slot->slots[i]);
919 if (slot == NULL) 919 if (slot == NULL)
920 break; 920 break;
921 } 921 }
@@ -951,7 +951,7 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
951 if (!root_tag_get(root, tag)) 951 if (!root_tag_get(root, tag))
952 return 0; 952 return 0;
953 953
954 node = rcu_dereference(root->rnode); 954 node = rcu_dereference_raw(root->rnode);
955 if (!node) 955 if (!node)
956 return 0; 956 return 0;
957 957
@@ -980,7 +980,7 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
980 slot = *(((void ***)results)[ret + i]); 980 slot = *(((void ***)results)[ret + i]);
981 if (!slot) 981 if (!slot)
982 continue; 982 continue;
983 results[ret + nr_found] = rcu_dereference(slot); 983 results[ret + nr_found] = rcu_dereference_raw(slot);
984 nr_found++; 984 nr_found++;
985 } 985 }
986 ret += nr_found; 986 ret += nr_found;
@@ -1020,7 +1020,7 @@ radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
1020 if (!root_tag_get(root, tag)) 1020 if (!root_tag_get(root, tag))
1021 return 0; 1021 return 0;
1022 1022
1023 node = rcu_dereference(root->rnode); 1023 node = rcu_dereference_raw(root->rnode);
1024 if (!node) 1024 if (!node)
1025 return 0; 1025 return 0;
1026 1026
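
Note: the idr and radix-tree conversions above use the new rcu_dereference_raw() rather than rcu_dereference_check(). These are library lookups whose callers may hold rcu_read_lock(), their own update-side lock, or (early in boot) nothing at all, so no single lockdep expression fits every caller; the _raw variant keeps the fetch and ordering semantics but opts out of the PROVE_RCU check, leaving protection to the caller. A hedged sketch of the same trade-off on an illustrative list; struct demo_slot, demo_table and demo_lookup() are made up:

    /* Illustrative: library-style lookup that cannot know its caller's locking. */
    #include <linux/rcupdate.h>

    struct demo_slot { struct demo_slot *next; int id; };

    static struct demo_slot *demo_table;

    static struct demo_slot *demo_lookup(int id)
    {
            struct demo_slot *p;

            /*
             * Callers may hold rcu_read_lock(), a private spinlock, or
             * nothing at all during early boot, so no lockdep condition
             * fits every caller: use the _raw variant and rely on the
             * callers to get their own protection right.
             */
            for (p = rcu_dereference_raw(demo_table); p;
                 p = rcu_dereference_raw(p->next))
                    if (p->id == id)
                            return p;
            return NULL;
    }
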
diff --git a/net/core/dev.c b/net/core/dev.c
index ec874218b206..bb1f1da2b8a7 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2041,7 +2041,7 @@ gso:
2041 rcu_read_lock_bh(); 2041 rcu_read_lock_bh();
2042 2042
2043 txq = dev_pick_tx(dev, skb); 2043 txq = dev_pick_tx(dev, skb);
2044 q = rcu_dereference(txq->qdisc); 2044 q = rcu_dereference_bh(txq->qdisc);
2045 2045
2046#ifdef CONFIG_NET_CLS_ACT 2046#ifdef CONFIG_NET_CLS_ACT
2047 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS); 2047 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
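
Note: the transmit-path hunk above, and the socket-filter, DECnet and IPv4 route-cache hunks that follow, switch to rcu_dereference_bh(), which pairs with rcu_read_lock_bh(): under PROVE_RCU it checks that bottom halves are disabled instead of requiring rcu_read_lock(). A hedged sketch of the pairing; struct demo_cfg and demo_cfg_ptr are illustrative:

    /* Illustrative pairing of rcu_read_lock_bh() with rcu_dereference_bh(). */
    #include <linux/rcupdate.h>

    struct demo_cfg { int mtu; };

    static struct demo_cfg *demo_cfg_ptr;

    static int demo_get_mtu(void)
    {
            struct demo_cfg *cfg;
            int mtu = 0;

            rcu_read_lock_bh();                     /* BH-disabled read-side section */
            cfg = rcu_dereference_bh(demo_cfg_ptr); /* checked against BH context */
            if (cfg)
                    mtu = cfg->mtu;
            rcu_read_unlock_bh();
            return mtu;
    }
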
diff --git a/net/core/filter.c b/net/core/filter.c
index 08db7b9143a3..3541aa48d21d 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -86,7 +86,7 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
86 return err; 86 return err;
87 87
88 rcu_read_lock_bh(); 88 rcu_read_lock_bh();
89 filter = rcu_dereference(sk->sk_filter); 89 filter = rcu_dereference_bh(sk->sk_filter);
90 if (filter) { 90 if (filter) {
91 unsigned int pkt_len = sk_run_filter(skb, filter->insns, 91 unsigned int pkt_len = sk_run_filter(skb, filter->insns,
92 filter->len); 92 filter->len);
@@ -521,7 +521,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
521 } 521 }
522 522
523 rcu_read_lock_bh(); 523 rcu_read_lock_bh();
524 old_fp = rcu_dereference(sk->sk_filter); 524 old_fp = rcu_dereference_bh(sk->sk_filter);
525 rcu_assign_pointer(sk->sk_filter, fp); 525 rcu_assign_pointer(sk->sk_filter, fp);
526 rcu_read_unlock_bh(); 526 rcu_read_unlock_bh();
527 527
@@ -536,7 +536,7 @@ int sk_detach_filter(struct sock *sk)
536 struct sk_filter *filter; 536 struct sk_filter *filter;
537 537
538 rcu_read_lock_bh(); 538 rcu_read_lock_bh();
539 filter = rcu_dereference(sk->sk_filter); 539 filter = rcu_dereference_bh(sk->sk_filter);
540 if (filter) { 540 if (filter) {
541 rcu_assign_pointer(sk->sk_filter, NULL); 541 rcu_assign_pointer(sk->sk_filter, NULL);
542 sk_filter_delayed_uncharge(sk, filter); 542 sk_filter_delayed_uncharge(sk, filter);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 794bcb897ff0..4c7d3f635ba7 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -89,6 +89,14 @@ int rtnl_is_locked(void)
89} 89}
90EXPORT_SYMBOL(rtnl_is_locked); 90EXPORT_SYMBOL(rtnl_is_locked);
91 91
92#ifdef CONFIG_PROVE_LOCKING
93int lockdep_rtnl_is_held(void)
94{
95 return lockdep_is_held(&rtnl_mutex);
96}
97EXPORT_SYMBOL(lockdep_rtnl_is_held);
98#endif /* #ifdef CONFIG_PROVE_LOCKING */
99
92static struct rtnl_link *rtnl_msg_handlers[NPROTO]; 100static struct rtnl_link *rtnl_msg_handlers[NPROTO];
93 101
94static inline int rtm_msgindex(int msgtype) 102static inline int rtm_msgindex(int msgtype)
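
Note: lockdep_rtnl_is_held() gives RCU users of RTNL-protected state a condition they can hand to rcu_dereference_check(): such pointers may be read either inside rcu_read_lock() or with the RTNL mutex held. A hedged sketch of how a caller might use it; struct demo_link and demo_link_ptr are illustrative, and as the hunk shows the helper is only built when CONFIG_PROVE_LOCKING=y, with the condition only evaluated under PROVE_RCU:

    /* Illustrative: a pointer updated under RTNL and read under RCU or RTNL. */
    #include <linux/rcupdate.h>
    #include <linux/rtnetlink.h>

    struct demo_link { int ifindex; };

    static struct demo_link *demo_link_ptr;

    static int demo_link_ifindex(void)
    {
            struct demo_link *link;

            /* Legal from an RCU reader or from code holding the RTNL mutex. */
            link = rcu_dereference_check(demo_link_ptr,
                                         rcu_read_lock_held() ||
                                         lockdep_rtnl_is_held());
            return link ? link->ifindex : -1;
    }
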
diff --git a/net/core/sock.c b/net/core/sock.c
index e1f6f225f012..305cba401ae6 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1073,7 +1073,8 @@ static void __sk_free(struct sock *sk)
1073 if (sk->sk_destruct) 1073 if (sk->sk_destruct)
1074 sk->sk_destruct(sk); 1074 sk->sk_destruct(sk);
1075 1075
1076 filter = rcu_dereference(sk->sk_filter); 1076 filter = rcu_dereference_check(sk->sk_filter,
1077 atomic_read(&sk->sk_wmem_alloc) == 0);
1077 if (filter) { 1078 if (filter) {
1078 sk_filter_uncharge(sk, filter); 1079 sk_filter_uncharge(sk, filter);
1079 rcu_assign_pointer(sk->sk_filter, NULL); 1080 rcu_assign_pointer(sk->sk_filter, NULL);
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index a03284061a31..a7bf03ca0a36 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -1155,8 +1155,8 @@ static int __dn_route_output_key(struct dst_entry **pprt, const struct flowi *fl
1155 1155
1156 if (!(flags & MSG_TRYHARD)) { 1156 if (!(flags & MSG_TRYHARD)) {
1157 rcu_read_lock_bh(); 1157 rcu_read_lock_bh();
1158 for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt; 1158 for (rt = rcu_dereference_bh(dn_rt_hash_table[hash].chain); rt;
1159 rt = rcu_dereference(rt->u.dst.dn_next)) { 1159 rt = rcu_dereference_bh(rt->u.dst.dn_next)) {
1160 if ((flp->fld_dst == rt->fl.fld_dst) && 1160 if ((flp->fld_dst == rt->fl.fld_dst) &&
1161 (flp->fld_src == rt->fl.fld_src) && 1161 (flp->fld_src == rt->fl.fld_src) &&
1162 (flp->mark == rt->fl.mark) && 1162 (flp->mark == rt->fl.mark) &&
@@ -1618,9 +1618,9 @@ int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
1618 if (h > s_h) 1618 if (h > s_h)
1619 s_idx = 0; 1619 s_idx = 0;
1620 rcu_read_lock_bh(); 1620 rcu_read_lock_bh();
1621 for(rt = rcu_dereference(dn_rt_hash_table[h].chain), idx = 0; 1621 for(rt = rcu_dereference_bh(dn_rt_hash_table[h].chain), idx = 0;
1622 rt; 1622 rt;
1623 rt = rcu_dereference(rt->u.dst.dn_next), idx++) { 1623 rt = rcu_dereference_bh(rt->u.dst.dn_next), idx++) {
1624 if (idx < s_idx) 1624 if (idx < s_idx)
1625 continue; 1625 continue;
1626 skb_dst_set(skb, dst_clone(&rt->u.dst)); 1626 skb_dst_set(skb, dst_clone(&rt->u.dst));
@@ -1654,12 +1654,12 @@ static struct dn_route *dn_rt_cache_get_first(struct seq_file *seq)
1654 1654
1655 for(s->bucket = dn_rt_hash_mask; s->bucket >= 0; --s->bucket) { 1655 for(s->bucket = dn_rt_hash_mask; s->bucket >= 0; --s->bucket) {
1656 rcu_read_lock_bh(); 1656 rcu_read_lock_bh();
1657 rt = dn_rt_hash_table[s->bucket].chain; 1657 rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain);
1658 if (rt) 1658 if (rt)
1659 break; 1659 break;
1660 rcu_read_unlock_bh(); 1660 rcu_read_unlock_bh();
1661 } 1661 }
1662 return rcu_dereference(rt); 1662 return rt;
1663} 1663}
1664 1664
1665static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_route *rt) 1665static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_route *rt)
@@ -1674,7 +1674,7 @@ static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_rou
1674 rcu_read_lock_bh(); 1674 rcu_read_lock_bh();
1675 rt = dn_rt_hash_table[s->bucket].chain; 1675 rt = dn_rt_hash_table[s->bucket].chain;
1676 } 1676 }
1677 return rcu_dereference(rt); 1677 return rcu_dereference_bh(rt);
1678} 1678}
1679 1679
1680static void *dn_rt_cache_seq_start(struct seq_file *seq, loff_t *pos) 1680static void *dn_rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index d62b05d33384..4f11faa5c824 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -287,12 +287,12 @@ static struct rtable *rt_cache_get_first(struct seq_file *seq)
287 if (!rt_hash_table[st->bucket].chain) 287 if (!rt_hash_table[st->bucket].chain)
288 continue; 288 continue;
289 rcu_read_lock_bh(); 289 rcu_read_lock_bh();
290 r = rcu_dereference(rt_hash_table[st->bucket].chain); 290 r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
291 while (r) { 291 while (r) {
292 if (dev_net(r->u.dst.dev) == seq_file_net(seq) && 292 if (dev_net(r->u.dst.dev) == seq_file_net(seq) &&
293 r->rt_genid == st->genid) 293 r->rt_genid == st->genid)
294 return r; 294 return r;
295 r = rcu_dereference(r->u.dst.rt_next); 295 r = rcu_dereference_bh(r->u.dst.rt_next);
296 } 296 }
297 rcu_read_unlock_bh(); 297 rcu_read_unlock_bh();
298 } 298 }
@@ -314,7 +314,7 @@ static struct rtable *__rt_cache_get_next(struct seq_file *seq,
314 rcu_read_lock_bh(); 314 rcu_read_lock_bh();
315 r = rt_hash_table[st->bucket].chain; 315 r = rt_hash_table[st->bucket].chain;
316 } 316 }
317 return rcu_dereference(r); 317 return rcu_dereference_bh(r);
318} 318}
319 319
320static struct rtable *rt_cache_get_next(struct seq_file *seq, 320static struct rtable *rt_cache_get_next(struct seq_file *seq,
@@ -2689,8 +2689,8 @@ int __ip_route_output_key(struct net *net, struct rtable **rp,
2689 hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif, rt_genid(net)); 2689 hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif, rt_genid(net));
2690 2690
2691 rcu_read_lock_bh(); 2691 rcu_read_lock_bh();
2692 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth; 2692 for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth;
2693 rth = rcu_dereference(rth->u.dst.rt_next)) { 2693 rth = rcu_dereference_bh(rth->u.dst.rt_next)) {
2694 if (rth->fl.fl4_dst == flp->fl4_dst && 2694 if (rth->fl.fl4_dst == flp->fl4_dst &&
2695 rth->fl.fl4_src == flp->fl4_src && 2695 rth->fl.fl4_src == flp->fl4_src &&
2696 rth->fl.iif == 0 && 2696 rth->fl.iif == 0 &&
@@ -3008,8 +3008,8 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
3008 if (!rt_hash_table[h].chain) 3008 if (!rt_hash_table[h].chain)
3009 continue; 3009 continue;
3010 rcu_read_lock_bh(); 3010 rcu_read_lock_bh();
3011 for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt; 3011 for (rt = rcu_dereference_bh(rt_hash_table[h].chain), idx = 0; rt;
3012 rt = rcu_dereference(rt->u.dst.rt_next), idx++) { 3012 rt = rcu_dereference_bh(rt->u.dst.rt_next), idx++) {
3013 if (!net_eq(dev_net(rt->u.dst.dev), net) || idx < s_idx) 3013 if (!net_eq(dev_net(rt->u.dst.dev), net) || idx < s_idx)
3014 continue; 3014 continue;
3015 if (rt_is_expired(rt)) 3015 if (rt_is_expired(rt))
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index f126d18dbdc4..939471ef8d50 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -508,7 +508,7 @@ static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk,
508 struct sk_filter *filter; 508 struct sk_filter *filter;
509 509
510 rcu_read_lock_bh(); 510 rcu_read_lock_bh();
511 filter = rcu_dereference(sk->sk_filter); 511 filter = rcu_dereference_bh(sk->sk_filter);
512 if (filter != NULL) 512 if (filter != NULL)
513 res = sk_run_filter(skb, filter->insns, filter->len); 513 res = sk_run_filter(skb, filter->insns, filter->len);
514 rcu_read_unlock_bh(); 514 rcu_read_unlock_bh();
diff --git a/security/keys/gc.c b/security/keys/gc.c
index 4770be375ffe..19902319d097 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -77,7 +77,8 @@ static bool key_gc_keyring(struct key *keyring, time_t limit)
77 goto dont_gc; 77 goto dont_gc;
78 78
79 /* scan the keyring looking for dead keys */ 79 /* scan the keyring looking for dead keys */
80 klist = rcu_dereference(keyring->payload.subscriptions); 80 klist = rcu_dereference_check(keyring->payload.subscriptions,
81 lockdep_is_held(&key_serial_lock));
81 if (!klist) 82 if (!klist)
82 goto dont_gc; 83 goto dont_gc;
83 84
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index 8ec02746ca99..e814d2109f8e 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -151,7 +151,9 @@ static void keyring_destroy(struct key *keyring)
151 write_unlock(&keyring_name_lock); 151 write_unlock(&keyring_name_lock);
152 } 152 }
153 153
154 klist = rcu_dereference(keyring->payload.subscriptions); 154 klist = rcu_dereference_check(keyring->payload.subscriptions,
155 rcu_read_lock_held() ||
156 atomic_read(&keyring->usage) == 0);
155 if (klist) { 157 if (klist) {
156 for (loop = klist->nkeys - 1; loop >= 0; loop--) 158 for (loop = klist->nkeys - 1; loop >= 0; loop--)
157 key_put(klist->keys[loop]); 159 key_put(klist->keys[loop]);
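
Note: the keys hunks show that the rcu_dereference_check() condition need not name a lock at all. The garbage collector documents that key_serial_lock is held, while keyring_destroy() documents that either an RCU read-side critical section is in effect or the keyring's usage count has already reached zero, so no new readers can appear. A hedged, generic form of that last-reference idiom; struct demo_obj and demo_destroy() are illustrative:

    /* Illustrative: tearing down the last reference needs no rcu_read_lock(). */
    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <asm/atomic.h>

    struct demo_payload { int data; };

    struct demo_obj {
            atomic_t usage;
            struct demo_payload *payload;
    };

    static void demo_destroy(struct demo_obj *obj)
    {
            struct demo_payload *p;

            /*
             * Safe either inside rcu_read_lock() or once the refcount has
             * hit zero and the object is unreachable; the second condition
             * is what keeps PROVE_RCU quiet on the teardown path.
             */
            p = rcu_dereference_check(obj->payload,
                                      rcu_read_lock_held() ||
                                      atomic_read(&obj->usage) == 0);
            kfree(p);
    }
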